date_collected (stringclasses 1) | repo_name (stringlengths 6-116) | file_name (stringlengths 2-220) | file_contents (stringlengths 13-357k) | prompts (sequence) |
---|---|---|---|---|
2024-01-10 | sebbemikkelsen/Chat_hugchatAPI | handle_document.py | from langchain.document_loaders import DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.vectorstores import Chroma
directory = './docs'
def load_docs(directory):
loader = DirectoryLoader(directory)
documents = loader.load()
return documents
documents = load_docs(directory)
def split_docs(documents,chunk_size=1000,chunk_overlap=20):
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
docs = text_splitter.split_documents(documents)
return docs
docs = split_docs(documents)
embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
db = Chroma.from_documents(docs, embeddings)
#query = "How many books is there in the library?"
query = "How many nations are there?"
#matching_docs = db.similarity_search_with_score(query, k=4)
matching_docs = db.similarity_search(query)
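# similarity_search returns a list of langchain Document objects, so fields are read as attributes (.page_content, .metadata)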
print(matching_docs[0].page_content)
| [] |
2024-01-10 | sebbemikkelsen/Chat_hugchatAPI | taxing_law_chat.py | from langchain.embeddings import HuggingFaceEmbeddings
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.vectorstores import Chroma
from dotenv import load_dotenv
import os
from HC_bot import HC_bot
load_dotenv()
def get_vectorstore(embedding_model, db_name):
embeddings = SentenceTransformerEmbeddings(model_name=embedding_model)
db = Chroma(persist_directory=db_name, embedding_function=embeddings)
return db
def set_custom_prompt(history, context, question):
custom_prompt_template = f"""Use the context to answer the user's question. Use the history to know what has been discussed.
If the context can't answer the question, say that you don't know.
Chat history: {history}
Context: {context}
Question: {question}
Only return the helpful answer based on the context provided. Otherwise, say I don't know.
Helpful answer:"""
return custom_prompt_template
def get_matching_docs(question):
db_directories = [f"test_chroma_db_batch_{x}" for x in range(16)]
embeddings = HuggingFaceEmbeddings(model_name="KBLab/sentence-bert-swedish-cased")
matching_docs = []
# Perform similarity search on each database
for db_directory in db_directories:
db = Chroma(persist_directory=db_directory, embedding_function=embeddings)
#matching_docs += db.similarity_search(question)
matching_docs.extend(db.similarity_search(question)[:1])  # keep the top hit from this database, if any
return matching_docs
def create_chatbot():
email = os.getenv("EMAIL_HF")
pw = os.getenv("PASS_HF")
bot = HC_bot(email, pw)
bot.new_chat()
return bot
def ask_bot(question, history, source=False):
matching_docs = get_matching_docs(question)
#context = matching_docs[0].page_content
context = ""
for doc in matching_docs:
context += doc.page_content
#print(matching_docs)
#print(context)
prompt = set_custom_prompt(history, context, question)
bot = create_chatbot()
ans = bot.one_chat(prompt)
if source:
return ans, matching_docs[0].metadata['source']
else:
return ans
def chat():
history = ""
query = input(">>> ")
while query != "quit":
ans = ask_bot(query, history)
add_to_hist = query + ans
history = history + add_to_hist
print("=====================================================")
print(ans)
print("=====================================================")
query = input(">>> ")
def main():
chat()
if __name__=='__main__':
main()
| [
"Use the context to answer the user's question. Use the history to know what has been discussed.\n If the context can't answer the question, say that you don't know.\n Chat history: PLACEHOLDER\n Context: PLACEHOLDER\n Question: PLACEHOLDER\n\n Only return the helpful answer based on the context provided. Otherwise, say I don't know.\n Helpful answer:"
] |
2024-01-10 | sebbemikkelsen/Chat_hugchatAPI | working_chat_swe.py | from langchain.vectorstores import Chroma
from dotenv import load_dotenv
from langchain.embeddings import HuggingFaceEmbeddings
import requests
import os
load_dotenv()
# Define the list of database directories
#db_directories = ["./chroma_db_swe1_batch_0", "./chroma_db_swe1_batch_1", "./chroma_db_swe1_batch_2", "./chroma_db_swe1_batch_3", "./chroma_db_swe1_batch_4", "./chroma_db_swe1_batch_5", "./chroma_db_swe1_batch_6", "./chroma_db_swe1_batch_7"]
db_directories = [f"test_chroma_db_batch_{x}" for x in range(16)]
embeddings = HuggingFaceEmbeddings(model_name="KBLab/sentence-bert-swedish-cased")
# Define your query
#query = "När anses bolaget bildat?"
query = "vad ska jag göra med fastighetstaxeringsavin?"
# Initialize the list to store matching documents
matching_docs = []
# Perform similarity search on each database
for db_directory in db_directories:
db = Chroma(persist_directory=db_directory, embedding_function=embeddings)
matching_docs += db.similarity_search(query)
# Tokens
HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
os.environ["HUGGINGFACEHUB_API_TOKEN"] = HUGGINGFACEHUB_API_TOKEN
au = "Bearer " + HUGGINGFACEHUB_API_TOKEN
API_URL = "https://api-inference.huggingface.co/models/timpal0l/mdeberta-v3-base-squad2"
headers = {"Authorization": au}
#Get response
def query1(payload):
response = requests.post(API_URL, headers=headers, json=payload)
return response.json()
output = query1({
"inputs": {
"question": query,
"context": matching_docs[0].page_content
},
})
print(output)
| [] |
2024-01-10 | sebbemikkelsen/Chat_hugchatAPI | chat_complete.py | from langchain import PromptTemplate
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.chains import RetrievalQA
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.vectorstores import Chroma
from dotenv import load_dotenv
from langchain import HuggingFaceHub
from langchain.memory import ConversationBufferMemory
import os
load_dotenv()
def get_vectorstore(embedding_model, db_name):
embeddings = SentenceTransformerEmbeddings(model_name=embedding_model)
db = Chroma(persist_directory=db_name, embedding_function=embeddings)
return db
custom_prompt_template = """Use the context to answer the user's question. Use the history to know what it's about.
If you don't know the answer, say that you don't know, don't try to make up an answer.
Chat history: {history}
Context: {context}
Question: {question}
Only return the helpful answer below and nothing else.
Helpful answer:
"""
def set_custom_prompt():
"""
Prompt template for QA retrieval for each vectorstore
"""
prompt = PromptTemplate(template=custom_prompt_template,
input_variables=['history', 'context', 'question'])
return prompt
#Retrieval QA Chain
def retrieval_qa_chain(llm, prompt, db):
qa_chain = RetrievalQA.from_chain_type(llm=llm,
chain_type='stuff',
retriever=db.as_retriever(search_kwargs={'k': 2}),
return_source_documents=True,
chain_type_kwargs={
"verbose": True,
'prompt': prompt,
"memory": ConversationBufferMemory(memory_key="history", input_key="question")
}
)
return qa_chain
#Loading the model
def load_llm():
# Load the locally downloaded model here
HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
os.environ["HUGGINGFACEHUB_API_TOKEN"] = HUGGINGFACEHUB_API_TOKEN
repo_id = "google/flan-t5-xxl"
#repo_id = "google/mt5-base"
llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0.5, "min_tokens": 200})
return llm
#QA Model Function
def qa_bot():
embedding_model = "sentence-transformers/all-MiniLM-L6-v2"
db = get_vectorstore(embedding_model, "chroma_db")
llm = load_llm()
qa_prompt = set_custom_prompt()
qa = retrieval_qa_chain(llm, qa_prompt, db)
return qa
#output function
def final_result(query):
qa_result = qa_bot()
response = qa_result({'query': query})
return response
def main():
#query = "How many books are there in the library?"
#res = final_result(query)
#print(res)
qa_result = qa_bot()
while True:
query = input("Question: ")
if query == "quit":
break
response = qa_result({'query': query})
print(response)
if __name__=='__main__':
main()
| [
"question",
"Use the context to answer the user's question. Use the history to know what it's about.\nIf you don't know the answer, say that you don't know, don't try to make up an answer.\n\nChat history: {history}\nContext: {context}\nQuestion: {question}\n\nOnly return the helpful answer below and nothing else.\nHelpful answer:\n",
"t know the answer, say that you don",
"context",
"s question. Use the history to know what it"
] |
2024-01-10 | sebbemikkelsen/Chat_hugchatAPI | project2~taxing_law_chat.py | from langchain.embeddings import HuggingFaceEmbeddings
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.vectorstores import Chroma
from dotenv import load_dotenv
import os
from HC_bot import HC_bot
load_dotenv()
def get_vectorstore(embedding_model, db_name):
embeddings = SentenceTransformerEmbeddings(model_name=embedding_model)
db = Chroma(persist_directory=db_name, embedding_function=embeddings)
return db
def set_custom_prompt(history, context, question):
#custom_prompt_template = f"""You are a Swedish taxing advisor. Use the context to answer the user's question.
#If the context can't answer the question, say that you don't know.
#Chat history: {history}
##Context: {context}
#Question: {question}
#Only return the helpful answer in swedish, based on the context provided. Otherwise, say "I don't know".
#Helpful answer:"""
custom_prompt_template = f"""Du är en Svensk skatterådgivare. Använd den givna lagtexten för att svara på frågorna.
Om lagen inte kan svara på frågan, säg att du inte vet svaret.
Historik: {history}
Lagtext: {context}
Fråga: {question}
Ge endast hjälpsamma svar på svenska, baserat på lagtexten. Annars, säg "Jag vet inte".
Hjälpsamt svar:"""
return custom_prompt_template
def get_matching_docs(question):
db_directories = [f"test_chroma_db_batch_{x}" for x in range(11)]
embeddings = HuggingFaceEmbeddings(model_name="KBLab/sentence-bert-swedish-cased")
matching_docs = []
# Perform similarity search on each database
for db_directory in db_directories:
db = Chroma(persist_directory=db_directory, embedding_function=embeddings)
#matching_docs += db.similarity_search(question)
#matching_docs.append(db.similarity_search(question)[0])
#print(db.similarity_search(question))
#print('===============================================================')
#print('===============================================================')
doc = db.similarity_search(question)
if len(doc) > 0:
#matching_docs.append(db.similarity_search(question)[0])
matching_docs.append(doc[0])
return matching_docs
def create_chatbot():
email = os.getenv("EMAIL_HF")
pw = os.getenv("PASS_HF")
bot = HC_bot(email, pw)
bot.new_chat()
return bot
def ask_bot(question, history, source=False):
matching_docs = get_matching_docs(question)
context = matching_docs[0].page_content + matching_docs[1].page_content
#context = ""
#for doc in matching_docs:
# context += doc.page_content
#print(matching_docs)
#print(context)
prompt = set_custom_prompt(history, context, question)
bot = create_chatbot()
ans = bot.one_chat(prompt)
if source:
return ans, matching_docs[0].metadata['source'], matching_docs[1].metadata['source']
else:
return ans
def chat():
history = ""
query = input(">>> ")
while query != "quit":
ans = ask_bot(query, history)
add_to_hist = query + ans
history = history + add_to_hist
print("=====================================================")
print(ans)
print("=====================================================")
query = input(">>> ")
def main():
chat()
if __name__=='__main__':
main()
| [
"Du är en Svensk skatterådgivare. Använd den givna lagtexten för att svara på frågorna.\n Om lagen inte kan svara på frågan, säg att du inte vet svaret.\n Historik: PLACEHOLDER\n Lagtext: PLACEHOLDER\n Fråga: PLACEHOLDER\n\n Ge endast hjälpsamma svar på svenska, baserat på lagtexten. Annars, säg \"Jag vet inte\".\n Hjälpsamt svar:"
] |
2024-01-10 | sebbemikkelsen/Chat_hugchatAPI | working_chat_HC.py | from langchain import PromptTemplate
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.chains import RetrievalQA
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.vectorstores import Chroma
from dotenv import load_dotenv
from langchain import HuggingFaceHub
from langchain.memory import ConversationBufferMemory
import os
from HC_bot import HC_bot
load_dotenv()
def get_vectorstore(embedding_model, db_name):
embeddings = SentenceTransformerEmbeddings(model_name=embedding_model)
db = Chroma(persist_directory=db_name, embedding_function=embeddings)
return db
def set_custom_prompt(history, context, question):
"""
Prompt template for QA retrieval for each vectorstore
"""
custom_prompt_template = f"""Use the context to answer the user's question. Use the history to know what has been discussed.
If the context can't answer the question, say that you don't know.
Chat history: {history}
Context: {context}
Question: {question}
Only return the helpful answer below and nothing else.
Helpful answer:"""
return custom_prompt_template
def get_matching_docs(question):
db = get_vectorstore("sentence-transformers/all-MiniLM-L6-v2", "chroma_db")
matching_docs = db.similarity_search(question)
return matching_docs
def create_chatbot():
email = os.getenv("EMAIL_HF")
pw = os.getenv("PASS_HF")
bot = HC_bot(email, pw)
bot.new_chat()
return bot
def ask_bot(question, history):
matching_docs = get_matching_docs(question)
context = matching_docs[0].page_content
prompt = set_custom_prompt(history, context, question)
bot = create_chatbot()
ans = bot.one_chat(prompt)
return ans
def chat():
history = ""
query = input(">>> ")
while query != "quit":
ans = ask_bot(query, history)
add_to_hist = query + ans
history = history + add_to_hist
print("=====================================================")
print(ans)
print("=====================================================")
query = input(">>> ")
def main():
chat()
"""
qa_result = qa_bot()
while True:
query = input("Question: ")
if query == "quit":
break
response = qa_result({'query': query})
print(response)
"""
if __name__=='__main__':
main()
| [
"Use the context to answer the user's question. Use the history to know what has been discussed.\n If the context can't answer the question, say that you don't know.\n Chat history: PLACEHOLDER\n Context: PLACEHOLDER\n Question: PLACEHOLDER\n\n Only return the helpful answer below and nothing else.\n Helpful answer:"
] |
2024-01-10 | LoveNui/Chatbot-with-text-voice-chatting | bot_src~answer.py | import openai
from langchain.agents import AgentType, initialize_agent
from langchain.agents import initialize_agent, Tool
from langchain import OpenAI, SerpAPIWrapper
from dotenv import load_dotenv
from bot_src.private_env import OPENAI_KEY, SERP_API_KEY
import threading
import json
import requests
import os
load_dotenv()
openai.api_key = OPENAI_KEY
os.environ["SERPER_API_KEY"] = SERP_API_KEY
llm = OpenAI(openai_api_key= OPENAI_KEY, temperature=0)
search = SerpAPIWrapper()
ai_bot_list = ["September 2021","access to real-time","AI chatbot","I'm not connected to the Internet"]
default_answer = "I'm sorry. Unfortunately, I'm unable to provide accurate information as my internet connection is currently not stable. I will investigate further and get back to you ASAP."
def search_internet(query):
query_data = set_answer_box(query)
return query_data
tools = [
Tool(
name="Intermediate Answer",
# func=search.run,
func=search_internet,
description="useful for when you need to ask with search",
)
]
tools_organic = [
Tool(
name="Intermediate Answer",
func=search.run,
description="useful for when you need to ask with search",
)
]
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
agent_organic = initialize_agent(tools_organic, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
def merge_data(data):
result = {}
for key in data.keys():
if len(str(data[key])) < 250:
result[key] = data[key]
return result
def set_answer_box(query):
query_data = {}
params = {
"engine": "google",
"q": query,
"api_key": SERP_API_KEY,
"answer_boxes": 1
}
# Send the request to the SerpAPI
response = requests.get("https://serpapi.com/search", params=params)
# Parse the JSON response
data = json.loads(response.text)
if "answer_box" in data.keys():
query_data = merge_data(data["answer_box"])
else:
pass
return query_data
def langchain_func(text):
query_data = set_answer_box(text)
if query_data == {}:
result_answer = agent_organic.run(text)
else:
result_answer = agent.run(text)
return result_answer
def get_result_openai(message_box):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages = message_box
)
openai_answer = response.choices[0]["message"]["content"]
return openai_answer
def check_answer_ai_bot(sentence, word_list):
for word in word_list:
if word in sentence:
return True
return False
# main function
def geneartor_answer(message, system_prompt, text):
message_box = message
openai_answer = ""
result_answer = ""
print("---------------------- openai_answer ------------------------")
openai_answer = get_result_openai(message_box=message_box)
print(openai_answer)
if "Cococa-" in openai_answer or "cococa-" in openai_answer:
print("---------------------- Serpai_answer ------------------------")
result_answer = langchain_func(text)
print(result_answer)
message_box.pop(-2)
message_box.append({"role": "assistant", "content": result_answer})
message_box.append({"role": "system", "content": system_prompt})
return result_answer, message_box
elif check_answer_ai_bot(openai_answer, ai_bot_list):
message_box.pop(-2)
message_box.append({"role": "assistant", "content": default_answer})
message_box.append({"role": "system", "content": system_prompt})
return default_answer, message_box
else:
message_box.pop(-2)
message_box.append({"role": "assistant", "content": openai_answer})
message_box.append({"role": "system", "content": system_prompt})
return openai_answer, message_box | [
"I'm sorry. Unfortunately, I'm unable to provide accurate information as my internet connection is currently not stable. I will investigate further and get back to you ASAP."
] |
2024-01-10 | sharkwyf/FrozenBiLM | model~mineclip~mineclip~clip.py | """
Adapted from OpenAI CLIP implementation: https://github.com/openai/CLIP
"""
from __future__ import annotations
from collections import OrderedDict
import numpy as np
import torch
from torch import nn
from .pos_embed import interpolate_resize_pos_embed
from .tokenization import tokenize_batch
import model.mineclip.utils as U
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = nn.LayerNorm(d_model)
self.mlp = nn.Sequential(
OrderedDict(
[
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model)),
]
)
)
self.ln_2 = nn.LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: torch.Tensor):
self.attn_mask = (
self.attn_mask.to(dtype=x.dtype, device=x.device)
if self.attn_mask is not None
else None
)
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x: torch.Tensor):
x = x + self.attention(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
class Transformer(nn.Module):
def __init__(
self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None
):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.Sequential(
*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)]
)
def forward(self, x: torch.Tensor):
return self.resblocks(x)
class VisionTransformer(nn.Module):
def __init__(
self,
resolution: int,
patch_size: int,
width: int,
layers: int,
heads: int,
output_dim: int,
):
super().__init__()
self._resolution = resolution
self._patch_size = patch_size
self.output_dim = output_dim
self.conv1 = nn.Conv2d(
in_channels=3,
out_channels=width,
kernel_size=patch_size,
stride=patch_size,
bias=False,
)
scale = width**-0.5
self.cls_token = nn.Parameter(scale * torch.randn(width))
self.pos_embed = nn.Parameter(
scale * torch.randn((resolution // patch_size) ** 2 + 1, width)
)
self.ln_pre = nn.LayerNorm(width)
self.blocks = nn.Sequential(
*[ResidualAttentionBlock(width, heads) for _ in range(layers)]
)
self.ln_post = nn.LayerNorm(width)
self.projection = nn.Parameter(scale * torch.randn(width, output_dim))
def resize_pos_embed(self, new_resolution):
"""
NOTE: call this method AFTER you load pretrained weights!
"""
if isinstance(new_resolution, int):
new_resolution = (new_resolution, new_resolution)
else:
assert len(new_resolution) == 2
for r in new_resolution:
assert (
r % self._patch_size == 0
), f"{new_resolution} is not divisible by {self._patch_size}"
with torch.no_grad():
old_embed = self.pos_embed.data.detach()
cls_embed, old_embed = old_embed[:1], old_embed[1:]
new_embed = interpolate_resize_pos_embed(
old_embed,
self._resolution // self._patch_size,
[r // self._patch_size for r in new_resolution],
)
self.pos_embed = nn.Parameter(torch.cat([cls_embed, new_embed], dim=0))
def forward(self, x: torch.Tensor):
x = self.conv1(x) # shape = [*, width, grid, grid]
B = x.size(0)
x = x.reshape(B, x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat(
[self.cls_token.repeat((B, 1, 1)), x], dim=1
) # shape = [*, grid ** 2 + 1, width]
x = x + self.pos_embed
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.blocks(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_post(x[:, 0, :])
if self.projection is not None:
x = x @ self.projection
return x
class GPT(nn.Module):
def __init__(
self,
embed_dim: int,
context_length: int,
vocab_size: int,
layers: int,
width: int,
heads: int,
is_discrete_text: bool = True,
):
"""
Args:
is_discrete_text: True to use regular discrete text tokens;
False for a video sequence of image features, in which case `vocab_size`
will be interpreted as the dim of each image feature.
"""
super().__init__()
self.context_length = context_length
self._width = width
self._layers = layers
self.vocab_size = vocab_size
self._is_discrete_text = is_discrete_text
if is_discrete_text:
self.token_embedding = nn.Embedding(vocab_size, width)
else:
self.token_embedding = nn.Linear(vocab_size, width, bias=False)
self.pos_embed = nn.Parameter(torch.empty(self.context_length, width))
self.blocks = nn.Sequential(
*[
ResidualAttentionBlock(
width, heads, attn_mask=self.build_attention_mask()
)
for _ in range(layers)
]
)
self.ln_final = nn.LayerNorm(width)
self.projection = nn.Parameter(torch.empty(width, embed_dim))
self.initialize_parameters()
def initialize_parameters(self):
if self._is_discrete_text:
nn.init.normal_(self.token_embedding.weight, std=0.02)
nn.init.normal_(self.pos_embed, std=0.01)
proj_std = (self._width**-0.5) * ((2 * self._layers) ** -0.5)
attn_std = self._width**-0.5
fc_std = (2 * self._width) ** -0.5
for block in self.blocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
if self.projection is not None:
nn.init.normal_(self.projection, std=self._width**-0.5)
def build_attention_mask(self):
# lazily create causal attention mask, with full attention between the vision tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float("-inf"))
mask.triu_(1) # zero out the lower diagonal
return mask
def forward(self, text):
x = self.token_embedding(text) # [batch_size, n_ctx, d_model]
assert (
x.size(1) <= self.context_length
), f"{x.size(1)} exceeds context length {self.context_length}"
x = x + self.pos_embed # x = x + self.pos_embed[: x.size(1)]
x = x.permute(1, 0, 2) # NLD -> LND
x = self.blocks(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x)
# x.shape = [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
if self._is_discrete_text:
x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.projection
else:
# last token will be the GPT summary
x = x[:, -1] @ self.projection
return x
class CLIP(nn.Module):
def __init__(
self,
embed_dim: int,
# vision
image_resolution: int,
vision_layers: int,
vision_width: int,
vision_patch_size: int,
# text
context_length: int,
vocab_size: int,
text_layers: int,
text_width: int,
text_heads: int,
):
super().__init__()
vision_heads = vision_width // 64
self.vision_model = VisionTransformer(
resolution=image_resolution,
patch_size=vision_patch_size,
width=vision_width,
layers=vision_layers,
heads=vision_heads,
output_dim=embed_dim,
)
self.text_model = GPT(
embed_dim=embed_dim,
context_length=context_length,
vocab_size=vocab_size,
layers=text_layers,
width=text_width,
heads=text_heads,
)
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
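# Learnable softmax temperature, initialized to ln(1/0.07) following the original CLIP setup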
def encode_image(self, image):
return self.vision_model(image)
def tokenize_text(self, text: str | list[str]):
if isinstance(text, list):
assert len(text) > 0
assert isinstance(text[0], str), "only supports str or list[str]"
return tokenize_batch(text, max_length=77, language_model="clip")
def encode_text(self, text):
if isinstance(text, str) or isinstance(text, list):
tokens = self.tokenize_text(text)
return self.encode_text(tokens.to(device=U.get_device(self.text_model)))
elif text.dtype == torch.long:
return self.text_model(text)
else:
return text
def forward(self, image, text):
if image.ndim == 2:
image_features = image
else:
image_features = self.encode_image(image)
if text.dtype == torch.long:
text_features = self.encode_text(text)
else:
text_features = text
# normalized features
image_features = image_features / image_features.norm(dim=1, keepdim=True)
text_features = text_features / text_features.norm(dim=1, keepdim=True)
# cosine similarity as logits
logit_scale = self.logit_scale.exp()
logits_per_image = logit_scale * image_features @ text_features.t()
logits_per_text = logits_per_image.t()
# shape = [global_batch_size, global_batch_size]
return logits_per_image, logits_per_text
@torch.no_grad()
def clamp_logit_scale(self, value=100):
"""
Follow OpenAI CLIP paper's trick to prevent training instability (sec 2.5)
"""
self.logit_scale.data.clamp_(-np.log(value), np.log(value))
| [] |
2024-01-10 | lorneking/discord-bot | src~discordbot.py | # ChatGPT Discord Bot
# 2023 Lorne King
import discord
from discord.ext import commands
import openai
from openai import OpenAI
import os
# Initialize GPT-4 API client
openai.api_key = os.getenv("OPENAI_API_KEY")
client = OpenAI()
# Create an instance of the intents class
intents = discord.Intents.default()
#intents = discord.Intents.none()
intents.message_content = True
intents.messages = True
# Initialize Discord Bot
bot = commands.Bot(command_prefix='/', intents=intents)
supported_models = ["gpt-3.5-turbo", "gpt-4", "gpt-4-1106-preview"] # List of supported models
@bot.event
async def on_ready():
print(f'We have logged in as {bot.user}')
@bot.command()
async def ask(ctx, model="gpt-4", *, question):
if model not in supported_models:
await ctx.send("Invalid model selected. Please choose from: " + ", ".join(supported_models))
return
try:
# Call GPT API with the specified model
completion = client.chat.completions.create(
model=model,
messages=[
{"role": "system", "content": question}
]
)
await ctx.send(completion.choices[0].message.content)
except Exception as e:
print(f"An error occurred: {e}")
await ctx.send("An error occurred while processing your request.")
# Run the bot
bot.run(os.getenv("DISCORD_BOT_TOKEN"))
| [] |
2024-01-10 | antocarazeanu/bestem-hack | recomandari.py | import pandas as pd
from surprise import SVD, Dataset, Reader
from surprise.model_selection import train_test_split
from collections import defaultdict
# Load the data from an Excel file
file_path = 'tr11.xlsx' # Update the file path to your Excel file
data = pd.read_excel(file_path)
# Format the 'Date' column
data['Date'] = pd.to_datetime(data['Date'], format='%Y-%m-%d %H:%M:%S')
# Ensure 'Customer ID' and 'Product_ID' are treated as categorical variables
data['Customer ID'] = data['Customer ID'].astype('category')
data['Product_ID'] = data['Product_ID'].astype('category')
# Create a user-item matrix
user_item_matrix = data.pivot_table(index='Customer ID', columns='Product_ID', values='Quantity', fill_value=0)
# Load the data into Surprise format
reader = Reader(rating_scale=(0, user_item_matrix.values.max()))
data_surprise = Dataset.load_from_df(data[['Customer ID', 'Product_ID', 'Quantity']], reader)
trainset, testset = train_test_split(data_surprise, test_size=0.25)
# Train an SVD model
model = SVD()
model.fit(trainset)
# Function to get top N recommendations for each user
def get_top_n_recommendations(predictions, n=1):
top_n = defaultdict(list)
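# Each Surprise prediction unpacks as (uid, iid, true_rating, estimated_rating, details)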
for uid, iid, true_r, est, _ in predictions:
top_n[uid].append((iid, est))
for uid, user_ratings in top_n.items():
user_ratings.sort(key=lambda x: x[1], reverse=True)
top_n[uid] = user_ratings[:n]
return top_n
# Test the model and get the top recommendation for each user
predictions = model.test(testset)
top_n_recommendations = get_top_n_recommendations(predictions, n=1)
# Write recommendations to a text file
with open('customer_recommendations.txt', 'w') as file:
for uid, user_ratings in top_n_recommendations.items():
product_id, _ = user_ratings[0]
file.write(f"Customer Id: {uid}, we recommend you to buy Product ID: {product_id}\n")
# Function to fetch product description
def get_product_description(product_id):
# Assuming 'Description' is the column name for product descriptions
if product_id in data['Product_ID'].values:
return data[data['Product_ID'] == product_id]['Description'].iloc[0]
else:
return "Description not found."
# Try to import OpenAI's GPT-3 for generating recommendations
import openai
openai.api_key = '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'
# Function to generate a personalized recommendation message using OpenAI's API
def generate_personalized_message(customer_id, product_id):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": f"Write a personalized product recommendation message for a customer. The customer's ID is {customer_id}, and the recommended product ID is {product_id}."}
]
)
# Extract the message from the response
message = response.choices[0].message['content']
return message
# Convert customer_id_input to the appropriate data type before comparison
customer_id_input = input("Please enter a Customer ID to get a personalized recommendation: ")
try:
# Assuming customer IDs are integers, convert the input to an integer
customer_id_input = int(customer_id_input)
except ValueError:
print("Please enter a valid Customer ID.")
exit()
# Check if the converted customer_id_input is in the top_n_recommendations
if customer_id_input in top_n_recommendations:
product_id, _ = top_n_recommendations[customer_id_input][0]
product_description = get_product_description(product_id)
personalized_message = generate_personalized_message(customer_id_input, product_description)
print(personalized_message)
with open('enhanced_customer.txt','w') as output_file:
output_file.write(personalized_message)
else:
print(f"Customer ID {customer_id_input} not found in the recommendations. Please ensure the Customer ID is correct and try again.")
| [
"Write a personalized product recommendation message for a customer. The customer's ID is PLACEHOLDER, and the recommended product ID is PLACEHOLDER.",
"You are a helpful assistant."
] |
2024-01-10 | antocarazeanu/bestem-hack | testarefinala.py | import numpy as np
from prophet import Prophet
from scipy import stats
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
from surprise import SVD, Dataset, Reader
from surprise.model_selection import train_test_split
from collections import defaultdict
if __name__ == '__main__':
data = pd.read_csv('sales_and_eodStocksSheet1.csv', low_memory=False)
# Convert the 'Date' column to datetime format
data['Date'] = pd.to_datetime(data['Date'])
# Filter out products that were sold only on one day in a year or sold few in total
data = data.groupby('Product_ID').filter(lambda x: len(x) > 1 and x['Sales'].sum() > 10)
def detect_anomalies(data):
data['prev_stock'] = data['EndOfDayStock'].shift(1)
stock_anomalies = data[(data['EndOfDayStock'] == data['prev_stock']) | (data['EndOfDayStock'] < data['Sales'])]
return stock_anomalies
anomalies = detect_anomalies(data)
#drop column prev_stock
anomalies = anomalies.drop(columns=['prev_stock'])
#create another csv file with the data without anomalies
data = data.drop(anomalies.index)
data = data.drop(columns=['prev_stock'])
# Get the unique product IDs
unique_product_ids = data['Product_ID'].unique()
# Keep only a small subset (1/46th) of the unique product IDs to limit runtime
point = len(unique_product_ids) // 46
# Slice the array to include only that subset of the product IDs
product_ids = unique_product_ids[:point]
# Initialize an empty dictionary to store the product IDs and their corresponding order quantities
product_details = {}
# Iterate over the unique product IDs
for product_id in product_ids:
old_filtered_data = data[data['Product_ID'] == product_id]
initial_inventory = old_filtered_data['EndOfDayStock'].iloc[0] + old_filtered_data['Sales'].iloc[0]
# Assuming 'filtered_data' is your DataFrame and 'Sales' is the column where you want to remove outliers
z_scores = stats.zscore(old_filtered_data['Sales'])
filtered_data = old_filtered_data
if len(old_filtered_data) >= 100:
for z in range(300):
z = z * 0.01
filtered_data = old_filtered_data[(z_scores < z) & (z_scores > -z)]
if (len(filtered_data) >= 0.8 * len(old_filtered_data)):
break
# train test split
train_size = int(len(filtered_data) * 0.8)
train, test = filtered_data[0:train_size], filtered_data[train_size:len(filtered_data)]
# # Prophet requires the variable names in the time series to be:
# # y – Target
# # ds – Datetime
train['ds'] = train.Date
train['y'] = train.Sales
train.drop(['Sales'], axis=1, inplace=True)
# confidence interval
model1 = Prophet(changepoint_prior_scale=0.05, interval_width=0.95, daily_seasonality=True) # by default is 80%
# # Check if the DataFrame has at least two non-NaN rows
if train['y'].count() < 2:
print(f'Skipping product_id {product_id} due to insufficient data')
continue
model1.fit(train)
future = model1.make_future_dataframe(periods=365, freq='D')
forecast = model1.predict(future)
# forecast_copy = forecast
# Convert 'Date' column to datetime in 'test'
test['Date'] = pd.to_datetime(test['Date'])
# Set 'Date' as the index in 'test'
test.set_index('Date', inplace=True)
# Filter 'forecast' to only include dates that are in 'test'
forecast = forecast[forecast['ds'].isin(test.index)]
# Calculate MAPE (Mean absolute percentage error)
first_row_ds = forecast['ds'].iloc[0]
forecast.set_index('ds', inplace=True)
temp = (test['Sales'] - forecast.loc[first_row_ds:, 'yhat'])
mape = (temp.abs() / test['Sales']).mean() * 100
# Create a pandas Series with the predicted values and date indices
forecasted_demand = pd.Series(forecast['yhat'].values, index=forecast.index)
# Lead time (number of days it takes to replenish inventory)
lead_time = 1 # it's different for every business, 1 is an example
# Service level (probability of not stocking out)
service_level = 0.95 # it's different for every business, 0.95 is an example
# Calculate the optimal order quantity using the Newsvendor formula
z = np.abs(np.percentile(forecasted_demand, 100 * (1 - service_level)))
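# z is the magnitude of the (1 - service_level) percentile of the forecast, used as a demand buffer on top of mean demand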
order_quantity = np.ceil(forecasted_demand.mean() + z).astype(int)
# Calculate the reorder point
reorder_point = round(forecasted_demand.mean() * lead_time + z, 0)
# Calculate the optimal safety stock
safety_stock = round(reorder_point - forecasted_demand.mean() * lead_time, 0)
# Calculate the total cost (holding cost + stockout cost)
holding_cost = 0.05 # it's different for every business, 0.05 is an example
total_holding_cost = holding_cost * (initial_inventory + 0.5 * order_quantity)
# Calculate the total cost
total_cost = total_holding_cost
if mape > 0 and mape < 100:
# Add the product ID and its order quantity to the dictionary
product_details[product_id] = {
'Order_Quantity': order_quantity,
'Reorder_Point': reorder_point,
'Safety_Stock': safety_stock,
'Total_Cost': total_cost
}
# Convert the dictionary to a DataFrame
product_details_df = pd.DataFrame.from_dict(product_details, orient='index')
product_details_df = product_details_df.sort_values(by=['Order_Quantity'], ascending=False)
product_details_df = product_details_df.head(10)
# Try to import OpenAI's GPT-3 for generating recommendations
import openai
openai.api_key = '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'
# Function to generate a resupply recommendation message using OpenAI's API
def generate_resupply_message(product_id, order_quantity, reorder_point, safety_stock):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": f"Generate a resupply recommendation message for the Stock Manager of Product ID {product_id}, advising them to order {order_quantity} units of the product, as the stock has reached the reorder point of {reorder_point} units. Additionally, suggest purchasing an extra {safety_stock} units as safety stock to prevent any gaps in supply."}
]
)
# Extract the message from the response
message = response.choices[0].message['content']
return message
for index, row in product_details_df.iterrows():
product_id = index
order_quantity = row['Order_Quantity']
reorder_point = row['Reorder_Point']
safety_stock = row['Safety_Stock']
total_cost = row['Total_Cost']
# Now you can use these variables in your code
# print(f'Safety stock for product ID {product_id} is {safety_stock} units')
resupply_message = generate_resupply_message(product_id, order_quantity, reorder_point, safety_stock)
print(resupply_message)
| [
"You are a helpful assistant.",
"Generate a resupply recommendation message for the Stock Manager of Product ID PLACEHOLDER, advising them to order PLACEHOLDER units of the product, as the stock has reached the reorder point of PLACEHOLDER units. Additionally, suggest purchasing an extra PLACEHOLDER units as safety stock to prevent any gaps in supply."
] |
2024-01-10 | aiguy110/assistant-starter | assistant_runner.py | import os
import time
import json
import dotenv
from colors import ANSI_USER_MESSAGE, ANSI_PROBLEM, ANSI_IDENTIFIER, ANSI_ACTION, ANSI_ASSISTANT_MESSAGE, ANSI_RESET
from openai import OpenAI
# Load environment variables
dotenv.load_dotenv()
# Placeholder for assistant ID
assistant_id = "YOUR_ASSISTANT_ID"
# Initialize OpenAI client
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
# Mapping of tool names to Python function names
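# e.g. tools = {"get_current_time": get_current_time} (hypothetical example; keys must match the function names configured on the Assistant)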
tools = {
}
def prettify_json(json_str):
return json.dumps(json.loads(json_str), indent=2)
def update_run_with_tool_calls(tool_calls, thread, run):
tool_outputs = []
# Report how many tool_calls the model is asking for and to which tools
print(f'{ANSI_ACTION}Model is making {ANSI_IDENTIFIER}{len(tool_calls)}{ANSI_ACTION} tool call(s):{ANSI_RESET}')
for tool_call in tool_calls:
if tool_call.type != "function":
print('Skipping processing for unknown tool type: ' + tool_call.type)
continue
if tool_call.function.name not in tools:
pretty_args = prettify_json(tool_call.function.arguments)
print(f'{ANSI_PROBLEM}Unknown tool {ANSI_IDENTIFIER}{tool_call.function.name}')
print(f'{ANSI_ACTION}Tool arguments:{ANSI_RESET}')
print(pretty_args)
output = input(f"{ANSI_PROBLEM}Please provide tool output:\n> {ANSI_RESET}")
else:
print(f'{ANSI_ACTION}Model invoked tool {ANSI_IDENTIFIER}{tool_call.function.name}' +
f'{ANSI_ACTION} with args:{ANSI_RESET}')
print(prettify_json(tool_call.function.arguments))
tool_func = tools[tool_call.function.name]
output = tool_func(
**json.loads(tool_call.function.arguments)
)
print(f'{ANSI_ACTION}Tool output:{ANSI_RESET}')
print(output)
tool_outputs.append({
"tool_call_id": tool_call.id,
"output": output
})
run = client.beta.threads.runs.submit_tool_outputs(
thread_id=thread.id,
run_id=run.id,
tool_outputs=tool_outputs
)
return run
def main():
# Create a new thread
thread = client.beta.threads.create()
while True:
# Get user input and add it to the thread if it is not empty
user_input = input(ANSI_USER_MESSAGE + "Enter your message:\n> " + ANSI_RESET)
if user_input.strip() != "":
client.beta.threads.messages.create(
thread_id=thread.id,
role="user",
content=user_input
)
# Ask the assistant to run on the thread
run = client.beta.threads.runs.create(
thread_id=thread.id,
assistant_id=assistant_id,
)
# Wait while the agent works on the run
while run.status not in ["completed", "failed", "cancelled", "expired"]:
if run.status == "requires_action":
tool_calls = run.required_action.submit_tool_outputs.tool_calls
run = update_run_with_tool_calls(tool_calls, thread, run)
else:
run = client.beta.threads.runs.retrieve(
thread_id=thread.id,
run_id=run.id
)
time.sleep(1)
# Check if we need to submit something for the run to continue
# Retrieve the latest message
messages = client.beta.threads.messages.list(
thread_id=thread.id
).data
latest_message = messages[0]
if latest_message.role == "assistant":
print(f'{ANSI_ASSISTANT_MESSAGE}Assistant says:{ANSI_RESET}')
print(latest_message.content[0].text.value)
if __name__ == "__main__":
main()
| [] |
2024-01-10 | S3annnyyy/recipe-daddy-be | recipe_daddy~views~ai_prompt.py | from rest_framework.response import Response
from rest_framework.decorators import api_view
from django.http import JsonResponse
from rest_framework import status
import openai
import os
@api_view(['POST'])
def get_ai_prompt(request):
openai.api_key = os.getenv("OPENAI_API_TOKEN")
# Receive the message from the request body
data = request.data
user_message = data.get('userPrompt', '')
schema = data.get('schema', '')
try:
if user_message and schema: print(f"Received userMsg and schema")
response = openai.ChatCompletion.create(
model="gpt-4-0613",
messages=[
{"role": "system", "content": "You are a helpful recipe assistant. Only use the functions you have been provided with"},
{"role": "user", "content": user_message},
],
functions=[{"name": "set_recipe", "parameters": schema, "outputs": schema}],
function_call={"name": "set_recipe"}
)
print(response.choices[0])
response_message = response["choices"][0]["message"]["function_call"]["arguments"]
# Return the generated text as JSON response.
return JsonResponse({'generated_text': response_message}, status=200)
except Exception as e:
return JsonResponse({'error': str(e)}, status=500)
return JsonResponse({'error': 'Invalid request method'}, status=400) | [
"You are a helpful recipe assistant. Only use the functions you have been provided with"
] |
2024-01-10 | m0bstaRx/PolyGPT-alpha | tools~claude.py | import os
from dotenv import load_dotenv
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
# Load .env file
load_dotenv()
# Initialize API client
api_key = os.getenv('ANTHROPIC_API_KEY')
anthropic = Anthropic(api_key=api_key)
def generate_response(message):
completion = anthropic.completions.create(
model="claude-2",
max_tokens_to_sample=300,
prompt=f"{HUMAN_PROMPT}{message}{AI_PROMPT}",
)
return completion.completion
if __name__ == '__main__':
# Get message from command line or user input
import sys
if len(sys.argv) > 1:
message = sys.argv[1]
else:
message = input("Enter a prompt: ")
response = generate_response(message)
print(response) | [
"PLACEHOLDERPLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | tyrell/llm-ollama-llamaindex-bootstrap | rag~pipeline.py | from llama_index import VectorStoreIndex, ServiceContext
from llama_index.embeddings import LangchainEmbedding
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index.llms import Ollama
from llama_index.vector_stores import WeaviateVectorStore
import weaviate
import box
import yaml
def load_embedding_model(model_name):
embeddings = LangchainEmbedding(
HuggingFaceEmbeddings(model_name=model_name)
)
return embeddings
def load_index(chunk_size, llm, embed_model, weaviate_client, index_name):
service_context = ServiceContext.from_defaults(
chunk_size=chunk_size,
llm=llm,
embed_model=embed_model
)
vector_store = WeaviateVectorStore(weaviate_client=weaviate_client, index_name=index_name)
index = VectorStoreIndex.from_vector_store(
vector_store, service_context=service_context
)
return index
def build_rag_pipeline():
"""
Constructs and configures a RAG pipeline for retrieval-augmented generation tasks.
This function performs the following steps to set up the RAG pipeline:
1. **Configuration Loading:**
- Reads configuration variables from a specified YAML file (`config.yml`).
- Stores the loaded configuration as a `box.Box` object for convenient access.
2. **Weaviate Connection:**
- Establishes a connection to the Weaviate server using the provided URL in the configuration.
- Creates a Weaviate client object for interacting with the Weaviate database.
3. **LLAMA Model Loading:**
- Loads the specified Ollama language model based on the `LLM` key in the configuration.
- Sets the model temperature to 0 for a more deterministic response generation.
4. **Embedding Model Loading:**
- Utilizes the `load_embedding_model` function to retrieve a pre-trained Hugging Face model configured for Langchain.
- This model will be used to embed documents and queries for efficient search and retrieval.
5. **Vector Store Index Loading:**
- Fetches the pre-built Weaviate Vector Store index named in the configuration (`INDEX_NAME`).
- Connects the index to the Weaviate client and embeds relevant context using the selected service context.
6. **Query Engine Construction:**
- Converts the loaded Vector Store index into a dedicated query engine for efficient retrieval.
- Sets the `streaming` flag to `False` to return the final response after the entire query is processed.
7. **Pipeline Return:**
- Returns the fully constructed and configured RAG pipeline represented by the `query_engine` object.
Notes:
- This function relies on a separate `config.yml` file for storing configuration values.
- Ensure that the configuration file contains valid values for all required keys.
"""
# Import configuration specified in config.yml
with open('config.yml', 'r', encoding='utf8') as ymlfile:
cfg = box.Box(yaml.safe_load(ymlfile))
print("Connecting to Weaviate")
client = weaviate.Client(cfg.WEAVIATE_URL)
print("Loading Ollama...")
llm = Ollama(model=cfg.LLM, temperature=0)
print("Loading embedding model...")
embeddings = load_embedding_model(model_name=cfg.EMBEDDINGS)
print("Loading index...")
index = load_index(cfg.CHUNK_SIZE, llm, embeddings, client, cfg.INDEX_NAME)
print("Constructing query engine...")
query_engine = index.as_query_engine(streaming=False)
return query_engine
| [] |
2024-01-10 | tothepoweroftom/langchain-pinecone-qa | all-in-one~pages~2_URL_Summary.py | import validators, streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import YoutubeLoader, UnstructuredURLLoader
from langchain.chains.summarize import load_summarize_chain
from langchain.prompts import PromptTemplate
# Set API keys from session state
openai_api_key = st.session_state.openai_api_key
# Streamlit app
st.subheader('URL Summary')
url = st.text_input("Enter Source URL")
# If 'Summarize' button is clicked
if st.button("Summarize"):
# Validate inputs
if not openai_api_key:
st.error("Please provide the missing API keys in Settings.")
elif not url:
st.error("Please provide the URL.")
elif not validators.url(url):
st.error("Please enter a valid URL.")
else:
try:
with st.spinner("Please wait..."):
# Load URL data
if "youtube.com" in url:
loader = YoutubeLoader.from_youtube_url(url, add_video_info=True)
else:
loader = UnstructuredURLLoader(urls=[url], ssl_verify=False, headers={"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 13_5_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36"})
data = loader.load()
# Initialize the ChatOpenAI module, load and run the summarize chain
llm = ChatOpenAI(temperature=0, model='gpt-3.5-turbo', openai_api_key=openai_api_key)
prompt_template = """Write a summary of the following in 250-300 words.
{text}
"""
prompt = PromptTemplate(template=prompt_template, input_variables=["text"])
chain = load_summarize_chain(llm, chain_type="stuff", prompt=prompt)
summary = chain.run(data)
st.success(summary)
except Exception as e:
st.exception(f"Exception: {e}")
| [
"Write a summary of the following in 250-300 words.\n \n {text}\n\n "
] |
2024-01-10 | StonyBrookNLP/ircot | commaqa~models~gpt3generator.py | import logging
import time
import os
from functools import lru_cache
import openai
from diskcache import Cache
from commaqa.inference.prompt_reader import fit_prompt_into_given_limit
logger = logging.getLogger(__name__)
cache = Cache(os.path.expanduser("~/.cache/gpt3calls"))
@cache.memoize()
def cached_openai_call( # kwargs doesn't work with caching.
prompt,
engine,
temperature,
max_tokens,
top_p,
frequency_penalty,
presence_penalty,
stop,
n,
best_of,
logprobs,
):
return openai.Completion.create(
prompt=prompt,
engine=engine,
temperature=temperature,
max_tokens=max_tokens,
top_p=top_p,
frequency_penalty=frequency_penalty,
presence_penalty=presence_penalty,
stop=stop,
n=n,
best_of=best_of,
logprobs=logprobs,
)
def openai_call(
prompt,
engine,
temperature,
max_tokens,
top_p,
frequency_penalty,
presence_penalty,
stop,
n,
best_of,
logprobs,
):
function = cached_openai_call if temperature == 0 else openai.Completion.create
return function(
prompt=prompt,
engine=engine,
temperature=temperature,
max_tokens=max_tokens,
top_p=top_p,
frequency_penalty=frequency_penalty,
presence_penalty=presence_penalty,
stop=stop,
n=n,
best_of=best_of,
logprobs=logprobs,
)
@lru_cache(maxsize=1)
def get_gpt_tokenizer():
from transformers import GPT2Tokenizer
return GPT2Tokenizer.from_pretrained("gpt2")
class GPT3Generator:
def __init__(
self,
engine="text-davinci-002",
temperature=0,
max_tokens=300,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=["\n"],
retry_after_n_seconds=None,
n=1,
best_of=1,
logprobs=0,
remove_method="first",
):
self.engine = engine
self.logprobs = logprobs
self.n = n
self.best_of = best_of
self.presence_penalty = presence_penalty
self.frequency_penalty = frequency_penalty
self.max_tokens = max_tokens
self.top_p = top_p
self.stop = stop
self.temperature = temperature
self.retry_after_n_seconds = retry_after_n_seconds
self.remove_method = remove_method
if "code-davinci" not in engine:
raise Exception("Not allowed to prevent accidental $$ wastage.")
if "code-davinci" not in engine and self.retry_after_n_seconds is not None:
raise Exception(
"Retry is only supported for code-davinci as it's free. "
"Using it for other paid models is risky and so is disabled."
)
if "code-davinci" in engine:
self.model_tokens_limit = 8000
else:
self.model_tokens_limit = 2000
def generate_text_sequence(self, prompt):
"""
:param prompt: prompt text to send to the model
:return: returns a sequence of tuples (string, score) where lower score is better
"""
# GPT3 can't handle trailing white-space
prompt = prompt.rstrip()
prompt = fit_prompt_into_given_limit(
original_prompt=prompt,
model_length_limit=self.model_tokens_limit,
estimated_generation_length=self.max_tokens,
demonstration_delimiter="\n\n\n",
shuffle=False,
remove_method=self.remove_method,
tokenizer_model_name="gpt2", # did this before tiktoken was released.
last_is_test_example=True,
)
arguments = {
"engine": self.engine,
"prompt": prompt,
"temperature": self.temperature,
"max_tokens": self.max_tokens,
"top_p": self.top_p,
"n": self.n,
"best_of": self.best_of,
"logprobs": self.logprobs,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"stop": self.stop,
}
if self.best_of is not None:
arguments["best_of"] = self.best_of
success = False
for index in range(500):
try:
response = openai_call(**arguments)
success = True
break
except Exception as exception:
success = False
tokenizer = get_gpt_tokenizer()
prompt_num_tokens = len(tokenizer.tokenize(prompt))
if prompt_num_tokens + arguments["max_tokens"] > self.model_tokens_limit > prompt_num_tokens:
last_used_max_tokens = arguments["max_tokens"]
updated_max_tokens = self.model_tokens_limit - prompt_num_tokens
arguments["max_tokens"] = updated_max_tokens
if last_used_max_tokens == updated_max_tokens:
break
print(
f"WARNING: (Round {index}) Decreasing max_tokens from "
f"{last_used_max_tokens} to {updated_max_tokens} and retrying."
)
continue
if self.retry_after_n_seconds is None:
import traceback
print(traceback.format_exc())
exit()
print(f"Encountered exception of class: {exception.__class__}")
if hasattr(exception, "user_message"):
print(exception.user_message)
print(f"Potentially reached OpenAI rate limit. Will try again in {self.retry_after_n_seconds}s.")
time.sleep(self.retry_after_n_seconds)
pass
if not success:
raise Exception("Could not complete OpenAI call")
output_seq_score = []
for index, choice in enumerate(response["choices"]):
if "logprobs" in choice and "token_logprobs" in choice["logprobs"]:
probs = []
for prob, tok in zip(choice["logprobs"]["token_logprobs"], choice["logprobs"]["tokens"]):
if tok not in self.stop and tok != "<|endoftext|>":
probs.append(prob)
else:
probs.append(prob)
break
score = -sum(probs) / len(probs) if len(probs) else 100.0
output_seq_score.append((choice["text"], score))
else:
output_seq_score.append((choice["text"], index))
return sorted(output_seq_score, key=lambda x: x[1])
| [
"\n\n\n"
] |
2024-01-10 | GeiserX/jwlibrary-plus | src~core_worker.py | import uuid
import shutil
import pytz
import os
import zipfile
import logging
import requests
import json
from datetime import datetime, timedelta
import sqlite3
from bs4 import BeautifulSoup
from docx import Document
from docx.enum.text import WD_PARAGRAPH_ALIGNMENT
from docx.enum.style import WD_STYLE_TYPE
from docx.shared import Pt
import subprocess
import langchain
from langchain.chat_models import ChatOpenAI
from langchain.cache import SQLiteCache, InMemoryCache, GPTCache
from gptcache import Cache
from gptcache.manager.factory import manager_factory
from gptcache.processor.pre import get_prompt
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate, HumanMessagePromptTemplate
from langchain.callbacks import get_openai_callback # TODO
logging.basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO)
logger = logging.getLogger(__name__)
def init_gptcache(cache_obj: Cache, llm: str):
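# Build a per-model GPTCache backed by SQLite + FAISS so repeated prompts can be served from cache instead of a new LLM call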
cache_obj.init(
pre_embedding_func=get_prompt,
data_manager=manager_factory(manager="sqlite,faiss,local", data_dir=f"dbs/map_cache_{llm}", vector_params={"dimension": "128"}, max_size=100000),
)
#######################################
### HELPER: DESCRIBE JWLIBRARY FILE ###
#######################################
def describe_jwlibrary(telegram_user):
logger.info("describe_jwlibrary - Telegram User: {0}".format(telegram_user))
jwfile = "userBackups/{0}.jwlibrary".format(telegram_user)
with zipfile.ZipFile(jwfile, 'r') as zip_ref:
files = zip_ref.namelist()
zip_ref.extractall("userBackups/{0}/".format(telegram_user))
uploadedDb = "userBackups/{0}/{1}".format(telegram_user, [zipname for zipname in files if zipname.endswith(".db")][0])
connection = sqlite3.connect(uploadedDb)
cursor = connection.cursor()
cursor.execute("SELECT Count(*) FROM Note")
notesN = cursor.fetchall()[0][0]
cursor.execute("SELECT Count(*) FROM InputField")
inputN = cursor.fetchall()[0][0]
cursor.execute("SELECT Count(*) FROM TagMap")
tagMaptN = cursor.fetchall()[0][0]
cursor.execute("SELECT Count(*) FROM Tag")
tagN = cursor.fetchall()[0][0]
cursor.execute("SELECT Count(*) FROM Bookmark")
bookmarkN = cursor.fetchall()[0][0]
cursor.execute("SELECT LastModified FROM LastModified")
lastModified = cursor.fetchall()[0][0]
cursor.execute("SELECT Count(*) FROM UserMark")
userMarkN = cursor.fetchall()[0][0]
connection.close()
shutil.rmtree("userBackups/{0}/".format(telegram_user))
return notesN, inputN, tagMaptN, tagN, bookmarkN, lastModified, userMarkN
#######################
### EXTRACTING HTML ###
#######################
def w_extract_html(url, get_all):
logger.info("w_extract_html - URL: {0} - Full Run: {1}".format(url, get_all))
html = requests.get(url).text
soup = BeautifulSoup(html, features="html5lib")
title = soup.find("h1").text
classArticleId = soup.find("article", {"id" : "article"}).get("class")
articleId = next(x for x in classArticleId if x.startswith("iss"))[4:] + "00"
articleN = soup.find("p", {"id":"p1"}).text
if get_all:
base_text = soup.find("p", {"id":"p3"}).text
song = soup.find("p",{"id":"p4"}).text
summary = soup.find("div", {"id": "footnote1"}).find("p").text
documentId = soup.find("input", {"name": "docid"}).get("value")
p_elements = soup.find("div", {"class":"bodyTxt"})
questions = p_elements.find_all("p", {"id": lambda x: x and x.startswith("q")})
paragraphs = p_elements.find_all("p", {"id": lambda x: x and x.startswith("p")})
# Example q_map = {0 : [q1, [p1]], 1 : [q2&3, [p2, p3]]}
q_map = {}
i = 0
for q in questions:
q_map[i] = [q]
q_map[i].append([p for p in paragraphs if p.has_attr('data-rel-pid') if p.get('data-rel-pid').strip('[]') in q.get('data-pid')])
i = i+1
return title, base_text, song, summary, questions, documentId, articleId, q_map
else:
return title, articleId, articleN
def mwb_extract_html(url, get_all): # TODO
logger.info("w_extract_html - URL: {0} - Full Run: {1}".format(url, get_all))
html = requests.get(url).text
soup = BeautifulSoup(html, features="html5lib")
for i in soup.find_all("a"):
em = i.find("em")
if em:
if em.get_text() == "lff":
study = i.get("href")
study_html = requests.get("https://wol.jw.org" + study).text
soup_study = BeautifulSoup(study_html, features="html5lib")
# title = soup.find("h1").text
# classArticleId = soup.find("article", {"id" : "article"}).get("class")
# articleId = next(x for x in classArticleId if x.startswith("iss"))[4:] + "00"
# articleN = soup.find("p", {"id":"p1"}).text
# if get_all:
# base_text = soup.find("p", {"id":"p3"}).text
# song = soup.find("p",{"id":"p4"}).text
# summary = soup.find("div", {"id": "footnote1"}).find("p").text
# documentId = soup.find("input", {"name": "docid"}).get("value")
# p_elements = soup.find("div", {"class":"bodyTxt"})
# questions = p_elements.find_all("p", {"id": lambda x: x and x.startswith("q")})
# paragraphs = p_elements.find_all("p", {"id": lambda x: x and x.startswith("p")})
# # Example q_map = {0 : [q1, [p1]], 1 : [q2&3, [p2, p3]]}
# q_map = {}
# i = 0
# for q in questions:
# q_map[i] = [q]
# q_map[i].append([p for p in paragraphs if p.has_attr('data-rel-pid') if p.get('data-rel-pid').strip('[]') in q.get('data-pid')])
# i = i+1
# return title, base_text, song, summary, questions, documentId, articleId, q_map
# else:
# return title, articleId, articleN
####################
### QUERY OPENAI ###
####################
def w_query_openai(title, base_text, song, summary, q_map, qs_user):
logger.info("w_query_openai - Title: {0} - Base Text: {1} - Song: {2} - Summary: {3} - Questions User: {4}".format(title, base_text, song, summary, qs_user))
#langchain.llm_cache = SQLiteCache(database_path="dbs/langchain.db") # Not working
#langchain.llm_cache = InMemoryCache()
langchain.llm_cache = GPTCache(init_gptcache)
questions = [f"{i}. {question}" for i, question in enumerate(qs_user, start=1) if question]
questions_text = "\n".join(questions)
llm = ChatOpenAI(model_name="gpt-3.5-turbo")
prompt = ChatPromptTemplate.from_messages([ # Make questions modifiable by user input
SystemMessagePromptTemplate.from_template("""
Eres un asistente que únicamente usa jw.org y las publicaciones de los Testigos de Jehová para mejorar la preparación de reuniones.
Yo estoy preparándome la Atalaya, edición de estudio, de los Testigos de Jehová.
Proveerás información extra proveniente de la literatura disponible en cada uno de los párrafos que te voy a ir mandando en los sucesivos prompts.
La Atalaya de esta semana se titula {0}, se basa en el texto de {1}, cantaremos la '{2}', y el resumen es el siguiente:
{3}
Para cada pregunta y párrafo o párrafos que te vaya enviando a partir de ahora, responderás en una lista lo siguiente:
{4}
No escribas estas preguntas de nuevo en la respuesta. Separa las respuestas con dos retornos de carro.""".format(title, base_text, song, summary, questions_text)),
MessagesPlaceholder(variable_name="history"),
HumanMessagePromptTemplate.from_template("{input}")
])
notes = {}
i=0
for q in q_map.values():
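        # A fresh ConversationChain (with its own buffer memory) is created per question;
        # the question's related paragraphs are flattened into one string and sent together.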
conversation = ConversationChain(llm=llm, verbose=False, memory=ConversationBufferMemory(return_messages=True), prompt=prompt)
flattened_paragraph = ""
for p in q[1]:
flattened_paragraph = flattened_paragraph + p.text
notes[i] = conversation.predict(input="Pregunta: {0} -- Párrafo(s): {1}".format(q[0].text, flattened_paragraph))
logger.info("w_query_openai(Note) - Note: {0}".format(notes[i])) # TODO: Reduce logs in the future when everything works stable
i=i+1
return notes
############################
### WRITE JWLIBRARY FILE ###
############################
def write_jwlibrary(documentId, articleId, title, questions, notes, telegram_user):
logger.info("write_jwlibrary - Document ID: {0} - Article ID: {1} - Title: {2} - Questions: {3} - Notes: {4} - Telegram User: {5}".format(documentId, articleId, title, questions, notes, telegram_user))
uploadedJwLibrary = 'userBackups/{0}.jwlibrary'.format(telegram_user)
os.makedirs("/app/userBackups/{0}".format(telegram_user), exist_ok=True)
now = datetime.now(pytz.timezone('Europe/Madrid'))
now_date = now.strftime("%Y-%m-%d")
hour_minute_second = now.strftime("%H-%M-%S")
now_iso = now.isoformat("T", "seconds")
j = '{{"name":"jwlibrary-plus-backup_{0}","creationDate":"{1}","version":1,"type":0,"userDataBackup":{{"lastModifiedDate":"{2}","deviceName":"jwlibrary-plus","databaseName":"userData.db","schemaVersion":8}}}}'.format(now_date, now_date, now_iso)
manifest = json.loads(j)
if(os.path.isfile(uploadedJwLibrary)):
logger.info("Archivo .jwlibrary encontrado")
with zipfile.ZipFile(uploadedJwLibrary, 'r') as zip_ref:
files = zip_ref.namelist()
zip_ref.extractall("userBackups/{0}/".format(telegram_user))
uploadedDb = "userBackups/{0}/{1}".format(telegram_user, [zipname for zipname in files if zipname.endswith(".db")][0])
manifestUser = "userBackups/{0}/manifest.json".format(telegram_user)
manifest_file = 'userBackups/{0}/manifest-{0}-{1}.json'.format(telegram_user, now_date)
with open(manifest_file, 'w') as f:
json.dump(manifest, f)
connection = sqlite3.connect(uploadedDb)
cursor = connection.cursor()
cursor.execute("SELECT LocationId FROM Location WHERE DocumentId={0}".format(documentId))
locationId = cursor.fetchall()
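        # Reuse the existing Location row if this document is already in the backup,
        # otherwise insert a new row with the next free LocationId.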
if locationId:
locationId = locationId[0][0]
else:
cursor.execute("SELECT max(LocationId) FROM Location")
locationId = cursor.fetchall()[0][0] + 1
cursor.execute("""INSERT INTO Location (LocationId, DocumentId, IssueTagNumber, KeySymbol, MepsLanguage, Type, Title)
VALUES ({0}, {1}, {2}, "w", 1, 0, "{3}");""".format(locationId, documentId, articleId, title))
cursor.execute("SELECT TagId FROM Tag WHERE Name = 'jwlibrary-plus'")
tagId = cursor.fetchall()
if not tagId:
cursor.execute("SELECT max(TagId) FROM Tag") # There will be always some tag, even on a brand-new install
tagId = cursor.fetchall()[0][0] + 1
cursor.execute("INSERT INTO Tag ('TagId', 'Type', 'Name') VALUES ('{0}', '1', 'jwlibrary-plus')".format(tagId))
tagId +=1
else:
tagId = tagId[0][0]
cursor.execute("SELECT * FROM UserMark LIMIT 1")
nonEmptyUserMark = cursor.fetchall()
if nonEmptyUserMark:
cursor.execute("SELECT max(UserMarkId) FROM UserMark")
userMarkId = cursor.fetchall()[0][0] + 1
else:
userMarkId = 1
cursor.execute("SELECT BlockRangeId FROM BlockRange LIMIT 1")
nonEmptyBlockRangeId = cursor.fetchall()
if nonEmptyBlockRangeId:
cursor.execute("SELECT max(BlockRangeId) FROM BlockRange")
blockRangeId = cursor.fetchall()[0][0] + 1
else:
blockRangeId = 1
cursor.execute("SELECT * FROM Note LIMIT 1")
nonEmptyNote = cursor.fetchall()
if nonEmptyNote:
cursor.execute("SELECT max(NoteId) FROM Note")
noteId = cursor.fetchall()[0][0] + 1
else:
noteId = 1
cursor.execute("SELECT * FROM TagMap LIMIT 1")
nonEmptyTagMap = cursor.fetchall()
if nonEmptyTagMap:
cursor.execute("SELECT max(TagMapId) FROM TagMap")
tagMapId = cursor.fetchall()[0][0] + 1
cursor.execute("SELECT max(Position) FROM TagMap")
Position = cursor.fetchall()[0][0] + 1
else:
tagMapId = 1
Position = 0
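        # For each generated note: add a highlight (UserMark + BlockRange) on the question,
        # store the note text, and link it to the jwlibrary-plus tag via TagMap.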
for i in notes:
uuid_value = str(uuid.uuid4())
uuid_value2 = str(uuid.uuid4())
cursor.execute("""INSERT INTO UserMark ('UserMarkId', 'ColorIndex', 'LocationId', 'StyleIndex', 'UserMarkGuid', 'Version')
VALUES ('{0}', '2', '{1}', '0', '{2}', '1');""".format(userMarkId, locationId, uuid_value))
cursor.execute ("""INSERT INTO "BlockRange" ("BlockRangeId", "BlockType", "Identifier", "StartToken", "EndToken", "UserMarkId")
VALUES ('{0}', '1', '{1}', '0', '{2}', '{3}');""".format(blockRangeId, questions[i].get("data-pid"), questions[i].text.find(".")-1, userMarkId))
cursor.execute("""INSERT INTO Note ("NoteId", "Guid", "UserMarkId", "LocationId", "Title", "Content", "LastModified", "BlockType", "BlockIdentifier")
VALUES ('{0}', '{1}', '{2}', '{3}', '{4}', '{5}', '{6}', '1', '{7}');""".format(noteId, uuid_value2, userMarkId, locationId, questions[i].text, notes[i].replace("'", '"'), now_iso, questions[i].get("data-pid")))
cursor.execute("INSERT INTO TagMap ('TagMapId', 'NoteId', 'TagId', 'Position') VALUES ('{0}', '{1}', '{2}', '{3}')".format(tagMapId, noteId, tagId, Position))
userMarkId += 1
blockRangeId += 1
noteId +=1
tagMapId += 1
Position +=1
cursor.execute("UPDATE LastModified SET LastModified = '{0}'".format(now_iso))
connection.commit()
connection.close()
fileName = "userBackups/{0}/jwlibrary-plus-{1}-{2}.jwlibrary".format(telegram_user, documentId, now_date)
zf = zipfile.ZipFile(fileName, "w")
zf.write(uploadedDb, arcname= "userData.db") # TODO
zf.write(manifest_file, arcname="manifest.json")
zf.close()
os.remove(uploadedDb) # Remove all data from the user except the newly generated .jwlibrary file, which will be deleted after being sent
os.remove(manifest_file)
os.remove(uploadedJwLibrary)
os.remove(manifestUser)
else:
dbOriginal = "dbs/userData.db.original"
dbFromUser = "userBackups/{0}/userData-{0}-{1}_{2}.db".format(telegram_user, now_date, hour_minute_second)
shutil.copyfile(src=dbOriginal, dst=dbFromUser)
manifest_file = 'userBackups/{0}/manifest-{0}-{1}.json'.format(telegram_user, now_date)
with open(manifest_file, 'w') as f:
json.dump(manifest, f)
connection = sqlite3.connect(dbFromUser)
cursor = connection.cursor()
cursor.execute("""INSERT INTO Location (LocationId, DocumentId, IssueTagNumber, KeySymbol, MepsLanguage, Type, Title)
VALUES (1, {0}, {1}, "w", 1, 0, "{2}");""".format(documentId, articleId, title))
cursor.execute("INSERT INTO Tag ('TagId', 'Type', 'Name') VALUES ('2', '1', 'jwlibrary-plus')")
for i in notes:
uuid_value = str(uuid.uuid4())
uuid_value2 = str(uuid.uuid4())
cursor.execute("""INSERT INTO UserMark ('UserMarkId', 'ColorIndex', 'LocationId', 'StyleIndex', 'UserMarkGuid', 'Version')
VALUES ('{0}', '2', '1', '0', '{1}', '1');""".format(i+1,uuid_value))
cursor.execute ("""INSERT INTO "BlockRange" ("BlockRangeId", "BlockType", "Identifier", "StartToken", "EndToken", "UserMarkId")
VALUES ('{0}', '1', '{1}', '0', '{2}', '{3}');""".format(i+1, questions[i].get("data-pid"), questions[i].text.find(".")-1, i+1))
cursor.execute("""INSERT INTO Note ("NoteId", "Guid", "UserMarkId", "LocationId", "Title", "Content", "LastModified", "BlockType", "BlockIdentifier")
VALUES ('{0}', '{1}', '{2}', '1', '{3}', '{4}', '{5}', '1', '{6}');""".format(i+1, uuid_value2, i+1, questions[i].text, notes[i].replace("'", '"'), now_iso, questions[i].get("data-pid")))
cursor.execute("INSERT INTO TagMap ('TagMapId', 'NoteId', 'TagId', 'Position') VALUES ('{0}', '{1}', '2', '{2}')".format(i+1,i+1,i))
cursor.execute("UPDATE LastModified SET LastModified = '{0}'".format(now_iso))
connection.commit()
connection.close()
fileName = "userBackups/{0}/jwlibrary-plus-{1}-{2}.jwlibrary".format(telegram_user, documentId, now_date)
zf = zipfile.ZipFile(fileName, "w")
zf.write(dbFromUser, arcname= "userData.db")
zf.write(manifest_file, arcname="manifest.json")
zf.close()
os.remove(dbFromUser)
os.remove(manifest_file)
return fileName
def write_docx_pdf(documentId, title, questions, notes, telegram_user):
now_date = datetime.now(pytz.timezone('Europe/Madrid')).strftime("%Y-%m-%d")
document = Document()
bold_style = document.styles.add_style('Bold List Number', WD_STYLE_TYPE.PARAGRAPH)
bold_style.font.bold = True
document.add_heading(title, 0)
document.add_paragraph('By JW Library Plus - https://github.com/GeiserX/jwlibrary-plus', style="Subtitle")
for i in range(len(questions)):
p = document.add_paragraph(style='Bold List Number')
p.add_run(questions[i].text).font.size = Pt(12)
document.add_paragraph(notes[i])
fileNameDoc = "userBackups/{0}/jwlibrary-plus-{1}-{2}.docx".format(telegram_user, documentId, now_date)
document.save(fileNameDoc)
fileNamePDF = "userBackups/{0}/jwlibrary-plus-{1}-{2}.pdf".format(telegram_user, documentId, now_date)
cmd_str = "abiword --to=pdf --to-name={0} {1}".format(fileNamePDF, fileNameDoc)
subprocess.run(cmd_str, shell=True)
return fileNameDoc, fileNamePDF
def main(url, telegram_user, qs_user) -> None:
title, base_text, song, summary, questions, documentId, articleId, q_map = w_extract_html(url, get_all=True)
notes = w_query_openai(title, base_text, song, summary, q_map, qs_user)
filenamejw = write_jwlibrary(documentId, articleId, title, questions, notes, telegram_user)
filenamedoc, filenamepdf = write_docx_pdf(documentId, title, questions, notes, telegram_user)
return filenamejw, filenamedoc, filenamepdf
if __name__ == "__main__":
main()
| [
"\nEres un asistente que únicamente usa jw.org y las publicaciones de los Testigos de Jehová para mejorar la preparación de reuniones.\nYo estoy preparándome la Atalaya, edición de estudio, de los Testigos de Jehová.\nProveerás información extra proveniente de la literatura disponible en cada uno de los párrafos que te voy a ir mandando en los sucesivos prompts.\nLa Atalaya de esta semana se titula {0}, se basa en el texto de {1}, cantaremos la '{2}', y el resumen es el siguiente: \n{3}\nPara cada pregunta y párrafo o párrafos que te vaya enviando a partir de ahora, responderás en una lista lo siguiente:\n{4}\nNo escribas estas preguntas de nuevo en la respuesta. Separa las respuestas con dos retornos de carro.",
"\nEres un asistente que únicamente usa jw.org y las publicaciones de los Testigos de Jehová para mejorar la preparación de reuniones.\nYo estoy preparándome la Atalaya, edición de estudio, de los Testigos de Jehová.\nProveerás información extra proveniente de la literatura disponible en cada uno de los párrafos que te voy a ir mandando en los sucesivos prompts.\nLa Atalaya de esta semana se titula PLACEHOLDER, se basa en el texto de PLACEHOLDER, cantaremos la 'PLACEHOLDER', y el resumen es el siguiente: \nPLACEHOLDER\nPara cada pregunta y párrafo o párrafos que te vaya enviando a partir de ahora, responderás en una lista lo siguiente:\nPLACEHOLDER\nNo escribas estas preguntas de nuevo en la respuesta. Separa las respuestas con dos retornos de carro.",
"{input}"
] |
2024-01-10 | guytavor/playlister | playlister.py | import json
import os
import subprocess
import openai
import spotipy
from termcolor import colored
import sp_auth
from config import ACCESS_TOKEN_FILE, DATA_PATH
from playlists_db import PlaylistManager
def play_playlist(sp, tracks_list, device_id, playlist_name):
"""
Play a playlist on the given device using the Spotify API.
If the given 'playlist_name' does not exist in the user's playlists, it will be created.
if it does exist, it will just be played.
:param sp:
:param tracks_list: the playlist json
:param device_id:
:param playlist_name: the name of the playlist the user gave
"""
# Get the user's playlists
playlists = sp.current_user_playlists()
# Check if playlist with the given name already exists
playlist_id = None
for item in playlists['items']:
if item['name'] == playlist_name:
playlist_id = item['id']
break
user_id = sp.me()['id']
# If no existing playlist is found, create a new one
if playlist_id is None:
track_uris = []
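        # Resolve each generated song to a Spotify track URI via search;
        # songs that cannot be found are skipped.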
for track in tracks_list["playlist"]:
results = sp.search(q='track:{} artist:{}'.format(track['song_name'], track['artist_name']), type='track')
if results['tracks']['items']:
track_uris.append(results['tracks']['items'][0]['uri'])
print(colored(f"Creating playlist {playlist_name} with {len(track_uris)} tracks", "yellow"))
playlist = sp.user_playlist_create(user=user_id, name=playlist_name)
playlist_id = playlist['id']
sp.user_playlist_replace_tracks(user=user_id, playlist_id=playlist_id, tracks=track_uris)
# Start playback on the selected device for the given playlist
sp.start_playback(device_id=device_id, context_uri=f'spotify:playlist:{playlist_id}')
def generate_tracks_list(playlist_description) -> json:
"""
Generate a list of tracks based on the user's 'playlist_description' using GPT
:param playlist_description:
:return: a JSON describing the tracks list
"""
# Get the OpenAI API key from the environment
api_key = os.getenv("OPENAI_API_KEY")
# Set the OpenAI API key
openai.api_key = api_key
prompt = f"""
{playlist_description}. Generate a list of 15 songs in the format of a JSON with song name and artist name:"
Use the following JSON format:
{{
"playlist":
[
{{"song_name": "The long and winding road", "artist_name": "The Beatles"}},
{{"song_name": "Sweet Child o' Mine", "artist_name": "Guns N' Roses"}},
]
}}
"""
# Call the GPT-4 model to generate a response
response = openai.ChatCompletion.create(
model="gpt-4", # Set the model
messages=[
{"role": "system", "content": "You are a knowledgeable AI trained to generate music playlists."},
{"role": "user", "content": prompt}
]
)
# Extract the assistant's reply (assumes the reply is the last message)
assistant_reply = response['choices'][0]['message']['content']
# Parse JSON and return it
playlist = json.loads(assistant_reply)
return playlist
def setup_spotify():
# Read access token from creds/access_token.txt
# to generate this file, run sp_auth.py
with open(ACCESS_TOKEN_FILE, "r") as f:
access_token = f.read()
return spotipy.Spotify(auth=access_token)
def authorize_spotify():
sp_auth.run_flow()
def main():
# if token file does not exist
if not os.path.exists(ACCESS_TOKEN_FILE):
print(colored("Running authorization flow", "red", attrs=["bold"]))
authorize_spotify()
exit(1)
sp = setup_spotify()
# Ask the user for their desired playlist
pm = PlaylistManager(DATA_PATH)
print(colored("Here are your playlists:", "green"))
playlists = pm.list_playlists()
for i, playlist in enumerate(playlists, 0):
print(f"{i}. {playlist}")
playlist_description = input(
colored("\nEnter an playlist number OR a description for a new playlist you want:\n", "green", attrs=["bold"]))
print(colored("Opening your spotify desktop app", "yellow", attrs=["bold"]))
command = "/Applications/Spotify.app/Contents/MacOS/Spotify"
subprocess.Popen(command)
if playlist_description.isdigit():
# Load old playlist
playlist = pm.load_playlist(int(playlist_description))
playlist_description = pm.get_playlist_name(int(playlist_description))
print(colored(f"Loading {playlist_description}...", "yellow", attrs=["bold"]))
else:
# Generate new playlist
print(colored("Generating playlist...", "yellow", attrs=["bold"]))
playlist = generate_tracks_list(playlist_description)
pm.save_playlist(playlist_description, playlist)
print(colored("Playing:", "green"))
text_list = playlist_json_to_text(playlist)
print(colored(text_list, "yellow"))
try:
devices = sp.devices()
device_id = devices['devices'][0]['id'] # get the first device
print(colored("\n\nPlaying...", "yellow", attrs=["bold"]))
play_playlist(sp, playlist, device_id, playlist_description)
except spotipy.exceptions.SpotifyException:
print(colored("Your spotify token has expired, running authorization flow", "red", attrs=["bold"]))
authorize_spotify()
def playlist_json_to_text(playlist):
text_list = ""
for i, song in enumerate(playlist["playlist"], start=1):
text_list += f"{i}. {song['song_name']} by {song['artist_name']}\n"
return text_list
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
main()
| [
"You are a knowledgeable AI trained to generate music playlists.",
"\nPLACEHOLDER. Generate a list of 15 songs in the format of a JSON with song name and artist name:\"\nUse the following JSON format:\n{\n \"playlist\":\n [\n {\"song_name\": \"The long and winding road\", \"artist_name\": \"The Beatles\"},\n {\"song_name\": \"Sweet Child o' Mine\", \"artist_name\": \"Guns N' Roses\"},\n ]\n}\n"
] |
2024-01-10 | rockerBOO/sd-ext | debug_vae_from_images.py | # Original from https://gist.github.com/Poiuytrezay1/db6b98672675456bed39d45077d44179
# Credit to Poiuytrezay1
import argparse
import os
from collections import defaultdict
from pathlib import Path
import cv2 as cv
try:
import library.model_util as model_util
import library.train_util as train_util
import library.sdxl_train_util as sdxl_train_util
except ModuleNotFoundError:
print(
"Requires to be with the Kohya-ss sd-scripts"
+ " https://github.com/kohya-ss/sd-scripts"
)
print("Copy this script into your Kohya-ss sd-scripts directory")
import sys
sys.exit(2)
import numpy as np
import torch
import tqdm
from PIL import Image, features
from torchvision import transforms
IMAGE_TRANSFORMS = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def load_image(image_path):
image = Image.open(image_path)
if not image.mode == "RGB":
image = image.convert("RGB")
img = np.array(image, np.uint8)
return img, image.info
def process_images_group(vae, images_group):
with torch.no_grad():
# Stack the tensors from the same size group
img_tensors = torch.stack(images_group, dim=0).to(vae.device)
# Encode and decode the images
latents = vae.encode(img_tensors).latent_dist.sample()
return latents
def process_latents_from_images(vae, input_file_or_dir, output_dir, args):
if args.consistency_decoder:
from consistencydecoder import ConsistencyDecoder
decoder_consistency = ConsistencyDecoder(device=vae.device)
input = Path(input_file_or_dir)
output = Path(output_dir)
os.makedirs(str(output.absolute()), exist_ok=True)
if input.is_dir():
image_files = [
file
for file in input.iterdir()
if file.suffix
in [".jpg", ".jpeg", ".png", ".webp", ".bmp", ".avif"]
]
else:
image_files = [input]
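    # Group the transformed images by (H, W) shape so same-size images can be
    # stacked into a single batch tensor later.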
size_to_images = defaultdict(list)
file_names = [] # List to keep track of file names
for image_file in image_files:
image, _ = load_image(image_file)
transformed_image = IMAGE_TRANSFORMS(image)
size_to_images[transformed_image.shape[1:]].append(transformed_image)
file_names.append(image_file) # Save the file name
total_images = len(file_names)
# batch_size = args.batch_size
batch_size = 1
print("Temporarily limiting batch size to 1")
vae_name = Path(args.vae).stem if args.vae is not None else None
with tqdm.tqdm(total=total_images) as progress_bar:
for i, (size, images_group) in enumerate(size_to_images.items()):
batch_file_names = file_names[i : i + batch_size]
print(batch_file_names)
# Get the batch file names
latents = process_images_group(vae, images_group)
if args.consistency_decoder:
consistencydecoder_and_save(
decoder_consistency,
latents,
batch_file_names,
output,
device=vae.device,
)
else:
decode_vae_and_save(
vae,
latents,
batch_file_names,
output,
gif=args.gif,
vae_name=vae_name,
apng=args.apng,
webp=args.webp,
mp4=False,
)
progress_bar.update(1)
def decode_vae_and_save(
vae,
latents,
filenames,
output,
gif=False,
vae_name=None,
apng=False,
webp=False,
mp4=False,
):
with torch.no_grad():
decoded_images = []
for i in range(0, 1):
decoded_images.append(
vae.decode(
latents[i : i + 1] if i > 1 else latents[i].unsqueeze(0)
).sample
)
decoded_images = torch.cat(decoded_images)
# Rescale images from [-1, 1] to [0, 255] and save
decoded_images = (
((decoded_images / 2 + 0.5).clamp(0, 1) * 255)
.cpu()
.permute(0, 2, 3, 1)
.numpy()
.astype("uint8")
)
vae_file_part = f"-{vae_name}" if vae_name is not None else ""
for i, decoded_image in enumerate(decoded_images):
original_file = filenames[
i
] # Get the original file name for each image
print(original_file)
output_file = (
output.absolute()
/ original_file.with_name(
f"{original_file.stem}-latents-decoded{vae_file_part}.png"
).name
)
output_image = Image.fromarray(decoded_image)
print(f"Saving to {output_file}")
output_image.save(output_file)
if gif or apng or webp:
original_image = Image.open(original_file)
if gif:
output_gif_file = (
output.absolute()
/ original_file.with_name(
f"{original_file.stem}-latents-decoded{vae_file_part}.gif"
).name
)
print(f"Saving gif to {output_gif_file}")
print([original_file, output_file])
original_image.save(
output_gif_file,
save_all=True,
append_images=[output_image],
optimize=False,
duration=500,
loop=0,
)
if mp4:
output_mp4_file = (
output.absolute()
/ original_file.with_name(
f"{original_file.stem}-latents-decoded{vae_file_part}.mp4"
).name
)
print(f"Saving mp4 to {output_mp4_file}")
width, height = original_image.size
# fourcc = cv.VideoWriter_fourcc(*"mp4v")
fps = 2
video = cv.VideoWriter(
str(output_mp4_file), -1, fps, (width, height)
)
open_cv_image = np.array(original_image)
open_cv_image = open_cv_image[:, :, ::-1].copy()
video.write(open_cv_image)
open_cv_image = np.array(output_image)
open_cv_image = open_cv_image[:, :, ::-1].copy()
video.write(open_cv_image)
cv.destroyAllWindows()
video.release()
if apng:
output_apng_file = (
output.absolute()
/ original_file.with_name(
f"{original_file.stem}-latents-decoded{vae_file_part}.apng"
).name
)
print(f"Saving animated png to {output_apng_file}")
print([original_file, output_file])
original_image.save(
output_apng_file,
save_all=True,
append_images=[output_image],
duration=500,
loop=0,
)
if webp:
if features.check("webp_anim"):
output_webp_file = (
output.absolute()
/ original_file.with_name(
f"{original_file.stem}-latents-decoded{vae_file_part}.webp"
).name
)
print(f"Saving animated webp to {output_webp_file}")
print([original_file, output_file])
try:
original_image.save(
output_webp_file,
save_all=True,
append_images=[output_image],
duration=500,
method=4,
lossless=True,
loop=0,
)
except RuntimeError as err:
print(f"animated webp Error: {err}")
else:
print("warning: animated webp images not supported")
def consistencydecoder_and_save(
decoder_consistency, latents, filenames, output_dir, device
):
from consistencydecoder import save_image
with torch.no_grad():
sample_consistences = decoder_consistency(latents)
for i, decoded_image in enumerate(sample_consistences):
original_file_name = filenames[i]
# Get the original file name for each image
original_name_without_extension = os.path.splitext(
original_file_name
)[0]
save_image(
decoded_image,
os.path.join(
output_dir,
f"{original_name_without_extension}-latents-decoded-consistency.png",
),
)
def main(args):
device = torch.device(args.device)
# Convert blank VAE into None for compatibility
if args.vae == "":
args.vae = None
if args.vae is None:
from accelerate import Accelerator
accelerator = Accelerator()
if args.sdxl:
# putting this in here just to be able to pass the argument
_, _, _, vae, _, _, _ = sdxl_train_util.load_target_model(
args,
accelerator,
args.pretrained_model_name_or_path,
torch.float16,
)
else:
# Load model's VAE
_, vae, _, _ = train_util.load_target_model(
args, torch.float16, accelerator
)
vae.to(device, dtype=torch.float32)
else:
vae = model_util.load_vae(args.vae, torch.float32).to(device)
# Save image decoded latents
process_latents_from_images(
vae, args.input_file_or_dir, args.output_dir, args
)
if __name__ == "__main__":
argparser = argparse.ArgumentParser()
argparser.add_argument("--device", default="cpu")
argparser.add_argument(
"--input_file_or_dir",
help="Input file or directory to load the images from",
)
argparser.add_argument(
"--output_dir", help="Output directory to put the VAE decoded images"
)
argparser.add_argument(
"--vae",
type=str,
default=None,
help="path to checkpoint of vae to replace / VAEを入れ替える場合、VAEのcheckpointファイルまたはディレクトリ",
)
argparser.add_argument(
"--pretrained_model_name_or_path",
default="",
help="Stable diffusion model name or path to load the VAE from.",
)
argparser.add_argument(
"--gif",
action="store_true",
help="Make a gif of the decoded image with the original",
)
argparser.add_argument(
"--apng",
action="store_true",
help="Make an animated png of the decoded image with the original",
)
argparser.add_argument(
"--webp",
action="store_true",
help="Make an animated webp of the decoded image with the original",
)
argparser.add_argument(
"--v2", action="store_true", help="Is a Stable Diffusion v2 model."
)
argparser.add_argument(
"--batch_size",
type=int,
default=1,
help="Batch size to process the images.",
)
argparser.add_argument(
"--sdxl", action="store_true", help="(NOTWORKING) SDXL model"
)
argparser.add_argument(
"--lowram", type=int, default=1, help="SDXL low ram option"
)
argparser.add_argument(
"--full_fp16", type=int, default=1, help="SDXL use full fp16"
)
argparser.add_argument(
"--full_bf16", type=int, default=1, help="SDXL use full bf16"
)
argparser.add_argument(
"--consistency_decoder",
action="store_true",
help="Use Consistency Decoder from OpenAI https://github.com/openai/consistencydecoder",
)
args = argparser.parse_args()
main(args)
| [] |
2024-01-10 | stephanie-wang/ray | rllib~contrib~maddpg~maddpg.py | """Contributed port of MADDPG from OpenAI baselines.
The implementation has a couple assumptions:
- The number of agents is fixed and known upfront.
- Each agent is bound to a policy of the same name.
- Discrete actions are sent as logits (pre-softmax).
For a minimal example, see twostep_game.py, and the README for how to run
with the multi-agent particle envs.
"""
import logging
from ray.rllib.agents.trainer import with_common_config
from ray.rllib.agents.dqn.dqn import GenericOffPolicyTrainer
from ray.rllib.contrib.maddpg.maddpg_policy import MADDPGTFPolicy
from ray.rllib.optimizers import SyncReplayOptimizer
from ray.rllib.policy.sample_batch import SampleBatch, MultiAgentBatch
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
# === Settings for each individual policy ===
# ID of the agent controlled by this policy
"agent_id": None,
# Use a local critic for this policy.
"use_local_critic": False,
# === Evaluation ===
# Evaluation interval
"evaluation_interval": None,
# Number of episodes to run per evaluation period.
"evaluation_num_episodes": 10,
# === Model ===
# Apply a state preprocessor with spec given by the "model" config option
# (like other RL algorithms). This is mostly useful if you have a weird
# observation shape, like an image. Disabled by default.
"use_state_preprocessor": False,
# Postprocess the policy network model output with these hidden layers. If
# use_state_preprocessor is False, then these will be the *only* hidden
# layers in the network.
"actor_hiddens": [64, 64],
# Hidden layers activation of the postprocessing stage of the policy
# network
"actor_hidden_activation": "relu",
# Postprocess the critic network model output with these hidden layers;
# again, if use_state_preprocessor is True, then the state will be
# preprocessed by the model specified with the "model" config option first.
"critic_hiddens": [64, 64],
# Hidden layers activation of the postprocessing state of the critic.
"critic_hidden_activation": "relu",
# N-step Q learning
"n_step": 1,
# Algorithm for good policies
"good_policy": "maddpg",
# Algorithm for adversary policies
"adv_policy": "maddpg",
# === Replay buffer ===
# Size of the replay buffer. Note that if async_updates is set, then
# each worker will have a replay buffer of this size.
"buffer_size": int(1e6),
# Observation compression. Note that compression makes simulation slow in
# MPE.
"compress_observations": False,
# === Optimization ===
# Learning rate for the critic (Q-function) optimizer.
"critic_lr": 1e-2,
# Learning rate for the actor (policy) optimizer.
"actor_lr": 1e-2,
# Update the target network every `target_network_update_freq` steps.
"target_network_update_freq": 0,
# Update the target by \tau * policy + (1-\tau) * target_policy
"tau": 0.01,
# Weights for feature regularization for the actor
"actor_feature_reg": 0.001,
# If not None, clip gradients during optimization at this value
"grad_norm_clipping": 0.5,
# How many steps of the model to sample before learning starts.
"learning_starts": 1024 * 25,
# Update the replay buffer with this many samples at once. Note that this
# setting applies per-worker if num_workers > 1.
"sample_batch_size": 100,
# Size of a batched sampled from replay buffer for training. Note that
# if async_updates is set, then each worker returns gradients for a
# batch of this size.
"train_batch_size": 1024,
# Number of env steps to optimize for before returning
"timesteps_per_iteration": 0,
# === Parallelism ===
# Number of workers for collecting samples with. This only makes sense
# to increase if your environment is particularly slow to sample, or if
# you're using the Async or Ape-X optimizers.
"num_workers": 1,
# Prevent iterations from going lower than this time span
"min_iter_time_s": 0,
})
# __sphinx_doc_end__
# yapf: enable
def set_global_timestep(trainer):
global_timestep = trainer.optimizer.num_steps_sampled
trainer.train_start_timestep = global_timestep
def before_learn_on_batch(multi_agent_batch, policies, train_batch_size):
samples = {}
# Modify keys.
for pid, p in policies.items():
i = p.config["agent_id"]
keys = multi_agent_batch.policy_batches[pid].data.keys()
keys = ["_".join([k, str(i)]) for k in keys]
samples.update(
dict(
zip(keys,
multi_agent_batch.policy_batches[pid].data.values())))
# Make ops and feed_dict to get "new_obs" from target action sampler.
new_obs_ph_n = [p.new_obs_ph for p in policies.values()]
new_obs_n = list()
for k, v in samples.items():
if "new_obs" in k:
new_obs_n.append(v)
target_act_sampler_n = [p.target_act_sampler for p in policies.values()]
feed_dict = dict(zip(new_obs_ph_n, new_obs_n))
new_act_n = p.sess.run(target_act_sampler_n, feed_dict)
samples.update(
{"new_actions_%d" % i: new_act
for i, new_act in enumerate(new_act_n)})
# Share samples among agents.
policy_batches = {pid: SampleBatch(samples) for pid in policies.keys()}
return MultiAgentBatch(policy_batches, train_batch_size)
def make_optimizer(workers, config):
return SyncReplayOptimizer(
workers,
learning_starts=config["learning_starts"],
buffer_size=config["buffer_size"],
train_batch_size=config["train_batch_size"],
before_learn_on_batch=before_learn_on_batch,
synchronize_sampling=True,
prioritized_replay=False)
def add_trainer_metrics(trainer, result):
global_timestep = trainer.optimizer.num_steps_sampled
result.update(
timesteps_this_iter=global_timestep - trainer.train_start_timestep,
info=dict({
"num_target_updates": trainer.state["num_target_updates"],
}, **trainer.optimizer.stats()))
def collect_metrics(trainer):
result = trainer.collect_metrics()
return result
MADDPGTrainer = GenericOffPolicyTrainer.with_updates(
name="MADDPG",
default_config=DEFAULT_CONFIG,
default_policy=MADDPGTFPolicy,
before_init=None,
before_train_step=set_global_timestep,
make_policy_optimizer=make_optimizer,
after_train_result=add_trainer_metrics,
collect_metrics_fn=collect_metrics,
before_evaluate_fn=None)
| [] |
2024-01-10 | sarthakforwet/Auxel | auxel_app.py | # Speech Recoginition libraries
import speech_recognition as sr
# from google.cloud import speech
import pyttsx3
# Chat Based Module
import openai
# Miscellaneous Libraries
import pandas as pd
import time
import os
# from speech_rec import main
# LangChain and SQLite
import sqlite3
import pandas as pd
import os
from langchain import OpenAI, SQLDatabase, SQLDatabaseChain
# PYTTSX3 CLASS
class _TTS:
'''
Load the Engine separately to avoid the endless loop in runAndWait.
'''
engine = None
rate = None
def __init__(self):
self.engine = pyttsx3.init()
self.engine.setProperty('voice', 'english+f6')
rate = self.engine.getProperty('rate')
self.engine.setProperty('rate', rate - 25)
def say(self,text_):
'''
Speak up the input text.
'''
self.engine.say(text_)
self.engine.runAndWait() # Wait till the engine stops executing.
# CHAT APP USING OPEN AI API
class ChatApp:
'''
Class which instantiates the openai API and handles the chat by providing a custom input
received form voice based commands.
'''
def __init__(self):
# Setting the API key to use the OpenAI API
# openai.api_key = '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'
#os.environ['OPENAI_API_KEY'] = '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'
self.openai_key = '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'
# Initializing the chatbot.
self.messages = [
{"role": "system", "content": "You are a dataframe wrangler to manipulate datasets."},
]
self.flag = 0
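        # LangChain SQLDatabaseChain that turns natural-language requests into SQL
        # queries against the local auxel_db SQLite database.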
input_db = SQLDatabase.from_uri('sqlite:///auxel_db.sqlite3')
llm_1 = OpenAI(openai_api_key=self.openai_key, temperature=0)
self.db_agent = SQLDatabaseChain(llm=llm_1,
database=input_db,
verbose=True)
def chat_davinci(self, message, df):
openai_query = message
df_main = df
# Print the schema of the table
schema = f"Schema of the table df_main:"
for col in df_main.columns:
# Check if column contains strings
if df_main[col].dtype == object:
# Check if column has less than 10 unique values
if len(df_main[col].unique()) < 10:
# Print column name
schema +="\n{}: {}".format(col,", ".join(df_main[col].unique()))
else:
schema += "\n{}".format(col)
else:
schema += "\n{}".format(col)
# Use OpenAI's GPT-3 to generate SQL
prompt = (f"Given the following database schema, write an SQL query :"
f" {openai_query}\n\n"
f"Database schema:\n\n{schema}\n\n"
f"Select all the columns unless specified\n"
f"This is my schema for search on a string use LIKE sql command rather than a query\n"
f"Following are my intent classes\n"
f"SHOW is displaying records/querying of a specific instance\n"
f"SORT is sorting\n"
f"OPERATION one which belongs of the other\n"
f"FILTER is filtering of records\n"
f"Produce the SQL Query and given the intent of {openai_query} in this format\n"
f"Every query has one intent class\n"
f"SQL Query|%%|Intent class:")
# response = openai.Completion.create(
# engine="text-davinci-003",
# prompt=prompt,
# temperature=0.5,
# max_tokens=250,
# n=1,
# stop=None,
# )
# Connect sqlite database.
self.conn = sqlite3.connect('auxel_db.sqlite3')
# Print the generated SQL
# open_ai_response = response.choices[0].text.strip()
response = self.db_agent.run(openai_query)
return response
def chat(self, message):
'''
Call the chat endpoint from the API.
'''
self.messages.append({"role": "user", "content": message})
# Get response from chat using the message list built so far.
        # The chat endpoint is needed here because a `messages` list is passed;
        # Completion.create does not accept it, so use ChatCompletion with a chat model.
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=self.messages
        )
        print(response)
        # Append the response.
        self.messages.append({"role": "assistant", "content": response["choices"][0]["message"]["content"]})
        return response["choices"][0]["message"]["content"]
# AUXEL BOT
class Auxel:
'''
The driving class for the Auxel chatbot.
'''
init_flag = False
def __init__(self):
# TODO: change to get dataset information from user.
self.df = pd.read_csv('data.csv')
# self.text = ''
self.chat = ChatApp()
# out = self.chat.chat(str(self.df)+'Remember the DataFrame.')
# out = self.chat.chat_davinci(str(self.df)+'Remember the DataFrame.', self.df) # Davinci Version
# print(out)
def say(self, text):
"Speak up the response and delete the instance formed."
tts = _TTS()
tts.say(text)
del(tts)
def listen(self):
"Listen to user query."
self.say('Heyy. How can I help you today?')
r = sr.Recognizer()
with sr.Microphone() as source:
audio_data = r.listen(source,timeout=5)
try:
self.text = r.recognize_google(audio_data) # Use Free Google API for recognizing input audio data.
self.text = self.process_input_query_1()
return self.text
except sr.UnknownValueError:
print('Error: Speech recognition could not understand audio.')
except sr.RequestError as e:
print(f'Error: Could not request results from Speech Recognition service; {e}')
# ============== TEST FUNCTION ===================================
def process_input_query_1(self):
out = self.chat.chat_davinci(self.text, self.df) # Davinci Version
self.say(out)
return out
def process_input_query(self):
"Process input query being converted to text using the Free Speech API from Google."
if 'code' not in self.text:
self.text += '. Just give me the output and do not give me the code.'
if 'hello' in self.text or 'hey' in self.text or 'hi' in self.text:
self.say('hello')
return 'hello'
if 'create' in self.text or 'table' in self.text:
self.say('just a minute..')
            self.text += 'make the resultant dataframe comma separated. Only give me the dataframe and no other text.'
out = self.chat.chat(self.text)
self.say('action performed!')
return out
# Not exiting the program.
if 'bye' in self.text or 'byy' in self.text or 'by' in self.text or 'goodbye' in self.text:
exit()
print('Prompt: ',self.text)
# out = self.chat.chat(self.text)
out = self.chat.chat_davinci(self.text, self.df) # Davinci Version
print('Output: ',out)
if 'create' in self.text or 'prepare' in self.text:
self.say('done!')
else:
self.say(out)
return out
# def listen(bot):
# # Separate function to listen using the Google Cloud Speech API.
# bot.say('tell me what you want?')
# transcript = main()
# bot.text = transcript
# out = bot.process_input_query()
# return out | [
"You are a dataframe wrangler to manipulate datasets.",
"Given the following database schema, write an SQL query : PLACEHOLDER\n\nDatabase schema:\n\nSchema of the table df_main:\n\nSelect all the columns unless specified\nThis is my schema for search on a string use LIKE sql command rather than a query\nFollowing are my intent classes\nSHOW is displaying records/querying of a specific instance\nSORT is sorting\nOPERATION one which belongs of the other\nFILTER is filtering of records\nProduce the SQL Query and given the intent of PLACEHOLDER in this format\nEvery query has one intent class\nSQL Query|%%|Intent class:"
] |
2024-01-10 | sarthakforwet/Auxel | sql_query_parser.py | import sqlite3
import pandas as pd
import os
df = pd.read_csv('data.csv')
conn = sqlite3.connect('auxel_db.sqlite3')
# df.to_sql('sales_data', conn, if_exists='replace')
# LangChain SQL Agent
from langchain import OpenAI, SQLDatabase, SQLDatabaseChain
import sqlite3
os.environ['OPENAI_API_KEY'] = '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'
input_db = SQLDatabase.from_uri('sqlite:///auxel_db.sqlite3')
llm_1 = OpenAI(temperature=0)
db_agent = SQLDatabaseChain(llm=llm_1,
database=input_db,
verbose=True)
out = db_agent.run('create new table for each of the category')
print(type(out))
| [] |
2024-01-10 | SHIRSENDU-KONER/AI-Travel-Agent | textbase~models.py | import json
import openai
import requests
import time
import typing
import traceback
from textbase import Message
# Return list of values of content.
def get_contents(message: Message, data_type: str):
return [
{
"role": message["role"],
"content": content["value"]
}
for content in message["content"]
if content["data_type"] == data_type
]
# Returns content if it's non empty.
def extract_content_values(message: Message):
return [
content["content"]
for content in get_contents(message, "STRING")
if content
]
class OpenAI:
api_key = None
@classmethod
def generate(
cls,
system_prompt: str,
message_history: list[Message],
model="gpt-3.5-turbo",
max_tokens=3000,
temperature=0.7,
function_descriptions_multiple=None
):
assert cls.api_key is not None, "OpenAI API key is not set."
openai.api_key = cls.api_key
filtered_messages = []
for message in message_history:
#list of all the contents inside a single message
contents = get_contents(message, "STRING")
if contents:
filtered_messages.extend(contents)
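        # Send the system prompt plus the filtered history to the chat endpoint;
        # function_descriptions_multiple enables OpenAI function calling ("auto").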
response = openai.ChatCompletion.create(
model=model,
messages=[
{
"role": "system",
"content": system_prompt
},
*map(dict, filtered_messages),
],
temperature=temperature,
max_tokens=max_tokens,
functions=function_descriptions_multiple,
function_call="auto"
)
# print(response)
return response["choices"][0]["message"]#["content"]
class HuggingFace:
api_key = None
@classmethod
def generate(
cls,
system_prompt: str,
message_history: list[Message],
model: typing.Optional[str] = "microsoft/DialoGPT-large",
max_tokens: typing.Optional[int] = 3000,
temperature: typing.Optional[float] = 0.7,
min_tokens: typing.Optional[int] = None,
top_k: typing.Optional[int] = None
) -> str:
try:
assert cls.api_key is not None, "Hugging Face API key is not set."
headers = { "Authorization": f"Bearer { cls.api_key }" }
API_URL = "https://api-inference.huggingface.co/models/" + model
inputs = {
"past_user_inputs": [system_prompt],
"generated_responses": [f"Ok, I will answer according to the context, where context is '{system_prompt}'."],
"text": ""
}
for message in message_history:
if message["role"] == "user":
inputs["past_user_inputs"].extend(extract_content_values(message))
else:
inputs["generated_responses"].extend(extract_content_values(message))
inputs["text"] = inputs["past_user_inputs"].pop(-1)
payload = {
"inputs": inputs,
"max_length": max_tokens,
"temperature": temperature,
"min_length": min_tokens,
"top_k": top_k,
}
data = json.dumps(payload)
response = requests.request("POST", API_URL, headers=headers, data=data)
response = json.loads(response.content.decode("utf-8"))
if response.get("error", None) == "Authorization header is invalid, use 'Bearer API_TOKEN'.":
print("Hugging Face API key is not correct.")
if response.get("estimated_time", None):
print(f"Model is loading please wait for {response.get('estimated_time')}")
time.sleep(response.get("estimated_time"))
response = requests.request("POST", API_URL, headers=headers, data=data)
response = json.loads(response.content.decode("utf-8"))
return response["generated_text"]
except Exception:
print(f"An exception occured while using this model, please try using another model.\nException: {traceback.format_exc()}.")
class BotLibre:
application = None
instance = None
@classmethod
def generate(
cls,
message_history: list[Message],
):
most_recent_message = get_contents(message_history[-1], "STRING")
request = {
"application": cls.application,
"instance": cls.instance,
"message": most_recent_message
}
response = requests.post('https://www.botlibre.com/rest/json/chat', json=request)
data = json.loads(response.text) # parse the JSON data into a dictionary
message = data['message']
return message | [] |
2024-01-10 | adkaa/guidance | guidance~llms~caches~_diskcache.py | import os
import diskcache
import platformdirs
from guidance.llms.caches import Cache
class DiskCache(Cache):
"""DiskCache is a cache that uses diskcache lib."""
def __init__(self, llm_name: str):
self._diskcache = diskcache.Cache(
os.path.join(
platformdirs.user_cache_dir("guidance"), f"_{llm_name}.diskcache"
)
)
def __getitem__(self, key: str) -> str:
return self._diskcache[key]
def __setitem__(self, key: str, value: str) -> None:
self._diskcache[key] = value
def __contains__(self, key: str) -> bool:
return key in self._diskcache
def clear(self):
self._diskcache.clear()
| [] |
2024-01-10 | mboros1/openai_triage_code | create_embeddings.py |
import openai
import chunking
import pandas as pd
# calculate embeddings
EMBEDDING_MODEL = "text-embedding-ada-002" # OpenAI's best embeddings as of Apr 2023
BATCH_SIZE = 1000 # you can submit up to 2048 embedding inputs per request
chunks = chunking.chunk_esi_handbook()
embeddings = []
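# Embed the chunks in batches so each request stays within the API's input limit.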
for batch_start in range(0, len(chunks), BATCH_SIZE):
batch_end = batch_start + BATCH_SIZE
batch = chunks[batch_start:batch_end]
print(f"Batch {batch_start} to {batch_end-1}")
response = openai.embeddings.create(model=EMBEDDING_MODEL, input=batch)
for i, be in enumerate(response.data):
assert i == be.index # double check embeddings are in same order as input
batch_embeddings = [e.embedding for e in response.data]
embeddings.extend(batch_embeddings)
df = pd.DataFrame({"text": chunks, "embedding": embeddings})
# save document chunks and embeddings
SAVE_PATH = "chunked_esi_handbook.csv"
df.to_csv(SAVE_PATH, index=False)
| [] |
2024-01-10 | mboros1/openai_triage_code | chat_completion.py | # imports
import ast # for converting embeddings saved as strings back to arrays
import openai # for calling the OpenAI API
import pandas as pd # for storing text and embeddings data
import tiktoken # for counting tokens
from scipy import spatial # for calculating vector similarities for search
embeddings_path = "chunked_esi_handbook.csv"
df = pd.read_csv(embeddings_path)
# convert embeddings from CSV str type back to list type
df['embedding'] = df['embedding'].apply(ast.literal_eval)
# models
EMBEDDING_MODEL = "text-embedding-ada-002"
GPT_MODEL = "gpt-4-1106-preview"
# search function
def strings_ranked_by_relatedness(
query: str,
df: pd.DataFrame,
relatedness_fn=lambda x, y: 1 - spatial.distance.cosine(x, y),
top_n: int = 100
) -> tuple[list[str], list[float]]:
"""Returns a list of strings and relatednesses, sorted from most related to least."""
query_embedding_response = openai.embeddings.create(
model=EMBEDDING_MODEL,
input=query,
)
query_embedding = query_embedding_response.data[0].embedding
strings_and_relatednesses = [
(row["text"], relatedness_fn(query_embedding, row["embedding"]))
for i, row in df.iterrows()
]
strings_and_relatednesses.sort(key=lambda x: x[1], reverse=True)
strings, relatednesses = zip(*strings_and_relatednesses)
return strings[:top_n], relatednesses[:top_n]
def test_relatedness_value(test_string):
strings, relatednesses = strings_ranked_by_relatedness(test_string, df, top_n=5)
for string, relatedness in zip(strings, relatednesses):
print(f"{relatedness=:.3f}, {string}")
def num_tokens(text: str, model: str = GPT_MODEL) -> int:
"""Return the number of tokens in a string."""
encoding = tiktoken.encoding_for_model(model)
return len(encoding.encode(text))
def query_message(
query: str,
df: pd.DataFrame,
model: str,
token_budget: int
) -> str:
"""Return a message for GPT, with relevant source texts pulled from a dataframe."""
strings, relatednesses = strings_ranked_by_relatedness(query, df)
introduction = "Use the ESI Implementation Handbook below as a reference text. If the answer cannot be found, write 'I don't see those details in text, but I think...' and try to make your best guess as to what the right answer would be."
question = f"\n\nQuestion: {query}"
message = introduction
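    # Append the most-related handbook sections one by one until the token budget would be exceeded.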
for string in strings:
next_article = f'\n\nESI Implementation Handbook section:\n"""\n{string}\n"""'
if (
num_tokens(message + next_article + question, model=model)
> token_budget
):
break
else:
message += next_article
return message + question
def ask(
query: str,
df: pd.DataFrame = df,
model: str = GPT_MODEL,
token_budget: int = 2048,
print_message: bool = False,
) -> str:
"""Answers a query using GPT and a dataframe of relevant texts and embeddings."""
message = query_message(query, df, model=model, token_budget=token_budget)
if print_message:
print(message)
messages = [
{"role": "system", "content": "You answer questions about the ESI Implementation Handbook, and how it could be used."},
{"role": "user", "content": message},
]
response = openai.chat.completions.create(
model=model,
messages=messages,
temperature=0
)
response_message = response.choices[0].message.content
return response_message
print(ask('What is the difference between a level 2 and a level 3 triage level?', print_message=True)) | [
"You answer questions about the ESI Implementation Handbook, and how it could be used."
] |
2024-01-10 | mboros1/openai_triage_code | ask_simple.py | import openai # for calling the OpenAI API
GPT_MODEL = "gpt-4-1106-preview"
text = open('esi-implementation-handbook-2020.txt', 'r').read()
question = """
What data is needed to triage a patient using ESI?
"""
query = f"""Use the ESI Implementation handbook below as a reference text. If the answer cannot be found, write "I don't see those details in text, but I think..." and try to make your best guess as to what the right answer would be.
Article:
\"\"\"
{text}
\"\"\"
Question: {question}
"""
response = openai.chat.completions.create(
messages=[
{'role': 'system', 'content': 'You answer questions about ESI triage implementation related to software implementation.'},
{'role': 'user', 'content': query},
],
model=GPT_MODEL,
temperature=0,
)
print(response.choices[0].message.content)
| [
"You answer questions about ESI triage implementation related to software implementation.",
"Use the ESI Implementation handbook below as a reference text. If the answer cannot be found, write \"I don't see those details in text, but I think...\" and try to make your best guess as to what the right answer would be.\n\nArticle:\n\"\"\"\nPLACEHOLDER\n\"\"\"\n\nQuestion: \nWhat data is needed to triage a patient using ESI?\n\n"
] |
2024-01-10 | Backlory/gpt_academic | crazy_functions~crazy_utils.py | from toolbox import update_ui, get_conf, trimmed_format_exc, get_log_folder
import threading
import os
import logging
def input_clipping(inputs, history, max_token_limit):
import numpy as np
from request_llm.bridge_all import model_info
enc = model_info["gpt-3.5-turbo"]['tokenizer']
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
mode = 'input-and-history'
    # When the input takes up less than half of the total tokens, only clip the history
input_token_num = get_token_num(inputs)
if input_token_num < max_token_limit//2:
mode = 'only-history'
max_token_limit = max_token_limit - input_token_num
everything = [inputs] if mode == 'input-and-history' else ['']
everything.extend(history)
n_token = get_token_num('\n'.join(everything))
everything_token = [get_token_num(e) for e in everything]
    delta = max(everything_token) // 16 # granularity of each truncation step
while n_token > max_token_limit:
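        # Trim a chunk of tokens from whichever entry is currently the longest,
        # then recount, until the total fits within the limit.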
where = np.argmax(everything_token)
encoded = enc.encode(everything[where], disallowed_special=())
clipped_encoded = encoded[:len(encoded)-delta]
        everything[where] = enc.decode(clipped_encoded)[:-1] # -1 to drop a possibly corrupted trailing char
everything_token[where] = get_token_num(everything[where])
n_token = get_token_num('\n'.join(everything))
if mode == 'input-and-history':
inputs = everything[0]
else:
pass
history = everything[1:]
return inputs, history
def request_gpt_model_in_new_thread_with_ui_alive(
inputs, inputs_show_user, llm_kwargs,
chatbot, history, sys_prompt, refresh_interval=0.2,
handle_token_exceed=True,
retry_times_at_unknown_error=2,
):
"""
    Request the GPT model while keeping the user interface alive.
    Args (input variables ending in _array are lists whose length equals the number of subtasks; when executed, the list is unpacked and each element is handled in its own thread):
        inputs (string): List of inputs
        inputs_show_user (string): List of inputs to show user (the input shown in the report; use it to hide verbose raw inputs in the summary report and improve readability)
        top_p (float): Top p value for sampling from model distribution (GPT parameter, float)
        temperature (float): Temperature value for sampling from model distribution (GPT parameter, float)
        chatbot: chatbot inputs and outputs (handle of the UI dialog window, used for data-stream visualization)
        history (list): List of chat history (the conversation history)
        sys_prompt (string): List of system prompts (system input; the premise prompt fed to GPT, e.g. "you are a translator, ...")
        refresh_interval (float, optional): Refresh interval for UI (default: 0.2) (refresh frequency; keep it below 1 and never above 3, it only affects the visual effect)
        handle_token_exceed: whether to handle token overflow automatically; if enabled, the text is truncated aggressively on overflow (enabled by default)
        retry_times_at_unknown_error: number of retries on failure
    Returns:
        future: the result returned by GPT
"""
import time
from concurrent.futures import ThreadPoolExecutor
from request_llm.bridge_all import predict_no_ui_long_connection
    # User feedback
    chatbot.append([inputs_show_user, ""])
    yield from update_ui(chatbot=chatbot, history=[]) # refresh the UI
executor = ThreadPoolExecutor(max_workers=16)
mutable = ["", time.time(), ""]
def _req_gpt(inputs, history, sys_prompt):
retry_op = retry_times_at_unknown_error
exceeded_cnt = 0
while True:
# watchdog error
if len(mutable) >= 2 and (time.time()-mutable[1]) > 5:
raise RuntimeError("检测到程序终止。")
try:
# 【第一种情况】:顺利完成
result = predict_no_ui_long_connection(
inputs=inputs, llm_kwargs=llm_kwargs,
history=history, sys_prompt=sys_prompt, observe_window=mutable)
return result
except ConnectionAbortedError as token_exceeded_error:
                # [Case 2]: token overflow
                if handle_token_exceed:
                    exceeded_cnt += 1
                    # [Handle it]: estimate the ratio and keep as much text as possible
from toolbox import get_reduce_token_percent
p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error))
MAX_TOKEN = 4096
EXCEED_ALLO = 512 + 512 * exceeded_cnt
inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN-EXCEED_ALLO)
                    mutable[0] += f'[Local Message] Warning: the text is too long and will be truncated. Token overflow count: {n_exceed}.\n\n'
                    continue # go back and retry
else:
                    # [Give up]
                    tb_str = '```\n' + trimmed_format_exc() + '```'
                    mutable[0] += f"[Local Message] Warning: a problem occurred during execution, Traceback:\n\n{tb_str}\n\n"
                    return mutable[0] # give up
except:
                # [Case 3]: other errors: retry a few times
                tb_str = '```\n' + trimmed_format_exc() + '```'
                print(tb_str)
                mutable[0] += f"[Local Message] Warning: a problem occurred during execution, Traceback:\n\n{tb_str}\n\n"
if retry_op > 0:
retry_op -= 1
mutable[0] += f"[Local Message] 重试中,请稍等 {retry_times_at_unknown_error-retry_op}/{retry_times_at_unknown_error}:\n\n"
if ("Rate limit reached" in tb_str) or ("Too Many Requests" in tb_str):
time.sleep(30)
time.sleep(5)
continue # 返回重试
else:
time.sleep(5)
return mutable[0] # 放弃
# 提交任务
future = executor.submit(_req_gpt, inputs, history, sys_prompt)
while True:
# yield一次以刷新前端页面
time.sleep(refresh_interval)
# “喂狗”(看门狗)
mutable[1] = time.time()
if future.done():
break
chatbot[-1] = [chatbot[-1][0], mutable[0]]
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
final_result = future.result()
chatbot[-1] = [chatbot[-1][0], final_result]
yield from update_ui(chatbot=chatbot, history=[]) # 如果最后成功了,则删除报错信息
return final_result
def can_multi_process(llm):
if llm.startswith('gpt-'): return True
if llm.startswith('api2d-'): return True
if llm.startswith('azure-'): return True
return False
def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
inputs_array, inputs_show_user_array, llm_kwargs,
chatbot, history_array, sys_prompt_array,
refresh_interval=0.2, max_workers=-1, scroller_max_len=30,
handle_token_exceed=True, show_user_at_complete=False,
retry_times_at_unknown_error=2,
):
"""
Request GPT model using multiple threads with UI and high efficiency
请求GPT模型的[多线程]版。
具备以下功能:
实时在UI上反馈远程数据流
使用线程池,可调节线程池的大小避免openai的流量限制错误
处理中途中止的情况
网络等出问题时,会把traceback和已经接收的数据转入输出
输入参数 Args (以_array结尾的输入变量都是列表,列表长度为子任务的数量,执行时,会把列表拆解,放到每个子线程中分别执行):
inputs_array (list): List of inputs (每个子任务的输入)
inputs_show_user_array (list): List of inputs to show user(每个子任务展现在报告中的输入,借助此参数,在汇总报告中隐藏啰嗦的真实输入,增强报告的可读性)
llm_kwargs: llm_kwargs参数
chatbot: chatbot (用户界面对话窗口句柄,用于数据流可视化)
history_array (list): List of chat history (历史对话输入,双层列表,第一层列表是子任务分解,第二层列表是对话历史)
sys_prompt_array (list): List of system prompts (系统输入,列表,用于输入给GPT的前提提示,比如你是翻译官怎样怎样)
refresh_interval (float, optional): Refresh interval for UI (default: 0.2) (刷新时间间隔频率,建议低于1,不可高于3,仅仅服务于视觉效果)
max_workers (int, optional): Maximum number of threads (default: see config.py) (最大线程数,如果子任务非常多,需要用此选项防止高频地请求openai导致错误)
scroller_max_len (int, optional): Maximum length for scroller (default: 30)(数据流的显示最后收到的多少个字符,仅仅服务于视觉效果)
handle_token_exceed (bool, optional): (是否在输入过长时,自动缩减文本)
handle_token_exceed:是否自动处理token溢出的情况,如果选择自动处理,则会在溢出时暴力截断,默认开启
show_user_at_complete (bool, optional): (在结束时,把完整输入-输出结果显示在聊天框)
retry_times_at_unknown_error:子任务失败时的重试次数
输出 Returns:
list: List of GPT model responses (每个子任务的输出汇总,如果某个子任务出错,response中会携带traceback报错信息,方便调试和定位问题。)
"""
import time, random
from concurrent.futures import ThreadPoolExecutor
from request_llm.bridge_all import predict_no_ui_long_connection
assert len(inputs_array) == len(history_array)
assert len(inputs_array) == len(sys_prompt_array)
if max_workers == -1: # 读取配置文件
try: max_workers, = get_conf('DEFAULT_WORKER_NUM')
except: max_workers = 8
if max_workers <= 0: max_workers = 3
# 屏蔽掉 chatglm的多线程,可能会导致严重卡顿
if not can_multi_process(llm_kwargs['llm_model']):
max_workers = 1
executor = ThreadPoolExecutor(max_workers=max_workers)
n_frag = len(inputs_array)
# 用户反馈
chatbot.append(["请开始多线程操作。", ""])
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
# 跨线程传递
mutable = [["", time.time(), "等待中"] for _ in range(n_frag)]
# 子线程任务
def _req_gpt(index, inputs, history, sys_prompt):
gpt_say = ""
retry_op = retry_times_at_unknown_error
exceeded_cnt = 0
mutable[index][2] = "执行中"
while True:
# watchdog error
if len(mutable[index]) >= 2 and (time.time()-mutable[index][1]) > 5:
raise RuntimeError("检测到程序终止。")
try:
# 【第一种情况】:顺利完成
# time.sleep(10); raise RuntimeError("测试")
gpt_say = predict_no_ui_long_connection(
inputs=inputs, llm_kwargs=llm_kwargs, history=history,
sys_prompt=sys_prompt, observe_window=mutable[index], console_slience=True
)
mutable[index][2] = "已成功"
return gpt_say
except ConnectionAbortedError as token_exceeded_error:
# 【第二种情况】:Token溢出,
if handle_token_exceed:
exceeded_cnt += 1
# 【选择处理】 尝试计算比例,尽可能多地保留文本
from toolbox import get_reduce_token_percent
p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error))
MAX_TOKEN = 4096
EXCEED_ALLO = 512 + 512 * exceeded_cnt
inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN-EXCEED_ALLO)
gpt_say += f'[Local Message] 警告,文本过长将进行截断,Token溢出数:{n_exceed}。\n\n'
mutable[index][2] = f"截断重试"
continue # 返回重试
else:
# 【选择放弃】
tb_str = '```\n' + trimmed_format_exc() + '```'
gpt_say += f"[Local Message] 警告,线程{index}在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
if len(mutable[index][0]) > 0: gpt_say += "此线程失败前收到的回答:\n\n" + mutable[index][0]
mutable[index][2] = "输入过长已放弃"
return gpt_say # 放弃
except:
# 【第三种情况】:其他错误
tb_str = '```\n' + trimmed_format_exc() + '```'
print(tb_str)
gpt_say += f"[Local Message] 警告,线程{index}在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
if len(mutable[index][0]) > 0: gpt_say += "此线程失败前收到的回答:\n\n" + mutable[index][0]
if retry_op > 0:
retry_op -= 1
wait = random.randint(5, 20)
if ("Rate limit reached" in tb_str) or ("Too Many Requests" in tb_str):
wait = wait * 3
fail_info = "OpenAI绑定信用卡可解除频率限制 "
else:
fail_info = ""
# 也许等待十几秒后,情况会好转
for i in range(wait):
mutable[index][2] = f"{fail_info}等待重试 {wait-i}"; time.sleep(1)
# 开始重试
mutable[index][2] = f"重试中 {retry_times_at_unknown_error-retry_op}/{retry_times_at_unknown_error}"
continue # 返回重试
else:
mutable[index][2] = "已失败"
wait = 5
time.sleep(5)
return gpt_say # 放弃
# 异步任务开始
futures = [executor.submit(_req_gpt, index, inputs, history, sys_prompt) for index, inputs, history, sys_prompt in zip(
range(len(inputs_array)), inputs_array, history_array, sys_prompt_array)]
cnt = 0
while True:
# yield一次以刷新前端页面
time.sleep(refresh_interval)
cnt += 1
worker_done = [h.done() for h in futures]
# 更好的UI视觉效果
observe_win = []
# 每个线程都要“喂狗”(看门狗)
for thread_index, _ in enumerate(worker_done):
mutable[thread_index][1] = time.time()
# 在前端打印些好玩的东西
for thread_index, _ in enumerate(worker_done):
print_something_really_funny = "[ ...`"+mutable[thread_index][0][-scroller_max_len:].\
replace('\n', '').replace('```', '...').replace(
' ', '.').replace('<br/>', '.....').replace('$', '.')+"`... ]"
observe_win.append(print_something_really_funny)
# 在前端打印些好玩的东西
stat_str = ''.join([f'`{mutable[thread_index][2]}`: {obs}\n\n'
if not done else f'`{mutable[thread_index][2]}`\n\n'
for thread_index, done, obs in zip(range(len(worker_done)), worker_done, observe_win)])
# 在前端打印些好玩的东西
chatbot[-1] = [chatbot[-1][0], f'多线程操作已经开始,完成情况: \n\n{stat_str}' + ''.join(['.']*(cnt % 10+1))]
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
if all(worker_done):
executor.shutdown()
break
# 异步任务结束
gpt_response_collection = []
for inputs_show_user, f in zip(inputs_show_user_array, futures):
gpt_res = f.result()
gpt_response_collection.extend([inputs_show_user, gpt_res])
# 是否在结束时,在界面上显示结果
if show_user_at_complete:
for inputs_show_user, f in zip(inputs_show_user_array, futures):
gpt_res = f.result()
chatbot.append([inputs_show_user, gpt_res])
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
time.sleep(0.3)
return gpt_response_collection
def breakdown_txt_to_satisfy_token_limit(txt, get_token_fn, limit):
def cut(txt_tocut, must_break_at_empty_line): # 递归
if get_token_fn(txt_tocut) <= limit:
return [txt_tocut]
else:
lines = txt_tocut.split('\n')
estimated_line_cut = limit / get_token_fn(txt_tocut) * len(lines)
estimated_line_cut = int(estimated_line_cut)
for cnt in reversed(range(estimated_line_cut)):
if must_break_at_empty_line:
if lines[cnt] != "":
continue
print(cnt)
prev = "\n".join(lines[:cnt])
post = "\n".join(lines[cnt:])
if get_token_fn(prev) < limit:
break
if cnt == 0:
raise RuntimeError("存在一行极长的文本!")
# print(len(post))
# 列表递归接龙
result = [prev]
result.extend(cut(post, must_break_at_empty_line))
return result
try:
return cut(txt, must_break_at_empty_line=True)
except RuntimeError:
return cut(txt, must_break_at_empty_line=False)
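# Illustrative sketch, not called anywhere in this module: splitting a long
# string into model-sized fragments. The cl100k_base encoding and the
# 2048-token limit are example choices, not values fixed by this project.
def _example_breakdown_by_token_limit(long_text):
    import tiktoken
    enc_demo = tiktoken.get_encoding("cl100k_base")
    count_tokens = lambda txt: len(enc_demo.encode(txt, disallowed_special=()))
    return breakdown_txt_to_satisfy_token_limit(long_text, get_token_fn=count_tokens, limit=2048)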
def force_breakdown(txt, limit, get_token_fn):
"""
    When the text cannot be split on punctuation or blank lines, fall back to the most brute-force character split.
"""
for i in reversed(range(len(txt))):
if get_token_fn(txt[:i]) < limit:
return txt[:i], txt[i:]
return "Tiktoken未知错误", "Tiktoken未知错误"
def breakdown_txt_to_satisfy_token_limit_for_pdf(txt, get_token_fn, limit):
# 递归
def cut(txt_tocut, must_break_at_empty_line, break_anyway=False):
if get_token_fn(txt_tocut) <= limit:
return [txt_tocut]
else:
lines = txt_tocut.split('\n')
estimated_line_cut = limit / get_token_fn(txt_tocut) * len(lines)
estimated_line_cut = int(estimated_line_cut)
cnt = 0
for cnt in reversed(range(estimated_line_cut)):
if must_break_at_empty_line:
if lines[cnt] != "":
continue
prev = "\n".join(lines[:cnt])
post = "\n".join(lines[cnt:])
if get_token_fn(prev) < limit:
break
if cnt == 0:
if break_anyway:
prev, post = force_breakdown(txt_tocut, limit, get_token_fn)
else:
raise RuntimeError(f"存在一行极长的文本!{txt_tocut}")
# print(len(post))
# 列表递归接龙
result = [prev]
result.extend(cut(post, must_break_at_empty_line, break_anyway=break_anyway))
return result
try:
# 第1次尝试,将双空行(\n\n)作为切分点
return cut(txt, must_break_at_empty_line=True)
except RuntimeError:
try:
# 第2次尝试,将单空行(\n)作为切分点
return cut(txt, must_break_at_empty_line=False)
except RuntimeError:
try:
# 第3次尝试,将英文句号(.)作为切分点
res = cut(txt.replace('.', '。\n'), must_break_at_empty_line=False) # 这个中文的句号是故意的,作为一个标识而存在
return [r.replace('。\n', '.') for r in res]
except RuntimeError as e:
try:
# 第4次尝试,将中文句号(。)作为切分点
res = cut(txt.replace('。', '。。\n'), must_break_at_empty_line=False)
return [r.replace('。。\n', '。') for r in res]
except RuntimeError as e:
# 第5次尝试,没办法了,随便切一下敷衍吧
return cut(txt, must_break_at_empty_line=False, break_anyway=True)
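# Illustrative sketch, not called anywhere in this module: the *_for_pdf variant
# falls back from blank-line splits to sentence splits and finally to a hard
# character split, so even a pathological single-line string comes back in
# pieces instead of raising. The character-based token estimate is only a
# stand-in to keep the example self-contained.
def _example_breakdown_pdf_text():
    pathological = "word" * 5000  # one extremely long line without separators
    rough_token_count = lambda txt: len(txt) // 4
    return breakdown_txt_to_satisfy_token_limit_for_pdf(
        pathological, get_token_fn=rough_token_count, limit=1000)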
def read_and_clean_pdf_text(fp):
"""
    This function splits a PDF. It relies on a number of tricks and the logic is messy, but the results are surprisingly good.
    **Input**
    - `fp`: path of the PDF file whose text should be read and cleaned
    **Output**
    - `meta_txt`: the cleaned text content as a single string
    - `page_one_meta`: list of cleaned text blocks from the first page
    **What it does**
    Reads the PDF file and cleans up its text content. The cleaning rules include:
    - extract the text of every block element and merge it into one string
    - drop short blocks (fewer than 100 characters) and replace them with a newline
    - remove redundant blank lines
    - merge paragraph blocks that start with a lowercase letter, joining them with a space
    - collapse repeated newlines
    - replace each newline with two newlines so that paragraphs are separated by a blank line
"""
import fitz, copy
import re
import numpy as np
from colorful import print亮黄, print亮绿
fc = 0 # Index 0 文本
fs = 1 # Index 1 字体
fb = 2 # Index 2 框框
REMOVE_FOOT_NOTE = True # 是否丢弃掉 不是正文的内容 (比正文字体小,如参考文献、脚注、图注等)
REMOVE_FOOT_FFSIZE_PERCENT = 0.95 # 小于正文的?时,判定为不是正文(有些文章的正文部分字体大小不是100%统一的,有肉眼不可见的小变化)
def primary_ffsize(l):
"""
提取文本块主字体
"""
fsize_statiscs = {}
for wtf in l['spans']:
if wtf['size'] not in fsize_statiscs: fsize_statiscs[wtf['size']] = 0
fsize_statiscs[wtf['size']] += len(wtf['text'])
return max(fsize_statiscs, key=fsize_statiscs.get)
def ffsize_same(a,b):
"""
提取字体大小是否近似相等
"""
return abs((a-b)/max(a,b)) < 0.02
with fitz.open(fp) as doc:
meta_txt = []
meta_font = []
meta_line = []
meta_span = []
############################## <第 1 步,搜集初始信息> ##################################
for index, page in enumerate(doc):
# file_content += page.get_text()
text_areas = page.get_text("dict") # 获取页面上的文本信息
for t in text_areas['blocks']:
if 'lines' in t:
pf = 998
for l in t['lines']:
txt_line = "".join([wtf['text'] for wtf in l['spans']])
if len(txt_line) == 0: continue
pf = primary_ffsize(l)
meta_line.append([txt_line, pf, l['bbox'], l])
for wtf in l['spans']: # for l in t['lines']:
meta_span.append([wtf['text'], wtf['size'], len(wtf['text'])])
# meta_line.append(["NEW_BLOCK", pf])
# 块元提取 for each word segment with in line for each line cross-line words for each block
meta_txt.extend([" ".join(["".join([wtf['text'] for wtf in l['spans']]) for l in t['lines']]).replace(
'- ', '') for t in text_areas['blocks'] if 'lines' in t])
meta_font.extend([np.mean([np.mean([wtf['size'] for wtf in l['spans']])
for l in t['lines']]) for t in text_areas['blocks'] if 'lines' in t])
if index == 0:
page_one_meta = [" ".join(["".join([wtf['text'] for wtf in l['spans']]) for l in t['lines']]).replace(
'- ', '') for t in text_areas['blocks'] if 'lines' in t]
############################## <第 2 步,获取正文主字体> ##################################
try:
fsize_statiscs = {}
for span in meta_span:
if span[1] not in fsize_statiscs: fsize_statiscs[span[1]] = 0
fsize_statiscs[span[1]] += span[2]
main_fsize = max(fsize_statiscs, key=fsize_statiscs.get)
if REMOVE_FOOT_NOTE:
give_up_fize_threshold = main_fsize * REMOVE_FOOT_FFSIZE_PERCENT
except:
raise RuntimeError(f'抱歉, 我们暂时无法解析此PDF文档: {fp}。')
############################## <第 3 步,切分和重新整合> ##################################
mega_sec = []
sec = []
for index, line in enumerate(meta_line):
if index == 0:
sec.append(line[fc])
continue
if REMOVE_FOOT_NOTE:
if meta_line[index][fs] <= give_up_fize_threshold:
continue
if ffsize_same(meta_line[index][fs], meta_line[index-1][fs]):
# 尝试识别段落
if meta_line[index][fc].endswith('.') and\
(meta_line[index-1][fc] != 'NEW_BLOCK') and \
(meta_line[index][fb][2] - meta_line[index][fb][0]) < (meta_line[index-1][fb][2] - meta_line[index-1][fb][0]) * 0.7:
sec[-1] += line[fc]
sec[-1] += "\n\n"
else:
sec[-1] += " "
sec[-1] += line[fc]
else:
if (index+1 < len(meta_line)) and \
meta_line[index][fs] > main_fsize:
# 单行 + 字体大
mega_sec.append(copy.deepcopy(sec))
sec = []
sec.append("# " + line[fc])
else:
# 尝试识别section
if meta_line[index-1][fs] > meta_line[index][fs]:
sec.append("\n" + line[fc])
else:
sec.append(line[fc])
mega_sec.append(copy.deepcopy(sec))
finals = []
for ms in mega_sec:
final = " ".join(ms)
final = final.replace('- ', ' ')
finals.append(final)
meta_txt = finals
############################## <第 4 步,乱七八糟的后处理> ##################################
def 把字符太少的块清除为回车(meta_txt):
for index, block_txt in enumerate(meta_txt):
if len(block_txt) < 100:
meta_txt[index] = '\n'
return meta_txt
meta_txt = 把字符太少的块清除为回车(meta_txt)
def 清理多余的空行(meta_txt):
for index in reversed(range(1, len(meta_txt))):
if meta_txt[index] == '\n' and meta_txt[index-1] == '\n':
meta_txt.pop(index)
return meta_txt
meta_txt = 清理多余的空行(meta_txt)
def 合并小写开头的段落块(meta_txt):
def starts_with_lowercase_word(s):
pattern = r"^[a-z]+"
match = re.match(pattern, s)
if match:
return True
else:
return False
for _ in range(100):
for index, block_txt in enumerate(meta_txt):
if starts_with_lowercase_word(block_txt):
if meta_txt[index-1] != '\n':
meta_txt[index-1] += ' '
else:
meta_txt[index-1] = ''
meta_txt[index-1] += meta_txt[index]
meta_txt[index] = '\n'
return meta_txt
meta_txt = 合并小写开头的段落块(meta_txt)
meta_txt = 清理多余的空行(meta_txt)
meta_txt = '\n'.join(meta_txt)
# 清除重复的换行
for _ in range(5):
meta_txt = meta_txt.replace('\n\n', '\n')
# 换行 -> 双换行
meta_txt = meta_txt.replace('\n', '\n\n')
############################## <第 5 步,展示分割效果> ##################################
# for f in finals:
# print亮黄(f)
# print亮绿('***************************')
return meta_txt, page_one_meta
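# Illustrative sketch, not called anywhere in this module: typical use of
# read_and_clean_pdf_text, feeding the cleaned full text into the token-based
# splitter above. "paper.pdf" is a placeholder path, not a file shipped with
# this project.
def _example_read_pdf(pdf_path="paper.pdf"):
    full_text, first_page_blocks = read_and_clean_pdf_text(pdf_path)
    rough_token_count = lambda txt: len(txt) // 4
    sections = breakdown_txt_to_satisfy_token_limit_for_pdf(
        full_text, get_token_fn=rough_token_count, limit=2048)
    return sections, first_page_blocks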
def get_files_from_everything(txt, type): # type='.md'
"""
    Collects every file of the given type (e.g. .md) under the given location; files hosted on the web can be fetched as well.
    Parameters
    - txt: a path or URL - the file, the folder to search, or a remote resource.
    - type: string giving the file extension to look for. Defaults to .md.
    Returns
    - success: boolean indicating whether the function executed successfully.
    - file_manifest: list of absolute paths of all files with the requested extension.
    - project_folder: string with the folder containing the files; for remote files this is a temporary download folder.
"""
import glob, os
success = True
if txt.startswith('http'):
# 网络的远程文件
import requests
from toolbox import get_conf
from toolbox import get_log_folder, gen_time_str
proxies, = get_conf('proxies')
try:
r = requests.get(txt, proxies=proxies)
except:
raise ConnectionRefusedError(f"无法下载资源{txt},请检查。")
path = os.path.join(get_log_folder(plugin_name='web_download'), gen_time_str()+type)
with open(path, 'wb+') as f: f.write(r.content)
project_folder = get_log_folder(plugin_name='web_download')
file_manifest = [path]
elif txt.endswith(type):
# 直接给定文件
file_manifest = [txt]
project_folder = os.path.dirname(txt)
elif os.path.exists(txt):
# 本地路径,递归搜索
project_folder = txt
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*'+type, recursive=True)]
if len(file_manifest) == 0:
success = False
else:
project_folder = None
file_manifest = []
success = False
return success, file_manifest, project_folder
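# Illustrative sketch, not called anywhere in this module: collecting every
# markdown file under a local folder. "./docs" is a placeholder; a URL or a
# single file path would work the same way.
def _example_collect_markdown_files():
    success, file_manifest, project_folder = get_files_from_everything("./docs", type='.md')
    return file_manifest if success else []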
def Singleton(cls):
_instance = {}
def _singleton(*args, **kargs):
if cls not in _instance:
_instance[cls] = cls(*args, **kargs)
return _instance[cls]
return _singleton
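# Illustrative sketch, not called anywhere in this module: the Singleton
# decorator caches one instance per decorated class, so repeated construction
# returns the same object. The _Counter class exists only for this example.
def _example_singleton_behaviour():
    @Singleton
    class _Counter():
        def __init__(self):
            self.value = 0
    a = _Counter()
    b = _Counter()
    a.value += 1
    return b.value  # 1, because a and b are the same cached instance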
@Singleton
class knowledge_archive_interface():
def __init__(self) -> None:
self.threadLock = threading.Lock()
self.current_id = ""
self.kai_path = None
self.qa_handle = None
self.text2vec_large_chinese = None
def get_chinese_text2vec(self):
if self.text2vec_large_chinese is None:
# < -------------------预热文本向量化模组--------------- >
from toolbox import ProxyNetworkActivate
print('Checking Text2vec ...')
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
with ProxyNetworkActivate(): # 临时地激活代理网络
self.text2vec_large_chinese = HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese")
return self.text2vec_large_chinese
def feed_archive(self, file_manifest, id="default"):
self.threadLock.acquire()
# import uuid
self.current_id = id
from zh_langchain import construct_vector_store
self.qa_handle, self.kai_path = construct_vector_store(
vs_id=self.current_id,
files=file_manifest,
sentence_size=100,
history=[],
one_conent="",
one_content_segmentation="",
text2vec = self.get_chinese_text2vec(),
)
self.threadLock.release()
def get_current_archive_id(self):
return self.current_id
def get_loaded_file(self):
return self.qa_handle.get_loaded_file()
def answer_with_archive_by_id(self, txt, id):
self.threadLock.acquire()
if not self.current_id == id:
self.current_id = id
from zh_langchain import construct_vector_store
self.qa_handle, self.kai_path = construct_vector_store(
vs_id=self.current_id,
files=[],
sentence_size=100,
history=[],
one_conent="",
one_content_segmentation="",
text2vec = self.get_chinese_text2vec(),
)
VECTOR_SEARCH_SCORE_THRESHOLD = 0
VECTOR_SEARCH_TOP_K = 4
CHUNK_SIZE = 512
resp, prompt = self.qa_handle.get_knowledge_based_conent_test(
query = txt,
vs_path = self.kai_path,
score_threshold=VECTOR_SEARCH_SCORE_THRESHOLD,
vector_search_top_k=VECTOR_SEARCH_TOP_K,
chunk_conent=True,
chunk_size=CHUNK_SIZE,
text2vec = self.get_chinese_text2vec(),
)
self.threadLock.release()
return resp, prompt
@Singleton
class nougat_interface():
def __init__(self):
self.threadLock = threading.Lock()
def nougat_with_timeout(self, command, cwd, timeout=3600):
import subprocess
logging.info(f'正在执行命令 {command}')
process = subprocess.Popen(command, shell=True, cwd=cwd)
try:
stdout, stderr = process.communicate(timeout=timeout)
except subprocess.TimeoutExpired:
process.kill()
stdout, stderr = process.communicate()
print("Process timed out!")
return False
return True
def NOUGAT_parse_pdf(self, fp, chatbot, history):
from toolbox import update_ui_lastest_msg
yield from update_ui_lastest_msg("正在解析论文, 请稍候。进度:正在排队, 等待线程锁...",
chatbot=chatbot, history=history, delay=0)
self.threadLock.acquire()
import glob, threading, os
from toolbox import get_log_folder, gen_time_str
dst = os.path.join(get_log_folder(plugin_name='nougat'), gen_time_str())
os.makedirs(dst)
yield from update_ui_lastest_msg("正在解析论文, 请稍候。进度:正在加载NOUGAT... (提示:首次运行需要花费较长时间下载NOUGAT参数)",
chatbot=chatbot, history=history, delay=0)
self.nougat_with_timeout(f'nougat --out "{os.path.abspath(dst)}" "{os.path.abspath(fp)}"', os.getcwd(), timeout=3600)
res = glob.glob(os.path.join(dst,'*.mmd'))
if len(res) == 0:
self.threadLock.release()
raise RuntimeError("Nougat解析论文失败。")
self.threadLock.release()
return res[0]
def try_install_deps(deps, reload_m=[]):
import subprocess, sys, importlib
for dep in deps:
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '--user', dep])
import site
importlib.reload(site)
for m in reload_m:
importlib.reload(__import__(m))
HTML_CSS = """
.row {
display: flex;
flex-wrap: wrap;
}
.column {
flex: 1;
padding: 10px;
}
.table-header {
font-weight: bold;
border-bottom: 1px solid black;
}
.table-row {
border-bottom: 1px solid lightgray;
}
.table-cell {
padding: 5px;
}
"""
TABLE_CSS = """
<div class="row table-row">
<div class="column table-cell">REPLACE_A</div>
<div class="column table-cell">REPLACE_B</div>
</div>
"""
class construct_html():
def __init__(self) -> None:
self.css = HTML_CSS
self.html_string = f'<!DOCTYPE html><head><meta charset="utf-8"><title>翻译结果</title><style>{self.css}</style></head>'
def add_row(self, a, b):
tmp = TABLE_CSS
from toolbox import markdown_convertion
tmp = tmp.replace('REPLACE_A', markdown_convertion(a))
tmp = tmp.replace('REPLACE_B', markdown_convertion(b))
self.html_string += tmp
def save_file(self, file_name):
with open(os.path.join(get_log_folder(), file_name), 'w', encoding='utf8') as f:
f.write(self.html_string.encode('utf-8', 'ignore').decode())
return os.path.join(get_log_folder(), file_name)
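# Illustrative sketch, not called anywhere in this module: building a simple
# two-column original/translation report with construct_html. The row contents
# and the output file name are example values.
def _example_build_translation_report():
    page = construct_html()
    page.add_row("# Source heading", "# Translated heading")
    page.add_row("First paragraph of the source text.", "Translated first paragraph.")
    return page.save_file("translation_report.html")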
| [] |
2024-01-10 | arun13go/Azure-OpenAI-Summarisation-Embeddings-QnA | utilities~summarisation.py | # summarise the large text if it is more than 4000 OpenAI tokens
import pandas as pd
import numpy as np
import openai
import os
import redisembeddings
from translator import translate
from openai.embeddings_utils import get_embedding
from tenacity import retry, wait_random_exponential, stop_after_attempt
from redisembeddings import set_document
from formrecognizer import analyze_read
from langchain.text_splitter import RecursiveCharacterTextSplitter
# Max LangChain token limit for the Davinci model
DEFAULT_CHUNK_LIMIT = 20000
CHUNK_LIMIT = 8000
def initialize(engine='davinci'):
openai.api_type = "azure"
openai.api_base = os.getenv('OPENAI_API_BASE')
openai.api_version = "2022-12-01"
openai.api_key = os.getenv("OPENAI_API_KEY")
CHUNK_LIMIT = os.getenv("OPENAI_CHUNK_LIMIT")
redisembeddings.initialize()
# summarise the file content and embed
def convert_file_and_add_summarisation_and_embed(fullpath, filename, enable_translation=False):
# Extract the text from the file
text = analyze_read(fullpath)
if enable_translation:
text = list(map(lambda x: translate(x), text))
summary_text = "".join(text)
return add_summarisation_embeddings(summary_text,filename, os.getenv('OPENAI_SUMMARISATION_ENGINE_DOC', 'text-davinci-003'),os.getenv('OPENAI_EMBEDDINGS_ENGINE_DOC', 'text-embedding-ada-002'))
def add_summarisation_embeddings(text, filename, summarise_engine="text-davinci-003",embed_engine="text-embedding-ada-002"):
summarisation = chunk_and_summarise_embed(text, filename, summarise_engine,embed_engine,CHUNK_LIMIT)
if summarisation:
# Store embeddings in Redis
set_document(summarisation)
return True
else:
print("No summarisation and embeddings were created for this document as document is invaild or unable to read. Please check the document")
return False
def chunk_and_summarise_embed(text: str, filename="", summarise_engine="text-davinci-003",embed_engine="text-embedding-ada-002", chunk_limit=8000):
# set the maximum chunks limit to 8000
CHUNK_LIMIT = chunk_limit
if CHUNK_LIMIT > DEFAULT_CHUNK_LIMIT:
CHUNK_LIMIT = DEFAULT_CHUNK_LIMIT
full_data = {
"text": text,
"filename": filename,
"search_embeddings": None
}
# call LangChain TokenTextSplitter to split the text semantically
text_splitter = RecursiveCharacterTextSplitter(
# Set a really small chunk size
chunk_size = CHUNK_LIMIT,
separators=["\n\n", "\n", ".", "!", "?", ",", " ", ""],
chunk_overlap=0,
#chunk_overlap = 200,
#length_function = len,
)
split_texts = text_splitter.create_documents([text])
    # get a summary for each chunk
    summary = ''
    for x in range(len(split_texts)):
        # summarise each chunk and append the result
        # send the previous summarisation text
response = get_summarise((str(split_texts[x])), summarise_engine)
summary += f"{response['choices'][0]['text']}\n"
# get the embeddings for summarisation
full_data['text'] = summary
full_data['search_embeddings'] = get_embedding(summary, embed_engine)
return full_data
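# Illustrative sketch, not part of the pipeline above: summarising and embedding
# a raw string directly, without going through the form recognizer. The engine
# names mirror the defaults used elsewhere in this module and may need to be
# swapped for the deployments available in your Azure OpenAI resource.
def example_summarise_plain_text(long_text):
    initialize()
    return chunk_and_summarise_embed(
        long_text,
        filename="example.txt",
        summarise_engine="text-davinci-003",
        embed_engine="text-embedding-ada-002",
        chunk_limit=8000,
    )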
@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))
def get_summarise(text: str, model="text-davinci-003"):
# replace newlines, which can negatively affect performance.
text = text.replace("\n", " ")
# add end of line Tl;dr for summarisation
text += "\n"
text += "Tl;dr"
# call the summarisation before embeddings
return openai.Completion.create(engine=model,prompt=text, max_tokens=100,temperature=0.0,top_p=1,frequency_penalty=0,presence_penalty=0,stop=None)
| [] |
2024-01-10 | GWFrank/CLLT-LangChain-Tool | cli_conversation.py | import readline
import atexit
from dotenv import load_dotenv
from langchain.agents import AgentType, initialize_agent
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
import tools
VERBOSE = False
class Colors:
# ANSI Color Code Reference: https://gist.github.com/rene-d/9e584a7dd2935d0f461904b9f2950007
""" ANSI color codes """
BLACK = "\033[0;30m"
RED = "\033[0;31m"
GREEN = "\033[0;32m"
BROWN = "\033[0;33m"
BLUE = "\033[0;34m"
PURPLE = "\033[0;35m"
CYAN = "\033[0;36m"
LIGHT_GRAY = "\033[0;37m"
DARK_GRAY = "\033[1;30m"
LIGHT_RED = "\033[1;31m"
LIGHT_GREEN = "\033[1;32m"
YELLOW = "\033[1;33m"
LIGHT_BLUE = "\033[1;34m"
LIGHT_PURPLE = "\033[1;35m"
LIGHT_CYAN = "\033[1;36m"
LIGHT_WHITE = "\033[1;37m"
BOLD = "\033[1m"
FAINT = "\033[2m"
ITALIC = "\033[3m"
UNDERLINE = "\033[4m"
BLINK = "\033[5m"
NEGATIVE = "\033[7m"
CROSSED = "\033[9m"
END = "\033[0m"
def load_agent():
print("Loading agent...")
llm = ChatOpenAI(model_name="gpt-3.5-turbo",
streaming=True,
verbose=VERBOSE,
temperature=0,
client=None,)
memory = ConversationBufferMemory(memory_key="chat_history",
return_messages=True)
tool_list = [tools.GetPttPostsKeywordsOnDate(),
tools.GetKeywordsVote(),
tools.GetKeywordsVoteTrend(),
tools.GetUpvoteCommentsByKeyword(),
tools.GetDownvoteCommentsByKeyword(),
tools.GetPostIDsByDate(),
tools.GetPostKeywordsByID(),
tools.GetPostTitleByID(),
tools.GetUpvoteCountByID(),
tools.GetDownvoteCountByID(),
tools.GetArrowCountByID(),
tools.GetNewsTitlesWithCrawler(),
tools.GetNewsKeywordsWithCrawler(),
]
agent = initialize_agent(tool_list,
llm,
# agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
memory=memory,
handle_parsing_errors=True,
verbose=VERBOSE)
return agent
def talk_to_agent(agent, msg: str):
print("Thinking...")
ret = agent.run(msg)
print(f"{Colors.LIGHT_BLUE}LLM: {ret}{Colors.END}")
def main():
print("==== CLLT Final Project - Demo ====")
load_dotenv('.env')
agent = load_agent()
system_msg = """System: 你是一個臺灣 PTT 使用者及政治觀察家,請使用提供的 tools 完成後面提供給你的工作,並使用臺灣的中文回答問題。
有些提供的 tool 完全不會使用到,但是你可以自己決定要不要使用。
請先和使用者打個招呼吧!"""
print(f"{Colors.YELLOW}{system_msg}{Colors.END}")
talk_to_agent(agent, system_msg)
while True:
msg = input(f"{Colors.LIGHT_GREEN}>>> You: ")
print(Colors.END, end="")
talk_to_agent(agent, msg)
if __name__ == "__main__":
atexit.register(lambda: print(f"{Colors.LIGHT_RED}Bye!{Colors.END}"))
main()
| [] |
2024-01-10 | GWFrank/CLLT-LangChain-Tool | weviate_tool.py | import dataclasses
from dataclasses import dataclass
import os
import weaviate
from langchain.retrievers.weaviate_hybrid_search import WeaviateHybridSearchRetriever
from dotenv import load_dotenv
load_dotenv()
@dataclass
class ContentItem:
media: str # media source of the post or comment
content_type: str # post or comment
author: str # author of the post or comment
post_id: str # id of the post
year: str # year of the post
board: str # board of the post
title: str # title of the post
text: str # text of the post or comment
rating: str # rating of the comment
order: int # 0 for post, 1, 2, 3, ... for comments
chunk: int # if text too long, split into chunks
total_chunks: int # total number of chunks
client = weaviate.Client(
url=os.environ["WEAVIATE_URL"],
auth_client_secret=weaviate.AuthApiKey(
api_key=os.environ["WEAVIATE_ADMIN_PASS"]),
timeout_config=(5, 30), # type: ignore
additional_headers={'X-OpenAI-Api-Key': os.environ["OPENAI_API_KEY"]}
)
attributes = [field.name for field in dataclasses.fields(ContentItem)]
def retrieve_docs(keyword, count=5):
retriever = WeaviateHybridSearchRetriever(
client=client,
k=count,
# weighting for each search algorithm (alpha = 0 (sparse, BM25), alpha = 1 (dense), alpha = 0.5 (equal weight for sparse and dense))
alpha=0.5,
index_name="ContentItem",
text_key="text",
attributes=attributes,
)
return retriever.get_relevant_documents(keyword)
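# Illustrative sketch: querying the hybrid index for a few documents and
# printing the title attribute assumed to be surfaced in each document's
# metadata. The keyword is an arbitrary example.
def example_search(keyword="example topic", count=3):
    docs = retrieve_docs(keyword, count=count)
    for doc in docs:
        print(doc.metadata.get("title"), "-", doc.page_content[:50])
    return docs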
| [] |
2024-01-10 | camfort/camfort-ai | get_vectors.py | from openai.embeddings_utils import get_embedding
from openai.error import InvalidRequestError
import openai
#from transformers import GPT2Tokenizer
import json
import sys
import sqlite3
import pandas as pd
import argparse
defaultdbfile = 'vectors.db'
tabname = 'embeddings'
create_table_sql = f'CREATE TABLE {tabname} ( path TEXT, name TEXT, firstLine INTEGER, lastLine INTEGER, vectorid INTEGER PRIMARY KEY ); CREATE TABLE vectors ( elem DOUBLE, ord INT, id INTEGER, FOREIGN KEY(id) REFERENCES {tabname}(vectorid) );'
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--database', '-D', type=str, default=defaultdbfile, help='SQLite3 database filename for storing vectors')
parser.add_argument('--input-file', '-f', type=str, default='-', help='File for JSON input or - for stdin')
parser.add_argument('--api-key', type=str, default=None, help='OpenAI API key (or use env var OPENAI_API_KEY)')
args = parser.parse_args()
if args.api_key is not None:
        openai.api_key = args.api_key
con = sqlite3.connect(args.database)
if con.execute("SELECT count(name) FROM sqlite_master WHERE type='table' AND name=?", (tabname,)).fetchone()[0]==0:
con.executescript(create_table_sql)
with sys.stdin if args.input_file == '-' else open(args.input_file) as f:
df = pd.read_json(f, lines=True)
#tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
for ex in df.itertuples():
print(f"file {ex.path} pu {ex.name} lines {ex.firstLine}-{ex.lastLine}")
sys.stdout.flush()
if con.execute("SELECT count(*) FROM embeddings WHERE path=? AND firstLine=?", (ex.path, ex.firstLine)).fetchone()[0] == 0:
src = []
with open(ex.path, errors='replace') as f:
for line_num, line in enumerate(f):
if line_num >= ex.firstLine-1 and line_num <= ex.lastLine-1:
src.append(line)
src=('').join(src)
#print(len(tokenizer(ex['src'])['input_ids']))
txt=src[:2048]
emb=[]
while len(emb) == 0 and len(txt) > 2:
try:
emb=get_embedding(txt, engine='code-search-babbage-code-001')
except Exception as err:
print(err)
txt=txt[:int(len(txt)/2)]
print(f'trying with len={len(txt)}')
cur = con.execute("INSERT INTO embeddings (path, name, firstLine, lastLine) VALUES (?, ?, ?, ?)",
(ex.path, ex.name, ex.firstLine, ex.lastLine))
vid = cur.lastrowid
for i,x in enumerate(emb):
con.execute("INSERT INTO vectors (id, ord, elem) VALUES (?, ?, ?)", (vid, i, x))
con.commit()
if __name__=="__main__":
main()
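# Illustrative sketch, not wired into main(): reading one stored embedding back
# out of the SQLite tables written above, following the CREATE TABLE statements
# at the top of this file.
def load_vector(con, path, first_line):
    row = con.execute(
        "SELECT vectorid FROM embeddings WHERE path=? AND firstLine=?",
        (path, first_line)).fetchone()
    if row is None:
        return None
    cur = con.execute("SELECT elem FROM vectors WHERE id=? ORDER BY ord", (row[0],))
    return [elem for (elem,) in cur.fetchall()]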
| [] |
2024-01-10 | OlesSt/app-15-chatbox-gpt | backend.py | import openai
API_KEY = "YOUR GENERETED KEY"
class Chatbot:
def __init__(self):
openai.api_key = API_KEY
def get_response(self, user_input):
response = openai.Completion.create(
engine="text-davinci-003",
prompt=user_input,
max_tokens=4000,
temperature=0.5
).choices[0].text
return response
if __name__ == "__main__":
new_chat = Chatbot()
user = input("Enter : ")
answer = new_chat.get_response(user)
print(answer)
| [] |
2024-01-10 | jrosebr1/rescue-ripple | ripple_predict~tasks.py | # import the necessary packages
from ripple_predict.models import SocialMediaPost
from ripple_predict.models import Prediction
from ripple_predict.models import Embedding
from django.conf import settings
from django.template.loader import get_template
from celery import shared_task
import openai
import json
import os
# set the OpenAI API key
openai.api_key = settings.OPENAI_API_KEY
@shared_task
def classify_post_with_prompt(
smp_id,
experiment,
prompt_filename,
model="gpt-3.5-turbo"
):
# grab the social media post from the database
smp = SocialMediaPost.objects.get(id=smp_id)
# build the prompt path
template_path = os.path.join(
"ripple_predict",
prompt_filename
)
# construct the prompt
template = get_template(template_path)
prompt = template.render({
"post": smp.text,
}).strip()
# submit the prompt to OpenAI and obtain the response, then parse out the
# prediction from the JSON blob
completion = openai.ChatCompletion.create(
model=model,
temperature=0.0,
messages=[{
"role": "user",
"content": prompt
}]
)
blob = json.loads(completion.choices[0].message.content)
predicted_label = blob["label"]
# store the prediction in the database
prediction = Prediction(
smp=smp,
experiment=experiment,
response=completion,
prediction=predicted_label
)
prediction.save()
@shared_task
def compute_embeddings(
smp_id,
experiment,
model="text-embedding-ada-002"
):
# grab the social media post from the database
smp = SocialMediaPost.objects.get(id=smp_id)
# submit the embedding request to OpenAI
response = openai.Embedding.create(
model=model,
input=smp.text
)
vector = response["data"][0]["embedding"]
# store the embedding in the database
embedding = Embedding(
smp=smp,
experiment=experiment,
response=response,
embeddings=json.dumps(vector)
)
embedding.save()
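# Illustrative sketch, not called in this module: queueing the tasks above from
# a view or a management command. The experiment labels and the prompt filename
# are placeholders; the prompt file is assumed to live in the template
# directory referenced by classify_post_with_prompt.
def _example_enqueue(smp_id):
    classify_post_with_prompt.delay(
        smp_id,
        experiment="baseline-gpt35",
        prompt_filename="classification_prompt.txt",
    )
    compute_embeddings.delay(smp_id, experiment="ada-002-embeddings")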
| [
"ripple_predict"
] |
2024-01-10 | israel-cj/LLM-AMLTK | AMLTK_from_portoflio~llm_optimization.py | import json
import time
import uuid
import posixpath
from .run_llm_generated import run_llm_code
from openai import OpenAI
# client = OpenAI(api_key='')
client = OpenAI()
def build_prompt_from_df(
acc,
description_model,
task,
metric,
name_model,
number_recommendations=3,
):
return """
You are assisting me with automated machine learning. Consider I need a 'Component' object, the next it is an example of a Component to illustrate:
{
Component(
name = 'model_porfolio_1',
item = ExtraTreesClassifier,
config = {
"bootstrap": False,
"criterion": "entropy",
"max_depth": None,
"max_features": 0.9565902080710877,
"max_leaf_nodes": None,
"min_impurity_decrease": 0.0,
"min_samples_leaf": 4,
"min_samples_split": 15,
"min_weight_fraction_leaf": 0.0,
"random_state": 0,
},
space={},
),
}, """ + f""""
Now, the Component I need to optimize is a {description_model} for a {task} task. The {task} performance is measured using {metric}.
Please suggest a list with {number_recommendations} diverse yet effective Components to initiate a Bayesian Optimization process.
The 'name' of this component must be {name_model} from 0 to {number_recommendations}.
All Components should be in a dictionary 'dict_components', i.e. """ + " 'dict_components = {" + f"'{name_model}_0': Component(), '{name_model}_1': Component() " + """}'
Each codeblock ends with "```end" and starts with "```python".
"""
def generate_code(messages, llm_model):
completion = client.chat.completions.create(
model=llm_model,
messages=messages,
stop=["```end"],
temperature=0.5,
max_tokens=1500,
)
code = completion.choices[0].message.content
code = code.replace("```python", "").replace("```", "").replace("<end>", "")
return code
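# Illustrative sketch, not called in this module: generate_code expects a
# chat-style message list and strips the markdown fences from the reply. The
# prompt below is a trimmed stand-in for the full template built by
# build_prompt_from_df.
def example_generate_components():
    messages = [
        {"role": "system",
         "content": "You are an expert datascientist assistant creating a dictionary of Components."},
        {"role": "user",
         "content": "Suggest one Component for a classification task, wrapped in a dict called dict_components."},
    ]
    return generate_code(messages, llm_model="gpt-3.5-turbo")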
def improve_models(
history,
task='classification',
display_method="markdown",
size_search_space=5,
real_metric=None,
llm_model="gpt-3.5-turbo",
search_space=None,
):
if task == 'classification':
metric = "accuracy"
trace = (
history.sortby(metric)
)
else:
metric = "r2_score"
trace = (
history.sortby(metric)
)
natural_descriptions_LIST = []
for element in trace:
this_bucket = element.bucket
new_path_CONFIG = posixpath.join(this_bucket.path, element.name, 'config.json')
with open(new_path_CONFIG) as f:
this_model_json = json.load(f)
# this_name = this_model_json['Pipeline:estimator:__choice__']
this_name = list(this_model_json.values())[0]
print('this_name: ', this_name)
try:
this_component = search_space[this_name]
            natural_description_model = f"{str(this_component.item)}, with the following hyperparameters {this_component.config}"
natural_descriptions_LIST.append(natural_description_model)
except Exception as e:
print("Error: ")
print(e)
def format_for_display(code):
code = code.replace("```python", "").replace("```", "").replace("<end>", "")
return code
def execute_and_evaluate_code_block(code):
try:
new_search_space_exec = run_llm_code(code)
except Exception as e:
new_search_space_exec = None
display_method(f"Error in code execution. {type(e)} {e}")
display_method(f"```python\n{format_for_display(code)}\n```\n")
return e, None
return None, new_search_space_exec
if display_method == "markdown":
from IPython.display import display, Markdown
display_method = lambda x: display(Markdown(x))
else:
display_method = print
# Get a list of accuracies by ordering the history (same as we did with the trace)
history_df = history.df()
history_df = history_df.sort_values(f"metric:{real_metric}", ascending=False)
list_accuracies = list(history_df[f"metric:{real_metric}"])
counter_name = 0
final_search_space = dict()
for representation, acc in zip(natural_descriptions_LIST, list_accuracies):
name_models = str(uuid.uuid4())[:5] # Let's consider only the first 5 characters to keep it simple
prompt = build_prompt_from_df(acc, representation, task, metric, name_model=name_models)
print(prompt)
messages = [
{
"role": "system",
"content": "You are an expert datascientist assistant creating a dictionary of Components. You answer only by generating code. Let’s think step by step.",
},
{
"role": "user",
"content": prompt,
},
]
try:
code = generate_code(messages, llm_model)
print('Code generated successfully')
print(code)
except Exception as e:
code = None
display_method("Error in LLM API." + str(e))
time.sleep(60) # Wait 1 minute before next request
continue
e, new_search_space = execute_and_evaluate_code_block(code)
if new_search_space is not None:
# final_search_space = final_search_space.union(new_search_space)
final_search_space = {**final_search_space, **new_search_space}
counter_name += 1
if counter_name >= size_search_space:
break
return final_search_space
| [
"You are an expert datascientist assistant creating a dictionary of Components. You answer only by generating code. Let’s think step by step."
] |
2024-01-10 | xuefeng16/gem5-runahead | configs~example~gem5_library~riscv-ubuntu-run.py | # Copyright (c) 2021 The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This script shows an example of running a full system RISCV Ubuntu boot
simulation using the gem5 library. This simulation boots Ubuntu 20.04 using
2 TIMING CPU cores. The simulation ends when the startup is completed
successfully.
Usage
-----
```
scons build/RISCV/gem5.opt
./build/RISCV/gem5.opt \
configs/example/gem5_library/riscv-ubuntu-run.py
```
"""
import m5
from m5.objects import Root
from gem5.utils.requires import requires
from gem5.components.boards.riscv_board import RiscvBoard
from gem5.components.memory import DualChannelDDR4_2400
from gem5.components.processors.simple_processor import (
SimpleProcessor,
)
from gem5.components.processors.cpu_types import CPUTypes
from gem5.isas import ISA
from gem5.coherence_protocol import CoherenceProtocol
from gem5.resources.resource import Resource
from gem5.simulate.simulator import Simulator
# This runs a check to ensure the gem5 binary is compiled for RISCV.
requires(
isa_required=ISA.RISCV,
)
# With RISCV, we use simple caches.
from gem5.components.cachehierarchies.classic\
.private_l1_private_l2_cache_hierarchy import (
PrivateL1PrivateL2CacheHierarchy,
)
# Here we setup the parameters of the l1 and l2 caches.
cache_hierarchy = PrivateL1PrivateL2CacheHierarchy(
l1d_size="16kB",
l1i_size="16kB",
l2_size="256kB",
)
# Memory: Dual Channel DDR4 2400 DRAM device.
memory = DualChannelDDR4_2400(size = "3GB")
# Here we setup the processor. We use a simple processor.
processor = SimpleProcessor(
cpu_type=CPUTypes.TIMING,
isa=ISA.RISCV,
num_cores=2,
)
# Here we setup the board. The RiscvBoard allows for Full-System RISCV
# simulations.
board = RiscvBoard(
clk_freq="3GHz",
processor=processor,
memory=memory,
cache_hierarchy=cache_hierarchy,
)
# Here we set the Full System workload.
# The `set_kernel_disk_workload` function for the RiscvBoard accepts a
# RISCV bootloader and a disk image. Once the system successfully boots, it
# encounters an `m5_exit instruction encountered`. We stop the simulation then.
# When the simulation has ended you may inspect `m5out/system.pc.com_1.device`
# to see the stdout.
board.set_kernel_disk_workload(
# The RISCV bootloader will be automatically downloaded to the
# `~/.cache/gem5` directory if not already present.
# The riscv-ubuntu boot-test was tested with riscv-bootloader-5.10
kernel=Resource(
"riscv-bootloader-vmlinux-5.10",
),
# The RISCV ubuntu image will be automatically downloaded to the
# `~/.cache/gem5` directory if not already present.
disk_image=Resource(
"riscv-ubuntu-20.04-img",
),
)
simulator = Simulator(board=board)
simulator.run()
| [] |
2024-01-10 | Deemocean/GhostGPT | ghost~ghost_in_discord.py | import discord
import os
import openai
intents = discord.Intents(messages=True, message_content = True)
client = discord.Client(command_prefix='!',intents=intents)
from ghost import imprint
openai.api_key = None
try:
openai.api_key = os.environ["OPENAI_KEY"]
except KeyError:
print("No openAI token found!")
exit()
imp = imprint.get(printing = False)
@client.event
async def on_message(message):
    if message.content.startswith("!g"):
        msg = message.content[2:]  # drop the "!g" command prefix
        msg_sent = imp.chat(msg)
        print(msg_sent)
        await message.channel.send(msg_sent)
try:
client.run(os.environ["DISCORD_TOKEN"])
except KeyError:
print("No Discord key has been set!") | [] |
2024-01-10 | Deemocean/GhostGPT | ghost~ghost.py | import openai
from rich.console import Console
from rich.markdown import Markdown
from rich.table import Table
from rich.live import Live
from rich import box
import os
import sys
import tiktoken
console = Console()
class imprint:
path = ""
name = ""
history = []
forget = True
TOKEN_REQUEST_LIMIT = 4096-200
token_outbound_count = 0
printing = True
index = -1
temp = {}
def log(self, s, head = "", **kwargs):
if head != "" or s == "":
s = ("\033[38;5;33m" + self.name + head + "\033[0;0m: " + s)
if self.printing:
print(s, **kwargs, end="")
return s
def markdown(self, obj, **kwargs):
if self.printing:
console.print(obj, **kwargs)
def __init__(self, name, printing= True):
self.printing = printing
self.name = name.upper()
self.path = os.path.join("IMPRINTS" , name + ".ni")
try:
self.log("\nInjecting Nerual imprint: " + self.name + " ...")
self.read()
except FileNotFoundError:
self.wipe()
self.log("\nCreating a new imprint: " + self.name + " ...")
self.log("*Note: type [eject] to eject imprint <"+str(self.name)+"> from ghost")
self.log("Type [delete] to delete the last entry and response from memory.\n")
def generate(printing = True):
temp_num = 0
temp_name = "temp"
imprints = os.listdir("IMPRINTS")
while temp_name in imprints:
temp_num = temp_num + 1
temp_name = "temp" + str(temp_num)
return imprint(temp_name, printing=printing)
def get(printing = True):
try:
name = sys.argv[1]
imp = imprint(name, printing= printing)
except IndexError:
imp = imprint.generate(printing = printing)
try:
imp.forget = os.environ["FORGET"] == "True"
except KeyError:
pass
return imp
def wipe(self):
self.history = []
self.save()
def save(self):
if self.path is not None:
with open(self.path, "w") as nifile:
nifile.write(str(self.history))
def read(self):
if self.path is not None:
with open(self.path) as imprint_file:
self.history = eval(imprint_file.read())
def history_add(self, role, content):
if self.history is not None:
self.history.append({"role": role, "content": content})
return self.history
def rm_history(self,n):
temp = []
if self.history is not None:
init_size = len(self.history)
for entry in self.history:
if n > 0 and entry["role"] == "user":
temp.append(entry)
i = self.history.index(entry)
try:
entry_after = self.history[i + 1]
if entry_after["role"] == "assistant":
temp.append(entry_after)
self.history.remove(entry_after)
n = n - 1
except:
pass
self.history.remove(entry)
n = n - 1
return init_size - len(self.history)
else:
return 0
def delete(self):
i = self.index if self.index >= 0 else len(self.history) -1
try:
rm = self.history.pop(i)
self.log("DELETED HISTORY: " + str(rm))
rm = self.history.pop(i-1)
self.log(str(rm))
except IndexError:
pass
def chat(self, content=None, head=""):
msg = ""
head = "[TRAINING]" + head if not self.forget and "[TRAINING]" not in head else head
if content is None:
content = self.history[len(self.history) -1]["content"]
else:
self.history_add("user" if self.forget else "training",content)
unanswered_history = self.history if self.history is not None else [{'role': 'user', 'content': content}]
try:
response = openai.ChatCompletion.create(
model="gpt-4",
messages=list(map(lambda entry: entry if entry["role"][0] != "t" else {'role':'user', 'content':entry['content']}, unanswered_history)),
temperature=0,
stream=True
)
except openai.error.InvalidRequestError:
if self.history is not None:
self.token_outbound_count = self.token_outbound_count + 1
diff = self.rm_history(self.token_outbound_count)
head = "[MEM FULL WARNING]"
if diff == 0 or self.history[len(self.history) -1]["content"] != content:
head = "[MEM OVER CAPACITY]"
return self.log("Has too much training data!", head = head)
else:
return self.chat(content=None, head = head)
else:
return self.log("Request too long!", head = head)
self.log("")
with Live(auto_refresh=False, vertical_overflow="visible") as live:
print('',end='\n') #maybe a rich bug, can't update the first token
msg = ""
for chunk in response:
try:
chunk_message = chunk['choices'][0]['delta']['content']
msg += chunk_message
table = Table(show_header=False, box=box.ROUNDED)
table.add_row(Markdown(msg))
live.update(table, refresh=True)
except KeyError:
pass
self.history_add("assistant",msg)
self.save()
return head + msg
def token_est(self):
total_token=0
encoding = tiktoken.encoding_for_model("gpt-4")
for chat in self.history:
content= chat["content"]
num_tokens = len(encoding.encode(content))
total_token+=num_tokens
return total_token | [
"content"
] |
2024-01-10 | Deemocean/GhostGPT | ghost~ghost_in_shell.py | from ghost import imprint
import openai
import os
#Get options.
try:
openai.api_key = os.environ["OPENAI_KEY"]
except KeyError:
print("No openAI token found!")
exit()
imp = imprint.get()
usr_input=input('\033[38;5;33m' +"YOU"+ '\033[0;0m: ')
while (usr_input!="eject"):
if(usr_input=="delete"):
imp.delete()
else:
imp.chat(usr_input)
usr_input=input('\033[38;5;33m' +"YOU"+ '\033[0;0m: ') | [] |
2024-01-10 | Deemocean/GhostGPT | ghost~ghost_in_telegram.py | import openai
from ghost import imprint
import logging
from telegram import Update, InputMediaPhoto
from telegram.constants import ParseMode
from telegram.helpers import escape_markdown
from telegram.ext import ApplicationBuilder, CommandHandler, ContextTypes
import os
TOKEN= None
openai.api_key = None
try:
openai.api_key = os.environ["OPENAI_KEY"]
except KeyError:
print("No openAI key found!")
exit()
try:
TOKEN = os.environ["TELEGRAM_TOKEN"]
except KeyError:
print("No Telegram token found!")
exit()
imp = imprint.get()
print("\nInjecting Nerual imprint: "+ str(imp.name)+" ...")
print("\n*Note: type [eject] to eject imprint <"+str(imp.name)+"> from ghost")
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO
)
async def menu(update: Update, context: ContextTypes.DEFAULT_TYPE):
await context.bot.send_message(chat_id=update.effective_chat.id, text="/g--talk to ghost /imgc--generate img from Dall-E /wipe wipe ghost memory")
async def g(update: Update, context: ContextTypes.DEFAULT_TYPE):
global token_outbound_count
global chat_history
usr_input = update.effective_message.text[3:]
try:
resp = imp.chat(usr_input)
except Exception as e:
resp = str(e)
try:
await context.bot.send_message(chat_id=update.effective_chat.id, text=resp,parse_mode=ParseMode.MARKDOWN_V2)
except:
await context.bot.send_message(chat_id=update.effective_chat.id, text=resp)
async def wipe(update: Update, context: ContextTypes.DEFAULT_TYPE):
global imp
imp.wipe()
await context.bot.send_message(chat_id=update.effective_chat.id, text="All memories flushed")
async def imgc(update: Update, context: ContextTypes.DEFAULT_TYPE):
usr_input = update.effective_message.text[5:]
try:
response = openai.Image.create(
prompt=usr_input,
n=4,
size="1024x1024"
)
img0 = InputMediaPhoto(media=response['data'][0]['url'])
img1 = InputMediaPhoto(media=response['data'][1]['url'])
img2 = InputMediaPhoto(media=response['data'][2]['url'])
img3 = InputMediaPhoto(media=response['data'][3]['url'])
await context.bot.send_media_group(chat_id=update.effective_chat.id, media=[img0,img1,img2,img3])
except:
await context.bot.send_message(chat_id=update.effective_chat.id, text="error :(...")
if __name__ == '__main__':
application = ApplicationBuilder().token(TOKEN).build()
start_handler = CommandHandler('menu', menu)
g_handler = CommandHandler('g', g)
imgc_handler = CommandHandler('imgc', imgc)
wipe_handler = CommandHandler('wipe', wipe)
application.add_handler(start_handler)
application.add_handler(g_handler)
application.add_handler(imgc_handler)
application.add_handler(wipe_handler)
application.run_polling() | [] |
2024-01-10 | jpuentevel/jarvis | jarvis.py | import pyttsx3
import speech_recognition as sr
import openai
def main():
r = sr.Recognizer()
openai.api_key = "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"
engine = pyttsx3.init()
rate = engine.getProperty('rate')
engine.setProperty('rate', rate-100)
volume = engine.getProperty('volume')
engine.setProperty('volume', volume+0.50)
engine.setProperty('voice', 'spanish')
with sr.Microphone() as source:
engine.say("Dime algo, por favor.")
engine.runAndWait()
audio = r.listen(source)
try:
text = r.recognize_google(audio)
engine.say(f"Tú dijiste: {text}")
engine.runAndWait()
except:
engine.say("No te entendí, lo siento.")
engine.runAndWait()
if __name__ == "__main__":
main() | [] |
2024-01-10 | wilsonyhlee/datasets | datasets~openwebtext~openwebtext.py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""
import os
import re
from itertools import chain
import datasets
_CITATION = """\
@misc{Gokaslan2019OpenWeb,
title={OpenWebText Corpus},
author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
howpublished{\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
year={2019}
}
"""
_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""
_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
class Openwebtext(datasets.GeneratorBasedBuilder):
"""The Open WebText dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
description="Plain text",
version=datasets.Version("1.0.0"),
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({"text": datasets.Value("string")}),
homepage="https://skylion007.github.io/OpenWebTextCorpus/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_URL)
owt_dir = os.path.join(dl_dir, "openwebtext")
subset_xzs = [
os.path.join(owt_dir, file_name)
for file_name in sorted(os.listdir(owt_dir))
if file_name.endswith("xz") # filter out ...xz.lock
]
ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
nested_txt_files = [
[
os.path.join(ex_dir, txt_file_name)
for txt_file_name in sorted(os.listdir(ex_dir))
if txt_file_name.endswith("txt")
]
for ex_dir in ex_dirs
]
txt_files = chain(*nested_txt_files)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
]
def _generate_examples(self, txt_files):
"""Yields examples."""
for idx, filepath in enumerate(txt_files):
with open(filepath, encoding="utf-8") as f:
yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
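# Illustrative sketch, not used by the builder itself: once this script is
# registered, the corpus would normally be loaded through the datasets library.
# Note that this downloads and extracts several gigabytes of archives.
def _example_load(split="train"):
    return datasets.load_dataset("openwebtext", split=split)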
| [] |
2024-01-10 | xiaowuc2/ChatGPT-Python-Applications | email-automation~cool-odd.py | # Importing libraries
import imaplib
import email
import yaml
import openai
# user-defined variables
# how_many = int(input()) # how many unseen mails you want to check
# maxtoken = int(input()) # what is the maximum number of characters you want in your blog
# what_to_ask = input() # what do you want to ask chatgpt to do
# num_target = int(input()) # number of targeted emails
# list_mail = []
# for i in range(num_target):
# i = input()
# list_mail.append(i)
# how many mails you want to see (default values)
how_many = 2
maxtoken = 200
#what_to_ask = "Generate a blog on:"
# Reading private yml file
with open("pass.yml") as f:
content = f.read()
# from credentials.yml import user name and password
my_credentials = yaml.load(content, Loader=yaml.FullLoader)
user, password = my_credentials["user"], my_credentials["password"]
openai.api_key = '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' #my_credentials["api"]
# Login to the email server
server = "imap.gmail.com"
my_mail = imaplib.IMAP4_SSL(server)
my_mail.login(user, password)
my_mail.select('inbox')
# search : emails from a specific sender
status, data = my_mail.search(None, 'FROM', '[email protected]')
mail_id_list = data[0].split() #IDs of all emails that we want to fetch
msgs = [] # empty list to capture all messages
#Iterate through messages and extract data into the msgs list
for num in mail_id_list:
typ, data = my_mail.fetch(num, '(RFC822)') #RFC822 returns whole message (BODY fetches just body)
msgs.append(data)
count = 0
for msg in msgs[::-1]:
if count == how_many :
break
count += 1
for response_part in msg:
if type(response_part) is tuple:
my_msg=email.message_from_bytes((response_part[1]))
print("_________________________________________")
#print ("subj:", my_msg['subject'])
#print ("from:", my_msg['from'])
#print ("body:")
for part in my_msg.walk():
#print(part.get_content_type())
if part.get_content_type() == 'text/plain':
print (part.get_payload())
snippet = part.get_payload()
# prompt = f"{what_to_ask} {str(snippet)}:
prompt = f"Generate a blog on: {str(snippet)}"
# calling api
response = openai.Completion.create(
engine="davinci-instruct-beta-v3",
prompt=prompt,
max_tokens=maxtoken,
temperature = 0.7,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
#printing the response
generated_text = response['choices'][0]['text']
print(generated_text)
| [
"Generate a blog on: PLACEHOLDER"
] |
2024-01-10 | xiaowuc2/ChatGPT-Python-Applications | chatbot~fantastic-chatbot-gradio.py | import openai
import gradio as gr
import yaml
# Reading private yml file
with open("pass.yml") as f:
content = f.read()
# from pass.yml import the api key
my_credentials = yaml.load(content, Loader=yaml.FullLoader)
openai.api_key = my_credentials["api"]
messages = [
{"role": "system", "content": "You are a helpful and kind AI Assistant."},
]
def chatbot(input):
if input:
messages.append({"role": "user", "content": input})
chat = openai.ChatCompletion.create(
model="gpt-3.5-turbo", messages=messages
)
reply = chat.choices[0].message.content
messages.append({"role": "assistant", "content": reply})
return reply
inputs = gr.inputs.Textbox(lines=7, label="Chat with AI")
outputs = gr.outputs.Textbox(label="Reply")
gr.Interface(fn=chatbot, inputs=inputs, outputs=outputs, title="AI Chatbot",
description="Ask anything you want",
theme="compact").launch(share=True)
| [
"INPUT",
"You are a helpful and kind AI Assistant."
] |
2024-01-10 | xiaowuc2/ChatGPT-Python-Applications | web-scraping-summarizer~web-scraping-summarizer.py | # Importing required libraries
import argparse
import requests
from bs4 import BeautifulSoup
import openai
import yaml
# Reading private yml file
with open("pass.yml") as f:
content = f.read()
# Parse the credentials from pass.yml
my_credentials = yaml.load(content, Loader=yaml.FullLoader)
# Set the OpenAI API key
openai.api_key = my_credentials["api"]
# Creating the parser
parser = argparse.ArgumentParser(description='web scraping summarizer')
# Adding arguments
parser.add_argument('--web', type=str, help='website link (default : https://github.com/xiaowuc2/ChatGPT-Python-Applications)', default="https://github.com/xiaowuc2/ChatGPT-Python-Applications")
parser.add_argument('--limit', type=int, help='summarized text limit (default : 100)', default=100)
# Parsing arguments
args = parser.parse_args()
response = requests.get(args.web)
soup = BeautifulSoup(response.content, 'html.parser')
# Extracting the text
text = ''
for p in soup.find_all('p'):
text += p.text
# Trim the text: the OpenAI model can only take 4097 tokens (roughly 4 characters each).
mine = (int(len(text)/4.2))
#print(f"my text has chars : {len(text)} tokens : {mine}")
allowed = 16132
#print(f"number of chars allowed is : {allowed}")
h = len(text) - allowed
#print(f"we have to drop this many chars : {h}")
# `ntext` is the trimmed 'text'
ntext = text[:len(text)-h]
#print(f"new text has chars : {len(ntext)} . tokens : {len(ntext)/4}")
def summarize_text(text):
model_engine = "text-davinci-002" # Replace with your preferred GPT-3 model engine
prompt = (f"Please summarize the following text:\n{text}\n\nSummary:")
response = openai.Completion.create(
engine=model_engine,
prompt=prompt,
max_tokens=args.limit,
n=1,
stop=None,
temperature=0.5,
)
summary = response.choices[0].text.strip()
return summary
print(f"Summary : {summarize_text(ntext)}")
| [
"Please summarize the following text:\n\n\nSummary:"
] |
2024-01-10 | HamzaFarhan/invoice_parser | invoice_parser~api~imports.py | from invoice_parser.imports import *
from invoice_parser.utils import *
from invoice_parser.core import *
from fastapi.responses import JSONResponse
from langchain_ray.remote_utils import handle_input_path, is_bucket
from fastapi import FastAPI, File, UploadFile, Form, Query, HTTPException
| [] |
2024-01-10 | HamzaFarhan/invoice_parser | invoice_parser~imports.py | from dreamai.core import *
from dreamai.vision import *
from dreamai.imports import *
from langchain_ray.utils import *
from langchain_ray.chains import *
from langchain_ray.imports import *
import pdfplumber
from pypdf import PdfReader
from statistics import mode
from ast import literal_eval
from pyparsing import nestedExpr
from collections import defaultdict
from pydantic import BaseModel, Field
from transformers import AutoModelForCausalLM
from langchain.document_loaders import PDFPlumberLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain_ray.pdf.utils import pdf_to_docs, process_text
from langchain.chains.question_answering import load_qa_chain
from langchain.output_parsers import PydanticOutputParser, OutputFixingParser
from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings
from langchain.chains import (
create_extraction_chain,
create_extraction_chain_pydantic,
RetrievalQA,
)
| [] |
2024-01-10 | datastax/ai-chatbot-starter | chatbot_api~prompt_util.py | import os
from typing import List
from langchain.prompts import load_prompt
def get_template(
persona: str,
vector_search_results: str,
user_question: str,
user_context: str,
company: str,
custom_rules: List[str],
) -> str:
persona_path = f"prompts/{persona}.yaml"
if not os.path.exists(persona_path):
persona_path = f"../prompts/{persona}.yaml"
prompt = load_prompt(persona_path)
input_txt = prompt.format(
**{
"vector_search_results": vector_search_results,
"user_question": user_question,
"user_context": user_context,
"company": company,
"custom_rules": "\n".join(custom_rules),
}
)
return input_txt
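# Example usage (illustrative only; the persona name and values below are hypothetical and
# assume a prompts/<persona>.yaml template with these input variables exists):
# prompt_text = get_template(
#     persona="support_agent",
#     vector_search_results="- retrieved doc snippet ...",
#     user_question="How do I reset my password?",
#     user_context="",
#     company="Acme",
#     custom_rules=["Keep answers under 100 words."],
# )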
| [] |
2024-01-10 | datastax/ai-chatbot-starter | chatbot_api~assistant.py | from abc import ABC, abstractmethod
from typing import List, Optional, Tuple
from langchain.embeddings.base import Embeddings
from langchain.embeddings import OpenAIEmbeddings, VertexAIEmbeddings
from langchain.llms import VertexAI
from llama_index import VectorStoreIndex, ServiceContext
from llama_index.vector_stores import AstraDBVectorStore
from llama_index.embeddings import LangchainEmbedding
from llama_index.llms import OpenAI
from llama_index.response.schema import StreamingResponse
from chatbot_api.prompt_util import get_template
from integrations.google import GECKO_EMB_DIM, init_gcp
from integrations.openai import OPENAI_EMB_DIM
from pipeline.config import Config, LLMProvider
from llama_index.chat_engine import SimpleChatEngine
class Assistant(ABC):
def __init__(
self,
config: Config,
embeddings: Embeddings,
k: int = 4,
llm=None,
):
self.config = config
self.embedding_model = LangchainEmbedding(embeddings)
self.llm = llm
embedding_dimension = (
OPENAI_EMB_DIM
if self.config.llm_provider == LLMProvider.OpenAI
else GECKO_EMB_DIM
)
# Initialize the vector store, which contains the vector embeddings of the data
self.vectorstore = AstraDBVectorStore(
token=self.config.astra_db_application_token,
api_endpoint=self.config.astra_db_api_endpoint,
collection_name=self.config.astra_db_table_name,
embedding_dimension=embedding_dimension,
)
self.service_context = ServiceContext.from_defaults(
llm=llm, embed_model=self.embedding_model
)
self.index = VectorStoreIndex.from_vector_store(
vector_store=self.vectorstore, service_context=self.service_context
)
self.query_engine = self.index.as_query_engine(
similarity_top_k=k, streaming=True
)
self.chat_engine = SimpleChatEngine.from_defaults(service_context=self.service_context)
# Get a response from the vector search, aka the relevant data
def find_relevant_docs(self, query: str) -> str:
response = self.query_engine.query(
query
) # TODO: Retriever (index.as_retriever (returns list of source nodes instead of response object))
results = response.source_nodes
raw_text = []
for doc in results:
try:
raw_text.append(
doc.get_content()
+ f"\nPrevious document was from URL link: {doc.metadata['source']}"
)
except KeyError:
raw_text.append(doc.get_content())
vector_search_results = "- " + "\n\n- ".join(
raw_text
)  # Join the retrieved documents into a single bulleted string
return vector_search_results
# Get a response from the chatbot, excluding the responses from the vector search
@abstractmethod
def get_response(
self,
user_input: str,
persona: str,
user_context: str = "",
include_context: bool = True,
) -> Tuple[str, str, str]:
"""
:returns: Should return a tuple of
(bot response, vector store responses string, user context)
"""
class AssistantBison(Assistant):
# Instantiate the class using the default bison model
def __init__(
self,
config: Config,
temp: float = 0.2,
max_tokens_response: int = 256,
k: int = 4,
company: str = "",
custom_rules: Optional[List[str]] = None,
):
# Choose the embeddings and LLM based on the llm_provider
if config.llm_provider == LLMProvider.OpenAI:
embeddings = OpenAIEmbeddings(model=config.openai_embeddings_model)
llm = OpenAI(model=config.openai_textgen_model)
elif config.llm_provider == LLMProvider.Google:
init_gcp(config)
embeddings = VertexAIEmbeddings(model_name=config.google_embeddings_model)
llm = VertexAI(model_name=config.google_textgen_model)
else:
raise AssertionError("LLM Provider must be one of openai or google")
super().__init__(config, embeddings, k, llm)
self.parameters = {
"temperature": temp, # Temperature controls the degree of randomness in token selection.
"max_tokens": max_tokens_response, # Token limit determines the maximum amount of text output.
}
self.company = company
self.custom_rules = custom_rules or []
def get_response(
self,
user_input: str,
persona: str,
user_context: str = "",
include_context: bool = True,
) -> Tuple[StreamingResponse, str, str]:
responses_from_vs = self.find_relevant_docs(query=user_input)
# Ensure that we include the prompt context assuming the parameter is provided
context = user_input
if include_context:
# If we have a special tag, include no further context from the vector DB
if "[NO CONTEXT]" in user_input:
responses_from_vs = ""
context = get_template(
persona,
responses_from_vs,
user_input,
user_context,
self.company,
self.custom_rules,
)
bot_response = self.chat_engine.stream_chat(context)
return bot_response, responses_from_vs, context
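# Example usage (illustrative sketch; assumes a populated Astra DB collection, a Config object
# loaded elsewhere, e.g. via pipeline.config.load_config, and a hypothetical persona name):
# assistant = AssistantBison(config, company="Acme", custom_rules=["Answer in English."])
# response, vs_results, prompt = assistant.get_response("How do I get started?", persona="default")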
| [] |
2024-01-10 | datastax/ai-chatbot-starter | data~compile_documents.py | # Add documents to the vectorstore, which is on the database, through an embeddings model
from dotenv import load_dotenv
from langchain.embeddings import OpenAIEmbeddings, VertexAIEmbeddings
from llama_index import (
SimpleDirectoryReader,
VectorStoreIndex,
ServiceContext,
StorageContext,
)
from llama_index.embeddings import LangchainEmbedding
from llama_index.node_parser import SimpleNodeParser
from llama_index.vector_stores import AstraDBVectorStore
from integrations.google import init_gcp, GECKO_EMB_DIM
from integrations.openai import OPENAI_EMB_DIM
from pipeline.config import LLMProvider, load_config
dotenv_path = ".env"
load_dotenv(dotenv_path)
config = load_config("config.yml")
# Provider for LLM
if config.llm_provider == LLMProvider.OpenAI:
embedding_model = LangchainEmbedding(
OpenAIEmbeddings(model=config.openai_embeddings_model)
)
else:
init_gcp(config)
embedding_model = LangchainEmbedding(
VertexAIEmbeddings(model_name=config.google_embeddings_model)
)
embedding_dimension = (
OPENAI_EMB_DIM if config.llm_provider == LLMProvider.OpenAI else GECKO_EMB_DIM
)
vectorstore = AstraDBVectorStore(
token=config.astra_db_application_token,
api_endpoint=config.astra_db_api_endpoint,
collection_name=config.astra_db_table_name,
embedding_dimension=embedding_dimension,
)
storage_context = StorageContext.from_defaults(vector_store=vectorstore)
service_context = ServiceContext.from_defaults(
llm=None,
embed_model=embedding_model,
node_parser=SimpleNodeParser.from_defaults(
# According to https://genai.stackexchange.com/questions/317/does-the-length-of-a-token-give-llms-a-preference-for-words-of-certain-lengths
# tokens are ~4 chars on average, so 250 tokens / 125 tokens approximate the 1,000-char chunk_size and 500-char overlap used previously
chunk_size=250,
chunk_overlap=125,
),
)
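# llm=None: no text generation is needed at ingestion time; only the embedding model is used
# to build the index.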
# Perform embedding and add to vectorstore
def add_documents(folder_path):
documents = SimpleDirectoryReader(folder_path).load_data()
VectorStoreIndex.from_documents(
documents=documents,
storage_context=storage_context,
service_context=service_context,
show_progress=True,
)
if __name__ == "__main__":
add_documents("data/docs")
| [] |
2024-01-10 | silvhua/AI-content | src~my_dalle.py | from langchain.llms import OpenAI
import os
import sys
# from langchain.utilities.dalle_image_generator import DallEAPIWrapper
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.llms import OpenAI
sys.path.append(r'C:\Users\silvh\OneDrive\lighthouse\portfolio-projects\langchain\libs\core')
from dalle_image_generator import DallEAPIWrapper
def get_dalle_image(text, n=3, temperature=1, template=None, verbose=False, max_retries=1, max_tokens=900):
llm = OpenAI(
temperature=temperature, openai_organization=os.environ['openai_organization'],
max_retries=max_retries, max_tokens=max_tokens
)
text = text.strip()
if template:
template=template + ': {text}'
else:
template="Generate a detailed prompt to generate an image based on the following social media post: {text}"
prompt = PromptTemplate(
input_variables=["text"],
template=template,
)
print(f'Prompt template: {prompt.template}')
print(f'\nInput: {text}')
chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
image_prompt = chain.run(text).strip()
print(f'\nImage prompt: {image_prompt}')
try:
image_url = DallEAPIWrapper(n=n).run(image_prompt)
except Exception as error:
exc_type, exc_obj, tb = sys.exc_info()
f = tb.tb_frame
lineno = tb.tb_lineno
filename = f.f_code.co_filename
print("An error occurred on line", lineno, "in", filename, ":", error)
image_url = None
results = {
'image_url': image_url,
'description': text,
'prompt': prompt,
'image_prompt': image_prompt
}
return results
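# Example usage (illustrative only; assumes the OPENAI_API_KEY and openai_organization
# environment variables are set):
# results = get_dalle_image("Announcing our new data pipeline project...", n=1)
# print(results["image_url"])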
def create_linkedin_post(results_dict, iteration, model='Dall-E 3', hashtags=['datascience', 'llm']):
print(results_dict[iteration]['description'])
print(f'\n\nPS: The image was generated by {model} (which obviously is not perfect yet). I asked OpenAI to generate the DALL-E prompt based on the text of the above post, and this was the result: "{results_dict[iteration]["image_prompt"].strip()}".')
print(f'\nPPS: If you really want to know, the prompt template I used to generate the DALL-E prompt (with the help of the LangChain `LLMChain` class) was as follows: "{results_dict[iteration]["prompt"].template[:-8].strip()}"')
print(f'\n\n{"".join([f"#{tag} " for tag in hashtags])}')
| [
"Generate a detailed prompt to generate an image based on the following social media post: {text}",
"PLACEHOLDER: {text}"
] |
2024-01-10 | donal0c/LLM_learning_notebooks | Building%20Systems%20with%20the%20ChatGPT%20API~.ipynb_checkpoints~utils-checkpoint.py | import json
import openai
from collections import defaultdict
products_file = 'products.json'
categories_file = 'categories.json'
delimiter = "####"
step_2_system_message_content = f"""
You will be provided with a customer service conversation. \
The most recent user query will be delimited with \
{delimiter} characters.
Output a python list of objects, where each object has \
the following format:
'category': <one of Computers and Laptops, \
Smartphones and Accessories, \
Televisions and Home Theater Systems, \
Gaming Consoles and Accessories,
Audio Equipment, Cameras and Camcorders>,
OR
'products': <a list of products that must \
be found in the allowed products below>
Where the categories and products must be found in \
the customer service query.
If a product is mentioned, it must be associated with \
the correct category in the allowed products list below.
If no products or categories are found, output an \
empty list.
Only list products and categories that have not already \
been mentioned and discussed in the earlier parts of \
the conversation.
Allowed products:
Computers and Laptops category:
TechPro Ultrabook
BlueWave Gaming Laptop
PowerLite Convertible
TechPro Desktop
BlueWave Chromebook
Smartphones and Accessories category:
SmartX ProPhone
MobiTech PowerCase
SmartX MiniPhone
MobiTech Wireless Charger
SmartX EarBuds
Televisions and Home Theater Systems category:
CineView 4K TV
SoundMax Home Theater
CineView 8K TV
SoundMax Soundbar
CineView OLED TV
Gaming Consoles and Accessories category:
GameSphere X
ProGamer Controller
GameSphere Y
ProGamer Racing Wheel
GameSphere VR Headset
Audio Equipment category:
AudioPhonic Noise-Canceling Headphones
WaveSound Bluetooth Speaker
AudioPhonic True Wireless Earbuds
WaveSound Soundbar
AudioPhonic Turntable
Cameras and Camcorders category:
FotoSnap DSLR Camera
ActionCam 4K
FotoSnap Mirrorless Camera
ZoomMaster Camcorder
FotoSnap Instant Camera
Only output the list of objects, with nothing else.
"""
step_2_system_message = {'role':'system', 'content': step_2_system_message_content}
step_4_system_message_content = f"""
You are a customer service assistant for a large electronic store. \
Respond in a friendly and helpful tone, with VERY concise answers. \
Make sure to ask the user relevant follow-up questions.
"""
step_4_system_message = {'role':'system', 'content': step_4_system_message_content}
step_6_system_message_content = f"""
You are an assistant that evaluates whether \
customer service agent responses sufficiently \
answer customer questions, and also validates that \
all the facts the assistant cites from the product \
information are correct.
The conversation history, product information, user and customer \
service agent messages will be delimited by \
3 backticks, i.e. ```.
Respond with a Y or N character, with no punctuation:
Y - if the output sufficiently answers the question \
AND the response correctly uses product information
N - otherwise
Output a single letter only.
"""
step_6_system_message = {'role':'system', 'content': step_6_system_message_content}
def get_completion_from_messages(messages, model="gpt-3.5-turbo", temperature=0, max_tokens=500):
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
)
return response.choices[0].message["content"]
def create_categories():
categories_dict = {
'Billing': [
'Unsubscribe or upgrade',
'Add a payment method',
'Explanation for charge',
'Dispute a charge'],
'Technical Support':[
'General troubleshooting',
'Device compatibility',
'Software updates'],
'Account Management':[
'Password reset',
'Update personal information',
'Close account',
'Account security'],
'General Inquiry':[
'Product information',
'Pricing',
'Feedback',
'Speak to a human']
}
with open(categories_file, 'w') as file:
json.dump(categories_dict, file)
return categories_dict
def get_categories():
with open(categories_file, 'r') as file:
categories = json.load(file)
return categories
def get_product_list():
"""
Used in L4 to get a flat list of products
"""
products = get_products()
product_list = []
for product in products.keys():
product_list.append(product)
return product_list
def get_products_and_category():
"""
Used in L5
"""
products = get_products()
products_by_category = defaultdict(list)
for product_name, product_info in products.items():
category = product_info.get('category')
if category:
products_by_category[category].append(product_info.get('name'))
return dict(products_by_category)
def get_products():
with open(products_file, 'r') as file:
products = json.load(file)
return products
def find_category_and_product(user_input,products_and_category):
delimiter = "####"
system_message = f"""
You will be provided with customer service queries. \
The customer service query will be delimited with {delimiter} characters.
Output a python list of json objects, where each object has the following format:
'category': <one of Computers and Laptops, Smartphones and Accessories, Televisions and Home Theater Systems, \
Gaming Consoles and Accessories, Audio Equipment, Cameras and Camcorders>,
OR
'products': <a list of products that must be found in the allowed products below>
Where the categories and products must be found in the customer service query.
If a product is mentioned, it must be associated with the correct category in the allowed products list below.
If no products or categories are found, output an empty list.
The allowed products are provided in JSON format.
The keys of each item represent the category.
The values of each item is a list of products that are within that category.
Allowed products: {products_and_category}
"""
messages = [
{'role':'system', 'content': system_message},
{'role':'user', 'content': f"{delimiter}{user_input}{delimiter}"},
]
return get_completion_from_messages(messages)
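# The model's reply comes back as a plain string; read_string_to_list() below parses it into
# Python objects.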
def find_category_and_product_only(user_input,products_and_category):
delimiter = "####"
system_message = f"""
You will be provided with customer service queries. \
The customer service query will be delimited with {delimiter} characters.
Output a python list of objects, where each object has the following format:
'category': <one of Computers and Laptops, Smartphones and Accessories, Televisions and Home Theater Systems, \
Gaming Consoles and Accessories, Audio Equipment, Cameras and Camcorders>,
OR
'products': <a list of products that must be found in the allowed products below>
Where the categories and products must be found in the customer service query.
If a product is mentioned, it must be associated with the correct category in the allowed products list below.
If no products or categories are found, output an empty list.
Allowed products:
Computers and Laptops category:
TechPro Ultrabook
BlueWave Gaming Laptop
PowerLite Convertible
TechPro Desktop
BlueWave Chromebook
Smartphones and Accessories category:
SmartX ProPhone
MobiTech PowerCase
SmartX MiniPhone
MobiTech Wireless Charger
SmartX EarBuds
Televisions and Home Theater Systems category:
CineView 4K TV
SoundMax Home Theater
CineView 8K TV
SoundMax Soundbar
CineView OLED TV
Gaming Consoles and Accessories category:
GameSphere X
ProGamer Controller
GameSphere Y
ProGamer Racing Wheel
GameSphere VR Headset
Audio Equipment category:
AudioPhonic Noise-Canceling Headphones
WaveSound Bluetooth Speaker
AudioPhonic True Wireless Earbuds
WaveSound Soundbar
AudioPhonic Turntable
Cameras and Camcorders category:
FotoSnap DSLR Camera
ActionCam 4K
FotoSnap Mirrorless Camera
ZoomMaster Camcorder
FotoSnap Instant Camera
Only output the list of objects, nothing else.
"""
messages = [
{'role':'system', 'content': system_message},
{'role':'user', 'content': f"{delimiter}{user_input}{delimiter}"},
]
return get_completion_from_messages(messages)
def get_products_from_query(user_msg):
"""
Code from L5, used in L8
"""
products_and_category = get_products_and_category()
delimiter = "####"
system_message = f"""
You will be provided with customer service queries. \
The customer service query will be delimited with {delimiter} characters.
Output a python list of json objects, where each object has the following format:
'category': <one of Computers and Laptops, Smartphones and Accessories, Televisions and Home Theater Systems, \
Gaming Consoles and Accessories, Audio Equipment, Cameras and Camcorders>,
OR
'products': <a list of products that must be found in the allowed products below>
Where the categories and products must be found in the customer service query.
If a product is mentioned, it must be associated with the correct category in the allowed products list below.
If no products or categories are found, output an empty list.
The allowed products are provided in JSON format.
The keys of each item represent the category.
The values of each item is a list of products that are within that category.
Allowed products: {products_and_category}
"""
messages = [
{'role':'system', 'content': system_message},
{'role':'user', 'content': f"{delimiter}{user_msg}{delimiter}"},
]
category_and_product_response = get_completion_from_messages(messages)
return category_and_product_response
# product look up (either by category or by product within category)
def get_product_by_name(name):
products = get_products()
return products.get(name, None)
def get_products_by_category(category):
products = get_products()
return [product for product in products.values() if product["category"] == category]
def get_mentioned_product_info(data_list):
"""
Used in L5 and L6
"""
product_info_l = []
if data_list is None:
return product_info_l
for data in data_list:
try:
if "products" in data:
products_list = data["products"]
for product_name in products_list:
product = get_product_by_name(product_name)
if product:
product_info_l.append(product)
else:
print(f"Error: Product '{product_name}' not found")
elif "category" in data:
category_name = data["category"]
category_products = get_products_by_category(category_name)
for product in category_products:
product_info_l.append(product)
else:
print("Error: Invalid object format")
except Exception as e:
print(f"Error: {e}")
return product_info_l
def read_string_to_list(input_string):
if input_string is None:
return None
try:
input_string = input_string.replace("'", "\"") # Replace single quotes with double quotes for valid JSON
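# Caveat: this blanket quote swap corrupts values that contain apostrophes (e.g. "life's"),
# producing invalid JSON.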
data = json.loads(input_string)
return data
except json.JSONDecodeError:
print("Error: Invalid JSON string")
return None
def generate_output_string(data_list):
output_string = ""
if data_list is None:
return output_string
for data in data_list:
try:
if "products" in data:
products_list = data["products"]
for product_name in products_list:
product = get_product_by_name(product_name)
if product:
output_string += json.dumps(product, indent=4) + "\n"
else:
print(f"Error: Product '{product_name}' not found")
elif "category" in data:
category_name = data["category"]
category_products = get_products_by_category(category_name)
for product in category_products:
output_string += json.dumps(product, indent=4) + "\n"
else:
print("Error: Invalid object format")
except Exception as e:
print(f"Error: {e}")
return output_string
# Example usage:
#product_information_for_user_message_1 = generate_output_string(category_and_product_list)
#print(product_information_for_user_message_1)
def answer_user_msg(user_msg,product_info):
"""
Code from L5, used in L6
"""
delimiter = "####"
system_message = f"""
You are a customer service assistant for a large electronic store. \
Respond in a friendly and helpful tone, with concise answers. \
Make sure to ask the user relevant follow up questions.
"""
# user_msg = f"""
# tell me about the smartx pro phone and the fotosnap camera, the dslr one. Also what tell me about your tvs"""
messages = [
{'role':'system', 'content': system_message},
{'role':'user', 'content': f"{delimiter}{user_msg}{delimiter}"},
{'role':'assistant', 'content': f"Relevant product information:\n{product_info}"},
]
response = get_completion_from_messages(messages)
return response
def create_products():
"""
Create products dictionary and save it to a file named products.json
"""
# product information
# fun fact: all these products are fake and were generated by a language model
products = {
"TechPro Ultrabook": {
"name": "TechPro Ultrabook",
"category": "Computers and Laptops",
"brand": "TechPro",
"model_number": "TP-UB100",
"warranty": "1 year",
"rating": 4.5,
"features": ["13.3-inch display", "8GB RAM", "256GB SSD", "Intel Core i5 processor"],
"description": "A sleek and lightweight ultrabook for everyday use.",
"price": 799.99
},
"BlueWave Gaming Laptop": {
"name": "BlueWave Gaming Laptop",
"category": "Computers and Laptops",
"brand": "BlueWave",
"model_number": "BW-GL200",
"warranty": "2 years",
"rating": 4.7,
"features": ["15.6-inch display", "16GB RAM", "512GB SSD", "NVIDIA GeForce RTX 3060"],
"description": "A high-performance gaming laptop for an immersive experience.",
"price": 1199.99
},
"PowerLite Convertible": {
"name": "PowerLite Convertible",
"category": "Computers and Laptops",
"brand": "PowerLite",
"model_number": "PL-CV300",
"warranty": "1 year",
"rating": 4.3,
"features": ["14-inch touchscreen", "8GB RAM", "256GB SSD", "360-degree hinge"],
"description": "A versatile convertible laptop with a responsive touchscreen.",
"price": 699.99
},
"TechPro Desktop": {
"name": "TechPro Desktop",
"category": "Computers and Laptops",
"brand": "TechPro",
"model_number": "TP-DT500",
"warranty": "1 year",
"rating": 4.4,
"features": ["Intel Core i7 processor", "16GB RAM", "1TB HDD", "NVIDIA GeForce GTX 1660"],
"description": "A powerful desktop computer for work and play.",
"price": 999.99
},
"BlueWave Chromebook": {
"name": "BlueWave Chromebook",
"category": "Computers and Laptops",
"brand": "BlueWave",
"model_number": "BW-CB100",
"warranty": "1 year",
"rating": 4.1,
"features": ["11.6-inch display", "4GB RAM", "32GB eMMC", "Chrome OS"],
"description": "A compact and affordable Chromebook for everyday tasks.",
"price": 249.99
},
"SmartX ProPhone": {
"name": "SmartX ProPhone",
"category": "Smartphones and Accessories",
"brand": "SmartX",
"model_number": "SX-PP10",
"warranty": "1 year",
"rating": 4.6,
"features": ["6.1-inch display", "128GB storage", "12MP dual camera", "5G"],
"description": "A powerful smartphone with advanced camera features.",
"price": 899.99
},
"MobiTech PowerCase": {
"name": "MobiTech PowerCase",
"category": "Smartphones and Accessories",
"brand": "MobiTech",
"model_number": "MT-PC20",
"warranty": "1 year",
"rating": 4.3,
"features": ["5000mAh battery", "Wireless charging", "Compatible with SmartX ProPhone"],
"description": "A protective case with built-in battery for extended usage.",
"price": 59.99
},
"SmartX MiniPhone": {
"name": "SmartX MiniPhone",
"category": "Smartphones and Accessories",
"brand": "SmartX",
"model_number": "SX-MP5",
"warranty": "1 year",
"rating": 4.2,
"features": ["4.7-inch display", "64GB storage", "8MP camera", "4G"],
"description": "A compact and affordable smartphone for basic tasks.",
"price": 399.99
},
"MobiTech Wireless Charger": {
"name": "MobiTech Wireless Charger",
"category": "Smartphones and Accessories",
"brand": "MobiTech",
"model_number": "MT-WC10",
"warranty": "1 year",
"rating": 4.5,
"features": ["10W fast charging", "Qi-compatible", "LED indicator", "Compact design"],
"description": "A convenient wireless charger for a clutter-free workspace.",
"price": 29.99
},
"SmartX EarBuds": {
"name": "SmartX EarBuds",
"category": "Smartphones and Accessories",
"brand": "SmartX",
"model_number": "SX-EB20",
"warranty": "1 year",
"rating": 4.4,
"features": ["True wireless", "Bluetooth 5.0", "Touch controls", "24-hour battery life"],
"description": "Experience true wireless freedom with these comfortable earbuds.",
"price": 99.99
},
"CineView 4K TV": {
"name": "CineView 4K TV",
"category": "Televisions and Home Theater Systems",
"brand": "CineView",
"model_number": "CV-4K55",
"warranty": "2 years",
"rating": 4.8,
"features": ["55-inch display", "4K resolution", "HDR", "Smart TV"],
"description": "A stunning 4K TV with vibrant colors and smart features.",
"price": 599.99
},
"SoundMax Home Theater": {
"name": "SoundMax Home Theater",
"category": "Televisions and Home Theater Systems",
"brand": "SoundMax",
"model_number": "SM-HT100",
"warranty": "1 year",
"rating": 4.4,
"features": ["5.1 channel", "1000W output", "Wireless subwoofer", "Bluetooth"],
"description": "A powerful home theater system for an immersive audio experience.",
"price": 399.99
},
"CineView 8K TV": {
"name": "CineView 8K TV",
"category": "Televisions and Home Theater Systems",
"brand": "CineView",
"model_number": "CV-8K65",
"warranty": "2 years",
"rating": 4.9,
"features": ["65-inch display", "8K resolution", "HDR", "Smart TV"],
"description": "Experience the future of television with this stunning 8K TV.",
"price": 2999.99
},
"SoundMax Soundbar": {
"name": "SoundMax Soundbar",
"category": "Televisions and Home Theater Systems",
"brand": "SoundMax",
"model_number": "SM-SB50",
"warranty": "1 year",
"rating": 4.3,
"features": ["2.1 channel", "300W output", "Wireless subwoofer", "Bluetooth"],
"description": "Upgrade your TV's audio with this sleek and powerful soundbar.",
"price": 199.99
},
"CineView OLED TV": {
"name": "CineView OLED TV",
"category": "Televisions and Home Theater Systems",
"brand": "CineView",
"model_number": "CV-OLED55",
"warranty": "2 years",
"rating": 4.7,
"features": ["55-inch display", "4K resolution", "HDR", "Smart TV"],
"description": "Experience true blacks and vibrant colors with this OLED TV.",
"price": 1499.99
},
"GameSphere X": {
"name": "GameSphere X",
"category": "Gaming Consoles and Accessories",
"brand": "GameSphere",
"model_number": "GS-X",
"warranty": "1 year",
"rating": 4.9,
"features": ["4K gaming", "1TB storage", "Backward compatibility", "Online multiplayer"],
"description": "A next-generation gaming console for the ultimate gaming experience.",
"price": 499.99
},
"ProGamer Controller": {
"name": "ProGamer Controller",
"category": "Gaming Consoles and Accessories",
"brand": "ProGamer",
"model_number": "PG-C100",
"warranty": "1 year",
"rating": 4.2,
"features": ["Ergonomic design", "Customizable buttons", "Wireless", "Rechargeable battery"],
"description": "A high-quality gaming controller for precision and comfort.",
"price": 59.99
},
"GameSphere Y": {
"name": "GameSphere Y",
"category": "Gaming Consoles and Accessories",
"brand": "GameSphere",
"model_number": "GS-Y",
"warranty": "1 year",
"rating": 4.8,
"features": ["4K gaming", "500GB storage", "Backward compatibility", "Online multiplayer"],
"description": "A compact gaming console with powerful performance.",
"price": 399.99
},
"ProGamer Racing Wheel": {
"name": "ProGamer Racing Wheel",
"category": "Gaming Consoles and Accessories",
"brand": "ProGamer",
"model_number": "PG-RW200",
"warranty": "1 year",
"rating": 4.5,
"features": ["Force feedback", "Adjustable pedals", "Paddle shifters", "Compatible with GameSphere X"],
"description": "Enhance your racing games with this realistic racing wheel.",
"price": 249.99
},
"GameSphere VR Headset": {
"name": "GameSphere VR Headset",
"category": "Gaming Consoles and Accessories",
"brand": "GameSphere",
"model_number": "GS-VR",
"warranty": "1 year",
"rating": 4.6,
"features": ["Immersive VR experience", "Built-in headphones", "Adjustable headband", "Compatible with GameSphere X"],
"description": "Step into the world of virtual reality with this comfortable VR headset.",
"price": 299.99
},
"AudioPhonic Noise-Canceling Headphones": {
"name": "AudioPhonic Noise-Canceling Headphones",
"category": "Audio Equipment",
"brand": "AudioPhonic",
"model_number": "AP-NC100",
"warranty": "1 year",
"rating": 4.6,
"features": ["Active noise-canceling", "Bluetooth", "20-hour battery life", "Comfortable fit"],
"description": "Experience immersive sound with these noise-canceling headphones.",
"price": 199.99
},
"WaveSound Bluetooth Speaker": {
"name": "WaveSound Bluetooth Speaker",
"category": "Audio Equipment",
"brand": "WaveSound",
"model_number": "WS-BS50",
"warranty": "1 year",
"rating": 4.5,
"features": ["Portable", "10-hour battery life", "Water-resistant", "Built-in microphone"],
"description": "A compact and versatile Bluetooth speaker for music on the go.",
"price": 49.99
},
"AudioPhonic True Wireless Earbuds": {
"name": "AudioPhonic True Wireless Earbuds",
"category": "Audio Equipment",
"brand": "AudioPhonic",
"model_number": "AP-TW20",
"warranty": "1 year",
"rating": 4.4,
"features": ["True wireless", "Bluetooth 5.0", "Touch controls", "18-hour battery life"],
"description": "Enjoy music without wires with these comfortable true wireless earbuds.",
"price": 79.99
},
"WaveSound Soundbar": {
"name": "WaveSound Soundbar",
"category": "Audio Equipment",
"brand": "WaveSound",
"model_number": "WS-SB40",
"warranty": "1 year",
"rating": 4.3,
"features": ["2.0 channel", "80W output", "Bluetooth", "Wall-mountable"],
"description": "Upgrade your TV's audio with this slim and powerful soundbar.",
"price": 99.99
},
"AudioPhonic Turntable": {
"name": "AudioPhonic Turntable",
"category": "Audio Equipment",
"brand": "AudioPhonic",
"model_number": "AP-TT10",
"warranty": "1 year",
"rating": 4.2,
"features": ["3-speed", "Built-in speakers", "Bluetooth", "USB recording"],
"description": "Rediscover your vinyl collection with this modern turntable.",
"price": 149.99
},
"FotoSnap DSLR Camera": {
"name": "FotoSnap DSLR Camera",
"category": "Cameras and Camcorders",
"brand": "FotoSnap",
"model_number": "FS-DSLR200",
"warranty": "1 year",
"rating": 4.7,
"features": ["24.2MP sensor", "1080p video", "3-inch LCD", "Interchangeable lenses"],
"description": "Capture stunning photos and videos with this versatile DSLR camera.",
"price": 599.99
},
"ActionCam 4K": {
"name": "ActionCam 4K",
"category": "Cameras and Camcorders",
"brand": "ActionCam",
"model_number": "AC-4K",
"warranty": "1 year",
"rating": 4.4,
"features": ["4K video", "Waterproof", "Image stabilization", "Wi-Fi"],
"description": "Record your adventures with this rugged and compact 4K action camera.",
"price": 299.99
},
"FotoSnap Mirrorless Camera": {
"name": "FotoSnap Mirrorless Camera",
"category": "Cameras and Camcorders",
"brand": "FotoSnap",
"model_number": "FS-ML100",
"warranty": "1 year",
"rating": 4.6,
"features": ["20.1MP sensor", "4K video", "3-inch touchscreen", "Interchangeable lenses"],
"description": "A compact and lightweight mirrorless camera with advanced features.",
"price": 799.99
},
"ZoomMaster Camcorder": {
"name": "ZoomMaster Camcorder",
"category": "Cameras and Camcorders",
"brand": "ZoomMaster",
"model_number": "ZM-CM50",
"warranty": "1 year",
"rating": 4.3,
"features": ["1080p video", "30x optical zoom", "3-inch LCD", "Image stabilization"],
"description": "Capture life's moments with this easy-to-use camcorder.",
"price": 249.99
},
"FotoSnap Instant Camera": {
"name": "FotoSnap Instant Camera",
"category": "Cameras and Camcorders",
"brand": "FotoSnap",
"model_number": "FS-IC10",
"warranty": "1 year",
"rating": 4.1,
"features": ["Instant prints", "Built-in flash", "Selfie mirror", "Battery-powered"],
"description": "Create instant memories with this fun and portable instant camera.",
"price": 69.99
}
}
products_file = 'products.json'
with open(products_file, 'w') as file:
json.dump(products, file)
return products | [
"Relevant product information:\nPLACEHOLDER",
"PLACEHOLDERPLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | donal0c/LLM_learning_notebooks | Building%20Systems%20with%20the%20ChatGPT%20API~blah.py | import json
import openai
from collections import defaultdict
products_file = 'products.json'
categories_file = 'categories.json'
delimiter = "####"
step_2_system_message_content = f"""
You will be provided with a customer service conversation. \
The most recent user query will be delimited with \
{delimiter} characters.
Output a python list of objects, where each object has \
the following format:
'category': <one of Computers and Laptops, \
Smartphones and Accessories, \
Televisions and Home Theater Systems, \
Gaming Consoles and Accessories,
Audio Equipment, Cameras and Camcorders>,
OR
'products': <a list of products that must \
be found in the allowed products below>
Where the categories and products must be found in \
the customer service query.
If a product is mentioned, it must be associated with \
the correct category in the allowed products list below.
If no products or categories are found, output an \
empty list.
Only list products and categories that have not already \
been mentioned and discussed in the earlier parts of \
the conversation.
Allowed products:
Computers and Laptops category:
TechPro Ultrabook
BlueWave Gaming Laptop
PowerLite Convertible
TechPro Desktop
BlueWave Chromebook
Smartphones and Accessories category:
SmartX ProPhone
MobiTech PowerCase
SmartX MiniPhone
MobiTech Wireless Charger
SmartX EarBuds
Televisions and Home Theater Systems category:
CineView 4K TV
SoundMax Home Theater
CineView 8K TV
SoundMax Soundbar
CineView OLED TV
Gaming Consoles and Accessories category:
GameSphere X
ProGamer Controller
GameSphere Y
ProGamer Racing Wheel
GameSphere VR Headset
Audio Equipment category:
AudioPhonic Noise-Canceling Headphones
WaveSound Bluetooth Speaker
AudioPhonic True Wireless Earbuds
WaveSound Soundbar
AudioPhonic Turntable
Cameras and Camcorders category:
FotoSnap DSLR Camera
ActionCam 4K
FotoSnap Mirrorless Camera
ZoomMaster Camcorder
FotoSnap Instant Camera
Only output the list of objects, with nothing else.
"""
step_2_system_message = {'role':'system', 'content': step_2_system_message_content}
step_4_system_message_content = f"""
You are a customer service assistant for a large electronic store. \
Respond in a friendly and helpful tone, with VERY concise answers. \
Make sure to ask the user relevant follow-up questions.
"""
step_4_system_message = {'role':'system', 'content': step_4_system_message_content}
step_6_system_message_content = f"""
You are an assistant that evaluates whether \
customer service agent responses sufficiently \
answer customer questions, and also validates that \
all the facts the assistant cites from the product \
information are correct.
The conversation history, product information, user and customer \
service agent messages will be delimited by \
3 backticks, i.e. ```.
Respond with a Y or N character, with no punctuation:
Y - if the output sufficiently answers the question \
AND the response correctly uses product information
N - otherwise
Output a single letter only.
"""
step_6_system_message = {'role':'system', 'content': step_6_system_message_content}
def get_completion_from_messages(messages, model="gpt-3.5-turbo", temperature=0, max_tokens=500):
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
)
return response.choices[0].message["content"]
def create_categories():
categories_dict = {
'Billing': [
'Unsubscribe or upgrade',
'Add a payment method',
'Explanation for charge',
'Dispute a charge'],
'Technical Support':[
'General troubleshooting',
'Device compatibility',
'Software updates'],
'Account Management':[
'Password reset',
'Update personal information',
'Close account',
'Account security'],
'General Inquiry':[
'Product information',
'Pricing',
'Feedback',
'Speak to a human']
}
with open(categories_file, 'w') as file:
json.dump(categories_dict, file)
return categories_dict
def get_categories():
with open(categories_file, 'r') as file:
categories = json.load(file)
return categories
def get_product_list():
"""
Used in L4 to get a flat list of products
"""
products = get_products()
product_list = []
for product in products.keys():
product_list.append(product)
return product_list
def get_products_and_category():
"""
Used in L5
"""
products = get_products()
products_by_category = defaultdict(list)
for product_name, product_info in products.items():
category = product_info.get('category')
if category:
products_by_category[category].append(product_info.get('name'))
return dict(products_by_category)
def get_products():
with open(products_file, 'r') as file:
products = json.load(file)
return products
def find_category_and_product(user_input,products_and_category):
delimiter = "####"
system_message = f"""
You will be provided with customer service queries. \
The customer service query will be delimited with {delimiter} characters.
Output a python list of json objects, where each object has the following format:
'category': <one of Computers and Laptops, Smartphones and Accessories, Televisions and Home Theater Systems, \
Gaming Consoles and Accessories, Audio Equipment, Cameras and Camcorders>,
OR
'products': <a list of products that must be found in the allowed products below>
Where the categories and products must be found in the customer service query.
If a product is mentioned, it must be associated with the correct category in the allowed products list below.
If no products or categories are found, output an empty list.
The allowed products are provided in JSON format.
The keys of each item represent the category.
The values of each item is a list of products that are within that category.
Allowed products: {products_and_category}
"""
messages = [
{'role':'system', 'content': system_message},
{'role':'user', 'content': f"{delimiter}{user_input}{delimiter}"},
]
return get_completion_from_messages(messages)
def find_category_and_product_only(user_input,products_and_category):
delimiter = "####"
system_message = f"""
You will be provided with customer service queries. \
The customer service query will be delimited with {delimiter} characters.
Output a python list of objects, where each object has the following format:
'category': <one of Computers and Laptops, Smartphones and Accessories, Televisions and Home Theater Systems, \
Gaming Consoles and Accessories, Audio Equipment, Cameras and Camcorders>,
OR
'products': <a list of products that must be found in the allowed products below>
Where the categories and products must be found in the customer service query.
If a product is mentioned, it must be associated with the correct category in the allowed products list below.
If no products or categories are found, output an empty list.
Allowed products:
Computers and Laptops category:
TechPro Ultrabook
BlueWave Gaming Laptop
PowerLite Convertible
TechPro Desktop
BlueWave Chromebook
Smartphones and Accessories category:
SmartX ProPhone
MobiTech PowerCase
SmartX MiniPhone
MobiTech Wireless Charger
SmartX EarBuds
Televisions and Home Theater Systems category:
CineView 4K TV
SoundMax Home Theater
CineView 8K TV
SoundMax Soundbar
CineView OLED TV
Gaming Consoles and Accessories category:
GameSphere X
ProGamer Controller
GameSphere Y
ProGamer Racing Wheel
GameSphere VR Headset
Audio Equipment category:
AudioPhonic Noise-Canceling Headphones
WaveSound Bluetooth Speaker
AudioPhonic True Wireless Earbuds
WaveSound Soundbar
AudioPhonic Turntable
Cameras and Camcorders category:
FotoSnap DSLR Camera
ActionCam 4K
FotoSnap Mirrorless Camera
ZoomMaster Camcorder
FotoSnap Instant Camera
Only output the list of objects, nothing else.
"""
messages = [
{'role':'system', 'content': system_message},
{'role':'user', 'content': f"{delimiter}{user_input}{delimiter}"},
]
return get_completion_from_messages(messages)
def get_products_from_query(user_msg):
"""
Code from L5, used in L8
"""
products_and_category = get_products_and_category()
delimiter = "####"
system_message = f"""
You will be provided with customer service queries. \
The customer service query will be delimited with {delimiter} characters.
Output a python list of json objects, where each object has the following format:
'category': <one of Computers and Laptops, Smartphones and Accessories, Televisions and Home Theater Systems, \
Gaming Consoles and Accessories, Audio Equipment, Cameras and Camcorders>,
OR
'products': <a list of products that must be found in the allowed products below>
Where the categories and products must be found in the customer service query.
If a product is mentioned, it must be associated with the correct category in the allowed products list below.
If no products or categories are found, output an empty list.
The allowed products are provided in JSON format.
The keys of each item represent the category.
The values of each item is a list of products that are within that category.
Allowed products: {products_and_category}
"""
messages = [
{'role':'system', 'content': system_message},
{'role':'user', 'content': f"{delimiter}{user_msg}{delimiter}"},
]
category_and_product_response = get_completion_from_messages(messages)
return category_and_product_response
# product look up (either by category or by product within category)
def get_product_by_name(name):
products = get_products()
return products.get(name, None)
def get_products_by_category(category):
products = get_products()
return [product for product in products.values() if product["category"] == category]
def get_mentioned_product_info(data_list):
"""
Used in L5 and L6
"""
product_info_l = []
if data_list is None:
return product_info_l
for data in data_list:
try:
if "products" in data:
products_list = data["products"]
for product_name in products_list:
product = get_product_by_name(product_name)
if product:
product_info_l.append(product)
else:
print(f"Error: Product '{product_name}' not found")
elif "category" in data:
category_name = data["category"]
category_products = get_products_by_category(category_name)
for product in category_products:
product_info_l.append(product)
else:
print("Error: Invalid object format")
except Exception as e:
print(f"Error: {e}")
return product_info_l
def read_string_to_list(input_string):
if input_string is None:
return None
try:
input_string = input_string.replace("'", "\"") # Replace single quotes with double quotes for valid JSON
data = json.loads(input_string)
return data
except json.JSONDecodeError:
print("Error: Invalid JSON string")
return None
def generate_output_string(data_list):
output_string = ""
if data_list is None:
return output_string
for data in data_list:
try:
if "products" in data:
products_list = data["products"]
for product_name in products_list:
product = get_product_by_name(product_name)
if product:
output_string += json.dumps(product, indent=4) + "\n"
else:
print(f"Error: Product '{product_name}' not found")
elif "category" in data:
category_name = data["category"]
category_products = get_products_by_category(category_name)
for product in category_products:
output_string += json.dumps(product, indent=4) + "\n"
else:
print("Error: Invalid object format")
except Exception as e:
print(f"Error: {e}")
return output_string
# Example usage:
#product_information_for_user_message_1 = generate_output_string(category_and_product_list)
#print(product_information_for_user_message_1)
def answer_user_msg(user_msg,product_info):
"""
Code from L5, used in L6
"""
delimiter = "####"
system_message = f"""
You are a customer service assistant for a large electronic store. \
Respond in a friendly and helpful tone, with concise answers. \
Make sure to ask the user relevant follow up questions.
"""
# user_msg = f"""
# tell me about the smartx pro phone and the fotosnap camera, the dslr one. Also what tell me about your tvs"""
messages = [
{'role':'system', 'content': system_message},
{'role':'user', 'content': f"{delimiter}{user_msg}{delimiter}"},
{'role':'assistant', 'content': f"Relevant product information:\n{product_info}"},
]
response = get_completion_from_messages(messages)
return response
def create_products():
"""
Create products dictionary and save it to a file named products.json
"""
# product information
# fun fact: all these products are fake and were generated by a language model
products = {
"TechPro Ultrabook": {
"name": "TechPro Ultrabook",
"category": "Computers and Laptops",
"brand": "TechPro",
"model_number": "TP-UB100",
"warranty": "1 year",
"rating": 4.5,
"features": ["13.3-inch display", "8GB RAM", "256GB SSD", "Intel Core i5 processor"],
"description": "A sleek and lightweight ultrabook for everyday use.",
"price": 799.99
},
"BlueWave Gaming Laptop": {
"name": "BlueWave Gaming Laptop",
"category": "Computers and Laptops",
"brand": "BlueWave",
"model_number": "BW-GL200",
"warranty": "2 years",
"rating": 4.7,
"features": ["15.6-inch display", "16GB RAM", "512GB SSD", "NVIDIA GeForce RTX 3060"],
"description": "A high-performance gaming laptop for an immersive experience.",
"price": 1199.99
},
"PowerLite Convertible": {
"name": "PowerLite Convertible",
"category": "Computers and Laptops",
"brand": "PowerLite",
"model_number": "PL-CV300",
"warranty": "1 year",
"rating": 4.3,
"features": ["14-inch touchscreen", "8GB RAM", "256GB SSD", "360-degree hinge"],
"description": "A versatile convertible laptop with a responsive touchscreen.",
"price": 699.99
},
"TechPro Desktop": {
"name": "TechPro Desktop",
"category": "Computers and Laptops",
"brand": "TechPro",
"model_number": "TP-DT500",
"warranty": "1 year",
"rating": 4.4,
"features": ["Intel Core i7 processor", "16GB RAM", "1TB HDD", "NVIDIA GeForce GTX 1660"],
"description": "A powerful desktop computer for work and play.",
"price": 999.99
},
"BlueWave Chromebook": {
"name": "BlueWave Chromebook",
"category": "Computers and Laptops",
"brand": "BlueWave",
"model_number": "BW-CB100",
"warranty": "1 year",
"rating": 4.1,
"features": ["11.6-inch display", "4GB RAM", "32GB eMMC", "Chrome OS"],
"description": "A compact and affordable Chromebook for everyday tasks.",
"price": 249.99
},
"SmartX ProPhone": {
"name": "SmartX ProPhone",
"category": "Smartphones and Accessories",
"brand": "SmartX",
"model_number": "SX-PP10",
"warranty": "1 year",
"rating": 4.6,
"features": ["6.1-inch display", "128GB storage", "12MP dual camera", "5G"],
"description": "A powerful smartphone with advanced camera features.",
"price": 899.99
},
"MobiTech PowerCase": {
"name": "MobiTech PowerCase",
"category": "Smartphones and Accessories",
"brand": "MobiTech",
"model_number": "MT-PC20",
"warranty": "1 year",
"rating": 4.3,
"features": ["5000mAh battery", "Wireless charging", "Compatible with SmartX ProPhone"],
"description": "A protective case with built-in battery for extended usage.",
"price": 59.99
},
"SmartX MiniPhone": {
"name": "SmartX MiniPhone",
"category": "Smartphones and Accessories",
"brand": "SmartX",
"model_number": "SX-MP5",
"warranty": "1 year",
"rating": 4.2,
"features": ["4.7-inch display", "64GB storage", "8MP camera", "4G"],
"description": "A compact and affordable smartphone for basic tasks.",
"price": 399.99
},
"MobiTech Wireless Charger": {
"name": "MobiTech Wireless Charger",
"category": "Smartphones and Accessories",
"brand": "MobiTech",
"model_number": "MT-WC10",
"warranty": "1 year",
"rating": 4.5,
"features": ["10W fast charging", "Qi-compatible", "LED indicator", "Compact design"],
"description": "A convenient wireless charger for a clutter-free workspace.",
"price": 29.99
},
"SmartX EarBuds": {
"name": "SmartX EarBuds",
"category": "Smartphones and Accessories",
"brand": "SmartX",
"model_number": "SX-EB20",
"warranty": "1 year",
"rating": 4.4,
"features": ["True wireless", "Bluetooth 5.0", "Touch controls", "24-hour battery life"],
"description": "Experience true wireless freedom with these comfortable earbuds.",
"price": 99.99
},
"CineView 4K TV": {
"name": "CineView 4K TV",
"category": "Televisions and Home Theater Systems",
"brand": "CineView",
"model_number": "CV-4K55",
"warranty": "2 years",
"rating": 4.8,
"features": ["55-inch display", "4K resolution", "HDR", "Smart TV"],
"description": "A stunning 4K TV with vibrant colors and smart features.",
"price": 599.99
},
"SoundMax Home Theater": {
"name": "SoundMax Home Theater",
"category": "Televisions and Home Theater Systems",
"brand": "SoundMax",
"model_number": "SM-HT100",
"warranty": "1 year",
"rating": 4.4,
"features": ["5.1 channel", "1000W output", "Wireless subwoofer", "Bluetooth"],
"description": "A powerful home theater system for an immersive audio experience.",
"price": 399.99
},
"CineView 8K TV": {
"name": "CineView 8K TV",
"category": "Televisions and Home Theater Systems",
"brand": "CineView",
"model_number": "CV-8K65",
"warranty": "2 years",
"rating": 4.9,
"features": ["65-inch display", "8K resolution", "HDR", "Smart TV"],
"description": "Experience the future of television with this stunning 8K TV.",
"price": 2999.99
},
"SoundMax Soundbar": {
"name": "SoundMax Soundbar",
"category": "Televisions and Home Theater Systems",
"brand": "SoundMax",
"model_number": "SM-SB50",
"warranty": "1 year",
"rating": 4.3,
"features": ["2.1 channel", "300W output", "Wireless subwoofer", "Bluetooth"],
"description": "Upgrade your TV's audio with this sleek and powerful soundbar.",
"price": 199.99
},
"CineView OLED TV": {
"name": "CineView OLED TV",
"category": "Televisions and Home Theater Systems",
"brand": "CineView",
"model_number": "CV-OLED55",
"warranty": "2 years",
"rating": 4.7,
"features": ["55-inch display", "4K resolution", "HDR", "Smart TV"],
"description": "Experience true blacks and vibrant colors with this OLED TV.",
"price": 1499.99
},
"GameSphere X": {
"name": "GameSphere X",
"category": "Gaming Consoles and Accessories",
"brand": "GameSphere",
"model_number": "GS-X",
"warranty": "1 year",
"rating": 4.9,
"features": ["4K gaming", "1TB storage", "Backward compatibility", "Online multiplayer"],
"description": "A next-generation gaming console for the ultimate gaming experience.",
"price": 499.99
},
"ProGamer Controller": {
"name": "ProGamer Controller",
"category": "Gaming Consoles and Accessories",
"brand": "ProGamer",
"model_number": "PG-C100",
"warranty": "1 year",
"rating": 4.2,
"features": ["Ergonomic design", "Customizable buttons", "Wireless", "Rechargeable battery"],
"description": "A high-quality gaming controller for precision and comfort.",
"price": 59.99
},
"GameSphere Y": {
"name": "GameSphere Y",
"category": "Gaming Consoles and Accessories",
"brand": "GameSphere",
"model_number": "GS-Y",
"warranty": "1 year",
"rating": 4.8,
"features": ["4K gaming", "500GB storage", "Backward compatibility", "Online multiplayer"],
"description": "A compact gaming console with powerful performance.",
"price": 399.99
},
"ProGamer Racing Wheel": {
"name": "ProGamer Racing Wheel",
"category": "Gaming Consoles and Accessories",
"brand": "ProGamer",
"model_number": "PG-RW200",
"warranty": "1 year",
"rating": 4.5,
"features": ["Force feedback", "Adjustable pedals", "Paddle shifters", "Compatible with GameSphere X"],
"description": "Enhance your racing games with this realistic racing wheel.",
"price": 249.99
},
"GameSphere VR Headset": {
"name": "GameSphere VR Headset",
"category": "Gaming Consoles and Accessories",
"brand": "GameSphere",
"model_number": "GS-VR",
"warranty": "1 year",
"rating": 4.6,
"features": ["Immersive VR experience", "Built-in headphones", "Adjustable headband", "Compatible with GameSphere X"],
"description": "Step into the world of virtual reality with this comfortable VR headset.",
"price": 299.99
},
"AudioPhonic Noise-Canceling Headphones": {
"name": "AudioPhonic Noise-Canceling Headphones",
"category": "Audio Equipment",
"brand": "AudioPhonic",
"model_number": "AP-NC100",
"warranty": "1 year",
"rating": 4.6,
"features": ["Active noise-canceling", "Bluetooth", "20-hour battery life", "Comfortable fit"],
"description": "Experience immersive sound with these noise-canceling headphones.",
"price": 199.99
},
"WaveSound Bluetooth Speaker": {
"name": "WaveSound Bluetooth Speaker",
"category": "Audio Equipment",
"brand": "WaveSound",
"model_number": "WS-BS50",
"warranty": "1 year",
"rating": 4.5,
"features": ["Portable", "10-hour battery life", "Water-resistant", "Built-in microphone"],
"description": "A compact and versatile Bluetooth speaker for music on the go.",
"price": 49.99
},
"AudioPhonic True Wireless Earbuds": {
"name": "AudioPhonic True Wireless Earbuds",
"category": "Audio Equipment",
"brand": "AudioPhonic",
"model_number": "AP-TW20",
"warranty": "1 year",
"rating": 4.4,
"features": ["True wireless", "Bluetooth 5.0", "Touch controls", "18-hour battery life"],
"description": "Enjoy music without wires with these comfortable true wireless earbuds.",
"price": 79.99
},
"WaveSound Soundbar": {
"name": "WaveSound Soundbar",
"category": "Audio Equipment",
"brand": "WaveSound",
"model_number": "WS-SB40",
"warranty": "1 year",
"rating": 4.3,
"features": ["2.0 channel", "80W output", "Bluetooth", "Wall-mountable"],
"description": "Upgrade your TV's audio with this slim and powerful soundbar.",
"price": 99.99
},
"AudioPhonic Turntable": {
"name": "AudioPhonic Turntable",
"category": "Audio Equipment",
"brand": "AudioPhonic",
"model_number": "AP-TT10",
"warranty": "1 year",
"rating": 4.2,
"features": ["3-speed", "Built-in speakers", "Bluetooth", "USB recording"],
"description": "Rediscover your vinyl collection with this modern turntable.",
"price": 149.99
},
"FotoSnap DSLR Camera": {
"name": "FotoSnap DSLR Camera",
"category": "Cameras and Camcorders",
"brand": "FotoSnap",
"model_number": "FS-DSLR200",
"warranty": "1 year",
"rating": 4.7,
"features": ["24.2MP sensor", "1080p video", "3-inch LCD", "Interchangeable lenses"],
"description": "Capture stunning photos and videos with this versatile DSLR camera.",
"price": 599.99
},
"ActionCam 4K": {
"name": "ActionCam 4K",
"category": "Cameras and Camcorders",
"brand": "ActionCam",
"model_number": "AC-4K",
"warranty": "1 year",
"rating": 4.4,
"features": ["4K video", "Waterproof", "Image stabilization", "Wi-Fi"],
"description": "Record your adventures with this rugged and compact 4K action camera.",
"price": 299.99
},
"FotoSnap Mirrorless Camera": {
"name": "FotoSnap Mirrorless Camera",
"category": "Cameras and Camcorders",
"brand": "FotoSnap",
"model_number": "FS-ML100",
"warranty": "1 year",
"rating": 4.6,
"features": ["20.1MP sensor", "4K video", "3-inch touchscreen", "Interchangeable lenses"],
"description": "A compact and lightweight mirrorless camera with advanced features.",
"price": 799.99
},
"ZoomMaster Camcorder": {
"name": "ZoomMaster Camcorder",
"category": "Cameras and Camcorders",
"brand": "ZoomMaster",
"model_number": "ZM-CM50",
"warranty": "1 year",
"rating": 4.3,
"features": ["1080p video", "30x optical zoom", "3-inch LCD", "Image stabilization"],
"description": "Capture life's moments with this easy-to-use camcorder.",
"price": 249.99
},
"FotoSnap Instant Camera": {
"name": "FotoSnap Instant Camera",
"category": "Cameras and Camcorders",
"brand": "FotoSnap",
"model_number": "FS-IC10",
"warranty": "1 year",
"rating": 4.1,
"features": ["Instant prints", "Built-in flash", "Selfie mirror", "Battery-powered"],
"description": "Create instant memories with this fun and portable instant camera.",
"price": 69.99
}
}
products_file = 'products.json'
with open(products_file, 'w') as file:
json.dump(products, file)
return products | [
"Relevant product information:\nPLACEHOLDER",
"PLACEHOLDERPLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | ganmol123/email-generator-be-service | app~api~endpoints~email.py | from fastapi import APIRouter, HTTPException
from pydantic import BaseModel
import openai
from decouple import config
router = APIRouter()
SECRET_KEY = config("SECRET_KEY")
# Replace with your OpenAI API key
openai.api_key = SECRET_KEY
class EmailRequest(BaseModel):
recipient_name: str
recipient_email: str
subject: str
keywords: list[str]
length: int
@router.post("/")
async def generate_email(email_request: EmailRequest):
try:
# Generate a personalized email using ChatGPT
prompt = f"Compose a personalized email to {email_request.recipient_name}, about {email_request.subject}, add these keywords, {', '.join(email_request.keywords)}"
response = openai.Completion.create(
engine="text-curie-001",
prompt=prompt,
max_tokens=email_request.length,
)
email_content = response.choices[0].text.strip()
# In a real application, you might want to send this email_content to an email service for delivery.
return {"email_content": email_content}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
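# A minimal client-side sketch (hypothetical host, port, payload values, and mounting of
# this router at the application root), e.g. using httpx:
#
#   import httpx
#
#   payload = {
#       "recipient_name": "Alex",
#       "recipient_email": "[email protected]",
#       "subject": "Quarterly update",
#       "keywords": ["revenue", "roadmap"],
#       "length": 150,
#   }
#   print(httpx.post("http://localhost:8000/", json=payload).json()["email_content"])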
| [
", ",
"f\"Compose a personalized email to {email_request.recipient_name}, about {email_request.subject}, add these keywords, {', '.join(email_request.keywords)}"
] |
2024-01-10 | HermanMartinus/bearblog | blogs~views~staff.py | from django.utils import timezone
from django.contrib.admin.views.decorators import staff_member_required
from django.http import HttpResponse
from django.db.models import Count, Q
from django.shortcuts import get_object_or_404, redirect, render
from django.db.models.functions import TruncDate, Length
from django.http import JsonResponse
from blogs.helpers import send_async_mail
from blogs.models import Blog
from datetime import timedelta
import pygal
from pygal.style import LightColorizedStyle
import openai
import os
@staff_member_required
def dashboard(request):
days_filter = int(request.GET.get('days', 30))
start_date = (timezone.now() - timedelta(days=days_filter)).date()
end_date = timezone.now().date()
blogs = Blog.objects.filter(blocked=False, created_date__gt=start_date).order_by('created_date')
# Exclude empty blogs
non_empty_blog_ids = [blog.pk for blog in blogs if not blog.is_empty]
blogs = blogs.filter(pk__in=non_empty_blog_ids)
to_review = Blog.objects.filter(to_review=True, reviewed=False, blocked=False).count()
# Signups
date_iterator = start_date
blogs_count = blogs.annotate(date=TruncDate('created_date')).values('date').annotate(c=Count('date')).order_by()
# Create dates dict with zero signups
blog_dict = {}
while date_iterator <= end_date:
blog_dict[date_iterator.strftime("%Y-%m-%d")] = 0
date_iterator += timedelta(days=1)
# Populate dict with signup count
for signup in blogs_count:
blog_dict[signup['date'].strftime("%Y-%m-%d")] = signup['c']
# Generate chart
chart_data = []
for date, count in blog_dict.items():
chart_data.append({'date': date, 'signups': count})
chart = pygal.Bar(height=300, show_legend=False, style=LightColorizedStyle)
chart.force_uri_protocol = 'http'
mark_list = [x['signups'] for x in chart_data]
[x['date'] for x in chart_data]
chart.add('Signups', mark_list)
chart.x_labels = [x['date'].split('-')[2] for x in chart_data]
signup_chart = chart.render_data_uri()
# Upgrades
date_iterator = start_date
upgraded_blogs = Blog.objects.filter(upgraded=True, upgraded_date__gte=start_date).order_by('upgraded_date')
upgrades_count = upgraded_blogs.annotate(date=TruncDate('upgraded_date')).values('date').annotate(c=Count('date')).order_by()
# Create dates dict with zero upgrades
blog_dict = {}
while date_iterator <= end_date:
blog_dict[date_iterator.strftime("%Y-%m-%d")] = 0
date_iterator += timedelta(days=1)
# Populate dict with signup count
for signup in upgrades_count:
if signup['date']:
blog_dict[signup['date'].strftime("%Y-%m-%d")] = signup['c']
# Generate chart
chart_data = []
for date, count in blog_dict.items():
chart_data.append({'date': date, 'upgrades': count})
chart = pygal.Bar(height=300, show_legend=False, style=LightColorizedStyle)
chart.force_uri_protocol = 'http'
mark_list = [x['upgrades'] for x in chart_data]
[x['date'] for x in chart_data]
chart.add('Upgrades', mark_list)
chart.x_labels = [x['date'].split('-')[2] for x in chart_data]
upgrade_chart = chart.render_data_uri()
# Calculate signups and upgrades for the past month
signups = blogs.count()
upgrades = Blog.objects.filter(upgraded=True, upgraded_date__gt=start_date).count()
# Calculate all-time totals
total_signups = Blog.objects.count()
total_upgrades = Blog.objects.filter(upgraded=True).count()
# Calculate conversion rates
conversion_rate = upgrades / signups if signups > 0 else 0
total_conversion_rate = total_upgrades / total_signups if total_signups > 0 else 0
formatted_conversion_rate = f"{conversion_rate*100:.2f}%"
formatted_total_conversion_rate = f"{total_conversion_rate*100:.2f}%"
empty_blogs = get_empty_blogs()
return render(
request,
'staff/dashboard.html',
{
'blogs': blogs,
'signups': signups,
'upgrades': upgrades,
'total_signups': total_signups,
'total_upgrades': total_upgrades,
'conversion_rate': formatted_conversion_rate,
'total_conversion_rate': formatted_total_conversion_rate,
'signup_chart': signup_chart,
'upgrade_chart': upgrade_chart,
'start_date': start_date,
'end_date': end_date,
'to_review': to_review,
'empty_blogs': empty_blogs,
'days_filter': days_filter
}
)
def get_empty_blogs():
# Empty blogs
# Not used in the last 10 weeks
# Most recent 100
timeperiod = timezone.now() - timedelta(weeks=10)
empty_blogs = Blog.objects.annotate(num_posts=Count('post')).annotate(content_length=Length('content')).filter(
last_modified__lte=timeperiod, num_posts__lte=0, content_length__lt=50, upgraded=False, custom_styles="").order_by('-created_date')[:100]
return empty_blogs
@staff_member_required
def delete_empty(request):
for blog in get_empty_blogs():
print(f'Deleting {blog}')
blog.delete()
return redirect('staff_dashboard')
@staff_member_required
def review_flow(request):
blogs = Blog.objects.filter(reviewed=False, blocked=False, to_review=True).annotate(
post_count=Count("post"),
).prefetch_related("post_set").order_by('created_date')
unreviewed_blogs = []
for blog in blogs:
if blog.to_review:
unreviewed_blogs.append(blog)
if unreviewed_blogs:
blog = unreviewed_blogs[0]
all_posts = blog.post_set.filter(publish=True).order_by('-published_date')
return render(
request,
'staff/review_flow.html',
{
'blog': blog,
'content': blog.content or "~nothing here~",
'posts': all_posts,
'root': blog.useful_domain,
'still_to_go': len(unreviewed_blogs),
})
else:
return redirect('staff_dashboard')
@staff_member_required
def approve(request, pk):
blog = get_object_or_404(Blog, pk=pk)
blog.reviewed = True
blog.to_review = False
blog.save()
message = request.POST.get("message", "")
if message and not request.GET.get("no-email", ""):
send_async_mail(
"I've just reviewed your blog",
message,
'Herman Martinus <[email protected]>',
[blog.user.email]
)
return redirect('review_flow')
@staff_member_required
def block(request, pk):
blog = get_object_or_404(Blog, pk=pk)
blog.blocked = True
blog.save()
return redirect('review_flow')
@staff_member_required
def delete(request, pk):
blog = get_object_or_404(Blog, pk=pk)
blog.delete()
return redirect('review_flow')
def extract_blog_info(blog):
posts_info = []
for post in blog.post_set.all():
posts_info.append({'title': post.title, 'content': post.content})
return {
'title': blog.title,
'content': blog.content,
'url': blog.useful_domain,
'posts': posts_info
}
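# A minimal usage sketch (hypothetical): the dict returned above could, for example, be
# serialised before being passed to a review or moderation prompt.
#
#   import json
#   blog = Blog.objects.first()
#   payload = json.dumps(extract_blog_info(blog))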
| [
"~nothing here~"
] |
2024-01-10 | sjchoi86/yet-another-gpt-tutorial-v2 | code~gpt_helper.py | import re,io,base64
from typing import List, Optional
from openai import OpenAI
from PIL import Image
from rich.console import Console
from util import printmd
class GPTchatClass:
def __init__(
self,
gpt_model:str = "gpt-4",
role_msg:str = "Your are a helpful assistant.",
key_path:str = '',
VERBOSE:bool = True,
):
self.gpt_model = gpt_model
self.role_msg = role_msg
self.key_path = key_path
self.VERBOSE = VERBOSE
self.messages = [{"role": "system", "content": f"{role_msg}"}]
self.init_messages = [{"role": "system", "content": f"{role_msg}"}]
self.response = None
self.console = Console()
self._setup_client()
def _setup_client(self):
if self.VERBOSE:
self.console.print(f"[bold cyan]key_path:[%s][/bold cyan]" % (self.key_path))
with open(self.key_path, "r") as f:
OPENAI_API_KEY = f.read()
self.client = OpenAI(api_key=OPENAI_API_KEY)
if self.VERBOSE:
self.console.print(
"[bold cyan]Chat agent using [%s] initialized with the follow role:[%s][/bold cyan]"
% (self.gpt_model, self.role_msg)
)
def _add_message(
self,
role = "assistant",
content = "",
):
"""
role: 'assistant' / 'user'
"""
self.messages.append({"role": role, "content": content})
def _get_response_content(self):
if self.response:
return self.response.choices[0].message.content
else:
return None
def _get_response_status(self):
if self.response:
            return self.response.choices[0].finish_reason
else:
return None
def reset(
self,
role_msg:str = "Your are a helpful assistant.",
):
self.init_messages = [{"role": "system", "content": f"{role_msg}"}]
self.messages = self.init_messages
def chat(
self,
user_msg = "hi",
PRINT_USER_MSG = True,
PRINT_GPT_OUTPUT = True,
RESET_CHAT = False,
RETURN_RESPONSE = False,
):
self._add_message(role="user", content=user_msg)
self.response = self.client.chat.completions.create(
model=self.gpt_model, messages=self.messages
)
        # Back up the response for continuous chatting
self._add_message(role="assistant", content=self._get_response_content())
if PRINT_USER_MSG:
self.console.print("[deep_sky_blue3][USER_MSG][/deep_sky_blue3]")
printmd(user_msg)
if PRINT_GPT_OUTPUT:
self.console.print("[spring_green4][GPT_OUTPUT][/spring_green4]")
printmd(self._get_response_content())
# Reset
if RESET_CHAT:
self.reset()
# Return
if RETURN_RESPONSE:
return self._get_response_content()
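# A minimal usage sketch for GPTchatClass (the key file path is an assumption; it should
# point to a text file containing an OpenAI API key):
#
#   agent = GPTchatClass(
#       gpt_model="gpt-4",
#       role_msg="You are a helpful assistant.",
#       key_path="path/to/openai_api_key.txt",
#   )
#   answer = agent.chat(user_msg="What is a transformer?", RETURN_RESPONSE=True)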
class GPT4VchatClass:
def __init__(
self,
gpt_model:str = "gpt-4-vision-preview",
role_msg:str = "You are a helpful agent with vision capabilities; do not respond to objects not depicted in images.",
image_max_size:int = 512,
key_path:str = '',
VERBOSE:bool = True,
):
self.gpt_model = gpt_model
self.role_msg = role_msg
self.image_max_size = image_max_size
self.key_path = key_path
self.VERBOSE = VERBOSE
self.messages = [{"role": "system", "content": f"{role_msg}"}]
self.init_messages = [{"role": "system", "content": f"{role_msg}"}]
self.console = Console()
self.response = None
self._setup_client()
def _setup_client(self):
if self.VERBOSE:
self.console.print(f"[bold cyan]key_path:[%s][/bold cyan]" % (self.key_path))
with open(self.key_path, "r") as f:
OPENAI_API_KEY = f.read()
self.client = OpenAI(api_key=OPENAI_API_KEY)
if self.VERBOSE:
self.console.print(
"[bold cyan]Chat agent using [%s] initialized with the follow role:[%s][/bold cyan]"
% (self.gpt_model, self.role_msg)
)
def _encode_image(
self,
image_pil: Image.Image
) -> str:
image_pil_rgb = image_pil.convert("RGB")
# change pil to base64 string
img_buf = io.BytesIO()
image_pil_rgb.save(img_buf, format="PNG")
# Encode bytes to base64 string
img_base64 = base64.b64encode(img_buf.getvalue()).decode("utf-8")
return img_base64
def _divide_by_img_tag(
self,
text: str,
) -> List[str]:
"""
Example:
Input: "<img1> <img2> What is the difference of these two images?"
Output: ['<img1>', '<img2>', ' What is the difference of these two images?']
"""
pattern = r"(<img\d+>)"
segments = re.split(pattern, text)
segments = [seg for seg in segments if seg.strip() != ""]
return segments
def _add_message(
self,
role:str = "assistant",
content: str = "",
images: Optional[List] = None,
):
"""
role: 'assistant' / 'user'
"""
if images is not None:
# parsing text content
image_text_segments = self._divide_by_img_tag(content)
new_content = []
image_num = 0
for segment in image_text_segments:
# check if image or text
if segment.startswith("<img") and segment.endswith(">"):
# this is image
local_image_path = images[image_num]
image_pil = Image.open(local_image_path)
image_pil.thumbnail(
(self.image_max_size, self.image_max_size)
)
base64_image = self._encode_image(image_pil)
new_content.append(
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{base64_image}"
},
}
)
image_num += 1
else:
# this is text
new_content.append(
{
"type": "text",
"text": segment,
}
)
self.messages.append({"role": role, "content": new_content})
else:
self.messages.append({"role": role, "content": content})
def _get_response_content(self):
if self.response:
return self.response.choices[0].message.content
else:
return None
def _get_response_status(self):
if self.response:
            return self.response.choices[0].finish_reason
else:
return None
def reset(self):
self.messages = self.init_messages
def chat(
self,
user_msg: str = "<img> what's in this image?",
images: List[str] = ["../img/cat.png"],
PRINT_USER_MSG=True,
PRINT_GPT_OUTPUT=True,
RESET_CHAT=False,
RETURN_RESPONSE=True,
MAX_TOKENS = 512,
):
self._add_message(role="user", content=user_msg, images=images)
self.response = self.client.chat.completions.create(
model=self.gpt_model, messages=self.messages, max_tokens=MAX_TOKENS
)
        # Back up the response for continuous chatting
self._add_message(role="assistant", content=self._get_response_content())
if PRINT_USER_MSG:
self.console.print("[deep_sky_blue3][USER_MSG][/deep_sky_blue3]")
printmd(user_msg)
if PRINT_GPT_OUTPUT:
self.console.print("[spring_green4][GPT_OUTPUT][/spring_green4]")
printmd(self._get_response_content())
# Reset
if RESET_CHAT:
self.reset()
# Return
if RETURN_RESPONSE:
return self._get_response_content()
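# A minimal usage sketch for GPT4VchatClass (the key file path is an assumption; the
# image path mirrors the default argument above):
#
#   agent = GPT4VchatClass(key_path="path/to/openai_api_key.txt")
#   answer = agent.chat(
#       user_msg="<img> What's in this image?",
#       images=["../img/cat.png"],
#   )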
| [
"PLACEHOLDER"
] |
2024-01-10 | trungtin/ai-scripts | assistant_pdf.py | #! python
import argparse
from dotenv import load_dotenv
from openai import OpenAI
load_dotenv(override=True)
client = OpenAI()
assistant_model = "gpt-3.5-turbo-1106"
def main(pdf_files: list[str]):
created_files = []
for pdf_file in pdf_files:
with open(pdf_file, "rb") as f:
print(f"Uploading file {pdf_file} to OpenAI Assistant API.")
content = f.read()
file_name = pdf_file.split("/")[-1]
# upload new file
uploaded_file = client.files.create(
file=(file_name, content, "application/pdf"),
purpose="assistants",
)
print(f"Uploaded file {file_name} successfully to OpenAI Assistant API.")
created_files.append(uploaded_file.id)
# Create a new assistant
assistant = client.beta.assistants.create(
name="pdf_assistant",
file_ids=created_files,
model=assistant_model,
tools=[{"type": "retrieval"}],
)
print(
f"Created assistant: https://platform.openai.com/playground?assistant={assistant.id}"
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Take pdf file(s) then create a OpenAI assistant with then."
)
parser.add_argument("pdf_files", type=str, nargs="+", help="path to the pdf files")
args = parser.parse_args()
main(args.pdf_files)
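# Example invocation (file names are hypothetical); expects OPENAI_API_KEY to be set in
# the environment or in a .env file picked up by python-dotenv:
#
#   python assistant_pdf.py report.pdf appendix.pdf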
| [] |
2024-01-10 | Mintplex-Labs/langchain-python | libs~langchain~tests~integration_tests~chat_models~test_volcengine_maas.py | """Test volc engine maas chat model."""
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from langchain_core.outputs import ChatGeneration, LLMResult
from langchain.callbacks.manager import CallbackManager
from langchain.chat_models.volcengine_maas import VolcEngineMaasChat
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
def test_default_call() -> None:
"""Test valid chat call to volc engine."""
chat = VolcEngineMaasChat()
response = chat(messages=[HumanMessage(content="Hello")])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_multiple_history() -> None:
"""Tests multiple history works."""
chat = VolcEngineMaasChat()
response = chat(
messages=[
HumanMessage(content="Hello"),
AIMessage(content="Hello!"),
HumanMessage(content="How are you?"),
]
)
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_stream() -> None:
"""Test that stream works."""
chat = VolcEngineMaasChat(streaming=True)
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
response = chat(
messages=[
HumanMessage(content="Hello"),
AIMessage(content="Hello!"),
HumanMessage(content="How are you?"),
],
stream=True,
callbacks=callback_manager,
)
assert callback_handler.llm_streams > 0
assert isinstance(response.content, str)
def test_multiple_messages() -> None:
"""Tests multiple messages works."""
chat = VolcEngineMaasChat()
message = HumanMessage(content="Hi, how are you?")
response = chat.generate([[message], [message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
for generations in response.generations:
assert len(generations) == 1
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content
| [
"Hi, how are you?",
"Hello!",
"Hello",
"How are you?"
] |
2024-01-10 | Mintplex-Labs/langchain-python | libs~langchain~tests~integration_tests~retrievers~test_google_vertex_ai_search.py | """Test Google Vertex AI Search retriever.
You need to create a Vertex AI Search app and populate it
with data to run the integration tests.
Follow the instructions in the example notebook:
google_vertex_ai_search.ipynb
to set up the app and configure authentication.
Set the following environment variables before the tests:
export PROJECT_ID=... - set to your Google Cloud project ID
export DATA_STORE_ID=... - the ID of the search engine to use for the test
"""
import os
import pytest
from langchain_core.documents import Document
from langchain.retrievers.google_vertex_ai_search import (
GoogleCloudEnterpriseSearchRetriever,
GoogleVertexAIMultiTurnSearchRetriever,
GoogleVertexAISearchRetriever,
)
@pytest.mark.requires("google.api_core")
def test_google_vertex_ai_search_get_relevant_documents() -> None:
"""Test the get_relevant_documents() method."""
retriever = GoogleVertexAISearchRetriever()
documents = retriever.get_relevant_documents("What are Alphabet's Other Bets?")
assert len(documents) > 0
for doc in documents:
assert isinstance(doc, Document)
assert doc.page_content
assert doc.metadata["id"]
assert doc.metadata["source"]
@pytest.mark.requires("google.api_core")
def test_google_vertex_ai_multiturnsearch_get_relevant_documents() -> None:
"""Test the get_relevant_documents() method."""
retriever = GoogleVertexAIMultiTurnSearchRetriever()
documents = retriever.get_relevant_documents("What are Alphabet's Other Bets?")
assert len(documents) > 0
for doc in documents:
assert isinstance(doc, Document)
assert doc.page_content
assert doc.metadata["id"]
assert doc.metadata["source"]
@pytest.mark.requires("google.api_core")
def test_google_vertex_ai_search_enterprise_search_deprecation() -> None:
"""Test the deprecation of GoogleCloudEnterpriseSearchRetriever."""
with pytest.warns(
DeprecationWarning,
match="GoogleCloudEnterpriseSearchRetriever is deprecated, use GoogleVertexAISearchRetriever", # noqa: E501
):
retriever = GoogleCloudEnterpriseSearchRetriever()
os.environ["SEARCH_ENGINE_ID"] = os.getenv("DATA_STORE_ID", "data_store_id")
with pytest.warns(
DeprecationWarning,
match="The `search_engine_id` parameter is deprecated. Use `data_store_id` instead.", # noqa: E501
):
retriever = GoogleCloudEnterpriseSearchRetriever()
# Check that mapped methods still work.
documents = retriever.get_relevant_documents("What are Alphabet's Other Bets?")
assert len(documents) > 0
for doc in documents:
assert isinstance(doc, Document)
assert doc.page_content
assert doc.metadata["id"]
assert doc.metadata["source"]
| [] |
2024-01-10 | Mintplex-Labs/langchain-python | libs~langchain~langchain~chat_models~mlflow.py | import asyncio
import logging
from functools import partial
from typing import Any, Dict, List, Mapping, Optional
from urllib.parse import urlparse
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import (
AIMessage,
BaseMessage,
ChatMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.outputs import ChatGeneration, ChatResult
from langchain_core.pydantic_v1 import (
Field,
PrivateAttr,
)
logger = logging.getLogger(__name__)
class ChatMlflow(BaseChatModel):
"""`MLflow` chat models API.
To use, you should have the `mlflow[genai]` python package installed.
For more information, see https://mlflow.org/docs/latest/llms/deployments/server.html.
Example:
.. code-block:: python
from langchain.chat_models import ChatMlflow
chat = ChatMlflow(
target_uri="http://localhost:5000",
endpoint="chat",
                temperature=0.1,
)
"""
endpoint: str
"""The endpoint to use."""
target_uri: str
"""The target URI to use."""
temperature: float = 0.0
"""The sampling temperature."""
n: int = 1
"""The number of completion choices to generate."""
stop: Optional[List[str]] = None
"""The stop sequence."""
max_tokens: Optional[int] = None
"""The maximum number of tokens to generate."""
extra_params: dict = Field(default_factory=dict)
"""Any extra parameters to pass to the endpoint."""
_client: Any = PrivateAttr()
def __init__(self, **kwargs: Any):
super().__init__(**kwargs)
self._validate_uri()
try:
from mlflow.deployments import get_deploy_client
self._client = get_deploy_client(self.target_uri)
except ImportError as e:
raise ImportError(
"Failed to create the client. "
f"Please run `pip install mlflow{self._mlflow_extras}` to install "
"required dependencies."
) from e
@property
def _mlflow_extras(self) -> str:
return "[genai]"
def _validate_uri(self) -> None:
if self.target_uri == "databricks":
return
allowed = ["http", "https", "databricks"]
if urlparse(self.target_uri).scheme not in allowed:
raise ValueError(
f"Invalid target URI: {self.target_uri}. "
f"The scheme must be one of {allowed}."
)
@property
def _default_params(self) -> Dict[str, Any]:
params: Dict[str, Any] = {
"target_uri": self.target_uri,
"endpoint": self.endpoint,
"temperature": self.temperature,
"n": self.n,
"stop": self.stop,
"max_tokens": self.max_tokens,
"extra_params": self.extra_params,
}
return params
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
message_dicts = [
ChatMlflow._convert_message_to_dict(message) for message in messages
]
data: Dict[str, Any] = {
"messages": message_dicts,
"temperature": self.temperature,
"n": self.n,
**self.extra_params,
**kwargs,
}
if stop := self.stop or stop:
data["stop"] = stop
if self.max_tokens is not None:
data["max_tokens"] = self.max_tokens
resp = self._client.predict(endpoint=self.endpoint, inputs=data)
return ChatMlflow._create_chat_result(resp)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
func = partial(
self._generate, messages, stop=stop, run_manager=run_manager, **kwargs
)
return await asyncio.get_event_loop().run_in_executor(None, func)
@property
def _identifying_params(self) -> Dict[str, Any]:
return self._default_params
def _get_invocation_params(
self, stop: Optional[List[str]] = None, **kwargs: Any
) -> Dict[str, Any]:
"""Get the parameters used to invoke the model FOR THE CALLBACKS."""
return {
**self._default_params,
**super()._get_invocation_params(stop=stop, **kwargs),
}
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "mlflow-chat"
@staticmethod
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
role = _dict["role"]
content = _dict["content"]
if role == "user":
return HumanMessage(content=content)
elif role == "assistant":
return AIMessage(content=content)
elif role == "system":
return SystemMessage(content=content)
else:
return ChatMessage(content=content, role=role)
@staticmethod
def _raise_functions_not_supported() -> None:
raise ValueError(
"Function messages are not supported by Databricks. Please"
" create a feature request at https://github.com/mlflow/mlflow/issues."
)
@staticmethod
def _convert_message_to_dict(message: BaseMessage) -> dict:
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, FunctionMessage):
raise ValueError(
"Function messages are not supported by Databricks. Please"
" create a feature request at https://github.com/mlflow/mlflow/issues."
)
else:
raise ValueError(f"Got unknown message type: {message}")
if "function_call" in message.additional_kwargs:
ChatMlflow._raise_functions_not_supported()
if message.additional_kwargs:
logger.warning(
"Additional message arguments are unsupported by Databricks"
" and will be ignored: %s",
message.additional_kwargs,
)
return message_dict
@staticmethod
def _create_chat_result(response: Mapping[str, Any]) -> ChatResult:
generations = []
for choice in response["choices"]:
message = ChatMlflow._convert_dict_to_message(choice["message"])
usage = choice.get("usage", {})
gen = ChatGeneration(
message=message,
generation_info=usage,
)
generations.append(gen)
usage = response.get("usage", {})
return ChatResult(generations=generations, llm_output=usage)
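# A minimal usage sketch (the target URI and endpoint name below are assumptions):
#
#   from langchain_core.messages import HumanMessage
#
#   chat = ChatMlflow(target_uri="http://localhost:5000", endpoint="chat")
#   result = chat.invoke([HumanMessage(content="What is MLflow?")])
#   print(result.content)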
| [] |
2024-01-10 | Mintplex-Labs/langchain-python | libs~core~langchain_core~caches.py | from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Any, Optional, Sequence
from langchain_core.outputs import Generation
RETURN_VAL_TYPE = Sequence[Generation]
class BaseCache(ABC):
"""Base interface for cache."""
@abstractmethod
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
@abstractmethod
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
@abstractmethod
def clear(self, **kwargs: Any) -> None:
"""Clear cache that can take additional keyword arguments."""
| [] |
2024-01-10 | Mintplex-Labs/langchain-python | libs~core~tests~unit_tests~runnables~test_imports.py | from langchain_core.runnables import __all__
EXPECTED_ALL = [
"AddableDict",
"Context",
"ConfigurableField",
"ConfigurableFieldSingleOption",
"ConfigurableFieldMultiOption",
"patch_config",
"RouterInput",
"RouterRunnable",
"Runnable",
"RunnableSerializable",
"RunnableBinding",
"RunnableBranch",
"RunnableConfig",
"RunnableGenerator",
"RunnableLambda",
"RunnableMap",
"RunnableParallel",
"RunnablePassthrough",
"RunnableSequence",
"RunnableWithFallbacks",
"get_config_list",
"aadd",
"add",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
| [] |
2024-01-10 | Mintplex-Labs/langchain-python | libs~langchain~langchain~document_loaders~parsers~language~cobol.py | import re
from typing import Callable, List
from langchain.document_loaders.parsers.language.code_segmenter import CodeSegmenter
class CobolSegmenter(CodeSegmenter):
"""Code segmenter for `COBOL`."""
PARAGRAPH_PATTERN = re.compile(r"^[A-Z0-9\-]+(\s+.*)?\.$", re.IGNORECASE)
DIVISION_PATTERN = re.compile(
r"^\s*(IDENTIFICATION|DATA|PROCEDURE|ENVIRONMENT)\s+DIVISION.*$", re.IGNORECASE
)
SECTION_PATTERN = re.compile(r"^\s*[A-Z0-9\-]+\s+SECTION.$", re.IGNORECASE)
def __init__(self, code: str):
super().__init__(code)
self.source_lines: List[str] = self.code.splitlines()
def is_valid(self) -> bool:
# Identify presence of any division to validate COBOL code
return any(self.DIVISION_PATTERN.match(line) for line in self.source_lines)
def _extract_code(self, start_idx: int, end_idx: int) -> str:
return "\n".join(self.source_lines[start_idx:end_idx]).rstrip("\n")
def _is_relevant_code(self, line: str) -> bool:
"""Check if a line is part of the procedure division or a relevant section."""
if "PROCEDURE DIVISION" in line.upper():
return True
# Add additional conditions for relevant sections if needed
return False
def _process_lines(self, func: Callable) -> List[str]:
"""A generic function to process COBOL lines based on provided func."""
elements: List[str] = []
start_idx = None
inside_relevant_section = False
for i, line in enumerate(self.source_lines):
if self._is_relevant_code(line):
inside_relevant_section = True
if inside_relevant_section and (
self.PARAGRAPH_PATTERN.match(line.strip().split(" ")[0])
or self.SECTION_PATTERN.match(line.strip())
):
if start_idx is not None:
func(elements, start_idx, i)
start_idx = i
# Handle the last element if exists
if start_idx is not None:
func(elements, start_idx, len(self.source_lines))
return elements
def extract_functions_classes(self) -> List[str]:
def extract_func(elements: List[str], start_idx: int, end_idx: int) -> None:
elements.append(self._extract_code(start_idx, end_idx))
return self._process_lines(extract_func)
def simplify_code(self) -> str:
simplified_lines: List[str] = []
inside_relevant_section = False
omitted_code_added = (
False # To track if "* OMITTED CODE *" has been added after the last header
)
for line in self.source_lines:
is_header = (
"PROCEDURE DIVISION" in line
or "DATA DIVISION" in line
or "IDENTIFICATION DIVISION" in line
or self.PARAGRAPH_PATTERN.match(line.strip().split(" ")[0])
or self.SECTION_PATTERN.match(line.strip())
)
if is_header:
inside_relevant_section = True
# Reset the flag since we're entering a new section/division or
# paragraph
omitted_code_added = False
if inside_relevant_section:
if is_header:
# Add header and reset the omitted code added flag
simplified_lines.append(line)
elif not omitted_code_added:
# Add omitted code comment only if it hasn't been added directly
# after the last header
simplified_lines.append("* OMITTED CODE *")
omitted_code_added = True
return "\n".join(simplified_lines)
| [] |
2024-01-10 | Mintplex-Labs/langchain-python | libs~langchain~langchain~llms~databricks.py | import os
import warnings
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, List, Mapping, Optional
import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import LLM
from langchain_core.pydantic_v1 import (
BaseModel,
Extra,
Field,
PrivateAttr,
root_validator,
validator,
)
__all__ = ["Databricks"]
class _DatabricksClientBase(BaseModel, ABC):
"""A base JSON API client that talks to Databricks."""
api_url: str
api_token: str
def request(self, method: str, url: str, request: Any) -> Any:
headers = {"Authorization": f"Bearer {self.api_token}"}
response = requests.request(
method=method, url=url, headers=headers, json=request
)
# TODO: error handling and automatic retries
if not response.ok:
raise ValueError(f"HTTP {response.status_code} error: {response.text}")
return response.json()
def _get(self, url: str) -> Any:
return self.request("GET", url, None)
def _post(self, url: str, request: Any) -> Any:
return self.request("POST", url, request)
@abstractmethod
def post(
self, request: Any, transform_output_fn: Optional[Callable[..., str]] = None
) -> Any:
...
@property
def llm(self) -> bool:
return False
def _transform_completions(response: Dict[str, Any]) -> str:
return response["choices"][0]["text"]
def _transform_chat(response: Dict[str, Any]) -> str:
return response["choices"][0]["message"]["content"]
class _DatabricksServingEndpointClient(_DatabricksClientBase):
"""An API client that talks to a Databricks serving endpoint."""
host: str
endpoint_name: str
databricks_uri: str
client: Any = None
external_or_foundation: bool = False
task: Optional[str] = None
def __init__(self, **data: Any):
super().__init__(**data)
try:
from mlflow.deployments import get_deploy_client
self.client = get_deploy_client(self.databricks_uri)
except ImportError as e:
raise ImportError(
"Failed to create the client. "
"Please install mlflow with `pip install mlflow`."
) from e
endpoint = self.client.get_endpoint(self.endpoint_name)
self.external_or_foundation = endpoint.get("endpoint_type", "").lower() in (
"external_model",
"foundation_model_api",
)
self.task = endpoint.get("task")
@property
def llm(self) -> bool:
return self.task in ("llm/v1/chat", "llm/v1/completions")
@root_validator(pre=True)
def set_api_url(cls, values: Dict[str, Any]) -> Dict[str, Any]:
if "api_url" not in values:
host = values["host"]
endpoint_name = values["endpoint_name"]
api_url = f"https://{host}/serving-endpoints/{endpoint_name}/invocations"
values["api_url"] = api_url
return values
def post(
self, request: Any, transform_output_fn: Optional[Callable[..., str]] = None
) -> Any:
if self.external_or_foundation:
resp = self.client.predict(endpoint=self.endpoint_name, inputs=request)
if transform_output_fn:
return transform_output_fn(resp)
if self.task == "llm/v1/chat":
return _transform_chat(resp)
elif self.task == "llm/v1/completions":
return _transform_completions(resp)
return resp
else:
# See https://docs.databricks.com/machine-learning/model-serving/score-model-serving-endpoints.html
wrapped_request = {"dataframe_records": [request]}
response = self.client.predict(
endpoint=self.endpoint_name, inputs=wrapped_request
)
preds = response["predictions"]
# For a single-record query, the result is not a list.
pred = preds[0] if isinstance(preds, list) else preds
return transform_output_fn(pred) if transform_output_fn else pred
class _DatabricksClusterDriverProxyClient(_DatabricksClientBase):
"""An API client that talks to a Databricks cluster driver proxy app."""
host: str
cluster_id: str
cluster_driver_port: str
@root_validator(pre=True)
def set_api_url(cls, values: Dict[str, Any]) -> Dict[str, Any]:
if "api_url" not in values:
host = values["host"]
cluster_id = values["cluster_id"]
port = values["cluster_driver_port"]
api_url = f"https://{host}/driver-proxy-api/o/0/{cluster_id}/{port}"
values["api_url"] = api_url
return values
def post(
self, request: Any, transform_output_fn: Optional[Callable[..., str]] = None
) -> Any:
resp = self._post(self.api_url, request)
return transform_output_fn(resp) if transform_output_fn else resp
def get_repl_context() -> Any:
"""Gets the notebook REPL context if running inside a Databricks notebook.
Returns None otherwise.
"""
try:
from dbruntime.databricks_repl_context import get_context
return get_context()
except ImportError:
raise ImportError(
"Cannot access dbruntime, not running inside a Databricks notebook."
)
def get_default_host() -> str:
"""Gets the default Databricks workspace hostname.
Raises an error if the hostname cannot be automatically determined.
"""
host = os.getenv("DATABRICKS_HOST")
if not host:
try:
host = get_repl_context().browserHostName
if not host:
raise ValueError("context doesn't contain browserHostName.")
except Exception as e:
raise ValueError(
"host was not set and cannot be automatically inferred. Set "
f"environment variable 'DATABRICKS_HOST'. Received error: {e}"
)
# TODO: support Databricks CLI profile
host = host.lstrip("https://").lstrip("http://").rstrip("/")
return host
def get_default_api_token() -> str:
"""Gets the default Databricks personal access token.
Raises an error if the token cannot be automatically determined.
"""
if api_token := os.getenv("DATABRICKS_TOKEN"):
return api_token
try:
api_token = get_repl_context().apiToken
if not api_token:
raise ValueError("context doesn't contain apiToken.")
except Exception as e:
raise ValueError(
"api_token was not set and cannot be automatically inferred. Set "
f"environment variable 'DATABRICKS_TOKEN'. Received error: {e}"
)
# TODO: support Databricks CLI profile
return api_token
class Databricks(LLM):
"""Databricks serving endpoint or a cluster driver proxy app for LLM.
It supports two endpoint types:
* **Serving endpoint** (recommended for both production and development).
We assume that an LLM was deployed to a serving endpoint.
To wrap it as an LLM you must have "Can Query" permission to the endpoint.
Set ``endpoint_name`` accordingly and do not set ``cluster_id`` and
``cluster_driver_port``.
If the underlying model is a model registered by MLflow, the expected model
signature is:
* inputs::
[{"name": "prompt", "type": "string"},
{"name": "stop", "type": "list[string]"}]
* outputs: ``[{"type": "string"}]``
If the underlying model is an external or foundation model, the response from the
endpoint is automatically transformed to the expected format unless
``transform_output_fn`` is provided.
* **Cluster driver proxy app** (recommended for interactive development).
One can load an LLM on a Databricks interactive cluster and start a local HTTP
server on the driver node to serve the model at ``/`` using HTTP POST method
with JSON input/output.
Please use a port number between ``[3000, 8000]`` and let the server listen to
the driver IP address or simply ``0.0.0.0`` instead of localhost only.
To wrap it as an LLM you must have "Can Attach To" permission to the cluster.
Set ``cluster_id`` and ``cluster_driver_port`` and do not set ``endpoint_name``.
The expected server schema (using JSON schema) is:
* inputs::
{"type": "object",
"properties": {
"prompt": {"type": "string"},
"stop": {"type": "array", "items": {"type": "string"}}},
"required": ["prompt"]}`
* outputs: ``{"type": "string"}``
If the endpoint model signature is different or you want to set extra params,
you can use `transform_input_fn` and `transform_output_fn` to apply necessary
transformations before and after the query.
"""
host: str = Field(default_factory=get_default_host)
"""Databricks workspace hostname.
If not provided, the default value is determined by
* the ``DATABRICKS_HOST`` environment variable if present, or
* the hostname of the current Databricks workspace if running inside
a Databricks notebook attached to an interactive cluster in "single user"
or "no isolation shared" mode.
"""
api_token: str = Field(default_factory=get_default_api_token)
"""Databricks personal access token.
If not provided, the default value is determined by
* the ``DATABRICKS_TOKEN`` environment variable if present, or
* an automatically generated temporary token if running inside a Databricks
notebook attached to an interactive cluster in "single user" or
"no isolation shared" mode.
"""
endpoint_name: Optional[str] = None
"""Name of the model serving endpoint.
You must specify the endpoint name to connect to a model serving endpoint.
You must not set both ``endpoint_name`` and ``cluster_id``.
"""
cluster_id: Optional[str] = None
"""ID of the cluster if connecting to a cluster driver proxy app.
    If neither ``endpoint_name`` nor ``cluster_id`` is provided and the code runs
inside a Databricks notebook attached to an interactive cluster in "single user"
or "no isolation shared" mode, the current cluster ID is used as default.
You must not set both ``endpoint_name`` and ``cluster_id``.
"""
cluster_driver_port: Optional[str] = None
"""The port number used by the HTTP server running on the cluster driver node.
The server should listen on the driver IP address or simply ``0.0.0.0`` to connect.
We recommend the server using a port number between ``[3000, 8000]``.
"""
model_kwargs: Optional[Dict[str, Any]] = None
"""
Deprecated. Please use ``extra_params`` instead. Extra parameters to pass to
the endpoint.
"""
transform_input_fn: Optional[Callable] = None
"""A function that transforms ``{prompt, stop, **kwargs}`` into a JSON-compatible
request object that the endpoint accepts.
For example, you can apply a prompt template to the input prompt.
"""
transform_output_fn: Optional[Callable[..., str]] = None
"""A function that transforms the output from the endpoint to the generated text.
"""
databricks_uri: str = "databricks"
"""The databricks URI. Only used when using a serving endpoint."""
temperature: float = 0.0
"""The sampling temperature."""
n: int = 1
"""The number of completion choices to generate."""
stop: Optional[List[str]] = None
"""The stop sequence."""
max_tokens: Optional[int] = None
"""The maximum number of tokens to generate."""
extra_params: Dict[str, Any] = Field(default_factory=dict)
"""Any extra parameters to pass to the endpoint."""
_client: _DatabricksClientBase = PrivateAttr()
class Config:
extra = Extra.forbid
underscore_attrs_are_private = True
@property
def _llm_params(self) -> Dict[str, Any]:
params: Dict[str, Any] = {
"temperature": self.temperature,
"n": self.n,
}
if self.stop:
params["stop"] = self.stop
if self.max_tokens is not None:
params["max_tokens"] = self.max_tokens
return params
@validator("cluster_id", always=True)
def set_cluster_id(cls, v: Any, values: Dict[str, Any]) -> Optional[str]:
if v and values["endpoint_name"]:
raise ValueError("Cannot set both endpoint_name and cluster_id.")
elif values["endpoint_name"]:
return None
elif v:
return v
else:
try:
if v := get_repl_context().clusterId:
return v
raise ValueError("Context doesn't contain clusterId.")
except Exception as e:
raise ValueError(
"Neither endpoint_name nor cluster_id was set. "
"And the cluster_id cannot be automatically determined. Received"
f" error: {e}"
)
@validator("cluster_driver_port", always=True)
def set_cluster_driver_port(cls, v: Any, values: Dict[str, Any]) -> Optional[str]:
if v and values["endpoint_name"]:
raise ValueError("Cannot set both endpoint_name and cluster_driver_port.")
elif values["endpoint_name"]:
return None
elif v is None:
raise ValueError(
"Must set cluster_driver_port to connect to a cluster driver."
)
elif int(v) <= 0:
raise ValueError(f"Invalid cluster_driver_port: {v}")
else:
return v
@validator("model_kwargs", always=True)
def set_model_kwargs(cls, v: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
if v:
assert "prompt" not in v, "model_kwargs must not contain key 'prompt'"
assert "stop" not in v, "model_kwargs must not contain key 'stop'"
return v
def __init__(self, **data: Any):
super().__init__(**data)
        if self.model_kwargs is not None and self.extra_params:
            raise ValueError("Cannot set both model_kwargs and extra_params.")
elif self.model_kwargs is not None:
warnings.warn(
"model_kwargs is deprecated. Please use extra_params instead.",
DeprecationWarning,
)
if self.endpoint_name:
self._client = _DatabricksServingEndpointClient(
host=self.host,
api_token=self.api_token,
endpoint_name=self.endpoint_name,
databricks_uri=self.databricks_uri,
)
elif self.cluster_id and self.cluster_driver_port:
self._client = _DatabricksClusterDriverProxyClient(
host=self.host,
api_token=self.api_token,
cluster_id=self.cluster_id,
cluster_driver_port=self.cluster_driver_port,
)
else:
raise ValueError(
"Must specify either endpoint_name or cluster_id/cluster_driver_port."
)
@property
def _default_params(self) -> Dict[str, Any]:
"""Return default params."""
return {
"host": self.host,
# "api_token": self.api_token, # Never save the token
"endpoint_name": self.endpoint_name,
"cluster_id": self.cluster_id,
"cluster_driver_port": self.cluster_driver_port,
"databricks_uri": self.databricks_uri,
"model_kwargs": self.model_kwargs,
"temperature": self.temperature,
"n": self.n,
"stop": self.stop,
"max_tokens": self.max_tokens,
"extra_params": self.extra_params,
# TODO: Support saving transform_input_fn and transform_output_fn
# "transform_input_fn": self.transform_input_fn,
# "transform_output_fn": self.transform_output_fn,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
return self._default_params
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "databricks"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Queries the LLM endpoint with the given prompt and stop sequence."""
# TODO: support callbacks
request: Dict[str, Any] = {"prompt": prompt}
if self._client.llm:
request.update(self._llm_params)
request.update(self.model_kwargs or self.extra_params)
request.update(kwargs)
if stop:
request["stop"] = stop
if self.transform_input_fn:
request = self.transform_input_fn(**request)
return self._client.post(request, transform_output_fn=self.transform_output_fn)
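# A minimal usage sketch (the endpoint name is an assumption; host and token are read
# from DATABRICKS_HOST / DATABRICKS_TOKEN or inferred inside a Databricks notebook):
#
#   llm = Databricks(endpoint_name="databricks-llama-2-70b-chat", temperature=0.1)
#   print(llm("What is Apache Spark?"))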
| [] |
2024-01-10 | Mintplex-Labs/langchain-python | libs~langchain~langchain~llms~watsonxllm.py | import logging
import os
from typing import Any, Dict, Iterator, List, Mapping, Optional, Union
from langchain_core.outputs import LLMResult
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import BaseLLM
from langchain.pydantic_v1 import Extra, SecretStr, root_validator
from langchain.schema.output import Generation, GenerationChunk
from langchain.utils import convert_to_secret_str, get_from_dict_or_env
logger = logging.getLogger(__name__)
class WatsonxLLM(BaseLLM):
"""
IBM watsonx.ai large language models.
To use, you should have ``ibm_watson_machine_learning`` python package installed,
and the environment variable ``WATSONX_APIKEY`` set with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from ibm_watson_machine_learning.metanames import GenTextParamsMetaNames
parameters = {
GenTextParamsMetaNames.DECODING_METHOD: "sample",
GenTextParamsMetaNames.MAX_NEW_TOKENS: 100,
GenTextParamsMetaNames.MIN_NEW_TOKENS: 1,
GenTextParamsMetaNames.TEMPERATURE: 0.5,
GenTextParamsMetaNames.TOP_K: 50,
GenTextParamsMetaNames.TOP_P: 1,
}
from langchain.llms import WatsonxLLM
llm = WatsonxLLM(
model_id="google/flan-ul2",
url="https://us-south.ml.cloud.ibm.com",
apikey="*****",
project_id="*****",
params=parameters,
)
"""
model_id: str = ""
"""Type of model to use."""
project_id: str = ""
"""ID of the Watson Studio project."""
space_id: str = ""
"""ID of the Watson Studio space."""
url: Optional[SecretStr] = None
"""Url to Watson Machine Learning instance"""
apikey: Optional[SecretStr] = None
"""Apikey to Watson Machine Learning instance"""
token: Optional[SecretStr] = None
"""Token to Watson Machine Learning instance"""
password: Optional[SecretStr] = None
"""Password to Watson Machine Learning instance"""
username: Optional[SecretStr] = None
"""Username to Watson Machine Learning instance"""
instance_id: Optional[SecretStr] = None
"""Instance_id of Watson Machine Learning instance"""
version: Optional[SecretStr] = None
"""Version of Watson Machine Learning instance"""
params: Optional[dict] = None
"""Model parameters to use during generate requests."""
verify: Union[str, bool] = ""
"""User can pass as verify one of following:
the path to a CA_BUNDLE file
the path of directory with certificates of trusted CAs
True - default path to truststore will be taken
False - no verification will be made"""
streaming: bool = False
""" Whether to stream the results or not. """
watsonx_model: Any
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@property
def lc_secrets(self) -> Dict[str, str]:
return {
"url": "WATSONX_URL",
"apikey": "WATSONX_APIKEY",
"token": "WATSONX_TOKEN",
"password": "WATSONX_PASSWORD",
"username": "WATSONX_USERNAME",
"instance_id": "WATSONX_INSTANCE_ID",
}
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that credentials and python package exists in environment."""
values["url"] = convert_to_secret_str(
get_from_dict_or_env(values, "url", "WATSONX_URL")
)
if "cloud.ibm.com" in values.get("url", "").get_secret_value():
values["apikey"] = convert_to_secret_str(
get_from_dict_or_env(values, "apikey", "WATSONX_APIKEY")
)
else:
if (
not values["token"]
and "WATSONX_TOKEN" not in os.environ
and not values["password"]
and "WATSONX_PASSWORD" not in os.environ
and not values["apikey"]
and "WATSONX_APIKEY" not in os.environ
):
raise ValueError(
"Did not find 'token', 'password' or 'apikey',"
" please add an environment variable"
" `WATSONX_TOKEN`, 'WATSONX_PASSWORD' or 'WATSONX_APIKEY' "
"which contains it,"
" or pass 'token', 'password' or 'apikey'"
" as a named parameter."
)
elif values["token"] or "WATSONX_TOKEN" in os.environ:
values["token"] = convert_to_secret_str(
get_from_dict_or_env(values, "token", "WATSONX_TOKEN")
)
elif values["password"] or "WATSONX_PASSWORD" in os.environ:
values["password"] = convert_to_secret_str(
get_from_dict_or_env(values, "password", "WATSONX_PASSWORD")
)
values["username"] = convert_to_secret_str(
get_from_dict_or_env(values, "username", "WATSONX_USERNAME")
)
elif values["apikey"] or "WATSONX_APIKEY" in os.environ:
values["apikey"] = convert_to_secret_str(
get_from_dict_or_env(values, "apikey", "WATSONX_APIKEY")
)
values["username"] = convert_to_secret_str(
get_from_dict_or_env(values, "username", "WATSONX_USERNAME")
)
if not values["instance_id"] or "WATSONX_INSTANCE_ID" not in os.environ:
values["instance_id"] = convert_to_secret_str(
get_from_dict_or_env(values, "instance_id", "WATSONX_INSTANCE_ID")
)
try:
from ibm_watson_machine_learning.foundation_models import Model
credentials = {
"url": values["url"].get_secret_value() if values["url"] else None,
"apikey": values["apikey"].get_secret_value()
if values["apikey"]
else None,
"token": values["token"].get_secret_value()
if values["token"]
else None,
"password": values["password"].get_secret_value()
if values["password"]
else None,
"username": values["username"].get_secret_value()
if values["username"]
else None,
"instance_id": values["instance_id"].get_secret_value()
if values["instance_id"]
else None,
"version": values["version"].get_secret_value()
if values["version"]
else None,
}
credentials_without_none_value = {
key: value for key, value in credentials.items() if value is not None
}
watsonx_model = Model(
model_id=values["model_id"],
credentials=credentials_without_none_value,
params=values["params"],
project_id=values["project_id"],
space_id=values["space_id"],
verify=values["verify"],
)
values["watsonx_model"] = watsonx_model
except ImportError:
raise ImportError(
"Could not import ibm_watson_machine_learning python package. "
"Please install it with `pip install ibm_watson_machine_learning`."
)
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model_id": self.model_id,
"params": self.params,
"project_id": self.project_id,
"space_id": self.space_id,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "IBM watsonx.ai"
@staticmethod
def _extract_token_usage(
response: Optional[List[Dict[str, Any]]] = None
) -> Dict[str, Any]:
if response is None:
return {"generated_token_count": 0, "input_token_count": 0}
input_token_count = 0
generated_token_count = 0
def get_count_value(key: str, result: Dict[str, Any]) -> int:
return result.get(key, 0) or 0
for res in response:
results = res.get("results")
if results:
input_token_count += get_count_value("input_token_count", results[0])
generated_token_count += get_count_value(
"generated_token_count", results[0]
)
return {
"generated_token_count": generated_token_count,
"input_token_count": input_token_count,
}
def _create_llm_result(self, response: List[dict]) -> LLMResult:
"""Create the LLMResult from the choices and prompts."""
generations = []
for res in response:
results = res.get("results")
if results:
finish_reason = results[0].get("stop_reason")
gen = Generation(
text=results[0].get("generated_text"),
generation_info={"finish_reason": finish_reason},
)
generations.append([gen])
final_token_usage = self._extract_token_usage(response)
llm_output = {"token_usage": final_token_usage, "model_id": self.model_id}
return LLMResult(generations=generations, llm_output=llm_output)
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the IBM watsonx.ai inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
run_manager: Optional callback manager.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = watsonxllm("What is a molecule")
"""
result = self._generate(
prompts=[prompt], stop=stop, run_manager=run_manager, **kwargs
)
return result.generations[0][0].text
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> LLMResult:
"""Call the IBM watsonx.ai inference endpoint which then generate the response.
Args:
prompts: List of strings (prompts) to pass into the model.
stop: Optional list of stop words to use when generating.
run_manager: Optional callback manager.
Returns:
The full LLMResult output.
Example:
.. code-block:: python
response = watsonxllm.generate(["What is a molecule"])
"""
should_stream = stream if stream is not None else self.streaming
if should_stream:
if len(prompts) > 1:
raise ValueError(
f"WatsonxLLM currently only supports single prompt, got {prompts}"
)
generation = GenerationChunk(text="")
stream_iter = self._stream(
prompts[0], stop=stop, run_manager=run_manager, **kwargs
)
for chunk in stream_iter:
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
return LLMResult(generations=[[generation]])
else:
response = self.watsonx_model.generate(prompt=prompts)
return self._create_llm_result(response)
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Call the IBM watsonx.ai inference endpoint which then streams the response.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
run_manager: Optional callback manager.
Returns:
The iterator which yields generation chunks.
Example:
.. code-block:: python
response = watsonxllm.stream("What is a molecule")
for chunk in response:
print(chunk, end='')
"""
for chunk in self.watsonx_model.generate_text_stream(prompt=prompt):
if chunk:
yield GenerationChunk(text=chunk)
if run_manager:
run_manager.on_llm_new_token(chunk)
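# Hedged usage sketch (not part of the original module): assumes an instance of
# the LLM class defined above, constructed elsewhere with valid watsonx.ai
# credentials and a model_id; the variable name `watsonxllm` mirrors the
# docstring examples. The class name is not shown in this excerpt, so the
# instance is taken as an untyped parameter.
def _watsonxllm_usage_sketch(watsonxllm) -> None:
    # Single-prompt completion goes through _call via __call__.
    print(watsonxllm("What is a molecule"))
    # Batch generation returns an LLMResult; token usage is aggregated by
    # _extract_token_usage into llm_output.
    result = watsonxllm.generate(["What is a molecule"])
    print(result.llm_output)  # {"token_usage": {...}, "model_id": "..."}
    # Streaming yields text chunks token by token via _stream.
    for chunk in watsonxllm.stream("What is a molecule"):
        print(chunk, end="")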
| [] |
2024-01-10 | Mintplex-Labs/langchain-python | libs~core~langchain_core~runnables~branch.py | from typing import (
Any,
Awaitable,
Callable,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from langchain_core.load.dump import dumpd
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables.base import (
Runnable,
RunnableLike,
RunnableSerializable,
coerce_to_runnable,
)
from langchain_core.runnables.config import (
RunnableConfig,
ensure_config,
get_callback_manager_for_config,
patch_config,
)
from langchain_core.runnables.context import (
CONTEXT_CONFIG_PREFIX,
CONTEXT_CONFIG_SUFFIX_SET,
)
from langchain_core.runnables.utils import (
ConfigurableFieldSpec,
Input,
Output,
get_unique_config_specs,
)
class RunnableBranch(RunnableSerializable[Input, Output]):
"""A Runnable that selects which branch to run based on a condition.
The runnable is initialized with a list of (condition, runnable) pairs and
a default branch.
When operating on an input, the first condition that evaluates to True is
selected, and the corresponding runnable is run on the input.
If no condition evaluates to True, the default branch is run on the input.
Examples:
.. code-block:: python
from langchain_core.runnables import RunnableBranch
branch = RunnableBranch(
(lambda x: isinstance(x, str), lambda x: x.upper()),
(lambda x: isinstance(x, int), lambda x: x + 1),
(lambda x: isinstance(x, float), lambda x: x * 2),
lambda x: "goodbye",
)
branch.invoke("hello") # "HELLO"
branch.invoke(None) # "goodbye"
"""
branches: Sequence[Tuple[Runnable[Input, bool], Runnable[Input, Output]]]
default: Runnable[Input, Output]
def __init__(
self,
*branches: Union[
Tuple[
Union[
Runnable[Input, bool],
Callable[[Input], bool],
Callable[[Input], Awaitable[bool]],
],
RunnableLike,
],
RunnableLike, # To accommodate the default branch
],
) -> None:
"""A Runnable that runs one of two branches based on a condition."""
if len(branches) < 2:
raise ValueError("RunnableBranch requires at least two branches")
default = branches[-1]
if not isinstance(
default,
(Runnable, Callable, Mapping), # type: ignore[arg-type]
):
raise TypeError(
"RunnableBranch default must be runnable, callable or mapping."
)
default_ = cast(
Runnable[Input, Output], coerce_to_runnable(cast(RunnableLike, default))
)
_branches = []
for branch in branches[:-1]:
if not isinstance(branch, (tuple, list)): # type: ignore[arg-type]
raise TypeError(
f"RunnableBranch branches must be "
f"tuples or lists, not {type(branch)}"
)
if not len(branch) == 2:
raise ValueError(
f"RunnableBranch branches must be "
f"tuples or lists of length 2, not {len(branch)}"
)
condition, runnable = branch
condition = cast(Runnable[Input, bool], coerce_to_runnable(condition))
runnable = coerce_to_runnable(runnable)
_branches.append((condition, runnable))
super().__init__(branches=_branches, default=default_)
class Config:
arbitrary_types_allowed = True
@classmethod
def is_lc_serializable(cls) -> bool:
"""RunnableBranch is serializable if all its branches are serializable."""
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""The namespace of a RunnableBranch is the namespace of its default branch."""
return cls.__module__.split(".")[:-1]
def get_input_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
runnables = (
[self.default]
+ [r for _, r in self.branches]
+ [r for r, _ in self.branches]
)
for runnable in runnables:
if runnable.get_input_schema(config).schema().get("type") is not None:
return runnable.get_input_schema(config)
return super().get_input_schema(config)
@property
def config_specs(self) -> List[ConfigurableFieldSpec]:
specs = get_unique_config_specs(
spec
for step in (
[self.default]
+ [r for _, r in self.branches]
+ [r for r, _ in self.branches]
)
for spec in step.config_specs
)
if any(
s.id.startswith(CONTEXT_CONFIG_PREFIX)
and s.id.endswith(CONTEXT_CONFIG_SUFFIX_SET)
for s in specs
):
raise ValueError("RunnableBranch cannot contain context setters.")
return specs
def invoke(
self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
) -> Output:
"""First evaluates the condition, then delegate to true or false branch."""
config = ensure_config(config)
callback_manager = get_callback_manager_for_config(config)
run_manager = callback_manager.on_chain_start(
dumpd(self),
input,
name=config.get("run_name"),
)
try:
for idx, branch in enumerate(self.branches):
condition, runnable = branch
expression_value = condition.invoke(
input,
config=patch_config(
config,
callbacks=run_manager.get_child(tag=f"condition:{idx + 1}"),
),
)
if expression_value:
output = runnable.invoke(
input,
config=patch_config(
config,
callbacks=run_manager.get_child(tag=f"branch:{idx + 1}"),
),
**kwargs,
)
break
else:
output = self.default.invoke(
input,
config=patch_config(
config, callbacks=run_manager.get_child(tag="branch:default")
),
**kwargs,
)
except Exception as e:
run_manager.on_chain_error(e)
raise
run_manager.on_chain_end(dumpd(output))
return output
async def ainvoke(
self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
) -> Output:
"""Async version of invoke."""
config = ensure_config(config)
callback_manager = get_callback_manager_for_config(config)
run_manager = callback_manager.on_chain_start(
dumpd(self),
input,
name=config.get("run_name"),
)
try:
for idx, branch in enumerate(self.branches):
condition, runnable = branch
expression_value = await condition.ainvoke(
input,
config=patch_config(
config,
callbacks=run_manager.get_child(tag=f"condition:{idx + 1}"),
),
)
if expression_value:
output = await runnable.ainvoke(
input,
config=patch_config(
config,
callbacks=run_manager.get_child(tag=f"branch:{idx + 1}"),
),
**kwargs,
)
break
else:
output = await self.default.ainvoke(
input,
config=patch_config(
config, callbacks=run_manager.get_child(tag="branch:default")
),
**kwargs,
)
except Exception as e:
run_manager.on_chain_error(e)
raise
run_manager.on_chain_end(dumpd(output))
return output
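# Hedged usage sketch (illustrative only): exercises the async path through
# RunnableBranch.ainvoke with the same kind of branches as the class docstring
# example above.
if __name__ == "__main__":
    import asyncio

    _branch = RunnableBranch(
        (lambda x: isinstance(x, str), lambda x: x.upper()),
        lambda x: "goodbye",
    )
    print(asyncio.run(_branch.ainvoke("hello")))  # "HELLO"
    print(asyncio.run(_branch.ainvoke(None)))  # "goodbye"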
| [] |
2024-01-10 | Mintplex-Labs/langchain-python | libs~langchain~tests~unit_tests~storage~test_imports.py | from langchain.storage import __all__
EXPECTED_ALL = [
"EncoderBackedStore",
"InMemoryStore",
"InMemoryByteStore",
"LocalFileStore",
"RedisStore",
"create_lc_store",
"create_kv_docstore",
"UpstashRedisByteStore",
"UpstashRedisStore",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
| [] |
2024-01-10 | Mintplex-Labs/langchain-python | libs~langchain~langchain~document_loaders~docusaurus.py | """Load Documents from Docusarus Documentation"""
from typing import Any, List, Optional
from langchain.document_loaders.sitemap import SitemapLoader
class DocusaurusLoader(SitemapLoader):
"""
Loader that leverages the SitemapLoader to loop through the generated pages of a
Docusaurus Documentation website and extracts the content by looking for specific
HTML tags. By default, the parser searches for the main content of the Docusaurus
page, which is normally the <article>. You also have the option to define your own
custom HTML tags by providing them as a list, for example: ["div", ".main", "a"].
"""
def __init__(
self,
url: str,
custom_html_tags: Optional[List[str]] = None,
**kwargs: Any,
):
"""
Initialize DocusaurusLoader
Args:
url: The base URL of the Docusaurus website.
custom_html_tags: Optional custom html tags to extract content from pages.
kwargs: Additional args to extend the underlying SitemapLoader, for example:
filter_urls, blocksize, meta_function, is_local, continue_on_failure
"""
if not kwargs.get("is_local"):
url = f"{url}/sitemap.xml"
self.custom_html_tags = custom_html_tags or ["main article"]
super().__init__(
url,
parsing_function=kwargs.get("parsing_function") or self._parsing_function,
**kwargs,
)
    def _parsing_function(self, content: Any) -> str:
        """Parse the relevant elements from a Docusaurus page."""
        relevant_elements = content.select(",".join(self.custom_html_tags))
        # Keep only the text of the elements matched by the configured CSS
        # selectors (by default the <article> inside <main>), ignoring
        # navigation, sidebars and other page chrome.
        return " ".join(element.get_text() for element in relevant_elements)
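# Hedged usage sketch (illustrative only): the URL below is an example public
# Docusaurus site; loading requires network access plus the HTML parsing
# dependencies used by the underlying SitemapLoader (beautifulsoup4, lxml).
if __name__ == "__main__":
    loader = DocusaurusLoader(
        "https://python.langchain.com",
        filter_urls=["https://python.langchain.com/docs/integrations"],
    )
    docs = loader.load()
    print(len(docs), docs[0].metadata["source"] if docs else None)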
| [] |
2024-01-10 | Mintplex-Labs/langchain-python | libs~langchain~langchain~retrievers~multi_vector.py | from enum import Enum
from typing import Any, List, Optional
from langchain_core.documents import Document
from langchain_core.pydantic_v1 import Field, validator
from langchain_core.retrievers import BaseRetriever
from langchain_core.stores import BaseStore, ByteStore
from langchain_core.vectorstores import VectorStore
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.storage._lc_store import create_kv_docstore
class SearchType(str, Enum):
"""Enumerator of the types of search to perform."""
similarity = "similarity"
"""Similarity search."""
mmr = "mmr"
"""Maximal Marginal Relevance reranking of similarity search."""
class MultiVectorRetriever(BaseRetriever):
"""Retrieve from a set of multiple embeddings for the same document."""
vectorstore: VectorStore
"""The underlying vectorstore to use to store small chunks
and their embedding vectors"""
byte_store: Optional[ByteStore]
"""The lower-level backing storage layer for the parent documents"""
docstore: BaseStore[str, Document]
"""The storage interface for the parent documents"""
id_key: str = "doc_id"
search_kwargs: dict = Field(default_factory=dict)
"""Keyword arguments to pass to the search function."""
search_type: SearchType = SearchType.similarity
"""Type of search to perform (similarity / mmr)"""
@validator("docstore", pre=True, always=True)
def shim_docstore(
cls, docstore: Optional[BaseStore[str, Document]], values: Any
) -> BaseStore[str, Document]:
byte_store = values.get("byte_store")
if byte_store is not None:
docstore = create_kv_docstore(byte_store)
elif docstore is None:
raise Exception("You must pass a `byte_store` parameter.")
return docstore
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
"""Get documents relevant to a query.
Args:
query: String to find relevant documents for
run_manager: The callbacks handler to use
Returns:
List of relevant documents
"""
if self.search_type == SearchType.mmr:
sub_docs = self.vectorstore.max_marginal_relevance_search(
query, **self.search_kwargs
)
else:
sub_docs = self.vectorstore.similarity_search(query, **self.search_kwargs)
# We do this to maintain the order of the ids that are returned
ids = []
for d in sub_docs:
if d.metadata[self.id_key] not in ids:
ids.append(d.metadata[self.id_key])
docs = self.docstore.mget(ids)
return [d for d in docs if d is not None]
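# Hedged usage sketch (illustrative only): assumes an already-populated
# `vectorstore` whose chunk documents carry a "doc_id" metadata key matching
# the ids stored in the docstore; the id "doc-1" is an arbitrary example.
def _multi_vector_retriever_sketch(vectorstore: VectorStore) -> List[Document]:
    from langchain.storage import InMemoryStore

    docstore = InMemoryStore()
    docstore.mset([("doc-1", Document(page_content="full parent document"))])
    retriever = MultiVectorRetriever(
        vectorstore=vectorstore,
        docstore=docstore,
        id_key="doc_id",
        search_type=SearchType.mmr,
    )
    # Small chunks are matched in the vectorstore; full parents come back
    # from the docstore via their shared doc_id.
    return retriever.get_relevant_documents("my query")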
| [] |
2024-01-10 | Mintplex-Labs/langchain-python | libs~langchain~tests~unit_tests~test_dependencies.py | """A unit test meant to catch accidental introduction of non-optional dependencies."""
from pathlib import Path
from typing import Any, Dict, Mapping
import pytest
import toml
HERE = Path(__file__).parent
PYPROJECT_TOML = HERE / "../../pyproject.toml"
@pytest.fixture()
def poetry_conf() -> Dict[str, Any]:
"""Load the pyproject.toml file."""
with open(PYPROJECT_TOML) as f:
return toml.load(f)["tool"]["poetry"]
def test_required_dependencies(poetry_conf: Mapping[str, Any]) -> None:
"""A test that checks if a new non-optional dependency is being introduced.
If this test is triggered, it means that a contributor is trying to introduce a new
required dependency. This should be avoided in most situations.
"""
# Get the dependencies from the [tool.poetry.dependencies] section
dependencies = poetry_conf["dependencies"]
is_required = {
package_name: isinstance(requirements, str)
or not requirements.get("optional", False)
for package_name, requirements in dependencies.items()
}
required_dependencies = [
package_name for package_name, required in is_required.items() if required
]
assert sorted(required_dependencies) == [
"PyYAML",
"SQLAlchemy",
"aiohttp",
"async-timeout",
"dataclasses-json",
"jsonpatch",
"langchain-core",
"langsmith",
"numpy",
"pydantic",
"python",
"requests",
"tenacity",
]
unrequired_dependencies = [
package_name for package_name, required in is_required.items() if not required
]
in_extras = [dep for group in poetry_conf["extras"].values() for dep in group]
assert set(unrequired_dependencies) == set(in_extras)
def test_test_group_dependencies(poetry_conf: Mapping[str, Any]) -> None:
"""Check if someone is attempting to add additional test dependencies.
Only dependencies associated with test running infrastructure should be added
to the test group; e.g., pytest, pytest-cov etc.
Examples of dependencies that should NOT be included: boto3, azure, postgres, etc.
"""
test_group_deps = sorted(poetry_conf["group"]["test"]["dependencies"])
assert test_group_deps == sorted(
[
"duckdb-engine",
"freezegun",
"langchain-core",
"lark",
"pandas",
"pytest",
"pytest-asyncio",
"pytest-cov",
"pytest-dotenv",
"pytest-mock",
"pytest-socket",
"pytest-watcher",
"responses",
"syrupy",
"requests-mock",
]
)
def test_imports() -> None:
"""Test that you can import all top level things okay."""
from langchain_core.prompts import BasePromptTemplate # noqa: F401
from langchain.agents import OpenAIFunctionsAgent # noqa: F401
from langchain.callbacks import OpenAICallbackHandler # noqa: F401
from langchain.chains import LLMChain # noqa: F401
from langchain.chat_models import ChatOpenAI # noqa: F401
from langchain.document_loaders import BSHTMLLoader # noqa: F401
from langchain.embeddings import OpenAIEmbeddings # noqa: F401
from langchain.llms import OpenAI # noqa: F401
from langchain.retrievers import VespaRetriever # noqa: F401
from langchain.tools import DuckDuckGoSearchResults # noqa: F401
from langchain.utilities import (
SearchApiAPIWrapper, # noqa: F401
SerpAPIWrapper, # noqa: F401
)
from langchain.vectorstores import FAISS # noqa: F401
| [] |
2024-01-10 | Mintplex-Labs/langchain-python | libs~langchain~langchain~llms~cerebriumai.py | import logging
from typing import Any, Dict, List, Mapping, Optional, cast
import requests
from langchain_core.pydantic_v1 import Extra, Field, SecretStr, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import convert_to_secret_str, get_from_dict_or_env
logger = logging.getLogger(__name__)
class CerebriumAI(LLM):
"""CerebriumAI large language models.
To use, you should have the ``cerebrium`` python package installed.
You should also have the environment variable ``CEREBRIUMAI_API_KEY``
set with your API key or pass it as a named argument in the constructor.
Any parameters that are valid to be passed to the call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import CerebriumAI
cerebrium = CerebriumAI(endpoint_url="", cerebriumai_api_key="my-api-key")
"""
endpoint_url: str = ""
"""model endpoint to use"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not
explicitly specified."""
cerebriumai_api_key: Optional[SecretStr] = None
class Config:
"""Configuration for this pydantic config."""
extra = Extra.forbid
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
cerebriumai_api_key = convert_to_secret_str(
get_from_dict_or_env(values, "cerebriumai_api_key", "CEREBRIUMAI_API_KEY")
)
values["cerebriumai_api_key"] = cerebriumai_api_key
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"endpoint_url": self.endpoint_url},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "cerebriumai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
headers: Dict = {
"Authorization": cast(
SecretStr, self.cerebriumai_api_key
).get_secret_value(),
"Content-Type": "application/json",
}
params = self.model_kwargs or {}
payload = {"prompt": prompt, **params, **kwargs}
response = requests.post(self.endpoint_url, json=payload, headers=headers)
if response.status_code == 200:
data = response.json()
text = data["result"]
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
else:
response.raise_for_status()
return ""
| [] |
2024-01-10 | Mintplex-Labs/langchain-python | libs~langchain~tests~integration_tests~vectorstores~test_xata.py | """Test Xata vector store functionality.
Before running this test, please create a Xata database by following
the instructions from:
https://python.langchain.com/docs/integrations/vectorstores/xata
"""
import os
from langchain_core.documents import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.xata import XataVectorStore
class TestXata:
@classmethod
def setup_class(cls) -> None:
assert os.getenv("XATA_API_KEY"), "XATA_API_KEY environment variable is not set"
assert os.getenv("XATA_DB_URL"), "XATA_DB_URL environment variable is not set"
def test_similarity_search_without_metadata(
self, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end constructions and search without metadata."""
texts = ["foo", "bar", "baz"]
docsearch = XataVectorStore.from_texts(
api_key=os.getenv("XATA_API_KEY"),
db_url=os.getenv("XATA_DB_URL"),
texts=texts,
embedding=embedding_openai,
)
docsearch.wait_for_indexing(ndocs=3)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
docsearch.delete(delete_all=True)
def test_similarity_search_with_metadata(
self, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end construction and search with a metadata filter.
This test requires a column named "a" of type integer to be present
in the Xata table."""
texts = ["foo", "foo", "foo"]
metadatas = [{"a": i} for i in range(len(texts))]
docsearch = XataVectorStore.from_texts(
api_key=os.getenv("XATA_API_KEY"),
db_url=os.getenv("XATA_DB_URL"),
texts=texts,
embedding=embedding_openai,
metadatas=metadatas,
)
docsearch.wait_for_indexing(ndocs=3)
output = docsearch.similarity_search("foo", k=1, filter={"a": 1})
assert output == [Document(page_content="foo", metadata={"a": 1})]
docsearch.delete(delete_all=True)
| [] |
2024-01-10 | Mintplex-Labs/langchain-python | libs~langchain~langchain~llms~cloudflare_workersai.py | import json
import logging
from typing import Any, Dict, Iterator, List, Optional
import requests
from langchain_core.outputs import GenerationChunk
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
logger = logging.getLogger(__name__)
class CloudflareWorkersAI(LLM):
"""Langchain LLM class to help to access Cloudflare Workers AI service.
To use, you must provide an API token and
account ID to access Cloudflare Workers AI, and
pass it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.llms.cloudflare_workersai import CloudflareWorkersAI
my_account_id = "my_account_id"
my_api_token = "my_secret_api_token"
llm_model = "@cf/meta/llama-2-7b-chat-int8"
cf_ai = CloudflareWorkersAI(
account_id=my_account_id,
api_token=my_api_token,
model=llm_model
)
"""
account_id: str
api_token: str
model: str = "@cf/meta/llama-2-7b-chat-int8"
base_url: str = "https://api.cloudflare.com/client/v4/accounts"
streaming: bool = False
endpoint_url: str = ""
def __init__(self, **kwargs: Any) -> None:
"""Initialize the Cloudflare Workers AI class."""
super().__init__(**kwargs)
self.endpoint_url = f"{self.base_url}/{self.account_id}/ai/run/{self.model}"
@property
def _llm_type(self) -> str:
"""Return type of LLM."""
return "cloudflare"
@property
def _default_params(self) -> Dict[str, Any]:
"""Default parameters"""
return {}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Identifying parameters"""
return {
"account_id": self.account_id,
"api_token": self.api_token,
"model": self.model,
"base_url": self.base_url,
}
def _call_api(self, prompt: str, params: Dict[str, Any]) -> requests.Response:
"""Call Cloudflare Workers API"""
headers = {"Authorization": f"Bearer {self.api_token}"}
data = {"prompt": prompt, "stream": self.streaming, **params}
response = requests.post(self.endpoint_url, headers=headers, json=data)
return response
def _process_response(self, response: requests.Response) -> str:
"""Process API response"""
if response.ok:
data = response.json()
return data["result"]["response"]
else:
raise ValueError(f"Request failed with status {response.status_code}")
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Streaming prediction"""
        original_streaming: bool = self.streaming
self.streaming = True
_response_prefix_count = len("data: ")
_response_stream_end = b"data: [DONE]"
for chunk in self._call_api(prompt, kwargs).iter_lines():
if chunk == _response_stream_end:
break
if len(chunk) > _response_prefix_count:
try:
data = json.loads(chunk[_response_prefix_count:])
except Exception as e:
logger.debug(chunk)
raise e
if data is not None and "response" in data:
yield GenerationChunk(text=data["response"])
if run_manager:
run_manager.on_llm_new_token(data["response"])
logger.debug("stream end")
        self.streaming = original_streaming
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Regular prediction"""
if self.streaming:
return "".join(
[c.text for c in self._stream(prompt, stop, run_manager, **kwargs)]
)
else:
response = self._call_api(prompt, kwargs)
return self._process_response(response)
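# Hedged usage sketch (illustrative only): account id and API token are
# placeholders; `stream()` on the base LLM class drives the _stream method
# above, yielding tokens as they arrive.
if __name__ == "__main__":
    llm = CloudflareWorkersAI(
        account_id="my_account_id",
        api_token="my_api_token",
        streaming=True,
    )
    for token in llm.stream("Tell me a short joke"):
        print(token, end="", flush=True)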
| [] |
2024-01-10 | Mintplex-Labs/langchain-python | libs~core~langchain_core~runnables~context.py | import asyncio
import threading
from collections import defaultdict
from functools import partial
from itertools import groupby
from typing import (
Any,
Awaitable,
Callable,
DefaultDict,
Dict,
List,
Mapping,
Optional,
Type,
TypeVar,
Union,
)
from langchain_core.runnables.base import (
Runnable,
RunnableSerializable,
coerce_to_runnable,
)
from langchain_core.runnables.config import RunnableConfig, patch_config
from langchain_core.runnables.utils import ConfigurableFieldSpec, Input, Output
T = TypeVar("T")
Values = Dict[Union[asyncio.Event, threading.Event], Any]
CONTEXT_CONFIG_PREFIX = "__context__/"
CONTEXT_CONFIG_SUFFIX_GET = "/get"
CONTEXT_CONFIG_SUFFIX_SET = "/set"
async def _asetter(done: asyncio.Event, values: Values, value: T) -> T:
values[done] = value
done.set()
return value
async def _agetter(done: asyncio.Event, values: Values) -> Any:
await done.wait()
return values[done]
def _setter(done: threading.Event, values: Values, value: T) -> T:
values[done] = value
done.set()
return value
def _getter(done: threading.Event, values: Values) -> Any:
done.wait()
return values[done]
def _key_from_id(id_: str) -> str:
wout_prefix = id_.split(CONTEXT_CONFIG_PREFIX, maxsplit=1)[1]
if wout_prefix.endswith(CONTEXT_CONFIG_SUFFIX_GET):
return wout_prefix[: -len(CONTEXT_CONFIG_SUFFIX_GET)]
elif wout_prefix.endswith(CONTEXT_CONFIG_SUFFIX_SET):
return wout_prefix[: -len(CONTEXT_CONFIG_SUFFIX_SET)]
else:
raise ValueError(f"Invalid context config id {id_}")
def _config_with_context(
config: RunnableConfig,
steps: List[Runnable],
setter: Callable,
getter: Callable,
event_cls: Union[Type[threading.Event], Type[asyncio.Event]],
) -> RunnableConfig:
    if any(
        k.startswith(CONTEXT_CONFIG_PREFIX) for k in config.get("configurable", {})
    ):
return config
context_specs = [
(spec, i)
for i, step in enumerate(steps)
for spec in step.config_specs
if spec.id.startswith(CONTEXT_CONFIG_PREFIX)
]
grouped_by_key = {
key: list(group)
for key, group in groupby(
sorted(context_specs, key=lambda s: s[0].id),
key=lambda s: _key_from_id(s[0].id),
)
}
deps_by_key = {
key: set(
_key_from_id(dep) for spec in group for dep in (spec[0].dependencies or [])
)
for key, group in grouped_by_key.items()
}
values: Values = {}
events: DefaultDict[str, Union[asyncio.Event, threading.Event]] = defaultdict(
event_cls
)
context_funcs: Dict[str, Callable[[], Any]] = {}
for key, group in grouped_by_key.items():
getters = [s for s in group if s[0].id.endswith(CONTEXT_CONFIG_SUFFIX_GET)]
setters = [s for s in group if s[0].id.endswith(CONTEXT_CONFIG_SUFFIX_SET)]
for dep in deps_by_key[key]:
if key in deps_by_key[dep]:
raise ValueError(
f"Deadlock detected between context keys {key} and {dep}"
)
if len(getters) < 1:
raise ValueError(f"Expected at least one getter for context key {key}")
if len(setters) != 1:
raise ValueError(f"Expected exactly one setter for context key {key}")
setter_idx = setters[0][1]
        if any(getter_idx < setter_idx for _, getter_idx in getters):
            raise ValueError(
                f"Context setter for key {key} must be defined before all getters."
            )
context_funcs[getters[0][0].id] = partial(getter, events[key], values)
context_funcs[setters[0][0].id] = partial(setter, events[key], values)
return patch_config(config, configurable=context_funcs)
def aconfig_with_context(
config: RunnableConfig,
steps: List[Runnable],
) -> RunnableConfig:
return _config_with_context(config, steps, _asetter, _agetter, asyncio.Event)
def config_with_context(
config: RunnableConfig,
steps: List[Runnable],
) -> RunnableConfig:
return _config_with_context(config, steps, _setter, _getter, threading.Event)
class ContextGet(RunnableSerializable):
prefix: str = ""
key: Union[str, List[str]]
@property
def ids(self) -> List[str]:
prefix = self.prefix + "/" if self.prefix else ""
keys = self.key if isinstance(self.key, list) else [self.key]
return [
f"{CONTEXT_CONFIG_PREFIX}{prefix}{k}{CONTEXT_CONFIG_SUFFIX_GET}"
for k in keys
]
@property
def config_specs(self) -> List[ConfigurableFieldSpec]:
return super().config_specs + [
ConfigurableFieldSpec(
id=id_,
annotation=Callable[[], Any],
)
for id_ in self.ids
]
def invoke(self, input: Any, config: Optional[RunnableConfig] = None) -> Any:
config = config or {}
configurable = config.get("configurable", {})
if isinstance(self.key, list):
return {key: configurable[id_]() for key, id_ in zip(self.key, self.ids)}
else:
return configurable[self.ids[0]]()
async def ainvoke(
self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any
) -> Any:
config = config or {}
configurable = config.get("configurable", {})
if isinstance(self.key, list):
values = await asyncio.gather(*(configurable[id_]() for id_ in self.ids))
return {key: value for key, value in zip(self.key, values)}
else:
return await configurable[self.ids[0]]()
SetValue = Union[
Runnable[Input, Output],
Callable[[Input], Output],
Callable[[Input], Awaitable[Output]],
Any,
]
def _coerce_set_value(value: SetValue) -> Runnable[Input, Output]:
if not isinstance(value, Runnable) and not callable(value):
return coerce_to_runnable(lambda _: value)
return coerce_to_runnable(value)
class ContextSet(RunnableSerializable):
prefix: str = ""
keys: Mapping[str, Optional[Runnable]]
class Config:
arbitrary_types_allowed = True
def __init__(
self,
key: Optional[str] = None,
value: Optional[SetValue] = None,
prefix: str = "",
**kwargs: SetValue,
):
if key is not None:
kwargs[key] = value
super().__init__(
keys={
k: _coerce_set_value(v) if v is not None else None
for k, v in kwargs.items()
},
prefix=prefix,
)
@property
def ids(self) -> List[str]:
prefix = self.prefix + "/" if self.prefix else ""
return [
f"{CONTEXT_CONFIG_PREFIX}{prefix}{key}{CONTEXT_CONFIG_SUFFIX_SET}"
for key in self.keys
]
@property
def config_specs(self) -> List[ConfigurableFieldSpec]:
mapper_config_specs = [
s
for mapper in self.keys.values()
if mapper is not None
for s in mapper.config_specs
]
for spec in mapper_config_specs:
if spec.id.endswith(CONTEXT_CONFIG_SUFFIX_GET):
getter_key = spec.id.split("/")[1]
if getter_key in self.keys:
raise ValueError(
f"Circular reference in context setter for key {getter_key}"
)
return super().config_specs + [
ConfigurableFieldSpec(
id=id_,
annotation=Callable[[], Any],
)
for id_ in self.ids
]
def invoke(self, input: Any, config: Optional[RunnableConfig] = None) -> Any:
config = config or {}
configurable = config.get("configurable", {})
for id_, mapper in zip(self.ids, self.keys.values()):
if mapper is not None:
configurable[id_](mapper.invoke(input, config))
else:
configurable[id_](input)
return input
async def ainvoke(
self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any
) -> Any:
config = config or {}
configurable = config.get("configurable", {})
for id_, mapper in zip(self.ids, self.keys.values()):
if mapper is not None:
await configurable[id_](await mapper.ainvoke(input, config))
else:
await configurable[id_](input)
return input
class Context:
@staticmethod
def create_scope(scope: str, /) -> "PrefixContext":
return PrefixContext(prefix=scope)
@staticmethod
def getter(key: Union[str, List[str]], /) -> ContextGet:
return ContextGet(key=key)
@staticmethod
def setter(
_key: Optional[str] = None,
_value: Optional[SetValue] = None,
/,
**kwargs: SetValue,
) -> ContextSet:
return ContextSet(_key, _value, prefix="", **kwargs)
class PrefixContext:
prefix: str = ""
def __init__(self, prefix: str = ""):
self.prefix = prefix
def getter(self, key: Union[str, List[str]], /) -> ContextGet:
return ContextGet(key=key, prefix=self.prefix)
def setter(
self,
_key: Optional[str] = None,
_value: Optional[SetValue] = None,
/,
**kwargs: SetValue,
) -> ContextSet:
return ContextSet(_key, _value, prefix=self.prefix, **kwargs)
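# Hedged usage sketch (illustrative only): a value set at the start of a
# sequence is read back by a later step through the shared run config; the
# question text is an arbitrary example.
if __name__ == "__main__":
    from langchain_core.runnables import RunnableLambda, RunnablePassthrough

    chain = (
        Context.setter("question")
        | RunnableLambda(lambda q: f"answer to: {q}")
        | {
            "answer": RunnablePassthrough(),
            "question": Context.getter("question"),
        }
    )
    print(chain.invoke("What is LCEL?"))
    # {'answer': 'answer to: What is LCEL?', 'question': 'What is LCEL?'}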
| [] |
2024-01-10 | Mintplex-Labs/langchain-python | libs~core~langchain_core~outputs~run_info.py | from __future__ import annotations
from uuid import UUID
from langchain_core.pydantic_v1 import BaseModel
class RunInfo(BaseModel):
"""Class that contains metadata for a single execution of a Chain or model."""
run_id: UUID
"""A unique identifier for the model or chain run."""
| [] |
2024-01-10 | Mintplex-Labs/langchain-python | libs~langchain~tests~integration_tests~llms~test_cloudflare_workersai.py | import responses
from langchain.llms.cloudflare_workersai import CloudflareWorkersAI
@responses.activate
def test_cloudflare_workersai_call() -> None:
responses.add(
responses.POST,
"https://api.cloudflare.com/client/v4/accounts/my_account_id/ai/run/@cf/meta/llama-2-7b-chat-int8",
json={"result": {"response": "4"}},
status=200,
)
llm = CloudflareWorkersAI(
account_id="my_account_id",
api_token="my_api_token",
model="@cf/meta/llama-2-7b-chat-int8",
)
output = llm("What is 2 + 2?")
assert output == "4"
@responses.activate
def test_cloudflare_workersai_stream() -> None:
response_body = ['data: {"response": "Hello"}', "data: [DONE]"]
responses.add(
responses.POST,
"https://api.cloudflare.com/client/v4/accounts/my_account_id/ai/run/@cf/meta/llama-2-7b-chat-int8",
body="\n".join(response_body),
status=200,
)
llm = CloudflareWorkersAI(
account_id="my_account_id",
api_token="my_api_token",
model="@cf/meta/llama-2-7b-chat-int8",
streaming=True,
)
outputs = []
for chunk in llm.stream("Say Hello"):
outputs.append(chunk)
assert "".join(outputs) == "Hello"
| [] |
2024-01-10 | Mintplex-Labs/langchain-python | libs~langchain~langchain~smith~evaluation~runner_utils.py | """Utilities for running language models or Chains over datasets."""
from __future__ import annotations
import functools
import inspect
import logging
import uuid
from enum import Enum
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Tuple,
Union,
cast,
)
from langchain_core._api import warn_deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage, messages_from_dict
from langchain_core.outputs import ChatResult, LLMResult
from langchain_core.runnables import Runnable, RunnableConfig, RunnableLambda
from langchain_core.runnables import config as runnable_config
from langchain_core.runnables import utils as runnable_utils
from langchain_core.tracers.evaluation import (
EvaluatorCallbackHandler,
wait_for_all_evaluators,
)
from langchain_core.tracers.langchain import LangChainTracer
from langsmith.client import Client
from langsmith.evaluation import RunEvaluator
from langsmith.run_helpers import as_runnable, is_traceable_function
from langsmith.schemas import Dataset, DataType, Example
from langsmith.utils import LangSmithError
from requests import HTTPError
from langchain.callbacks.manager import Callbacks
from langchain.chains.base import Chain
from langchain.evaluation.loading import load_evaluator
from langchain.evaluation.schema import (
EvaluatorType,
PairwiseStringEvaluator,
StringEvaluator,
)
from langchain.smith import evaluation as smith_eval
from langchain.smith.evaluation import config as smith_eval_config
from langchain.smith.evaluation import name_generation, progress
if TYPE_CHECKING:
import pandas as pd
logger = logging.getLogger(__name__)
MODEL_OR_CHAIN_FACTORY = Union[
Callable[[], Union[Chain, Runnable]],
BaseLanguageModel,
Callable[[dict], Any],
Runnable,
Chain,
]
MCF = Union[Callable[[], Union[Chain, Runnable]], BaseLanguageModel]
class InputFormatError(Exception):
"""Raised when the input format is invalid."""
## Shared Utilities
class TestResult(dict):
"""A dictionary of the results of a single test run."""
def get_aggregate_feedback(
self,
) -> pd.DataFrame:
"""Return quantiles for the feedback scores.
This method calculates and prints the quantiles for the feedback scores
across all feedback keys.
Returns:
A DataFrame containing the quantiles for each feedback key.
"""
df = self.to_dataframe()
# Drop all things starting with inputs., outputs., and reference
to_drop = [
col
for col in df.columns
if col.startswith("inputs.")
or col.startswith("outputs.")
or col.startswith("reference")
]
return df.describe(include="all").drop(to_drop, axis=1)
def to_dataframe(self) -> pd.DataFrame:
"""Convert the results to a dataframe."""
try:
import pandas as pd
except ImportError as e:
raise ImportError(
"Pandas is required to convert the results to a dataframe."
" to install pandas, run `pip install pandas`."
) from e
indices = []
records = []
for example_id, result in self["results"].items():
feedback = result["feedback"]
output_ = result.get("output")
if isinstance(output_, dict):
output = {f"outputs.{k}": v for k, v in output_.items()}
elif output_ is None:
output = {}
else:
output = {"output": output_}
r = {
**{f"inputs.{k}": v for k, v in result["input"].items()},
**output,
}
if "reference" in result:
if isinstance(result["reference"], dict):
r.update(
{f"reference.{k}": v for k, v in result["reference"].items()}
)
else:
r["reference"] = result["reference"]
r.update(
{
**{f"feedback.{f.key}": f.score for f in feedback},
"error": result.get("Error"),
"execution_time": result["execution_time"],
"run_id": result.get("run_id"),
}
)
records.append(r)
indices.append(example_id)
return pd.DataFrame(records, index=indices)
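# Hedged illustration (not part of the original module): the minimal shape of
# the per-example results dict that TestResult wraps, and how it converts to a
# DataFrame (requires pandas). The example id, question and feedback key are
# arbitrary placeholders.
def _test_result_sketch() -> "pd.DataFrame":
    from langsmith.evaluation import EvaluationResult

    result = TestResult(
        project_name="demo-project",
        results={
            "example-1": {
                "input": {"question": "What is 2 + 2?"},
                "output": {"answer": "4"},
                "feedback": [EvaluationResult(key="correctness", score=1.0)],
                "execution_time": 0.42,
                "run_id": "run-1",
            }
        },
    )
    return result.to_dataframe()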
class EvalError(dict):
"""Your architecture raised an error."""
def __init__(self, Error: BaseException, **kwargs: Any) -> None:
super().__init__(Error=Error, **kwargs)
def __getattr__(self, name: str) -> Any:
try:
return self[name]
except KeyError:
raise AttributeError(f"'EvalError' object has no attribute '{name}'")
def _wrap_in_chain_factory(
llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY,
dataset_name: str = "<my_dataset>",
) -> MCF:
"""Forgive the user if they pass in a chain without memory instead of a chain
factory. It's a common mistake. Raise a more helpful error message as well."""
if isinstance(llm_or_chain_factory, Chain):
chain = llm_or_chain_factory
chain_class = chain.__class__.__name__
if llm_or_chain_factory.memory is not None:
memory_class = chain.memory.__class__.__name__
raise ValueError(
"Cannot directly evaluate a chain with stateful memory."
" To evaluate this chain, pass in a chain constructor"
" that initializes fresh memory each time it is called."
" This will safegaurd against information"
" leakage between dataset examples."
"\nFor example:\n\n"
"def chain_constructor():\n"
f" new_memory = {memory_class}(...)\n"
f" return {chain_class}"
"(memory=new_memory, ...)\n\n"
f'run_on_dataset("{dataset_name}", chain_constructor, ...)'
)
return lambda: chain
elif isinstance(llm_or_chain_factory, BaseLanguageModel):
return llm_or_chain_factory
elif isinstance(llm_or_chain_factory, Runnable):
# Memory may exist here, but it's not elegant to check all those cases.
lcf = llm_or_chain_factory
return lambda: lcf
elif callable(llm_or_chain_factory):
if is_traceable_function(llm_or_chain_factory):
runnable_ = as_runnable(cast(Callable, llm_or_chain_factory))
return lambda: runnable_
try:
_model = llm_or_chain_factory() # type: ignore[call-arg]
except TypeError:
# It's an arbitrary function, wrap it in a RunnableLambda
user_func = cast(Callable, llm_or_chain_factory)
sig = inspect.signature(user_func)
logger.info(f"Wrapping function {sig} as RunnableLambda.")
wrapped = RunnableLambda(user_func)
return lambda: wrapped
constructor = cast(Callable, llm_or_chain_factory)
if isinstance(_model, BaseLanguageModel):
# It's not uncommon to do an LLM constructor instead of raw LLM,
# so we'll unpack it for the user.
return _model
elif is_traceable_function(cast(Callable, _model)):
runnable_ = as_runnable(cast(Callable, _model))
return lambda: runnable_
elif not isinstance(_model, Runnable):
# This is unlikely to happen - a constructor for a model function
return lambda: RunnableLambda(constructor)
else:
# Typical correct case
return constructor # noqa
return llm_or_chain_factory
def _get_prompt(inputs: Dict[str, Any]) -> str:
"""Get prompt from inputs.
Args:
inputs: The input dictionary.
Returns:
A string prompt.
Raises:
InputFormatError: If the input format is invalid.
"""
if not inputs:
raise InputFormatError("Inputs should not be empty.")
prompts = []
if "prompt" in inputs:
if not isinstance(inputs["prompt"], str):
raise InputFormatError(
"Expected string for 'prompt', got"
f" {type(inputs['prompt']).__name__}"
)
prompts = [inputs["prompt"]]
elif "prompts" in inputs:
if not isinstance(inputs["prompts"], list) or not all(
isinstance(i, str) for i in inputs["prompts"]
):
raise InputFormatError(
"Expected list of strings for 'prompts',"
f" got {type(inputs['prompts']).__name__}"
)
prompts = inputs["prompts"]
elif len(inputs) == 1:
prompt_ = next(iter(inputs.values()))
if isinstance(prompt_, str):
prompts = [prompt_]
elif isinstance(prompt_, list) and all(isinstance(i, str) for i in prompt_):
prompts = prompt_
else:
raise InputFormatError(f"LLM Run expects string prompt input. Got {inputs}")
else:
raise InputFormatError(
f"LLM Run expects 'prompt' or 'prompts' in inputs. Got {inputs}"
)
if len(prompts) == 1:
return prompts[0]
else:
raise InputFormatError(
f"LLM Run expects single prompt input. Got {len(prompts)} prompts."
)
def _get_messages(inputs: Dict[str, Any]) -> List[BaseMessage]:
"""Get Chat Messages from inputs.
Args:
inputs: The input dictionary.
Returns:
A list of chat messages.
Raises:
InputFormatError: If the input format is invalid.
"""
if not inputs:
raise InputFormatError("Inputs should not be empty.")
if "messages" in inputs:
single_input = inputs["messages"]
elif len(inputs) == 1:
single_input = next(iter(inputs.values()))
else:
raise InputFormatError(
f"Chat Run expects 'messages' in inputs when example has multiple"
f" input keys. Got {inputs}"
)
if isinstance(single_input, list) and all(
isinstance(i, dict) for i in single_input
):
raw_messages = [single_input]
elif isinstance(single_input, list) and all(
isinstance(i, list) for i in single_input
):
raw_messages = single_input
else:
raise InputFormatError(
f"Chat Run expects List[dict] or List[List[dict]] values for"
f" 'messages' key input. Got {inputs}"
)
if len(raw_messages) == 1:
return messages_from_dict(raw_messages[0])
else:
raise InputFormatError(
f"Chat Run expects single List[dict] or List[List[dict]] 'messages'"
f" input. Got {len(raw_messages)} messages from inputs {inputs}"
)
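# Hedged illustration (not part of the original module): the accepted example
# input shapes for the two helpers above; the helper name is hypothetical.
def _input_format_sketch() -> None:
    # Prompt-style inputs: an explicit "prompt" key, or a single string value.
    assert _get_prompt({"prompt": "Hello"}) == "Hello"
    assert _get_prompt({"question": "Hello"}) == "Hello"
    # Chat-style inputs: a "messages" key holding a list of message dicts.
    messages = _get_messages(
        {"messages": [{"type": "human", "data": {"content": "Hi"}}]}
    )
    assert messages[0].content == "Hi"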
## Shared data validation utilities
def _validate_example_inputs_for_language_model(
first_example: Example,
input_mapper: Optional[Callable[[Dict], Any]],
) -> None:
if input_mapper:
prompt_input = input_mapper(first_example.inputs)
if not isinstance(prompt_input, str) and not (
isinstance(prompt_input, list)
and all(isinstance(msg, BaseMessage) for msg in prompt_input)
):
raise InputFormatError(
"When using an input_mapper to prepare dataset example inputs"
" for an LLM or chat model, the output must a single string or"
" a list of chat messages."
f"\nGot: {prompt_input} of type {type(prompt_input)}."
)
else:
try:
_get_prompt(first_example.inputs)
except InputFormatError:
try:
_get_messages(first_example.inputs)
except InputFormatError:
raise InputFormatError(
"Example inputs do not match language model input format. "
"Expected a dictionary with messages or a single prompt."
f" Got: {first_example.inputs}"
" Please update your dataset OR provide an input_mapper"
" to convert the example.inputs to a compatible format"
" for the llm or chat model you wish to evaluate."
)
def _validate_example_inputs_for_chain(
first_example: Example,
chain: Chain,
input_mapper: Optional[Callable[[Dict], Any]],
) -> None:
"""Validate that the example inputs match the chain input keys."""
if input_mapper:
first_inputs = input_mapper(first_example.inputs)
missing_keys = set(chain.input_keys).difference(first_inputs)
if not isinstance(first_inputs, dict):
raise InputFormatError(
"When using an input_mapper to prepare dataset example"
" inputs for a chain, the mapped value must be a dictionary."
f"\nGot: {first_inputs} of type {type(first_inputs)}."
)
if missing_keys:
raise InputFormatError(
"Missing keys after loading example using input_mapper."
f"\nExpected: {chain.input_keys}. Got: {first_inputs.keys()}"
)
else:
first_inputs = first_example.inputs
missing_keys = set(chain.input_keys).difference(first_inputs)
if len(first_inputs) == 1 and len(chain.input_keys) == 1:
# We can pass this through the run method.
# Refrain from calling to validate.
pass
elif missing_keys:
raise InputFormatError(
"Example inputs missing expected chain input keys."
" Please provide an input_mapper to convert the example.inputs"
" to a compatible format for the chain you wish to evaluate."
f"Expected: {chain.input_keys}. "
f"Got: {first_inputs.keys()}"
)
def _validate_example_inputs(
example: Example,
llm_or_chain_factory: MCF,
input_mapper: Optional[Callable[[Dict], Any]],
) -> None:
"""Validate that the example inputs are valid for the model."""
if isinstance(llm_or_chain_factory, BaseLanguageModel):
_validate_example_inputs_for_language_model(example, input_mapper)
else:
chain = llm_or_chain_factory()
if isinstance(chain, Chain):
# Otherwise it's a runnable
_validate_example_inputs_for_chain(example, chain, input_mapper)
elif isinstance(chain, Runnable):
logger.debug(f"Skipping input validation for {chain}")
## Shared Evaluator Setup Utilities
def _setup_evaluation(
llm_or_chain_factory: MCF,
examples: List[Example],
evaluation: Optional[smith_eval.RunEvalConfig],
data_type: DataType,
) -> Optional[List[RunEvaluator]]:
"""Configure the evaluators to run on the results of the chain."""
if evaluation:
if isinstance(llm_or_chain_factory, BaseLanguageModel):
run_inputs, run_outputs = None, None
run_type = "llm"
else:
run_type = "chain"
if data_type in (DataType.chat, DataType.llm):
val = data_type.value if isinstance(data_type, Enum) else data_type
raise ValueError(
"Cannot evaluate a chain on dataset with "
f"data_type={val}. "
"Please specify a dataset with the default 'kv' data type."
)
chain = llm_or_chain_factory()
run_inputs = chain.input_keys if isinstance(chain, Chain) else None
run_outputs = chain.output_keys if isinstance(chain, Chain) else None
run_evaluators = _load_run_evaluators(
evaluation,
run_type,
data_type,
list(examples[0].outputs) if examples[0].outputs else None,
run_inputs,
run_outputs,
)
else:
# TODO: Create a default helpfulness evaluator
run_evaluators = None
return run_evaluators
def _determine_input_key(
config: smith_eval.RunEvalConfig,
run_inputs: Optional[List[str]],
) -> Optional[str]:
input_key = None
if config.input_key:
input_key = config.input_key
if run_inputs and input_key not in run_inputs:
logger.warning(
f"Input key {input_key} not in chain's specified"
f" input keys {run_inputs}. Evaluation behavior may be undefined."
)
elif run_inputs and len(run_inputs) == 1:
input_key = run_inputs[0]
elif run_inputs is not None and len(run_inputs) > 1:
logger.warning(
f"Chain expects multiple input keys: {run_inputs},"
f" Evaluator is likely to fail. Evaluation behavior may be undefined."
" Specify an input_key in the RunEvalConfig to avoid this warning."
)
return input_key
def _determine_prediction_key(
config: smith_eval.RunEvalConfig,
run_outputs: Optional[List[str]],
) -> Optional[str]:
prediction_key = None
if config.prediction_key:
prediction_key = config.prediction_key
if run_outputs and prediction_key not in run_outputs:
logger.warning(
f"Prediction key {prediction_key} not in chain's specified"
f" output keys {run_outputs}. Evaluation behavior may be undefined."
)
elif run_outputs and len(run_outputs) == 1:
prediction_key = run_outputs[0]
elif run_outputs is not None and len(run_outputs) > 1:
logger.warning(
f"Chain expects multiple output keys: {run_outputs},"
f" Evaluation behavior may be undefined. Specify a prediction_key"
" in the RunEvalConfig to avoid this warning."
)
return prediction_key
def _determine_reference_key(
config: smith_eval.RunEvalConfig,
example_outputs: Optional[List[str]],
) -> Optional[str]:
if config.reference_key:
reference_key = config.reference_key
if example_outputs and reference_key not in example_outputs:
raise ValueError(
f"Reference key {reference_key} not in Dataset"
f" example outputs: {example_outputs}"
)
elif example_outputs and len(example_outputs) == 1:
reference_key = list(example_outputs)[0]
else:
reference_key = None
return reference_key
def _construct_run_evaluator(
eval_config: Union[EvaluatorType, str, smith_eval_config.EvalConfig],
eval_llm: Optional[BaseLanguageModel],
run_type: str,
data_type: DataType,
example_outputs: Optional[List[str]],
reference_key: Optional[str],
input_key: Optional[str],
prediction_key: Optional[str],
) -> RunEvaluator:
if isinstance(eval_config, (EvaluatorType, str)):
if not isinstance(eval_config, EvaluatorType):
eval_config = EvaluatorType(eval_config)
evaluator_ = load_evaluator(eval_config, llm=eval_llm)
eval_type_tag = eval_config.value
else:
kwargs = {"llm": eval_llm, **eval_config.get_kwargs()}
evaluator_ = load_evaluator(eval_config.evaluator_type, **kwargs)
eval_type_tag = eval_config.evaluator_type.value
# Override keys if specified in the config
if isinstance(eval_config, smith_eval_config.SingleKeyEvalConfig):
input_key = eval_config.input_key or input_key
prediction_key = eval_config.prediction_key or prediction_key
reference_key = eval_config.reference_key or reference_key
if isinstance(evaluator_, StringEvaluator):
if evaluator_.requires_reference and reference_key is None:
raise ValueError(
f"Must specify reference_key in smith_eval.RunEvalConfig to use"
f" evaluator of type {eval_type_tag} with"
f" dataset with multiple output keys: {example_outputs}."
)
run_evaluator = smith_eval.StringRunEvaluatorChain.from_run_and_data_type(
evaluator_,
run_type,
data_type,
input_key=input_key,
prediction_key=prediction_key,
reference_key=reference_key,
tags=[eval_type_tag],
)
elif isinstance(evaluator_, PairwiseStringEvaluator):
raise NotImplementedError(
f"Run evaluator for {eval_type_tag} is not implemented."
" PairwiseStringEvaluators compare the outputs of two different models"
" rather than the output of a single model."
" Did you mean to use a StringEvaluator instead?"
"\nSee: https://python.langchain.com/docs/guides/evaluation/string/"
)
else:
raise NotImplementedError(
f"Run evaluator for {eval_type_tag} is not implemented"
)
return run_evaluator
def _get_keys(
config: smith_eval.RunEvalConfig,
run_inputs: Optional[List[str]],
run_outputs: Optional[List[str]],
example_outputs: Optional[List[str]],
) -> Tuple[Optional[str], Optional[str], Optional[str]]:
input_key = _determine_input_key(config, run_inputs)
prediction_key = _determine_prediction_key(config, run_outputs)
reference_key = _determine_reference_key(config, example_outputs)
return input_key, prediction_key, reference_key
def _load_run_evaluators(
config: smith_eval.RunEvalConfig,
run_type: str,
data_type: DataType,
example_outputs: Optional[List[str]],
run_inputs: Optional[List[str]],
run_outputs: Optional[List[str]],
) -> List[RunEvaluator]:
"""
Load run evaluators from a configuration.
Args:
config: Configuration for the run evaluators.
Returns:
A list of run evaluators.
"""
run_evaluators = []
input_key, prediction_key, reference_key = None, None, None
if (
config.evaluators
or any([isinstance(e, EvaluatorType) for e in config.evaluators])
or (
config.custom_evaluators
and any([isinstance(e, StringEvaluator) for e in config.custom_evaluators])
)
):
input_key, prediction_key, reference_key = _get_keys(
config, run_inputs, run_outputs, example_outputs
)
for eval_config in config.evaluators:
run_evaluator = _construct_run_evaluator(
eval_config,
config.eval_llm,
run_type,
data_type,
example_outputs,
reference_key,
input_key,
prediction_key,
)
run_evaluators.append(run_evaluator)
custom_evaluators = config.custom_evaluators or []
for custom_evaluator in custom_evaluators:
if isinstance(custom_evaluator, RunEvaluator):
run_evaluators.append(custom_evaluator)
elif isinstance(custom_evaluator, StringEvaluator):
run_evaluators.append(
smith_eval.StringRunEvaluatorChain.from_run_and_data_type(
custom_evaluator,
run_type,
data_type,
input_key=input_key,
prediction_key=prediction_key,
reference_key=reference_key,
)
)
else:
raise ValueError(
f"Unsupported custom evaluator: {custom_evaluator}."
f" Expected RunEvaluator or StringEvaluator."
)
return run_evaluators
### Async Helpers
async def _arun_llm(
llm: BaseLanguageModel,
inputs: Dict[str, Any],
*,
tags: Optional[List[str]] = None,
callbacks: Callbacks = None,
input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Union[str, BaseMessage]:
"""Asynchronously run the language model.
Args:
llm: The language model to run.
inputs: The input dictionary.
tags: Optional tags to add to the run.
callbacks: Optional callbacks to use during the run.
input_mapper: Optional function to map inputs to the expected format.
Returns:
The LLMResult or ChatResult.
Raises:
ValueError: If the LLM type is unsupported.
InputFormatError: If the input format is invalid.
"""
if input_mapper is not None:
prompt_or_messages = input_mapper(inputs)
if isinstance(prompt_or_messages, str):
return await llm.apredict(
prompt_or_messages, callbacks=callbacks, tags=tags
)
elif isinstance(prompt_or_messages, list) and all(
isinstance(msg, BaseMessage) for msg in prompt_or_messages
):
return await llm.apredict_messages(
prompt_or_messages, callbacks=callbacks, tags=tags
)
else:
raise InputFormatError(
"Input mapper returned invalid format"
f" {prompt_or_messages}"
"\nExpected a single string or list of chat messages."
)
else:
try:
prompt = _get_prompt(inputs)
llm_output: Union[str, BaseMessage] = await llm.apredict(
prompt, callbacks=callbacks, tags=tags
)
except InputFormatError:
messages = _get_messages(inputs)
llm_output = await llm.apredict_messages(
messages, callbacks=callbacks, tags=tags
)
return llm_output
async def _arun_chain(
chain: Union[Chain, Runnable],
inputs: Dict[str, Any],
callbacks: Callbacks,
*,
tags: Optional[List[str]] = None,
input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Union[dict, str]:
"""Run a chain asynchronously on inputs."""
inputs_ = inputs if input_mapper is None else input_mapper(inputs)
if (
isinstance(chain, Chain)
and isinstance(inputs_, dict)
and len(inputs_) == 1
and chain.input_keys
):
val = next(iter(inputs_.values()))
output = await chain.acall(val, callbacks=callbacks, tags=tags)
else:
runnable_config = RunnableConfig(tags=tags or [], callbacks=callbacks)
output = await chain.ainvoke(inputs_, config=runnable_config)
return output
async def _arun_llm_or_chain(
example: Example,
config: RunnableConfig,
*,
llm_or_chain_factory: MCF,
input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Union[dict, str, LLMResult, ChatResult]:
"""Asynchronously run the Chain or language model.
Args:
example: The example to run.
llm_or_chain_factory: The Chain or language model constructor to run.
tags: Optional tags to add to the run.
callbacks: Optional callbacks to use during the run.
input_mapper: Optional function to map the input to the expected format.
Returns:
        The output of the model or chain, or an EvalError if the run failed.
"""
chain_or_llm = (
"LLM" if isinstance(llm_or_chain_factory, BaseLanguageModel) else "Chain"
)
result = None
try:
if isinstance(llm_or_chain_factory, BaseLanguageModel):
output: Any = await _arun_llm(
llm_or_chain_factory,
example.inputs,
tags=config["tags"],
callbacks=config["callbacks"],
input_mapper=input_mapper,
)
else:
chain = llm_or_chain_factory()
output = await _arun_chain(
chain,
example.inputs,
tags=config["tags"],
callbacks=config["callbacks"],
input_mapper=input_mapper,
)
result = output
except Exception as e:
logger.warning(
f"{chain_or_llm} failed for example {example.id} "
f"with inputs {example.inputs}"
f"\n{repr(e)}"
)
result = EvalError(Error=e)
return result
## Sync Utilities
def _run_llm(
llm: BaseLanguageModel,
inputs: Dict[str, Any],
callbacks: Callbacks,
*,
tags: Optional[List[str]] = None,
input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Union[str, BaseMessage]:
"""
Run the language model on the example.
Args:
llm: The language model to run.
inputs: The input dictionary.
callbacks: The callbacks to use during the run.
tags: Optional tags to add to the run.
        input_mapper: Optional function mapping an Example's inputs to the
            format expected by the model or chain.
Returns:
The LLMResult or ChatResult.
Raises:
ValueError: If the LLM type is unsupported.
InputFormatError: If the input format is invalid.
"""
if input_mapper is not None:
prompt_or_messages = input_mapper(inputs)
if isinstance(prompt_or_messages, str):
llm_output: Union[str, BaseMessage] = llm.predict(
prompt_or_messages, callbacks=callbacks, tags=tags
)
elif isinstance(prompt_or_messages, list) and all(
isinstance(msg, BaseMessage) for msg in prompt_or_messages
):
llm_output = llm.predict_messages(
prompt_or_messages, callbacks=callbacks, tags=tags
)
else:
raise InputFormatError(
"Input mapper returned invalid format: "
f" {prompt_or_messages}"
"\nExpected a single string or list of chat messages."
)
else:
try:
llm_prompts = _get_prompt(inputs)
llm_output = llm.predict(llm_prompts, callbacks=callbacks, tags=tags)
except InputFormatError:
llm_messages = _get_messages(inputs)
            llm_output = llm.predict_messages(
                llm_messages, callbacks=callbacks, tags=tags
            )
return llm_output
def _run_chain(
chain: Union[Chain, Runnable],
inputs: Dict[str, Any],
callbacks: Callbacks,
*,
tags: Optional[List[str]] = None,
input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Union[Dict, str]:
"""Run a chain on inputs."""
inputs_ = inputs if input_mapper is None else input_mapper(inputs)
if (
isinstance(chain, Chain)
and isinstance(inputs_, dict)
and len(inputs_) == 1
and chain.input_keys
):
val = next(iter(inputs_.values()))
output = chain(val, callbacks=callbacks, tags=tags)
else:
runnable_config = RunnableConfig(tags=tags or [], callbacks=callbacks)
output = chain.invoke(inputs_, config=runnable_config)
return output
def _run_llm_or_chain(
example: Example,
config: RunnableConfig,
*,
llm_or_chain_factory: MCF,
input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Union[dict, str, LLMResult, ChatResult]:
"""
Run the Chain or language model synchronously.
Args:
example: The example to run.
llm_or_chain_factory: The Chain or language model constructor to run.
        config: The RunnableConfig whose tags and callbacks are used for the run.
        input_mapper: Optional function to map the input to the expected format.
    Returns:
        Union[dict, str, LLMResult, ChatResult]:
            The output of the model or chain, or an ``EvalError`` if the run failed.
"""
chain_or_llm = (
"LLM" if isinstance(llm_or_chain_factory, BaseLanguageModel) else "Chain"
)
result = None
try:
if isinstance(llm_or_chain_factory, BaseLanguageModel):
output: Any = _run_llm(
llm_or_chain_factory,
example.inputs,
config["callbacks"],
tags=config["tags"],
input_mapper=input_mapper,
)
else:
chain = llm_or_chain_factory()
output = _run_chain(
chain,
example.inputs,
config["callbacks"],
tags=config["tags"],
input_mapper=input_mapper,
)
result = output
except Exception as e:
error_type = type(e).__name__
logger.warning(
f"{chain_or_llm} failed for example {example.id} "
f"with inputs {example.inputs}"
f"\nError Type: {error_type}, Message: {e}"
)
result = EvalError(Error=e)
return result
## Public API
def _prepare_eval_run(
client: Client,
dataset_name: str,
llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY,
project_name: str,
project_metadata: Optional[Dict[str, Any]] = None,
tags: Optional[List[str]] = None,
) -> Tuple[MCF, str, Dataset, List[Example]]:
wrapped_model = _wrap_in_chain_factory(llm_or_chain_factory, dataset_name)
dataset = client.read_dataset(dataset_name=dataset_name)
try:
project_extra: dict = {"metadata": project_metadata} if project_metadata else {}
if tags:
project_extra["tags"] = tags
project = client.create_project(
project_name,
reference_dataset_id=dataset.id,
project_extra=project_extra,
)
except (HTTPError, ValueError, LangSmithError) as e:
if "already exists " not in str(e):
raise e
uid = uuid.uuid4()
example_msg = f"""
run_on_dataset(
...
project_name="{project_name} - {uid}", # Update since {project_name} already exists
)
"""
raise ValueError(
f"Test project {project_name} already exists. Please use a different name:"
f"\n\n{example_msg}"
)
comparison_url = dataset.url + f"/compare?selectedSessions={project.id}"
print(
f"View the evaluation results for project '{project_name}'"
f" at:\n{comparison_url}\n\n"
f"View all tests for Dataset {dataset_name} at:\n{dataset.url}",
flush=True,
)
examples = list(client.list_examples(dataset_id=dataset.id))
if not examples:
raise ValueError(f"Dataset {dataset_name} has no example rows.")
return wrapped_model, project_name, dataset, examples
def _prepare_run_on_dataset(
client: Client,
dataset_name: str,
llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY,
project_name: Optional[str],
evaluation: Optional[smith_eval.RunEvalConfig] = None,
tags: Optional[List[str]] = None,
input_mapper: Optional[Callable[[Dict], Any]] = None,
concurrency_level: int = 5,
project_metadata: Optional[Dict[str, Any]] = None,
) -> Tuple[MCF, str, List[Example], List[RunnableConfig]]:
project_name = project_name or name_generation.random_name()
wrapped_model, project_name, dataset, examples = _prepare_eval_run(
client,
dataset_name,
llm_or_chain_factory,
project_name,
project_metadata=project_metadata,
tags=tags,
)
wrapped_model = _wrap_in_chain_factory(llm_or_chain_factory)
run_evaluators = _setup_evaluation(
wrapped_model, examples, evaluation, dataset.data_type or DataType.kv
)
_validate_example_inputs(examples[0], wrapped_model, input_mapper)
progress_bar = progress.ProgressBarCallback(len(examples))
configs = [
RunnableConfig(
callbacks=[
LangChainTracer(
project_name=project_name,
client=client,
use_threading=False,
example_id=example.id,
),
EvaluatorCallbackHandler(
evaluators=run_evaluators or [],
client=client,
example_id=example.id,
max_concurrency=0,
),
progress_bar,
],
tags=tags or [],
max_concurrency=concurrency_level,
)
for example in examples
]
return wrapped_model, project_name, examples, configs
def _collect_test_results(
examples: List[Example],
batch_results: List[Union[dict, str, LLMResult, ChatResult]],
configs: List[RunnableConfig],
project_name: str,
) -> TestResult:
wait_for_all_evaluators()
all_eval_results = {}
all_execution_time = {}
all_run_ids = {}
for c in configs:
for callback in cast(list, c["callbacks"]):
if isinstance(callback, EvaluatorCallbackHandler):
eval_results = callback.logged_eval_results
all_eval_results.update(
{example_id: v for (_, example_id), v in eval_results.items()}
)
elif isinstance(callback, LangChainTracer):
run = callback.latest_run
example_id = callback.example_id
run_id = str(run.id) if run else None
execution_time = (
(run.end_time - run.start_time).total_seconds()
if run and run.end_time
else None
)
all_execution_time[str(example_id)] = execution_time
all_run_ids[str(example_id)] = run_id
results: dict = {}
for example, output in zip(examples, batch_results):
feedback = all_eval_results.get(str(example.id), [])
results[str(example.id)] = {
"input": example.inputs,
"feedback": feedback,
"execution_time": all_execution_time.get(str(example.id)),
"run_id": all_run_ids.get(str(example.id)),
}
if isinstance(output, EvalError):
results[str(example.id)]["Error"] = output.Error
else:
results[str(example.id)]["output"] = output
if example.outputs:
results[str(example.id)]["reference"] = example.outputs
return TestResult(
project_name=project_name,
results=results,
)
def _is_jupyter_environment() -> bool:
try:
from IPython import get_ipython
res = get_ipython()
        return res is not None and "zmqshell" in str(type(res))
except ImportError:
return False
def _display_aggregate_results(aggregate_results: pd.DataFrame) -> None:
if _is_jupyter_environment():
from IPython.display import HTML, display
display(HTML("<h3>Experiment Results:</h3>"))
display(aggregate_results)
else:
formatted_string = aggregate_results.to_string(
float_format=lambda x: f"{x:.2f}", justify="right"
)
print("\n Experiment Results:")
print(formatted_string)
_INPUT_MAPPER_DEP_WARNING = (
    "The input_mapper argument is deprecated and "
    "will be removed in a future release. Please add a "
    "RunnableLambda to your chain to map inputs to the expected format"
    " instead. Example:\n"
    "def construct_chain():\n"
    "    my_chain = ...\n"
    "    input_mapper = RunnableLambda(lambda x: {'my_input_key': x['other_key']})\n"
    "    return input_mapper | my_chain\n"
"run_on_dataset(..., llm_or_chain_factory=construct_chain)\n"
"(See https://api.python.langchain.com/en/latest/schema/"
"langchain.schema.runnable.base.RunnableLambda.html)"
)
async def arun_on_dataset(
client: Optional[Client],
dataset_name: str,
llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY,
*,
evaluation: Optional[smith_eval.RunEvalConfig] = None,
concurrency_level: int = 5,
project_name: Optional[str] = None,
project_metadata: Optional[Dict[str, Any]] = None,
verbose: bool = False,
tags: Optional[List[str]] = None,
**kwargs: Any,
) -> Dict[str, Any]:
input_mapper = kwargs.pop("input_mapper", None)
if input_mapper:
warn_deprecated("0.0.305", message=_INPUT_MAPPER_DEP_WARNING, pending=True)
if kwargs:
warn_deprecated(
"0.0.305",
message="The following arguments are deprecated and "
"will be removed in a future release: "
f"{kwargs.keys()}.",
removal="0.0.305",
)
client = client or Client()
wrapped_model, project_name, examples, configs = _prepare_run_on_dataset(
client,
dataset_name,
llm_or_chain_factory,
project_name,
evaluation,
tags,
input_mapper,
concurrency_level,
project_metadata=project_metadata,
)
batch_results = await runnable_utils.gather_with_concurrency(
configs[0].get("max_concurrency"),
*map(
functools.partial(
_arun_llm_or_chain,
llm_or_chain_factory=wrapped_model,
input_mapper=input_mapper,
),
examples,
configs,
),
)
results = _collect_test_results(examples, batch_results, configs, project_name)
if verbose:
try:
agg_feedback = results.get_aggregate_feedback()
print("\n Eval quantiles:")
print(agg_feedback)
except Exception as e:
logger.debug(f"Failed to print aggregate feedback: {repr(e)}")
return results
def run_on_dataset(
client: Optional[Client],
dataset_name: str,
llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY,
*,
evaluation: Optional[smith_eval.RunEvalConfig] = None,
concurrency_level: int = 5,
project_name: Optional[str] = None,
project_metadata: Optional[Dict[str, Any]] = None,
verbose: bool = False,
tags: Optional[List[str]] = None,
**kwargs: Any,
) -> Dict[str, Any]:
input_mapper = kwargs.pop("input_mapper", None)
if input_mapper:
warn_deprecated("0.0.305", message=_INPUT_MAPPER_DEP_WARNING, pending=True)
if kwargs:
warn_deprecated(
"0.0.305",
message="The following arguments are deprecated and "
"will be removed in a future release: "
f"{kwargs.keys()}.",
removal="0.0.305",
)
client = client or Client()
wrapped_model, project_name, examples, configs = _prepare_run_on_dataset(
client,
dataset_name,
llm_or_chain_factory,
project_name,
evaluation,
tags,
input_mapper,
concurrency_level,
project_metadata=project_metadata,
)
if concurrency_level == 0:
batch_results = [
_run_llm_or_chain(
example,
config,
llm_or_chain_factory=wrapped_model,
input_mapper=input_mapper,
)
for example, config in zip(examples, configs)
]
else:
with runnable_config.get_executor_for_config(configs[0]) as executor:
batch_results = list(
executor.map(
functools.partial(
_run_llm_or_chain,
llm_or_chain_factory=wrapped_model,
input_mapper=input_mapper,
),
examples,
configs,
)
)
results = _collect_test_results(examples, batch_results, configs, project_name)
if verbose:
try:
agg_feedback = results.get_aggregate_feedback()
_display_aggregate_results(agg_feedback)
except Exception as e:
logger.debug(f"Failed to print aggregate feedback: {repr(e)}")
return results
_RUN_ON_DATASET_DOCSTRING = """
Run the Chain or language model on a dataset and store traces
to the specified project name.
Args:
dataset_name: Name of the dataset to run the chain on.
llm_or_chain_factory: Language model or Chain constructor to run
over the dataset. The Chain constructor is used to permit
independent calls on each example without carrying over state.
evaluation: Configuration for evaluators to run on the
results of the chain
concurrency_level: The number of async tasks to run concurrently.
project_name: Name of the project to store the traces in.
Defaults to {dataset_name}-{chain class name}-{datetime}.
project_metadata: Optional metadata to add to the project.
        Useful for storing information about the test variant
(prompt version, model version, etc.)
client: LangSmith client to use to access the dataset and to
log feedback and run traces.
verbose: Whether to print progress.
tags: Tags to add to each run in the project.
Returns:
A dictionary containing the run's project name and the resulting model outputs.
For the (usually faster) async version of this function, see :func:`arun_on_dataset`.
Examples
--------
.. code-block:: python
from langsmith import Client
from langchain.chat_models import ChatOpenAI
from langchain.chains import LLMChain
        from langchain.smith import RunEvalConfig, run_on_dataset
# Chains may have memory. Passing in a constructor function lets the
# evaluation framework avoid cross-contamination between runs.
def construct_chain():
llm = ChatOpenAI(temperature=0)
chain = LLMChain.from_string(
llm,
"What's the answer to {your_input_key}"
)
return chain
# Load off-the-shelf evaluators via config or the EvaluatorType (string or enum)
        evaluation_config = RunEvalConfig(
evaluators=[
"qa", # "Correctness" against a reference answer
"embedding_distance",
                RunEvalConfig.Criteria("helpfulness"),
                RunEvalConfig.Criteria({
"fifth-grader-score": "Do you have to be smarter than a fifth grader to answer this question?"
}),
]
)
client = Client()
run_on_dataset(
client,
"<my_dataset_name>",
construct_chain,
evaluation=evaluation_config,
)
You can also create custom evaluators by subclassing the
:class:`StringEvaluator <langchain.evaluation.schema.StringEvaluator>`
or LangSmith's `RunEvaluator` classes.
.. code-block:: python
from typing import Optional
from langchain.evaluation import StringEvaluator
class MyStringEvaluator(StringEvaluator):
@property
def requires_input(self) -> bool:
return False
@property
def requires_reference(self) -> bool:
return True
@property
def evaluation_name(self) -> str:
return "exact_match"
def _evaluate_strings(self, prediction, reference=None, input=None, **kwargs) -> dict:
return {"score": prediction == reference}
        evaluation_config = RunEvalConfig(
custom_evaluators = [MyStringEvaluator()],
)
run_on_dataset(
client,
"<my_dataset_name>",
construct_chain,
evaluation=evaluation_config,
)
""" # noqa: E501
run_on_dataset.__doc__ = _RUN_ON_DATASET_DOCSTRING
arun_on_dataset.__doc__ = _RUN_ON_DATASET_DOCSTRING.replace(
"run_on_dataset(", "await arun_on_dataset("
)
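# --- Illustrative usage sketch (editor's addition, not part of the module above) ---
# A minimal example of driving the async entry point from a script. The dataset
# name, prompt, and model are placeholders; running this needs LangSmith and
# OpenAI credentials in the environment.
if __name__ == "__main__":
    import asyncio

    from langchain.chains import LLMChain
    from langchain.chat_models import ChatOpenAI
    from langchain.smith import RunEvalConfig

    def _construct_chain() -> LLMChain:
        # A fresh chain per example avoids cross-run state such as memory.
        return LLMChain.from_string(
            ChatOpenAI(temperature=0), "Answer briefly: {input}"
        )

    async def _main() -> None:
        results = await arun_on_dataset(
            Client(),
            "my-eval-dataset",  # hypothetical dataset name
            _construct_chain,
            evaluation=RunEvalConfig(evaluators=["qa"]),
            concurrency_level=5,
            verbose=True,
        )
        print(results["project_name"])

    asyncio.run(_main())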
| [
"['PLACEHOLDER']",
"[]"
] |
2024-01-10 | Mintplex-Labs/langchain-python | libs~langchain~langchain~storage~file_system.py | import re
from pathlib import Path
from typing import Iterator, List, Optional, Sequence, Tuple, Union
from langchain_core.stores import ByteStore
from langchain.storage.exceptions import InvalidKeyException
class LocalFileStore(ByteStore):
"""BaseStore interface that works on the local file system.
Examples:
Create a LocalFileStore instance and perform operations on it:
.. code-block:: python
from langchain.storage import LocalFileStore
# Instantiate the LocalFileStore with the root path
file_store = LocalFileStore("/path/to/root")
# Set values for keys
file_store.mset([("key1", b"value1"), ("key2", b"value2")])
# Get values for keys
values = file_store.mget(["key1", "key2"]) # Returns [b"value1", b"value2"]
# Delete keys
file_store.mdelete(["key1"])
# Iterate over keys
for key in file_store.yield_keys():
print(key)
"""
def __init__(self, root_path: Union[str, Path]) -> None:
"""Implement the BaseStore interface for the local file system.
Args:
root_path (Union[str, Path]): The root path of the file store. All keys are
interpreted as paths relative to this root.
"""
self.root_path = Path(root_path)
def _get_full_path(self, key: str) -> Path:
"""Get the full path for a given key relative to the root path.
Args:
key (str): The key relative to the root path.
Returns:
Path: The full path for the given key.
"""
if not re.match(r"^[a-zA-Z0-9_.\-/]+$", key):
raise InvalidKeyException(f"Invalid characters in key: {key}")
return self.root_path / key
def mget(self, keys: Sequence[str]) -> List[Optional[bytes]]:
"""Get the values associated with the given keys.
Args:
keys: A sequence of keys.
Returns:
A sequence of optional values associated with the keys.
If a key is not found, the corresponding value will be None.
"""
values: List[Optional[bytes]] = []
for key in keys:
full_path = self._get_full_path(key)
if full_path.exists():
value = full_path.read_bytes()
values.append(value)
else:
values.append(None)
return values
def mset(self, key_value_pairs: Sequence[Tuple[str, bytes]]) -> None:
"""Set the values for the given keys.
Args:
key_value_pairs: A sequence of key-value pairs.
Returns:
None
"""
for key, value in key_value_pairs:
full_path = self._get_full_path(key)
full_path.parent.mkdir(parents=True, exist_ok=True)
full_path.write_bytes(value)
def mdelete(self, keys: Sequence[str]) -> None:
"""Delete the given keys and their associated values.
Args:
keys (Sequence[str]): A sequence of keys to delete.
Returns:
None
"""
for key in keys:
full_path = self._get_full_path(key)
if full_path.exists():
full_path.unlink()
def yield_keys(self, prefix: Optional[str] = None) -> Iterator[str]:
"""Get an iterator over keys that match the given prefix.
Args:
prefix (Optional[str]): The prefix to match.
Returns:
Iterator[str]: An iterator over keys that match the given prefix.
"""
prefix_path = self._get_full_path(prefix) if prefix else self.root_path
for file in prefix_path.rglob("*"):
if file.is_file():
relative_path = file.relative_to(self.root_path)
yield str(relative_path)
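# --- Illustrative usage sketch (editor's addition, not part of the library code) ---
# One common way to use LocalFileStore is as the byte store behind cached
# embeddings, so repeated texts are only embedded once. The cache directory and
# the OpenAIEmbeddings choice below are assumptions for illustration.
if __name__ == "__main__":
    from langchain.embeddings import CacheBackedEmbeddings, OpenAIEmbeddings

    store = LocalFileStore("./.embedding_cache")  # hypothetical cache directory
    underlying = OpenAIEmbeddings()
    cached_embedder = CacheBackedEmbeddings.from_bytes_store(
        underlying, store, namespace=underlying.model
    )
    # The first call computes and persists the vectors; the repeated text is
    # then served from the files written under ./.embedding_cache.
    cached_embedder.embed_documents(["hello world", "hello world"])
    print(sorted(store.yield_keys())[:3])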
| [] |
2024-01-10 | Mintplex-Labs/langchain-python | libs~langchain~langchain~chat_models~baidu_qianfan_endpoint.py | from __future__ import annotations
import logging
from typing import Any, AsyncIterator, Dict, Iterator, List, Mapping, Optional, cast
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
ChatMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.pydantic_v1 import Field, SecretStr, root_validator
from langchain_core.utils import convert_to_secret_str
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
def convert_message_to_dict(message: BaseMessage) -> dict:
"""Convert a message to a dictionary that can be passed to the API."""
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
if "function_call" in message.additional_kwargs:
message_dict["function_call"] = message.additional_kwargs["function_call"]
            # For function-call-only messages, content must be None, not an empty string.
if message_dict["content"] == "":
message_dict["content"] = None
elif isinstance(message, FunctionMessage):
message_dict = {
"role": "function",
"content": message.content,
"name": message.name,
}
else:
raise TypeError(f"Got unknown type {message}")
return message_dict
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> AIMessage:
content = _dict.get("result", "") or ""
if _dict.get("function_call"):
additional_kwargs = {"function_call": dict(_dict["function_call"])}
if "thoughts" in additional_kwargs["function_call"]:
# align to api sample, which affects the llm function_call output
additional_kwargs["function_call"].pop("thoughts")
else:
additional_kwargs = {}
return AIMessage(
content=content,
additional_kwargs={**_dict.get("body", {}), **additional_kwargs},
)
class QianfanChatEndpoint(BaseChatModel):
"""Baidu Qianfan chat models.
    To use, you should have the ``qianfan`` python package installed, and
    the environment variables ``QIANFAN_AK`` and ``QIANFAN_SK`` set with your
    API key and Secret Key.
    ak and sk are required parameters, which you can obtain from
    https://cloud.baidu.com/product/wenxinworkshop
Example:
.. code-block:: python
from langchain.chat_models import QianfanChatEndpoint
qianfan_chat = QianfanChatEndpoint(model="ERNIE-Bot",
endpoint="your_endpoint", qianfan_ak="your_ak", qianfan_sk="your_sk")
"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
client: Any
qianfan_ak: Optional[SecretStr] = None
qianfan_sk: Optional[SecretStr] = None
streaming: Optional[bool] = False
"""Whether to stream the results or not."""
request_timeout: Optional[int] = 60
"""request timeout for chat http requests"""
top_p: Optional[float] = 0.8
temperature: Optional[float] = 0.95
penalty_score: Optional[float] = 1
"""Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo.
    For other models, passing these params will not affect the result.
"""
model: str = "ERNIE-Bot-turbo"
"""Model name.
    You can find the available models at
    https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu
    Preset models are mapped to an endpoint.
`model` will be ignored if `endpoint` is set.
Default is ERNIE-Bot-turbo.
"""
    endpoint: Optional[str] = None
    """Endpoint of the Qianfan LLM, required if a custom model is used."""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
values["qianfan_ak"] = convert_to_secret_str(
get_from_dict_or_env(
values,
"qianfan_ak",
"QIANFAN_AK",
)
)
values["qianfan_sk"] = convert_to_secret_str(
get_from_dict_or_env(
values,
"qianfan_sk",
"QIANFAN_SK",
)
)
params = {
"ak": values["qianfan_ak"].get_secret_value(),
"sk": values["qianfan_sk"].get_secret_value(),
"model": values["model"],
"stream": values["streaming"],
}
if values["endpoint"] is not None and values["endpoint"] != "":
params["endpoint"] = values["endpoint"]
try:
import qianfan
values["client"] = qianfan.ChatCompletion(**params)
except ImportError:
raise ValueError(
"qianfan package not found, please install it with "
"`pip install qianfan`"
)
return values
@property
def _identifying_params(self) -> Dict[str, Any]:
return {
**{"endpoint": self.endpoint, "model": self.model},
**super()._identifying_params,
}
@property
def _llm_type(self) -> str:
"""Return type of chat_model."""
return "baidu-qianfan-chat"
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Qianfan API."""
normal_params = {
"model": self.model,
"endpoint": self.endpoint,
"stream": self.streaming,
"request_timeout": self.request_timeout,
"top_p": self.top_p,
"temperature": self.temperature,
"penalty_score": self.penalty_score,
}
return {**normal_params, **self.model_kwargs}
def _convert_prompt_msg_params(
self,
messages: List[BaseMessage],
**kwargs: Any,
) -> Dict[str, Any]:
"""
Converts a list of messages into a dictionary containing the message content
and default parameters.
Args:
messages (List[BaseMessage]): The list of messages.
**kwargs (Any): Optional arguments to add additional parameters to the
resulting dictionary.
Returns:
Dict[str, Any]: A dictionary containing the message content and default
parameters.
"""
messages_dict: Dict[str, Any] = {
"messages": [
convert_message_to_dict(m)
for m in messages
if not isinstance(m, SystemMessage)
]
}
for i in [i for i, m in enumerate(messages) if isinstance(m, SystemMessage)]:
if "system" not in messages_dict:
messages_dict["system"] = ""
messages_dict["system"] += cast(str, messages[i].content) + "\n"
return {
**messages_dict,
**self._default_params,
**kwargs,
}
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
    ) -> ChatResult:
        """Call out to a Qianfan model endpoint for each generation with a prompt.
Args:
messages: The messages to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = qianfan_model("Tell me a joke.")
"""
if self.streaming:
completion = ""
for chunk in self._stream(messages, stop, run_manager, **kwargs):
completion += chunk.text
lc_msg = AIMessage(content=completion, additional_kwargs={})
gen = ChatGeneration(
message=lc_msg,
generation_info=dict(finish_reason="stop"),
)
return ChatResult(
generations=[gen],
llm_output={"token_usage": {}, "model_name": self.model},
)
params = self._convert_prompt_msg_params(messages, **kwargs)
response_payload = self.client.do(**params)
lc_msg = _convert_dict_to_message(response_payload)
gen = ChatGeneration(
message=lc_msg,
generation_info={
"finish_reason": "stop",
**response_payload.get("body", {}),
},
)
token_usage = response_payload.get("usage", {})
llm_output = {"token_usage": token_usage, "model_name": self.model}
return ChatResult(generations=[gen], llm_output=llm_output)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
completion = ""
token_usage = {}
async for chunk in self._astream(messages, stop, run_manager, **kwargs):
completion += chunk.text
lc_msg = AIMessage(content=completion, additional_kwargs={})
gen = ChatGeneration(
message=lc_msg,
generation_info=dict(finish_reason="stop"),
)
return ChatResult(
generations=[gen],
llm_output={"token_usage": {}, "model_name": self.model},
)
params = self._convert_prompt_msg_params(messages, **kwargs)
response_payload = await self.client.ado(**params)
lc_msg = _convert_dict_to_message(response_payload)
generations = []
gen = ChatGeneration(
message=lc_msg,
generation_info={
"finish_reason": "stop",
**response_payload.get("body", {}),
},
)
generations.append(gen)
token_usage = response_payload.get("usage", {})
llm_output = {"token_usage": token_usage, "model_name": self.model}
return ChatResult(generations=generations, llm_output=llm_output)
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
params = self._convert_prompt_msg_params(messages, **kwargs)
for res in self.client.do(**params):
if res:
msg = _convert_dict_to_message(res)
chunk = ChatGenerationChunk(
text=res["result"],
message=AIMessageChunk(
content=msg.content,
role="assistant",
additional_kwargs=msg.additional_kwargs,
),
)
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
params = self._convert_prompt_msg_params(messages, **kwargs)
async for res in await self.client.ado(**params):
if res:
msg = _convert_dict_to_message(res)
chunk = ChatGenerationChunk(
text=res["result"],
message=AIMessageChunk(
content=msg.content,
role="assistant",
additional_kwargs=msg.additional_kwargs,
),
)
yield chunk
if run_manager:
await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
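# --- Illustrative usage sketch (editor's addition, not part of the library code) ---
# A minimal sketch of one-shot and streaming calls, assuming QIANFAN_AK and
# QIANFAN_SK are set in the environment; the model name is just an example.
if __name__ == "__main__":
    chat = QianfanChatEndpoint(model="ERNIE-Bot-turbo", temperature=0.7)
    print(chat.invoke([HumanMessage(content="Hello, who are you?")]).content)
    # Token-by-token streaming via the standard Runnable interface.
    for chunk in chat.stream([HumanMessage(content="Tell me a short joke.")]):
        print(chunk.content, end="", flush=True)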
| [] |
2024-01-10 | Mintplex-Labs/langchain-python | libs~langchain~tests~integration_tests~chat_models~test_vertexai.py | """Test Vertex AI API wrapper.
In order to run this test, you need to install the Vertex AI SDK:
pip install google-cloud-aiplatform>=1.35.0
Your end-user credentials would be used to make the calls (make sure you've run
`gcloud auth login` first).
"""
from typing import Optional
from unittest.mock import MagicMock, Mock, patch
import pytest
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
HumanMessage,
SystemMessage,
)
from langchain_core.outputs import LLMResult
from langchain.chat_models import ChatVertexAI
from langchain.chat_models.vertexai import _parse_chat_history, _parse_examples
@pytest.mark.parametrize("model_name", [None, "codechat-bison", "chat-bison"])
def test_vertexai_instantiation(model_name: str) -> None:
if model_name:
model = ChatVertexAI(model_name=model_name)
else:
model = ChatVertexAI()
assert model._llm_type == "vertexai"
assert model.model_name == model.client._model_id
@pytest.mark.scheduled
@pytest.mark.parametrize("model_name", [None, "codechat-bison", "chat-bison"])
def test_vertexai_single_call(model_name: str) -> None:
if model_name:
model = ChatVertexAI(model_name=model_name)
else:
model = ChatVertexAI()
message = HumanMessage(content="Hello")
response = model([message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
@pytest.mark.scheduled
def test_candidates() -> None:
model = ChatVertexAI(model_name="chat-bison@001", temperature=0.3, n=2)
message = HumanMessage(content="Hello")
response = model.generate(messages=[[message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 1
assert len(response.generations[0]) == 2
@pytest.mark.scheduled
async def test_vertexai_agenerate() -> None:
model = ChatVertexAI(temperature=0)
message = HumanMessage(content="Hello")
response = await model.agenerate([[message]])
assert isinstance(response, LLMResult)
assert isinstance(response.generations[0][0].message, AIMessage) # type: ignore
sync_response = model.generate([[message]])
assert response.generations[0][0] == sync_response.generations[0][0]
@pytest.mark.scheduled
async def test_vertexai_stream() -> None:
model = ChatVertexAI(temperature=0)
message = HumanMessage(content="Hello")
sync_response = model.stream([message])
for chunk in sync_response:
assert isinstance(chunk, AIMessageChunk)
@pytest.mark.scheduled
def test_vertexai_single_call_with_context() -> None:
model = ChatVertexAI()
raw_context = (
"My name is Ned. You are my personal assistant. My favorite movies "
"are Lord of the Rings and Hobbit."
)
question = (
"Hello, could you recommend a good movie for me to watch this evening, please?"
)
context = SystemMessage(content=raw_context)
message = HumanMessage(content=question)
response = model([context, message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
@pytest.mark.scheduled
def test_vertexai_single_call_with_examples() -> None:
model = ChatVertexAI()
raw_context = "My name is Ned. You are my personal assistant."
question = "2+2"
text_question, text_answer = "4+4", "8"
inp = HumanMessage(content=text_question)
output = AIMessage(content=text_answer)
context = SystemMessage(content=raw_context)
message = HumanMessage(content=question)
response = model([context, message], examples=[inp, output])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
@pytest.mark.scheduled
@pytest.mark.parametrize("model_name", [None, "codechat-bison", "chat-bison"])
def test_vertexai_single_call_with_history(model_name: str) -> None:
if model_name:
model = ChatVertexAI(model_name=model_name)
else:
model = ChatVertexAI()
text_question1, text_answer1 = "How much is 2+2?", "4"
text_question2 = "How much is 3+3?"
message1 = HumanMessage(content=text_question1)
message2 = AIMessage(content=text_answer1)
message3 = HumanMessage(content=text_question2)
response = model([message1, message2, message3])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
def test_parse_chat_history_correct() -> None:
from vertexai.language_models import ChatMessage
text_context = (
"My name is Ned. You are my personal assistant. My "
"favorite movies are Lord of the Rings and Hobbit."
)
context = SystemMessage(content=text_context)
text_question = (
"Hello, could you recommend a good movie for me to watch this evening, please?"
)
question = HumanMessage(content=text_question)
text_answer = (
"Sure, You might enjoy The Lord of the Rings: The Fellowship of the Ring "
"(2001): This is the first movie in the Lord of the Rings trilogy."
)
answer = AIMessage(content=text_answer)
history = _parse_chat_history([context, question, answer, question, answer])
assert history.context == context.content
assert len(history.history) == 4
assert history.history == [
ChatMessage(content=text_question, author="user"),
ChatMessage(content=text_answer, author="bot"),
ChatMessage(content=text_question, author="user"),
ChatMessage(content=text_answer, author="bot"),
]
def test_vertexai_single_call_fails_no_message() -> None:
chat = ChatVertexAI()
with pytest.raises(ValueError) as exc_info:
_ = chat([])
assert (
str(exc_info.value)
== "You should provide at least one message to start the chat!"
)
@pytest.mark.parametrize("stop", [None, "stop1"])
def test_vertexai_args_passed(stop: Optional[str]) -> None:
response_text = "Goodbye"
user_prompt = "Hello"
prompt_params = {
"max_output_tokens": 1,
"temperature": 10000.0,
"top_k": 10,
"top_p": 0.5,
}
# Mock the library to ensure the args are passed correctly
with patch(
"vertexai.language_models._language_models.ChatModel.start_chat"
) as start_chat:
mock_response = MagicMock()
mock_response.candidates = [Mock(text=response_text)]
mock_chat = MagicMock()
start_chat.return_value = mock_chat
mock_send_message = MagicMock(return_value=mock_response)
mock_chat.send_message = mock_send_message
model = ChatVertexAI(**prompt_params)
message = HumanMessage(content=user_prompt)
if stop:
response = model([message], stop=[stop])
else:
response = model([message])
assert response.content == response_text
mock_send_message.assert_called_once_with(user_prompt, candidate_count=1)
expected_stop_sequence = [stop] if stop else None
start_chat.assert_called_once_with(
context=None,
message_history=[],
**prompt_params,
stop_sequences=expected_stop_sequence,
)
def test_parse_examples_correct() -> None:
from vertexai.language_models import InputOutputTextPair
text_question = (
"Hello, could you recommend a good movie for me to watch this evening, please?"
)
question = HumanMessage(content=text_question)
text_answer = (
"Sure, You might enjoy The Lord of the Rings: The Fellowship of the Ring "
"(2001): This is the first movie in the Lord of the Rings trilogy."
)
answer = AIMessage(content=text_answer)
examples = _parse_examples([question, answer, question, answer])
assert len(examples) == 2
assert examples == [
InputOutputTextPair(input_text=text_question, output_text=text_answer),
InputOutputTextPair(input_text=text_question, output_text=text_answer),
]
def test_parse_examples_fails_wrong_sequence() -> None:
with pytest.raises(ValueError) as exc_info:
_ = _parse_examples([AIMessage(content="a")])
print(str(exc_info.value))
assert (
str(exc_info.value)
== "Expect examples to have an even amount of messages, got 1."
)
| [
"{'max_output_tokens': 1, 'temperature': 10000.0, 'top_k': 10, 'top_p': 0.5}",
"a",
"My name is Ned. You are my personal assistant.",
"2+2",
"Hello",
"How much is 3+3?"
] |
2024-01-10 | Mintplex-Labs/langchain-python | libs~langchain~tests~integration_tests~llms~test_volcengine_maas.py | """Test volc engine maas LLM model."""
from typing import Generator
from langchain_core.outputs import LLMResult
from langchain.llms.volcengine_maas import VolcEngineMaasLLM
def test_default_call() -> None:
"""Test valid call to volc engine."""
llm = VolcEngineMaasLLM()
output = llm("tell me a joke")
assert isinstance(output, str)
def test_generate() -> None:
"""Test valid call to volc engine."""
llm = VolcEngineMaasLLM()
output = llm.generate(["tell me a joke"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
def test_generate_stream() -> None:
"""Test valid call to volc engine."""
llm = VolcEngineMaasLLM(streaming=True)
output = llm.stream("tell me a joke")
assert isinstance(output, Generator)
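# --- Illustrative usage sketch (editor's addition, not part of the tests) ---
# How the streaming generator asserted above might be consumed in application
# code. Credentials are read from the environment; the exact variable names are
# an assumption here, so check the VolcEngineMaasLLM documentation.
if __name__ == "__main__":
    llm = VolcEngineMaasLLM(streaming=True)
    for chunk in llm.stream("tell me a joke"):
        print(chunk, end="", flush=True)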
| [] |
2024-01-10 | Mintplex-Labs/langchain-python | templates~rag-timescale-conversation~rag_timescale_conversation~load_sample_dataset.py | import os
import tempfile
from datetime import datetime, timedelta
from typing import Optional
import requests
from langchain.document_loaders import JSONLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores.timescalevector import TimescaleVector
from timescale_vector import client
def parse_date(date_string: str) -> Optional[datetime]:
if date_string is None:
return None
time_format = "%a %b %d %H:%M:%S %Y %z"
return datetime.strptime(date_string, time_format)
def extract_metadata(record: dict, metadata: dict) -> dict:
dt = parse_date(record["date"])
metadata["id"] = str(client.uuid_from_time(dt))
if dt is not None:
metadata["date"] = dt.isoformat()
else:
metadata["date"] = None
metadata["author"] = record["author"]
metadata["commit_hash"] = record["commit"]
return metadata
def load_ts_git_dataset(
service_url,
collection_name="timescale_commits",
num_records: int = 500,
partition_interval=timedelta(days=7),
):
json_url = "https://s3.amazonaws.com/assets.timescale.com/ai/ts_git_log.json"
tmp_file = "ts_git_log.json"
temp_dir = tempfile.gettempdir()
json_file_path = os.path.join(temp_dir, tmp_file)
if not os.path.exists(json_file_path):
response = requests.get(json_url)
if response.status_code == 200:
with open(json_file_path, "w") as json_file:
json_file.write(response.text)
else:
print(f"Failed to download JSON file. Status code: {response.status_code}")
loader = JSONLoader(
file_path=json_file_path,
jq_schema=".commit_history[]",
text_content=False,
metadata_func=extract_metadata,
)
documents = loader.load()
# Remove documents with None dates
documents = [doc for doc in documents if doc.metadata["date"] is not None]
if num_records > 0:
documents = documents[:num_records]
# Split the documents into chunks for embedding
text_splitter = CharacterTextSplitter(
chunk_size=1000,
chunk_overlap=200,
)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
# Create a Timescale Vector instance from the collection of documents
TimescaleVector.from_documents(
embedding=embeddings,
ids=[doc.metadata["id"] for doc in docs],
documents=docs,
collection_name=collection_name,
service_url=service_url,
time_partition_interval=partition_interval,
)
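# --- Illustrative usage sketch (editor's addition, not part of the template code) ---
# A minimal sketch of loading the sample dataset and querying it. The environment
# variable name and the query text are assumptions for illustration; an OpenAI key
# is required for the embeddings.
if __name__ == "__main__":
    service_url = os.environ["TIMESCALE_SERVICE_URL"]  # assumed env var name
    load_ts_git_dataset(
        service_url, collection_name="timescale_commits", num_records=50
    )
    db = TimescaleVector(
        collection_name="timescale_commits",
        service_url=service_url,
        embedding=OpenAIEmbeddings(),
    )
    for doc in db.similarity_search("When were continuous aggregates added?", k=3):
        print(doc.metadata.get("date"), doc.page_content[:80])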
| [] |
2024-01-10 | Mintplex-Labs/langchain-python | libs~core~tests~unit_tests~runnables~test_context.py | from typing import Any, Callable, List, NamedTuple, Union
import pytest
from langchain_core.output_parsers.string import StrOutputParser
from langchain_core.prompt_values import StringPromptValue
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.runnables.base import Runnable, RunnableLambda
from langchain_core.runnables.context import Context
from langchain_core.runnables.passthrough import RunnablePassthrough
from langchain_core.runnables.utils import aadd, add
from tests.unit_tests.fake.llm import FakeListLLM, FakeStreamingListLLM
class TestCase(NamedTuple):
input: Any
output: Any
def seq_naive_rag() -> Runnable:
context = [
"Hi there!",
"How are you?",
"What's your name?",
]
retriever = RunnableLambda(lambda x: context)
prompt = PromptTemplate.from_template("{context} {question}")
llm = FakeListLLM(responses=["hello"])
return (
Context.setter("input")
| {
"context": retriever | Context.setter("context"),
"question": RunnablePassthrough(),
}
| prompt
| llm
| StrOutputParser()
| {
"result": RunnablePassthrough(),
"context": Context.getter("context"),
"input": Context.getter("input"),
}
)
def seq_naive_rag_alt() -> Runnable:
context = [
"Hi there!",
"How are you?",
"What's your name?",
]
retriever = RunnableLambda(lambda x: context)
prompt = PromptTemplate.from_template("{context} {question}")
llm = FakeListLLM(responses=["hello"])
return (
Context.setter("input")
| {
"context": retriever | Context.setter("context"),
"question": RunnablePassthrough(),
}
| prompt
| llm
| StrOutputParser()
| Context.setter("result")
| Context.getter(["context", "input", "result"])
)
def seq_naive_rag_scoped() -> Runnable:
context = [
"Hi there!",
"How are you?",
"What's your name?",
]
retriever = RunnableLambda(lambda x: context)
prompt = PromptTemplate.from_template("{context} {question}")
llm = FakeListLLM(responses=["hello"])
scoped = Context.create_scope("a_scope")
return (
Context.setter("input")
| {
"context": retriever | Context.setter("context"),
"question": RunnablePassthrough(),
"scoped": scoped.setter("context") | scoped.getter("context"),
}
| prompt
| llm
| StrOutputParser()
| Context.setter("result")
| Context.getter(["context", "input", "result"])
)
test_cases = [
(
Context.setter("foo") | Context.getter("foo"),
(
TestCase("foo", "foo"),
TestCase("bar", "bar"),
),
),
(
Context.setter("input") | {"bar": Context.getter("input")},
(
TestCase("foo", {"bar": "foo"}),
TestCase("bar", {"bar": "bar"}),
),
),
(
{"bar": Context.setter("input")} | Context.getter("input"),
(
TestCase("foo", "foo"),
TestCase("bar", "bar"),
),
),
(
(
PromptTemplate.from_template("{foo} {bar}")
| Context.setter("prompt")
| FakeListLLM(responses=["hello"])
| StrOutputParser()
| {
"response": RunnablePassthrough(),
"prompt": Context.getter("prompt"),
}
),
(
TestCase(
{"foo": "foo", "bar": "bar"},
{"response": "hello", "prompt": StringPromptValue(text="foo bar")},
),
TestCase(
{"foo": "bar", "bar": "foo"},
{"response": "hello", "prompt": StringPromptValue(text="bar foo")},
),
),
),
(
(
PromptTemplate.from_template("{foo} {bar}")
| Context.setter("prompt", prompt_str=lambda x: x.to_string())
| FakeListLLM(responses=["hello"])
| StrOutputParser()
| {
"response": RunnablePassthrough(),
"prompt": Context.getter("prompt"),
"prompt_str": Context.getter("prompt_str"),
}
),
(
TestCase(
{"foo": "foo", "bar": "bar"},
{
"response": "hello",
"prompt": StringPromptValue(text="foo bar"),
"prompt_str": "foo bar",
},
),
TestCase(
{"foo": "bar", "bar": "foo"},
{
"response": "hello",
"prompt": StringPromptValue(text="bar foo"),
"prompt_str": "bar foo",
},
),
),
),
(
(
PromptTemplate.from_template("{foo} {bar}")
| Context.setter(prompt_str=lambda x: x.to_string())
| FakeListLLM(responses=["hello"])
| StrOutputParser()
| {
"response": RunnablePassthrough(),
"prompt_str": Context.getter("prompt_str"),
}
),
(
TestCase(
{"foo": "foo", "bar": "bar"},
{"response": "hello", "prompt_str": "foo bar"},
),
TestCase(
{"foo": "bar", "bar": "foo"},
{"response": "hello", "prompt_str": "bar foo"},
),
),
),
(
(
PromptTemplate.from_template("{foo} {bar}")
| Context.setter("prompt_str", lambda x: x.to_string())
| FakeListLLM(responses=["hello"])
| StrOutputParser()
| {
"response": RunnablePassthrough(),
"prompt_str": Context.getter("prompt_str"),
}
),
(
TestCase(
{"foo": "foo", "bar": "bar"},
{"response": "hello", "prompt_str": "foo bar"},
),
TestCase(
{"foo": "bar", "bar": "foo"},
{"response": "hello", "prompt_str": "bar foo"},
),
),
),
(
(
PromptTemplate.from_template("{foo} {bar}")
| Context.setter("prompt")
| FakeStreamingListLLM(responses=["hello"])
| StrOutputParser()
| {
"response": RunnablePassthrough(),
"prompt": Context.getter("prompt"),
}
),
(
TestCase(
{"foo": "foo", "bar": "bar"},
{"response": "hello", "prompt": StringPromptValue(text="foo bar")},
),
TestCase(
{"foo": "bar", "bar": "foo"},
{"response": "hello", "prompt": StringPromptValue(text="bar foo")},
),
),
),
(
seq_naive_rag,
(
TestCase(
"What up",
{
"result": "hello",
"context": [
"Hi there!",
"How are you?",
"What's your name?",
],
"input": "What up",
},
),
TestCase(
"Howdy",
{
"result": "hello",
"context": [
"Hi there!",
"How are you?",
"What's your name?",
],
"input": "Howdy",
},
),
),
),
(
seq_naive_rag_alt,
(
TestCase(
"What up",
{
"result": "hello",
"context": [
"Hi there!",
"How are you?",
"What's your name?",
],
"input": "What up",
},
),
TestCase(
"Howdy",
{
"result": "hello",
"context": [
"Hi there!",
"How are you?",
"What's your name?",
],
"input": "Howdy",
},
),
),
),
(
seq_naive_rag_scoped,
(
TestCase(
"What up",
{
"result": "hello",
"context": [
"Hi there!",
"How are you?",
"What's your name?",
],
"input": "What up",
},
),
TestCase(
"Howdy",
{
"result": "hello",
"context": [
"Hi there!",
"How are you?",
"What's your name?",
],
"input": "Howdy",
},
),
),
),
]
@pytest.mark.parametrize("runnable, cases", test_cases)
async def test_context_runnables(
runnable: Union[Runnable, Callable[[], Runnable]], cases: List[TestCase]
) -> None:
runnable = runnable if isinstance(runnable, Runnable) else runnable()
assert runnable.invoke(cases[0].input) == cases[0].output
assert await runnable.ainvoke(cases[1].input) == cases[1].output
assert runnable.batch([case.input for case in cases]) == [
case.output for case in cases
]
assert await runnable.abatch([case.input for case in cases]) == [
case.output for case in cases
]
assert add(runnable.stream(cases[0].input)) == cases[0].output
assert await aadd(runnable.astream(cases[1].input)) == cases[1].output
def test_runnable_context_seq_key_not_found() -> None:
seq: Runnable = {"bar": Context.setter("input")} | Context.getter("foo")
with pytest.raises(ValueError):
seq.invoke("foo")
def test_runnable_context_seq_key_order() -> None:
seq: Runnable = {"bar": Context.getter("foo")} | Context.setter("foo")
with pytest.raises(ValueError):
seq.invoke("foo")
def test_runnable_context_deadlock() -> None:
seq: Runnable = {
"bar": Context.setter("input") | Context.getter("foo"),
"foo": Context.setter("foo") | Context.getter("input"),
} | RunnablePassthrough()
with pytest.raises(ValueError):
seq.invoke("foo")
def test_runnable_context_seq_key_circular_ref() -> None:
seq: Runnable = {
"bar": Context.setter(input=Context.getter("input"))
} | Context.getter("foo")
with pytest.raises(ValueError):
seq.invoke("foo")
async def test_runnable_seq_streaming_chunks() -> None:
chain: Runnable = (
PromptTemplate.from_template("{foo} {bar}")
| Context.setter("prompt")
| FakeStreamingListLLM(responses=["hello"])
| StrOutputParser()
| {
"response": RunnablePassthrough(),
"prompt": Context.getter("prompt"),
}
)
chunks = [c for c in chain.stream({"foo": "foo", "bar": "bar"})]
achunks = [c async for c in chain.astream({"foo": "foo", "bar": "bar"})]
for c in chunks:
assert c in achunks
for c in achunks:
assert c in chunks
assert len(chunks) == 6
assert [c for c in chunks if c.get("response")] == [
{"response": "h"},
{"response": "e"},
{"response": "l"},
{"response": "l"},
{"response": "o"},
]
assert [c for c in chunks if c.get("prompt")] == [
{"prompt": StringPromptValue(text="foo bar")},
]
| [
"{context} {question}",
"{foo} {bar}"
] |
2024-01-10 | Mintplex-Labs/langchain-python | libs~langchain~langchain~vectorstores~clarifai.py | from __future__ import annotations
import logging
import os
import traceback
import uuid
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Iterable, List, Optional, Tuple
import requests
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.vectorstores import VectorStore
logger = logging.getLogger(__name__)
class Clarifai(VectorStore):
"""`Clarifai AI` vector store.
To use, you should have the ``clarifai`` python SDK package installed.
Example:
.. code-block:: python
from langchain.vectorstores import Clarifai
from langchain.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectorstore = Clarifai("langchain_store", embeddings.embed_query)
"""
def __init__(
self,
user_id: Optional[str] = None,
app_id: Optional[str] = None,
number_of_docs: Optional[int] = None,
pat: Optional[str] = None,
) -> None:
"""Initialize with Clarifai client.
Args:
user_id (Optional[str], optional): User ID. Defaults to None.
app_id (Optional[str], optional): App ID. Defaults to None.
pat (Optional[str], optional): Personal access token. Defaults to None.
number_of_docs (Optional[int], optional): Number of documents to return
                during vector search. Defaults to None.
Raises:
ValueError: If user ID, app ID or personal access token is not provided.
"""
self._user_id = user_id or os.environ.get("CLARIFAI_USER_ID")
self._app_id = app_id or os.environ.get("CLARIFAI_APP_ID")
if pat:
os.environ["CLARIFAI_PAT"] = pat
self._pat = os.environ.get("CLARIFAI_PAT")
if self._user_id is None or self._app_id is None or self._pat is None:
raise ValueError(
"Could not find CLARIFAI_USER_ID, CLARIFAI_APP_ID or\
CLARIFAI_PAT in your environment. "
"Please set those env variables with a valid user ID, \
app ID and personal access token \
from https://clarifai.com/settings/security."
)
self._number_of_docs = number_of_docs
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Add texts to the Clarifai vectorstore. This will push the text
to a Clarifai application.
        The application uses a base workflow that creates and stores an embedding
        for each text.
Make sure you are using a base workflow that is compatible with text
(such as Language Understanding).
Args:
texts (Iterable[str]): Texts to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
ids (Optional[List[str]], optional): Optional list of IDs.
"""
try:
from clarifai.client.input import Inputs
from google.protobuf.struct_pb2 import Struct
except ImportError as e:
raise ImportError(
"Could not import clarifai python package. "
"Please install it with `pip install clarifai`."
) from e
ltexts = list(texts)
length = len(ltexts)
assert length > 0, "No texts provided to add to the vectorstore."
if metadatas is not None:
assert length == len(
metadatas
), "Number of texts and metadatas should be the same."
if ids is not None:
assert len(ltexts) == len(
ids
), "Number of text inputs and input ids should be the same."
input_obj = Inputs(app_id=self._app_id, user_id=self._user_id)
batch_size = 32
input_job_ids = []
for idx in range(0, length, batch_size):
try:
batch_texts = ltexts[idx : idx + batch_size]
batch_metadatas = (
metadatas[idx : idx + batch_size] if metadatas else None
)
if batch_metadatas is not None:
meta_list = []
for meta in batch_metadatas:
meta_struct = Struct()
meta_struct.update(meta)
meta_list.append(meta_struct)
                # Use caller-provided ids for this batch, or generate fresh ones,
                # so that ids are never reused across batches.
                if ids is not None:
                    batch_ids = ids[idx : idx + batch_size]
                else:
                    batch_ids = [uuid.uuid4().hex for _ in range(len(batch_texts))]
                input_batch = [
                    input_obj.get_text_input(
                        input_id=batch_ids[i],
                        raw_text=inp,
                        metadata=meta_list[i] if batch_metadatas else None,
                    )
                    for i, inp in enumerate(batch_texts)
                ]
result_id = input_obj.upload_inputs(inputs=input_batch)
input_job_ids.extend(result_id)
logger.debug("Input posted successfully.")
except Exception as error:
logger.warning(f"Post inputs failed: {error}")
traceback.print_exc()
return input_job_ids
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filters: Optional[dict] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Run similarity search with score using Clarifai.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
            filters (Optional[dict]): Filter by metadata.
Defaults to None.
Returns:
List[Document]: List of documents most similar to the query text.
"""
try:
from clarifai.client.search import Search
from clarifai_grpc.grpc.api import resources_pb2
from google.protobuf import json_format # type: ignore
except ImportError as e:
raise ImportError(
"Could not import clarifai python package. "
"Please install it with `pip install clarifai`."
) from e
# Get number of docs to return
if self._number_of_docs is not None:
k = self._number_of_docs
search_obj = Search(user_id=self._user_id, app_id=self._app_id, top_k=k)
rank = [{"text_raw": query}]
# Add filter by metadata if provided.
if filters is not None:
search_metadata = {"metadata": filters}
search_response = search_obj.query(ranks=rank, filters=[search_metadata])
else:
search_response = search_obj.query(ranks=rank)
# Retrieve hits
hits = [hit for data in search_response for hit in data.hits]
executor = ThreadPoolExecutor(max_workers=10)
def hit_to_document(hit: resources_pb2.Hit) -> Tuple[Document, float]:
metadata = json_format.MessageToDict(hit.input.data.metadata)
h = {"Authorization": f"Key {self._pat}"}
request = requests.get(hit.input.data.text.url, headers=h)
            # Override the declared encoding with the apparent encoding detected by chardet.
request.encoding = request.apparent_encoding
requested_text = request.text
logger.debug(
f"\tScore {hit.score:.2f} for annotation: {hit.annotation.id}\
off input: {hit.input.id}, text: {requested_text[:125]}"
)
return (Document(page_content=requested_text, metadata=metadata), hit.score)
# Iterate over hits and retrieve metadata and text
futures = [executor.submit(hit_to_document, hit) for hit in hits]
docs_and_scores = [future.result() for future in futures]
return docs_and_scores
def similarity_search(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Document]:
"""Run similarity search using Clarifai.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
            List of Documents most similar to the query.
"""
        docs_and_scores = self.similarity_search_with_score(query, k=k, **kwargs)
return [doc for doc, _ in docs_and_scores]
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Optional[Embeddings] = None,
metadatas: Optional[List[dict]] = None,
user_id: Optional[str] = None,
app_id: Optional[str] = None,
number_of_docs: Optional[int] = None,
pat: Optional[str] = None,
**kwargs: Any,
) -> Clarifai:
"""Create a Clarifai vectorstore from a list of texts.
Args:
user_id (str): User ID.
app_id (str): App ID.
texts (List[str]): List of texts to add.
number_of_docs (Optional[int]): Number of documents to return
during vector search. Defaults to None.
metadatas (Optional[List[dict]]): Optional list of metadatas.
Defaults to None.
Returns:
Clarifai: Clarifai vectorstore.
"""
clarifai_vector_db = cls(
user_id=user_id,
app_id=app_id,
number_of_docs=number_of_docs,
pat=pat,
)
clarifai_vector_db.add_texts(texts=texts, metadatas=metadatas)
return clarifai_vector_db
@classmethod
def from_documents(
cls,
documents: List[Document],
embedding: Optional[Embeddings] = None,
user_id: Optional[str] = None,
app_id: Optional[str] = None,
number_of_docs: Optional[int] = None,
pat: Optional[str] = None,
**kwargs: Any,
) -> Clarifai:
"""Create a Clarifai vectorstore from a list of documents.
Args:
user_id (str): User ID.
app_id (str): App ID.
documents (List[Document]): List of documents to add.
number_of_docs (Optional[int]): Number of documents to return
during vector search. Defaults to None.
Returns:
Clarifai: Clarifai vectorstore.
"""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return cls.from_texts(
user_id=user_id,
app_id=app_id,
texts=texts,
number_of_docs=number_of_docs,
pat=pat,
metadatas=metadatas,
)
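# --- Illustrative usage sketch (editor's addition, not part of the library code) ---
# End-to-end sketch: index a few texts and run a scored similarity search, assuming
# CLARIFAI_USER_ID, CLARIFAI_APP_ID and CLARIFAI_PAT are set in the environment.
# The example texts and metadata are placeholders.
if __name__ == "__main__":
    store = Clarifai.from_texts(
        texts=[
            "Zab is the consensus protocol used by ZooKeeper.",
            "Raft elects a single leader per term.",
        ],
        metadatas=[{"topic": "zookeeper"}, {"topic": "raft"}],
    )
    for doc, score in store.similarity_search_with_score(
        "How does leader election work?", k=2
    ):
        print(f"{score:.3f}", doc.metadata.get("topic"), doc.page_content[:60])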
| [] |
2024-01-10 | Mintplex-Labs/langchain-python | libs~langchain~langchain~llms~mlflow.py | from __future__ import annotations
from typing import Any, Dict, List, Mapping, Optional
from urllib.parse import urlparse
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import LLM
from langchain_core.pydantic_v1 import BaseModel, Extra, Field, PrivateAttr
# Ignoring type because below is valid pydantic code
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class Params(BaseModel, extra=Extra.allow): # type: ignore[call-arg]
"""Parameters for MLflow"""
temperature: float = 0.0
n: int = 1
stop: Optional[List[str]] = None
max_tokens: Optional[int] = None
class Mlflow(LLM):
"""Wrapper around completions LLMs in MLflow.
To use, you should have the `mlflow[genai]` python package installed.
For more information, see https://mlflow.org/docs/latest/llms/deployments/server.html.
Example:
.. code-block:: python
from langchain.llms import Mlflow
completions = Mlflow(
target_uri="http://localhost:5000",
endpoint="test",
params={"temperature": 0.1}
)
"""
endpoint: str
"""The endpoint to use."""
target_uri: str
"""The target URI to use."""
temperature: float = 0.0
"""The sampling temperature."""
n: int = 1
"""The number of completion choices to generate."""
stop: Optional[List[str]] = None
"""The stop sequence."""
max_tokens: Optional[int] = None
"""The maximum number of tokens to generate."""
    extra_params: Dict[str, Any] = Field(default_factory=dict)
    """Any extra parameters to pass to the endpoint, such as `temperature`."""
_client: Any = PrivateAttr()
def __init__(self, **kwargs: Any):
super().__init__(**kwargs)
self._validate_uri()
try:
from mlflow.deployments import get_deploy_client
self._client = get_deploy_client(self.target_uri)
except ImportError as e:
raise ImportError(
"Failed to create the client. "
"Please run `pip install mlflow[genai]` to install "
"required dependencies."
) from e
def _validate_uri(self) -> None:
if self.target_uri == "databricks":
return
allowed = ["http", "https", "databricks"]
if urlparse(self.target_uri).scheme not in allowed:
raise ValueError(
f"Invalid target URI: {self.target_uri}. "
f"The scheme must be one of {allowed}."
)
@property
def _default_params(self) -> Dict[str, Any]:
return {
"target_uri": self.target_uri,
"endpoint": self.endpoint,
"temperature": self.temperature,
"n": self.n,
"stop": self.stop,
"max_tokens": self.max_tokens,
"extra_params": self.extra_params,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
return self._default_params
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
data: Dict[str, Any] = {
"prompt": prompt,
"temperature": self.temperature,
"n": self.n,
**self.extra_params,
**kwargs,
}
if stop := self.stop or stop:
data["stop"] = stop
if self.max_tokens is not None:
data["max_tokens"] = self.max_tokens
resp = self._client.predict(endpoint=self.endpoint, inputs=data)
return resp["choices"][0]["text"]
@property
def _llm_type(self) -> str:
return "mlflow"
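# --- Illustrative usage sketch (editor's addition, not part of the library code) ---
# The wrapper is a standard LLM, so it composes with LCEL like any other model.
# The target URI and endpoint name are placeholders for a running MLflow
# deployments server.
if __name__ == "__main__":
    from langchain_core.prompts import PromptTemplate

    llm = Mlflow(
        target_uri="http://localhost:5000",  # hypothetical gateway address
        endpoint="completions",  # hypothetical endpoint name
        temperature=0.1,
        max_tokens=128,
    )
    chain = PromptTemplate.from_template("Summarize in one line: {text}") | llm
    print(chain.invoke({"text": "MLflow Deployments exposes hosted LLMs behind one API."}))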
| [] |
2024-01-10 | Mintplex-Labs/langchain-python | libs~langchain~langchain~utils~aiter.py | from langchain_core.utils.aiter import NoLock, Tee, py_anext
__all__ = ["py_anext", "NoLock", "Tee"]
| [] |
2024-01-10 | Mintplex-Labs/langchain-python | libs~langchain~langchain~memory~readonly.py | from typing import Any, Dict, List
from langchain_core.memory import BaseMemory
class ReadOnlySharedMemory(BaseMemory):
"""A memory wrapper that is read-only and cannot be changed."""
memory: BaseMemory
@property
def memory_variables(self) -> List[str]:
"""Return memory variables."""
return self.memory.memory_variables
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Load memory variables from memory."""
return self.memory.load_memory_variables(inputs)
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Nothing should be saved or changed"""
pass
def clear(self) -> None:
"""Nothing to clear, got a memory like a vault."""
pass
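# --- Illustrative usage sketch (editor's addition, not part of the library code) ---
# Typical use: share one conversation memory with a secondary chain (e.g. a tool)
# that should be able to read the history but never write to it. The fake model
# and prompt below are placeholders.
if __name__ == "__main__":
    from langchain.chains import LLMChain
    from langchain.llms.fake import FakeListLLM
    from langchain.memory import ConversationBufferMemory
    from langchain.prompts import PromptTemplate

    shared_memory = ConversationBufferMemory(memory_key="chat_history")
    shared_memory.save_context({"input": "hi"}, {"output": "hello!"})
    readonly = ReadOnlySharedMemory(memory=shared_memory)

    summary_chain = LLMChain(
        llm=FakeListLLM(responses=["A short greeting exchange."]),
        prompt=PromptTemplate.from_template(
            "Summarize this conversation:\n{chat_history}\nSummary:"
        ),
        memory=readonly,  # reads chat_history, but save_context is a no-op
    )
    print(summary_chain.predict())
    # The underlying memory is untouched by the summary chain's run.
    print(shared_memory.load_memory_variables({}))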
| [] |
2024-01-10 | Mintplex-Labs/langchain-python | libs~core~langchain_core~runnables~history.py | from __future__ import annotations
import asyncio
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Sequence,
Type,
Union,
)
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.load import load
from langchain_core.pydantic_v1 import BaseModel, create_model
from langchain_core.runnables.base import Runnable, RunnableBindingBase, RunnableLambda
from langchain_core.runnables.passthrough import RunnablePassthrough
from langchain_core.runnables.utils import (
ConfigurableFieldSpec,
get_unique_config_specs,
)
if TYPE_CHECKING:
from langchain_core.messages import BaseMessage
from langchain_core.runnables.config import RunnableConfig
from langchain_core.tracers.schemas import Run
MessagesOrDictWithMessages = Union[Sequence["BaseMessage"], Dict[str, Any]]
GetSessionHistoryCallable = Callable[..., BaseChatMessageHistory]
class RunnableWithMessageHistory(RunnableBindingBase):
"""A runnable that manages chat message history for another runnable.
Base runnable must have inputs and outputs that can be converted to a list of
BaseMessages.
RunnableWithMessageHistory must always be called with a config that contains session_id, e.g.:
``{"configurable": {"session_id": "<SESSION_ID>"}}``
Example (dict input):
.. code-block:: python
from typing import Optional
from langchain.chat_models import ChatAnthropic
from langchain.memory.chat_message_histories import RedisChatMessageHistory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory
prompt = ChatPromptTemplate.from_messages([
("system", "You're an assistant who's good at {ability}"),
MessagesPlaceholder(variable_name="history"),
("human", "{question}"),
])
chain = prompt | ChatAnthropic(model="claude-2")
chain_with_history = RunnableWithMessageHistory(
chain,
RedisChatMessageHistory,
input_messages_key="question",
history_messages_key="history",
)
chain_with_history.invoke(
{"ability": "math", "question": "What does cosine mean?"},
config={"configurable": {"session_id": "foo"}}
)
# -> "Cosine is ..."
chain_with_history.invoke(
{"ability": "math", "question": "What's its inverse"},
config={"configurable": {"session_id": "foo"}}
)
# -> "The inverse of cosine is called arccosine ..."
""" # noqa: E501
get_session_history: GetSessionHistoryCallable
input_messages_key: Optional[str] = None
output_messages_key: Optional[str] = None
history_messages_key: Optional[str] = None
def __init__(
self,
runnable: Runnable[
MessagesOrDictWithMessages,
Union[str, BaseMessage, MessagesOrDictWithMessages],
],
get_session_history: GetSessionHistoryCallable,
*,
input_messages_key: Optional[str] = None,
output_messages_key: Optional[str] = None,
history_messages_key: Optional[str] = None,
**kwargs: Any,
) -> None:
"""Initialize RunnableWithMessageHistory.
Args:
runnable: The base Runnable to be wrapped.
Must take as input one of:
- A sequence of BaseMessages
- A dict with one key for all messages
- A dict with one key for the current input string/message(s) and
a separate key for historical messages. If the input key points
to a string, it will be treated as a HumanMessage in history.
Must return as output one of:
- A string which can be treated as an AIMessage
- A BaseMessage or sequence of BaseMessages
- A dict with a key for a BaseMessage or sequence of BaseMessages
get_session_history: Function that returns a new BaseChatMessageHistory
given a session id. Should take a single
positional argument `session_id` which is a string and a named argument
`user_id` which can be a string or None. e.g.:
```python
def get_session_history(
session_id: str,
*,
user_id: Optional[str]=None
) -> BaseChatMessageHistory:
...
```
input_messages_key: Must be specified if the base runnable accepts a dict
as input.
output_messages_key: Must be specified if the base runnable returns a dict
as output.
history_messages_key: Must be specified if the base runnable accepts a dict
as input and expects a separate key for historical messages.
**kwargs: Arbitrary additional kwargs to pass to parent class
``RunnableBindingBase`` init.
""" # noqa: E501
history_chain: Runnable = RunnableLambda(
self._enter_history, self._aenter_history
).with_config(run_name="load_history")
messages_key = history_messages_key or input_messages_key
if messages_key:
history_chain = RunnablePassthrough.assign(
**{messages_key: history_chain}
).with_config(run_name="insert_history")
bound = (
history_chain | runnable.with_listeners(on_end=self._exit_history)
).with_config(run_name="RunnableWithMessageHistory")
super().__init__(
get_session_history=get_session_history,
input_messages_key=input_messages_key,
output_messages_key=output_messages_key,
bound=bound,
history_messages_key=history_messages_key,
**kwargs,
)
@property
def config_specs(self) -> List[ConfigurableFieldSpec]:
return get_unique_config_specs(
super().config_specs
+ [
ConfigurableFieldSpec(
id="session_id",
annotation=str,
name="Session ID",
description="Unique identifier for a session.",
default="",
is_shared=True,
),
]
)
def get_input_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
super_schema = super().get_input_schema(config)
if super_schema.__custom_root_type__ is not None:
from langchain_core.messages import BaseMessage
fields: Dict = {}
if self.input_messages_key and self.history_messages_key:
fields[self.input_messages_key] = (
Union[str, BaseMessage, Sequence[BaseMessage]],
...,
)
elif self.input_messages_key:
fields[self.input_messages_key] = (Sequence[BaseMessage], ...)
else:
fields["__root__"] = (Sequence[BaseMessage], ...)
if self.history_messages_key:
fields[self.history_messages_key] = (Sequence[BaseMessage], ...)
return create_model( # type: ignore[call-overload]
"RunnableWithChatHistoryInput",
**fields,
)
else:
return super_schema
def _get_input_messages(
self, input_val: Union[str, BaseMessage, Sequence[BaseMessage]]
) -> List[BaseMessage]:
from langchain_core.messages import BaseMessage
if isinstance(input_val, str):
from langchain_core.messages import HumanMessage
return [HumanMessage(content=input_val)]
elif isinstance(input_val, BaseMessage):
return [input_val]
elif isinstance(input_val, (list, tuple)):
return list(input_val)
else:
raise ValueError(
f"Expected str, BaseMessage, List[BaseMessage], or Tuple[BaseMessage]. "
f"Got {input_val}."
)
def _get_output_messages(
self, output_val: Union[str, BaseMessage, Sequence[BaseMessage], dict]
) -> List[BaseMessage]:
from langchain_core.messages import BaseMessage
if isinstance(output_val, dict):
output_val = output_val[self.output_messages_key or "output"]
if isinstance(output_val, str):
from langchain_core.messages import AIMessage
return [AIMessage(content=output_val)]
elif isinstance(output_val, BaseMessage):
return [output_val]
elif isinstance(output_val, (list, tuple)):
return list(output_val)
else:
            raise ValueError(
                f"Expected str, BaseMessage, List[BaseMessage], or Tuple[BaseMessage]. "
                f"Got {output_val}."
            )
def _enter_history(self, input: Any, config: RunnableConfig) -> List[BaseMessage]:
hist = config["configurable"]["message_history"]
# return only historic messages
if self.history_messages_key:
return hist.messages.copy()
# return all messages
else:
input_val = (
input if not self.input_messages_key else input[self.input_messages_key]
)
return hist.messages.copy() + self._get_input_messages(input_val)
async def _aenter_history(
self, input: Dict[str, Any], config: RunnableConfig
) -> List[BaseMessage]:
return await asyncio.get_running_loop().run_in_executor(
None, self._enter_history, input, config
)
def _exit_history(self, run: Run, config: RunnableConfig) -> None:
hist = config["configurable"]["message_history"]
# Get the input messages
inputs = load(run.inputs)
input_val = inputs[self.input_messages_key or "input"]
input_messages = self._get_input_messages(input_val)
# Get the output messages
output_val = load(run.outputs)
output_messages = self._get_output_messages(output_val)
for m in input_messages + output_messages:
hist.add_message(m)
def _merge_configs(self, *configs: Optional[RunnableConfig]) -> RunnableConfig:
config = super()._merge_configs(*configs)
# extract session_id
if "session_id" not in config.get("configurable", {}):
example_input = {self.input_messages_key: "foo"}
example_config = {"configurable": {"session_id": "123"}}
raise ValueError(
"session_id is required."
" Pass it in as part of the config argument to .invoke() or .stream()"
f"\neg. chain.invoke({example_input}, {example_config})"
)
# attach message_history
session_id = config["configurable"]["session_id"]
config["configurable"]["message_history"] = self.get_session_history(session_id)
return config
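# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# A minimal wiring of RunnableWithMessageHistory against an in-memory store.
# The tiny history class below is an assumption written for this sketch; it only
# implements the members this wrapper actually touches (`messages`,
# `add_message`, `clear`).
#
#   from langchain_core.messages import AIMessage, HumanMessage
#
#   class _InMemoryHistory(BaseChatMessageHistory):
#       def __init__(self) -> None:
#           self.messages = []
#
#       def add_message(self, message) -> None:
#           self.messages.append(message)
#
#       def clear(self) -> None:
#           self.messages = []
#
#   _store: dict = {}
#
#   def _get_history(session_id: str) -> BaseChatMessageHistory:
#       return _store.setdefault(session_id, _InMemoryHistory())
#
#   echo = RunnableLambda(lambda messages: AIMessage(content=f"got {len(messages)} message(s)"))
#   chain = RunnableWithMessageHistory(echo, _get_history)
#   chain.invoke([HumanMessage(content="hi")],
#                config={"configurable": {"session_id": "demo"}})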
| [] |
2024-01-10 | vvvm23/vdvae | vae.py | """
Encoder Components:
- Encoder, contains all the EncoderBlocks and manages data flow through them.
- EncoderBlock, contains sub-blocks of residual units and a pooling layer.
- ResidualBlock, contains a block of residual connections, as described in the paper (1x1,3x3,3x3,1x1)
- We could slightly adapt, and make it a ReZero connection. Needs some testing.
Decoder Components:
- Decoder, contains all DecoderBlocks and manages data flow through them.
- DecoderBlock, contains sub-blocks of top-down units and an unpool layer.
- TopDownBlock, implements the topdown block from the original paper.
All is encapsulated in the main VAE class.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
"""
Some helper functions for common constructs
"""
class ConvBuilder:
def _bconv(in_dim, out_dim, kernel_size, stride, padding):
conv = nn.Conv2d(in_dim, out_dim, kernel_size, stride=stride, padding=padding)
return conv
def b1x1(in_dim, out_dim):
return ConvBuilder._bconv(in_dim, out_dim, 1, 1, 0)
def b3x3(in_dim, out_dim):
return ConvBuilder._bconv(in_dim, out_dim, 3, 1, 1)
"""
Diagonal Gaussian Distribution and loss.
Taken directly from OpenAI implementation
Decorators means these functions will be compiled as TorchScript
"""
@torch.jit.script
def gaussian_analytical_kl(mu1, mu2, logsigma1, logsigma2):
return -0.5 + logsigma2 - logsigma1 + 0.5 * (logsigma1.exp() ** 2 + (mu1 - mu2) ** 2) / (logsigma2.exp() ** 2)
@torch.jit.script
def draw_gaussian_diag_samples(mu, logsigma):
eps = torch.empty_like(mu).normal_(0., 1.)
return torch.exp(logsigma) * eps + mu
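# --- Hedged sanity-check sketch (added for illustration; not part of the original module) ---
# gaussian_analytical_kl computes, element-wise, KL( N(mu1, sigma1^2) || N(mu2, sigma2^2) )
# with log-sigmas as inputs, and draw_gaussian_diag_samples is the usual
# reparameterisation trick (mu + sigma * eps, with eps ~ N(0, 1)).
# Quick check that identical Gaussians give zero divergence:
#
#   mu = torch.zeros(2, 3)
#   logsigma = torch.zeros(2, 3)
#   assert torch.allclose(gaussian_analytical_kl(mu, mu, logsigma, logsigma),
#                         torch.zeros(2, 3))
#   sample = draw_gaussian_diag_samples(mu, logsigma)   # shape (2, 3), standard normal here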
"""
Helper module to call super().__init__() for us
"""
class HelperModule(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
self.build(*args, **kwargs)
def build(self, *args, **kwargs):
raise NotImplementedError
"""
Encoder Components
"""
class ResidualBlock(HelperModule):
def build(self, in_width, hidden_width, rezero=False): # hidden_width should function as a bottleneck!
self.conv = nn.ModuleList([
ConvBuilder.b1x1(in_width, hidden_width),
ConvBuilder.b3x3(hidden_width, hidden_width),
ConvBuilder.b3x3(hidden_width, hidden_width),
ConvBuilder.b1x1(hidden_width, in_width)
])
if rezero:
self.gate = nn.Parameter(torch.tensor(0.0))
else:
self.gate = 1.0
def forward(self, x):
xh = x
for l in self.conv:
xh = l(F.gelu(xh))
y = x + self.gate*xh
return y
class EncoderBlock(HelperModule):
def build(self, in_dim, middle_width, nb_r_blocks, downscale_rate):
self.downscale_rate = downscale_rate
self.res_blocks = nn.ModuleList([
ResidualBlock(in_dim, middle_width)
for _ in range(nb_r_blocks)])
def forward(self, x):
y = x
for l in self.res_blocks:
y = l(y)
a = y
y = F.avg_pool2d(y, kernel_size=self.downscale_rate, stride=self.downscale_rate)
return y, a # y is input to next block, a is activations to topdown layer
class Encoder(HelperModule):
def build(self, in_dim, hidden_width, middle_width, nb_encoder_blocks, nb_res_blocks=3, downscale_rate=2):
self.in_conv = ConvBuilder.b3x3(in_dim, hidden_width)
self.enc_blocks = nn.ModuleList([
EncoderBlock(hidden_width, middle_width, nb_res_blocks, 1 if i==(nb_encoder_blocks-1) else downscale_rate)
for i in range(nb_encoder_blocks)])
# TODO: could just pass np.sqrt( ... ) value to EncoderBlock, rather than this weird loop
# it is the same in every block.
for be in self.enc_blocks:
for br in be.res_blocks:
br.conv[-1].weight.data *= np.sqrt(1 / (nb_encoder_blocks*nb_res_blocks))
def forward(self, x):
x = self.in_conv(x)
activations = [x]
for b in self.enc_blocks:
x, a = b(x)
activations.append(a)
return activations
"""
Decoder Components
"""
class Block(HelperModule):
def build(self, in_width, hidden_width, out_width): # hidden_width should function as a bottleneck!
self.conv = nn.ModuleList([
ConvBuilder.b1x1(in_width, hidden_width),
ConvBuilder.b3x3(hidden_width, hidden_width),
ConvBuilder.b3x3(hidden_width, hidden_width),
ConvBuilder.b1x1(hidden_width, out_width)
])
def forward(self, x):
for l in self.conv:
x = l(F.gelu(x))
return x
class TopDownBlock(HelperModule):
def build(self, in_width, middle_width, z_dim):
self.cat_conv = Block(in_width*2, middle_width, z_dim*2) # parameterises mean and variance
self.prior = Block(in_width, middle_width, z_dim*2 + in_width) # parameterises mean, variance and xh
self.out_res = ResidualBlock(in_width, middle_width)
self.z_conv = ConvBuilder.b1x1(z_dim, in_width)
self.z_dim = z_dim
def forward(self, x, a):
xa = torch.cat([x,a], dim=1)
qm, qv = self.cat_conv(xa).chunk(2, dim=1) # Calculate q distribution parameters. Chunk into 2 (first z_dim is mean, second is variance)
pfeat = self.prior(x)
pm, pv, px = pfeat[:, :self.z_dim], pfeat[:, self.z_dim:self.z_dim*2], pfeat[:, self.z_dim*2:]
x = x + px
z = draw_gaussian_diag_samples(qm, qv)
kl = gaussian_analytical_kl(qm, pm, qv, pv)
z = self.z_conv(z)
x = x + z
x = self.out_res(x)
return x, kl
def sample(self, x):
pfeat = self.prior(x)
pm, pv, px = pfeat[:, :self.z_dim], pfeat[:, self.z_dim:self.z_dim*2], pfeat[:, self.z_dim*2:]
x = x + px
z = draw_gaussian_diag_samples(pm, pv)
x = x + self.z_conv(z)
x = self.out_res(x)
return x
class DecoderBlock(HelperModule):
def build(self, in_dim, middle_width, z_dim, nb_td_blocks, upscale_rate):
self.upscale_rate = upscale_rate
self.td_blocks = nn.ModuleList([
TopDownBlock(in_dim, middle_width, z_dim)
for _ in range(nb_td_blocks)])
def forward(self, x, a):
x = F.interpolate(x, scale_factor=self.upscale_rate)
block_kl = []
for b in self.td_blocks:
x, kl = b(x, a)
block_kl.append(kl)
return x, block_kl
def sample(self, x):
x = F.interpolate(x, scale_factor=self.upscale_rate)
for b in self.td_blocks:
x = b.sample(x)
return x
class Decoder(HelperModule):
def build(self, in_dim, middle_width, out_dim, z_dim, nb_decoder_blocks, nb_td_blocks=3, upscale_rate=2):
self.dec_blocks = nn.ModuleList([
DecoderBlock(in_dim, middle_width, z_dim, nb_td_blocks, 1 if i == 0 else upscale_rate)
for i in range(nb_decoder_blocks)])
self.in_dim = in_dim
self.out_conv = ConvBuilder.b3x3(in_dim, out_dim)
for bd in self.dec_blocks:
for bt in bd.td_blocks:
bt.z_conv.weight.data *= np.sqrt(1 / (nb_decoder_blocks*nb_td_blocks))
bt.out_res.conv[-1].weight.data *= np.sqrt(1 / (nb_decoder_blocks*nb_td_blocks))
def forward(self, activations):
activations = activations[::-1]
x = None
decoder_kl = []
for i, b in enumerate(self.dec_blocks):
a = activations[i]
            if x is None:
x = torch.zeros_like(a)
x, block_kl = b(x, a)
decoder_kl.extend(block_kl)
x = self.out_conv(x)
return x, decoder_kl
def sample(self, nb_samples):
x = None
for b in self.dec_blocks:
            if x is None:
x = torch.zeros(nb_samples, self.in_dim, 4, 4).to('cuda') # TODO: Variable device and size
x = b.sample(x)
x = self.out_conv(x)
return x
"""
Main VAE class
"""
class VAE(HelperModule):
def build(self, in_dim, hidden_width, middle_width, z_dim, nb_blocks=4, nb_res_blocks=3, scale_rate=2):
self.encoder = Encoder(in_dim, hidden_width, middle_width, nb_blocks, nb_res_blocks=nb_res_blocks, downscale_rate=scale_rate)
self.decoder = Decoder(hidden_width, middle_width, in_dim, z_dim, nb_blocks, nb_td_blocks=nb_res_blocks, upscale_rate=scale_rate)
def forward(self, x):
activations = self.encoder(x)
y, decoder_kl = self.decoder(activations)
return y, decoder_kl
def sample(self, nb_samples):
return self.decoder.sample(nb_samples)
if __name__ == "__main__":
import torchvision
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
vae = VAE(3, 64, 32, 32, nb_blocks=6).to(device)
x = torch.randn(1, 3, 256, 256).to(device)
y, kls = vae(x)
| [] |
2024-01-10 | linruier/llama_index | gpt_index~langchain_helpers~text_splitter.py | """Text splitter implementations."""
from dataclasses import dataclass
from typing import Callable, List, Optional
from langchain.text_splitter import TextSplitter
from gpt_index.utils import globals_helper
@dataclass
class TextSplit:
"""Text split with overlap.
Attributes:
text_chunk: The text string.
num_char_overlap: The number of overlapping characters with the previous chunk.
"""
text_chunk: str
num_char_overlap: Optional[int] = None
class TokenTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at word tokens."""
def __init__(
self,
separator: str = " ",
chunk_size: int = 3900,
chunk_overlap: int = 200,
tokenizer: Optional[Callable] = None,
backup_separators: Optional[List[str]] = ["\n"],
):
"""Initialize with parameters."""
if chunk_overlap > chunk_size:
raise ValueError(
f"Got a larger chunk overlap ({chunk_overlap}) than chunk size "
f"({chunk_size}), should be smaller."
)
self._separator = separator
self._chunk_size = chunk_size
self._chunk_overlap = chunk_overlap
self.tokenizer = tokenizer or globals_helper.tokenizer
self._backup_separators = backup_separators
def _reduce_chunk_size(
self, start_idx: int, cur_idx: int, splits: List[str]
) -> int:
"""Reduce the chunk size by reducing cur_idx.
Return the new cur_idx.
"""
current_doc_total = len(
self.tokenizer(self._separator.join(splits[start_idx:cur_idx]))
)
while current_doc_total > self._chunk_size:
percent_to_reduce = (
current_doc_total - self._chunk_size
) / current_doc_total
num_to_reduce = int(percent_to_reduce * (cur_idx - start_idx)) + 1
cur_idx -= num_to_reduce
current_doc_total = len(
self.tokenizer(self._separator.join(splits[start_idx:cur_idx]))
)
return cur_idx
def _preprocess_splits(self, splits: List[str], chunk_size: int) -> List[str]:
"""Process splits.
Specifically search for tokens that are too large for chunk size,
and see if we can separate those tokens more
(via backup separators if specified, or force chunking).
"""
new_splits = []
for split in splits:
num_cur_tokens = len(self.tokenizer(split))
if num_cur_tokens <= chunk_size:
new_splits.append(split)
else:
cur_splits = [split]
if self._backup_separators:
for sep in self._backup_separators:
if sep in split:
cur_splits = split.split(sep)
break
else:
cur_splits = [split]
cur_splits2 = []
for cur_split in cur_splits:
num_cur_tokens = len(self.tokenizer(cur_split))
if num_cur_tokens <= chunk_size:
cur_splits2.extend([cur_split])
else:
cur_split_chunks = [
cur_split[i : i + chunk_size]
for i in range(0, len(cur_split), chunk_size)
]
cur_splits2.extend(cur_split_chunks)
new_splits.extend(cur_splits2)
return new_splits
def _postprocess_splits(self, docs: List[TextSplit]) -> List[TextSplit]:
"""Post-process splits."""
# TODO: prune text splits, remove empty spaces
new_docs = []
for doc in docs:
if doc.text_chunk.replace(" ", "") == "":
continue
new_docs.append(doc)
return new_docs
def split_text(self, text: str, extra_info_str: Optional[str] = None) -> List[str]:
"""Split incoming text and return chunks."""
text_splits = self.split_text_with_overlaps(text, extra_info_str=extra_info_str)
return [text_split.text_chunk for text_split in text_splits]
def split_text_with_overlaps(
self, text: str, extra_info_str: Optional[str] = None
) -> List[TextSplit]:
"""Split incoming text and return chunks with overlap size."""
if text == "":
return []
# NOTE: Consider extra info str that will be added to the chunk at query time
# This reduces the effective chunk size that we can have
if extra_info_str is not None:
# NOTE: extra 2 newline chars for formatting when prepending in query
num_extra_tokens = len(self.tokenizer(f"{extra_info_str}\n\n")) + 1
effective_chunk_size = self._chunk_size - num_extra_tokens
if effective_chunk_size <= 0:
raise ValueError(
"Effective chunk size is non positive after considering extra_info"
)
else:
effective_chunk_size = self._chunk_size
# First we naively split the large input into a bunch of smaller ones.
splits = text.split(self._separator)
splits = self._preprocess_splits(splits, effective_chunk_size)
# We now want to combine these smaller pieces into medium size
# chunks to send to the LLM.
docs: List[TextSplit] = []
start_idx = 0
cur_idx = 0
cur_total = 0
prev_idx = 0 # store the previous end index
while cur_idx < len(splits):
cur_token = splits[cur_idx]
num_cur_tokens = max(len(self.tokenizer(cur_token)), 1)
if num_cur_tokens > effective_chunk_size:
raise ValueError(
"A single term is larger than the allowed chunk size.\n"
f"Term size: {num_cur_tokens}\n"
f"Chunk size: {self._chunk_size}"
f"Effective chunk size: {effective_chunk_size}"
)
            # If adding this token to current_doc would exceed the chunk size:
            # 1. First verify with the tokenizer that current_doc still fits,
            #    shrinking it if necessary
            # 2. Update the docs list
if cur_total + num_cur_tokens > effective_chunk_size:
# NOTE: since we use a proxy for counting tokens, we want to
# run tokenizer across all of current_doc first. If
# the chunk is too big, then we will reduce text in pieces
cur_idx = self._reduce_chunk_size(start_idx, cur_idx, splits)
overlap = 0
# after first round, check if last chunk ended after this chunk begins
if prev_idx > 0 and prev_idx > start_idx:
overlap = sum([len(splits[i]) for i in range(start_idx, prev_idx)])
docs.append(
TextSplit(self._separator.join(splits[start_idx:cur_idx]), overlap)
)
prev_idx = cur_idx
# 2. Shrink the current_doc (from the front) until it is gets smaller
# than the overlap size
# NOTE: because counting tokens individually is an imperfect
# proxy (but much faster proxy) for the total number of tokens consumed,
# we need to enforce that start_idx <= cur_idx, otherwise
# start_idx has a chance of going out of bounds.
while cur_total > self._chunk_overlap and start_idx < cur_idx:
# # call tokenizer on entire overlap
# cur_total = self.tokenizer()
cur_num_tokens = max(len(self.tokenizer(splits[start_idx])), 1)
cur_total -= cur_num_tokens
start_idx += 1
# NOTE: This is a hack, make more general
if start_idx == cur_idx:
cur_total = 0
            # Build up the current_doc with term d, and update the total counter
            # with the number of tokens in d, w.r.t. self.tokenizer
# we reassign cur_token and num_cur_tokens, because cur_idx
# may have changed
cur_token = splits[cur_idx]
num_cur_tokens = max(len(self.tokenizer(cur_token)), 1)
cur_total += num_cur_tokens
cur_idx += 1
overlap = 0
# after first round, check if last chunk ended after this chunk begins
if prev_idx > start_idx:
overlap = sum([len(splits[i]) for i in range(start_idx, prev_idx)]) + len(
range(start_idx, prev_idx)
)
docs.append(TextSplit(self._separator.join(splits[start_idx:cur_idx]), overlap))
# run postprocessing to remove blank spaces
docs = self._postprocess_splits(docs)
return docs
def truncate_text(self, text: str) -> str:
"""Truncate text in order to fit the underlying chunk size."""
if text == "":
return ""
# First we naively split the large input into a bunch of smaller ones.
splits = text.split(self._separator)
splits = self._preprocess_splits(splits, self._chunk_size)
start_idx = 0
cur_idx = 0
cur_total = 0
while cur_idx < len(splits):
cur_token = splits[cur_idx]
num_cur_tokens = max(len(self.tokenizer(cur_token)), 1)
if cur_total + num_cur_tokens > self._chunk_size:
cur_idx = self._reduce_chunk_size(start_idx, cur_idx, splits)
break
cur_total += num_cur_tokens
cur_idx += 1
return self._separator.join(splits[start_idx:cur_idx])
class SentenceSplitter(TextSplitter):
"""Split text with a preference for complete sentences.
    In general, this class tries to keep sentences and paragraphs together. Therefore,
    compared to the original TokenTextSplitter, it is less likely to leave hanging
    sentences or parts of sentences at the end of the node chunk.
"""
def __init__(
self,
separator: str = " ",
chunk_size: int = 4000,
chunk_overlap: int = 200,
tokenizer: Optional[Callable] = None,
backup_separators: Optional[List[str]] = ["\n"],
paragraph_separator: Optional[str] = "\n\n\n",
chunking_tokenizer_fn: Optional[Callable[[str], List[str]]] = None,
secondary_chunking_regex: Optional[str] = "[^,.;。]+[,.;。]?",
):
"""Initialize with parameters."""
if chunk_overlap > chunk_size:
raise ValueError(
f"Got a larger chunk overlap ({chunk_overlap}) than chunk size "
f"({chunk_size}), should be smaller."
)
self._separator = separator
self._chunk_size = chunk_size
self._chunk_overlap = chunk_overlap
self.tokenizer = tokenizer or globals_helper.tokenizer
self._backup_separators = backup_separators
if chunking_tokenizer_fn is None:
import nltk.tokenize.punkt as pkt
class CustomLanguageVars(pkt.PunktLanguageVars):
_period_context_fmt = r"""
%(SentEndChars)s # a potential sentence ending
(\)\"\s)\s* # other end chars and
# any amount of white space
(?=(?P<after_tok>
%(NonWord)s # either other punctuation
|
(?P<next_tok>\S+) # or whitespace and some other token
))"""
custom_tknzr = pkt.PunktSentenceTokenizer(lang_vars=CustomLanguageVars())
chunking_tokenizer_fn = custom_tknzr.tokenize
self.paragraph_separator = paragraph_separator
self.chunking_tokenizer_fn = chunking_tokenizer_fn
self.second_chunking_regex = secondary_chunking_regex
"""
By default we use the second chunking regex "[^,.;]+[,.;]?".
This regular expression will split the sentences into phrases,
where each phrase is a sequence of one or more non-comma,
non-period, and non-semicolon characters, followed by an optional comma,
period, or semicolon. The regular expression will also capture the
delimiters themselves as separate items in the list of phrases.
"""
def _postprocess_splits(self, docs: List[TextSplit]) -> List[TextSplit]:
"""Post-process splits."""
# TODO: prune text splits, remove empty spaces
new_docs = []
for doc in docs:
if doc.text_chunk.replace(" ", "") == "":
continue
new_docs.append(doc)
return new_docs
def split_text_with_overlaps(
self, text: str, extra_info_str: Optional[str] = None
) -> List[TextSplit]:
"""
Split incoming text and return chunks with overlap size.
Has a preference for complete sentences, phrases, and minimal overlap.
"""
if text == "":
return []
# NOTE: Consider extra info str that will be added to the chunk at query time
# This reduces the effective chunk size that we can have
if extra_info_str is not None:
# NOTE: extra 2 newline chars for formatting when prepending in query
num_extra_tokens = len(self.tokenizer(f"{extra_info_str}\n\n")) + 1
effective_chunk_size = self._chunk_size - num_extra_tokens
if effective_chunk_size <= 0:
raise ValueError(
"Effective chunk size is non positive after considering extra_info"
)
else:
effective_chunk_size = self._chunk_size
# First we split paragraphs using separator
splits = text.split(self.paragraph_separator)
# Merge paragraphs that are too small.
idx = 0
while idx < len(splits):
if idx < len(splits) - 1 and len(splits[idx]) < effective_chunk_size:
splits[idx] = "\n\n".join([splits[idx], splits[idx + 1]])
splits.pop(idx + 1)
else:
idx += 1
# Next we split the text using the chunk tokenizer fn,
# which defaults to the sentence tokenizer from nltk.
chunked_splits = [self.chunking_tokenizer_fn(text) for text in splits]
splits = [chunk for split in chunked_splits for chunk in split]
# Check if any sentences exceed the chunk size. If they do, split again
# using the second chunk separator. If it any still exceed,
# use the default separator (" ").
@dataclass
class Split:
text: str # the split text
is_sentence: bool # save whether this is a full sentence
new_splits: List[Split] = []
for split in splits:
split_len = len(self.tokenizer(split))
if split_len <= effective_chunk_size:
new_splits.append(Split(split, True))
else:
if self.second_chunking_regex is not None:
import re
# Default regex is "[^,\.;]+[,\.;]?"
splits2 = re.findall(self.second_chunking_regex, split)
else:
splits2 = [split]
for split2 in splits2:
if len(self.tokenizer(split2)) <= effective_chunk_size:
new_splits.append(Split(split2, False))
else:
splits3 = split2.split(self._separator)
new_splits.extend([Split(split3, False) for split3 in splits3])
# Create the list of text splits by combining smaller chunks.
docs: List[TextSplit] = []
cur_doc_list: List[str] = []
cur_tokens = 0
while len(new_splits) > 0:
cur_token = new_splits[0]
cur_len = len(self.tokenizer(cur_token.text))
if cur_len > effective_chunk_size:
raise ValueError("Single token exceed chunk size")
if cur_tokens + cur_len > effective_chunk_size:
docs.append(TextSplit("".join(cur_doc_list).strip()))
cur_doc_list = []
cur_tokens = 0
else:
if (
cur_token.is_sentence
or cur_tokens + cur_len < effective_chunk_size - self._chunk_overlap
):
cur_tokens += cur_len
cur_doc_list.append(cur_token.text)
new_splits.pop(0)
else:
docs.append(TextSplit("".join(cur_doc_list).strip()))
cur_doc_list = []
cur_tokens = 0
docs.append(TextSplit("".join(cur_doc_list).strip()))
# run postprocessing to remove blank spaces
docs = self._postprocess_splits(docs)
return docs
def split_text(self, text: str, extra_info_str: Optional[str] = None) -> List[str]:
"""Split incoming text and return chunks."""
text_splits = self.split_text_with_overlaps(text, extra_info_str=extra_info_str)
return [text_split.text_chunk for text_split in text_splits]
__all__ = ["TextSplitter", "TokenTextSplitter", "SentenceSplitter"]
| [] |
2024-01-10 | linruier/llama_index | experimental~cli~configuration.py | import os
from configparser import ConfigParser
from typing import Any
from llama_index.embeddings.openai import OpenAIEmbedding
from langchain import OpenAI
from llama_index.indices.base import BaseGPTIndex
from llama_index.embeddings.base import BaseEmbedding
from llama_index import GPTSimpleVectorIndex, ServiceContext, LLMPredictor
from llama_index.data_structs.data_structs_v2 import SimpleIndexDict
CONFIG_FILE_NAME = "config.ini"
JSON_INDEX_FILE_NAME = "index.json"
DEFAULT_CONFIG = {
"store": {"type": "json"},
"index": {"type": "default"},
"embed_model": {"type": "default"},
"llm_predictor": {"type": "default"},
}
def load_config(root: str = ".") -> ConfigParser:
"""Load configuration from file"""
config = ConfigParser()
config.read_dict(DEFAULT_CONFIG)
config.read(os.path.join(root, CONFIG_FILE_NAME))
return config
def save_config(config: ConfigParser, root: str = ".") -> None:
"""Load configuration to file"""
with open(os.path.join(root, CONFIG_FILE_NAME), "w") as fd:
config.write(fd)
def load_index(root: str = ".") -> BaseGPTIndex[Any]:
"""Load existing index file"""
config = load_config(root)
service_context = _load_service_context(config)
if config["store"]["type"] == "json":
index_file = os.path.join(root, JSON_INDEX_FILE_NAME)
else:
raise KeyError(f"Unknown index.type {config['index']['type']}")
if os.path.exists(index_file):
return GPTSimpleVectorIndex.load_from_disk(
index_file, service_context=service_context
)
else:
return GPTSimpleVectorIndex(
index_struct=SimpleIndexDict(), service_context=service_context
)
def save_index(index: BaseGPTIndex[Any], root: str = ".") -> None:
"""Save index to file"""
config = load_config(root)
if config["store"]["type"] == "json":
index_file = os.path.join(root, JSON_INDEX_FILE_NAME)
else:
raise KeyError(f"Unknown index.type {config['index']['type']}")
index.save_to_disk(index_file)
def _load_service_context(config: ConfigParser) -> ServiceContext:
"""Internal function to load service context based on configuration"""
embed_model = _load_embed_model(config)
llm_predictor = _load_llm_predictor(config)
return ServiceContext.from_defaults(
llm_predictor=llm_predictor, embed_model=embed_model
)
def _load_llm_predictor(config: ConfigParser) -> LLMPredictor:
"""Internal function to load LLM predictor based on configuration"""
model_type = config["llm_predictor"]["type"].lower()
if model_type == "default":
return LLMPredictor()
if model_type == "azure":
engine = config["llm_predictor"]["engine"]
return LLMPredictor(llm=OpenAI(engine=engine))
else:
raise KeyError("llm_predictor.type")
def _load_embed_model(config: ConfigParser) -> BaseEmbedding:
"""Internal function to load embedding model based on configuration"""
model_type = config["embed_model"]["type"]
if model_type == "default":
return OpenAIEmbedding()
else:
raise KeyError("embed_model.type")
| [] |
2024-01-10 | agarciaunidos/streamlit | llm_bedrock.py | import streamlit as st
from langchain.llms import OpenAI
from langchain.embeddings import OpenAIEmbeddings
import pinecone
from langchain.vectorstores import Pinecone
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
from langchain.embeddings import BedrockEmbeddings
from langchain.retrievers import AmazonKendraRetriever
from langchain.llms.bedrock import Bedrock
import boto3
import toml
PINECONE_API_KEY = st.secrets.PINECONE_API_KEY
PINECONE_ENV = st.secrets.PINECONE_ENV
openai_api_key = st.secrets.OPENAI_API_KEY
kendra_index = st.secrets.KENDRA_INDEX
bedrock_region = st.secrets.AWS_BEDROCK_REGION
kendra_region = st.secrets.AWS_KENDRA_REGION
#os.environ['OPENAI_API_KEY'] = OPENAI_API_KEY
max_tokens = 1024 # Adjust as needed
temperature = 0.7 # Adjust as needed
index_pinecone_hsdemocracy = 'unidosus-edai-hsdemocracy'
index_pinecone_asu = 'unidosus-edai-asu'
# Setup bedrock
bedrock_client = boto3.client("bedrock-runtime", region_name="us-east-1")
def get_kendra_doc_retriever():
kendra_client = boto3.client("kendra", kendra_region)
retriever = AmazonKendraRetriever(index_id=kendra_index, top_k=3, client=kendra_client, attribute_filter={
'EqualsTo': {
'Key': '_language_code',
'Value': {'StringValue': 'en'}
}
})
return retriever
def embedding_db(index_name):
    # we use the Amazon Bedrock embedding model, and the index name passed by the caller
    embeddings = BedrockEmbeddings(client=bedrock_client, region_name="us-east-1")
text_field = "text"
pinecone.init(
api_key=PINECONE_API_KEY,
environment=PINECONE_ENV
)
index = pinecone.Index(index_name)
vectorstore = Pinecone(index, embeddings, text_field)
return vectorstore
# Function to retrieve answers
def retrieval_answer(query, llm_model, vector_store):
# Select the model based on user choice
if llm_model == 'Anthropic Claude V2':
model_id = "anthropic.claude-v2"
model_kwargs = {"max_tokens_to_sample": max_tokens, "temperature": temperature}
llm = Bedrock(model_id=model_id, region_name=bedrock_region, client=bedrock_client, model_kwargs=model_kwargs)
elif llm_model == 'Amazon Titan Text Express v1':
model_id = "amazon.titan-text-express-v1"
model_kwargs = {"maxTokenCount": max_tokens, "temperature": temperature}
llm = Bedrock(model_id=model_id, region_name=bedrock_region, client=bedrock_client, model_kwargs=model_kwargs)
elif llm_model == 'Ai21 Labs Jurassic-2 Ultra':
model_id = "ai21.j2-ultra-v1"
model_kwargs = {"maxTokens": max_tokens, "temperature": temperature}
llm = Bedrock(model_id=model_id, region_name=bedrock_region, client=bedrock_client, model_kwargs=model_kwargs)
elif llm_model == 'GPT-4-1106-preview':
llm = ChatOpenAI(model_name="gpt-4-1106-preview",openai_api_key = openai_api_key)
else:
return "Invalid LLM model selection."
# Select the Retriever based on user choice
if vector_store == 'Pinecone: Highschool democracy':
retriever = embedding_db(index_pinecone_hsdemocracy)
source = 'Pinecone'
elif vector_store == 'Pinecone: University of Arizona':
retriever = embedding_db(index_pinecone_asu)
source = 'Pinecone'
elif vector_store == 'Kendra: Highschool democracy':
retriever = get_kendra_doc_retriever()
source = 'Kendra'
else:
return "Invalid Vector DB selection."
#llm = Bedrock(model_id=model_id, region_name=bedrock_region, client=bedrock_client, model_kwargs=model_kwargs)
if source == 'Pinecone':
qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever.as_retriever())
response = qa(query)
elif source == 'Kendra':
qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever)
response = qa(query)
return response['result']
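# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Example call wiring a Bedrock-hosted model to the Pinecone-backed retriever.
# The question string is illustrative only.
#
#   answer = retrieval_answer(
#       query="What does the curriculum cover?",
#       llm_model='Anthropic Claude V2',
#       vector_store='Pinecone: Highschool democracy',
#   )
#   print(answer)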
| [] |
2024-01-10 | coolbeevip/langchain_feature_playground | service~langchain_openai_documents.py | # -*- coding: UTF-8 -*-
import os
from langchain import FAISS
from langchain.document_loaders import TextLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from service.langchain_openai_interface import ILangChainOpenAI
class LangChainOpenAIDocuments(ILangChainOpenAI):
def __init__(self):
file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data', 'state_of_the_union.txt'))
loader = TextLoader(file_path)
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=2000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
self.embeddings = OpenAIEmbeddings()
self.db = FAISS.from_documents(docs, self.embeddings)
def get_answer(self, question: str) -> str:
embedding_vector = self.embeddings.embed_query(question)
docs = self.db.similarity_search_by_vector(embedding_vector)
return docs[0].page_content
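# --- Hedged usage sketch (added for illustration; not part of the original module) ---
#   service = LangChainOpenAIDocuments()   # builds the FAISS index from the bundled text file
#   print(service.get_answer("What did the president say about the economy?"))
# Note: this assumes OPENAI_API_KEY is set in the environment, since
# OpenAIEmbeddings reads the key from there by default.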
| [] |
2024-01-10 | H4lo/Away_From_Sub_Function_IN_IDA | Away_From_Sub_Function_IN_IDA.py | import openai
import json
import idc
from idc import *
import idaapi
from idautils import *
openai.api_key = "" # modify it! Refer https://mp.weixin.qq.com/s/E4n63jltBPbAo8ZIMH10ig to register openai.
# Note: Functions() returns a one-shot generator, so it is created inside
# search_noname_function() each time the renaming action runs (see below).
MAX_LINE_TOKEN = 60
class ExplainHandler(idaapi.action_handler_t):
def __init__(self):
idaapi.action_handler_t.__init__(self)
def activate(self, ctx):
search_noname_function()
return True
def update(self, ctx):
return idaapi.AST_ENABLE_ALWAYS
class OpenAIVeryGOOD(idaapi.plugin_t):
explain_action_name = "OpenAIVeryGOOD:rename_function"
explain_menu_path = "Edit/OpenAIVeryGOOD/Please help me Auto Recover Sub Function"
explain_action = idaapi.action_desc_t(explain_action_name,
'Please help me Auto Recover Sub Function',
ExplainHandler(),
"Ctrl+Alt+K",
'Use davinci-003 to explain the currently selected function',
199)
idaapi.register_action(explain_action)
idaapi.attach_action_to_menu(explain_menu_path, explain_action_name, idaapi.SETMENU_APP)
def query_model(pseudocode):
query = "Can you explain what the following C function does and suggest a better name for it?\n%s"%(pseudocode)
response = openai.Completion.create(
model="text-davinci-003",
prompt=query,
temperature=0.6,
max_tokens=2500,
top_p=1,
frequency_penalty=1,
presence_penalty=1,
timeout=60 # Wait 60 seconds maximum
)
jobj = json.loads(str(response))
func_comment,func_name = jobj['choices'][0]['text'],jobj['choices'][0]['text'].split()[-1]
return (func_comment,func_name)
def search_noname_function():
# Iterate over the functions
    for f in Functions():
# Get the name of the function
name = GetFunctionName(f)
# Check if the name starts with "sub_"
if name.startswith("sub_"):
# Print the function name and its address
# Use the idaapi.decompile function to get the pseudocode
pseudocode = idaapi.decompile(f)
# Print the pseudocode
#print(pseudocode)
# Count the number of lines in the pseudocode
lines = str(pseudocode).split("\n")
if(len(lines)<MAX_LINE_TOKEN):
func_comment,new_func_name = query_model(pseudocode)
new_func_name = new_func_name.replace("\"","").replace("(","").replace(")","").replace("'","").replace(".","")
print("Function {} found at 0x{:X}, ready rename function: {}".format(name, f, new_func_name))
MakeName(eval("0x{:X}".format(f)),new_func_name)
new_cmt = ""
for i in range(len(func_comment)//100+1):
new_cmt += func_comment[i*100:(i+1)*100]+"\n"
                set_func_cmt(f, new_cmt, 0)
else:
print("[-] Max line limited, Pass!")
def PLUGIN_ENTRY():
OpenAIVeryGOOD()
| [] |
2024-01-10 | Teddy-Li/LLM-NLI-Analysis | gpt3_inference.py | import json
from collections import Counter
from utils import load_general_entries, load_typed_general_entries, negate, wrap_prompt_completion, wrap_prompt_chat, \
find_best_f_beta_from_curve, print_metrics, acquire_in_context_examples, get_gpt_template
import argparse
import openai
import os
import random
import sys
import time
import math
from typing import List, Tuple
import matplotlib.pyplot as plt
# INFERENCE_OPTION_STR_TRINARY = "\nA) Entailment\nB) Neutral\nC) Contradiction\nAnswer:"
# KNOWLEDGE_OPTION_STR_TRINARY = "\nA) True\nB) Unknown\nC) False\nAnswer:"
# INFERENCE_OPTION_STR_BINARY = " Is this True or False?\nA) True\nB) False\nAnswer:"
# KNOWLEDGE_OPTION_STR_BINARY = " Is this True or False?\nA) True\nB) False\nAnswer:"
chat_models = ['gpt-4', 'gpt-4-0314', 'gpt-4-32k', 'gpt-4-32k-0314', 'gpt-3.5-turbo', 'gpt-3.5-turbo-0301']
def get_gpt3_output(prompt: str, model_name: str = "text-davinci-003", max_tokens: int = 32, temperature: float = 0.0,
top_p: float = 1.0, use_binary_options: bool = False, debug: bool = False) -> Tuple[str, float, str]:
def option_matcher(output: str, char: str) -> bool:
if output == char:
return True
elif output == char.lower():
return True
elif output.startswith(char + ')'):
return True
elif output.startswith(char + ' '):
return True
elif output.startswith(char + '.'):
return True
elif output.startswith(char + '-'):
return True
else:
return False
if args.dry_run:
scr = random.random()
label = 'A' if scr > 0.5 else 'B'
return label, scr, None
if model_name in chat_models:
prompt_dict = wrap_prompt_chat(prompt, model_name, max_tokens, temperature, top_p)
else:
prompt_dict = wrap_prompt_completion(prompt, model_name, max_tokens, temperature, top_p)
response = None
for i in range(3):
try:
if model_name in ['gpt-4', 'gpt-4-0314', 'gpt-4-32k', 'gpt-4-32k-0314', 'gpt-3.5-turbo', 'gpt-3.5-turbo-0301']:
response = openai.ChatCompletion.create(**prompt_dict)
else:
response = openai.Completion.create(**prompt_dict)
time.sleep(args.sleep_after_query)
break
except Exception as e:
print(f"Error: {e}")
            if i == 2:  # last attempt of range(3); give up and fall through
pass
else:
time.sleep(args.sleep_after_query)
print(f"Retrying...")
continue
if model_name in chat_models:
if response is None:
print(f"Error: response is None", file=sys.stderr)
return 'B', 0.0, response
else:
ret_text = response['choices'][0]['message']['content']
# print(f"Returned text: {ret_text}")
if option_matcher(ret_text, 'A'):
return 'A', 1.0, response
elif option_matcher(ret_text, 'B'):
return 'B', 0.0, response
elif option_matcher(ret_text, 'C'):
return 'C', 0.0, response
else:
print(f"Error: response is not a boolean: {ret_text}; regarding it as a False.", file=sys.stderr)
return 'B', 0.0, response
else:
if response is not None:
answer = response['choices'][0]['text'].strip(' ')
if response['choices'][0]['logprobs']['tokens'][0].strip() == ':':
logprobs_first_token = response['choices'][0]['logprobs']['tokens'][1]
else:
logprobs_first_token = response['choices'][0]['logprobs']['tokens'][0]
if logprobs_first_token.strip().lower() not in ['a', 'b', 'c']:
print(f"Error in logprobs_first_token: {logprobs_first_token}", file=sys.stderr)
pass
logprob = response['choices'][0]['logprobs']['token_logprobs'][0]
else:
answer = None
logprobs_first_token = None
logprob = None
if debug:
print(answer)
if answer is None:
return 'B', 0.0, response
elif option_matcher(answer, 'A'):
# print("!")
assert 0 < math.exp(logprob) < 1
effective_scr = 0.5 + 0.5*math.exp(logprob)
return 'A', effective_scr, response
elif use_binary_options and option_matcher(answer, 'B'):
assert 0 < math.exp(logprob) < 1
effective_scr = 0.5 - 0.5 * math.exp(logprob)
return 'B', effective_scr, response
elif (not use_binary_options) and (option_matcher(answer, 'B') or option_matcher(answer, 'C')):
assert 0 < math.exp(logprob) < 1
effective_scr = 0.5 - 0.5 * math.exp(logprob)
if option_matcher(answer, 'B'):
return 'B', effective_scr, response
elif option_matcher(answer, 'C'):
return 'C', effective_scr, response
else:
raise AssertionError
else:
print(f"Unexpected answer for binary_options={use_binary_options}: {answer}", file=sys.stderr)
return 'B', 0.0, response
def vote(answers: List[bool]):
return sum(answers) > len(answers) / 2
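# Illustration (added comment): vote([True, True, False]) -> True, while
# vote([True, False]) -> False, since a strict majority is required and ties
# therefore count as False.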
def retrieve_results_main(args):
if args.hypothesis_only:
sent_template_activate_flags = [True]
sent_template_to_test = [
{'s': '{hyp}.', 'do_neg': False}
]
else:
if args.tplt_id is not None:
sent_template_activate_flags = [False] * 8
sent_template_activate_flags[args.tplt_id] = True
else:
# sent_template_activate_flags = [False, True, False, False, False, False, False, False]
# sent_template_activate_flags = [False, False, False, False, True, False, False, False]
sent_template_activate_flags = [True, True, False, True, True, False, False, False]
# sent_template_activate_flags = [True, True, True, True, True, True, True, True]
sent_template_to_test = [
{'s': "{prm}, which means that {hyp}.", 'do_neg': False},
{'s': "If {prm}, then {hyp}.", 'do_neg': False},
{'s': "{hyp}, because {prm}.", 'do_neg': False},
{'s': "{prm}, so {hyp}.", 'do_neg': False},
{'s': "{prm} entails {hyp}.", 'do_neg': False},
{'s': "It is not the case that {hyp}, let alone {prm}.", 'do_neg': False},
{'s': "{prm}, because {hyp}.", 'do_neg': True},
{'s': "{hyp}, which means that {prm}.", 'do_neg': True},
]
sent_template_to_test = [x for x, y in zip(sent_template_to_test, sent_template_activate_flags) if y]
assert args.num_templates == len(sent_template_to_test)
openai.organization = os.getenv('OPENAI_ORG_ID')
openai.api_key = os.getenv('OPENAI_API_KEY')
if args.use_plhr in ['original', 'xy', 'shuffled', 'randprem-orig', 'lowfreq', 'highfreq']:
prem_hyp_pairs = load_general_entries(args.infn_for_eval) # these are the premise-hypothesis pairs that are True Entailments
elif args.use_plhr in ['type', 'randprem-type']:
prem_hyp_pairs = load_typed_general_entries(args.infn_for_eval)
else:
raise AssertionError(f"Unknown use_plhr value: {args.use_plhr}")
    preds = [[] for x in range(args.num_templates+3)]  # the +3 are for voting, any, and consensus
    golds = [[] for x in range(args.num_templates+3)]  # the +3 are for voting, any, and consensus
responses = [[] for x in range(args.num_templates)]
ready_entries = []
try:
ref_fn = args.res_fn+'_ref'
ref_fp = open(ref_fn, 'r', encoding='utf-8')
for line in ref_fp:
if len(line) < 2:
continue
item = json.loads(line)
ready_entries.append(item)
ref_fp.close()
print(f"Loaded {len(ready_entries)} entries from {args.res_fn+'_ref'}")
except FileNotFoundError:
print(f"File {args.res_fn+'_ref'} not found, will start from scratch.")
ofp = open(args.res_fn, 'w', encoding='utf-8')
ready_count = 0
print_flag = True
start_t = time.time()
# For each premise-hypothesis pair, get the templates and score them with the model;
# let the 5 templates vote on which one is better.
for ent_idx, (prem, hyp, lbl, aligned_flag) in enumerate(prem_hyp_pairs):
if ent_idx % 5 == 0:
curr_t = time.time()
durr = curr_t - start_t
print(f'Processing entry {ent_idx} of {len(prem_hyp_pairs)}; durr: {durr//60}m {durr%60}s;')
if lbl == 'True':
lbl = True
elif lbl == 'False':
lbl = False
else:
raise AssertionError(f"Unknown label: {lbl}")
ready_found = False
heu_ent = ready_entries[ent_idx] if ent_idx < len(ready_entries) else None
if heu_ent is not None and heu_ent['premise'] == prem and heu_ent['hypothesis'] == hyp:
ready_ent = heu_ent
ready_found = True
else:
ready_ent = None
for ready_ent in ready_entries:
if prem == ready_ent['premise'] and hyp == ready_ent['hypothesis']:
ready_found = True
break
if ready_found is True and lbl == ready_ent['gold']:
ready_found = True
ready_count += 1
print(f"Ready entry found for {prem} and {hyp}: cnt: {ready_count};")
for i in range(args.num_templates):
preds[i].append(ready_ent['preds'][i])
binarized_preds = [x > 0.5 for x in ready_ent['preds']]
preds[args.num_templates].append(vote(binarized_preds))
preds[args.num_templates+1].append(any(binarized_preds))
preds[args.num_templates+2].append(all(binarized_preds))
for i in range(args.num_templates+3):
golds[i].append(ready_ent['gold'])
ofp.write(json.dumps(ready_ent, ensure_ascii=False) + '\n')
continue
elif ready_found is True:
print(f"Ready entry found for {prem} and {hyp}, but the gold label is different: {ready_ent['gold']} vs. {lbl};")
pass
else:
pass
entry_preds = []
entry_preds_binarized = []
entry_preds_tokenized = []
for tplt_idx in range(args.num_templates):
if args.hypothesis_only:
prem = None
single_statement = 'h'
else:
single_statement = None
curr_t = get_gpt_template(prem, hyp, aligned=aligned_flag, use_plhr=args.use_plhr, in_context=args.in_context,
tplt_fmt=sent_template_to_test[tplt_idx]['s'],
do_neg=sent_template_to_test[tplt_idx]['do_neg'], use_binary_options=args.use_binary_options,
single_statement=single_statement, rev_hyp_args=args.rev_hyp_args,
has_instruction=args.instruction)
if args.debug or print_flag:
print(f"Current prompt:")
print(curr_t)
print_flag = False
curr_res, curr_scr, response = get_gpt3_output(curr_t, args.model_name, max_tokens=args.max_tokens,
temperature=args.temperature,
use_binary_options=args.use_binary_options, debug=args.debug)
responses[tplt_idx].append(response)
assert isinstance(curr_res, str) and isinstance(curr_scr, float)
assert curr_res in ['A', 'B', 'C']
preds[tplt_idx].append(curr_scr) # here the scr > 0.5 means binary-True, and < 0.5 means binary-False
entry_preds_tokenized.append(curr_res)
entry_preds_binarized.append(True if curr_res == 'A' else False)
entry_preds.append(curr_scr)
if args.sleep_after_query > 0:
time.sleep(args.sleep_after_query)
preds[args.num_templates].append(vote(entry_preds_binarized))
preds[args.num_templates+1].append(any(entry_preds_binarized))
preds[args.num_templates+2].append(all(entry_preds_binarized))
for i in range(args.num_templates+3):
golds[i].append(lbl)
out_item = {
'premise': prem,
'hypothesis': hyp,
'preds': entry_preds,
'preds_tokenized': entry_preds_tokenized,
'gold': lbl,
}
ofp.write(json.dumps(out_item, ensure_ascii=False) + '\n')
time.sleep(1)
saved_responses_fn = args.res_fn.replace('.json', '__response.json')
with open(saved_responses_fn, 'w', encoding='utf-8') as saved_responses_fp:
json.dump(responses, saved_responses_fp, indent=4)
for tplt_idx in range(args.num_templates+3):
# Calculate the binary scores
if tplt_idx == args.num_templates:
print(f"Voting:")
elif tplt_idx == args.num_templates+1:
print(f"Any:")
elif tplt_idx == args.num_templates+2:
print(f"Consensus:")
else:
print(f"Template {tplt_idx}: {sent_template_to_test[tplt_idx]}")
print(f"Using placeholders for the subjects and objects? {args.use_plhr}")
# Calculate the precision-recall curve
print_metrics(golds[tplt_idx], preds[tplt_idx], legend_str=f"{tplt_idx}", beta=args.beta)
ofp.close()
print(f"Finished! Results written to {args.res_fn}.")
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title("Precision Recall Curves")
plt.legend()
plt.draw()
# plt.show()
assert args.res_fn.endswith('.json')
plt.savefig(f"{args.res_fn}".replace('.json', '.png'))
if args.subset == 'full':
print(f"Also doing evaluation on the directional subset:")
try:
get_scr_from_full_result(args, dirscr=True)
except Exception as e:
print(f"Error: {e}")
print(f"Skipping directional subset evaluation.")
def get_scr_from_full_result(args, dirscr: bool):
banned_template_ids = [5, 6, 7] # These "banned templates" are effective only when calculating benchmark scores from raw results.
if dirscr:
diridx_fpath = f'./levyholt_files/dir_files/with_original/{args.split}_idxes.json'
with open(diridx_fpath, 'r', encoding='utf-8') as diridx_fp:
diridxes = json.load(diridx_fp)
else:
diridxes = None
if args.use_plhr == 'original':
order_str = '_entord'
elif args.use_plhr in ['type', 'shuffled', 'pseudoents', 'randprem-type', 'randprem-orig',
'lowfreq', 'highfreq']:
order_str = '_ordered'
else:
raise AssertionError
inclusion_flags_fpath = f'./levyholt_files/dir_files/with_original/{args.split}_inclusion_flags{order_str}.json'
with open(inclusion_flags_fpath, 'r', encoding='utf-8') as inclusion_flags_fp:
inclusion_flags = json.load(inclusion_flags_fp)
full_results = []
try:
with open(args.res_fn, 'r', encoding='utf-8') as res_fp:
for line in res_fp:
full_results.append(json.loads(line))
except FileNotFoundError as e:
with open(args.res_fn.replace('/results/', '/results_2/'), 'r', encoding='utf-8') as res_fp:
for line in res_fp:
full_results.append(json.loads(line))
assert len(full_results) == len(inclusion_flags)
    preds = [[] for x in range(args.num_templates + 3)]  # the +3 are for voting, any, and consensus
    golds = [[] for x in range(args.num_templates + 3)]  # the +3 are for voting, any, and consensus
    pred_tokens = [[] for x in range(args.num_templates)]  # one list per template
for ridx, (res_entry, i_flag) in enumerate(zip(full_results, inclusion_flags)):
if diridxes is not None and ridx not in diridxes:
continue
if args.inclusion_subset == 'yes' and i_flag == False:
continue
elif args.inclusion_subset == 'no' and i_flag == True:
continue
eligible_preds = []
for tplt_idx in range(args.num_templates):
if tplt_idx in banned_template_ids:
continue
preds[tplt_idx].append(res_entry['preds'][tplt_idx])
eligible_preds.append(res_entry['preds'][tplt_idx])
if 'preds_tokenized' in res_entry:
pred_tokens[tplt_idx].append(res_entry['preds_tokenized'][tplt_idx])
else:
pass
eligible_preds_binarized = [x > 0.5 for x in eligible_preds]
preds[args.num_templates].append(vote(eligible_preds_binarized))
preds[args.num_templates+1].append(any(eligible_preds_binarized))
preds[args.num_templates+2].append(all(eligible_preds_binarized))
for i in range(args.num_templates+3):
golds[i].append(res_entry['gold'])
for tplt_idx in range(args.num_templates+3):
if tplt_idx in banned_template_ids:
continue
# Calculate the binary scores
curr_tplt_binarized_preds = [x > 0.5 for x in preds[tplt_idx]]
if tplt_idx < len(pred_tokens) and len(pred_tokens[tplt_idx]) > 0:
print(f"Template {tplt_idx} Predicted label distribution:")
curr_tplt_pred_tokens = pred_tokens[tplt_idx]
assert all([x in ['A', 'B', 'C'] for x in curr_tplt_pred_tokens])
a_cnt = sum([1 for x in curr_tplt_pred_tokens if x == 'A'])
b_cnt = sum([1 for x in curr_tplt_pred_tokens if x == 'B'])
c_cnt = sum([1 for x in curr_tplt_pred_tokens if x == 'C'])
total_cnt = len(curr_tplt_pred_tokens)
print(f"Pred tokenized: A: {a_cnt} ({a_cnt/total_cnt:.4f}), B: {b_cnt} ({b_cnt/total_cnt:.4f}), C: {c_cnt} ({c_cnt/total_cnt:.4f}); Total: {total_cnt}")
else:
print(f"Predicted label distribution unavailable.")
if tplt_idx == args.num_templates:
print(f"Voting:")
elif tplt_idx == args.num_templates + 1:
print(f"Any:")
elif tplt_idx == args.num_templates + 2:
print(f"Consensus:")
else:
print(f"Template {tplt_idx}:")
print(f"Using placeholders for the subjects and objects? {args.use_plhr}")
# Calculate the precision-recall curve
print_metrics(golds[tplt_idx], preds[tplt_idx], f"Template {tplt_idx}", args.beta)
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title("Precision Recall Curves")
plt.legend()
plt.draw()
plt.show()
assert args.res_fn.endswith('.json')
plt.savefig(f"{args.res_fn}".replace('.json', f'_inc={args.inclusion_subset}.png'))
def format_data_main(args):
if args.hypothesis_only:
sent_template_activate_flags = [True]
sent_template_to_test = [
{'s': '{hyp}.', 'do_neg': False}
]
else:
if args.tplt_id is not None:
sent_template_activate_flags = [False] * 8
sent_template_activate_flags[args.tplt_id] = True
else:
# sent_template_activate_flags = [False, True, False, False, False, False, False, False]
# sent_template_activate_flags = [False, False, False, False, True, False, False, False]
sent_template_activate_flags = [True, True, False, True, True, False, False, False]
# sent_template_activate_flags = [True, True, True, True, True, True, True, True]
sent_template_to_test = [
{'s': "{prm}, which means that {hyp}.", 'do_neg': False},
{'s': "If {prm}, then {hyp}.", 'do_neg': False},
{'s': "{hyp}, because {prm}.", 'do_neg': False},
{'s': "{prm}, so {hyp}.", 'do_neg': False},
{'s': "{prm} entails {hyp}.", 'do_neg': False},
{'s': "It is not the case that {hyp}, let alone {prm}.", 'do_neg': False},
{'s': "{prm}, because {hyp}.", 'do_neg': True},
{'s': "{hyp}, which means that {prm}.", 'do_neg': True},
]
sent_template_to_test = [x for x, y in zip(sent_template_to_test, sent_template_activate_flags) if y]
assert args.num_templates == len(sent_template_to_test)
if args.use_plhr in ['original', 'randprem-orig', 'lowfreq', 'highfreq']:
prem_hyp_pairs = load_general_entries(args.infn_for_eval) # these are the premise-hypothesis pairs that are True Entailments
elif args.use_plhr in ['type', 'randprem-type']:
prem_hyp_pairs = load_typed_general_entries(args.infn_for_eval)
else:
raise AssertionError(f"Unknown use_plhr value: {args.use_plhr}")
inputs_list = [[] for i in range(args.num_templates)]
# For each premise-hypothesis pair, get the templates and score them with the model;
# let the 5 templates vote on which one is better.
for ent_idx, (prem, hyp, lbl, aligned_flag) in enumerate(prem_hyp_pairs):
if ent_idx % 1 == 0:
print(f'Processing entry {ent_idx} of {len(prem_hyp_pairs)};')
if lbl == 'True':
lbl = True
elif lbl == 'False':
lbl = False
else:
raise AssertionError(f"Unknown label: {lbl}")
for tplt_idx in range(args.num_templates):
if args.hypothesis_only:
prem = None
single_statement = 'h'
else:
single_statement = None
curr_t = get_gpt_template(prem, hyp, aligned=aligned_flag, use_plhr=args.use_plhr, in_context=args.in_context,
tplt_fmt=sent_template_to_test[tplt_idx]['s'],
do_neg=sent_template_to_test[tplt_idx]['do_neg'], use_binary_options=args.use_binary_options,
single_statement=single_statement,
rev_hyp_args=args.rev_hyp_args, has_instruction=args.instruction)
inputs_list[tplt_idx].append({'in': curr_t, 'out': None, 'gold': lbl})
with open(args.formatted_fn, 'w', encoding='utf8') as ofp:
json.dump(inputs_list, ofp, indent=4, ensure_ascii=False)
def run_any(args):
assert args.instantiated_in_path is not None and args.instantiated_in_path.endswith('.json')
with open(args.instantiated_in_path, 'r', encoding='utf8') as ifp:
inputs_list = json.load(ifp)
openai.organization = os.getenv('OPENAI_ORG_ID')
openai.api_key = os.getenv('OPENAI_API_KEY')
responses = [[] for i in range(len(inputs_list))] # one list per template
preds = [[] for i in range(len(inputs_list))]
golds = []
preds_tokenized = [[] for i in range(len(inputs_list))]
preds_binarized = [[] for i in range(len(inputs_list))]
ofp = open(args.instantiated_res_path, 'w', encoding='utf-8')
total_entries = len(inputs_list[0])
for eidx in range(total_entries):
if eidx % 1 == 0:
print(f"Processing entry {eidx} / {total_entries};")
entry_preds = []
entry_preds_tokenized = []
entry_inputs = []
lbl = inputs_list[0][eidx]['label']
golds.append(lbl)
for tplt_idx in range(len(inputs_list)):
curr_item = inputs_list[tplt_idx][eidx]
curr_t = curr_item['in']
entry_inputs.append(curr_t)
assert curr_item['label'] == lbl
assert isinstance(curr_t, str)
curr_res, curr_scr, response = get_gpt3_output(curr_t, args.model_name, max_tokens=args.max_tokens,
temperature=args.temperature,
use_binary_options=args.use_binary_options, debug=args.debug)
responses[tplt_idx].append(response)
assert isinstance(curr_res, str) and isinstance(curr_scr, float)
assert curr_res in ['A', 'B', 'C']
preds[tplt_idx].append(curr_scr) # here the scr > 0.5 means binary-True, and < 0.5 means binary-False
preds_tokenized[tplt_idx].append(curr_res)
preds_binarized[tplt_idx].append(True if curr_res == 'A' else False)
entry_preds.append(curr_scr)
entry_preds_tokenized.append(curr_res)
if args.sleep_after_query > 0:
time.sleep(args.sleep_after_query)
out_item = {
'inputs': entry_inputs,
'preds': entry_preds,
'preds_tokenized': entry_preds_tokenized,
'gold': lbl,
}
ofp.write(json.dumps(out_item, ensure_ascii=False) + '\n')
time.sleep(2)
print(f"Results written to {args.res_fn}.")
ofp.close()
# Now compute the accuracy of each template
for tplt_idx in range(len(inputs_list)):
# Calculate the binary scores
print_metrics(golds, preds[tplt_idx], f"Template {tplt_idx}", args.beta)
pred_a_cnt = Counter(preds_tokenized[tplt_idx])['A']
pred_b_cnt = Counter(preds_tokenized[tplt_idx])['B']
pred_c_cnt = Counter(preds_tokenized[tplt_idx])['C']
print(f"Prediction distribution: A: {pred_a_cnt}, B: {pred_b_cnt}, C: {pred_c_cnt}; Total: {len(preds_tokenized[tplt_idx])}")
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title("Precision Recall Curves")
plt.legend()
plt.draw()
# plt.show()
assert args.res_fn.endswith('.json')
plt.savefig(f"{args.res_fn}".replace('.json', '.png'))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--in_fn', type=str,
default='./levyholt_files/%s_files/with_original/%s.txt')
parser.add_argument('--typed_in_fn', type=str,
default='./levyholt_files/%s_files/with_type/%s%s.txt') # from '../../entgraph_eval/gfiles/ent/test_dir%s.txt'
parser.add_argument('--shuffled_in_fn', type=str,
default='./levyholt_files/%s_files/with_shuffled_entities/%s.txt')
parser.add_argument('--model_name', type=str, default='text-davinci-003')
parser.add_argument('--max_tokens', type=int, default=8)
parser.add_argument('--temperature', type=float, default=0.0)
parser.add_argument('--results_root', type=str, default='./results/gpt_results')
parser.add_argument('--res_fn', type=str, default='gpt3_%s_res_%s_text_%s_%s_icl=%s%s%s_%d%s.json')
parser.add_argument('--formatted_fn', type=str, default='./formatted/gpt4_formin_%s_%s_%s_icl=%s%s%s_%d.json')
parser.add_argument('--use_plhr', type=str, default='original')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--in_context', type=str, default='none')
parser.add_argument('--num_templates', type=int, default=4)
parser.add_argument('--hypothesis-only', action='store_true')
parser.add_argument('--subset', type=str, default='dir', choices=['dir', 'full'])
parser.add_argument('--split', type=str, default='dev')
parser.add_argument('--task', type=str, default='query')
parser.add_argument('--dry-run', action='store_true') # will not call the actual API; instead use random fake data
parser.add_argument('--rev-hyp-args', action='store_true')
parser.add_argument('--use-binary-options', action='store_true')
parser.add_argument('--inclusion_subset', type=str, default='any', choices=['any', 'yes', 'no'])
parser.add_argument('--beta', type=float, default=0.5, help='beta for F-score.')
parser.add_argument('--tplt_id', type=int, default=None)
parser.add_argument('--res_suffix', type=str, default='')
parser.add_argument('--sleep_after_query', type=float, default=1)
parser.add_argument('--instruction', action='store_true')
parser.add_argument('--instantiated_in_path', type=str, default=None)
parser.add_argument('--instantiated_res_path', type=str, default=None)
args = parser.parse_args()
print(args)
assert args.use_plhr in ['original', 'type', 'randprem-type', 'randprem-orig', 'lowfreq', 'highfreq']
assert not (args.hypothesis_only and (args.in_context not in ['none', 'lbl'])), 'Not Implemented: ICL with Explanations with Hypothesis-only baseline'
# assert not (args.hypothesis_only and (args.use_plhr != 'original')), 'Not Implemented: argument replacements with Hypothesis-only baseline'
assert args.inclusion_subset in ['any', 'yes', 'no']
binary_str = '_binary' if args.use_binary_options else '_trinary'
instruct_str = '_instruct' if args.instruction else ''
hyponly_str = '_hyponly' if args.hypothesis_only else ''
args.res_fn = args.res_fn % (args.model_name, args.subset, args.split, args.use_plhr, args.in_context, binary_str, instruct_str, args.num_templates, hyponly_str)
args.res_fn = os.path.join(args.results_root, args.res_fn)
args.formatted_fn = args.formatted_fn % (args.subset, args.split, args.use_plhr, args.in_context, binary_str, instruct_str, args.num_templates)
if args.rev_hyp_args:
args.res_fn = args.res_fn.replace('.json', '_rev-hyp-args.json')
if args.use_plhr in ['original', 'xy']:
args.infn_for_eval = args.in_fn % (args.subset, args.split)
elif args.use_plhr == 'shuffled':
args.infn_for_eval = args.shuffled_in_fn % (args.subset, args.split)
elif args.use_plhr == 'type':
args.infn_for_eval = args.typed_in_fn % (args.subset, args.split, '%s')
elif args.use_plhr == 'randprem-orig':
args.infn_for_eval = f'./levyholt_files/{args.subset}_files/randprem_files/test_randprem.txt'
elif args.use_plhr == 'randprem-type':
args.infn_for_eval = f'./levyholt_files/{args.subset}_files/randprem_files/test_randprem%s.txt'
elif args.use_plhr == 'lowfreq':
args.infn_for_eval = f'./levyholt_files/{args.subset}_files/swapped_entities/{args.split}_bottom0.05.txt'
elif args.use_plhr == 'highfreq':
args.infn_for_eval = f'./levyholt_files/{args.subset}_files/swapped_entities/{args.split}_top0.05.txt'
else:
raise NotImplementedError
print(f"Evaluating {args.infn_for_eval} with model {args.model_name}, and saving results to {args.res_fn}")
if args.task == 'benchmark':
print(f"Getting scores for the full dataset:")
get_scr_from_full_result(args, dirscr=False)
elif args.task == 'query':
retrieve_results_main(args)
elif args.task == 'data':
format_data_main(args)
elif args.task == 'run_any':
args.instantiated_in_path = args.instantiated_in_path % (args.split, args.use_plhr)
args.instantiated_res_path = args.instantiated_res_path % (args.model_name, args.split, args.use_plhr)
print(
f"Running inference for input: {args.instantiated_in_path}; saving results to {args.instantiated_res_path}")
run_any(args)
else:
raise ValueError()
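# Example invocations (illustrative; the script name is a placeholder, and only
# flags defined by the argument parser above are used):
#   python gpt_eval_script.py --task data --subset dir --split dev --use_plhr original --num_templates 4
#   python gpt_eval_script.py --task query --subset dir --split dev --use_plhr original --num_templates 4 --model_name text-davinci-003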
| [
"[{'s': '{hyp}.', 'do_neg': False}]",
"[{'s': '{prm}, which means that {hyp}.', 'do_neg': False}, {'s': 'If {prm}, then {hyp}.', 'do_neg': False}, {'s': '{hyp}, because {prm}.', 'do_neg': False}, {'s': '{prm}, so {hyp}.', 'do_neg': False}, {'s': '{prm} entails {hyp}.', 'do_neg': False}, {'s': 'It is not the case that {hyp}, let alone {prm}.', 'do_neg': False}, {'s': '{prm}, because {hyp}.', 'do_neg': True}, {'s': '{hyp}, which means that {prm}.', 'do_neg': True}]",
"[True]",
"[False, False, False, False, False, False, False, False]",
"['PLACEHOLDER']",
"[5, 6, 7]",
"[True, True, False, True, True, False, False, False]"
] |
2024-01-10 | jjovalle99/DocuQuery2 | src~loaders.py | from io import BytesIO
from typing import List
from chainlit.types import AskFileResponse
from langchain.docstore.document import Document
from pypdf import PdfReader
def get_docs(files: List[AskFileResponse], splitter) -> List[str]:
docs = []
for file in files:
reader = PdfReader(BytesIO(file.content))
doc = [
Document(
page_content=page.extract_text(),
metadata={"source": file.path, "page": page.page_number},
)
for page in reader.pages
]
docs.append(doc)
splitted_docs = [splitter.split_documents(doc) for doc in docs]
for doc in splitted_docs:
for i, chunk in enumerate(doc, start=1):
chunk.metadata["chunk"] = i
unnested_splitted_docs = [chunk for doc in splitted_docs for chunk in doc]
return unnested_splitted_docs
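# Minimal usage sketch (illustrative, not part of the original module): any
# LangChain text splitter works as `splitter`; `files` would come from a
# Chainlit file-upload response at runtime, so that call is left commented.
if __name__ == "__main__":
    from langchain.text_splitter import RecursiveCharacterTextSplitter
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    # chunks = get_docs(files=uploaded_files, splitter=splitter)
    # print(len(chunks), "chunks carrying 'source', 'page' and 'chunk' metadata")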
| [] |
2024-01-10 | lmxhappy/Grounded-Segment-Anything | gradio_app.py | import os
import random
import cv2
from scipy import ndimage
import gradio as gr
import argparse
import numpy as np
import torch
import torchvision
from PIL import Image, ImageDraw, ImageFont
# Grounding DINO
import GroundingDINO.groundingdino.datasets.transforms as T
from GroundingDINO.groundingdino.models import build_model
from GroundingDINO.groundingdino.util.slconfig import SLConfig
from GroundingDINO.groundingdino.util.utils import clean_state_dict, get_phrases_from_posmap
# segment anything
from segment_anything import build_sam, SamPredictor, SamAutomaticMaskGenerator
import numpy as np
# diffusers
import torch
from diffusers import StableDiffusionInpaintPipeline
# BLIP
from transformers import BlipProcessor, BlipForConditionalGeneration
import openai
def show_anns(anns):
if len(anns) == 0:
return
sorted_anns = sorted(anns, key=(lambda x: x['area']), reverse=True)
full_img = None
# for ann in sorted_anns:
for i in range(len(sorted_anns)):
ann = anns[i]
m = ann['segmentation']
if full_img is None:
full_img = np.zeros((m.shape[0], m.shape[1], 3))
map = np.zeros((m.shape[0], m.shape[1]), dtype=np.uint16)
map[m != 0] = i + 1
color_mask = np.random.random((1, 3)).tolist()[0]
full_img[m != 0] = color_mask
full_img = full_img * 255
# anno encoding from https://github.com/LUSSeg/ImageNet-S
res = np.zeros((map.shape[0], map.shape[1], 3))
res[:, :, 0] = map % 256
res[:, :, 1] = map // 256
res.astype(np.float32)
full_img = Image.fromarray(np.uint8(full_img))
return full_img, res
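# Illustrative helper (an assumption, not used elsewhere in this app): the
# ImageNet-S style encoding above splits each instance id across the first two
# channels of `res`, so it can be recovered as id = R + 256 * G.
def decode_instance_ids(res):
    return res[:, :, 0] + 256 * res[:, :, 1]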
def generate_caption(processor, blip_model, raw_image):
# unconditional image captioning
inputs = processor(raw_image, return_tensors="pt").to("cuda", torch.float16)
out = blip_model.generate(**inputs)
caption = processor.decode(out[0], skip_special_tokens=True)
return caption
def generate_tags(caption, split=',', max_tokens=100, model="gpt-3.5-turbo", openai_api_key=''):
openai.api_key = openai_api_key
openai.api_base = 'https://closeai.deno.dev/v1'
prompt = [
{
'role': 'system',
'content': 'Extract the unique nouns in the caption. Remove all the adjectives. ' + \
f'List the nouns in singular form. Split them by "{split} ". ' + \
f'Caption: {caption}.'
}
]
response = openai.ChatCompletion.create(model=model, messages=prompt, temperature=0.6, max_tokens=max_tokens)
reply = response['choices'][0]['message']['content']
# sometimes return with "noun: xxx, xxx, xxx"
tags = reply.split(':')[-1].strip()
return tags
def transform_image(image_pil):
transform = T.Compose(
[
T.RandomResize([800], max_size=1333),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
image, _ = transform(image_pil, None) # 3, h, w
return image
def load_model(model_config_path, model_checkpoint_path, device):
args = SLConfig.fromfile(model_config_path)
args.device = device
model = build_model(args)
checkpoint = torch.load(model_checkpoint_path, map_location="cpu")
load_res = model.load_state_dict(clean_state_dict(checkpoint["model"]), strict=False)
print(load_res)
_ = model.eval()
return model
def get_grounding_output(model, image, caption, box_threshold, text_threshold, with_logits=True):
caption = caption.lower()
caption = caption.strip()
if not caption.endswith("."):
caption = caption + "."
with torch.no_grad():
outputs = model(image[None], captions=[caption])
logits = outputs["pred_logits"].cpu().sigmoid()[0] # (nq, 256)
boxes = outputs["pred_boxes"].cpu()[0] # (nq, 4)
logits.shape[0]
# filter output
logits_filt = logits.clone()
boxes_filt = boxes.clone()
filt_mask = logits_filt.max(dim=1)[0] > box_threshold
logits_filt = logits_filt[filt_mask] # num_filt, 256
boxes_filt = boxes_filt[filt_mask] # num_filt, 4
logits_filt.shape[0]
# get phrase
tokenlizer = model.tokenizer
tokenized = tokenlizer(caption)
# build pred
pred_phrases = []
scores = []
for logit, box in zip(logits_filt, boxes_filt):
pred_phrase = get_phrases_from_posmap(logit > text_threshold, tokenized, tokenlizer)
if with_logits:
pred_phrases.append(pred_phrase + f"({str(logit.max().item())[:4]})")
else:
pred_phrases.append(pred_phrase)
scores.append(logit.max().item())
return boxes_filt, torch.Tensor(scores), pred_phrases
def draw_mask(mask, draw, random_color=False):
"""
    Paint the pixels covered by `mask` onto the given `draw` (ImageDraw) object.
"""
if random_color:
color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255), 153)
else:
color = (30, 144, 255, 153)
nonzero_coords = np.transpose(np.nonzero(mask))
for coord in nonzero_coords:
draw.point(coord[::-1], fill=color)
def draw_box(box, draw, label):
# random color
color = tuple(np.random.randint(0, 255, size=3).tolist())
draw.rectangle(((box[0], box[1]), (box[2], box[3])), outline=color, width=2)
if label:
font = ImageFont.load_default()
if hasattr(font, "getbbox"):
bbox = draw.textbbox((box[0], box[1]), str(label), font)
else:
w, h = draw.textsize(str(label), font)
bbox = (box[0], box[1], w + box[0], box[1] + h)
draw.rectangle(bbox, fill=color)
        draw.text((box[0], box[1]), str(label), fill="white")
config_file = 'GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py'
ckpt_repo_id = "ShilongLiu/GroundingDINO"
ckpt_filenmae = "groundingdino_swint_ogc.pth"
sam_checkpoint = 'sam_vit_h_4b8939.pth'
output_dir = "outputs"
device = "cuda"
blip_processor = None
blip_model = None
groundingdino_model = None
sam_predictor = None
sam_automask_generator = None
inpaint_pipeline = None
blip_processor = blip_processor or BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
blip_model = blip_model or BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large",
torch_dtype=torch.float16).to("cuda")
def run_grounded_sam(input_image, text_prompt, task_type, inpaint_prompt, box_threshold, text_threshold, iou_threshold,
inpaint_mode, scribble_mode, openai_api_key):
global blip_processor, blip_model, groundingdino_model, sam_predictor, sam_automask_generator, inpaint_pipeline
# make dir
os.makedirs(output_dir, exist_ok=True)
# load image
image = input_image["image"]
scribble = input_image["mask"]
size = image.size # w, h
if sam_predictor is None:
# initialize SAM
assert sam_checkpoint, 'sam_checkpoint is not found!'
sam = build_sam(checkpoint=sam_checkpoint)
sam.to(device=device)
sam_predictor = SamPredictor(sam)
sam_automask_generator = SamAutomaticMaskGenerator(sam)
if groundingdino_model is None:
groundingdino_model = load_model(config_file, ckpt_filenmae, device=device)
image_pil = image.convert("RGB")
image = np.array(image_pil)
if task_type == 'scribble':
sam_predictor.set_image(image)
scribble = scribble.convert("RGB")
scribble = np.array(scribble)
scribble = scribble.transpose(2, 1, 0)[0]
        # Label the connected components of the scribble mask
labeled_array, num_features = ndimage.label(scribble >= 255)
        # Compute the centroid of each connected component (used as a click point)
centers = ndimage.center_of_mass(scribble, labeled_array, range(1, num_features + 1))
centers = np.array(centers)
point_coords = torch.from_numpy(centers)
point_coords = sam_predictor.transform.apply_coords_torch(point_coords, image.shape[:2])
point_coords = point_coords.unsqueeze(0).to(device)
point_labels = torch.from_numpy(np.array([1] * len(centers))).unsqueeze(0).to(device)
if scribble_mode == 'split':
point_coords = point_coords.permute(1, 0, 2)
point_labels = point_labels.permute(1, 0)
masks, _, _ = sam_predictor.predict_torch(
point_coords=point_coords if len(point_coords) > 0 else None,
point_labels=point_labels if len(point_coords) > 0 else None,
mask_input=None,
boxes=None,
multimask_output=False,
)
elif task_type == 'automask':
masks = sam_automask_generator.generate(image)
else:
transformed_image = transform_image(image_pil)
if task_type == 'automatic':
# generate caption and tags
# use Tag2Text can generate better captions
# https://huggingface.co/spaces/xinyu1205/Tag2Text
# but there are some bugs...
if not text_prompt:
text_prompt = generate_caption(blip_processor, blip_model, image_pil)
if len(openai_api_key) > 0:
text_prompt = generate_tags(text_prompt, split=",", openai_api_key=openai_api_key)
print(f"Caption: {text_prompt}")
# run grounding dino model
boxes_filt, scores, pred_phrases = get_grounding_output(
groundingdino_model, transformed_image, text_prompt, box_threshold, text_threshold
)
# process boxes
H, W = size[1], size[0]
for i in range(boxes_filt.size(0)):
boxes_filt[i] = boxes_filt[i] * torch.Tensor([W, H, W, H])
boxes_filt[i][:2] -= boxes_filt[i][2:] / 2
boxes_filt[i][2:] += boxes_filt[i][:2]
boxes_filt = boxes_filt.cpu()
if task_type == 'seg' or task_type == 'inpainting' or task_type == 'automatic':
sam_predictor.set_image(image)
if task_type == 'automatic':
# use NMS to handle overlapped boxes
print(f"Before NMS: {boxes_filt.shape[0]} boxes")
nms_idx = torchvision.ops.nms(boxes_filt, scores, iou_threshold).numpy().tolist()
boxes_filt = boxes_filt[nms_idx]
pred_phrases = [pred_phrases[idx] for idx in nms_idx]
print(f"After NMS: {boxes_filt.shape[0]} boxes")
print(f"Revise caption with number: {text_prompt}")
print(f'pred_phrases:{pred_phrases}')
transformed_boxes = sam_predictor.transform.apply_boxes_torch(boxes_filt, image.shape[:2]).to(device)
print(f'-----transformed_boxes:{transformed_boxes}')
masks, _, _ = sam_predictor.predict_torch(
point_coords=None,
point_labels=None,
boxes=transformed_boxes,
multimask_output=False,
)
if task_type == 'det':
image_draw = ImageDraw.Draw(image_pil)
for box, label in zip(boxes_filt, pred_phrases):
draw_box(box, image_draw, label)
return [image_pil]
elif task_type == 'automask':
full_img, res = show_anns(masks)
return [full_img]
elif task_type == 'scribble':
mask_image = Image.new('RGBA', size, color=(0, 0, 0, 0))
mask_draw = ImageDraw.Draw(mask_image)
for mask in masks:
draw_mask(mask[0].cpu().numpy(), mask_draw, random_color=True)
image_pil = image_pil.convert('RGBA')
image_pil.alpha_composite(mask_image)
return [image_pil, mask_image]
elif task_type == 'seg' or task_type == 'automatic':
mask_image = Image.new('RGBA', size, color=(0, 0, 0, 0))
mask_draw = ImageDraw.Draw(mask_image)
for mask in masks:
draw_mask(mask[0].cpu().numpy(), mask_draw, random_color=True)
image_draw = ImageDraw.Draw(image_pil)
for box, label in zip(boxes_filt, pred_phrases):
draw_box(box, image_draw, label)
if task_type == 'automatic':
image_draw.text((10, 10), text_prompt, fill='black')
image_pil = image_pil.convert('RGBA')
image_pil.alpha_composite(mask_image)
return [image_pil, mask_image]
elif task_type == 'inpainting':
assert inpaint_prompt, 'inpaint_prompt is not found!'
# inpainting pipeline
if inpaint_mode == 'merge':
masks = torch.sum(masks, dim=0).unsqueeze(0)
masks = torch.where(masks > 0, True, False)
mask = masks[0][0].cpu().numpy() # simply choose the first mask, which will be refine in the future release
mask_pil = Image.fromarray(mask)
if inpaint_pipeline is None:
inpaint_pipeline = StableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16
)
inpaint_pipeline = inpaint_pipeline.to("cuda")
image = inpaint_pipeline(prompt=inpaint_prompt, image=image_pil.resize((512, 512)),
mask_image=mask_pil.resize((512, 512))).images[0]
image = image.resize(size)
return [image, mask_pil], "mytst"
else:
print("task_type:{} error!".format(task_type))
if __name__ == "__main__":
parser = argparse.ArgumentParser("Grounded SAM demo", add_help=True)
parser.add_argument("--debug", action="store_true", help="using debug mode")
parser.add_argument("--share", action="store_true", help="share the app")
parser.add_argument('--port', type=int, default=7589, help='port to run the server')
parser.add_argument('--no-gradio-queue', action="store_true", help='path to the SAM checkpoint')
args = parser.parse_args()
print(args)
block = gr.Blocks()
if not args.no_gradio_queue:
block = block.queue()
with block:
with gr.Row():
with gr.Column():
input_image = gr.Image(source='upload', type="pil", value="assets/demo1.jpg", tool="sketch")
task_type = gr.Dropdown(["scribble", "automask", "det", "seg", "inpainting", "automatic"],
value="automatic", label="task_type")
text_prompt = gr.Textbox(label="Text Prompt")
inpaint_prompt = gr.Textbox(label="Inpaint Prompt")
run_button = gr.Button(label="Run")
with gr.Accordion("Advanced options", open=False):
box_threshold = gr.Slider(
label="Box Threshold", minimum=0.0, maximum=1.0, value=0.3, step=0.05
)
text_threshold = gr.Slider(
label="Text Threshold", minimum=0.0, maximum=1.0, value=0.25, step=0.05
)
iou_threshold = gr.Slider(
label="IOU Threshold", minimum=0.0, maximum=1.0, value=0.5, step=0.05
)
inpaint_mode = gr.Dropdown(["merge", "first"], value="merge", label="inpaint_mode")
scribble_mode = gr.Dropdown(["merge", "split"], value="split", label="scribble_mode")
openai_api_key = gr.Textbox(label="(Optional)OpenAI key, enable chatgpt")
with gr.Column():
gallery = gr.Gallery(
label="Generated images", show_label=False, elem_id="gallery"
).style(preview=True, grid=2, object_fit="scale-down")
# openai_api_key = gr.Textbox(label="(Optional)OpenAI key, enable chatgpt")
run_button.click(fn=run_grounded_sam, inputs=[
input_image, text_prompt, task_type, inpaint_prompt, box_threshold, text_threshold, iou_threshold,
inpaint_mode, scribble_mode, openai_api_key], outputs=[gallery, "text"])
block.queue(concurrency_count=100)
block.launch(server_name='0.0.0.0', server_port=args.port, debug=args.debug, share=args.share)
| [
"Extract the unique nouns in the caption. Remove all the adjectives. List the nouns in singular form. Split them by \"PLACEHOLDER \". Caption: captionafcd62b4-2622-453a-aa75-6f77700837c8..",
"Inpaint Prompt",
"Text Prompt"
] |
2024-01-10 | lmxhappy/Grounded-Segment-Anything | gradio_app_automatic.py | # coding:utf-8
import argparse
import os
import random
# Grounding DINO
import GroundingDINO.groundingdino.datasets.transforms as T
import gradio as gr
import numpy as np
import openai
# diffusers
import torch
import torchvision
from GroundingDINO.groundingdino.models import build_model
from GroundingDINO.groundingdino.util.slconfig import SLConfig
from GroundingDINO.groundingdino.util.utils import clean_state_dict, get_phrases_from_posmap
from PIL import Image, ImageDraw, ImageFont
# BLIP
from transformers import BlipProcessor, BlipForConditionalGeneration
# segment anything
from segment_anything import build_sam, SamPredictor, SamAutomaticMaskGenerator
def show_anns(anns):
if len(anns) == 0:
return
sorted_anns = sorted(anns, key=(lambda x: x['area']), reverse=True)
full_img = None
# for ann in sorted_anns:
for i in range(len(sorted_anns)):
ann = anns[i]
m = ann['segmentation']
if full_img is None:
full_img = np.zeros((m.shape[0], m.shape[1], 3))
map = np.zeros((m.shape[0], m.shape[1]), dtype=np.uint16)
map[m != 0] = i + 1
color_mask = np.random.random((1, 3)).tolist()[0]
full_img[m != 0] = color_mask
full_img = full_img * 255
# anno encoding from https://github.com/LUSSeg/ImageNet-S
res = np.zeros((map.shape[0], map.shape[1], 3))
res[:, :, 0] = map % 256
res[:, :, 1] = map // 256
res.astype(np.float32)
full_img = Image.fromarray(np.uint8(full_img))
return full_img, res
def generate_caption(processor, blip_model, raw_image):
# unconditional image captioning
inputs = processor(raw_image, return_tensors="pt").to("cuda", torch.float16)
out = blip_model.generate(**inputs)
caption = processor.decode(out[0], skip_special_tokens=True)
return caption
def generate_tags(caption, split=',', max_tokens=100, model="gpt-3.5-turbo", openai_api_key=''):
openai.api_key = openai_api_key
openai.api_base = 'https://closeai.deno.dev/v1'
prompt = [
{
'role': 'system',
'content': 'Extract the unique nouns in the caption. Remove all the adjectives. ' + \
f'List the nouns in singular form. Split them by "{split} ". ' + \
f'Caption: {caption}.'
}
]
response = openai.ChatCompletion.create(model=model, messages=prompt, temperature=0.6, max_tokens=max_tokens)
reply = response['choices'][0]['message']['content']
# sometimes return with "noun: xxx, xxx, xxx"
tags = reply.split(':')[-1].strip()
return tags
def transform_image(image_pil):
transform = T.Compose(
[
T.RandomResize([800], max_size=1333),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
image, _ = transform(image_pil, None) # 3, h, w
return image
def load_model(model_config_path, model_checkpoint_path, device):
args = SLConfig.fromfile(model_config_path)
args.device = device
model = build_model(args)
checkpoint = torch.load(model_checkpoint_path, map_location="cpu")
load_res = model.load_state_dict(clean_state_dict(checkpoint["model"]), strict=False)
print(load_res)
_ = model.eval()
return model
def get_grounding_output(model, image, caption, box_threshold, text_threshold, with_logits=True):
caption = caption.lower()
caption = caption.strip()
if not caption.endswith("."):
caption = caption + "."
with torch.no_grad():
outputs = model(image[None], captions=[caption])
logits = outputs["pred_logits"].cpu().sigmoid()[0] # (nq, 256)
boxes = outputs["pred_boxes"].cpu()[0] # (nq, 4)
# logits.shape[0]
# filter output
logits_filt = logits.clone()
boxes_filt = boxes.clone()
filt_mask = logits_filt.max(dim=1)[0] > box_threshold
logits_filt = logits_filt[filt_mask] # num_filt, 256
boxes_filt = boxes_filt[filt_mask] # num_filt, 4
# logits_filt.shape[0]
# get phrase
tokenlizer = model.tokenizer
tokenized = tokenlizer(caption)
# build pred
pred_phrases = []
scores = []
for logit, box in zip(logits_filt, boxes_filt):
pred_phrase = get_phrases_from_posmap(logit > text_threshold, tokenized, tokenlizer)
if with_logits:
pred_phrases.append(pred_phrase + f"({str(logit.max().item())[:4]})")
else:
pred_phrases.append(pred_phrase)
scores.append(logit.max().item())
return boxes_filt, torch.Tensor(scores), pred_phrases
def draw_mask(mask, draw, random_color=False):
"""
    Paint the pixels covered by `mask` onto the given `draw` (ImageDraw) object.
"""
if random_color:
color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255), 153)
else:
color = (30, 144, 255, 153)
nonzero_coords = np.transpose(np.nonzero(mask))
for coord in nonzero_coords:
draw.point(coord[::-1], fill=color)
def draw_box(box, draw, label):
# random color
color = tuple(np.random.randint(0, 255, size=3).tolist())
draw.rectangle(((box[0], box[1]), (box[2], box[3])), outline=color, width=2)
if label:
font = ImageFont.load_default()
if hasattr(font, "getbbox"):
bbox = draw.textbbox((box[0], box[1]), str(label), font)
else:
w, h = draw.textsize(str(label), font)
bbox = (box[0], box[1], w + box[0], box[1] + h)
draw.rectangle(bbox, fill=color)
        draw.text((box[0], box[1]), str(label), fill="white")
config_file = 'GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py'
ckpt_repo_id = "ShilongLiu/GroundingDINO"
ckpt_filenmae = "groundingdino_swint_ogc.pth"
sam_checkpoint = 'sam_vit_h_4b8939.pth'
output_dir = "outputs"
device = "cuda"
blip_processor = None
blip_model = None
groundingdino_model = None
sam_predictor = None
sam_automask_generator = None
inpaint_pipeline = None
blip_processor = blip_processor or BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
blip_model = blip_model or BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large",
torch_dtype=torch.float16).to("cuda")
def run_grounded_sam(input_image, box_threshold, text_threshold, iou_threshold):
global blip_processor, blip_model, groundingdino_model, sam_predictor, sam_automask_generator, inpaint_pipeline
# make dir
os.makedirs(output_dir, exist_ok=True)
# load image
image = input_image["image"]
scribble = input_image["mask"]
size = image.size # w, h
if sam_predictor is None:
# initialize SAM
assert sam_checkpoint, 'sam_checkpoint is not found!'
sam = build_sam(checkpoint=sam_checkpoint)
sam.to(device=device)
sam_predictor = SamPredictor(sam)
sam_automask_generator = SamAutomaticMaskGenerator(sam)
if groundingdino_model is None:
groundingdino_model = load_model(config_file, ckpt_filenmae, device=device)
image_pil = image.convert("RGB")
image = np.array(image_pil)
transformed_image = transform_image(image_pil)
# generate caption and tags
# use Tag2Text can generate better captions
# https://huggingface.co/spaces/xinyu1205/Tag2Text
# but there are some bugs...
text_prompt = generate_caption(blip_processor, blip_model, image_pil)
print(f"Caption: {text_prompt}")
# run grounding dino model
boxes_filt, scores, pred_phrases = get_grounding_output(
groundingdino_model, transformed_image, text_prompt, box_threshold, text_threshold
)
# process boxes
H, W = size[1], size[0]
for i in range(boxes_filt.size(0)):
boxes_filt[i] = boxes_filt[i] * torch.Tensor([W, H, W, H])
boxes_filt[i][:2] -= boxes_filt[i][2:] / 2
boxes_filt[i][2:] += boxes_filt[i][:2]
boxes_filt = boxes_filt.cpu()
sam_predictor.set_image(image)
# use NMS to handle overlapped boxes
print(f"Before NMS: {boxes_filt.shape[0]} boxes")
nms_idx = torchvision.ops.nms(boxes_filt, scores, iou_threshold).numpy().tolist()
boxes_filt = boxes_filt[nms_idx]
pred_phrases = [pred_phrases[idx] for idx in nms_idx]
print(f"After NMS: {boxes_filt.shape[0]} boxes")
print(f"Revise caption with number: {text_prompt}")
print(f'pred_phrases:{pred_phrases}')
transformed_boxes = sam_predictor.transform.apply_boxes_torch(boxes_filt, image.shape[:2]).to(device)
print(f'-----transformed_boxes:{transformed_boxes}')
masks, _, _ = sam_predictor.predict_torch(
point_coords=None,
point_labels=None,
boxes=transformed_boxes,
multimask_output=False,
)
mask_image = Image.new('RGBA', size, color=(0, 0, 0, 0))
mask_draw = ImageDraw.Draw(mask_image)
for mask in masks:
draw_mask(mask[0].cpu().numpy(), mask_draw, random_color=True)
image_draw = ImageDraw.Draw(image_pil)
for box, label in zip(boxes_filt, pred_phrases):
draw_box(box, image_draw, label)
image_draw.text((10, 10), text_prompt, fill='black')
image_pil = image_pil.convert('RGBA')
image_pil.alpha_composite(mask_image)
return [image_pil, mask_image]
if __name__ == "__main__":
parser = argparse.ArgumentParser("Grounded SAM demo", add_help=True)
parser.add_argument("--debug", action="store_true", help="using debug mode")
parser.add_argument("--share", action="store_true", help="share the app")
parser.add_argument('--port', type=int, default=7589, help='port to run the server')
parser.add_argument('--no-gradio-queue', action="store_true", help='path to the SAM checkpoint')
args = parser.parse_args()
print(args)
block = gr.Blocks()
if not args.no_gradio_queue:
block = block.queue()
with block:
with gr.Row():
with gr.Column():
input_image = gr.Image(source='upload', type="pil", value="assets/demo1.jpg", tool="sketch")
run_button = gr.Button(label="Run")
with gr.Accordion("Advanced options", open=False):
box_threshold = gr.Slider(
label="Box Threshold", minimum=0.0, maximum=1.0, value=0.3, step=0.05
)
text_threshold = gr.Slider(
label="Text Threshold", minimum=0.0, maximum=1.0, value=0.25, step=0.05
)
iou_threshold = gr.Slider(
label="IOU Threshold", minimum=0.0, maximum=1.0, value=0.5, step=0.05
)
inpaint_mode = gr.Dropdown(["merge", "first"], value="merge", label="inpaint_mode")
scribble_mode = gr.Dropdown(["merge", "split"], value="split", label="scribble_mode")
with gr.Column():
gallery = gr.Gallery(
label="Generated images", show_label=False, elem_id="gallery"
).style(preview=True, grid=2, object_fit="scale-down")
run_button.click(fn=run_grounded_sam, inputs=[
input_image, box_threshold, text_threshold, iou_threshold
], outputs=gallery)
block.queue(concurrency_count=100)
block.launch(server_name='0.0.0.0', server_port=args.port, debug=args.debug, share=args.share)
| [
"Extract the unique nouns in the caption. Remove all the adjectives. List the nouns in singular form. Split them by \"PLACEHOLDER \". Caption: caption3b1d6691-1864-4e36-9359-5b665ea302ff.."
] |
2024-01-10 | simsapa/simsapa | simsapa~layouts~gpt_prompts.py | import re, json
from PyQt6 import QtWidgets
from PyQt6 import QtCore
from PyQt6 import QtGui
from PyQt6.QtCore import QAbstractTableModel, QItemSelection, QItemSelectionModel, QModelIndex, QObject, QRunnable, QSize, QThreadPool, QTimer, Qt, pyqtSignal, pyqtSlot
from PyQt6.QtGui import QAction, QFont, QMovie, QStandardItem, QStandardItemModel
from functools import partial
from typing import Any, List, Optional, TypedDict
from datetime import datetime
import tiktoken
from PyQt6.QtWidgets import (QAbstractItemView, QCheckBox, QComboBox, QDialog, QDoubleSpinBox, QFileDialog,
QHBoxLayout, QHeaderView, QLabel, QLineEdit, QMenu, QMenuBar, QMessageBox,
QPushButton, QSpacerItem, QSpinBox, QSplitter, QTabWidget, QTableView, QTextEdit,
QTreeView, QVBoxLayout, QWidget)
from simsapa import IS_MAC, IS_SWAY, SEARCH_TIMER_SPEED, logger
from simsapa.app.export_helpers import sutta_content_plain
from simsapa.app.db import appdata_models as Am
from simsapa.app.db import userdata_models as Um
from simsapa.app.types import USutta
from simsapa.app.app_data import AppData
from simsapa.layouts.gui_types import (AppWindowInterface, ChatMessage, ChatResponse, ChatRole, OpenAIModel,
OpenAIModelLatest, OpenAIModelToEnum, OpenAISettings, OpenPromptParams,
QExpanding, QMinimum, default_openai_settings, model_max_tokens)
class ShowPromptDialog(QDialog):
def __init__(self, text: str):
super().__init__()
self.setWindowTitle("Parsed Prompt Content")
if IS_SWAY:
self.setFixedSize(800, 600)
else:
self.resize(800, 600)
self._layout = QVBoxLayout()
self.setLayout(self._layout)
self.prompt_text_input = QTextEdit()
self.prompt_text_input.setPlainText(text)
self.prompt_text_input.setFocus()
self._layout.addWidget(self.prompt_text_input)
self.buttons_box = QHBoxLayout()
self._layout.addLayout(self.buttons_box)
self.close_btn = QPushButton('Close')
self.close_btn.clicked.connect(partial(self._handle_close))
self.buttons_box.addWidget(self.close_btn)
self.buttons_box.addItem(QSpacerItem(0, 0, QExpanding, QMinimum))
def _handle_close(self):
self.close()
# Keys with underscore prefix will not be shown in table columns.
HistoryModelColToIdx = {
"Name": 0,
"Prompt": 1,
"Submitted": 2,
"_db_id": 3,
}
class HistoryModel(QAbstractTableModel):
def __init__(self, data = []):
super().__init__()
self._data = data
self._columns = list(filter(lambda x: not x.startswith("_"), HistoryModelColToIdx.keys()))
def data(self, index: QModelIndex, role: Qt.ItemDataRole):
if role == Qt.ItemDataRole.DisplayRole:
if len(self._data) == 0:
return list(map(lambda _: "", self._columns))
else:
return self._data[index.row()][index.column()]
elif role == Qt.ItemDataRole.UserRole:
return self._data
def rowCount(self, _):
return len(self._data)
def columnCount(self, _):
if len(self._data) == 0:
return 0
else:
return len(self._columns)
def headerData(self, section, orientation, role):
if role == Qt.ItemDataRole.DisplayRole:
if orientation == Qt.Orientation.Horizontal:
return self._columns[section]
if orientation == Qt.Orientation.Vertical:
return str(section+1)
class PromptData(TypedDict):
db_id: int
class PromptItem(QStandardItem):
name_path: str
show_in_context: bool
data: PromptData
def __init__(self, name_path: str, show_in_context: bool, db_id: int):
super().__init__()
self.setEditable(False)
# Remove trailing / for display.
name_path = re.sub(r'/+$', '', name_path)
self.name_path = re.sub(r' */ *', '/', name_path)
self.show_in_context = show_in_context
# Not storing db_schema, assuming all prompts are in userdata.
self.data = PromptData(db_id = db_id)
self.setText(self.name_path)
class GptPromptsWindow(AppWindowInterface):
_input_timer = QTimer()
def __init__(self, app_data: AppData, prompt_params: Optional[OpenPromptParams] = None, parent = None) -> None:
super().__init__(parent)
logger.info("GptPromptsWindow()")
self._app_data: AppData = app_data
self.tokenizer_worker: Optional[TokenizerWorker] = None
self.completion_worker: Optional[CompletionWorker] = None
self.thread_pool = QThreadPool()
self.sidebar_visible = True
self._setup_ui()
self._init_values()
self._connect_signals()
self._update_vert_splitter_widths()
self._update_horiz_splitter_widths()
if prompt_params is not None:
self._show_prompt_by_params(prompt_params)
def _setup_ui(self):
self.setWindowTitle("GPT Prompts - Simsapa")
self.resize(1068, 625)
self._central_widget = QtWidgets.QWidget(self)
self.setCentralWidget(self._central_widget)
self._layout = QVBoxLayout()
self._central_widget.setLayout(self._layout)
# horizontal splitter
self.vert_splitter = QSplitter(self._central_widget)
self.vert_splitter.setOrientation(QtCore.Qt.Orientation.Horizontal)
self._layout.addWidget(self.vert_splitter)
self.left_box_widget = QWidget(self.vert_splitter)
self.left_box_layout = QVBoxLayout(self.left_box_widget)
self.left_box_layout.setContentsMargins(0, 0, 0, 0)
self.right_box_widget = QWidget(self.vert_splitter)
self.right_box_layout = QVBoxLayout(self.right_box_widget)
self.right_box_layout.setContentsMargins(0, 0, 0, 0)
# vertical splitter
self.horiz_splitter = QSplitter(self._central_widget)
self.horiz_splitter.setOrientation(QtCore.Qt.Orientation.Vertical)
self.left_box_layout.addWidget(self.horiz_splitter)
self.prompt_input_widget = QWidget(self.horiz_splitter)
self.prompt_input_layout = QVBoxLayout(self.prompt_input_widget)
self.prompt_input_layout.setContentsMargins(0, 0, 0, 0)
self.completion_text_widget = QWidget(self.horiz_splitter)
self.completion_text_layout = QVBoxLayout(self.completion_text_widget)
self.completion_text_layout.setContentsMargins(0, 0, 0, 0)
self.tabs = QTabWidget()
self.right_box_layout.addWidget(self.tabs)
# bottom buttons
self._bottom_buttons_box = QHBoxLayout()
self._layout.addLayout(self._bottom_buttons_box)
self.prompt_submit = QPushButton("Submit")
self.prompt_submit.setMinimumSize(QSize(80, 40))
self._bottom_buttons_box.addWidget(self.prompt_submit)
self.openai_append_mode = QCheckBox("Append", self)
self.openai_append_mode.setToolTip("Append the completion to the prompt")
self._bottom_buttons_box.addWidget(self.openai_append_mode)
label = QLabel("Model:")
self._bottom_buttons_box.addWidget(label)
self.openai_model_select = QComboBox()
items = [i.value for i in OpenAIModel]
self.openai_model_select.addItems(items)
self.openai_model_select.setCurrentText(self._app_data.app_settings['openai']['model'])
self._bottom_buttons_box.addWidget(self.openai_model_select)
self.openai_temperature_input = QDoubleSpinBox()
self.openai_temperature_input.setToolTip("Temperature")
self.openai_temperature_input.setMinimum(0.0)
self.openai_temperature_input.setMaximum(2.0)
self.openai_temperature_input.setSingleStep(0.1)
label = QLabel("T:")
label.setToolTip("Temperature (random variation)")
self._bottom_buttons_box.addWidget(label)
self._bottom_buttons_box.addWidget(self.openai_temperature_input)
self.openai_max_tokens_input = QSpinBox()
self.openai_max_tokens_input.setToolTip("Max tokens to generate")
self.openai_max_tokens_input.setMinimum(16)
model_max = model_max_tokens(self._app_data.app_settings['openai']['model'])
self.openai_max_tokens_input.setMaximum(model_max)
label = QLabel("M:")
label.setToolTip("Max tokens to generate")
self._bottom_buttons_box.addWidget(label)
self._bottom_buttons_box.addWidget(self.openai_max_tokens_input)
self.openai_auto_max = QCheckBox("Auto max", self)
self.openai_auto_max.setToolTip("Maximize generated token number")
self._bottom_buttons_box.addWidget(self.openai_auto_max)
self.token_count_msg = QLabel()
self._bottom_buttons_box.addWidget(self.token_count_msg)
self.token_warning_msg = QLabel(f"Warning: max total tokens for {self._app_data.app_settings['openai']['model']} is {model_max}")
self._bottom_buttons_box.addWidget(self.token_warning_msg)
self.token_warning_msg.setVisible(False)
self.completion_loading_bar = QLabel()
self._bottom_buttons_box.addWidget(self.completion_loading_bar)
self._loading_bar_anim = QMovie(':loading-bar')
self._loading_bar_empty_anim = QMovie(':loading-bar-empty')
self.completion_warning_msg = QLabel()
self._bottom_buttons_box.addWidget(self.completion_warning_msg)
self._bottom_buttons_box.addItem(QSpacerItem(20, 0, QExpanding, QMinimum))
self.toggle_sidebar_btn = QPushButton()
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/angles-right"), QtGui.QIcon.Mode.Normal, QtGui.QIcon.State.Off)
self.toggle_sidebar_btn.setIcon(icon)
self.toggle_sidebar_btn.setMinimumSize(QSize(40, 40))
self.toggle_sidebar_btn.setToolTip("Toggle Sidebar")
self._bottom_buttons_box.addWidget(self.toggle_sidebar_btn)
self._setup_menubar()
self._setup_editor()
self._setup_prompts_tab()
self._setup_history_tab()
self._setup_settings_tab()
self.reload_history_table()
def _setup_menubar(self):
self.menubar = QMenuBar()
self.setMenuBar(self.menubar)
self.menu_File = QMenu(self.menubar)
self.menu_File.setTitle("&File")
self.menubar.addAction(self.menu_File.menuAction())
self.action_Close_Window = QAction("&Close Window")
self.menu_File.addAction(self.action_Close_Window)
self.action_Import = QAction("&Import from CSV...")
self.menu_File.addAction(self.action_Import)
self.action_Export = QAction("&Export as CSV...")
self.menu_File.addAction(self.action_Export)
def _setup_editor(self):
if IS_MAC:
font = QFont("Helvetica", pointSize = 13)
else:
font = QFont("DejaVu Sans", pointSize = 13)
self.prompt_input_top_buttons_layout = QHBoxLayout()
self.prompt_input_layout.addLayout(self.prompt_input_top_buttons_layout)
self.prompt_clear_all_btn = QPushButton("Clear All")
self.prompt_input_top_buttons_layout.addWidget(self.prompt_clear_all_btn)
self.prompt_show_parsed_btn = QPushButton("Show Parsed")
self.prompt_input_top_buttons_layout.addWidget(self.prompt_show_parsed_btn)
self.prompt_input_top_buttons_layout.addItem(QSpacerItem(0, 0, QExpanding, QMinimum))
self.prompt_input_top_buttons_layout.addWidget(QLabel("Copy:"))
self.prompt_copy_btn = QPushButton("User Prompt")
self.prompt_input_top_buttons_layout.addWidget(self.prompt_copy_btn)
self.prompt_copy_completion_btn = QPushButton("Completion")
self.prompt_input_top_buttons_layout.addWidget(self.prompt_copy_completion_btn)
self.prompt_copy_all_btn = QPushButton("All")
self.prompt_input_top_buttons_layout.addWidget(self.prompt_copy_all_btn)
self.prompt_name_input = QLineEdit()
self.prompt_name_input.setPlaceholderText("Prompt name, e.g. summarize text")
self.prompt_input_layout.addWidget(self.prompt_name_input)
self.prompt_input_layout.addWidget(QLabel("System:"))
self.system_prompt_input = QTextEdit("You are a helpful assistant in understanding the teachings of the Buddha.")
self.system_prompt_input.setFont(font)
self.system_prompt_input.setMaximumHeight(100)
self.prompt_input_layout.addWidget(self.system_prompt_input)
self.prompt_input_layout.addWidget(QLabel("User:"))
self.user_prompt_input = QTextEdit()
self.user_prompt_input.setFont(font)
self.prompt_input_layout.addWidget(self.user_prompt_input)
self.user_prompt_input.setFocus()
self.completion_text_layout.addWidget(QLabel("Completion:"))
self.completion_text = QTextEdit()
self.completion_text.setFont(font)
self.completion_text_layout.addWidget(self.completion_text)
def _setup_prompts_tab(self):
self.prompts_tab_widget = QWidget()
self.prompts_tab_layout = QVBoxLayout()
self.prompts_tab_widget.setLayout(self.prompts_tab_layout)
self.tabs.addTab(self.prompts_tab_widget, "Prompts")
self.prompt_buttons_layout = QHBoxLayout()
self.prompts_tab_layout.addLayout(self.prompt_buttons_layout)
self.prompt_save_btn = QPushButton("Save Current")
self.prompt_buttons_layout.addWidget(self.prompt_save_btn)
self.prompt_toggle_menu_btn = QPushButton("Toggle Menu")
self.prompt_buttons_layout.addWidget(self.prompt_toggle_menu_btn)
self.prompt_buttons_layout.addItem(QSpacerItem(0, 0, QExpanding, QMinimum))
self.prompt_delete_btn = QPushButton("Delete")
self.prompt_buttons_layout.addWidget(self.prompt_delete_btn)
self.prompt_delete_all_btn = QPushButton("Delete All")
self.prompt_buttons_layout.addWidget(self.prompt_delete_all_btn)
self._setup_prompts_tree_view()
def _setup_prompts_tree_view(self):
self.prompts_tree_view = QTreeView()
self.prompts_tab_layout.addWidget(self.prompts_tree_view)
self._init_prompts_tree_model()
def _init_prompts_tree_model(self):
self.prompts_tree_model = QStandardItemModel(0, 2, self)
self.prompts_tree_view.setHeaderHidden(False)
self.prompts_tree_view.setRootIsDecorated(True)
self.prompts_tree_view.setModel(self.prompts_tree_model)
self._create_prompts_tree_items(self.prompts_tree_model)
# NOTE: Don't select the first item when opening the window. It is
# confusing when the prompt is loaded from a sutta window.
# idx = self.prompts_tree_model.index(0, 0)
# self.prompts_tree_view.selectionModel() \
# .select(idx,
# QItemSelectionModel.SelectionFlag.ClearAndSelect | \
# QItemSelectionModel.SelectionFlag.Rows)
# self._handle_prompts_tree_clicked(idx)
self.prompts_tree_view.expandAll()
def reload_prompts_tree(self):
self.prompts_tree_model.clear()
self._create_prompts_tree_items(self.prompts_tree_model)
self.prompts_tree_model.layoutChanged.emit()
self.prompts_tree_view.expandAll()
def _create_prompts_tree_items(self, model):
item = QStandardItem()
item.setText("Prompt")
model.setHorizontalHeaderItem(0, item)
item = QStandardItem()
item.setText("Show in Context Menu")
item.setToolTip("Show in Right-Click Context Menu")
model.setHorizontalHeaderItem(1, item)
root_node = model.invisibleRootItem()
res = self._app_data.db_session \
.query(Um.GptPrompt) \
.order_by(Um.GptPrompt.name_path.asc()) \
.all()
for r in res:
do_show = (r.show_in_context is not None and r.show_in_context)
s = "✓" if do_show else ""
show = QStandardItem(s)
prompt = PromptItem(r.name_path, do_show, r.id)
root_node.appendRow([prompt, show])
self.prompts_tree_view.resizeColumnToContents(0)
self.prompts_tree_view.resizeColumnToContents(1)
def _show_prompt_by_id(self, db_id: int):
prompt = self._app_data.db_session \
.query(Um.GptPrompt) \
.filter(Um.GptPrompt.id == db_id) \
.first()
if prompt is None or prompt.messages_json is None:
return
self.prompt_name_input.setText(prompt.name_path)
messages: List[ChatMessage] = json.loads(prompt.messages_json)
self._set_prompt_inputs(messages)
def _set_prompt_inputs(self,
messages: List[ChatMessage],
parse_sutta_in_text = False,
sutta_uid: Optional[str] = None,
selection_text: Optional[str] = None):
if len(messages) == 0:
self.system_prompt_input.setPlainText("")
self.user_prompt_input.setPlainText("")
self.completion_text.setPlainText("")
return
if len(messages) > 0:
self.system_prompt_input.setPlainText(messages[0]['content'])
if len(messages) > 1:
prompt = messages[1]['content']
user_prompt = self._parse_prompt_variables(
prompt = prompt,
parse_sutta_in_text = parse_sutta_in_text,
sutta_uid = sutta_uid,
selection_text = selection_text)
self.user_prompt_input.setPlainText(user_prompt)
if len(messages) > 2:
self.completion_text.setPlainText(messages[2]['content'])
def _show_prompt_by_params(self, params: OpenPromptParams):
prompt = self._app_data.db_session \
.query(Um.GptPrompt) \
.filter(Um.GptPrompt.id == params['prompt_db_id']) \
.first()
if prompt is None or prompt.messages_json is None:
return
if params['with_name'] is None:
self.prompt_name_input.setText(prompt.name_path)
else:
self.prompt_name_input.setText(params['with_name'])
messages: List[ChatMessage] = json.loads(prompt.messages_json)
self._set_prompt_inputs(messages, False, params['sutta_uid'], params['selection_text'])
def _handle_prompts_tree_clicked(self, val: QModelIndex):
item: PromptItem = self.prompts_tree_model.itemFromIndex(val) # type: ignore
if item is not None:
self._show_prompt_by_id(item.data['db_id'])
def _setup_history_tab(self):
self.history_tab_widget = QWidget()
self.history_tab_layout = QVBoxLayout()
self.history_tab_widget.setLayout(self.history_tab_layout)
self.tabs.addTab(self.history_tab_widget, "History")
self.history_buttons_layout = QHBoxLayout()
self.history_tab_layout.addLayout(self.history_buttons_layout)
self.history_load_btn = QPushButton("Load")
self.history_buttons_layout.addWidget(self.history_load_btn)
self.history_buttons_layout.addItem(QSpacerItem(0, 0, QExpanding, QMinimum))
self.history_delete_btn = QPushButton("Delete")
self.history_buttons_layout.addWidget(self.history_delete_btn)
self.history_delete_all_btn = QPushButton("Delete All")
self.history_buttons_layout.addWidget(self.history_delete_all_btn)
self.history_table = QTableView()
self.history_tab_layout.addWidget(self.history_table)
self.history_table.setShowGrid(False)
self.history_table.setWordWrap(False)
self.history_table.setSelectionBehavior(QAbstractItemView.SelectionBehavior.SelectRows)
# MultiSelection allows multiple items to be selected with left-click,
# and it becomes confusing what should be opened when the Open button or
# double-click is used.
self.history_table.setSelectionMode(QAbstractItemView.SelectionMode.SingleSelection)
horiz_header = self.history_table.horizontalHeader()
if horiz_header:
horiz_header.setSectionResizeMode(QHeaderView.ResizeMode.Interactive)
horiz_header.setStretchLastSection(True)
self.history_table.setContextMenuPolicy(Qt.ContextMenuPolicy.ActionsContextMenu)
self.history_model = HistoryModel()
self.history_table.setModel(self.history_model)
def reload_history_table(self):
data = self._data_items_for_history()
self.history_model = HistoryModel(data)
self.history_table.setModel(self.history_model)
self.history_model.layoutChanged.emit()
def _data_items_for_history(self) -> List[List[str]]:
res = self._app_data.db_session \
.query(Um.GptHistory) \
.order_by(Um.GptHistory.created_at.desc()) \
.all()
if len(res) == 0:
return []
def _model_data_item(x: Um.GptHistory) -> List[str]:
# Return values ordered as in HistoryModelColToIdx
if x.messages_json is None:
text = ""
else:
m = json.loads(x.messages_json)
if len(m) > 1:
text = m[1]['content'][0:20]
else:
text = ""
return ["" if x.name_path is None else str(x.name_path[0:20]),
text,
str(x.created_at),
str(x.id)]
data = list(map(_model_data_item, res))
return data
def _setup_settings_tab(self):
self.settings_tab_widget = QWidget()
self.settings_tab_layout = QVBoxLayout()
self.settings_tab_widget.setLayout(self.settings_tab_layout)
self.tabs.addTab(self.settings_tab_widget, "Settings")
self.settings_buttons_layout = QHBoxLayout()
self.settings_tab_layout.addLayout(self.settings_buttons_layout)
self.settings_buttons_layout.addItem(QSpacerItem(0, 0, QExpanding, QMinimum))
self.settings_reset_btn = QPushButton("Reset")
self.settings_buttons_layout.addWidget(self.settings_reset_btn)
self.settings_tab_layout.addWidget(QLabel("OpenAI API key:"))
self.openai_api_key_input = QLineEdit()
self.openai_api_key_input.setPlaceholderText("sk-...")
self.settings_tab_layout.addWidget(self.openai_api_key_input)
self.openai_sign_up_info = QLabel("<p>Sign for an <a href='https://beta.openai.com/signup'>OpenAI account</a> and create your API key.</p>")
self.openai_sign_up_info.setWordWrap(True)
self.settings_tab_layout.addWidget(self.openai_sign_up_info)
label = QLabel("<p>Number of completions to generate:</p>")
label.setWordWrap(True)
self.settings_tab_layout.addWidget(label)
self.openai_n_completions_input = QSpinBox()
self.openai_n_completions_input.setMinimum(0)
self.openai_n_completions_input.setMaximum(10)
self.openai_n_completions_input.setDisabled(True)
self.settings_tab_layout.addWidget(self.openai_n_completions_input)
label = QLabel("<p>Join short lines in the prompt under x chars to reduce token count:</p>")
label.setWordWrap(True)
self.settings_tab_layout.addWidget(label)
self.openai_join_lines_under_input = QSpinBox()
self.openai_join_lines_under_input.setMinimum(0)
self.openai_join_lines_under_input.setMaximum(999)
self.settings_tab_layout.addWidget(self.openai_join_lines_under_input)
self.settings_tab_layout.addItem(QSpacerItem(0, 0, QMinimum, QExpanding))
def _init_values(self):
s = self._app_data.app_settings['openai']
if s['api_key'] is not None and s['api_key'] != "":
self.openai_api_key_input.setText(s['api_key'])
self.openai_sign_up_info.setVisible(False)
else:
self.openai_api_key_input.setText("")
self.openai_sign_up_info.setVisible(True)
self.openai_auto_max.setChecked(s['auto_max_tokens'])
self.openai_append_mode.setChecked(s['append_mode'])
self.openai_temperature_input.setValue(s['temperature'])
self.openai_max_tokens_input.setValue(s['max_tokens'])
self.openai_n_completions_input.setValue(s['n_completions'])
self.openai_join_lines_under_input.setValue(s['join_short_lines'])
self.openai_max_tokens_input.setDisabled(self.openai_auto_max.isChecked())
def _save_all_settings(self):
api_key = self.openai_api_key_input.text()
if api_key == "":
api_key = None
self.openai_max_tokens_input.setDisabled(self.openai_auto_max.isChecked())
openai_settings = OpenAISettings(
api_key = api_key,
model = OpenAIModelToEnum[self.openai_model_select.currentText()],
temperature = self.openai_temperature_input.value(),
max_tokens = self.openai_max_tokens_input.value(),
auto_max_tokens = self.openai_auto_max.isChecked(),
n_completions = self.openai_n_completions_input.value(),
join_short_lines = self.openai_join_lines_under_input.value(),
append_mode = self.openai_append_mode.isChecked(),
)
self._app_data.app_settings['openai'] = openai_settings
self._app_data._save_app_settings()
def _update_model_max(self):
model = self._app_data.app_settings['openai']['model']
model_max = model_max_tokens(model)
self.openai_max_tokens_input.setMaximum(model_max)
self.token_warning_msg.setText(f"Warning: max total tokens for {model} is {model_max}")
self._update_token_count()
def _append_mode_toggled(self):
self._update_horiz_splitter_widths()
self._save_all_settings()
def _auto_max_toggled(self):
if self.openai_auto_max.isChecked():
self._update_token_count()
self._save_all_settings()
def _toggle_sidebar(self):
self.sidebar_visible = not self.sidebar_visible
self._update_vert_splitter_widths()
def _update_vert_splitter_widths(self):
if self.sidebar_visible:
self.vert_splitter.setSizes([2000, 2000])
else:
self.vert_splitter.setSizes([2000, 0])
def _update_horiz_splitter_widths(self):
if self.openai_append_mode.isChecked():
self.horiz_splitter.setSizes([2000, 0])
else:
self.horiz_splitter.setSizes([2000, 2000])
def _show_info(self, text: str):
box = QMessageBox(self)
box.setIcon(QMessageBox.Icon.Information)
box.setWindowTitle("Information")
box.setStandardButtons(QMessageBox.StandardButton.Ok)
box.setText(text)
box.exec()
def _show_warning(self, text: str):
box = QMessageBox(self)
box.setIcon(QMessageBox.Icon.Warning)
box.setWindowTitle("Warning")
box.setStandardButtons(QMessageBox.StandardButton.Ok)
box.setText(text)
box.exec()
def _parse_prompt_variables(self,
prompt: str,
parse_sutta_in_text = False,
sutta_uid: Optional[str] = None,
selection_text: Optional[str] = None) -> str:
prompt = prompt.strip()
if parse_sutta_in_text:
prompt = self._parse_prompt_sutta_in_text(prompt)
if sutta_uid:
prompt = self._parse_prompt_current_sutta(prompt, sutta_uid)
if selection_text:
prompt = self._parse_prompt_selection_text(prompt, selection_text)
return prompt
def _parse_prompt_selection_text(self, prompt: str, selection_text: str) -> str:
if '<<selection_text>>' not in prompt:
return prompt
return prompt.replace('<<selection_text>>', selection_text)
def _parse_prompt_current_sutta(self, prompt: str, sutta_uid: str) -> str:
if '<<current_sutta>>' not in prompt:
return prompt
res: List[USutta] = []
r = self._app_data.db_session \
.query(Am.Sutta) \
.filter(Am.Sutta.uid == sutta_uid) \
.first()
if r:
res.append(r)
r = self._app_data.db_session \
.query(Um.Sutta) \
.filter(Um.Sutta.uid == sutta_uid) \
.first()
if r:
res.append(r)
if len(res) > 0:
sutta_plain = self._sutta_content_plain(res[0])
return prompt.replace('<<current_sutta>>', sutta_plain)
else:
return prompt
def _parse_prompt_sutta_in_text(self, prompt: str) -> str:
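# Substitute each <<sutta/uid>> marker with the plain-text content of that sutta, looking the uid up in both the Am and Um sutta tables.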
matches = re.finditer(r'<<suttas*/([^>]+)>>', prompt)
parsed_prompt = prompt
already_replaced = []
for m in matches:
if m.group(0) in already_replaced:
continue
uid = m.group(1)
res: List[USutta] = []
r = self._app_data.db_session \
.query(Am.Sutta) \
.filter(Am.Sutta.uid == uid) \
.first()
if r:
res.append(r)
r = self._app_data.db_session \
.query(Um.Sutta) \
.filter(Um.Sutta.uid == uid) \
.first()
if r:
res.append(r)
if len(res) > 0:
sutta_plain = self._sutta_content_plain(res[0])
parsed_prompt = re.sub(m.group(0), sutta_plain, parsed_prompt)
already_replaced.append(m.group(0))
return parsed_prompt
def _sutta_content_plain(self, sutta: USutta) -> str:
max_lines = self._app_data.app_settings['openai']['join_short_lines']
content = sutta_content_plain(sutta, max_lines)
return content
def _submit_prompt(self):
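# Build the system and user ChatMessages from the input fields and run a CompletionWorker on the thread pool.
# Shows a warning and returns early if no OpenAI API key is set.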
openai_settings = self._app_data.app_settings['openai']
api_key = openai_settings['api_key']
if api_key is None or api_key == "":
self._show_warning("<p>Please add your OpenAI key in the Settings tab.</p>")
return
messages: List[ChatMessage] = []
messages.append(
ChatMessage(
role=ChatRole.System,
content=self.system_prompt_input.toPlainText().strip(),
))
text = self.user_prompt_input.toPlainText().strip()
user_prompt = self._parse_prompt_variables(text, parse_sutta_in_text=True)
if len(user_prompt) < 4:
return
messages.append(
ChatMessage(
role=ChatRole.User,
content=user_prompt,
))
if self.completion_worker is not None:
self.completion_worker.will_emit_finished = False
self.completion_worker = CompletionWorker(messages, openai_settings)
self.completion_worker.signals.finished.connect(partial(self._completion_finished))
def _completion_error(msg: str):
if self.completion_worker is not None:
self.completion_worker.will_emit_finished = False
self.stop_loading_animation()
self._show_warning(msg)
def _completion_warning(msg: str):
self.completion_warning_msg.setText(msg)
self.completion_worker.signals.error.connect(partial(_completion_error))
self.completion_worker.signals.warning.connect(partial(_completion_warning))
self.start_loading_animation()
self.thread_pool.start(self.completion_worker)
def _completion_finished(self, results: List[str]):
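# Handle results from the CompletionWorker: show the first returned choice (appended to the user prompt or in the
# completion pane, depending on append mode) and save the exchange to the GptHistory table.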
self.stop_loading_animation()
self.completion_warning_msg.setText("")
if len(results) == 0:
return
# TODO: Add interface elements to show all returned choices.
# Only using the first returned choice for now.
result = results[0]
append_mode = self._app_data.app_settings['openai']['append_mode']
name_path = self.prompt_name_input.text()
user_prompt = self.user_prompt_input.toPlainText()
if append_mode:
self.user_prompt_input.setPlainText(user_prompt + "\n\n[SYSTEM]:\n" + result + "\n\n[USER]:\n")
self.completion_text.setPlainText("")
vert_bar = self.user_prompt_input.verticalScrollBar()
if vert_bar:
    vert_bar.setValue(vert_bar.maximum())
else:
self.completion_text.setPlainText(result)
messages: List[ChatMessage] = []
messages.append(
ChatMessage(
role=ChatRole.System,
content=self.system_prompt_input.toPlainText().strip(),
))
messages.append(
ChatMessage(
role=ChatRole.User,
content=user_prompt,
))
messages.append(
ChatMessage(
role=ChatRole.System,
content=result,
))
log = Um.GptHistory(name_path = name_path, messages_json = json.dumps(messages))
self._app_data.db_session.add(log)
self._app_data.db_session.commit()
self.reload_history_table()
def _inputs_to_messages(self, parse_variables = False) -> List[ChatMessage]:
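# Convert the system prompt, user prompt and completion text inputs into a list of ChatMessages,
# optionally parsing template variables in the user prompt.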
messages: List[ChatMessage] = []
messages.append(
ChatMessage(
role=ChatRole.System,
content=self.system_prompt_input.toPlainText().strip(),
))
text = self.user_prompt_input.toPlainText().strip()
if parse_variables:
text = self._parse_prompt_variables(text)
messages.append(
ChatMessage(
role=ChatRole.User,
content=text,
))
messages.append(
ChatMessage(
role=ChatRole.System,
content=self.completion_text.toPlainText().strip(),
))
return messages
def _user_typed(self):
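# Debounce token counting with a single-shot timer so it only runs after the user pauses typing.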
if not self._input_timer.isActive():
self._input_timer = QTimer()
self._input_timer.timeout.connect(partial(self._update_token_count))
self._input_timer.setSingleShot(True)
self._input_timer.start(SEARCH_TIMER_SPEED)
def _tokenizer_finished(self, p: int):
# p = prompt token count
auto_max = self._app_data.app_settings['openai']['auto_max_tokens']
model_max = model_max_tokens(self._app_data.app_settings['openai']['model'])
if auto_max:
min_val = self.openai_max_tokens_input.minimum()
max_val = self.openai_max_tokens_input.maximum()
m = min(max(model_max - p, min_val), max_val)
self.openai_max_tokens_input.setValue(m)
else:
m = self.openai_max_tokens_input.value()
total = p+m
self.token_count_msg.setText(f"{p} (prompt) + {m} = {total} tokens")
self.token_warning_msg.setVisible(total > model_max)
def _tokenizer_error(self, msg: str):
self.token_count_msg.setText(msg)
auto_max = self._app_data.app_settings['openai']['auto_max_tokens']
if auto_max:
self.openai_auto_max.setChecked(False)
self._app_data.app_settings['openai']['auto_max_tokens'] = False
self._app_data._save_app_settings()
def _update_token_count(self):
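# Suppress the result of any in-flight TokenizerWorker and start a new token count for the current inputs.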
if self.tokenizer_worker is not None:
self.tokenizer_worker.will_emit_finished = False
messages = self._inputs_to_messages(parse_variables=True)
model = self._app_data.app_settings['openai']['model']
self.tokenizer_worker = TokenizerWorker(model, messages)
self.tokenizer_worker.signals.finished.connect(partial(self._tokenizer_finished))
self.tokenizer_worker.signals.error.connect(partial(self._tokenizer_error))
self.thread_pool.start(self.tokenizer_worker)
def start_loading_animation(self):
self.completion_loading_bar.setMovie(self._loading_bar_anim)
self._loading_bar_anim.start()
icon_processing = QtGui.QIcon()
icon_processing.addPixmap(QtGui.QPixmap(":/stopwatch"), QtGui.QIcon.Mode.Normal, QtGui.QIcon.State.Off)
self.prompt_submit.setIcon(icon_processing)
def stop_loading_animation(self):
self._loading_bar_anim.stop()
self.completion_loading_bar.setMovie(self._loading_bar_empty_anim)
self.prompt_submit.setIcon(QtGui.QIcon())
def _handle_selection_changed(self, selected: QItemSelection, _: QItemSelection):
indexes = selected.indexes()
if len(indexes) > 0:
self._handle_prompts_tree_clicked(indexes[0])
def _reset_settings(self):
self._app_data.app_settings['openai'] = default_openai_settings()
self._app_data._save_app_settings()
self._init_values()
def _prompt_clear_all(self):
self.prompt_name_input.setText("")
self.system_prompt_input.setPlainText("")
self.user_prompt_input.setPlainText("")
self.completion_text.setPlainText("")
self.completion_warning_msg.setText("")
def _prompt_save(self):
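# Save the current inputs as a GptPrompt, generating a timestamped name if none is given and
# updating an existing prompt with the same name_path.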
name_path = self.prompt_name_input.text().strip()
if name_path == "":
t = datetime.now().strftime("%F %T")
name_path = f"Prompt {t}"
self.prompt_name_input.setText(name_path)
prompt: Optional[Um.GptPrompt] = None
prompt = self._app_data.db_session \
.query(Um.GptPrompt) \
.filter(Um.GptPrompt.name_path == name_path) \
.first()
if prompt is None:
prompt = Um.GptPrompt(
name_path = name_path,
messages_json = json.dumps(self._inputs_to_messages()),
show_in_context = False,
)
self._app_data.db_session.add(prompt)
else:
prompt.messages_json = json.dumps(self._inputs_to_messages())
self._app_data.db_session.commit()
self.reload_prompts_tree()
def _get_selected_prompt(self) -> Optional[Um.GptPrompt]:
a = self.prompts_tree_view.selectedIndexes()
if not a:
return
# only one tree node is selected at a time
idx = a[0]
item: PromptItem = self.prompts_tree_model.itemFromIndex(idx) # type: ignore
prompt = self._app_data.db_session \
.query(Um.GptPrompt) \
.filter(Um.GptPrompt.id == item.data['db_id']) \
.first()
return prompt
def _prompt_delete_selected(self):
prompt = self._get_selected_prompt()
if prompt:
self._app_data.db_session.delete(prompt)
self._app_data.db_session.commit()
self.reload_prompts_tree()
def _prompt_delete_all(self):
box = QMessageBox(self)
box.setIcon(QMessageBox.Icon.Warning)
box.setWindowTitle("Delete Confirmation")
box.setText("<p>Delete all Prompts?</p>")
box.setStandardButtons(QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No)
reply = box.exec()
if reply != QMessageBox.StandardButton.Yes:
return
self._app_data.db_session.query(Um.GptPrompt).delete()
self._app_data.db_session.commit()
self.reload_prompts_tree()
def _prompt_copy(self):
user_prompt_text = self.user_prompt_input.toPlainText().strip()
if user_prompt_text != "":
self._app_data.clipboard_setText(user_prompt_text)
def _prompt_copy_completion(self):
completion_text = self.completion_text.toPlainText().strip()
if completion_text != "":
self._app_data.clipboard_setText(completion_text)
def _prompt_copy_all(self):
prompt_name = self.prompt_name_input.text().strip()
system_prompt_text = self.system_prompt_input.toPlainText().strip()
user_prompt_text = self.user_prompt_input.toPlainText().strip()
completion_text = self.completion_text.toPlainText().strip()
if system_prompt_text != "" or user_prompt_text != "" or completion_text != "":
all_text = f"{prompt_name}\n\n{system_prompt_text}\n\n{user_prompt_text}\n\n{completion_text}".strip()
self._app_data.clipboard_setText(all_text)
def _prompt_toggle_menu(self):
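# Toggle the selected prompt's show_in_context flag, then reload the tree and restore the selection.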
prompt = self._get_selected_prompt()
if not prompt:
return
if prompt.show_in_context is None or not prompt.show_in_context:
prompt.show_in_context = True
else:
prompt.show_in_context = False
self._app_data.db_session.commit()
a = self.prompts_tree_view.selectedIndexes()
idx = a[0]
sel_row = idx.row()
self.reload_prompts_tree()
idx = self.prompts_tree_model.index(sel_row, 0)
sel_model = self.prompts_tree_view.selectionModel()
if sel_model:
sel_model.select(idx,
QItemSelectionModel.SelectionFlag.ClearAndSelect | \
QItemSelectionModel.SelectionFlag.Rows)
def _prompt_show_parsed(self):
prompt_name = self.prompt_name_input.text().strip()
system_prompt_text = self.system_prompt_input.toPlainText().strip()
text = self.user_prompt_input.toPlainText().strip()
user_prompt_text = self._parse_prompt_variables(text, parse_sutta_in_text=True)
completion_text = self.completion_text.toPlainText().strip()
text = f"{prompt_name}\n\n{system_prompt_text}\n\n{user_prompt_text}\n\n{completion_text}".strip()
d = ShowPromptDialog(text)
d.exec()
def _handle_history_row_load(self):
a = self.history_table.selectedIndexes()
if len(a) != 0:
self._handle_history_load(a[0])
def _handle_history_load(self, val: QModelIndex):
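# Load the selected GptHistory row from the database and restore its name and messages into the prompt inputs.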
model = val.model()
if model is None:
return
data = model.data(val, Qt.ItemDataRole.UserRole)
db_id = int(data[val.row()][HistoryModelColToIdx['_db_id']])
res = self._app_data.db_session \
.query(Um.GptHistory) \
.filter(Um.GptHistory.id == db_id) \
.first()
if not res:
return
self.prompt_name_input.setText("" if res.name_path is None else res.name_path)
if res.messages_json is None:
messages = []
else:
messages = json.loads(res.messages_json)
self._set_prompt_inputs(messages)
def _history_delete_selected(self):
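# Delete the selected history rows; ask for confirmation when more than one entry is selected.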
a = self.history_table.selectedIndexes()
if not a:
return
db_ids = set(map(lambda idx: self.history_model._data[idx.row()][HistoryModelColToIdx['_db_id']], a))
n = len(db_ids)
if n > 1:
box = QMessageBox(self)
box.setIcon(QMessageBox.Icon.Warning)
box.setWindowTitle("Delete Confirmation")
box.setText(f"<p>Delete {n} GPT history entries?</p>")
box.setStandardButtons(QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No)
reply = box.exec()
if reply != QMessageBox.StandardButton.Yes:
return
items = self._app_data.db_session \
.query(Um.GptHistory) \
.filter(Um.GptHistory.id.in_(db_ids)) \
.all()
for i in items:
self._app_data.db_session.delete(i)
self._app_data.db_session.commit()
self.reload_history_table()
def _history_delete_all(self):
box = QMessageBox(self)
box.setIcon(QMessageBox.Icon.Warning)
box.setWindowTitle("Delete Confirmation")
box.setText("<p>Delete all GPT history entries?</p>")
box.setStandardButtons(QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No)
reply = box.exec()
if reply != QMessageBox.StandardButton.Yes:
return
self._app_data.db_session.query(Um.GptHistory).delete()
self._app_data.db_session.commit()
self.reload_history_table()
def _handle_import(self):
file_path, _ = QFileDialog \
.getOpenFileName(self,
"Import from CSV...",
"",
"CSV Files (*.csv)")
if len(file_path) == 0:
return
n = self._app_data.import_prompts(file_path)
self.reload_history_table()
self.reload_prompts_tree()
box = QMessageBox(self)
box.setIcon(QMessageBox.Icon.Information)
box.setText(f"Imported {n} prompts.")
box.setWindowTitle("Import Completed")
box.setStandardButtons(QMessageBox.StandardButton.Ok)
box.exec()
def _handle_export(self):
file_path, _ = QFileDialog \
.getSaveFileName(self,
"Export as CSV...",
"",
"CSV Files (*.csv)")
if len(file_path) == 0:
return
n = self._app_data.export_prompts(file_path)
box = QMessageBox(self)
box.setIcon(QMessageBox.Icon.Information)
box.setText(f"Exported {n} prompts.")
box.setWindowTitle("Export Completed")
box.setStandardButtons(QMessageBox.StandardButton.Ok)
box.exec()
def _handle_close(self):
self.close()
def _connect_signals(self):
self.action_Close_Window \
.triggered.connect(partial(self._handle_close))
self.action_Import \
.triggered.connect(partial(self._handle_import))
self.action_Export \
.triggered.connect(partial(self._handle_export))
self.toggle_sidebar_btn.clicked.connect(partial(self._toggle_sidebar))
sel_model = self.prompts_tree_view.selectionModel()
if sel_model:
sel_model.selectionChanged.connect(partial(self._handle_selection_changed))
self.prompt_submit.clicked.connect(partial(self._submit_prompt))
self.prompt_clear_all_btn.clicked.connect(partial(self._prompt_clear_all))
self.prompt_save_btn.clicked.connect(partial(self._prompt_save))
self.prompt_show_parsed_btn.clicked.connect(partial(self._prompt_show_parsed))
self.prompt_toggle_menu_btn.clicked.connect(partial(self._prompt_toggle_menu))
self.prompt_copy_btn.clicked.connect(partial(self._prompt_copy))
self.prompt_copy_completion_btn.clicked.connect(partial(self._prompt_copy_completion))
self.prompt_copy_all_btn.clicked.connect(partial(self._prompt_copy_all))
self.prompt_delete_btn.clicked.connect(partial(self._prompt_delete_selected))
self.prompt_delete_all_btn.clicked.connect(partial(self._prompt_delete_all))
self.history_table.doubleClicked.connect(self._handle_history_load)
self.history_load_btn.clicked.connect(partial(self._handle_history_row_load))
self.history_delete_btn.clicked.connect(partial(self._history_delete_selected))
self.history_delete_all_btn.clicked.connect(partial(self._history_delete_all))
self.settings_reset_btn.clicked.connect(partial(self._reset_settings))
self.system_prompt_input.textChanged.connect(partial(self._user_typed))
self.user_prompt_input.textChanged.connect(partial(self._user_typed))
self.openai_model_select.currentIndexChanged.connect(partial(self._save_all_settings))
self.openai_model_select.currentIndexChanged.connect(partial(self._update_model_max))
self.openai_append_mode.toggled.connect(partial(self._append_mode_toggled))
self.openai_auto_max.toggled.connect(partial(self._auto_max_toggled))
for i in [self.openai_temperature_input,
self.openai_max_tokens_input,
self.openai_n_completions_input,
self.openai_join_lines_under_input]:
i.valueChanged.connect(self._save_all_settings)
for i in [self.openai_api_key_input,]:
i.textChanged.connect(self._save_all_settings)
class CompletionWorkerSignals(QObject):
error = pyqtSignal(str)
warning = pyqtSignal(str)
finished = pyqtSignal(list)
class CompletionWorker(QRunnable):
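# Background QRunnable that sends the chat messages to the OpenAI ChatCompletion API and
# reports results, warnings and errors through Qt signals.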
signals: CompletionWorkerSignals
def __init__(self, messages: List[ChatMessage], openai_settings: OpenAISettings):
super().__init__()
api_key = openai_settings['api_key']
if api_key is None or api_key == "":
logger.error("OpenAI API key is None")
return
import openai
self.openai = openai
self.openai.api_key = openai_settings['api_key']
self.signals = CompletionWorkerSignals()
self.messages = messages
self.openai_settings = openai_settings
self.query_started_time: datetime = datetime.now()
self.query_finished_time: Optional[datetime] = None
self.will_emit_finished = True
@pyqtSlot()
def run(self):
logger.info("CompletionWorker::run()")
try:
results = self.chat_completion()
if self.will_emit_finished:
logger.info("CompletionWorker::run() signals.finished.emit()")
self.signals.finished.emit(results)
except Exception as e:
logger.error(e)
self.signals.error.emit(f"<p>OpenAI Completion error:</p><p>{e}</p>")
def chat_completion(self, max_retries = 10) -> List[str]:
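# Request a chat completion, retrying up to max_retries times. "model does not exist" errors abort,
# while context-length errors are returned as the response text so the caller can continue.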
logger.info("gpt3_chat()")
content: List[str] = []
try_count = 1
while try_count <= max_retries:
try:
logger.info(f"Request ChatCompletion, try_count {try_count}")
# https://platform.openai.com/docs/api-reference/chat/create
resp: ChatResponse = self.openai.ChatCompletion.create( # type: ignore
model = OpenAIModelLatest[self.openai_settings['model']],
messages = self.messages,
temperature = self.openai_settings['temperature'],
max_tokens = self.openai_settings['max_tokens'],
n = self.openai_settings['n_completions'],
stream = False,
)
content = [i['message']['content'].strip() for i in resp['choices']]
break
except Exception as e:
logger.error(f"---\n{e}\n---")
# The model: `gpt-4` does not exist
if "does not exist" in str(e):
raise Exception(f"ChatGPT request failed. Error: {e}")
if "maximum context length" in str(e):
# Return the exception text as a response, so the script can continue.
return [str(e)]
msg = f"ChatGPT Request failed, retrying ({try_count})."
self.signals.warning.emit(msg)
logger.error(msg)
try_count += 1
if len(content) > 0:
return content
else:
raise Exception(f"ChatGPT request failed, max_retries {max_retries} reached.")
class TokenizerWorkerSignals(QObject):
error = pyqtSignal(str)
finished = pyqtSignal(int)
class TokenizerWorker(QRunnable):
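# Background QRunnable that counts prompt tokens with tiktoken and emits the count when finished.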
signals: TokenizerWorkerSignals
def __init__(self,
model: OpenAIModel,
messages: List[ChatMessage]):
super().__init__()
self.signals = TokenizerWorkerSignals()
self.model = model
self.messages = messages
self.tokenizer: Optional[Any] = None
self.will_emit_finished = True
@pyqtSlot()
def run(self):
try:
count = num_tokens_from_messages(self.messages, model=OpenAIModelLatest[self.model])
if self.will_emit_finished:
self.signals.finished.emit(count)
except Exception as e:
msg = f"Tokenizer error: {e}"
logger.error(msg)
self.signals.error.emit(msg)
def num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613"):
"""
Returns the number of tokens used by a list of messages.
https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
"""
try:
encoding = tiktoken.encoding_for_model(model)
except KeyError:
logger.warn("Warning: model not found. Using cl100k_base encoding.")
encoding = tiktoken.get_encoding("cl100k_base")
if model in {
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k-0613",
"gpt-4-0314",
"gpt-4-32k-0314",
"gpt-4-0613",
"gpt-4-32k-0613",
}:
tokens_per_message = 3
tokens_per_name = 1
elif model == "gpt-3.5-turbo-0301":
tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
tokens_per_name = -1 # if there's a name, the role is omitted
elif "gpt-3.5-turbo" in model:
# logger.info("Warning: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.")
return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613")
elif "gpt-4" in model:
# logger.info("Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
return num_tokens_from_messages(messages, model="gpt-4-0613")
else:
raise NotImplementedError(f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.""")
num_tokens = 0
for message in messages:
num_tokens += tokens_per_message
for key, value in message.items():
num_tokens += len(encoding.encode(value))
if key == "name":
num_tokens += tokens_per_name
num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
return num_tokens
def messages_concat(messages: List[ChatMessage]) -> str:
prompt = "\n\n".join([i["content"] for i in messages])
return prompt
| ["\n\n", "content", "prompt_db_id", "None"] |