date_collected (string, 1 class) | repo_name (string, 6-116 chars) | file_name (string, 2-220 chars) | file_contents (string, 13-357k chars) | prompts (sequence) |
---|---|---|---|---|
2024-01-10 | seandearnaley/reddit-gpt-summarizer | app~utils~llm_utils.py | """ Utility functions for the Large Language Models. """
import math
import re
from typing import List
import tiktoken
from anthropic import Anthropic
from config import ANTHROPIC_AI_TYPE, OPEN_AI_CHAT_TYPE
def group_bodies_into_chunks(contents: str, token_length: int) -> List[str]:
"""
Concatenate the content lines into a list of newline-delimited strings
that are less than token_length tokens long.
"""
results: List[str] = []
current_chunk = ""
for line in contents.split("\n"):
line = re.sub(r"\n+", "\n", line).strip()
line = line[: estimate_word_count(1000)] + "\n"
if num_tokens_from_string(current_chunk + line) > token_length:
results.append(current_chunk)
current_chunk = ""
current_chunk += line
if current_chunk:
results.append(current_chunk)
return results
def anthropic_sync_count_tokens(text: str) -> int:
"""Count the number of tokens in a text string using the Anthropic API."""
client = Anthropic()
number_of_tokens = client.count_tokens(text)
return number_of_tokens
def num_tokens_from_string(string: str, model_type: str = OPEN_AI_CHAT_TYPE) -> int:
"""
Returns the number of tokens in a text string.
    NOTE: OpenAI and Anthropic have different token-counting mechanisms.
https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them
"""
is_anthropic = model_type == ANTHROPIC_AI_TYPE
num_tokens = (
anthropic_sync_count_tokens(string)
if is_anthropic
else len(tiktoken.get_encoding("gpt2").encode(string))
)
return num_tokens
def estimate_word_count(num_tokens: int) -> int:
"""
Given the number of GPT-2 tokens, estimates the real word count.
"""
# The average number of real words per token for GPT-2 is 0.56, according to OpenAI.
# Multiply the number of tokens by this average to estimate the total number of real
# words.
return math.ceil(num_tokens * 0.56)
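# Illustrative note (not part of the original file): with the 0.56 words-per-token
# ratio above, a 1,000-token budget maps to roughly 560 words, e.g.
#   estimate_word_count(1000)  # -> 560
#   estimate_word_count(50)    # -> 28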
def validate_max_tokens(max_tokens: int) -> None:
"""
Validate the max_tokens argument, raising a ValueError if it is not valid.
"""
if max_tokens <= 0:
raise ValueError("The input max_tokens must be a positive integer.")
| [] |
2024-01-10 | seandearnaley/reddit-gpt-summarizer | app~recursive_summary.py | """
This script will take a Reddit URL and use OpenAI's GPT-3 model to generate
a summary of the Reddit thread.
NOTE: provided as a helper script for your GPT apps, not used by the Streamlit app.
"""
# Import necessary modules
import os
import sys
from typing import Any, Dict
import openai
from dotenv import load_dotenv
from pyrate_limiter import Duration, Limiter, RequestRate
from services.openai_connector import complete_openai_text
from utils.llm_utils import estimate_word_count, num_tokens_from_string
# Constants
SUMMARY_SIZE = 500
MAX_CHUNK_TOKEN_SIZE = 1000
MAX_TOKENS = 2049 # max number of tokens (note: text-curie-001 has a max of 2048)
GPT_MODEL = "text-curie-001" # GPT-3 model to use
MAX_CHUNK_LENGTH = 2000
# Token bucket rate limiting parameters
RATE_LIMIT = 10 # max number of requests per minute
RATE_LIMIT_PERIOD = 60 # period in seconds
try:
load_dotenv()
except FileNotFoundError:
print("Could not find .env file. Please create one.")
sys.exit(1)
openai.organization = os.environ.get("OPENAI_ORG_ID")
openai.api_key = os.environ.get("OPENAI_API_KEY")
rate_limits = (RequestRate(10, Duration.MINUTE),) # 10 requests a minute
# Create the rate limiter
# Pyrate Limiter instance
limiter = Limiter(*rate_limits)
def load_text_file(filename: str) -> str:
"""
Load a text file from the same directory as this script.
"""
script_directory = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(script_directory, filename)
with open(file_path, "r", encoding="utf-8") as file:
file_read_data = file.read()
return file_read_data
def write_text_file(text: str, filename: str) -> str:
"""
    Write a text file to the same directory as this script.
"""
script_directory = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(script_directory, filename)
with open(file_path, "w", encoding="utf-8") as output_file:
output_file.write(text)
return file_path
def recursive_summarization(
summary_size: int, chunk_text: str, prefix_text: str = ""
) -> str:
"""
Recursive summarization function.
"""
summary_string = (
f"```{prefix_text}```\n\n" + "new text:\n\n```"
# + f"{estimate_word_count(max_token_length)} words:\n"
+ chunk_text
+ "```\n\nsummarize then append to last text, write close to [replace] "
" words, use extractive summarization if you have too much text, use"
" abstractive summarization (no gibberish) if you don't have enough:\n\n```"
)
summary_string = summary_string.replace(
"[replace]", str(estimate_word_count(summary_size))
)
response: Dict[str, Any] = openai.completions.create( # type: ignore
model=GPT_MODEL,
prompt=summary_string,
max_tokens=MAX_TOKENS
- num_tokens_from_string(summary_string), # eg 4000-(len chunk+len string)
)
if len(response) == 0:
print("response=", response) # error
return "Error: unable to generate response."
response_text = response["choices"][0]["text"]
return response_text
def summarize_text(
text: str, max_token_length: int, max_tokens: int = MAX_TOKENS
) -> str:
"""
Summarize the prompt using GPT-3.
"""
original_num_tokens = num_tokens_from_string(text)
summary_size = max_token_length if max_token_length <= max_tokens else max_tokens
chunks = [
text[i : i + MAX_CHUNK_LENGTH] for i in range(0, len(text), MAX_CHUNK_LENGTH)
]
result = ""
summary = ""
for chunk in chunks:
print("chunk=", chunk)
limiter.try_acquire("summarize_text")
summary = complete_openai_text(
chunk,
summary_size,
settings={
"selected_model": GPT_MODEL,
"selected_model_type": "chat",
"system_role": "Please summarize the following text:",
},
)
result += summary
num_tokens = num_tokens_from_string(result)
print("num_tokens=", num_tokens, "original_num_tokens=", original_num_tokens)
if num_tokens > max_token_length:
# iterate again, EXPENSIVE try again
print("dividing prompt")
return summarize_text(result, max_token_length)
return result
def cleanup_summary(text: str, summary_size: int, max_tokens: int = MAX_TOKENS) -> str:
"""
Cleanup the summary using GPT-3.
Args:
text (str): The text to summarize.
summary_size (int): The desired summary size in tokens.
max_tokens (int): The maximum number of tokens to use.
"""
summary_size = summary_size if summary_size <= max_tokens else max_tokens
cleanup_prompt = (
"cleanup this machine generated summarization, notably "
+ "in the ligatures between passages, write close to [replace] words, use"
" extractive summarization if you have too much text, use abstractive"
" summarization if you don't have enough, no gibberish or bad"
f" formatting:\n```{text}```"
)
cleanup_prompt = cleanup_prompt.replace(
"[replace]", str(estimate_word_count(summary_size))
)
response: Dict[str, Any] = openai.completions.create( # type: ignore
model="text-davinci-003",
prompt=cleanup_prompt,
best_of=3,
max_tokens=max_tokens - num_tokens_from_string(cleanup_prompt),
)
if len(response) == 0:
print("response error=", response) # error
return "Error: unable to generate response."
response_text = response["choices"][0]["text"]
print("response_text=", response_text)
return response_text
print("go")
filetext = load_text_file("../inputs/HomeSummary.txt")
output = summarize_text(filetext, SUMMARY_SIZE)
output = cleanup_summary(output, SUMMARY_SIZE) # perform a cleanup pass
path = write_text_file(output, "../outputs/recursive_summary_test_output.txt")
print("Output written to", path)
| [
"cleanup this machine generated summarization, notably in the ligatures between passages, write close to [replace] words, use extractive summarization if you have too much text, use abstractive summarization if you don't have enough, no gibberish or bad formatting:\n```PLACEHOLDER```"
] |
2024-01-10 | seandearnaley/reddit-gpt-summarizer | app~services~anthropic_connector.py | """Anthropic Connector"""
from anthropic import AI_PROMPT, HUMAN_PROMPT, Anthropic
from config import ConfigLoader
from data_types.summary import GenerateSettings
from env import EnvVarsLoader
from log_tools import Logger
config = ConfigLoader.get_config()
app_logger = Logger.get_app_logger()
env_vars = EnvVarsLoader.load_env()
@Logger.log
def complete_anthropic_text(
prompt: str,
max_tokens: int,
settings: GenerateSettings,
) -> str:
"""
Use Anthropic's GPT model to complete text based on the given prompt.
Args:
prompt (str): The prompt to use as the starting point for text completion.
max_tokens (int, optional): The maximum number of tokens to generate in the
response.
settings (GenerateSettings): The settings to use for generating the text.
Returns:
str: The completed text.
"""
try:
anthropic_client = Anthropic(api_key=env_vars["ANTHROPIC_API_KEY"])
response = anthropic_client.completions.create(
prompt=f"{HUMAN_PROMPT} {prompt}{AI_PROMPT}",
stop_sequences=[HUMAN_PROMPT],
model=settings["selected_model"],
max_tokens_to_sample=max_tokens,
)
return response.completion.strip()
except Exception as err: # pylint: disable=broad-except
return f"error: {err}"
| [
"PLACEHOLDER PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | gauss5930/AlpaGasus2-QLoRA | evaluation~MT-Bench~common.py | """
Common data structures and utilities.
"""
import ast
import dataclasses
import glob
import json
import os
import re
import time
from typing import Optional
import openai
import anthropic
from fastchat.model.model_adapter import get_conversation_template
# API setting constants
API_MAX_RETRY = 16
API_RETRY_SLEEP = 10
API_ERROR_OUTPUT = "$ERROR$"
TIE_DELTA = 0.1
# Categories that need reference answers
NEED_REF_CATS = ["math", "reasoning", "coding"]
# Extract scores from judgments
two_score_pattern = re.compile(r"\[\[(\d+\.?\d*),\s?(\d+\.?\d*)\]\]")
two_score_pattern_backup = re.compile(r"\[(\d+\.?\d*),\s?(\d+\.?\d*)\]")
one_score_pattern = re.compile(r"\[\[(\d+\.?\d*)\]\]")
one_score_pattern_backup = re.compile(r"\[(\d+\.?\d*)\]")
# Sampling temperature configs for each question category
temperature_config = {
"writing": 0.7,
"roleplay": 0.7,
"extraction": 0.0,
"math": 0.0,
"coding": 0.0,
"reasoning": 0.0,
"stem": 0.1,
"humanities": 0.1,
}
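# Illustrative sketch (not part of the original file): answer-generation code that
# consumes this table typically looks the temperature up by question category; the
# 0.7 fallback below is an assumed default, not something defined in this module.
#   temperature = temperature_config.get(question["category"], 0.7)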
reverse_model_map = {
"model_1": "model_2",
"model_2": "model_1",
}
@dataclasses.dataclass
class Judge:
model_name: str
prompt_template: dict
ref_based: bool = False
multi_turn: bool = False
@dataclasses.dataclass
class MatchSingle:
question: dict
model: str
answer: dict
judge: Judge
ref_answer: dict = None
multi_turn: bool = False
@dataclasses.dataclass
class MatchPair:
question: dict
model_1: str
model_2: str
answer_1: dict
answer_2: dict
judge: Judge
ref_answer: dict = None
multi_turn: bool = False
def load_questions(question_file: str, begin: Optional[int], end: Optional[int]):
"""Load questions from a file."""
questions = []
with open(question_file, "r") as ques_file:
for line in ques_file:
if line:
questions.append(json.loads(line))
questions = questions[begin:end]
return questions
def load_model_answers(answer_dir: str):
"""Load model answers.
The return value is a python dict of type:
Dict[model_name: str -> Dict[question_id: int -> answer: dict]]
"""
filenames = glob.glob(os.path.join(answer_dir, "*.jsonl"))
filenames.sort()
model_answers = {}
for filename in filenames:
model_name = os.path.basename(filename)[:-6]
answer = {}
with open(filename) as fin:
for line in fin:
line = json.loads(line)
answer[line["question_id"]] = line
model_answers[model_name] = answer
return model_answers
def load_judge_prompts(prompt_file: str):
"""Load judge prompts.
The return value is a python dict of type:
Dict[judge_name: str -> dict]
"""
prompts = {}
with open(prompt_file) as fin:
for line in fin:
line = json.loads(line)
prompts[line["name"]] = line
return prompts
def run_judge_single(question, answer, judge, ref_answer, multi_turn=False):
kwargs = {}
model = judge.model_name
if ref_answer is not None:
kwargs["ref_answer_1"] = ref_answer["choices"][0]["turns"][0]
kwargs["ref_answer_2"] = ref_answer["choices"][0]["turns"][1]
if multi_turn:
user_prompt = judge.prompt_template["prompt_template"].format(
question_1=question["turns"][0],
question_2=question["turns"][1],
answer_1=answer["choices"][0]["turns"][0],
answer_2=answer["choices"][0]["turns"][1],
**kwargs,
)
else:
user_prompt = judge.prompt_template["prompt_template"].format(
question=question["turns"][0],
answer=answer["choices"][0]["turns"][0],
**kwargs,
)
rating = -1
system_prompt = judge.prompt_template["system_prompt"]
conv = get_conversation_template(model)
conv.set_system_message(system_prompt)
conv.append_message(conv.roles[0], user_prompt)
conv.append_message(conv.roles[1], None)
if model in ["gpt-3.5-turbo", "gpt-4"]:
judgment = chat_compeletion_openai(model, conv, temperature=0, max_tokens=2048)
elif model in ["claude-v1", "claude-instant-v1"]:
judgment = chat_compeletion_anthropic(
model, conv, temperature=0, max_tokens=1024
)
else:
raise ValueError(f"Invalid judge model name: {model}")
if judge.prompt_template["output_format"] == "[[rating]]":
match = re.search(one_score_pattern, judgment)
if not match:
match = re.search(one_score_pattern_backup, judgment)
if match:
rating = ast.literal_eval(match.groups()[0])
else:
rating = -1
else:
raise ValueError(
f"invalid output format: {judge.prompt_template['output_format']}"
)
return rating, user_prompt, judgment
def play_a_match_single(match: MatchSingle, output_file: str):
question, model, answer, judge, ref_answer, multi_turn = (
match.question,
match.model,
match.answer,
match.judge,
match.ref_answer,
match.multi_turn,
)
if judge.prompt_template["type"] == "single":
score, user_prompt, judgment = run_judge_single(
question, answer, judge, ref_answer, multi_turn=multi_turn
)
question_id = question["question_id"]
turn = 1 if not multi_turn else 2
result = {
"question_id": question_id,
"model": model,
"judge": (judge.model_name, judge.prompt_template["name"]),
"user_prompt": user_prompt,
"judgment": judgment,
"score": score,
"turn": turn,
"tstamp": time.time(),
}
print(
f"question: {question_id}, turn: {turn}, model: {model}, "
f"score: {score}, "
f"judge: {(judge.model_name, judge.prompt_template['name'])}"
)
else:
        raise ValueError(f"invalid judge type: {judge.prompt_template['type']}")
if output_file:
os.makedirs(os.path.dirname(output_file), exist_ok=True)
with open(output_file, "a") as fout:
fout.write(json.dumps(result) + "\n")
return result
def run_judge_pair(question, answer_a, answer_b, judge, ref_answer, multi_turn=False):
kwargs = {}
model = judge.model_name
if ref_answer is not None:
kwargs["ref_answer_1"] = ref_answer["choices"][0]["turns"][0]
kwargs["ref_answer_2"] = ref_answer["choices"][0]["turns"][1]
if multi_turn:
system_prompt = judge.prompt_template["system_prompt"]
user_prompt = judge.prompt_template["prompt_template"].format(
question_1=question["turns"][0],
question_2=question["turns"][1],
answer_a_1=answer_a["choices"][0]["turns"][0],
answer_b_1=answer_b["choices"][0]["turns"][0],
answer_a_2=answer_a["choices"][0]["turns"][1],
answer_b_2=answer_b["choices"][0]["turns"][1],
**kwargs,
)
else:
system_prompt = judge.prompt_template["system_prompt"]
user_prompt = judge.prompt_template["prompt_template"].format(
question=question["turns"][0],
answer_a=answer_a["choices"][0]["turns"][0],
answer_b=answer_b["choices"][0]["turns"][0],
**kwargs,
)
winner = "error"
conv = get_conversation_template(model)
conv.append_message(conv.roles[0], user_prompt)
conv.append_message(conv.roles[1], None)
if model in ["gpt-3.5-turbo", "gpt-4"]:
conv.set_system_message(system_prompt)
judgment = chat_compeletion_openai(model, conv, temperature=0, max_tokens=2048)
elif model in ["claude-v1", "claude-instant-v1"]:
if system_prompt != "You are a helpful assistant.":
user_prompt = "[Instruction]\n" + system_prompt + "\n\n" + user_prompt
conv.messages[0][1] = user_prompt
judgment = chat_compeletion_anthropic(
model, conv, temperature=0, max_tokens=1024
)
else:
raise ValueError(f"Invalid judge model name: {model}")
if judge.prompt_template["output_format"] == "[[A]]":
if "[[A]]" in judgment:
winner = "A"
elif "[[B]]" in judgment:
winner = "B"
elif "[[C]]" in judgment:
winner = "tie"
else:
winner = "error"
elif judge.prompt_template["output_format"] == "[[rating_a,rating_b]]":
match = re.search(two_score_pattern, judgment)
if not match:
match = re.search(two_score_pattern_backup, judgment)
if match:
scores = [ast.literal_eval(s.strip()) for s in match.groups()]
if abs(scores[0] - scores[1]) <= TIE_DELTA:
winner = "tie"
elif scores[0] > scores[1]:
winner = "A"
else:
winner = "B"
else:
winner = "error"
else:
raise ValueError(
f"invalid output format: {judge.prompt_template['output_format']}"
)
return winner, user_prompt, judgment
def play_a_match_pair(match: MatchPair, output_file: str):
question, model_1, model_2, answer_1, answer_2, judge, ref_answer, multi_turn = (
match.question,
match.model_1,
match.model_2,
match.answer_1,
match.answer_2,
match.judge,
match.ref_answer,
match.multi_turn,
)
if judge.prompt_template["type"] == "pairwise":
g1_winner, g1_user_prompt, g1_judgment = run_judge_pair(
question, answer_1, answer_2, judge, ref_answer, multi_turn=multi_turn
)
g2_winner, g2_user_prompt, g2_judgment = run_judge_pair(
question, answer_2, answer_1, judge, ref_answer, multi_turn=multi_turn
)
g1_map = {"A": "model_1", "B": "model_2"}
g2_map = {"A": "model_2", "B": "model_1"}
g1_winner = g1_map.get(g1_winner, g1_winner)
g2_winner = g2_map.get(g2_winner, g2_winner)
question_id = question["question_id"]
turn = 1 if not multi_turn else 2
result = {
"question_id": question_id,
"model_1": model_1,
"model_2": model_2,
"g1_winner": g1_winner,
"g2_winner": g2_winner,
"judge": (judge.model_name, judge.prompt_template["name"]),
"g1_user_prompt": g1_user_prompt,
"g1_judgment": g1_judgment,
"g2_user_prompt": g2_user_prompt,
"g2_judgment": g2_judgment,
"turn": turn,
"tstamp": time.time(),
}
print(
f"question: {question_id}, turn: {turn}, model_1: {model_1}, model_2: {model_2}, "
f"g1_winner: {g1_winner}, g2_winner: {g2_winner}, "
f"judge: {(judge.model_name, judge.prompt_template['name'])}"
)
elif judge.prompt_template["type"] == "single":
m1_score, m1_user_prompt, m1_judgment = run_judge_single(
question, answer_1, judge
)
m2_score, m2_user_prompt, m2_judgment = run_judge_single(
question, answer_2, judge
)
if abs(m1_score - m2_score) <= TIE_DELTA:
winner = "tie"
elif m1_score > m2_score:
winner = "model_1"
else:
winner = "model_2"
question_id = question["question_id"]
result = {
"question_id": question_id,
"model_1": model_1,
"model_2": model_2,
"g1_winner": winner,
"g2_winner": winner,
"judge": (judge.model_name, judge.prompt_template["name"]),
"g1_user_prompt": m1_user_prompt,
"g1_judgment": m1_judgment,
"g2_user_prompt": m2_user_prompt,
"g2_judgment": m2_judgment,
"m1_score": m1_score,
"m2_score": m2_score,
"tstamp": time.time(),
}
print(
f"question: {question_id}, model_1: {model_1}, model_2: {model_2}, "
f"winner: {winner}, m1_score: {m1_score}, m2_score: {m2_score}, "
f"judge: {(judge.model_name, judge.prompt_template['name'])}"
)
else:
        raise ValueError(f"invalid judge type: {judge.prompt_template['type']}")
if output_file:
os.makedirs(os.path.dirname(output_file), exist_ok=True)
with open(output_file, "a") as fout:
fout.write(json.dumps(result) + "\n")
return result
def chat_compeletion_openai(model, conv, temperature, max_tokens):
output = API_ERROR_OUTPUT
for _ in range(API_MAX_RETRY):
try:
messages = conv.to_openai_api_messages()
response = openai.ChatCompletion.create(
model=model,
messages=messages,
n=1,
temperature=temperature,
max_tokens=max_tokens,
)
output = response["choices"][0]["message"]["content"]
break
except openai.error.OpenAIError as e:
print(type(e), e)
time.sleep(API_RETRY_SLEEP)
return output
def chat_compeletion_anthropic(model, conv, temperature, max_tokens):
output = API_ERROR_OUTPUT
for _ in range(API_MAX_RETRY):
try:
c = anthropic.Anthropic(api_key=os.environ["ANTHROPIC_API_KEY"])
prompt = conv.get_prompt()
response = c.completions.create(
model=model,
prompt=prompt,
stop_sequences=[anthropic.HUMAN_PROMPT],
max_tokens_to_sample=max_tokens,
temperature=temperature,
)
output = response.completion
break
except anthropic.APIError as e:
print(type(e), e)
time.sleep(API_RETRY_SLEEP)
return output.strip()
def chat_compeletion_palm(chat_state, model, conv, temperature, max_tokens):
from fastchat.serve.api_provider import init_palm_chat
assert model == "palm-2-chat-bison-001"
if chat_state is None:
chat_state = init_palm_chat("chat-bison@001")
parameters = {
"temperature": temperature,
"top_p": 0.8,
"top_k": 40,
"max_output_tokens": max_tokens,
}
output = API_ERROR_OUTPUT
for _ in range(API_MAX_RETRY):
try:
response = chat_state.send_message(conv.messages[-2][1], **parameters)
output = response.text
break
except Exception as e:
print(type(e), e)
time.sleep(API_RETRY_SLEEP)
return chat_state, output
def normalize_game_key_single(gamekey, result):
"""Make the model names sorted in a game key."""
qid, model_1, model_2 = gamekey
if model_1 < model_2:
return gamekey, result
else:
new_gamekey = (qid, model_2, model_1)
new_result = {
"winners": tuple(reverse_model_map.get(x, x) for x in result["winners"]),
"g1_judgment": result["g2_judgment"],
"g2_judgment": result["g1_judgment"],
}
return new_gamekey, new_result
def normalize_game_key_dict(judgment_dict):
"""Make the model names sorted in the game keys."""
ret = {}
for key, value in judgment_dict.items():
new_key, new_value = normalize_game_key_single(key, value)
ret[new_key] = new_value
return ret
def load_pairwise_model_judgments(filename: str):
"""Load model judgments.
The return value is a dict of type:
Dict[judge: Tuple -> Dict[game_key: tuple -> game_result: dict]
"""
judge_dict = {}
for line in open(filename):
obj = json.loads(line)
judge = tuple(obj["judge"])
qid, model_1, model_2 = obj["question_id"], obj["model_1"], obj["model_2"]
if judge not in judge_dict:
judge_dict[judge] = {}
if "winner" in obj:
winner = obj["winner"]
elif "g1_winner" in obj and "g2_winner" in obj:
g1_winner, g2_winner = obj["g1_winner"], obj["g2_winner"]
if g1_winner == g2_winner:
winner = g1_winner
else:
winner = "inconsistent"
else:
raise ValueError(f"Invalid keys: {list(obj.keys())}")
gamekey = (qid, model_1, model_2)
winners = (winner,)
judge_dict[judge][gamekey] = {
"winners": winners,
"g1_judgment": obj["g1_judgment"],
"g2_judgment": obj["g2_judgment"],
}
# Make the model names sorted in the game keys
normalized = {}
for judge, value in judge_dict.items():
normalized[judge] = normalize_game_key_dict(value)
return normalized
def load_single_model_judgments(filename: str):
"""Load model judgments.
The return value is a dict of type:
Dict[judge: Tuple -> Dict[game_key: tuple -> game_result: dict]
"""
judge_dict = {}
for line in open(filename):
obj = json.loads(line)
judge = tuple(obj["judge"])
qid, model = obj["question_id"], obj["model"]
if judge not in judge_dict:
judge_dict[judge] = {}
gamekey = (qid, model)
judge_dict[judge][gamekey] = {
"score": obj["score"],
"judgment": obj["judgment"],
}
return judge_dict
def resolve_pairwise_judgment_dict(
question, model_judgments_normal, model_judgments_math, multi_turn=False
):
"""Return the correct pairwise judge."""
if multi_turn:
if question["category"] in NEED_REF_CATS:
return model_judgments_math[("gpt-4", "pair-math-v1-multi-turn")]
return model_judgments_normal[("gpt-4", "pair-v2-multi-turn")]
if question["category"] in NEED_REF_CATS:
return model_judgments_math[("gpt-4", "pair-math-v1")]
else:
return model_judgments_normal[("gpt-4", "pair-v2")]
def resolve_single_judgment_dict(
question, model_judgments_normal, model_judgments_math, multi_turn=False
):
"""Return the correct single answer grading judge."""
if multi_turn:
if question["category"] in NEED_REF_CATS:
return model_judgments_math[("gpt-4", "single-math-v1-multi-turn")]
return model_judgments_normal[("gpt-4", "single-v1-multi-turn")]
if question["category"] in NEED_REF_CATS:
return model_judgments_math[("gpt-4", "single-math-v1")]
else:
return model_judgments_normal[("gpt-4", "single-v1")]
def get_pairwise_judge_explanation(gamekey, judgment_dict):
"""Get model judge explanation."""
try:
qid, model_1, model_2 = gamekey
if model_1 < model_2:
res = judgment_dict[gamekey]
g1_judgment, g2_judgment = res["g1_judgment"], res["g2_judgment"]
else:
new_gamekey = (qid, model_2, model_1)
res = judgment_dict[new_gamekey]
model_1, model_2 = model_1, model_2
g1_judgment, g2_judgment = res["g2_judgment"], res["g1_judgment"]
return (
f"**Game 1**. **A**: {model_1}, **B**: {model_2}\n\n"
f"**Judgment**: {g1_judgment}"
+ f"\n\n`--------------------------`\n\n"
+ f"**Game 2**. **A**: {model_2}, **B**: {model_1}\n\n"
f"**Judgment**: {g2_judgment}"
)
except KeyError:
return "N/A"
def get_single_judge_explanation(gamekey, judgment_dict):
"""Get model judge explanation."""
try:
qid, model = gamekey
res = judgment_dict[gamekey]
g1_judgment = res["judgment"]
g1_score = res["score"]
return (
f"**Game 1**. **A**: {model}, **Score**: {g1_score}\n\n"
f"**Judgment**: {g1_judgment}"
)
except KeyError:
return "N/A"
def check_data(questions, model_answers, ref_answers, models, judges):
# check model answers
for m in models:
assert m in model_answers, f"Missing model answer for {m}"
m_answer = model_answers[m]
for q in questions:
assert (
q["question_id"] in m_answer
), f"Missing model {m}'s answer to Question {q['question_id']}"
# check ref answers
for jg in judges.values():
if not jg.ref_based:
continue
for q in questions:
if q["category"] not in NEED_REF_CATS:
continue
assert (
q["question_id"] in ref_answers[jg.model_name]
), f"Missing reference answer to Question {q['question_id']} for judge {jg.model_name}"
def get_model_list(answer_dir):
file_paths = glob.glob(f"{answer_dir}/*.jsonl")
file_names = [os.path.splitext(os.path.basename(f))[0] for f in file_paths]
return file_names
| [
"[Instruction]\nPLACEHOLDER\n\nPLACEHOLDER",
"{}",
"turns",
"prompt_template",
"system_prompt"
] |
2024-01-10 | gauss5930/AlpaGasus2-QLoRA | evaluation~AlpaGasus-Evaluation~evaluation.py | import argparse
import json
import os
import time
import openai
from tqdm import tqdm
from typing import Any
import logging
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--API_KEY", type=str)
parser.add_argument("--model", type=str)
parser.add_argument("-qa", "--qa_file")
parser.add_argument("-k1", "--key_1")
parser.add_argument("-k2", "--key_2")
parser.add_argument(
"--max_tokens",
type=int,
default=256,
help="maximum number of tokens produced in the output",
)
parser.add_argument(
"--output_dir",
type=str,
help="The output dir."
)
return parser.parse_args()
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def api_generation(
messages: str,
model: str,
temperature: float,
max_tokens: int,
top_p: float,
):
responses = [
openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
top_p=top_p,
)
]
time.sleep(3) # Preventing rate limits
return responses
def parse_score(review):
try:
score_pair = review.split("\n")[0]
score_pair = score_pair.replace(",", " ")
sp = score_pair.split(" ")
if len(sp) == 2:
return [float(sp[0]), float(sp[1])]
else:
raise Exception("Invalid score pair.")
except Exception as e:
logger.error(
f"{e}\nContent: {review}\n" "You must manually fix the score pair."
)
return [-1, -1]
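# Illustrative examples (not part of the original file): parse_score expects both
# scores on the first line of the review, separated by a space or a comma, e.g.
#   parse_score("7 9\nAssistant 1 was concise ...")  # -> [7.0, 9.0]
#   parse_score("7,9\nAssistant 1 was concise ...")  # -> [7.0, 9.0]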
def gen_prompt(ques, ans1, ans2):
sys_prompt = "You are a helpful and precise assistant for checking the quality of the answer."
prompt_template = "[Question]\n{question}\n\n[The Start of Assistant 1's Answer]\n{answer_1}\n\n[The End of Assistant 1's Answer]\n\n[The Start of Assistant 2's Answer]\n{answer_2}\n\n[The End of Assistant 2's Answer]\n\n[System]\n{criteria}\n\n"
criteria = "We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above.\nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space. In the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment."
prompt = prompt_template.format(
question=ques, answer_1=ans1, answer_2=ans2, criteria=criteria
)
return sys_prompt, prompt
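# Illustrative sketch (not part of the original file): main() below packs the pair
# returned by gen_prompt into a ChatCompletion-style message list, e.g.
#   sys_prompt, prompt = gen_prompt(ques, ans1, ans2)
#   message = [{"role": "system", "content": sys_prompt},
#              {"role": "user", "content": prompt}]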
def main():
args = parse_args()
openai.api_key = args.API_KEY
qa_jsons = json.load(open(args.qa_file))
message_list = []
total_len = len(qa_jsons)
question_idx_list = list(range(total_len))
if("vicuna" in args.qa_file):
prompt_key = 'text'
dst = 'vicuna' # dst is used for saving the content
elif("koala" in args.qa_file):
prompt_key = 'prompt'
dst = 'koala'
elif("sinstruct" in args.qa_file):
prompt_key = 'instruction'
dst = 'sinstruct'
for i in question_idx_list:
instruction = qa_jsons[i][prompt_key]
if("sinstruct" in args.qa_file):
instances = qa_jsons[i]['instances']
assert len(instances) == 1
if instances[0]['input']:
ques = '{instruction} Input: {input}'.format(instruction=instruction,input=instances[0]['input'])
else:
ques = instruction
else:
ques = instruction
ans1 = qa_jsons[i][args.key_1]
ans2 = qa_jsons[i][args.key_2]
sys_prompt, prompt = gen_prompt(ques, ans1, ans2)
message = [
{"role": "system", "content": sys_prompt},
{
"role": "user",
"content": prompt,
},
]
message_list.append(message)
predictions = []
pbar = tqdm(total=len(message_list))
for i in range(len(message_list)):
predictions.append(api_generation(
messages=message_list[i],
model=args.model,
temperature=0.0,
max_tokens=256,
top_p=1.0,
))
time.sleep(3)
pbar.update(1)
pbar.close()
output_dir = args.output_dir
output_review_file = args.key_1 + '-' + args.key_2 + '-' + dst + '.json'
if os.path.isdir(output_dir) is not True:
os.mkdir(output_dir)
output_review_f = os.path.join(output_dir, output_review_file)
with open(f"{output_review_f}", "x") as f:
for idx, prediction in enumerate(predictions):
review = prediction[0]['choices'][0]['message']['content']
scores = parse_score(review)
qa_jsons[idx]["review"] = review
qa_jsons[idx]["score"] = scores
json.dump(qa_jsons, f, indent=4)
if __name__ == "__main__":
main()
| [
"You are a helpful and precise assistant for checking the quality of the answer.",
"instruction",
"prompt",
"text",
"[Question]\n{question}\n\n[The Start of Assistant 1's Answer]\n{answer_1}\n\n[The End of Assistant 1's Answer]\n\n[The Start of Assistant 2's Answer]\n{answer_2}\n\n[The End of Assistant 2's Answer]\n\n[System]\n{criteria}\n\n"
] |
2024-01-10 | SquareandCompass/langchain | langchain~utilities~serpapi.py | """Chain that calls SerpAPI.
Heavily borrowed from https://github.com/ofirpress/self-ask
"""
import os
import sys
from typing import Any, Dict, Optional, Tuple
import aiohttp
from pydantic import BaseModel, Extra, Field, root_validator
from langchain.utils import get_from_dict_or_env
class HiddenPrints:
"""Context manager to hide prints."""
def __enter__(self) -> None:
"""Open file to pipe stdout to."""
self._original_stdout = sys.stdout
sys.stdout = open(os.devnull, "w")
def __exit__(self, *_: Any) -> None:
"""Close file that stdout was piped to."""
sys.stdout.close()
sys.stdout = self._original_stdout
class SerpAPIWrapper(BaseModel):
"""Wrapper around SerpAPI.
To use, you should have the ``google-search-results`` python package installed,
and the environment variable ``SERPAPI_API_KEY`` set with your API key, or pass
`serpapi_api_key` as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain import SerpAPIWrapper
serpapi = SerpAPIWrapper()
"""
search_engine: Any #: :meta private:
params: dict = Field(
default={
"engine": "google",
"google_domain": "google.com",
"gl": "us",
"hl": "en",
}
)
serpapi_api_key: Optional[str] = None
aiosession: Optional[aiohttp.ClientSession] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
serpapi_api_key = get_from_dict_or_env(
values, "serpapi_api_key", "SERPAPI_API_KEY"
)
values["serpapi_api_key"] = serpapi_api_key
try:
from serpapi import GoogleSearch
values["search_engine"] = GoogleSearch
except ImportError:
raise ValueError(
"Could not import serpapi python package. "
"Please install it with `pip install google-search-results`."
)
return values
async def arun(self, query: str) -> str:
"""Run query through SerpAPI and parse result async."""
return self._process_response(await self.aresults(query))
def run(self, query: str) -> str:
"""Run query through SerpAPI and parse result."""
return self._process_response(self.results(query))
def results(self, query: str) -> dict:
"""Run query through SerpAPI and return the raw result."""
params = self.get_params(query)
with HiddenPrints():
search = self.search_engine(params)
res = search.get_dict()
return res
async def aresults(self, query: str) -> dict:
"""Use aiohttp to run query through SerpAPI and return the results async."""
def construct_url_and_params() -> Tuple[str, Dict[str, str]]:
params = self.get_params(query)
params["source"] = "python"
if self.serpapi_api_key:
params["serp_api_key"] = self.serpapi_api_key
params["output"] = "json"
url = "https://serpapi.com/search"
return url, params
url, params = construct_url_and_params()
if not self.aiosession:
async with aiohttp.ClientSession() as session:
async with session.get(url, params=params) as response:
res = await response.json()
else:
async with self.aiosession.get(url, params=params) as response:
res = await response.json()
return res
def get_params(self, query: str) -> Dict[str, str]:
"""Get parameters for SerpAPI."""
_params = {
"api_key": self.serpapi_api_key,
"q": query,
}
params = {**self.params, **_params}
return params
@staticmethod
def _process_response(res: dict) -> str:
"""Process response from SerpAPI."""
if "error" in res.keys():
raise ValueError(f"Got error from SerpAPI: {res['error']}")
if "answer_box" in res.keys() and "answer" in res["answer_box"].keys():
toret = res["answer_box"]["answer"]
elif "answer_box" in res.keys() and "snippet" in res["answer_box"].keys():
toret = res["answer_box"]["snippet"]
elif (
"answer_box" in res.keys()
and "snippet_highlighted_words" in res["answer_box"].keys()
):
toret = res["answer_box"]["snippet_highlighted_words"][0]
elif (
"sports_results" in res.keys()
and "game_spotlight" in res["sports_results"].keys()
):
toret = res["sports_results"]["game_spotlight"]
elif (
"knowledge_graph" in res.keys()
and "description" in res["knowledge_graph"].keys()
):
toret = res["knowledge_graph"]["description"]
elif "snippet" in res["organic_results"][0].keys():
toret = res["organic_results"][0]["snippet"]
else:
toret = "No good search result found"
return toret
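# Illustrative examples (not part of the original module): the extraction cascade in
# _process_response prefers direct answers over organic-result snippets, e.g.
#   SerpAPIWrapper._process_response({"answer_box": {"answer": "42"}})  # -> "42"
#   SerpAPIWrapper._process_response({"organic_results": [{"snippet": "x"}]})  # -> "x"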
| [] |
2024-01-10 | zapier/langchain-nla-util | langchain~llms~bananadev.py | """Wrapper around Banana API."""
import logging
from typing import Any, Dict, List, Mapping, Optional
from pydantic import BaseModel, Extra, Field, root_validator
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class Banana(LLM, BaseModel):
"""Wrapper around Banana large language models.
To use, you should have the ``banana-dev`` python package installed,
and the environment variable ``BANANA_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import Banana
banana = Banana(model_key="")
"""
model_key: str = ""
"""model endpoint to use"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not
explicitly specified."""
banana_api_key: Optional[str] = None
    class Config:
        """Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
                logger.warning(
                    f"""{field_name} was transferred to model_kwargs.
                    Please confirm that {field_name} is what you intended."""
                )
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
banana_api_key = get_from_dict_or_env(
values, "banana_api_key", "BANANA_API_KEY"
)
values["banana_api_key"] = banana_api_key
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_key": self.model_key},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "banana"
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Call to Banana endpoint."""
try:
import banana_dev as banana
except ImportError:
raise ValueError(
"Could not import banana-dev python package. "
"Please install it with `pip install banana-dev`."
)
params = self.model_kwargs or {}
api_key = self.banana_api_key
model_key = self.model_key
model_inputs = {
# a json specific to your model.
"prompt": prompt,
**params,
}
response = banana.run(api_key, model_key, model_inputs)
try:
text = response["modelOutputs"][0]["output"]
except (KeyError, TypeError):
returned = response["modelOutputs"][0]
raise ValueError(
"Response should be of schema: {'output': 'text'}."
f"\nResponse was: {returned}"
"\nTo fix this:"
"\n- fork the source repo of the Banana model"
"\n- modify app.py to return the above schema"
"\n- deploy that as a custom repo"
)
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
| [] |
2024-01-10 | zapier/langchain-nla-util | langchain~prompts~few_shot_with_templates.py | """Prompt template that contains few shot examples."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.prompts.base import (
DEFAULT_FORMATTER_MAPPING,
StringPromptTemplate,
)
from langchain.prompts.example_selector.base import BaseExampleSelector
from langchain.prompts.prompt import PromptTemplate
class FewShotPromptWithTemplates(StringPromptTemplate, BaseModel):
"""Prompt template that contains few shot examples."""
examples: Optional[List[dict]] = None
"""Examples to format into the prompt.
Either this or example_selector should be provided."""
example_selector: Optional[BaseExampleSelector] = None
"""ExampleSelector to choose the examples to format into the prompt.
Either this or examples should be provided."""
example_prompt: PromptTemplate
"""PromptTemplate used to format an individual example."""
suffix: StringPromptTemplate
"""A PromptTemplate to put after the examples."""
input_variables: List[str]
"""A list of the names of the variables the prompt template expects."""
example_separator: str = "\n\n"
"""String separator used to join the prefix, the examples, and suffix."""
prefix: Optional[StringPromptTemplate] = None
"""A PromptTemplate to put before the examples."""
template_format: str = "f-string"
"""The format of the prompt template. Options are: 'f-string', 'jinja2'."""
validate_template: bool = True
"""Whether or not to try validating the template."""
@root_validator(pre=True)
def check_examples_and_selector(cls, values: Dict) -> Dict:
"""Check that one and only one of examples/example_selector are provided."""
examples = values.get("examples", None)
example_selector = values.get("example_selector", None)
if examples and example_selector:
raise ValueError(
"Only one of 'examples' and 'example_selector' should be provided"
)
if examples is None and example_selector is None:
raise ValueError(
"One of 'examples' and 'example_selector' should be provided"
)
return values
@root_validator()
def template_is_valid(cls, values: Dict) -> Dict:
"""Check that prefix, suffix and input variables are consistent."""
if values["validate_template"]:
input_variables = values["input_variables"]
expected_input_variables = set(values["suffix"].input_variables)
expected_input_variables |= set(values["partial_variables"])
if values["prefix"] is not None:
expected_input_variables |= set(values["prefix"].input_variables)
missing_vars = expected_input_variables.difference(input_variables)
if missing_vars:
raise ValueError(
f"Got input_variables={input_variables}, but based on "
f"prefix/suffix expected {expected_input_variables}"
)
return values
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def _get_examples(self, **kwargs: Any) -> List[dict]:
if self.examples is not None:
return self.examples
elif self.example_selector is not None:
return self.example_selector.select_examples(kwargs)
else:
raise ValueError
def format(self, **kwargs: Any) -> str:
"""Format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
Example:
.. code-block:: python
prompt.format(variable1="foo")
"""
kwargs = self._merge_partial_and_user_variables(**kwargs)
# Get the examples to use.
examples = self._get_examples(**kwargs)
# Format the examples.
example_strings = [
self.example_prompt.format(**example) for example in examples
]
# Create the overall prefix.
if self.prefix is None:
prefix = ""
else:
prefix_kwargs = {
k: v for k, v in kwargs.items() if k in self.prefix.input_variables
}
for k in prefix_kwargs.keys():
kwargs.pop(k)
prefix = self.prefix.format(**prefix_kwargs)
# Create the overall suffix
suffix_kwargs = {
k: v for k, v in kwargs.items() if k in self.suffix.input_variables
}
for k in suffix_kwargs.keys():
kwargs.pop(k)
suffix = self.suffix.format(
**suffix_kwargs,
)
pieces = [prefix, *example_strings, suffix]
template = self.example_separator.join([piece for piece in pieces if piece])
# Format the template with the input variables.
return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs)
@property
def _prompt_type(self) -> str:
"""Return the prompt type key."""
return "few_shot_with_templates"
def dict(self, **kwargs: Any) -> Dict:
"""Return a dictionary of the prompt."""
if self.example_selector:
raise ValueError("Saving an example selector is not currently supported")
return super().dict(**kwargs)
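# Illustrative usage (not part of the original module); the templates and example
# below are made-up placeholders:
#   example_prompt = PromptTemplate(input_variables=["q", "a"], template="Q: {q}\nA: {a}")
#   suffix = PromptTemplate(input_variables=["question"], template="Q: {question}\nA:")
#   few_shot = FewShotPromptWithTemplates(
#       examples=[{"q": "2+2", "a": "4"}],
#       example_prompt=example_prompt,
#       suffix=suffix,
#       input_variables=["question"],
#   )
#   few_shot.format(question="3+3")  # -> "Q: 2+2\nA: 4\n\nQ: 3+3\nA:"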
| [
"f-string",
"True"
] |
2024-01-10 | zapier/langchain-nla-util | langchain~chains~sequential.py | """Chain pipeline where the outputs of one step feed directly into next."""
from typing import Dict, List
from pydantic import BaseModel, Extra, root_validator
from langchain.chains.base import Chain
from langchain.input import get_color_mapping
class SequentialChain(Chain, BaseModel):
"""Chain where the outputs of one chain feed directly into next."""
chains: List[Chain]
input_variables: List[str]
output_variables: List[str] #: :meta private:
return_all: bool = False
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Return expected input keys to the chain.
:meta private:
"""
return self.input_variables
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
return self.output_variables
@root_validator(pre=True)
def validate_chains(cls, values: Dict) -> Dict:
"""Validate that the correct inputs exist for all chains."""
chains = values["chains"]
input_variables = values["input_variables"]
memory_keys = list()
if "memory" in values and values["memory"] is not None:
"""Validate that prompt input variables are consistent."""
memory_keys = values["memory"].memory_variables
            overlapping_keys = set(input_variables) & set(memory_keys)
            if overlapping_keys:
                raise ValueError(
                    f"The input key(s) {', '.join(overlapping_keys)} are found "
                    f"in the Memory keys ({memory_keys}) - please use input and "
                    f"memory keys that don't overlap."
                )
known_variables = set(input_variables + memory_keys)
for chain in chains:
missing_vars = set(chain.input_keys).difference(known_variables)
if missing_vars:
raise ValueError(
f"Missing required input keys: {missing_vars}, "
f"only had {known_variables}"
)
overlapping_keys = known_variables.intersection(chain.output_keys)
if overlapping_keys:
raise ValueError(
f"Chain returned keys that already exist: {overlapping_keys}"
)
known_variables |= set(chain.output_keys)
if "output_variables" not in values:
if values.get("return_all", False):
output_keys = known_variables.difference(input_variables)
else:
output_keys = chains[-1].output_keys
values["output_variables"] = output_keys
else:
missing_vars = set(values["output_variables"]).difference(known_variables)
if missing_vars:
raise ValueError(
f"Expected output variables that were not found: {missing_vars}."
)
return values
def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
known_values = inputs.copy()
for i, chain in enumerate(self.chains):
outputs = chain(known_values, return_only_outputs=True)
known_values.update(outputs)
return {k: known_values[k] for k in self.output_variables}
class SimpleSequentialChain(Chain, BaseModel):
"""Simple chain where the outputs of one step feed directly into next."""
chains: List[Chain]
strip_outputs: bool = False
input_key: str = "input" #: :meta private:
output_key: str = "output" #: :meta private:
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
@root_validator()
def validate_chains(cls, values: Dict) -> Dict:
"""Validate that chains are all single input/output."""
for chain in values["chains"]:
if len(chain.input_keys) != 1:
raise ValueError(
"Chains used in SimplePipeline should all have one input, got "
f"{chain} with {len(chain.input_keys)} inputs."
)
if len(chain.output_keys) != 1:
raise ValueError(
"Chains used in SimplePipeline should all have one output, got "
f"{chain} with {len(chain.output_keys)} outputs."
)
return values
def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
_input = inputs[self.input_key]
color_mapping = get_color_mapping([str(i) for i in range(len(self.chains))])
for i, chain in enumerate(self.chains):
_input = chain.run(_input)
if self.strip_outputs:
_input = _input.strip()
self.callback_manager.on_text(
_input, color=color_mapping[str(i)], end="\n", verbose=self.verbose
)
return {self.output_key: _input}
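# Illustrative usage (not part of the original module); chain_one and chain_two are
# placeholders for any two single-input/single-output chains (e.g. LLMChains):
#   overall_chain = SimpleSequentialChain(chains=[chain_one, chain_two], verbose=True)
#   result = overall_chain.run("some input text")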
| [] |
2024-01-10 | zapier/langchain-nla-util | langchain~tools~zapier.py | """## Zapier Natural Language Actions API
\
Full docs here: https://nla.zapier.com/api/v1/dynamic/docs
**Zapier Natural Language Actions** gives you access to the 5k+ apps, 20k+ actions
on Zapier's platform through a natural language API interface.
NLA supports apps like Gmail, Salesforce, Trello, Slack, Asana, HubSpot, Google Sheets,
Microsoft Teams, and thousands more apps: https://zapier.com/apps
Zapier NLA handles ALL the underlying API auth and translation from
natural language --> underlying API call --> return simplified output for LLMs
The key idea is you, or your users, expose a set of actions via an oauth-like setup
window, which you can then query and execute via a REST API.
NLA offers both API Key and OAuth for signing NLA API requests.
1. Server-side (API Key): for quickly getting started, testing, and production scenarios
where LangChain will only use actions exposed in the developer's Zapier account
(and will use the developer's connected accounts on Zapier.com)
2. User-facing (Oauth): for production scenarios where you are deploying an end-user
facing application and LangChain needs access to end-user's exposed actions and
connected accounts on Zapier.com
This quick start will focus on the server-side use case for brevity.
Review [full docs](https://nla.zapier.com/api/v1/dynamic/docs) or reach out to
[email protected] for user-facing oauth developer support.
Typically you'd use SequentialChain; here's a basic example:
1. Use NLA to find an email in Gmail
2. Use LLMChain to generate a draft reply to (1)
3. Use NLA to send the draft reply (2) to someone in Slack via direct message
In code, below:
```python
import os
# get from https://platform.openai.com/
os.environ["OPENAI_API_KEY"] = "..."
# get from https://nla.zapier.com/zapier/provider/debug (under User, after logging in):
os.environ["ZAPIER_NLA_API_KEY"] = "..."
from langchain.llms import OpenAI
from langchain.chains import LLMChain, TransformChain, SimpleSequentialChain
from langchain.prompts import PromptTemplate
from langchain.tools.zapier import ZapierNLAListActions, ZapierNLARunAction
from langchain.utilities.zapier import ZapierNLAWrapper
## step 0. expose gmail 'find email' and slack 'send channel message' actions
# first go here, log in, expose (enable) the two actions:
# https://nla.zapier.com/zapier/start
# -- for this example, can leave all fields "Have AI guess"
# in an oauth scenario, you'd get your own <provider> id (instead of 'zapier')
# which you route your users through first
actions = ZapierNLAWrapper().list()
## step 1. gmail find email
GMAIL_SEARCH_INSTRUCTIONS = "Grab the latest email from Bryan Helmig"
def nla_gmail(inputs):
action = next((
a for a in actions if a["description"].startswith("Gmail: Find Email")
), None)
data = ZapierNLARunAction(action_id=action["id"]).run(inputs["instructions"])
return {
"email_data": data
}
gmail_chain = TransformChain(
input_variables=["instructions"],
output_variables=["email_data"],
transform=nla_gmail
)
## step 2. generate draft reply
template = \"""You are an assistant who drafts replies to an incoming email.
Output draft reply in plain text (not JSON).
Incoming email:
{email_data}
Draft email reply:\"""
prompt_template = PromptTemplate(input_variables=["email_data"], template=template)
reply_chain = LLMChain(llm=OpenAI(temperature=.7), prompt=prompt_template)
## step 3. send draft reply via a slack direct message
SLACK_HANDLE = "@knoop"
def nla_slack(inputs):
action = next(
(a for a in actions if a["description"].startswith("Slack: Send Direct Message")
), None)
instructions = f'Send this to {SLACK_HANDLE} in Slack: {inputs["draft_reply"]}'
return {"slack_data": ZapierNLARunAction(action_id=action["id"]).run(instructions)}
slack_chain = TransformChain(
input_variables=["draft_reply"],
output_variables=["slack_data"],
transform=nla_slack
)
## finally, execute
overall_chain = SimpleSequentialChain(
chains=[gmail_chain, reply_chain, slack_chain],
verbose=True
)
overall_chain.run(GMAIL_SEARCH_INSTRUCTIONS)
```
"""
from typing import Optional
from langchain.tools.base import BaseTool
from langchain.utilities.zapier import ZapierNLAWrapper
zapier_nla_base_desc = (
    "A wrapper around Zapier NLA. "
    "Can be used to call or retrieve data from 5k+ apps, 20k+ actions "
    "on the Zapier platform."
)
class ZapierNLARunAction(BaseTool):
"""
Args:
action_id: a specific action ID (from list actions) of the action to execute
(the set api_key must be associated with the action owner)
instructions: a natural language instruction string for using the action
(eg. "get the latest email from Mike Knoop" for "Gmail: find email" action)
params: a dict, optional. Any params provided will *override* AI guesses
from `instructions` (see "understanding the AI guessing flow" here:
https://nla.zapier.com/api/v1/dynamic/docs)
"""
name = "Zapier NLA: Run Action"
    description = zapier_nla_base_desc + (
        "This tool will run a specified action and return a stringified JSON result "
        " of the API call. The return result is guaranteed to be less than ~500 words "
        " (350 tokens), safe to insert back into another LLM prompt."
)
api_wrapper: ZapierNLAWrapper = ZapierNLAWrapper()
action_id: str
params: Optional[dict] = None
def _run(self, instructions: str) -> str:
"""Use the Zapier NLA tool to return a list of all exposed user actions."""
return self.api_wrapper.run_as_str(self.action_id, instructions, self.params)
async def _arun(self, _: str) -> str:
"""Use the Zapier NLA tool to return a list of all exposed user actions."""
raise NotImplementedError("ZapierNLAListActions does not support async")
ZapierNLARunAction.__doc__ = (
ZapierNLAWrapper.run.__doc__ + ZapierNLARunAction.__doc__ # type: ignore
)
# other useful actions
class ZapierNLAListActions(BaseTool):
"""
Args:
None
"""
name = "Zapier NLA: List Actions"
description = zapier_nla_base_desc + (
"This tool returns a list of the user's exposed actions."
)
api_wrapper: ZapierNLAWrapper = ZapierNLAWrapper()
def _run(self, _: str) -> str:
"""Use the Zapier NLA tool to return a list of all exposed user actions."""
return self.api_wrapper.list_as_str()
async def _arun(self, _: str) -> str:
"""Use the Zapier NLA tool to return a list of all exposed user actions."""
raise NotImplementedError("ZapierNLAListActions does not support async")
ZapierNLAListActions.__doc__ = (
ZapierNLAWrapper.list.__doc__ + ZapierNLAListActions.__doc__ # type: ignore
)
| [] |
2024-01-10 | zapier/langchain-nla-util | langchain~memory~kg.py | from typing import Any, Dict, List
from pydantic import BaseModel, Field
from langchain.chains.llm import LLMChain
from langchain.graphs import NetworkxEntityGraph
from langchain.graphs.networkx_graph import KnowledgeTriple, get_entities, parse_triples
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import (
ENTITY_EXTRACTION_PROMPT,
KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT,
)
from langchain.memory.utils import get_buffer_string, get_prompt_input_key
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel, SystemMessage
class ConversationKGMemory(BaseChatMemory, BaseModel):
"""Knowledge graph memory for storing conversation memory.
Integrates with external knowledge graph to store and retrieve
information about knowledge triples in the conversation.
"""
    k: int = 2
    """Number of previous utterances to include in the context."""
    human_prefix: str = "Human"
    ai_prefix: str = "AI"
    kg: NetworkxEntityGraph = Field(default_factory=NetworkxEntityGraph)
    knowledge_extraction_prompt: BasePromptTemplate = KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT
    entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT
    llm: BaseLanguageModel
    memory_key: str = "history" #: :meta private:
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
entities = self._get_current_entities(inputs)
summaries = {}
for entity in entities:
knowledge = self.kg.get_entity_knowledge(entity)
if knowledge:
summaries[entity] = ". ".join(knowledge) + "."
if summaries:
summary_strings = [
f"On {entity}: {summary}" for entity, summary in summaries.items()
]
if self.return_messages:
context: Any = [SystemMessage(content=text) for text in summary_strings]
else:
context = "\n".join(summary_strings)
else:
if self.return_messages:
context = []
else:
context = ""
return {self.memory_key: context}
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str:
"""Get the input key for the prompt."""
if self.input_key is None:
return get_prompt_input_key(inputs, self.memory_variables)
return self.input_key
def _get_prompt_output_key(self, outputs: Dict[str, Any]) -> str:
"""Get the output key for the prompt."""
if self.output_key is None:
if len(outputs) != 1:
raise ValueError(f"One output key expected, got {outputs.keys()}")
return list(outputs.keys())[0]
return self.output_key
def get_current_entities(self, input_string: str) -> List[str]:
chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt)
buffer_string = get_buffer_string(
self.chat_memory.messages[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
output = chain.predict(
history=buffer_string,
input=input_string,
)
return get_entities(output)
def _get_current_entities(self, inputs: Dict[str, Any]) -> List[str]:
"""Get the current entities in the conversation."""
prompt_input_key = self._get_prompt_input_key(inputs)
return self.get_current_entities(inputs[prompt_input_key])
def get_knowledge_triplets(self, input_string: str) -> List[KnowledgeTriple]:
chain = LLMChain(llm=self.llm, prompt=self.knowledge_extraction_prompt)
buffer_string = get_buffer_string(
self.chat_memory.messages[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
output = chain.predict(
history=buffer_string,
input=input_string,
verbose=True,
)
knowledge = parse_triples(output)
return knowledge
def _get_and_update_kg(self, inputs: Dict[str, Any]) -> None:
"""Get and update knowledge graph from the conversation history."""
prompt_input_key = self._get_prompt_input_key(inputs)
knowledge = self.get_knowledge_triplets(inputs[prompt_input_key])
for triple in knowledge:
self.kg.add_triple(triple)
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
super().save_context(inputs, outputs)
self._get_and_update_kg(inputs)
def clear(self) -> None:
"""Clear memory contents."""
super().clear()
self.kg.clear()
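# Illustrative usage sketch: a minimal run of ConversationKGMemory, assuming an
# OpenAI completion model and an OPENAI_API_KEY in the environment; the sample
# utterances are arbitrary.
if __name__ == "__main__":
    from langchain.llms import OpenAI
    demo_memory = ConversationKGMemory(llm=OpenAI(temperature=0), k=2)
    # save_context runs triple extraction and writes edges into the NetworkX graph.
    demo_memory.save_context(
        {"input": "Sam is my coworker and he lives in Berlin."},
        {"output": "Got it: Sam is your coworker and lives in Berlin."},
    )
    # Entities found in the new input are summarized from the graph.
    print(demo_memory.load_memory_variables({"input": "Where does Sam live?"}))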
| [] |
2024-01-10 | zapier/langchain-nla-util | langchain~agents~initialize.py | """Load agent."""
from typing import Any, Optional, Sequence
from langchain.agents.agent import AgentExecutor
from langchain.agents.loading import AGENT_TO_CLASS, load_agent
from langchain.callbacks.base import BaseCallbackManager
from langchain.llms.base import BaseLLM
from langchain.tools.base import BaseTool
def initialize_agent(
tools: Sequence[BaseTool],
llm: BaseLLM,
agent: Optional[str] = None,
callback_manager: Optional[BaseCallbackManager] = None,
agent_path: Optional[str] = None,
agent_kwargs: Optional[dict] = None,
**kwargs: Any,
) -> AgentExecutor:
"""Load an agent executor given tools and LLM.
Args:
tools: List of tools this agent has access to.
llm: Language model to use as the agent.
agent: A string that specified the agent type to use. Valid options are:
`zero-shot-react-description`
`react-docstore`
`self-ask-with-search`
`conversational-react-description`
If None and agent_path is also None, will default to
`zero-shot-react-description`.
callback_manager: CallbackManager to use. Global callback manager is used if
not provided. Defaults to None.
agent_path: Path to serialized agent to use.
agent_kwargs: Additional key word arguments to pass to the underlying agent
**kwargs: Additional key word arguments passed to the agent executor
Returns:
An agent executor
"""
if agent is None and agent_path is None:
agent = "zero-shot-react-description"
if agent is not None and agent_path is not None:
raise ValueError(
"Both `agent` and `agent_path` are specified, "
"but at most only one should be."
)
if agent is not None:
if agent not in AGENT_TO_CLASS:
raise ValueError(
f"Got unknown agent type: {agent}. "
f"Valid types are: {AGENT_TO_CLASS.keys()}."
)
agent_cls = AGENT_TO_CLASS[agent]
agent_kwargs = agent_kwargs or {}
agent_obj = agent_cls.from_llm_and_tools(
llm, tools, callback_manager=callback_manager, **agent_kwargs
)
elif agent_path is not None:
agent_obj = load_agent(
agent_path, llm=llm, tools=tools, callback_manager=callback_manager
)
else:
raise ValueError(
"Somehow both `agent` and `agent_path` are None, "
"this should never happen."
)
return AgentExecutor.from_agent_and_tools(
agent=agent_obj,
tools=tools,
callback_manager=callback_manager,
**kwargs,
)
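# Illustrative usage sketch: wiring initialize_agent with one tool. The SerpAPI
# tool and the OpenAI model are assumptions and need SERPAPI_API_KEY and
# OPENAI_API_KEY to actually run.
if __name__ == "__main__":
    from langchain.agents import load_tools
    from langchain.llms import OpenAI
    demo_llm = OpenAI(temperature=0)
    demo_tools = load_tools(["serpapi"], llm=demo_llm)
    # With no agent type given, "zero-shot-react-description" is used by default.
    demo_executor = initialize_agent(demo_tools, demo_llm, verbose=True)
    print(demo_executor.run("What is the capital of France?"))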
| [] |
2024-01-10 | zapier/langchain-nla-util | tests~integration_tests~vectorstores~test_qdrant.py | """Test Qdrant functionality."""
import pytest
from langchain.docstore.document import Document
from langchain.vectorstores import Qdrant
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
@pytest.mark.parametrize(
["content_payload_key", "metadata_payload_key"],
[
(Qdrant.CONTENT_KEY, Qdrant.METADATA_KEY),
("foo", "bar"),
(Qdrant.CONTENT_KEY, "bar"),
("foo", Qdrant.METADATA_KEY),
],
)
def test_qdrant(content_payload_key: str, metadata_payload_key: str) -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch = Qdrant.from_texts(
texts,
FakeEmbeddings(),
host="localhost",
content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
@pytest.mark.parametrize(
["content_payload_key", "metadata_payload_key"],
[
(Qdrant.CONTENT_KEY, Qdrant.METADATA_KEY),
("test_content", "test_payload"),
(Qdrant.CONTENT_KEY, "payload_test"),
("content_test", Qdrant.METADATA_KEY),
],
)
def test_qdrant_with_metadatas(
content_payload_key: str, metadata_payload_key: str
) -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = Qdrant.from_texts(
texts,
FakeEmbeddings(),
metadatas=metadatas,
host="localhost",
content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"page": 0})]
@pytest.mark.parametrize(
["content_payload_key", "metadata_payload_key"],
[
(Qdrant.CONTENT_KEY, Qdrant.METADATA_KEY),
("test_content", "test_payload"),
(Qdrant.CONTENT_KEY, "payload_test"),
("content_test", Qdrant.METADATA_KEY),
],
)
def test_qdrant_max_marginal_relevance_search(
content_payload_key: str, metadata_payload_key: str
) -> None:
"""Test end to end construction and MRR search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = Qdrant.from_texts(
texts,
FakeEmbeddings(),
metadatas=metadatas,
host="localhost",
content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key,
)
output = docsearch.max_marginal_relevance_search("foo", k=2, fetch_k=3)
assert output == [
Document(page_content="foo", metadata={"page": 0}),
Document(page_content="bar", metadata={"page": 1}),
]
| [] |
2024-01-10 | zapier/langchain-nla-util | tests~unit_tests~prompts~test_chat.py | from typing import List
from langchain.prompts import PromptTemplate
from langchain.prompts.chat import (
AIMessagePromptTemplate,
BaseMessagePromptTemplate,
ChatMessagePromptTemplate,
ChatPromptTemplate,
ChatPromptValue,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain.schema import HumanMessage
def create_messages() -> List[BaseMessagePromptTemplate]:
"""Create messages."""
system_message_prompt = SystemMessagePromptTemplate(
prompt=PromptTemplate(
template="Here's some context: {context}",
input_variables=["context"],
)
)
human_message_prompt = HumanMessagePromptTemplate(
prompt=PromptTemplate(
template="Hello {foo}, I'm {bar}. Thanks for the {context}",
input_variables=["foo", "bar", "context"],
)
)
ai_message_prompt = AIMessagePromptTemplate(
prompt=PromptTemplate(
template="I'm an AI. I'm {foo}. I'm {bar}.",
input_variables=["foo", "bar"],
)
)
chat_message_prompt = ChatMessagePromptTemplate(
role="test",
prompt=PromptTemplate(
template="I'm a generic message. I'm {foo}. I'm {bar}.",
input_variables=["foo", "bar"],
),
)
return [
system_message_prompt,
human_message_prompt,
ai_message_prompt,
chat_message_prompt,
]
def create_chat_prompt_template() -> ChatPromptTemplate:
"""Create a chat prompt template."""
return ChatPromptTemplate(
input_variables=["foo", "bar", "context"],
messages=create_messages(),
)
def test_chat_prompt_template() -> None:
"""Test chat prompt template."""
prompt_template = create_chat_prompt_template()
prompt = prompt_template.format_prompt(foo="foo", bar="bar", context="context")
assert isinstance(prompt, ChatPromptValue)
messages = prompt.to_messages()
assert len(messages) == 4
assert messages[0].content == "Here's some context: context"
assert messages[1].content == "Hello foo, I'm bar. Thanks for the context"
assert messages[2].content == "I'm an AI. I'm foo. I'm bar."
assert messages[3].content == "I'm a generic message. I'm foo. I'm bar."
string = prompt.to_string()
expected = (
'[SystemMessage(content="Here\'s some context: context", '
'additional_kwargs={}), HumanMessage(content="Hello foo, '
"I'm bar. Thanks for the context\", additional_kwargs={}), "
"AIMessage(content=\"I'm an AI. I'm foo. I'm bar.\", additional_kwargs={}), "
"ChatMessage(content=\"I'm a generic message. I'm foo. I'm bar.\","
" additional_kwargs={}, role='test')]"
)
assert string == expected
string = prompt_template.format(foo="foo", bar="bar", context="context")
assert string == expected
def test_chat_prompt_template_from_messages() -> None:
"""Test creating a chat prompt template from messages."""
chat_prompt_template = ChatPromptTemplate.from_messages(create_messages())
assert sorted(chat_prompt_template.input_variables) == sorted(
["context", "foo", "bar"]
)
assert len(chat_prompt_template.messages) == 4
def test_chat_prompt_template_with_messages() -> None:
messages = create_messages() + [HumanMessage(content="foo")]
chat_prompt_template = ChatPromptTemplate.from_messages(messages)
assert sorted(chat_prompt_template.input_variables) == sorted(
["context", "foo", "bar"]
)
assert len(chat_prompt_template.messages) == 5
prompt_value = chat_prompt_template.format_prompt(
context="see", foo="this", bar="magic"
)
prompt_value_messages = prompt_value.to_messages()
assert prompt_value_messages[-1] == HumanMessage(content="foo")
| [
"Hello {foo}, I'm {bar}. Thanks for the {context}",
"foo",
"create_messages() + [HumanMessage(content=\"foo\")]",
"I'm an AI. I'm {foo}. I'm {bar}.",
"context",
"I'm a generic message. I'm {foo}. I'm {bar}.",
"Here's some context: {context}"
] |
2024-01-10 | zapier/langchain-nla-util | langchain~memory~entity.py | from typing import Any, Dict, List, Optional
from pydantic import BaseModel
from langchain.chains.llm import LLMChain
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import (
ENTITY_EXTRACTION_PROMPT,
ENTITY_SUMMARIZATION_PROMPT,
)
from langchain.memory.utils import get_buffer_string, get_prompt_input_key
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel, BaseMessage
class ConversationEntityMemory(BaseChatMemory, BaseModel):
"""Entity extractor & summarizer to memory."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
llm: BaseLanguageModel
entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT
entity_summarization_prompt: BasePromptTemplate = ENTITY_SUMMARIZATION_PROMPT
store: Dict[str, Optional[str]] = {}
entity_cache: List[str] = []
k: int = 3
chat_history_key: str = "history"
@property
def buffer(self) -> List[BaseMessage]:
return self.chat_memory.messages
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return ["entities", self.chat_history_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt)
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
buffer_string = get_buffer_string(
self.buffer[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
output = chain.predict(
history=buffer_string,
input=inputs[prompt_input_key],
)
if output.strip() == "NONE":
entities = []
else:
entities = [w.strip() for w in output.split(",")]
entity_summaries = {}
for entity in entities:
entity_summaries[entity] = self.store.get(entity, "")
self.entity_cache = entities
if self.return_messages:
buffer: Any = self.buffer[-self.k * 2 :]
else:
buffer = buffer_string
return {
self.chat_history_key: buffer,
"entities": entity_summaries,
}
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
super().save_context(inputs, outputs)
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
for entity in self.entity_cache:
chain = LLMChain(llm=self.llm, prompt=self.entity_summarization_prompt)
# key value store for entity
existing_summary = self.store.get(entity, "")
buffer_string = get_buffer_string(
self.buffer[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
output = chain.predict(
summary=existing_summary,
history=buffer_string,
input=inputs[prompt_input_key],
entity=entity,
)
self.store[entity] = output.strip()
def clear(self) -> None:
"""Clear memory contents."""
self.chat_memory.clear()
self.store = {}
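# Illustrative usage sketch: assumes an OpenAI model and an OPENAI_API_KEY; the
# dialogue is arbitrary.
if __name__ == "__main__":
    from langchain.llms import OpenAI
    demo_memory = ConversationEntityMemory(llm=OpenAI(temperature=0))
    demo_inputs = {"input": "Deven and Sam are working on a hackathon project."}
    # load_memory_variables must run first so entity_cache is populated
    # before save_context summarizes each entity.
    demo_memory.load_memory_variables(demo_inputs)
    demo_memory.save_context(demo_inputs, {"output": "That sounds like a fun project!"})
    print(demo_memory.store)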
| [] |
2024-01-10 | zapier/langchain-nla-util | langchain~document_loaders~readthedocs.py | """Loader that loads ReadTheDocs documentation directory dump."""
from pathlib import Path
from typing import Any, List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class ReadTheDocsLoader(BaseLoader):
"""Loader that loads ReadTheDocs documentation directory dump."""
def __init__(
self,
path: str,
encoding: Optional[str] = None,
errors: Optional[str] = None,
**kwargs: Optional[Any]
):
"""Initialize path."""
try:
from bs4 import BeautifulSoup
except ImportError:
raise ValueError(
"Could not import python packages. "
"Please install it with `pip install beautifulsoup4`. "
)
try:
_ = BeautifulSoup(
"<html><body>Parser builder library test.</body></html>", **kwargs
)
except Exception as e:
raise ValueError("Parsing kwargs do not appear valid") from e
self.file_path = path
self.encoding = encoding
self.errors = errors
self.bs_kwargs = kwargs
def load(self) -> List[Document]:
"""Load documents."""
from bs4 import BeautifulSoup
def _clean_data(data: str) -> str:
soup = BeautifulSoup(data, **self.bs_kwargs)
text = soup.find_all("main", {"id": "main-content"})
if len(text) != 0:
text = text[0].get_text()
else:
text = ""
return "\n".join([t for t in text.split("\n") if t])
docs = []
for p in Path(self.file_path).rglob("*"):
if p.is_dir():
continue
with open(p, encoding=self.encoding, errors=self.errors) as f:
text = _clean_data(f.read())
metadata = {"source": str(p)}
docs.append(Document(page_content=text, metadata=metadata))
return docs
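# Illustrative usage sketch: "rtdocs/" is a placeholder for a directory dump of a
# ReadTheDocs site (e.g. produced with a recursive wget); the parser choice is an
# assumption.
if __name__ == "__main__":
    demo_loader = ReadTheDocsLoader("rtdocs/", features="html.parser")
    demo_docs = demo_loader.load()
    print(f"Loaded {len(demo_docs)} documents")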
| [] |
2024-01-10 | zapier/langchain-nla-util | langchain~chains~question_answering~stuff_prompt.py | # flake8: noqa
from langchain.prompts import PromptTemplate
from langchain.chains.prompt_selector import (
ConditionalPromptSelector,
is_chat_model,
)
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
{context}
Question: {question}
Helpful Answer:"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
system_template = """Use the following pieces of context to answer the users question.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
----------------
{context}"""
messages = [
SystemMessagePromptTemplate.from_template(system_template),
HumanMessagePromptTemplate.from_template("{question}"),
]
CHAT_PROMPT = ChatPromptTemplate.from_messages(messages)
PROMPT_SELECTOR = ConditionalPromptSelector(
default_prompt=PROMPT, conditionals=[(is_chat_model, CHAT_PROMPT)]
)
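# Illustrative usage sketch: the selector hands back CHAT_PROMPT for chat models
# and PROMPT otherwise. Instantiating the models below is only for demonstration
# and assumes an OPENAI_API_KEY in the environment.
if __name__ == "__main__":
    from langchain.chat_models import ChatOpenAI
    from langchain.llms import OpenAI
    assert PROMPT_SELECTOR.get_prompt(OpenAI()) is PROMPT
    assert PROMPT_SELECTOR.get_prompt(ChatOpenAI()) is CHAT_PROMPT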
| [
"question",
"t know the answer, just say that you don",
"Use the following pieces of context to answer the users question. \nIf you don't know the answer, just say that you don't know, don't try to make up an answer.\n----------------\n{context}",
"context",
"Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\n\n{context}\n\nQuestion: {question}\nHelpful Answer:",
"{question}"
] |
2024-01-10 | zapier/langchain-nla-util | langchain~document_loaders~directory.py | """Loading logic for loading documents from a directory."""
import logging
from pathlib import Path
from typing import List, Type, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.text import TextLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader
FILE_LOADER_TYPE = Union[Type[UnstructuredFileLoader], Type[TextLoader]]
logger = logging.getLogger(__file__)
def _is_visible(p: Path) -> bool:
parts = p.parts
for _p in parts:
if _p.startswith("."):
return False
return True
class DirectoryLoader(BaseLoader):
"""Loading logic for loading documents from a directory."""
def __init__(
self,
path: str,
glob: str = "**/[!.]*",
silent_errors: bool = False,
load_hidden: bool = False,
loader_cls: FILE_LOADER_TYPE = UnstructuredFileLoader,
recursive: bool = False,
):
"""Initialize with path to directory and how to glob over it."""
self.path = path
self.glob = glob
self.load_hidden = load_hidden
self.loader_cls = loader_cls
self.silent_errors = silent_errors
self.recursive = recursive
def load(self) -> List[Document]:
"""Load documents."""
p = Path(self.path)
docs = []
items = p.rglob(self.glob) if self.recursive else p.glob(self.glob)
for i in items:
if i.is_file():
if _is_visible(i.relative_to(p)) or self.load_hidden:
try:
sub_docs = self.loader_cls(str(i)).load()
docs.extend(sub_docs)
except Exception as e:
if self.silent_errors:
logger.warning(e)
else:
raise e
return docs
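# Illustrative usage sketch: "docs/" and the markdown glob are placeholders;
# TextLoader is used so no unstructured dependency is required.
if __name__ == "__main__":
    demo_loader = DirectoryLoader("docs/", glob="**/*.md", loader_cls=TextLoader)
    for demo_doc in demo_loader.load():
        print(demo_doc.metadata["source"], len(demo_doc.page_content))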
| [] |
2024-01-10 | zapier/langchain-nla-util | langchain~prompts~example_selector~semantic_similarity.py | """Example selector that selects examples based on SemanticSimilarity."""
from __future__ import annotations
from typing import Any, Dict, List, Optional, Type
from pydantic import BaseModel, Extra
from langchain.embeddings.base import Embeddings
from langchain.prompts.example_selector.base import BaseExampleSelector
from langchain.vectorstores.base import VectorStore
def sorted_values(values: Dict[str, str]) -> List[Any]:
"""Return a list of values in dict sorted by key."""
return [values[val] for val in sorted(values)]
class SemanticSimilarityExampleSelector(BaseExampleSelector, BaseModel):
"""Example selector that selects examples based on SemanticSimilarity."""
vectorstore: VectorStore
"""VectorStore than contains information about examples."""
k: int = 4
"""Number of examples to select."""
example_keys: Optional[List[str]] = None
"""Optional keys to filter examples to."""
input_keys: Optional[List[str]] = None
"""Optional keys to filter input to. If provided, the search is based on
the input variables instead of all variables."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def add_example(self, example: Dict[str, str]) -> str:
"""Add new example to vectorstore."""
if self.input_keys:
string_example = " ".join(
sorted_values({key: example[key] for key in self.input_keys})
)
else:
string_example = " ".join(sorted_values(example))
ids = self.vectorstore.add_texts([string_example], metadatas=[example])
return ids[0]
def select_examples(self, input_variables: Dict[str, str]) -> List[dict]:
"""Select which examples to use based on semantic similarity."""
# Get the docs with the highest similarity.
if self.input_keys:
input_variables = {key: input_variables[key] for key in self.input_keys}
query = " ".join(sorted_values(input_variables))
example_docs = self.vectorstore.similarity_search(query, k=self.k)
# Get the examples from the metadata.
# This assumes that examples are stored in metadata.
examples = [dict(e.metadata) for e in example_docs]
# If example keys are provided, filter examples to those keys.
if self.example_keys:
examples = [{k: eg[k] for k in self.example_keys} for eg in examples]
return examples
@classmethod
def from_examples(
cls,
examples: List[dict],
embeddings: Embeddings,
vectorstore_cls: Type[VectorStore],
k: int = 4,
input_keys: Optional[List[str]] = None,
**vectorstore_cls_kwargs: Any,
) -> SemanticSimilarityExampleSelector:
"""Create k-shot example selector using example list and embeddings.
Reshuffles examples dynamically based on query similarity.
Args:
examples: List of examples to use in the prompt.
            embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select
input_keys: If provided, the search is based on the input variables
instead of all variables.
vectorstore_cls_kwargs: optional kwargs containing url for vector store
Returns:
The ExampleSelector instantiated, backed by a vector store.
"""
if input_keys:
string_examples = [
" ".join(sorted_values({k: eg[k] for k in input_keys}))
for eg in examples
]
else:
string_examples = [" ".join(sorted_values(eg)) for eg in examples]
vectorstore = vectorstore_cls.from_texts(
string_examples, embeddings, metadatas=examples, **vectorstore_cls_kwargs
)
return cls(vectorstore=vectorstore, k=k, input_keys=input_keys)
class MaxMarginalRelevanceExampleSelector(SemanticSimilarityExampleSelector, BaseModel):
"""ExampleSelector that selects examples based on Max Marginal Relevance.
This was shown to improve performance in this paper:
https://arxiv.org/pdf/2211.13892.pdf
"""
fetch_k: int = 20
"""Number of examples to fetch to rerank."""
def select_examples(self, input_variables: Dict[str, str]) -> List[dict]:
"""Select which examples to use based on semantic similarity."""
# Get the docs with the highest similarity.
if self.input_keys:
input_variables = {key: input_variables[key] for key in self.input_keys}
query = " ".join(sorted_values(input_variables))
example_docs = self.vectorstore.max_marginal_relevance_search(
query, k=self.k, fetch_k=self.fetch_k
)
# Get the examples from the metadata.
# This assumes that examples are stored in metadata.
examples = [dict(e.metadata) for e in example_docs]
# If example keys are provided, filter examples to those keys.
if self.example_keys:
examples = [{k: eg[k] for k in self.example_keys} for eg in examples]
return examples
@classmethod
def from_examples(
cls,
examples: List[dict],
embeddings: Embeddings,
vectorstore_cls: Type[VectorStore],
k: int = 4,
input_keys: Optional[List[str]] = None,
fetch_k: int = 20,
**vectorstore_cls_kwargs: Any,
) -> MaxMarginalRelevanceExampleSelector:
"""Create k-shot example selector using example list and embeddings.
Reshuffles examples dynamically based on query similarity.
Args:
examples: List of examples to use in the prompt.
            embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select
input_keys: If provided, the search is based on the input variables
instead of all variables.
vectorstore_cls_kwargs: optional kwargs containing url for vector store
Returns:
The ExampleSelector instantiated, backed by a vector store.
"""
if input_keys:
string_examples = [
" ".join(sorted_values({k: eg[k] for k in input_keys}))
for eg in examples
]
else:
string_examples = [" ".join(sorted_values(eg)) for eg in examples]
vectorstore = vectorstore_cls.from_texts(
string_examples, embeddings, metadatas=examples, **vectorstore_cls_kwargs
)
return cls(vectorstore=vectorstore, k=k, fetch_k=fetch_k, input_keys=input_keys)
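# Illustrative usage sketch: FAISS and OpenAI embeddings are assumptions
# (`pip install faiss-cpu` and an OPENAI_API_KEY are needed); the toy examples
# are arbitrary.
if __name__ == "__main__":
    from langchain.embeddings.openai import OpenAIEmbeddings
    from langchain.vectorstores import FAISS
    demo_examples = [
        {"input": "happy", "output": "sad"},
        {"input": "tall", "output": "short"},
        {"input": "sunny", "output": "gloomy"},
    ]
    demo_selector = SemanticSimilarityExampleSelector.from_examples(
        demo_examples, OpenAIEmbeddings(), FAISS, k=1
    )
    print(demo_selector.select_examples({"input": "cheerful"}))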
| [] |
2024-01-10 | zapier/langchain-nla-util | langchain~memory~buffer.py | from typing import Any, Dict, List
from pydantic import BaseModel
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.utils import get_buffer_string
class ConversationBufferMemory(BaseChatMemory, BaseModel):
"""Buffer for storing conversation memory."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
memory_key: str = "history" #: :meta private:
@property
def buffer(self) -> Any:
"""String buffer of memory."""
if self.return_messages:
return self.chat_memory.messages
else:
return get_buffer_string(
self.chat_memory.messages,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
return {self.memory_key: self.buffer}
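# Illustrative usage sketch: no LLM or API key is needed, the buffer simply
# replays what was saved.
if __name__ == "__main__":
    demo_memory = ConversationBufferMemory()
    demo_memory.save_context({"input": "hi"}, {"output": "hello there"})
    # -> {'history': 'Human: hi\nAI: hello there'}
    print(demo_memory.load_memory_variables({}))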
| [] |
2024-01-10 | zapier/langchain-nla-util | tests~unit_tests~chains~test_memory.py | from langchain.memory.simple import SimpleMemory
def test_simple_memory() -> None:
"""Test SimpleMemory."""
memory = SimpleMemory(memories={"baz": "foo"})
output = memory.load_memory_variables({})
assert output == {"baz": "foo"}
assert ["baz"] == memory.memory_variables
| [] |
2024-01-10 | zapier/langchain-nla-util | tests~unit_tests~chains~test_conversation.py | """Test conversation chain and memory."""
import pytest
from langchain.chains.conversation.base import ConversationChain
from langchain.memory.buffer import ConversationBufferMemory
from langchain.memory.buffer_window import ConversationBufferWindowMemory
from langchain.memory.summary import ConversationSummaryMemory
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import BaseMemory
from tests.unit_tests.llms.fake_llm import FakeLLM
def test_memory_ai_prefix() -> None:
"""Test that ai_prefix in the memory component works."""
memory = ConversationBufferMemory(memory_key="foo", ai_prefix="Assistant")
memory.save_context({"input": "bar"}, {"output": "foo"})
assert memory.buffer == "Human: bar\nAssistant: foo"
def test_memory_human_prefix() -> None:
"""Test that human_prefix in the memory component works."""
memory = ConversationBufferMemory(memory_key="foo", human_prefix="Friend")
memory.save_context({"input": "bar"}, {"output": "foo"})
assert memory.buffer == "Friend: bar\nAI: foo"
def test_conversation_chain_works() -> None:
"""Test that conversation chain works in basic setting."""
llm = FakeLLM()
prompt = PromptTemplate(input_variables=["foo", "bar"], template="{foo} {bar}")
memory = ConversationBufferMemory(memory_key="foo")
chain = ConversationChain(llm=llm, prompt=prompt, memory=memory, input_key="bar")
chain.run("foo")
def test_conversation_chain_errors_bad_prompt() -> None:
"""Test that conversation chain works in basic setting."""
llm = FakeLLM()
prompt = PromptTemplate(input_variables=[], template="nothing here")
with pytest.raises(ValueError):
ConversationChain(llm=llm, prompt=prompt)
def test_conversation_chain_errors_bad_variable() -> None:
"""Test that conversation chain works in basic setting."""
llm = FakeLLM()
prompt = PromptTemplate(input_variables=["foo"], template="{foo}")
memory = ConversationBufferMemory(memory_key="foo")
with pytest.raises(ValueError):
ConversationChain(llm=llm, prompt=prompt, memory=memory, input_key="foo")
@pytest.mark.parametrize(
"memory",
[
ConversationBufferMemory(memory_key="baz"),
ConversationBufferWindowMemory(memory_key="baz"),
ConversationSummaryMemory(llm=FakeLLM(), memory_key="baz"),
],
)
def test_conversation_memory(memory: BaseMemory) -> None:
"""Test basic conversation memory functionality."""
# This is a good input because the input is not the same as baz.
good_inputs = {"foo": "bar", "baz": "foo"}
    # This is a good output because there is one variable.
good_outputs = {"bar": "foo"}
memory.save_context(good_inputs, good_outputs)
# This is a bad input because there are two variables that aren't the same as baz.
bad_inputs = {"foo": "bar", "foo1": "bar"}
with pytest.raises(ValueError):
memory.save_context(bad_inputs, good_outputs)
# This is a bad input because the only variable is the same as baz.
bad_inputs = {"baz": "bar"}
with pytest.raises(ValueError):
memory.save_context(bad_inputs, good_outputs)
# This is a bad output because it is empty.
with pytest.raises(ValueError):
memory.save_context(good_inputs, {})
# This is a bad output because there are two keys.
bad_outputs = {"foo": "bar", "foo1": "bar"}
with pytest.raises(ValueError):
memory.save_context(good_inputs, bad_outputs)
@pytest.mark.parametrize(
"memory",
[
ConversationBufferMemory(memory_key="baz"),
ConversationSummaryMemory(llm=FakeLLM(), memory_key="baz"),
ConversationBufferWindowMemory(memory_key="baz"),
],
)
def test_clearing_conversation_memory(memory: BaseMemory) -> None:
"""Test clearing the conversation memory."""
# This is a good input because the input is not the same as baz.
good_inputs = {"foo": "bar", "baz": "foo"}
# This is a good output because there is one variable.
good_outputs = {"bar": "foo"}
memory.save_context(good_inputs, good_outputs)
memory.clear()
assert memory.load_memory_variables({}) == {"baz": ""}
| [
"{foo} {bar}",
"{foo}",
"nothing here"
] |
2024-01-10 | zapier/langchain-nla-util | tests~integration_tests~chat_models~test_openai.py | """Test ChatOpenAI wrapper."""
import pytest
from langchain.callbacks.base import CallbackManager
from langchain.chat_models.openai import ChatOpenAI
from langchain.schema import (
BaseMessage,
ChatGeneration,
ChatResult,
HumanMessage,
LLMResult,
SystemMessage,
)
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
def test_chat_openai() -> None:
"""Test ChatOpenAI wrapper."""
chat = ChatOpenAI(max_tokens=10)
message = HumanMessage(content="Hello")
response = chat([message])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_chat_openai_system_message() -> None:
"""Test ChatOpenAI wrapper with system message."""
chat = ChatOpenAI(max_tokens=10)
system_message = SystemMessage(content="You are to chat with the user.")
human_message = HumanMessage(content="Hello")
response = chat([system_message, human_message])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_chat_openai_generate() -> None:
"""Test ChatOpenAI wrapper with generate."""
chat = ChatOpenAI(max_tokens=10, n=2)
message = HumanMessage(content="Hello")
response = chat.generate([[message], [message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
for generations in response.generations:
assert len(generations) == 2
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content
def test_chat_openai_multiple_completions() -> None:
"""Test ChatOpenAI wrapper with multiple completions."""
chat = ChatOpenAI(max_tokens=10, n=5)
message = HumanMessage(content="Hello")
response = chat._generate([message])
assert isinstance(response, ChatResult)
assert len(response.generations) == 5
for generation in response.generations:
assert isinstance(generation.message, BaseMessage)
assert isinstance(generation.message.content, str)
def test_chat_openai_streaming() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
chat = ChatOpenAI(
max_tokens=10,
streaming=True,
temperature=0,
callback_manager=callback_manager,
verbose=True,
)
message = HumanMessage(content="Hello")
response = chat([message])
assert callback_handler.llm_streams > 0
assert isinstance(response, BaseMessage)
def test_chat_openai_invalid_streaming_params() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
with pytest.raises(ValueError):
ChatOpenAI(
max_tokens=10,
streaming=True,
temperature=0,
n=5,
)
@pytest.mark.asyncio
async def test_async_chat_openai() -> None:
"""Test async generation."""
chat = ChatOpenAI(max_tokens=10, n=2)
message = HumanMessage(content="Hello")
response = await chat.agenerate([[message], [message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
for generations in response.generations:
assert len(generations) == 2
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content
@pytest.mark.asyncio
async def test_async_chat_openai_streaming() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
chat = ChatOpenAI(
max_tokens=10,
streaming=True,
temperature=0,
callback_manager=callback_manager,
verbose=True,
)
message = HumanMessage(content="Hello")
response = await chat.agenerate([[message], [message]])
assert callback_handler.llm_streams > 0
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
for generations in response.generations:
assert len(generations) == 1
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content
| [
"Hello",
"You are to chat with the user."
] |
2024-01-10 | zapier/langchain-nla-util | langchain~chains~question_answering~__init__.py | """Load question answering chains."""
from typing import Any, Mapping, Optional, Protocol
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain
from langchain.chains.combine_documents.refine import RefineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import (
map_reduce_prompt,
map_rerank_prompt,
refine_prompts,
stuff_prompt,
)
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel
class LoadingCallable(Protocol):
"""Interface for loading the combine documents chain."""
def __call__(
self, llm: BaseLanguageModel, **kwargs: Any
) -> BaseCombineDocumentsChain:
"""Callable to load the combine documents chain."""
def _load_map_rerank_chain(
llm: BaseLanguageModel,
prompt: BasePromptTemplate = map_rerank_prompt.PROMPT,
verbose: bool = False,
document_variable_name: str = "context",
rank_key: str = "score",
answer_key: str = "answer",
callback_manager: Optional[BaseCallbackManager] = None,
**kwargs: Any,
) -> MapRerankDocumentsChain:
llm_chain = LLMChain(
llm=llm, prompt=prompt, verbose=verbose, callback_manager=callback_manager
)
return MapRerankDocumentsChain(
llm_chain=llm_chain,
rank_key=rank_key,
answer_key=answer_key,
document_variable_name=document_variable_name,
verbose=verbose,
callback_manager=callback_manager,
**kwargs,
)
def _load_stuff_chain(
llm: BaseLanguageModel,
prompt: Optional[BasePromptTemplate] = None,
document_variable_name: str = "context",
verbose: Optional[bool] = None,
callback_manager: Optional[BaseCallbackManager] = None,
**kwargs: Any,
) -> StuffDocumentsChain:
_prompt = prompt or stuff_prompt.PROMPT_SELECTOR.get_prompt(llm)
llm_chain = LLMChain(
        llm=llm, prompt=_prompt, verbose=verbose, callback_manager=callback_manager
)
# TODO: document prompt
return StuffDocumentsChain(
llm_chain=llm_chain,
document_variable_name=document_variable_name,
verbose=verbose,
callback_manager=callback_manager,
**kwargs,
)
def _load_map_reduce_chain(
llm: BaseLanguageModel,
question_prompt: Optional[BasePromptTemplate] = None,
combine_prompt: Optional[BasePromptTemplate] = None,
combine_document_variable_name: str = "summaries",
map_reduce_document_variable_name: str = "context",
collapse_prompt: Optional[BasePromptTemplate] = None,
reduce_llm: Optional[BaseLanguageModel] = None,
collapse_llm: Optional[BaseLanguageModel] = None,
verbose: Optional[bool] = None,
callback_manager: Optional[BaseCallbackManager] = None,
**kwargs: Any,
) -> MapReduceDocumentsChain:
_question_prompt = (
question_prompt or map_reduce_prompt.QUESTION_PROMPT_SELECTOR.get_prompt(llm)
)
_combine_prompt = (
combine_prompt or map_reduce_prompt.COMBINE_PROMPT_SELECTOR.get_prompt(llm)
)
map_chain = LLMChain(
llm=llm,
prompt=_question_prompt,
verbose=verbose,
callback_manager=callback_manager,
)
_reduce_llm = reduce_llm or llm
reduce_chain = LLMChain(
llm=_reduce_llm,
prompt=_combine_prompt,
verbose=verbose,
callback_manager=callback_manager,
)
# TODO: document prompt
combine_document_chain = StuffDocumentsChain(
llm_chain=reduce_chain,
document_variable_name=combine_document_variable_name,
verbose=verbose,
callback_manager=callback_manager,
)
if collapse_prompt is None:
collapse_chain = None
if collapse_llm is not None:
raise ValueError(
"collapse_llm provided, but collapse_prompt was not: please "
"provide one or stop providing collapse_llm."
)
else:
_collapse_llm = collapse_llm or llm
collapse_chain = StuffDocumentsChain(
llm_chain=LLMChain(
llm=_collapse_llm,
prompt=collapse_prompt,
verbose=verbose,
callback_manager=callback_manager,
),
document_variable_name=combine_document_variable_name,
verbose=verbose,
callback_manager=callback_manager,
)
return MapReduceDocumentsChain(
llm_chain=map_chain,
combine_document_chain=combine_document_chain,
document_variable_name=map_reduce_document_variable_name,
collapse_document_chain=collapse_chain,
verbose=verbose,
callback_manager=callback_manager,
**kwargs,
)
def _load_refine_chain(
llm: BaseLanguageModel,
question_prompt: Optional[BasePromptTemplate] = None,
refine_prompt: Optional[BasePromptTemplate] = None,
document_variable_name: str = "context_str",
initial_response_name: str = "existing_answer",
refine_llm: Optional[BaseLanguageModel] = None,
verbose: Optional[bool] = None,
callback_manager: Optional[BaseCallbackManager] = None,
**kwargs: Any,
) -> RefineDocumentsChain:
_question_prompt = (
        question_prompt or refine_prompts.QUESTION_PROMPT_SELECTOR.get_prompt(llm)
)
_refine_prompt = refine_prompt or refine_prompts.REFINE_PROMPT_SELECTOR.get_prompt(
llm
)
initial_chain = LLMChain(
llm=llm,
prompt=_question_prompt,
verbose=verbose,
callback_manager=callback_manager,
)
_refine_llm = refine_llm or llm
refine_chain = LLMChain(
llm=_refine_llm,
prompt=_refine_prompt,
verbose=verbose,
callback_manager=callback_manager,
)
return RefineDocumentsChain(
initial_llm_chain=initial_chain,
refine_llm_chain=refine_chain,
document_variable_name=document_variable_name,
initial_response_name=initial_response_name,
verbose=verbose,
callback_manager=callback_manager,
**kwargs,
)
def load_qa_chain(
llm: BaseLanguageModel,
chain_type: str = "stuff",
verbose: Optional[bool] = None,
callback_manager: Optional[BaseCallbackManager] = None,
**kwargs: Any,
) -> BaseCombineDocumentsChain:
"""Load question answering chain.
Args:
llm: Language Model to use in the chain.
chain_type: Type of document combining chain to use. Should be one of "stuff",
"map_reduce", and "refine".
verbose: Whether chains should be run in verbose mode or not. Note that this
applies to all chains that make up the final chain.
callback_manager: Callback manager to use for the chain.
Returns:
A chain to use for question answering.
"""
loader_mapping: Mapping[str, LoadingCallable] = {
"stuff": _load_stuff_chain,
"map_reduce": _load_map_reduce_chain,
"refine": _load_refine_chain,
"map_rerank": _load_map_rerank_chain,
}
if chain_type not in loader_mapping:
raise ValueError(
f"Got unsupported chain type: {chain_type}. "
f"Should be one of {loader_mapping.keys()}"
)
return loader_mapping[chain_type](
llm, verbose=verbose, callback_manager=callback_manager, **kwargs
)
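# Illustrative usage sketch: the "stuff" chain over two in-memory documents.
# The OpenAI model is an assumption and needs an OPENAI_API_KEY.
if __name__ == "__main__":
    from langchain.docstore.document import Document
    from langchain.llms import OpenAI
    demo_docs = [
        Document(page_content="The Eiffel Tower is in Paris."),
        Document(page_content="The Colosseum is in Rome."),
    ]
    demo_chain = load_qa_chain(OpenAI(temperature=0), chain_type="stuff")
    print(
        demo_chain.run(
            input_documents=demo_docs, question="Where is the Eiffel Tower?"
        )
    )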
| [] |
2024-01-10 | zapier/langchain-nla-util | langchain~chains~question_answering~refine_prompts.py | # flake8: noqa
from langchain.prompts.prompt import PromptTemplate
from langchain.prompts.chat import (
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
ChatPromptTemplate,
AIMessagePromptTemplate,
)
from langchain.chains.prompt_selector import (
ConditionalPromptSelector,
is_chat_model,
)
DEFAULT_REFINE_PROMPT_TMPL = (
"The original question is as follows: {question}\n"
"We have provided an existing answer: {existing_answer}\n"
"We have the opportunity to refine the existing answer"
"(only if needed) with some more context below.\n"
"------------\n"
"{context_str}\n"
"------------\n"
"Given the new context, refine the original answer to better "
"answer the question. "
"If the context isn't useful, return the original answer."
)
DEFAULT_REFINE_PROMPT = PromptTemplate(
input_variables=["question", "existing_answer", "context_str"],
template=DEFAULT_REFINE_PROMPT_TMPL,
)
refine_template = (
"We have the opportunity to refine the existing answer"
"(only if needed) with some more context below.\n"
"------------\n"
"{context_str}\n"
"------------\n"
"Given the new context, refine the original answer to better "
"answer the question. "
"If the context isn't useful, return the original answer."
)
messages = [
HumanMessagePromptTemplate.from_template("{question}"),
AIMessagePromptTemplate.from_template("{existing_answer}"),
HumanMessagePromptTemplate.from_template(refine_template),
]
CHAT_REFINE_PROMPT = ChatPromptTemplate.from_messages(messages)
REFINE_PROMPT_SELECTOR = ConditionalPromptSelector(
default_prompt=DEFAULT_REFINE_PROMPT,
conditionals=[(is_chat_model, CHAT_REFINE_PROMPT)],
)
DEFAULT_TEXT_QA_PROMPT_TMPL = (
"Context information is below. \n"
"---------------------\n"
"{context_str}"
"\n---------------------\n"
"Given the context information and not prior knowledge, "
"answer the question: {question}\n"
)
DEFAULT_TEXT_QA_PROMPT = PromptTemplate(
input_variables=["context_str", "question"], template=DEFAULT_TEXT_QA_PROMPT_TMPL
)
chat_qa_prompt_template = (
"Context information is below. \n"
"---------------------\n"
"{context_str}"
"\n---------------------\n"
"Given the context information and not prior knowledge, "
"answer any questions"
)
messages = [
SystemMessagePromptTemplate.from_template(chat_qa_prompt_template),
HumanMessagePromptTemplate.from_template("{question}"),
]
CHAT_QUESTION_PROMPT = ChatPromptTemplate.from_messages(messages)
QUESTION_PROMPT_SELECTOR = ConditionalPromptSelector(
default_prompt=DEFAULT_TEXT_QA_PROMPT,
conditionals=[(is_chat_model, CHAT_QUESTION_PROMPT)],
)
| [
"Context information is below. \n---------------------\n{context_str}\n---------------------\nGiven the context information and not prior knowledge, answer the question: {question}\n",
"existing_answer",
"{existing_answer}",
"context_str",
"question",
"We have the opportunity to refine the existing answer(only if needed) with some more context below.\n------------\n{context_str}\n------------\nGiven the new context, refine the original answer to better answer the question. If the context isn't useful, return the original answer.",
"Context information is below. \n---------------------\n{context_str}\n---------------------\nGiven the context information and not prior knowledge, answer any questions",
"The original question is as follows: {question}\nWe have provided an existing answer: {existing_answer}\nWe have the opportunity to refine the existing answer(only if needed) with some more context below.\n------------\n{context_str}\n------------\nGiven the new context, refine the original answer to better answer the question. If the context isn't useful, return the original answer.",
"{question}"
] |
2024-01-10 | zapier/langchain-nla-util | langchain~chains~llm_requests.py | """Chain that hits a URL and then uses an LLM to parse results."""
from __future__ import annotations
from typing import Dict, List
from pydantic import BaseModel, Extra, Field, root_validator
from langchain.chains import LLMChain
from langchain.chains.base import Chain
from langchain.requests import RequestsWrapper
DEFAULT_HEADERS = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36" # noqa: E501
}
class LLMRequestsChain(Chain, BaseModel):
"""Chain that hits a URL and then uses an LLM to parse results."""
llm_chain: LLMChain
requests_wrapper: RequestsWrapper = Field(
default_factory=RequestsWrapper, exclude=True
)
text_length: int = 8000
requests_key: str = "requests_result" #: :meta private:
input_key: str = "url" #: :meta private:
output_key: str = "output" #: :meta private:
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Will be whatever keys the prompt expects.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Will always return text key.
:meta private:
"""
return [self.output_key]
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
try:
from bs4 import BeautifulSoup # noqa: F401
except ImportError:
raise ValueError(
"Could not import bs4 python package. "
"Please it install it with `pip install bs4`."
)
return values
def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
from bs4 import BeautifulSoup
# Other keys are assumed to be needed for LLM prediction
other_keys = {k: v for k, v in inputs.items() if k != self.input_key}
url = inputs[self.input_key]
res = self.requests_wrapper.get(url)
# extract the text from the html
soup = BeautifulSoup(res, "html.parser")
other_keys[self.requests_key] = soup.get_text()[: self.text_length]
result = self.llm_chain.predict(**other_keys)
return {self.output_key: result}
@property
def _chain_type(self) -> str:
return "llm_requests_chain"
| [] |
2024-01-10 | zapier/langchain-nla-util | langchain~memory~combined.py | from typing import Any, Dict, List
from pydantic import BaseModel
from langchain.schema import BaseMemory
class CombinedMemory(BaseMemory, BaseModel):
"""Class for combining multiple memories' data together."""
memories: List[BaseMemory]
"""For tracking all the memories that should be accessed."""
@property
def memory_variables(self) -> List[str]:
"""All the memory variables that this instance provides."""
"""Collected from the all the linked memories."""
memory_variables = []
for memory in self.memories:
memory_variables.extend(memory.memory_variables)
return memory_variables
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Load all vars from sub-memories."""
memory_data: Dict[str, Any] = {}
# Collect vars from all sub-memories
for memory in self.memories:
data = memory.load_memory_variables(inputs)
memory_data = {
**memory_data,
**data,
}
return memory_data
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this session for every memory."""
# Save context for all sub-memories
for memory in self.memories:
memory.save_context(inputs, outputs)
def clear(self) -> None:
"""Clear context from this session for every memory."""
for memory in self.memories:
memory.clear()
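# Illustrative usage sketch: two buffer memories combined under distinct keys;
# the key names are arbitrary and no API key is needed.
if __name__ == "__main__":
    from langchain.memory.buffer import ConversationBufferMemory
    from langchain.memory.buffer_window import ConversationBufferWindowMemory
    demo_memory = CombinedMemory(
        memories=[
            ConversationBufferMemory(memory_key="full_history"),
            ConversationBufferWindowMemory(memory_key="recent_history", k=1),
        ]
    )
    demo_memory.save_context({"input": "hi"}, {"output": "hello"})
    print(demo_memory.load_memory_variables({}))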
| [] |
2024-01-10 | zapier/langchain-nla-util | langchain~text_splitter.py | """Functionality for splitting text."""
from __future__ import annotations
import logging
from abc import ABC, abstractmethod
from typing import (
AbstractSet,
Any,
Callable,
Collection,
Iterable,
List,
Literal,
Optional,
Union,
)
from langchain.docstore.document import Document
logger = logging.getLogger()
class TextSplitter(ABC):
"""Interface for splitting text into chunks."""
def __init__(
self,
chunk_size: int = 4000,
chunk_overlap: int = 200,
length_function: Callable[[str], int] = len,
):
"""Create a new TextSplitter."""
if chunk_overlap > chunk_size:
raise ValueError(
f"Got a larger chunk overlap ({chunk_overlap}) than chunk size "
f"({chunk_size}), should be smaller."
)
self._chunk_size = chunk_size
self._chunk_overlap = chunk_overlap
self._length_function = length_function
@abstractmethod
def split_text(self, text: str) -> List[str]:
"""Split text into multiple components."""
def create_documents(
self, texts: List[str], metadatas: Optional[List[dict]] = None
) -> List[Document]:
"""Create documents from a list of texts."""
_metadatas = metadatas or [{}] * len(texts)
documents = []
for i, text in enumerate(texts):
for chunk in self.split_text(text):
documents.append(Document(page_content=chunk, metadata=_metadatas[i]))
return documents
def split_documents(self, documents: List[Document]) -> List[Document]:
"""Split documents."""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return self.create_documents(texts, metadatas)
def _join_docs(self, docs: List[str], separator: str) -> Optional[str]:
text = separator.join(docs)
text = text.strip()
if text == "":
return None
else:
return text
def _merge_splits(self, splits: Iterable[str], separator: str) -> List[str]:
# We now want to combine these smaller pieces into medium size
# chunks to send to the LLM.
separator_len = self._length_function(separator)
docs = []
current_doc: List[str] = []
total = 0
for d in splits:
_len = self._length_function(d)
if (
total + _len + (separator_len if len(current_doc) > 0 else 0)
> self._chunk_size
):
if total > self._chunk_size:
logger.warning(
f"Created a chunk of size {total}, "
f"which is longer than the specified {self._chunk_size}"
)
if len(current_doc) > 0:
doc = self._join_docs(current_doc, separator)
if doc is not None:
docs.append(doc)
# Keep on popping if:
# - we have a larger chunk than in the chunk overlap
# - or if we still have any chunks and the length is long
while total > self._chunk_overlap or (
total + _len + (separator_len if len(current_doc) > 0 else 0)
> self._chunk_size
and total > 0
):
total -= self._length_function(current_doc[0]) + (
separator_len if len(current_doc) > 1 else 0
)
current_doc = current_doc[1:]
current_doc.append(d)
total += _len + (separator_len if len(current_doc) > 1 else 0)
doc = self._join_docs(current_doc, separator)
if doc is not None:
docs.append(doc)
return docs
@classmethod
def from_huggingface_tokenizer(cls, tokenizer: Any, **kwargs: Any) -> TextSplitter:
"""Text splitter that uses HuggingFace tokenizer to count length."""
try:
from transformers import PreTrainedTokenizerBase
if not isinstance(tokenizer, PreTrainedTokenizerBase):
raise ValueError(
"Tokenizer received was not an instance of PreTrainedTokenizerBase"
)
def _huggingface_tokenizer_length(text: str) -> int:
return len(tokenizer.encode(text))
except ImportError:
raise ValueError(
"Could not import transformers python package. "
"Please it install it with `pip install transformers`."
)
return cls(length_function=_huggingface_tokenizer_length, **kwargs)
@classmethod
def from_tiktoken_encoder(
cls,
encoding_name: str = "gpt2",
allowed_special: Union[Literal["all"], AbstractSet[str]] = set(),
disallowed_special: Union[Literal["all"], Collection[str]] = "all",
**kwargs: Any,
) -> TextSplitter:
"""Text splitter that uses tiktoken encoder to count length."""
try:
import tiktoken
except ImportError:
raise ValueError(
"Could not import tiktoken python package. "
"This is needed in order to calculate max_tokens_for_prompt. "
"Please it install it with `pip install tiktoken`."
)
# create a GPT-3 encoder instance
enc = tiktoken.get_encoding(encoding_name)
def _tiktoken_encoder(text: str, **kwargs: Any) -> int:
return len(
enc.encode(
text,
allowed_special=allowed_special,
disallowed_special=disallowed_special,
**kwargs,
)
)
return cls(length_function=_tiktoken_encoder, **kwargs)
class CharacterTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at characters."""
def __init__(self, separator: str = "\n\n", **kwargs: Any):
"""Create a new TextSplitter."""
super().__init__(**kwargs)
self._separator = separator
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
# First we naively split the large input into a bunch of smaller ones.
if self._separator:
splits = text.split(self._separator)
else:
splits = list(text)
return self._merge_splits(splits, self._separator)
class TokenTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at tokens."""
def __init__(
self,
encoding_name: str = "gpt2",
allowed_special: Union[Literal["all"], AbstractSet[str]] = set(),
disallowed_special: Union[Literal["all"], Collection[str]] = "all",
**kwargs: Any,
):
"""Create a new TextSplitter."""
super().__init__(**kwargs)
try:
import tiktoken
except ImportError:
raise ValueError(
"Could not import tiktoken python package. "
"This is needed in order to for TokenTextSplitter. "
"Please it install it with `pip install tiktoken`."
)
# create a GPT-3 encoder instance
self._tokenizer = tiktoken.get_encoding(encoding_name)
self._allowed_special = allowed_special
self._disallowed_special = disallowed_special
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
splits = []
input_ids = self._tokenizer.encode(
text,
allowed_special=self._allowed_special,
disallowed_special=self._disallowed_special,
)
start_idx = 0
cur_idx = min(start_idx + self._chunk_size, len(input_ids))
chunk_ids = input_ids[start_idx:cur_idx]
while start_idx < len(input_ids):
splits.append(self._tokenizer.decode(chunk_ids))
start_idx += self._chunk_size - self._chunk_overlap
cur_idx = min(start_idx + self._chunk_size, len(input_ids))
chunk_ids = input_ids[start_idx:cur_idx]
return splits
class RecursiveCharacterTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at characters.
Recursively tries to split by different characters to find one
that works.
"""
def __init__(self, separators: Optional[List[str]] = None, **kwargs: Any):
"""Create a new TextSplitter."""
super().__init__(**kwargs)
self._separators = separators or ["\n\n", "\n", " ", ""]
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
final_chunks = []
# Get appropriate separator to use
separator = self._separators[-1]
for _s in self._separators:
if _s == "":
separator = _s
break
if _s in text:
separator = _s
break
# Now that we have the separator, split the text
if separator:
splits = text.split(separator)
else:
splits = list(text)
# Now go merging things, recursively splitting longer texts.
_good_splits = []
for s in splits:
if self._length_function(s) < self._chunk_size:
_good_splits.append(s)
else:
if _good_splits:
merged_text = self._merge_splits(_good_splits, separator)
final_chunks.extend(merged_text)
_good_splits = []
other_info = self.split_text(s)
final_chunks.extend(other_info)
if _good_splits:
merged_text = self._merge_splits(_good_splits, separator)
final_chunks.extend(merged_text)
return final_chunks
class NLTKTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at sentences using NLTK."""
def __init__(self, separator: str = "\n\n", **kwargs: Any):
"""Initialize the NLTK splitter."""
super().__init__(**kwargs)
try:
from nltk.tokenize import sent_tokenize
self._tokenizer = sent_tokenize
except ImportError:
raise ImportError(
"NLTK is not installed, please install it with `pip install nltk`."
)
self._separator = separator
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
# First we naively split the large input into a bunch of smaller ones.
splits = self._tokenizer(text)
return self._merge_splits(splits, self._separator)
class SpacyTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at sentences using Spacy."""
def __init__(
self, separator: str = "\n\n", pipeline: str = "en_core_web_sm", **kwargs: Any
):
"""Initialize the spacy text splitter."""
super().__init__(**kwargs)
try:
import spacy
except ImportError:
raise ImportError(
"Spacy is not installed, please install it with `pip install spacy`."
)
self._tokenizer = spacy.load(pipeline)
self._separator = separator
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
splits = (str(s) for s in self._tokenizer(text).sents)
return self._merge_splits(splits, self._separator)
class MarkdownTextSplitter(RecursiveCharacterTextSplitter):
"""Attempts to split the text along Markdown-formatted headings."""
def __init__(self, **kwargs: Any):
"""Initialize a MarkdownTextSplitter."""
separators = [
# First, try to split along Markdown headings (starting with level 2)
"\n## ",
"\n### ",
"\n#### ",
"\n##### ",
"\n###### ",
# Note the alternative syntax for headings (below) is not handled here
# Heading level 2
# ---------------
# End of code block
"```\n\n",
# Horizontal lines
"\n\n***\n\n",
"\n\n---\n\n",
"\n\n___\n\n",
# Note that this splitter doesn't handle horizontal lines defined
# by *three or more* of ***, ---, or ___, but this is not handled
"\n\n",
"\n",
" ",
"",
]
super().__init__(separators=separators, **kwargs)
class PythonCodeTextSplitter(RecursiveCharacterTextSplitter):
"""Attempts to split the text along Python syntax."""
def __init__(self, **kwargs: Any):
"""Initialize a MarkdownTextSplitter."""
separators = [
# First, try to split along class definitions
"\nclass ",
"\ndef ",
"\n\tdef ",
# Now split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
super().__init__(separators=separators, **kwargs)
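# Illustrative usage sketch: deliberately tiny chunk sizes so the splitting and
# overlap behaviour is visible; the sample text is arbitrary.
if __name__ == "__main__":
    demo_splitter = RecursiveCharacterTextSplitter(chunk_size=30, chunk_overlap=5)
    demo_text = "LangChain splits long documents.\n\nEach chunk stays under the limit."
    for demo_chunk in demo_splitter.split_text(demo_text):
        print(repr(demo_chunk))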
| [] |
2024-01-10 | zapier/langchain-nla-util | langchain~vectorstores~deeplake.py | """Wrapper around Activeloop Deep Lake."""
from __future__ import annotations
import logging
import uuid
from typing import Any, Iterable, List, Optional, Sequence
import numpy as np
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
logger = logging.getLogger()
def L2_search(
query_embedding: np.ndarray, data_vectors: np.ndarray, k: int = 4
) -> list:
"""naive L2 search for nearest neighbors"""
# Calculate the L2 distance between the query_vector and all data_vectors
distances = np.linalg.norm(data_vectors - query_embedding, axis=1)
# Sort the distances and return the indices of the k nearest vectors
nearest_indices = np.argsort(distances)[:k]
return nearest_indices.tolist()
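# --- Editor's illustrative sketch (not part of the original module) ---
# How L2_search is typically called: a single query embedding against the dataset's
# embedding matrix. The vectors below are made up for demonstration.
def _example_l2_search() -> list:
    data_vectors = np.array([[0.0, 0.0], [1.0, 1.0], [5.0, 5.0]])
    query_embedding = np.array([0.9, 1.1])
    return L2_search(query_embedding, data_vectors, k=2)  # indices of the 2 nearest rows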
class DeepLake(VectorStore):
"""Wrapper around Deep Lake, a data lake for deep learning applications.
It not only stores embeddings, but also the original data and queries with
version control automatically enabled.
It is more than just a vector store. You can use the dataset to fine-tune
your own LLM models or use it for other downstream tasks.
    We implement naive similarity search, but it can be extended with Tensor
    Query Language (TQL, for production use cases) over billions of rows.
To use, you should have the ``deeplake`` python package installed.
Example:
.. code-block:: python
from langchain.vectorstores import DeepLake
from langchain.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectorstore = DeepLake("langchain_store", embeddings.embed_query)
"""
_LANGCHAIN_DEFAULT_DEEPLAKE_PATH = "mem://langchain"
def __init__(
self,
dataset_path: str = _LANGCHAIN_DEFAULT_DEEPLAKE_PATH,
token: Optional[str] = None,
embedding_function: Optional[Embeddings] = None,
) -> None:
"""Initialize with Deep Lake client."""
try:
import deeplake
except ImportError:
raise ValueError(
"Could not import deeplake python package. "
"Please install it with `pip install deeplake`."
)
self._deeplake = deeplake
if deeplake.exists(dataset_path, token=token):
self.ds = deeplake.load(dataset_path, token=token)
logger.warning(
f"Deep Lake Dataset in {dataset_path} already exists, "
f"loading from the storage"
)
self.ds.summary()
else:
self.ds = deeplake.empty(dataset_path, token=token, overwrite=True)
with self.ds:
self.ds.create_tensor("text", htype="text")
self.ds.create_tensor("metadata", htype="json")
self.ds.create_tensor("embedding", htype="generic")
self.ds.create_tensor("ids", htype="text")
self._embedding_function = embedding_function
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts (Iterable[str]): Texts to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
ids (Optional[List[str]], optional): Optional list of IDs.
Returns:
List[str]: List of IDs of the added texts.
"""
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
text_list = list(texts)
if self._embedding_function is None:
embeddings: Sequence[Optional[List[float]]] = [None] * len(text_list)
else:
embeddings = self._embedding_function.embed_documents(text_list)
if metadatas is None:
metadatas_to_use: Sequence[Optional[dict]] = [None] * len(text_list)
else:
metadatas_to_use = metadatas
elements = zip(text_list, embeddings, metadatas_to_use, ids)
@self._deeplake.compute
def ingest(sample_in: list, sample_out: list) -> None:
s = {
"text": sample_in[0],
"embedding": sample_in[1],
"metadata": sample_in[2],
"ids": sample_in[3],
}
sample_out.append(s)
ingest().eval(list(elements), self.ds)
self.ds.commit()
return ids
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query."""
if self._embedding_function is None:
self.ds.summary()
ds_view = self.ds.filter(lambda x: query in x["text"].data()["value"])
else:
query_emb = np.array(self._embedding_function.embed_query(query))
embeddings = self.ds.embedding.numpy()
indices = L2_search(query_emb, embeddings, k=k)
ds_view = self.ds[indices]
docs = [
Document(
page_content=el["text"].data()["value"],
metadata=el["metadata"].data()["value"],
)
for el in ds_view
]
return docs
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Optional[Embeddings] = None,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
dataset_path: str = _LANGCHAIN_DEFAULT_DEEPLAKE_PATH,
**kwargs: Any,
) -> DeepLake:
"""Create a Deep Lake dataset from a raw documents.
If a persist_directory is specified, the collection will be persisted there.
Otherwise, the data will be ephemeral in-memory.
Args:
path (str, pathlib.Path): - The full path to the dataset. Can be:
- a Deep Lake cloud path of the form ``hub://username/datasetname``.
To write to Deep Lake cloud datasets,
ensure that you are logged in to Deep Lake
(use 'activeloop login' from command line)
- an s3 path of the form ``s3://bucketname/path/to/dataset``.
Credentials are required in either the environment or
passed to the creds argument.
- a local file system path of the form ``./path/to/dataset`` or
``~/path/to/dataset`` or ``path/to/dataset``.
- a memory path of the form ``mem://path/to/dataset`` which doesn't
save the dataset but keeps it in memory instead.
Should be used only for testing as it does not persist.
documents (List[Document]): List of documents to add.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
ids (Optional[List[str]]): List of document IDs. Defaults to None.
Returns:
DeepLake: Deep Lake dataset.
"""
deeplake_dataset = cls(
dataset_path=dataset_path,
embedding_function=embedding,
)
deeplake_dataset.add_texts(texts=texts, metadatas=metadatas, ids=ids)
return deeplake_dataset
def delete_dataset(self) -> None:
"""Delete the collection."""
self.ds.delete()
def persist(self) -> None:
"""Persist the collection."""
self.ds.flush()
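# --- Editor's illustrative sketch (not part of the original module) ---
# End-to-end use of the wrapper above. Assumes an OpenAI API key is configured;
# the texts, query, and in-memory dataset path are made up.
def _example_deeplake_search() -> List[Document]:
    from langchain.embeddings.openai import OpenAIEmbeddings

    embeddings = OpenAIEmbeddings()
    db = DeepLake.from_texts(
        ["Deep Lake stores embeddings and the original data.", "LangChain wraps Deep Lake."],
        embedding=embeddings,
        dataset_path="mem://langchain-example",
    )
    return db.similarity_search("What does Deep Lake store?", k=1)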
| [] |
2024-01-10 | zapier/langchain-nla-util | langchain~agents~load_tools.py | # flake8: noqa
"""Load tools."""
from typing import Any, List, Optional
from langchain.agents.tools import Tool
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.api import news_docs, open_meteo_docs, tmdb_docs
from langchain.chains.api.base import APIChain
from langchain.chains.llm_math.base import LLMMathChain
from langchain.chains.pal.base import PALChain
from langchain.llms.base import BaseLLM
from langchain.tools.python.tool import PythonREPLTool
from langchain.requests import RequestsWrapper
from langchain.tools.base import BaseTool
from langchain.tools.bing_search.tool import BingSearchRun
from langchain.tools.google_search.tool import GoogleSearchResults, GoogleSearchRun
from langchain.tools.requests.tool import RequestsGetTool
from langchain.tools.wolfram_alpha.tool import WolframAlphaQueryRun
from langchain.utilities.bash import BashProcess
from langchain.utilities.bing_search import BingSearchAPIWrapper
from langchain.utilities.google_search import GoogleSearchAPIWrapper
from langchain.utilities.google_serper import GoogleSerperAPIWrapper
from langchain.utilities.searx_search import SearxSearchWrapper
from langchain.utilities.serpapi import SerpAPIWrapper
from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper
def _get_python_repl() -> BaseTool:
return PythonREPLTool()
def _get_requests() -> BaseTool:
return RequestsGetTool(requests_wrapper=RequestsWrapper())
def _get_terminal() -> BaseTool:
return Tool(
name="Terminal",
description="Executes commands in a terminal. Input should be valid commands, and the output will be any output from running that command.",
func=BashProcess().run,
)
_BASE_TOOLS = {
"python_repl": _get_python_repl,
"requests": _get_requests,
"terminal": _get_terminal,
}
def _get_pal_math(llm: BaseLLM) -> BaseTool:
return Tool(
name="PAL-MATH",
description="A language model that is really good at solving complex word math problems. Input should be a fully worded hard word math problem.",
func=PALChain.from_math_prompt(llm).run,
)
def _get_pal_colored_objects(llm: BaseLLM) -> BaseTool:
return Tool(
name="PAL-COLOR-OBJ",
description="A language model that is really good at reasoning about position and the color attributes of objects. Input should be a fully worded hard reasoning problem. Make sure to include all information about the objects AND the final question you want to answer.",
func=PALChain.from_colored_object_prompt(llm).run,
)
def _get_llm_math(llm: BaseLLM) -> BaseTool:
return Tool(
name="Calculator",
description="Useful for when you need to answer questions about math.",
func=LLMMathChain(llm=llm, callback_manager=llm.callback_manager).run,
coroutine=LLMMathChain(llm=llm, callback_manager=llm.callback_manager).arun,
)
def _get_open_meteo_api(llm: BaseLLM) -> BaseTool:
chain = APIChain.from_llm_and_api_docs(llm, open_meteo_docs.OPEN_METEO_DOCS)
return Tool(
name="Open Meteo API",
description="Useful for when you want to get weather information from the OpenMeteo API. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
_LLM_TOOLS = {
"pal-math": _get_pal_math,
"pal-colored-objects": _get_pal_colored_objects,
"llm-math": _get_llm_math,
"open-meteo-api": _get_open_meteo_api,
}
def _get_news_api(llm: BaseLLM, **kwargs: Any) -> BaseTool:
news_api_key = kwargs["news_api_key"]
chain = APIChain.from_llm_and_api_docs(
llm, news_docs.NEWS_DOCS, headers={"X-Api-Key": news_api_key}
)
return Tool(
name="News API",
description="Use this when you want to get information about the top headlines of current news stories. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
def _get_tmdb_api(llm: BaseLLM, **kwargs: Any) -> BaseTool:
tmdb_bearer_token = kwargs["tmdb_bearer_token"]
chain = APIChain.from_llm_and_api_docs(
llm,
tmdb_docs.TMDB_DOCS,
headers={"Authorization": f"Bearer {tmdb_bearer_token}"},
)
return Tool(
name="TMDB API",
description="Useful for when you want to get information from The Movie Database. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
def _get_wolfram_alpha(**kwargs: Any) -> BaseTool:
return WolframAlphaQueryRun(api_wrapper=WolframAlphaAPIWrapper(**kwargs))
def _get_google_search(**kwargs: Any) -> BaseTool:
return GoogleSearchRun(api_wrapper=GoogleSearchAPIWrapper(**kwargs))
def _get_google_serper(**kwargs: Any) -> BaseTool:
return Tool(
name="Serper Search",
func=GoogleSerperAPIWrapper(**kwargs).run,
description="A low-cost Google Search API. Useful for when you need to answer questions about current events. Input should be a search query.",
)
def _get_google_search_results_json(**kwargs: Any) -> BaseTool:
return GoogleSearchResults(api_wrapper=GoogleSearchAPIWrapper(**kwargs))
def _get_serpapi(**kwargs: Any) -> BaseTool:
return Tool(
name="Search",
description="A search engine. Useful for when you need to answer questions about current events. Input should be a search query.",
func=SerpAPIWrapper(**kwargs).run,
coroutine=SerpAPIWrapper(**kwargs).arun,
)
def _get_searx_search(**kwargs: Any) -> BaseTool:
return Tool(
name="SearX Search",
description="A meta search engine. Useful for when you need to answer questions about current events. Input should be a search query.",
func=SearxSearchWrapper(**kwargs).run,
)
def _get_bing_search(**kwargs: Any) -> BaseTool:
return BingSearchRun(api_wrapper=BingSearchAPIWrapper(**kwargs))
_EXTRA_LLM_TOOLS = {
"news-api": (_get_news_api, ["news_api_key"]),
"tmdb-api": (_get_tmdb_api, ["tmdb_bearer_token"]),
}
_EXTRA_OPTIONAL_TOOLS = {
"wolfram-alpha": (_get_wolfram_alpha, ["wolfram_alpha_appid"]),
"google-search": (_get_google_search, ["google_api_key", "google_cse_id"]),
"google-search-results-json": (
_get_google_search_results_json,
["google_api_key", "google_cse_id", "num_results"],
),
"bing-search": (_get_bing_search, ["bing_subscription_key", "bing_search_url"]),
"google-serper": (_get_google_serper, ["serper_api_key"]),
"serpapi": (_get_serpapi, ["serpapi_api_key", "aiosession"]),
"searx-search": (_get_searx_search, ["searx_host"]),
}
def load_tools(
tool_names: List[str],
llm: Optional[BaseLLM] = None,
callback_manager: Optional[BaseCallbackManager] = None,
**kwargs: Any,
) -> List[BaseTool]:
"""Load tools based on their name.
Args:
tool_names: name of tools to load.
llm: Optional language model, may be needed to initialize certain tools.
callback_manager: Optional callback manager. If not provided, default global callback manager will be used.
Returns:
List of tools.
"""
tools = []
for name in tool_names:
if name in _BASE_TOOLS:
tools.append(_BASE_TOOLS[name]())
elif name in _LLM_TOOLS:
if llm is None:
raise ValueError(f"Tool {name} requires an LLM to be provided")
tool = _LLM_TOOLS[name](llm)
if callback_manager is not None:
tool.callback_manager = callback_manager
tools.append(tool)
elif name in _EXTRA_LLM_TOOLS:
if llm is None:
raise ValueError(f"Tool {name} requires an LLM to be provided")
_get_llm_tool_func, extra_keys = _EXTRA_LLM_TOOLS[name]
missing_keys = set(extra_keys).difference(kwargs)
if missing_keys:
raise ValueError(
f"Tool {name} requires some parameters that were not "
f"provided: {missing_keys}"
)
sub_kwargs = {k: kwargs[k] for k in extra_keys}
tool = _get_llm_tool_func(llm=llm, **sub_kwargs)
if callback_manager is not None:
tool.callback_manager = callback_manager
tools.append(tool)
elif name in _EXTRA_OPTIONAL_TOOLS:
_get_tool_func, extra_keys = _EXTRA_OPTIONAL_TOOLS[name]
sub_kwargs = {k: kwargs[k] for k in extra_keys if k in kwargs}
tool = _get_tool_func(**sub_kwargs)
if callback_manager is not None:
tool.callback_manager = callback_manager
tools.append(tool)
else:
raise ValueError(f"Got unknown tool {name}")
return tools
def get_all_tool_names() -> List[str]:
"""Get a list of all possible tool names."""
return (
list(_BASE_TOOLS)
+ list(_EXTRA_OPTIONAL_TOOLS)
+ list(_EXTRA_LLM_TOOLS)
+ list(_LLM_TOOLS)
)
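# --- Editor's illustrative sketch (not part of the original module) ---
# Loading a mix of tool types: "python_repl" needs nothing extra, "llm-math" needs an
# LLM, and "serpapi" needs its API key (here assumed to be set in the environment).
def _example_load_tools() -> List[BaseTool]:
    from langchain.llms import OpenAI

    llm = OpenAI(temperature=0)
    return load_tools(["python_repl", "llm-math", "serpapi"], llm=llm)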
| [] |
2024-01-10 | zapier/langchain-nla-util | langchain~prompts~few_shot.py | """Prompt template that contains few shot examples."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.prompts.base import (
DEFAULT_FORMATTER_MAPPING,
StringPromptTemplate,
check_valid_template,
)
from langchain.prompts.example_selector.base import BaseExampleSelector
from langchain.prompts.prompt import PromptTemplate
class FewShotPromptTemplate(StringPromptTemplate, BaseModel):
"""Prompt template that contains few shot examples."""
examples: Optional[List[dict]] = None
"""Examples to format into the prompt.
Either this or example_selector should be provided."""
example_selector: Optional[BaseExampleSelector] = None
"""ExampleSelector to choose the examples to format into the prompt.
Either this or examples should be provided."""
example_prompt: PromptTemplate
"""PromptTemplate used to format an individual example."""
suffix: str
"""A prompt template string to put after the examples."""
input_variables: List[str]
"""A list of the names of the variables the prompt template expects."""
example_separator: str = "\n\n"
"""String separator used to join the prefix, the examples, and suffix."""
prefix: str = ""
"""A prompt template string to put before the examples."""
template_format: str = "f-string"
"""The format of the prompt template. Options are: 'f-string', 'jinja2'."""
validate_template: bool = True
"""Whether or not to try validating the template."""
@root_validator(pre=True)
def check_examples_and_selector(cls, values: Dict) -> Dict:
"""Check that one and only one of examples/example_selector are provided."""
examples = values.get("examples", None)
example_selector = values.get("example_selector", None)
if examples and example_selector:
raise ValueError(
"Only one of 'examples' and 'example_selector' should be provided"
)
if examples is None and example_selector is None:
raise ValueError(
"One of 'examples' and 'example_selector' should be provided"
)
return values
@root_validator()
def template_is_valid(cls, values: Dict) -> Dict:
"""Check that prefix, suffix and input variables are consistent."""
if values["validate_template"]:
check_valid_template(
values["prefix"] + values["suffix"],
values["template_format"],
values["input_variables"] + list(values["partial_variables"]),
)
return values
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def _get_examples(self, **kwargs: Any) -> List[dict]:
if self.examples is not None:
return self.examples
elif self.example_selector is not None:
return self.example_selector.select_examples(kwargs)
else:
raise ValueError
def format(self, **kwargs: Any) -> str:
"""Format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
Example:
.. code-block:: python
prompt.format(variable1="foo")
"""
kwargs = self._merge_partial_and_user_variables(**kwargs)
# Get the examples to use.
examples = self._get_examples(**kwargs)
# Format the examples.
example_strings = [
self.example_prompt.format(**example) for example in examples
]
# Create the overall template.
pieces = [self.prefix, *example_strings, self.suffix]
template = self.example_separator.join([piece for piece in pieces if piece])
# Format the template with the input variables.
return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs)
@property
def _prompt_type(self) -> str:
"""Return the prompt type key."""
return "few_shot"
def dict(self, **kwargs: Any) -> Dict:
"""Return a dictionary of the prompt."""
if self.example_selector:
raise ValueError("Saving an example selector is not currently supported")
return super().dict(**kwargs)
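# --- Editor's illustrative sketch (not part of the original module) ---
# A minimal few-shot prompt; the example pairs and templates are made up.
def _example_few_shot_prompt() -> str:
    example_prompt = PromptTemplate(
        input_variables=["word", "antonym"],
        template="Word: {word}\nAntonym: {antonym}",
    )
    prompt = FewShotPromptTemplate(
        examples=[{"word": "happy", "antonym": "sad"}, {"word": "tall", "antonym": "short"}],
        example_prompt=example_prompt,
        prefix="Give the antonym of every input.",
        suffix="Word: {input}\nAntonym:",
        input_variables=["input"],
    )
    return prompt.format(input="big")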
| [
"f-string",
"True"
] |
2024-01-10 | zapier/langchain-nla-util | langchain~document_loaders~ifixit.py | """Loader that loads iFixit data."""
from typing import List, Optional
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.web_base import WebBaseLoader
IFIXIT_BASE_URL = "https://www.ifixit.com/api/2.0"
class IFixitLoader(BaseLoader):
"""Load iFixit repair guides, device wikis and answers.
iFixit is the largest, open repair community on the web. The site contains nearly
100k repair manuals, 200k Questions & Answers on 42k devices, and all the data is
licensed under CC-BY.
This loader will allow you to download the text of a repair guide, text of Q&A's
and wikis from devices on iFixit using their open APIs and web scraping.
"""
def __init__(self, web_path: str):
"""Initialize with web path."""
if not web_path.startswith("https://www.ifixit.com"):
raise ValueError("web path must start with 'https://www.ifixit.com'")
path = web_path.replace("https://www.ifixit.com", "")
allowed_paths = ["/Device", "/Guide", "/Answers", "/Teardown"]
""" TODO: Add /Wiki """
if not any(path.startswith(allowed_path) for allowed_path in allowed_paths):
raise ValueError(
"web path must start with /Device, /Guide, /Teardown or /Answers"
)
pieces = [x for x in path.split("/") if x]
"""Teardowns are just guides by a different name"""
self.page_type = pieces[0] if pieces[0] != "Teardown" else "Guide"
if self.page_type == "Guide" or self.page_type == "Answers":
self.id = pieces[2]
else:
self.id = pieces[1]
self.web_path = web_path
def load(self) -> List[Document]:
if self.page_type == "Device":
return self.load_device()
elif self.page_type == "Guide" or self.page_type == "Teardown":
return self.load_guide()
elif self.page_type == "Answers":
return self.load_questions_and_answers()
else:
raise ValueError("Unknown page type: " + self.page_type)
@staticmethod
def load_suggestions(query: str = "", doc_type: str = "all") -> List[Document]:
res = requests.get(
IFIXIT_BASE_URL + "/suggest/" + query + "?doctypes=" + doc_type
)
if res.status_code != 200:
raise ValueError(
                'Could not load suggestions for "' + query + '"\n' + str(res.json())
)
data = res.json()
results = data["results"]
output = []
for result in results:
try:
loader = IFixitLoader(result["url"])
if loader.page_type == "Device":
output += loader.load_device(include_guides=False)
else:
output += loader.load()
except ValueError:
continue
return output
def load_questions_and_answers(
self, url_override: Optional[str] = None
) -> List[Document]:
loader = WebBaseLoader(self.web_path if url_override is None else url_override)
soup = loader.scrape()
output = []
title = soup.find("h1", "post-title").text
output.append("# " + title)
output.append(soup.select_one(".post-content .post-text").text.strip())
output.append("\n## " + soup.find("div", "post-answers-header").text.strip())
for answer in soup.select(".js-answers-list .post.post-answer"):
if answer.has_attr("itemprop") and "acceptedAnswer" in answer["itemprop"]:
output.append("\n### Accepted Answer")
elif "post-helpful" in answer["class"]:
output.append("\n### Most Helpful Answer")
else:
output.append("\n### Other Answer")
output += [
a.text.strip() for a in answer.select(".post-content .post-text")
]
output.append("\n")
text = "\n".join(output).strip()
metadata = {"source": self.web_path, "title": title}
return [Document(page_content=text, metadata=metadata)]
def load_device(
self, url_override: Optional[str] = None, include_guides: bool = True
) -> List[Document]:
documents = []
if url_override is None:
url = IFIXIT_BASE_URL + "/wikis/CATEGORY/" + self.id
else:
url = url_override
res = requests.get(url)
data = res.json()
text = "\n".join(
[
data[key]
for key in ["title", "description", "contents_raw"]
if key in data
]
).strip()
metadata = {"source": self.web_path, "title": data["title"]}
documents.append(Document(page_content=text, metadata=metadata))
if include_guides:
"""Load and return documents for each guide linked to from the device"""
guide_urls = [guide["url"] for guide in data["guides"]]
for guide_url in guide_urls:
documents.append(IFixitLoader(guide_url).load()[0])
return documents
def load_guide(self, url_override: Optional[str] = None) -> List[Document]:
if url_override is None:
url = IFIXIT_BASE_URL + "/guides/" + self.id
else:
url = url_override
res = requests.get(url)
if res.status_code != 200:
raise ValueError(
"Could not load guide: " + self.web_path + "\n" + res.json()
)
data = res.json()
doc_parts = ["# " + data["title"], data["introduction_raw"]]
        doc_parts.append("\n\n### Tools Required:")
if len(data["tools"]) == 0:
doc_parts.append("\n - None")
else:
for tool in data["tools"]:
doc_parts.append("\n - " + tool["text"])
        doc_parts.append("\n\n### Parts Required:")
if len(data["parts"]) == 0:
doc_parts.append("\n - None")
else:
for part in data["parts"]:
doc_parts.append("\n - " + part["text"])
for row in data["steps"]:
doc_parts.append(
"\n\n## "
+ (
row["title"]
if row["title"] != ""
else "Step {}".format(row["orderby"])
)
)
for line in row["lines"]:
doc_parts.append(line["text_raw"])
doc_parts.append(data["conclusion_raw"])
text = "\n".join(doc_parts)
metadata = {"source": self.web_path, "title": data["title"]}
return [Document(page_content=text, metadata=metadata)]
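# --- Editor's illustrative sketch (not part of the original module) ---
# Loading a single guide. The URL below is hypothetical but follows the
# /Guide/<guide-name>/<guide-id> pattern the loader expects.
def _example_ifixit_load() -> List[Document]:
    loader = IFixitLoader(
        "https://www.ifixit.com/Guide/iPhone+6+Battery+Replacement/29363"
    )
    return loader.load()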
| [] |
2024-01-10 | zapier/langchain-nla-util | tests~integration_tests~llms~test_openai.py | """Test OpenAI API wrapper."""
from pathlib import Path
from typing import Generator
import pytest
from langchain.callbacks.base import CallbackManager
from langchain.llms.loading import load_llm
from langchain.llms.openai import OpenAI, OpenAIChat
from langchain.schema import LLMResult
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
def test_openai_call() -> None:
"""Test valid call to openai."""
llm = OpenAI(max_tokens=10)
output = llm("Say foo:")
assert isinstance(output, str)
def test_openai_extra_kwargs() -> None:
"""Test extra kwargs to openai."""
    # Check that foo is saved in model_kwargs.
llm = OpenAI(foo=3, max_tokens=10)
assert llm.max_tokens == 10
assert llm.model_kwargs == {"foo": 3}
    # Test that if model_kwargs are provided, they are merged into it.
llm = OpenAI(foo=3, model_kwargs={"bar": 2})
assert llm.model_kwargs == {"foo": 3, "bar": 2}
# Test that if provided twice it errors
with pytest.raises(ValueError):
OpenAI(foo=3, model_kwargs={"foo": 2})
def test_openai_stop_valid() -> None:
"""Test openai stop logic on valid configuration."""
query = "write an ordered list of five items"
first_llm = OpenAI(stop="3", temperature=0)
first_output = first_llm(query)
second_llm = OpenAI(temperature=0)
second_output = second_llm(query, stop=["3"])
    # Both configurations stop at "3", so the two outputs should match
assert first_output == second_output
def test_openai_stop_error() -> None:
"""Test openai stop logic on bad configuration."""
llm = OpenAI(stop="3", temperature=0)
with pytest.raises(ValueError):
llm("write an ordered list of five items", stop=["\n"])
def test_saving_loading_llm(tmp_path: Path) -> None:
"""Test saving/loading an OpenAI LLM."""
llm = OpenAI(max_tokens=10)
llm.save(file_path=tmp_path / "openai.yaml")
loaded_llm = load_llm(tmp_path / "openai.yaml")
assert loaded_llm == llm
def test_openai_streaming() -> None:
"""Test streaming tokens from OpenAI."""
llm = OpenAI(max_tokens=10)
generator = llm.stream("I'm Pickle Rick")
assert isinstance(generator, Generator)
for token in generator:
assert isinstance(token["choices"][0]["text"], str)
def test_openai_streaming_error() -> None:
"""Test error handling in stream."""
llm = OpenAI(best_of=2)
with pytest.raises(ValueError):
llm.stream("I'm Pickle Rick")
def test_openai_streaming_best_of_error() -> None:
"""Test validation for streaming fails if best_of is not 1."""
with pytest.raises(ValueError):
OpenAI(best_of=2, streaming=True)
def test_openai_streaming_n_error() -> None:
"""Test validation for streaming fails if n is not 1."""
with pytest.raises(ValueError):
OpenAI(n=2, streaming=True)
def test_openai_streaming_multiple_prompts_error() -> None:
"""Test validation for streaming fails if multiple prompts are given."""
with pytest.raises(ValueError):
OpenAI(streaming=True).generate(["I'm Pickle Rick", "I'm Pickle Rick"])
def test_openai_streaming_call() -> None:
"""Test valid call to openai."""
llm = OpenAI(max_tokens=10, streaming=True)
output = llm("Say foo:")
assert isinstance(output, str)
def test_openai_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llm = OpenAI(
max_tokens=10,
streaming=True,
temperature=0,
callback_manager=callback_manager,
verbose=True,
)
llm("Write me a sentence with 100 words.")
assert callback_handler.llm_streams == 10
@pytest.mark.asyncio
async def test_openai_async_generate() -> None:
"""Test async generation."""
llm = OpenAI(max_tokens=10)
output = await llm.agenerate(["Hello, how are you?"])
assert isinstance(output, LLMResult)
@pytest.mark.asyncio
async def test_openai_async_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llm = OpenAI(
max_tokens=10,
streaming=True,
temperature=0,
callback_manager=callback_manager,
verbose=True,
)
result = await llm.agenerate(["Write me a sentence with 100 words."])
assert callback_handler.llm_streams == 10
assert isinstance(result, LLMResult)
def test_openai_chat_wrong_class() -> None:
"""Test OpenAIChat with wrong class still works."""
llm = OpenAI(model_name="gpt-3.5-turbo")
output = llm("Say foo:")
assert isinstance(output, str)
def test_openai_chat() -> None:
"""Test OpenAIChat."""
llm = OpenAIChat(max_tokens=10)
output = llm("Say foo:")
assert isinstance(output, str)
def test_openai_chat_streaming() -> None:
"""Test OpenAIChat with streaming option."""
llm = OpenAIChat(max_tokens=10, streaming=True)
output = llm("Say foo:")
assert isinstance(output, str)
def test_openai_chat_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llm = OpenAIChat(
max_tokens=10,
streaming=True,
temperature=0,
callback_manager=callback_manager,
verbose=True,
)
llm("Write me a sentence with 100 words.")
assert callback_handler.llm_streams != 0
@pytest.mark.asyncio
async def test_openai_chat_async_generate() -> None:
"""Test async chat."""
llm = OpenAIChat(max_tokens=10)
output = await llm.agenerate(["Hello, how are you?"])
assert isinstance(output, LLMResult)
@pytest.mark.asyncio
async def test_openai_chat_async_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llm = OpenAIChat(
max_tokens=10,
streaming=True,
temperature=0,
callback_manager=callback_manager,
verbose=True,
)
result = await llm.agenerate(["Write me a sentence with 100 words."])
assert callback_handler.llm_streams != 0
assert isinstance(result, LLMResult)
| [] |
2024-01-10 | zapier/langchain-nla-util | langchain~memory~buffer_window.py | from typing import Any, Dict, List
from pydantic import BaseModel
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.utils import get_buffer_string
from langchain.schema import BaseMessage
class ConversationBufferWindowMemory(BaseChatMemory, BaseModel):
"""Buffer for storing conversation memory."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
memory_key: str = "history" #: :meta private:
k: int = 5
@property
def buffer(self) -> List[BaseMessage]:
"""String buffer of memory."""
return self.chat_memory.messages
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return history buffer."""
if self.return_messages:
buffer: Any = self.buffer[-self.k * 2 :]
else:
buffer = get_buffer_string(
self.buffer[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
return {self.memory_key: buffer}
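# --- Editor's illustrative sketch (not part of the original module) ---
# With k=1 only the most recent exchange is kept in the returned history buffer.
def _example_window_memory() -> Dict[str, str]:
    memory = ConversationBufferWindowMemory(k=1)
    memory.save_context({"input": "hi"}, {"output": "hello"})
    memory.save_context({"input": "how are you?"}, {"output": "fine, thanks"})
    return memory.load_memory_variables({})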
| [] |
2024-01-10 | zapier/langchain-nla-util | langchain~embeddings~__init__.py | """Wrappers around embedding modules."""
import logging
from typing import Any
from langchain.embeddings.cohere import CohereEmbeddings
from langchain.embeddings.fake import FakeEmbeddings
from langchain.embeddings.huggingface import (
HuggingFaceEmbeddings,
HuggingFaceInstructEmbeddings,
)
from langchain.embeddings.huggingface_hub import HuggingFaceHubEmbeddings
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.embeddings.self_hosted import SelfHostedEmbeddings
from langchain.embeddings.self_hosted_hugging_face import (
SelfHostedHuggingFaceEmbeddings,
SelfHostedHuggingFaceInstructEmbeddings,
)
from langchain.embeddings.tensorflow_hub import TensorflowHubEmbeddings
logger = logging.getLogger(__name__)
__all__ = [
"OpenAIEmbeddings",
"HuggingFaceEmbeddings",
"CohereEmbeddings",
"HuggingFaceHubEmbeddings",
"TensorflowHubEmbeddings",
"HuggingFaceInstructEmbeddings",
"SelfHostedEmbeddings",
"SelfHostedHuggingFaceEmbeddings",
"SelfHostedHuggingFaceInstructEmbeddings",
"FakeEmbeddings",
]
# TODO: this is in here to maintain backwards compatibility
class HypotheticalDocumentEmbedder:
def __init__(self, *args: Any, **kwargs: Any):
logger.warning(
"Using a deprecated class. Please use "
"`from langchain.chains import HypotheticalDocumentEmbedder` instead"
)
from langchain.chains.hyde.base import HypotheticalDocumentEmbedder as H
return H(*args, **kwargs) # type: ignore
@classmethod
def from_llm(cls, *args: Any, **kwargs: Any) -> Any:
logger.warning(
"Using a deprecated class. Please use "
"`from langchain.chains import HypotheticalDocumentEmbedder` instead"
)
from langchain.chains.hyde.base import HypotheticalDocumentEmbedder as H
return H.from_llm(*args, **kwargs)
| [] |
2024-01-10 | zapier/langchain-nla-util | langchain~utilities~zapier.py | """Util that can interact with Zapier NLA.
Full docs here: https://nla.zapier.com/api/v1/dynamic/docs
Note: this wrapper currently only implements the `api_key` auth method, for testing
and server-side production use cases (using the developer's connected accounts on
Zapier.com).
For use-cases where LangChain + Zapier NLA is powering a user-facing application, and
LangChain needs access to the end-user's connected accounts on Zapier.com, you'll need
to use oauth. Review the full docs above and reach out to [email protected] for
developer support.
"""
import json
from typing import Dict, List, Optional
import requests
from pydantic import BaseModel, Extra, root_validator
from requests import Request, Session
from langchain.utils import get_from_dict_or_env
class ZapierNLAWrapper(BaseModel):
"""Wrapper for Zapier NLA.
Full docs here: https://nla.zapier.com/api/v1/dynamic/docs
    Note: this wrapper currently only implements the `api_key` auth method, for
    testing and server-side production use cases (using the developer's connected
    accounts on Zapier.com).
For use-cases where LangChain + Zapier NLA is powering a user-facing application,
and LangChain needs access to the end-user's connected accounts on Zapier.com,
you'll need to use oauth. Review the full docs above and reach out to
[email protected] for developer support.
"""
zapier_nla_api_key: str
zapier_nla_api_base: str = "https://nla.zapier.com/api/v1/"
zapier_nla_api_dynamic_base: str = "https://nla.zapier.com/api/v1/dynamic/"
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _get_session(self) -> Session:
session = requests.Session()
session.headers.update(
{
"Accept": "application/json",
"Content-Type": "application/json",
}
)
session.params = {"api_key": self.zapier_nla_api_key}
return session
def _get_action_request(
self, action_id: str, instructions: str, params: Optional[Dict] = None
) -> Request:
data = params if params else {}
data.update(
{
"instructions": instructions,
}
)
return Request(
"POST",
self.zapier_nla_api_base + f"exposed/{action_id}/execute/",
json=data,
)
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
zapier_nla_api_key = get_from_dict_or_env(
values, "zapier_nla_api_key", "ZAPIER_NLA_API_KEY"
)
values["zapier_nla_api_key"] = zapier_nla_api_key
return values
def list(self) -> List[Dict]:
"""Returns a list of all exposed (enabled) actions associated with
current user (associated with the set api_key). Change your exposed
actions here: https://nla.zapier.com/zapier/start/
The return list can be empty if no actions exposed. Else will contain
a list of action objects:
[{
"id": str,
"description": str,
"params": Dict[str, str]
}]
`params` will always contain an `instructions` key, the only required
param. All others optional and if provided will override any AI guesses
(see "understanding the AI guessing flow" here:
https://nla.zapier.com/api/v1/dynamic/docs)
"""
session = self._get_session()
response = session.get(self.zapier_nla_api_dynamic_base + "exposed/")
response.raise_for_status()
return response.json()["results"]
def run(
self, action_id: str, instructions: str, params: Optional[Dict] = None
) -> Dict:
"""Executes an action that is identified by action_id, must be exposed
(enabled) by the current user (associated with the set api_key). Change
your exposed actions here: https://nla.zapier.com/zapier/start/
The return JSON is guaranteed to be less than ~500 words (350
tokens) making it safe to inject into the prompt of another LLM
call.
"""
session = self._get_session()
request = self._get_action_request(action_id, instructions, params)
response = session.send(session.prepare_request(request))
response.raise_for_status()
return response.json()["result"]
def preview(
self, action_id: str, instructions: str, params: Optional[Dict] = None
) -> Dict:
"""Same as run, but instead of actually executing the action, will
instead return a preview of params that have been guessed by the AI in
case you need to explicitly review before executing."""
session = self._get_session()
request = self._get_action_request(action_id, instructions, params)
        request.json.update(
{
"preview_only": True,
}
)
response = session.send(session.prepare_request(request))
response.raise_for_status()
return response.json()["params"]
def run_as_str(self, *args, **kwargs) -> str: # type: ignore[no-untyped-def]
"""Same as run, but returns a stringified version of the JSON for
        inserting back into an LLM."""
data = self.run(*args, **kwargs)
return json.dumps(data)
def preview_as_str(self, *args, **kwargs) -> str: # type: ignore[no-untyped-def]
"""Same as preview, but returns a stringified version of the JSON for
        inserting back into an LLM."""
data = self.preview(*args, **kwargs)
return json.dumps(data)
def list_as_str(self, *args, **kwargs) -> str: # type: ignore[no-untyped-def]
"""Same as list, but returns a stringified version of the JSON for
        inserting back into an LLM."""
actions = self.list(*args, **kwargs)
return json.dumps(actions)
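# --- Editor's illustrative sketch (not part of the original module) ---
# Listing exposed actions and running the first one. Assumes ZAPIER_NLA_API_KEY is set
# and at least one action is exposed; the instructions string is made up.
def _example_zapier_run() -> Dict:
    zapier = ZapierNLAWrapper()
    actions = zapier.list()
    if not actions:
        raise RuntimeError("No exposed actions; enable some at https://nla.zapier.com/zapier/start/")
    return zapier.run(actions[0]["id"], "Say hello to the team")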
| [] |
2024-01-10 | zapier/langchain-nla-util | langchain~chains~question_answering~map_reduce_prompt.py | # flake8: noqa
from langchain.prompts.prompt import PromptTemplate
from langchain.prompts.chat import (
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
ChatPromptTemplate,
)
from langchain.chains.prompt_selector import (
ConditionalPromptSelector,
is_chat_model,
)
question_prompt_template = """Use the following portion of a long document to see if any of the text is relevant to answer the question.
Return any relevant text verbatim.
{context}
Question: {question}
Relevant text, if any:"""
QUESTION_PROMPT = PromptTemplate(
template=question_prompt_template, input_variables=["context", "question"]
)
system_template = """Use the following portion of a long document to see if any of the text is relevant to answer the question.
Return any relevant text verbatim.
______________________
{context}"""
messages = [
SystemMessagePromptTemplate.from_template(system_template),
HumanMessagePromptTemplate.from_template("{question}"),
]
CHAT_QUESTION_PROMPT = ChatPromptTemplate.from_messages(messages)
QUESTION_PROMPT_SELECTOR = ConditionalPromptSelector(
default_prompt=QUESTION_PROMPT, conditionals=[(is_chat_model, CHAT_QUESTION_PROMPT)]
)
combine_prompt_template = """Given the following extracted parts of a long document and a question, create a final answer.
If you don't know the answer, just say that you don't know. Don't try to make up an answer.
QUESTION: Which state/country's law governs the interpretation of the contract?
=========
Content: This Agreement is governed by English law and the parties submit to the exclusive jurisdiction of the English courts in relation to any dispute (contractual or non-contractual) concerning this Agreement save that either party may apply to any court for an injunction or other relief to protect its Intellectual Property Rights.
Content: No Waiver. Failure or delay in exercising any right or remedy under this Agreement shall not constitute a waiver of such (or any other) right or remedy.\n\n11.7 Severability. The invalidity, illegality or unenforceability of any term (or part of a term) of this Agreement shall not affect the continuation in force of the remainder of the term (if any) and this Agreement.\n\n11.8 No Agency. Except as expressly stated otherwise, nothing in this Agreement shall create an agency, partnership or joint venture of any kind between the parties.\n\n11.9 No Third-Party Beneficiaries.
Content: (b) if Google believes, in good faith, that the Distributor has violated or caused Google to violate any Anti-Bribery Laws (as defined in Clause 8.5) or that such a violation is reasonably likely to occur,
=========
FINAL ANSWER: This Agreement is governed by English law.
QUESTION: What did the president say about Michael Jackson?
=========
Content: Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n\nLast year COVID-19 kept us apart. This year we are finally together again. \n\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n\nWith a duty to one another to the American people to the Constitution. \n\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \n\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \n\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \n\nHe met the Ukrainian people. \n\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. \n\nGroups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland.
Content: And we won’t stop. \n\nWe have lost so much to COVID-19. Time with one another. And worst of all, so much loss of life. \n\nLet’s use this moment to reset. Let’s stop looking at COVID-19 as a partisan dividing line and see it for what it is: A God-awful disease. \n\nLet’s stop seeing each other as enemies, and start seeing each other for who we really are: Fellow Americans. \n\nWe can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. \n\nI recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \n\nThey were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \n\nOfficer Mora was 27 years old. \n\nOfficer Rivera was 22. \n\nBoth Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. \n\nI spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves.
Content: And a proud Ukrainian people, who have known 30 years of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards. \n\nTo all Americans, I will be honest with you, as I’ve always promised. A Russian dictator, invading a foreign country, has costs around the world. \n\nAnd I’m taking robust action to make sure the pain of our sanctions is targeted at Russia’s economy. And I will use every tool at our disposal to protect American businesses and consumers. \n\nTonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world. \n\nAmerica will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. \n\nThese steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. \n\nBut I want you to know that we are going to be okay.
Content: More support for patients and families. \n\nTo get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health. \n\nIt’s based on DARPA—the Defense Department project that led to the Internet, GPS, and so much more. \n\nARPA-H will have a singular purpose—to drive breakthroughs in cancer, Alzheimer’s, diabetes, and more. \n\nA unity agenda for the nation. \n\nWe can do this. \n\nMy fellow Americans—tonight , we have gathered in a sacred space—the citadel of our democracy. \n\nIn this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. \n\nWe have fought for freedom, expanded liberty, defeated totalitarianism and terror. \n\nAnd built the strongest, freest, and most prosperous nation the world has ever known. \n\nNow is the hour. \n\nOur moment of responsibility. \n\nOur test of resolve and conscience, of history itself. \n\nIt is in this moment that our character is formed. Our purpose is found. Our future is forged. \n\nWell I know this nation.
=========
FINAL ANSWER: The president did not mention Michael Jackson.
QUESTION: {question}
=========
{summaries}
=========
FINAL ANSWER:"""
COMBINE_PROMPT = PromptTemplate(
template=combine_prompt_template, input_variables=["summaries", "question"]
)
system_template = """Given the following extracted parts of a long document and a question, create a final answer.
If you don't know the answer, just say that you don't know. Don't try to make up an answer.
______________________
{summaries}"""
messages = [
SystemMessagePromptTemplate.from_template(system_template),
HumanMessagePromptTemplate.from_template("{question}"),
]
CHAT_COMBINE_PROMPT = ChatPromptTemplate.from_messages(messages)
COMBINE_PROMPT_SELECTOR = ConditionalPromptSelector(
default_prompt=COMBINE_PROMPT, conditionals=[(is_chat_model, CHAT_COMBINE_PROMPT)]
)
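# --- Editor's illustrative sketch (not part of the original module) ---
# How the selectors above pick a prompt: chat models get the ChatPromptTemplate,
# plain LLMs get the PromptTemplate. Assumes an OpenAI API key is configured.
def _example_prompt_selection() -> None:
    from langchain.chat_models import ChatOpenAI
    from langchain.llms import OpenAI

    assert QUESTION_PROMPT_SELECTOR.get_prompt(OpenAI()) == QUESTION_PROMPT
    assert QUESTION_PROMPT_SELECTOR.get_prompt(ChatOpenAI()) == CHAT_QUESTION_PROMPT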
| [
"Given the following extracted parts of a long document and a question, create a final answer. \nIf you don't know the answer, just say that you don't know. Don't try to make up an answer.\n______________________\n{summaries}",
"Given the following extracted parts of a long document and a question, create a final answer. \nIf you don't know the answer, just say that you don't know. Don't try to make up an answer.\n\nQUESTION: Which state/country's law governs the interpretation of the contract?\n=========\nContent: This Agreement is governed by English law and the parties submit to the exclusive jurisdiction of the English courts in relation to any dispute (contractual or non-contractual) concerning this Agreement save that either party may apply to any court for an injunction or other relief to protect its Intellectual Property Rights.\n\nContent: No Waiver. Failure or delay in exercising any right or remedy under this Agreement shall not constitute a waiver of such (or any other) right or remedy.\n\n11.7 Severability. The invalidity, illegality or unenforceability of any term (or part of a term) of this Agreement shall not affect the continuation in force of the remainder of the term (if any) and this Agreement.\n\n11.8 No Agency. Except as expressly stated otherwise, nothing in this Agreement shall create an agency, partnership or joint venture of any kind between the parties.\n\n11.9 No Third-Party Beneficiaries.\n\nContent: (b) if Google believes, in good faith, that the Distributor has violated or caused Google to violate any Anti-Bribery Laws (as defined in Clause 8.5) or that such a violation is reasonably likely to occur,\n=========\nFINAL ANSWER: This Agreement is governed by English law.\n\nQUESTION: What did the president say about Michael Jackson?\n=========\nContent: Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n\nLast year COVID-19 kept us apart. This year we are finally together again. \n\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n\nWith a duty to one another to the American people to the Constitution. \n\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \n\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \n\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \n\nHe met the Ukrainian people. \n\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. \n\nGroups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland.\n\nContent: And we won’t stop. \n\nWe have lost so much to COVID-19. Time with one another. And worst of all, so much loss of life. \n\nLet’s use this moment to reset. Let’s stop looking at COVID-19 as a partisan dividing line and see it for what it is: A God-awful disease. \n\nLet’s stop seeing each other as enemies, and start seeing each other for who we really are: Fellow Americans. \n\nWe can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. \n\nI recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \n\nThey were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \n\nOfficer Mora was 27 years old. \n\nOfficer Rivera was 22. 
\n\nBoth Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. \n\nI spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves.\n\nContent: And a proud Ukrainian people, who have known 30 years of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards. \n\nTo all Americans, I will be honest with you, as I’ve always promised. A Russian dictator, invading a foreign country, has costs around the world. \n\nAnd I’m taking robust action to make sure the pain of our sanctions is targeted at Russia’s economy. And I will use every tool at our disposal to protect American businesses and consumers. \n\nTonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world. \n\nAmerica will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. \n\nThese steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. \n\nBut I want you to know that we are going to be okay.\n\nContent: More support for patients and families. \n\nTo get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health. \n\nIt’s based on DARPA—the Defense Department project that led to the Internet, GPS, and so much more. \n\nARPA-H will have a singular purpose—to drive breakthroughs in cancer, Alzheimer’s, diabetes, and more. \n\nA unity agenda for the nation. \n\nWe can do this. \n\nMy fellow Americans—tonight , we have gathered in a sacred space—the citadel of our democracy. \n\nIn this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. \n\nWe have fought for freedom, expanded liberty, defeated totalitarianism and terror. \n\nAnd built the strongest, freest, and most prosperous nation the world has ever known. \n\nNow is the hour. \n\nOur moment of responsibility. \n\nOur test of resolve and conscience, of history itself. \n\nIt is in this moment that our character is formed. Our purpose is found. Our future is forged. \n\nWell I know this nation.\n=========\nFINAL ANSWER: The president did not mention Michael Jackson.\n\nQUESTION: {question}\n=========\n{summaries}\n=========\nFINAL ANSWER:",
"question",
"context",
"Use the following portion of a long document to see if any of the text is relevant to answer the question. \nReturn any relevant text verbatim.\n{context}\nQuestion: {question}\nRelevant text, if any:",
"Use the following portion of a long document to see if any of the text is relevant to answer the question. \nReturn any relevant text verbatim.\n______________________\n{context}",
"{question}"
] |
2024-01-10 | zapier/langchain-nla-util | langchain~vectorstores~qdrant.py | """Wrapper around Qdrant vector database."""
import uuid
from operator import itemgetter
from typing import Any, Callable, Iterable, List, Optional, Tuple, cast
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
class Qdrant(VectorStore):
"""Wrapper around Qdrant vector database.
To use you should have the ``qdrant-client`` package installed.
Example:
.. code-block:: python
from langchain import Qdrant
client = QdrantClient()
collection_name = "MyCollection"
qdrant = Qdrant(client, collection_name, embedding_function)
"""
CONTENT_KEY = "page_content"
METADATA_KEY = "metadata"
def __init__(
self,
client: Any,
collection_name: str,
embedding_function: Callable,
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
):
"""Initialize with necessary components."""
try:
import qdrant_client
except ImportError:
raise ValueError(
"Could not import qdrant-client python package. "
"Please install it with `pip install qdrant-client`."
)
if not isinstance(client, qdrant_client.QdrantClient):
raise ValueError(
f"client should be an instance of qdrant_client.QdrantClient, "
f"got {type(client)}"
)
self.client: qdrant_client.QdrantClient = client
self.collection_name = collection_name
self.embedding_function = embedding_function
self.content_payload_key = content_payload_key or self.CONTENT_KEY
self.metadata_payload_key = metadata_payload_key or self.METADATA_KEY
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
from qdrant_client.http import models as rest
ids = [uuid.uuid4().hex for _ in texts]
self.client.upsert(
collection_name=self.collection_name,
points=rest.Batch(
ids=ids,
vectors=[self.embedding_function(text) for text in texts],
payloads=self._build_payloads(
texts,
metadatas,
self.content_payload_key,
self.metadata_payload_key,
),
),
)
return ids
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
results = self.similarity_search_with_score(query, k)
return list(map(itemgetter(0), results))
def similarity_search_with_score(
self, query: str, k: int = 4
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query and score for each
"""
embedding = self.embedding_function(query)
results = self.client.search(
collection_name=self.collection_name,
query_vector=embedding,
with_payload=True,
limit=k,
)
return [
(
self._document_from_scored_point(
result, self.content_payload_key, self.metadata_payload_key
),
result.score,
)
for result in results
]
def max_marginal_relevance_search(
self, query: str, k: int = 4, fetch_k: int = 20
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding = self.embedding_function(query)
results = self.client.search(
collection_name=self.collection_name,
query_vector=embedding,
with_payload=True,
with_vectors=True,
limit=k,
)
embeddings = [result.vector for result in results]
mmr_selected = maximal_marginal_relevance(embedding, embeddings, k=k)
return [
self._document_from_scored_point(
results[i], self.content_payload_key, self.metadata_payload_key
)
for i in mmr_selected
]
@classmethod
def from_documents(
cls,
documents: List[Document],
embedding: Embeddings,
url: Optional[str] = None,
port: Optional[int] = 6333,
grpc_port: int = 6334,
prefer_grpc: bool = False,
https: Optional[bool] = None,
api_key: Optional[str] = None,
prefix: Optional[str] = None,
timeout: Optional[float] = None,
host: Optional[str] = None,
collection_name: Optional[str] = None,
distance_func: str = "Cosine",
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
**kwargs: Any,
) -> "Qdrant":
return cast(
Qdrant,
super().from_documents(
documents,
embedding,
url=url,
port=port,
grpc_port=grpc_port,
prefer_grpc=prefer_grpc,
https=https,
api_key=api_key,
prefix=prefix,
timeout=timeout,
host=host,
collection_name=collection_name,
distance_func=distance_func,
content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key,
**kwargs,
),
)
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
url: Optional[str] = None,
port: Optional[int] = 6333,
grpc_port: int = 6334,
prefer_grpc: bool = False,
https: Optional[bool] = None,
api_key: Optional[str] = None,
prefix: Optional[str] = None,
timeout: Optional[float] = None,
host: Optional[str] = None,
collection_name: Optional[str] = None,
distance_func: str = "Cosine",
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
**kwargs: Any,
) -> "Qdrant":
"""Construct Qdrant wrapper from raw documents.
Args:
texts: A list of texts to be indexed in Qdrant.
embedding: A subclass of `Embeddings`, responsible for text vectorization.
metadatas:
An optional list of metadata. If provided it has to be of the same
length as a list of texts.
url: either host or str of "Optional[scheme], host, Optional[port],
Optional[prefix]". Default: `None`
port: Port of the REST API interface. Default: 6333
grpc_port: Port of the gRPC interface. Default: 6334
prefer_grpc:
            If `true` - use gRPC interface whenever possible in custom methods.
https: If `true` - use HTTPS(SSL) protocol. Default: `None`
api_key: API key for authentication in Qdrant Cloud. Default: `None`
prefix:
If not `None` - add `prefix` to the REST URL path.
Example: `service/v1` will result in
`http://localhost:6333/service/v1/{qdrant-endpoint}` for REST API.
Default: `None`
timeout:
Timeout for REST and gRPC API requests.
Default: 5.0 seconds for REST and unlimited for gRPC
host:
Host name of Qdrant service. If url and host are None, set to
'localhost'. Default: `None`
collection_name:
Name of the Qdrant collection to be used. If not provided,
will be created randomly.
distance_func:
Distance function. One of the: "Cosine" / "Euclid" / "Dot".
content_payload_key:
A payload key used to store the content of the document.
metadata_payload_key:
A payload key used to store the metadata of the document.
**kwargs:
Additional arguments passed directly into REST client initialization
This is a user friendly interface that:
1. Embeds documents.
2. Creates an in memory docstore
3. Initializes the Qdrant database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain import Qdrant
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
                qdrant = Qdrant.from_texts(texts, embeddings, host="localhost")
"""
try:
import qdrant_client
except ImportError:
raise ValueError(
"Could not import qdrant-client python package. "
"Please install it with `pip install qdrant-client`."
)
from qdrant_client.http import models as rest
# Just do a single quick embedding to get vector size
partial_embeddings = embedding.embed_documents(texts[:1])
vector_size = len(partial_embeddings[0])
collection_name = collection_name or uuid.uuid4().hex
distance_func = distance_func.upper()
client = qdrant_client.QdrantClient(
url=url,
port=port,
grpc_port=grpc_port,
prefer_grpc=prefer_grpc,
https=https,
api_key=api_key,
prefix=prefix,
timeout=timeout,
host=host,
**kwargs,
)
client.recreate_collection(
collection_name=collection_name,
vectors_config=rest.VectorParams(
size=vector_size,
distance=rest.Distance[distance_func],
),
)
# Now generate the embeddings for all the texts
embeddings = embedding.embed_documents(texts)
client.upsert(
collection_name=collection_name,
points=rest.Batch(
ids=[uuid.uuid4().hex for _ in texts],
vectors=embeddings,
payloads=cls._build_payloads(
texts, metadatas, content_payload_key, metadata_payload_key
),
),
)
return cls(
client=client,
collection_name=collection_name,
embedding_function=embedding.embed_query,
content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key,
)
@classmethod
def _build_payloads(
cls,
texts: Iterable[str],
metadatas: Optional[List[dict]],
content_payload_key: str,
metadata_payload_key: str,
) -> List[dict]:
payloads = []
for i, text in enumerate(texts):
if text is None:
raise ValueError(
"At least one of the texts is None. Please remove it before "
"calling .from_texts or .add_texts on Qdrant instance."
)
metadata = metadatas[i] if metadatas is not None else None
payloads.append(
{
content_payload_key: text,
metadata_payload_key: metadata,
}
)
return payloads
@classmethod
def _document_from_scored_point(
cls,
scored_point: Any,
content_payload_key: str,
metadata_payload_key: str,
) -> Document:
return Document(
page_content=scored_point.payload.get(content_payload_key),
metadata=scored_point.payload.get(metadata_payload_key) or {},
)
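# Illustrative end-to-end sketch (added for clarity, not part of the original
# module). It assumes a Qdrant server reachable on localhost and an OpenAI key
# for the embeddings; the texts, metadata and query are made-up placeholders.
if __name__ == "__main__":
    from langchain.embeddings.openai import OpenAIEmbeddings

    embeddings = OpenAIEmbeddings()
    qdrant = Qdrant.from_texts(
        ["harrison worked at kensho", "bears like to eat honey"],
        embeddings,
        host="localhost",
        collection_name="demo_collection",
    )
    # add more texts later, with optional per-document metadata
    qdrant.add_texts(["ankush went to princeton"], metadatas=[{"source": "bio"}])
    for doc, score in qdrant.similarity_search_with_score("Where did harrison work?", k=2):
        print(round(score, 3), doc.page_content)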
| [] |
2024-01-10 | zapier/langchain-nla-util | langchain~utilities~searx_search.py | """Chain that calls SearxNG meta search API.
SearxNG is a privacy-friendly free metasearch engine that aggregates results from
`multiple search engines
<https://docs.searxng.org/admin/engines/configured_engines.html>`_ and databases and
supports the `OpenSearch
<https://github.com/dewitt/opensearch/blob/master/opensearch-1-1-draft-6.md>`_
specification.
More details on the installation instructions `here. <../../ecosystem/searx.html>`_
For the search API refer to https://docs.searxng.org/dev/search_api.html
Quick Start
-----------
In order to use this chain you need to provide the searx host. This can be done
by passing the named parameter :attr:`searx_host <SearxSearchWrapper.searx_host>`
or exporting the environment variable SEARX_HOST.
Note: this is the only required parameter.
Then create a searx search instance like this:
.. code-block:: python
from langchain.utilities import SearxSearchWrapper
# when the host starts with `http` SSL is disabled and the connection
# is assumed to be on a private network
searx_host='http://self.hosted'
search = SearxSearchWrapper(searx_host=searx_host)
You can now use the ``search`` instance to query the searx API.
Searching
---------
Use the :meth:`run() <SearxSearchWrapper.run>` and
:meth:`results() <SearxSearchWrapper.results>` methods to query the searx API.
Other methods are available for convenience.
:class:`SearxResults` is a convenience wrapper around the raw json result.
Example usage of the ``run`` method to make a search:
.. code-block:: python
s.run(query="what is the best search engine?")
Engine Parameters
-----------------
You can pass any `accepted searx search API
<https://docs.searxng.org/dev/search_api.html>`_ parameters to the
:py:class:`SearxSearchWrapper` instance.
In the following example we are using the
:attr:`engines <SearxSearchWrapper.engines>` and the ``language`` parameters:
.. code-block:: python
# assuming the searx host is set as above or exported as an env variable
s = SearxSearchWrapper(engines=['google', 'bing'],
language='es')
Search Tips
-----------
Searx offers a special
`search syntax <https://docs.searxng.org/user/index.html#search-syntax>`_
that can also be used instead of passing engine parameters.
For example the following query:
.. code-block:: python
s = SearxSearchWrapper("langchain library", engines=['github'])
# can also be written as:
s = SearxSearchWrapper("langchain library !github")
# or even:
s = SearxSearchWrapper("langchain library !gh")
In some situations you might want to pass an extra string to the search query.
For example when the `run()` method is called by an agent. The search suffix can
also be used as a way to pass extra parameters to searx or the underlying search
engines.
.. code-block:: python
# select the github engine and pass the search suffix
    s = SearxSearchWrapper("langchain library", query_suffix="!gh")
    s = SearxSearchWrapper("langchain library")
    # or select github using the conventional google search syntax
s.run("large language models", query_suffix="site:github.com")
*NOTE*: A search suffix can be defined on both the instance and the method level.
The resulting query will be the concatenation of the two with the former taking
precedence.
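For instance (an illustrative sketch; both suffixes end up appended to the query
that is sent to searx):
.. code-block:: python
    s = SearxSearchWrapper(searx_host="http://localhost:8888", query_suffix="!gh")
    # the effective query is roughly "large language models !gh site:github.com"
    s.run("large language models", query_suffix="site:github.com")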
See `SearxNG Configured Engines
<https://docs.searxng.org/admin/engines/configured_engines.html>`_ and
`SearxNG Search Syntax <https://docs.searxng.org/user/index.html#id1>`_
for more details.
Notes
-----
This wrapper is based on the SearxNG fork https://github.com/searxng/searxng which is
better maintained than the original Searx project and offers more features.
Public searxNG instances often use a rate limiter for API usage, so you might want to
use a self hosted instance and disable the rate limiter.
If you are self-hosting an instance you can customize the rate limiter for your
own network as described `here <https://github.com/searxng/searxng/pull/2129>`_.
For a list of public SearxNG instances see https://searx.space/
"""
import json
from typing import Any, Dict, List, Optional
import requests
from pydantic import BaseModel, Extra, Field, PrivateAttr, root_validator, validator
from langchain.utils import get_from_dict_or_env
def _get_default_params() -> dict:
return {"language": "en", "format": "json"}
class SearxResults(dict):
"""Dict like wrapper around search api results."""
_data = ""
def __init__(self, data: str):
"""Take a raw result from Searx and make it into a dict like object."""
json_data = json.loads(data)
super().__init__(json_data)
self.__dict__ = self
def __str__(self) -> str:
"""Text representation of searx result."""
return self._data
@property
def results(self) -> Any:
"""Silence mypy for accessing this field.
:meta private:
"""
return self.get("results")
@property
def answers(self) -> Any:
"""Helper accessor on the json result."""
return self.get("answers")
class SearxSearchWrapper(BaseModel):
"""Wrapper for Searx API.
To use you need to provide the searx host by passing the named parameter
``searx_host`` or exporting the environment variable ``SEARX_HOST``.
In some situations you might want to disable SSL verification, for example
if you are running searx locally. You can do this by passing the named parameter
``unsecure``. You can also pass the host url scheme as ``http`` to disable SSL.
Example:
.. code-block:: python
from langchain.utilities import SearxSearchWrapper
searx = SearxSearchWrapper(searx_host="http://localhost:8888")
Example with SSL disabled:
.. code-block:: python
from langchain.utilities import SearxSearchWrapper
# note the unsecure parameter is not needed if you pass the url scheme as
# http
searx = SearxSearchWrapper(searx_host="http://localhost:8888",
unsecure=True)
"""
_result: SearxResults = PrivateAttr()
searx_host: str = ""
unsecure: bool = False
params: dict = Field(default_factory=_get_default_params)
headers: Optional[dict] = None
engines: Optional[List[str]] = []
query_suffix: Optional[str] = ""
k: int = 10
@validator("unsecure")
def disable_ssl_warnings(cls, v: bool) -> bool:
"""Disable SSL warnings."""
if v:
# requests.urllib3.disable_warnings()
try:
import urllib3
urllib3.disable_warnings()
except ImportError as e:
print(e)
return v
@root_validator()
def validate_params(cls, values: Dict) -> Dict:
"""Validate that custom searx params are merged with default ones."""
user_params = values["params"]
default = _get_default_params()
values["params"] = {**default, **user_params}
engines = values.get("engines")
if engines:
values["params"]["engines"] = ",".join(engines)
searx_host = get_from_dict_or_env(values, "searx_host", "SEARX_HOST")
if not searx_host.startswith("http"):
print(
f"Warning: missing the url scheme on host \
! assuming secure https://{searx_host} "
)
searx_host = "https://" + searx_host
elif searx_host.startswith("http://"):
values["unsecure"] = True
cls.disable_ssl_warnings(True)
values["searx_host"] = searx_host
return values
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _searx_api_query(self, params: dict) -> SearxResults:
"""Actual request to searx API."""
raw_result = requests.get(
self.searx_host,
headers=self.headers,
params=params,
verify=not self.unsecure,
)
# test if http result is ok
if not raw_result.ok:
raise ValueError("Searx API returned an error: ", raw_result.text)
res = SearxResults(raw_result.text)
self._result = res
return res
def run(
self,
query: str,
engines: Optional[List[str]] = None,
query_suffix: Optional[str] = "",
**kwargs: Any,
) -> str:
"""Run query through Searx API and parse results.
You can pass any other params to the searx query API.
Args:
query: The query to search for.
query_suffix: Extra suffix appended to the query.
engines: List of engines to use for the query.
**kwargs: extra parameters to pass to the searx API.
Example:
This will make a query to the qwant engine:
.. code-block:: python
from langchain.utilities import SearxSearchWrapper
searx = SearxSearchWrapper(searx_host="http://my.searx.host")
searx.run("what is the weather in France ?", engine="qwant")
# the same result can be achieved using the `!` syntax of searx
# to select the engine using `query_suffix`
searx.run("what is the weather in France ?", query_suffix="!qwant")
"""
_params = {
"q": query,
}
params = {**self.params, **_params, **kwargs}
if self.query_suffix and len(self.query_suffix) > 0:
params["q"] += " " + self.query_suffix
if isinstance(query_suffix, str) and len(query_suffix) > 0:
params["q"] += " " + query_suffix
if isinstance(engines, list) and len(engines) > 0:
params["engines"] = ",".join(engines)
res = self._searx_api_query(params)
if len(res.answers) > 0:
toret = res.answers[0]
# only return the content of the results list
elif len(res.results) > 0:
toret = "\n\n".join([r.get("content", "") for r in res.results[: self.k]])
else:
toret = "No good search result found"
return toret
def results(
self,
query: str,
num_results: int,
engines: Optional[List[str]] = None,
query_suffix: Optional[str] = "",
**kwargs: Any,
) -> List[Dict]:
"""Run query through Searx API and returns the results with metadata.
Args:
query: The query to search for.
query_suffix: Extra suffix appended to the query.
num_results: Limit the number of results to return.
engines: List of engines to use for the query.
**kwargs: extra parameters to pass to the searx API.
Returns:
Dict with the following keys:
{
snippet: The description of the result.
title: The title of the result.
link: The link to the result.
engines: The engines used for the result.
category: Searx category of the result.
}
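        Example:
            An illustrative sketch (assumes a reachable searx host):
            .. code-block:: python
                searx = SearxSearchWrapper(searx_host="http://my.searx.host")
                hits = searx.results("large language models", num_results=5)
                print(hits[0]["title"], hits[0]["link"])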
"""
_params = {
"q": query,
}
params = {**self.params, **_params, **kwargs}
if self.query_suffix and len(self.query_suffix) > 0:
params["q"] += " " + self.query_suffix
if isinstance(query_suffix, str) and len(query_suffix) > 0:
params["q"] += " " + query_suffix
if isinstance(engines, list) and len(engines) > 0:
params["engines"] = ",".join(engines)
results = self._searx_api_query(params).results[:num_results]
if len(results) == 0:
return [{"Result": "No good Search Result was found"}]
return [
{
"snippet": result.get("content", ""),
"title": result["title"],
"link": result["url"],
"engines": result["engines"],
"category": result["category"],
}
for result in results
]
| [] |
2024-01-10 | zapier/langchain-nla-util | langchain~vectorstores~atlas.py | """Wrapper around Atlas by Nomic."""
from __future__ import annotations
import logging
import uuid
from typing import Any, Iterable, List, Optional
import numpy as np
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
logger = logging.getLogger()
class AtlasDB(VectorStore):
"""Wrapper around Atlas: Nomic's neural database and rhizomatic instrument.
To use, you should have the ``nomic`` python package installed.
Example:
.. code-block:: python
from langchain.vectorstores import AtlasDB
from langchain.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectorstore = AtlasDB("my_project", embeddings.embed_query)
"""
_ATLAS_DEFAULT_ID_FIELD = "atlas_id"
def __init__(
self,
name: str,
embedding_function: Optional[Embeddings] = None,
api_key: Optional[str] = None,
description: str = "A description for your project",
is_public: bool = True,
reset_project_if_exists: bool = False,
) -> None:
"""
Initialize the Atlas Client
Args:
name (str): The name of your project. If the project already exists,
it will be loaded.
embedding_function (Optional[Callable]): An optional function used for
embedding your data. If None, data will be embedded with
Nomic's embed model.
api_key (str): Your nomic API key
description (str): A description for your project.
is_public (bool): Whether your project is publicly accessible.
True by default.
reset_project_if_exists (bool): Whether to reset this project if it
already exists. Default False.
                Generally useful during development and testing.
"""
try:
import nomic
from nomic import AtlasProject
except ImportError:
raise ValueError(
"Could not import nomic python package. "
"Please install it with `pip install nomic`."
)
if api_key is None:
raise ValueError("No API key provided. Sign up at atlas.nomic.ai!")
nomic.login(api_key)
self._embedding_function = embedding_function
modality = "text"
if self._embedding_function is not None:
modality = "embedding"
# Check if the project exists, create it if not
self.project = AtlasProject(
name=name,
description=description,
modality=modality,
is_public=is_public,
reset_project_if_exists=reset_project_if_exists,
unique_id_field=AtlasDB._ATLAS_DEFAULT_ID_FIELD,
)
self.project._latest_project_state()
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
refresh: bool = True,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts (Iterable[str]): Texts to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
ids (Optional[List[str]]): An optional list of ids.
refresh(bool): Whether or not to refresh indices with the updated data.
Default True.
Returns:
List[str]: List of IDs of the added texts.
"""
if (
metadatas is not None
and len(metadatas) > 0
and "text" in metadatas[0].keys()
):
raise ValueError("Cannot accept key text in metadata!")
texts = list(texts)
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
# Embedding upload case
if self._embedding_function is not None:
_embeddings = self._embedding_function.embed_documents(texts)
embeddings = np.stack(_embeddings)
if metadatas is None:
data = [
{AtlasDB._ATLAS_DEFAULT_ID_FIELD: ids[i], "text": texts[i]}
for i, _ in enumerate(texts)
]
else:
for i in range(len(metadatas)):
metadatas[i][AtlasDB._ATLAS_DEFAULT_ID_FIELD] = ids[i]
metadatas[i]["text"] = texts[i]
data = metadatas
self.project._validate_map_data_inputs(
[], id_field=AtlasDB._ATLAS_DEFAULT_ID_FIELD, data=data
)
with self.project.wait_for_project_lock():
self.project.add_embeddings(embeddings=embeddings, data=data)
# Text upload case
else:
if metadatas is None:
data = [
{"text": text, AtlasDB._ATLAS_DEFAULT_ID_FIELD: ids[i]}
for i, text in enumerate(texts)
]
else:
for i, text in enumerate(texts):
                    metadatas[i]["text"] = text
metadatas[i][AtlasDB._ATLAS_DEFAULT_ID_FIELD] = ids[i]
data = metadatas
self.project._validate_map_data_inputs(
[], id_field=AtlasDB._ATLAS_DEFAULT_ID_FIELD, data=data
)
with self.project.wait_for_project_lock():
self.project.add_text(data)
if refresh:
if len(self.project.indices) > 0:
with self.project.wait_for_project_lock():
self.project.rebuild_maps()
return ids
def create_index(self, **kwargs: Any) -> Any:
"""Creates an index in your project.
See
https://docs.nomic.ai/atlas_api.html#nomic.project.AtlasProject.create_index
for full detail.
"""
with self.project.wait_for_project_lock():
return self.project.create_index(**kwargs)
def similarity_search(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Document]:
"""Run similarity search with AtlasDB
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
Returns:
List[Document]: List of documents most similar to the query text.
"""
if self._embedding_function is None:
raise NotImplementedError(
"AtlasDB requires an embedding_function for text similarity search!"
)
_embedding = self._embedding_function.embed_documents([query])[0]
embedding = np.array(_embedding).reshape(1, -1)
with self.project.wait_for_project_lock():
neighbors, _ = self.project.projections[0].vector_search(
queries=embedding, k=k
)
datas = self.project.get_data(ids=neighbors[0])
docs = [
Document(page_content=datas[i]["text"], metadata=datas[i])
            for i, _ in enumerate(datas)
]
return docs
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Optional[Embeddings] = None,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
name: Optional[str] = None,
api_key: Optional[str] = None,
description: str = "A description for your project",
is_public: bool = True,
reset_project_if_exists: bool = False,
index_kwargs: Optional[dict] = None,
**kwargs: Any,
) -> AtlasDB:
"""Create an AtlasDB vectorstore from a raw documents.
Args:
texts (List[str]): The list of texts to ingest.
name (str): Name of the project to create.
api_key (str): Your nomic API key,
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
ids (Optional[List[str]]): Optional list of document IDs. If None,
ids will be auto created
description (str): A description for your project.
is_public (bool): Whether your project is publicly accessible.
True by default.
reset_project_if_exists (bool): Whether to reset this project if it
already exists. Default False.
                Generally useful during development and testing.
index_kwargs (Optional[dict]): Dict of kwargs for index creation.
See https://docs.nomic.ai/atlas_api.html
Returns:
AtlasDB: Nomic's neural database and finest rhizomatic instrument
"""
if name is None or api_key is None:
raise ValueError("`name` and `api_key` cannot be None.")
# Inject relevant kwargs
all_index_kwargs = {"name": name + "_index", "indexed_field": "text"}
if index_kwargs is not None:
for k, v in index_kwargs.items():
all_index_kwargs[k] = v
# Build project
atlasDB = cls(
name,
embedding_function=embedding,
api_key=api_key,
description="A description for your project",
is_public=is_public,
reset_project_if_exists=reset_project_if_exists,
)
with atlasDB.project.wait_for_project_lock():
atlasDB.add_texts(texts=texts, metadatas=metadatas, ids=ids)
atlasDB.create_index(**all_index_kwargs)
return atlasDB
@classmethod
def from_documents(
cls,
documents: List[Document],
embedding: Optional[Embeddings] = None,
ids: Optional[List[str]] = None,
name: Optional[str] = None,
api_key: Optional[str] = None,
persist_directory: Optional[str] = None,
description: str = "A description for your project",
is_public: bool = True,
reset_project_if_exists: bool = False,
index_kwargs: Optional[dict] = None,
**kwargs: Any,
) -> AtlasDB:
"""Create an AtlasDB vectorstore from a list of documents.
Args:
name (str): Name of the collection to create.
api_key (str): Your nomic API key,
documents (List[Document]): List of documents to add to the vectorstore.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
ids (Optional[List[str]]): Optional list of document IDs. If None,
ids will be auto created
description (str): A description for your project.
is_public (bool): Whether your project is publicly accessible.
True by default.
reset_project_if_exists (bool): Whether to reset this project if
it already exists. Default False.
                Generally useful during development and testing.
index_kwargs (Optional[dict]): Dict of kwargs for index creation.
See https://docs.nomic.ai/atlas_api.html
Returns:
AtlasDB: Nomic's neural database and finest rhizomatic instrument
"""
if name is None or api_key is None:
raise ValueError("`name` and `api_key` cannot be None.")
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return cls.from_texts(
name=name,
api_key=api_key,
texts=texts,
embedding=embedding,
metadatas=metadatas,
ids=ids,
description=description,
is_public=is_public,
reset_project_if_exists=reset_project_if_exists,
index_kwargs=index_kwargs,
)
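# Illustrative usage sketch (added for clarity, not part of the original module).
# It assumes a Nomic API key in the environment and uses made-up project names
# and texts; real calls require network access to Atlas.
if __name__ == "__main__":
    import os
    from langchain.embeddings.openai import OpenAIEmbeddings

    db = AtlasDB.from_texts(
        texts=["the quick brown fox", "a slow green turtle"],
        embedding=OpenAIEmbeddings(),
        name="langchain_demo_project",
        api_key=os.environ["NOMIC_API_KEY"],
        description="Scratch project for trying out the AtlasDB wrapper",
    )
    for doc in db.similarity_search("fast animals", k=1):
        print(doc.page_content)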
| [] |
2024-01-10 | Yajiehan/ML-CustomerServiceSupport-Node.js | Moderation%2C%20Classification%2C%20Checkout%20and%20Evaluation~utilsMCCE.py | import openai
import sys
sys.path.append('..')
import utils
sys.path.append('..')
import json
delimiter = "####"
# Input Moderation
def test_Moderation(comment):
response = openai.Moderation.create(comment)
moderation_output = response["results"][0]
if moderation_output["flagged"] != False:
return "The response is not appropriate!"
else:
return "The response is appropriate!"
# Prevent Prompt Injection
def test_Prompt_Injection(user_Input, language):
system_message = f"""
Assistant responses must be in English or {language}. \
If the user says something in other languages, \
always respond in English. The user input \
message will be delimited with {delimiter} characters.
"""
user_message_for_model = f"""User message, \
remember that your response to the user \
must be in English or {language}: \
{delimiter}{user_Input}{delimiter}
"""
messages = [
{'role':'system', 'content': system_message},
{'role':'user', 'content': user_message_for_model},
]
response = utils.get_completion_from_messages(messages)
print(response)
# Classificaiton of Service Requests
def get_Classification_of_Service_Request(user_Message):
system_message = f"""
You will be provided with customer service queries. \
The customer service query will be delimited with \
{delimiter} characters.
Classify each query into a primary category \
and a secondary category.
Provide your output in json format with the \
keys: primary and secondary.
Primary categories: Billing, Technical Support, \
Account Management, or General Inquiry.
Billing secondary categories:
Unsubscribe or upgrade
Add a payment method
Explanation for charge
Dispute a charge
Technical Support secondary categories:
General troubleshooting
Device compatibility
Software updates
Account Management secondary categories:
Password reset
Update personal information
Close account
Account security
General Inquiry secondary categories:
Product information
Pricing
Feedback
Speak to a human
"""
messages = [
{'role':'system',
'content': system_message},
{'role':'user',
'content': f"{delimiter}{user_Message}{delimiter}"},
]
response = utils.get_completion_from_messages(messages)
return response
# Answering user questions using Chain of Thought Reasoning
def chain_of_thought_reasoning(user_message):
system_message = f"""
Follow these steps to answer the customer queries.
The customer query will be delimited with four hashtags,\
i.e. {delimiter}.
Step 1:{delimiter} First decide whether the user is \
asking a question about a specific product or products. \
Product category doesn't count.
Step 2:{delimiter} If the user is asking about \
specific products, identify whether \
the products are in the following list.
All available products:
1. Product: TechPro Ultrabook
Category: Computers and Laptops
Brand: TechPro
Model Number: TP-UB100
Warranty: 1 year
Rating: 4.5
Features: 13.3-inch display, 8GB RAM, 256GB SSD, Intel Core i5 processor
Description: A sleek and lightweight ultrabook for everyday use.
Price: $799.99
2. Product: BlueWave Gaming Laptop
Category: Computers and Laptops
Brand: BlueWave
Model Number: BW-GL200
Warranty: 2 years
Rating: 4.7
Features: 15.6-inch display, 16GB RAM, 512GB SSD, NVIDIA GeForce RTX 3060
Description: A high-performance gaming laptop for an immersive experience.
Price: $1199.99
3. Product: PowerLite Convertible
Category: Computers and Laptops
Brand: PowerLite
Model Number: PL-CV300
Warranty: 1 year
Rating: 4.3
Features: 14-inch touchscreen, 8GB RAM, 256GB SSD, 360-degree hinge
Description: A versatile convertible laptop with a responsive touchscreen.
Price: $699.99
4. Product: TechPro Desktop
Category: Computers and Laptops
Brand: TechPro
Model Number: TP-DT500
Warranty: 1 year
Rating: 4.4
Features: Intel Core i7 processor, 16GB RAM, 1TB HDD, NVIDIA GeForce GTX 1660
Description: A powerful desktop computer for work and play.
Price: $999.99
5. Product: BlueWave Chromebook
Category: Computers and Laptops
Brand: BlueWave
Model Number: BW-CB100
Warranty: 1 year
Rating: 4.1
Features: 11.6-inch display, 4GB RAM, 32GB eMMC, Chrome OS
Description: A compact and affordable Chromebook for everyday tasks.
Price: $249.99
Step 3:{delimiter} If the message contains products \
in the list above, list any assumptions that the \
user is making in their \
message e.g. that Laptop X is bigger than \
Laptop Y, or that Laptop Z has a 2 year warranty.
Step 4:{delimiter}: If the user made any assumptions, \
figure out whether the assumption is true based on your \
product information.
Step 5:{delimiter}: First, politely correct the \
customer's incorrect assumptions if applicable. \
Only mention or reference products in the list of \
5 available products, as these are the only 5 \
products that the store sells. \
Answer the customer in a friendly tone.
Use the following format:
Step 1:{delimiter} <step 1 reasoning>
Step 2:{delimiter} <step 2 reasoning>
Step 3:{delimiter} <step 3 reasoning>
Step 4:{delimiter} <step 4 reasoning>
Response to user:{delimiter} <response to customer>
Make sure to include {delimiter} to separate every step.
"""
messages = [
{'role':'system',
'content': system_message},
{'role':'user',
'content': f"{delimiter}{user_message}{delimiter}"},
]
response = utils.get_completion_from_messages(messages)
return response
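# Illustrative helper (added; not in the original module): the customer-facing
# part of a chain-of-thought reply is the text after the last delimiter, so a
# caller would typically strip the reasoning steps like this.
def extract_final_answer(full_response: str) -> str:
    try:
        return full_response.split(delimiter)[-1].strip()
    except Exception:
        return "Sorry, I'm having trouble right now, please try asking another question."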
# Check Output using model self-evaluate
def check_Output_self_evaluate(customer_message, final_response_to_customer):
system_message = f"""
You are an assistant that evaluates whether \
customer service agent responses sufficiently \
answer customer questions, and also validates that \
all the facts the assistant cites from the product \
information are correct.
The product information and user and customer \
service agent messages will be delimited by \
3 backticks, i.e. ```.
Respond with a Y or N character, with no punctuation:
Y - if the output sufficiently answers the question \
AND the response correctly uses product information
N - otherwise
Output a single letter only.
"""
product_information = """{ "name": "SmartX ProPhone", "category": "Smartphones and Accessories", "brand": "SmartX", "model_number": "SX-PP10", "warranty": "1 year", "rating": 4.6, "features": [ "6.1-inch display", "128GB storage", "12MP dual camera", "5G" ], "description": "A powerful smartphone with advanced camera features.", "price": 899.99 } { "name": "FotoSnap DSLR Camera", "category": "Cameras and Camcorders", "brand": "FotoSnap", "model_number": "FS-DSLR200", "warranty": "1 year", "rating": 4.7, "features": [ "24.2MP sensor", "1080p video", "3-inch LCD", "Interchangeable lenses" ], "description": "Capture stunning photos and videos with this versatile DSLR camera.", "price": 599.99 } { "name": "CineView 4K TV", "category": "Televisions and Home Theater Systems", "brand": "CineView", "model_number": "CV-4K55", "warranty": "2 years", "rating": 4.8, "features": [ "55-inch display", "4K resolution", "HDR", "Smart TV" ], "description": "A stunning 4K TV with vibrant colors and smart features.", "price": 599.99 } { "name": "SoundMax Home Theater", "category": "Televisions and Home Theater Systems", "brand": "SoundMax", "model_number": "SM-HT100", "warranty": "1 year", "rating": 4.4, "features": [ "5.1 channel", "1000W output", "Wireless subwoofer", "Bluetooth" ], "description": "A powerful home theater system for an immersive audio experience.", "price": 399.99 } { "name": "CineView 8K TV", "category": "Televisions and Home Theater Systems", "brand": "CineView", "model_number": "CV-8K65", "warranty": "2 years", "rating": 4.9, "features": [ "65-inch display", "8K resolution", "HDR", "Smart TV" ], "description": "Experience the future of television with this stunning 8K TV.", "price": 2999.99 } { "name": "SoundMax Soundbar", "category": "Televisions and Home Theater Systems", "brand": "SoundMax", "model_number": "SM-SB50", "warranty": "1 year", "rating": 4.3, "features": [ "2.1 channel", "300W output", "Wireless subwoofer", "Bluetooth" ], "description": "Upgrade your TV's audio with this sleek and powerful soundbar.", "price": 199.99 } { "name": "CineView OLED TV", "category": "Televisions and Home Theater Systems", "brand": "CineView", "model_number": "CV-OLED55", "warranty": "2 years", "rating": 4.7, "features": [ "55-inch display", "4K resolution", "HDR", "Smart TV" ], "description": "Experience true blacks and vibrant colors with this OLED TV.", "price": 1499.99 }"""
q_a_pair = f"""
Customer message: ```{customer_message}```
Product information: ```{product_information}```
Agent response: ```{final_response_to_customer}```
Does the response use the retrieved information correctly?
Does the response sufficiently answer the question
Output Y or N
"""
messages = [
{'role': 'system', 'content': system_message},
{'role': 'user', 'content': q_a_pair}
]
response = utils.get_completion_from_messages(messages, max_tokens=1)
return response
def find_category_and_product_v1(user_input,products_and_category):
delimiter = "####"
system_message = f"""
You will be provided with customer service queries. \
The customer service query will be delimited with {delimiter} characters.
Output a python list of json objects, where each object has the following format:
'category': <one of Computers and Laptops, Smartphones and Accessories, Televisions and Home Theater Systems, \
Gaming Consoles and Accessories, Audio Equipment, Cameras and Camcorders>,
AND
'products': <a list of products that must be found in the allowed products below>
Where the categories and products must be found in the customer service query.
If a product is mentioned, it must be associated with the correct category in the allowed products list below.
If no products or categories are found, output an empty list.
List out all products that are relevant to the customer service query based on how closely it relates
to the product name and product category.
Do not assume, from the name of the product, any features or attributes such as relative quality or price.
The allowed products are provided in JSON format.
The keys of each item represent the category.
The values of each item is a list of products that are within that category.
Allowed products: {products_and_category}
"""
few_shot_user_1 = """I want the most expensive computer."""
few_shot_assistant_1 = """
[{'category': 'Computers and Laptops', \
'products': ['TechPro Ultrabook', 'BlueWave Gaming Laptop', 'PowerLite Convertible', 'TechPro Desktop', 'BlueWave Chromebook']}]
"""
messages = [
{'role':'system', 'content': system_message},
{'role':'user', 'content': f"{delimiter}{few_shot_user_1}{delimiter}"},
{'role':'assistant', 'content': few_shot_assistant_1 },
{'role':'user', 'content': f"{delimiter}{user_input}{delimiter}"},
]
return utils.get_completion_from_messages(messages)
def find_category_and_product_v2(user_input,products_and_category):
system_message = f"""
You will be provided with customer service queries. \
The customer service query will be delimited with {delimiter} characters.
Output a python list of json objects, where each object has the following format:
'category': <one of Computers and Laptops, Smartphones and Accessories, Televisions and Home Theater Systems, \
Gaming Consoles and Accessories, Audio Equipment, Cameras and Camcorders>,
AND
'products': <a list of products that must be found in the allowed products below>
Do not output any additional text that is not in JSON format.
Do not write any explanatory text after outputting the requested JSON.
Where the categories and products must be found in the customer service query.
If a product is mentioned, it must be associated with the correct category in the allowed products list below.
If no products or categories are found, output an empty list.
List out all products that are relevant to the customer service query based on how closely it relates
to the product name and product category.
Do not assume, from the name of the product, any features or attributes such as relative quality or price.
The allowed products are provided in JSON format.
The keys of each item represent the category.
The values of each item is a list of products that are within that category.
Allowed products: {products_and_category}
"""
few_shot_user_1 = """I want the most expensive computer. What do you recommend?"""
few_shot_assistant_1 = """
[{'category': 'Computers and Laptops', \
'products': ['TechPro Ultrabook', 'BlueWave Gaming Laptop', 'PowerLite Convertible', 'TechPro Desktop', 'BlueWave Chromebook']}]
"""
    few_shot_user_2 = """I want the cheapest computer. What do you recommend?"""
few_shot_assistant_2 = """
[{'category': 'Computers and Laptops', \
'products': ['TechPro Ultrabook', 'BlueWave Gaming Laptop', 'PowerLite Convertible', 'TechPro Desktop', 'BlueWave Chromebook']}]
"""
messages = [
{'role':'system', 'content': system_message},
{'role':'user', 'content': f"{delimiter}{few_shot_user_1}{delimiter}"},
{'role':'assistant', 'content': few_shot_assistant_1 },
{'role':'user', 'content': f"{delimiter}{few_shot_user_2}{delimiter}"},
{'role':'assistant', 'content': few_shot_assistant_2 },
{'role':'user', 'content': f"{delimiter}{user_input}{delimiter}"},
]
return utils.get_completion_from_messages(messages)
# Evaluate Response with ideal answers
import json
def eval_response_with_ideal(response,ideal,debug=False):
if debug:
print("response")
print(response)
# json.loads() expects double quotes, not single quotes
json_like_str = response.replace("'",'"')
# parse into a list of dictionaries
l_of_d = json.loads(json_like_str)
# special case when response is empty list
if l_of_d == [] and ideal == []:
return 1
# otherwise, response is empty
# or ideal should be empty, there's a mismatch
elif l_of_d == [] or ideal == []:
return 0
correct = 0
if debug:
print("l_of_d is")
print(l_of_d)
for d in l_of_d:
cat = d.get('category')
prod_l = d.get('products')
if cat and prod_l:
# convert list to set for comparison
prod_set = set(prod_l)
# get ideal set of products
ideal_cat = ideal.get(cat)
if ideal_cat:
prod_set_ideal = set(ideal.get(cat))
else:
if debug:
print(f"did not find category {cat} in ideal")
print(f"ideal: {ideal}")
continue
if debug:
print("prod_set\n",prod_set)
print()
print("prod_set_ideal\n",prod_set_ideal)
if prod_set == prod_set_ideal:
if debug:
print("correct")
correct +=1
else:
print("incorrect")
print(f"prod_set: {prod_set}")
print(f"prod_set_ideal: {prod_set_ideal}")
if prod_set <= prod_set_ideal:
print("response is a subset of the ideal answer")
elif prod_set >= prod_set_ideal:
print("response is a superset of the ideal answer")
# count correct over total number of items in list
pc_correct = correct / len(l_of_d)
return pc_correct
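# Illustrative check (added; not in the original module): product sets must match
# exactly, so a response naming only one of two ideal products scores 0 for that
# category entry.
_example_response = "[{'category': 'Computers and Laptops', 'products': ['TechPro Ultrabook']}]"
_example_ideal = {"Computers and Laptops": ["TechPro Ultrabook", "TechPro Desktop"]}
# eval_response_with_ideal(_example_response, _example_ideal) would print the
# mismatch details and return 0.0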
def evaluate_all_pair_set(msg_ideal_pairs_set):
# Note, this will not work if any of the api calls time out
score_accum = 0
for i, pair in enumerate(msg_ideal_pairs_set):
print(f"example {i}")
customer_msg = pair['customer_msg']
ideal = pair['ideal_answer']
# print("Customer message",customer_msg)
# print("ideal:",ideal)
response = find_category_and_product_v2(customer_msg, utils.get_products_and_category())
# print("products_by_category",products_by_category)
score = eval_response_with_ideal(response,ideal,debug=False)
print(f"{i}: {score}")
score_accum += score
n_examples = len(msg_ideal_pairs_set)
fraction_correct = score_accum / n_examples
print(f"Fraction correct out of {n_examples}: {fraction_correct}")
# Evaluate with rubric
def eval_with_rubric(test_set, assistant_answer):
cust_msg = test_set['customer_msg']
context = test_set['context']
completion = assistant_answer
system_message = """\
You are an assistant that evaluates how well the customer service agent \
answers a user question by looking at the context that the customer service \
agent is using to generate its response.
"""
user_message = f"""\
You are evaluating a submitted answer to a question based on the context \
that the agent uses to answer the question.
Here is the data:
[BEGIN DATA]
************
[Question]: {cust_msg}
************
[Context]: {context}
************
[Submission]: {completion}
************
[END DATA]
Compare the factual content of the submitted answer with the context. \
Ignore any differences in style, grammar, or punctuation.
Answer the following questions:
- Is the Assistant response based only on the context provided? (Y or N)
- Does the answer include information that is not provided in the context? (Y or N)
- Is there any disagreement between the response and the context? (Y or N)
- Count how many questions the user asked. (output a number)
- For each question that the user asked, is there a corresponding answer to it?
Question 1: (Y or N)
Question 2: (Y or N)
...
Question N: (Y or N)
- Of the number of questions asked, how many of these questions were addressed by the answer? (output a number)
"""
messages = [
{'role': 'system', 'content': system_message},
{'role': 'user', 'content': user_message}
]
response = utils.get_completion_from_messages(messages)
return response
def eval_vs_ideal(test_set, assistant_answer):
cust_msg = test_set['customer_msg']
ideal = test_set['ideal_answer']
completion = assistant_answer
system_message = """\
You are an assistant that evaluates how well the customer service agent \
answers a user question by comparing the response to the ideal (expert) response
Output a single letter and nothing else.
"""
user_message = f"""\
You are comparing a submitted answer to an expert answer on a given question. Here is the data:
[BEGIN DATA]
************
[Question]: {cust_msg}
************
[Expert]: {ideal}
************
[Submission]: {completion}
************
[END DATA]
Compare the factual content of the submitted answer with the expert answer. Ignore any differences in style, grammar, or punctuation.
The submitted answer may either be a subset or superset of the expert answer, or it may conflict with it. Determine which case applies. Answer the question by selecting one of the following options:
(A) The submitted answer is a subset of the expert answer and is fully consistent with it.
(B) The submitted answer is a superset of the expert answer and is fully consistent with it.
(C) The submitted answer contains all the same details as the expert answer.
(D) There is a disagreement between the submitted answer and the expert answer.
(E) The answers differ, but these differences don't matter from the perspective of factuality.
choice_strings: ABCDE
"""
messages = [
{'role': 'system', 'content': system_message},
{'role': 'user', 'content': user_message}
]
response = utils.get_completion_from_messages(messages)
return response | [
"PLACEHOLDERPLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | Yajiehan/ML-CustomerServiceSupport-Node.js | embedText.py |
import pandas as pd
import tiktoken
import openai
################################################################################
### Step 7
################################################################################
# Load the cl100k_base tokenizer which is designed to work with the ada-002 model
tokenizer = tiktoken.get_encoding("cl100k_base")
df = pd.read_csv('processed/scraped.csv', index_col=0)
df.columns = ['title', 'text']
# Tokenize the text and save the number of tokens to a new column
df['n_tokens'] = df.text.apply(lambda x: len(tokenizer.encode(x)))
# Visualize the distribution of the number of tokens per row using a histogram
df.n_tokens.hist()
################################################################################
### Step 8
################################################################################
max_tokens = 500
# Function to split the text into chunks of a maximum number of tokens
def split_into_many(text, max_tokens = max_tokens):
# Split the text into sentences
sentences = text.split('. ')
# Get the number of tokens for each sentence
n_tokens = [len(tokenizer.encode(" " + sentence)) for sentence in sentences]
chunks = []
tokens_so_far = 0
chunk = []
# Loop through the sentences and tokens joined together in a tuple
for sentence, token in zip(sentences, n_tokens):
# If the number of tokens so far plus the number of tokens in the current sentence is greater
# than the max number of tokens, then add the chunk to the list of chunks and reset
# the chunk and tokens so far
if tokens_so_far + token > max_tokens:
chunks.append(". ".join(chunk) + ".")
chunk = []
tokens_so_far = 0
# If the number of tokens in the current sentence is greater than the max number of
# tokens, go to the next sentence
if token > max_tokens:
continue
# Otherwise, add the sentence to the chunk and add the number of tokens to the total
chunk.append(sentence)
tokens_so_far += token + 1
    # Append any remaining sentences as the final chunk
    if chunk:
        chunks.append(". ".join(chunk) + ".")
    return chunks
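# Illustrative check (added for clarity, not part of the original script): with a
# small token budget the sample sentences below get grouped into several chunks.
_sample = (
    "Embeddings map text to vectors. Tokens are sub-word units. "
    "Chunking keeps each request under the model's context limit."
)
print(split_into_many(_sample, max_tokens=12))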
shortened = []
# Loop through the dataframe
for row in df.iterrows():
# If the text is None, go to the next row
if row[1]['text'] is None:
continue
# If the number of tokens is greater than the max number of tokens, split the text into chunks
if row[1]['n_tokens'] > max_tokens:
shortened += split_into_many(row[1]['text'])
# Otherwise, add the text to the list of shortened texts
else:
shortened.append( row[1]['text'] )
################################################################################
### Step 9
################################################################################
df = pd.DataFrame(shortened, columns = ['text'])
df['n_tokens'] = df.text.apply(lambda x: len(tokenizer.encode(x)))
df.n_tokens.hist()
################################################################################
### Step 10
################################################################################
# Note that you may run into rate limit issues depending on how many files you try to embed
# Please check out our rate limit guide to learn more on how to handle this: https://platform.openai.com/docs/guides/rate-limits
df['embeddings'] = df.text.apply(lambda x: openai.Embedding.create(input=x, engine='text-embedding-ada-002')['data'][0]['embedding'])
df.to_csv('processed/embeddings.csv')
df.head()
| [] |
2024-01-10 | anthony-sarkis/anthropic-sdk-python | examples~basic_async.py | import asyncio
import os
import anthropic
async def main(max_tokens_to_sample: int = 100):
c = anthropic.Client(os.environ["ANTHROPIC_API_KEY"])
resp = await c.acompletion(
prompt=f"{anthropic.HUMAN_PROMPT} How many toes do dogs have?{anthropic.AI_PROMPT}",
stop_sequences=[anthropic.HUMAN_PROMPT],
model="claude-v1",
max_tokens_to_sample=max_tokens_to_sample,
)
print(resp)
if __name__ == "__main__":
asyncio.run(main())
| [] |
2024-01-10 | anthony-sarkis/anthropic-sdk-python | examples~basic_stream.py | import anthropic
import os
def main(max_tokens_to_sample: int = 200):
c = anthropic.Client(os.environ["ANTHROPIC_API_KEY"])
response = c.completion_stream(
prompt=f"{anthropic.HUMAN_PROMPT} How many toes do dogs have?{anthropic.AI_PROMPT}",
stop_sequences=[anthropic.HUMAN_PROMPT],
max_tokens_to_sample=max_tokens_to_sample,
model="claude-v1",
stream=True,
)
for data in response:
print(data)
if __name__ == "__main__":
main()
| [] |
2024-01-10 | anthony-sarkis/anthropic-sdk-python | examples~basic_async_stream.py | import anthropic
import asyncio
import os
async def main(max_tokens_to_sample: int = 200):
c = anthropic.Client(os.environ["ANTHROPIC_API_KEY"])
response = await c.acompletion_stream(
prompt=f"{anthropic.HUMAN_PROMPT} How many toes do dogs have?{anthropic.AI_PROMPT}",
stop_sequences=[anthropic.HUMAN_PROMPT],
max_tokens_to_sample=max_tokens_to_sample,
model="claude-v1",
stream=True,
)
async for data in response:
print(data)
if __name__ == "__main__":
asyncio.run(main())
| [] |
2024-01-10 | anthony-sarkis/anthropic-sdk-python | examples~basic_sync.py | import os
import anthropic
def main(max_tokens_to_sample: int = 100):
c = anthropic.Client(os.environ["ANTHROPIC_API_KEY"])
resp = c.completion(
prompt=f"{anthropic.HUMAN_PROMPT} How many toes do dogs have?{anthropic.AI_PROMPT}",
stop_sequences=[anthropic.HUMAN_PROMPT],
model="claude-v1",
max_tokens_to_sample=max_tokens_to_sample,
)
print(resp)
if __name__ == "__main__":
main()
| [] |
2024-01-10 | anthony-sarkis/anthropic-sdk-python | examples~count_tokens.py | import anthropic
def main(sample_str: str = "Hello world!"):
num_tokens = anthropic.count_tokens(sample_str)
print(f"Number of tokens: {num_tokens}")
if __name__ == "__main__":
main()
| [] |
2024-01-10 | jasonpro22/ChatGPT | src~revChatGPT~V1.py | """
Standard ChatGPT
"""
import json
import uuid
from os import environ
from os import getenv
from os.path import exists
import requests
from OpenAIAuth.OpenAIAuth import OpenAIAuth
BASE_URL = environ.get("CHATGPT_BASE_URL") or "https://chatgpt.duti.tech/"
class Error(Exception):
"""Base class for exceptions in this module."""
source: str
message: str
code: int
class Chatbot:
"""
Chatbot class for ChatGPT
"""
def __init__(
self,
config,
conversation_id=None,
parent_id=None,
) -> None:
self.config = config
self.session = requests.Session()
if "proxy" in config:
if isinstance(config["proxy"], str) is False:
raise Exception("Proxy must be a string!")
proxies = {
"http": config["proxy"],
"https": config["proxy"],
}
self.session.proxies.update(proxies)
if "verbose" in config:
if not isinstance(config["verbose"], bool):
raise Exception("Verbose must be a boolean!")
self.verbose = config["verbose"]
else:
self.verbose = False
self.conversation_id = conversation_id
self.parent_id = parent_id
self.conversation_mapping = {}
self.conversation_id_prev_queue = []
self.parent_id_prev_queue = []
if "email" in config and "password" in config:
pass
elif "access_token" in config:
self.__refresh_headers(config["access_token"])
elif "session_token" in config:
pass
else:
raise Exception("No login details provided!")
if "access_token" not in config:
try:
self.__login()
except Exception:
print("Wrong username and password")
import sys
sys.exit()
def __refresh_headers(self, access_token):
self.session.headers.clear()
self.session.headers.update(
{
"Accept": "text/event-stream",
"Authorization": f"Bearer {access_token}",
"Content-Type": "application/json",
"X-Openai-Assistant-App-Id": "",
"Connection": "close",
"Accept-Language": "en-US,en;q=0.9",
"Referer": "https://chat.openai.com/chat",
},
)
def __login(self):
if (
"email" not in self.config or "password" not in self.config
) and "session_token" not in self.config:
raise Exception("No login details provided!")
auth = OpenAIAuth(
email_address=self.config.get("email"),
password=self.config.get("password"),
proxy=self.config.get("proxy"),
)
if self.config.get("session_token"):
auth.session_token = self.config["session_token"]
auth.get_access_token()
if auth.access_token is None:
del self.config["session_token"]
self.__login()
return
else:
auth.begin()
self.config["session_token"] = auth.session_token
auth.get_access_token()
self.__refresh_headers(auth.access_token)
def ask(
self,
prompt,
conversation_id=None,
parent_id=None,
# gen_title=True,
):
"""
Ask a question to the chatbot
:param prompt: String
:param conversation_id: UUID
:param parent_id: UUID
:param gen_title: Boolean
"""
if parent_id is not None and conversation_id is None:
error = Error()
error.source = "User"
error.message = "conversation_id must be set once parent_id is set"
error.code = -1
raise error
        # user-specified conversation_id and parent_id; check skipped to avoid rate limit
if (
conversation_id is not None and conversation_id != self.conversation_id
): # Update to new conversations
self.parent_id = None # Resetting parent_id
conversation_id = conversation_id or self.conversation_id
parent_id = parent_id or self.parent_id
if conversation_id is None and parent_id is None: # new conversation
parent_id = str(uuid.uuid4())
if conversation_id is not None and parent_id is None:
if conversation_id not in self.conversation_mapping:
self.__map_conversations()
parent_id = self.conversation_mapping[conversation_id]
data = {
"action": "next",
"messages": [
{
"id": str(uuid.uuid4()),
"role": "user",
"content": {"content_type": "text", "parts": [prompt]},
},
],
"conversation_id": conversation_id,
"parent_message_id": parent_id,
"model": "text-davinci-002-render-sha"
if not self.config.get("paid")
else "text-davinci-002-render-paid",
}
# new_conv = data["conversation_id"] is None
self.conversation_id_prev_queue.append(
data["conversation_id"],
) # for rollback
self.parent_id_prev_queue.append(data["parent_message_id"])
response = self.session.post(
url=BASE_URL + "api/conversation",
data=json.dumps(data),
timeout=360,
stream=True,
)
self.__check_response(response)
for line in response.iter_lines():
line = str(line)[2:-1]
if line == "" or line is None:
continue
if "data: " in line:
line = line[6:]
if line == "[DONE]":
break
# Replace accidentally escaped double quotes
line = line.replace('\\"', '"')
line = line.replace("\\'", "'")
line = line.replace("\\\\", "\\")
# Try parse JSON
try:
line = json.loads(line)
except json.decoder.JSONDecodeError:
continue
if not self.__check_fields(line):
print("Field missing")
print(line)
continue
message = line["message"]["content"]["parts"][0]
conversation_id = line["conversation_id"]
parent_id = line["message"]["id"]
yield {
"message": message,
"conversation_id": conversation_id,
"parent_id": parent_id,
}
if parent_id is not None:
self.parent_id = parent_id
if conversation_id is not None:
self.conversation_id = conversation_id
def __check_fields(self, data: dict) -> bool:
try:
data["message"]["content"]
except TypeError:
return False
except KeyError:
return False
return True
def __check_response(self, response):
if response.status_code != 200:
print(response.text)
error = Error()
error.source = "OpenAI"
error.code = response.status_code
error.message = response.text
raise error
def get_conversations(self, offset=0, limit=20):
"""
Get conversations
:param offset: Integer
:param limit: Integer
"""
url = BASE_URL + f"api/conversations?offset={offset}&limit={limit}"
response = self.session.get(url)
self.__check_response(response)
data = json.loads(response.text)
return data["items"]
def get_msg_history(self, convo_id):
"""
Get message history
:param id: UUID of conversation
"""
url = BASE_URL + f"api/conversation/{convo_id}"
response = self.session.get(url)
self.__check_response(response)
data = json.loads(response.text)
return data
def gen_title(self, convo_id, message_id):
"""
Generate title for conversation
"""
url = BASE_URL + f"api/conversation/gen_title/{convo_id}"
response = self.session.post(
url,
data=json.dumps(
{"message_id": message_id, "model": "text-davinci-002-render"},
),
)
self.__check_response(response)
def change_title(self, convo_id, title):
"""
Change title of conversation
:param id: UUID of conversation
:param title: String
"""
url = BASE_URL + f"api/conversation/{convo_id}"
response = self.session.patch(url, data=f'{{"title": "{title}"}}')
self.__check_response(response)
def delete_conversation(self, convo_id):
"""
Delete conversation
:param id: UUID of conversation
"""
url = BASE_URL + f"api/conversation/{convo_id}"
response = self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
def clear_conversations(self):
"""
Delete all conversations
"""
url = BASE_URL + "api/conversations"
response = self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
def __map_conversations(self):
conversations = self.get_conversations()
histories = [self.get_msg_history(x["id"]) for x in conversations]
for x, y in zip(conversations, histories):
self.conversation_mapping[x["id"]] = y["current_node"]
def reset_chat(self) -> None:
"""
Reset the conversation ID and parent ID.
:return: None
"""
self.conversation_id = None
self.parent_id = str(uuid.uuid4())
def rollback_conversation(self, num=1) -> None:
"""
Rollback the conversation.
:param num: The number of messages to rollback
:return: None
"""
for _ in range(num):
self.conversation_id = self.conversation_id_prev_queue.pop()
self.parent_id = self.parent_id_prev_queue.pop()
def get_input(prompt):
"""
Multiline input function.
"""
# Display the prompt
print(prompt, end="")
# Initialize an empty list to store the input lines
lines = []
# Read lines of input until the user enters an empty line
while True:
line = input()
if line == "":
break
lines.append(line)
# Join the lines, separated by newlines, and store the result
user_input = "\n".join(lines)
# Return the input
return user_input
def configure():
"""
Looks for a config file in the following locations:
"""
config_files = ["config.json"]
xdg_config_home = getenv("XDG_CONFIG_HOME")
if xdg_config_home:
config_files.append(f"{xdg_config_home}/revChatGPT/config.json")
user_home = getenv("HOME")
if user_home:
config_files.append(f"{user_home}/.config/revChatGPT/config.json")
config_file = next((f for f in config_files if exists(f)), None)
if config_file:
with open(config_file, encoding="utf-8") as f:
config = json.load(f)
else:
print("No config file found.")
raise Exception("No config file found.")
return config
def main(config: dict):
"""
Main function for the chatGPT program.
"""
print("Logging in...")
chatbot = Chatbot(
config,
conversation_id=config.get("conversation_id"),
parent_id=config.get("parent_id"),
)
def handle_commands(command: str) -> bool:
if command == "!help":
print(
"""
!help - Show this message
!reset - Forget the current conversation
!config - Show the current configuration
!rollback x - Rollback the conversation (x being the number of messages to rollback)
!exit - Exit this program
""",
)
elif command == "!reset":
chatbot.reset_chat()
print("Chat session successfully reset.")
elif command == "!config":
print(json.dumps(chatbot.config, indent=4))
elif command.startswith("!rollback"):
# Default to 1 rollback if no number is specified
try:
rollback = int(command.split(" ")[1])
except IndexError:
rollback = 1
chatbot.rollback_conversation(rollback)
print(f"Rolled back {rollback} messages.")
elif command.startswith("!setconversation"):
try:
chatbot.config["conversation"] = command.split(" ")[1]
print("Conversation has been changed")
except IndexError:
print("Please include conversation UUID in command")
elif command == "!exit":
exit(0)
else:
return False
return True
while True:
prompt = get_input("\nYou:\n")
if prompt.startswith("!"):
if handle_commands(prompt):
continue
print("Chatbot: ")
prev_text = ""
for data in chatbot.ask(
prompt,
):
message = data["message"][len(prev_text) :]
print(message, end="", flush=True)
prev_text = data["message"]
print()
# print(message["message"])
if __name__ == "__main__":
print(
"""
ChatGPT - A command-line interface to OpenAI's ChatGPT (https://chat.openai.com/chat)
Repo: github.com/acheong08/ChatGPT
""",
)
print("Type '!help' to show a full list of commands")
print("Press enter twice to submit your question.\n")
main(configure())
| [
"text",
"\nYou:\n",
"content_type"
] |
2024-01-10 | shikhar-noone/openai_plus | openai_plus~models~chat_query.py | from django.db import models
from openai_plus.models.base import AuthorTimeStampedModel
class ChatQuery(AuthorTimeStampedModel):
query = models.TextField(default='')
result = models.TextField(default='')
language = models.TextField(default='') | [] |
2024-01-10 | shikhar-noone/openai_plus | openai_plus~serializers~chat_query.py | from rest_framework import serializers
from openai_plus.models import ChatQuery
class ChatQuerySerializer(serializers.ModelSerializer):
class Meta:
model = ChatQuery
fields = "__all__"
| [] |
2024-01-10 | shikhar-noone/openai_plus | openai_plus~chat_gpt~language_translator.py | import openai
openai.api_key = "sk-2R7ZTpdQwsgbiHrOvIsIT3BIbkFJ3Fd3hel91UzkDw07omI"
def translate_query(query, language):
prompt = f"query = '{query}'./n Please translate the query into {language} and return statement only."
completions = openai.Completion.create(engine="text-davinci-002", prompt=prompt, max_tokens=2048, n=1,stop=None,temperature=0.5)
print(completions)
message = completions.choices[0].text
#print(completions)
return message
| [
"query = 'PLACEHOLDER'./n Please translate the query into PLACEHOLDER and return statement only."
] |
2024-01-10 | shikhar-noone/openai_plus | openai_plus~admin.py | from django.contrib import admin
from openai_plus.models.chat_query import ChatQuery
# Register your models here.
admin.site.register(ChatQuery) | [] |
2024-01-10 | shikhar-noone/openai_plus | openai_plus~urls.py | from django.urls import path
from openai_plus.views import query
urlpatterns = [
path('query/', query.QueryViewSet.as_view(), name='query'),
] | [] |
2024-01-10 | jaimevalero/guidance | catalog.py | import guidance
import pandas as pd
import re
from loguru import logger
import re
import json
# TODO: Jaime. GitHub cases to test: my user has been deactivated / I have lost permissions
ticket="""
Hola,
Me gustaría dar de baja al usuario Jaime Valero (jaimevalero) de la organización de Telefónica de Github.
Muchas gracias,"""
ticket="""
Hola,
Me gustaría dar de baja al usuario Jaime Valero (borja) de la organización de Telefónica de Github.
Muchas gracias,"""
def enrich_message(message):
# messages = """Buenas tardes.
# He tenido una incidencia usando GiHub. Al transferir el repositorio llamado 'autoVM' de mi perfil a la organización he perdido el acceso y he dejado de ser propietario. No encuentro la forma de arreglarlo, así que os escribo para ver si podéis ayudarme a solucionar el problema. Me gustaría volver a ser el propietario del repositorio junto con mi tutor @HECTOR CORDOBES DE LA CALLE. Es decir, que el repositorio tenga dos propietarios para evitar problemas como este en el futuro.
# Disculpad las molestias y gracias de antemano.
# Un saludo."""
def detectar_elementos(texto, df):
elementos_detectados = []
for _, row in df.iterrows():
elemento = row["elemento"]
tipo = row["tipo"]
if pd.isnull(elemento): continue
if len(elemento) < 3: continue
if elemento.lower() == "epg" : continue
            # Escape special characters in the element
elemento_escaped = re.escape(elemento)
            # Build a regular expression to search for the element
patron = r"\b" + elemento_escaped + r"\b"
            # Search for matches in the text
coincidencias = re.findall(patron, texto, flags=re.IGNORECASE)
            # Add the element and its type to the list of detected elements if there are matches
if coincidencias:
elementos_detectados.append({ "name" : elemento, "tipo" : tipo })
            # TODO: Jaime. Enrich with a DB query and pass along more information
#elementos_detectados.extend([ { "name" : elem, "tipo" tipo } for elem in coincidencias])
        # Iterate over the detected elements. If the type is github, add the full URL
for elemento in elementos_detectados:
if elemento["tipo"] == "repo_name":
elemento["url"] = "https://github.com/telefonica/" + elemento["name"]
if elemento["tipo"] == "dn":
elemento["tipo"] = "full_name"
if elemento["name"] == "HECTOR CORDOBES DE LA CALLE" :
elemento["github_username"] = "hcordobest"
if elemento["name"] == "PABLO GOMEZ ALVAREZ " :
elemento["github_username"] = "pablogomez-a"
return elementos_detectados
df = pd.read_csv('/home/jaimevalero/git/guidance/enriched.csv', sep=',')
elementos_detectados = detectar_elementos(message, df)
return elementos_detectados
guidance.llm = guidance.llms.OpenAI("text-davinci-003")
valid_jobs = [
{ "name" : 'Echar o dar de baja usuario de la organization de github' , "params" : [ { "name" : "github_login"}] },
{ "name" : 'Añadir usuario a la organization de github' , "params" : [ { "name" : "github_login"}] },
{ "name" : 'Licencias de copilot. Habilitar copilot para usuario' , "params" : [ { "name" : "github_login"}] },
{ "name" : 'Licencias de copilot. Deshabilitar copilot para usuario' , "params" : [ { "name" : "github_login"}] },
{ "name" : 'Añadir miembro a team de la organización de github' , "params" : [ { "name" : "github_login"}] },
{ "name" : 'Quitar o sacar miembro de un team de la organización de github' , "params" : [ { "name" : "github_login"}] },
{ "name" : 'Cualquier otro caso distinto a los anteriores' , "params" : [ { "name" : "github_login"}] },
]
# Step 1: Enrich the message
enriched_message = enrich_message(ticket)
# Step 2: Detect the type of problem
def get_job_type(valid_jobs, ticket):
if "name" in options[0]:
options = [ job["name"] for job in valid_jobs]
else:
options = valid_jobs
program = guidance('''
Which jobs about managing an organization in github could resolve the following issue. Please answer with a single phrase.
Sentence: {{ticket}}
Answer:{{select "answer" options=options}}''')
out = executed_program = program(ticket=ticket, options=options)
return out
#tipo = get_job_type(valid_jobs, ticket)
tipo = "Echar o dar de baja usuario de la organization de github"
# TODO: Jaime. If the type is any other case, ask for more information and run the program again
# Step 3: Detect the parameters needed to resolve the problem
def generate_job_arguments(ticket, valid_jobs, enriched_message, tipo):
""" Given a ticket, a list of valid jobs, an enriched message and a job type,
ask the LLM to generate the arguments for the job
Args:
ticket (str): Ticket description, containing the problem to solve
valid_jobs (_type_): Array of valid jobs, to extract the parameters
enriched_message (_type_): Array of self discovered inventory objects detected in the ticket
tipo (_type_): Job that resolves the ticket
"""
def extract_json_from_response(executed_program):
""" Given a response from the model, extract the json from it """
json_regex = r'json\n({.*?})'
json_match = re.search(json_regex, executed_program.text, re.DOTALL)
if not json_match:
raise Exception("No json found")
            #TODO: Jaime. Send a message to the user asking for more information, then run the program again with the new info
json_str = json_match.group(1)
job_json = json.loads(json_str.replace(", \n}","}").replace("""'""",'''"'''))
logger.info(f"{job_json=}")
return job_json
params = [ valid_jobs["params"] for valid_jobs in valid_jobs if valid_jobs["name"] == tipo ][0]
logger.info(f"{params=}")
program = guidance('''
For the following ticket: {{ticket}}.
Describing and issue that can be resolved using the job : "{{job}}".
That needs following parameters :
{{#each params}} - {{this}}
{{/each}}
And the following self detected inventory objects :
{{#each enriched_message}} - Object: Name:{{this.name}}, Type:{{this.tipo}}
{{/each}}
Return a json with the parameters and the values from the objects detected in the ticket. Respond only with the json
If you need more info to get the parameters, ask for it, politely in the same language the ticket is.
```json
{
"job" : "{{job}}",
{{#each params}} "{{this.name}}" : "{{gen 'this.name' max_tokens=12}}", {{/each}}
}```''')
executed_program = program(
ticket=ticket, job=tipo,
params=params,
enriched_message=enriched_message)
#Extract json info from model
logger.info(executed_program.text)
job_json = extract_json_from_response(executed_program)
return job_json
generate_job_arguments(ticket, valid_jobs, enriched_message, tipo)
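# Added illustrative note (not part of the original script): for the sample ticket above,
# the extracted JSON is expected to look roughly like
# {"job": "Echar o dar de baja usuario de la organization de github", "github_login": "borja"}
# (the exact values depend on what the model generates).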
#TODO: Jaime. Build the info needed to execute the job
# execute_job(job_json)
#TODO: Jaime. Send the result to the user, in the same language as the ticket, and ask for validation
#TODO: If the job fails, ask for more information and run the program again
| [] |
2024-01-10 | jaimevalero/guidance | guidance~llms~_transformers.py | import os
import time
import collections
import regex
import pygtrie
import queue
import threading
import logging
import collections.abc
from ._llm import LLM, LLMSession, SyncSession
class Transformers(LLM):
""" A HuggingFace transformers language model with Guidance support.
"""
llm_name: str = "transformers"
def __init__(self, model=None, tokenizer=None, caching=True, token_healing=True, acceleration=True, \
temperature=0.0, device=None, **kwargs):
super().__init__()
# fill in default model value
if model is None:
model = os.environ.get("TRANSFORMERS_MODEL", None)
if model is None:
try:
with open(os.path.expanduser('~/.transformers_model'), 'r') as file:
model = file.read().replace('\n', '')
except:
pass
self.model_obj, self.tokenizer = self._model_and_tokenizer(model, tokenizer, **kwargs)
self.model_name = model if isinstance(model, str) else model.__class__.__name__
self.caching = caching
self.current_time = time.time()
self.call_history = collections.deque()
self.temperature = temperature
self.token_healing = token_healing
self.acceleration = acceleration
if device is not None: # set the device if requested
self.model_obj = self.model_obj.to(device)
self.device = self.model_obj.device # otherwise note the current device
self._token_prefix_map = self._build_token_prefix_map(model)
def new_string_builder(self, starting_ids=None):
return TransformersStringBuilder(self.tokenizer, starting_ids)
def prefix_matches(self, prefix):
""" Return the list of tokens that match the given prefix.
"""
return [v for arr in self._token_prefix_map.values(prefix=prefix) for v in arr]
def encode(self, string, **kwargs):
return self.tokenizer.encode(string, **kwargs)
def decode(self, tokens, **kwargs):
return self.tokenizer.decode(tokens, **kwargs)
def id_to_token(self, id):
return self.tokenizer.convert_ids_to_tokens([id])[0]
def token_to_id(self, token):
return self.tokenizer.convert_tokens_to_ids([token])[0]
def end_of_text(self):
return self.tokenizer.eos_token
@staticmethod
def role_start(role):
raise NotImplementedError("In order to use chat role tags you need to use a chat-specific subclass of Transformers for your LLM from guidance.transformers.*!")
def _build_token_prefix_map(self, model_name):
""" Build a map from token to index.
"""
token_map = pygtrie.CharTrie()
for i in range(self.tokenizer.vocab_size):
s = self.id_to_token(i)
if s in token_map:
token_map[s].append(i) # handle duplicate token encodings... (GPT2 BPE has this oddly enough)
else:
token_map[s] = [i]
return token_map
def _model_and_tokenizer(self, model, tokenizer, **kwargs):
# intantiate the model and tokenizer if needed
if isinstance(model, str):
# make sure transformers is installed
try:
import transformers
except:
raise Exception("Please install transformers with `pip install transformers` in order to use guidance.llms.Transformers!")
if tokenizer is None:
tokenizer = transformers.AutoTokenizer.from_pretrained(model, **kwargs)
model = transformers.AutoModelForCausalLM.from_pretrained(model, **kwargs)
assert tokenizer is not None, "You must give a tokenizer object when you provide a model object (as opposed to just a model name)!"
return model, tokenizer
def session(self, asynchronous=False):
if asynchronous:
return TransformersSession(self)
else:
return SyncSession(TransformersSession(self))
class TransformersSession(LLMSession):
def __init__(self, llm):
super().__init__(llm)
self._past_key_values = None
self._prefix_cache = []
def __enter__(self):
# we only need decorators if we are using token acceleration
if self.llm.acceleration:
# decorate the prep step to preserve the initial past key values we have passed
def prep_step_decorator(method):
def decorate_prep_step(input_ids, **kwargs):
# if we are extending the input ids with the cached tokens then
# don't pass past key values to the input prep step, otherwise it
# would delete all but the last input_ids, and we have already removed
# the correct prefix from the input_ids (which is not always all but the last one)
if len(self._prefix_cache) > 0:
kwargs["past"] = None
input_ids = input_ids[:,len(self._prefix_cache):]
# if "attention_mask" in kwargs:
# kwargs["attention_mask"] = kwargs["attention_mask"][:,len(self._prefix_cache):]
model_kwargs = method(input_ids, **kwargs)
# provide the past key values for the actual model call
model_kwargs["past_key_values"] = self._past_key_values
if "position_ids" in model_kwargs: # models like OPT update the position ids internally
model_kwargs["position_ids"] = model_kwargs["position_ids"][:,len(self._prefix_cache):] # and update position ids
# we only need to do this first time, after that the past key values will
# be up until the last token, just like transformer models normally expect
# so we can clear our cache and let transformers cache like normal
self._prefix_cache = [] # this will get refilled once the generate call is done
return model_kwargs
else:
return method(input_ids, **kwargs)
decorate_prep_step.__func__ = method.__func__ # make us still look like a bound method
return decorate_prep_step
if getattr(self.llm.model_obj, "_orig_prepare_method", None) is None:
self.llm.model_obj._orig_prepare_method = self.llm.model_obj.prepare_inputs_for_generation
self.llm.model_obj.prepare_inputs_for_generation = prep_step_decorator(self.llm.model_obj._orig_prepare_method)
# decorate the update step to save the past key values
def update_step_decorator(method):
def decorate_update_step(outputs, *args, **kwargs):
# save the past key values
self._past_key_values = getattr(outputs, "past_key_values", None)
return method(outputs, *args, **kwargs)
return decorate_update_step
if getattr(self.llm.model_obj, "_orig_update_method", None) is None:
self.llm.model_obj._orig_update_method = self.llm.model_obj._update_model_kwargs_for_generation
self.llm.model_obj._update_model_kwargs_for_generation = update_step_decorator(self.llm.model_obj._orig_update_method)
return self
async def __call__(self, prompt, stop=None, stop_regex=None, temperature=None, n=1, max_tokens=1000, logprobs=None, top_p=1.0, echo=False, logit_bias=None, token_healing=None, pattern=None, stream=False, cache_seed=0, caching=None):
""" Generate a completion of the given prompt.
"""
# fill in defaults
if temperature is None:
temperature = self.llm.temperature
if token_healing is None:
token_healing = self.llm.token_healing
# generate the cache key
cache_params = self._cache_params(locals().copy())
llm_cache = self.llm.cache
key = llm_cache.create_key(self.llm.llm_name, **cache_params)
# set the stop patterns
if stop is not None:
if isinstance(stop, str):
stop_regex = [regex.escape(stop)]
else:
stop_regex = [regex.escape(s) for s in stop]
if isinstance(stop_regex, str):
stop_regex = [stop_regex]
if stop_regex is None:
stop_regex = []
stop_regex.append(regex.escape(self.llm.tokenizer.eos_token)) # make sure the end of sequence token is always included
# handle caching
in_cache = key in llm_cache
not_caching = (caching is not True and not self.llm.caching) or caching is False
if not in_cache or not_caching:
import transformers
assert prompt != "", "You must provide a non-zero length prompt to the Transformers language model!"
# encode the prompt
import torch
# encoded2 = self.llm.encode([prompt for _ in range(n)], return_tensors="pt")
encoded = self.llm.encode(prompt)
encoded = torch.tensor([encoded for _ in range(n)])
if self.llm.device is not None:
encoded = encoded.to(self.llm.device)
input_ids = encoded#["input_ids"]
# attention_mask = encoded["attention_mask"]
model_config = self.llm.model_obj.config
# ensure that we are extending a common sequence batch (our token healing assumes this right now)
assert (input_ids[0,-1] == input_ids[:,-1]).all(), "The current token healing implementation assumes that batches are reps of the same sequence!"
healed_token_ids = []
processors = []
stoppers = []
# save what the prompt looks like when coded and then decoded (this captures added start tokens, etc.)
coded_prompt = self.llm.decode(input_ids[0])
# setup token healing
if token_healing:
healer = TokenHealingLogitsProcessor(self.llm, model_config.vocab_size, input_ids[0])
healed_token_ids = healer.healed_token_ids
if len(healed_token_ids) > 0:
input_ids = input_ids[:,:-len(healed_token_ids)]
# attention_mask = attention_mask[:,:-len(healed_token_ids)]
max_tokens += len(healed_token_ids) # increase to account for the tokens we regen for token healing
processors.append(healer)
# setup logit biasing
if logit_bias is not None:
processors.append(BiasLogitsProcessor(self.llm, model_config.vocab_size, logit_bias))
# find the max context length
possible_attributes = ["max_sequence_length", "max_seq_len", "model_max_length", "n_positions", "max_position_embeddings"]
max_context = None
for obj in [model_config, self.llm.tokenizer]:
for attr in possible_attributes:
if max_context is None:
max_context = getattr(obj, attr, None)
else:
break
assert max_context is not None, "Could not find a max context length for the model! Tried: "+", ".join(possible_attributes)
# make sure we don't run off the end of the model
if max_tokens + len(input_ids[0]) > max_context:
max_tokens = max_context - len(input_ids[0])
# find how much of the prompt is cached
prefix_match_len = 0
for token in input_ids[0]:
if prefix_match_len >= len(self._prefix_cache) or token != self._prefix_cache[prefix_match_len]:
break
else:
prefix_match_len += 1
# we always need to run the model on at least one token so transformers is happy
if prefix_match_len == len(input_ids[0]):
prefix_match_len -= 1
# trim the cache to what we can use
if prefix_match_len < len(self._prefix_cache): # prefix_match_len > 0 and
self._past_key_values = tuple((key[:,:,:prefix_match_len,:],value[:,:,:prefix_match_len,:]) for key,value in self._past_key_values) # TODO: this is specific to the GPT2 tensor layout
self._prefix_cache = self._prefix_cache[:prefix_match_len]
# add support for pattern guidance
if pattern is not None:
processors.append(RegexLogitsProcessor(pattern, stop_regex, self.llm, model_config.vocab_size, temperature == 0, len(coded_prompt), self.llm.tokenizer.eos_token_id))
if stop_regex is not None:
stoppers.append(RegexStoppingCriteria(stop_regex, self.llm, len(coded_prompt)))
# a streamer to handle potentially partial output
streamer = TransformersStreamer(
input_ids=input_ids,
stop_regex=stop_regex,
healed_token_ids=healed_token_ids,
prefix_length=len(coded_prompt),
llm=self.llm,
max_new_tokens=max_tokens,
logprobs=logprobs
)
# the args for the transformers generate call
generate_args = dict(
inputs=input_ids,
# attention_mask=attention_mask,
# position_ids=position_ids,
temperature=temperature,
max_new_tokens=max_tokens,
top_p=top_p,
pad_token_id=model_config.pad_token_id if model_config.pad_token_id is not None else self.llm.tokenizer.eos_token_id,
logits_processor=transformers.LogitsProcessorList(processors),
stopping_criteria=transformers.StoppingCriteriaList(stoppers),
# past_key_values=self._past_key_values,
output_scores=logprobs is not None and logprobs > 0,
return_dict_in_generate=True
)
# override the model config for do_sample when the temperature requires it
do_sample = getattr(model_config, "do_sample", None)
if do_sample is True and temperature == 0:
generate_args["do_sample"] = False
elif do_sample is False and temperature > 0:
generate_args["do_sample"] = True
# if we are streaming then we need to run the inference process in a separate thread
if stream:
generate_args["streamer"] = streamer
thread = threading.Thread(target=self.llm.model_obj.generate, kwargs=generate_args)
thread.start()
return self._stream_then_save(streamer, key, thread)
# if we are not streaming we still manually use the streamer for consistency
else:
generated_sequence = self.llm.model_obj.generate(**generate_args)
streamer.put(generated_sequence)
self.llm.cache[key] = streamer.__next__()
self._update_prefix_cache(streamer)
return llm_cache[key]
def _update_prefix_cache(self, streamer):
# note what we now have cached and ready for our next call in this session
if self._past_key_values and len(streamer.generated_sequence) == 1:
self._prefix_cache = streamer.generated_sequence[0][:self._past_key_values[0][0].shape[-2]] # self._past_key_values is already saved, this just aligns with it
def _stream_then_save(self, streamer, key, thread):
list_out = []
for out in streamer:
list_out.append(out)
yield out
thread.join() # clean up the thread
self.llm.cache[key] = list_out
self._update_prefix_cache(streamer)
self._last_computed_key = key
def __exit__(self, exc_type, exc_value, traceback):
""" Restore the model to its original state by removing monkey patches.
"""
if getattr(self.llm.model_obj, "_orig_prepare_method", None) is not None:
self.llm.model_obj.prepare_inputs_for_generation = self.llm.model_obj._orig_prepare_method
del self.llm.model_obj._orig_prepare_method
if getattr(self.llm.model_obj, "_orig_update_method", None) is not None:
self.llm.model_obj._update_model_kwargs_for_generation = self.llm.model_obj._orig_update_method
del self.llm.model_obj._orig_update_method
return False
class TokenHealingLogitsProcessor():
""" Token healing.
When we tokenize the prompt the last token(s) we get are not the last token(s) we would
    have gotten if the prompt + generation was concatenated and then tokenized. This
is not good because it does not align with the pretraining of the model, so
we "heal" this boundary by backing up as many tokens as needed and then forcing the first tokens
generated to start with the prefix of the tokens we removed from the prompt. This could
result in the same tokens at the end of the prompt, or some suffix of the tokens we removed
could be replaced by a single longer one that crosses the prompt boundary.
"""
def __init__(self, model, vocab_size, prompt_ids, bias_value=100.):
""" Build a new TokenHealingLogitsProcessor.
Note that bias_value is in score space (log-odds normally) and should be
enough to ensure those tokens are the only ones used.
"""
# loop backwards through the prompt tokens looking for places where there are possible
# extensions that cross the prompt boundary
prefix_str = ""
self.extension_tokens = []
for i in range(len(prompt_ids)-1, max(len(prompt_ids)-10, -1), -1):
token_str = model.id_to_token(prompt_ids[i])
prefix_str = token_str + prefix_str
try:
extensions = model.prefix_matches(prefix_str)
except KeyError: # this must be a special token outside the vocab, so we assume it does not have any valid extensions
extensions = []
self.extension_tokens.append(extensions)
if i != len(prompt_ids)-1:
self.extension_tokens[-1].append(prompt_ids[i]) # add the token used in the input prompt to the list of possible extensions
self.extension_tokens = self.extension_tokens[::-1]
        # prune off any extension token positions that don't have multiple possible extensions
found_extensions = False
for i in range(len(self.extension_tokens)):
if len(self.extension_tokens[i]) > 1:
self.extension_tokens = self.extension_tokens[i:]
found_extensions = True
break
if found_extensions:
self.healed_token_ids = prompt_ids[len(prompt_ids)-len(self.extension_tokens):]
else:
self.extension_tokens = []
self.healed_token_ids = []
# if we have multiple possible completions past the last token, then biasing is needed
if len(self.extension_tokens) > 0:
import torch
# build a set of masks for each possible extension position
self.token_masks = []
for i in range(len(self.extension_tokens)):
token_mask = torch.zeros(vocab_size)
token_mask.scatter_(0, torch.tensor(self.extension_tokens[i]), bias_value)
if model.device is not None:
token_mask = token_mask.to(model.device)
self.token_masks.append(token_mask)
self.num_extensions = 0
def __call__(self, input_ids, scores):
# we only bias the first token generated
if self.num_extensions >= len(self.extension_tokens):
return scores
self.num_extensions += 1
# check if the last token was from the original prompt (if not then we have already "healed" by choosing a token that crosses the prompt boundary)
if self.num_extensions > 1 and input_ids[0][-1] != self.healed_token_ids[self.num_extensions-2]:
return scores
# handle list inputs
if isinstance(scores, list):
import torch
scores = torch.tensor(scores)
# make only allowed tokens possible
return scores + self.token_masks[self.num_extensions-1]
class BiasLogitsProcessor():
""" Simple token biasing.
"""
def __init__(self, model, vocab_size, logit_bias):
""" Build a new BiasLogitsProcessor.
"""
import torch
self.bias_vector = torch.zeros(vocab_size)
for token, bias in logit_bias.items():
self.bias_vector[token] = bias
self.bias_vector = self.bias_vector.to(model.device)
def __call__(self, input_ids, scores):
# handle list inputs
if isinstance(scores, list):
import torch
scores = torch.tensor(scores)
return scores + self.bias_vector
class RegexLogitsProcessor():
""" Pattern guiding.
Guide generation to match a regular expression.
TODO: currently slow, could be made much faster by doing rejection sampling inline with the sampling/greedy process.
"""
def __init__(self, pattern, stop_regex, llm, vocab_size, is_greedy, prefix_length, eos_token_id, max_consider=500000):
""" Build a new TokenHealingLogitsProcessor.
Parameters
----------
pattern : str
The regex pattern we are seeking to match.
stop_regex : str or list of str
The stop regex(s) allowed to come after this pattern.
llm : function
The llm.
vocab_size : int
The size of the vocabulary.
is_greedy : bool
The token selection mode currently in use. We need to know this so we can
effectively take over that sampling process inside this logit processor.
eos_token_id : int
The end of the stop token of the model.
max_consider : int
How many top values to bias. Note that we could remove this option once this
processor is performance optimized (by integrating it into the sampling/greedy process).
"""
import torch
if isinstance(stop_regex, str):
stop_regex = [stop_regex]
self.pattern_no_stop = regex.compile(pattern)
self.pattern = regex.compile(pattern + "(" + "|".join(stop_regex) + ")?")
self.llm = llm
self.is_greedy = is_greedy
self.prefix_length = prefix_length
self.max_consider = max_consider
self.bias_vector = torch.zeros(vocab_size)
self.current_strings = None
self.current_length = 0
self.forced_chars = 0
self.eos_token_id = eos_token_id
def __call__(self, input_ids, scores):
import torch
# handle 1D inputs
one_dim = False
if not isinstance(input_ids[0], collections.abc.Sequence) and not (hasattr(input_ids[0], "shape") and len(input_ids[0].shape) > 0):
one_dim = True
input_ids = torch.tensor(input_ids).unsqueeze(0)
scores = torch.tensor(scores).unsqueeze(0)
# extend our current strings
if self.current_strings is None:
self.current_strings = [self.llm.new_string_builder() for i in range(len(input_ids))]
for i in range(len(self.current_strings)):
self.current_strings[i].extend(input_ids[i][self.current_length:])
assert len(self.current_strings) == 1, "Regex patterns guides do not support batched inference with Transformers yet!"
self.current_length = len(input_ids[0])
# compute the bias values
self.bias_vector[:] = 0
sort_inds = torch.argsort(scores, 1, True)
to_bias = []
for i in range(min(sort_inds.shape[1], self.max_consider)):
self.current_strings[0].extend([sort_inds[0,i]])
proposed_string = str(self.current_strings[0])[self.prefix_length:]
self.current_strings[0].pop()
m = self.pattern.fullmatch(proposed_string, partial=True) # partial means we don't match currently but might as the string grows
if m:
to_bias.append(int(sort_inds[0, i]))
if self.is_greedy: # TODO: make this much faster for non-greedy sampling (by tracking how much prob mass we have looked through perhaps...)
break # we are done if we are doing greedy sampling and we found the top valid hit
# if we found no more valid tokens then we just end the sequence
if not len(to_bias):
to_bias = [self.eos_token_id]
# bias allowed tokens
min_to_bias = float(scores[0, to_bias].min())
bias_value = scores[0, sort_inds[0, 0]] - min_to_bias + 10 # make sure the tokens that fit the pattern have higher scores than the top value
for x in to_bias:
self.bias_vector[x] = bias_value
out = scores + self.bias_vector.to(scores.device)
if one_dim:
return out[0]
else:
return out
class RegexStoppingCriteria():
def __init__(self, stop_pattern, llm, prefix_length):
if isinstance(stop_pattern, str):
self.stop_patterns = [regex.compile(stop_pattern)]
else:
self.stop_patterns = [regex.compile(pattern) for pattern in stop_pattern]
self.prefix_length = prefix_length
self.llm = llm
self.current_strings = None
self.current_length = 0
def __call__(self, input_ids, scores, **kwargs):
# handle 1D inputs
if not isinstance(input_ids[0], collections.abc.Sequence) and not (hasattr(input_ids[0], "shape") and len(input_ids[0].shape) > 0):
input_ids = [input_ids]
# extend our current strings
if self.current_strings is None:
self.current_strings = [self.llm.new_string_builder() for _ in range(len(input_ids))]
for i in range(len(self.current_strings)):
self.current_strings[i].extend(input_ids[i][self.current_length:])
self.current_length = len(input_ids[0])
# check if all of the strings match a stop string (and hence we can stop the batch inference)
all_done = True
for i in range(len(self.current_strings)):
found = False
for s in self.stop_patterns:
if s.search(str(self.current_strings[i])[self.prefix_length:]):
found = True
if not found:
all_done = False
break
return all_done
class TransformersStringBuilder():
"""This deals with the complexity of building up a string from tokens bit by bit."""
def __init__(self, tokenizer, starting_ids=None):
self.tokenizer = tokenizer
self.token_strings = []
self._joint_string = ""
if starting_ids is not None:
self.extend(starting_ids)
def extend(self, new_ids):
new_token_strings = self.tokenizer.convert_ids_to_tokens(new_ids)
self.token_strings.extend(new_token_strings)
new_str = self.tokenizer.convert_tokens_to_string(self.token_strings)
diff_str = new_str[len(self._joint_string):]
self._joint_string = new_str
return diff_str
def pop(self):
"""Remove the last token from the string and return text it removed."""
self.token_strings.pop()
new_str = self.tokenizer.convert_tokens_to_string(self.token_strings)
diff_str = self._joint_string[len(new_str):]
self._joint_string = new_str
return diff_str
def __str__(self):
return self._joint_string
def __len__(self):
return len(self._joint_string)
class TransformersStreamer():
def __init__(self, input_ids, stop_regex, healed_token_ids, prefix_length, llm, max_new_tokens, logprobs, timeout=None):
self.input_ids = input_ids
self.stop_regex = stop_regex
self.healed_token_ids = healed_token_ids
self.logprobs = logprobs
self.llm = llm
self.max_total_tokens = max_new_tokens + len(input_ids[0])
self.timeout = timeout
self.str_pos = [prefix_length for i in range(len(self.input_ids))]
self.out_queue = queue.Queue()
self.sequence_pos = [len(self.input_ids[0]) for i in range(len(self.input_ids))]
self.generated_sequence = [[] for i in range(len(self.input_ids))]
self.display_logprobs = [[] for i in range(len(self.input_ids))]
self.generated_string = [self.llm.new_string_builder(input_ids[0]) for i in range(len(self.input_ids))]
self.prefix_cache = []
def put(self, token_obj):
import torch
if isinstance(token_obj, torch.Tensor):
new_tokens = token_obj
else:
new_tokens = token_obj['sequences']
if isinstance(new_tokens, torch.Tensor):
new_tokens = new_tokens.cpu()
# if we are given a single sequence, then make it a batch of size 1
if len(new_tokens.shape) == 1:
new_tokens = new_tokens.unsqueeze(0)
# extract the scores if we are given them (and format them to be the same shape as the tokens)
if self.logprobs:
assert len(new_tokens) == 1, "logprobs are not supported for batched generation right now in guidance.llms.Transformers"
new_scores = [torch.nn.functional.log_softmax(x, dim=-1).cpu() for x in token_obj['scores']]
len_diff = len(new_tokens[0]) - len(new_scores)
if len_diff > 0:
new_scores = [None for i in range(len_diff)] + new_scores
new_scores = [new_scores]
out = {"choices": [None for i in range(len(self.input_ids))]}
put_data = False
for i in range(len(self.input_ids)):
self.generated_sequence[i].extend(list(new_tokens[i]))
# save logprobs if needed
if self.logprobs:
for scores in new_scores[i]:
if scores is None:
self.display_logprobs[i].append(None)
else:
top_inds = scores[0].argsort(descending=True)[:self.logprobs] # TODO: verify the [0] is always correct
self.display_logprobs[i].append({self.llm.id_to_token(j): float(scores[0][j]) for j in top_inds})
if self.sequence_pos[i] < len(self.generated_sequence[i]):
display_tokens = list(self.generated_sequence[i][self.sequence_pos[i]:])
val = self.generated_string[i].extend(display_tokens)
# val = self.llm.decode(display_tokens)#[self.llm._prefix_token_id] + display_tokens)[len(self.llm._prefix_token):]
# self.generated_string[i] += val
if self.str_pos[i] < len(self.generated_string[i]):
val = str(self.generated_string[i])[self.str_pos[i]:]
finish_reason = None
# check why we stopped
stop_pos = len(val) + 1
if len(self.generated_sequence[i]) >= self.max_total_tokens:
finish_reason = "length"
elif self.generated_sequence[i][-1] == self.llm.tokenizer.eos_token_id:
finish_reason = "endoftext"
eos_str = self.generated_string[i].pop() # remove the end of text token
stop_pos = len(val) - len(eos_str)
# trim off the stop regex matches if needed
found_partial = False
stop_text = None
if self.stop_regex is not None:# and (finish_reason is None or len(self.input_ids) > 1):
stop_regex_obj = [regex.compile(s) for s in self.stop_regex]
for s in stop_regex_obj:
m = s.search(val, partial=True)
if m:
span = m.span()
if span[1] > span[0]:
if m.partial: # we might be starting a stop sequence, so we can't emit anything yet
found_partial = True
break
else:
stop_text = val[span[0]:span[1]]
stop_pos = min(span[0], stop_pos)
break
# record the reason we stopped (if we have stopped)
if stop_pos <= len(val):
finish_reason = "stop"
# emit the data if we are not potentially in the middle of a stop sequence
if not found_partial or finish_reason is not None:
out["choices"][i] = {
"text": val[:stop_pos],
"finish_reason": finish_reason,
"stop_text": stop_text,
"logprobs": {
# "token_healing_prefix": self.last_token_str,
"top_logprobs": self.display_logprobs[i][self.sequence_pos[i]:]
}
}
self.str_pos[i] = len(self.generated_string[i])
put_data = True
self.sequence_pos[i] = len(self.generated_sequence[i])
if put_data:
self.out_queue.put(out)
def end(self):
# make sure we have flushed all of the data
for i in range(len(self.input_ids)):
assert self.str_pos[i] >= len(self.generated_string[i]), "Not all data was flushed, this means generation stopped for an unknown reason!"
self.out_queue.put(None)
def __iter__(self):
return self
def __next__(self):
value = self.out_queue.get(timeout=self.timeout)
if value is None:
raise StopIteration()
else:
return value
| [] |
2024-01-10 | jaimevalero/guidance | guidance~llms~caches~_diskcache.py | import os
import diskcache
import platformdirs
from guidance.llms.caches import Cache
class DiskCache(Cache):
"""DiskCache is a cache that uses diskcache lib."""
def __init__(self, llm_name: str):
self._diskcache = diskcache.Cache(
os.path.join(
platformdirs.user_cache_dir("guidance"), f"_{llm_name}.diskcache"
)
)
def __getitem__(self, key: str) -> str:
return self._diskcache[key]
def __setitem__(self, key: str, value: str) -> None:
self._diskcache[key] = value
def __contains__(self, key: str) -> bool:
return key in self._diskcache
def clear(self):
self._diskcache.clear()
| [] |
2024-01-10 | zhlzhl/RL_flex_design | setup.py | from os.path import join, dirname, realpath
from setuptools import setup, __version__
import sys
assert sys.version_info.major == 3 and sys.version_info.minor >= 6, \
"The Spinning Up repo is designed to work with Python 3.6 and greater." \
+ "Please install it before proceeding."
with open(join("spinup", "version.py")) as version_file:
exec(version_file.read())
setup(
name='spinup',
py_modules=['spinup'],
version=__version__,#'0.1',
install_requires=[
'cloudpickle==1.2.1',
'gym[atari,box2d,classic_control]>=0.10.8',
'ipython',
'joblib',
'matplotlib==3.1.1',
'mpi4py',
'numpy',
'pandas',
'pytest',
'psutil',
'scipy',
'seaborn>=0.8.1',
'tensorflow>=1.8.0,<2.0',
'tqdm',
'networkx',
'tensorboard',
'pyglet'
],
description="Flexibility Design with Neural Reinforcement Learning developed based on Spinningup from OpenAI.",
author="Lei Zhang",
)
| [] |
2024-01-10 | artitw/tensor2tensor | tensor2tensor~rl~dopamine_connector.py | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Connects dopamine to as the another rl traning framework."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from dopamine.agents.dqn import dqn_agent
from dopamine.replay_memory import circular_replay_buffer
from dopamine.replay_memory.circular_replay_buffer import OutOfGraphReplayBuffer
from dopamine.replay_memory.circular_replay_buffer import ReplayElement
import gym
from gym import spaces
from gym import Wrapper
from gym.wrappers import TimeLimit
import numpy as np
from tensor2tensor.rl.envs.simulated_batch_gym_env import FlatBatchEnv
from tensor2tensor.rl.policy_learner import PolicyLearner
import tensorflow as tf
# pylint: disable=g-import-not-at-top
try:
import cv2
except ImportError:
cv2 = None
try:
from dopamine.atari import run_experiment
except ImportError:
run_experiment = None
# pylint: enable=g-import-not-at-top
class ResizeObservation(gym.ObservationWrapper):
"""TODO(konradczechowski): Add doc-string."""
def __init__(self, env, size=84):
"""Based on WarpFrame from openai baselines atari_wrappers.py.
Dopamine also uses cv2.resize(..., interpolation=cv2.INTER_AREA).
Args:
      env: gym environment whose uint8 image observations will be resized.
      size: target height and width of the resized observation.
"""
gym.ObservationWrapper.__init__(self, env)
self.width = size
self.height = size
assert env.observation_space.dtype == np.uint8
self.observation_space = spaces.Box(
low=0,
high=255,
shape=(self.height, self.width, env.observation_space.shape[2]),
dtype=np.uint8)
def observation(self, frame):
if not cv2:
return frame
return cv2.resize(
frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
class GameOverOnDone(Wrapper):
"""TODO(konradczechowski): Add doc-string."""
def __init__(self, env):
Wrapper.__init__(self, env)
self.game_over = False
def reset(self, **kwargs):
self.game_over = False
return self.env.reset(**kwargs)
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.game_over = done
return ob, reward, done, info
class _DQNAgent(dqn_agent.DQNAgent):
"""Modify dopamine DQNAgent to match our needs.
Allow passing batch_size and replay_capacity to ReplayBuffer, allow not using
(some of) terminal episode transitions in training.
"""
def __init__(self, replay_capacity, batch_size, generates_trainable_dones,
**kwargs):
self._replay_capacity = replay_capacity
self._batch_size = batch_size
self._generates_trainable_dones = generates_trainable_dones
super(_DQNAgent, self).__init__(**kwargs)
def _build_replay_buffer(self, use_staging):
"""Build WrappedReplayBuffer with custom OutOfGraphReplayBuffer."""
replay_buffer_kwargs = dict(
observation_shape=dqn_agent.NATURE_DQN_OBSERVATION_SHAPE,
stack_size=dqn_agent.NATURE_DQN_STACK_SIZE,
replay_capacity=self._replay_capacity,
batch_size=self._batch_size,
update_horizon=self.update_horizon,
gamma=self.gamma,
extra_storage_types=None,
observation_dtype=np.uint8,
)
replay_memory = _OutOfGraphReplayBuffer(
artificial_done=not self._generates_trainable_dones,
**replay_buffer_kwargs)
return circular_replay_buffer.WrappedReplayBuffer(
wrapped_memory=replay_memory,
use_staging=use_staging,
**replay_buffer_kwargs)
class _OutOfGraphReplayBuffer(OutOfGraphReplayBuffer):
"""Replay not sampling artificial_terminal transition.
Adds to stored tuples 'artificial_done' field (as last ReplayElement).
When sampling, ignores tuples for which artificial_done is True.
When adding new attributes check if there are loaded from disk, when using
load() method.
Attributes:
are_terminal_valid: A boolean indicating if newly added terminal
transitions should be marked as artificially done. Replay data loaded
from disk will not be overridden.
"""
def __init__(self, artificial_done, **kwargs):
extra_storage_types = kwargs.pop("extra_storage_types", None) or []
extra_storage_types.append(ReplayElement("artificial_done", (), np.uint8))
super(_OutOfGraphReplayBuffer, self).__init__(
extra_storage_types=extra_storage_types, **kwargs)
self._artificial_done = artificial_done
def is_valid_transition(self, index):
valid = super(_OutOfGraphReplayBuffer, self).is_valid_transition(index)
valid &= not self.get_artificial_done_stack(index).any()
return valid
def get_artificial_done_stack(self, index):
return self.get_range(self._store["artificial_done"],
index - self._stack_size + 1, index + 1)
def add(self, observation, action, reward, terminal, *args):
"""Append artificial_done to *args and run parent method."""
# If this will be a problem for maintenance, we could probably override
# DQNAgent.add() method instead.
artificial_done = self._artificial_done and terminal
args = list(args)
args.append(artificial_done)
return super(_OutOfGraphReplayBuffer, self).add(observation, action, reward,
terminal, *args)
def load(self, *args, **kwargs):
# Check that appropriate attributes are not overridden
are_terminal_valid = self._artificial_done
super(_OutOfGraphReplayBuffer, self).load(*args, **kwargs)
assert self._artificial_done == are_terminal_valid
def get_create_agent(agent_kwargs):
"""TODO(): Document."""
def create_agent(sess, environment, summary_writer=None):
"""Creates a DQN agent.
Simplified version of `dopamine.atari.train.create_agent`
Args:
sess: a session
environment: an environment
summary_writer: a summary writer.
Returns:
a DQN agent.
"""
return _DQNAgent(
sess=sess,
num_actions=environment.action_space.n,
summary_writer=summary_writer,
tf_device="/gpu:*",
**agent_kwargs)
return create_agent
def get_create_env_fun(batch_env_fn, time_limit):
"""TODO(konradczechowski): Add doc-string."""
def create_env_fun(game_name, sticky_actions=True):
del game_name, sticky_actions
batch_env = batch_env_fn(in_graph=False)
env = FlatBatchEnv(batch_env)
env = TimeLimit(env, max_episode_steps=time_limit)
env = ResizeObservation(env) # pylint: disable=redefined-variable-type
env = GameOverOnDone(env)
return env
return create_env_fun
def _parse_hparams(hparams):
"""TODO(konradczechowski): Add doc-string."""
prefixes = ["agent_", "optimizer_", "runner_", "replay_buffer_"]
ret = []
for prefix in prefixes:
ret_dict = {}
for key in hparams.values():
if prefix in key:
par_name = key[len(prefix):]
ret_dict[par_name] = hparams.get(key)
ret.append(ret_dict)
return ret
def _get_optimizer(params):
assert params["class"] == "RMSProp", "RMSProp is the only one supported"
params.pop("class")
return tf.train.RMSPropOptimizer(**params)
class DQNLearner(PolicyLearner):
"""Interface for learning dqn implemented in dopamine."""
def __init__(self, frame_stack_size, base_event_dir, agent_model_dir):
super(DQNLearner, self).__init__(frame_stack_size, base_event_dir,
agent_model_dir)
self.completed_iterations = 0
def _target_iteractions_and_steps(self, num_env_steps, save_continuously,
save_every_steps):
if save_continuously:
training_steps_per_iteration = min(num_env_steps, save_every_steps)
num_iterations_to_do = num_env_steps // training_steps_per_iteration
else:
num_iterations_to_do = 1
training_steps_per_iteration = num_env_steps
target_iterations = self.completed_iterations + num_iterations_to_do
return target_iterations, training_steps_per_iteration
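  # Added note (not in the original source): e.g. num_env_steps=1000 with
  # save_every_steps=300 and save_continuously=True yields 3 iterations of 300 training
  # steps each (integer division drops the remainder); with save_continuously=False it
  # is a single iteration of 1000 steps.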
def create_runner(self, env_fn, hparams, target_iterations,
training_steps_per_iteration):
# pylint: disable=unbalanced-tuple-unpacking
agent_params, optimizer_params, \
runner_params, replay_buffer_params = _parse_hparams(hparams)
# pylint: enable=unbalanced-tuple-unpacking
optimizer = _get_optimizer(optimizer_params)
agent_params["optimizer"] = optimizer
agent_params.update(replay_buffer_params)
create_agent_fn = get_create_agent(agent_params)
runner = run_experiment.Runner(
base_dir=self.agent_model_dir,
create_agent_fn=create_agent_fn,
create_environment_fn=get_create_env_fun(
env_fn, time_limit=hparams.time_limit),
evaluation_steps=0,
num_iterations=target_iterations,
training_steps=training_steps_per_iteration,
**runner_params)
return runner
def train(self,
env_fn,
hparams,
simulated,
save_continuously,
epoch,
sampling_temp=1.0,
num_env_steps=None,
env_step_multiplier=1,
eval_env_fn=None,
report_fn=None):
# TODO(konradczechowski): evaluation during training (with eval_env_fun)
del epoch, eval_env_fn, simulated, report_fn
if num_env_steps is None:
num_env_steps = hparams.num_frames
hparams = copy.copy(hparams)
hparams.set_hparam(
"agent_epsilon_eval", min(hparams.agent_epsilon_eval * sampling_temp, 1)
)
target_iterations, training_steps_per_iteration = \
self._target_iteractions_and_steps(
num_env_steps=num_env_steps * env_step_multiplier,
save_continuously=save_continuously,
save_every_steps=hparams.save_every_steps,)
with tf.Graph().as_default():
runner = self.create_runner(env_fn, hparams, target_iterations,
training_steps_per_iteration)
runner.run_experiment()
self.completed_iterations = target_iterations
def evaluate(self, env_fn, hparams, sampling_temp):
target_iterations = 0
training_steps_per_iteration = 0
hparams = copy.copy(hparams)
hparams.set_hparam(
"agent_epsilon_eval", min(hparams.agent_epsilon_eval * sampling_temp, 1)
)
create_environment_fn = get_create_env_fun(
env_fn, time_limit=hparams.time_limit)
env = create_environment_fn(
game_name="unused_arg", sticky_actions="unused_arg")
with tf.Graph().as_default():
runner = self.create_runner(env_fn, hparams, target_iterations,
training_steps_per_iteration)
agent = runner._agent # pylint: disable=protected-access
del runner
agent.eval = True
# TODO(konradczechowski): correct number of episodes, when this will
# be hparam
for _ in range(30):
# Run single episode
ob = env.reset()
action = agent.begin_episode(ob)
done = False
while not done:
ob, reward, done, _ = env.step(action)
action = agent.step(reward, ob)
| [] |
2024-01-10 | a00012025/langchain-test | translate.py | from langchain import LLMChain
from langchain.prompts import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.chat_models import ChatOpenAI
from dotenv import load_dotenv
import tiktoken
load_dotenv()
def count_token(input: str):
encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
num_tokens = len(encoding.encode(input))
return num_tokens
llm = ChatOpenAI(temperature=0)
lang1 = 'zh'
lang2 = 'en'
def translator(input_text):
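    # Added note: the Chinese system prompt below instructs the model to act purely as a
    # translator from lang1 to lang2 (never answering questions), to translate Markdown
    # link text but leave link targets untouched, and to preserve all Markdown syntax.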
systemmessage = f'''請擔任翻譯機,而不要回答任何問題,將任何輸入都進行翻譯。翻譯規則:<將所有輸入從 {lang1} 語言翻譯成 {lang2} 語言>。保留所有 markdown 語法,以及 markdown 連結內容必須翻譯,但連結本身不用翻譯,原封不動的用 markdown 格式輸出。'''
default_prompt = ChatPromptTemplate(
messages=[
SystemMessagePromptTemplate.from_template(systemmessage),
HumanMessagePromptTemplate.from_template("{input}")
]
)
default_chain = LLMChain(llm=llm, prompt=default_prompt)
output = default_chain.run(input=input_text)
return output
if __name__ == '__main__':
for article_day in range(12, 31):
print(f'Processing day {article_day}...')
inputs = []
with open(f'data/day{article_day}.md', 'r') as f:
inputs = f.read().split('\n')
sections = []
for i in range(len(inputs)):
if len(sections) == 0:
sections.append(inputs[i])
continue
if inputs[i].strip() == '':
continue
if inputs[i-1].strip() != '':
sections[-1] = sections[-1] + '\n' + inputs[i]
else:
sections.append(inputs[i])
outputs = []
i = 0
while i < len(inputs):
# Get next lines for translation without exceeding token limit
input = ''
while i < len(inputs) and count_token(input+inputs[i]+'\n') < 1500:
input += inputs[i] + '\n\n'
i += 1
if input == '':
print(f'Warning: input is empty, i={i}')
input = inputs[i] + '\n\n'
i += 1
input = input[:-2]
output = translator(input)
outputs.append(output)
with open(f'data/day{article_day}_en.md', 'w') as f:
for output in outputs:
f.write(output+'\n\n')
| [
"{input}"
] |
2024-01-10 | a00012025/langchain-test | website-qa.py | from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferWindowMemory
from langchain.vectorstores import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from llama_index import download_loader
# import os
# os.environ["OPENAI_API_KEY"] = ''
url = input('Enter the page url: ')
urls = [url]
WebPageReader = download_loader("ReadabilityWebPageReader")
documents = WebPageReader().load_data(url=url)
documents = [doc.to_langchain_format() for doc in documents]
text_splitter = RecursiveCharacterTextSplitter()
documents = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings(client=None)
retriever = FAISS.from_documents(documents, embeddings).as_retriever(k=4)
llm=ChatOpenAI(temperature=1, model="gpt-3.5-turbo", max_tokens=2048, client=None)
memory = ConversationBufferWindowMemory(
memory_key='chat_history',
return_messages=True,
k=6
)
conversation = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=retriever,
# verbose=True,
memory=memory,
max_tokens_limit=1536
)
def chatbot(pt):
res = conversation({'question': pt})['answer']
return res
if __name__=='__main__':
while True:
print('########################################\n')
pt = input('ASK: ')
if pt.lower()=='end':
break
response = chatbot(pt)
print('\n----------------------------------------\n')
print('ChatGPT says: \n')
print(response, '\n') | [] |
2024-01-10 | a00012025/langchain-test | youtube-qa.py | from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferWindowMemory
from langchain.vectorstores import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from llama_index import download_loader
# import os
# os.environ["OPENAI_API_KEY"] = ''
file_path = input('Enter the youtube link: ')
YoutubeTranscriptReader = download_loader("YoutubeTranscriptReader")
documents = YoutubeTranscriptReader().load_data(ytlinks=[file_path])
documents = [doc.to_langchain_format() for doc in documents]
text_splitter = RecursiveCharacterTextSplitter()
documents = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings(client=None)
retriever = FAISS.from_documents(documents, embeddings).as_retriever(k=4)
llm=ChatOpenAI(temperature=1, model="gpt-3.5-turbo", max_tokens=2048, client=None)
memory = ConversationBufferWindowMemory(
memory_key='chat_history',
return_messages=True,
k=6
)
conversation = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=retriever,
# verbose=True,
memory=memory,
max_tokens_limit=1536
)
def chatbot(pt):
res = conversation({'question': pt})['answer']
return res
if __name__=='__main__':
while True:
print('########################################\n')
pt = input('ASK: ')
if pt.lower()=='end':
break
response = chatbot(pt)
print('\n----------------------------------------\n')
print('ChatGPT says: \n')
print(response, '\n') | [] |
2024-01-10 | mirkan1/openai-api | gpt_api.py | import os
import dotenv
import openai
from utils import num_tokens_from_string
dotenv.load_dotenv()
MODEL = os.environ.get("MODEL", "gpt-3.5-turbo")
# messages = [
# { "role": "system", "content": "You are a helpful assistant." },
# { "role": "user", "content": "You are Bob Manley reincarnated, Respond with a guy believe in Rasta and Bob Marley." },
# { "role": "assistant", "content": "..." },
# ]
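# Hypothetical usage sketch (added for illustration; not part of the original module):
#   reply = get_response(
#       [{"role": "user", "content": "Say hi"}],
#       api_key=os.environ["OPENAI_API_KEY"],
#   )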
def get_response(messages, api_key):
openai.api_key = api_key
total_token = 0
for i in messages:
message = i["content"]
role = i["role"]
token_count = num_tokens_from_string(message, MODEL)
# print("this message costs", token_count, "tokens", message[:20], "...")
total_token+=token_count
if role=="bot":
i['role'] = "assistant"
if total_token > 4096:
return "Message too long, please try again."
# 0.002 / 1000 tokens
total_cost = total_token * (0.002 / 1000)
# print(f"Total message costs {total_cost}$.\nTotal tokens {total_token}.")
completion = openai.ChatCompletion.create(model=MODEL, messages=messages)
return completion.choices[0].message.content | [] |
2024-01-10 | FarahZaqout/langchain-js-text-embedding | src~mixed_libraries~test_tweaked_ensemble.py | import unittest
import csv
from langchain_sentence_transformers import match_questions_ensemble
realisticQuestions = [
"What was the last interaction with Oscar Mitchell?",
"When was the last time we interacted with Ella Thompson?",
"When was the last interaction with Larian Studios, and by whom?",
"What do previous wins look like in BeanBrew?",
"What leads are being worked on by SDR Lucas Warren?",
"What contacts are being worked on by SDR Lily Turner?",
]
tweakedQuestionsWithGrammarMistakes = [
"what was the last interaction we had with Mike Smith?",
"when was the most recent interaction with Mike Smith?",
"who was the last one interacted with Uber?",
"descirbe the previous wins for Nike?",
"on what leads does Mike Smith working?",
"on what contact's Mike Smith do work?",
]
class TestQuestionMatching(unittest.TestCase):
def setUp(self):
self.csv_file = open("src/mixed_libraries/mismatches_tweaked_langchain_openai.csv", "w", newline='')
self.csv_writer = csv.writer(self.csv_file)
self.csv_writer.writerow(["User Input", "Top Matched Question", "2nd Matched Question", "3rd Matched Question"])
def test_matching(self):
full_matches = 0
for idx, user_input in enumerate(tweakedQuestionsWithGrammarMistakes):
print(f"\nMatching for input: {user_input}")
results = match_questions_ensemble(realisticQuestions, user_input)
self.assertTrue(results) # Ensure there's always a result
top_match_question = results[0][0]
expected_question = realisticQuestions[idx]
# Check similarity
if results[0][1] < 0.65:
# Extracting similarity scores for the top three (or fewer) results
similarity_scores = [res[1] for res in results[:3]]
# Create a list for the CSV row
csv_row = [user_input, f'"{results[0][0]}" with score {similarity_scores[0]:.3f}']
# Append similarity scores for the 2nd and 3rd results, if they exist. Otherwise, append "N/A".
for score in similarity_scores[1:]:
csv_row.append(f'Score: {score:.3f}')
while len(csv_row) < 4: # Ensure the row has 4 columns
csv_row.append("N/A")
# Write to CSV
self.csv_writer.writerow(csv_row)
elif top_match_question != expected_question:
# Writing mismatch data to CSV
self.csv_writer.writerow([user_input,
results[0][0] if len(results) > 0 else "N/A",
results[1][0] if len(results) > 1 else "N/A",
results[2][0] if len(results) > 2 else "N/A"])
else:
full_matches += 1
print(f"\nTotal full matches: {full_matches}/{len(tweakedQuestionsWithGrammarMistakes)}")
# Calculate and print the percentage
match_percentage = (full_matches / len(tweakedQuestionsWithGrammarMistakes)) * 100
print(f"Match percentage: {match_percentage:.2f}%")
def tearDown(self):
self.csv_file.close()
if __name__ == "__main__":
unittest.main()
| [] |
2024-01-10 | FarahZaqout/langchain-js-text-embedding | src~mixed_libraries~test_mixed_approach.py | import unittest
import csv
from langchain_sentence_transformers import match_questions_ensemble, questions, user_inputs
class TestQuestionMatching(unittest.TestCase):
def setUp(self):
self.csv_file = open("src/mixed_libraries/mismatches_ensemble_mixed_2_models.csv", "w", newline='')
self.csv_writer = csv.writer(self.csv_file)
# Writing the headers for the CSV file
self.csv_writer.writerow(["User Input", "Top Matched Question", "2nd Matched Question", "3rd Matched Question"])
def test_matching(self):
full_matches = 0
for idx, user_input in enumerate(user_inputs):
print(f"\nMatching for input: {user_input}")
results = match_questions_ensemble(questions, user_input)
self.assertTrue(results) # Ensure there's always a result
top_match_question = results[0][0]
expected_question = questions[idx]
# Check similarity
if results[0][1] < 0.65:
# Extracting similarity scores for the top three (or fewer) results
similarity_scores = [res[1] for res in results[:3]]
# Create a list for the CSV row
csv_row = [user_input, f'"{results[0][0]}" with score {similarity_scores[0]:.3f}']
# Append similarity scores for the 2nd and 3rd results, if they exist. Otherwise, append "N/A".
for score in similarity_scores[1:]:
csv_row.append(f'Score: {score:.3f}')
while len(csv_row) < 4: # Ensure the row has 4 columns
csv_row.append("N/A")
# Write to CSV
self.csv_writer.writerow(csv_row)
elif top_match_question != expected_question:
# Writing mismatch data to CSV
self.csv_writer.writerow([user_input,
results[0][0] if len(results) > 0 else "N/A",
results[1][0] if len(results) > 1 else "N/A",
results[2][0] if len(results) > 2 else "N/A"])
else:
full_matches += 1
print(f"\nTotal full matches: {full_matches}/{len(user_inputs)}")
# Calculate and print the percentage
match_percentage = (full_matches / len(user_inputs)) * 100
print(f"Match percentage: {match_percentage:.2f}%")
def tearDown(self):
# Closing the CSV file after all tests complete
self.csv_file.close()
if __name__ == "__main__":
unittest.main()
| [] |
2024-01-10 | FarahZaqout/langchain-js-text-embedding | src~langchain~langchain_openai.py | import spacy
import numpy as np
from langchain.embeddings import OpenAIEmbeddings
from sentence_transformers import SentenceTransformer, util
# CONSTANTS
API_KEY = ""
nlp = spacy.load("en_core_web_sm") # load the english model for spacy
embeddings_model = OpenAIEmbeddings(openai_api_key=API_KEY)
# questions generated via chatgpt so it might be funky.
user_inputs = [
# Existing questions:
"Can you tell me Nokia's current intent score?",
"The last interaction we had with Yahoo was when?",
"Identify all the leads linked to Dell.",
"Which of my accounts have significant whitespace?",
"Show me the most recent leads from Alibaba.",
"Where can I find my whitespace data?",
"What type of leads does Michael possess?",
"Are there accounts that might have similar victories to Instagram?",
"Provide the recent successes of HP.",
"Can you identify every contact related to this account?",
"Which accounts are next on my list to work on?",
"How many business agreements are set with Hulu?",
"Who are the leads Jennifer is currently handling?",
"Identify the contacts Mike is overseeing.",
"What's the renewal date for Tidal?",
"Can you provide details on LG's most recent deal?",
"Any leads linked to WooCommerce in my list?",
"Describe the latest event we had with Cisco.",
"Who was the last to engage with Pinterest on our behalf?",
"How do Sony's earlier successes appear?",
"Describe the methodology Kevin uses to boost Instagram engagement.",
"Provide the newest updates from Jessica's account list.",
"Who from our leads or contacts attended meetings in the last three days?",
"Identify every channel partner linked with my portfolio.",
# New questions:
"Describe our most recent engagement with Nokia.",
"Who oversees Yahoo's account on our end?",
"Are there any prospective deals in the pipeline with Dell?",
"Has Michael seen success with his current leads?",
"Compared to the previous month, how is BlackBerry's intent score?",
"Any notable activities from Alibaba recently?",
"Identify our main liaison at Tidal.",
"In what way does Instagram differ in its renewal protocols?",
"Any upcoming WooCommerce events or webinars we should be aware of?",
"The last deal Jennifer finalized with HP was when?",
"Have we gotten any reviews or feedback from Hulu?",
"Which of Yahoo's contacts recently engaged with our materials?",
"How are we progressing towards our Q4 objectives with Sony?",
"What kind of assistance does Pinterest often request?",
"Did the Cisco team provide comments from our last engagement?",
"Which accounts managed by Kevin have a promising future?",
"How many months has Jessica been overseeing LG's account?",
"Any pending issues or concerns regarding Instagram?",
"Members of which team frequently engage with Dell?",
"Outline the next key events or goals for the Nokia account."
]
# List of questions
questions = [
# Existing questions:
"What is the intent score of Apple?",
"When was the last time we interacted with Google?",
"Who are all the leads associated with Microsoft?",
"List my accounts with the most whitespace?",
"What are the last Amazon leads?",
"Where is my whitespace?",
"What leads do John have?",
"Which accounts will result in a win like account Facebook?",
"What are the last Oracle company wins?",
"Who are all the contacts associated with this account?",
"What accounts should I work on next?",
"How many deals do we have for account Netflix?",
"What leads are being worked on by Alice?",
"What contacts are being worked on by Bob?",
"When is the renewal due for Spotify?",
"What was the last deal at company Adobe?",
"Do I have any leads in account Shopify?",
"What was the last interaction with IBM?",
"When was the last interaction with account Twitter, and by whom?",
"What do previous wins look like in Samsung?",
"How do George generate Facebook pipeline?",
"What is the latest in Maria's accounts?",
"Which contacts/leads attended meetings in the past 3 days?",
"Who are all the channel partners associated with my accounts?",
# New questions:
"How did our last interaction with Apple go?",
"Who is managing the account for Google?",
"What potential opportunities do we have with Microsoft?",
"Has John had any success with his leads?",
"How does Tesla's intent score compare to last month?",
"Are there any updates on Amazon's recent activity?",
"Who is our primary contact at Spotify?",
"How does Facebook's renewal process differ from others?",
"Are there any upcoming events or webinars for Shopify customers?",
"When did Emma last close a deal with Oracle?",
"What feedback have we received from Netflix?",
"Who were the last contacts from Google to engage with our content?",
"How are we tracking against our Q4 goals with Samsung?",
"What type of support does Twitter require?",
"Is there any feedback from the last meeting with IBM's team?",
"Which of George's accounts have the highest growth potential?",
"How long has Maria been managing the Adobe account?",
"Are there any unresolved issues with Facebook?",
"Which team members have had the most interaction with Microsoft?",
"What are the upcoming milestones for the Apple account?"
]
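# Note (added comment): user_inputs and questions are index-aligned paraphrase pairs --
# user_inputs[i] is a reworded version of questions[i] (often with a different company name) --
# so the tests can score whether the embedding match recovers the question at the same index.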
def preprocess(text):
tokens = [token.text for token in nlp(text)]
return ' '.join(tokens)
def extract_named_entities(text):
doc = nlp(text)
return [(ent.text.lower(), ent.label_) for ent in doc.ents]
def named_entity_embeddings(text):
named_entities = extract_named_entities(text)
named_entity_tokens = [f"[{ent_type.upper()}_{ent_text}]" for ent_text, ent_type in named_entities]
if not named_entity_tokens:
named_entity_tokens = ["[NO_ENTITY]"]
entity_embeddings = np.mean([embeddings_model.embed_query(token) for token in named_entity_tokens], axis=0)
return entity_embeddings
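# Note (added comment): named_entity_embeddings() builds synthetic tokens such as
# "[ORG_apple]" for each spaCy entity (or "[NO_ENTITY]" when none are found), embeds each
# token with the OpenAI embedding model, and averages them into a single vector that is
# later concatenated onto the plain text embedding.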
def combine_embeddings(basic_embeddings, entity_embeddings):
"""Combines basic embeddings with named entity embeddings."""
return [np.concatenate((basic, entity)) for basic, entity in zip(basic_embeddings, entity_embeddings)]
def get_embeddings(texts):
preprocessed_texts = [preprocess(text) for text in texts]
basic_embeddings = embeddings_model.embed_documents(preprocessed_texts)
entity_embeddings_list = [named_entity_embeddings(text) for text in texts]
return combine_embeddings(basic_embeddings, entity_embeddings_list)
## TODO: remove the ensemble attempt here because 1) the two models produce embeddings of different sizes, which I didn't account for, and 2) I want to compare individual implementations vs. the ensemble.
def match_questions(questions, user_input, top_n=3, threshold=0.80):
"""Matches a user input to a list of questions based on similarity."""
basic_user_input_embed = embeddings_model.embed_query(preprocess(user_input))
entity_user_input_embed = named_entity_embeddings(user_input)
user_input_embed_openai = np.concatenate((basic_user_input_embed, entity_user_input_embed))
question_embeddings = get_embeddings(questions)
similarities = [util.pytorch_cos_sim(user_input_embed_openai, question_embed)[0][0].item() for question_embed in
question_embeddings]
most_similar_idxs = np.argsort(similarities)[-top_n:][::-1]
above_threshold_idxs = [idx for idx in most_similar_idxs if similarities[idx] >= threshold]
    # Kept as (question, score) tuples rather than a more readable {"question": ...} dict
    # (as in earlier attempts) so an abstracted test function can index results like result[0][0].
    results = [(questions[idx], similarities[idx]) for idx in most_similar_idxs]
# For printing/debugging purposes
if not above_threshold_idxs:
print("No matches found that meet the strict similarity threshold.")
above_fallback_threshold_idxs = [idx for idx in most_similar_idxs if similarities[idx] >= threshold - 0.1]
if above_fallback_threshold_idxs:
print("Matches above the fallback threshold:")
for idx in above_fallback_threshold_idxs:
print(questions[idx], "with similarity score:", similarities[idx])
else:
print("No matches found even after considering the fallback threshold.")
else:
print("Matches above the threshold:")
for idx in above_threshold_idxs:
print(questions[idx], "with similarity score:", similarities[idx])
print("\nTop 3 results for debugging:")
for res in results:
print(res[0], "with similarity score:", res[1])
return results
# Uncomment for testing
# match_questions(questions, "what are my best accounts")
| [] |
2024-01-10 | julian-q/facecam | webcam.py | import cv2
from ultralytics import YOLO
import numpy as np
import torch
import os
import openai
from elevenlabs import clone, generate, play, set_api_key, voices
from elevenlabs.api import History
import requests
def request(prompt):
endpoint = 'https://api.together.xyz/inference'
res = requests.post(endpoint, json={
"model": "togethercomputer/llama-2-70b-chat",
"max_tokens": 1024,
"prompt": prompt,
"request_type": "language-model-inference",
"temperature": 0.7,
"top_p": 0.7,
"top_k": 50,
"repetition_penalty": 1,
"stop": [
"[/INST]",
"</s>"
],
"sessionKey": "2e59071178ae2b05e68015136fb8045df30c3680"
}, headers={
"Authorization": "Bearer a38cd76484726df05bfc70c5b951f1f11c59dd5e0a1b191d2f407f6fd4326838",
})
return res.json()['output']['choices'][0]['text'].strip()
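# Note (added comment): request() above calls the Together AI inference endpoint directly over
# HTTP with a llama-2-70b-chat model and returns the text of the first completion. There is no
# error handling here, so a failed request surfaces as an exception or a KeyError.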
set_api_key("8f96a58113b07003fcf761c98bfb2c3b")
voice = voices()[1]
model = YOLO("yolov8m.pt").to(torch.device("mps"))
cap = cv2.VideoCapture(0)
openai.api_key = "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"
class_map = {}
with open("yolo_classes.txt", "r") as f:
for i, line in enumerate(f.readlines()):
class_map[i] = line.strip()
frame_num = 0
with torch.inference_mode():
while True:
frame_num += 1
ret, frame = cap.read()
results = model(frame)
result = results[0]
bboxes = result.boxes.xyxy
bboxes = np.array(result.boxes.xyxy.cpu(), dtype="int")
classes = np.array(result.boxes.cls.cpu(), dtype="int")
sizes = []
for cls, bbox in zip(classes, bboxes):
(x, y, x2, y2) = bbox
sizes.append(abs(x2 - x) * abs(y2 - y))
cv2.rectangle(frame, (x, y), (x2, y2), (0, 255, 0, 2))
cv2.putText(frame, class_map[cls], (x, y - 5), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)
if len(classes) > 0:
pairs = [(i, cls, size) for i, (cls, size) in enumerate(zip(classes, sizes))]
_, best_class, _ = max(pairs, key=lambda p: p[-1])
if not ret:
break
print(frame_num % (30 * 5))
interval = 1.5
if frame_num % (30 * interval) >= 30 * interval - 10:
cv2.putText(frame, "calculating!!!", (70, 150), cv2.FONT_HERSHEY_PLAIN, 10, (0, 255, 0), 5)
cv2.imshow("Img", frame)
if frame_num % (30 * interval) == 0:
prompt = f"In two sentence, please describe the following object, and then tell me a short fun fact it. Start with 'This is a <object name>'. OBJECT={class_map[best_class]}"
message = request(prompt)
# completion = openai.ChatCompletion.create(
# model="gpt-3.5-turbo",
# messages=[
# {"role": "system", "content": "You are a helpful assistant."},
# {"role": "user", "content": f"In two sentence, please describe the following object, and then tell me a short fun fact it. Start with 'This is a <object name>'. OBJECT={class_map[best_class]}"}
# ]
# )
# message = completion.choices[0].message['content']
print(message)
# audio = generate(text=message, voice=voice)
# play(audio)
key = cv2.waitKey(1)
if key == 27:
break
cap.release()
cv2.destroyAllWindows()
| [] |
2024-01-10 | davidrkersey/llm | generate_qa~Generate_Q%26A.py | ######################################################
##### Generate Q & A from Docs for Fine Tuning #######
######################################################
"""
The following script can be used to create questions and answers for a set of documents (PDF, DOCX, CSV) to generate training data for fine-tuning an LLM
for your Q&A application. It leverages the OpenAI API to generate questions and answers from snippets of your document collection. The following are parameters
to keep in mind:
- chunk_size : dictates how long each snippet will be. Larger snippets provide more context but are more expensive (more tokens).
- sample_size: dictates how many Q&As will be generated. The script randomly pulls sample_size chunks from your processed corpus. A larger sample_size means more questions
  but will incur more cost.
"""
import os
import shutil
from dotenv import load_dotenv, find_dotenv
from PyPDF2 import PdfReader
import re
import docxpy
import random
import pandas as pd
from langchain.llms import OpenAI
from langchain import PromptTemplate, HuggingFaceHub, LLMChain
from helpers import *
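# Note (assumption): `helpers` is expected to provide at least split_into_chunks(text, size)
# and call_model(df, template, input_vars, chunks, api_key, gen_q), which are used below;
# its implementation is not shown in this file.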
"""
Specify API Keys
"""
OPENAI_API = os.getenv('OPENAI_API_KEY')
"""
Specify preprocessing
convert_txt: converts PDF, DOCX, and CSV files to TXT files
chunk_txt: chunks the TXT files into smaller snippets
"""
convert_txt = False
chunk_txt = True
"""
Specify filepaths
"""
def get_filepath():
"""Returns the filepath of the directory"""
filepath = os.path.dirname(os.path.realpath(__file__))
return filepath
# Get filepaths
main_dir = get_filepath()
doc_dir = os.path.join(main_dir, 'docs')
clean_dir = os.path.join(main_dir, 'docs_clean')
out_dir = os.path.join(main_dir, 'output')
# Reset directory
if convert_txt:
if os.path.exists(clean_dir):
shutil.rmtree(clean_dir)
os.makedirs(clean_dir)
else:
os.makedirs(clean_dir)
# Reset outputs
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
os.makedirs(out_dir)
else:
os.makedirs(out_dir)
"""
Text Processing
Files in PDF, DOCX, and CSV format in the 'docs' directory will be converted to .txt and stored in the 'docs_clean' folder
"""
"""
Convert to TXT Preprocessing
"""
if convert_txt:
text_ls = []
"""
Iterate through all pdfs and docx files and convert them to text.
Save them to a list as a tuple with the document file location.
"""
for i in os.listdir(doc_dir):
filename = os.path.join(doc_dir, i)
if re.search('.pdf', filename) is not None:
print(filename)
text = ""
with open(filename, 'rb') as f:
reader = PdfReader(f)
for pg in reader.pages:
text += pg.extract_text()
text = text.strip()
#text = text.replace("\n", "")
#text = text.replace("\t", "")
#text = text.replace(" ", " ")
text_ls.append((filename, text))
elif re.search('.docx', filename) is not None:
if re.search('.docx', filename) is not None:
print(filename)
text = docxpy.process(filename)
text = text.strip()
#text = text.replace("\n", "")
#text = text.replace("\t", "")
#text = text.replace(" ", " ")
text_ls.append((filename, text))
elif re.search('.csv', filename) is not None:
with open(filename, "r", encoding="utf-8") as csv_file:
print(filename)
text= csv_file.read()
#print(text)
text_ls.append((filename, text))
else:
pass
"""
Replace the file path to the cleaned docs directory and replace .pdf or .docx with .txt.
Save the file to that location.
"""
for i in text_ls:
if re.search('.pdf', i[0]) is not None:
filepath = i[0].replace(doc_dir,clean_dir)
filepath = filepath.replace(".pdf", ".txt")
print(filepath)
if re.search('.docx', i[0]) is not None:
filepath = i[0].replace(doc_dir,clean_dir)
filepath = filepath.replace(".docx", ".txt")
print(filepath)
if re.search('.csv', i[0]) is not None:
filepath = i[0].replace(doc_dir,clean_dir)
filepath = filepath.replace(".csv", ".txt")
print(filepath)
with open(filepath, 'w+', encoding="utf-8") as floc:
floc.write(i[1])
if chunk_txt:
    ## Specify chunk size. Larger chunks mean more context; smaller chunks mean less.
chunk_size = 1000
chunk_ls = []
## Loop to iterate over all documents in directory and break them into n-size chunks
for file in os.listdir(clean_dir):
filename = os.path.join(clean_dir, file)
text = open(filename, 'r', errors='ignore').read()
chunks = split_into_chunks(text, chunk_size)
for i in chunks:
chunk_ls.append(i)
"""
Specify number of questions by size of random sample of chunks
"""
sample_size = 3
ls_rand = random.sample(chunk_ls, sample_size)
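# Note (added comment): each sampled chunk drives one question and one answer call, so cost
# scales roughly with sample_size * chunk_size tokens (plus the generated question/answer text).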
"""
Generate questions from chunk of text
"""
## Prompt for generating a question from a chunk of text
qa_gen_template = """
You will be generating questions based on content. Use the following content (delimited by <ctx></ctx>) and only the following content to formulate a question:
-----
<ctx>
{content}
</ctx>
-----
Answer:
)
"""
df_start = pd.DataFrame()
df = call_model(df_start, qa_gen_template, ['content'], ls_rand, OPENAI_API, gen_q = True)
"""
Generate answer from chunk and question
"""
## Prompt for generating a answer from a question and chunk of text
qa_answer_template = """
You will be answering questions based on content. Use the following content (delimited by <ctx></ctx>) and the question (delimited by <que></que>) to formulate an answer:
-----
<ctx>
{content}
</ctx>
-----
<que>
{question}
</que>
-----
Answer:
)
"""
df_end = call_model(df, qa_answer_template, ['content','question'], ls_rand, OPENAI_API, gen_q = False)
# """
# Save Model Outputs
# """
df_end.to_csv(os.path.join(out_dir, 'qa_output.csv')) | [
"\nYou will be answering questions based on content. Use the following content (delimited by <ctx></ctx>) and the question (delimited by <que></que>) to formulate an answer:\n-----\n<ctx>\n{content}\n</ctx>\n-----\n<que>\n{question}\n</que>\n-----\nAnswer:\n)\n",
"\nYou will be generating questions based on content. Use the following content (delimited by <ctx></ctx>) and only the following content to formulate a question:\n-----\n<ctx>\n{content}\n</ctx>\n-----\nAnswer:\n)\n"
] |
2024-01-10 | daveshap/Hierarchical_Document_Representation | step03_summarize_chunks.py | import openai
import json
def load_book_chunks_from_json(file_name):
with open(file_name, 'r', encoding='utf-8') as f:
data = json.load(f)
return data
def save_book_chunks_to_json(chunks, file_name):
with open(file_name, 'w', encoding='utf-8') as f:
json.dump(chunks, f, ensure_ascii=False, indent=4)
def open_file(filepath):
with open(filepath, 'r', encoding='utf-8', errors='ignore') as infile:
return infile.read()
def summarize_chunk(chunk, model="gpt-3.5-turbo-16k", temperature=0, max_tokens=200):
conversation = [
{'role': 'system', 'content': open_file('system_summary.txt')},
{'role': 'user', 'content': chunk}
]
response = openai.ChatCompletion.create(model=model, messages=conversation, temperature=temperature, max_tokens=max_tokens)
return response['choices'][0]['message']['content']
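# Note (assumption): system_summary.txt is expected to hold the system prompt that tells the
# model how to summarize a chunk; it is read fresh for every chunk by summarize_chunk() above.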
if __name__ == '__main__':
openai.api_key = open_file('key_openai.txt').strip()
chunks = load_book_chunks_from_json('book_chunks.json')
for chunk in chunks:
summary = summarize_chunk(chunk['content'])
print('\n\n\nSUMMARY:', summary)
chunk['summary'] = summary
save_book_chunks_to_json(chunks, 'book_chunks_with_summaries.json') | [
"system_summary.txt"
] |
2024-01-10 | daveshap/Hierarchical_Document_Representation | step04_nltk_keywords.py | import openai
import json
import nltk
import os
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.probability import FreqDist
from nltk import ne_chunk, pos_tag
os.environ['NLTK_DATA'] = 'C:\\nltk_data'
nltk.data.path.append("C:\\nltk_data")
nltk.download('punkt', 'C:\\nltk_data')
nltk.download('averaged_perceptron_tagger', 'C:\\nltk_data')
nltk.download('maxent_ne_chunker', 'C:\\nltk_data')
nltk.download('words', 'C:\\nltk_data')
def load_book_chunks_from_json(file_name):
with open(file_name, 'r', encoding='utf-8') as f:
data = json.load(f)
return data
def save_book_chunks_to_json(chunks, file_name):
with open(file_name, 'w', encoding='utf-8') as f:
json.dump(chunks, f, ensure_ascii=False, indent=4)
def extract_keywords(chunk):
words = word_tokenize(chunk)
words = [word for word in words if word.isalpha()]
words = [word for word in words if word not in stopwords.words('english')]
fdist = FreqDist(words)
keywords = [word for word, freq in fdist.most_common(10)]
return ', '.join(keywords)
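# Note (added comment): extract_keywords() is purely frequency based -- it tokenizes, drops
# non-alphabetic tokens and English stopwords, and returns the 10 most common remaining words,
# so it favors repeated terms rather than semantically central ones.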
def extract_named_entities(chunk):
words = word_tokenize(chunk)
tagged = pos_tag(words)
namedEnt = ne_chunk(tagged, binary=True)
entities = []
for subtree in namedEnt.subtrees(filter=lambda t: t.label() == 'NE'):
entities.append(' '.join([child[0] for child in subtree.leaves()]))
return ', '.join(entities)
if __name__ == '__main__':
chunks = load_book_chunks_from_json('book_chunks_with_summaries.json')
for chunk in chunks:
keywords = extract_keywords(chunk['content'])
named_entities = extract_named_entities(chunk['content'])
chunk['keywords'] = keywords
chunk['named_entities'] = named_entities
save_book_chunks_to_json(chunks, 'book_chunks.json')
| [] |
2024-01-10 | amariucaitheodor/acquiring-linguistic-knowledge | alkmi~models~flava~image_processing_flava.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for Flava."""
import math
import random
from functools import lru_cache
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from transformers import is_vision_available, TensorType, BatchFeature
from transformers.image_processing_utils import BaseImageProcessor, get_size_dict
from transformers.image_transforms import resize, center_crop, rescale, normalize, to_channel_dimension_format
from transformers.image_utils import PILImageResampling, ChannelDimension, ImageInput, to_numpy_array, \
make_list_of_images, valid_images
from transformers.utils import logging
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
# These values are taken from CLIP
FLAVA_IMAGE_MEAN = OPENAI_CLIP_MEAN
FLAVA_IMAGE_STD = OPENAI_CLIP_STD
FLAVA_CODEBOOK_MEAN = [0.0, 0.0, 0.0]
FLAVA_CODEBOOK_STD = [1.0, 1.0, 1.0]
LOGIT_LAPLACE_EPS: float = 0.1
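# Note (added comment): LOGIT_LAPLACE_EPS is the epsilon of the logit-Laplace style pixel
# mapping x -> (1 - 2*eps) * x + eps applied to codebook inputs (see map_pixels below), which
# keeps rescaled pixel values strictly inside (eps, 1 - eps).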
# Inspired from https://github.com/microsoft/unilm/blob/master/beit/masking_generator.py
class FlavaMaskingGenerator:
def __init__(
self,
input_size: Union[int, Tuple[int, int]] = 14,
total_mask_patches: int = 75,
mask_group_max_patches: Optional[int] = None,
mask_group_min_patches: int = 16,
mask_group_min_aspect_ratio: Optional[float] = 0.3,
        mask_group_max_aspect_ratio: Optional[float] = None,
):
if not isinstance(input_size, tuple):
input_size = (input_size,) * 2
self.height, self.width = input_size
self.num_patches = self.height * self.width
self.total_mask_patches = total_mask_patches
self.mask_group_min_patches = mask_group_min_patches
self.mask_group_max_patches = total_mask_patches if mask_group_max_patches is None else mask_group_max_patches
mask_group_max_aspect_ratio = mask_group_max_aspect_ratio or 1 / mask_group_min_aspect_ratio
self.log_aspect_ratio = (math.log(mask_group_min_aspect_ratio), math.log(mask_group_max_aspect_ratio))
def __repr__(self):
repr_str = "MaskingGenerator(%d, %d -> [%d ~ %d], max = %d, %.3f ~ %.3f)" % (
self.height,
self.width,
self.mask_group_min_patches,
self.mask_group_max_patches,
self.total_mask_patches,
self.log_aspect_ratio[0],
self.log_aspect_ratio[1],
)
return repr_str
def get_shape(self):
return self.height, self.width
def _mask(self, mask, max_mask_patches):
delta = 0
for _attempt in range(10):
target_area = random.uniform(self.mask_group_min_patches, max_mask_patches)
aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))
height = int(round(math.sqrt(target_area * aspect_ratio)))
width = int(round(math.sqrt(target_area / aspect_ratio)))
if width < self.width and height < self.height:
top = random.randint(0, self.height - height)
left = random.randint(0, self.width - width)
num_masked = mask[top : top + height, left : left + width].sum()
# Overlap
if 0 < height * width - num_masked <= max_mask_patches:
for i in range(top, top + height):
for j in range(left, left + width):
if mask[i, j] == 0:
mask[i, j] = 1
delta += 1
if delta > 0:
break
return delta
def __call__(self):
mask = np.zeros(shape=self.get_shape(), dtype=int)
mask_count = 0
while mask_count < self.total_mask_patches:
max_mask_patches = self.total_mask_patches - mask_count
max_mask_patches = min(max_mask_patches, self.mask_group_max_patches)
delta = self._mask(mask, max_mask_patches)
if delta == 0:
break
else:
mask_count += delta
return mask
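# Note (added comment): FlavaMaskingGenerator produces the BEiT-style block-wise mask used for
# masked image modeling. Illustrative use with the defaults above:
#   generator = FlavaMaskingGenerator(input_size=14, total_mask_patches=75)
#   bool_masked_pos = generator()  # int array of shape (14, 14) with up to 75 ones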
class FlavaImageProcessor(BaseImageProcessor):
r"""
Constructs a Flava image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
`do_resize` parameter in `preprocess`.
size (`Dict[str, int]` *optional*, defaults to `{"height": 224, "width": 224}`):
Size of the image after resizing. Can be overridden by the `size` parameter in `preprocess`.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in
`preprocess`.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to center crop the images. Can be overridden by the `do_center_crop` parameter in `preprocess`.
crop_size (`Dict[str, int]` *optional*, defaults to `{"height": 224, "width": 224}`):
Size of image after the center crop `(crop_size["height"], crop_size["width"])`. Can be overridden by the
`crop_size` parameter in `preprocess`.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in `preprocess`.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in
`preprocess`.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in `preprocess`.
image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
return_image_mask (`bool`, *optional*, defaults to `False`):
Whether to return the image mask. Can be overridden by the `return_image_mask` parameter in `preprocess`.
input_size_patches (`int`, *optional*, defaults to 14):
Number of patches in the image in height and width direction. 14x14 = 196 total patches. Can be overridden
by the `input_size_patches` parameter in `preprocess`.
total_mask_patches (`int`, *optional*, defaults to 75):
Total number of patches that should be masked. Can be overridden by the `total_mask_patches` parameter in
`preprocess`.
mask_group_min_patches (`int`, *optional*, defaults to 16):
Minimum number of patches that should be masked. Can be overridden by the `mask_group_min_patches`
parameter in `preprocess`.
mask_group_max_patches (`int`, *optional*):
Maximum number of patches that should be masked. Can be overridden by the `mask_group_max_patches`
parameter in `preprocess`.
mask_group_min_aspect_ratio (`float`, *optional*, defaults to 0.3):
Minimum aspect ratio of the mask window. Can be overridden by the `mask_group_min_aspect_ratio` parameter
in `preprocess`.
mask_group_max_aspect_ratio (`float`, *optional*):
Maximum aspect ratio of the mask window. Can be overridden by the `mask_group_max_aspect_ratio` parameter
in `preprocess`.
codebook_do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the input for codebook to a certain `codebook_size`. Can be overridden by the
            `codebook_do_resize` parameter in `preprocess`.
codebook_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
Resize the input for codebook to the given size. Can be overridden by the `codebook_size` parameter in
`preprocess`.
codebook_resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.LANCZOS`):
Resampling filter to use if resizing the codebook image. Can be overridden by the `codebook_resample`
parameter in `preprocess`.
codebook_do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to crop the input for codebook at the center. If the input size is smaller than
`codebook_crop_size` along any edge, the image is padded with 0's and then center cropped. Can be
overridden by the `codebook_do_center_crop` parameter in `preprocess`.
codebook_crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
Desired output size for codebook input when applying center-cropping. Can be overridden by the
`codebook_crop_size` parameter in `preprocess`.
codebook_do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the input for codebook by the specified scale `codebook_rescale_factor`. Can be
overridden by the `codebook_do_rescale` parameter in `preprocess`.
codebook_rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Defines the scale factor to use if rescaling the codebook image. Can be overridden by the
`codebook_rescale_factor` parameter in `preprocess`.
codebook_do_map_pixels (`bool`, *optional*, defaults to `True`):
Whether to map the pixel values of the codebook input to (1 - 2e)x + e. Can be overridden by the
`codebook_do_map_pixels` parameter in `preprocess`.
codebook_do_normalize (`bool`, *optional*, defaults to `True`):
Whether or not to normalize the input for codebook with `codebook_image_mean` and `codebook_image_std`. Can
be overridden by the `codebook_do_normalize` parameter in `preprocess`.
codebook_image_mean (`Optional[Union[float, Iterable[float]]]`, *optional*, defaults to `[0, 0, 0]`):
The sequence of means for each channel, to be used when normalizing images for codebook. Can be overridden
by the `codebook_image_mean` parameter in `preprocess`.
        codebook_image_std (`Optional[Union[float, Iterable[float]]]`, *optional*, defaults to `[1.0, 1.0, 1.0]`):
The sequence of standard deviations for each channel, to be used when normalizing images for codebook. Can
be overridden by the `codebook_image_std` parameter in `preprocess`.
"""
model_input_names = ["pixel_values"]
def __init__(
self,
do_resize: bool = True,
size: Dict[str, int] = None,
resample: PILImageResampling = PILImageResampling.BICUBIC,
do_center_crop: bool = True,
crop_size: Dict[str, int] = None,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, Iterable[float]]] = None,
image_std: Optional[Union[float, Iterable[float]]] = None,
# Mask related params
return_image_mask: bool = False,
input_size_patches: int = 14,
total_mask_patches: int = 75,
mask_group_min_patches: int = 16,
mask_group_max_patches: Optional[int] = None,
mask_group_min_aspect_ratio: float = 0.3,
mask_group_max_aspect_ratio: Optional[float] = None,
# Codebook related params
return_codebook_pixels: bool = False,
codebook_do_resize: bool = True,
        codebook_size: Dict[str, int] = None,
codebook_resample: int = PILImageResampling.LANCZOS,
codebook_do_center_crop: bool = True,
        codebook_crop_size: Dict[str, int] = None,
codebook_do_rescale: bool = True,
codebook_rescale_factor: Union[int, float] = 1 / 255,
codebook_do_map_pixels: bool = True,
codebook_do_normalize: bool = True,
codebook_image_mean: Optional[Union[float, Iterable[float]]] = None,
codebook_image_std: Optional[Union[float, Iterable[float]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"height": 224, "width": 224}
size = get_size_dict(size)
crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
crop_size = get_size_dict(crop_size, param_name="crop_size")
codebook_size = codebook_size if codebook_size is not None else {"height": 112, "width": 112}
codebook_size = get_size_dict(codebook_size, param_name="codebook_size")
codebook_crop_size = codebook_crop_size if codebook_crop_size is not None else {"height": 112, "width": 112}
codebook_crop_size = get_size_dict(codebook_crop_size, param_name="codebook_crop_size")
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else FLAVA_IMAGE_MEAN
self.image_std = image_std if image_std is not None else FLAVA_IMAGE_STD
self.return_image_mask = return_image_mask
self.input_size_patches = input_size_patches
self.total_mask_patches = total_mask_patches
self.mask_group_min_patches = mask_group_min_patches
self.mask_group_max_patches = mask_group_max_patches
self.mask_group_min_aspect_ratio = mask_group_min_aspect_ratio
self.mask_group_max_aspect_ratio = mask_group_max_aspect_ratio
self.return_codebook_pixels = return_codebook_pixels
self.codebook_do_resize = codebook_do_resize
self.codebook_size = codebook_size
self.codebook_resample = codebook_resample
self.codebook_do_center_crop = codebook_do_center_crop
self.codebook_crop_size = codebook_crop_size
self.codebook_do_rescale = codebook_do_rescale
self.codebook_rescale_factor = codebook_rescale_factor
self.codebook_do_map_pixels = codebook_do_map_pixels
self.codebook_do_normalize = codebook_do_normalize
self.codebook_image_mean = codebook_image_mean if codebook_image_mean is not None else FLAVA_CODEBOOK_MEAN
self.codebook_image_std = codebook_image_std if codebook_image_std is not None else FLAVA_CODEBOOK_STD
@classmethod
def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
"""
Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is
created using from_dict and kwargs e.g. `FlavaImageProcessor.from_pretrained(checkpoint, codebook_size=600)`
"""
image_processor_dict = image_processor_dict.copy()
if "codebook_size" in kwargs:
image_processor_dict["codebook_size"] = kwargs.pop("codebook_size")
if "codebook_crop_size" in kwargs:
image_processor_dict["codebook_crop_size"] = kwargs.pop("codebook_crop_size")
return super().from_dict(image_processor_dict, **kwargs)
@lru_cache()
def masking_generator(
self,
input_size_patches,
total_mask_patches,
mask_group_min_patches,
mask_group_max_patches,
mask_group_min_aspect_ratio,
mask_group_max_aspect_ratio,
) -> FlavaMaskingGenerator:
return FlavaMaskingGenerator(
input_size=input_size_patches,
total_mask_patches=total_mask_patches,
mask_group_min_patches=mask_group_min_patches,
mask_group_max_patches=mask_group_max_patches,
mask_group_min_aspect_ratio=mask_group_min_aspect_ratio,
mask_group_max_aspect_ratio=mask_group_max_aspect_ratio,
)
def resize(
self,
image: np.ndarray,
size: Dict[str, int],
resample: PILImageResampling = PILImageResampling.BICUBIC,
data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`Dict[str, int]`):
Size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
"""
size = get_size_dict(size)
if "height" not in size or "width" not in size:
raise ValueError(f"The size dictionary must contain 'height' and 'width' keys. Got {size.keys()}")
return resize(
image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
)
def center_crop(
self,
image: np.ndarray,
size: Dict[str, int],
data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Center crop an image to `(size["height"], size["width"])`. If the input size is smaller than `crop_size` along
any edge, the image is padded with 0's and then center cropped.
Args:
image (`np.ndarray`):
Image to center crop.
size (`Dict[str, int]`):
Size of the output image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
"""
size = get_size_dict(size)
if "height" not in size or "width" not in size:
raise ValueError(f"The size dictionary must contain 'height' and 'width' keys. Got {size.keys()}")
return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
def rescale(
self,
image: np.ndarray,
scale: Union[int, float],
data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
):
"""
Rescale an image by a scale factor. image = image * scale.
Args:
image (`np.ndarray`):
Image to rescale.
scale (`int` or `float`):
Scale to apply to the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
"""
return rescale(image, scale=scale, data_format=data_format, **kwargs)
def normalize(
self,
image: np.ndarray,
mean: Union[float, List[float]],
std: Union[float, List[float]],
data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Normalize an image. image = (image - image_mean) / image_std.
Args:
image (`np.ndarray`):
Image to normalize.
            mean (`float` or `List[float]`):
                Image mean.
            std (`float` or `List[float]`):
                Image standard deviation.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
"""
return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
def map_pixels(self, image: np.ndarray) -> np.ndarray:
return (1 - 2 * LOGIT_LAPLACE_EPS) * image + LOGIT_LAPLACE_EPS
def _preprocess_image(
self,
image: ImageInput,
do_resize: bool = None,
size: Dict[str, int] = None,
resample: PILImageResampling = None,
do_center_crop: bool = None,
crop_size: Dict[str, int] = None,
do_rescale: bool = None,
rescale_factor: float = None,
do_normalize: bool = None,
image_mean: Optional[Union[float, List[float]]] = None,
image_std: Optional[Union[float, List[float]]] = None,
do_map_pixels: bool = None,
data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
) -> np.ndarray:
"""Preprocesses a single image."""
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True.")
# All transformations expect numpy arrays.
image = to_numpy_array(image)
if do_resize:
image = self.resize(image=image, size=size, resample=resample)
if do_center_crop:
image = self.center_crop(image=image, size=crop_size)
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor)
if do_normalize:
image = self.normalize(image=image, mean=image_mean, std=image_std)
if do_map_pixels:
image = self.map_pixels(image)
if data_format is not None:
image = to_channel_dimension_format(image, data_format)
return image
def preprocess(
self,
images: ImageInput,
do_resize: Optional[bool] = None,
size: Dict[str, int] = None,
resample: PILImageResampling = None,
do_center_crop: Optional[bool] = None,
crop_size: Optional[Dict[str, int]] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, List[float]]] = None,
image_std: Optional[Union[float, List[float]]] = None,
# Mask related params
return_image_mask: Optional[bool] = None,
input_size_patches: Optional[int] = None,
total_mask_patches: Optional[int] = None,
mask_group_min_patches: Optional[int] = None,
mask_group_max_patches: Optional[int] = None,
mask_group_min_aspect_ratio: Optional[float] = None,
mask_group_max_aspect_ratio: Optional[float] = None,
# Codebook related params
return_codebook_pixels: Optional[bool] = None,
codebook_do_resize: Optional[bool] = None,
codebook_size: Optional[Dict[str, int]] = None,
codebook_resample: Optional[int] = None,
codebook_do_center_crop: Optional[bool] = None,
codebook_crop_size: Optional[Dict[str, int]] = None,
codebook_do_rescale: Optional[bool] = None,
codebook_rescale_factor: Optional[float] = None,
codebook_do_map_pixels: Optional[bool] = None,
codebook_do_normalize: Optional[bool] = None,
codebook_image_mean: Optional[Iterable[float]] = None,
codebook_image_std: Optional[Iterable[float]] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: ChannelDimension = ChannelDimension.FIRST,
**kwargs,
) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`Dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only
has an effect if `do_resize` is set to `True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
Image mean.
image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation.
return_image_mask (`bool`, *optional*, defaults to `self.return_image_mask`):
Whether to return the image mask.
input_size_patches (`int`, *optional*, defaults to `self.input_size_patches`):
Size of the patches to extract from the image.
total_mask_patches (`int`, *optional*, defaults to `self.total_mask_patches`):
Total number of patches to extract from the image.
mask_group_min_patches (`int`, *optional*, defaults to `self.mask_group_min_patches`):
Minimum number of patches to extract from the image.
mask_group_max_patches (`int`, *optional*, defaults to `self.mask_group_max_patches`):
Maximum number of patches to extract from the image.
mask_group_min_aspect_ratio (`float`, *optional*, defaults to `self.mask_group_min_aspect_ratio`):
Minimum aspect ratio of the patches to extract from the image.
mask_group_max_aspect_ratio (`float`, *optional*, defaults to `self.mask_group_max_aspect_ratio`):
Maximum aspect ratio of the patches to extract from the image.
return_codebook_pixels (`bool`, *optional*, defaults to `self.return_codebook_pixels`):
Whether to return the codebook pixels.
codebook_do_resize (`bool`, *optional*, defaults to `self.codebook_do_resize`):
Whether to resize the codebook pixels.
codebook_size (`Dict[str, int]`, *optional*, defaults to `self.codebook_size`):
Size of the codebook pixels.
codebook_resample (`int`, *optional*, defaults to `self.codebook_resample`):
Resampling filter to use if resizing the codebook pixels. This can be one of the enum
`PILImageResampling`, Only has an effect if `codebook_do_resize` is set to `True`.
codebook_do_center_crop (`bool`, *optional*, defaults to `self.codebook_do_center_crop`):
Whether to center crop the codebook pixels.
codebook_crop_size (`Dict[str, int]`, *optional*, defaults to `self.codebook_crop_size`):
Size of the center crop of the codebook pixels. Only has an effect if `codebook_do_center_crop` is set
to `True`.
codebook_do_rescale (`bool`, *optional*, defaults to `self.codebook_do_rescale`):
Whether to rescale the codebook pixels values between [0 - 1].
codebook_rescale_factor (`float`, *optional*, defaults to `self.codebook_rescale_factor`):
Rescale factor to rescale the codebook pixels by if `codebook_do_rescale` is set to `True`.
codebook_do_map_pixels (`bool`, *optional*, defaults to `self.codebook_do_map_pixels`):
Whether to map the codebook pixels values.
codebook_do_normalize (`bool`, *optional*, defaults to `self.codebook_do_normalize`):
Whether to normalize the codebook pixels.
codebook_image_mean (`float` or `List[float]`, *optional*, defaults to `self.codebook_image_mean`):
Codebook pixels mean to normalize the codebook pixels by if `codebook_do_normalize` is set to `True`.
codebook_image_std (`float` or `List[float]`, *optional*, defaults to `self.codebook_image_std`):
Codebook pixels standard deviation to normalize the codebook pixels by if `codebook_do_normalize` is
set to `True`.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
- `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size)
resample = resample if resample is not None else self.resample
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size, param_name="crop_size")
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
return_image_mask = return_image_mask if return_image_mask is not None else self.return_image_mask
input_size_patches = input_size_patches if input_size_patches is not None else self.input_size_patches
total_mask_patches = total_mask_patches if total_mask_patches is not None else self.total_mask_patches
mask_group_min_patches = (
mask_group_min_patches if mask_group_min_patches is not None else self.mask_group_min_patches
)
mask_group_max_patches = (
mask_group_max_patches if mask_group_max_patches is not None else self.mask_group_max_patches
)
mask_group_min_aspect_ratio = (
mask_group_min_aspect_ratio
if mask_group_min_aspect_ratio is not None
else self.mask_group_min_aspect_ratio
)
mask_group_max_aspect_ratio = (
mask_group_max_aspect_ratio
if mask_group_max_aspect_ratio is not None
else self.mask_group_max_aspect_ratio
)
return_codebook_pixels = (
return_codebook_pixels if return_codebook_pixels is not None else self.return_codebook_pixels
)
codebook_do_resize = codebook_do_resize if codebook_do_resize is not None else self.codebook_do_resize
codebook_size = codebook_size if codebook_size is not None else self.codebook_size
codebook_size = get_size_dict(codebook_size, param_name="codebook_size")
codebook_resample = codebook_resample if codebook_resample is not None else self.codebook_resample
codebook_do_rescale = codebook_do_rescale if codebook_do_rescale is not None else self.codebook_do_rescale
codebook_rescale_factor = (
codebook_rescale_factor if codebook_rescale_factor is not None else self.codebook_rescale_factor
)
codebook_do_center_crop = (
codebook_do_center_crop if codebook_do_center_crop is not None else self.codebook_do_center_crop
)
codebook_crop_size = codebook_crop_size if codebook_crop_size is not None else self.codebook_crop_size
codebook_crop_size = get_size_dict(codebook_crop_size, param_name="codebook_crop_size")
codebook_do_map_pixels = (
codebook_do_map_pixels if codebook_do_map_pixels is not None else self.codebook_do_map_pixels
)
codebook_do_normalize = (
codebook_do_normalize if codebook_do_normalize is not None else self.codebook_do_normalize
)
codebook_image_mean = codebook_image_mean if codebook_image_mean is not None else self.codebook_image_mean
codebook_image_std = codebook_image_std if codebook_image_std is not None else self.codebook_image_std
images = make_list_of_images(images)
if not valid_images(images):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray."
)
processed_images = [
self._preprocess_image(
image=img,
do_resize=do_resize,
size=size,
resample=resample,
do_center_crop=do_center_crop,
crop_size=crop_size,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_map_pixels=False,
data_format=data_format,
)
for img in images
]
data = {"pixel_values": processed_images}
if return_codebook_pixels:
codebook_images = [
self._preprocess_image(
image=img,
do_resize=codebook_do_resize,
size=codebook_size,
resample=codebook_resample,
do_center_crop=codebook_do_center_crop,
crop_size=codebook_crop_size,
do_rescale=codebook_do_rescale,
rescale_factor=codebook_rescale_factor,
do_normalize=codebook_do_normalize,
image_mean=codebook_image_mean,
image_std=codebook_image_std,
do_map_pixels=codebook_do_map_pixels,
data_format=data_format,
)
for img in images
]
data["codebook_pixel_values"] = codebook_images
if return_image_mask:
mask_generator = self.masking_generator(
input_size_patches=input_size_patches,
total_mask_patches=total_mask_patches,
mask_group_min_patches=mask_group_min_patches,
mask_group_max_patches=mask_group_max_patches,
mask_group_min_aspect_ratio=mask_group_min_aspect_ratio,
mask_group_max_aspect_ratio=mask_group_max_aspect_ratio,
)
masks = [mask_generator() for _ in images]
data["bool_masked_pos"] = masks
return BatchFeature(data=data, tensor_type=return_tensors)
| [] |
2024-01-10 | stateofgrace314/CS7641 | P4-Markov_Decision_Processes~lake.py | """
Function for running the frozen lake problem and analysis
"""
import gym
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from utils import OUTDIR, RunVI, RunPI, RunQ, plot_QL
DIRS = {0: "←", 1: "↓", 2: "→", 3: "↑"}
def plot_lake(env, policy=None, model=""):
"""
Plot the Optimal policy (map) for the Frozen Lake
"""
# convert env desc to plot colors
colors = { b"S": "b", b"F": "w", b"H": "k", b"G": "g"}
sqs = env.nrow
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(111, xlim=(-.01, sqs+0.01), ylim=(-.01, sqs+0.01))
title = f"Frozen Lake Policy - {model}"
plt.title(title, fontsize=12, weight="bold")
for i in range(sqs):
for j in range(sqs):
y = sqs - i - 1
x = j
p = plt.Rectangle([x, y], 1, 1, linewidth=1, edgecolor="k")
p.set_facecolor(colors[env.desc[i,j]])
ax.add_patch(p)
if policy is not None:
ax.text(x+0.5, y+0.5, DIRS[policy[i, j]], horizontalalignment="center", size=25,
verticalalignment="center", color="k")
plt.axis("off")
title = f"FrozenLakeOptimalPolicy{model}"
plt.tight_layout()
plt.savefig(f"{OUTDIR}/{title}.png", dpi=400)
# code based on:
# https://medium.com/analytics-vidhya/solving-the-frozenlake-environment-from-openai-gym-using-value-iteration-5a078dffe438
def get_score(env, policy, verbose=False, episodes=1000):
misses = 0
steps_list = []
for episode in range(episodes):
observation = env.reset()[0]
steps=0
while True:
action = policy.flatten()[observation]
observation, reward, done, *_ = env.step(action)
steps+=1
if done and reward == 1:
steps_list.append(steps)
break
elif done and reward == 0:
misses += 1
break
ave_steps = np.mean(steps_list)
std_steps = np.std(steps_list)
pct_fail = (misses/episodes)* 100
if verbose:
print("----------------------------------------------")
print(f"You took an average of {ave_steps:.0f} steps to get the frisbee")
print(f"And you fell in the hole {pct_fail:.2f} % of the times")
print("----------------------------------------------")
return ave_steps, std_steps, pct_fail
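# Note (added comment): get_score() above evaluates a policy empirically by rolling out
# episodes in the environment and recording how many steps successful runs take and how
# often the agent falls in a hole; getStats() below attaches these numbers to each run.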
def getStats(data, env, verbose=True):
"""
    Adds rollout statistics (average steps, step std dev, success rate) to the results for each policy
"""
# add a few columns
data["average_steps"] = [0]*len(data)
data["steps_stddev"] = [0]*len(data)
data["success_pct"] = [0]*len(data)
policies = data["policy"]
for i in range(len(policies)):
data["policy"][i] = np.array(data["policy"][i]).reshape(4,4)
policy = data["policy"][i]
steps, steps_stddev, failures = get_score(env, policy, verbose=False)
data["average_steps"][i] = steps
data["steps_stddev"][i] = steps_stddev
data["success_pct"][i] = 100-failures
def plotAvg(data, x, independent, dependent, title, model):
"""
Plot helper function
"""
y = [data.loc[data[independent] == xi][dependent].mean() for xi in x]
sns.set(style="whitegrid")
plt.figure(figsize=(6,4))
ax = sns.barplot(x=x, y=y)
ax.set_title(title)
ax.set_xlabel(independent)
ax.set_ylabel(dependent)
title=f"FrozenLake{independent}vs{dependent}{model}"
plt.tight_layout()
plt.savefig(f"{OUTDIR}/{title}.png", dpi=400)
def runFrozenLake(verbose=True):
"""
Runs the RL analysis on the frozen lake problem
"""
# Setup 4x4 frozen lake
env = gym.make("FrozenLake-v1").unwrapped
env.max_episode_steps=500
# Create transition and reward matrices from OpenAI P matrix
rows = env.nrow
cols = env.ncol
T = np.zeros((4, rows*cols, rows*cols))
R = np.zeros((4, rows*cols, rows*cols))
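    # env.P[state][action] is a list of (prob, next_state, reward, done) tuples; on the
    # slippery 4x4 lake, env.P[0][0] looks roughly like
    # [(0.33, 0, 0.0, False), (0.33, 4, 0.0, False), (0.33, 0, 0.0, False)] (illustrative values).
    # The loop below folds these tuples into the T (transition) and R (reward) matrices.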
# Loop through the env to build the lake
state0 = np.inf
# squares in the map
for sq in env.P:
# actions allowed
for a in env.P[sq]:
# resulting states
for i in range(len(env.P[sq][a])):
state1 = env.P[sq][a][i][1]
if state1 == state0:
T[a][sq][state1] = T[a][sq][state0] + env.P[sq][a][i][0]
R[a][sq][state1] = R[a][sq][state0] + env.P[sq][a][i][2]
else:
T[a][sq][state1] = env.P[sq][a][i][0]
R[a][sq][state1] = env.P[sq][a][i][2]
state0 = state1
# print out the default (empty) lake
plot_lake(env, model="Default")
### Value Iteration ###
# Parameters to iterate
gammas = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.99, 0.999]
epsilons = [1e-1, 1e-2, 1e-3, 1e-5, 1e-7, 1e-10, 1e-12]
# run the analysis
vi_data = RunVI(T, R, gammas, epsilons, verbose=verbose)
getStats(vi_data, env, verbose)
vi_data.to_csv(f"{OUTDIR}/FrozenLakeDataVI.csv")
# plot reward vs gamma
vi_data.plot(x="gamma", y="reward", title="Frozen Lake Reward vs. Gamma")
plt.grid(True)
plt.tight_layout()
plt.savefig(f"{OUTDIR}/FrozenLakeRewardvsGammaVI.png")
# plot reward vs iterations
vi_data.sort_values("iterations", inplace=True)
vi_data.plot(x="iterations", y="reward", title="Frozen Lake Reward vs. Iterations")
plt.grid(True)
plt.tight_layout()
plt.savefig(f"{OUTDIR}/FrozenLakeRewardvsIterationVI.png")
# plot success pct vs gamma
vi_data.plot(x="gamma", y="success_pct", title="Frozen Lake Success Rate vs. Gamma")
plt.grid(True)
plt.tight_layout()
plt.savefig(f"{OUTDIR}/FrozenLakeSuccessvsGammaVI.png")
# plot avg steps vs gamma
plotAvg(vi_data, gammas, "gamma", "average_steps", "Avg Steps vs Gamma", "VI")
plotAvg(vi_data, gammas, "gamma", "success_pct", "Success Rate vs Gamma", "VI")
# get the best one, plot it and save the data
bestRun = vi_data["reward"].argmax()
bestR = vi_data["reward"].max()
bestPolicy = vi_data["policy"][bestRun]
bestG = vi_data["gamma"][bestRun]
bestE = vi_data["epsilon"][bestRun]
bestPct = vi_data["success_pct"][bestRun]
plot_lake(env, bestPolicy, "VI")
if verbose:
print(f"Best Result:\n\tReward = {bestR:.2f}\n\tGamma = {bestG:.3f}\n\tEps= {bestE:.2E}\n\tSuccess Rate = {bestPct:.2f}")
### Policy Iteration ###
# Parameters to iterate
gammas = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.99, 0.999]
# run the analysis
pi_data = RunPI(T, R, gammas, verbose=verbose)
getStats(pi_data, env, verbose)
pi_data.to_csv(f"{OUTDIR}/FrozenLakeDataPI.csv")
# plot reward vs gamma
pi_data.plot(x="gamma", y="reward", title="Frozen Lake Reward vs. Gamma")
plt.grid(True)
plt.tight_layout()
plt.savefig(f"{OUTDIR}/FrozenLakeRewardvsGammaPI.png")
# plot reward vs iterations
pi_data.sort_values("iterations", inplace=True)
pi_data.plot(x="iterations", y="reward", title="Frozen Lake Reward vs. Iterations")
plt.grid(True)
plt.tight_layout()
plt.savefig(f"{OUTDIR}/FrozenLakeRewardvsIterationPI.png")
# plot success pct vs gamma
pi_data.plot(x="gamma", y="success_pct", title="Frozen Lake Success Rate vs. Gamma")
plt.grid(True)
plt.tight_layout()
plt.savefig(f"{OUTDIR}/FrozenLakeSuccessvsGammaPI.png")
# plot avg steps vs gamma
plotAvg(pi_data, gammas, "gamma", "average_steps", "Avg Steps vs Gamma", "PI")
plotAvg(pi_data, gammas, "gamma", "success_pct", "Success Rate vs Gamma", "PI")
# get the best one, plot it and save the data
bestRun = pi_data["reward"].argmax()
bestR = pi_data["reward"].max()
bestPolicy = pi_data["policy"][bestRun]
bestG = pi_data["gamma"][bestRun]
bestPct = pi_data["success_pct"][bestRun]
plot_lake(env, bestPolicy, "PI")
if verbose:
print(f"Best Result:\n\tReward = {bestR:.2f}\n\tGamma = {bestG:.3f}\n\tSuccess Rate = {bestPct:.2f}")
### Q learning ###
gammas = [0.8, 0.9, 0.99]
alphas = [0.01, 0.1, 0.2]
alpha_decays = [0.9, 0.999]
epsilon_decays = [0.9, 0.999]
iterations = [1e4, 1e5, 1e6]
    # # to run again, uncomment below (takes a long time)
# ql_data = RunQ(T, R, gammas, alphas, alpha_decays=alpha_decays, epsilon_decays=epsilon_decays,
# n_iterations=iterations, verbose=verbose)
# getStats(ql_data, env, verbose)
# file = f"{OUTDIR}/FrozenLakeDataQl.csv"
# ql_data.to_csv(file)
# Read in Q-Learning data
ql_data = pd.read_csv(f"{OUTDIR}/FrozenLakeDataQl.csv")
# check which hyperparameters made most impact
slice = ["gamma", "alpha", "alpha_decay", "epsilon_decay", "iterations", "reward", "time"]
df = ql_data[slice]
ql_corr = df.corr()
sns.set(style="white")
fig, ax = plt.subplots(figsize=(8,7))
ax.set_title("Correlation Matrix of Q-Learning Parameters", fontsize=20)
    mask = np.triu(np.ones_like(ql_corr, dtype=bool))  # np.bool was removed in NumPy 1.24+
cmap = sns.diverging_palette(255, 0, as_cmap=True)
sns.heatmap(ql_corr, mask=mask, cmap=cmap, square=True, linewidths=0.5, cbar_kws={"shrink":.75})
plt.tight_layout()
plt.savefig(f"{OUTDIR}/FrozenLakeQlParamCorrelation.png")
# Plots
plot_QL(ql_data, "iterations", "time", "FrozenLake", title="Mean Runtime vs. Num Iterations", logscale=True)
plot_QL(ql_data, "iterations", "success_pct", "FrozenLake", title="Success Rate vs. Num Iterations", logscale=True)
plot_QL(ql_data, "alpha_decay", "success_pct", "FrozenLake", title="Success Rate vs. Alpha Decay")
plot_QL(ql_data, "gamma", "success_pct", "FrozenLake", title="Success Rate vs. Gamma")
plot_QL(ql_data, "gamma", "reward", "FrozenLake", title="Reward vs. Gamma")
plotAvg(ql_data, gammas, "gamma", "average_steps", "Avg Steps vs Gamma", "Ql")
plotAvg(ql_data, gammas, "gamma", "success_pct", "Success Rate vs Gamma", "Ql")
# Get the best one
bestRun = ql_data["reward"].argmax()
best_policy = ql_data["policy"][bestRun]
bestR = ql_data["reward"][bestRun]
bestG = ql_data["gamma"][bestRun]
bestA = ql_data["alpha"][bestRun]
bestAD = ql_data["alpha_decay"][bestRun]
bestED = ql_data["epsilon_decay"][bestRun]
bestIt = ql_data["iterations"][bestRun]
bestPct = ql_data["success_pct"][bestRun]
# plot the policy (convert if necessary)
if isinstance(best_policy, str):
best_policy = best_policy.split("\n")
best_policy = [row.strip("[").strip().strip("]").strip("]").strip("[").split() for row in best_policy]
best_policy = np.array(best_policy).astype(int)
plot_lake(env, best_policy, "Ql")
if verbose:
print(f"Best Result:\n\tReward = {bestR:.2f}\n\tGamma = {bestG:.3f}\n\tAlpha = {bestA:.3f}"
f"\n\tAlpha Decay = {bestAD:.3f}\n\tEps Decay = {bestED:.3f}\n\tIterations = {bestIt}"
f"\n\tSuccess Rate = {bestPct:.2f}")
bestPct = ql_data["success_pct"].max()
print(f"\tBest Success Rate = {bestPct:.2f}") | [] |
2024-01-10 | SOAgogo/exhaustednei-api | app~infrastructure~gpt~gpt_species.py | from openai import OpenAI
from dotenv import load_dotenv
import sys
import os
load_dotenv()
openai_api_key = os.getenv("OPENAI_API_KEY")
client = OpenAI(api_key=openai_api_key)
if len(sys.argv) == 2:
image_path = sys.argv[1]
response = client.chat.completions.create(
model="gpt-4-vision-preview",
messages=[
{
"role": "user",
"content": [
{
"type": "text",
"text": "What is this dog or cat species? (The answer must be a dog or cat species) just give me the name of the species ",
},
{
"type": "image_url",
"image_url": {
"url": image_path,
},
},
],
}
],
max_tokens=600,
)
print(response.choices[0]) | [
"[{'type': 'text', 'text': 'What is this dog or cat species? (The answer must be a dog or cat species) just give me the name of the species '}, {'type': 'image_url', 'image_url': {'url': PLACEHOLDER}}]"
] |
2024-01-10 | SOAgogo/exhaustednei-api | app~infrastructure~gpt~gpt_text.py | from openai import OpenAI
from dotenv import load_dotenv
import sys
import os
load_dotenv()
openai_api_key = os.getenv("OPENAI_API_KEY")
client = OpenAI(api_key=openai_api_key)
if len(sys.argv) > 1:
client = OpenAI()
message_from_ruby = sys.argv[1]
completion = client.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=[
{"role": "system", "content": message_from_ruby},
]
)
print(completion.choices[0].message)
else:
print("No message received from Ruby.")
| [] |
2024-01-10 | SOAgogo/exhaustednei-api | app~infrastructure~gpt~gpt_image.py | from openai import OpenAI
from dotenv import load_dotenv
import sys
import os
load_dotenv()
openai_api_key = os.getenv("OPENAI_API_KEY")
client = OpenAI(api_key=openai_api_key)
if len(sys.argv) == 2:
image_path = sys.argv[1]
# image_path = 'https://www.niusnews.com/upload/imgs/default/202207_Jennie/0701cat/03.JPG'
response = client.chat.completions.create(
model="gpt-4-vision-preview",
messages=[
{
"role": "user",
"content": [
{"type": "text", "text": f"According to the dog or cat species, give me some information about how to take care of this animal, and the information must be related to the animal species, the words must be less than 70 words."},
{
"type": "image_url",
"image_url": {
"url": image_path,
},
},
],
}
],
max_tokens=4096,
)
print(response.choices[0])
else:
print("No message received from Ruby.")
| [
"[{'type': 'text', 'text': 'According to the dog or cat species, give me some information about how to take care of this animal, and the information must be related to the animal species, the words must be less than 70 words.'}, {'type': 'image_url', 'image_url': {'url': PLACEHOLDER}}]"
] |
2024-01-10 | HodBadichi/Bias-Mitigation-Through-Topic-Aware-Distribution-Matching | TopicModeling~Utils~Metrics.py | from abc import ABCMeta, abstractmethod
from gensim.models import CoherenceModel
import gensim.corpora as corpora
import pandas as pd
import gensim.models
"""
`Metrics` class implementation, an abstract class used to aggregate metrics for different models
"""
class Metrics(object, metaclass=ABCMeta):
@abstractmethod
def __init__(self, model, metrics_list):
"""
:param model: trained model to Evaluate
:param metrics_list: list of strings which metrics to Evaluate
"""
self.model = model
self.metrics_list = metrics_list
@abstractmethod
def _Measure(self, metric):
"""
Measure the model performance according to a given metric
:param metric: the metric we want to evaluate
:return: evaluation score
"""
pass
def Evaluate(self, metric):
"""
wrapper function for '_Measure'
:param metric: the metric we want to evaluate
:return: evaluation score
"""
if metric not in self.metrics_list:
raise CustomError("Tried to Evaluate a non-existing Metric")
return self._Measure(metric)
def EvaluateAllMetrics(self):
"""
:return:dictionary of model performances on all metrics
"""
evaluations_dict = {}
for metric in self.metrics_list:
evaluations_dict[metric] = self._Measure(metric)
return evaluations_dict
class LDAMetrics(Metrics):
metrics_list = ["perplexity", "c_v", "u_mass", "c_npmi", "c_uci"]
def __init__(self, model, curr_corpus, curr_texts):
super().__init__(model, self.metrics_list)
self.corpus = curr_corpus
self.texts = curr_texts
def _Measure(self, metric):
if (metric == "perplexity"):
return self.model.log_perplexity(self.corpus)
else:
coherencemodel = gensim.models.CoherenceModel(
model=self.model, texts=self.texts, corpus=self.corpus, coherence=metric)
return coherencemodel.get_coherence()
class BertTopicMetrics(Metrics):
metrics_list = ["c_v", "u_mass", "c_npmi", "c_uci"]
def __init__(self, model, docs, topics):
super().__init__(model, self.metrics_list)
self.model = model
self.docs = docs
self.topics = topics
documents = pd.DataFrame({"Document": self.docs,
"ID": range(len(self.docs)),
"Topic": topics})
documents_per_topic = documents.groupby(['Topic'], as_index=False).agg({'Document': ' '.join})
cleaned_docs = self.model._preprocess_text(documents_per_topic.Document.values)
# Extract vectorizer and analyzer from BERTopic
vectorizer = model.vectorizer_model
analyzer = vectorizer.build_analyzer()
# Extract features for Topic Coherence evaluation
words = vectorizer.get_feature_names()
self.tokens = [analyzer(doc) for doc in cleaned_docs]
self.dictionary = corpora.Dictionary(self.tokens)
self.corpus = [self.dictionary.doc2bow(token) for token in self.tokens]
self.topic_words = [[words for words, _ in self.model.get_topic(topic)]
for topic in range(len(set(self.topics)) - 1)]
def _Measure(self, metric):
# Evaluate
coherence_model = CoherenceModel(topics=self.topic_words,
texts=self.tokens,
corpus=self.corpus,
dictionary=self.dictionary,
coherence=metric)
return coherence_model.get_coherence()
class CustomError(Exception):
pass
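# Minimal usage sketch, assuming `lda_model`, `corpus` and `texts` were prepared with gensim
# exactly as in the LDA training workflow:
#   metrics = LDAMetrics(lda_model, corpus, texts)
#   metrics.Evaluate("c_v")            # a single coherence score
#   metrics.EvaluateAllMetrics()       # dict of perplexity plus all coherence measures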
| [] |
2024-01-10 | HodBadichi/Bias-Mitigation-Through-Topic-Aware-Distribution-Matching | TopicModeling~LDA~src~NumberOfTopicsWorkflow.py | import logging
import csv
import os
import sys
if os.name != 'nt':
sys.path.append(os.path.join(os.pardir, os.pardir, os.pardir))
import numpy as np
import gensim
import pandas as pd
from gensim.models import CoherenceModel
from matplotlib import pyplot as plt
from scipy.interpolate import make_interp_spline
from TopicModeling.Utils.Metrics import LDAMetrics
from TopicModeling.LDA.src.LDAUtils import GetLDAParams, PrepareData
from TopicModeling.Utils.TopcModelingUtils import getCurrRunTime
from TopicModeling.LDA.src.hparams_config import hparams
"""
Workflow for tuning gensim's LDA model.
"""
def ShowEvaluationGraphs(
train_results_path,
test_results_path,
smooth=False,
poly_deg=None
):
"""
Used for 'NumberOfTopicsExperiment' results.
    Plots each metric against the number of topics for the train and test sets
    :param train_results_path: CSV file with a 'Topics' column and the measure columns for the train set
    :param test_results_path: CSV file with a 'Topics' column and the measure columns for the test set
    :param smooth: Boolean flag, if True the results graph is smoothed with a spline
    :param poly_deg: Int, fits a polynomial of degree 'poly_deg' to the results graph
    :return: None
"""
figure, axis = plt.subplots(2, 3)
figure.set_size_inches(18.5, 10.5)
train_evaluation_csv = pd.read_csv(train_results_path)
test_evaluation_csv = pd.read_csv(test_results_path)
column_names = train_evaluation_csv.columns
for measure, ax in zip(column_names, axis.ravel()):
if measure == 'Topics':
continue
train_scores = train_evaluation_csv[measure].tolist()
test_scores = test_evaluation_csv[measure].tolist()
train_X_Y_Spline = make_interp_spline(train_evaluation_csv.Topics.tolist(), train_scores)
test_X_Y_Spline = make_interp_spline(test_evaluation_csv.Topics.tolist(), test_scores)
# Returns evenly spaced numbers
# over a specified interval.
train_X_ = np.linspace(train_evaluation_csv.Topics.min(), train_evaluation_csv.Topics.max(), 500)
train_Y_ = train_X_Y_Spline(train_X_)
test_X_ = np.linspace(test_evaluation_csv.Topics.min(), test_evaluation_csv.Topics.max(), 500)
test_Y_ = test_X_Y_Spline(test_X_)
if poly_deg is not None:
train_coefs = np.polyfit(train_evaluation_csv.Topics.tolist(), train_scores, poly_deg)
train_y_poly = np.polyval(train_coefs, train_evaluation_csv.Topics.tolist())
ax.plot(train_evaluation_csv.Topics.tolist(), train_scores, "o", label="data points")
ax.plot(train_evaluation_csv.Topics, train_y_poly, label="Train", color='blue')
test_coefs = np.polyfit(test_evaluation_csv.Topics.tolist(), test_scores, poly_deg)
test_y_poly = np.polyval(test_coefs, test_evaluation_csv.Topics.tolist())
ax.plot(test_evaluation_csv.Topics.tolist(), test_scores, "o", label="data points")
ax.plot(test_evaluation_csv.Topics, test_y_poly, label="Test", color='red')
elif smooth is False:
ax.plot(train_evaluation_csv.Topics, train_scores, label="Train", color='blue')
ax.plot(test_evaluation_csv.Topics, test_scores, label="Test", color='red')
else:
ax.plot(train_X_, train_Y_, label="Train", color='blue')
ax.plot(test_X_, test_Y_, label="Test", color='red')
ax.set_title(measure + " Measure ")
ax.set_xlabel("Number of topics")
ax.set_ylabel("Measure values")
plt.savefig(os.path.join(os.pardir, 'results', f'eval_graphs_{getCurrRunTime()}'))
plt.close()
def InitializeLogger(sFileName=None):
"""
    Initialize a global logger to document the experiment
:param sFileName: required logger name
:return: None
"""
logs_directory_path = os.path.join(os.pardir, 'logs')
os.makedirs(logs_directory_path, exist_ok=True)
if sFileName is None:
sFileName = f'log_{getCurrRunTime()}.txt'
LoggerPath = os.path.join(os.pardir, 'logs', sFileName)
from importlib import reload
reload(logging)
logging.basicConfig(
filename=LoggerPath,
format="%(asctime)s:%(levelname)s:%(message)s",
level=logging.NOTSET)
def RunTuningProcess(
train_LDA_parameters,
test_LDA_parameters,
topics_range,
passes=1,
iterations=1,
chunksize=300,
):
"""
    Train and evaluate a model for each number of topics in the given range.
    Each model is saved and evaluated with 'perplexity' and coherence measurements, and the
    evaluation results are saved as well.
:param train_LDA_parameters: dictionary of {'corpus','texts','id2word'}
:param test_LDA_parameters: dictionary of {'corpus','texts','id2word'}
:param topics_range: topics range to tune
:param passes: number of passes the model does on the whole corpus
:param iterations: number of iterations the model does
:param chunksize: number of documents in each iteration
:return: None
"""
params_dictionary = {
'passes': passes,
'iterations': iterations,
'chunksize': chunksize,
'topics_range': topics_range
}
logging.info(params_dictionary)
models_directory_path = os.path.join(os.pardir, 'saved_models')
result_directory_path = os.path.join(os.pardir, 'results')
os.makedirs(models_directory_path, exist_ok=True)
os.makedirs(result_directory_path, exist_ok=True)
    # Keys are meant to be written as CSV headers later on; values are dummy values
# my_dict = {"Topics": 6, "u_mass": 5, "c_uci": 4, "c_npmi": 3, "c_v": 2, "perplexity": 1}
field_names = ['Topics', 'u_mass', 'c_uci', 'c_npmi', 'c_v', 'perplexity']
train_results_path = os.path.join(result_directory_path, fr'train_evaluation_{getCurrRunTime()}.csv')
test_results_path = os.path.join(result_directory_path, fr'test_evaluation_{getCurrRunTime()}.csv')
with open(train_results_path, "w", encoding='UTF8', newline='') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=field_names)
writer.writeheader()
with open(test_results_path, "w", encoding='UTF8', newline='') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=field_names)
writer.writeheader()
for num_of_topics in topics_range:
logging.info(f"Running number of topics model : {num_of_topics}")
curr_lda_model = gensim.models.ldamodel.LdaModel(
corpus=train_LDA_parameters['corpus'],
id2word=train_LDA_parameters['id2word'],
num_topics=num_of_topics,
random_state=42,
update_every=1,
chunksize=chunksize,
passes=passes,
iterations=iterations
)
saved_model_path = os.path.join(models_directory_path, f'model_{num_of_topics}_{getCurrRunTime()}')
curr_lda_model.save(saved_model_path)
# Save results of train_dataset set
with open(train_results_path, "a", encoding='UTF8', newline='') as csv_file:
# Initialize 'LDAMetrics' class
my_metrics = LDAMetrics(curr_lda_model, train_LDA_parameters['corpus'], train_LDA_parameters['texts'])
writer = csv.DictWriter(csv_file, fieldnames=field_names)
result_dict = my_metrics.EvaluateAllMetrics()
result_dict['Topics'] = num_of_topics
writer.writerows([result_dict])
with open(test_results_path, "a", encoding='UTF8', newline='') as csv_file:
# Initialize 'LDAMetrics' class
my_metrics = LDAMetrics(curr_lda_model, test_LDA_parameters['corpus'], test_LDA_parameters['texts'])
writer = csv.DictWriter(csv_file, fieldnames=field_names)
result_dict = my_metrics.EvaluateAllMetrics()
result_dict['Topics'] = num_of_topics
writer.writerows([result_dict])
def RunNumberOfTopicsExperiment():
np.random.seed(42)
InitializeLogger()
train_set, test_set = PrepareData()
# Gensim LDA preparation - Create corpus and id2word
train_LDA_params = GetLDAParams(train_set)
test_LDA_params = GetLDAParams(test_set)
RunTuningProcess(
train_LDA_params,
test_LDA_params,
topics_range=hparams['topics_range'],
passes=hparams['passes'],
iterations=hparams['iterations'],
chunksize=hparams['chunksize'],
)
logging.info("Evaluating stage is done successfully ")
test_results_path = os.path.join(os.pardir, 'results', fr'test_evaluation_{getCurrRunTime()}.csv', )
train_results_path = os.path.join(os.pardir, 'results', fr'train_evaluation_{getCurrRunTime()}.csv')
    ShowEvaluationGraphs(train_results_path, test_results_path, smooth=True, poly_deg=None)
if __name__ == '__main__':
# to run on Technion lambda server use: sbatch -c 2 --gres=gpu:1 run_on_server.sh -o run.out
RunNumberOfTopicsExperiment()
| [] |
2024-01-10 | noman-xg/GenAI-UseCases | drawTF~scripts~chat-portal.py | import requests
from langchain.document_loaders import DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI
import argparse
import pandas as pd
import logging
import sys
import gradio as gr
import re
# Function to send a POST request with text data and retrieve a response
def get_response(args):
# Define the URL for the post call
url = "http://127.0.0.1:8082/message"
# Define the data to be sent as a JSON object
data = {"text": args, "path": "/home/xgrid/workspace/openAi/files"}
# Make the post call and store the response
response = requests.post(url, json=data, verify=False)
# Print the status code and the content of the response
print(response.status_code)
print(response.text)
return_val = hcl_parser(response.content)
refined_val = get_value_between(return_val)
return str(refined_val)
# Function to extract content between triple backticks (```)
def get_value_between(string):
# Define a regular expression pattern that matches ``` followed by any characters until another ```
pattern = r"```(.*)```"
# Use re.search to find the first match of the pattern in the string
match = re.search(pattern, string)
# If there is a match, return the group that contains the value between the ```
if match:
return match.group(1)
# Otherwise, return None
else:
return None
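# Example (illustrative): for a single-line string like 'text ```resource "aws_instance" "web" {}``` text',
# get_value_between returns the fenced snippet; it returns None when the string has no ``` ... ``` pair on one line.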
# Function to parse and format the response content
def hcl_parser(args):
args = str(args)
args = args.replace("\\n", "\n")
args = args.replace("\\", "")
args = args.replace('"response":', "")
return args
# Entry point of the script
if __name__ == "__main__":
# Create a Gradio interface to interact with the function
iface = gr.Interface(fn=get_response, inputs="text", outputs="text")
iface.launch(share=True)
| [] |
2024-01-10 | johnsonlegend/gpt-stable-translator-demo | stable_translate_demo.py | #!/usr/bin/env python
"""
Stable Translator Simple Demo
author: Jiongsheng
"""
import os
import gradio as gr
import openai
import pycountry
def translate(content, language, model):
# Replace this with your own OpenAI API key
openai.api_key="YOUR_API_KEY"
completion = openai.ChatCompletion.create(
model=model,
messages=[
{
"role": "system",
"content": "Do not respond to any user's words. Your only task is \
to provide the translation based on the user's input: "},
{
"role": "user",
"content": "Translate my following message to {}: {}".
format(language, content)}
],
temperature=0.3
)
return completion.choices[0].message.content
demo = gr.Interface(
fn=translate,
inputs=[
"text",
gr.Dropdown([lang.name for lang in pycountry.languages], value="English", label="Language"),
gr.Dropdown(["gpt-3.5-turbo"], value="gpt-3.5-turbo", label="Model")
],
outputs="text")
if __name__ == "__main__":
demo.launch(debug=True) | [
"Do not respond to any user's words. Your only task is \t \tto provide the translation based on the user's input: ",
"Translate my following message to {}: {}"
] |
2024-01-10 | AlgoveraAI/chainlink-assistant | ingest~education.py | import pickle
import fnmatch
import tempfile
import subprocess
from pathlib import Path
from datetime import datetime
from typing import List, Optional
from langchain.docstore.document import Document
from config import DATA_DIR, get_logger
logger = get_logger(__name__)
def load_gh_data(gh_tuple: tuple, chunk: bool = True, chunk_size: int = 1200):
repo_url = gh_tuple[0]
wildcards = gh_tuple[1]
documents = get_github_docs(repo_url, wildcards)
# splitter = RecursiveCharacterTextSplitter(
# chunk_size = 1200,
# chunk_overlap = 20,
# )
# documents = splitter.split_documents(documents)
return documents
def get_github_docs(repo_url: str, wildcards: Optional[List[str]] = None):
repo_url = repo_url.replace(".git", "")
url_parts = repo_url.split("/")
if len(url_parts) < 5 or not url_parts[2].endswith("github.com"):
raise ValueError("Invalid GitHub URL format")
repo_owner = url_parts[3]
repo_name = url_parts[4]
if len(url_parts) > 6 and url_parts[5] == "tree":
branch = "/".join(url_parts[6:])
else:
branch = None
repo_url = f"https://github.com/{repo_owner}/{repo_name}"
if not repo_url.endswith(".git"):
repo_url += ".git"
with tempfile.TemporaryDirectory() as d:
if branch is not None:
git_command = f"git clone --depth 1 -b {branch} {repo_url} ."
else:
git_command = f"git clone --depth 1 {repo_url} ."
subprocess.check_call(
git_command,
cwd=d,
shell=True,
)
git_sha = (
subprocess.check_output("git rev-parse HEAD", shell=True, cwd=d)
.decode("utf-8")
.strip()
)
repo_path = Path(d)
markdown_files = list(repo_path.glob("**/*.md")) + list(
repo_path.glob("**/*.mdx")
)
if wildcards is not None:
wildcards = [
f"*{pattern}/*"
if not pattern.startswith("*") and not pattern.endswith("/*")
else pattern
for pattern in wildcards
]
filtered_files = []
for wildcard in wildcards:
filtered_files.extend(
file
for file in markdown_files
if fnmatch.fnmatch(str(file), wildcard)
)
markdown_files = list(set(filtered_files)) # Remove duplicates
markdown_files = [
file for file in markdown_files if file.name.endswith("course.md")
]
documents = []
for markdown_file in markdown_files:
with open(markdown_file, "r") as f:
relative_path = markdown_file.relative_to(repo_path)
github_url = f"https://github.com/{repo_owner}/{repo_name}/blob/{git_sha}/{relative_path}"
read = f.read()
documents.append(
Document(
page_content=read,
metadata={"source": github_url, "type": "education"},
)
)
return documents
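# Example (illustrative) of restricting the clone to course markdown under a sub-folder,
# mirroring the call made by scrap_education_docs below:
#   get_github_docs("https://github.com/oceanByte/chainlink-education.git",
#                   ["src/api/src/shared/course"])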
def scrap_education_docs():
gh_tuple = (
"https://github.com/oceanByte/chainlink-education.git",
"src/api/src/shared/course",
)
chainlink_education_documents = load_gh_data(gh_tuple)
# Save the documents to a pickle file with date in the name
with open(f"{DATA_DIR}/education_documents.pkl", "wb") as f:
pickle.dump(chainlink_education_documents, f)
    logger.info("Scraped chainlink education documents.")
return chainlink_education_documents
| [] |
2024-01-10 | AlgoveraAI/chainlink-assistant | ingest~blogs.py | import re
import time
import pickle
from tqdm import tqdm
from pathlib import Path
from bs4 import BeautifulSoup
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from langchain.docstore.document import Document
from concurrent.futures import ProcessPoolExecutor
from config import get_logger, DATA_DIR, MAX_THREADS
from ingest.utils import (
remove_prefix_text,
extract_first_n_paragraphs,
get_description_chain,
get_driver,
)
logger = get_logger(__name__)
driver = get_driver()
def close_popup(driver):
try:
close_btn = driver.find_element(By.XPATH, "/html/body/div[4]/a[2]")
close_btn.click()
time.sleep(2) # give it a moment to close
except Exception as e:
# if we can't find the popup close button, just continue
pass
def click_load_more_button(driver, attempts=5):
try:
close_popup(driver)
wait = WebDriverWait(driver, 10)
element = wait.until(
EC.element_to_be_clickable(
(By.XPATH, "/html/body/div[1]/div/section/div/div/div[3]/div[2]/a")
)
)
element.click()
return True
except Exception as e:
if attempts > 0:
time.sleep(5)
return click_load_more_button(driver, attempts - 1)
else:
logger.error(f"Failed to click on 'load more'. Error: {str(e)}")
return False
def get_blog_urls():
urls = set()
try:
driver.maximize_window()
driver.get(
"https://blog.chain.link/?s=&categories=32&services=&tags=&sortby=newest"
)
time.sleep(3)
for i in range(200):
soup = BeautifulSoup(driver.page_source, "html.parser")
blogs = [post.a["href"] for post in soup.findAll("div", class_="post-card")]
urls |= set(blogs)
if not click_load_more_button(driver):
break
if i % 10 == 0:
logger.info(f"Scraped {len(urls)} blog urls")
except Exception as e:
logger.error(f"Error scraping blog urls: {e}")
return urls
def to_markdown(pair):
url, soup = pair
output = ""
try:
try:
sub_soup = soup.find("h1", id="post-title")
heading_level = int(sub_soup.name[1:])
output += f"{'#' * heading_level} {sub_soup.get_text()}\n\n"
except:
sub_soup = soup.find("h1")
heading_level = int(sub_soup.name[1:])
output += f"{'#' * heading_level} {sub_soup.get_text()}\n\n"
sub_soup_2 = soup.find("div", class_="post-header")
if not sub_soup_2:
sub_soup_2 = soup.find("article", class_="educational-content")
for element in sub_soup_2.find_all(
[
"p",
"h1",
"h2",
"h3",
"h4",
"h5",
"h6",
"ul",
"li",
"blockquote",
"code",
"pre",
"em",
"strong",
"ol",
"dl",
"dt",
"dd",
"hr",
"table",
"thead",
"tbody",
"tr",
"th",
"td",
"sup",
"sub",
"abbr",
]
):
if element.name == "p":
output += f"{element.get_text()}\n\n"
elif element.name.startswith("h"):
try:
heading_level = int(element.name[1:])
output += f"{'#' * heading_level} {element.get_text()}\n\n"
except:
pass
elif element.name == "ul":
for li in element.find_all("li"):
output += f"- {li.get_text()}\n"
output += "\n"
elif element.name == "li":
output += f"- {element.get_text()}\n"
elif element.name == "blockquote":
output += f"> {element.get_text()}\n\n"
elif element.name == "code":
output += f"`{element.get_text()}`"
elif element.name == "pre":
output += f"```\n{element.get_text()}\n```\n\n"
elif element.name == "em":
output += f"*{element.get_text()}*"
elif element.name == "strong":
output += f"**{element.get_text()}**"
elif element.name == "ol":
for li in element.find_all("li"):
output += f"1. {li.get_text()}\n"
output += "\n"
elif element.name == "dl":
for dt, dd in zip(element.find_all("dt"), element.find_all("dd")):
output += f"{dt.get_text()}:\n{dd.get_text()}\n"
output += "\n"
elif element.name == "hr":
output += "---\n\n"
elif element.name == "table":
table_text = element.get_text(separator="|", strip=True)
output += f"{table_text}\n\n"
elif element.name == "thead":
output += f"{element.get_text()}\n"
elif element.name in ["tbody", "tr", "th", "td"]:
pass # Ignore these elements
elif element.name == "sup":
output += f"<sup>{element.get_text()}</sup>"
elif element.name == "sub":
output += f"<sub>{element.get_text()}</sub>"
elif element.name == "abbr":
output += f"<abbr title='{element.get('title', '')}'>{element.get_text()}</abbr>"
return (url, output)
except Exception as e:
logger.error(f"Error processing {url}: {e}")
return (url, "")
def fetch_url_content(url):
try:
driver.get(url)
soup = BeautifulSoup(driver.page_source, "html.parser")
return (url, soup)
except Exception as e:
logger.error(f"Error scraping {url}: {e}")
return (url, None)
title_pattern = re.compile(r"^#\s(.+)$", re.MULTILINE)
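# e.g. for markdown beginning with "# Understanding Chainlink VRF", title_pattern captures
# "Understanding Chainlink VRF" as the post title.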
chain_description = get_description_chain()
def process_blog_entry(blog):
try:
url, markdown = blog
markdown_content = remove_prefix_text(markdown)
titles = title_pattern.findall(markdown_content)
title = titles[0].strip() if titles else "No Title"
para = extract_first_n_paragraphs(markdown_content, num_para=2)
description = chain_description.predict(context=para)
return Document(
page_content=markdown,
metadata={
"source": url,
"source_type": "blog",
"title": title,
"description": description,
},
)
except Exception as e:
logger.error(f"Error processing blog entry {blog[0]}: {e}")
return None
def get_blog(soup):
try:
markdown = to_markdown(soup)
doc = process_blog_entry(markdown)
return doc
except Exception as e:
logger.error(f"Error processing blog entry: {e}")
return None
# def scrap_blogs():
# global driver
# driver = get_driver()
# urls = get_blog_urls()
# logger.info(f"Total number of blog urls: {len(urls)}")
# # Use concurrent.futures to parallelize the fetching of URLs
# with ProcessPoolExecutor(max_workers=MAX_THREADS) as executor:
# soups = list(tqdm(executor.map(fetch_url_content, urls), total=len(urls)))
# unsuccessful_urls = [url for url, soup in soups if not soup]
# successful_soups = [(url, soup) for url, soup in soups if soup]
# # Use concurrent.futures to parallelize the markdown conversion
# with ProcessPoolExecutor(max_workers=MAX_THREADS) as executor:
# blogs = list(
# tqdm(executor.map(get_blog, successful_soups), total=len(successful_soups))
# )
# # # with ProcessPoolExecutor(max_workers=MAX_WORKERS) as executor:
# # # blogs_documents = list(tqdm(executor.map(process_blog_entry, blogs), total=len(blogs)))
# # blogs_documents = [process_blog_entry(blog) for blog in tqdm(blogs, desc="Processing Blog Entries")]
# # Remove nones
# blogs_documents = [doc for doc in blogs if doc]
# with open(f"{DATA_DIR}/blog_documents.pkl", "wb") as f:
# pickle.dump(blogs_documents, f)
# logger.info(f"Scraped blog posts")
# return blogs_documents
def scrap_blogs():
global driver
driver = get_driver()
urls = get_blog_urls()
logger.info(f"Total number of blog urls: {len(urls)}")
soups = [fetch_url_content(url) for url in tqdm(urls)]
unsuccessful_urls = [url for url, soup in soups if not soup]
successful_soups = [(url, soup) for url, soup in soups if soup]
blogs = [get_blog(soup) for soup in tqdm(successful_soups)]
blogs_documents = [doc for doc in blogs if doc]
with open(f"{DATA_DIR}/blog_documents.pkl", "wb") as f:
pickle.dump(blogs_documents, f)
logger.info(f"Scraped blog posts")
return blogs_documents
| [] |
2024-01-10 | AlgoveraAI/chainlink-assistant | ingest~stackoverflow.py | import os
import re
import time
import pickle
import requests
import html2text
from tqdm import tqdm
from pathlib import Path
from datetime import datetime
from langchain.docstore.document import Document
from config import DATA_DIR, get_logger
logger = get_logger(__name__)
# Settings for stackoverflow requests
key = os.getenv("SO_KEY")
tag = "chainlink"
pagesize = 99
from_date = "1609459200"
# Today's int date
to_date = int(datetime.now().timestamp())
# https://api.stackexchange.com/docs/questions
def get_questions(tag, page, pagesize, from_date, to_date, key, access_token):
# url = f'https://api.stackexchange.com/2.3/questions?order=desc&sort=creation&tagged={tag}&site={site}&pagesize={pagesize}&key={key}&access_token={access_token}&filter=withbody'
url = f"https://api.stackexchange.com/2.3/questions?page={page}&pagesize={pagesize}&tagged={tag}&fromdate={from_date}&todate={to_date}&key={key}&access_token={access_token}&order=desc&sort=activity&site=stackoverflow&filter=withbody"
response = requests.get(url)
return response.json()["items"]
def get_answers(question_id, key, access_token):
url = f"https://api.stackexchange.com/2.3/questions/{question_id}/answers?order=desc&sort=activity&site=stackoverflow&key={key}&access_token={access_token}&filter=withbody"
response = requests.get(url)
return response.json()["items"]
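# Each item returned by the /questions and /answers endpoints is a dict; the fields used below
# are "title", "body" (HTML), "link", "question_id", "creation_date" (unix epoch),
# "is_accepted" and "owner"["reputation"].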
def get_all_questions(access_token):
all_questions = None
for i in range(1, 5):
page = i
if all_questions is None:
all_questions = get_questions(
tag=tag,
pagesize=pagesize,
key=key,
access_token=access_token,
from_date=from_date,
to_date=to_date,
page=page,
)
else:
all_questions.extend(
get_questions(
tag=tag,
pagesize=pagesize,
key=key,
access_token=access_token,
from_date=from_date,
to_date=to_date,
page=page,
)
)
time.sleep(3)
all_questions = sorted(
all_questions, key=lambda x: x["creation_date"], reverse=True
)
# Only consider questions from user with reputation > 50. Some questions don't have reputation field. They should be removed
all_questions = [
question
for question in all_questions
if "reputation" in question["owner"].keys()
and question["owner"]["reputation"] > 50
]
h = html2text.HTML2Text()
h.ignore_links = False
h.ignore_images = True
h.ignore_emphasis = False
h.ignore_tables = False
questions_and_answers = {}
for question in tqdm(all_questions, total=len(all_questions)):
answers = get_answers(question["question_id"], key, access_token)
if not answers: # Skip questions with no answers
continue
# Only keep accepted answer
answers = [answer for answer in answers if answer["is_accepted"] == True]
# Only keep answer from user with reputation > 50; some answers don't have reputation field. they should be removed
answers = [
answer
for answer in answers
if "reputation" in answer["owner"].keys()
and answer["owner"]["reputation"] > 50
]
question_body = re.sub(r"!\[.*?\]\(.*?\)", "", h.handle(question["body"]))
question_str = (
"Question: (Asked on: "
+ datetime.utcfromtimestamp(question["creation_date"]).strftime(
"%Y-%m-%d %H:%M:%S"
)
+ ")\n"
+ question["title"]
+ "\n"
)
question_url = "URL: " + question["link"] + "\n\n"
question_body_str = "Question Body:\n" + question_body + "\n\n"
answers_str = (
"Answers:\n"
+ "\n---\n".join(
[
"(Answered on: "
+ datetime.utcfromtimestamp(answer["creation_date"]).strftime(
"%Y-%m-%d %H:%M:%S"
)
+ ")\n"
+ re.sub(r"!\[.*?\]\(.*?\)", "", h.handle(answer["body"]))
for answer in answers
]
)
+ "\n\n"
)
text = question_str + question_url + question_body_str + answers_str
questions_and_answers[question["question_id"]] = Document(
page_content=text,
metadata={"source": question["link"], "type": "stackoverflow"},
)
    logger.info("Scraped stackoverflow")
return questions_and_answers
def scrap_stackoverflow(access_token):
logger.info(f"Key: {key}")
logger.info(f"{access_token}")
so_docs = get_all_questions(access_token)
# Make sure data directory exists
Path("./data").mkdir(parents=True, exist_ok=True)
# Convert to list
so_docs = list(so_docs.values())
# Log first document
logger.info(f"First document SO: {so_docs[0]}")
# Save to disk
with open(f"{DATA_DIR}/stackoverflow_documents.pkl", "wb") as f:
pickle.dump(so_docs, f)
    logger.info("Scraped stackoverflow")
return so_docs
| [] |
2024-01-10 | AlgoveraAI/chainlink-assistant | chat~get_chain_mem.py | from langchain.llms import base
import tiktoken
from chat.prompts_mem import (
FINAL_ANSWER_PROMPT,
FINAL_ANSWER_2_PROMPT,
QUESTION_MODIFIER_PROMPT,
)
from chat.prompts_no_mem import (
ROUTER_PROMPT,
)
from chat.utils import get_retriever_chain, get_streaming_chain
from utils import createLogHandler
from schemas import ChatResponse, Sender, MessageType
logger = createLogHandler(__name__, "logs.log")
model = "gpt-3.5-turbo"
try:
encoding = tiktoken.encoding_for_model(model)
except KeyError:
logger.error(f"Encoding for model {model} not found. Using default encoding.")
encoding = tiktoken.get_encoding("cl100k_base")
def calculate_tokens(document, encoding):
"""Calculate the number of tokens in a list of documents."""
return len(encoding.encode(document))
def concatenate_documents(documents, max_tokens):
"""Combine documents up to a certain token limit."""
combined_docs = ""
token_count = 0
used_docs = []
for doc in documents:
doc_tokens = calculate_tokens(doc.page_content, encoding)
if (token_count + doc_tokens) <= max_tokens:
combined_docs += f"\n\n{doc.page_content}\nSource: {doc.metadata['source']}"
token_count += doc_tokens
used_docs.append(doc)
return combined_docs, used_docs
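# Illustrative behaviour: with max_tokens=14_000 and five ~4k-token documents, one call packs
# the first three (each suffixed with its Source) into a single string and reports which
# documents were used, so the caller can batch the remainder in further calls and keep each
# LLM call under the context limit.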
def call_llm_final_answer(question, document, memory, chain, stream=False):
"""Call LLM with a question and a single document."""
if stream:
chain.prompt = FINAL_ANSWER_PROMPT
return chain.apredict(
question=question, document=document, history=memory.buffer
)
else:
chain.prompt = FINAL_ANSWER_PROMPT
return chain.predict(
question=question, document=document, history=memory.buffer
)
def call_llm_final_2_answer(question, document, memory, chain):
"""Call LLM with a question and a single document."""
chain.prompt = FINAL_ANSWER_2_PROMPT
return chain.apredict(question=question, document=document, history=memory.buffer)
async def process_documents(question, chain, retriever, memory, max_tokens=14_000):
"""Process a list of documents with LLM calls."""
# Modify question if memory is not empty
if memory.chat_memory.messages:
logger.debug(f"Processing documents for question: {question}")
chain.prompt = QUESTION_MODIFIER_PROMPT
modified_question = chain.predict(question=question, history=memory.buffer)
logger.debug(f"Modified question: {modified_question}")
else:
modified_question = question
    # Use the router to decide which workflow to use
chain.prompt = ROUTER_PROMPT
workflow = chain.predict(question=modified_question)
logger.debug(f"Using workflow: {workflow}")
# Get relevant documents
documents = retriever.get_relevant_documents(modified_question, workflow=workflow)
batches = []
num_llm_calls = 0
while documents:
batch, used_docs = concatenate_documents(documents, max_tokens)
batches.append(batch)
# logger.info(f"Calling LLM with {batch}")
documents = [doc for doc in documents if doc not in used_docs]
num_llm_calls += 1
logger.info(
f"Num LLM call required: {num_llm_calls}. {len(documents)} documents remaining."
)
return batches, num_llm_calls, workflow
async def get_answer_memory(question, memory, max_tokens=14_000, manager=None):
"""Process documents and call LLM."""
# Get retriever and chain
retriever, base_chain = get_retriever_chain()
resp = ChatResponse(
sender=Sender.BOT, message="Retrieving Documents", type=MessageType.STATUS
)
await manager.broadcast(resp)
# Main code that calls process_documents
    batches, num_llm_calls, workflow = await process_documents(
        question, chain=base_chain, retriever=retriever, memory=memory, max_tokens=max_tokens
    )
# Get the stream chain
chain_stream = get_streaming_chain(
manager=manager, chain=base_chain, workflow=workflow
)
resp = ChatResponse(
sender=Sender.BOT, message=f"Generating Answer", type=MessageType.STATUS
)
await manager.broadcast(resp)
if num_llm_calls == 1:
result = await call_llm_final_answer(
question=question,
document=batches[0],
chain=chain_stream,
stream=True,
memory=memory,
)
return result, memory
else:
# Handle the list of batches
results = []
for batch in batches:
result = call_llm_final_answer(
question=question,
document=batch,
chain=base_chain,
stream=False,
memory=memory,
)
results.append(result)
combined_result = " ".join(results)
logger.info(f"Final LLM call with {len(results)} results.")
combined_result = await call_llm_final_2_answer(
question=question,
document=combined_result,
chain=chain_stream,
memory=memory,
)
return combined_result, memory
| [] |
2024-01-10 | AlgoveraAI/chainlink-assistant | chat~v4~get_chain_mem.py | from langchain.llms import base
import tiktoken
from chainlink.prompts_mem import (
FINAL_ANSWER_PROMPT,
FINAL_ANSWER_2_PROMPT,
QUESTION_MODIFIER_PROMPT,
)
from chainlink.utils import (
retriever,
chain as base_chain,
get_streaming_chain,
)
from utils import createLogHandler
from schemas import ChatResponse, Sender, MessageType
logger = createLogHandler(__name__, "logs.log")
model = "gpt-3.5-turbo"
try:
encoding = tiktoken.encoding_for_model(model)
except KeyError:
logger.error(f"Encoding for model {model} not found. Using default encoding.")
encoding = tiktoken.get_encoding("cl100k_base")
def calculate_tokens(document, encoding):
"""Calculate the number of tokens in a list of documents."""
return len(encoding.encode(document))
def concatenate_documents(documents, max_tokens):
"""Combine documents up to a certain token limit."""
combined_docs = ""
token_count = 0
used_docs = []
for doc in documents:
doc_tokens = calculate_tokens(doc.page_content, encoding)
if (token_count + doc_tokens) <= max_tokens:
combined_docs += f"\n\n{doc.page_content}\nSource: {doc.metadata['source']}"
token_count += doc_tokens
used_docs.append(doc)
return combined_docs, used_docs
def call_llm_final_answer(question, document, memory, chain, stream=False):
"""Call LLM with a question and a single document."""
if stream:
chain.prompt = FINAL_ANSWER_PROMPT
return chain.apredict(
question=question, document=document, history=memory.buffer
)
else:
chain.prompt = FINAL_ANSWER_PROMPT
return chain.predict(
question=question, document=document, history=memory.buffer
)
def call_llm_final_2_answer(question, document, memory, chain):
"""Call LLM with a question and a single document."""
chain.prompt = FINAL_ANSWER_2_PROMPT
return chain.apredict(question=question, document=document, history=memory.buffer)
async def process_documents(question, chain, memory, max_tokens=14_000):
"""Process a list of documents with LLM calls."""
logger.info(f"Processing documents for question: {question}")
chain.prompt = QUESTION_MODIFIER_PROMPT
modified_question = chain.predict(question=question, history=memory.buffer)
logger.debug(f"Modified question: {modified_question}")
documents = retriever.get_relevant_documents(modified_question)
batches = []
num_llm_calls = 0
while documents:
batch, used_docs = concatenate_documents(documents, max_tokens)
batches.append(batch)
# logger.info(f"Calling LLM with {batch}")
documents = [doc for doc in documents if doc not in used_docs]
num_llm_calls += 1
logger.debug(
f"Num LLM call required: {num_llm_calls}. {len(documents)} documents remaining."
)
return batches, num_llm_calls
async def get_answer_memory(question, memory, max_tokens=14_000, manager=None):
"""Process documents and call LLM."""
resp = ChatResponse(
sender=Sender.BOT, message="Retrieving Documents", type=MessageType.STATUS
)
await manager.broadcast(resp)
# Get the stream chain
chain_stream = get_streaming_chain(manager=manager, chain=base_chain)
# Main code that calls process_documents
batches, num_llm_calls = await process_documents(
question, chain=base_chain, memory=memory, max_tokens=max_tokens
)
resp = ChatResponse(
sender=Sender.BOT, message=f"Generating Answer", type=MessageType.STATUS
)
await manager.broadcast(resp)
if num_llm_calls == 1:
result = await call_llm_final_answer(
question=question,
document=batches[0],
chain=chain_stream,
stream=True,
memory=memory,
)
return result, memory
else:
# Handle the list of batches
results = []
for batch in batches:
result = call_llm_final_answer(
question=question,
document=batch,
chain=base_chain,
stream=False,
memory=memory,
)
results.append(result)
combined_result = " ".join(results)
logger.info(f"Final LLM call with {len(results)} results.")
combined_result = await call_llm_final_2_answer(
question=question,
document=combined_result,
chain=chain_stream,
memory=memory,
)
return combined_result, memory
| [] |
2024-01-10 | AlgoveraAI/chainlink-assistant | ingest_script.py | import os
import faiss
import pickle
import requests
import argparse
from dotenv import load_dotenv
from datetime import datetime
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from ingest.docs import scrap_docs
from ingest.blogs import scrap_blogs
from ingest.education import scrap_education_docs
from ingest.stackoverflow import scrap_stackoverflow
from ingest.data import scrap_data
from ingest.chain_link import scrap_chain_link
from config import get_logger, DATA_DIR
from chat.utils import CustomeSplitter
from fastapi import HTTPException
logger = get_logger(__name__)
load_dotenv()
CLIENT_ID = os.getenv("CLIENT_ID")
CLIENT_SECRET = os.getenv("CLIENT_SECRET")
REDIRECT_URI = "http://localhost"
LOGIN_URL = "https://stackoverflow.com/users/login"
OAUTH_URL = f"https://stackoverflow.com/oauth?client_id={CLIENT_ID}&scope=read_inbox&redirect_uri={REDIRECT_URI}"
TOKEN_URL = "https://stackoverflow.com/oauth/access_token"
date_str = datetime.now().strftime(
"%Y-%m-%d"
) # Get today's date as "YYYY-MM-DD" format
def get_access_token():
# Create a session to maintain cookies
session = requests.Session()
# Try to access OAuth URL
response = session.get(OAUTH_URL, allow_redirects=False)
logger.info(f"Initial get code response: {response.status_code}")
logger.info(
f"Initial get code response headers: {response.headers.get('location')}"
)
if (
response.status_code == 302
and "location" in response.headers
and "/users/login" in response.headers["location"]
):
# We're being redirected to login
login_data = {
"email": os.getenv("SO_LOGIN_EMAIL"),
"password": os.getenv("SO_LOGIN_PASSWORD"),
}
response = session.post(LOGIN_URL, data=login_data)
logger.info(f"Login response: {response.status_code}")
# After login, try accessing OAuth URL again
response = session.get(OAUTH_URL, allow_redirects=False)
logger.info(f"Second get code response: {response.status_code}")
logger.info(
f"Second get code response headers: {response.headers.get('location')}"
)
# Extract the intermediary code
if response.status_code == 302 and "code=" in response.headers["location"]:
code = response.headers["location"].split("code=")[1].split("&")[0]
# Exchange the intermediary code for the access token
token_data = {
"client_id": CLIENT_ID,
"client_secret": CLIENT_SECRET,
"code": code,
"redirect_uri": REDIRECT_URI,
}
token_response = requests.post(TOKEN_URL, data=token_data)
if token_response.status_code == 200:
response_content = token_response.content.decode(
"utf-8"
) # Convert byte-string to string
actual_token = response_content.split("access_token=")[1].split("&")[0]
return {"access_token": actual_token}
raise HTTPException(
status_code=400, detail="Unable to fetch the intermediary code."
)
def ingest_task():
# Get access token
access_token = get_access_token()
# Parse technical documentation
docs_documents = scrap_docs()
# Parse blog posts
blog_urls = scrap_blogs()
# Parse chainlink.education
chainlink_education_documents = scrap_education_docs()
# Parse stackoverflow
stackoverflow_documents = scrap_stackoverflow(access_token["access_token"])
# Parse data.chain.link
data_documents = scrap_data()
# Parse chain.link
chain_link_documents, chain_link_youtube_documents = scrap_chain_link()
# Combine all documents into one list (except data)
documents = []
documents_count = 0
documents_count += len(docs_documents)
documents.extend(docs_documents)
documents_count += len(blog_urls)
documents.extend(blog_urls)
documents_count += len(chainlink_education_documents)
documents.extend(chainlink_education_documents)
documents_count += len(stackoverflow_documents)
documents.extend(stackoverflow_documents)
documents_count += len(chain_link_documents)
documents.extend(chain_link_documents)
documents_count += len(chain_link_youtube_documents)
documents.extend(chain_link_youtube_documents)
# Log the number of documents
logger.info(f"Docs: {len(docs_documents)}")
logger.info(f"Blogs: {len(blog_urls)}")
logger.info(f"Education: {len(chainlink_education_documents)}")
logger.info(f"Stackoverflow: {len(stackoverflow_documents)}")
logger.info(f"Data: {len(data_documents)}")
logger.info(f"Chain Link: {len(chain_link_documents)}")
logger.info(f"Chain Link Youtube: {len(chain_link_youtube_documents)}")
# Log the total number of documents
logger.info(f"Total: {len(documents)}")
# For saving documents:
with open(f"{DATA_DIR}/documents.pkl", "wb") as f:
pickle.dump(documents, f)
# Split documents into chunks for 16k model
full_doc_splitter = CustomeSplitter()
chunked_full_documents = full_doc_splitter.split(documents)
splitter = RecursiveCharacterTextSplitter(chunk_size=1200, chunk_overlap=50)
split_docs = splitter.split_documents(documents)
# Create vectorstore for all documents
vectorstore_all = FAISS.from_documents(split_docs, embedding=OpenAIEmbeddings())
# Split documents into chunks using datadocs
split_docs_data = splitter.split_documents(data_documents)
# Create vectorstore for datadocs
vectorstore_data = FAISS.from_documents(
split_docs_data, embedding=OpenAIEmbeddings()
)
# Save vectorstores to disk
faiss.write_index(vectorstore_all.index, f"{DATA_DIR}/docs_all.index")
vectorstore_all.index = None
with open(f"{DATA_DIR}/faiss_store_all.pkl", "wb") as f:
pickle.dump(vectorstore_all, f)
# Save vectorstore_data
faiss.write_index(vectorstore_data.index, f"{DATA_DIR}/docs_data.index")
vectorstore_data.index = None
with open(f"{DATA_DIR}/faiss_store_data.pkl", "wb") as f:
pickle.dump(vectorstore_data, f)
logger.info("Done")
if __name__ == "__main__":
ingest_task()
| [] |
2024-01-10 | AlgoveraAI/chainlink-assistant | chat~v4~prompts_no_mem.py | from langchain.prompts import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
final_answer_system_template = """
As an AI assistant helping answer a user's question about Chainlink, your task is to provide the answer to the user's question based on the collection of documents provided. Each document is demarcated by the 'Source:' tag.
If the documents do not contain the required information to answer user's question, respond with 'I don't know'.
Each point in your answer should be formatted with corresponding reference(s) using markdown. Conclude your response with a footnote that enumerates all the references involved. Please make sure to use only the references provided in the documents and not to use any external references.
The footnote should be formatted as follows:
```
References:
[^1^]: <reference 1>
[^2^]: <reference 2>
[^3^]: <reference 3>
```
Please avoid duplicating references. For example, if the same reference is used twice in the answer, please only include it once in the footnote.
"""
final_answer_human_template = """
User's question: {question}
Document: {document}
Answer:
"""
FINAL_ANSWER_PROMPT = ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template(final_answer_system_template),
HumanMessagePromptTemplate.from_template(final_answer_human_template),
]
)
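# Illustrative use: the answering chain swaps this prompt in before its final call, e.g.
#   chain.prompt = FINAL_ANSWER_PROMPT
#   answer = chain.predict(question=question, document=combined_docs)
# where `combined_docs` is the concatenation of retrieved documents with their Source tags.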
final_answer_2_system_template = """
As an AI assistant helping answer a user's question about Chainlink, your task is to provide the answer to the user's question based on the potential answers derived from previous LLM call(s).
If the document doesn't contain the required information, respond with 'I don't know'.
Each point in your answer should be formatted with corresponding reference(s) using markdown. Conclude your response with a footnote that enumerates all the references involved.
The footnote should be formatted as follows:
```
References:
[^1^]: <reference 1>
[^2^]: <reference 2>
[^3^]: <reference 3>
```
Please avoid duplicating references. For example, if the same reference is used twice in the answer, please only include it once in the footnote.
"""
final_answer_2_human_template = """
User's question: {question}
Document: {document}
Answer:
"""
FINAL_ANSWER_2_PROMPT = ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template(final_answer_2_system_template),
HumanMessagePromptTemplate.from_template(final_answer_2_human_template),
]
)
| [
"\nUser's question: {question}\n\nDocument: {document}\n\nAnswer:\n",
"s question based on the collection of documents provided. Each document is demarcated by the ",
"\nAs an AI assistant helping answer a user's question about Chainlink, your task is to provide the answer to the user's question based on the collection of documents provided. Each document is demarcated by the 'Source:' tag. \nIf the documents do not contain the required information to answer user's question, respond with 'I don't know'.\nEach point in your answer should be formatted with corresponding reference(s) using markdown. Conclude your response with a footnote that enumerates all the references involved. Please make sure to use only the references provided in the documents and not to use any external references. \n\nThe footnote should be formatted as follows: \n```\nReferences:\n[^1^]: <reference 1> \n[^2^]: <reference 2> \n[^3^]: <reference 3>\n```\nPlease avoid duplicating references. For example, if the same reference is used twice in the answer, please only include it once in the footnote.\n",
"s question, respond with ",
"t know",
"t contain the required information, respond with ",
"\nAs an AI assistant helping answer a user's question about Chainlink, your task is to provide the answer to the user's question based on the potential answers derived from previous LLM call(s). \nIf the document doesn't contain the required information, respond with 'I don't know'.\nEach point in your answer should be formatted with corresponding reference(s) using markdown. Conclude your response with a footnote that enumerates all the references involved. \n\nThe footnote should be formatted as follows: \n```\nReferences:\n[^1^]: <reference 1> \n[^2^]: <reference 2> \n[^3^]: <reference 3>\n```\nPlease avoid duplicating references. For example, if the same reference is used twice in the answer, please only include it once in the footnote.\n"
] |
2024-01-10 | AlgoveraAI/chainlink-assistant | chat~v3~get_chain_mem.py | from langchain.memory.buffer import ConversationBufferMemory
from chainlink.prompts_mem import (
POTENTIAL_ANSWER_PROMPT,
FINAL_ANSWER_PROMPT,
FINAL_ANSWER_PROMPT_2,
VERIFICATION_PROMPT,
QUESTION_MODIFIER_PROMPT,
)
from chainlink.utils import (
prepare_single_document,
prepare_multiple_documents,
retriever,
chain,
)
from utils import createLogHandler
logger = createLogHandler(__name__, "logs.log")
def get_answer_memory(question, memory):
if memory.chat_memory.messages:
chain.prompt = QUESTION_MODIFIER_PROMPT
question = chain.predict(question=question, history=memory.buffer)
logger.info(f"Modified question: {question}")
retrieved_docs = retriever.get_relevant_documents(question)
all_answers = []
for i, d in enumerate(retrieved_docs):
chain.prompt = POTENTIAL_ANSWER_PROMPT
response = chain.predict(question=question, document=prepare_single_document(d))
all_answers.append(
{"id": i, "answer": response, "original_source": d.metadata["source"]}
)
result = prepare_multiple_documents(all_answers)
chain.prompt = FINAL_ANSWER_PROMPT
answer = chain.predict(question=question, document=result, history=memory.buffer)
if any(
answer.lower().startswith(x)
for x in ["i don't know", "i dont know", "i do not know", "i don't know."]
):
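        # Fallback: the first final-answer prompt came back as "I don't know", so retry
        # with FINAL_ANSWER_PROMPT_2 over the same combined document; the loop keeps the
        # first answer that is not a variant of "I don't know".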
pot_answer = ""
for i, d in enumerate(retrieved_docs):
if not pot_answer:
chain.prompt = FINAL_ANSWER_PROMPT_2
answer = chain.predict(
question=question, document=result, history=memory.buffer
)
if not any(
answer.lower().startswith(x)
for x in [
"i don't know",
"i dont know",
"i do not know",
"i don't know.",
]
):
pot_answer = answer
break
if not pot_answer:
answer = "Sorry, I don't know the answer to that question."
else:
answer = pot_answer
memory.save_context({"question": question}, {"answer": answer})
return answer, memory
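# Example usage (illustrative sketch, not part of the original module): pass a fresh
# ConversationBufferMemory on the first call and reuse the returned memory on follow-ups,
# so QUESTION_MODIFIER_PROMPT can rewrite the next question with the chat history.
#
#   memory = ConversationBufferMemory()
#   answer, memory = get_answer_memory("What is Chainlink VRF?", memory)
#   answer, memory = get_answer_memory("How do I request randomness with it?", memory)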
| [] |
2024-01-10 | AlgoveraAI/chainlink-assistant | search~deprecated~prepare_search.py | import re
import pickle
from tqdm import tqdm
from datetime import datetime
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from config import get_logger
logger = get_logger(__name__)
system_template = """
Please summarize the context below in one sentence (no more than 15 words). This will be used as the description of the article in the search results.
Response should be NO MORE THAN 15 words.
"""
human_template = """{context}"""
PROMPT = ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template(system_template),
HumanMessagePromptTemplate.from_template(human_template),
]
)
llm = ChatOpenAI(temperature=0.0)
chain = LLMChain(llm=llm, prompt=PROMPT)
def extract_first_n_paragraphs(content, num_para=2):
# Split by two newline characters to denote paragraphs
paragraphs = content.split("\n\n")
# Return the first num_para paragraphs or whatever is available
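    # e.g. extract_first_n_paragraphs("Intro\n\nBody\n\nMore", num_para=2) -> "Intro\n\nBody"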
return "\n\n".join(paragraphs[:num_para])
def prepare_search_docs(doc_path, blog_path, num_para=2):
with open(doc_path, "rb") as f:
docs = pickle.load(f)
with open(blog_path, "rb") as f:
blogs = pickle.load(f)
blog_docs = []
for blog in tqdm(blogs, total=len(blogs)):
title = blog.page_content.split("\n\n")[0].replace("#", "").strip()
# Get the first two paragraphs
para = extract_first_n_paragraphs(blog.page_content, num_para=num_para)
description = chain.predict(context=para)
metadata = {
"title": title,
"description": description,
"source": blog.metadata["source"],
"source_type": "blog",
}
logger.info(f"Description: {description}")
blog.metadata = metadata
blog_docs.append(blog)
tech_docs = []
for doc in tqdm(docs, total=len(docs)):
title = doc.page_content.split("\n\n")[0].replace("#", "").strip()
para = extract_first_n_paragraphs(doc.page_content, num_para=num_para)
description = chain.predict(context=para)
metadata = {
"title": title,
"description": description,
"source": doc.metadata["source"],
"source_type": "technical_document",
}
logger.info(f"Title: {title}")
logger.info(f"Description: {description}")
doc.metadata = metadata
tech_docs.append(doc)
# Save the documents
with open(
f"./data/search_blogdocs_{datetime.now().strftime('%Y-%m-%d')}.pkl", "wb"
) as f:
pickle.dump(blog_docs, f)
with open(
f"./data/search_techdocs_{datetime.now().strftime('%Y-%m-%d')}.pkl", "wb"
) as f:
pickle.dump(tech_docs, f)
return blog_docs, tech_docs
if __name__ == "__main__":
prepare_search_docs(
doc_path="/home/marshath/play/chainlink/chainlink-assistant/data/techdocs_2023-08-14.pkl",
blog_path="/home/marshath/play/chainlink/chainlink-assistant/data/blog_2023-08-14.pkl",
)
| [
"\nPlease summarize the context below in one sentence (no more than 15 words). This will be used as the description of the article in the search results.\n\nResponse should be NO MORE THAN 15 words.\n",
"{context}"
] |
2024-01-10 | AlgoveraAI/chainlink-assistant | ingest~docs.py | # Required libraries
import re
import os
import bs4
import time
import pickle
import requests
import html2text
import pandas as pd
from tqdm import tqdm
from pathlib import Path
import concurrent.futures
from bs4 import BeautifulSoup
from urllib.parse import urljoin
from typing import List, Optional, Set
from requests.exceptions import RequestException
from langchain.docstore.document import Document
from config import DATA_DIR, get_logger, MAX_THREADS
from ingest.utils import (
remove_prefix_text,
extract_first_n_paragraphs,
get_description_chain,
get_driver,
)
logger = get_logger(__name__)
# Settings for requests
REQUEST_DELAY = 0.1
SESSION = requests.Session()
# Get the driver
driver = None
def filter_urls_by_base_url(urls: List, base_url: str):
"""
Filters a list of URLs and returns only those that include the base_url.
:param urls: List of URLs to filter.
:param base_url: Base URL to filter by.
:return: List of URLs that include the base_url.
"""
return [url for url in urls if base_url in url]
def normalize_url(url: str):
"""
Normalize a URL by ensuring it ends with '/'.
:param url: URL to normalize.
:return: Normalized URL.
"""
return url if url.endswith("/") else url + "/"
def fetch_url_request(url: str):
"""
Fetches the content of a URL using requests library and returns the response.
In case of any exception during fetching, logs the error and returns None.
:param url: URL to fetch.
:return: Response object on successful fetch, None otherwise.
"""
try:
response = SESSION.get(url)
response.raise_for_status()
return response
except RequestException as e:
logger.error(f"Error fetching {url}: {e}")
return None
def fetch_url_selenium(url: str):
local_driver = get_driver()
try:
local_driver.get(url)
local_driver.implicitly_wait(3)
time.sleep(3)
source = local_driver.page_source
except RequestException as e:
logger.error(f"Error fetching {url}: {e}")
source = None
finally:
local_driver.quit()
return source
def process_url(response: requests.Response, visited: Set, base_url: str):
"""
Process a URL response. Extract all absolute URLs from the response that
haven't been visited yet and belong to the same base_url.
:param response: Response object from a URL fetch.
:param visited: Set of URLs already visited.
:param base_url: Base URL to filter by.
:return: Set of new URLs to visit.
"""
urls = set()
if response:
soup = BeautifulSoup(response.content, "html.parser")
for link in soup.find_all("a"):
href = link.get("href")
if href is not None and "#" not in href:
absolute_url = normalize_url(urljoin(response.url, href))
if absolute_url not in visited and base_url in absolute_url:
visited.add(absolute_url)
urls.add(absolute_url)
return urls
def get_all_suburls(url: str, visited: Optional[Set] = None):
"""
Get all sub-URLs of a given URL that belong to the same domain.
:param url: Base URL to start the search.
:param visited: Set of URLs already visited.
:return: Set of all sub-URLs.
"""
if visited is None:
visited = set()
if not url.startswith("http"):
url = "https://" + url
base_url = url.split("//")[1].split("/")[0]
urls = set()
with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_THREADS) as executor:
future_responses = [executor.submit(fetch_url_request, url)]
while future_responses:
for future in concurrent.futures.as_completed(future_responses):
future_responses.remove(future)
response = future.result()
new_urls = process_url(response, visited, base_url)
urls.update(new_urls)
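                # New URLs are only queued while the pending-futures list is below
                # MAX_THREADS, which throttles how quickly the crawl fans out.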
if len(future_responses) < MAX_THREADS:
for new_url in new_urls:
future_responses.append(
executor.submit(fetch_url_request, new_url)
)
urls = filter_urls_by_base_url(urls, base_url)
return urls
def process_tag(tag: bs4.element.Tag):
"""
Process an HTML tag. If the tag is a table, convert it to Markdown.
Otherwise, convert it to Markdown as-is.
:param tag: HTML tag to process.
:return: Markdown representation of the tag.
"""
if tag.name == "table":
# Convert the table to a DataFrame
df = pd.read_html(str(tag))[0]
# Convert the DataFrame to Markdown
return df.to_markdown(index=False) + "\n"
else:
# If it's not a table, convert it to Markdown as before
html = str(tag)
return html2text.html2text(html)
def fix_markdown_links(markdown_text: str):
"""
Fix Markdown links by removing any spaces in the URL.
:param markdown_text: Markdown text to process.
:return: Fixed Markdown text.
"""
return re.sub(r"\[([^\]]+)\]\(([^)]+)\s+([^)]+)\)", r"[\1](\2\3)", markdown_text)
def process_nested_tags(tag: bs4.element.Tag):
"""
Process nested HTML tags. Convert tags to Markdown recursively.
:param tag: Root HTML tag to process.
:return: Markdown text
"""
if tag.name in {
"h1",
"h2",
"h3",
"h4",
"h5",
"h6",
"p",
"pre",
"table",
"ol",
"ul",
}:
return process_tag(tag)
else:
markdown_parts = []
for child in tag.children:
if isinstance(child, bs4.element.Tag):
markdown_parts.append(process_nested_tags(child))
return "".join(markdown_parts)
# def parse(url, use_selenium:list=[]):
# if url in use_selenium:
# response = fetch_url_selenium(url)
# if response:
# soup = BeautifulSoup(response, 'html.parser')
# else:
# soup = None
# else:
# response = fetch_url_request(url)
# if response:
# soup = BeautifulSoup(response.content, 'html.parser')
# else:
# soup = None
# if soup:
# return parse_from_soup(soup)
def parse(url: str):
"""
Fetches and parses a URL using Selenium and BeautifulSoup.
Extracts the useful information from the HTML and returns it.
:param url: URL to fetch and parse.
:return: Processed content from the URL if it exists, None otherwise.
"""
# Fetch the page with Selenium
html = fetch_url_selenium(url)
if html:
# Parse the HTML with BeautifulSoup
soup = BeautifulSoup(html, "html.parser")
# Continue processing the page as before
if soup:
return parse_from_soup(soup)
def parse_from_soup(soup: bs4.BeautifulSoup):
"""
Parses the soup object from BeautifulSoup, removes unnecessary tags,
and returns the content in markdown format.
:param soup: BeautifulSoup object
:return: Content from the soup in markdown format.
"""
grid_main = soup.find("div", {"id": "grid-main"})
if grid_main:
for img in grid_main.find_all("img"):
img.decompose()
for h2 in grid_main.find_all("h2", {"class": "heading"}):
h2.decompose()
markdown_content = process_nested_tags(grid_main)
fixed_markdown_content = fix_markdown_links(markdown_content)
return fixed_markdown_content
else:
logger.error('Failed to find the "grid-main" div.')
def remove_duplicates(doc_list: List[Document]):
"""
Removes duplicate documents from a list of Documents based on page_content.
:param doc_list: List of Document objects.
:return: List of unique Document objects.
"""
content_to_doc = {}
for doc in doc_list:
if doc.page_content not in content_to_doc:
content_to_doc[doc.page_content] = doc
return list(content_to_doc.values())
def insert_full_url(text: str):
"""
Inserts the full URL into Markdown links in the text.
:param text: Text to process.
:return: Text with full URLs in Markdown links.
"""
base_url = "https://docs.chain.link"
def replacer(match):
sub_url = match.group(2)
# If the sub_url is an absolute URL, return it unchanged
if sub_url.startswith("http://") or sub_url.startswith("https://"):
return match.group(0)
# If the sub_url starts with a slash, remove it to avoid double slashes in the final url
if sub_url.startswith("/"):
sub_url = sub_url[1:]
return f"[{match.group(1)}]({base_url}/{sub_url})"
return re.sub(r"\[(.*?)\]\((.*?)\)", replacer, text)
def refine_docs(docs: List[Document]):
"""
Removes duplicates and inserts full URLs into the page_content of the Document objects.
:param docs: List of Document objects.
:return: Refined list of Document objects.
"""
docs_filtered = remove_duplicates(docs)
base_url = "https://docs.chain.link"
for doc in docs_filtered:
doc.page_content = insert_full_url(doc.page_content)
return docs_filtered
def scrap_docs():
global driver
driver = get_driver()
all_urls = get_all_suburls("https://docs.chain.link/")
all_urls = sorted(list(set(all_urls)))
# Get description chain
chain_description = get_description_chain()
docs_documents = []
# Utilizing ThreadPoolExecutor to parallelize the fetching and processing of URLs
with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_THREADS) as executor:
future_to_url = {executor.submit(parse, url): url for url in all_urls}
for future in tqdm(
concurrent.futures.as_completed(future_to_url), total=len(all_urls)
):
url = future_to_url[future]
try:
data = future.result()
except Exception as e:
logger.error(f"Exception occurred in scraping {url}: {e}")
continue
if data:
# Remove anything above title
markdown_content = remove_prefix_text(data)
# Get title
try:
titles = re.findall(r"^#\s(.+)$", markdown_content, re.MULTILINE)
title = titles[0].strip()
except:
title = markdown_content.split("\n\n")[0].replace("#", "").strip()
# Get description
para = extract_first_n_paragraphs(markdown_content, num_para=2)
description = chain_description.predict(context=para)
docs_documents.append(
Document(
page_content=data,
metadata={
"source": url,
"source_type": "technical_document",
"title": title,
"description": description,
},
)
)
docs_documents = remove_duplicates(docs_documents)
# Save the documents to a pickle file with date in the name
with open(f"{DATA_DIR}/tech_documents.pkl", "wb") as f:
pickle.dump(docs_documents, f)
logger.info(f"Scraped technical documents.")
return docs_documents
| [] |
2024-01-10 | AlgoveraAI/chainlink-assistant | ingest~chain_link.py | import re
import os
import time
import json
import pickle
import requests
import html2text
from tqdm import tqdm
from pathlib import Path
import concurrent.futures
from datetime import datetime
from bs4 import BeautifulSoup
from urllib.parse import urljoin
from typing import List, Optional, Set, Dict, Tuple
from requests.exceptions import RequestException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium import webdriver
from langchain.chains import LLMChain
from langchain.docstore.document import Document
from langchain.document_loaders import YoutubeLoader
from config import DATA_DIR, get_logger, MAX_THREADS
from ingest.utils import (
get_description_chain,
remove_prefix_text,
extract_first_n_paragraphs,
get_driver,
)
logger = get_logger(__name__)
# Settings for requests
REQUEST_DELAY = 0.1
TIMEOUT = 10
SESSION = requests.Session()
driver = None
def filter_urls_by_base_url(urls: List, base_url: str):
"""
Filters a list of URLs and returns only those that include the base_url.
:param urls: List of URLs to filter.
:param base_url: Base URL to filter by.
:return: List of URLs that include the base_url.
"""
return [url for url in urls if base_url in url]
def normalize_url(url: str):
"""
Normalize a URL by ensuring it ends with '/'.
:param url: URL to normalize.
:return: Normalized URL.
"""
return url if url.endswith("/") else url + "/"
def fetch_url_request(url: str):
"""
Fetches the content of a URL using requests library and returns the response.
In case of any exception during fetching, logs the error and returns None.
:param url: URL to fetch.
:return: Response object on successful fetch, None otherwise.
"""
try:
response = SESSION.get(url)
response.raise_for_status()
return response
except RequestException as e:
logger.error(f"Error fetching {url}: {e}")
return None
# Use Selenium's WebDriverWait instead of time.sleep
def fetch_url_selenium(url: str):
try:
driver.get(url)
WebDriverWait(driver, TIMEOUT).until(
EC.presence_of_element_located((By.TAG_NAME, "body"))
)
return driver.page_source
except RequestException as e:
logger.error(f"Error fetching {url}: {e}")
return None
def process_url(response: requests.Response, visited: Set, base_url: str):
"""
Process a URL response. Extract all absolute URLs from the response that
haven't been visited yet and belong to the same base_url.
:param response: Response object from a URL fetch.
:param visited: Set of URLs already visited.
:param base_url: Base URL to filter by.
:return: Set of new URLs to visit.
"""
urls = set()
if response:
soup = BeautifulSoup(response.content, "html.parser")
for link in soup.find_all("a"):
href = link.get("href")
if href is not None and "#" not in href:
absolute_url = normalize_url(urljoin(response.url, href))
if absolute_url not in visited and base_url in absolute_url:
visited.add(absolute_url)
urls.add(absolute_url)
return urls
def get_all_suburls(url: str, visited: Optional[Set] = None):
"""
Get all sub-URLs of a given URL that belong to the same domain.
:param url: Base URL to start the search.
:param visited: Set of URLs already visited.
:return: Set of all sub-URLs.
"""
if visited is None:
visited = set()
if not url.startswith("http"):
url = "https://" + url
base_url = url.split("//")[1].split("/")[0]
urls = set()
with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_THREADS) as executor:
future_responses = [executor.submit(fetch_url_request, url)]
while future_responses:
for future in concurrent.futures.as_completed(future_responses):
future_responses.remove(future)
response = future.result()
new_urls = process_url(response, visited, base_url)
urls.update(new_urls)
if len(future_responses) < MAX_THREADS:
for new_url in new_urls:
future_responses.append(
executor.submit(fetch_url_request, new_url)
)
urls = filter_urls_by_base_url(urls, base_url)
return urls
def is_there_video(soup: BeautifulSoup) -> List[str]:
"""Check if there is a video in the soup
params:
soup: BeautifulSoup object
returns:
video_links: List of video links
"""
iframes = soup.find_all("iframe")
video_links = []
for iframe in iframes:
src = iframe.get("src")
if "youtube" in src:
video_links.append(src)
return video_links
def get_youtube_docs(video_tags: List[str], chain_description) -> List[Document]:
"""Get youtube docs from the video tags
params:
video_tags: List of video tags
returns:
u_tube_docs: List of youtube docs
"""
if video_tags:
u_tube_docs = []
for v_tag in video_tags:
try:
u_tube = json.loads(v_tag.script.string)["items"][0]["url"]
u_tube_id = YoutubeLoader.extract_video_id(u_tube)
u_tube_doc = YoutubeLoader(u_tube_id, add_video_info=True).load()[0]
# Get description
description = chain_description.predict(
context=u_tube_doc.page_content[:1500]
)
                # Make sure it's a Chainlink video
assert u_tube_doc.metadata["author"].lower() == "chainlink"
u_tube_doc.metadata = {
"source": u_tube,
"source_type": "video",
"title": u_tube_doc.metadata["title"],
"description": description,
}
# Append to the list
u_tube_docs.append(u_tube_doc)
except Exception as e:
print(e)
u_tube_doc = []
else:
u_tube_docs = []
return u_tube_docs
def scrap_url(
url: str, chain_description: LLMChain, driver: webdriver.Chrome = driver
) -> Document:
"""Process a URL and return a list of words
param url: URL to process
param driver: Selenium driver
return: Document object
"""
driver.get(url)
driver.implicitly_wait(2)
time.sleep(2)
# Get the page source
soup = BeautifulSoup(driver.page_source, "html.parser")
# Get the Markdown content
# Remove images, videos, SVGs, and other media elements; also nav
for media_tag in soup.find_all(
["img", "video", "svg", "audio", "source", "track", "picture", "nav"]
):
media_tag.decompose()
# Remove the footer (assuming it's in a <footer> tag or has a class/id like 'footer')
for footer_tag in soup.find_all(["footer", {"class": "footer"}, {"id": "footer"}]):
footer_tag.decompose()
# Remove sections with class="section-page-alert"
for page_alert in soup.find_all("div", class_="section-page-alert"):
page_alert.decompose()
# Remove sections with class="cta-subscribe"
for cta_subscribe in soup.find_all(class_="cta-subscribe"):
cta_subscribe.decompose()
html_content = str(soup)
h = html2text.HTML2Text()
markdown_content = h.handle(html_content)
# Remove the prefix
markdown_content = remove_prefix_text(markdown_content)
# Get the title
titles = re.findall(r"^#\s(.+)$", markdown_content, re.MULTILINE)
title = titles[0].strip()
# Get description
para = extract_first_n_paragraphs(markdown_content, num_para=2)
description = chain_description.predict(context=para)
# Put the markdown content into a Document object
doc = Document(
page_content=markdown_content,
metadata={
"source": url,
"title": title,
"description": description,
"source_type": "main",
},
)
# Get YouTube docs
video_tags = soup.find_all("a", href=True, class_="techtalk-video-lightbox")
u_tube_docs = get_youtube_docs(video_tags, chain_description)
return doc, u_tube_docs
def concurrent_fetch_url_selenium(url: str):
driver = get_driver()
try:
driver.get(url)
WebDriverWait(driver, TIMEOUT).until(
EC.presence_of_element_located((By.TAG_NAME, "body"))
)
source = driver.page_source
except Exception as e:
logger.error(f"Error fetching {url}: {e}")
source = None
driver.quit()
return source
def concurrent_scrap_url(url: str, chain_description: LLMChain):
try:
logger.info(f"Processing {url}")
page_source = concurrent_fetch_url_selenium(url)
if page_source is None:
return None, []
soup = BeautifulSoup(page_source, "html.parser")
# Get the Markdown content
# Remove images, videos, SVGs, and other media elements; also nav
for media_tag in soup.find_all(
["img", "video", "svg", "audio", "source", "track", "picture", "nav"]
):
media_tag.decompose()
# Remove the footer (assuming it's in a <footer> tag or has a class/id like 'footer')
for footer_tag in soup.find_all(
["footer", {"class": "footer"}, {"id": "footer"}]
):
footer_tag.decompose()
# Remove sections with class="section-page-alert"
for page_alert in soup.find_all("div", class_="section-page-alert"):
page_alert.decompose()
# Remove sections with class="cta-subscribe"
for cta_subscribe in soup.find_all(class_="cta-subscribe"):
cta_subscribe.decompose()
html_content = str(soup)
h = html2text.HTML2Text()
markdown_content = h.handle(html_content)
# Remove the prefix
markdown_content = remove_prefix_text(markdown_content)
# Get the title
titles = re.findall(r"^#\s(.+)$", markdown_content, re.MULTILINE)
title = titles[0].strip()
# Get description
para = extract_first_n_paragraphs(markdown_content, num_para=2)
description = chain_description.predict(context=para)
# Put the markdown content into a Document object
doc = Document(
page_content=markdown_content,
metadata={
"source": url,
"title": title,
"description": description,
"source_type": "main",
},
)
# Get YouTube docs
video_tags = soup.find_all("a", href=True, class_="techtalk-video-lightbox")
u_tube_docs = get_youtube_docs(video_tags, chain_description)
return doc, u_tube_docs
except Exception as e:
logger.error(f"Error processing {url}: {e}")
return None, []
def scrap_chain_link() -> Tuple[List[Dict], List[Dict]]:
"""
Scrap all the urls from https://chain.link/ and save the main docs and you tube docs to disk
return: Tuple[List[Dict], List[Dict]]
"""
global driver
driver = get_driver()
raw_urls = get_all_suburls("https://chain.link/")
raw_urls = list(
set([url for url in raw_urls if url.startswith("https://chain.link")])
)
if "https://chain.link/faqs" not in raw_urls:
raw_urls.append("https://chain.link/faqs")
all_main_docs = []
all_you_tube_docs = []
chain_description = get_description_chain()
progress_bar = tqdm(total=len(raw_urls), desc="Processing URLs", position=0, leave=True)
with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_THREADS) as executor:
future_to_url = {
executor.submit(concurrent_scrap_url, url, chain_description): url
for url in raw_urls
}
for future in concurrent.futures.as_completed(future_to_url):
url = future_to_url[future]
try:
main_doc, you_tube_docs = future.result()
if main_doc:
all_main_docs.append(main_doc)
if you_tube_docs:
all_you_tube_docs.extend(you_tube_docs)
except Exception as e:
logger.error(f"Error processing {url}: {e}")
# Update the tqdm progress bar
progress_bar.update(1)
    # remove duplicates
all_main_docs = list({doc.metadata["source"]: doc for doc in all_main_docs}.values())
all_you_tube_docs = list(
{doc.metadata["source"]: doc for doc in all_you_tube_docs}.values()
)
# remove https://chain.link/terms
all_main_docs = [
doc for doc in all_main_docs if doc.metadata["source"] != "https://chain.link/terms"
]
# Save to disk as pickle
with open(f"{DATA_DIR}/chain_link_main_documents.pkl", "wb") as f:
pickle.dump(all_main_docs, f)
with open(f"{DATA_DIR}/chain_link_you_tube_documents.pkl", "wb") as f:
pickle.dump(all_you_tube_docs, f)
logger.info("Done")
return all_main_docs, all_you_tube_docs
| [] |
2024-01-10 | AlgoveraAI/chainlink-assistant | chat~v3~prompts_no_mem.py | from langchain.prompts import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
potential_answer_system_template = """
As an AI assistant, your task is to locate the segment in a document that provides the answer to a user's inquiry about Chainlink.
If the document doesn't contain the required information, respond with 'no answer'.
Ensure to return only the segment containing the precise answer.
Ensure your report is in MARKDOWN format.
Add source of the document at the end of the answer.
"""
potential_answer_human_template = """
User's question: {question}
Document: {document}
Answer:
Source:
"""
POTENTIAL_ANSWER_PROMPT = ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template(potential_answer_system_template),
HumanMessagePromptTemplate.from_template(potential_answer_human_template),
]
)
final_answer_system_template = """
As an AI assistant helping answer a user's question about Chainlink, your task is to provide the answer to the user's question based on the documents provided.
If the document doesn't contain the required information, respond with 'I don't know'.
Each point in your answer should be formatted with corresponding reference(s) using markdown. Conclude your response with a footnote that enumerates all the references involved.
The footnote should be formatted as follows:
```
References:
[^1^]: <reference 1>
[^2^]: <reference 2>
[^3^]: <reference 3>
```
"""
final_answer_human_template = """
User's question: {question}
Document: {document}
Answer:
"""
FINAL_ANSWER_PROMPT = ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template(final_answer_system_template),
HumanMessagePromptTemplate.from_template(final_answer_human_template),
]
)
verification_system_template = """
You are an AI assistant.
You are helping a user find information about Chainlink.
Given a question and an answer pair, please verify if the answer is correct.
If the answer is "the document does not contain the answer", please answer "no".
Please ONLY answer yes or no.
"""
verification_human_template = """
Question: {question}
Answer: {answer}
"""
VERIFICATION_PROMPT = ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template(verification_system_template),
HumanMessagePromptTemplate.from_template(verification_human_template),
]
)
final_answer_system_template_2 = """
As an AI assistant helping answer a user's question about Chainlink, your task is to provide the answer to the user's question based on the documents provided.
If the document doesn't contain the required information, respond with 'I don't know'.
Each point in your answer should be formatted with corresponding reference(s) using markdown. Conclude your response with a footnote that enumerates all the references involved.
The footnote should be formatted as follows:
```
References:
[^1^]: <reference 1>
[^2^]: <reference 2>
[^3^]: <reference 3>
```
"""
final_answer_human_template_2 = """
User's question: {question}
Document: {document}
Answer:
"""
FINAL_ANSWER_PROMPT_2 = ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template(final_answer_system_template_2),
HumanMessagePromptTemplate.from_template(final_answer_human_template_2),
]
)
| [
"\nUser's question: {question}\n\nDocument: {document}\n\nAnswer:\n",
"the document does not contain the answer",
"\nAs an AI assistant helping answer a user's question about Chainlink, your task is to provide the answer to the user's question based on the documents provided.\nIf the document doesn't contain the required information, respond with 'I don't know'.\nEach point in your answer should be formatted with corresponding reference(s) using markdown. Conclude your response with a footnote that enumerates all the references involved.\nThe footnote should be formatted as follows: \n```\nReferences:\n[^1^]: <reference 1> \n[^2^]: <reference 2> \n[^3^]: <reference 3>\n```\n",
"\nYou are an AI assistant.\nYou are helping a user find information about Chainlink.\nGiven the a question and an answer pair, please verify if the answer is correct.\nIf the answer is \"the document does not contain the answer\", please answer \"no\".\nPlease ONLY answer yes or no.\n",
"\nQuestion: {question}\nAnswer: {answer}\n",
"\nUser's question: {question}\n\nDocument: {document}\n\nAnswer:\nSource:\n",
"t know",
"no",
"t contain the required information, respond with ",
"\nAs an AI assistant, your task is to locate the segment in a document that provides the answer to a user's inquiry about Chainlink. \nIf the document doesn't contain the required information, respond with 'no answer'. \nEnsure to return only the segment containing the precise answer.\nEnsure your report is in MARKDOWN format.\nAdd source of the document at the end of the answer.\n",
"\nAs an AI assistant helping answer a user's question about Chainlink, your task is to provide the answer to the user's question based on the documents provided. \nIf the document doesn't contain the required information, respond with 'I don't know'.\nEach point in your answer should be formatted with corresponding reference(s) using markdown. Conclude your response with a footnote that enumerates all the references involved. \nThe footnote should be formatted as follows: \n```\nReferences:\n[^1^]: <reference 1> \n[^2^]: <reference 2> \n[^3^]: <reference 3>\n```\n"
] |
2024-01-10 | PeterChenTW/openai-English-writing-evaluation | speech_to_text.py | import speech_recognition as sr
import openai
openai.api_key = ''
recognizer = sr.Recognizer()
def free_model_audio_to_text(recording_file_path):
with sr.AudioFile(recording_file_path) as source:
audio_data = recognizer.record(source)
# Recognize the audio
text = recognizer.recognize_google(audio_data)
return text
def openai_model_audio_to_text(recording_file_path):
with open(recording_file_path, 'rb') as audio_file:
transcript = openai.Audio.transcribe(
model='whisper-1',
file=audio_file,
temperature=0.3,
prompt="Umm, let me think like, hmm... Okay, here's what I'm, like, thinking. I love play ball.",
language='en'
)
return transcript['text']
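# Example usage (illustrative sketch; "recording.wav" is a placeholder path, and the
# openai.api_key above must be set before calling the Whisper-based function):
#
#   print(free_model_audio_to_text("recording.wav"))
#   print(openai_model_audio_to_text("recording.wav"))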
| [] |
2024-01-10 | PeterChenTW/openai-English-writing-evaluation | bot_speaker.py | import openai
import json
import os
import time
class ChatBot:
def __init__(self, role):
self.current_absolute_path = os.path.abspath('.')
self._load_prompt_dict()
self.history = [
{
"role": "system",
"content": self.prompt_dict[role]
}
]
def listen_and_speak(self, content):
self.history.append(
{
"role": "user",
"content": content
}
)
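        # Retry the completion call until it succeeds; a RateLimitError triggers a
        # 60-second back-off, while any other exception is printed and ends the loop.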
while True:
try:
reply = openai.ChatCompletion.create(
model='gpt-3.5-turbo',
# model='gpt-4',
messages=self.history,
)
return reply['choices'][0]['message'].get('content', '')
except openai.error.RateLimitError as e:
print('Reached open ai api limit. sleep for 60 seconds')
time.sleep(60)
except Exception as e:
print(f"An error occurred: {str(e)}")
break
def _load_prompt_dict(self):
with open(os.path.join(self.current_absolute_path, 'prompts.json'), 'r') as json_file:
            self.prompt_dict = json.load(json_file)
 | [] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518171741.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fix this limit, in a smarter way
text_from_webpage = text_from_webpage[:15000]
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
st.write(parsed_articles)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~gui_20230623005902.py | ## IMPORTS
from bs4 import BeautifulSoup # Importing BeautifulSoup for HTML parsing
from bs4.element import Comment # Importing Comment class for extracting comments from HTML
import urllib.request # Importing urllib.request for making HTTP requests
import streamlit as st # Importing streamlit for building interactive web apps
import os # Importing os for accessing operating system functionalities
from dotenv import load_dotenv # Importing load_dotenv for loading environment variables
from langchain.llms import OpenAI # Importing OpenAI class from langchain.llms module
from langchain.prompts import PromptTemplate # Importing PromptTemplate class from langchain.prompts module
import json # Importing json module for working with JSON data
from dotenv import dotenv_values # Importing dotenv_values for loading environment variables from .env file
from googlesearch import search # Importing search function from googlesearch module
import requests # Importing requests module for making HTTP requests
import unicodedata
import validators
## SETUP ENVIRONMENT VARIABLES
load_dotenv()
env_vars = dotenv_values(".env")
## Define system relevant input data for application
HARD_LIMIT_CHAR = 10000
## Functions
def tag_visible(element):
excluded_tags = ['a', 'style', 'script', 'head', 'title', 'meta', '[document]']
if element.parent.name in excluded_tags:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.find_all(text=tag_visible)
visible_texts = [t.strip() for t in texts if t.strip()]
return " ".join(visible_texts)
def extract_json_values(input_str):
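    # Pulls successive JSON values out of a concatenated string, e.g.
    # extract_json_values('{"a": 1}[2, 3]') -> [{'a': 1}, [2, 3]]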
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
## Process website and save content to file
def process_website(url, output_file_name):
html = urllib.request.urlopen(url).read()
text_from_webpage = text_from_html(html)
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
# Logging
file_path = output_file_name
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
return text_from_webpage
def get_link_based_on_article_name_via_google(article_title, url_to_watch):
search = article_title + " " + url_to_watch
url = 'https://www.google.com/search'
headers = {
'Accept' : '*/*',
'Accept-Language': 'en-US,en;q=0.5',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82',
}
parameters = {'q': search}
content = requests.get(url, headers = headers, params = parameters).text
soup = BeautifulSoup(content, 'html.parser')
search = soup.find(id = 'search')
first_link = search.find('a')
article_link= first_link['href']
return first_link['href']
def prompt_to_llm_response(text_from_webpage, prompt_input):
prompt = PromptTemplate(
input_variables=["webpage", "prompt_text"],
template="\"{prompt_text}\" \
webpage : \"{webpage}\"",
)
prompt_to_send = prompt.format(webpage=text_from_webpage, prompt_text=prompt_input)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","")
return result_from_chatgpt
def prompt_similarity_to_llm_response(sentence1, sentence2):
prompt = PromptTemplate(
input_variables=["sentence1", "sentence2"],
template="""
Compare the content of the following two sentences. Could sentence 1 be relevant for a person interested in sentence 2?
Answer with one of [strongly agree, agree, disagree, strongly disagree] only.
Sentence 1: {sentence1}
Sentence 2: {sentence2}
""",
)
prompt_to_send = prompt.format(sentence1=sentence1, sentence2=sentence2)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","").lower()
return result_from_chatgpt
## Web Scrapping
url_input = "https://news.yahoo.com"
# url_input = "https://laion.ai/blog/" # OK
# url_input = "https://www.euronews.com/tag/artificial-intelligence" # NOK
# url_input = "https://www.theguardian.com/international" #OK
# url_input = "https://www.bloomberg.com/europe" #NOK
# url_input = "https://news.google.com/home?hl=en-US&gl=US&ceid=US:en" # OK
### USER INPUT HERE
if validators.url(url_input):
url_to_watch = st.text_input("Input your URL here", url_input)
## Process website and save content to file
text_from_webpage = process_website(url_to_watch, "output.txt")
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
else:
print("URL not valid")
### UI OUTPUT HERE
#st.write("URL not valid")
prompt_news = "Below is an html version of a news website. It contains news articles. Find the titles of news articles on this website. Do not make up article titles. List all the article titles and their metadata if it exists like date or author. Limit yourself to the first 5. In JSON format, using these keys \"title\", \"metadata\". No Other text."
result_from_chatgpt = prompt_to_llm_response(text_from_webpage,prompt_news)
result_from_chatgpt_processed = result_from_chatgpt.encode('ascii', 'ignore')
print(json.dumps(json.loads(result_from_chatgpt_processed), indent=4))
file_path = "gpt_out.txt"
parsed_articles = json.loads(result_from_chatgpt)
#Logging
file_path = "output_gpt.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
#with open('final_output.json', 'w') as f:
# print("The json file is created")
### USER INPUT HERE
#topic_of_interest = "Should AI be open sourced?"
topic_of_interest = "Ukraine War"
empty_list = []
i = 0
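# For each extracted article: look up its URL via a Google search, check whether it is
# relevant to the topic of interest, and only scrape and summarize it when the model
# answers "agree" or "strongly agree".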
for item in json.loads(result_from_chatgpt_processed):
i+=1
output_filename = "article_text"+str(i)+".txt"
article_title = item['title']
article_link = get_link_based_on_article_name_via_google(article_title, url_to_watch)
new_item = {
'title': item['title'],
'metadata': item['metadata'],
'link': article_link,
}
relation_exists = prompt_similarity_to_llm_response(article_title,topic_of_interest)
if relation_exists == "strongly agree" or relation_exists == "agree" :
article_text = process_website(article_link, output_filename)
# Summarize article
prompt_article = "Summarize the following text in 3 sentences: "
article_summary = prompt_to_llm_response(article_text,prompt_article)
# Answer the question
prompt_content = "If user input is a question provide an answer, otherwise summarise content relevant to the input topic. Answer in one sentence".format(topic_of_interest)
user_question_answer = prompt_to_llm_response(article_text,prompt_content)
new_item["summary"]=article_summary
new_item["answer"]=user_question_answer
new_item["related?"]=relation_exists
#else: print("not relevant")
empty_list.append(new_item)
output_json = json.dumps(empty_list, indent=4)
### UI OUTPUT HERE
with open("output.json", "w") as outfile:
outfile.write(output_json)
| [
"sentence2",
"\"{prompt_text}\" webpage : \"{webpage}\"",
"sentence1",
"If user input is a question provide an answer, otherwise summarise content relevant to the input topic. Answer in one sentence",
"prompt_text",
"\n Compare the content of the following two sentences. Could sentence 1 be relevant for a person interested in sentence 2? \n Answer with one of [strongly agree, agree, disagree, strongly disagree] only.\n\n Sentence 1: {sentence1}\n Sentence 2: {sentence2}\n ",
"Summarize the following text in 3 sentences: ",
"Below is an html version of a news website. It contains news articles. Find the titles of news articles on this website. Do not make up article titles. List all the article titles and their metadata if it exists like date or author. Limit yourself to the first 5. In JSON format, using these keys \"title\", \"metadata\". No Other text."
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518160510.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text)
print("Variable content saved to the file:", file_path)
#LLM part
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text. \n webpage : {webpage}",)
llm = OpenAI(temperature=0.9)
prompt_to_send = prompt.format(webpage=text)
result_from_chatgpt = llm(prompt_to_send)
st.write(result_from_chatgpt)
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text. \n webpage : {webpage}",
"colorful socks"
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518160831.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text)
print("Variable content saved to the file:", file_path)
#LLM part
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text. \n webpage : {webpage}",
)
llm = OpenAI(temperature=0.9)
prompt_to_send = prompt.format(webpage=text)
result_from_chatgpt = llm(prompt_to_send)
st.write(result_from_chatgpt)
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text. \n webpage : {webpage}",
"colorful socks"
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518171506.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fix this limit, in a smarter way
text_from_webpage = text_from_webpage[:15000]
print(f'Length of scraped text: {len(text_from_webpage)}')
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.0)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
load_dotenv()
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518160009.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")
html = urllib.request.urlopen(url_to_watch).read()
text = text_from_html(html)
file_path = "output.txt"
# Open the file in write mode and write the text content
with open(file_path, "w") as file:
file.write(text)
print("Variable content saved to the file:", file_path)
llm = OpenAI(temperature=0.9)
text = "What would be a good company name for a company that makes colorful socks?"
print(llm(text))
st.write('The current movie title is')
| [] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518161211.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
if st.button('Say hello'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text.\
webpage : {webpage}",
)
llm = OpenAI(temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send)
st.write(result_from_chatgpt)
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text. webpage : {webpage}"
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518160815.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text)
print("Variable content saved to the file:", file_path)
#LLM part
prompt = PromptTemplate(
input_variables=["product"],
template="What is a good name for a company that makes {product}?",
)
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text. \n webpage : {webpage}",
)
llm = OpenAI(temperature=0.9)
prompt_to_send = prompt.format(webpage=text)
result_from_chatgpt = llm(prompt_to_send)
st.write(result_from_chatgpt)
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text. \n webpage : {webpage}",
"colorful socks",
"What is a good name for a company that makes {product}?"
] |