date_collected (string) | repo_name (string, 6-116 chars) | file_name (string, 2-220 chars) | file_contents (string, 13-357k chars) | prompts (sequence) |
---|---|---|---|---|
2024-01-10 | elias-jhsph/jarvis-conversationalist | src~jarvis_conversationalist~openai_functions~internet_helper.py | import json
import multiprocessing
import threading
import requests
from bs4 import BeautifulSoup
from openai import OpenAI, _utils
from googlesearch import search as google_search
from tiktoken import encoding_for_model
client = OpenAI()
_utils._logs.logger.setLevel("CRITICAL")
basic_model = "gpt-3.5-turbo-16k"
advanced_model = "gpt-4"
enc = encoding_for_model(advanced_model)
temperature = 0.6
def search(search_term: str, num_results: int = 10, advanced: bool = False) -> dict:
"""
    Searches for a term using the googlesearch package (a free alternative to the Google Custom Search API).
:param search_term: The term to search for.
:type search_term: str
:param num_results: The number of results to return, defaults to 10.
:type num_results: int, optional
:param advanced: Whether to use advanced info, defaults to False.
:type advanced: bool, optional
:return: A dictionary containing the search results.
:rtype: dict
"""
search_results = []
for url in google_search(search_term, num_results=num_results, advanced=advanced):
if advanced:
search_results.append({"link": url.url, "title": url.title, "description": url.description})
else:
search_results.append({"link": url})
return {"items": search_results}
def refine_query(query: str) -> str:
"""
Refines a query using the OpenAI API.
    This function is used to refine a query to get better search results. It uses the OpenAI API to ask the model
    to add context and keywords to the query. The model's response is then sent directly to Google.
:param query: The query to refine.
:type query: str
:return: The refined query.
:rtype: str
"""
response = client.chat.completions.create(model=advanced_model,
messages=[{"role": "user", "content":
f"Please help me improve this search query for better results: '{query}'. Add context and keywords "
"you think help better capture the idea behind the query. The response you send will go directly "
"into google. Here is a helpful reminder of google tools you can use but consider not using them "
"if you don't think you need them. Make sure some keywords aren't in quotes or you risk "
"only getting results with those exact words in that order:\n\n"
'Quotes (""): Use quotes to search for an exact phrase or word order.\n'
"Minus (-): Exclude a specific word from your search.\n"
"Asterisk (*): Use as a placeholder for unknown words.\n"
"OR: Search for multiple terms or phrases.\n"
"intitle: (intitle:): Search for words specifically in the title of webpages.\n"
"intext: (intext:): Search for words specifically in the body of webpages.\n"
"Note: Do not be so specific in your search that you miss the general point of the query. Also "
"DO NOT SURROUND THE ENTIRE QUERY BY QUOTES.\n Query:"}],
max_tokens=100,
n=1,
temperature=temperature)
refined_query = response.choices[0].message.content
return refined_query
def extract_content(url: str) -> str:
"""
Extracts the content from a webpage using BeautifulSoup.
:param url: The URL of the webpage.
:type url: str
:return: The content of the webpage.
:rtype: str
"""
try:
response = requests.get(url)
soup = BeautifulSoup(response.text, "html.parser")
paragraphs = soup.find_all("p")
content = " ".join([p.get_text() for p in paragraphs])
return content
except Exception as e:
print(f"Error extracting content from {url}: {e}")
return ""
def summarize(content: str, refined_query: str) -> str:
"""
Summarizes a piece of text using the OpenAI API.
:param content: The text to summarize.
:type content: str
:param refined_query: The refined query.
:type refined_query: str
:return: The summary.
:rtype: str
"""
response = client.chat.completions.create(model=basic_model,
messages=[{'role': 'system', 'content': f'There was a search for the following query:\n"{refined_query}"\nPlease '
f'provide a concise summary of the following content while keeping mind '
f'what will best respond to the search query:\n{content}\n'}],
max_tokens=400,
n=1,
stop=None,
temperature=temperature)
summary = response.choices[0].message.content
return summary
def rank_relevance(url: str, summary: str, query: str) -> int:
"""
Ranks the relevance of a summary using the OpenAI API.
:param url: The URL of the webpage.
:type url: str
:param summary: The summary.
:type summary: str
:param query: The query.
:type query: str
:return: The relevance of the summary.
"""
prompt = f"Given the query '{query}', rate the relevance of this summary from 1 (not relevant) to 10 (highly " \
f"relevant):\nURL: {url}\nSummary: {summary}"
schema = {"type": "function",
"function": {
"name": "store_rank_relevance",
"description": "Stores the relevance of a summary.",
"parameters": {
"type": "object",
"properties": {
"relevance": {
"type": "number",
"description": "The relevance of the summary. relevance is a number from 1 to 10.",
},
},
"required": ["relevance"],
},
}
}
response = client.chat.completions.create(model=advanced_model,
messages=[{'role': 'system', 'content': prompt}],
max_tokens=100,
n=1,
stop=None,
temperature=temperature,
tools=[schema],
tool_choice={"type": "function", "function": {"name": "store_rank_relevance"}})
relevance_raw = response.choices[0].message.tool_calls[0].function.arguments
relevance = int(json.loads(relevance_raw)['relevance'])
return relevance
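# Note (added for clarity, not part of the original module): because tool_choice
# above forces a call to store_rank_relevance, the model's reply arrives as a JSON
# argument string such as '{"relevance": 7}', which json.loads parses before the
# int() cast.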
def synthesize_information(summaries: list, query: str) -> str:
"""
Synthesizes information from a list of summaries using the OpenAI API.
:param summaries: The list of summaries.
:type summaries: list
:param query: The query.
:type query: str
"""
summaries_text = "\n".join([f"Summary {i + 1}: {summary}" for i, (url, summary) in enumerate(summaries)])
response = client.chat.completions.create(model=advanced_model,
messages=[{"role": "system", "content": f"Given the following summaries about '{query}', please synthesize "
f"a coherent and comprehensive response:\n{summaries_text}\n"}],
max_tokens=500,
n=1,
temperature=temperature)
synthesized_info = response.choices[0].message.content
return synthesized_info
def truncate_content(content: str, max_tokens: int = 3500) -> str:
"""
Truncates a piece of text to a maximum number of tokens.
:param content: The text to truncate.
:type content: str
:param max_tokens: The maximum number of tokens.
:type max_tokens: int
:return: The truncated text.
:rtype: str
"""
tokens = enc.encode(content)
if len(tokens) > max_tokens:
tokens = tokens[:max_tokens]
truncated_content = enc.decode(tokens)
return truncated_content + "(TRUNCATED)"
else:
return content
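# Example (illustrative, not part of the original module): with max_tokens=5,
# truncate_content keeps only the first five tokens of a long article and appends
# the literal marker "(TRUNCATED)"; shorter inputs pass through unchanged.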
def search_helper(query: str, result_number: int = 6, skip: threading.Event = None) -> dict:
"""
Helper function for search.
    This function runs the search for a given query by first refining the query, then searching for it,
    then summarizing the results, then ranking the relevance of the summaries, and finally synthesizing the information.
:param query: The query.
:type query: str
:param result_number: The number of results to return.
:type result_number: int
:param skip: A threading.Event object that can be used to stop the search.
:type skip: threading.Event
:return: A dictionary containing the search results.
:rtype: dict
"""
search_data = {"initial_query": query, "refined_query": refine_query(query), "search_results": [],
"ranked_summaries": [], "synthesized_information": None}
    temp = search(search_data["refined_query"], num_results=result_number)
    if "items" not in temp:
        # fall back to the original query if the refined query returned no results
        search_data["refined_query"] = query
        temp = search(search_data["refined_query"], num_results=result_number)
search_data["search_results"] = temp['items']
for result in search_data["search_results"]:
if skip is not None:
if skip.is_set():
return {}
content = extract_content(result['link'])
summary = summarize(truncate_content(content), search_data["refined_query"])
snippet = result.get('snippet', '') # Use an empty string if snippet is not available
search_data["ranked_summaries"].append({"url": result['link'], "content": content,
"summary": summary, "snippet": snippet})
for summary_data in search_data["ranked_summaries"]:
if skip is not None:
if skip.is_set():
return {}
relevance = rank_relevance(summary_data["url"], summary_data["summary"], search_data["refined_query"])
summary_data["relevance"] = relevance
search_data["ranked_summaries"].sort(key=lambda x: x["relevance"], reverse=True)
if skip is not None:
if skip.is_set():
return {}
search_data["synthesized_information"] = synthesize_information(
[(data["url"], data["summary"]) for data in search_data["ranked_summaries"]],
search_data["refined_query"]
)
return search_data
def simplify_output(search_data: dict) -> dict:
"""
Simplifies the output of the search function.
:param search_data: The output of the search function.
:type search_data: dict
:return: A simplified version of the output of the search function.
:rtype: dict
"""
simplified_output = {k: v for k, v in search_data.items() if k != "summaries"}
for summary_data in simplified_output["ranked_summaries"]:
summary_data.pop("content", None)
return simplified_output
def generate_final_prompt(simplified_output: dict, max_tokens: int = 1800) -> str:
"""
Generates the final prompt for the chatbot.
This function is used to generate the final prompt for the chatbot by combining the information from the search
function.
:param simplified_output: The simplified output of the search function.
:type simplified_output: dict
:param max_tokens: The maximum number of tokens.
:type max_tokens: int
:return: The final prompt for the chatbot.
:rtype: str
"""
synthesized_information = simplified_output["synthesized_information"]
ranked_summaries = simplified_output["ranked_summaries"]
refined_query = simplified_output["refined_query"]
user_query = simplified_output["initial_query"]
ranked_summaries_text = "\n".join(
[f"{i + 1}. {summary['url']} (Relevance: {summary['relevance']}):\n{summary['summary']}"
for i, summary in enumerate(ranked_summaries)]
)
pre_prompt = (
f"The user has requested a response to the following query '{user_query}'.\n"
f"An AI language model working with you has conducted an internet search for '{refined_query}', "
f"which was based on the provided user query. "
f"It has synthesized the following information from the search results: '{synthesized_information}'. "
f"Here are the ranked summaries of the top search results:\n\n"
)
post_prompt = (
f"\n\n"
f"Please analyze these results and provide the most appropriate response to the User.\n"
f"Consider the following options:\n"
f"1. Pass along the final summary\n"
f"2. Provide a very short final answer\n"
f"3. Suggest specific websites for further reading\n"
f"4. Recommend a deeper search or further inquiry\n"
f"5. Offer color commentary on the findings\n"
f"6. Combine any of the above options.\n"
f"NOTE: Provide the exact response that you would have me give the user. DO NOT mention which approach you "
f"have chosen. Give your response exactly as you would give it to the end user.\n\n"
f"Remember the user doesn't have access to the results above, so any text you want to refer to from above "
f"you must reiterate that information to the user in your own words! "
f"And don't forget your first system message (NO FULL URLS)! Good luck!"
)
prompt = pre_prompt + ranked_summaries_text + post_prompt
tokens = enc.encode(prompt)
if len(tokens) > max_tokens:
diff = len(tokens) - max_tokens
new = enc.encode(ranked_summaries_text)
if len(new) < diff+10:
raise Exception("Could not shrink internet final prompt within limit!")
prompt = pre_prompt + truncate_content(ranked_summaries_text, len(new) - (diff+10)) + post_prompt
return prompt
def create_internet_context(query: str, result_number: int = 10,
max_tokens: int = 1800, skip: threading.Event = None) -> tuple:
"""
Creates the internet context for the chatbot.
This function is used to create the internet context for the chatbot by combining the information from the search
function. Then it generates the final prompt for the chatbot. Then it returns the final prompt and the simplified
output of the search function.
:param query: The query to search for.
:type query: str
:param result_number: The number of results to return.
:type result_number: int
:param max_tokens: The maximum number of tokens.
:type max_tokens: int
    :param skip: An event that can be used to cancel the search.
    :type skip: threading.Event
:return: The final prompt for the chatbot and the simplified output of the search function.
:rtype: tuple
"""
    if skip is None:
        skip = multiprocessing.Event()
    if skip.is_set():
        # return an empty result pair if the search was cancelled before it started
        return "", {}
search_data = search_helper(query, result_number=result_number, skip=skip)
if skip is not None:
if skip.is_set():
return "", {}
simplified_output = simplify_output(search_data)
if skip is not None:
if skip.is_set():
return "", {}
result = generate_final_prompt(simplified_output, max_tokens=max_tokens)
return result, simplified_output
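# Example usage (illustrative sketch, not part of the original module; assumes a
# valid OPENAI_API_KEY in the environment and network access):
# prompt, simplified = create_internet_context("current state of fusion energy research")
# print(prompt)  # final prompt, capped at roughly max_tokens tokens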
| [
"Please help me improve this search query for better results: 'PLACEHOLDER'. Add context and keywords you think help better capture the idea behind the query. The response you send will go directly into google. Here is a helpful reminder of google tools you can use but consider not using them if you don't think you need them. Make sure some keywords aren't in quotes or you risk only getting results with those exact words in that order:\n\nQuotes (\"\"): Use quotes to search for an exact phrase or word order.\nMinus (-): Exclude a specific word from your search.\nAsterisk (*): Use as a placeholder for unknown words.\nOR: Search for multiple terms or phrases.\nintitle: (intitle:): Search for words specifically in the title of webpages.\nintext: (intext:): Search for words specifically in the body of webpages.\nNote: Do not be so specific in your search that you miss the general point of the query. Also DO NOT SURROUND THE ENTIRE QUERY BY QUOTES.\n Query:",
"Given the query 'PLACEHOLDER', rate the relevance of this summary from 1 (not relevant) to 10 (highly relevant):\nURL: PLACEHOLDER\nSummary: PLACEHOLDER",
"PLACEHOLDERPLACEHOLDERPLACEHOLDER",
"The user has requested a response to the following query 'PLACEHOLDER'.\nAn AI language model working with you has conducted an internet search for 'PLACEHOLDER', which was based on the provided user query. It has synthesized the following information from the search results: 'PLACEHOLDER'. Here are the ranked summaries of the top search results:\n\n",
"Given the following summaries about 'PLACEHOLDER', please synthesize a coherent and comprehensive response:\nPLACEHOLDER\n",
"\n\nPlease analyze these results and provide the most appropriate response to the User.\nConsider the following options:\n1. Pass along the final summary\n2. Provide a very short final answer\n3. Suggest specific websites for further reading\n4. Recommend a deeper search or further inquiry\n5. Offer color commentary on the findings\n6. Combine any of the above options.\nNOTE: Provide the exact response that you would have me give the user. DO NOT mention which approach you have chosen. Give your response exactly as you would give it to the end user.\n\nRemember the user doesn't have access to the results above, so any text you want to refer to from above you must reiterate that information to the user in your own words! And don't forget your first system message (NO FULL URLS)! Good luck!",
"There was a search for the following query:\n\"PLACEHOLDER\"\nPlease provide a concise summary of the following content while keeping mind what will best respond to the search query:\nPLACEHOLDER\n"
] |
2024-01-10 | elias-jhsph/jarvis-conversationalist | src~jarvis_conversationalist~openai_functions~weather_functions.py | import requests
import geocoder
from openai import OpenAI
client = OpenAI()
temperature = 0.6
basic_model = "gpt-3.5-turbo-16k"
def geocoder_api(query):
g = geocoder.geonames(query, key='eliaswf', maxRows=1)
return g.lat, g.lng
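# Example (illustrative, not part of the original module; requires network access
# and the geonames account configured above):
# lat, lng = geocoder_api("Baltimore")  # latitude/longitude of the best match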
def summarize(query:str, content: str) -> str:
"""
Summarizes a piece of text using the OpenAI API.
:param query: The query to summarize.
:type query: str
:param content: The text to summarize.
:type content: str
:return: The summary.
:rtype: str
"""
response = client.chat.completions.create(model=basic_model,
messages=[{'role': 'system', 'content': f'There was a search for the following weather:\n"{query}"\nPlease '
f'provide a concise summary of the following content while keeping '
f'mind what will best respond to the query:\n{content}\n'}],
max_tokens=400,
n=1,
stop=None,
temperature=temperature)
summary = response.choices[0].message.content
return summary
def get_weather(city_name):
api_key = '916a78d6305cef8f326831938dfe03f7'
lat, lng = geocoder_api(city_name)
url = f"https://api.openweathermap.org/data/2.5/forecast?lat={lat}&lon={lng}&appid={api_key}&units=imperial"
response = requests.get(url)
if response.status_code == 200:
weather_data = response.json()
formatted_response = ""
# Extract the city information
city = weather_data['city']['name']
country = weather_data['city']['country']
formatted_response += f"Weather Forecast for {city}, {country}\n\n"
# Go through each weather entry in the list
for entry in weather_data['list']:
            # Temperatures are returned in Fahrenheit because the request uses units=imperial
            temp_fahrenheit = entry['main']['temp']
            feels_like_fahrenheit = entry['main']['feels_like']
            temp_min_fahrenheit = entry['main']['temp_min']
            temp_max_fahrenheit = entry['main']['temp_max']
            # Format the date and time
            formatted_date = entry['dt_txt']
            # Add the details to the response
            formatted_response += f"{formatted_date}\n"
            formatted_response += f" - Temperature: {temp_fahrenheit:.2f}°F (Feels like: {feels_like_fahrenheit:.0f}°F)\n"
            formatted_response += f" - Min Temperature: {temp_min_fahrenheit:.0f}°F\n"
            formatted_response += f" - Max Temperature: {temp_max_fahrenheit:.0f}°F\n"
formatted_response += f" - Pressure: {entry['main']['pressure']} hPa\n"
formatted_response += f" - Humidity: {entry['main']['humidity']}%\n"
formatted_response += f" - Weather: {entry['weather'][0]['description'].capitalize()}\n"
formatted_response += f" - Cloudiness: {entry['clouds']['all']}%\n"
formatted_response += f" - Wind: {entry['wind']['speed']} m/s, {entry['wind']['deg']} degrees\n"
if 'rain' in entry:
formatted_response += f" - Rain Volume: {entry['rain']['3h']} mm/3h\n"
formatted_response += f" - Probability of Precipitation: {entry['pop'] * 100}%\n\n"
#return summarize(city_name, formatted_response)
return formatted_response
else:
return "City not found or request failed"
| [
"There was a search for the following weather:\n\"PLACEHOLDER\"\nPlease provide a concise summary of the following content while keeping mind what will best respond to the query:\nPLACEHOLDER\n"
] |
2024-01-10 | andifunke/topic-labeling | src~topic_reranking.py | # coding: utf-8
import argparse
import json
from collections import defaultdict
from os import makedirs
from os.path import join, exists
from time import time
import numpy as np
import pandas as pd
from gensim.models import CoherenceModel
from pandas.core.common import SettingWithCopyWarning
from constants import DATASETS, METRICS, PARAMS, NBTOPICS, LDA_PATH, PLACEHOLDER
import warnings
from utils import TopicsLoader, load, init_logging, log_args
warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)
warnings.simplefilter(action="ignore", category=FutureWarning)
pd.options.display.precision = 3
pd.options.display.max_columns = 15
pd.options.display.width = 2000
np.set_printoptions(
precision=3, threshold=None, edgeitems=None, linewidth=800, suppress=None
)
# --------------------------------------------------------------------------------------------------
# --- Reranker Class ---
class Reranker(object):
def __init__(
self,
dataset,
version="noun",
corpus_type="bow",
params="e42",
nbtopics=100,
nb_candidate_terms=20,
nb_top_terms=10,
processes=-1,
logg=print,
):
"""
:param nb_candidate_terms: number of topic terms to evaluate the model over.
nb_candidate_terms must be > nb_top_terms.
            The value is usually inferred from the given topics.
        :param nb_top_terms: number of remaining topic terms. The size of the final topic
            representation set. nb_top_terms must be < nb_candidate_terms.
:param processes: number of processes used for the calculations.
"""
self.logg = logg
self.dataset = dataset
self.version = version
self.corpus_type = corpus_type
self.nb_top_terms = nb_top_terms
self.nb_candidate_terms = nb_candidate_terms
self.processes = processes
tl = TopicsLoader(
dataset=dataset,
version=version,
corpus_type=corpus_type,
param_ids=params,
nbs_topics=nbtopics,
topn=nb_candidate_terms,
include_corpus=True,
include_texts=True,
include_weights=True,
logg=logg,
)
self.dict_from_corpus = tl.dictionary
self.placeholder_id = tl.dictionary.token2id[PLACEHOLDER]
self.corpus = tl.corpus
self.texts = tl.texts
self.nb_topics = tl.nb_topics
self.topic_terms = tl.topics[tl.column_names_terms].copy()
self.topic_weights = tl.topics[tl.column_names_weights].copy()
self.topic_ids = tl.topic_ids()
self.shifted_topics = None
self.kvs = None
self.topic_candidates = None
self.scores = None
self.eval_scores = None
# generate some statistics
self._statistics_ = dict()
self._statistics_["dataset"] = dataset
self._statistics_["version"] = version
def _shift_topics(self):
"""
from the top n terms construct all topic set that omit one term,
resulting in n topics with n-1 topic terms for each topic
"""
shifted_frames = []
for i in range(self.nb_candidate_terms):
df = pd.DataFrame(np.roll(self.topic_ids.values, shift=-i, axis=1))
shifted_frames.append(df)
shifted_ids = pd.concat(shifted_frames)
# omit the first topic term, then the second and append the first etc...
shifted_topics = shifted_ids.iloc[:, 1:].values.tolist()
return shifted_topics
def _init_vectors(self):
d2v = load("d2v", logg=self.logg).docvecs
w2v = load("w2v", logg=self.logg).wv
ftx = load("ftx", logg=self.logg).wv
# Dry run to make sure both indices are fully in RAM
d2v.init_sims()
vector = d2v.vectors_docs_norm[0]
_ = d2v.index2entity[0]
d2v.most_similar([vector], topn=5)
w2v.init_sims()
vector = w2v.vectors_norm[0]
_ = w2v.index2entity[0]
w2v.most_similar([vector], topn=5)
ftx.init_sims()
vector = ftx.vectors_norm[0]
_ = ftx.index2entity[0]
ftx.most_similar([vector], topn=5)
self.kvs = {"d2v": d2v, "w2v": w2v, "ftx": ftx}
def _id2term(self, id_):
return self.dict_from_corpus[id_]
def _append_candidates(self, topic_candidates):
if self.topic_candidates is None:
self.topic_candidates = topic_candidates.sort_index()
self.logg(f"topic candidates {self.topic_candidates.shape}")
else:
self.logg(f"topic candidates old {self.topic_candidates.shape}")
self.logg(f"topic candidates add {topic_candidates.shape}")
self.topic_candidates = self.topic_candidates.append(
topic_candidates.drop("ref", level="metric")
).sort_index()
self.logg(f"topic candidates concatenated {self.topic_candidates.shape}")
def _add_scores(self, scores):
if self.scores is None:
self.scores = scores
else:
self.scores = self.scores.join(scores)
def _vote(self, df, reference, name="vote"):
return (
df.loc[:, "term0":f"term{self.nb_top_terms - 1}"]
.apply(pd.value_counts)
.sum(axis=1)[reference]
.dropna()
.astype(np.int16)
.reset_index()
.rename(columns={"index": "term", 0: "count"})
.sort_values("count", ascending=False, kind="mergesort")[
: self.nb_top_terms
]
.set_index("term")
.squeeze()[reference]
.dropna()
.reset_index()
.rename(lambda x: f"term{x}")
.drop("count", axis=1)
.squeeze()
.rename(name)
)
def _get_reference(self):
metric = "ref"
ref_topics_terms = (
self.topic_ids.iloc[:, : self.nb_top_terms]
.copy()
.assign(metric=metric)
.set_index("metric", append=True)
)
self._statistics_[metric] = dict()
self._statistics_[metric]["runtime"] = 0
return ref_topics_terms
def _rerank_coherence_per_metric(self, metric, coherence_model=None):
"""
Object method to trigger the reranking for a given metric.
It uses the fast heuristic for the reranking in O(n) with n being the number
of candidate terms. A coherence metric is applied on each set of topic terms,
when we leave exactly one term out. The resulting coherence score indicates, if
a term strengthens or weakens the coherence of a topic. We remove those terms
from the set whose absence resulted in higher scores.
:param metric:
:param coherence_model:
:return:
"""
if self.shifted_topics is None:
self.shifted_topics = self._shift_topics()
t0 = time()
self.logg(
f"Calculating topic candidates using {metric} coherence measure "
f"on {self.nb_candidate_terms} candidate terms "
f"for {self.nb_topics} topics"
)
# calculate the scores for all shifted topics
kwargs = dict(
topics=self.shifted_topics,
dictionary=self.dict_from_corpus,
coherence=metric,
topn=self.nb_candidate_terms - 1,
processes=self.processes,
)
if metric == "u_mass":
kwargs["corpus"] = self.corpus
else:
kwargs["texts"] = self.texts
if coherence_model is None:
cm = CoherenceModel(**kwargs)
else:
cm = coherence_model
cm.coherence = metric
scores1d = cm.get_coherence_per_topic()
scores2d = np.reshape(scores1d, (self.nb_candidate_terms, -1)).T
# the highest values indicate the terms whose absence improves the topic coherence most
sorted_scores = np.argsort(scores2d, axis=1)
# thus we will keep the first nbtopterms (default 10) indices
top_scores = sorted_scores[:, : self.nb_top_terms]
# and sort them back for convenience
top_scores = np.sort(top_scores, axis=1)
# replacing indices with token-ids
tpx_ids = [
self.topic_ids.values[i, top_scores[i]] for i in range(self.nb_topics)
]
tpx_ids = (
pd.DataFrame.from_records(
tpx_ids,
columns=self.topic_terms.columns[: self.nb_top_terms],
index=self.topic_ids.index,
)
.assign(metric=metric)
.set_index("metric", append=True)
)
t1 = int(time() - t0)
self._statistics_[metric] = dict()
self._statistics_[metric]["runtime"] = t1
self.logg(
" done in {:02d}:{:02d}:{:02d}".format(
t1 // 3600, (t1 // 60) % 60, t1 % 60
)
)
return tpx_ids
def _rerank_w2v_values(self, topic_param):
def _rank(_df, _name):
_df[f"{_name}_drank"] = _df[f"{_name}_dscore"].rank().map(lambda x: x - 1)
_df[f"{_name}_rrank"] = _df[f"{_name}_rscore"].rank().map(lambda x: x - 1)
return _df
def _fillna_max(_df):
_mask = _df.isnull().any(axis=1)
_df[_mask] = _df[_mask].apply(lambda x: x.fillna(x.max()), axis=1)
return _df
reference = pd.Series(
np.arange(self.nb_candidate_terms), index=topic_param, name="ref"
)
scores = [reference]
for name, kv in self.kvs.items():
in_kv = np.vectorize(lambda x: x in kv)
mask = in_kv(topic_param)
topic = topic_param[mask]
nb_terms_in_vocab = len(topic)
rank_scores = defaultdict(int)
dist_scores = defaultdict(float)
for i in range(nb_terms_in_vocab):
entity = topic[i]
others = np.delete(topic, i)
distances = kv.distances(entity, tuple(others))
argsort = distances.argsort()
nearest = others[argsort]
for j, term in zip(distances, others):
dist_scores[term] += j
for j, term in enumerate(nearest):
rank_scores[term] += j
d_score = pd.Series(dist_scores, name=f"{name}_dscore")
r_score = pd.Series(rank_scores, name=f"{name}_rscore")
dr = pd.concat([d_score, r_score], axis=1)
dr = _rank(dr, name)
scores.append(dr)
df = pd.concat(scores, axis=1, sort=False)
if df.isnull().any().any():
for s in ["dscore", "rscore", "drank", "rrank"]:
scols = df.columns.str.contains(s)
df.loc[:, scols] = _fillna_max(df.loc[:, scols])
# getting scores and ranks for all combinations -> calculating c = a+b for both distance and
# rank scores and getting a rank for the sum
for c, a, b in [
("dw", "d2v", "w2v"),
("df", "d2v", "ftx"),
("wf", "w2v", "ftx"),
("dwf", "dw", "ftx"),
]:
df[f"{c}_dscore"] = df[f"{a}_dscore"] + df[f"{b}_dscore"]
df[f"{c}_rscore"] = df[f"{a}_rscore"] + df[f"{b}_rscore"]
df = _rank(df, c)
return df
def _remove_not_matching_terms(self, kv_name, topic):
kv = self.kvs[kv_name]
in_kv = np.vectorize(lambda x: x in kv)
mask = in_kv(topic)
reduced_tpx = topic[mask]
nb_terms_in_kv = len(reduced_tpx)
if nb_terms_in_kv > self.nb_top_terms:
for i in range(nb_terms_in_kv - self.nb_top_terms):
remove = kv.doesnt_match(reduced_tpx)
reduced_tpx = reduced_tpx[reduced_tpx != remove]
elif nb_terms_in_kv == 0:
reduced_tpx = topic[: self.nb_top_terms]
elif nb_terms_in_kv < self.nb_top_terms:
nb_missing = self.nb_top_terms - nb_terms_in_kv
for i, m in enumerate(mask):
if not m:
mask[i] = True
nb_missing -= 1
if nb_missing == 0:
break
reduced_tpx = topic[mask]
ser = pd.Series(reduced_tpx, name=kv_name + "_matches")
return ser
def _rerank_w2v_by_group(self, topic):
def _sort_terms(col):
top_terms = col.sort_values().index.values[: self.nb_top_terms]
col = col[col.index.isin(top_terms)]
return col.index.values
topic = topic.values[0]
df = self._rerank_w2v_values(topic)
rank_columns = [col for col in df.columns if ("rank" in col) or (col == "ref")]
df_ranks = df[rank_columns]
reranks = (
df_ranks.apply(_sort_terms, axis=0)
.reset_index(drop=True)
.T.rename(columns=lambda x: f"term{x}")
)
dred = self._remove_not_matching_terms("d2v", topic)
wred = self._remove_not_matching_terms("w2v", topic)
fred = self._remove_not_matching_terms("ftx", topic)
reds = pd.concat([dred, wred, fred], axis=1).T.rename(
columns=lambda x: f"term{x}"
)
reranks = pd.concat([reranks, reds])
votes = []
for name in ["rrank", "drank", "matches", ""]:
subset = reranks[reranks.index.str.contains(name)]
v = self._vote(subset, topic, f"{name}_vote_vec".strip("_"))
votes.append(v)
reranks = reranks.append(votes)
return reranks
def rerank_coherence(self, metrics=None):
"""
Main method of a Reranker instance. It generates topic candidates for the given coherence
metrics. A topic candidate is a reranking of the representational terms of a topic. For each
topic each metric generates one topic candidate. This results in |topics| * (|metrics|+1)
        topic candidates, or in other words |metrics|+1 candidates for each topic. The +1 offset is
due to the original topic ranking added to the candidate set.
The reranking is based on the top m topic terms and then reduced to the top n topic terms
where m > n. Typical values are m=20 and n=10. The original order of the terms is kept while
filtering out the terms outside the n best scores.
:param metrics -> list of str.
str must be in {'u_mass', 'c_v', 'c_uci', 'c_npmi', 'vote'}.
:return DataFrame containing all topic candidates
"""
available_metrics = METRICS
if metrics is None:
metrics = available_metrics
self.logg(f"Creating reranked top candidates for metrics {metrics}")
candidates = []
# adding original (reference) topics
ref_topics_terms = self._get_reference()
candidates.append(ref_topics_terms)
# adding several rerankings according to different metrics
if "u_mass" in metrics:
umass_topics_terms = self._rerank_coherence_per_metric("u_mass")
candidates.append(umass_topics_terms)
if "c_v" in metrics:
cv_topics_terms = self._rerank_coherence_per_metric("c_v")
candidates.append(cv_topics_terms)
if "c_uci" in metrics:
cuci_topics_terms = self._rerank_coherence_per_metric("c_uci")
candidates.append(cuci_topics_terms)
if "c_npmi" in metrics:
cnpmi_topics_terms = self._rerank_coherence_per_metric("c_npmi")
candidates.append(cnpmi_topics_terms)
topic_candidates = pd.concat(candidates, axis=0)
# adding candidates by majority votes from prior reference and rerankings
if "vote" in metrics:
vote_topics_terms = (
topic_candidates.groupby(level=[0, 1, 2, 3], sort=False)
.apply(
lambda x: self._vote(
x, self.topic_ids.loc[x.name, :].values, name=x.name
)
)
.assign(metric="vote_coh")
.set_index("metric", append=True)
)
topic_candidates = topic_candidates.append(vote_topics_terms)
# replacing token-ids with tokens -> resulting in the final topic candidates
top_cols = list(self.topic_terms.columns)[: self.nb_top_terms]
topic_candidates.loc[:, top_cols] = topic_candidates.loc[:, top_cols].applymap(
self._id2term
)
self._append_candidates(topic_candidates)
return topic_candidates
def rerank_w2v(self, topics=None):
t0 = time()
self.logg(f"Creating reranked top candidates based on vector space similarity")
if topics is None:
topics = self.topic_terms
if self.kvs is None:
self._init_vectors()
topic_candidates = topics.groupby(level=[0, 1, 2, 3], sort=False).apply(
self._rerank_w2v_by_group
)
topic_candidates.index = topic_candidates.index.rename(names="metric", level=-1)
self._append_candidates(topic_candidates)
t1 = int(time() - t0)
metric = "vec_sim"
self._statistics_[metric] = dict()
self._statistics_[metric]["runtime"] = t1
self.logg(
" done in {:02d}:{:02d}:{:02d}".format(
t1 // 3600, (t1 // 60) % 60, t1 % 60
)
)
return topic_candidates
def oop_score(self, topic_candidates=None):
"""measure the distance of a reranked topic to the reference via out-of-place score"""
def _oop_score_by_row(row):
columns = [col for col in row.index if col.startswith("term")]
terms = row[columns].values
ref_terms = self.topic_terms.loc[row.name[:4], :]
ref_range = np.arange(self.nb_candidate_terms)
ref_ranks_full = pd.Series(ref_range, index=ref_terms, name="ref")
row_ranks = ref_ranks_full[terms]
oop = (row_ranks - ref_range[: len(row_ranks)]).abs().sum()
return oop
if topic_candidates is None:
topic_candidates = self.topic_candidates
oop_scores = (
topic_candidates.apply(_oop_score_by_row, axis=1)
.to_frame()
.rename(columns={0: "oop_score"})
)
self._add_scores(oop_scores)
return oop_scores
def weight_score(self, topic_candidates=None):
"""
measure the distance of a reranked topic to the reference by calculating the remaining weight
of its terms.
"""
def _weight_score_by_row(row):
columns = [col for col in row.index if col.startswith("term")]
terms = row[columns].values
row_terms_full = self.topic_terms.loc[row.name[:4], :]
row_weights_full = self.topic_weights.loc[row.name[:4], :]
row_weights_full.index = row_terms_full.values
row_weights = row_weights_full[terms]
row_weight = row_weights.sum()
ref_weight = row_weights_full[: len(row_weights)].sum()
row_diff = ref_weight - row_weight
return row_weight, row_diff
if topic_candidates is None:
topic_candidates = self.topic_candidates
weight_scores = (
topic_candidates.apply(_weight_score_by_row, axis=1)
.apply(pd.Series)
.rename(columns={0: "weight_score", 1: "weight_diff"})
)
self._add_scores(weight_scores)
return weight_scores
def reranking_statistics(self):
self._statistics_["nb_topics"] = self.nb_topics
self._statistics_["nb_candidate_terms"] = self.nb_candidate_terms
self._statistics_["nb_top_terms"] = self.nb_top_terms
self._statistics_["size_vocabulary"] = len(self.dict_from_corpus)
self._statistics_["size_corpus"] = len(self.corpus)
return self._statistics_
def evaluate(self, topic_candidates=None, nbtopterms=None):
"""
evaluate topic coherence. This method is for convenience and actually redundant.
The coherence scores should optimally be calculated in evaluate_topics.py which provides more
features and metrics.
"""
self.logg("evaluating topic candidates")
# reference scores per topic for top topic terms
if nbtopterms is None:
nbtopterms = self.nb_top_terms
if topic_candidates is None:
topic_candidates = self.topic_candidates
topic_candidates = topic_candidates.loc[:, "term0":f"term{nbtopterms - 1}"]
topics_list = topic_candidates.values.tolist()
self.logg("> u_mass")
t0 = time()
cm_umass = CoherenceModel(
topics=topics_list,
corpus=self.corpus,
dictionary=self.dict_from_corpus,
coherence="u_mass",
topn=nbtopterms,
processes=self.processes,
)
umass_scores = cm_umass.get_coherence_per_topic(
with_std=False, with_support=False
)
t1 = int(time() - t0)
self.logg(
" done in {:02d}:{:02d}:{:02d}".format(
t1 // 3600, (t1 // 60) % 60, t1 % 60
)
)
self.logg("> c_v")
t0 = time()
cm_cv = CoherenceModel(
topics=topics_list,
texts=self.texts,
dictionary=self.dict_from_corpus,
coherence="c_v",
topn=nbtopterms,
processes=self.processes,
)
cv_scores = cm_cv.get_coherence_per_topic()
t1 = int(time() - t0)
self.logg(
" done in {:02d}:{:02d}:{:02d}".format(
t1 // 3600, (t1 // 60) % 60, t1 % 60
)
)
# changed segmentation for c_uci and c_npmi from s_one_set to s_one_one (default)
self.logg("> c_uci")
t0 = time()
cm_cuci = CoherenceModel(
topics=topics_list,
texts=self.texts,
dictionary=self.dict_from_corpus,
coherence="c_uci",
topn=nbtopterms,
processes=self.processes,
)
cuci_scores = cm_cuci.get_coherence_per_topic()
t1 = int(time() - t0)
self.logg(
" done in {:02d}:{:02d}:{:02d}".format(
t1 // 3600, (t1 // 60) % 60, t1 % 60
)
)
self.logg("> c_npmi")
t0 = time()
cm_cuci.coherence = "c_npmi" # reusing precalculated probability estimates
cnpmi_scores1 = cm_cuci.get_coherence_per_topic()
t1 = int(time() - t0)
self.logg(
" done in {:02d}:{:02d}:{:02d}".format(
t1 // 3600, (t1 // 60) % 60, t1 % 60
)
)
scores = {
"u_mass_eval": umass_scores,
"c_v_eval": cv_scores,
"c_uci_eval": cuci_scores,
"c_npmi_eval": cnpmi_scores1,
}
scores = pd.DataFrame(scores)
scores.index = topic_candidates.index.copy()
self.eval_scores = scores
return scores
def save_scores(self, scores, dataset, suffix="topic-scores", directory=None):
if directory is None:
directory = join(LDA_PATH, "topics")
filename = join(directory, dataset)
fcsv = f"{filename}_{suffix}.csv"
self.logg(f"Writing scores to {fcsv}")
scores.to_csv(fcsv)
def save_results(self, directory=None, topics=True, scores=True, stats=True):
if directory is None:
directory = join(LDA_PATH, self.version, self.corpus_type, "topics")
if not exists(directory):
makedirs(directory)
model_name = self.dataset
file_path = join(directory, model_name)
if topics and self.topic_candidates is not None:
fcsv = f"{file_path}_reranker-candidates.csv"
self.logg(f"Writing topic candidates to {fcsv}")
self.topic_candidates.to_csv(fcsv)
if stats:
fjson = f"{file_path}_reranker-statistics.json"
with open(fjson, "w") as fp:
self.logg(f"Writing Reranker statistics to {fjson}")
json.dump(self.reranking_statistics(), fp, ensure_ascii=False, indent=2)
if scores and self.scores is not None:
self.save_scores(
self.scores, model_name, suffix="reranker-scores", directory=directory
)
if scores and self.eval_scores is not None:
self.save_scores(
self.eval_scores,
model_name,
suffix="reranker-eval",
directory=directory,
)
def plot(self):
self.plot_scores(self.eval_scores)
def plot_scores(self, scores):
scores = scores.unstack("metric")
for column in scores.columns.levels[0]:
scores[column].reset_index(drop=True).plot(title=column, grid=True)
descr = scores[column].describe()
mean = descr.loc["mean"]
bestidx = mean.idxmax()
bestval = mean[bestidx]
self.logg(f"reranking metric with highest score: {bestidx} [{bestval:.3f}]")
self.logg(descr.T[["mean", "std"]].sort_values("mean", ascending=False))
self.logg("-" * 50)
# --------------------------------------------------------------------------------------------------
# --- App ---
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", type=str, required=True)
parser.add_argument("--version", type=str, required=False, default="noun")
parser.add_argument("--tfidf", dest="tfidf", action="store_true", required=False)
parser.add_argument(
"--no-tfidf", dest="tfidf", action="store_false", required=False
)
parser.set_defaults(tfidf=False)
parser.add_argument("--topn", type=int, required=False, default=20)
parser.add_argument("--cores", type=int, required=False, default=4)
parser.add_argument("--coh", dest="coh", action="store_true", required=False)
parser.add_argument("--no-coh", dest="coh", action="store_false", required=False)
parser.set_defaults(coh=True)
parser.add_argument("--vec", dest="vec", action="store_true", required=False)
parser.add_argument("--no-vec", dest="vec", action="store_false", required=False)
parser.set_defaults(vec=True)
parser.add_argument("--weight", dest="weight", action="store_true", required=False)
parser.add_argument(
"--no-weight", dest="weight", action="store_false", required=False
)
parser.set_defaults(weight=True)
parser.add_argument("--oop", dest="oop", action="store_true", required=False)
parser.add_argument("--no-oop", dest="oop", action="store_false", required=False)
parser.set_defaults(oop=True)
parser.add_argument("--eval", dest="eval", action="store_true", required=False)
parser.add_argument("--no-eval", dest="eval", action="store_false", required=False)
parser.set_defaults(eval=False)
parser.add_argument("--save", dest="save", action="store_true", required=False)
parser.add_argument("--no-save", dest="save", action="store_false", required=False)
parser.set_defaults(save=True)
parser.add_argument("--plot", dest="save", action="store_true", required=False)
parser.add_argument("--no-plot", dest="save", action="store_false", required=False)
parser.set_defaults(plot=False)
parser.add_argument(
"--metrics", nargs="*", type=str, required=False, default=METRICS
)
parser.add_argument("--params", nargs="*", type=str, required=False, default=PARAMS)
parser.add_argument(
"--nbtopics", nargs="*", type=int, required=False, default=NBTOPICS
)
args = parser.parse_args()
args.dataset = DATASETS.get(args.dataset, args.dataset)
corpus_type = "tfidf" if args.tfidf else "bow"
return (
args.dataset,
args.version,
corpus_type,
args.metrics,
args.params,
args.nbtopics,
args.topn,
args.cores,
args.coh,
args.vec,
args.weight,
args.oop,
args.eval,
args.save,
args.plot,
args,
)
def main():
(
dataset,
version,
corpus_type,
metrics,
params,
nbtopics,
topn,
cores,
coh,
vec,
weight,
oop,
evaluate,
save,
plot,
args,
) = parse_args()
# --- logging ---
logger = init_logging(
name=f"Reranking_{dataset}", basic=False, to_stdout=True, to_file=True
)
logg = logger.info
log_args(logger, args)
t0 = time()
reranker = Reranker(
dataset=dataset,
version=version,
corpus_type=corpus_type,
params=params,
nbtopics=nbtopics,
nb_candidate_terms=topn,
nb_top_terms=10,
processes=cores,
logg=logg,
)
if coh:
reranker.rerank_coherence(metrics)
if vec:
reranker.rerank_w2v()
if weight:
reranker.weight_score()
if oop:
reranker.oop_score()
if evaluate:
reranker.evaluate()
if save:
reranker.save_results()
if plot:
reranker.plot()
logg(f"final shape {reranker.topic_candidates.shape}")
assert len(reranker.topic_candidates) == 24975
t1 = int(time() - t0)
logg(f">>> done in {t1//3600:02d}:{(t1//60)%60:02d}:{t1%60:02d} <<<")
return reranker
if __name__ == "__main__":
main()
| [] |
2024-01-10 | andifunke/topic-labeling | src~evaluate_topics.py | import argparse
import gc
from os.path import join, exists
from time import time
import numpy as np
import pandas as pd
from gensim.models import CoherenceModel
from constants import PARAMS, NBTOPICS, DATASETS, LDA_PATH, DSETS
from utils import init_logging, load, log_args
import warnings
warnings.simplefilter(action="ignore", category=FutureWarning)
def cosine_similarities(vector_1, vectors_all):
norm = np.linalg.norm(vector_1)
all_norms = np.linalg.norm(vectors_all, axis=1)
dot_products = np.dot(vectors_all, vector_1)
similarities = dot_products / (norm * all_norms)
return similarities
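# Example (illustrative, not part of the original script):
# cosine_similarities(np.array([1., 0.]), np.array([[1., 0.], [0., 1.]]))
# -> array([1., 0.])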
def pairwise_similarity(topic, kvs, ignore_oov=True):
similarities = dict()
for name, kv in kvs.items():
vector = lambda x: kv[x] if x in kv else np.nan
vectors = topic.map(vector).dropna()
if len(vectors) < 2:
similarities[name] = np.nan
continue
vectors = vectors.apply(pd.Series).values
sims = np.asarray([cosine_similarities(vec, vectors) for vec in vectors]).mean(
axis=0
)
if not ignore_oov:
missing = len(topic) - len(sims)
if missing > 0:
sims = np.append(sims, np.zeros(missing))
similarity = sims.mean()
similarities[name] = similarity
return pd.Series(similarities)
def mean_similarity(topic, kvs):
similarities = dict()
for name, kv in kvs.items():
vector = lambda x: kv[x] if x in kv else np.nan
vectors = topic.map(vector).dropna()
if len(vectors) < 2:
similarities[name] = np.nan
continue
vectors = vectors.apply(pd.Series).values
mean_vec = np.mean(vectors, axis=0)
similarity = cosine_similarities(mean_vec, vectors).mean()
similarities[name] = similarity
return pd.Series(similarities)
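# Note (added for clarity, not in the original script): pairwise_similarity
# averages the cosine similarity of every term vector against all term vectors of
# the topic, while mean_similarity compares each term vector to the topic's mean
# vector; both return one score per keyed-vector model.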
def eval_coherence(
topics,
dictionary,
corpus=None,
texts=None,
keyed_vectors=None,
metrics=None,
window_size=None,
suffix="",
cores=1,
logg=print,
topn=10,
):
if not (corpus or texts or keyed_vectors):
logg("provide corpus, texts and/or keyed_vectors")
return
if metrics is None:
if corpus is not None:
metrics = ["u_mass"]
if texts is not None:
if metrics is None:
metrics = ["c_v", "c_npmi", "c_uci"]
else:
metrics += ["c_v", "c_npmi", "c_uci"]
if keyed_vectors is not None:
if metrics is None:
metrics = ["c_w2v"]
else:
metrics += ["c_w2v"]
    # add out-of-vocabulary terms to the dictionary and documents
in_dict = topics.applymap(lambda x: x in dictionary.token2id)
oov = topics[~in_dict]
oov = oov.apply(set)
oov = set().union(*oov)
isstr = lambda x: isinstance(x, str)
tolist = lambda x: [x]
oov = sorted(map(tolist, filter(isstr, oov)))
logg(f"OOV: {oov}")
if oov:
dictionary.add_documents(oov, prune_at=None)
_ = dictionary[0]
scores = dict()
topics_values = topics.values
for metric in metrics:
t0 = time()
gc.collect()
logg(metric)
txt = texts + oov if texts else None
cm = CoherenceModel(
topics=topics_values,
dictionary=dictionary,
corpus=corpus,
texts=txt,
coherence=metric,
topn=topn,
window_size=window_size,
processes=cores,
keyed_vectors=keyed_vectors,
)
coherence_scores = cm.get_coherence_per_topic(with_std=True, with_support=True)
scores[metric + suffix] = coherence_scores
gc.collect()
t1 = int(time() - t0)
logg(
" done in {:02d}:{:02d}:{:02d}".format(
t1 // 3600, (t1 // 60) % 60, t1 % 60
)
)
df = pd.DataFrame(scores)
df.index = topics.index
gc.collect()
return df
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", type=str, required=True)
parser.add_argument("--version", type=str, required=False, default="noun")
parser.add_argument("--tfidf", dest="tfidf", action="store_true", required=False)
parser.add_argument(
"--no-tfidf", dest="tfidf", action="store_false", required=False
)
parser.set_defaults(tfidf=False)
parser.add_argument("--rerank", dest="rerank", action="store_true", required=False)
parser.add_argument(
"--no-rerank", dest="rerank", action="store_false", required=False
)
parser.set_defaults(rerank=False)
parser.add_argument("--lsi", dest="lsi", action="store_true", required=False)
parser.add_argument("--no-lsi", dest="lsi", action="store_false", required=False)
parser.set_defaults(lsi=False)
parser.add_argument("--params", nargs="*", type=str, required=False, default=PARAMS)
parser.add_argument(
"--nbtopics", nargs="*", type=int, required=False, default=NBTOPICS
)
parser.add_argument("--topn", type=int, required=False, default=-1)
parser.add_argument("--cores", type=int, required=False, default=4)
parser.add_argument(
"--method",
type=str,
required=False,
default="both",
choices=["coherence", "w2v", "both"],
)
args = parser.parse_args()
args.dataset = DSETS.get(args.dataset, args.dataset)
corpus_type = "tfidf" if args.tfidf else "bow"
lsi = "lsi" if args.lsi else ""
use_coherence = args.method in ["coherence", "both"]
use_w2v = args.method in ["w2v", "both"]
return (
args.dataset,
args.version,
args.params,
args.nbtopics,
args.topn,
args.cores,
corpus_type,
use_coherence,
use_w2v,
args.rerank,
lsi,
args,
)
def main():
(
dataset,
version,
params,
nbtopics,
topn,
cores,
corpus_type,
use_coherence,
use_w2v,
rerank,
lsi,
args,
) = parse_args()
logger = init_logging(
name=f"Eval_topics_{dataset}", basic=False, to_stdout=True, to_file=True
)
log_args(logger, args)
logg = logger.info
purpose = "rerank" if rerank else "topics"
topics = load(
purpose, dataset, version, corpus_type, lsi, *params, *nbtopics, logg=logg
)
if topn > 0:
topics = topics[:topn]
else:
topn = topics.shape[1]
logg(f"number of topics: {topics.shape}")
unique_topics = topics.drop_duplicates()
logg(f"number of unique topics: {unique_topics.shape}")
wiki_dict = load("dict", "dewiki", "unfiltered", logg=logg)
dfs = []
if use_coherence:
dictionary = load("dict", dataset, version, corpus_type, logg=logg)
corpus = load("corpus", dataset, version, corpus_type, logg=logg)
texts = load("texts", dataset, version, logg=logg)
df = eval_coherence(
topics=unique_topics,
dictionary=dictionary,
corpus=corpus,
texts=texts,
keyed_vectors=None,
metrics=None,
window_size=None,
suffix="",
cores=cores,
logg=logg,
topn=topn,
)
del dictionary, corpus, texts
gc.collect()
dfs.append(df)
wiki_texts = load("texts", "dewiki", logg=logg)
df = eval_coherence(
topics=unique_topics,
dictionary=wiki_dict,
corpus=None,
texts=wiki_texts,
keyed_vectors=None,
metrics=None,
window_size=None,
suffix="_wikt",
cores=cores,
logg=logg,
topn=topn,
)
gc.collect()
dfs.append(df)
df = eval_coherence(
unique_topics,
wiki_dict,
corpus=None,
texts=wiki_texts,
keyed_vectors=None,
metrics=["c_uci"],
window_size=20,
suffix="_wikt_w20",
cores=cores,
logg=logg,
topn=topn,
)
del wiki_texts
gc.collect()
dfs.append(df)
df_sims = None
if use_w2v:
d2v = load("d2v", logg=logg).docvecs
w2v = load("w2v", logg=logg).wv
ftx = load("ftx", logg=logg).wv
# Dry run to make sure both indices are fully in RAM
d2v.init_sims()
_ = d2v.vectors_docs_norm[0]
w2v.init_sims()
_ = w2v.vectors_norm[0]
ftx.init_sims()
_ = ftx.vectors_norm[0]
df = eval_coherence(
topics=unique_topics,
dictionary=wiki_dict,
corpus=None,
texts=None,
keyed_vectors=w2v,
metrics=None,
window_size=None,
suffix="_w2v",
cores=cores,
logg=logger.info,
topn=topn,
)
gc.collect()
dfs.append(df)
df = eval_coherence(
topics=unique_topics,
dictionary=wiki_dict,
corpus=None,
texts=None,
keyed_vectors=ftx,
metrics=None,
window_size=None,
suffix="_ftx",
cores=cores,
logg=logger.info,
topn=topn,
)
gc.collect()
dfs.append(df)
# apply custom similarity metrics
kvs = {"d2v": d2v, "w2v": w2v, "ftx": ftx}
ms = unique_topics.apply(lambda x: mean_similarity(x, kvs), axis=1)
ps = unique_topics.apply(
lambda x: pairwise_similarity(x, kvs, ignore_oov=True), axis=1
)
ps2 = unique_topics.apply(
lambda x: pairwise_similarity(x, kvs, ignore_oov=False), axis=1
)
df_sims = pd.concat(
{
"mean_similarity": ms,
"pairwise_similarity_ignore_oov": ps,
"pairwise_similarity": ps2,
},
axis=1,
)
del d2v, w2v, ftx
gc.collect()
dfs = pd.concat(dfs, axis=1)
dfs = (
dfs.stack()
.apply(pd.Series)
.rename(columns={0: "score", 1: "stdev", 2: "support"})
.unstack()
)
if df_sims is not None:
dfs = pd.concat([dfs, df_sims], axis=1)
# restore scores for all topics from results of unique topics
topics.columns = pd.MultiIndex.from_tuples(
[("terms", t) for t in list(topics.columns)]
)
topic_columns = list(topics.columns)
fillna = lambda grp: grp.fillna(method="ffill") if len(grp) > 1 else grp
dfs = (
topics.join(dfs)
.groupby(topic_columns)
.apply(fillna)
.drop(topic_columns, axis=1)
)
tpx_path = join(LDA_PATH, version, "bow", "topics")
if rerank:
file = join(tpx_path, f"{dataset}_reranker-eval.csv")
else:
file = join(
tpx_path,
f'{dataset}{"_"+lsi if lsi else ""}_{version}_{corpus_type}_topic-scores.csv',
)
if exists(file):
file = file.replace(".csv", f'_{str(time()).split(".")[0]}.csv')
logg(f"Writing {file}")
dfs.to_csv(file)
logg("done")
return dfs
if __name__ == "__main__":
main()
| [] |
2024-01-10 | opencui/dug | opencui~inference~schema_parser.py | #!/usr/bin/env python3
import json
from opencui.core.annotation import (CamelToSnake, EntityMetas,
ExemplarStore, FrameSchema, Schema, SlotSchema, get_value)
#
# This is used to create the DatasetCreator from OpenAI function descriptions.
#
# We assume that within each domain, slot names are unique and skill names are unique.
#
def from_openai(functions) -> Schema:
skill_infos = {}
slot_infos = {}
to_snake = CamelToSnake()
for func in functions:
o_name = func["name"]
f_name = to_snake.encode(o_name)
f_description = func["description"]
f_slots = []
parameters = func["parameters"]
if parameters["type"] != "object":
raise RuntimeError("Need to handle this case.")
for key, slot in parameters["properties"].items():
f_slots.append(key)
if key in slot_infos:
continue
else:
slot_name = key
slot_description = slot["description"]
slot_infos[slot_name] = SlotSchema(
slot_name, slot_description
).to_dict()
skill_infos[f_name] = FrameSchema(f_name, f_description, f_slots).to_dict()
return Schema(skill_infos, slot_infos, to_snake.backward)
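# Example (illustrative sketch, not part of the original module; the function
# definition below is hypothetical):
# schema = from_openai([{
#     "name": "getWeather",
#     "description": "Look up the weather for a city.",
#     "parameters": {
#         "type": "object",
#         "properties": {"city": {"description": "The city to look up."}},
#     },
# }])
# # the skill is stored under its snake_cased name ("get_weather" here) with slot "city"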
def from_openapi(specs) -> Schema:
skills = {}
slots = {}
to_snake = CamelToSnake()
print(specs)
for path, v in specs["paths"].items():
for op, _v in v.items():
orig_name = _v["operationId"]
name = to_snake.encode(orig_name)
description = get_value(_v, "description")
if description is None:
description = get_value(_v, "summary")
assert name is not None and description is not None
parameters = []
for _p in get_value(_v, "parameters", []):
slot_name = get_value(_p, "name")
slot_description = get_value(_p, "description")
if slot_name not in slots:
slots[slot_name] = SlotSchema(slot_name, slot_description).to_dict()
parameters.append(slot_name)
skills[name] = FrameSchema(name, description, parameters).to_dict()
return Schema(skills, slots, to_snake.backward)
# This assumes that in a directory we have schemas.json in openai/openapi format, and then exemplars
# recognizers.
def load_schema_from_directory(path):
schema_object = json.load(open(path))
return (
from_openai(schema_object)
if isinstance(schema_object, list)
else from_openapi(schema_object)
)
def load_all_from_directory(input_path):
module_schema = load_schema_from_directory(f"{input_path}/schemas.json")
examplers = ExemplarStore(**json.load(open(f"{input_path}/exemplars.json")))
recognizers = EntityMetas(**json.load(open(f"{input_path}/recognizers.json")))
return module_schema, examplers, recognizers
def load_specs_and_recognizers_from_directory(input_path):
module_schema = load_schema_from_directory(f"{input_path}/schemas.json")
recognizers = EntityMetas(**json.load(open(f"{input_path}/recognizers.json")))
return module_schema, recognizers
if __name__ == "__main__":
schema = from_openai(json.load(open("./examples/schemas.json")))
print(schema)
print("\n")
schema = from_openapi(json.load(open("./examples/openapi_petstore_v3.1.json")))
print(schema)
print("\n")
exemplars = ExemplarStore(**json.load(open("./examples/exemplars.json")))
print(exemplars)
print("\n")
recognizer = EntityMetas(**json.load(open("./examples/recognizers.json")))
print(recognizer)
print("\n")
| [] |
2024-01-10 | fperez/nitime | doc~examples~fmri3.py | #!/usr/bin/python
#Imports as before:
import numpy as np
from matplotlib.pyplot import figure,legend
from matplotlib.mlab import csv2rec
from nitime.timeseries import TimeSeries
from nitime.utils import percent_change
import nitime.viz
reload(nitime.viz)
from nitime.viz import drawmatrix_channels
#This time Import the coherence analyzer
from nitime.analysis import CoherenceAnalyzer
#This part is the same as before
TR=1.89
data_rec = csv2rec('data/fmri_timeseries.csv')
roi_names= np.array(data_rec.dtype.names)
n_samples = data_rec.shape[0]
data = np.zeros((len(roi_names),n_samples))
for n_idx, roi in enumerate(roi_names):
data[n_idx] = data_rec[roi]
data = percent_change(data)
T = TimeSeries(data,sampling_interval=TR)
T.metadata['roi'] = roi_names
C = CoherenceAnalyzer(T)
#We look only at frequencies between 0.02 and 0.15 (the physiologically
#relevant band, see http://imaging.mrc-cbu.cam.ac.uk/imaging/DesignEfficiency:
freq_idx = np.where((C.frequencies>0.02) * (C.frequencies<0.15))[0]
#Extract the coherence and average across these frequency bands:
coh = np.mean(C.coherence[:,:,freq_idx],-1) #Averaging on the last dimension
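#Note (added for clarity): coh is now an n_rois x n_rois matrix of coherence
#values averaged over the 0.02-0.15 Hz band, which is what gets visualized below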
drawmatrix_channels(coh,roi_names,size=[10.,10.],color_anchor=0)
| [] |
2024-01-10 | fperez/nitime | doc~examples~multi_taper_coh.py | #!/usr/bin/python
#Imports as before:
import numpy as np
import matplotlib.pyplot as pp
from matplotlib.mlab import csv2rec
from nitime.timeseries import TimeSeries
from nitime import utils
import nitime.algorithms as alg
import nitime.viz
reload(nitime.viz)
from nitime.viz import drawmatrix_channels
import scipy.stats.distributions as dist
#This time Import the coherence analyzer
from nitime.analysis import CoherenceAnalyzer
#This part is the same as before
TR=1.89
data_rec = csv2rec('data/fmri_timeseries.csv')
roi_names= np.array(data_rec.dtype.names)
nseq = len(roi_names)
n_samples = data_rec.shape[0]
data = np.zeros((nseq, n_samples))
for n_idx, roi in enumerate(roi_names):
data[n_idx] = data_rec[roi]
pdata = utils.percent_change(data)
T = TimeSeries(pdata,sampling_interval=TR)
T.metadata['roi'] = roi_names
NW = 5
K = 2*NW-1
tapers, eigs = alg.DPSS_windows(n_samples, NW, 2*NW-1)
tdata = tapers[None,:,:] * pdata[:,None,:]
tspectra = np.fft.fft(tdata)
mag_sqr_spectra = np.abs(tspectra)
np.power(mag_sqr_spectra, 2, mag_sqr_spectra)
# Only compute half the spectrum.. coherence for real sequences is symmetric
L = n_samples/2 + 1
#L = n_samples
w = np.empty( (nseq, K, L) )
for i in xrange(nseq):
w[i], _ = utils.adaptive_weights_cython(mag_sqr_spectra[i], eigs, L)
# calculate the coherence
csd_mat = np.zeros((nseq, nseq, L), 'D')
psd_mat = np.zeros((2, nseq, nseq, L), 'd')
coh_mat = np.zeros((nseq, nseq, L), 'd')
coh_var = np.zeros_like(coh_mat)
for i in xrange(nseq):
for j in xrange(i):
sxy = alg.mtm_cross_spectrum(
tspectra[i], tspectra[j], (w[i], w[j]), sides='onesided'
)
sxx = alg.mtm_cross_spectrum(
tspectra[i], tspectra[i], (w[i], w[i]), sides='onesided'
).real
syy = alg.mtm_cross_spectrum(
            tspectra[j], tspectra[j], (w[j], w[j]), sides='onesided'  # channel j's auto-spectrum uses its own weights
).real
psd_mat[0,i,j] = sxx
psd_mat[1,i,j] = syy
coh_mat[i,j] = np.abs(sxy)**2
coh_mat[i,j] /= (sxx * syy)
csd_mat[i,j] = sxy
if i != j:
coh_var[i,j] = utils.jackknifed_coh_variance(
tspectra[i], tspectra[j], weights=(w[i], w[j]), last_freq=L
)
upper_idc = utils.triu_indices(nseq, k=1)
lower_idc = utils.tril_indices(nseq, k=-1)
coh_mat[upper_idc] = coh_mat[lower_idc]
coh_var[upper_idc] = coh_var[lower_idc]
# convert this measure with the normalizing function
coh_mat_xform = utils.normalize_coherence(coh_mat, 2*K-2)
t025_limit = coh_mat_xform + dist.t.ppf(.025, K-1)*np.sqrt(coh_var)
t975_limit = coh_mat_xform + dist.t.ppf(.975, K-1)*np.sqrt(coh_var)
utils.normal_coherence_to_unit(t025_limit, 2*K-2, t025_limit)
utils.normal_coherence_to_unit(t975_limit, 2*K-2, t975_limit)
if L < n_samples:
freqs = np.linspace(0, 1/(2*TR), L)
else:
freqs = np.linspace(0, 1/TR, L, endpoint=False)
#We look only at frequencies between 0.02 and 0.15 (the physiologically
#relevant band, see http://imaging.mrc-cbu.cam.ac.uk/imaging/DesignEfficiency:
freq_idx = np.where((freqs>0.02) * (freqs<0.15))[0]
#Extract the coherence and average across these frequency bands:
coh = np.mean(coh_mat[:,:,freq_idx],-1) #Averaging on the last dimension
drawmatrix_channels(coh,roi_names,size=[10.,10.],color_anchor=0,
title='MTM Coherence')
C = CoherenceAnalyzer(T)
#We look only at frequencies between 0.02 and 0.15 (the physiologically
#relevant band, see http://imaging.mrc-cbu.cam.ac.uk/imaging/DesignEfficiency:
freq_idx = np.where((C.frequencies>0.02) * (C.frequencies<0.15))[0]
#Extract the coherence and average across these frequency bands:
coh = np.mean(C.coherence[:,:,freq_idx],-1) #Averaging on the last dimension
drawmatrix_channels(coh,roi_names,size=[10.,10.],color_anchor=0,
title='CoherenceAnalyzer')
pp.show()
| [] |
2024-01-10 | fperez/nitime | doc~examples~fmri6.py | #!/usr/bin/python
#Imports as before:
import numpy as np
from matplotlib.pyplot import figure,legend
from matplotlib.mlab import csv2rec
from nitime.timeseries import TimeSeries
from nitime.utils import percent_change
import nitime.viz
reload(nitime.viz)
from nitime.viz import drawgraph_channels,drawmatrix_channels
import nitime.analysis
reload(nitime.analysis)
from nitime.analysis import CoherenceAnalyzer
#This part is the same as before
TR=1.89
data_rec = csv2rec('data/fmri_timeseries.csv')
roi_names= np.array(data_rec.dtype.names)
n_samples = data_rec.shape[0]
data = np.zeros((len(roi_names),n_samples))
for n_idx, roi in enumerate(roi_names):
data[n_idx] = data_rec[roi]
data = percent_change(data)
T = TimeSeries(data,sampling_interval=TR)
T.metadata['roi'] = roi_names
C = CoherenceAnalyzer(T)
freq_idx = np.where((C.frequencies>0.02) * (C.frequencies<0.15))[0]
idx_lcau = np.where(roi_names=='lcau')[0]
idx_rcau = np.where(roi_names=='rcau')[0]
idx_lput = np.where(roi_names=='lput')[0]
idx_rput = np.where(roi_names=='rput')[0]
idx = np.hstack([idx_lcau,idx_rcau,idx_lput,idx_rput])
idx1 = np.vstack([[idx[i]]*4 for i in range(4)]).ravel()
idx2 = np.hstack(4*[idx])
#For the third dimension, take always the index of the left caudate:
idx3 = np.hstack(16*[idx_lcau])
#Extract the partial coherence and average across the frequency bands:
phase = C.phase[idx1,idx2].reshape(4,4,C.frequencies.shape[0])
phase_m = np.mean(phase[:,:,freq_idx],-1) #Averaging on the last dimension
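# Editor's note: a hedged follow-up sketch, not part of the original example.
# Given the band-averaged relative phase computed above, a rough time-delay
# estimate between regions is phase / (2*pi*f) for a representative frequency
# of the band (the band centre is used here); the sign convention for lead/lag
# follows whatever convention C.phase uses.
f_center = np.mean(C.frequencies[freq_idx])
delay_m = phase_m / (2 * np.pi * f_center)  # in seconds
drawmatrix_channels(delay_m, roi_names[idx], size=[10., 10.], color_anchor=0)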
| [] |
2024-01-10 | fperez/nitime | doc~examples~fmri5.py | #!/usr/bin/python
#Imports as before:
import numpy as np
from matplotlib.pyplot import figure,legend
from matplotlib.mlab import csv2rec
from nitime.timeseries import TimeSeries
from nitime.utils import percent_change
import nitime.viz
reload(nitime.viz)
from nitime.viz import drawgraph_channels,drawmatrix_channels
import nitime.analysis
reload(nitime.analysis)
from nitime.analysis import CoherenceAnalyzer
#This part is the same as before
TR=1.89
data_rec = csv2rec('data/fmri_timeseries.csv')
roi_names= np.array(data_rec.dtype.names)
n_samples = data_rec.shape[0]
data = np.zeros((len(roi_names),n_samples))
for n_idx, roi in enumerate(roi_names):
data[n_idx] = data_rec[roi]
data = percent_change(data)
T = TimeSeries(data,sampling_interval=TR)
T.metadata['roi'] = roi_names
C = CoherenceAnalyzer(T)
freq_idx = np.where((C.frequencies>0.02) * (C.frequencies<0.15))[0]
idx_lcau = np.where(roi_names=='lcau')[0]
idx_rcau = np.where(roi_names=='rcau')[0]
idx_lput = np.where(roi_names=='lput')[0]
idx_rput = np.where(roi_names=='rput')[0]
idx = np.hstack([idx_lcau,idx_rcau,idx_lput,idx_rput])
idx1 = np.vstack([[idx[i]]*4 for i in range(4)]).ravel()
idx2 = np.hstack(4*[idx])
#For the third dimension, take always the index of the left caudate:
idx3 = np.hstack(16*[idx_lcau])
#Extract the partial coherence and average across the frequency bands:
coh = C.coherence_partial[idx1,idx2,idx3].reshape(4,4,C.frequencies.shape[0])
coh = np.mean(coh[:,:,freq_idx],-1) #Averaging on the last dimension
drawgraph_channels(coh,roi_names[idx])
drawmatrix_channels(coh,roi_names[idx],color_anchor=0)
| [] |
2024-01-10 | fperez/nitime | doc~examples~fmri4.py | #!/usr/bin/python
#Imports as before:
import numpy as np
from matplotlib.pyplot import figure,legend
from matplotlib.mlab import csv2rec
from nitime.timeseries import TimeSeries
from nitime.utils import percent_change
import nitime.viz
reload(nitime.viz)
from nitime.viz import drawgraph_channels,drawmatrix_channels
#This time Import the coherence analyzer
from nitime.analysis import CoherenceAnalyzer
#This part is the same as before
TR=1.89
data_rec = csv2rec('data/fmri_timeseries.csv')
roi_names= np.array(data_rec.dtype.names)
n_samples = data_rec.shape[0]
data = np.zeros((len(roi_names),n_samples))
for n_idx, roi in enumerate(roi_names):
data[n_idx] = data_rec[roi]
data = percent_change(data)
T = TimeSeries(data,sampling_interval=TR)
T.metadata['roi'] = roi_names
C = CoherenceAnalyzer(T)
freq_idx = np.where((C.frequencies>0.02) * (C.frequencies<0.15))[0]
idx_lcau = np.where(roi_names=='lcau')[0]
idx_rcau = np.where(roi_names=='rcau')[0]
idx_lput = np.where(roi_names=='lput')[0]
idx_rput = np.where(roi_names=='rput')[0]
idx = np.hstack([idx_lcau,idx_rcau,idx_lput,idx_rput])
idx1 = np.vstack([[idx[i]]*4 for i in range(4)]).ravel()
idx2 = np.hstack(4*[idx])
#Extract the coherence and average across these frequency bands:
coh = C.coherence[idx1,idx2].reshape(4,4,C.frequencies.shape[0])
coh = np.mean(coh[:,:,freq_idx],2) #Averaging on the last dimension
drawgraph_channels(coh,roi_names[idx])
| [] |
2024-01-10 | BenJamesbabala/rl_algorithms | es~es.py | """
This is Natural Evolution Strategies, designed to run on one computer and not a
cluster.
(c) May 2017 by Daniel Seita, though obviously based on OpenAI's work/idea.
"""
import gym
import logz
import numpy as np
import os
import pickle
import sys
import tensorflow as tf
import tensorflow.contrib.layers as layers
import time
import utils
from collections import defaultdict
from gym import wrappers
np.set_printoptions(edgeitems=100, linewidth=100, suppress=True, precision=5)
class ESAgent:
def __init__(self, session, args, log_dir=None, continuous=True):
""" An Evolution Strategies agent.
It uses the same network architecture from OpenAI's paper, and I think
OpenAI didn't sample the actions from a Gaussian afterwards. The agent
has functionality for obtaining and updating weights in vector form to
make ES addition easier.
Args:
session: A Tensorflow session.
args: The argparse from the user.
log_dir: The log directory for the logging, if any.
continuous: Whether the agent acts in a continuous or discrete
action space. (Right now only continuous is supported.)
"""
assert continuous == True, "Error: only continuous==True is supported."
tf.set_random_seed(args.seed)
self.sess = session
self.args = args
self.log_dir = log_dir
self.env = gym.make(args.envname)
ob_dim = self.env.observation_space.shape[0]
ac_dim = self.env.action_space.shape[0]
self.ob_no = tf.placeholder(shape=[None, ob_dim], dtype=tf.float32)
# Build the final network layer, which is our action (no sampling!).
self.sampled_ac = self._make_network(data_in=self.ob_no, out_dim=ac_dim)[0]
# To *extract* weight values, run a session on `self.weights_v`.
self.weights = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='ESAgent')
self.weights_v = tf.concat([tf.reshape(w, [-1]) for w in self.weights], axis=0)
self.shapes = [w.get_shape().as_list() for w in self.weights]
self.num_ws = np.sum([np.prod(sh) for sh in self.shapes])
# To *update* weights, run `self.set_params_op` w/feed `self.new_weights_v`.
self.new_weights_v = tf.placeholder(tf.float32, shape=[self.num_ws])
updates = []
start = 0
for (i,w) in enumerate(self.weights):
shape = self.shapes[i]
size = np.prod(shape)
updates.append(
tf.assign(w, tf.reshape(self.new_weights_v[start:start+size], shape))
)
start += size
self.set_params_op = tf.group(*updates)
if args.verbose:
self._print_summary()
self.sess.run(tf.global_variables_initializer())
def _make_network(self, data_in, out_dim):
""" Build the network with the same architecture following OpenAI's paper.
Returns the final *layer* of the network, which corresponds to our
chosen action. There is no non-linearity for the last layer because
different envs have different action ranges.
"""
with tf.variable_scope("ESAgent", reuse=False):
out = data_in
out = layers.fully_connected(out, num_outputs=64,
weights_initializer = layers.xavier_initializer(uniform=True),
#weights_initializer = utils.normc_initializer(0.5),
activation_fn = tf.nn.tanh)
out = layers.fully_connected(out, num_outputs=64,
weights_initializer = layers.xavier_initializer(uniform=True),
#weights_initializer = utils.normc_initializer(0.5),
activation_fn = tf.nn.tanh)
out = layers.fully_connected(out, num_outputs=out_dim,
weights_initializer = layers.xavier_initializer(uniform=True),
#weights_initializer = utils.normc_initializer(0.5),
activation_fn = None)
return out
def _compute_return(self, test=False, store_info=False):
""" Runs the current neural network policy.
For now, we assume we run **one** episode. Also, we expand the
observations to get a dummy dimension, in case we figure out how to make
use of minibatches later.
Args:
            test: True if testing, False if part of training. The testing could
                be either the tests done after each weight update, or the tests
                done as a result of the `test` method.
store_info: True if storing info is desired, meaning that we return
observations and actions.
Returns:
The scalar return to be evaluated by the ES agent.
"""
max_steps = self.env.spec.timestep_limit
obs = self.env.reset()
done = False
steps = 0
total_rew = 0
observations = []
actions = []
while not done:
exp_obs = np.expand_dims(obs, axis=0)
action = self.sess.run(self.sampled_ac, {self.ob_no:exp_obs})
observations.append(obs)
actions.append(action)
# Apply the action *after* storing the current obs/action pair.
obs, r, done, _ = self.env.step(action)
total_rew += r
steps += 1
if self.args.render and test:
self.env.render()
if steps >= max_steps or done:
break
if store_info:
return total_rew, observations, actions
else:
return total_rew
def _print_summary(self):
""" Just for debugging assistance. """
print("\nES Agent NN weight shapes:\n{}".format(self.shapes))
print("\nES Agent NN weights:")
for w in self.weights:
print(w)
print("\nNumber of weights: {}".format(self.num_ws))
print("\naction space: {}".format(self.env.action_space))
print("lower bound: {}".format(self.env.action_space.low))
print("upper bound: {}".format(self.env.action_space.high))
print("self.sampled_ac: {}\n".format(self.sampled_ac))
def run_es(self):
""" Runs Evolution Strategies.
Tricks used:
- Antithetic (i.e. mirrored) sampling.
- Rank transformation, using OpenAI's code.
Tricks avoided:
- Fixed Gaussian block. I like to just regenerate here.
- Virtual batch normalization, seems to be only for Atari games.
- Weight decay. Not sure how to do this.
- Action discretization. For now, it adds extra complexity.
Final weights are saved and can be pre-loaded elsewhere.
"""
args = self.args
t_start = time.time()
for i in range(args.es_iters):
if (i % args.log_every_t_iter == 0):
print("\n************ Iteration %i ************"%i)
stats = defaultdict(list)
# Set stuff up for perturbing weights and determining fitness.
weights_old = self.sess.run(self.weights_v) # Shape (numw,)
            eps_nw = np.random.randn(args.npop // 2, self.num_ws)
scores_n2 = []
            for j in range(args.npop // 2):
# Mirrored sampling, positive case, +eps_j.
weights_new_pos = weights_old + args.sigma * eps_nw[j]
self.sess.run(self.set_params_op,
feed_dict={self.new_weights_v: weights_new_pos})
rews_pos = self._compute_return()
# Mirrored sampling, negative case, -eps_j.
weights_new_neg = weights_old - args.sigma * eps_nw[j]
self.sess.run(self.set_params_op,
feed_dict={self.new_weights_v: weights_new_neg})
rews_neg = self._compute_return()
scores_n2.append([rews_pos,rews_neg])
# Determine the new weights based on OpenAI's rank updating.
proc_returns_n2 = utils.compute_centered_ranks(np.array(scores_n2))
F_n = proc_returns_n2[:,0] - proc_returns_n2[:,1]
grad = np.dot(eps_nw.T, F_n)
# Apply the gradient update. TODO: Change this to ADAM.
alpha = (args.lrate_es / (args.sigma*args.npop))
next_weights = weights_old + alpha * grad
self.sess.run(self.set_params_op,
feed_dict={self.new_weights_v: next_weights})
# Report relevant logs.
if (i % args.log_every_t_iter == 0):
hours = (time.time()-t_start) / (60*60.)
# Test roll-outs with these new weights.
returns = []
for _ in range(args.test_trajs):
returns.append(self._compute_return(test=True))
logz.log_tabular("FinalAvgReturns", np.mean(returns))
logz.log_tabular("FinalStdReturns", np.std(returns))
logz.log_tabular("FinalMaxReturns", np.max(returns))
logz.log_tabular("FinalMinReturns", np.min(returns))
logz.log_tabular("ScoresAvg", np.mean(scores_n2))
logz.log_tabular("ScoresStd", np.std(scores_n2))
logz.log_tabular("ScoresMax", np.max(scores_n2))
logz.log_tabular("ScoresMin", np.min(scores_n2))
logz.log_tabular("TotalTimeHours", hours)
logz.log_tabular("TotalIterations", i)
logz.dump_tabular()
# Save the weights so I can test them later.
if (i % args.snapshot_every_t_iter == 0):
itr = str(i).zfill(len(str(abs(args.es_iters))))
with open(self.log_dir+'/snapshots/weights_'+itr+'.pkl', 'wb') as f:
pickle.dump(next_weights, f)
# Save the *final* weights.
itr = str(i).zfill(len(str(abs(args.es_iters))))
with open(self.log_dir+'/snapshots/weights_'+itr+'.pkl', 'wb') as f:
pickle.dump(next_weights, f)
def test(self, just_one=True):
""" This is for test-time evaluation. No training is done here. By
default, iterate through every snapshot. If `just_one` is true, this
only runs one set of weights, to ensure that we record right away since
OpenAI will only record subsets and less frequently. Changing the loop
over snapshots is also needed.
"""
os.makedirs(self.args.directory+'/videos')
self.env = wrappers.Monitor(self.env, self.args.directory+'/videos', force=True)
headdir = self.args.directory+'/snapshots/'
snapshots = os.listdir(headdir)
snapshots.sort()
num_rollouts = 10
if just_one:
num_rollouts = 1
for sn in snapshots:
print("\n***** Currently on snapshot {} *****".format(sn))
### Add your own criteria here.
# if "800" not in sn:
# continue
###
with open(headdir+sn, 'rb') as f:
weights = pickle.load(f)
self.sess.run(self.set_params_op,
feed_dict={self.new_weights_v: weights})
returns = []
for i in range(num_rollouts):
returns.append( self._compute_return(test=True) )
print("mean: \t{}".format(np.mean(returns)))
print("std: \t{}".format(np.std(returns)))
print("max: \t{}".format(np.max(returns)))
print("min: \t{}".format(np.min(returns)))
print("returns:\n{}".format(returns))
def generate_rollout_data(self, weights, num_rollouts):
""" Roll out the expert data and save the observations and actions for
imitation learning later.
The observations and actions are stored in two separate lists of lists.
For instance, with InvertedPendulum and 100 rollouts, the shapes will be
be (100,1000,4) and (100,1000,1), with the 1000 representing 1000 time
steps. The actual expert roll-outs may not last the same time length.
Use the `ENV_TO_OBS_SHAPE` to guard against this scenario. We
**zero-pad** if needed (maybe randomizing is better? but MuJoCo is
continuous and actions are centered at zero...).
TL;DR: leading dimension is the minibatch, second leading dimension is
the timestep, third is the obs/act shape. If the obs/acts have two
dimensions, let's linearize to avoid worrying about it.
By the way, to experiment later with the *transits* only, just use the
same data here except shuffle the code. This happens elsewhere.
Args:
weights: The desired weight vector.
num_rollouts: The number of expert rollouts to save.
"""
# These are the shapes we need **for each trajectory**.
ENV_TO_OBS_SHAPE = {"InvertedPendulum-v1": (1000,4)}
ENV_TO_ACT_SHAPE = {"InvertedPendulum-v1": (1000,1)}
if self.args.envname not in ENV_TO_OBS_SHAPE:
print("Error, this environment is not supported.")
sys.exit()
headdir = self.args.directory+ '/expert_data'
if not os.path.exists(headdir):
os.makedirs(headdir)
self.sess.run(self.set_params_op, feed_dict={self.new_weights_v: weights})
returns = []
observations = []
actions = []
for i in range(num_rollouts):
if i % 10 == 0:
print("rollout {}".format(i))
rew, obs_l, acts_l = self._compute_return(test=False, store_info=True)
returns.append(rew)
observations.append(obs_l)
actions.append(acts_l)
print("returns", returns)
print("mean return", np.mean(returns))
print("std of return", np.std(returns))
# Fix padding issue to make lists have the same shape; we later make an
# array. Check each (ol,al), tuple of lists, to ensure shapes match. If
# the obs-list doesn't match, neither will the act-list, so test one.
for (i,(ol,al)) in enumerate(zip(observations,actions)):
obs_l = np.array(ol)
act_l = np.array(al)
print("{} {} {}".format(i, obs_l.shape, act_l.shape))
if obs_l.shape != ENV_TO_OBS_SHAPE[self.args.envname]:
result_o = np.zeros(ENV_TO_OBS_SHAPE[self.args.envname])
result_a = np.zeros(ENV_TO_ACT_SHAPE[self.args.envname])
result_o[:obs_l.shape[0],:obs_l.shape[1]] = obs_l
result_a[:act_l.shape[0],:act_l.shape[1]] = act_l
print("revised shapes: {} {}".format(result_o.shape, result_a.shape))
obs_l = result_o
act_l = result_a
observations[i] = obs_l
actions[i] = act_l
expert_data = {'observations': np.array(observations),
'actions': np.array(actions)}
# Save the data
print("obs-shape = {}".format(expert_data['observations'].shape))
print("act-shape = {}".format(expert_data['actions'].shape))
str_roll = str(num_rollouts).zfill(4)
name = headdir+ "/" +self.args.envname+ "_" +str_roll+ "rollouts_trajs"
np.save(name, expert_data)
print("Expert data has been saved in: {}.npy".format(name))
| [] |
2024-01-10 | RylanGotto/omni_search_backend | test2.py | from dotenv import load_dotenv
from langchain.utilities import GoogleSerperAPIWrapper
import asyncio
load_dotenv()
google = GoogleSerperAPIWrapper()
async def two_plus_one(a, b):
return a + b
num_list = [(1, 2), (2, 3), (4, 5)]
tasks = []
for i in num_list:
tasks.append(two_plus_one(i[0], i[1]))
async def main():
results = await asyncio.gather(*tasks)
print(results)
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| [] |
2024-01-10 | RylanGotto/omni_search_backend | test3.py | from dotenv import load_dotenv
from langchain.utilities import GoogleSerperAPIWrapper
from generate_structured_data import GenerateStructuredData as GSD
import asyncio
import aiohttp
from unstructured.partition.html import partition_html
from generate_document import GenerateDocument as gd
import time
load_dotenv()
google = GoogleSerperAPIWrapper()
urls = [
"https://www.newser.com/story/341661/china-preps-for-assault-with-tips-learned-from-russia.html",
"https://www.newser.com/story/341701/aid-reaches-gaza-as-us-issues-a-warning.html",
"https://www.reddit.com/r/Python/comments/17dkshe/when_have_you_reach_a_python_limit/",
"https://www.msn.com/en-ca/news/canada/india-says-relations-with-canada-passing-through-difficult-phase/ar-AA1iEsvJ?ocid=winp2fptaskbar&cvid=4f3be7e3697a4beba7f62d1f931de72a&ei=4",
]
tasks = []
from multiprocessing.dummy import Pool as ThreadPool
def get_doc(url):
gsd = GSD()
content = gd.generate(url)
formatted_input = gsd.generate_input(content)
return formatted_input
def main():
start = time.time()
# for i in urls:
# tasks.append(asyncio.create_task(get_doc(i)))
# r = await asyncio.gather(*tasks)
# print(r)
pool = ThreadPool(20)
# open the urls in their own threads
# and return the results
r = pool.map(get_doc, urls)
print(r)
# close the pool and wait for the work to finish
pool.close()
pool.join()
end = time.time()
print("Runtime: " + str(end - start))
main()
# loop = asyncio.get_event_loop()
# loop.run_until_complete(main())
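# Editor's note: a hedged sketch of the asyncio variant hinted at by the
# commented-out lines above. Since get_doc() is blocking, each call is pushed
# onto a worker thread with asyncio.to_thread (Python 3.9+); urls, get_doc and
# time are the names already defined in this file. The run call is left
# commented out so the sketch does not execute in addition to main() above.
async def async_main():
    start = time.time()
    results = await asyncio.gather(*(asyncio.to_thread(get_doc, u) for u in urls))
    print(results)
    print("Runtime: " + str(time.time() - start))
# asyncio.run(async_main())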
| [] |
2024-01-10 | RylanGotto/omni_search_backend | generate_document.py | import re
from langchain.document_loaders import UnstructuredURLLoader
from langchain.docstore.document import Document
from unstructured.cleaners.core import clean, clean_extra_whitespace
import pprint
from unstructured.documents.html import HTMLDocument
class GenerateDocument:
@staticmethod
def generate(url):
document = []
loader = UnstructuredURLLoader(
urls=[url],
mode="elements",
post_processors=[clean, clean_extra_whitespace],
)
elements = loader.load()
# print(elements)
# exit()
selected_elements = [
e
for e in elements
if e.metadata["category"] == "ListItem"
or e.metadata["category"] == "NarrativeText"
]
full_clean = re.sub(
" +", " ", " ".join([e.page_content for e in selected_elements])
)
return full_clean
| [] |
2024-01-10 | linhht/test | myLangchainChatbot.py | # Install required modules/libraries
# pip install --upgrade pip
# pip install --upgrade langchain
# pip install openai
# Run `openai migrate` to automatically upgrade your codebase to use the 1.0.0 interface
# Or pin your installation to the old version, e.g. `pip install openai==0.28`
# pip install --upgrade flask python-dotenv-vault
# pip install chromadb
# pip install tiktoken
# pip install panel
# pip install pypdf
# pip3 install pydantic==1.10.9
# pip install "langchain[docarray]"
# pip install docarray
# Create requirement.txt
# pip freeze > requirements.txt
# pip install -r requirements.txt
import os
from openai import OpenAI
# To support sqlite3
__import__('pysqlite3')
import sys
sys.modules['sqlite3'] = sys.modules.pop('pysqlite3')
sys.path.append('../..')
# Load remote env - new way with cloud-based (dotenv.org) .env file
from dotenv_vault import load_dotenv
load_dotenv()
client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))
#print(os.getenv('OPENAI_API_KEY'))
# Set llm model
llm_name = "gpt-3.5-turbo"
"""
from langchain.vectorstores import Chroma
from langchain.embeddings.openai import OpenAIEmbeddings
persist_directory = 'docs/chroma/'
embedding = OpenAIEmbeddings()
vectordb = Chroma(
persist_directory=persist_directory,
embedding_function=embedding)
# Run some sample similarity search
question = "What are major topics for this class?"
docs = vectordb.similarity_search(question,k=3)
#print(len(docs))
# Initialize the LLM & say hello to get response
from langchain.chat_models import ChatOpenAI
llm = ChatOpenAI(model_name=llm_name, temperature=0)
#print(llm.predict("Hello world!"))
# Build prompt
from langchain.prompts import PromptTemplate
template = "Use the following pieces of context to answer the question at the end. \
If you don't know the answer, just say that you don't know, don't try to make up an answer. \
Use three sentences maximum. Keep the answer as concise as possible. Always say 'thanks for asking!' at the end of the answer. \
{context}\
Question: {question}\
Helpful Answer:"
QA_CHAIN_PROMPT = PromptTemplate(
input_variables=["context", "question"],
template=template,)
# Run chain
from langchain.chains import RetrievalQA
#question = "Is probability a class topic?"
#question = "What is my name"
qa_chain = RetrievalQA.from_chain_type(llm,
retriever=vectordb.as_retriever(),
return_source_documents=True,
chain_type_kwargs={"prompt": QA_CHAIN_PROMPT})
result = qa_chain({"query": question})
print(result["result"])
# Memory
from langchain.memory import ConversationBufferMemory
memory = ConversationBufferMemory(
memory_key="chat_history",
return_messages=True
)
# ConversationalRetrievalChain
from langchain.chains import ConversationalRetrievalChain
retriever=vectordb.as_retriever()
qa = ConversationalRetrievalChain.from_llm(
llm,
retriever=retriever,
memory=memory
)
question = "Is probability a class topic?"
result = qa({"question": question})
print(result['answer'])
question = "why are those prerequesites needed?"
result = qa({"question": question})
print(result['answer'])
"""
# Create a chatbot that works on your documents
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
from langchain.vectorstores import DocArrayInMemorySearch
from langchain.document_loaders import TextLoader
from langchain.chains import RetrievalQA, ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import TextLoader
from langchain.document_loaders import PyPDFLoader
#import docarray
# This will initialize your database and retriever chain
def load_db(file, chain_type, k):
# load documents
loader = PyPDFLoader(file)
documents = loader.load()
# split documents
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=150)
docs = text_splitter.split_documents(documents)
# define embedding
embeddings = OpenAIEmbeddings()
# create vector database from data
db = DocArrayInMemorySearch.from_documents(docs, embeddings)
# define retriever
retriever = db.as_retriever(search_type="similarity", search_kwargs={"k": k})
# create a chatbot chain. Memory is managed externally.
qa = ConversationalRetrievalChain.from_llm(
llm=ChatOpenAI(model_name=llm_name, temperature=0),
chain_type=chain_type,
retriever=retriever,
return_source_documents=True,
return_generated_question=True,
)
return qa
import panel as pn
pn.extension()
import param
class cbfs(param.Parameterized):
chat_history = param.List([])
answer = param.String("")
db_query = param.String("")
db_response = param.List([])
def __init__(self, **params):
super(cbfs, self).__init__( **params)
self.panels = []
self.loaded_file = "docs/chroma/MachineLearning-Lecture01.pdf"
self.qa = load_db(self.loaded_file,"stuff", 4)
def call_load_db(self, count):
if count == 0 or file_input.value is None: # init or no file specified :
return pn.pane.Markdown(f"Loaded File: {self.loaded_file}")
else:
file_input.save("temp.pdf") # local copy
self.loaded_file = file_input.filename
button_load.button_style="outline"
self.qa = load_db("temp.pdf", "stuff", 4)
button_load.button_style="solid"
self.clr_history()
return pn.pane.Markdown(f"Loaded File: {self.loaded_file}")
def convchain(self, query):
if not query:
return pn.WidgetBox(pn.Row('User:', pn.pane.Markdown("", width=600)), scroll=True)
result = self.qa({"question": query, "chat_history": self.chat_history})
self.chat_history.extend([(query, result["answer"])])
self.db_query = result["generated_question"]
self.db_response = result["source_documents"]
self.answer = result['answer']
self.panels.extend([
pn.Row('User:', pn.pane.Markdown(query, width=600)),
pn.Row('ChatBot:', pn.pane.Markdown(self.answer, width=600, style={'background-color': '#F6F6F6'}))
])
inp.value = '' #clears loading indicator when cleared
return pn.WidgetBox(*self.panels,scroll=True)
    @param.depends('db_query', )
    def get_lquest(self):
        if not self.db_query:
return pn.Column(
pn.Row(pn.pane.Markdown(f"Last question to DB:", styles={'background-color': '#F6F6F6'})),
pn.Row(pn.pane.Str("no DB accesses so far"))
)
return pn.Column(
pn.Row(pn.pane.Markdown(f"DB query:", styles={'background-color': '#F6F6F6'})),
pn.pane.Str(self.db_query )
)
@param.depends('db_response', )
def get_sources(self):
if not self.db_response:
return
rlist=[pn.Row(pn.pane.Markdown(f"Result of DB lookup:", styles={'background-color': '#F6F6F6'}))]
for doc in self.db_response:
rlist.append(pn.Row(pn.pane.Str(doc)))
return pn.WidgetBox(*rlist, width=600, scroll=True)
@param.depends('convchain', 'clr_history')
def get_chats(self):
if not self.chat_history:
return pn.WidgetBox(pn.Row(pn.pane.Str("No History Yet")), width=600, scroll=True)
rlist=[pn.Row(pn.pane.Markdown(f"Current Chat History variable", styles={'background-color': '#F6F6F6'}))]
for exchange in self.chat_history:
rlist.append(pn.Row(pn.pane.Str(exchange)))
return pn.WidgetBox(*rlist, width=600, scroll=True)
def clr_history(self,count=0):
self.chat_history = []
return
# Create a chatbot
cb = cbfs()
file_input = pn.widgets.FileInput(accept='.pdf')
button_load = pn.widgets.Button(name="Load DB", button_type='primary')
button_clearhistory = pn.widgets.Button(name="Clear History", button_type='warning')
button_clearhistory.on_click(cb.clr_history)
inp = pn.widgets.TextInput( placeholder='Enter text here…')
bound_button_load = pn.bind(cb.call_load_db, button_load.param.clicks)
conversation = pn.bind(cb.convchain, inp)
jpg_pane = pn.pane.Image( './img/convchain.jpg')
tab1 = pn.Column(
pn.Row(inp),
pn.layout.Divider(),
pn.panel(conversation, loading_indicator=True, height=300),
pn.layout.Divider(),
)
tab2= pn.Column(
pn.panel(cb.get_lquest),
pn.layout.Divider(),
pn.panel(cb.get_sources ),
)
tab3= pn.Column(
pn.panel(cb.get_chats),
pn.layout.Divider(),
)
tab4=pn.Column(
pn.Row( file_input, button_load, bound_button_load),
pn.Row( button_clearhistory, pn.pane.Markdown("Clears chat history. Can use to start a new topic" )),
pn.layout.Divider(),
pn.Row(jpg_pane.clone(width=400))
)
dashboard = pn.Column(
pn.Row(pn.pane.Markdown('# ChatWithYourData_Bot')),
pn.Tabs(('Conversation', tab1), ('Database', tab2), ('Chat History', tab3),('Configure', tab4))
)
# For deployment on a web server wrap it in a nice template.
dashboard.servable()
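# Editor's note: a minimal, hedged sketch (not wired into the Panel dashboard)
# of calling load_db() directly. The PDF path is the same default file cbfs
# loads above, and the empty chat_history list is the format
# ConversationalRetrievalChain expects when memory is managed externally.
def _example_query(question="Is probability a class topic?"):
    qa = load_db("docs/chroma/MachineLearning-Lecture01.pdf", "stuff", 4)
    result = qa({"question": question, "chat_history": []})
    return result["answer"]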
| [] |
2024-01-10 | DaMagus26/consultant_website | query~apps.py | from django.apps import AppConfig
from legal_answer_extraction.answer_extractor import AnswerExtractor
from legal_answer_extraction.qa_model.openai_qa import OpenAIModel
from legal_answer_extraction.vector_db import weaviate_db
model = None
class QueryConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "query"
def ready(self):
global model
finder = OpenAIModel(base_url='https://api.proxyapi.ru/openai/v1', api_key='sk-uJPlEJyqRg8jeYD1rSRxzV0DyXHzQtNb')
model = AnswerExtractor(finder)
| [] |
2024-01-10 | jpbianchi/GPT4V | GPT4V_demo.py | #%%
import streamlit as st
# import graphviz
from dotenv import load_dotenv
from os import environ, getcwd
import toml
import pickle
from pathlib import Path
import time
import hashlib
import json
import base64
import requests
from openai import AsyncOpenAI
from dotenv import load_dotenv
from os import environ
import configparser
from py2neo import Relationship
from NEO4J import Neo4jGraph, Now, createNode, createRelation
def check_keys():
""" Retrieves the keys either from the local file .streamlit/secrets.toml
for local tests, or from the streamlit sharing secrets manager
In the latter case, it's still useful because we test the keys,
to make sure we're using the ones we think we're using.
"""
# whether running locally or not, the keys are retrieved the same way
try:
# let's test is the huggingface token exists
# ab=st.secrets["secrets"]["HUGGINGFACEHUB_API_TOKEN"]
# del ab
# assert sum(st.secrets["secrets"]["HUGGINGFACEHUB_API_TOKEN"].encode('ascii')) == 3505, "HuggingFace key is invalid"
assert sum(st.secrets["secrets"]["OPENAI_API_KEY"].encode('ascii')) == 4241, "OpenAI key is invalid"
st.write("OpenAI key is valid")
st.write("Keys verified!")
except:
st.write('key retrieval issue')
st.stop()
# token_id = st.secrets["secrets"]["MODAL_TOKEN_ID"]
# token_secret = st.secrets["secrets"]["MODAL_TOKEN_SECRET"]
# openai_api_key = st.secrets["secrets"]["OPENAI_API_KEY"]
# assert sum(hf_api_token.encode('ascii')) == 3505, "HuggingFace key is invalid"
# st.write("HuggingFace key is valid")
# assert sum(st.secrets["secrets"]["HUGGINGFACEHUB_API_TOKEN"].encode('ascii')) == 3505, "HuggingFace key is invalid"
# st.write("HuggingFace key is valid")
# assert sum(token_id.encode('ascii')) == 2207, "Modal token ID is invalid"
# assert sum(token_secret.encode('ascii')) == 2134, "Modal token secret is invalid"
# st.write("Modal keys are valid")
# assert sum(openai_api_key.encode('ascii')) == 4241, "OpenAI key is invalid"
# st.write("OpenAI key is valid")
return
def examples():
st.write("Here are some examples of knowledge graphs that we created")
st.image("images/node_sunglasses.png", use_column_width=True)
st.image("images/node_convertible.png", use_column_width=True)
st.image("images/node_road.png", use_column_width=True)
st.image("images/node_view.png", use_column_width=True)
st.image("images/node_sunglasses2.png", use_column_width=True)
st.image("images/node_scarf.png", use_column_width=True)
st.image("images/node_colorful.png", use_column_width=True)
st.image("images/node_green.png", use_column_width=True)
st.image("images/node_tinted.png", use_column_width=True)
st.image("images/node_convertible2.png", use_column_width=True)
st.image("images/node_old.png", use_column_width=True)
st.image("images/node_rolling.png", use_column_width=True)
st.write("Notice the fields 'objective' and 'condition' under 'Node Properties' on the right")
def encode_image(image_path):
with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode('utf-8')
# async because of await client.chat.completions.create
async def create_graph( g,
image_path="data/Enjoying-convertible-car.jpg",
plot_image=False,
delete_graph=False,
DEBUG=False):
if plot_image:
st.image(image_path,
caption=None,
width=None,
use_column_width=None,
clamp=False,
channels="RGB",
output_format="auto")
#%%
# to upload a local image to OpenAI
# Getting the base64 string
base64_image = encode_image(image_path)
headers = {
"Content-Type": "application/json",
"Authorization": f"""Bearer {st.secrets["secrets"]["OPENAI_API_KEY"]}"""
}
prompt = "What’s in this image?"
def prompt_image(prompt, image_path):
base64_image = encode_image(image_path)
payload = {
"model": "gpt-4-vision-preview",
"messages": [
{
"role": "user",
"content": [
{
"type": "text",
"text": prompt
},
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{base64_image}"
}
}
]
}
],
"max_tokens": 300
}
response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)
return response
#%%
prompt4 = """
Find all objects or people you can find this image and put them in a python list.
Be exhaustive, do not miss any object or person, including in the background, the sky if there is one, through a window, behind people, even partially visible etc.
Just list one object at a time, do not include what they are doing or where they are, just the object / person name only.
For instance, "women wearing hats" is two objects "women" and "hats"
"""
ans4 = prompt_image(prompt4, image_path)
objects_in_image = ans4.json()["choices"][0]["message"]["content"]
# st.write(objects_in_image)
# if DEBUG:
# st.write("Objects in image:")
# st.write(ans4.json()["choices"][0]["message"]["content"])
objectPrompt="""
Find all the words between quotes in the following string and put them in a python string.
Just output the string without any comment, start with ' and end with ], nothing else.
Do not put quotes or double quotes around the words.
"""
client = AsyncOpenAI(api_key=st.secrets["secrets"]["OPENAI_API_KEY"])
requestMessage = objectPrompt + '\n' + ans4.json()["choices"][0]["message"]["content"]
objects = await client.chat.completions.create(
model="gpt-4", # previous models, even GPT3.5 didn't work that well
messages=[{"role": "system", "content": "You are an expert in linguistics, semantic"},
{"role": "user", "content": requestMessage}]
)
objects_in_image = objects.choices[0].message.content[1:-1].split(", ")
if DEBUG:
st.write("WARNING: results are not deterministic!")
st.write("Objects in image:")
st.write(objects_in_image)
#%%
prompt3 = f"""
list all possible beliefs we can extract from this image, and express them in this format
<this thing or person> <action> <another thing><for this reason or purpose><in these conditions ><optional>...
Please keep the propositions with the <action> to leave <another thing> as an object/person name
You can also use another format, when there is no action, using the verb 'to be', such as:
<this car><is><a convertible><because the roof can be removed>
To generate all possible beliefs, in an exhaustive way, look at all the objects, persons, background etc and try to find a relation between them following the formats above.
You must find at least one belief for every object or person in the following list {objects_in_image}
Do not output anything before the first '<' and after the last '>'.
Do not output the '<' or '>' characters.
"""
ans3 = prompt_image(prompt3, image_path)
# if DEBUG:
# st.write(ans3.json()["choices"][0]["message"]["content"])
#%%
beliefs = [f"belief {i}: "+ans for i,ans in enumerate(ans3.json()["choices"][0]["message"]["content"].split("\n"), 1)]
if DEBUG:
msg="Here are all the beliefs that GPT4V extracted from the image:"
st.write(msg)
# st.write("-"*len(msg))
for b in beliefs:
st.write(b)
#%%
instructPrompt = """
You are an expert in linguistics, semantic and you are trying to format the beliefs passed to you into a format that can be stored in a knowledge graph.
The beliefs start with the word 'belief1:', 'belief2:' etc and are separated by a new line.
Rewrite every belief and express them as a python dictionary with the following format:
{
"condition": <conditions observed from the picture such as a sunny day, ie the conditions leading to the rest of the beliefs, such as the objective and action>,
"objective": <the objective of the person or thing in the picture, after observing the conditions in the picture>,
"subject": <the person or thing in the picture doing the 'action' to meet the 'objective', just one word if possible>,
"action": <the action the person or thing in the picture is doing to meet the 'objective', expressed in one word with an optional preposition, such as 'drive to'>,
"object": <the object of the action, expressed in one word if possible such as 'beach', NOT 'the beach'>
}
Use an empty string when a field is not available.
Do not return a belief if the subject and actions are not clearly identified.
I want general answers, not specific to an image.
For instance, I would expect something like:
{
"condition": "sunny day",
"objective": "enjoy the countryside",
"subject": "people",
"action": "drive",
"object": "convertible"
}
not the following
{
"condition": "in a sunny day",
"objective": "because he enjoys the countryside",
"subject": "the person in the driver seat",
"action": "drive",
"object": "the convertible"
}
For "subject", "action", "object", be as generic and short as possible.
Do not use different words for the same object, e.g. "convertible car" and "convertible".
When the belief does not have an action, but instead use a verb such as 'to be' for instance, then put the verb in the <action anyway>,
and use the field 'object' to describe what the object or person is or is made of or anything else that the verb describes.
Use infinitive verbs, without the 'to', ie 'be' instead of 'is'.
"""
#%% BUILD INSTRUCT PROMPT AND GO!
# NO CHUNKING, THE PROMPT IS SHORT ENOUGH
st.write("="*50)
st.write("Now, we're going to post-process those beliefs with GPT4 because it does a better job than GPT4V!")
st.write("Every belief is split into 5 fields to find the objects, persons, actions, conditions and objectives, so we can insert them in a Neo4J knowledge graph")
requestMessages = [instructPrompt + '\n' + belief for belief in beliefs]
chatOutputs = []
for i, request in enumerate(requestMessages, 1):
# st.write("Doing request", request, i, "of", len(requestMessages)
chatOutput = await client.chat.completions.create(
model="gpt-4", # previous models, even GPT3.5 didn't work that well
messages=[{"role": "system", "content": "You are an expert in linguistics and semantics"},
{"role": "user", "content": request} ]
)
chatOutputs.append(chatOutput)
#%%
formatted_beliefs = {}
for belief in chatOutputs:
b = json.loads(belief.choices[0].message.content)
formatted_beliefs[b["subject"]] = b
if DEBUG:
st.write(b)
#%%
if delete_graph:
g.deleteAllNodes(DEBUG=True)
assert g.nodesNb == 0
if DEBUG:
st.write("Graph has been cleaned")
for i, b in enumerate(chatOutputs,1):
# continue
belief = json.loads(b.choices[0].message.content)
name=belief.pop("subject")
object = belief.pop("object")
relation = belief.pop("action")
if DEBUG:
st.write(f"Now adding nodes to the graph for belief{i}:")
st.write('\n'.join([name, relation, object, belief['objective'], belief['condition']]))
if name == "":
continue
try:
subject1 = createNode( g,
name,
user_id='JPB',
display_name=name,
labels_constraints=name, # we don't create the same object/subject twice
# properties_constraints=('user_id',),
creation_timestamp = Now(),
DEBUG=True) # we can have the same subject appear several times
except:
if DEBUG:
st.write(f"node not created for {name}")
if object == "":
continue
try:
object1 = createNode( g,
object,
user_id='JPB',
display_name=object,
labels_constraints=object,
properties_constraints=('user_id',),
creation_timestamp = Now(),
DEBUG=True,
**belief,
) # we can have the same subject appear several times
except:
if DEBUG:
st.write(f"node not created for {object}")
if relation == "":
continue
try:
relat1 = createRelation(g,
subject1,
object1,
relation,
DEBUG=True,
#synonyms='has imagery', # for demo, relations can have properties as well
allow_duplicates=True, # Neo4J allows several identical relations between 2 nodes, but we don't want that
counting=True)
except:
if DEBUG:
st.write(f"relation not created for 'to {relation}'")
if DEBUG:
st.write('-'*10)
if DEBUG:
st.write("Nb of nodes =", g.nodesNb)
st.write("="*50)
return objects_in_image, beliefs
def main():
_, cent_co,_,_,_ = st.columns(5) # to help center the logo
with cent_co:
st.image('images/NO LIMITS logo.png', width=400)
st.title("Team NoLimits")
codebox = st.empty()
code = codebox.text_input("Enter your access code here",
value="",
placeholder="........",
key="1")
if hashlib.sha256(code.encode()).hexdigest() == hashlib.sha256(environ["REAL_CODE"].encode()).hexdigest():
# let's clear the code
time.sleep(0.5)
st.write("Access code is validXxXXX")
st.stop()
# code to clear box from https://discuss.streamlit.io/t/clear-text-input/18884/4
codebox.text_input("Enter your access code here",
value="",
placeholder="KEY HIDDEN",
key="2")
check_keys()
# I setup a free Neo4J instance on Aura, and I'm using the Python driver to connect to it
Neo4j_config = configparser.ConfigParser()
Neo4j_config.read('neo4j_config.ini')
Neo4j_config['DEFAULT']["pw"] = st.secrets["secrets"]['NEO4J_AURA_PW']
g = Neo4jGraph(showstatus=True, **Neo4j_config['DEFAULT'])
# in case of error, check your config.ini file
# Instructions to see the graph in Neo4J Browser
# go to
# https://workspace-preview.neo4j.io/workspace/query?ntid=auth0%7C631bb4216f68981ab949290b
# run the cypher query:
# MATCH (n) RETURN n to see all nodes
g.deleteAllNodes(DEBUG=True)
assert g.nodesNb == 0
st.write("Graph has been cleaned")
import asyncio
# image_path = "data/nice_convertible.jpg"
image_path = "data/Enjoying-convertible-car.jpg"
objects_in_image, beliefs = asyncio.run(create_graph(g,
image_path=image_path,
plot_image=True,
delete_graph=True,
DEBUG=True
))
# st.write('\n'.join(beliefs))
st.write("Neo4J graph has been created! See it online!")
examples()
else:
st.write("Access code is invalid")
st.write("Please contact the team to get a valid access code.")
st.write("In the meantime, here is a recorded demo")
time.sleep(2)
st.image("images/streamlit_1.png", use_column_width=True)
time.sleep(4)
st.image("images/streamlit_2.png", use_column_width=True)
time.sleep(4)
st.image("images/streamlit_3.png", use_column_width=True)
examples()
st.write("Notice how they are spot on!")
st.stop()
if __name__ == '__main__':
main()
# get into venv and run
# streamlit run GPT4V_demo.py --server.allowRunOnSave True
# we must run it from the root folder, not from the src folder because
# that's what streamlit will do
| [
"\n You are an expert in linguistics, semantic and you are trying to format the beliefs passed to you into a format that can be stored in a knowledge graph.\n The beliefs start with the word 'belief1:', 'belief2:' etc and are separated by a new line.\n\n Rewrite every belief and express them as a python dictionary with the following format:\n {\n \"condition\": <conditions observed from the picture such as a sunny day, ie the conditions leading to the rest of the beliefs, such as the objective and action>,\n \"objective\": <the objective of the person or thing in the picture, after observing the conditions in the picture>,\n \"subject\": <the person or thing in the picture doing the 'action' to meet the 'objective', just one word if possible>,\n \"action\": <the action the person or thing in the picture is doing to meet the 'objective', expressed in one word with an optional preposition, such as 'drive to'>,\n \"object\": <the object of the action, expressed in one word if possible such as 'beach', NOT 'the beach'>\n }\n Use an empty string when a field is not available.\n Do not return a belief if the subject and actions are not clearly identified.\n\n I want general answers, not specific to an image.\n For instance, I would expect something like:\n {\n \"condition\": \"sunny day\",\n \"objective\": \"enjoy the countryside\",\n \"subject\": \"people\",\n \"action\": \"drive\",\n \"object\": \"convertible\"\n }\n not the following\n {\n \"condition\": \"in a sunny day\",\n \"objective\": \"because he enjoys the countryside\",\n \"subject\": \"the person in the driver seat\",\n \"action\": \"drive\",\n \"object\": \"the convertible\"\n }\n For \"subject\", \"action\", \"object\", be as generic and short as possible.\n Do not use different words for the same object, e.g. \"convertible car\" and \"convertible\".\n When the belief does not have an action, but instead use a verb such as 'to be' for instance, then put the verb in the <action anyway>, \n and use the field 'object' to describe what the object or person is or is made of or anything else that the verb describes.\n\n Use infinitive verbs, without the 'to', ie 'be' instead of 'is'. \n ",
"You are an expert in linguistics and semantics",
" \n Find all objects or people you can find this image and put them in a python list. \n Be exhaustive, do not miss any object or person, including in the background, the sky if there is one, through a window, behind people, even partially visible etc.\n Just list one object at a time, do not include what they are doing or where they are, just the object / person name only.\n For instance, \"women wearing hats\" is two objects \"women\" and \"hats\"\n ",
"objectPrompt + '\\n' + ans4.json()[\"choices\"][0][\"message\"][\"content\"]",
"\n Find all the words between quotes in the following string and put them in a python string.\n Just output the string without any comment, start with ' and end with ], nothing else.\n Do not put quotes or double quotes around the words.\n ",
"You are an expert in linguistics, semantic",
"What’s in this image?",
" \n list all possible beliefs we can extract from this image, and express them in this format\n <this thing or person> <action> <another thing><for this reason or purpose><in these conditions ><optional>... \n Please keep the propositions with the <action> to leave <another thing> as an object/person name\n\n You can also use another format, when there is no action, using the verb 'to be', such as:\n <this car><is><a convertible><because the roof can be removed>\n\n To generate all possible beliefs, in an exhaustive way, look at all the objects, persons, background etc and try to find a relation between them following the formats above.\n You must find at least one belief for every object or person in the following list PLACEHOLDER\n Do not output anything before the first '<' and after the last '>'.\n Do not output the '<' or '>' characters.\n ",
"[{'type': 'text', 'text': 'What’s in this image?'}, {'type': 'image_url', 'image_url': {'url': 'data:image/jpeg;base64,PLACEHOLDER'}}]"
] |
2024-01-10 | sanyalsunny111/Early_Weight_Avg | nanoGPT2-Experiments~train_small.py | import os
import time
import math
import pickle
from contextlib import nullcontext
import numpy as np
import torch
import sys
import re
from copy import deepcopy
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed import init_process_group, destroy_process_group
# import wandb
from model import GPTConfig, GPT
import copy
from torch import nn
from tqdm import tqdm
def exists(val):
return val is not None
def clamp(value, min_value=None, max_value=None):
assert exists(min_value) or exists(max_value)
if exists(min_value):
value = max(value, min_value)
if exists(max_value):
value = min(value, max_value)
return value
class EMA(nn.Module):
"""
Implements exponential moving average shadowing for your model.
Utilizes an inverse decay schedule to manage longer term training runs.
By adjusting the power, you can control how fast EMA will ramp up to your specified beta.
@crowsonkb's notes on EMA Warmup:
If gamma=1 and power=1, implements a simple average. gamma=1, power=2/3 are
good values for models you plan to train for a million or more steps (reaches decay
factor 0.999 at 31.6K steps, 0.9999 at 1M steps), gamma=1, power=3/4 for models
you plan to train for less (reaches decay factor 0.999 at 10K steps, 0.9999 at
215.4k steps).
Args:
inv_gamma (float): Inverse multiplicative factor of EMA warmup. Default: 1.
power (float): Exponential factor of EMA warmup. Default: 1.
min_value (float): The minimum EMA decay rate. Default: 0.
"""
def __init__(
self,
model,
ema_model=None,
# if your model has lazylinears or other types of non-deepcopyable modules, you can pass in your own ema model
beta=0.9999,
update_after_step=100,
update_every=10,
inv_gamma=1.0,
power=2 / 3,
min_value=0.0,
param_or_buffer_names_no_ema=set(),
ignore_names=set(),
ignore_startswith_names=set(),
include_online_model=True
# set this to False if you do not wish for the online model to be saved along with the ema model (managed externally)
):
super().__init__()
self.beta = beta
# whether to include the online model within the module tree, so that state_dict also saves it
self.include_online_model = include_online_model
if include_online_model:
self.online_model = model
else:
self.online_model = [model] # hack
# ema model
self.ema_model = ema_model
if not exists(self.ema_model):
try:
self.ema_model = copy.deepcopy(model)
except:
print('Your model was not copyable. Please make sure you are not using any LazyLinear')
exit()
self.ema_model.requires_grad_(False)
self.parameter_names = {name for name, param in self.ema_model.named_parameters() if
param.dtype in [torch.float, torch.float16]}
self.buffer_names = {name for name, buffer in self.ema_model.named_buffers() if
buffer.dtype in [torch.float, torch.float16]}
self.update_every = update_every
self.update_after_step = update_after_step
self.inv_gamma = inv_gamma
self.power = power
self.min_value = min_value
assert isinstance(param_or_buffer_names_no_ema, (set, list))
self.param_or_buffer_names_no_ema = param_or_buffer_names_no_ema # parameter or buffer
self.ignore_names = ignore_names
self.ignore_startswith_names = ignore_startswith_names
self.register_buffer('initted', torch.Tensor([False]))
self.register_buffer('step', torch.tensor([0]))
@property
def model(self):
return self.online_model if self.include_online_model else self.online_model[0]
def restore_ema_model_device(self):
device = self.initted.device
self.ema_model.to(device)
def get_params_iter(self, model):
for name, param in model.named_parameters():
if name not in self.parameter_names:
continue
yield name, param
def get_buffers_iter(self, model):
for name, buffer in model.named_buffers():
if name not in self.buffer_names:
continue
yield name, buffer
def copy_params_from_model_to_ema(self):
for (_, ma_params), (_, current_params) in zip(self.get_params_iter(self.ema_model),
self.get_params_iter(self.model)):
ma_params.data.copy_(current_params.data)
for (_, ma_buffers), (_, current_buffers) in zip(self.get_buffers_iter(self.ema_model),
self.get_buffers_iter(self.model)):
ma_buffers.data.copy_(current_buffers.data)
def get_current_decay(self):
epoch = clamp(self.step.item() - self.update_after_step - 1, min_value=0.)
value = 1 - (1 + epoch / self.inv_gamma) ** - self.power
if epoch <= 0:
return 0.
return clamp(value, min_value=self.min_value, max_value=self.beta)
def update(self):
step = self.step.item()
self.step += 1
if (step % self.update_every) != 0:
return
if step <= self.update_after_step:
self.copy_params_from_model_to_ema()
return
if not self.initted.item():
self.copy_params_from_model_to_ema()
self.initted.data.copy_(torch.Tensor([True]))
self.update_moving_average(self.ema_model, self.model)
@torch.no_grad()
def update_moving_average(self, ma_model, current_model):
current_decay = self.get_current_decay()
for (name, current_params), (_, ma_params) in zip(self.get_params_iter(current_model),
self.get_params_iter(ma_model)):
if name in self.ignore_names:
continue
if any([name.startswith(prefix) for prefix in self.ignore_startswith_names]):
continue
if name in self.param_or_buffer_names_no_ema:
ma_params.data.copy_(current_params.data)
continue
ma_params.data.lerp_(current_params.data, 1. - current_decay)
for (name, current_buffer), (_, ma_buffer) in zip(self.get_buffers_iter(current_model),
self.get_buffers_iter(ma_model)):
if name in self.ignore_names:
continue
if any([name.startswith(prefix) for prefix in self.ignore_startswith_names]):
continue
if name in self.param_or_buffer_names_no_ema:
ma_buffer.data.copy_(current_buffer.data)
continue
ma_buffer.data.lerp_(current_buffer.data, 1. - current_decay)
def __call__(self, *args, **kwargs):
return self.ema_model(*args, **kwargs)
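# Editor's note: a small sketch, not part of the training script, showing how
# the inverse-decay warmup described in the EMA docstring ramps up. It reuses
# the same formula as EMA.get_current_decay (1 - (1 + step/inv_gamma) ** -power,
# clamped to [min_value, beta]) but ignores update_after_step for simplicity.
def _ema_decay_at(step, beta=0.9999, inv_gamma=1.0, power=2 / 3, min_value=0.0):
    if step <= 0:
        return 0.
    value = 1 - (1 + step / inv_gamma) ** -power
    return clamp(value, min_value=min_value, max_value=beta)
# With these defaults, _ema_decay_at(10_000) is roughly 0.998 and
# _ema_decay_at(31_600) is roughly 0.999, matching the docstring's note that
# power=2/3 reaches a 0.999 decay factor around 31.6K steps.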
ema_decay = 0.9000
out_dir = '/scratch/07946/ss95332/out2'
out_dir_ema = '/scratch/07946/ss95332/out_ema2'
eval_interval = 200
log_interval = 1
eval_iters = 200
eval_only = False # if True, script exits right after the first eval
always_save_checkpoint = True # if True, always save a checkpoint after each eval
init_from = 'resume' # 'scratch' or 'resume' or 'gpt2*'
# ddp = True
# wandb logging
wandb_log = False # disabled by default
wandb_project = 'owt'
wandb_run_name = 'gpt2' # 'run' + str(time.time())
# data
dataset = 'openwebtext'
gradient_accumulation_steps = 8 # used to simulate larger batch sizes, was 5 earlier
batch_size = 16 # if gradient_accumulation_steps > 1, this is the micro-batch size, was 12 earlier
block_size = 1024
# model
n_layer = 12
n_head = 12
n_embd = 768
dropout = 0.0 # for pretraining 0 is good, for finetuning try 0.1+
bias = False # do we use bias inside LayerNorm and Linear layers?
# optimizer
optimizer_name = 'adamw'
learning_rate = 6e-3 # max learning rate, earlier it was 6e-4
max_iters = 70000 # total number of training iterations, earlier it was 600000
weight_decay = 1e-1
beta1 = 0.9
beta2 = 0.95
grad_clip = 1.0 # clip gradients at this value, or disable if == 0.0
rho = 0.1
interval = 10
variant = 4
# learning rate decay settings
decay_lr = True # whether to decay the learning rate
warmup_iters = 2000 # how many steps to warm up for
lr_decay_iters = 70000 # should be ~= max_iters per Chinchilla, it was 600000 earlier
min_lr = 6e-4 # minimum learning rate, should be ~= learning_rate/10 per Chinchilla
# DDP settings
backend = 'nccl' # 'nccl', 'gloo', etc.
# system
device = 'cuda' # examples: 'cpu', 'cuda', 'cuda:0', 'cuda:1' etc., or try 'mps' on macbooks
dtype = 'bfloat16' # 'float32', 'bfloat16', or 'float16', the latter will auto implement a GradScaler
compile = True # use PyTorch 2.0 to compile the model to be faster
scale_attn_by_inverse_layer_idx = True
# -----------------------------------------------------------------------------
config_keys = [k for k, v in globals().items() if not k.startswith('_') and isinstance(v, (int, float, bool, str))]
# exec(open('configurator.py').read()) # overrides from command line or config file
config = {k: globals()[k] for k in config_keys} # will be useful for logging
# -----------------------------------------------------------------------------
# various inits, derived attributes, I/O setup
ddp = int(os.environ.get('RANK', -1)) != -1 # is this a ddp run?
if ddp:
init_process_group(backend=backend)
ddp_rank = int(os.environ['RANK'])
ddp_local_rank = int(os.environ['LOCAL_RANK'])
device = f'cuda:{ddp_local_rank}'
torch.cuda.set_device(device)
master_process = ddp_rank == 0 # this process will do logging, checkpointing etc.
seed_offset = ddp_rank # each process gets a different seed
else:
# if not ddp, we are running on a single gpu, and one process
master_process = True
seed_offset = 0
gradient_accumulation_steps *= 8 # simulate 8 gpus
if master_process:
os.makedirs(out_dir, exist_ok=True)
os.makedirs(out_dir_ema, exist_ok=True)
torch.manual_seed(5000 + seed_offset)
torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul
torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn
device_type = 'cuda' if 'cuda' in device else 'cpu' # for later use in torch.autocast
# note: float16 data type will automatically use a GradScaler
ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype]
ctx = nullcontext() if device_type == 'cpu' else torch.autocast(device_type=device_type, dtype=ptdtype)
# poor man's data loader
data_dir = os.path.join('data', dataset)
train_data = np.memmap(os.path.join(data_dir, 'train.bin'), dtype=np.uint16, mode='r')
val_data = np.memmap(os.path.join(data_dir, 'val.bin'), dtype=np.uint16, mode='r')
def get_batch(split):
data = train_data if split == 'train' else val_data
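    # sample batch_size random offsets and build (x, y) pairs, with y shifted right by one token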
ix = torch.randint(len(data) - block_size, (batch_size,))
x = torch.stack([torch.from_numpy((data[i:i + block_size]).astype(np.int64)) for i in ix])
y = torch.stack([torch.from_numpy((data[i + 1:i + 1 + block_size]).astype(np.int64)) for i in ix])
if device_type == 'cuda':
# pin arrays x,y, which allows us to move them to GPU asynchronously (non_blocking=True)
x, y = x.pin_memory().to(device, non_blocking=True), y.pin_memory().to(device, non_blocking=True)
else:
x, y = x.to(device), y.to(device)
return x, y
# init these up here, can override if init_from='resume' (i.e. from a checkpoint)
iter_num = 0
best_val_loss = 1e9
# attempt to derive vocab_size from the dataset
meta_path = os.path.join(data_dir, 'meta.pkl')
meta_vocab_size = None
if os.path.exists(meta_path):
with open(meta_path, 'rb') as f:
meta = pickle.load(f)
meta_vocab_size = meta['vocab_size']
print(f"found vocab_size = {meta_vocab_size} (inside {meta_path})")
# model init
model_args = dict(n_layer=n_layer, n_head=n_head, n_embd=n_embd, block_size=block_size,
bias=bias, vocab_size=None, dropout=dropout,
scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx) # start with model_args from command line
if init_from == 'scratch':
# init a new model from scratch
print("Initializing a new model from scratch")
# determine the vocab size we'll use for from-scratch training
if meta_vocab_size is None:
print("defaulting to vocab_size of GPT-2 to 50304 (50257 rounded up for efficiency)")
model_args['vocab_size'] = meta_vocab_size if meta_vocab_size is not None else 50304
gptconf = GPTConfig(**model_args)
model = GPT(gptconf)
# Initialize the EMA for the model
# model_ema = ModelEMA(model)
elif init_from == 'resume':
print(f"Resuming training from {out_dir}")
# resume training from a checkpoint.
ckpt_path = os.path.join(out_dir, 'ckpt_best.pt')
checkpoint = torch.load(ckpt_path, map_location=device)
checkpoint_model_args = checkpoint['model_args']
# force these config attributes to be equal otherwise we can't even resume training
# the rest of the attributes (e.g. dropout) can stay as desired from command line
for k in ['n_layer', 'n_head', 'n_embd', 'block_size', 'bias', 'vocab_size']:
model_args[k] = checkpoint_model_args[k]
# create the model
gptconf = GPTConfig(**model_args)
model = GPT(gptconf)
state_dict = checkpoint['model']
# fix the keys of the state dictionary :(
# honestly no idea how checkpoints sometimes get this prefix, have to debug more
unwanted_prefix = '_orig_mod.'
for k, v in list(state_dict.items()):
if k.startswith(unwanted_prefix):
state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k)
model.load_state_dict(state_dict)
iter_num = checkpoint['iter_num']
best_val_loss = checkpoint['best_val_loss']
elif init_from.startswith('gpt2'):
print(f"Initializing from OpenAI GPT-2 weights: {init_from}")
# initialize from OpenAI GPT-2 weights
override_args = dict(dropout=dropout)
model = GPT.from_pretrained(init_from, override_args)
# read off the created config params, so we can store them into checkpoint correctly
for k in ['n_layer', 'n_head', 'n_embd', 'block_size', 'bias', 'vocab_size']:
model_args[k] = getattr(model.config, k)
# crop down the model block size if desired, using model surgery
if block_size < model.config.block_size:
model.crop_block_size(block_size)
model_args['block_size'] = block_size # so that the checkpoint will have the right value
model.to(device)
# initialize a GradScaler. If enabled=False scaler is a no-op
scaler = torch.cuda.amp.GradScaler(enabled=(dtype == 'float16'))
# optimizer
optimizer = model.configure_optimizers(optimizer_name, weight_decay, learning_rate, (beta1, beta2), rho, device_type)
if init_from == 'resume':
optimizer.load_state_dict(checkpoint['optimizer'])
del state_dict
del checkpoint
# compile the model
if compile:
print("compiling the model... (takes a ~minute)")
unoptimized_model = model
model = torch.compile(model) # requires PyTorch 2.0
model_ema = EMA(
model,
beta=ema_decay, # exponential moving average factor
update_after_step=0, # only after this number of .update() calls will it start updating
    update_every=1, # how often to actually update the EMA (1 = update on every .update() call)
)
# wrap model into DDP container
if ddp:
model = DDP(model, device_ids=[ddp_local_rank])
# helps estimate an arbitrarily accurate loss over either split using many batches
@torch.no_grad()
def estimate_loss():
out = {}
model.eval()
# model_ema = model_ema.to(device)
for split in ['train', 'val']:
out[split] = {}
losses = torch.zeros(eval_iters)
losses_ema = torch.zeros(eval_iters)
for k in tqdm(range(eval_iters), desc="Evaluating", ncols=100):
X, Y = get_batch(split)
with ctx:
logits, loss = model(X, Y)
logits_ema, loss_ema = model_ema(X, Y)
losses[k] = loss.item()
losses_ema[k] = loss_ema.item()
out[split]['vanilla'] = losses.mean()
out[split]['ema'] = losses_ema.mean()
model.train()
# model_ema = model_ema.to(device)
return out
# learning rate decay scheduler (cosine with warmup)
def get_lr(it):
# 1) linear warmup for warmup_iters steps
if it < warmup_iters:
return learning_rate * it / warmup_iters
# 2) if it > lr_decay_iters, return min learning rate
if it > lr_decay_iters:
return min_lr
# 3) in between, use cosine decay down to min learning rate
decay_ratio = (it - warmup_iters) / (lr_decay_iters - warmup_iters)
assert 0 <= decay_ratio <= 1
coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio)) # coeff ranges 0..1
return min_lr + coeff * (learning_rate - min_lr)
# logging
if wandb_log and master_process:
import wandb
wandb.init(project=wandb_project, name=wandb_run_name, config=config)
# training loop
X, Y = get_batch('train') # fetch the very first batch
t0 = time.time()
local_iter_num = 0 # number of iterations in the lifetime of this process
raw_model = model.module if ddp else model # unwrap DDP container if needed
running_mfu = -1.0
clip_time = 0
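# counts iterations where the gradient norm exceeded grad_clip; logged below as train/clip_rate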
while True:
# determine and set the learning rate for this iteration
lr = get_lr(iter_num) if decay_lr else learning_rate
for param_group in optimizer.param_groups:
param_group['lr'] = lr
# evaluate the loss on train/val sets and write checkpoints
if iter_num % eval_interval == 0 and master_process:
losses = estimate_loss()
# log_text = f"step {iter_num}: train loss {losses['train']:.4f}, val loss {losses['val']:.4f}"
log_text = f"step {iter_num}: train loss {losses['train']['vanilla']:.4f}, val loss {losses['val']['vanilla']:.4f}, train loss ema {losses['train']['ema']:.4f}, val loss ema {losses['val']['ema']:.4f}"
with open("logs/train2-log/training_val.txt", "a") as log_file:
log_file.write(log_text + "\n")
print(log_text)
# print(f"step {iter_num}: train loss {losses['train']:.4f}, val loss {losses['val']:.4f}")
checkpoint = {
'model': raw_model.state_dict(),
'optimizer': optimizer.state_dict(),
'model_args': model_args,
'iter_num': iter_num,
'best_val_loss': best_val_loss,
'config': config,
}
checkpoint_ema = {
'model': model_ema.state_dict(),
'optimizer': optimizer.state_dict(),
'model_args': model_args,
'iter_num': iter_num,
'best_val_loss': best_val_loss,
'config': config,
}
print(f"saving checkpoint to {out_dir}")
torch.save(checkpoint, os.path.join(out_dir, 'ckpt_' + str(iter_num) + '.pt'))
print(f"saving checkpoint to {out_dir_ema}")
torch.save(checkpoint_ema, os.path.join(out_dir_ema, 'ckpt_' + str(iter_num) + '.pt'))
if wandb_log:
wandb.log({
"iter": iter_num,
"train/loss": losses['train']['vanilla'],
"val/loss": losses['val']['vanilla'],
"lr": lr,
"mfu": running_mfu * 100, # convert to percentage
}, step=iter_num)
if losses['val']['vanilla'] < best_val_loss or always_save_checkpoint:
best_val_loss = losses['val']['vanilla']
if iter_num > 0:
checkpoint = {
'model': raw_model.state_dict(),
'optimizer': optimizer.state_dict(),
'model_args': model_args,
'iter_num': iter_num,
'best_val_loss': best_val_loss,
'config': config,
}
print(f"saving checkpoint to {out_dir}")
torch.save(checkpoint, os.path.join(out_dir, 'ckpt_best.pt'))
if iter_num == 0 and eval_only:
break
# forward backward update, with optional gradient accumulation to simulate larger batch size
# and using the GradScaler if data type is float16
for micro_step in range(gradient_accumulation_steps):
if ddp:
# in DDP training we only need to sync gradients at the last micro step.
# the official way to do this is with model.no_sync() context manager, but
# I really dislike that this bloats the code and forces us to repeat code
# looking at the source of that context manager, it just toggles this variable
model.require_backward_grad_sync = (micro_step == gradient_accumulation_steps - 1)
with ctx:
logits, loss = model(X, Y)
# immediately async prefetch next batch while model is doing the forward pass on the GPU
X, Y = get_batch('train')
# backward pass, with gradient scaling if training in fp16
scaler.scale(loss).backward()
# clip the gradient
if grad_clip != 0.0:
scaler.unscale_(optimizer)
total_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
if total_norm.item() > grad_clip:
clip_time += 1
# step the optimizer and scaler if training in fp16
scaler.step(optimizer)
# Update EMA weights after the model update
# model_ema.update()
scaler.update()
# flush the gradients as soon as we can, no need for this memory anymore
optimizer.zero_grad(set_to_none=True)
# ema update
model_ema.update()
# timing and logging
t1 = time.time()
dt = t1 - t0
t0 = t1
if iter_num % log_interval == 0 and master_process:
lossf = loss.item() # loss as float. note: this is a CPU-GPU sync point
if local_iter_num >= 5: # let the training loop settle a bit
mfu = raw_model.estimate_mfu(batch_size * gradient_accumulation_steps, dt)
running_mfu = mfu if running_mfu == -1.0 else 0.9 * running_mfu + 0.1 * mfu
print(f"iter {iter_num}: loss {lossf:.4f}, time {dt * 1000:.2f}ms, mfu {running_mfu * 100:.2f}%")
params = []
for (name, p) in model.named_parameters():
params.append(p)
total_param_norm = 0
for p in params:
param_norm = p.data.norm(2)
total_param_norm += param_norm.item() ** 2
total_param_norm = total_param_norm ** 0.5
momentum_norm = 0
LL = len(optimizer.state_dict()['state'])
for jj in range(LL):
momentum_norm += (optimizer.state_dict()['state'][jj]['exp_avg'].detach().norm(2)) ** 2
momentum_norm = torch.sqrt(momentum_norm).item()
def generate_log_message(iter_num, lossf, lr, total_param_norm, momentum_norm, clip_time):
log_message = (
f"iter: {iter_num}, "
f"train/loss: {lossf}, "
f"lr: {lr}, "
f"param_norm: {total_param_norm}, "
f"momentum_norm: {momentum_norm}, "
f"train/clip_rate: {clip_time / (iter_num + 1)}"
)
return log_message
# During training:
log_message = generate_log_message(iter_num, lossf, lr, total_param_norm, momentum_norm, clip_time)
# Print the log message to console
# print(log_message)
# append the log message to the log file
with open("logs/train2-log/training_log.txt", "a") as log_file:
log_file.write(log_message + "\n")
if wandb_log:
wandb.log({
"iter": iter_num,
"train/loss": lossf,
"lr": lr,
"param_norm": total_param_norm,
"momentum_norm": momentum_norm,
"train/clip_rate": clip_time / (iter_num + 1)
}, step=iter_num)
iter_num += 1
local_iter_num += 1
# termination conditions
if iter_num > max_iters:
break
if ddp:
destroy_process_group()
| [] |
2024-01-10 | ryanhoangt/openai-api-integration-apps | 03-Auto-Recipe-Creator~recipe.py | import os
import openai
from decouple import Config, RepositoryEnv
config = Config(RepositoryEnv("/workspaces/codespaces-jupyter/.env"))
openai.api_key = config("OPENAI_API_KEY")
class RecipeGenerator:
def __init__(self):
self.list_of_ingredients = self.ask_for_ingredients()
@staticmethod
def ask_for_ingredients():
list_of_ingredients = []
while True:
ingredient = input(
"Enter an ingredient (or type 'done' to finish): ")
if ingredient.lower() == "done":
break
list_of_ingredients.append(ingredient)
print(f"Your ingredients are: {', '.join(list_of_ingredients)}")
return list_of_ingredients
def generate_recipe(self):
prompt = RecipeGenerator.create_recipe_prompt(self.list_of_ingredients)
if RecipeGenerator._verify_prompt(prompt):
response = RecipeGenerator.generate(prompt)
return response["choices"][0]["text"]
raise ValueError("Prompt not accepted.")
@staticmethod
def create_recipe_prompt(list_of_ingredients):
prompt = f"Create a detailed recipe based on only the following ingredients: {', '.join(list_of_ingredients)}.\n" \
+ f"Additionally, assign a title starting with 'Recipe Title: ' to this dish, which can be used to create a photorealistic image of it."
return prompt
@staticmethod
def _verify_prompt(prompt):
print(prompt)
response = input("Are you happy with the prompt? (y/n)")
if response.upper() == "Y":
return True
return False
@staticmethod
def generate(prompt):
response = openai.Completion.create(engine="text-davinci-003",
prompt=prompt,
max_tokens=256,
temperature=0.7)
return response
def store_recipe(self, recipe, filename):
with open(filename, "w") as f:
f.write(recipe)
if __name__ == "__main__":
"""
Test RecipeGenerator class without creating an image of the dish.
"""
gen = RecipeGenerator()
recipe = gen.generate_recipe()
print(recipe)
| [
", ",
"f\"Create a detailed recipe based on only the following ingredients: {', '.join(list_of_ingredients)}.\\n\" \\\r\n + f\"Additionally, assign a title starting with 'Recipe Title: ' to this dish, which can be used to create a photorealistic image of it.",
"Additionally, assign a title starting with 'Recipe Title: ' to this dish, which can be used to create a photorealistic image of it."
] |
2024-01-10 | ryanhoangt/openai-api-integration-apps | 01-NLP-to-SQL~db_utils.py | from sqlalchemy import create_engine, text
def dataframe_to_database(df, table_name):
"""Convert a pandas dataframe to a database.
Args:
df (dataframe): pd.DataFrame which is to be converted to a database
table_name (string): Name of the table within the database
Returns:
engine: SQLAlchemy engine object
"""
engine = create_engine('sqlite:///:memory:', echo=False)
df.to_sql(name=table_name, con=engine, index=False)
return engine
def handle_response(response):
"""Handles the response from OpenAI.
Args:
response (openAi response): Response json from OpenAI
Returns:
string: Proposed SQL query
"""
query = response["choices"][0]["text"]
if query.startswith(" "):
query = "Select" + query
return query
def execute_query(engine, query):
"""Execute a query on a database.
Args:
engine (SQLAlchemy engine object): database engine
query (string): SQL query
Returns:
list: List of tuples containing the result of the query
"""
with engine.connect() as conn:
result = conn.execute(text(query))
return result.fetchall()
| [] |
2024-01-10 | alysawyer/one-textbook | eval-model-pt3~perplexity_test.py | import openai
import os
# Set up your OpenAI API key
openai.api_key = os.getenv("OPENAI_API_KEY")
def calculate_perplexity(model, sentence):
    # Define the completion parameters; the prompt is just the sentence to evaluate
completion_parameters = {
"model": model,
"prompt": sentence,
"max_tokens": 0,
"logprobs": 0,
"echo": True
}
# Call the OpenAI API to generate the completion
response = openai.Completion.create(**completion_parameters)
print(response)
# Extract the perplexity from the API response
choices = response['choices'][0]
token_logprobs = choices['logprobs']['token_logprobs']
print(token_logprobs)
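    # skip the first entry: with echo=True the first token has no preceding context, so its logprob is None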
l = sum(token_logprobs[1:]) / len(token_logprobs[1:])
perplexity = 2 ** (-l)
return perplexity
# Example usage
sentence = "I love davinci"
model = "davinci"
perplexity = calculate_perplexity(model, sentence)
if perplexity is not None:
print(f"The perplexity of the sentence is: {perplexity:.2f}")
else:
print("Perplexity calculation failed.") | [] |
2024-01-10 | alysawyer/one-textbook | eval-model-pt3~get_perplexity.py | import lmql
import json
from pathlib import Path
import argparse
from functools import partial
import os
import openai
import time
def get_perplexity(model, sentence):
'''queries openai api to get logprobs of the prompt sentence'''
# define the completion parameters
completion_parameters = {
"model": model,
"prompt": sentence,
"max_tokens": 0,
"logprobs": 0,
"echo": True
}
# calling openai api to get completion response
response = openai.Completion.create(**completion_parameters)
# extracing the log probabilities
choices = response['choices'][0]
token_logprobs = choices['logprobs']['token_logprobs']
return token_logprobs
def evaluate_questions(questions):
'''takes in model output json data, returns a list of 1s and 0s, where 1s represent correct answers'''
results = []
# iterate thru questions
for question in questions:
lowest_perplexity = float('inf')
lowest_perplexity_index = -1
for i, evalsentence in enumerate(question['evalsentences']):
perplexity = evalsentence['perplexity'] # Extract perplexity from the evaluated sentence
# flagging if lowest perplexity
if perplexity < lowest_perplexity:
lowest_perplexity = perplexity
lowest_perplexity_index = i
# seeing if the answer is correct, if lowest perplexity sentence is the same as the answer
is_correct = 1 if question['evalsentences'][lowest_perplexity_index]['evalsentence'] == question['answer'] else 0
results.append(is_correct)
return results
# getting json name
parser = argparse.ArgumentParser()
parser.add_argument('second_argument')
# opening json file
file_path = Path.cwd() / parser.parse_args().second_argument
with file_path.open(mode='r', encoding="utf-8") as f:
data = json.load(f)
model = "davinci"
# creating output base filename
info_list = parser.parse_args().second_argument.split(".")
json_name = ".".join([ info_list[1].split("/")[1], info_list[0].split("/")[1], model, info_list[1].split("/")[0]])
print(json_name)
# creating output filepaths
output_perplexity_file = "results/cap1to5-accent-experiment/" + json_name + ".json"
output_response_file = "results/cap1to5-accent-experiment-raw/" + json_name + ".raw.json"
# only running new code
if not os.path.exists(output_perplexity_file) or os.path.getsize(output_perplexity_file) == 0:
# Iterate through questions
for item in data:
question = item["question"]
evalsentences = item["evalsentences"]
answer = item["answer"]
print("Question:", question)
# Iterate through evalsentences
result_list = []
time.sleep(7)
for sentence in evalsentences:
# Evaluate each sentence and calculate perplexity
log_probs = get_perplexity(model, sentence)
# Calculate perplexity from log_probs
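            # the first log-prob is None (no context for the first token), so it is skipped before averaging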
l = sum(log_probs[1:]) / len(log_probs[1:])
perplexity = 2 ** (-l)
# Create a dictionary with evaluated sentence, perplexity, and log_probs
result = {
"evalsentence": sentence,
"perplexity": perplexity,
"log_probs": log_probs
}
# Append the result to the list
result_list.append(result)
# Update the evalsentences key with the result list
item["evalsentences"] = result_list
print("Result:", json.dumps(item, ensure_ascii=False, indent=4))
with open(output_response_file, "w") as outfile:
outfile.write(json.dumps(data))
# Evaluate the questions based on perplexity and print the results
results = evaluate_questions(data)
accuracy = sum(results) / len(results)
with open(output_perplexity_file, "w") as outfile:
outfile.write(str(accuracy))
| [] |
2024-01-10 | alysawyer/one-textbook | eval-model-pt3~test_name.py | import lmql
import json
from pathlib import Path
import argparse
from functools import partial
import os
import openai
import time
# getting json name
parser = argparse.ArgumentParser()
parser.add_argument('second_argument')
# opening json file
file_path = Path.cwd() / parser.parse_args().second_argument
with file_path.open(mode='r', encoding="utf-8") as f:
data = json.load(f)
model = "davinci"
# creating output base filename
info_list = parser.parse_args().second_argument.split(".")
print(info_list)
json_name = ".".join([ info_list[1].split("/")[1], info_list[0].split("/")[1], model, info_list[1].split("/")[0]])
print(json_name) | [] |
2024-01-10 | maquenneville/WikiWhat | WikiWhat~pinecone_memory.py | # -*- coding: utf-8 -*-
"""
Created on Tue May 16 22:59:06 2023
@author: marca
"""
import tiktoken
import configparser
import openai
from openai.error import RateLimitError, InvalidRequestError, APIError
import pinecone
from pinecone import PineconeProtocolError
import time
import pandas as pd
from tqdm.auto import tqdm
import sys
import os
from embedder import Embedder
import asyncio
import nest_asyncio
nest_asyncio.apply()
class PineconeMemory:
def __init__(self, index_name=None, namespace=None):
if not os.path.exists("config.ini"):
raise FileNotFoundError("The config file was not found.")
self.encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
self.openai_api_key, self.pinecone_api_key, self.pinecone_env, self.index_name, self.namespace = self._get_api_keys("config.ini")
if index_name:
self.index_name = index_name
if namespace:
self.namespace = namespace
self.pending_data = None
pinecone.init(api_key=self.pinecone_api_key, environment=self.pinecone_env)
if self.index_name not in pinecone.list_indexes():
dimension = 1536
metric = "cosine"
pod_type = "p1"
pinecone.create_index(
self.index_name, dimension=dimension, metric=metric, pod_type=pod_type
)
self.index = pinecone.Index(index_name=self.index_name)
openai.api_key = self.openai_api_key
self.embedder = Embedder()
def __str__(self):
"""Returns a string representation of the PineconeMemory object."""
return f"Pinecone Memory | Index: {self.index_name}"
def _get_api_keys(self, config_file):
config = configparser.ConfigParser()
config.read(config_file)
openai_api_key = config.get("API_KEYS", "OpenAI_API_KEY")
pinecone_api_key = config.get("API_KEYS", "Pinecone_API_KEY")
pinecone_env = config.get("API_KEYS", "Pinecone_ENV")
index = config.get("API_KEYS", "Pinecone_Index")
namespace = config.get("API_KEYS", "Pinecone_Namespace")
return openai_api_key, pinecone_api_key, pinecone_env, index, namespace
def _count_tokens(self, text):
tokens = len(self.encoding.encode(text))
return tokens
def store_single(self, chunk: str):
"""Store a single embedding in Pinecone."""
assert self._count_tokens(chunk) <= 1200, "Text too long, chunk text before passing to .store_single()"
vector = self.embedder.get_embedding(chunk)
idx = self.index.describe_index_stats()["namespaces"][self.namespace]['vector_count'] + 1
# Prepare metadata for upsert
metadata = {"context": chunk}
vectors_to_upsert = [(idx, vector, metadata)]
# Attempt to upsert the vector to Pinecone
while True:
try:
upsert_response = self.index.upsert(
vectors=vectors_to_upsert, namespace=self.namespace
)
break
except pinecone.core.client.exceptions.ApiException:
print("Pinecone is a little overwhelmed, trying again in a few seconds...")
time.sleep(10)
def store(self, context_chunks: list):
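        """Embed the given text chunks asynchronously and upsert them into the Pinecone index in batches."""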
if context_chunks:
batch_size = 80
vectors_to_upsert = []
batch_count = 0
start_id = self.index.describe_index_stats()["namespaces"][self.namespace]['vector_count'] + 1
data = asyncio.run(self.embedder.create_embeddings(context_chunks, start_id=start_id))
# Calculate the total number of batches
total_batches = -(-len(data) // batch_size)
# Create a tqdm progress bar object
progress_bar = tqdm(total=total_batches, desc="Loading info into Pinecone", position=0)
for index, row in data.iterrows():
context_chunk = row["chunk"]
vector = row["embeddings"]
pine_index = str(row["id"])
metadata = {"context": context_chunk}
vectors_to_upsert.append((pine_index, vector, metadata))
# Upsert when the batch is full or it's the last row
if len(vectors_to_upsert) == batch_size or index == len(data) - 1:
while True:
try:
upsert_response = self.index.upsert(
vectors=vectors_to_upsert, namespace=self.namespace
)
batch_count += 1
vectors_to_upsert = []
# Update the progress bar
progress_bar.update(1)
sys.stdout.flush()
break
except pinecone.core.client.exceptions.ApiException:
print(
"Pinecone is a little overwhelmed, trying again in a few seconds..."
)
time.sleep(10)
# Close the progress bar after completing all upserts
progress_bar.close()
else:
print("No dataframe to retrieve embeddings")
def fetch_context(self, query, top_n=5):
# Generate the query embedding
query_embedding = self.embedder.get_embedding(query)
while True:
try:
query_response = self.index.query(
namespace=self.namespace,
top_k=top_n,
include_values=False,
include_metadata=True,
vector=query_embedding,
)
break
except PineconeProtocolError:
print("Pinecone needs a moment....")
time.sleep(3)
continue
# Retrieve metadata for the relevant embeddings
context_chunks = [
match["metadata"]["context"] for match in query_response["matches"]
]
return context_chunks
| [] |
2024-01-10 | maquenneville/WikiWhat | WikiWhat~chroma_memory.py | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 26 00:08:42 2023
@author: marca
"""
import chromadb
from embedder import Embedder
import configparser
import os
import time
import openai
from openai.error import RateLimitError, InvalidRequestError, APIError
import tiktoken
from tqdm.auto import tqdm
import sys
import random
from chromadb.utils import embedding_functions
import asyncio
import nest_asyncio
nest_asyncio.apply()
class ChromaMemory:
def __init__(self, collection_name=None):
if not os.path.exists("config.ini"):
raise FileNotFoundError("The config file was not found.")
self.encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
(
self.openai_api_key,
self.chroma_collection,
self.storage
) = self._get_api_keys("config.ini")
openai.api_key = self.openai_api_key
self.ef = embedding_functions.OpenAIEmbeddingFunction(
api_key=self.openai_api_key,
model_name="text-embedding-ada-002"
)
if collection_name:
self.chroma_collection = collection_name.lower()
self.client = chromadb.Client()
if self.storage:
self.client = chromadb.PersistentClient(path=self.storage)
try:
self.collection = self.client.create_collection(self.chroma_collection, embedding_function=self.ef, metadata={"hnsw:space": "cosine"})
except ValueError:
self.collection = self.client.get_collection(self.chroma_collection)
self.embedder = Embedder()
def __str__(self):
"""Returns a string representation of the ChromaMemory object."""
return f"Chroma Memory | Collection: {self.chroma_collection}"
def _get_api_keys(self, config_file):
config = configparser.ConfigParser()
config.read(config_file)
openai_api_key = config.get("API_KEYS", "OpenAI_API_KEY")
chroma_collection = config.get("API_KEYS", "Chroma_Collection")
try:
chroma_storage = config.get("API_KEYS", "Optional_Chroma_Local_Storage")
except:
chroma_storage = None
return openai_api_key, chroma_collection, chroma_storage
def _count_tokens(self, text):
tokens = len(self.encoding.encode(text))
return tokens
def store_single(self, text: str, doc_id: str = None, metadata: dict = None):
"""Store a single document in Chroma."""
assert (
self._count_tokens(text) <= 1200
), "Text too long, chunk text before passing to .store_single()"
# Compute the embedding
embedding = self.embedder.get_embedding(text)
unique_id = doc_id if doc_id else f"chunk_{self.collection.count()}"
# Store the document in Chroma
if metadata is None:
self.collection.add(
documents=[text],
embeddings=[embedding],
ids=[unique_id],
)
else:
self.collection.add(
documents=[text],
embeddings=[embedding],
metadatas=[metadata],
ids=[unique_id],
)
def store(self, context_chunks: list, metadatas=None):
"""Store multiple documents in Chroma"""
start_id = self.collection.count()
data = asyncio.run(self.embedder.create_embeddings(context_chunks, start_id=start_id))
if metadatas is None:
self.collection.add(
documents=data["chunk"].tolist(),
embeddings=data["embeddings"].tolist(),
ids=data["id"].tolist(), # Convert to list
)
else:
self.collection.add(
documents=data["chunk"].tolist(),
embeddings=data["embeddings"].tolist(),
metadatas=metadatas,
ids=data["id"].tolist(), # Convert to list
)
def fetch_context(self, query, top_n=5):
        if self.collection.count() < top_n:
top_n = self.collection.count()
# Generate the query embedding
query_embedding = self.embedder.get_embedding(query)
# Query the most similar results
results = self.collection.query(
query_embeddings=[query_embedding],
n_results=top_n,
)
# Retrieve the documents for the relevant embeddings
context_chunks = results['documents']
return context_chunks | [] |
2024-01-10 | maquenneville/WikiWhat | WikiWhat~simple_bot.py | # -*- coding: utf-8 -*-
"""
Created on Thu May 18 12:22:23 2023
@author: marca
"""
import openai
from openai.error import RateLimitError, InvalidRequestError, APIError
import time
import configparser
import tiktoken
import trafilatura
class SimpleBot:
def __init__(self, primer, model="gpt-3.5-turbo"):
self.openai_api_key = self._get_api_keys("config.ini")
openai.api_key = self.openai_api_key
self.model = model
self.encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
if isinstance(primer, list):
self.primer = [
{"role": "system", "content": "You are a helpful assistant."}
]
for message in primer:
self.primer.append({"role": "user", "content": message})
else:
self.primer = [
{"role": "system", "content": primer},
]
def _get_api_keys(self, config_file):
config = configparser.ConfigParser()
config.read(config_file)
openai_api_key = config.get("API_KEYS", "OpenAI_API_KEY")
return openai_api_key
def _count_tokens(self, text):
tokens = len(self.encoding.encode(text))
return tokens
def _generate_response(
self,
messages,
function_desc=None,
temperature=0.5,
n=1,
max_tokens=4000,
frequency_penalty=0,
):
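        """Call the OpenAI ChatCompletion API with exponential-backoff retries, sizing max_tokens to fit the model's context window."""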
token_ceiling = 4096
if self.model == "gpt-4":
max_tokens = 8000
token_ceiling = 8000
if self.model == "gpt-3.5-turbo-16k":
max_tokens = 16000
token_ceiling = 16000
tokens_used = sum([self._count_tokens(msg["content"]) for msg in messages])
tokens_available = token_ceiling - tokens_used
max_tokens = min(max_tokens, (tokens_available - 100))
if tokens_used + max_tokens > token_ceiling:
max_tokens = token_ceiling - tokens_used - 10
if max_tokens < 1:
max_tokens = 1
max_retries = 10
retries = 0
backoff_factor = 1 # Initial sleep time factor
while retries < max_retries:
try:
completion_params = {
"model": self.model,
"messages": messages,
"n": n,
"temperature": temperature,
"max_tokens": max_tokens,
"frequency_penalty": frequency_penalty,
}
if function_desc is not None:
completion_params["functions"] = function_desc
completion = openai.ChatCompletion.create(**completion_params)
response = completion
return response
except Exception as e:
print(e)
retries += 1
sleep_time = backoff_factor * (2 ** retries) # Exponential backoff
print(f"Server overloaded, retrying in {sleep_time} seconds...")
time.sleep(sleep_time)
print("Failed to generate prompt after max retries")
return
def smart_agent(self):
self.model = "gpt-4"
def fast_agent(self):
self.model = "gpt-3.5-turbo"
def long_agent(self):
self.model = "gpt-3.5-turbo-16k"
def add_primer(self, primer_text):
self.primer.append({"role": "user", "content": primer_text})
def chat(self, input_string: str, context_chunks: list=None):
# Create a local copy of self.primer
messages = self.primer.copy()
# Append new user message
messages.append({"role": "user", "content": f"{input_string}"})
if context_chunks:
memories = [{"role": "user", "content": f"Context:\n{context}"} for context in context_chunks]
messages.extend(memories)
response = self._generate_response(messages, temperature=0.1)
return response
class WebpageSummaryBot(SimpleBot):
def __init__(self, model="gpt-3.5-turbo-16k"):
super().__init__( primer="You are my Webpage Summary Assistant. Your job is to take the full, main text of a webpage, and trim it down into a summary. Maintain all important details, while attempting to keep the summary as short as possible. You must respond with a summary, and only a summary, no explanatory text or pleasantries.", model='gpt-3.5-turbo-16k')
def _chunk_webpage_text(self, text, max_token_length=10000):
words = text.split()
chunks = []
current_chunk = ""
for word in words:
# Check if adding the word to the current chunk would exceed the max_token_length
if self._count_tokens(current_chunk + " " + word) > max_token_length:
# If so, add the current chunk to the chunks list and start a new chunk with the current word
chunks.append(current_chunk.strip())
current_chunk = word
else:
# Otherwise, add the word to the current chunk
current_chunk += f" {word}"
# Add the last chunk to the chunks list
if current_chunk:
chunks.append(current_chunk.strip())
return chunks
def _summarize_text(self, input_string: str):
# Create a local copy of self.primer
messages = self.primer.copy()
# Append new user message
messages.append({"role": "user", "content": f"Text to summarize: {input_string}"})
response = self._generate_response(messages, temperature=0.1)
return response.choices[0].message.content
def summarize_url_content(self, url: str):
downloaded = trafilatura.fetch_url(url)
webpage_text = trafilatura.extract(downloaded)
if self._count_tokens(webpage_text) > 10000:
chunks = self._chunk_webpage_text(webpage_text)
summary = "\n".join([self._summarize_text(chunk) for chunk in chunks])
else:
summary = self._summarize_text(webpage_text)
return summary
| [
"Text to summarize: PLACEHOLDER",
"PLACEHOLDER",
"Context:\nPLACEHOLDER",
"You are a helpful assistant."
] |
2024-01-10 | fenghaiquan/babyagi | classic~BabyElfAGI~skills~web_search.py |
from skills.skill import Skill
from serpapi import GoogleSearch
import openai
from bs4 import BeautifulSoup
import requests
import re
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36"
}
class WebSearch(Skill):
name = 'web_search'
description = 'A tool that performs web searches.'
api_keys_required = [['openai'],['serpapi']]
def __init__(self, api_keys):
super().__init__(api_keys)
def execute(self, params, dependent_task_outputs, objective):
# Your function goes here
# Modify the query based on the dependent task output
if dependent_task_outputs != "":
dependent_task = f"Use the dependent task output below as reference to help craft the correct search query for the provided task above. Dependent task output:{dependent_task_outputs}."
else:
dependent_task = "."
query = self.text_completion_tool("You are an AI assistant tasked with generating a Google search query based on the following task: "+params+". If the task looks like a search query, return the identical search query as your response. " + dependent_task + "\nSearch Query:")
print("\033[90m\033[3m"+"Search query: " +str(query)+"\033[0m")
# Set the search parameters
search_params = {
"engine": "google",
"q": query,
"api_key": self.serpapi_api_key,
"num": 3
}
# Perform the web search
search_results = GoogleSearch(search_params).get_dict()
# Simplify the search results
search_results = self.simplify_search_results(search_results.get('organic_results', []))
print("\033[90m\033[3mCompleted search. Now scraping results.\n\033[0m")
# Store the results from web scraping
results = ""
for result in search_results:
url = result.get('link')
print("\033[90m\033[3m" + "Scraping: "+url+"" + "...\033[0m")
content = self.web_scrape_tool({"url": url, "task": params,"objective":objective})
results += str(content) + ". "
print("\033[90m\033[3m"+str(results[0:100])[0:100]+"...\033[0m")
# Process the results and generate a report
results = self.text_completion_tool(f"You are an expert analyst combining the results of multiple web scrapes. Rewrite the following information as one cohesive report without removing any facts. Ignore any reports of not having info, unless all reports say so - in which case explain that the search did not work and suggest other web search queries to try. \n###INFORMATION:{results}.\n###REPORT:")
return results
def simplify_search_results(self, search_results):
simplified_results = []
for result in search_results:
simplified_result = {
"position": result.get("position"),
"title": result.get("title"),
"link": result.get("link"),
"snippet": result.get("snippet")
}
simplified_results.append(simplified_result)
return simplified_results
def text_completion_tool(self, prompt: str):
messages = [
{"role": "user", "content": prompt}
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k-0613",
messages=messages,
temperature=0.2,
max_tokens=1500,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
return response.choices[0].message['content'].strip()
def web_scrape_tool(self, params):
content = self.fetch_url_content(params['url'])
if content is None:
return None
text = self.extract_text(content)
print("\033[90m\033[3m"+"Scrape completed. Length:" +str(len(text))+".Now extracting relevant info..."+"...\033[0m")
info = self.extract_relevant_info(params['objective'], text[0:11000], params['task'])
links = self.extract_links(content)
#result = f"{info} URLs: {', '.join(links)}"
result = info
return result
def fetch_url_content(self,url: str):
try:
response = requests.get(url, headers=headers, timeout=10)
response.raise_for_status()
return response.content
except requests.exceptions.RequestException as e:
print(f"Error while fetching the URL: {e}")
return ""
def extract_links(self,content: str):
soup = BeautifulSoup(content, "html.parser")
links = [link.get('href') for link in soup.findAll('a', attrs={'href': re.compile("^https?://")})]
return links
def extract_text(self,content: str):
soup = BeautifulSoup(content, "html.parser")
text = soup.get_text(strip=True)
return text
def extract_relevant_info(self, objective, large_string, task):
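        """Walk the scraped text in overlapping chunks and ask the model to accumulate notes relevant to the task and objective."""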
chunk_size = 12000
overlap = 500
notes = ""
if len(large_string) == 0:
print("error scraping")
return "Error scraping."
for i in range(0, len(large_string), chunk_size - overlap):
print("\033[90m\033[3m"+"Reading chunk..."+"\033[0m")
chunk = large_string[i:i + chunk_size]
messages = [
{"role": "system", "content": f"You are an AI assistant."},
{"role": "user", "content": f"You are an expert AI research assistant tasked with creating or updating the current notes. If the current note is empty, start a current-notes section by exracting relevant data to the task and objective from the chunk of text to analyze. If there is a current note, add new relevant info frol the chunk of text to analyze. Make sure the new or combined notes is comprehensive and well written. Here's the current chunk of text to analyze: {chunk}. ### Here is the current task: {task}.### For context, here is the objective: {objective}.### Here is the data we've extraced so far that you need to update: {notes}.### new-or-updated-note:"}
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k-0613",
messages=messages,
max_tokens=800,
n=1,
stop="###",
temperature=0.7,
)
notes += response.choices[0].message['content'].strip()+". ";
return notes | [
"You are an AI assistant.",
"You are an expert AI research assistant tasked with creating or updating the current notes. If the current note is empty, start a current-notes section by exracting relevant data to the task and objective from the chunk of text to analyze. If there is a current note, add new relevant info frol the chunk of text to analyze. Make sure the new or combined notes is comprehensive and well written. Here's the current chunk of text to analyze: PLACEHOLDER. ### Here is the current task: PLACEHOLDER.### For context, here is the objective: PLACEHOLDER.### Here is the data we've extraced so far that you need to update: PLACEHOLDER.### new-or-updated-note:"
] |
2024-01-10 | fenghaiquan/babyagi | classic~BabyElfAGI~tasks~task_registry.py | import openai
import json
import threading
import os
import numpy as np
class TaskRegistry:
def __init__(self):
self.tasks = []
# Initialize the lock
self.lock = threading.Lock()
objectives_file_path = "tasks/example_objectives"
self.example_loader = ExampleObjectivesLoader(objectives_file_path)
def load_example_objectives(self, user_objective):
return self.example_loader.load_example_objectives(user_objective)
def create_tasklist(self, objective, skill_descriptions):
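        """Ask the chat model to draft a short JSON task list for the objective, using the most similar example objective and task list as a guide."""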
#load most relevant object and tasklist from objectives_examples.json
example_objective, example_tasklist = self.load_example_objectives(objective)
prompt = (
f"You are an expert task list creation AI tasked with creating a list of tasks as a JSON array, considering the ultimate objective of your team: {objective}. "
f"Create a very short task list based on the objective, the final output of the last task will be provided back to the user. Limit tasks types to those that can be completed with the available skills listed below. Task description should be detailed.###"
f"AVAILABLE SKILLS: {skill_descriptions}.###"
f"RULES:"
f"Do not use skills that are not listed."
f"Always include one skill."
f"dependent_task_ids should always be an empty array, or an array of numbers representing the task ID it should pull results from."
f"Make sure all task IDs are in chronological order.###\n"
f"EXAMPLE OBJECTIVE={json.dumps(example_objective)}"
f"TASK LIST={json.dumps(example_tasklist)}"
f"OBJECTIVE={objective}"
f"TASK LIST="
)
print("\033[90m\033[3m" + "\nInitializing...\n" + "\033[0m")
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=[
{
"role": "system",
"content": "You are a task creation AI."
},
{
"role": "user",
"content": prompt
}
],
temperature=0,
max_tokens=1500,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
# Extract the content of the assistant's response and parse it as JSON
result = response["choices"][0]["message"]["content"]
try:
task_list = json.loads(result)
self.tasks = task_list
except Exception as error:
print(error)
def execute_task(self, i, task, skill_registry, task_outputs, objective):
p_nexttask="\033[92m\033[1m"+"\n*****NEXT TASK ID:"+str(task['id'])+"*****\n"+"\033[0m\033[0m"
        p_nexttask += f"Executing task {task.get('id')}: {task.get('task')} [{task.get('skill')}]"
print(p_nexttask)
# Retrieve the skill from the registry
skill = skill_registry.get_skill(task['skill'])
# Get the outputs of the dependent tasks
dependent_task_outputs = {dep: task_outputs[dep]["output"] for dep in task['dependent_task_ids']} if 'dependent_task_ids' in task else {}
# Execute the skill
# print("execute:"+str([task['task'], dependent_task_outputs, objective]))
task_output = skill.execute(task['task'], dependent_task_outputs, objective)
print("\033[93m\033[1m"+"\nTask Output (ID:"+str(task['id'])+"):"+"\033[0m\033[0m")
print("TASK: "+str(task["task"]))
print("OUTPUT: "+str(task_output))
return i, task_output
def reorder_tasks(self):
self.tasks = sorted(self.tasks, key=lambda task: task['id'])
def add_task(self, task, after_task_id):
# Get the task ids
task_ids = [t["id"] for t in self.tasks]
# Get the index of the task id to add the new task after
insert_index = task_ids.index(after_task_id) + 1 if after_task_id in task_ids else len(task_ids)
# Insert the new task
self.tasks.insert(insert_index, task)
self.reorder_tasks()
def update_tasks(self, task_update):
for task in self.tasks:
if task['id'] == task_update['id']:
# This merges the original task dictionary with the update, overwriting only the fields present in the update.
task.update(task_update)
self.reorder_tasks()
def reflect_on_output(self, task_output, skill_descriptions):
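        """Review the latest task output and ask the model for new tasks to add, the task IDs to insert them after, and any existing tasks to update."""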
with self.lock:
example = [
[
{"id": 3, "task": "New task 1 description", "skill": "text_completion_skill",
"dependent_task_ids": [], "status": "complete"},
{"id": 4, "task": "New task 2 description", "skill": "text_completion_skill",
"dependent_task_ids": [], "status": "incomplete"}
],
[2, 3],
{"id": 5, "task": "Complete the objective and provide a final report",
"skill": "text_completion_skill", "dependent_task_ids": [1, 2, 3, 4], "status": "incomplete"}
]
prompt = (
f"You are an expert task manager, review the task output to decide at least one new task to add."
f"As you add a new task, see if there are any tasks that need to be updated (such as updating dependencies)."
f"Use the current task list as reference."
f"Do not add duplicate tasks to those in the current task list."
f"Only provide JSON as your response without further comments."
f"Every new and updated task must include all variables, even they are empty array."
f"Dependent IDs must be smaller than the ID of the task."
f"New tasks IDs should be no larger than the last task ID."
f"Always select at least one skill."
f"Task IDs should be unique and in chronological order." f"Do not change the status of complete tasks."
f"Only add skills from the AVAILABLE SKILLS, using the exact same spelling."
f"Provide your array as a JSON array with double quotes. The first object is new tasks to add as a JSON array, the second array lists the ID numbers where the new tasks should be added after (number of ID numbers matches array), and the third object provides the tasks that need to be updated."
f"Make sure to keep dependent_task_ids key, even if an empty array."
f"AVAILABLE SKILLS: {skill_descriptions}.###"
f"\n###Here is the last task output: {task_output}"
f"\n###Here is the current task list: {self.tasks}"
f"\n###EXAMPLE OUTPUT FORMAT = {json.dumps(example)}"
f"\n###OUTPUT = "
)
print("\033[90m\033[3m" + "\nReflecting on task output to generate new tasks if necessary...\n" + "\033[0m")
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k-0613",
messages=[
{
"role": "system",
"content": "You are a task creation AI."
},
{
"role": "user",
"content": prompt
}
],
temperature=0.7,
max_tokens=1500,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
# Extract the content of the assistant's response and parse it as JSON
result = response["choices"][0]["message"]["content"]
print("\n#" + str(result))
# Check if the returned result has the expected structure
if isinstance(result, str):
try:
task_list = json.loads(result)
# print("RESULT:")
print(task_list)
# return [],[],[]
return task_list[0], task_list[1], task_list[2]
except Exception as error:
print(error)
else:
raise ValueError("Invalid task list structure in the output")
def get_tasks(self):
"""
Returns the current list of tasks.
Returns:
list: the list of tasks.
"""
return self.tasks
def get_task(self, task_id):
"""
Returns a task given its task_id.
Parameters:
task_id : int
The unique ID of the task.
Returns:
dict
The task that matches the task_id.
"""
matching_tasks = [task for task in self.tasks if task["id"] == task_id]
if matching_tasks:
return matching_tasks[0]
else:
print(f"No task found with id {task_id}")
return None
def print_tasklist(self, task_list):
p_tasklist="\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m"
for t in task_list:
dependent_task_ids = t.get('dependent_task_ids', [])
dependent_task = ""
if dependent_task_ids:
dependent_task = f"\033[31m<dependencies: {', '.join([f'#{dep_id}' for dep_id in dependent_task_ids])}>\033[0m"
status_color = "\033[32m" if t.get('status') == "completed" else "\033[31m"
p_tasklist+= f"\033[1m{t.get('id')}\033[0m: {t.get('task')} {status_color}[{t.get('status')}]\033[0m \033[93m[{t.get('skill')}] {dependent_task}\033[0m\n"
print(p_tasklist)
class ExampleObjectivesLoader:
def __init__(self, objectives_folder_path):
self.objectives_folder_path = objectives_folder_path
self.objectives_examples = [] # Initialize as an empty list
def load_objectives_examples(self):
self.objectives_examples = []
for filename in os.listdir(self.objectives_folder_path):
file_path = os.path.join(self.objectives_folder_path, filename)
with open(file_path, 'r') as file:
objectives = json.load(file)
self.objectives_examples.extend(objectives)
def find_most_relevant_objective(self, user_input):
user_input_embedding = self.get_embedding(user_input, model='text-embedding-ada-002')
most_relevant_objective = max(
self.objectives_examples,
key=lambda pair: self.cosine_similarity(pair['objective'], user_input_embedding)
)
return most_relevant_objective['objective'], most_relevant_objective['examples']
def get_embedding(self, text, model='text-embedding-ada-002'):
response = openai.Embedding.create(input=[text], model=model)
embedding = response['data'][0]['embedding']
return embedding
def cosine_similarity(self, objective, embedding):
max_similarity = float('-inf')
objective_embedding = self.get_embedding(objective, model='text-embedding-ada-002')
similarity = self.calculate_similarity(objective_embedding, embedding)
max_similarity = max(max_similarity, similarity)
return max_similarity
def calculate_similarity(self, embedding1, embedding2):
embedding1 = np.array(embedding1, dtype=np.float32)
embedding2 = np.array(embedding2, dtype=np.float32)
similarity = np.dot(embedding1, embedding2) / (np.linalg.norm(embedding1) * np.linalg.norm(embedding2))
return similarity
def load_example_objectives(self, user_objective):
self.load_objectives_examples()
most_relevant_objective, most_relevant_tasklist = self.find_most_relevant_objective(user_objective)
example_objective = most_relevant_objective
example_tasklist = most_relevant_tasklist
return example_objective, example_tasklist
| [
"Always select at least one skill.",
"TASK LIST=",
"Provide your array as a JSON array with double quotes. The first object is new tasks to add as a JSON array, the second array lists the ID numbers where the new tasks should be added after (number of ID numbers matches array), and the third object provides the tasks that need to be updated.",
"\n###OUTPUT = ",
"Use the current task list as reference.",
"Dependent IDs must be smaller than the ID of the task.",
"Make sure all task IDs are in chronological order.###\n",
"AVAILABLE SKILLS: PLACEHOLDER.###",
"Only add skills from the AVAILABLE SKILLS, using the exact same spelling.",
"Make sure to keep dependent_task_ids key, even if an empty array.",
"As you add a new task, see if there are any tasks that need to be updated (such as updating dependencies).",
"Create a very short task list based on the objective, the final output of the last task will be provided back to the user. Limit tasks types to those that can be completed with the available skills listed below. Task description should be detailed.###",
"Do not change the status of complete tasks.",
"Do not add duplicate tasks to those in the current task list.",
"New tasks IDs should be no larger than the last task ID.",
"You are an expert task manager, review the task output to decide at least one new task to add.",
"Always include one skill.",
"Task IDs should be unique and in chronological order.",
"You are an expert task list creation AI tasked with creating a list of tasks as a JSON array, considering the ultimate objective of your team: PLACEHOLDER. ",
"OBJECTIVE=PLACEHOLDER",
"\n###Here is the last task output: PLACEHOLDER",
"Do not use skills that are not listed.",
"You are a task creation AI.",
"Every new and updated task must include all variables, even they are empty array.",
"dependent_task_ids should always be an empty array, or an array of numbers representing the task ID it should pull results from.",
"Only provide JSON as your response without further comments."
] |
2024-01-10 | andrewhinh/admirer | question_answer~answer.py | # Imports
import argparse
from collections import defaultdict
import json
import os
from pathlib import Path
import random
from typing import Any, Dict, List, Tuple, Union
from dotenv import load_dotenv
import numpy as np
from onnxruntime import InferenceSession
from openai import OpenAI
from PIL import Image
import torch
from transformers import (
AutoTokenizer,
CLIPProcessor,
DetrFeatureExtractor,
DetrForSegmentation,
pipeline,
VisionEncoderDecoderModel,
ViTFeatureExtractor,
)
import question_answer.metadata.pica as metadata
# Loading env variables
load_dotenv()
# Variables
# OpenAI params
CLIENT = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
MODEL = "gpt-3.5-turbo-1106"
# Artifact path
artifact_path = Path(__file__).resolve().parent / "artifacts" / "answer"
# PICa formatting/config
img_id = 100 # Random idx for inference
question_id = 1005 # Random idx for inference
# Significant driver of performance with little extra cost
# PICa paper's max = 16, but can set higher if model's speed + context size can handle it
n_shot = 16
coco_path = artifact_path / "coco_annotations"
similarity_path = artifact_path / "coco_clip_new"
# Model setup
transformers_path = artifact_path / "transformers"
onnx_path = artifact_path / "onnx"
# Segmentation model config
tag_model = transformers_path / "facebook" / "detr-resnet-50-panoptic"
max_length = 16
num_beams = 4
# Caption model config
caption_model = transformers_path / "nlpconnect" / "vit-gpt2-image-captioning"
# CLIP Encoders config
clip_processor = transformers_path / "openai" / "clip-vit-base-patch16"
clip_onnx = onnx_path / "clip.onnx"
# Dataset variables
NUM_ORIGINAL_EXAMPLES = metadata.NUM_ORIGINAL_EXAMPLES
NUM_ADDED_EXAMPLES = metadata.NUM_ADDED_EXAMPLES
NUM_TEST_EXAMPLES = metadata.NUM_TEST_EXAMPLES
# Helper/main classes
class PICa_OKVQA:
"""
Question Answering Class
"""
def __init__(
self,
caption_info: Dict[Any, Any] = None,
tag_info: Dict[Any, Any] = None,
questions: Dict[str, List[Dict[str, str]]] = None,
context_idxs: Dict[str, str] = None,
question_features: np.ndarray = None,
image_features: np.ndarray = None,
evaluate: bool = False,
):
self.evaluate = evaluate
(
self.traincontext_caption_dict,
self.traincontext_answer_dict,
self.traincontext_question_dict,
) = self.load_anno(
"%s/captions_train2014.json" % coco_path,
"%s/mscoco_train2014_annotations.json" % coco_path,
"%s/OpenEnded_mscoco_train2014_questions.json" % coco_path,
)
(
self.traincontext_caption_dict,
_,
self.traincontext_answer_dict,
self.traincontext_question_dict,
) = self.add_anno(
"%s/admirer-pica.json" % coco_path,
self.traincontext_caption_dict,
self.traincontext_answer_dict,
self.traincontext_question_dict,
)
if evaluate:
(
self.testcontext_caption_dict,
self.testcontext_tags_dict,
self.testcontext_answer_dict,
self.testcontext_question_dict,
) = self.add_anno(
"%s/admirer-pica.json" % coco_path,
evaluate=evaluate,
)
# load cached image representation (Coco caption & Tags)
self.inputtext_dict = self.load_cachetext(self.testcontext_caption_dict, self.testcontext_tags_dict)
self.load_similarity(evaluate=evaluate)
question_dict_keys = list(self.testcontext_question_dict.keys())
image_ids, question_ids = [key.split("<->")[0] for key in question_dict_keys], [
key.split("<->")[1] for key in question_dict_keys
]
list_questions = list(self.testcontext_question_dict.values())
self.questions = {
"questions": [
{"image_id": image_id, "question": question_str, "question_id": quest_id}
for image_id, question_str, quest_id in zip(image_ids, list_questions, question_ids)
]
}
else:
# load cached image representation (Coco caption & Tags)
self.inputtext_dict = self.load_cachetext(caption_info, tag_info)
_ = self.load_similarity(context_idxs, question_features, image_features)
self.questions = questions
self.train_keys = list(self.traincontext_answer_dict.keys())
def answer_gen(self):
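        """Build an n-shot prompt from similar training examples and query the chat model; returns the predicted answer, or a BERT F1 score against ground truth when evaluating."""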
_, _, question_dict = self.load_anno(questions=self.questions)
if self.evaluate:
pred_answers = []
gt_answers = []
keys = list(question_dict.keys())
for key in keys:
img_key = int(key.split("<->")[0])
question, caption = (
question_dict[key],
self.inputtext_dict[img_key],
)
context_key_list = self.get_context_keys(
key,
n_shot,
)
# prompt format following OpenAI QA API
messages = []
system_message = {
"role": "system",
"content": str(
"You are given {n_shot} examples of image content, a question about the image, and an answer. "
+ "Given a new set of content and question, "
+ "you are tasked with coming up with an answer in a similar way to the examples. "
+ "If the content is not enough to answer the question, "
+ "make up an answer structured as:"
+ "\n"
+ "1) an acknowledgment of not knowing the correct answer to the question,"
+ "\n"
+ "2) a comedic reply using what you can from the content."
+ "\n"
+ "For example, if the question is 'What is the color of the user's shirt?', "
+ "and the context is 'The user is wearing a shirt with a picture of a cat on it', "
+ "a good answer could be 'I don't know, but I think the cat is cute!'"
),
}
messages.append(system_message)
for ni in range(n_shot):
if context_key_list is None:
context_key = self.train_keys[random.randint(0, len(self.train_keys) - 1)]
else:
context_key = context_key_list[ni]
img_context_key = int(context_key.split("<->")[0])
while True: # make sure get context with valid question and answer
if (
len(self.traincontext_question_dict[context_key]) != 0
and len(self.traincontext_answer_dict[context_key][0]) != 0
):
break
context_key = self.train_keys[random.randint(0, len(self.train_keys) - 1)]
caption = self.traincontext_caption_dict[img_context_key]
question = self.traincontext_question_dict[context_key]
answer = self.traincontext_answer_dict[context_key]
if type(caption) == list:
caption = caption[0] # sometimes annotators messed up
if type(question) == list:
question = question[0]
if type(answer) == list:
answer = answer[0]
user_message = {
"role": "user",
"content": str(
"Image content: " + caption + "\n" + "Question: " + question + "\n" + "Answer: " + answer
),
}
messages.append(user_message)
current_user_message = {
"role": "user",
"content": str("Image content: " + caption + "\n" + "Question: " + question + "\n" + "Answer: "),
}
messages.append(current_user_message)
try:
response = CLIENT.chat.completions.create(
model=MODEL,
messages=messages,
)
except Exception as e:
print(e)
exit(0)
pred_answer = response.choices[0].message.content
if self.evaluate:
answer = self.testcontext_answer_dict[key]
pred_answers.append(pred_answer)
gt_answers.append(answer)
else:
return pred_answer
from question_answer.lit_models.metrics import BertF1Score
return BertF1Score()(pred_answers, gt_answers)
def get_context_keys(self, key: str, n: int) -> List[str]:
"""Get context keys based on similarity scores"""
        # combined with Q-similarity (image+question)
lineid = self.valkey2idx[key]
# Removing validation key from train similarity arrays if needed
temp_train_feature = None
temp_image_train_feature = None
temp_train_idx = None
for idx in range(NUM_ORIGINAL_EXAMPLES, NUM_ORIGINAL_EXAMPLES + NUM_ADDED_EXAMPLES):
question_feature_equal = np.array_equal(self.val_feature[lineid], self.train_feature[idx])
image_feature_equal = np.array_equal(self.val_feature[lineid], self.image_train_feature[idx])
if question_feature_equal and image_feature_equal:
mask = np.ones(len(self.train_feature), dtype=bool)
mask[[idx]] = False
temp_train_feature = self.train_feature[mask]
temp_image_train_feature = self.image_train_feature[mask]
temp_train_idx = self.train_idx.pop(str(idx))
break
removed = temp_train_feature is not None and temp_image_train_feature is not None and temp_train_idx is not None
if removed:
question_similarity: np.ndarray = np.matmul(temp_train_feature, self.val_feature[lineid, :])
            # end of Q-similarity
similarity: np.ndarray = question_similarity + np.matmul(
temp_image_train_feature, self.image_val_feature[lineid, :]
)
else:
question_similarity: np.ndarray = np.matmul(self.train_feature, self.val_feature[lineid, :])
            # end of Q-similarity
similarity: np.ndarray = question_similarity + np.matmul(
self.image_train_feature, self.image_val_feature[lineid, :]
)
index: np.ndarray = similarity.argsort()[-n:][::-1]
return [self.train_idx[str(x)] for x in index]
def load_similarity(
self,
context_idxs: Dict[str, str] = None,
question_features: np.ndarray = None,
image_features: np.ndarray = None,
evaluate=False,
):
# Add question train feature, image train feature, and train idx
self.train_feature = np.load("%s/coco_clip_vitb16_train2014_okvqa_question.npy" % similarity_path)
self.train_idx: Dict[str, str] = json.load(
open(
"%s/okvqa_qa_line2sample_idx_train2014.json" % similarity_path,
"r",
)
)
self.image_train_feature = np.load(
"%s/coco_clip_vitb16_train2014_okvqa_convertedidx_image.npy" % similarity_path
)
if evaluate:
context_idxs = dict(list(self.train_idx.items())[NUM_ORIGINAL_EXAMPLES:])
new_keys = [str(idx) for idx in range(len(context_idxs))]
context_idxs = dict(zip(new_keys, list(context_idxs.values())))
self.val_feature = self.train_feature[-NUM_ADDED_EXAMPLES:, :]
self.image_val_feature = self.image_train_feature[-NUM_ADDED_EXAMPLES:, :]
else:
self.val_feature = question_features
self.image_val_feature = image_features
val_idx = context_idxs
self.valkey2idx: Dict[str, int] = {}
for ii in val_idx:
self.valkey2idx[val_idx[ii]] = int(ii)
def load_tags(
self,
tag_info: Dict[Any, List[str]],
) -> Dict[int, str]:
"""Loads tags for an image"""
tags_dict = {}
image_ids, list_tags = list(tag_info.keys()), list(tag_info.values())
# Concatenate tags into one string
list_str_tags = [tags for tags in list_tags]
for id in range(len(image_ids)):
tags_dict[image_ids[id]] = list_str_tags[id]
return tags_dict
def load_cachetext(
self,
caption_info: Dict[Any, List[str]],
tag_info: Dict[Any, List[str]],
):
"""Loads and adds cachetect to the caption"""
tags_dict = self.load_tags(tag_info)
caption_dict = {}
image_ids, captions = list(caption_info.keys()), list(caption_info.values())
for id in range(len(image_ids)):
caption_dict[image_ids[id]] = captions[id] + ". " + list(tags_dict.values())[id]
return caption_dict
def load_anno(
self,
coco_caption_file: Path = None,
answer_anno_file: Path = None,
question_anno_file: Path = None,
questions: Dict[str, List[Dict[str, str]]] = None,
) -> Tuple[Dict[int, List[str]], Dict[str, List[str]], Dict[str, str]]:
"""Loads annotation from a caption file"""
# Define default dictionaries
caption_dict: defaultdict[int, List[str]] = defaultdict(list)
answer_dict: defaultdict[str, List[str]] = defaultdict(list)
question_dict: defaultdict[str, str] = defaultdict(list)
# Create caption dictionary
if coco_caption_file is not None:
coco_caption = json.load(open(coco_caption_file, "r"))
if isinstance(coco_caption, dict):
coco_caption: List[Dict[str, Union[str, int]]] = coco_caption["annotations"]
for sample in coco_caption:
caption_dict[sample["image_id"]].append(sample["caption"]) # int -> sample[image_id]
# Create answer dictionary
if answer_anno_file is not None:
answer_data = json.load(open(answer_anno_file, "r"))
answer_annotations: List[Dict[str, Any]] = answer_data["annotations"]
for sample in answer_annotations:
id = str(sample["image_id"]) + "<->" + str(sample["question_id"])
if id not in answer_dict:
answer_dict[id] = [x["answer"] for x in sample["answers"]]
# Create question dictionary
if question_anno_file is not None:
question_data = json.load(open(question_anno_file, "r"))
else:
question_data = questions
question_annotations: List[Dict[str, Union[str, int]]] = question_data["questions"]
for sample in question_annotations:
id = str(sample["image_id"]) + "<->" + str(sample["question_id"])
if id not in question_dict:
question_dict[id] = sample["question"]
return dict(caption_dict), dict(answer_dict), dict(question_dict)
def add_anno(
self,
add: Path,
context_caption_dict: Dict[int, List[str]] = None,
context_answer_dict: Dict[str, List[str]] = None,
context_question_dict: Dict[str, str] = None,
evaluate=False,
):
"""Load/add extra annotations to the annotations dictionaries"""
add_dict = json.load(open(add, "r"))
context_tag_dict = {}
caption_add = dict(zip(list(add_dict["image_id"].values()), list(add_dict["caption"].values())))
tags_add = dict(zip(list(add_dict["image_id"].values()), list(add_dict["tags"].values())))
combine_ids = [
str(image_id) + "<->" + str(question_id)
for image_id, question_id in zip(
list(add_dict["image_id"].values()), list(add_dict["question_id"].values())
)
]
answer_add = dict(zip(combine_ids, list(add_dict["answer"].values())))
question_add = dict(zip(combine_ids, list(add_dict["question"].values())))
if evaluate:
context_caption_dict = {}
context_answer_dict = {}
context_question_dict = {}
context_caption_dict.update(caption_add)
context_tag_dict.update(tags_add)
context_answer_dict.update(answer_add)
context_question_dict.update(question_add)
if evaluate:
context_caption_dict = dict(list(context_caption_dict.items())[-NUM_TEST_EXAMPLES:])
context_tag_dict = dict(list(context_tag_dict.items())[-NUM_TEST_EXAMPLES:])
context_answer_dict = dict(list(context_answer_dict.items())[-NUM_TEST_EXAMPLES:])
context_question_dict = dict(list(context_question_dict.items())[-NUM_TEST_EXAMPLES:])
return context_caption_dict, context_tag_dict, context_answer_dict, context_question_dict
class Pipeline:
"""
Main inference class
"""
def __init__(self):
# Tagging model setup
segment_model = DetrForSegmentation.from_pretrained(tag_model, use_pretrained_backbone=False)
self.segment = pipeline(
"image-segmentation", model=segment_model, feature_extractor=DetrFeatureExtractor.from_pretrained(tag_model)
)
self.tags = []
# Caption model setup
self.caption_model = VisionEncoderDecoderModel.from_pretrained(caption_model)
self.caption_feature_extractor = ViTFeatureExtractor.from_pretrained(caption_model)
self.caption_tokenizer = AutoTokenizer.from_pretrained(caption_model)
self.device = torch.device("cpu") # torch.device("cuda" if torch.cuda.is_available() else "cpu")
# CLIP Setup
self.clip_session = InferenceSession(str(clip_onnx))
self.clip_processor = CLIPProcessor.from_pretrained(clip_processor)
def predict_caption(self, image):
pixel_values = self.caption_feature_extractor(images=[image], return_tensors="pt").pixel_values
pixel_values = pixel_values.to(self.device)
gen_kwargs = {"max_length": max_length, "num_beams": num_beams}
output_ids = self.caption_model.generate(pixel_values, **gen_kwargs)
preds = self.caption_tokenizer.batch_decode(output_ids, skip_special_tokens=True)
preds = [pred.strip() for pred in preds]
return preds[0]
def predict(self, image: Union[str, Path, Image.Image], question: Union[str, Path]) -> str:
if not isinstance(image, Image.Image):
image_pil = Image.open(image)
if image_pil.mode != "RGB":
image_pil = image_pil.convert(mode="RGB")
else:
image_pil = image
        if isinstance(question, Path) or os.path.exists(question):
with open(question, "r") as f:
question_str = f.readline()
else:
question_str = question
# Generating image tag(s)
for dic in self.segment(image_pil):
self.tags.append(dic["label"])
if not self.tags:
self.tags.append("")
tag_info: Dict[int, List[str]] = {img_id: ", ".join(self.tags)}
# Generating image caption
caption = self.predict_caption(image_pil)
if not caption:
caption = ""
caption_info: Dict[int, str] = {img_id: caption}
# Generating image/question features
inputs = self.clip_processor(text=[question_str], images=image_pil, return_tensors="np", padding=True)
# for i in session.get_outputs(): print(i.name)
outputs = self.clip_session.run(
output_names=["logits_per_image", "logits_per_text", "text_embeds", "image_embeds"], input_feed=dict(inputs)
)
# Generating context idxs
context_idxs: Dict[str, str] = {"0": str(img_id) + "<->" + str(question_id)}
# Answering question
questions = {"questions": [{"image_id": img_id, "question": question_str, "question_id": question_id}]}
okvqa = PICa_OKVQA(
caption_info, tag_info, questions, context_idxs, outputs[2], outputs[3]
) # Have to initialize here because necessary objects need to be generated
answer = okvqa.answer_gen()
# rationale = okvqa.rationale(answer)
return answer # + " because " + rationale
def evaluate(self):
okvqa = PICa_OKVQA(
evaluate=True,
)
acc = okvqa.answer_gen()
print(acc)
return acc
# Running model
def main():
parser = argparse.ArgumentParser()
# Inputs
parser.add_argument("--image", type=str, required=True)
parser.add_argument("--question", type=str, required=True)
args = parser.parse_args()
# Answering question
pipeline = Pipeline()
pred_str = pipeline.predict(args.image, args.question)
print(pred_str)
if __name__ == "__main__":
main()
| [
"Image content: PLACEHOLDER\nQuestion: PLACEHOLDER\nAnswer: ",
"Image content: PLACEHOLDER\nQuestion: PLACEHOLDER\nAnswer: PLACEHOLDER",
"You are given {n_shot} examples of image content, a question about the image, and an answer. Given a new set of content and question, you are tasked with coming up with an answer in a similar way to the examples. If the content is not enough to answer the question, make up an answer structured as:\n1) an acknowledgment of not knowing the correct answer to the question,\n2) a comedic reply using what you can from the content.\nFor example, if the question is 'What is the color of the user's shirt?', and the context is 'The user is wearing a shirt with a picture of a cat on it', a good answer could be 'I don't know, but I think the cat is cute!'"
] |
2024-01-10 | lazarusking/job-search-backend | recruiters~resume_llm.py | # !pip install langchain
# !pip install unstructured
# !pip install openai
# !pip install chromadb
# !pip install Cython
# !pip install tiktoken
# !pip install pdf2image
# !pip install pdfminer.six
import os
from langchain.document_loaders import PDFMinerLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.chains.question_answering import load_qa_chain
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.text_splitter import RecursiveCharacterTextSplitter
from dotenv import load_dotenv
load_dotenv()
os.environ["OPENAI_API_KEY"] = os.getenv('OPENAI_API_KEY')
def create_index(pdf_folder_path: list[str]):
# Load the PDF files from the specified folder
print([i.path for i in pdf_folder_path])
def add_end(file):
loader=PDFMinerLoader(os.path.join(file.path))
for i in loader.load():
# print(i.page_content)
i.page_content="".join(f"{i.page_content},<End>")
return loader
loaders = [add_end(file)
for file in pdf_folder_path]
# Create and return a VectorstoreIndex from the PDF loaders
index = VectorstoreIndexCreator(text_splitter=RecursiveCharacterTextSplitter(separators='<End>',
chunk_size=len(loaders)*1000, chunk_overlap=0)).from_loaders(loaders)
print(loaders)
return index
def prompt_decorator(func):
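    # Decorator that takes the wrapped function's return value (a job description)
    # and embeds it into the recruiter-assistant prompt template, leaving {context}
    # and {question} as placeholders for the QA chain to fill in later.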
def wrapper(*args, **kwargs):
job_desc = func(*args, **kwargs)
prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
You are a chatbot that helps recruiters analyze resumes based on a job description. You will be passed information on some resumes as the context, try your best to answer accurately about them based on the job description also note the full name of the person in each resume.
Keep the answer as concise as possible.
Always say "thanks for asking!" at the end of the answer.
{context}
Job Description: {job_desc}
Question: {question}
Answer:"""
return prompt_template.format(job_desc=job_desc, context='{context}', question='{question}')
return wrapper
def load_qa_chain_with_prompt(llm, job_desc):
# Define the prompt template for the QA chain
@prompt_decorator
def my_func(desc):
return desc
result = my_func(job_desc)
prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
You will be passed documents about resume, try your best to answer accurately about them based on this job description.
Use three sentences maximum and keep the answer as concise as possible.
Always say "thanks for asking!" at the end of the answer.
{context}
Question: {question}
Answer:"""
PROMPT = PromptTemplate(template=result,
input_variables=["context", "question"])
return load_qa_chain(llm=llm, chain_type="stuff", prompt=PROMPT, verbose=True)
def search_resumes(index, query: str, job_desc: str):
    # Create the streaming chat model used to answer the query
    llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo",
                     streaming=True, callbacks=[StreamingStdOutCallbackHandler()])
# Load the QA chain with the specified prompt template
qa_chain = load_qa_chain_with_prompt(llm, job_desc)
# Create a RetrievalQA instance with the QA chain and index retriever
qa = RetrievalQA(combine_documents_chain=qa_chain,
retriever=index.vectorstore.as_retriever())
# Run the query and return the result
result = qa.run(query)
# return qa({"query": query,"job_desc":job_desc})['result']
return result
| [
"question",
"Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\n You will be passed documents about resume, try your best to answer accurately about them based on this job description.\n Use three sentences maximum and keep the answer as concise as possible. \n Always say \"thanks for asking!\" at the end of the answer. \n\n {context}\n\n Question: {question}\n Answer:",
"Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\n You are a chatbot that helps recruiters analyze resumes based on a job description. You will be passed information on some resumes as the context, try your best to answer accurately about them based on the job description also note the full name of the person in each resume.\n\n Keep the answer as concise as possible. \n Always say \"thanks for asking!\" at the end of the answer. \n {context}\n\n Job Description: {job_desc}\n Question: {question}\n Answer:",
"context"
] |
2024-01-10 | yusk/django-project-default | main~management~commands~gen_img.py | from django.core.management.base import BaseCommand
from main.utils import OpenAIImageHelper, ImageHelper
class Command(BaseCommand):
help = 'gen_img'
def add_arguments(self, parser):
parser.add_argument(dest='prompt', help='prompt')
def handle(self, *args, **options):
print('gen_img')
prompt = options['prompt']
print(prompt)
# urls = OpenAIImageHelper.gen_img(prompt, size="256x256")
# print(urls)
# ImageHelper.imgurl2file(urls[0], "local/gen_img.png")
with open("local/gen_img.png", "rb") as f:
d = f.read()
urls = OpenAIImageHelper.variate_img(d, size="256x256")
ImageHelper.imgurl2file(urls[0], "local/variate_img.png")
| [] |
2024-01-10 | PIG208/topic-bot | topic_summarizer.py | # Modified from https://gist.github.com/rht/c5fd97d9171a5e71863d85e47af7a7e3
import time
import traceback
from typing import Any, Dict
from langchain import OpenAI, PromptTemplate
from langchain.docstore.document import Document
from langchain.callbacks import get_openai_callback
from langchain.chains.llm import LLMChain
from langchain.chains.summarize import load_summarize_chain
from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain
from langchain.output_parsers.regex import RegexParser
import zulip
# Supply the API key by setting OPENAI_API_KEY
# Set the temperature to 0 for a more deterministic output
llm = OpenAI(temperature=0)
def create_topic_generator():
# Creates a topic phrase for the conversation.
topic_generator_prompt_template = """
Write a topic phrase of at most 4 words of the following:
{text}
TOPIC PHRASE OF THE TEXT:"""
TOPIC_GENERATOR_PROMPT = PromptTemplate(
template=topic_generator_prompt_template, input_variables=["text"]
)
return load_summarize_chain(llm, chain_type="stuff", prompt=TOPIC_GENERATOR_PROMPT)
def create_stuff_summarizer():
# Creates a summary of the conversation using the
# stuff summarizer.
return load_summarize_chain(llm, chain_type="stuff")
def create_refine_summarizer():
refine_template = (
"Your job is to produce a final summary of a conversation\n"
"We have provided an existing summary up to a certain point: {existing_answer}\n"
"We have the opportunity to refine the existing summary"
"(only if needed) with some more context below.\n"
"------------\n"
"{text}\n"
"------------\n"
"Given the new context, refine the original summary"
"If the context isn't useful, return the original summary."
)
refine_prompt = PromptTemplate(
input_variables=["existing_answer", "text"],
template=refine_template,
)
return load_summarize_chain(llm, chain_type="refine", refine_prompt=refine_prompt)
def create_map_reduce_summarizer():
return load_summarize_chain(llm, chain_type="map_reduce")
def create_map_rerank_summarizer():
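    # Map-rerank chain: each document chunk is asked for a candidate summary plus a
    # 0-100 confidence score, and the highest-scoring candidate is kept.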
map_template_string = """Given a part of a conversation below, your job is try to produce a possible summary
for the entire conversation and give an integer score from 0 to 100 on how confident
you are with that the summary reflects the entire conversation including the parts that are not seen,
in the following format where SUMMARY must be only followed by a newline before SCORE:
SUMMARY:
SCORE:
ENDOFFORMAT
{text}
"""
output_parser = RegexParser(
regex=r"SUMMARY: (.+)\nSCORE: (\d+)", output_keys=["topic", "score"]
)
MAP_PROMPT = PromptTemplate(
input_variables=["text"],
template=map_template_string,
output_parser=output_parser,
)
map_rerank = MapRerankDocumentsChain(
llm_chain=LLMChain(llm=llm, prompt=MAP_PROMPT),
rank_key="score",
answer_key="topic",
document_variable_name="text",
return_intermediate_steps=True,
)
return map_rerank
chain_topic_generator_stuff = create_topic_generator()
chain_topic_generator_map_rerank = create_map_rerank_summarizer()
chain_topic_summarizer_stuff = create_stuff_summarizer()
chain_topic_summarizer_refine = create_refine_summarizer()
chain_topic_summarizer_map_reduce = create_map_reduce_summarizer()
def topic_from_string(text):
return chain_topic_generator_stuff.run([Document(page_content=text)]).strip()
def exit_immediately(s):
print("\nERROR\n", s)
exit(1)
# Retrieves all messages matching request from Zulip, starting at post id anchor.
# As recommended in the Zulip API docs, requests 1000 messages at a time.
# Returns a list of messages.
def request_all(client, request, anchor=0):
request["anchor"] = anchor
request["num_before"] = 0
request["num_after"] = 1000
response = safe_request(client.get_messages, request)
msgs = response["messages"]
while not response["found_newest"]:
request["anchor"] = response["messages"][-1]["id"] + 1
response = safe_request(client.get_messages, request)
msgs = msgs + response["messages"]
return msgs
# runs client.cmd(args). If the response is a rate limit error, waits
# the requested time and then retries the request.
def safe_request(cmd, *args, **kwargs):
rsp = cmd(*args, **kwargs)
while rsp["result"] == "error":
if "retry-after" in rsp:
print("Timeout hit: {}".format(rsp["retry-after"]))
time.sleep(float(rsp["retry-after"]) + 1)
rsp = cmd(*args, **kwargs)
else:
exit_immediately(rsp["msg"])
return rsp
zulip_client = zulip.Client(config_file="./zuliprc")
def generate_topic(chain, docs):
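    # Run a topic-generation chain over the documents and format the result;
    # fall back to an error string if the chain raises.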
try:
topic = chain.run(docs).strip()
except Exception:
traceback.print_exc()
return "Error in generation"
return f"**topic:** {topic}"
def summarize_and_generate_topic(chain, docs):
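    # Run a summarization chain over the documents, then derive a short topic
    # phrase from the resulting summary.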
try:
response = chain(docs)
if "intermediate_steps" in response:
print("intermediate_steps:", response["intermediate_steps"])
summary = response["output_text"].strip()
topic = topic_from_string(summary)
except Exception:
traceback.print_exc()
return "Error in generation"
return f"**topic:** {topic} \n\n **summary:** {summary}"
def generate_topic_from_intro_message(chain, docs):
return generate_topic(chain, docs[:1])
def get_answer(message):
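    # Parse the #**stream>topic** mention, fetch the full topic history from Zulip,
    # and run every configured topic/summary chain over it, returning a Markdown
    # report with each chain's output, token usage, cost, and timing.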
stream_topic = message.split("#**")[1][:-2]
stream_name, topic_name = stream_topic.split(">")
request = {
"narrow": [
{"operator": "stream", "operand": stream_name},
{"operator": "topic", "operand": topic_name},
],
"client_gravatar": True,
"apply_markdown": False,
}
thread_content = request_all(zulip_client, request)
thread_formatted = []
for msg in thread_content:
thread_formatted.append(f"{msg['sender_full_name']} said: {msg['content']}")
print("Conversation text input from Zulip:\n{}".format("\n".join(thread_formatted)))
# texts = text_splitter.split_text(thread_txt)
docs = [Document(page_content=t) for t in thread_formatted]
topic_chain = {
"stuff": chain_topic_generator_stuff,
}
summary_chain = {
"refine": chain_topic_summarizer_refine,
"stuff": chain_topic_summarizer_stuff,
"map_reduce": chain_topic_summarizer_map_reduce,
"map_rerank": chain_topic_generator_map_rerank,
}
chain_tests = {
"topic": (topic_chain, generate_topic),
"summary": (summary_chain, summarize_and_generate_topic),
"summary from first message only": (
topic_chain,
generate_topic_from_intro_message,
),
}
output = []
output.append(f"# Summarize #**{stream_topic}**")
for chain_type in chain_tests:
chain_map, generate_content = chain_tests[chain_type]
for chain_name in chain_map:
before = time.perf_counter()
with get_openai_callback() as cb:
print(f'Running {chain_name} ({chain_type}) for "{stream_topic}"...')
output.append(
f"## {chain_name} ({chain_type}):\n{generate_content(chain_map[chain_name], docs).strip()}"
)
after = time.perf_counter()
output.append(
f"**Tokens Used:** *{cb.total_tokens}*; **API Cost:** *{cb.total_cost}*; **Total Time:** *{after - before} seconds*"
)
return "\n\n".join(output)
# The code after this line could be simplified by https://github.com/zulip/python-zulip-api/pull/786
def handle_message(msg: Dict[str, Any]) -> None:
print(f"Processing\n{msg}")
if msg["type"] != "stream":
return
message = msg["content"]
try:
content = get_answer(message)
except Exception:
traceback.print_exc()
content = "Failed to process message {}".format(msg["content"])
request = {
"type": "stream",
"to": msg["display_recipient"],
"topic": msg["subject"],
"content": content,
}
print(f"Sending\n{content}")
zulip_client.send_message(request)
def watch_messages() -> None:
print("Watching for messages...")
def handle_event(event: Dict[str, Any]) -> None:
if "message" not in event:
# ignore heartbeat events
return
handle_message(event["message"])
# https://zulip.com/api/real-time-events
narrow = [["is", "mentioned"]]
zulip_client.call_on_each_event(
handle_event, event_types=["message"], all_public_streams=True, narrow=narrow
)
def generate_answers():
questions = [
# Short conversations
"#**api documentation>user activity**",
"#**api documentation>security scheme validation/testing**",
'#**issues>visual notification on despite setting "Do not disturb"**',
"#**design>@topic mention**",
# Long conversations
"#**design>Mark all messages as read.**",
"#**feedback>issues link in description**",
"#**design>Profile button**",
# Extra long conversations
"#**api documentation>prev_stream in message history**",
"#**api design>Previewable URL Api**",
]
for question in questions:
content = get_answer(question)
with open("output.log", "+a") as f:
f.write(content + "\n")
# If you want to test directly:
# generate_answers()
# Run the summarizer as an interactive bot
watch_messages()
| [
"Given a part of a conversation below, your job is try to produce a possible summary\n for the entire conversation and give an integer score from 0 to 100 on how confident\n you are with that the summary reflects the entire conversation including the parts that are not seen,\n in the following format where SUMMARY must be only followed by a newline before SCORE:\n SUMMARY: \n SCORE: \n \n ENDOFFORMAT\n {text}\n ",
"Your job is to produce a final summary of a conversation\nWe have provided an existing summary up to a certain point: {existing_answer}\nWe have the opportunity to refine the existing summary(only if needed) with some more context below.\n------------\n{text}\n------------\nGiven the new context, refine the original summaryIf the context isn't useful, return the original summary.",
"\n Write a topic phrase of at most 4 words of the following:\n\n {text}\n\n TOPIC PHRASE OF THE TEXT:",
"existing_answer"
] |
2024-01-10 | nicolasugrinovic/size_depth_disambiguation | util~depth.py | import torch
from util.misc import save_points, read_json
import numpy as np
import os
from .misc import plot_joints_cv2
def get_sign_matix(gt_trans_):
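    # Pairwise depth-ordering matrix from the z component of each person's translation:
    # entry (i, j) is +1 when z_i > z_j, -1 when z_i < z_j, and 0 when equal.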
all_depts = gt_trans_[:, 2]
z_diffs = all_depts[:, None] - all_depts[None, :]
sign_matrix = z_diffs / abs(z_diffs + 0.000000001)
return np.ceil(sign_matrix)
def get_sign_matix_from_depths(all_depts):
z_diffs = all_depts[:, None] - all_depts[None, :]
sign_matrix = z_diffs / abs(z_diffs + 0.000000001)
return np.ceil(sign_matrix)
def upper_tri_masking(A):
m = A.shape[0]
r = np.arange(m)
mask = r[:, None] < r
return A[mask]
def upper_tri_masking_torch(A):
m = A.shape[0]
r = np.arange(m)
mask = r[:, None] < r
return A * torch.Tensor(mask).cuda()
def mask_distance_reverse(masks_person_arr, kpts_int, w, h):
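    # For every (instance mask, projected person) pair, count how many projected
    # joints fall inside the mask, then convert the counts into a cost matrix in
    # which each person's best-matching mask has cost 0.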
scores = []
for i, persons_mask in enumerate(masks_person_arr):
this_mask = []
for smpl_joints_as_idx in kpts_int[..., :2]:
xs, ys = zip(*smpl_joints_as_idx)
xs = np.clip(xs, 0, w - 1)
ys = np.clip(ys, 0, h - 1)
smpl_joints_as_idx = np.array([ys, xs]).T
joint_in_mask = []
for idx in smpl_joints_as_idx:
mask_at_joint = persons_mask[tuple(idx)]
joint_in_mask.append(mask_at_joint)
joint_in_mask = np.array(joint_in_mask).sum()
this_mask.append(joint_in_mask)
# print(joint_in_mask)
this_mask = np.array(this_mask)
scores.append(this_mask)
scores_arr = np.stack(scores, 1)
maxsc = scores_arr.max(1)
cost = maxsc[:, None] - scores_arr
return cost
def vis_proj_joints_t(image, joints_projected, gt_keypoints, do_plot=True):
'''
Args:
image:
        joints_projected: tensor of shape [B, njoints, 3]
gt_keypoints:
Returns:
'''
init_joints = joints_projected.int().cpu()
if gt_keypoints is None:
init_joints = init_joints
else:
conf = gt_keypoints[..., -1].cpu()
vis = conf > 0.0
init_joints = init_joints * vis[..., None]
out_img = plot_joints_cv2(image, init_joints, do_plot, with_text=True)
return out_img
def get_person_depth(d_img, masks_person, i):
depth_img = torch.Tensor(d_img).cuda()
depth_person = masks_person[i] * depth_img
sum = depth_person.sum()
n = masks_person[i].sum()
calc_mean = sum / n
return calc_mean, depth_person
# plot(depth_person.cpu())
def get_depths_from_crops(masked_depth, persons_mask, bboxes):
depths = []
for bbox in bboxes:
x0, y0, x1, y1 = bbox
# crop = image[y0:y1, x0:x1, :]
# plot(crop)
crop = persons_mask[y0:y1, x0:x1]
# plot(crop)
depth_crop = masked_depth[y0:y1, x0:x1]
# plot(depth_crop)
sum = depth_crop.sum()
n = crop.sum()
calc_mean = sum / n
mean_crop_ = calc_mean / 65535
mean_crop_ = 10 * (1 - mean_crop_)
depths.append(mean_crop_)
return depths
def perspective_projection(points, translation, camera_center,
focal_length=1000, rotation=None, return_instrinsics=False):
"""
Taken from Coherent Multiperson
This function computes the perspective projection of a set of points.
Input:
points (bs, N, 3): 3D points
rotation (bs, 3, 3): Camera rotation
translation (bs, 3): Camera translation
focal_length (bs,) or scalar: Focal length
camera_center (bs, 2): Camera center
"""
batch_size = points.shape[0]
# this is identity matrix
rotation = torch.eye(3).unsqueeze(0).repeat(batch_size, 1, 1).to(points.device)
# focal_length has to be fixed
K = torch.zeros([batch_size, 3, 3], device=points.device)
K[:,0,0] = focal_length
K[:,1,1] = focal_length
K[:,2,2] = 1.
K[:,:-1, -1] = camera_center
# Transform points. Rotation and translation. Rotation here is identity as SMPL first rot is global
points = torch.einsum('bij,bkj->bki', rotation, points)
points = points + translation.unsqueeze(1)
# Apply perspective distortion
projected_points = points / points[:,:,-1].unsqueeze(-1)
# Apply camera intrinsics
projected_points = torch.einsum('bij,bkj->bki', K, projected_points)
if return_instrinsics:
return projected_points[:, :, :-1], K
else:
return projected_points[:, :, :-1]
def weak_perspective_projection(points, translation, camera_center,
focal_length=1000, rotation=None):
"""
Taken from Coherent Multiperson
This function computes the perspective projection of a set of points.
Input:
points (bs, N, 3): 3D points
rotation (bs, 3, 3): Camera rotation
translation (bs, 3): Camera translation
focal_length (bs,) or scalar: Focal length
camera_center (bs, 2): Camera center
"""
batch_size = points.shape[0]
# this is identity matrix
rotation = torch.eye(3).unsqueeze(0).repeat(batch_size, 1, 1).to(points.device)
# focal_length has to be fixed
K = torch.zeros([batch_size, 3, 3], device=points.device)
K[:,0,0] = focal_length
K[:,1,1] = focal_length
K[:,2,2] = 1.
K[:,:-1, -1] = camera_center
# Transform points. Rotation and translation. Rotation here is identity as SMPL first rot is global
points = torch.einsum('bij,bkj->bki', rotation, points)
points = points + translation.unsqueeze(1)
# Apply perspective distortion
z_mean = points[:,:,-1].mean()
z_root = points[:,14,-1]
projected_points = points / z_root
# Apply camera intrinsics
projected_points = torch.einsum('bij,bkj->bki', K, projected_points)
return projected_points[:, :, :-1]
def project_joints_to_img(joints3d, img_size, translation, focal_lenght=1000,
return_instrinsics=False):
projected_joints_2d = perspective_projection(joints3d,
translation,
camera_center=img_size / 2,
focal_length=focal_lenght,
return_instrinsics=return_instrinsics
)
return projected_joints_2d
def weak_project_joints_to_img(joints3d, img_size, translation, focal_lenght=1000):
projected_joints_2d = weak_perspective_projection(joints3d,
translation,
camera_center=img_size / 2,
focal_length=focal_lenght,
)
return projected_joints_2d
def read_ankles(folder='./input/annots/', name='test_3djoints_0.json'):
# these joints are in global order from CRMP so ankels=0, 5
fpath = os.path.join(folder, name)
keypoints = read_json(fpath)
j3d = keypoints['joints_3d']
trans = keypoints['translation']
j3d = np.array(j3d)
trans = np.array(trans)
ankles = j3d[[0, 5]]
ankles_translated = j3d[[0, 5]] + trans
save_points(ankles, 'ankles_0.ply')
return ankles, ankles_translated, trans
def read_hips(folder='./input/annots/', name='test_3djoints_0.json'):
# these joints are in global order from CRMP so ankels=0, 5
fpath = os.path.join(folder, name)
keypoints = read_json(fpath)
j3d = keypoints['joints_3d']
trans = keypoints['translation']
j3d = np.array(j3d)
trans = np.array(trans)
ankles = j3d[[2, 3]]
ankles_translated = j3d[[2, 3]] + trans
save_points(ankles, 'ankles_0.ply')
return ankles, ankles_translated, trans
def read_joints(folder='./input/annots/', name='test_3djoints_0.json'):
# these joints are in global order from CRMP so ankels=0, 5
fpath = os.path.join(folder, name)
keypoints = read_json(fpath)
j3d = keypoints['joints_3d']
trans = keypoints['translation']
j3d = np.array(j3d)
trans = np.array(trans)
j3d_translated = j3d+ trans
return j3d, j3d_translated, trans
def read_all_joints(folder='./input/annots/', names=['test_3djoints_0.json']):
# these joints are in global order from CRMP so ankels=0, 5
j3d_all = []
j3d_translated_all = []
trans_all = []
for i, name in enumerate(names):
fpath = os.path.join(folder, name)
keypoints = read_json(fpath)
j3d = keypoints['joints_3d']
trans = keypoints['translation']
j3d = np.array(j3d)
trans = np.array(trans)
j3d_translated = j3d + trans
# save_points(j3d, 'j3d_person_%d.ply' % i)
j3d_all.append(j3d)
j3d_translated_all.append(j3d_translated)
trans_all.append(trans)
j3d_all = np.array(j3d_all)
j3d_translated_all = np.array(j3d_translated_all)
trans_all = np.array(trans_all)
return j3d_all, j3d_translated_all, trans_all | [] |
2024-01-10 | Alex-Neo-Projects/yt-clips | script.py | import os
import sys
import subprocess
import whisper
import openai
def download_youtube(link):
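    # Run the download_youtube.sh helper (yt-dlp) for the given link and return the
    # first line of its stdout, which downstream code uses as the clip's uuid.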
arg = f"bash ./scripts/download_youtube.sh {link}"
proc = subprocess.Popen([arg], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
return out.decode("utf-8").split("\n")[0]
def download_whisper(uuid):
arg = f"bash ./scripts/download_whisper.sh {uuid}"
proc = subprocess.Popen([arg], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
def make_summarization(path, uuid):
text_output = read_text_file(path)
output_summarize_path = f"./summarize/{uuid}.txt"
# Uncomment line under to store summarization to a file
# sys.stdout = open(output_summarize_path, 'a+')
openai.api_key=""
for res in openai.Completion.create(
model="text-davinci-003",
prompt=f"I will provide you with transcript from a video. Gime a me a one sentence TLDR of the transcript. Then extract the most important key points and use them as markdown formatted headings. Give a detailed extractive and abstract summary for each key point. It is important that you are very specific and clear in your response. Conclude with a one paragraph abstract summary of what the video wanted to convince us of. \n\nVideo transcript:\n{text_output}\nSummary:",
max_tokens=1000,
temperature=0,
stream=True
):
sys.stdout.write(res.choices[0].text)
sys.stdout.flush()
print('\n')
def read_text_file(path):
f = open(path, "r")
return f.read()
def create_text_file(path):
f = open(path, "a+")
return f
# def find_num_tokens(path):
# output = read_text_file(path)
# return len(output)//4
# def break_into_chunks(path):
# num_tokens = find_num_tokens(path)
# text_file_output = read_text_file(path)
# output = [text_file_output[i:i+2000] for i in range(0, 5)]
# return output
def main():
# download youtube audio using yt-dlp
print('Downloading Youtube Video')
youtube_audio_uuid = download_youtube('https://www.youtube.com/clip/UgkxmZ_575WLr_y6dkXJ60F9U2a310aB63D6')
# convert to transcript using whisper
print('Converting Video to Transcript')
whisper_audio_path = download_whisper(youtube_audio_uuid)
# use chat-gpt to summarize the whisper output
print('Summarizing transcript:')
make_summarization(f'./whisper-downloads/{youtube_audio_uuid}/{youtube_audio_uuid}.mp3.txt', youtube_audio_uuid)
if __name__ == "__main__":
main() | [
"I will provide you with transcript from a video. Gime a me a one sentence TLDR of the transcript. Then extract the most important key points and use them as markdown formatted headings. Give a detailed extractive and abstract summary for each key point. It is important that you are very specific and clear in your response. Conclude with a one paragraph abstract summary of what the video wanted to convince us of. \n\nVideo transcript:\nPLACEHOLDER\nSummary:"
] |
2024-01-10 | hanseokOh/KtrlF | ktrlf_dataset_pipeline~ktrlf_2_1_target_selection_openai.py | import argparse
import pickle
from tqdm.auto import tqdm
import os
import parmap
import json
from collections import defaultdict, Counter
import logging
import asyncio
from utils.api_request_parallel_processor import process_api_requests_from_file
import time
import copy
import openai
from pathlib import Path
from utils.statistics import print_number_of_targets, print_number_of_mentions
from utils.entity_evidence import crawl_wikipedia_article
def make_entity_evidence_dict(all_task, num_evidence_sent=20):
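    # Crawl evidence text (up to num_evidence_sent sentences) from each entity's
    # Wikipedia article, once per unique entity, and return it keyed by entity name.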
entity_evidence_dict = {}
for dic in tqdm(all_task):
for entity_dic in dic['data']['entity_info']:
entity = entity_dic['entity']
if entity_evidence_dict.get(entity) is not None:
continue
evidence = crawl_wikipedia_article(entity_dic['wikipedia_link'].split('/')[-1], num_evidence_sent)
entity_evidence_dict[entity] = evidence
return entity_evidence_dict
def _need_to_determine(qa_pair, entity, need_to_determine_key_list):
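    # Decide whether the model should (re)judge this entity for the query: always judge
    # when no earlier prediction keys are given; otherwise only judge entities that
    # exactly one of the earlier models predicted (i.e., the models disagree).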
if len(need_to_determine_key_list) == 0:
return True
_entity_cnt_dict = dict(Counter([_ent for key in need_to_determine_key_list for _ent in qa_pair[key]]))
count = _entity_cnt_dict.get(entity)
if count == 1:
return True
return False
def _dump_gpt_input(all_task, input_filepath, gpt_model_name, entity_evidence_dict, num_evidence_sent=10, need_to_determine_key_list=[]):
system_prompt = f"""
You are a QA system to identify the given entity is the answer.
The inputs are entity, query and evidence.
You must follow this requirements.
Requirements:
- Output have to be either 'true' or 'false'
- Do not say anything except 'true' or 'false'
The example is as below.
Entity: Google
Query: Find all IT companies in Computer industry
Evidence: Google LLC (/ˈɡuːɡəl/ (listen)) is an American multinational technology company focusing on artificial intelligence,[9] online advertising, search engine technology, cloud computing, computer software, quantum computing, e-commerce, and consumer electronics. It has often been considered "the most powerful company in the world"[10] and as one of the world's most valuable brands due to its market dominance, data collection, and technological advantages in the field of artificial intelligence.[11][12][13] Its parent company Alphabet is often considered one of the Big Five American information technology companies, alongside Amazon, Apple, Meta, and Microsoft.
Output: true
Entity: Samsung
Query: Find all companies in United States
Evidence: Samsung Group,[3] or simply Samsung (Korean: 삼성; RR: samseong [samsʌŋ]) (stylized as SΛMSUNG), is a South Korean multinational manufacturing conglomerate headquartered in Samsung Town, Seoul, South Korea.[1] It comprises numerous affiliated businesses,[1] most of them united under the Samsung brand, and is the largest South Korean chaebol (business conglomerate). As of 2020, Samsung has the eighth highest global brand value.
Output: false
"""
odqa_input_format = []
for dic in all_task:
_entity_set = dict.fromkeys([tag_dic['entity'] for tag_dic in dic['data']['entity_info']])
for q_idx,qa_pair in enumerate(dic['data']['qa_pairs']):
odqa_input_format += [
{
'id': f"{dic['id']}[SEP]q{q_idx}[SEP]e{ent_idx}",
'question': qa_pair['question'],
'entity': entity,
'evidence': ' '.join(entity_evidence_dict[entity][:num_evidence_sent])
} for ent_idx, entity in enumerate(_entity_set) if _need_to_determine(qa_pair, entity, need_to_determine_key_list)]
all_input_format = []
for dic in odqa_input_format:
all_input_format.append({
"model": gpt_model_name,
'messages': [
{'role': 'system', 'content': system_prompt.strip()},
{'role': 'user', 'content': f"Entity: {dic['entity']}\nQuery: {dic['question']}\nEvidence: {dic['evidence']}\nOutput: "}
],
'user': dic['id']
})
with open(input_filepath, 'w') as f:
for dic in all_input_format:
f.write(json.dumps(dic)+'\n')
def _load_gpt_output(output_filepath):
tup_list = []
with open(output_filepath) as f:
for line in f:
input_, output = json.loads(line)
input_id = input_['user']
try:
generated_answer_str = output['choices'][0]['message']['content']
except:
generated_answer_str = ""
tup_list.append({'id': input_id, 'output': generated_answer_str})
# sort by original order
generated_output_list = sorted(tup_list, key=lambda dic: dic['id'])
return generated_output_list
def _parse_gpt_output(all_task, generated_output_list, to_answer_key):
_output_idx_mapper = defaultdict(lambda: defaultdict(list))
for dic in generated_output_list:
output = dic['output'].lower().strip()
if output != 'true':
continue
original_id, q_idx, ent_idx = dic['id'].split('[SEP]')
q_idx = int(q_idx[1:])
ent_idx = int(ent_idx[1:])
_output_idx_mapper[original_id][q_idx].append(ent_idx)
for dic in all_task:
id = dic['id']
_entity_set = list(dict.fromkeys([tag_dic['entity'] for tag_dic in dic['data']['entity_info']]))
for q_idx, qa_pair in enumerate(dic['data']['qa_pairs']):
odqa_gpt_preds = [_entity_set[ent_idx] for ent_idx in _output_idx_mapper[id][q_idx]]
qa_pair[to_answer_key] = odqa_gpt_preds
def select_target_using_model(all_task, entity_evidence_dict, gpt_model_name, request_url, api_key, to_answer_key, need_to_determine_key_list):
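    # Dump one chat request per (document, question, candidate entity) triple, run them
    # through the rate-limited parallel OpenAI processor, then parse the true/false
    # outputs back into each qa_pair under `to_answer_key`.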
Path("./dump/.gpt_format").mkdir(parents=True, exist_ok=True)
_timestamp = str(int(time.time()))
input_format_path = f'./dump/.gpt_format/_openai_{_timestamp}_input_format.jsonl'
output_format_path = f'./dump/.gpt_format/_openai_{_timestamp}_output_format.jsonl'
max_attempts = 10
_dump_gpt_input(all_task, input_format_path, gpt_model_name, entity_evidence_dict, num_evidence_sent=10, need_to_determine_key_list=need_to_determine_key_list)
asyncio.run(
process_api_requests_from_file(
requests_filepath=input_format_path,
save_filepath=output_format_path,
request_url=request_url,
api_key=api_key,
max_requests_per_minute=float(3_000 * 0.5),
max_tokens_per_minute=float(250_000 * 0.5),
token_encoding_name="cl100k_base",
max_attempts=int(max_attempts),
logging_level=int(logging.INFO),
)
)
generated_output = _load_gpt_output(output_format_path)
_parse_gpt_output(all_task, generated_output, to_answer_key)
def clear_query_with_empty_target(all_task, answer_key):
new_all_task = []
for dic in all_task:
_new_qa_pairs = [qa_dic for qa_dic in dic['data']['qa_pairs'] if len(qa_dic[answer_key])>0]
_new_dic = copy.deepcopy(dic)
_new_dic['data']['qa_pairs'] = _new_qa_pairs
new_all_task.append(_new_dic)
return new_all_task
def clear_doc_with_emtpy_query(all_task):
new_all_task = []
for dic in all_task:
if len(dic['data']['qa_pairs'])==0:
continue
new_all_task.append(copy.deepcopy(dic))
return new_all_task
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--entity_evidence_cache_path", type=str, default=None)
parser.add_argument("--input_path", type=str, required=True)
parser.add_argument("--openai_model_name", type=str, choices=['gpt-3.5-turbo-0613','gpt-4-0613'])
parser.add_argument("--openai_request_url", type=str, default="https://api.openai.com/v1/chat/completions")
parser.add_argument("--openai_api_key", type=str, required=True)
parser.add_argument("--output_path", type=str, required=True)
args = parser.parse_args()
need_to_determine_key_list = ['_llama2_query_generation_preds', 'chatgpt_preds']
to_answer_key = 'gpt4_preds'
with open(args.input_path, 'rb') as f:
all_task = pickle.load(f)
# get evidence
if args.entity_evidence_cache_path:
with open(args.entity_evidence_cache_path, 'rb') as f:
entity_evidence_dict = pickle.load(f)
else:
_num_proc = os.cpu_count()
_batch_size = 70
_splited_all_task = [all_task[x:x+_batch_size] for x in range(0, len(all_task), _batch_size)]
results = parmap.map(make_entity_evidence_dict, _splited_all_task, pm_pbar=True, pm_processes=_num_proc)
entity_evidence_dict = {k:v for dic in results for k,v in dic.items()}
with open('./dump/entity_evidence_dict.pickle', 'wb') as f:
pickle.dump(entity_evidence_dict, f)
select_target_using_model(all_task, entity_evidence_dict, args.openai_model_name, args.openai_request_url, args.openai_api_key, to_answer_key, need_to_determine_key_list)
print(f"[Num. of Targets] Final:")
print(f"{print_number_of_targets(all_task, to_answer_key)}")
with open(args.output_path, 'wb') as f:
pickle.dump(all_task, f)
| [
"\nYou are a QA system to identify the given entity is the answer.\nThe inputs are entity, query and evidence.\n\nYou must follow this requirements.\nRequirements:\n- Output have to be either 'true' or 'false'\n- Do not say anything except 'true' or 'false'\n\nThe example is as below.\n\nEntity: Google\nQuery: Find all IT companies in Computer industry\nEvidence: Google LLC (/ˈɡuːɡəl/ (listen)) is an American multinational technology company focusing on artificial intelligence,[9] online advertising, search engine technology, cloud computing, computer software, quantum computing, e-commerce, and consumer electronics. It has often been considered \"the most powerful company in the world\"[10] and as one of the world's most valuable brands due to its market dominance, data collection, and technological advantages in the field of artificial intelligence.[11][12][13] Its parent company Alphabet is often considered one of the Big Five American information technology companies, alongside Amazon, Apple, Meta, and Microsoft.\nOutput: true\n\nEntity: Samsung\nQuery: Find all companies in United States\nEvidence: Samsung Group,[3] or simply Samsung (Korean: 삼성; RR: samseong [samsʌŋ]) (stylized as SΛMSUNG), is a South Korean multinational manufacturing conglomerate headquartered in Samsung Town, Seoul, South Korea.[1] It comprises numerous affiliated businesses,[1] most of them united under the Samsung brand, and is the largest South Korean chaebol (business conglomerate). As of 2020, Samsung has the eighth highest global brand value.\nOutput: false\n",
"You are a QA system to identify the given entity is the answer.\nThe inputs are entity, query and evidence.\n\nYou must follow this requirements.\nRequirements:\n- Output have to be either 'true' or 'false'\n- Do not say anything except 'true' or 'false'\n\nThe example is as below.\n\nEntity: Google\nQuery: Find all IT companies in Computer industry\nEvidence: Google LLC (/ˈɡuːɡəl/ (listen)) is an American multinational technology company focusing on artificial intelligence,[9] online advertising, search engine technology, cloud computing, computer software, quantum computing, e-commerce, and consumer electronics. It has often been considered \"the most powerful company in the world\"[10] and as one of the world's most valuable brands due to its market dominance, data collection, and technological advantages in the field of artificial intelligence.[11][12][13] Its parent company Alphabet is often considered one of the Big Five American information technology companies, alongside Amazon, Apple, Meta, and Microsoft.\nOutput: true\n\nEntity: Samsung\nQuery: Find all companies in United States\nEvidence: Samsung Group,[3] or simply Samsung (Korean: 삼성; RR: samseong [samsʌŋ]) (stylized as SΛMSUNG), is a South Korean multinational manufacturing conglomerate headquartered in Samsung Town, Seoul, South Korea.[1] It comprises numerous affiliated businesses,[1] most of them united under the Samsung brand, and is the largest South Korean chaebol (business conglomerate). As of 2020, Samsung has the eighth highest global brand value.\nOutput: false",
"Entity: PLACEHOLDER\nQuery: PLACEHOLDER\nEvidence: PLACEHOLDER\nOutput: "
] |
2024-01-10 | Matanatr96/KFL | get_scores.py | import argparse
from collections import defaultdict
import openai
import pandas as pd
from sleeper_wrapper import League
from dotenv import dotenv_values
config = dotenv_values(".env")
openai.api_key = config['OPENAI_API_KEY']
league = League(config['LEAGUE_ID'])
roster_conversions = ['Mattapalli', 'Komaragiri', 'Idate', 'Bada', 'Digby', 'Nethi', 'Rattan', 'Upadhyaya', 'Aireddy',
'Hansen', 'Le', 'Pandya']
parser = argparse.ArgumentParser()
parser.add_argument('-w', '--week', required=True)
args = parser.parse_args()
def get_matchups(week: int) -> (defaultdict, pd.DataFrame):
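    # Fetch the week's matchups from Sleeper, group (roster_id, points) by matchup id,
    # and build a DataFrame ranking all 12 rosters by score for the week.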
all_matchups = league.get_matchups(week)
matchups = defaultdict(list)
ranks = []
for matchup in all_matchups:
matchup_id = matchup['matchup_id']
matchups[matchup_id].append((matchup['roster_id'], matchup['points']))
ranks.append([matchup['roster_id'], matchup['points']])
ranks_df = pd.DataFrame(ranks, columns=['Id', 'Score']).sort_values(by='Score', ascending=False).set_index('Id')
ranks_df['Rank'] = range(1, 13)
return matchups, ranks_df
def get_scores(week: int) -> list:
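    # Flatten each matchup into two rows (one per team) containing season, owner name,
    # week, win flag, own/opponent score, opponent name, point differential, and weekly rank.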
matchups, ranks = get_matchups(week)
all_scores = []
for j in matchups.values():
roster_id1 = j[0][0]
roster_id2 = j[1][0]
one_score = j[0][1]
two_score = j[1][1]
one_diff = round(one_score - two_score, 2)
two_diff = round(two_score - one_score, 2)
all_scores.extend(
(
[
2023,
roster_conversions[roster_id1 - 1],
week,
1 if one_score > two_score else 0,
one_score,
two_score,
roster_conversions[roster_id2 - 1],
one_diff,
ranks.loc[roster_id1, 'Rank'],
],
[
2023,
roster_conversions[roster_id2 - 1],
week,
1 if one_score < two_score else 0,
two_score,
one_score,
roster_conversions[roster_id1 - 1],
two_diff,
ranks.loc[roster_id2, 'Rank'],
],
)
)
return all_scores
def chat_gpt_format(scores: list) -> str:
completion = openai.ChatCompletion.create(model="gpt-3.5-turbo",
messages=[{"role": "system", "content": f"""Convert the following array
of arrays into text I can copy into excel with newlines after each line
and remove the quotations: {scores}"""}])
return completion.choices[0].message.content
if __name__ == '__main__':
df = get_scores(args.week)
formatted_scores = chat_gpt_format(df)
print(formatted_scores)
| [
"Convert the following array \n of arrays into text I can copy into excel with newlines after each line \n and remove the quotations: PLACEHOLDER"
] |
2024-01-10 | borisdayma/datasets | datasets~openwebtext~openwebtext.py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""
import os
import re
from itertools import chain
import datasets
_CITATION = """\
@misc{Gokaslan2019OpenWeb,
title={OpenWebText Corpus},
author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
howpublished{\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
year={2019}
}
"""
_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""
_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
class Openwebtext(datasets.GeneratorBasedBuilder):
"""The Open WebText dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
description="Plain text",
version=datasets.Version("1.0.0"),
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({"text": datasets.Value("string")}),
homepage="https://skylion007.github.io/OpenWebTextCorpus/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_URL)
owt_dir = os.path.join(dl_dir, "openwebtext")
subset_xzs = [
os.path.join(owt_dir, file_name)
for file_name in sorted(os.listdir(owt_dir))
if file_name.endswith("xz") # filter out ...xz.lock
]
ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
nested_txt_files = [
[
os.path.join(ex_dir, txt_file_name)
for txt_file_name in sorted(os.listdir(ex_dir))
if txt_file_name.endswith("txt")
]
for ex_dir in ex_dirs
]
txt_files = chain(*nested_txt_files)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
]
def _generate_examples(self, txt_files):
"""Yields examples."""
for idx, filepath in enumerate(txt_files):
with open(filepath, encoding="utf-8") as f:
yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
| [] |
2024-01-10 | amitbasunias/uniai | writer~beta.py | import openai
OPENAI_API_KEY = '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'
openai.api_key = OPENAI_API_KEY
def headline(headprompt):
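    # Generate completion text for the given prompt with the OpenAI Completions API
    # and return the first choice's text.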
response = openai.Completion.create(
engine="text-davinci-002",
prompt="{}" .format(headprompt),
temperature=0.9,
max_tokens=3400,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
if 'choices' in response:
answer= response['choices'][0]['text']
return answer
| [] |
2024-01-10 | benfield97/news_analyzer | fn_calling.py | import openai
import requests
from bs4 import BeautifulSoup
from dotenv import load_dotenv
import os
import re
load_dotenv()
openai.api_key = os.getenv('OPENAI_API_KEY')
def format_list(input_string):
# Split the string into a list of phrases
phrases = input_string.split(',')
# Remove leading/trailing whitespace, quotation marks and final punctuation
cleaned_phrases = [re.sub(r'^["\s]+|["\s]+$|[.,;:!?"]$', '', phrase) for phrase in phrases]
return cleaned_phrases
def get_article_text(input, format = 'url'):
# Send a request to the website
if format == 'url':
r = requests.get(input)
elif format == 'html':
r = input
# Parse HTML and save to BeautifulSoup object
soup = BeautifulSoup(r.text, "html.parser")
# Find article text and combine it into one string
article_text = ' '.join([p.text for p in soup.find_all('p')])
return article_text
def article_detection(article_text):
message = [{"role": "system", "content": "You are an expert on journalism."}]
prompt = f"""
Please assess the following body of text, which is delimited by triple backticks.
Determine if you believe this is an article, as in a piece of writing included with others in a newspaper, magazine, or other print or online publication.
If it is an article, format your response by only printing: True
If it is not an article, format your response by only printing: False
Article: ```{article_text}```
"""
response = get_completion(prompt, message)
if 'True' in response:
return True
else:
return False
def get_completion(prompt, messages, model=4):
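    # Append the user prompt to the shared message history, call the chat API, record
    # the assistant reply back into the history, and return the reply text.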
messages.append({"role": "user", "content": prompt})
response = openai.ChatCompletion.create(
model=f"gpt-{model}",
messages= messages
)
messages.append({"role": "system", "content": response['choices'][0]['message']['content']})
return response['choices'][0]['message']['content']
def get_emotive_list(article_text, messages):
prompt = f"""
Extract all examples of emotive language used in the
following article, which is delimited by triple backticks.
Format your response as a list of items separated by commas.
Article: '''{article_text}'''
"""
response = get_completion(prompt, messages)
response = format_list(response)
return response
def get_emotive_rating(messages):
prompt = """
Based strictly on the presence of emotive language, can you rate on a scale of 1-10 how emotive the article is.
Please format your response as an integer only
"""
response = get_completion(prompt, messages)
try:
return int(response)
except:
prompt = """
Please try again and format this response as an integer only.
"""
response = get_completion(prompt, messages)
return int(response)
def get_political_bias_list(article_text, messages):
prompt = f"""
You are evaluating in which ways the article below, delimited by triple backticks, is politically biased, specifically, biased to
either the left-wing or the right-wing.
Extract all examples of politically biased phrases used in the article.
Format your response as a list of items separated by commas.
Article: ```{article_text}```
"""
response = get_completion(prompt, messages)
response = format_list(response)
return response
def get_political_bias_rating(messages):
prompt = """
You are evaluating in which political direction the previous article is biased.
On a scale from 1 (strongly left-wing) to 10 (strongly right-wing) can you rate the article for the position of it's political bias.
Please format your response as an integer only.
"""
response = get_completion(prompt, messages)
try:
return int(response)
except:
prompt = """
Please try again and format this response as an integer only.
"""
response = get_completion(prompt, messages)
return int(response)
def get_establishment_list(article_text, messages):
prompt = f"""
You are evaluating in which ways the article below, delimited by triple backticks, is biased in a manner that is either pro-establishment or anti-establishment.
Extract all examples of politically biased phrases used in the article.
Format your response as a list of items separated by commas.
Article: ```{article_text}```
"""
response = get_completion(prompt, messages)
response = format_list(response)
return response
def get_establishment_bias_rating(messages):
prompt = """
You are evaluating in which direction the previous article is biased, in regards to its stance on the establishment.
On a scale from 1 (strongly anti-establishment) to 10 (strongly pro-establishment) can you rate the article for the position of it's establishment bias.
Please format your response as an integer only.
"""
response = get_completion(prompt, messages)
try:
return int(response)
except:
prompt = """
Please try again and format this response as an integer only.
"""
response = get_completion(prompt, messages)
return int(response)
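# Note: the four lines below run at module import time and make live OpenAI calls; run() below performs the same analysis on demand for a given URL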
article = get_article_text('https://www.foxnews.com/politics/biden-admin-quietly-reverses-trump-era-rule-bans-transporting-fossil-fuels-train')
is_article = article_detection(article)
emo_msgs = [{"role": "system", "content": "You are an expert on journalism. You specialise in assessing how emotive language is used to position readers"}]
emotive_list = get_emotive_list(article, emo_msgs)
def run(url):
article = get_article_text(url)
is_article = article_detection(article)
emo_msgs = [{"role": "system", "content": "You are an expert on journalism. You specialise in assessing how emotive language is used to position readers"}]
emotive_list = get_emotive_list(article, emo_msgs)
emotive_rating = get_emotive_rating(emo_msgs)
pol_msgs = [{"role": "system", "content": "You are an expert on journalism and politics. You specialise in assessing the presence of political bias in articles."}]
political_list = get_political_bias_list(article, pol_msgs)
political_rating = get_political_bias_rating(pol_msgs)
est_msgs = [{"role": "system", "content": "You are an expert on journalism and politics. You specialise in assessing the presence of pro or anti establishment bias in articles."}]
establishment_list = get_establishment_list(article, est_msgs)
establishment_bias_rating = get_establishment_bias_rating(est_msgs)
return {
'is_article': is_article,
'emotive_list': emotive_list,
'emotive_rating': emotive_rating,
'political_list': political_list,
'political_rating': political_rating,
'establishment_list': establishment_list,
'establishment_bias_rating': establishment_bias_rating
}
| [
"\n You are evaluating in which political direction the previous article is biased.\n \n On a scale from 1 (strongly left-wing) to 10 (strongly right-wing) can you rate the article for the position of it's political bias.\n\n Please format your response as an integer only.\n ",
"\n Please assess the following body of text, which is delimited by triple backticks.\n\n Determine if you believe this is an article, as in a piece of writing included with others in a newspaper, magazine, or other print or online publication.\n\n If it is an article, format your response by only printing: True\n If it is not an article, format your response by only printing: False\n\n Article: ```PLACEHOLDER```\n ",
"You are an expert on journalism and politics. You specialise in assessing the presence of pro or anti establishment bias in articles.",
"\n Based strictly on the presence of emotive language, can you rate on a scale of 1-10 how emotive the article is.\n \n Please format your response as an integer only\n ",
"You are an expert on journalism. You specialise in assessing how emotive language is used to position readers",
"\n Please try again and format this response as an integer only.\n ",
"\n You are evaluating in which ways the article below, delimited by triple backticks, is politically biased, specifically, biased to \n either the left-wing or the right-wing.\n \n Extract all examples of politically biased phrases used in the article.\n\n Format your response as a list of items separated by commas.\n \n Article: ```PLACEHOLDER```\n ",
"\n You are evaluating in which direction the previous article is biased, in regards to its stance on the establishment.\n\n On a scale from 1 (strongly anti-establishment) to 10 (strongly pro-establishment) can you rate the article for the position of it's establishment bias.\n\n Please format your response as an integer only.\n ",
"You are an expert on journalism and politics. You specialise in assessing the presence of political bias in articles.",
"\n You are evaluating in which ways the article below, delimited by triple backticks, is biased in a manner that is either pro-establishment or anti-establishment.\n\n Extract all examples of politically biased phrases used in the article.\n\n Format your response as a list of items separated by commas.\n \n Article: ```PLACEHOLDER```\n ",
"\n Extract all examples of emotive language used in the \n following article, which is delimited by triple backticks.\n\n Format your response as a list of items separated by commas.\n\n Article: '''PLACEHOLDER'''\n ",
"You are an expert on journalism.",
"content"
] |
2024-01-10 | benfield97/news_analyzer | guidance.py | import openai
import requests
from bs4 import BeautifulSoup
from dotenv import load_dotenv
import os
import re
load_dotenv()
openai.api_key = os.getenv('OPENAI_API_KEY')
def format_list(input_string):
# Split the string into a list of phrases
phrases = input_string.split(',')
# Remove leading/trailing whitespace, quotation marks and final punctuation
cleaned_phrases = [re.sub(r'^["\s]+|["\s]+$|[.,;:!?"]$', '', phrase) for phrase in phrases]
return cleaned_phrases
def get_article_text(input, format = 'url'):
    # Send a request to the website when given a URL; otherwise treat the input as a raw HTML string
    if format == 'url':
        html = requests.get(input).text
    elif format == 'html':
        html = input
    # Parse HTML and save to BeautifulSoup object
    soup = BeautifulSoup(html, "html.parser")
# Find article text and combine it into one string
article_text = ' '.join([p.text for p in soup.find_all('p')])
return article_text
def article_detection(article_text):
message = [{"role": "system", "content": "You are an expert on journalism."}]
prompt = f"""
Please assess the following body of text, which is delimited by triple backticks.
Determine if you believe this is an article, as in a piece of writing included with others in a newspaper, magazine, or other print or online publication.
If it is an article, format your response by only printing: True
If it is not an article, format your response by only printing: False
Article: ```{article_text}```
"""
response = get_completion(prompt, message)
if 'True' in response:
return True
else:
return False
def get_completion(prompt, messages, model=4):
messages.append({"role": "user", "content": prompt})
response = openai.ChatCompletion.create(
model=f"gpt-{model}",
messages= messages
)
messages.append({"role": "system", "content": response['choices'][0]['message']['content']})
return response['choices'][0]['message']['content']
def get_emotive_list(article_text, messages):
prompt = f"""
Extract all examples of emotive language used in the
following article, which is delimited by triple backticks.
Format your response as a list of items separated by commas.
Article: '''{article_text}'''
"""
response = get_completion(prompt, messages)
response = format_list(response)
return response
def get_emotive_rating(messages):
prompt = """
Based strictly on the presence of emotive language, can you rate on a scale of 1-10 how emotive the article is.
Please format your response as an integer only
"""
response = get_completion(prompt, messages)
try:
return int(response)
except:
prompt = """
Please try again and format this response as an integer only.
"""
response = get_completion(prompt, messages)
return int(response)
def get_political_bias_list(article_text, messages):
prompt = f"""
You are evaluating in which ways the article below, delimited by triple backticks, is politically biased, specifically, biased to
either the left-wing or the right-wing.
Extract all examples of politically biased phrases used in the article.
Format your response as a list of items separated by commas.
Article: ```{article_text}```
"""
response = get_completion(prompt, messages)
response = format_list(response)
return response
def get_political_bias_rating(messages):
prompt = """
You are evaluating in which political direction the previous article is biased.
On a scale from 1 (strongly left-wing) to 10 (strongly right-wing) can you rate the article for the position of it's political bias.
Please format your response as an integer only.
"""
response = get_completion(prompt, messages)
try:
return int(response)
except:
prompt = """
Please try again and format this response as an integer only.
"""
response = get_completion(prompt, messages)
return int(response)
def get_establishment_list(article_text, messages):
prompt = f"""
You are evaluating in which ways the article below, delimited by triple backticks, is biased in a manner that is either pro-establishment or anti-establishment.
Extract all examples of politically biased phrases used in the article.
Format your response as a list of items separated by commas.
Article: ```{article_text}```
"""
response = get_completion(prompt, messages)
response = format_list(response)
return response
def get_establishment_bias_rating(messages):
prompt = """
You are evaluating in which direction the previous article is biased, in regards to its stance on the establishment.
On a scale from 1 (strongly anti-establishment) to 10 (strongly pro-establishment) can you rate the article for the position of it's establishment bias.
Please format your response as an integer only.
"""
response = get_completion(prompt, messages)
try:
return int(response)
except:
prompt = """
Please try again and format this response as an integer only.
"""
response = get_completion(prompt, messages)
return int(response)
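# Note: the four lines below run at module import time and make live OpenAI calls; run() below performs the same analysis on demand for a given URL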
article = get_article_text('https://www.foxnews.com/politics/biden-admin-quietly-reverses-trump-era-rule-bans-transporting-fossil-fuels-train')
is_article = article_detection(article)
emo_msgs = [{"role": "system", "content": "You are an expert on journalism. You specialise in assessing how emotive language is used to position readers"}]
emotive_list = get_emotive_list(article, emo_msgs)
def run(url):
article = get_article_text(url)
is_article = article_detection(article)
emo_msgs = [{"role": "system", "content": "You are an expert on journalism. You specialise in assessing how emotive language is used to position readers"}]
emotive_list = get_emotive_list(article, emo_msgs)
emotive_rating = get_emotive_rating(emo_msgs)
pol_msgs = [{"role": "system", "content": "You are an expert on journalism and politics. You specialise in assessing the presence of political bias in articles."}]
political_list = get_political_bias_list(article, pol_msgs)
political_rating = get_political_bias_rating(pol_msgs)
est_msgs = [{"role": "system", "content": "You are an expert on journalism and politics. You specialise in assessing the presence of pro or anti establishment bias in articles."}]
establishment_list = get_establishment_list(article, est_msgs)
establishment_bias_rating = get_establishment_bias_rating(est_msgs)
return {
'is_article': is_article,
'emotive_list': emotive_list,
'emotive_rating': emotive_rating,
'political_list': political_list,
'political_rating': political_rating,
'establishment_list': establishment_list,
'establishment_bias_rating': establishment_bias_rating
}
| [
"\n You are evaluating in which political direction the previous article is biased.\n \n On a scale from 1 (strongly left-wing) to 10 (strongly right-wing) can you rate the article for the position of it's political bias.\n\n Please format your response as an integer only.\n ",
"\n Please assess the following body of text, which is delimited by triple backticks.\n\n Determine if you believe this is an article, as in a piece of writing included with others in a newspaper, magazine, or other print or online publication.\n\n If it is an article, format your response by only printing: True\n If it is not an article, format your response by only printing: False\n\n Article: ```PLACEHOLDER```\n ",
"You are an expert on journalism and politics. You specialise in assessing the presence of pro or anti establishment bias in articles.",
"\n Based strictly on the presence of emotive language, can you rate on a scale of 1-10 how emotive the article is.\n \n Please format your response as an integer only\n ",
"You are an expert on journalism. You specialise in assessing how emotive language is used to position readers",
"\n Please try again and format this response as an integer only.\n ",
"\n You are evaluating in which direction the previous article is biased, in regards to its stance on the establishment.\n\n On a scale from 1 (strongly anti-establishment) to 10 (strongly pro-establishment) can you rate the article for the position of it's establishment bias.\n\n Please format your response as an integer only.\n ",
"\n You are evaluating in which ways the article below, delimited by triple backticks, is politically biased, specifically, biased to \n either the left-wing or the right-wing.\n \n Extract all examples of politically biased phrases used in the article.\n\n Format your response as a list of items separated by commas.\n \n Article: ```PLACEHOLDER```\n ",
"You are an expert on journalism and politics. You specialise in assessing the presence of political bias in articles.",
"\n You are evaluating in which ways the article below, delimited by triple backticks, is biased in a manner that is either pro-establishment or anti-establishment.\n\n Extract all examples of politically biased phrases used in the article.\n\n Format your response as a list of items separated by commas.\n \n Article: ```PLACEHOLDER```\n ",
"\n Extract all examples of emotive language used in the \n following article, which is delimited by triple backticks.\n\n Format your response as a list of items separated by commas.\n\n Article: '''PLACEHOLDER'''\n ",
"You are an expert on journalism.",
"content"
] |
2024-01-10 | achilela/chatbots | Andro_GPT_Llama2.py | from typing import List, Union
from langchain.vectorstores.chroma import Chroma
from langchain.callbacks import get_openai_callback
from langchain.schema import (SystemMessage, HumanMessage, AIMessage)
from langchain.llms import LlamaCpp
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
import streamlit as st
from langchain.schema import Memory as StreamlitChatMessageHistory
from langchain.llms import CTransformers
from langchain.prompts import ChatPromptTemplate
from langchain.prompts import PromptTemplate
from langchain.prompts.chat import SystemMessagePromptTemplate
from time import sleep
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import DeepLake, VectorStore
from streamlit.runtime.uploaded_file_manager import UploadedFile
import warnings
from langchain.memory import ConversationBufferWindowMemory
from langchain import PromptTemplate, LLMChain
import os
import tempfile
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.callbacks.base import BaseCallbackHandler
from langchain.chains import ConversationalRetrievalChain
from langchain.vectorstores import DocArrayInMemorySearch
from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTextSplitter
from langchain.document_loaders import (PyPDFLoader, Docx2txtLoader, CSVLoader,
DirectoryLoader,
GitLoader,
NotebookLoader,
OnlinePDFLoader,
PythonLoader,
TextLoader,
UnstructuredFileLoader,
UnstructuredHTMLLoader,
UnstructuredPDFLoader,
UnstructuredWordDocumentLoader,
WebBaseLoader,
)
from langchain.vectorstores import FAISS
from dotenv import load_dotenv
load_dotenv()
warnings.filterwarnings("ignore", category=UserWarning)
APP_NAME = "ValonyLabsz"
MODEL = "gpt-3.5-turbo"
PAGE_ICON = ":rocket:"
st.set_option("client.showErrorDetails", True)
st.set_page_config(
page_title=APP_NAME, page_icon=PAGE_ICON, initial_sidebar_state="expanded"
)
av_us = 'https://raw.githubusercontent.com/achilela/main/Ataliba'
av_ass = 'https://raw.githubusercontent.com/achilela/main/Robot'
st.title(":rocket: Agent Lirio :rocket:")
st.markdown("I am your Technical Assistant ready to do all of the leg work on your documents, emails, procedures, etc.\
I am capable to extract relevant info and domain knowledge!")
@st.cache_resource(ttl="1h")
def init_page() -> None:
st.sidebar.title("Options")
def init_messages() -> None:
clear_button = st.sidebar.button("Clear Conversation", key="clear")
if clear_button or "messages" not in st.session_state:
st.session_state.messages = [
]
st.session_state.costs = []
user_query = st.chat_input(placeholder="Ask me Anything!")
def select_llm() -> Union[ChatOpenAI, LlamaCpp]:
model_name = st.sidebar.radio("Choose LLM:", ("gpt-3.5-turbo-0613", "gpt-4", "llama-2"), key="llm_choice")
temperature = st.sidebar.slider("Temperature:", min_value=0.0,
max_value=1.0, value=0.0, step=0.01)
if model_name.startswith("gpt-"):
return ChatOpenAI(temperature=temperature, model_name=model_name, streaming=True
)
elif model_name.startswith("llama-2-"):
callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
return CTransformers(model="/home/ataliba/LLM_Workshop/Experimental_Lama_QA_Retrieval/models/Wizard-Vicuna-13B-Uncensored.ggmlv3.q5_1.bin",
model_type="llama",
max_new_tokens=512,
temperature=temperature)
#openai_api_key = os.getenv("OPENAI_API_KEY")
#openai.api_key = os.getenv("OPENAI_API_KEY")
#openai_api_key = os.environ[OPENAI_API_KEY]
#openai_api_key = "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"
def configure_qa_chain(uploaded_files):
docs = []
if uploaded_files:
if "processed_data" not in st.session_state:
documents = []
for file in uploaded_files:
temp_filepath = os.path.join(os.getcwd(), file.name) # os.path.join(temp_dir.name, file.name)
with open(temp_filepath, "wb") as f:
f.write(file.getvalue())
if temp_filepath.endswith((".pdf", ".docx", ".txt")): #if temp_filepath.lower() == (".pdf", ".docx", ".txt"):
loader = UnstructuredFileLoader(temp_filepath)
loaded_documents = loader.load() #loader = PyPDFLoader(temp_filepath)
docs.extend(loaded_documents) #loader.load_and_split())
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=200)
splits = text_splitter.split_documents(docs)
            embeddings = OpenAIEmbeddings()  # the API key is picked up from the OPENAI_API_KEY environment variable loaded by load_dotenv()
# storing embeddings in the vector store
vectorstore = FAISS.from_documents(splits, embeddings)
persist_directory = "/home/ataliba/LLM_Workshop/Experimental_Lama_QA_Retrieval/db/"
memory = ConversationBufferMemory(
memory_key="chat_history", output_key='answer', return_messages=False)
retriever = vectorstore.as_retriever(search_type="mmr", search_kwargs={"k": 2, "fetch_k": 4})
return retriever
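# Callback that streams newly generated tokens into a Streamlit placeholder, ignoring the run whose prompt starts with "Human"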
class StreamHandler(BaseCallbackHandler):
def __init__(self, container: st.delta_generator.DeltaGenerator, initial_text: str = ""):
self.container = container
self.text = initial_text
self.run_id_ignore_token = None
def on_llm_start(self, serialized: dict, prompts: list, **kwargs):
if prompts[0].startswith("Human"):
self.run_id_ignore_token = kwargs.get("run_id")
def on_llm_new_token(self, token: str, **kwargs) -> None:
if self.run_id_ignore_token == kwargs.get("run_id", False):
return
self.text += token
self.container.markdown(self.text)
class PrintRetrievalHandler(BaseCallbackHandler):
def __init__(self, container):
self.container = container.expander("Context Retrieval")
def on_retriever_start(self, query: str): #def on_retriever_start(self, query: str, **kwargs):
self.container.write(f"**Question:** {query}")
def on_retriever_end(self, documents, **kwargs):
for idx, doc in enumerate(documents):
source = os.path.basename(doc.metadata["source"])
self.container.write(f"**Document {idx} from {source}**")
self.container.markdown(doc.page_content)
uploaded_files = st.sidebar.file_uploader(
label="Upload your files", accept_multiple_files=True,type=None
)
if not uploaded_files:
st.info("Please upload your documents to continue.")
st.stop()
retriever = configure_qa_chain(uploaded_files)
memory = ConversationBufferMemory(memory_key="chat_history", output_key='answer', return_messages=True)
llm = select_llm() # model_name="gpt-3.5-turbo"
qa_chain = ConversationalRetrievalChain.from_llm(
llm, retriever=retriever, memory=memory) #retriever=retriever, memory=memory)#, verbose=False
if "messages" not in st.session_state or st.sidebar.button("Clear message history"):
st.session_state["messages"] = [{"role": "assistant", "content": "Please let me know how can I be of a help today?"}]
for msg in st.session_state.messages:
if msg["role"] == "user":
with st.chat_message(msg["role"]): #,avatar=av_us):
st.markdown(msg["content"])
else:
with st.chat_message(msg["role"]): #,avatar=av_ass):
st.markdown(msg["content"])
if user_query: #
st.session_state.messages.append({"role": "user", "content": user_query})
st.chat_message("user").write(user_query)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
cb = PrintRetrievalHandler(st.container())
response = qa_chain.run(user_query, callbacks=[cb])
resp = response.split(" ")
for r in resp:
full_response = full_response + r + " "
message_placeholder.markdown(full_response + "▌")
sleep(0.1)
message_placeholder.markdown(full_response)
st.session_state.messages.append({"role": "assistant", "content": full_response})
| [
"Please let me know how can I be of a help today?"
] |
2024-01-10 | waleedkadous/ansari-backend | agents~ansari.py | import time
from pydantic import BaseModel
from util.prompt_mgr import PromptMgr
from tools.search_quran import SearchQuran
from tools.search_hadith import SearchHadith
import json
from openai import OpenAI
import litellm
from langfuse import Langfuse
from datetime import datetime, date
from langfuse.model import InitialGeneration, CreateGeneration, CreateTrace
import hashlib
lf = Langfuse()
lf.auth_check()
MODEL = 'gpt-4-1106-preview'
MAX_FUNCTION_TRIES = 3
class Ansari:
def __init__(self, json_format = False):
sq = SearchQuran()
sh = SearchHadith()
self.tools = { sq.get_fn_name(): sq, sh.get_fn_name(): sh}
self.model = MODEL
self.pm = PromptMgr()
self.sys_msg = self.pm.bind('system_msg_fn').render()
self.functions = [x.get_function_description() for x in self.tools.values()]
self.message_history = [{
'role': 'system',
'content': self.sys_msg
}]
self.json_format = json_format
# The trace id is a hash of the first user input and the time.
def compute_trace_id(self):
today = date.today()
hashstring = str(today) + self.message_history[1]['content']
result = hashlib.md5(hashstring.encode())
return 'chash_' + result.hexdigest()
def greet(self):
self.greeting = self.pm.bind('greeting')
return self.greeting.render()
def process_input(self, user_input):
self.message_history.append({
'role': 'user',
'content': user_input
})
return self.process_message_history()
def log(self):
trace_id = self.compute_trace_id()
print('trace id is ', trace_id)
trace = lf.trace(CreateTrace(
id=trace_id,
name='ansari-trace'
))
generation = trace.generation(CreateGeneration(
name='ansari-gen',
startTime=self.start_time,
endTime=datetime.now(),
model=MODEL,
prompt=self.message_history[:-1],
completion=self.message_history[-1]['content'],
))
def replace_message_history(self, message_history):
self.message_history = [{
'role': 'system',
'content': self.sys_msg
}] + message_history
for m in self.process_message_history():
if m:
yield m
def process_message_history(self):
# Keep processing the user input until we get something from the assistant
self.start_time = datetime.now()
count = 0
while self.message_history[-1]['role'] != 'assistant':
try:
print(f'Processing one round {self.message_history}')
# This is pretty complicated so leaving a comment.
# We want to yield from so that we can send the sequence through the input
# Also use functions only if we haven't tried too many times
use_function = True
if count >= MAX_FUNCTION_TRIES:
use_function = False
print('Not using functions -- tries exceeded')
yield from self.process_one_round(use_function)
count += 1
except Exception as e:
print('Exception occurred: ', e)
print('Retrying in 5 seconds...')
time.sleep(5)
self.log()
def process_one_round(self, use_function = True):
response = None
while not response:
try:
if use_function:
if self.json_format:
response = litellm.completion(
model = self.model,
messages = self.message_history,
stream = True,
functions = self.functions,
timeout = 30.0,
temperature = 0.0,
metadata = {'generation-name': 'ansari'},
response_format = { "type": "json_object" },
num_retries = 5
)
else:
response = litellm.completion(
model = self.model,
messages = self.message_history,
stream = True,
functions = self.functions,
timeout = 30.0,
temperature = 0.0,
metadata = {'generation-name': 'ansari'},
num_retries = 5
)
else:
if self.json_format:
response = litellm.completion(
model = self.model,
messages = self.message_history,
stream = True,
timeout = 30.0,
temperature = 0.0,
response_format = { "type": "json_object" },
metadata = {'generation-name': 'ansari'},
num_retries = 5
)
else:
response = litellm.completion(
model = self.model,
messages = self.message_history,
stream = True,
timeout = 30.0,
temperature = 0.0,
metadata = {'generation-name': 'ansari'},
num_retries = 5
)
except Exception as e:
print('Exception occurred: ', e)
print('Retrying in 5 seconds...')
time.sleep(5)
words = ''
function_name = ''
function_arguments = ''
response_mode = '' # words or fn
for tok in response:
#print(f'Tok is {tok}')
delta = tok.choices[0].delta
if not response_mode:
# This code should only trigger the first
# time through the loop.
if 'function_call' in delta and delta.function_call:
# We are in function mode
response_mode = 'fn'
print(f'Tok is {tok}')
function_name = delta.function_call.name
else:
response_mode = 'words'
print('Response mode: ' + response_mode)
# We process things differently depending on whether it is a function or a
# text
if response_mode == 'words':
if delta.content == None: # End token
self.message_history.append({
'role': 'assistant',
'content': words
})
break
elif delta.content != None:
words += delta.content
yield delta.content
else:
continue
elif response_mode == 'fn':
if not 'function_call' in delta: # End token
function_call = function_name + '(' + function_arguments + ')'
# The function call below appends the function call to the message history
yield self.process_fn_call(input, function_name, function_arguments)
#
break
elif 'function_call' in delta:
function_arguments += delta.function_call.arguments
#print(f'Function arguments are {function_arguments}')
yield '' # delta['function_call']['arguments'] # we shouldn't yield anything if it's a fn
else:
continue
else:
raise Exception("Invalid response mode: " + response_mode)
def process_fn_call(self, orig_question, function_name, function_arguments):
if function_name in self.tools.keys():
args = json.loads(function_arguments)
query = args['query']
results = self.tools[function_name].run_as_list(query)
# print(f'Results are {results}')
# Now we have to pass the results back in
if len(results) > 0:
for result in results:
self.message_history.append({
'role': 'function',
'name': function_name,
'content': result
})
else:
self.message_history.append({
'role': 'function',
'name': function_name,
'content': 'No results found'
})
else:
print('Unknown function name: ', function_name)
| [
"No results found"
] |
2024-01-10 | gkorepanov/llm-tools | llm_tools~llm_fallback.py | from typing import (
AsyncIterator,
Any,
Callable,
Dict,
List,
Optional,
Tuple,
)
from llm_tools.tokens import (
TokenExpense,
TokenExpenses,
)
from llm_tools.chat_message import OpenAIChatMessage
from llm_tools.errors import (
should_fallback_to_other_model,
MultipleException,
)
from llm_tools.llm_streaming import StreamingOpenAIChatModel
from llm_tools.llm_streaming_base import StreamingLLMBase
class StreamingModelWithFallback(StreamingLLMBase):
def __init__(
self,
models: List[StreamingOpenAIChatModel],
should_fallback_to_other_model: Callable[[Exception], bool] = \
should_fallback_to_other_model,
):
self.models = models
self.should_fallback_to_other_model = should_fallback_to_other_model
self.exceptions = []
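    # Try each model in order; errors that qualify for fallback are collected and the next model is tried, anything else is re-raised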
async def stream_llm_reply(
self,
messages: List[OpenAIChatMessage],
stop: Optional[List[str]] = None,
) -> AsyncIterator[Tuple[str, str]]:
self.exceptions = []
for model in self.models:
try:
async for completion, token in model.stream_llm_reply(messages, stop):
yield completion, token
except Exception as e:
if self.should_fallback_to_other_model(e):
self.exceptions.append(e)
continue
else:
raise
else:
break
else:
if len(self.exceptions) == 1:
raise self.exceptions[0]
else:
raise MultipleException(self.exceptions) from self.exceptions[-1]
@property
def succeeded(self) -> bool:
return any(model.succeeded for model in self.models)
def get_tokens_spent(
self,
only_successful_trial: bool = False,
) -> TokenExpenses:
if not self.succeeded and only_successful_trial:
raise ValueError("Cannot get tokens spent for unsuccessful trial")
if only_successful_trial:
first_successful_model = next(model for model in self.models if model._succeeded)
return first_successful_model.get_tokens_spent(only_successful_trial)
else:
return sum(
(
model.get_tokens_spent(only_successful_trial)
for model in self.models
),
TokenExpenses()
)
| [] |
2024-01-10 | gkorepanov/llm-tools | llm_tools~errors.py | from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
AsyncRetrying,
retry_if_exception,
)
from tenacity.wait import wait_base
from typing import Any, Callable, Tuple, Optional, List
import logging
import openai
import openai.error
import aiohttp
import aiohttp.client_exceptions
import asyncio
import re
logger = logging.getLogger(__name__)
class ModelContextSizeExceededError(Exception):
def __init__(
self,
model_name: str,
max_context_length: int,
context_length: Optional[int] = None,
during_streaming: bool = False,
):
self.model_name = model_name
self.max_context_length = max_context_length
self.context_length = context_length
self.during_streaming = during_streaming
def __str__(self) -> str:
suffix = " (during streaming)" if self.during_streaming else ""
if self.context_length is None:
return f"Context length exceeded for model {self.model_name}{suffix}"
else:
return f"Context length exceeded for model {self.model_name}{suffix}: {self.context_length} > {self.max_context_length}"
@classmethod
def from_openai_error(
cls,
error: openai.error.InvalidRequestError,
model_name: str,
during_streaming: bool = False,
) -> "ModelContextSizeExceededError":
assert error.code == CONTEXT_LENGTH_EXCEEDED_ERROR_CODE
max_context_length_pattern = r"maximum context length is (\d+) tokens"
tokens_number_pattern = r"messages resulted in (\d+) tokens"
max_context_length = re.search(max_context_length_pattern, str(error))
tokens_number = re.search(tokens_number_pattern, str(error))
if max_context_length is not None:
max_context_length = int(max_context_length.group(1))
if tokens_number is not None:
tokens_number = int(tokens_number.group(1))
return ModelContextSizeExceededError(
model_name=model_name,
max_context_length=max_context_length,
context_length=tokens_number,
during_streaming=during_streaming,
)
class StreamingNextTokenTimeoutError(asyncio.TimeoutError):
pass
class OpenAIRequestTimeoutError(asyncio.TimeoutError):
pass
class MultipleException(Exception):
def __init__(
self,
exceptions: List[Exception],
):
self.exceptions = exceptions
def __str__(self):
return "\n".join(
f"{type(e).__name__}: {str(e)}"
for e in self.exceptions
)
CONTEXT_LENGTH_EXCEEDED_ERROR_CODE = "context_length_exceeded"
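# Initial-request errors worth retrying: timeouts, transient API/connection failures, rate limits and service unavailability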
def should_retry_initital_openai_request_error(error: Exception) -> bool:
OPENAI_REQUEST_ERRORS = (
openai.error.Timeout,
openai.error.APIError,
openai.error.APIConnectionError,
openai.error.RateLimitError,
openai.error.ServiceUnavailableError,
OpenAIRequestTimeoutError,
)
return isinstance(error, OPENAI_REQUEST_ERRORS)
def should_retry_streaming_openai_request_error(error: Exception) -> bool:
OPENAI_STREAMING_ERRORS = (
aiohttp.client_exceptions.ClientPayloadError,
StreamingNextTokenTimeoutError,
)
return isinstance(error, OPENAI_STREAMING_ERRORS)
def should_fallback_to_other_model(error: Exception) -> bool:
if isinstance(error, ModelContextSizeExceededError):
return False
if isinstance(error, openai.error.InvalidRequestError) and error.code == CONTEXT_LENGTH_EXCEEDED_ERROR_CODE:
return False
return True
def get_openai_retrying_iterator(
retry_if_exception_fn: Callable[[Exception], bool],
wait: wait_base,
max_retries: int = 1,
) -> AsyncRetrying:
return AsyncRetrying(
reraise=True,
stop=stop_after_attempt(max_retries),
wait=wait,
retry=retry_if_exception(retry_if_exception_fn),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
| [] |
2024-01-10 | gkorepanov/llm-tools | llm_tools~chat_message.py | from typing import (
Dict,
Union,
List,
)
from langchain.schema import BaseMessage, HumanMessage, AIMessage, SystemMessage, ChatMessage
from funcy import omit
OpenAIChatMessage = Union[BaseMessage, Dict[str, str]]
def convert_message_to_dict(message: BaseMessage) -> Dict[str, str]:
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
else:
raise ValueError(f"Got unknown type {message}")
if "name" in message.additional_kwargs:
message_dict["name"] = message.additional_kwargs["name"]
return message_dict
def convert_dict_to_message(_dict: Dict[str, str]) -> BaseMessage:
role = _dict["role"]
additional_kwargs = dict(omit(_dict, ["role", "content"]))
if role == "user":
return HumanMessage(content=_dict["content"], additional_kwargs=additional_kwargs)
elif role == "assistant":
return AIMessage(content=_dict["content"], additional_kwargs=additional_kwargs)
elif role == "system":
return SystemMessage(content=_dict["content"], additional_kwargs=additional_kwargs)
else:
return ChatMessage(content=_dict["content"], role=role, additional_kwargs=additional_kwargs)
def prepare_message(message: OpenAIChatMessage) -> BaseMessage:
if isinstance(message, dict):
return convert_dict_to_message(message)
elif isinstance(message, BaseMessage):
return message
else:
raise ValueError(f"Unknown message type: {type(message)}")
def prepare_messages(messages: List[OpenAIChatMessage]) -> List[BaseMessage]:
return [prepare_message(message) for message in messages]
| [
"content"
] |
2024-01-10 | gkorepanov/llm-tools | llm_tools~llm_streaming.py | from typing import (
AsyncIterator,
Any,
Callable,
Dict,
List,
Optional,
Tuple,
Union,
)
from tenacity import wait_exponential
import asyncio
from tenacity.wait import wait_base
from dataclasses import dataclass
from langchain.chat_models import ChatOpenAI, AzureChatOpenAI
from langchain.schema import BaseMessage
from langchain.chat_models.openai import _convert_dict_to_message
import tiktoken
import openai
import openai.error
from concurrent.futures import Executor
from functools import partial
from llm_tools.chat_message import OpenAIChatMessage, prepare_messages
from llm_tools.tokens import (
TokenExpense,
TokenExpenses,
count_tokens_from_input_messages,
count_tokens_from_output_text,
)
from llm_tools.errors import (
should_retry_initital_openai_request_error,
should_retry_streaming_openai_request_error,
should_fallback_to_other_model,
get_openai_retrying_iterator,
ModelContextSizeExceededError,
StreamingNextTokenTimeoutError,
OpenAIRequestTimeoutError,
CONTEXT_LENGTH_EXCEEDED_ERROR_CODE,
MultipleException,
)
from llm_tools.llm_streaming_base import StreamingLLMBase
class StreamingOpenAIChatModel(StreamingLLMBase):
def __init__(
self,
chat_model: Union[ChatOpenAI, AzureChatOpenAI],
max_initial_request_retries: int = 5,
max_streaming_retries: int = 2,
wait_between_retries=wait_exponential(multiplier=1, min=1, max=60),
streaming_next_token_timeout: int = 10,
request_timeout: wait_base = wait_exponential(multiplier=1, min=5, max=60),
token_count_executor: Optional[Executor] = None,
):
self.chat_model = chat_model
self.encoding = tiktoken.encoding_for_model(self.chat_model.model_name)
self.max_request_retries = max_initial_request_retries
self.max_streaming_retries = max_streaming_retries
self.wait_between_retries = wait_between_retries
self.streaming_next_token_timeout = streaming_next_token_timeout
self.request_timeout = request_timeout
self.token_count_executor = token_count_executor
self.reset()
@property
def context_size(self) -> int:
model_name = self.chat_model.model_name
is_azure = isinstance(self.chat_model, AzureChatOpenAI)
if is_azure:
return {
"gpt-3.5-turbo": 8192,
"gpt-4": 8192,
}[model_name]
else:
return {
"gpt-3.5-turbo": 4097,
"gpt-3.5-turbo-16k": 16384,
"gpt-4": 8192,
"gpt-4-1106-preview": 128000,
"gpt-3.5-turbo-1106": 16385,
}[model_name]
def reset(self):
self.completions = []
self.successful_request_attempts = 0
self.request_attempts = 0
self.streaming_attempts = 0
self.message_dicts = None
self._succeeded = False
self.input_messages_n_tokens = 0
self.output_tokens_spent_per_completion = []
@property
def succeeded(self) -> bool:
return self._succeeded
def prepare_messages(self, messages: List[OpenAIChatMessage]) -> List[BaseMessage]:
result = []
for message in messages:
if not isinstance(message, BaseMessage):
message = _convert_dict_to_message(message)
result.append(message)
return result
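    # Stream the reply token by token; the initial request and the streaming phase each have their own retry loop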
async def stream_llm_reply(
self,
messages: List[OpenAIChatMessage],
stop: Optional[List[str]] = None,
) -> AsyncIterator[Tuple[str, str]]:
assert self.chat_model.streaming
assert len(messages) > 0
self.reset()
_f = partial(count_tokens_from_input_messages,
messages=messages,
model_name=self.chat_model.model_name,
)
if self.token_count_executor is None:
self.input_messages_n_tokens = _f()
else:
self.input_messages_n_tokens = await asyncio.get_running_loop().run_in_executor(
self.token_count_executor,
_f,
)
if self.input_messages_n_tokens > self.context_size:
raise ModelContextSizeExceededError(
model_name=self.chat_model.model_name,
max_context_length=self.context_size,
context_length=self.input_messages_n_tokens,
during_streaming=False,
)
self.message_dicts, params = self.chat_model._create_message_dicts(
messages=prepare_messages(messages),
stop=stop,
)
params["stream"] = True
async for streaming_attempt in get_openai_retrying_iterator(
retry_if_exception_fn=should_retry_streaming_openai_request_error,
max_retries=self.max_streaming_retries,
wait=self.wait_between_retries,
):
completion = ""
role = "assistant"
self.streaming_attempts += 1
self.output_tokens_spent_per_completion.append(0)
async for request_attempt in get_openai_retrying_iterator(
retry_if_exception_fn=should_retry_initital_openai_request_error,
max_retries=self.max_request_retries,
wait=self.wait_between_retries,
):
with request_attempt:
self.request_attempts += 1
timeout = self.request_timeout(request_attempt.retry_state)
try:
gen = await asyncio.wait_for(
self.chat_model.client.acreate(messages=self.message_dicts, **params),
timeout=timeout,
)
except openai.error.InvalidRequestError as e:
if e.code == CONTEXT_LENGTH_EXCEEDED_ERROR_CODE:
raise ModelContextSizeExceededError.from_openai_error(
model_name=self.chat_model.model_name,
during_streaming=False,
error=e,
) from e
else:
raise
except asyncio.TimeoutError as e:
raise OpenAIRequestTimeoutError() from e
except:
raise
else:
self.successful_request_attempts += 1
with streaming_attempt:
try:
gen_iter = gen.__aiter__()
while True:
try:
stream_resp = await asyncio.wait_for(
gen_iter.__anext__(),
timeout=self.streaming_next_token_timeout,
)
except StopAsyncIteration:
break
except asyncio.TimeoutError as e:
raise StreamingNextTokenTimeoutError() from e
finish_reason = stream_resp["choices"][0].get("finish_reason")
role = stream_resp["choices"][0]["delta"].get("role", role)
token = stream_resp["choices"][0]["delta"].get("content", "")
_f = partial(count_tokens_from_output_text,
text=token,
model_name=self.chat_model.model_name,
)
if self.token_count_executor is None:
_tokens = _f()
else:
_tokens = await asyncio.get_running_loop().run_in_executor(
self.token_count_executor,
_f,
)
self.output_tokens_spent_per_completion[-1] += _tokens
completion += token
if token:
yield completion, token
if finish_reason:
if finish_reason == "length":
raise ModelContextSizeExceededError(
model_name=self.chat_model.model_name,
max_context_length=self.context_size,
context_length=self.input_messages_n_tokens + self.output_tokens_spent_per_completion[-1],
during_streaming=True,
)
elif finish_reason != "stop":
raise ValueError(f"Unknown finish reason: {finish_reason}")
finally:
self.completions.append(completion)
self._succeeded = True
def get_tokens_spent(
self,
only_successful_trial: bool = False,
) -> TokenExpenses:
if not self.succeeded and only_successful_trial:
raise ValueError("Cannot get tokens spent for unsuccessful trial")
n_input_tokens_per_trial = self.input_messages_n_tokens
if only_successful_trial:
n_input_tokens = n_input_tokens_per_trial
n_output_tokens = self.output_tokens_spent_per_completion[-1]
else:
n_input_tokens = n_input_tokens_per_trial * self.successful_request_attempts
n_output_tokens = sum(self.output_tokens_spent_per_completion)
expenses = TokenExpenses()
expense = TokenExpense(
n_input_tokens=n_input_tokens,
n_output_tokens=n_output_tokens,
model_name=self.chat_model.model_name,
)
expenses.add_expense(expense)
return expenses
| [] |
2024-01-10 | gkorepanov/llm-tools | llm_tools~llm_streaming_base.py | from typing import (
AsyncIterator,
List,
Optional,
Tuple,
)
from llm_tools.tokens import TokenExpenses
from llm_tools.chat_message import OpenAIChatMessage
class StreamingLLMBase(object):
async def stream_llm_reply(
self,
messages: List[OpenAIChatMessage],
stop: Optional[List[str]] = None,
) -> AsyncIterator[Tuple[str, str]]:
raise NotImplementedError()
@property
def succeeded(self) -> bool:
raise NotImplementedError()
def get_tokens_spent(
self,
only_successful_trial: bool = False,
) -> TokenExpenses:
raise NotImplementedError()
| [] |
2024-01-10 | juanpablotr14/backend_toDoList | routes.py | import os
from flask import Flask, Blueprint, make_response
from flask_restx import Api, Resource, reqparse
from config import app, db
from models import Task, task_schema
from models import CheckList, check_list_schema
from api_control import taskCtrlr, taskDto, createTaskCommand, updateTaskCommand
from api_control import listCtrlr, listDto, createListCommand, updateListCommand
from api_control import langchainCtrlr, createlangChainCommand
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
from langchain.schema import BaseOutputParser
from langchain.chains import LLMChain
# Crea el blueprint de la aplicación
blueprint = Blueprint('api', __name__, url_prefix='/api')
# La aplicación API se crea atada a un blueprint
# Tambien puedes atar el API directamente en una aplicación flask convencional
# Si configuras doc=False desabilitaras la pagina UI swagger
# Usa validate=True para permitir la validación de los request en todos los APIS
api = Api(blueprint,
title="Aplicación de tareas",
description="Un ejemplo de aplicación API usando flask-restx",
version="1.0",
doc="/swagger/",
validate=True
)
# Se crea una endpoint indicando la ruta
# Una ruta se representa pot una clase python que herede de "Resource"
# Una petición HTTP maneja funciones definidas por get, post, put, delete
# Create a request parser to handle query parameters
@listCtrlr.route("/")
class CheckListDisplay(Resource):
@listCtrlr.marshal_list_with(listDto)
def get(self):
list = CheckList.query.all()
return list
@listCtrlr.expect(createListCommand)
def post(self):
payload = listCtrlr.payload
newList = CheckList(title=payload["title"])
db.session.add(newList)
db.session.commit()
return check_list_schema.dump(newList)
@listCtrlr.route("/<int:id>")
class CheckListInd(Resource):
@listCtrlr.marshal_with(listDto)
def get(self, id):
list = CheckList.query.get(id)
if list is not None:
return check_list_schema.dump(list)
else:
listCtrlr.abort(404, f"List with id {id} does not exist")
@listCtrlr.expect(updateListCommand)
@listCtrlr.marshal_with(listDto)
def put(self, id):
list = CheckList.query.get(id)
if list:
payload = listCtrlr.payload
list.title = payload["title"]
db.session.merge(list)
db.session.commit()
return check_list_schema.dump(list), 201
else:
listCtrlr.abort(404, f"List with id {id} does not exist")
def delete(self, id):
list = CheckList.query.get(id)
if list:
db.session.delete(list)
db.session.commit()
return make_response(f"{list.title} successfully deleted", 200)
else:
listCtrlr.abort(404, f"List with id {id} does not exist")
api.add_namespace(listCtrlr)
parser = reqparse.RequestParser()
parser.add_argument('listId', type=int, help='Filter tasks by list ID')
@taskCtrlr.route("/")
class TodosDisplay(Resource):
@taskCtrlr.marshal_list_with(taskDto)
def get(self):
args = parser.parse_args()
list_id_filter = args.get('listId')
if list_id_filter:
tasks = Task.query.filter_by(list_id=list_id_filter).all()
else:
tasks = Task.query.all()
return tasks
@taskCtrlr.expect(createTaskCommand)
def post(self):
# this method handles POST request of the API endpoint
# create a todo object in the database using the JSON from API request payload
payload = taskCtrlr.payload
list_id = payload.get("list_id")
if list_id and CheckList.query.get(list_id):
newTask = Task(value=payload["value"], order=payload["order"],
list_id=payload["list_id"])
db.session.add(newTask)
db.session.commit()
return task_schema.dump(newTask)
else:
taskCtrlr.abort(404, f"List with id {list_id} does not exist")
# extract id variable of endpoint from URL segment for use in the request handling functions
@taskCtrlr.route("/<int:id>")
class Todo(Resource):
@taskCtrlr.marshal_with(taskDto)
def get(self, id):
# this method handles GET request of the API endpoint
# get the todo object based on id from request URL
task = Task.query.get(id)
if task is not None:
return task_schema.dump(task)
else:
taskCtrlr.abort(404, f"Task with id {id} does not exist")
@taskCtrlr.expect(updateTaskCommand)
@taskCtrlr.marshal_with(taskDto)
def put(self, id):
task = Task.query.get(id)
if task:
payload = taskCtrlr.payload
list_id = payload["list_id"]
list = CheckList.query.get(list_id)
if list is None:
taskCtrlr.abort(404, f"List with id {list_id} does not exist")
task.value = payload["value"]
task.order = payload["order"]
task.list_id = payload["list_id"]
task.completed = payload["completed"]
db.session.merge(task)
db.session.commit()
return task_schema.dump(task), 201
else:
taskCtrlr.abort(404, f"Task with id {id} does not exist")
def delete(self, id):
task = Task.query.get(id)
if task:
db.session.delete(task)
db.session.commit()
return make_response(f"{task.value} successfully deleted", 200)
else:
listCtrlr.abort(404, f"Task with id {id} does not exist")
api.add_namespace(taskCtrlr)
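# Endpoint that asks the LLM for an ordered, comma-separated list of tasks for the given prompt and stores it as a new checklist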
@langchainCtrlr.route("/")
class SendTaskToLangChain(Resource):
@langchainCtrlr.expect(createlangChainCommand)
def post(self):
payload = langchainCtrlr.payload
class CommaSeparatedListOutputParser(BaseOutputParser):
"""Parse the output of an LLM call to a comma-separated list."""
def parse(self, text: str):
"""Parse the output of an LLM call."""
return text.strip().split(", ")
llm = OpenAI(openai_api_key=os.environ.get("SECRET_KEY_OPENAI"))
template = """Eres un asistente que genera una lista sin numeracion separada por comas.
La cantidad maxima de elementos por lista es de 10.
Los elementos de la lista deben estar ordenados.
Imagina que eres un experto en un productividad dentro de tres
asteristicos vamos a escribirte el titulo de una lista de tareas y quiero que
me digas que actividades debo realizar para completarla.
***
{data}
***
"""
        prompt = PromptTemplate.from_template(template)
        # Render the template with the user's prompt and ask the LLM for a comma-separated list of tasks
        tasks = llm.predict(prompt.format(data=payload["prompt"])).strip().split(", ")
        newList = CheckList(title=payload["prompt"])
        db.session.add(newList)
        db.session.commit()
        order = 1
        for task in tasks:
newTask = Task(value=task, order=order,
completed=False, list_id=newList.id)
order +=1
db.session.add(newTask)
db.session.commit()
return make_response(f"List created successfully", 200)
api.add_namespace(langchainCtrlr) | [
"Eres un asistente que genera una lista sin numeracion separada por comas.\n La cantidad maxima de elementos por lista es de 10.\n Los elementos de la lista deben estar ordenados. \n Imagina que eres un experto en un productividad dentro de tres\n asteristicos vamos a escribirte el titulo de una lista de tareas y quiero que\n me digas que actividades debo realizar para completarla. \n ***\n {data}\n ***\n "
] |
2024-01-10 | bennettwbrown/BulkAssistantFileTool | DELETE_ALL_FILES_ASSISTANTS.py | import os
import pandas as pd
from openai import OpenAI
from dotenv import load_dotenv
load_dotenv()
"""
This script is used for testing accounts only.
WARNING: This script will delete all files and assistants in your OpenAI organization.
Run this script to delete all files and assistants in your OpenAI organization.
"""
# TODO:
# - Allow passing a list of assistants and files to delete
api_key = os.getenv("OPENAI_API_KEY")
organization = os.getenv("OPENAI_ORG_KEY")
client = OpenAI(organization=organization, api_key=api_key)
def delete_all_files_assistants():
try:
# Collect all file IDs first
file_ids = [file_obj.id for file_obj in client.files.list()]
# Now delete each file
for file_id in file_ids:
client.files.delete(file_id)
print(f"Deleted file with ID: {file_id}")
except Exception as e:
print(f"An error occurred while deleting files: {e}")
try:
# Collect all assistant IDs first
assistant_ids = [
assistant.id
for assistant in client.beta.assistants.list(order="desc", limit="20")
]
# Now delete each assistant
for assistant_id in assistant_ids:
client.beta.assistants.delete(assistant_id)
print(f"Deleted assistant with ID: {assistant_id}")
except Exception as e:
print(f"An error occurred while deleting assistants: {e}")
def main():
proceed = input(
"type 'DELETE ALL' to delete all files and assistants in your OpenAI organization. "
)
if proceed == "DELETE ALL":
delete_all_files_assistants()
else:
print("Exiting without deleting anything.")
main()
| [] |
2024-01-10 | q734550709/SQLBot | src~plan~content_moderation.py | import openai
from src.get_completion_from_messages import get_completion_from_messages
# Moderation helper: returns 0 when the input is clean and a positive count when any category is flagged
def content_moderation (eval_message):
response = openai.Moderation.create(
input=eval_message
)
moderation_output = sum(1 for value in response["results"][0]["categories"].values() if value)
return moderation_output
# Decide whether the user input is a question that asks to query table information
def is_query_question(user_message):
    delimiter = "####"
    # Ask the model whether the text contains any of the query-related keywords listed below
system_message = f"""
你是任务是确定用户的输入是否属于查询表信息的文本。
输入的文本可能包含下面的文字:
查找,找出,从……查询,取一下,取出,等等类似表述
或者,英文的表述有:which; find; how many; what is; what are
用户输入的内容会使用{delimiter}作为分割符,
使用 Y 或者 N 回复:
Y - 如果用户的输入属于查询表信息的文本
N - 其他情况
只需输出一个字符
"""
user_message_for_model = f"""
{delimiter}{user_message}{delimiter}
"""
messages = [
{'role':'system', 'content': system_message},
{'role':'user', 'content': user_message_for_model},
]
is_query_question = get_completion_from_messages(messages, max_tokens=1)
return is_query_question
| [
"\n 你是任务是确定用户的输入是否属于查询表信息的文本。\n 输入的文本可能包含下面的文字:\n\n 查找,找出,从……查询,取一下,取出,等等类似表述\n\n 或者,英文的表述有:which; find; how many; what is; what are\n\n 用户输入的内容会使用####作为分割符,\n 使用 Y 或者 N 回复:\n Y - 如果用户的输入属于查询表信息的文本\n N - 其他情况\n\n 只需输出一个字符\n ",
"\n ####PLACEHOLDER####\n "
] |
2024-01-10 | q734550709/SQLBot | src~plan~get_table_info.py | import openai
import json
import pandas as pd
from src.get_completion_from_messages import get_completion_from_messages
from src.plan.content_moderation import *
from constants.constants import constants
# Unpack the constants defined in the constants module into local names
locals().update(constants)
# Build a dictionary of table detail strings keyed by "database_table"
def generate_table_dic(database_datalist = database_datalist):
table_dic = {}
database_df = pd.DataFrame(database_datalist,columns=database_columns)
    # Iterate over every row of the DataFrame
for index, row in database_df.iterrows():
key = f"{row['database']}_{row['table']}"
value = row['tabledetail']
        # Store the table details keyed in the form "database_table"
table_dic[key] = value
return table_dic
# Build a comma-separated string of all database names
def database_str_generate(database_datalist = database_datalist):
database_df = pd.DataFrame(database_datalist,columns=database_columns)
database_list = set(database_df['database'])
database_str = ','.join(map(str, database_list))
return database_str
# Build a string describing which tables belong to each database
def database_tableinfo_generate(database_datalist = database_datalist):
    # Build the database_df DataFrame
database_df = pd.DataFrame(database_datalist,columns=database_columns)
    # Group the rows by the database column
grouped = database_df[['database','table','tableinfo']].groupby('database')
    # Dictionary that stores the rows after grouping by database
    grouped_data = {}
    # Iterate over the original data, one database group at a time
for category, group in grouped:
        # Use join to merge each group's table descriptions into a single string
merged_description = '\n'.join(f"{row['table']} --{row['tableinfo']}" for _, row in group.iterrows())
        # Store the merged description in the dictionary
grouped_data[category] = merged_description
    # String that accumulates the final result
    database_table_info = ''
    # Append each group's result to the string
for category, description in grouped_data.items():
database_table_info += f"{category}:\n{description}\n\n"
    # database_table_info now holds the database-to-table mapping
return database_table_info
# Ask the model which databases and tables the user's request refers to
def query_table_info(user_message,
model = 'gpt-3.5-turbo-16k',
temperature = 0,
max_tokens = 3000,
database_datalist = database_datalist
):
delimiter = "####"
database_str = database_str_generate(database_datalist)
database_table_info = database_tableinfo_generate(database_datalist)
table_system_message = f'''
你会收到用户查询表信息的请求
用户的信息放在{delimiter}分割符里
输出一个python的list对象,其中每个元素按照下面的格式提供:
'database': <{database_str}> 的其中之一,
'table': <必须在下面列出的database和table中选择>
其中,这些database和table需要在客户的查询中被提到
允许的table列表:
{database_table_info}
仅输出一个列表对象,不输出其他内容
如果找不到相关的database和table,输出一个空列表
'''
user_message_for_model = f"""
{delimiter}{user_message}{delimiter}
"""
messages = [
{'role':'system',
'content': table_system_message},
{'role':'user',
'content': user_message_for_model},
]
database_and_table_response = get_completion_from_messages(messages,
model = model,
temperature = temperature,
max_tokens = max_tokens)
return database_and_table_response
# Convert a JSON-formatted string into a Python list
def read_string_to_list(input_string):
if input_string is None:
return None
try:
input_string = input_string.replace("'", "\"")
data = json.loads(input_string)
return data
except json.JSONDecodeError:
print("Error: Invalid JSON string")
return None
# Attach detailed table information to each database/table entry
def generate_table_info(data_list, database_datalist=database_datalist):
#生成database_df表格
database_df = pd.DataFrame(database_datalist,columns=database_columns)
#生成库表字典
table_dic = generate_table_dic(database_datalist)
# 如果data_list是None,直接返回空字符串
if data_list is None:
return ""
# 遍历data_list中的每一个元素data
for data in data_list:
#判断生成的表里面是否在给定库表范围内,如果在,添加表详细信息
table_name = data['database']+'_'+data['table']
if table_name in table_dic:
table_info = table_dic[table_name]
data['table_detail'] = table_info
#如果得到的表没有在库表范围内,则去掉该元素(GPT会误生成不存在的库表信息)
else:
data_list.remove(data)
#生成判断后的库表&表信息,存在字符串中
output_string = ["\n".join([f"'{k}':'{v}'" for k, v in item.items()]) for item in data_list]
output_string = ';\n'.join(output_string)
# 返回处理后的output_string
return output_string
| [] |
2024-01-10 | q734550709/SQLBot | src~generate~sql_generation.py | import openai
# Generate SQL from a natural-language description
def sql_generation(
user_input,
model="gpt-3.5-turbo-16k",
temperature=0,
max_tokens=3000,
sql_type = 'hive'
):
system_message = f"""
根据用户的描述写一段{sql_type} SQL代码,仅提供{sql_type} SQL代码:
"""
messages = [
{'role':'system', 'content': system_message},
{'role':'user', 'content': user_input},
]
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
)
return response.choices[0].message["content"]
# Merge the contents of several text boxes into one string
def merge_textbox(textbox1='', textbox2='', textbox3='', textbox4=''):
merged_text = f"{textbox1}\n{textbox2}\n{textbox3}\n{textbox4}"
return merged_text
| [
"\n 根据用户的描述写一段PLACEHOLDER SQL代码,仅提供PLACEHOLDER SQL代码:\n "
] |
2024-01-10 | q734550709/SQLBot | src~plan~text_to_sql.py | import os
import openai
from src.get_completion_from_messages import get_completion_from_messages
from src.plan.content_moderation import *
from src.plan.get_table_info import *
from constants.constants import constants
# Unpack the constants from the constants module
locals().update(constants)
# Build the path to the database_df file
database_df_path = os.path.join("data", "database_df.xlsx")
# Read database_df
database_df = pd.read_excel(database_df_path)
# Define database_columns and database_datalist
database_columns = list(database_df.columns)
database_datalist = database_df.values.tolist()
# Return the index of the last occurrence of a value in a list
def rindex(lst, value):
    """
    Return the index of the last occurrence of a value in a list.
    lst: the list to search in.
    value: the string value whose last occurrence we want to find.
    """
    try:
        return len(lst) - lst[::-1].index(value) - 1
    except ValueError:
        raise ValueError(f"The token `{value}` does not appear in the list")
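# Quick example (illustrative):
#     rindex(['--', 'SELECT', '--', 'explanation'], '--')  # -> 2
# i.e. the index of the last occurrence, whereas list.index() would return the first one.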
# Generate candidate completions
def get_candidates(
messages,
model = 'gpt-3.5-turbo-16k',
temperature = 0,
max_tokens = 3000,
n = 3,
stop = [';'],
):
prefix = ''
    # Generate responses with the OpenAI chat completion endpoint
response = openai.ChatCompletion.create(
model=model,
messages = messages,
temperature=temperature,
n=n,
stop=stop,
max_tokens=max_tokens,
)
    # Combine each generated response with the specified prefix
responses = [prefix + choice.message.content for choice in response.choices]
return responses
# Score a candidate answer
def eval_candidate(
candidate_answer,
nl_query,
engine = 'text-davinci-003',
):
eval_template = "{};\n--上述查询的易于理解的解释为\n-- {}"
prompt = eval_template.format(candidate_answer, nl_query)
answer_start_token = "--"
    # Use the OpenAI completion endpoint to score the prompt
    response = openai.Completion.create(
        engine=engine,
        prompt=prompt, # evaluation template filled with the candidate answer and the query
        temperature=0,
        max_tokens=0, # set to 0 so no new tokens are generated (and none are billed)
        logprobs=1, # only the log-probabilities of the tokens already in the prompt are needed
        echo=True, # echo the prompt back so its token log-probabilities are returned
)
    # Find the index where the answer section starts
answer_start = rindex(
response["choices"][0]["logprobs"]["tokens"], answer_start_token
)
    # Average log-probability the model assigns to the original query (higher is better)
logprobs = response["choices"][0]["logprobs"]["token_logprobs"][answer_start + 1 :]
return sum(logprobs) / len(logprobs)
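# How this scoring works (illustrative numbers, not real model output): with echo=True,
# max_tokens=0 and logprobs=1 the API returns a log-probability for every token already in
# the prompt. Only the tokens after the last "--" marker (the natural-language query) are
# kept and averaged, e.g.
#     token_logprobs after "--": [-0.2, -1.3, -0.5]
#     score = (-0.2 - 1.3 - 0.5) / 3 = -0.6667
# A candidate SQL whose "back-translated" query is more likely gets a higher (less negative)
# average and therefore wins the ranking in backtranslation() below.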
# Back-translation: generate several SQL queries from the natural-language instruction and pick the best one.
def backtranslation(
nl_query,
messages,
model = 'gpt-3.5-turbo-16k',
temperature = 0,
max_tokens = 3000,
n = 3,
stop = [';'],
):
    candidates = [] # holds candidate answers together with their evaluation scores
responses = get_candidates(messages = messages,
model = model,
temperature = temperature,
max_tokens = max_tokens,
n = n,
stop = stop
)
for i in range(n):
quality = eval_candidate(
responses[i],
nl_query,
)
candidates.append((responses[i], quality))
    # Sort candidates by evaluation score
candidates.sort(key=lambda x: x[1], reverse=True)
return candidates[0][0]
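# Usage sketch (hypothetical messages, for illustration only):
#     messages = [
#         {'role': 'system', 'content': 'Write hive SQL for the user request.'},
#         {'role': 'user', 'content': '####how many orders were created yesterday####'},
#     ]
#     best_sql = backtranslation(nl_query='####how many orders were created yesterday####',
#                                messages=messages, n=3)
# Three candidates are generated, each is scored by eval_candidate, and the
# highest-scoring SQL string is returned.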
# Process a user message end to end
def process_user_message(user_input,
all_messages,
model = 'gpt-3.5-turbo-16k',
temperature = 0,
max_tokens = 3000,
hive_select='hive',
is_current_table = True,
data_scope_definition = data_scope_definition,
database_datalist = database_datalist
):
    delimiter = "####" # delimiter wrapped around the user input
    # Step 1: check whether the input is harmful or is not a query question
''' check_error = (content_moderation(user_input) == 1
or is_query_question(user_input) == 'N')'''
    # If the input was flagged
''' if check_error:
all_messages+= [(user_input,"对不起,您的问题不是一个查询问题,请重新输入")]
return "", all_messages # 返回错误消息'''
    # Step 2: ask the model which databases and tables the query refers to
database_df = pd.DataFrame(database_datalist,columns=database_columns)
database_and_table_str = query_table_info(
user_message = user_input,
model = model,
temperature = temperature,
max_tokens = max_tokens,
database_datalist = database_datalist
)
database_and_table_list = read_string_to_list(database_and_table_str)
    # Step 3: if database/table info was found, look up the table field details
database_and_table_info = generate_table_info(database_and_table_list,database_datalist)
''' if database_and_table_info == "" and is_current_table:
all_messages+= [(user_input,"对不起,未查到相关表信息")]
return "", all_messages'''
    # Step 4: answer the user's question
    # Define the system message
system_message = f"""
You are a helpful assistant capable of aiding users in converting natural language into SQL statements. Here's how you can assist users in generating {hive_select} SQL statements and provide help:
1. Based on the provided SQL table information, list the fields related to the content the user wishes to query.
2. Ask the user if these are the fields they need, encouraging them to provide additional information such as field definitions and caliber definitions.
3. Once the user confirms the information they wish to query, provide the corresponding {hive_select} SQL statement, ensuring it adheres to {hive_select} SQL syntax standards.
4. If the user wishes to understand SQL syntax, inquire about their level of understanding of {hive_select} SQL statements: beginner, novice, intermediate, or advanced.
-- If the user is a beginner, shift the conversation towards a basic explanation of SQL syntax.
-- If the user is a novice, guide them to ask more {hive_select} SQL-related questions and provide clear and patient answers.
-- If the user is at an intermediate or advanced level, engage in a Socratic dialogue to help them clarify their difficulties in understanding {hive_select} SQL.
Always remember, you are an assistant for generating SQL statements, and there's no need to answer other unrelated questions.
related caliber definitions:{data_scope_definition}
Be concise and constructive with feedback.
注意,对于用户的输入,始终使用中文回复
"""
prefix = ''
nl_query = f"{delimiter}{user_input}{delimiter}"
history_prompt = []
for turn in all_messages:
user_message, bot_message = turn
history_prompt += [
{'role': 'user', 'content':user_message},
{'role': 'assistant', 'content': bot_message}
]
    # Build the message list: system message, chat history, and the user message
messages = [
{'role': 'system', 'content': system_message}] \
+ history_prompt + \
[{'role': 'assistant', 'content': database_and_table_info},
{'role': 'user', 'content': nl_query+prefix}
]
    # Generate the final response from the messages
final_response = backtranslation(
nl_query = nl_query,
messages = messages,
model = model,
temperature = temperature,
max_tokens = max_tokens,
)
all_messages+= [(user_input,final_response)]
    return "", all_messages # return the final response and all messages
| [
"[]",
"PLACEHOLDERPLACEHOLDER",
"{};\n--上述查询的易于理解的解释为\n-- {}"
] |
2024-01-10 | q734550709/SQLBot | src~get_api_key.py | import json
import os
import openai
# Build the path to the config.json file
config_file_path = os.path.join("config", "config.json")
# Read the configuration file
with open(config_file_path, "r") as config_file:
    config = json.load(config_file)
# Set the environment variable
os.environ["OPENAI_API_KEY"] = config.get("OPENAI_API_KEY")
# Set the OpenAI API key, falling back to the environment variable when no key is supplied
def get_api_key(key):
    if key == '':
        openai.api_key = os.environ.get('OPENAI_API_KEY')
    else:
        openai.api_key = key
| [] |
2024-01-10 | q734550709/SQLBot | src~analysis~sql_observation.py | import openai
from src.get_completion_from_messages import get_completion_from_messages
# Detailed, interactive explanation of a SQL statement
def sql_explain(user_input,
model="gpt-3.5-turbo-16k",
temperature=0,
max_tokens=3000):
system_message = """
You are a helpful assistant capable of aiding users in understanding SQL syntax. Here's how you can assist users in comprehending SQL content and provide help:
1. Begin by translating the SQL code input by the user into simple, concise natural language.
2. Ask the user if they understand the SQL statement, encouraging them to continue asking questions.
3. Once the user starts asking questions, inquire about their understanding level of SQL syntax: beginner, novice, intermediate, or advanced.
-- If the user is a beginner, shift the conversation towards a basic explanation of SQL syntax.
-- If the user is a novice, guide them to ask more SQL-related questions and provide clear and patient answers.
-- If the user is at an intermediate or advanced level, engage in a Socratic dialogue to help them clarify their difficulties in understanding SQL.
Always remember, you are an assistant for interpreting SQL syntax, and there's no need to answer other unrelated questions. Be concise and constructive with feedback.
"""
messages = [
{'role':'system', 'content': system_message},
{'role':'user', 'content': user_input},
]
response = get_completion_from_messages(messages,
model,
temperature,
max_tokens)
return response
# Plain natural-language translation of a SQL statement
def sql_translate(user_input,
model="gpt-3.5-turbo-16k",
temperature=0,
max_tokens=3000):
system_message = """
You are a helpful assistant capable of aiding users in understanding SQL syntax. Here's how you can assist users in comprehending SQL content and provide help:
Translating the SQL code input by the user into simple, concise natural language.
Always remember, you are an assistant for interpreting SQL syntax, and there's no need to answer other unrelated questions. Be concise and constructive with feedback.
"""
messages = [
{'role':'system', 'content': system_message},
{'role':'user', 'content': user_input},
]
response = get_completion_from_messages(messages,
model,
temperature,
max_tokens)
return response
# Select between detailed explanation and plain translation
def function_select(input_text,
model,
temperature,
max_token,
flag = False):
if flag:
response = sql_explain(input_text,model,temperature,max_token)
return response
else:
response = sql_translate(input_text,model,temperature,max_token)
return response
| [] |
2024-01-10 | q734550709/SQLBot | src~study~answer_evaluation.py | import os
import openai
import pandas as pd
import random
# Build the path to the LeetCode questions file
leetcode_path = os.path.join("data", "leetcode_questions.xlsx")
# Read the LeetCode questions
leetcode_df = pd.read_excel(leetcode_path)
# Evaluate the user's answer against the reference answer
def answer_evaluation(
user_input,
all_messages,
question,
answer,
model="gpt-3.5-turbo-16k",
temperature=0,
max_tokens=3000,
):
system_message = f"""
根据下面的问题(使用<>符号分隔),结合答案(使用####分割符分隔)判断用户的回答是否正确,并给出改进建议
问题如下:<{question}>
答案如下:####{answer}####
请使用中文回复
"""
history_prompt = []
for turn in all_messages:
user_message, bot_message = turn
history_prompt += [
{'role': 'user', 'content':user_message},
{'role': 'assistant', 'content': bot_message}
]
messages = [
{'role':'system', 'content': system_message}] \
+ history_prompt + \
[{'role':'user', 'content': user_input},
]
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
)
final_response = response.choices[0].message["content"]
all_messages+= [(user_input,final_response)]
    return "", all_messages # return the final response and all messages
# Randomly pick a question of the given difficulty
def question_choice(difficulty = '简单'):
simple_records = leetcode_df[leetcode_df['难度'] == difficulty]
random_simple_record = simple_records.sample(n=1, random_state=random.seed())
title = random_simple_record['题目标题'].values[0]
question_url = random_simple_record['题目地址'].values[0]
question = random_simple_record['题目'].values[0]
example = random_simple_record['示例'].values[0]
answer = random_simple_record['答案'].values[0]
answer_explain = random_simple_record['可参考解析'].values[0]
    # Build markdown links for the question and its answer explanation
title_url = f"""### 本题链接:[{title}]({question_url})"""
answer_explain = f"""### 答案解析见:[{title}]({answer_explain})"""
return title_url, question, example, answer, answer_explain
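# Usage sketch (illustrative): pick a random easy ('简单') question and unpack the five fields:
#     title_url, question, example, answer, answer_explain = question_choice('简单')
# title_url and answer_explain are markdown links; question, example and answer are the raw
# text columns from leetcode_questions.xlsx.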
| [
"\n 根据下面的问题(使用<>符号分隔),结合答案(使用####分割符分隔)判断用户的回答是否正确,并给出改进建议\n 问题如下:<PLACEHOLDER>\n 答案如下:####PLACEHOLDER####\n\n 请使用中文回复\n ",
"[\n {'role':'system', 'content': system_message}] \\\n + history_prompt + \\\n [{'role':'user', 'content': user_input},\n ]",
"[]"
] |
2024-01-10 | q734550709/SQLBot | src~get_completion_from_messages.py | import openai
# Chat completion helper
def get_completion_from_messages(
messages,
model="gpt-3.5-turbo-16k",
temperature=0,
max_tokens=3000):
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
)
return response.choices[0].message["content"]
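# Usage sketch (illustrative): this helper just wraps the legacy ChatCompletion API, so
# openai.api_key must already be set (see src/get_api_key.py) before calling it:
#     reply = get_completion_from_messages(
#         [{'role': 'system', 'content': 'You are a SQL assistant.'},
#          {'role': 'user', 'content': 'Explain what SELECT DISTINCT does.'}],
#         temperature=0)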
| [] |
2024-01-10 | karpator/openai_threading_async_error | non_stucking_example.py | import asyncio
from openai import AsyncAzureOpenAI
import threading
from typing import (
Callable,
Any,
)
class AsyncThreadingHelper:
def __init__(self, async_function: Callable):
self.__async_function = async_function
def __process_callback(self, *args: Any) -> None:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(
self.__async_function(*args)
)
loop.close()
def run_async_task_on_thread(self, args: Any) -> None:
thread = threading.Thread(
target=self.__process_callback,
args=args
)
thread.start()
class OpenAIAdapter:
def __init__(self):
self.__openai_client = AsyncAzureOpenAI(
azure_endpoint=" AZURE ENDPOINT ",
api_key=" API KEY ",
max_retries=1,
api_version="2023-05-15",
timeout=10
)
async def get_chat_response(self, text: str = "How are you?"):
_ = await self.__openai_client.chat.completions.create(
messages=[{'role': 'system', 'content': "Generate a response for the user's message."},{'role': 'user', 'content': text}],
model="gpt-4",
max_tokens=800,
presence_penalty=1.05,
temperature=0,
top_p=0.52,
stream=False,
timeout=10
)
async def some_process_on_thread_a(openai_adapter: OpenAIAdapter):
print("A: Start some process on thread A")
await openai_adapter.get_chat_response()
print("A: Finish some process on thread A")
async def some_process_on_thread_b(openai_adapter: OpenAIAdapter):
print("B: Start some process on different thread")
await openai_adapter.get_chat_response()
print("B: Finish some process on different thread")
async def main():
# Create OpenAI object on thread A
openai_adapter_thread_a = OpenAIAdapter()
openai_adapter_thread_b = OpenAIAdapter()
for i in range(10):
print(f"Start iteration {i}")
# Let's call an OpenAI call on thread B
if i == 5:
async_threading_helper = AsyncThreadingHelper(
async_function=some_process_on_thread_b
)
async_threading_helper.run_async_task_on_thread(
args=(
openai_adapter_thread_b,
)
)
# Call OpenAI on thread A
await some_process_on_thread_a(openai_adapter_thread_a)
if __name__ == '__main__':
asyncio.run(main())
| [
"Generate a response for the user's message."
] |
2024-01-10 | karpator/openai_threading_async_error | stucking_example.py | import asyncio
from openai import AsyncAzureOpenAI
import threading
from typing import (
Callable,
Any,
)
"""
This example is used to trigger a hang in the OpenAI client (the call gets stuck).
"""
def singleton(class_: Any) -> Any:
"""
This is the singleton pattern, which is a software design pattern that restricts the instantiation of a class to
one single instance. This is useful when exactly one object is needed to coordinate actions across the system.
:param class_: any class type
:return: none
"""
__instances = {}
def get_instance(*args: Any, **kwargs: Any) -> Any:
"""
This function checks if there are any instances registered in the dictionary if not it will return a new
instances of a class.
:rtype Any
:param args: non-keyword arguments
:param kwargs: keyword arguments
:return:
"""
if class_ not in __instances:
__instances[class_] = class_(*args, **kwargs)
return __instances[class_]
return get_instance
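# Minimal demonstration of the decorator (illustrative, not part of the original example):
#     @singleton
#     class Config:
#         pass
#     a = Config()
#     b = Config()
#     assert a is b  # both names refer to the same, single instance
# Compared with non_stucking_example.py, which builds one OpenAIAdapter per thread, the
# singleton below makes both threads share a single AsyncAzureOpenAI client, which is what
# this example uses to reproduce the hang.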
class AsyncThreadingHelper:
def __init__(self, async_function: Callable):
self.__async_function = async_function
def __process_callback(self, *args: Any) -> None:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(
self.__async_function(*args)
)
loop.close()
def run_async_task_on_thread(self, args: Any) -> None:
thread = threading.Thread(
target=self.__process_callback,
args=args
)
thread.start()
@singleton
class OpenAIAdapter:
def __init__(self):
self.__openai_client = AsyncAzureOpenAI(
azure_endpoint=" AZURE ENDPOINT ",
api_key=" API KEY ",
max_retries=1,
api_version="2023-05-15",
timeout=10
)
async def get_chat_response(self, text: str = "How are you?"):
_ = await self.__openai_client.chat.completions.create(
messages=[{'role': 'system', 'content': "Generate a response for the user's message."},{'role': 'user', 'content': text}],
model="gpt-4",
max_tokens=800,
presence_penalty=1.05,
temperature=0,
top_p=0.52,
stream=False,
timeout=10
)
async def some_process_on_thread_a(openai_adapter: OpenAIAdapter):
print("A: Start some process on thread A")
await openai_adapter.get_chat_response()
print("A: Finish some process on thread A")
async def some_process_on_thread_b(openai_adapter: OpenAIAdapter):
print("B: Start some process on different thread")
await openai_adapter.get_chat_response()
print("B: Finish some process on different thread")
async def main():
# Create OpenAI object on thread A
openai_adapter = OpenAIAdapter()
for i in range(10):
print(f"Start iteration {i}")
# Let's call an OpenAI call on thread B
if i == 5:
async_threading_helper = AsyncThreadingHelper(
async_function=some_process_on_thread_b
)
async_threading_helper.run_async_task_on_thread(
args=(
openai_adapter,
)
)
# Call OpenAI on thread A
await some_process_on_thread_a(openai_adapter)
if __name__ == '__main__':
asyncio.run(main())
| [
"Generate a response for the user's message."
] |
2024-01-10 | lanceshih/btgym | btgym~algorithms~aac.py | ###############################################################################
#
# Copyright (C) 2017 Andrew Muzikin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
from logbook import Logger, StreamHandler
from btgym.algorithms.memory import Memory
from btgym.algorithms.rollout import make_data_getter
from btgym.algorithms.runner import BaseEnvRunnerFn, RunnerThread
from btgym.algorithms.math_utils import log_uniform
from btgym.algorithms.nn.losses import value_fn_loss_def, rp_loss_def, pc_loss_def, aac_loss_def, ppo_loss_def
from btgym.algorithms.utils import feed_dict_rnn_context, feed_dict_from_nested, batch_stack
from btgym.spaces import DictSpace as ObSpace # now can simply be gym.Dict
class BaseAAC(object):
"""
Base Asynchronous Advantage Actor Critic algorithm framework class with auxiliary control tasks and
option to run several instances of environment for every worker in vectorized fashion, PAAC-like.
Can be configured to run with different losses and policies.
Auxiliary tasks implementation borrows heavily from Kosuke Miyoshi code, under Apache License 2.0:
https://miyosuda.github.io/
https://github.com/miyosuda/unreal
Original A3C code comes from OpenAI repository under MIT licence:
https://github.com/openai/universe-starter-agent
Papers:
https://arxiv.org/abs/1602.01783
https://arxiv.org/abs/1611.05397
"""
def __init__(self,
env,
task,
policy_config,
log_level,
_log_name='AAC',
on_policy_loss=aac_loss_def,
off_policy_loss=aac_loss_def,
vr_loss=value_fn_loss_def,
rp_loss=rp_loss_def,
pc_loss=pc_loss_def,
runner_fn_ref=BaseEnvRunnerFn,
random_seed=None,
model_gamma=0.99, # decay
model_gae_lambda=1.00, # GAE lambda
model_beta=0.01, # entropy regularizer
opt_max_env_steps=10 ** 7,
opt_decay_steps=None,
opt_end_learn_rate=None,
opt_learn_rate=1e-4,
opt_decay=0.99,
opt_momentum=0.0,
opt_epsilon=1e-8,
rollout_length=20,
time_flat=False,
episode_train_test_cycle=(1,0),
episode_summary_freq=2, # every i`th environment episode
env_render_freq=10, # every i`th environment episode
model_summary_freq=100, # every i`th algorithm iteration
test_mode=False, # gym_atari test mode
replay_memory_size=2000,
replay_batch_size=None,
replay_rollout_length=None,
use_off_policy_aac=False,
use_reward_prediction=False,
use_pixel_control=False,
use_value_replay=False,
rp_lambda=1.0, # aux tasks loss weights
pc_lambda=1.0,
vr_lambda=1.0,
off_aac_lambda=1,
gamma_pc=0.9, # pixel change gamma-decay - not used
rp_reward_threshold=0.1, # r.prediction: abs.rewards values bigger than this are considered non-zero
rp_sequence_size=3, # r.prediction sampling
clip_epsilon=0.1,
num_epochs=1,
pi_prime_update_period=1,
_use_target_policy=False, # target policy tracking behavioral one with delay
_aux_render_modes=None,
**kwargs):
"""
Args:
env: environment instance or list of instances
task: int, parent worker id
policy_config: policy estimator class and configuration dictionary
log_level: int, logbook.level
_log_name: str, class-wide logger name, internal
on_policy_loss: callable returning tensor holding on_policy training loss graph and summaries
off_policy_loss: callable returning tensor holding off_policy training loss graph and summaries
vr_loss: callable returning tensor holding value replay loss graph and summaries
rp_loss: callable returning tensor holding reward prediction loss graph and summaries
pc_loss: callable returning tensor holding pixel_control loss graph and summaries
runner_fn_ref: callable defining environment runner execution logic
random_seed: int or None
model_gamma: scalar, gamma discount factor
model_gae_lambda: scalar, GAE lambda
model_beta: entropy regularization beta, scalar or [high_bound, low_bound] for log_uniform.
opt_max_env_steps: int, total number of environment steps to run training on.
opt_decay_steps: int, learn ratio decay steps, in number of environment steps.
opt_end_learn_rate: scalar, final learn rate
opt_learn_rate: start learn rate, scalar or [high_bound, low_bound] for log_uniform distr.
            opt_decay: scalar, optimizer decay, if applicable.
            opt_momentum: scalar, optimizer momentum, if applicable.
opt_epsilon: scalar, optimizer epsilon
rollout_length: int, on-policy rollout length
time_flat: bool, flatten rnn time-steps in rollouts while training - see `Notes` below
episode_train_test_cycle: tuple or list as (train_number, test_number), def=(1,0): enables infinite
loop such as: run `train_number` of train data episodes,
than `test_number` of test data episodes, repeat. Should be consistent
with provided dataset parameters (test data should exist if `test_number > 0`)
episode_summary_freq: int, write episode summary for every i'th episode
env_render_freq: int, write environment rendering summary for every i'th train step
model_summary_freq: int, write model summary for every i'th train step
test_mode: bool, True: Atari, False: BTGym
replay_memory_size: int, in number of experiences
replay_batch_size: int, mini-batch size for off-policy training, def = 1
replay_rollout_length: int off-policy rollout length by def. equals on_policy_rollout_length
use_off_policy_aac: bool, use full AAC off-policy loss instead of Value-replay
use_reward_prediction: bool, use aux. off-policy reward prediction task
use_pixel_control: bool, use aux. off-policy pixel control task
use_value_replay: bool, use aux. off-policy value replay task (not used if use_off_policy_aac=True)
rp_lambda: reward prediction loss weight, scalar or [high, low] for log_uniform distr.
pc_lambda: pixel control loss weight, scalar or [high, low] for log_uniform distr.
vr_lambda: value replay loss weight, scalar or [high, low] for log_uniform distr.
off_aac_lambda: off-policy AAC loss weight, scalar or [high, low] for log_uniform distr.
gamma_pc: NOT USED
rp_reward_threshold: scalar, reward prediction classification threshold, above which reward is 'non-zero'
rp_sequence_size: int, reward prediction sample size, in number of experiences
clip_epsilon: scalar, PPO: surrogate L^clip epsilon
num_epochs: int, num. of SGD runs for every train step, val. > 1 should be used with caution.
pi_prime_update_period: int, PPO: pi to pi_old update period in number of train steps, def: 1
_use_target_policy: bool, PPO: use target policy (aka pi_old), delayed by `pi_prime_update_period` delay
_aux_render_modes: additional visualisationas to include in per-episode rendering summary, internal
Note:
- On `time_flat` arg:
There are two alternatives to run RNN part of policy estimator:
a. Feed initial RNN state for every experience frame in rollout
                    (those are stored anyway if we want random memory replay sampling) and do single time-step RNN
advance for all experiences in a batch; this is when time_flat=True;
b. Reshape incoming batch after convolution part of network in time-wise fashion
for every rollout in a batch i.e. batch_size=number_of_rollouts and
rnn_timesteps=max_rollout_length. In this case we need to feed initial rnn_states
for rollouts only. There is some little extra work to pad rollouts to max_time_size
and feed true rollout lengths to rnn. Thus, when time_flat=False, we unroll RNN in
specified number of time-steps for every rollout.
                Both options have pros and cons:
Unrolling dynamic RNN is computationally more expensive but gives clearly faster convergence,
[possibly] due to the fact that RNN states for 2nd, 3rd, ... frames
of rollouts are computed using updated policy estimator, which is supposed to be
closer to optimal one. When time_flattened, every time-step uses RNN states computed
when rollout was collected (i.e. by behavioral policy estimator with older
parameters).
Nevertheless, time_flatting can be interesting
because one can safely shuffle training batch or mix on-policy and off-policy data in single mini-batch,
ensuring iid property and allowing, say, proper batch normalisation (this has yet to be tested).
"""
# Logging:
self.log_level = log_level
self.log_name = _log_name
self.task = task
StreamHandler(sys.stdout).push_application()
self.log = Logger('{}_{}'.format(self.log_name, self.task), level=self.log_level)
# Get direct traceback:
try:
self.random_seed = random_seed
if self.random_seed is not None:
np.random.seed(self.random_seed)
tf.set_random_seed(self.random_seed)
self.log.debug('rnd_seed:{}, log_u_sample_(0,1]x5: {}'.
format(random_seed, log_uniform([1e-10,1], 5)))
if kwargs != {}:
self.log.warning('Unexpected kwargs found: {}, ignored.'.format(kwargs))
self.env_list = env
try:
assert isinstance(self.env_list, list)
except AssertionError:
self.env_list = [env]
ref_env = self.env_list[0] # reference instance to get obs shapes etc.
try:
assert isinstance(ref_env.observation_space, ObSpace)
except AssertionError:
self.log.exception(
'expected environment observation space of type {}, got: {}'.\
format(ObSpace, type(ref_env.observation_space))
)
raise AssertionError
self.policy_class = policy_config['class_ref']
self.policy_kwargs = policy_config['kwargs']
# Losses:
self.on_policy_loss = on_policy_loss
self.off_policy_loss = off_policy_loss
self.vr_loss = vr_loss
self.rp_loss = rp_loss
self.pc_loss = pc_loss
            # Environment runner runtime function:
self.runner_fn_ref = runner_fn_ref
# AAC specific:
self.model_gamma = model_gamma # decay
self.model_gae_lambda = model_gae_lambda # general advantage estimator lambda
self.model_beta = log_uniform(model_beta, 1) # entropy reg.
self.time_flat = time_flat
# Optimizer
self.opt_max_env_steps = opt_max_env_steps
self.opt_learn_rate = log_uniform(opt_learn_rate, 1)
if opt_end_learn_rate is None:
self.opt_end_learn_rate = self.opt_learn_rate
else:
self.opt_end_learn_rate = opt_end_learn_rate
if opt_decay_steps is None:
self.opt_decay_steps = self.opt_max_env_steps
else:
self.opt_decay_steps = opt_decay_steps
self.opt_decay = opt_decay
self.opt_epsilon = opt_epsilon
self.opt_momentum = opt_momentum
self.rollout_length = rollout_length
# Data sampling control:
self.num_train_episodes = episode_train_test_cycle[0]
self.num_test_episodes = episode_train_test_cycle[-1]
try:
assert self.num_train_episodes + self.num_test_episodes > 0 and \
self.num_train_episodes >= 0 and \
self.num_test_episodes >= 0
except AssertionError:
self.log.exception(
'Train/test episode cycle values could not be both zeroes or negative, got: train={}, test={}'.\
format(self.num_train_episodes, self.num_test_episodes)
)
raise AssertionError
self.current_train_episode = 0
self.current_test_episode = 0
# Summaries :
self.episode_summary_freq = episode_summary_freq
self.env_render_freq = env_render_freq
self.model_summary_freq = model_summary_freq
# If True - use ATARI gym env.:
self.test_mode = test_mode
# UNREAL/AUX and Off-policy specific:
self.off_aac_lambda = log_uniform(off_aac_lambda, 1)
self.rp_lambda = log_uniform(rp_lambda, 1)
self.pc_lambda = log_uniform(pc_lambda, 1)
self.vr_lambda = log_uniform(vr_lambda, 1)
self.gamma_pc = gamma_pc
self.replay_memory_size = replay_memory_size
if replay_rollout_length is not None:
self.replay_rollout_length = replay_rollout_length
else:
self.replay_rollout_length = rollout_length # by default off-rollout equals on-policy one
self.rp_sequence_size = rp_sequence_size
self.rp_reward_threshold = rp_reward_threshold
if replay_batch_size is not None:
self.replay_batch_size = replay_batch_size
else:
self.replay_batch_size = len(self.env_list) # by default off-batch equals on-policy one
# PPO related:
self.clip_epsilon = clip_epsilon
self.num_epochs = num_epochs
self.pi_prime_update_period = pi_prime_update_period
# On/off switchers for off-policy training and auxiliary tasks:
self.use_off_policy_aac = use_off_policy_aac
self.use_reward_prediction = use_reward_prediction
self.use_pixel_control = use_pixel_control
if use_off_policy_aac:
self.use_value_replay = False # v-replay is redundant in this case
else:
self.use_value_replay = use_value_replay
self.use_any_aux_tasks = use_value_replay or use_pixel_control or use_reward_prediction
self.use_memory = self.use_any_aux_tasks or self.use_off_policy_aac
self.use_target_policy = _use_target_policy
self.log.notice('learn_rate: {:1.6f}, entropy_beta: {:1.6f}'.format(self.opt_learn_rate, self.model_beta))
if self.use_off_policy_aac:
self.log.notice('off_aac_lambda: {:1.6f}'.format(self.off_aac_lambda,))
if self.use_any_aux_tasks:
self.log.notice('vr_lambda: {:1.6f}, pc_lambda: {:1.6f}, rp_lambda: {:1.6f}'.
format(self.vr_lambda, self.pc_lambda, self.rp_lambda))
if _aux_render_modes is not None:
self.aux_render_modes = list(_aux_render_modes)
else:
self.aux_render_modes = []
#self.log.notice(
# 'AAC_{}: max_steps: {}, decay_steps: {}, end_rate: {:1.6f},'.
# format(self.task, self.opt_max_env_steps, self.opt_decay_steps, self.opt_end_learn_rate))
self.worker_device = "/job:worker/task:{}/cpu:0".format(task)
# Update policy configuration
self.policy_kwargs.update(
{
'ob_space': ref_env.observation_space.shape,
'ac_space': ref_env.action_space.n,
'rp_sequence_size': self.rp_sequence_size,
'aux_estimate': self.use_any_aux_tasks,
}
)
# Start building graphs:
self.log.debug('started building graphs')
# PS:
with tf.device(tf.train.replica_device_setter(1, worker_device=self.worker_device)):
self.network = self._make_policy('global')
# Worker:
with tf.device(self.worker_device):
self.local_network = pi = self._make_policy('local')
if self.use_target_policy:
self.local_network_prime = pi_prime = self._make_policy('local_prime')
else:
self.local_network_prime = pi_prime = self._make_dummy_policy()
# Meant for Batch-norm layers:
pi.update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope='.*local.*')
self.log.debug('local_network_upd_ops_collection:\n{}'.format(pi.update_ops))
self.log.debug('\nlocal_network_var_list_to_save:')
for v in pi.var_list:
self.log.debug('{}: {}'.format(v.name, v.get_shape()))
# Learning rate annealing:
self.learn_rate_decayed = tf.train.polynomial_decay(
self.opt_learn_rate,
self.global_step + 1,
self.opt_decay_steps,
self.opt_end_learn_rate,
power=1,
cycle=False,
)
clip_epsilon = tf.cast(self.clip_epsilon * self.learn_rate_decayed / self.opt_learn_rate, tf.float32)
# Freeze training if train_phase is False:
train_learn_rate = self.learn_rate_decayed * tf.cast(pi.train_phase, tf.float64)
self.log.debug('learn rate ok')
# On-policy AAC loss definition:
self.on_pi_act_target = tf.placeholder(
tf.float32, [None, ref_env.action_space.n], name="on_policy_action_pl"
)
self.on_pi_adv_target = tf.placeholder(tf.float32, [None], name="on_policy_advantage_pl")
self.on_pi_r_target = tf.placeholder(tf.float32, [None], name="on_policy_return_pl")
on_pi_loss, on_pi_summaries = self.on_policy_loss(
act_target=self.on_pi_act_target,
adv_target=self.on_pi_adv_target,
r_target=self.on_pi_r_target,
pi_logits=pi.on_logits,
pi_vf=pi.on_vf,
pi_prime_logits=pi_prime.on_logits,
entropy_beta=self.model_beta,
epsilon=clip_epsilon,
name='on_policy',
verbose=True
)
# Start accumulating total loss:
self.loss = on_pi_loss
model_summaries = on_pi_summaries
# Off-policy losses:
self.off_pi_act_target = tf.placeholder(
tf.float32, [None, ref_env.action_space.n], name="off_policy_action_pl")
self.off_pi_adv_target = tf.placeholder(tf.float32, [None], name="off_policy_advantage_pl")
self.off_pi_r_target = tf.placeholder(tf.float32, [None], name="off_policy_return_pl")
if self.use_off_policy_aac:
# Off-policy AAC loss graph mirrors on-policy:
off_pi_loss, off_pi_summaries = self.off_policy_loss(
act_target=self.off_pi_act_target,
adv_target=self.off_pi_adv_target,
r_target=self.off_pi_r_target,
pi_logits=pi.off_logits,
pi_vf=pi.off_vf,
pi_prime_logits=pi_prime.off_logits,
entropy_beta=self.model_beta,
epsilon=clip_epsilon,
name='off_policy',
verbose=False
)
self.loss = self.loss + self.off_aac_lambda * off_pi_loss
model_summaries += off_pi_summaries
if self.use_pixel_control:
# Pixel control loss:
self.pc_action = tf.placeholder(tf.float32, [None, ref_env.action_space.n], name="pc_action")
self.pc_target = tf.placeholder(tf.float32, [None, None, None], name="pc_target")
pc_loss, pc_summaries = self.pc_loss(
actions=self.pc_action,
targets=self.pc_target,
pi_pc_q=pi.pc_q,
name='off_policy',
verbose=True
)
self.loss = self.loss + self.pc_lambda * pc_loss
# Add specific summary:
model_summaries += pc_summaries
if self.use_value_replay:
# Value function replay loss:
self.vr_target = tf.placeholder(tf.float32, [None], name="vr_target")
vr_loss, vr_summaries = self.vr_loss(
r_target=self.vr_target,
pi_vf=pi.vr_value,
name='off_policy',
verbose=True
)
self.loss = self.loss + self.vr_lambda * vr_loss
model_summaries += vr_summaries
if self.use_reward_prediction:
# Reward prediction loss:
self.rp_target = tf.placeholder(tf.float32, [None, 3], name="rp_target")
rp_loss, rp_summaries = self.rp_loss(
rp_targets=self.rp_target,
pi_rp_logits=pi.rp_logits,
name='off_policy',
verbose=True
)
self.loss = self.loss + self.rp_lambda * rp_loss
model_summaries += rp_summaries
#grads = tf.gradients(self.loss, pi.var_list)
# Clipped gradients:
self.grads, _ = tf.clip_by_global_norm(
tf.gradients(self.loss, pi.var_list),
40.0
)
# Copy weights from the parameter server to the local model
self.sync = self.sync_pi = tf.group(*[v1.assign(v2) for v1, v2 in zip(pi.var_list, self.network.var_list)])
if self.use_target_policy:
# Copy weights from new policy model to target one:
self.sync_pi_prime = tf.group(*[v1.assign(v2) for v1, v2 in zip(pi_prime.var_list, pi.var_list)])
grads_and_vars = list(zip(self.grads, self.network.var_list))
# Set global_step incremention equal to observation space batch size:
obs_space_keys = list(pi.on_state_in.keys())
assert 'external' in obs_space_keys,\
'Expected observation space to contain `external` mode, got: {}'.format(obs_space_keys)
self.inc_step = self.global_step.assign_add(tf.shape(pi.on_state_in['external'])[0])
# Each worker gets a different set of adam optimizer parameters:
self.optimizer = tf.train.AdamOptimizer(train_learn_rate, epsilon=1e-5)
#self.optimizer = tf.train.RMSPropOptimizer(
# learning_rate=train_learn_rate,
# decay=self.opt_decay,
# momentum=self.opt_momentum,
# epsilon=self.opt_epsilon,
#)
#self.train_op = tf.group(*pi.update_ops, opt.apply_gradients(grads_and_vars), self.inc_step)
#self.train_op = tf.group(opt.apply_gradients(grads_and_vars), self.inc_step)
self.train_op = self.optimizer.apply_gradients(grads_and_vars)
# Add model-wide statistics:
with tf.name_scope('model'):
model_summaries += [
tf.summary.scalar("grad_global_norm", tf.global_norm(self.grads)),
tf.summary.scalar("var_global_norm", tf.global_norm(pi.var_list)),
tf.summary.scalar("learn_rate", train_learn_rate),
#tf.summary.scalar("learn_rate", self.learn_rate_decayed), # cause actual rate is a jaggy due to test freezes
tf.summary.scalar("total_loss", self.loss),
]
self.summary_writer = None
self.local_steps = 0
self.log.debug('train op defined')
# Model stat. summary:
self.model_summary_op = tf.summary.merge(model_summaries, name='model_summary')
# Episode-related summaries:
self.ep_summary = dict(
# Summary placeholders
render_atari=tf.placeholder(tf.uint8, [None, None, None, 1]),
total_r=tf.placeholder(tf.float32, ),
cpu_time=tf.placeholder(tf.float32, ),
final_value=tf.placeholder(tf.float32, ),
steps=tf.placeholder(tf.int32, ),
)
if self.test_mode:
# For Atari:
self.ep_summary['render_op'] = tf.summary.image("model/state", self.ep_summary['render_atari'])
else:
# BTGym rendering:
self.ep_summary.update(
{
mode: tf.placeholder(tf.uint8, [None, None, None, None], name=mode + '_pl')
for mode in self.env_list[0].render_modes + self.aux_render_modes
}
)
self.ep_summary['render_op'] = tf.summary.merge(
[tf.summary.image(mode, self.ep_summary[mode])
for mode in self.env_list[0].render_modes + self.aux_render_modes]
)
# Episode stat. summary:
self.ep_summary['btgym_stat_op'] = tf.summary.merge(
[
tf.summary.scalar('episode_train/total_reward', self.ep_summary['total_r']),
tf.summary.scalar('episode_train/cpu_time_sec', self.ep_summary['cpu_time']),
tf.summary.scalar('episode_train/final_value', self.ep_summary['final_value']),
tf.summary.scalar('episode_train/env_steps', self.ep_summary['steps'])
],
name='episode_train_btgym'
)
# Test episode stat. summary:
self.ep_summary['test_btgym_stat_op'] = tf.summary.merge(
[
tf.summary.scalar('episode_test/total_reward', self.ep_summary['total_r']),
tf.summary.scalar('episode_test/final_value', self.ep_summary['final_value']),
tf.summary.scalar('episode_test/env_steps', self.ep_summary['steps'])
],
name='episode_test_btgym'
)
self.ep_summary['atari_stat_op'] = tf.summary.merge(
[
tf.summary.scalar('episode/total_reward', self.ep_summary['total_r']),
tf.summary.scalar('episode/steps', self.ep_summary['steps'])
],
name='episode_atari'
)
# Replay memory_config:
if self.use_memory:
memory_config = dict(
class_ref=Memory,
kwargs=dict(
history_size=self.replay_memory_size,
max_sample_size=self.replay_rollout_length,
priority_sample_size=self.rp_sequence_size,
reward_threshold=self.rp_reward_threshold,
use_priority_sampling=self.use_reward_prediction,
task=self.task,
log_level=self.log_level,
)
)
else:
memory_config = None
# Make runners:
# `rollout_length` represents the number of "local steps": the number of time steps
# we run the policy before we get full rollout, run train step and update the parameters.
self.runners = []
task = 0 # Runners will have [worker_task][env_count] id's
for env in self.env_list:
self.runners.append(
RunnerThread(
env=env,
policy=pi,
runner_fn_ref=self.runner_fn_ref,
task=self.task + task,
rollout_length=self.rollout_length, # ~20
episode_summary_freq=self.episode_summary_freq,
env_render_freq=self.env_render_freq,
test=self.test_mode,
ep_summary=self.ep_summary,
memory_config=memory_config,
log_level=log_level,
)
)
task += 0.01
# Make rollouts provider[s]:
self.data_getter = [make_data_getter(runner.queue) for runner in self.runners]
self.log.debug('trainer.init() done')
except:
msg = 'Base class __init__() exception occurred.' +\
'\n\nPress `Ctrl-C` or jupyter:[Kernel]->[Interrupt] for clean exit.\n'
self.log.exception(msg)
raise RuntimeError(msg)
def _get_data(self):
"""
        Collect rollouts from every environment.
Returns:
dictionary of lists of data streams collected from every runner
"""
# TODO: nowait?
data_streams = [get_it() for get_it in self.data_getter]
return {key: [stream[key] for stream in data_streams] for key in data_streams[0].keys()}
def get_sample_config(self, _new_trial=False):
"""
Returns environment configuration parameters for next episode to sample.
By default is simple stateful iterator,
works correctly with `DTGymDataset` data class, repeating cycle:
- sample `num_train_episodes` from train data,
- sample `num_test_episodes` from test data.
Convention: supposed to override dummy method of local policy instance, see inside ._make_policy() method
Returns:
configuration dictionary of type `btgym.datafeed.base.EnvResetConfig`
"""
# sess = tf.get_default_session()
if self.current_train_episode < self.num_train_episodes:
episode_type = 0 # train
self.current_train_episode += 1
self.log.debug(
'c_1, c_train={}, c_test={}, type={}'.
format(self.current_train_episode, self.current_test_episode, episode_type)
)
else:
if self.current_test_episode < self.num_test_episodes:
episode_type = 1 # test
self.current_test_episode += 1
self.log.debug(
'c_2, c_train={}, c_test={}, type={}'.
format(self.current_train_episode, self.current_test_episode, episode_type)
)
else:
# cycle end, reset and start new (rec. depth 1)
self.current_train_episode = 0
self.current_test_episode = 0
self.log.debug(
'c_3, c_train={}, c_test={}'.
format(self.current_train_episode, self.current_test_episode)
)
return self.get_sample_config(_new_trial=True)
# Compose btgym.datafeed.base.EnvResetConfig-consistent dict:
sample_config = dict(
episode_config=dict(
get_new=True,
sample_type=episode_type,
b_alpha=1.0,
b_beta=1.0
),
trial_config=dict(
get_new=_new_trial,
sample_type=episode_type,
b_alpha=1.0,
b_beta=1.0
)
)
return sample_config
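    # Illustrative cycle (not executed here): with episode_train_test_cycle=(2, 1) successive
    # calls yield sample_type 0, 0, 1, then the counters reset (requesting a new trial) and the
    # pattern repeats, i.e. two train episodes are followed by one test episode.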
def _make_policy(self, scope):
"""
Configures and instantiates policy network and ops.
Note:
`global` name_scope network should be defined first.
Args:
scope: name scope
Returns:
policy instance
"""
with tf.variable_scope(scope):
# Make policy instance:
network = self.policy_class(**self.policy_kwargs)
if scope not in 'global':
try:
# For locals those should be already defined:
assert hasattr(self, 'global_step') and \
hasattr(self, 'global_episode') and \
hasattr(self, 'inc_episode')
# Add attrs to local:
network.global_step = self.global_step
network.global_episode = self.global_episode
network.inc_episode= self.inc_episode
# Override:
network.get_sample_config = self.get_sample_config
except AssertionError:
self.log.exception(
'`global` name_scope network should be defined before any `local` ones.'.
format(self.task)
)
raise RuntimeError
else:
# Set counters:
self.global_step = tf.get_variable(
"global_step",
[],
tf.int32,
initializer=tf.constant_initializer(
0,
dtype=tf.int32
),
trainable=False
)
self.global_episode = tf.get_variable(
"global_episode",
[],
tf.int32,
initializer=tf.constant_initializer(
0,
dtype=tf.int32
),
trainable=False
)
# Increment episode count:
self.inc_episode = self.global_episode.assign_add(1)
return network
def _make_dummy_policy(self):
class _Dummy(object):
"""
Policy plug when target network is not used.
"""
def __init__(self):
self.on_state_in = None
self.off_state_in = None
self.on_lstm_state_pl_flatten = None
self.off_lstm_state_pl_flatten = None
self.on_a_r_in = None
self.off_a_r_in = None
self.on_logits = None
self.off_logits = None
self.on_vf = None
self.off_vf = None
self.on_batch_size = None
self.on_time_length = None
self.off_batch_size = None
self.off_time_length = None
return _Dummy()
def start(self, sess, summary_writer, **kwargs):
"""
Executes all initializing operations,
starts environment runner[s].
Supposed to be called by parent worker just before training loop starts.
Args:
sess: tf session object.
kwargs: not used by default.
"""
try:
# Copy weights from global to local:
sess.run(self.sync)
# Start thread_runners:
self._start_runners(sess, summary_writer)
except:
msg = 'start() exception occurred' + \
'\n\nPress `Ctrl-C` or jupyter:[Kernel]->[Interrupt] for clean exit.\n'
self.log.exception(msg)
raise RuntimeError(msg)
def _start_runners(self, sess, summary_writer):
"""
Args:
sess:
summary_writer:
Returns:
"""
for runner in self.runners:
runner.start_runner(sess, summary_writer) # starting runner threads
self.summary_writer = summary_writer
def _get_rp_feeder(self, batch):
"""
Returns feed dictionary for `reward prediction` loss estimation subgraph.
"""
feeder = feed_dict_from_nested(self.local_network.rp_state_in, batch['state'])
feeder.update(
{
self.rp_target: batch['rp_target'],
self.local_network.rp_batch_size: batch['batch_size'],
}
)
return feeder
def _get_vr_feeder(self, batch):
"""
Returns feed dictionary for `value replay` loss estimation subgraph.
"""
if not self.use_off_policy_aac: # use single pass of network on same off-policy batch
feeder = feed_dict_from_nested(self.local_network.vr_state_in, batch['state'])
feeder.update(feed_dict_rnn_context(self.local_network.vr_lstm_state_pl_flatten, batch['context']))
feeder.update(
{
self.local_network.vr_batch_size: batch['batch_size'],
self.local_network.vr_time_length: batch['time_steps'],
self.local_network.vr_a_r_in: batch['last_action_reward'],
self.vr_target: batch['r']
}
)
else:
feeder = {self.vr_target: batch['r']} # redundant actually :)
return feeder
def _get_pc_feeder(self, batch):
"""
Returns feed dictionary for `pixel control` loss estimation subgraph.
"""
if not self.use_off_policy_aac: # use single pass of network on same off-policy batch
feeder = feed_dict_from_nested(self.local_network.pc_state_in, batch['state'])
feeder.update(
feed_dict_rnn_context(self.local_network.pc_lstm_state_pl_flatten, batch['context']))
feeder.update(
{
self.local_network.pc_a_r_in: batch['last_action_reward'],
self.pc_action: batch['action'],
self.pc_target: batch['pixel_change']
}
)
else:
feeder = {self.pc_action: batch['action'], self.pc_target: batch['pixel_change']}
return feeder
def process_data(self, sess, data, is_train):
"""
Processes data, composes train step feed dictionary.
Args:
sess: tf session obj.
data (dict): data dictionary
is_train (bool): is data provided are train or test
Returns:
feed_dict (dict): train step feed dictionary
"""
# Process minibatch for on-policy train step:
on_policy_rollouts = data['on_policy']
on_policy_batch = batch_stack(
[
r.process(
gamma=self.model_gamma,
gae_lambda=self.model_gae_lambda,
size=self.rollout_length,
time_flat=self.time_flat,
) for r in on_policy_rollouts
]
)
# Feeder for on-policy AAC loss estimation graph:
feed_dict = feed_dict_from_nested(self.local_network.on_state_in, on_policy_batch['state'])
feed_dict.update(
feed_dict_rnn_context(self.local_network.on_lstm_state_pl_flatten, on_policy_batch['context'])
)
feed_dict.update(
{
self.local_network.on_a_r_in: on_policy_batch['last_action_reward'],
self.local_network.on_batch_size: on_policy_batch['batch_size'],
self.local_network.on_time_length: on_policy_batch['time_steps'],
self.on_pi_act_target: on_policy_batch['action'],
self.on_pi_adv_target: on_policy_batch['advantage'],
self.on_pi_r_target: on_policy_batch['r'],
self.local_network.train_phase: is_train, # Zeroes learn rate, [+ batch_norm]
}
)
if self.use_target_policy:
feed_dict.update(
feed_dict_from_nested(self.local_network_prime.on_state_in, on_policy_batch['state'])
)
feed_dict.update(
feed_dict_rnn_context(self.local_network_prime.on_lstm_state_pl_flatten, on_policy_batch['context'])
)
feed_dict.update(
{
self.local_network_prime.on_batch_size: on_policy_batch['batch_size'],
self.local_network_prime.on_time_length: on_policy_batch['time_steps'],
self.local_network_prime.on_a_r_in: on_policy_batch['last_action_reward']
}
)
if self.use_memory:
# Process rollouts from replay memory:
off_policy_rollouts = data['off_policy']
off_policy_batch = batch_stack(
[
r.process(
gamma=self.model_gamma,
gae_lambda=self.model_gae_lambda,
size=self.replay_rollout_length,
time_flat=self.time_flat,
) for r in off_policy_rollouts
]
)
# Feeder for off-policy AAC loss estimation graph:
off_policy_feed_dict = feed_dict_from_nested(self.local_network.off_state_in, off_policy_batch['state'])
off_policy_feed_dict.update(
feed_dict_rnn_context(self.local_network.off_lstm_state_pl_flatten, off_policy_batch['context']))
off_policy_feed_dict.update(
{
self.local_network.off_a_r_in: off_policy_batch['last_action_reward'],
self.local_network.off_batch_size: off_policy_batch['batch_size'],
self.local_network.off_time_length: off_policy_batch['time_steps'],
self.off_pi_act_target: off_policy_batch['action'],
self.off_pi_adv_target: off_policy_batch['advantage'],
self.off_pi_r_target: off_policy_batch['r'],
}
)
if self.use_target_policy:
off_policy_feed_dict.update(
feed_dict_from_nested(self.local_network_prime.off_state_in, off_policy_batch['state'])
)
off_policy_feed_dict.update(
{
self.local_network_prime.off_batch_size: off_policy_batch['batch_size'],
self.local_network_prime.off_time_length: off_policy_batch['time_steps'],
self.local_network_prime.off_a_r_in: off_policy_batch['last_action_reward']
}
)
off_policy_feed_dict.update(
feed_dict_rnn_context(
self.local_network_prime.off_lstm_state_pl_flatten,
off_policy_batch['context']
)
)
feed_dict.update(off_policy_feed_dict)
# Update with reward prediction subgraph:
if self.use_reward_prediction:
# Rebalanced 50/50 sample for RP:
rp_rollouts = data['off_policy_rp']
rp_batch = batch_stack([rp.process_rp(self.rp_reward_threshold) for rp in rp_rollouts])
feed_dict.update(self._get_rp_feeder(rp_batch))
# Pixel control ...
if self.use_pixel_control:
feed_dict.update(self._get_pc_feeder(off_policy_batch))
# VR...
if self.use_value_replay:
feed_dict.update(self._get_vr_feeder(off_policy_batch))
return feed_dict
def process_summary(self, sess, data, model_data=None):
"""
Fetches and writes summary data from `data` and `model_data`.
Args:
sess: tf summary obj.
data(dict): thread_runner rollouts and metadata
model_data(dict): model summary data
"""
# Every worker writes train episode summaries:
ep_summary_feeder = {}
# Look for train episode summaries from all env runners:
for stat in data['ep_summary']:
if stat is not None:
for key in stat.keys():
if key in ep_summary_feeder.keys():
ep_summary_feeder[key] += [stat[key]]
else:
ep_summary_feeder[key] = [stat[key]]
# Average values among thread_runners, if any, and write episode summary:
if ep_summary_feeder != {}:
ep_summary_feed_dict = {
self.ep_summary[key]: np.average(list) for key, list in ep_summary_feeder.items()
}
if self.test_mode:
# Atari:
fetched_episode_stat = sess.run(self.ep_summary['atari_stat_op'], ep_summary_feed_dict)
else:
# BTGym
fetched_episode_stat = sess.run(self.ep_summary['btgym_stat_op'], ep_summary_feed_dict)
self.summary_writer.add_summary(fetched_episode_stat, sess.run(self.global_episode))
self.summary_writer.flush()
# Every worker writes test episode summaries:
test_ep_summary_feeder = {}
# Look for test episode summaries:
for stat in data['test_ep_summary']:
if stat is not None:
for key in stat.keys():
if key in test_ep_summary_feeder.keys():
test_ep_summary_feeder[key] += [stat[key]]
else:
test_ep_summary_feeder[key] = [stat[key]]
# Average values among thread_runners, if any, and write episode summary:
if test_ep_summary_feeder != {}:
test_ep_summary_feed_dict = {
self.ep_summary[key]: np.average(list) for key, list in test_ep_summary_feeder.items()
}
fetched_test_episode_stat = sess.run(self.ep_summary['test_btgym_stat_op'], test_ep_summary_feed_dict)
self.summary_writer.add_summary(fetched_test_episode_stat, sess.run(self.global_episode))
# Look for renderings (chief worker only, always 0-numbered environment in a list):
if self.task == 0:
if data['render_summary'][0] is not None:
render_feed_dict = {
self.ep_summary[key]: pic for key, pic in data['render_summary'][0].items()
}
renderings = sess.run(self.ep_summary['render_op'], render_feed_dict)
self.summary_writer.add_summary(renderings, sess.run(self.global_episode))
self.summary_writer.flush()
# Every worker writes train episode summaries:
if model_data is not None:
self.summary_writer.add_summary(tf.Summary.FromString(model_data), sess.run(self.global_step))
self.summary_writer.flush()
def process(self, sess):
"""
Grabs an on_policy_rollout [and off_policy rollout[s] from replay memory] that's been produced
by the thread runner. If data identified as 'train data' - computes gradients and updates the parameters;
writes summaries if any. The update is then sent to the parameter server.
If on_policy_rollout identified as 'test data' - no policy update is performed (learn rate is set to zero);
Note that test data does not get stored in replay memory (thread runner area).
Writes all available summaries.
Args:
sess (tensorflow.Session): tf session obj.
"""
# Quick wrap to get direct traceback from this trainer if something goes wrong:
try:
# Collect data from child thread runners:
data = self._get_data()
# Copy weights from local policy to local target policy:
if self.use_target_policy and self.local_steps % self.pi_prime_update_period == 0:
sess.run(self.sync_pi_prime)
# Test or train: if at least one on-policy rollout from parallel runners is test one -
# set learn rate to zero for entire minibatch. Doh.
try:
is_train = not np.asarray([env['state']['metadata']['type'] for env in data['on_policy']]).any()
except KeyError:
is_train = True
if is_train:
# If there is no any test rollouts - copy weights from shared to local new_policy:
sess.run(self.sync_pi)
# self.log.debug('is_train: {}'.format(is_train))
feed_dict = self.process_data(sess, data, is_train)
# Say No to redundant summaries:
            write_model_summary =\
                self.local_steps % self.model_summary_freq == 0
            #fetches = [self.train_op, self.local_network.debug] # include policy debug shapes
            fetches = [self.train_op]
            if write_model_summary:
fetches_last = fetches + [self.model_summary_op, self.inc_step]
else:
fetches_last = fetches + [self.inc_step]
# Do a number of SGD train epochs:
# When doing more than one epoch, we actually use only last summary:
for i in range(self.num_epochs - 1):
fetched = sess.run(fetches, feed_dict=feed_dict)
fetched = sess.run(fetches_last, feed_dict=feed_dict)
            if write_model_summary:
model_summary = fetched[-2]
else:
model_summary = None
# Write down summaries:
self.process_summary(sess, data, model_summary)
self.local_steps += 1
# print debug info:
#for k, v in fetched[1].items():
# print('{}: {}'.format(k,v))
#print('\n')
#for k, v in feed_dict.items():
# try:
# print(k, v.shape)
# except:
# print(k, type(v))
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
except:
msg = 'process() exception occurred' + \
'\n\nPress `Ctrl-C` or jupyter:[Kernel]->[Interrupt] for clean exit.\n'
self.log.exception(msg)
raise RuntimeError(msg)
class Unreal(BaseAAC):
"""
Unreal: Asynchronous Advantage Actor Critic with auxiliary control tasks.
Auxiliary tasks implementation borrows heavily from Kosuke Miyoshi code, under Apache License 2.0:
https://miyosuda.github.io/
https://github.com/miyosuda/unreal
Original A3C code comes from OpenAI repository under MIT licence:
https://github.com/openai/universe-starter-agent
Papers:
https://arxiv.org/abs/1602.01783
https://arxiv.org/abs/1611.05397
"""
def __init__(self, **kwargs):
"""
See BaseAAC class args for details:
Args:
env: environment instance or list of instances
task: int, parent worker id
policy_config: policy estimator class and configuration dictionary
log_level: int, logbook.level
on_policy_loss: callable returning tensor holding on_policy training loss graph and summaries
off_policy_loss: callable returning tensor holding off_policy training loss graph and summaries
vr_loss: callable returning tensor holding value replay loss graph and summaries
rp_loss: callable returning tensor holding reward prediction loss graph and summaries
pc_loss: callable returning tensor holding pixel_control loss graph and summaries
random_seed: int or None
model_gamma: scalar, gamma discount factor
model_gae_lambda: scalar, GAE lambda
model_beta: entropy regularization beta, scalar or [high_bound, low_bound] for log_uniform.
opt_max_env_steps: int, total number of environment steps to run training on.
opt_decay_steps: int, learn ratio decay steps, in number of environment steps.
opt_end_learn_rate: scalar, final learn rate
opt_learn_rate: start learn rate, scalar or [high_bound, low_bound] for log_uniform distr.
opt_decay: scalar, optimizer decay, if applicable.
opt_momentum: scalar, optimizer momentum, if applicable.
opt_epsilon: scalar, optimizer epsilon
rollout_length: int, on-policy rollout length
time_flat: bool, flatten rnn time-steps in rollouts while training - see `Notes` below
episode_train_test_cycle: tuple or list as (train_number, test_number), def=(1,0): enables infinite
loop such as: run `train_number` of train data episodes,
then `test_number` of test data episodes, repeat. Should be consistent
with provided dataset parameters (test data should exist if `test_number > 0`)
episode_summary_freq: int, write episode summary for every i'th episode
env_render_freq: int, write environment rendering summary for every i'th train step
model_summary_freq: int, write model summary for every i'th train step
test_mode: bool, True: Atari, False: BTGym
replay_memory_size: int, in number of experiences
replay_batch_size: int, mini-batch size for off-policy training, def = 1
replay_rollout_length: int off-policy rollout length by def. equals on_policy_rollout_length
use_off_policy_aac: bool, use full AAC off-policy loss instead of Value-replay
use_reward_prediction: bool, use aux. off-policy reward prediction task
use_pixel_control: bool, use aux. off-policy pixel control task
use_value_replay: bool, use aux. off-policy value replay task (not used if use_off_policy_aac=True)
rp_lambda: reward prediction loss weight, scalar or [high, low] for log_uniform distr.
pc_lambda: pixel control loss weight, scalar or [high, low] for log_uniform distr.
vr_lambda: value replay loss weight, scalar or [high, low] for log_uniform distr.
off_aac_lambda: off-policy AAC loss weight, scalar or [high, low] for log_uniform distr.
gamma_pc: NOT USED
rp_reward_threshold: scalar, reward prediction classification threshold, above which reward is 'non-zero'
rp_sequence_size: int, reward prediction sample size, in number of experiences
clip_epsilon: scalar, PPO: surrogate L^clip epsilon
num_epochs: int, num. of SGD runs for every train step, val. > 1 should be used with caution.
pi_prime_update_period: int, PPO: pi to pi_old update period in number of train steps, def: 1
_use_target_policy: bool, PPO: use target policy (aka pi_old), delayed by `pi_prime_update_period` delay
Note:
- On `time_flat` arg:
There are two alternatives to run RNN part of policy estimator:
a. Feed initial RNN state for every experience frame in rollout
(those are stored anyway if we want random memory replay sampling) and do single time-step RNN
advance for all experiences in a batch; this is when time_flat=True;
b. Reshape incoming batch after convolution part of network in time-wise fashion
for every rollout in a batch i.e. batch_size=number_of_rollouts and
rnn_timesteps=max_rollout_length. In this case we need to feed initial rnn_states
for rollouts only. There is some little extra work to pad rollouts to max_time_size
and feed true rollout lengths to rnn. Thus, when time_flat=False, we unroll RNN in
specified number of time-steps for every rollout.
Both options have pros and cons:
Unrolling dynamic RNN is computationally more expensive but gives clearly faster convergence,
[possibly] due to the fact that RNN states for 2nd, 3rd, ... frames
of rollouts are computed using the updated policy estimator, which is supposed to be
closer to the optimal one. When time-flattened, every time-step uses RNN states computed
when the rollout was collected (i.e. by the behavioural policy estimator with older
parameters).
Nevertheless, time-flattening can be interesting
because one can safely shuffle the training batch or mix on-policy and off-policy data in a single mini-batch,
ensuring the iid property and allowing, say, proper batch normalisation (this has yet to be tested).
"""
super(Unreal, self).__init__(
_log_name='UNREAL',
**kwargs
)
class A3C(BaseAAC):
"""
Vanilla Asynchronous Advantage Actor Critic algorithm.
Based on original code taken from OpenAI repository under MIT licence:
https://github.com/openai/universe-starter-agent
Paper: https://arxiv.org/abs/1602.01783
"""
def __init__(self, **kwargs):
"""
A3C args are a subset of BaseAAC arguments, see `BaseAAC` class for descriptions.
Args:
env:
task:
policy_config:
log:
random_seed:
model_gamma:
model_gae_lambda:
model_beta:
opt_max_env_steps:
opt_decay_steps:
opt_end_learn_rate:
opt_learn_rate:
opt_decay:
opt_momentum:
opt_epsilon:
rollout_length:
episode_summary_freq:
env_render_freq:
model_summary_freq:
test_mode:
"""
super(A3C, self).__init__(
on_policy_loss=aac_loss_def,
use_off_policy_aac=False,
use_reward_prediction=False,
use_pixel_control=False,
use_value_replay=False,
_use_target_policy=False,
_log_name='A3C',
**kwargs
)
class PPO(BaseAAC):
"""
AAC with Proximal Policy Optimization surrogate L^Clip loss,
optionally augmented with auxiliary control tasks.
paper:
https://arxiv.org/pdf/1707.06347.pdf
Based on PPO-SGD code from OpenAI `Baselines` repository under MIT licence:
https://github.com/openai/baselines
Async. framework code comes from OpenAI repository under MIT licence:
https://github.com/openai/universe-starter-agent
"""
def __init__(self, **kwargs):
"""
PPO args are a subset of BaseAAC arguments, see `BaseAAC` class for descriptions.
Args:
env:
task:
policy_config:
log_level:
vr_loss:
rp_loss:
pc_loss:
random_seed:
model_gamma:
model_gae_lambda:
model_beta:
opt_max_env_steps:
opt_decay_steps:
opt_end_learn_rate:
opt_learn_rate:
opt_decay:
opt_momentum:
opt_epsilon:
rollout_length:
episode_summary_freq:
env_render_freq:
model_summary_freq:
test_mode:
replay_memory_size:
replay_rollout_length:
use_off_policy_aac:
use_reward_prediction:
use_pixel_control:
use_value_replay:
rp_lambda:
pc_lambda:
vr_lambda:
off_aac_lambda:
rp_reward_threshold:
rp_sequence_size:
clip_epsilon:
num_epochs:
pi_prime_update_period:
"""
super(PPO, self).__init__(
on_policy_loss=ppo_loss_def,
off_policy_loss=ppo_loss_def,
_use_target_policy=True,
_log_name='PPO',
**kwargs
)
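# A minimal, hypothetical illustration of how these trainer classes are usually referenced from a
# Launcher-style class_config_dict (keys follow that convention; the hyperparameter values below are
# illustrative assumptions, not defaults):
if __name__ == '__main__':
    example_trainer_config = dict(
        class_ref=PPO,  # or A3C / Unreal / BaseAAC
        kwargs=dict(
            rollout_length=20,
            clip_epsilon=0.1,
            num_epochs=4,
            pi_prime_update_period=1,
        ),
    )
    print('configured trainer:', example_trainer_config['class_ref'].__name__)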
| [] |
2024-01-10 | lanceshih/btgym | btgym~algorithms~nn~networks.py | # Original code comes from OpenAI repository under MIT licence:
#
# https://github.com/openai/universe-starter-agent
# https://github.com/openai/baselines
#
import numpy as np
import tensorflow as tf
import tensorflow.contrib.rnn as rnn
from tensorflow.contrib.layers import layer_norm as norm_layer
from tensorflow.python.util.nest import flatten as flatten_nested
from btgym.algorithms.nn.layers import normalized_columns_initializer, categorical_sample
from btgym.algorithms.nn.layers import linear, noisy_linear, conv2d, deconv2d, conv1d
from btgym.algorithms.utils import rnn_placeholders
def conv_2d_network(x,
ob_space,
ac_space,
conv_2d_layer_ref=conv2d,
conv_2d_num_filters=(32, 32, 64, 64),
conv_2d_filter_size=(3, 3),
conv_2d_stride=(2, 2),
pad="SAME",
dtype=tf.float32,
name='conv2d',
collections=None,
reuse=False,
**kwargs):
"""
Stage1 network: from preprocessed 2D input to estimated features.
Encapsulates convolutions + layer normalisation + nonlinearity. Can be shared.
Returns:
tensor holding state features;
"""
with tf.variable_scope(name, reuse=reuse):
#for i in range(conv_2d_num_layers):
for i, num_filters in enumerate(conv_2d_num_filters):
x = tf.nn.elu(
norm_layer(
conv_2d_layer_ref(
x,
num_filters,
"_layer_{}".format(i + 1),
conv_2d_filter_size,
conv_2d_stride,
pad,
dtype,
collections,
reuse
),
scope=name + "_layer_{}".format(i + 1)
)
)
# A3c/BaseAAC original paper design:
#x = tf.nn.elu(conv2d(x, 16, 'conv2d_1', [8, 8], [4, 4], pad, dtype, collections, reuse))
#x = tf.nn.elu(conv2d(x, 32, 'conv2d_2', [4, 4], [2, 2], pad, dtype, collections, reuse))
#x = tf.nn.elu(
# linear(batch_flatten(x), 256, 'conv_2d_dense', normalized_columns_initializer(0.01), reuse=reuse)
#)
return x
def conv_1d_network(x,
ob_space,
ac_space,
conv_1d_num_layers=4,
conv_1d_num_filters=32,
conv_1d_filter_size=3,
conv_1d_stride=2,
pad="SAME",
dtype=tf.float32,
collections=None,
reuse=False):
"""
Stage1 network: from preprocessed 1D input to estimated features.
Encapsulates convolutions, [possibly] skip-connections etc. Can be shared.
Returns:
tensor holding state features;
"""
for i in range(conv_1d_num_layers):
x = tf.nn.elu(
conv1d(
x,
conv_1d_num_filters,
"conv1d_{}".format(i + 1),
conv_1d_filter_size,
conv_1d_stride,
pad,
dtype,
collections,
reuse
)
)
return x
def lstm_network(
x,
lstm_sequence_length,
lstm_class=rnn.BasicLSTMCell,
lstm_layers=(256,),
name='lstm',
reuse=False,
**kwargs
):
"""
Stage2 network: from features to flattened LSTM output.
Defines [multi-layered] dynamic [possibly shared] LSTM network.
Returns:
batch-wise flattened output tensor;
lstm initial state tensor;
lstm state output tensor;
lstm flattened feed placeholders as tuple.
"""
with tf.variable_scope(name, reuse=reuse):
# Flatten, add action/reward and expand with fake [time] batch? dim to feed LSTM bank:
#x = tf.concat([x, a_r] ,axis=-1)
#x = tf.concat([batch_flatten(x), a_r], axis=-1)
#x = tf.expand_dims(x, [0])
# Define LSTM layers:
lstm = []
for size in lstm_layers:
lstm += [lstm_class(size)] #, state_is_tuple=True)]
lstm = rnn.MultiRNNCell(lstm, state_is_tuple=True)
# Get time_dimension as [1]-shaped tensor:
step_size = tf.expand_dims(tf.shape(x)[1], [0])
lstm_init_state = lstm.zero_state(1, dtype=tf.float32)
lstm_state_pl = rnn_placeholders(lstm.zero_state(1, dtype=tf.float32))
lstm_state_pl_flatten = flatten_nested(lstm_state_pl)
lstm_outputs, lstm_state_out = tf.nn.dynamic_rnn(
lstm,
x,
initial_state=lstm_state_pl,
sequence_length=lstm_sequence_length,
time_major=False
)
#x_out = tf.reshape(lstm_outputs, [-1, lstm_layers[-1]])
x_out = lstm_outputs
return x_out, lstm_init_state, lstm_state_out, lstm_state_pl_flatten
def dense_aac_network(x, ac_space, name='dense_aac', linear_layer_ref=noisy_linear, reuse=False):
"""
Stage3 network: from LSTM flattened output to advantage actor-critic.
Returns:
logits tensor
value function tensor
action sampling function.
"""
with tf.variable_scope(name, reuse=reuse):
# Center-logits:
logits = norm_layer(
linear_layer_ref(
x=x,
size=ac_space,
name='action',
initializer=normalized_columns_initializer(0.01),
reuse=reuse
),
center=True,
scale=False,
)
# logits = linear_layer_ref(
# x=x,
# size=ac_space,
# name='action',
# initializer=normalized_columns_initializer(0.01),
# reuse=reuse
# )
vf = tf.reshape(
linear_layer_ref(
x=x,
size=1,
name="value",
initializer=normalized_columns_initializer(1.0),
reuse=reuse
),
[-1]
)
sample = categorical_sample(logits, ac_space)[0, :]
return logits, vf, sample
def dense_rp_network(x, linear_layer_ref=noisy_linear):
"""
Stage3 network: From shared convolutions to reward-prediction task output tensor.
"""
# print('x_shape:', x.get_shape())
#x = tf.reshape(x, [1, -1]) # flatten to pretend we got batch of size 1
# Fully connected x128 followed by 3-way classifier [with softmax], as in paper:
x = tf.nn.elu(
linear_layer_ref(
x=x,
size=128,
name='rp_dense',
initializer=normalized_columns_initializer(0.01)
)
)
logits = linear_layer_ref(
x=x,
size=3,
name='rp_classifier',
initializer=normalized_columns_initializer(0.01)
)
# Note: softmax is actually not here but inside loss operation (see losses.py)
return logits
def pixel_change_2d_estimator(ob_space, pc_estimator_stride=(2, 2), **kwargs):
"""
Defines tf operation for estimating `pixel change` as subsampled absolute difference of two states.
Note:
crops input array by one pix from either side; --> 1D signal to be shaped as [signal_length, 3]
"""
input_state = tf.placeholder(tf.float32, list(ob_space), name='pc_change_est_state_in')
input_last_state = tf.placeholder(tf.float32, list(ob_space), name='pc_change_est_last_state_in')
x = tf.abs(tf.subtract(input_state, input_last_state)) # TODO: tf.square?
if x.shape[-2] <= 3:
x = tf.expand_dims(x, 0)[:, 1:-1, :, :] # Assume 1D signal, fake batch dim and crop H dim only
#x = tf.transpose(x, perm=[0, 1, 3, 2]) # Swap channels and height for
else:
x = tf.expand_dims(x, 0)[:, 1:-1, 1:-1, :] # True 2D, fake batch dim and crop H, W dims
x = tf.reduce_mean(x, axis=-1, keepdims=True)
x_out = tf.nn.max_pool(
x,
[1, pc_estimator_stride[0], pc_estimator_stride[1], 1],
[1, pc_estimator_stride[0], pc_estimator_stride[1], 1],
'SAME'
)
return input_state, input_last_state, x_out
def duelling_pc_network(x,
ac_space,
duell_pc_x_inner_shape=(9, 9, 32),
duell_pc_filter_size=(4, 4),
duell_pc_stride=(2, 2),
linear_layer_ref=noisy_linear,
reuse=False,
**kwargs):
"""
Stage3 network for `pixel control` task: from LSTM output to Q-aux. features tensor.
"""
x = tf.nn.elu(
linear_layer_ref(
x=x,
size=np.prod(duell_pc_x_inner_shape),
name='pc_dense',
initializer=tf.contrib.layers.xavier_initializer(),
reuse=reuse
)
)
x = tf.reshape(x, [-1] + list(duell_pc_x_inner_shape))
pc_a = deconv2d(x, ac_space, 'pc_advantage', duell_pc_filter_size, duell_pc_stride, reuse=reuse) # [None, 20, 20, ac_size]
pc_v = deconv2d(x, 1, 'pc_value_fn', duell_pc_filter_size, duell_pc_stride, reuse=reuse) # [None, 20, 20, 1]
# Q-value estimate using advantage mean,
# as (9) in "Dueling Network Architectures..." paper:
# https://arxiv.org/pdf/1511.06581.pdf
pc_a_mean = tf.reduce_mean(pc_a, axis=-1, keepdims=True)
pc_q = pc_v + pc_a - pc_a_mean # [None, 20, 20, ac_size]
return pc_q
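# Rough smoke-test sketch wiring the three stages together on a fake 42x42x1 observation.
# The intermediate reshapes, the 4-action space and the 256-unit LSTM size are assumptions that
# mirror how policy estimators are expected to use these builders; only the graph is constructed,
# nothing is run here.
if __name__ == '__main__':
    obs = tf.placeholder(tf.float32, [None, 42, 42, 1], name='example_obs')
    seq_len = tf.placeholder(tf.int32, [None], name='example_seq_len')
    features = conv_2d_network(obs, ob_space=(42, 42, 1), ac_space=4)  # -> [batch, 3, 3, 64]
    features_flat = tf.expand_dims(tf.reshape(features, [-1, 3 * 3 * 64]), 0)  # fake [1, time, depth] batch
    lstm_out, lstm_init_state, lstm_state_out, lstm_state_pl = lstm_network(features_flat, seq_len)
    logits, vf, sample = dense_aac_network(
        tf.reshape(lstm_out, [-1, 256]), ac_space=4, linear_layer_ref=linear
    )
    print(logits.get_shape(), vf.get_shape(), sample.get_shape())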
| [] |
2024-01-10 | lanceshih/btgym | btgym~algorithms~launcher.py | ###############################################################################
#
# Copyright (C) 2017 Andrew Muzikin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
#
# Original asynchronous framework code comes from OpenAI repository under MIT licence:
# https://github.com/openai/universe-starter-agent
#
import os
from logbook import Logger, StreamHandler, WARNING, NOTICE, INFO, DEBUG
import time
import psutil
import glob
from subprocess import PIPE
import signal
import numpy as np
import copy
from .worker import Worker
from .aac import A3C
from .policy import BaseAacPolicy
import sys
sys.path.insert(0,'..')
class Launcher():
"""
Configures and starts distributed TF training session with workers
running sets of separate instances of BTgym/Atari environment.
"""
def __init__(self,
env_config=None,
cluster_config=None,
policy_config=None,
trainer_config=None,
max_env_steps=None,
root_random_seed=None,
test_mode=False,
purge_previous=0,
log_level=None,
verbose=0):
"""
Args:
env_config (dict): environment class_config_dict, see 'Note' below.
cluster_config (dict): tf cluster configuration, see 'Note' below.
policy_config (dict): policy class_config_dict holding corr. policy class args.
trainer_config (dict): trainer class_config_dict holding corr. trainer class args.
max_env_steps (int): total number of environment steps to run training on.
root_random_seed (int): int or None
test_mode (bool): if True - use Atari gym env., BTGym otherwise.
purge_previous (int): keep or remove previous log files and saved checkpoints from log_dir:
{0 - keep, 1 - ask, 2 - remove}.
verbose (int): verbosity mode, {0 - WARNING, 1 - INFO, 2 - DEBUG}.
log_level (int): logbook level {DEBUG=10, INFO=11, NOTICE=12, WARNING=13},
overrides `verbose` arg.
Note:
class_config_dict: dictionary containing at least two keys:
- `class_ref`: reference to class constructor or function;
- `kwargs`: dictionary of keyword arguments, see corr. environment class args.
cluster_config: dictionary containing at least these keys:
- 'host': cluster host, def: '127.0.0.1'
- 'port': cluster port, def: 12222
- 'num_workers': number of workers to run, def: 1
- 'num_ps': number of parameter servers, def: 1
- 'num_envs': number of environments to run in parallel for each worker, def: 1
- 'log_dir': directory to save model and summaries, def: './tmp/btgym_aac_log'
"""
self.env_config = dict(
class_ref=None,
kwargs=dict(
port=5000,
data_port=4999,
gym_id=None,
)
)
self.cluster_config = dict(
host='127.0.0.1',
port=12222,
num_workers=1,
num_ps=1,
log_dir='./tmp/btgym_aac_log',
num_envs=1,
)
self.policy_config = dict(
class_ref=BaseAacPolicy,
kwargs=dict(
lstm_layers=(256,)
)
)
self.trainer_config = dict(
class_ref=A3C,
kwargs={}
)
self.max_env_steps = 100 * 10 ** 6
self.ports_to_use = []
self.root_random_seed = root_random_seed
self.purge_previous = purge_previous
self.test_mode = test_mode
self.log_level = log_level
self.verbose = verbose
if max_env_steps is not None:
self.max_env_steps = max_env_steps
self.env_config = self._update_config_dict(self.env_config, env_config)
self.cluster_config = self._update_config_dict(self.cluster_config, cluster_config)
self.policy_config = self._update_config_dict(self.policy_config, policy_config)
self.trainer_config = self._update_config_dict(self.trainer_config, trainer_config)
self.trainer_config['kwargs']['test_mode'] = self.test_mode
# Logging config:
StreamHandler(sys.stdout).push_application()
if self.log_level is None:
log_levels = [(0, NOTICE), (1, INFO), (2, DEBUG)]
self.log_level = WARNING
for key, value in log_levels:
if key == self.verbose:
self.log_level = value
self.log = Logger('LauncherShell', level=self.log_level)
# Seeding:
if self.root_random_seed is not None:
np.random.seed(self.root_random_seed)
self.log.info('Random seed: {}'.format(self.root_random_seed))
# Seeding for workers:
workers_rnd_seeds = list(
np.random.randint(0, 2**30, self.cluster_config['num_workers'] + self.cluster_config['num_ps'])
)
# Log_dir:
if os.path.exists(self.cluster_config['log_dir']):
# Remove previous log files and saved model if opted:
if self.purge_previous > 0:
confirm = 'y'
if self.purge_previous < 2:
confirm = input('<{}> already exists. Override[y/n]? '.format(self.cluster_config['log_dir']))
if confirm in ('y', 'Y', 'yes'):
files = glob.glob(self.cluster_config['log_dir'] + '/*')
p = psutil.Popen(['rm', '-R', ] + files, stdout=PIPE, stderr=PIPE)
self.log.notice('Files in <{}> purged.'.format(self.cluster_config['log_dir']))
else:
self.log.notice('Appending to <{}>.'.format(self.cluster_config['log_dir']))
else:
os.makedirs(self.cluster_config['log_dir'])
self.log.notice('<{}> created.'.format(self.cluster_config['log_dir']))
for kwarg in ['port', 'data_port']:
assert kwarg in self.env_config['kwargs'].keys()
assert self.env_config['class_ref'] is not None
# Make cluster specification dict:
self.cluster_spec = self.make_cluster_spec(self.cluster_config)
# Configure workers:
self.workers_config_list = []
env_ports = np.arange(self.cluster_config['num_envs'])
worker_port = self.env_config['kwargs']['port'] # start value for BTGym comm. port
# TODO: Hacky, cause dataset is threadlocked; do: pass dataset as class_ref + kwargs_dict:
if self.test_mode:
dataset_instance = None
else:
dataset_instance = self.env_config['kwargs'].pop('dataset')
for key, spec_list in self.cluster_spec.items():
task_index = 0 # referenced farther as worker id
for _id in spec_list:
env_config = copy.deepcopy(self.env_config)
worker_config = {}
if key in 'worker':
# Configure worker BTgym environment:
if task_index == 0:
env_config['kwargs']['data_master'] = True # set worker_0 as chief and data_master
env_config['kwargs']['dataset'] = dataset_instance
env_config['kwargs']['render_enabled'] = True
else:
env_config['kwargs']['data_master'] = False
env_config['kwargs']['render_enabled'] = False # disable rendering for all but chief
# Add list of connection ports for every parallel env for each worker:
env_config['kwargs']['port'] = list(worker_port + env_ports)
worker_port += self.cluster_config['num_envs']
worker_config.update(
{
'env_config': env_config,
'policy_config': self.policy_config,
'trainer_config': self.trainer_config,
'cluster_spec': self.cluster_spec,
'job_name': key,
'task': task_index,
'test_mode': self.test_mode,
'log_dir': self.cluster_config['log_dir'],
'max_env_steps': self.max_env_steps,
'log_level': self.log_level,
'random_seed': workers_rnd_seeds.pop()
}
)
self.clear_port(env_config['kwargs']['port'])
self.workers_config_list.append(worker_config)
task_index += 1
self.clear_port(self.env_config['kwargs']['data_port'])
self.log.debug('Launcher ready.')
def make_cluster_spec(self, config):
"""
Composes cluster specification dictionary.
"""
cluster = {}
all_ps = []
port = config['port']
for _ in range(config['num_ps']):
self.clear_port(port)
self.ports_to_use.append(port)
all_ps.append('{}:{}'.format(config['host'], port))
port += 1
cluster['ps'] = all_ps
all_workers = []
for _ in range(config['num_workers']):
self.clear_port(port)
self.ports_to_use.append(port)
all_workers.append('{}:{}'.format(config['host'], port))
port += 1
cluster['worker'] = all_workers
return cluster
def clear_port(self, port_list):
"""
Kills process on specified ports list, if any.
"""
if not isinstance(port_list, list):
port_list = [port_list]
for port in port_list:
p = psutil.Popen(['lsof', '-i:{}'.format(port), '-t'], stdout=PIPE, stderr=PIPE)
pid = p.communicate()[0].decode()[:-1] # retrieving PID
if pid != '':
p = psutil.Popen(['kill', pid])
self.log.info('port {} cleared'.format(port))
def _update_config_dict(self, old_dict, new_dict=None):
"""
Service, updates nested dictionary with values from other one of same structure.
Args:
old_dict: dict to update to
new_dict: dict to update from
Returns:
new updated dict
"""
if type(new_dict) is not dict:
new_dict = old_dict # ~identity op
for key, value in new_dict.items():
if type(value) == dict:
old_dict[key] = self._update_config_dict(old_dict[key], value)
else:
old_dict[key] = value
return old_dict
def run(self):
"""
Launches processes:
distributed workers;
parameter_server.
"""
workers_list = []
p_servers_list = []
chief_worker = None
def signal_handler(signal, frame):
nonlocal workers_list
nonlocal chief_worker
nonlocal p_servers_list
def stop_worker(worker_list):
for worker in worker_list:
worker.terminate()
stop_worker(workers_list)
stop_worker([chief_worker])
stop_worker(p_servers_list)
# Start workers:
for worker_config in self.workers_config_list:
# Make:
worker = Worker(**worker_config)
# Launch:
worker.daemon = False
worker.start()
if worker.job_name in 'worker':
# Allow data-master to launch datafeed_server:
if worker_config['env_config']['kwargs']['data_master']:
time.sleep(5)
chief_worker = worker
else:
workers_list.append(worker)
else:
p_servers_list.append(worker)
# TODO: auto-launch tensorboard?
signal.signal(signal.SIGINT, signal_handler)
# Halt here:
msg = '\n********************************************************************************************\n' +\
'** Press `Ctrl-C` or jupyter:[Kernel]->[Interrupt] to stop training and close launcher. **\n' + \
'********************************************************************************************\n'
print(msg)
signal.pause()
# Wait every worker to finish:
for worker in workers_list:
worker.join()
self.log.notice('worker_{} has joined.'.format(worker.task))
chief_worker.join()
self.log.notice('chief_worker_{} has joined.'.format(chief_worker.task))
for ps in p_servers_list:
ps.join()
self.log.notice('parameter_server_{} has joined.'.format(ps.task))
# TODO: close tensorboard
self.log.notice('Launcher closed.')
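# Hypothetical launch sketch in Atari test mode; ports, gym_id and hyperparameters below are
# illustrative assumptions, not defaults (a BTgym run would also require a dataset entry in
# env_config['kwargs']):
if __name__ == '__main__':
    from btgym.algorithms.envs import AtariRescale42x42
    launcher = Launcher(
        env_config=dict(
            class_ref=AtariRescale42x42,
            kwargs=dict(port=5000, data_port=4999, gym_id='BreakoutDeterministic-v4'),
        ),
        cluster_config=dict(host='127.0.0.1', port=12222, num_workers=2, num_ps=1,
                            num_envs=1, log_dir='./tmp/test_gym_a3c'),
        policy_config=dict(class_ref=BaseAacPolicy, kwargs=dict(lstm_layers=(256,))),
        trainer_config=dict(class_ref=A3C, kwargs=dict(rollout_length=20)),
        max_env_steps=10 ** 6,
        test_mode=True,
        verbose=1,
    )
    launcher.run()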
| [] |
2024-01-10 | lanceshih/btgym | btgym~algorithms~runner~threadrunner.py | # Async. framework code comes from OpenAI repository under MIT licence:
# https://github.com/openai/universe-starter-agent
#
from logbook import Logger, StreamHandler, WARNING
import sys
import six.moves.queue as queue
import threading
from btgym.algorithms.runner import BaseEnvRunnerFn
class RunnerThread(threading.Thread):
"""
Async. framework code comes from OpenAI repository under MIT licence:
https://github.com/openai/universe-starter-agent
Despite the fact that BTgym is not a real-time environment [yet], the thread-runner approach is still used here. From
original `universe-starter-agent`:
`...One of the key distinctions between a normal environment and a universe environment
is that a universe environment is _real time_. This means that there should be a thread
that would constantly interact with the environment and tell it what to do. This thread is here.`
Another idea is to treat the ThreadRunner as an all-in-one data provider, shaping the data distribution
fed to the estimator from a single place.
So the replay memory lives here as well, along with some service functions (collecting summary data).
"""
def __init__(self,
env,
policy,
task,
rollout_length,
episode_summary_freq,
env_render_freq,
test,
ep_summary,
runner_fn_ref=BaseEnvRunnerFn,
memory_config=None,
log_level=WARNING, ):
"""
Args:
env: environment instance
policy: policy instance
task: int
rollout_length: int
episode_summary_freq: int
env_render_freq: int
test: Atari or BTGym
ep_summary: tf.summary
runner_fn_ref: callable defining runner execution logic
memory_config: replay memory configuration dictionary
log_level: int, logbook.level
"""
threading.Thread.__init__(self)
self.queue = queue.Queue(5)
self.rollout_length = rollout_length
self.env = env
self.last_features = None
self.policy = policy
self.runner_fn_ref = runner_fn_ref
self.daemon = True
self.sess = None
self.summary_writer = None
self.episode_summary_freq = episode_summary_freq
self.env_render_freq = env_render_freq
self.task = task
self.test = test
self.ep_summary = ep_summary
self.memory_config = memory_config
self.log_level = log_level
StreamHandler(sys.stdout).push_application()
self.log = Logger('ThreadRunner_{}'.format(self.task), level=self.log_level)
def start_runner(self, sess, summary_writer):
try:
self.sess = sess
self.summary_writer = summary_writer
self.start()
except:
msg = 'start() exception occurred.\n\nPress `Ctrl-C` or jupyter:[Kernel]->[Interrupt] for clean exit.\n'
self.log.exception(msg)
raise RuntimeError
def run(self):
"""Just keep running."""
try:
with self.sess.as_default():
self._run()
except:
msg = 'RunTime exception occurred.\n\nPress `Ctrl-C` or jupyter:[Kernel]->[Interrupt] for clean exit.\n'
self.log.exception(msg)
raise RuntimeError
def _run(self):
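# `runner_fn_ref` does the actual environment stepping; it returns a generator that yields
# one processed rollout (data dictionary) per `rollout_length` environment steps.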
rollout_provider = self.runner_fn_ref(
self.sess,
self.env,
self.policy,
self.task,
self.rollout_length,
self.summary_writer,
self.episode_summary_freq,
self.env_render_freq,
self.test,
self.ep_summary,
self.memory_config,
self.log
)
while True:
# the timeout variable exists because apparently, if one worker dies, the other workers
# won't die with it, unless the timeout is set to some large number. This is an empirical
# observation.
self.queue.put(next(rollout_provider), timeout=600.0)
| [] |
2024-01-10 | lanceshih/btgym | btgym~algorithms~envs.py | # Original code is taken from OpenAI repository under MIT licence:
# https://github.com/openai/universe-starter-agent
import numpy as np
import cv2
import gym
from gym import spaces
from btgym import DictSpace
def _process_frame42(frame):
frame = frame[34:34+160, :160]
# Resize by half, then down to 42x42 (essentially mipmapping). If
# we resize directly we lose pixels that, when mapped to 42x42,
# aren't close enough to the pixel boundary.
frame = cv2.resize(frame, (80, 80))
frame = cv2.resize(frame, (42, 42))
frame = frame.mean(2)
frame = frame.astype(np.float32)
frame *= (1.0 / 255.0)
frame = np.reshape(frame, [42, 42, 1])
return frame
class AtariRescale42x42(gym.ObservationWrapper):
"""
Gym wrapper, pipes Atari into BTgym algorithms, as the latter expect observations to be a DictSpace.
Makes the Atari environment return its state as a dictionary with a single key 'external' holding
the 42x42 grayscale visual output normalised to [0, 1].
"""
def __init__(self, env_id=None):
"""
Args:
env_id: conventional Gym id.
"""
assert "." not in env_id # universe environments have dots in names.
env = gym.make(env_id)
super(AtariRescale42x42, self).__init__(env)
self.observation_space = DictSpace(
{'external': spaces.Box(0.0, 1.0, [42, 42, 1], dtype=np.float32)}
)
def _observation(self, observation):
return {'external': _process_frame42(observation)} | [] |
2024-01-10 | lanceshih/btgym | btgym~algorithms~rollout.py | # Original A3C code comes from OpenAI repository under MIT licence:
# https://github.com/openai/universe-starter-agent
#
# Papers:
# https://arxiv.org/abs/1602.01783
# https://arxiv.org/abs/1611.05397
import numpy as np
from tensorflow.contrib.rnn import LSTMStateTuple
from btgym.algorithms.math_utils import discount
from btgym.algorithms.utils import batch_pad
# Info:
ExperienceConfig = ['position', 'state', 'action', 'reward', 'value', 'terminal', 'r', 'context',
'last_action_reward', 'pixel_change']
def make_data_getter(queue):
"""
Data stream getter constructor.
Args:
queue: instance of `Queue` class to get rollouts from.
Returns:
callable, returning dictionary of data.
"""
def pull_rollout_from_queue():
return queue.get(timeout=600.0)
return pull_rollout_from_queue
class Rollout(dict):
"""
Experience rollout as [nested] dictionary of lists of ndarrays, tuples and rnn states.
"""
def __init__(self):
super(Rollout, self).__init__()
self.size = 0
def add(self, values, _struct=None):
"""
Adds single experience frame to rollout.
Args:
values: [nested] dictionary of values.
"""
if _struct is None:
# Top level:
_struct = self
self.size += 1
top = True
else:
top = False
if isinstance(values, dict):
for key, value in values.items():
if key not in _struct.keys():
_struct[key] = {}
_struct[key] = self.add(value, _struct[key])
elif isinstance(values, tuple):
if not isinstance(_struct, tuple):
_struct = ['empty' for entry in values]
_struct = tuple([self.add(*pair) for pair in zip(values, _struct)])
elif isinstance(values, LSTMStateTuple):
if not isinstance(_struct, LSTMStateTuple):
_struct = LSTMStateTuple(0, 0)
c = self.add(values[0], _struct[0])
h = self.add(values[1], _struct[1])
_struct = LSTMStateTuple(c, h)
else:
if isinstance(_struct, list):
_struct += [values]
else:
_struct = [values]
if not top:
return _struct
def add_memory_sample(self, sample):
"""
Given replay memory sample as list of experience-dictionaries of `length`,
converts it to rollout of same `length`.
"""
for frame in sample:
self.add(frame)
def process(self, gamma, gae_lambda=1.0, size=None, time_flat=False):
"""
Converts single-trajectory rollout of experiences to dictionary of ready-to-feed arrays.
Computes rollout returns and the advantages.
Pads with zeroes to desired length, if size arg is given.
Args:
gamma: discount factor
gae_lambda: GAE lambda
size: if given and time_flat=False, pads outputs with zeroes along the `time` dim to exactly `size`.
time_flat: reduce time dimension to 1 step by stacking all experiences along batch dimension.
Returns:
batch as a [nested] dictionary of np.arrays, tuples and LSTMStateTuples, of size:
[1, time_size, depth] (or [1, size, depth] if `size` is given) when time_flat is False, with a single
`context` entry for the entire trajectory, i.e. of size [1, context_depth];
[batch_size, 1, depth] when time_flat is True, with batch_size = time_size and a `context` entry for
every experience frame, i.e. of size [batch_size, context_depth].
"""
# self._check_it()
batch = dict()
for key in self.keys() - {'context', 'reward', 'r', 'value', 'position'}:
batch[key] = self.as_array(self[key])
if time_flat:
batch['context'] = self.as_array(self['context'], squeeze_axis=1) # LSTM state for every frame
else:
batch['context'] = self.get_frame(0)['context'] # just get rollout initial LSTM state
#print('batch_context:')
#self._check_it(batch['context'])
# Total accumulated empirical return:
rewards = np.asarray(self['reward'])
rollout_r = self['r'][-1][0] # bootstrapped V_next or 0 if terminal
vpred_t = np.asarray(self['value'] + [rollout_r])
rewards_plus_v = np.asarray(self['reward'] + [rollout_r])
batch['r'] = discount(rewards_plus_v, gamma)[:-1]
# This formula for the advantage is (16) from "Generalized Advantage Estimation" paper:
# https://arxiv.org/abs/1506.02438
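# Here delta_t = r_t + gamma * V(s_{t+1}) - V(s_t), and the advantage is the
# discounted sum: A_t = sum_k (gamma * lambda)^k * delta_{t+k}.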
delta_t = rewards + gamma * vpred_t[1:] - vpred_t[:-1]
batch['advantage'] = discount(delta_t, gamma * gae_lambda)
# Shape it out:
if time_flat:
batch['batch_size'] = batch['advantage'].shape[0] # time length turned batch size
batch['time_steps'] = np.ones(batch['batch_size'])
else:
batch['time_steps'] = batch['advantage'].shape[0] # real non-padded time length
batch['batch_size'] = 1 # want rollout as a trajectory
if size is not None and not time_flat and batch['advantage'].shape[0] != size:
# Want all batches to be exact size for further batch stacking:
batch = batch_pad(batch, to_size=size)
return batch
def process_rp(self, reward_threshold=0.1):
"""
Processes rollout similarly to process() and estimates the reward prediction target from the first n-1 frames.
Args:
reward_threshold: reward values such as |r|> reward_threshold are classified as neg. or pos.
Returns:
Processed batch with size reduced by one and with extra `rp_target` key
holding one hot encodings for classes {zero, positive, negative}.
"""
# Remove last frame:
last_frame = self.pop_frame(-1)
batch = self.process(gamma=1)
# Make one hot vector for target rewards (i.e. reward taken from last of sampled frames):
r = last_frame['reward']
rp_t = np.zeros(3)
if r > reward_threshold:
rp_t[1] = 1.0 # positive [010]
elif r < - reward_threshold:
rp_t[2] = 1.0 # negative [001]
else:
rp_t[0] = 1.0 # zero [100]
batch['rp_target'] = rp_t[None,...]
batch['time_steps'] = batch['advantage'].shape[0]  # i.e. one less than the original rollout length
return batch
def get_frame(self, idx, _struct=None):
"""
Extracts single experience from rollout.
Args:
idx: experience position
Returns:
frame as [nested] dictionary
"""
# No idx range checks here!
if _struct is None:
_struct = self
if isinstance(_struct, dict) or type(_struct) == type(self):
frame = {}
for key, value in _struct.items():
frame[key] = self.get_frame(idx, value)
return frame
elif isinstance(_struct, tuple):
return tuple([self.get_frame(idx, value) for value in _struct])
elif isinstance(_struct, LSTMStateTuple):
return LSTMStateTuple(self.get_frame(idx, _struct[0]), self.get_frame(idx, _struct[1]))
else:
return _struct[idx]
def pop_frame(self, idx, _struct=None):
"""
Pops single experience from rollout.
Args:
idx: experience position
Returns:
frame as [nested] dictionary
"""
# No idx range checks here!
if _struct is None:
_struct = self
if isinstance(_struct, dict) or type(_struct) == type(self):
frame = {}
for key, value in _struct.items():
frame[key] = self.pop_frame(idx, value)
return frame
elif isinstance(_struct, tuple):
return tuple([self.pop_frame(idx, value) for value in _struct])
elif isinstance(_struct, LSTMStateTuple):
return LSTMStateTuple(self.pop_frame(idx, _struct[0]), self.pop_frame(idx, _struct[1]))
else:
return _struct.pop(idx)
def as_array(self, struct, squeeze_axis=None):
if isinstance(struct, dict):
out = {}
for key, value in struct.items():
out[key] = self.as_array(value, squeeze_axis)
return out
elif isinstance(struct, tuple):
return tuple([self.as_array(value, squeeze_axis) for value in struct])
elif isinstance(struct, LSTMStateTuple):
return LSTMStateTuple(self.as_array(struct[0], squeeze_axis), self.as_array(struct[1], squeeze_axis))
else:
if squeeze_axis is not None:
return np.squeeze(np.asarray(struct), axis=squeeze_axis)
else:
return np.asarray(struct)
def _check_it(self, _struct=None):
if _struct is None:
_struct = self
if type(_struct) == dict or type(_struct) == type(self):
for key, value in _struct.items():
print(key, ':')
self._check_it(_struct=value)
elif type(_struct) == tuple or type(_struct) == list:
print('tuple/list:')
for value in _struct:
self._check_it(_struct=value)
else:
try:
print('length: {}, type: {}, shape of element: {}\n'.format(len(_struct), type(_struct[0]), _struct[0].shape))
except:
print('length: {}, type: {}\n'.format(len(_struct), type(_struct[0])))
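# Tiny smoke-test sketch: keys mirror ExperienceConfig, while the values are made-up stand-ins
# (toy shapes, zero rewards/values) just to exercise add() and process(); not representative data.
if __name__ == '__main__':
    rollout = Rollout()
    for t in range(3):
        rollout.add({
            'position': t,
            'state': {'external': np.zeros((4, 4, 1), dtype=np.float32)},
            'action': np.array([1, 0]),
            'reward': 0.1,
            'value': 0.5,
            'terminal': False,
            'r': np.array([0.0]),  # bootstrapped value estimate, 0 for terminal
            'context': (),
            'last_action_reward': np.zeros(3, dtype=np.float32),
            'pixel_change': np.zeros((20, 20, 1), dtype=np.float32),
        })
    batch = rollout.process(gamma=0.99, gae_lambda=1.0)
    print('advantages:', batch['advantage'], 'returns:', batch['r'])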
| [] |
2024-01-10 | lanceshih/btgym | btgym~algorithms~worker.py | #
# Original A3C code comes from OpenAI repository under MIT licence:
# https://github.com/openai/universe-starter-agent
#
# Papers:
# https://arxiv.org/abs/1602.01783
# https://arxiv.org/abs/1611.05397
from logbook import Logger, StreamHandler
import sys
sys.path.insert(0,'..')
import os
import logging
import multiprocessing
import cv2
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO) # suppress tf.train.MonitoredTrainingSession deprecation warning
# TODO: switch to tf.train.MonitoredTrainingSession
class _FastSaver(tf.train.Saver):
"""
Disables the write_meta_graph argument,
which freezes the entire process and is mostly useless.
"""
def save(self,
sess,
save_path,
global_step=None,
latest_filename=None,
meta_graph_suffix="meta",
write_meta_graph=True):
super(_FastSaver, self).save(sess,
save_path,
global_step,
latest_filename,
meta_graph_suffix,
False)
class Worker(multiprocessing.Process):
"""
Distributed tf worker class.
Sets up environment, trainer and starts training process in supervised session.
"""
env_list = None
def __init__(self,
env_config,
policy_config,
trainer_config,
cluster_spec,
job_name,
task,
log_dir,
log_level,
max_env_steps,
random_seed=None,
test_mode=False):
"""
Args:
env_config: environment class_config_dict.
policy_config: model policy estimator class_config_dict.
trainer_config: algorithm class_config_dict.
cluster_spec: tf.cluster specification.
job_name: worker or parameter server.
task: integer number, 0 is chief worker.
log_dir: for tb summaries and checkpoints.
log_level: int, logbook.level
max_env_steps: number of environment steps to run training on
test_mode: if True - use Atari mode, BTGym otherwise.
Note:
- Conventional `self.global_step` refers to the number of environment steps,
summed over all environment instances, not to the number of policy optimizer train steps.
- Every worker can run several environments in parallel, as specified by `cluster_config['num_envs']`.
With 4 workers and num_envs=4, the total number of environments is 16. Every env instance has
its own ThreadRunner process.
- When using replay memory, keep in mind that every ThreadRunner keeps its own replay memory.
If memory_size = 2000, num_workers=4, num_envs=4 => total replay memory size equals 32 000 frames.
"""
super(Worker, self).__init__()
self.env_class = env_config['class_ref']
self.env_kwargs = env_config['kwargs']
self.policy_config = policy_config
self.trainer_class = trainer_config['class_ref']
self.trainer_kwargs = trainer_config['kwargs']
self.cluster_spec = cluster_spec
self.job_name = job_name
self.task = task
self.log_dir = log_dir
self.max_env_steps = max_env_steps
self.log_level = log_level
self.log = None
self.test_mode = test_mode
self.random_seed = random_seed
def run(self):
"""Worker runtime body.
"""
# Logging:
StreamHandler(sys.stdout).push_application()
self.log = Logger('Worker_{}'.format(self.task), level=self.log_level)
tf.reset_default_graph()
if self.test_mode:
import gym
# Define cluster:
cluster = tf.train.ClusterSpec(self.cluster_spec).as_cluster_def()
# Start tf.server:
if self.job_name in 'ps':
server = tf.train.Server(
cluster,
job_name=self.job_name,
task_index=self.task,
config=tf.ConfigProto(device_filters=["/job:ps"])
)
self.log.debug('parameters_server started.')
# Just block here:
server.join()
else:
server = tf.train.Server(
cluster,
job_name='worker',
task_index=self.task,
config=tf.ConfigProto(
intra_op_parallelism_threads=1, # original was: 1
inter_op_parallelism_threads=2 # original was: 2
)
)
self.log.debug('tf.server started.')
self.log.debug('making environments:')
# Making as many environments as many entries in env_config `port` list:
# TODO: Hacky-II: only one example of parallel [all] environments can be data-master and renderer
# TODO: measure data_server lags, maybe launch several instances
self.env_list = []
env_kwargs = self.env_kwargs.copy()
env_kwargs['log_level'] = self.log_level
port_list = env_kwargs.pop('port')
data_master = env_kwargs.pop('data_master')
render_enabled = env_kwargs.pop('render_enabled')
# Parallel envs. numbering:
if len(port_list) > 1:
task_id = 0.0
else:
task_id = 0
for port in port_list:
if not self.test_mode:
# Assume BTgym env. class:
self.log.debug('env at port_{} is data_master: {}'.format(port, data_master))
try:
self.env_list.append(
self.env_class(
port=port,
data_master=data_master,
render_enabled=render_enabled,
task= self.task + task_id,
**env_kwargs
)
)
data_master = False
render_enabled = False
self.log.info('set BTGym environment {} at port_{}.'.format(self.task + task_id, port))
task_id += 0.01
except:
self.log.exception(
'failed to make BTGym environment at port_{}.'.format(port)
)
raise RuntimeError
else:
# Assume atari testing:
try:
self.env_list.append(self.env_class(env_kwargs['gym_id']))
self.log.debug('set Gyn/Atari environment.')
except:
self.log.exception('failed to make Gym/Atari environment')
raise RuntimeError
# Define trainer:
trainer = self.trainer_class(
env=self.env_list,
task=self.task,
policy_config=self.policy_config,
log_level=self.log_level,
random_seed=self.random_seed,
**self.trainer_kwargs,
)
self.log.debug('trainer ok.')
# Saver-related:
variables_to_save = [v for v in tf.global_variables() if not v.name.startswith("local")]
local_variables = [v for v in tf.global_variables() if v.name.startswith("local")]
init_op = tf.variables_initializer(variables_to_save)
local_init_op = tf.variables_initializer(local_variables)
init_all_op = tf.global_variables_initializer()
saver = _FastSaver(variables_to_save)
self.log.debug('vars_to_save:')
for v in variables_to_save:
self.log.debug('{}: {}'.format(v.name, v.get_shape()))
def init_fn(ses):
self.log.info("initializing all parameters.")
ses.run(init_all_op)
config = tf.ConfigProto(device_filters=["/job:ps", "/job:worker/task:{}/cpu:0".format(self.task)])
logdir = os.path.join(self.log_dir, 'train')
summary_dir = logdir + "_{}".format(self.task)
summary_writer = tf.summary.FileWriter(summary_dir)
# TODO: switch to tf.train.MonitoredTrainingSession
sv = tf.train.Supervisor(
is_chief=(self.task == 0),
logdir=logdir,
saver=saver,
summary_op=None,
init_op=init_op,
local_init_op=local_init_op,
init_fn=init_fn,
#ready_op=tf.report_uninitialized_variables(variables_to_save),
ready_op=tf.report_uninitialized_variables(),
global_step=trainer.global_step,
save_model_secs=300,
)
self.log.info("connecting to the parameter server... ")
with sv.managed_session(server.target, config=config) as sess, sess.as_default():
#sess.run(trainer.sync)
trainer.start(sess, summary_writer)
# Note: `self.global_step` refers to number of environment steps
# summarized over all environment instances, not to number of policy optimizer train steps.
global_step = sess.run(trainer.global_step)
self.log.notice("started training at step: {}".format(global_step))
while not sv.should_stop() and global_step < self.max_env_steps:
trainer.process(sess)
global_step = sess.run(trainer.global_step)
# Ask for all the services to stop:
for env in self.env_list:
env.close()
sv.stop()
self.log.notice('reached {} steps, exiting.'.format(global_step))
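# Illustrative sketch of the per-worker config dict that Launcher assembles and passes into Worker;
# class references are left as None placeholders and all values here are assumptions, so this only
# shows the expected keys and constructor call, not a runnable training setup.
if __name__ == '__main__':
    example_worker_config = dict(
        env_config=dict(class_ref=None, kwargs=dict(port=[5000], data_master=True, render_enabled=True)),
        policy_config=dict(class_ref=None, kwargs={}),
        trainer_config=dict(class_ref=None, kwargs={}),
        cluster_spec={'ps': ['127.0.0.1:12222'], 'worker': ['127.0.0.1:12223']},
        job_name='worker',
        task=0,
        log_dir='./tmp/btgym_aac_log',
        log_level=13,
        max_env_steps=10 ** 6,
        random_seed=0,
        test_mode=False,
    )
    worker = Worker(**example_worker_config)
    print('worker configured for task:', worker.task)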
| [] |
2024-01-10 | lanceshih/btgym | btgym~algorithms~nn~layers.py | # Original code comes from OpenAI repository under MIT licence:
#
# https://github.com/openai/universe-starter-agent
# https://github.com/openai/baselines
#
import numpy as np
import tensorflow as tf
def normalized_columns_initializer(std=1.0):
def _initializer(shape, dtype=None, partition_info=None):
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
# def categorical_sample(logits, d):
# value = tf.squeeze(tf.multinomial(logits - tf.reduce_max(logits, [1], keepdims=True), 1), [1])
# return tf.one_hot(value, d)
def categorical_sample(logits, d):
value = tf.squeeze(tf.multinomial(logits, 1), [1])
one_hot = tf.one_hot(value, d, name='sample_one_hot')
return one_hot
# def categorical_sample(logits, d): # DET!
# value = tf.argmax(logits, axis=-1)
# one_hot = tf.one_hot(value, d, name='sample_one_hot')
# return one_hot
def linear(x, size, name, initializer=None, bias_init=0, reuse=False):
"""
Linear network layer.
"""
with tf.variable_scope(name, reuse=reuse):
w = tf.get_variable("/w", [x.get_shape()[1], size], initializer=initializer)
b = tf.get_variable("/b", [size], initializer=tf.constant_initializer(bias_init))
return tf.matmul(x, w) + b
def noisy_linear(x, size, name, bias=True, activation_fn=tf.identity, reuse=False, **kwargs):
"""
Noisy Net linear network layer using Factorised Gaussian noise;
Code by Andrew Liao, https://github.com/andrewliao11/NoisyNet-DQN
Papers:
https://arxiv.org/abs/1706.10295
https://arxiv.org/abs/1706.01905
"""
with tf.variable_scope(name, reuse=reuse):
# the function used in eq.7,8
def f(x):
return tf.multiply(tf.sign(x), tf.pow(tf.abs(x), 0.5))
# Initializer of \mu and \sigma
mu_init = tf.random_uniform_initializer(minval=-1*1/np.power(x.get_shape().as_list()[1], 0.5),
maxval=1*1/np.power(x.get_shape().as_list()[1], 0.5))
sigma_init = tf.constant_initializer(0.4/np.power(x.get_shape().as_list()[1], 0.5))
# Sample noise from gaussian
p = tf.random_normal([x.get_shape().as_list()[1], 1])
q = tf.random_normal([1, size])
f_p = f(p); f_q = f(q)
w_epsilon = f_p*f_q; b_epsilon = tf.squeeze(f_q)
# w = w_mu + w_sigma*w_epsilon
w_mu = tf.get_variable("/w_mu", [x.get_shape()[1], size], initializer=mu_init)
w_sigma = tf.get_variable("/w_sigma", [x.get_shape()[1], size], initializer=sigma_init)
w = w_mu + tf.multiply(w_sigma, w_epsilon)
ret = tf.matmul(x, w)
if bias:
# b = b_mu + b_sigma*b_epsilon
b_mu = tf.get_variable("/b_mu", [size], initializer=mu_init)
b_sigma = tf.get_variable("/b_sigma", [size], initializer=sigma_init)
b = b_mu + tf.multiply(b_sigma, b_epsilon)
return activation_fn(ret + b)
else:
return activation_fn(ret)
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32,
collections=None, reuse=False):
"""
2D convolution layer.
"""
with tf.variable_scope(name, reuse=reuse):
stride_shape = [1, stride[0], stride[1], 1]
filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]
w = tf.get_variable("W", filter_shape, dtype, initializer=tf.contrib.layers.xavier_initializer(),
collections=collections)
b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.constant_initializer(0.0),
collections=collections)
return tf.nn.conv2d(x, w, stride_shape, pad) + b
def deconv2d(x, output_channels, name, filter_size=(4, 4), stride=(2, 2),
dtype=tf.float32, collections=None, reuse=False):
"""
Deconvolution layer, paper:
http://www.matthewzeiler.com/wp-content/uploads/2017/07/cvpr2010.pdf
"""
with tf.variable_scope(name, reuse=reuse):
stride_shape = [1, stride[0], stride[1], 1]
batch_size = tf.shape(x)[0]
input_height = int(x.get_shape()[1])
input_width = int(x.get_shape()[2])
input_channels = int(x.get_shape()[3])
out_height = (input_height - 1) * stride[0] + filter_size[0]
out_width = (input_width - 1) * stride[1] + filter_size[1]
filter_shape = [filter_size[0], filter_size[1], output_channels, input_channels]
output_shape = tf.stack([batch_size, out_height, out_width, output_channels])
fan_in = np.prod(filter_shape[:2]) * input_channels
fan_out = np.prod(filter_shape[:2]) * output_channels
# initialize weights with random weights
w_bound = np.sqrt(6. / (fan_in + fan_out))
w = tf.get_variable("d_W", filter_shape, dtype, initializer=tf.contrib.layers.xavier_initializer(),
collections=collections)
b = tf.get_variable("d_b", [1, 1, 1, output_channels], initializer=tf.constant_initializer(0.0),
collections=collections)
return tf.nn.conv2d_transpose(x, w, output_shape,
strides=stride_shape,
padding='VALID') + b
def conv1d(x, num_filters, name, filter_size=3, stride=2, pad="SAME", dtype=tf.float32,
collections=None, reuse=False):
"""
1D convolution layer.
"""
with tf.variable_scope(name, reuse=reuse):
stride_shape = stride
# print('stride_shape:',stride_shape)
filter_shape = [filter_size, int(x.get_shape()[-1]), num_filters]
# print('filter_shape:', filter_shape)
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = np.prod(filter_shape[:2])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = np.prod(filter_shape[:1]) * num_filters
# initialize weights with random weights
w_bound = np.sqrt(6. / (fan_in + fan_out))
w = tf.get_variable("W", filter_shape, dtype, initializer=tf.contrib.layers.xavier_initializer(),
collections=collections)
b = tf.get_variable("b", [1, 1, num_filters], initializer=tf.constant_initializer(0.0),
collections=collections)
return tf.nn.conv1d(x, w, stride_shape, pad) + b
def conv2d_dw(x, num_filters, name='conv2d_dw', filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32,
collections=None, reuse=False):
"""
Depthwise 2D convolution layer. Slow, do not use.
"""
with tf.variable_scope(name, reuse=reuse):
stride_shape = [1, stride[0], stride[1], 1]
filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[-1]), num_filters]
fan_in = np.prod(filter_shape[:3])
fan_out = np.prod(filter_shape[:2]) * num_filters
# initialize weights with random weights
w_bound = np.sqrt(6. / (fan_in + fan_out))
w = tf.get_variable("W", filter_shape, dtype,
tf.contrib.layers.xavier_initializer(), collections=collections)
b = tf.get_variable("b", [1, 1, 1, num_filters * int(x.get_shape()[-1])],
initializer=tf.constant_initializer(0.0), collections=collections)
return tf.nn.depthwise_conv2d(x, w, stride_shape, pad, [1, 1]) + b
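# Quick shape-check sketch of the basic building blocks on a fake grayscale batch; layer names and
# sizes are arbitrary assumptions and only the graph is constructed, no session is run.
if __name__ == '__main__':
    x_img = tf.placeholder(tf.float32, [None, 42, 42, 1], name='example_input')
    h = tf.nn.elu(conv2d(x_img, 32, 'example_conv', (3, 3), (2, 2)))  # -> [batch, 21, 21, 32]
    h_flat = tf.reshape(h, [-1, 21 * 21 * 32])
    logits = linear(h_flat, 6, 'example_linear', normalized_columns_initializer(0.01))
    noisy_logits = noisy_linear(h_flat, 6, 'example_noisy_linear')
    action = categorical_sample(logits, 6)
    print(h.get_shape(), logits.get_shape(), noisy_logits.get_shape(), action.get_shape())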
| [] |
2024-01-10 | Muhammad-Ahsan-Rasheed/cohere-python | tests~test_client.py | import unittest
import cohere
from utils import get_api_key
API_KEY = get_api_key()
class TestClient(unittest.TestCase):
def test_client_name(self):
co = cohere.Client(API_KEY, client_name='test')
co.generate(model='medium', prompt='co:here', max_tokens=1)
| [] |
2024-01-10 | Muhammad-Ahsan-Rasheed/cohere-python | cohere~rerank.py | from typing import List, Optional, Dict, NamedTuple, Any, Iterator
from cohere.response import CohereObject
RerankDocument = NamedTuple("Document", [("text", str)])
RerankDocument.__doc__ = """
Returned by co.rerank; a dict which always contains `text` but can also contain arbitrary fields.
"""
class RerankResult(CohereObject):
def __init__(self,
document: Dict[str, Any] = None,
index: int = None,
relevance_score: float = None,
*args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.document = document
self.index = index
self.relevance_score = relevance_score
def __repr__(self) -> str:
score = self.relevance_score
index = self.index
if self.document is None:
return f"RerankResult<index: {index}, relevance_score: {score}>"
else:
text = self.document['text']
return f"RerankResult<document['text']: {text}, index: {index}, relevance_score: {score}>"
class Reranking(CohereObject):
def __init__(self,
response: Optional[Dict[str, Any]] = None,
**kwargs) -> None:
super().__init__(**kwargs, id=response.get('id'))
assert response is not None
self.results = self._results(response)
def _results(self, response: Dict[str, Any]) -> List[RerankResult]:
results = []
for res in response['results']:
if 'document' in res.keys():
results.append(
RerankResult(res['document'], res['index'], res['relevance_score']))
else:
results.append(
RerankResult(index=res['index'], relevance_score=res['relevance_score']))
return results
def __str__(self) -> str:
return str(self.results)
def __repr__(self) -> str:
return self.results.__repr__()
def __iter__(self) -> Iterator:
return iter(self.results)
def __getitem__(self, index) -> RerankResult:
return self.results[index]
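# Illustrative only: construct a Reranking object from a hand-made payload shaped like the API
# response this class expects; the id, scores and document text below are fabricated examples.
if __name__ == '__main__':
    fake_response = {
        'id': 'example-id',
        'results': [
            {'index': 1, 'relevance_score': 0.92, 'document': {'text': 'carson city is the capital of nevada'}},
            {'index': 0, 'relevance_score': 0.07},
        ],
    }
    reranking = Reranking(fake_response)
    for result in reranking:
        print(result)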
| [] |
2024-01-10 | Muhammad-Ahsan-Rasheed/cohere-python | cohere~client.py | import json
import sys
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Dict, List, Union
from urllib.parse import urljoin
import requests
from requests import Response
from requests.adapters import HTTPAdapter
from urllib3 import Retry
import cohere
from cohere.chat import Chat
from cohere.classify import Classification, Classifications
from cohere.classify import Example as ClassifyExample
from cohere.classify import LabelPrediction
from cohere.detectlang import DetectLanguageResponse, Language
from cohere.detokenize import Detokenization
from cohere.embeddings import Embeddings
from cohere.error import CohereError
from cohere.feedback import Feedback
from cohere.generation import Generations
from cohere.tokenize import Tokens
from cohere.summarize import SummarizeResponse
from cohere.rerank import Reranking
use_xhr_client = False
try:
from js import XMLHttpRequest
use_xhr_client = True
except ImportError:
pass
class Client:
def __init__(self,
api_key: str,
version: str = None,
num_workers: int = 64,
request_dict: dict = {},
check_api_key: bool = True,
client_name: str = None,
max_retries: int = 3) -> None:
"""
Initialize the client.
Args:
* api_key (str): Your API key.
* version (str): API version to use. Will use cohere.COHERE_VERSION by default.
* num_workers (int): Maximal number of threads for parallelized calls.
* request_dict (dict): Additional parameters for calls to requests.post
* check_api_key (bool): Whether to check the api key for validity on initialization.
* client_name (str): A string to identify your application for internal analytics purposes.
"""
self.api_key = api_key
self.api_url = cohere.COHERE_API_URL
self.batch_size = cohere.COHERE_EMBED_BATCH_SIZE
self._executor = ThreadPoolExecutor(num_workers)
self.num_workers = num_workers
self.request_dict = request_dict
self.request_source = 'python-sdk'
self.max_retries = max_retries
if client_name:
self.request_source += ":" + client_name
if version is None:
self.cohere_version = cohere.COHERE_VERSION
else:
self.cohere_version = version
if check_api_key:
try:
res = self.check_api_key()
if not res['valid']:
raise CohereError('invalid api key')
except CohereError as e:
raise CohereError(message=e.message, http_status=e.http_status, headers=e.headers)
def check_api_key(self) -> Response:
headers = {
'Authorization': 'BEARER {}'.format(self.api_key),
'Content-Type': 'application/json',
'Request-Source': 'python-sdk',
}
if self.cohere_version != '':
headers['Cohere-Version'] = self.cohere_version
url = urljoin(self.api_url, cohere.CHECK_API_KEY_URL)
if use_xhr_client:
response = self.__pyfetch(url, headers, None)
return response
else:
response = requests.request('POST', url, headers=headers)
try:
res = json.loads(response.text)
except Exception:
raise CohereError(message=response.text, http_status=response.status_code, headers=response.headers)
if 'message' in res.keys(): # has errors
raise CohereError(message=res['message'], http_status=response.status_code, headers=response.headers)
return res
def batch_generate(self, prompts: List[str], **kwargs) -> List[Generations]:
generations: List[Generations] = []
for prompt in prompts:
kwargs["prompt"] = prompt
generations.append(self.generate(**kwargs))
return generations
def generate(self,
prompt: str = None,
prompt_vars: object = {},
model: str = None,
preset: str = None,
num_generations: int = None,
max_tokens: int = None,
temperature: float = None,
k: int = None,
p: float = None,
frequency_penalty: float = None,
presence_penalty: float = None,
end_sequences: List[str] = None,
stop_sequences: List[str] = None,
return_likelihoods: str = None,
truncate: str = None,
logit_bias: Dict[int, float] = {}) -> Generations:
json_body = {
'model': model,
'prompt': prompt,
'prompt_vars': prompt_vars,
'preset': preset,
'num_generations': num_generations,
'max_tokens': max_tokens,
'temperature': temperature,
'k': k,
'p': p,
'frequency_penalty': frequency_penalty,
'presence_penalty': presence_penalty,
'end_sequences': end_sequences,
'stop_sequences': stop_sequences,
'return_likelihoods': return_likelihoods,
'truncate': truncate,
'logit_bias': logit_bias,
}
response = self._executor.submit(self.__request, cohere.GENERATE_URL, json=json_body)
return Generations(return_likelihoods=return_likelihoods, _future=response, client=self)
def chat(self, query: str, session_id: str = "", persona: str = "cohere", model: str = None) -> Chat:
json_body = {
'query': query,
'session_id': session_id,
'persona': persona,
'model': model,
}
response = self._executor.submit(self.__request, cohere.CHAT_URL, json=json_body)
return Chat(query=query, persona=persona, _future=response, client=self)
def embed(self, texts: List[str], model: str = None, truncate: str = 'NONE') -> Embeddings:
responses = []
json_bodys = []
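# Split the input into batches of at most COHERE_EMBED_BATCH_SIZE texts; each batch becomes
# one request payload, and the per-batch embeddings are concatenated back in order below.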
for i in range(0, len(texts), self.batch_size):
texts_batch = texts[i:i + self.batch_size]
json_bodys.append({
'model': model,
'texts': texts_batch,
'truncate': truncate,
})
if use_xhr_client:
for json_body in json_bodys:
response = self.__request(cohere.EMBED_URL, json=json_body)
responses.append(response['embeddings'])
else:
for result in self._executor.map(lambda json_body: self.__request(cohere.EMBED_URL, json=json_body),
json_bodys):
responses.extend(result['embeddings'])
return Embeddings(responses)
def classify(self,
inputs: List[str] = [],
model: str = None,
preset: str = None,
examples: List[ClassifyExample] = [],
truncate: str = None) -> Classifications:
examples_dicts: list[dict[str, str]] = []
for example in examples:
example_dict = {'text': example.text, 'label': example.label}
examples_dicts.append(example_dict)
json_body = {
'model': model,
'preset': preset,
'inputs': inputs,
'examples': examples_dicts,
'truncate': truncate,
}
response = self.__request(cohere.CLASSIFY_URL, json=json_body)
classifications = []
for res in response['classifications']:
labelObj = {}
for label, prediction in res['labels'].items():
labelObj[label] = LabelPrediction(prediction['confidence'])
classifications.append(
Classification(res['input'], res['prediction'], res['confidence'], labelObj, client=self, id=res["id"]))
return Classifications(classifications)
def summarize(self, text: str, model: str = None, length: str = None, format: str = None, temperature: float = None,
additional_instruction: str = None, abstractiveness: str = None) -> SummarizeResponse:
"""Return a generated summary of the specified length for the provided text.
Args:
text (str): Text to summarize.
model (str): (Optional) ID of the model.
length (str): (Optional) One of {"short", "medium", "long"}, defaults to "medium". \
Controls the length of the summary.
format (str): (Optional) One of {"paragraph", "bullets"}, defaults to "bullets". \
Controls the format of the summary.
abstractiveness (str): (Optional) One of {"high", "medium", "low"}, defaults to "high". \
Controls how close to the original text the summary is. "Low" abstractiveness \
summaries will lean towards reusing sentences verbatim, while "high" abstractiveness \
summaries will tend to paraphrase more.
temperature (float): Ranges from 0 to 5. Controls the randomness of the output. \
Lower values tend to generate more “predictable” output, while higher values \
tend to generate more “creative” output. The sweet spot is typically between 0 and 1.
additional_instruction (str): (Optional) Modifier for the underlying prompt, must \
complete the sentence "Generate a summary _".
Example:
```
res = co.summarize(text="Stock market report for today...")
print(res.summary)
```
Example:
```
res = co.summarize(
text="Stock market report for today...",
model="summarize-xlarge",
length="long",
format="bullets",
temperature=0.9,
additional_instruction="focusing on the highest performing stocks")
print(res.summary)
```
"""
json_body = {
'model': model,
'text': text,
'length': length,
'format': format,
'temperature': temperature,
'additional_instruction': additional_instruction,
'abstractiveness': abstractiveness,
}
# remove None values from the dict
json_body = {k: v for k, v in json_body.items() if v is not None}
response = self.__request(cohere.SUMMARIZE_URL, json=json_body)
return SummarizeResponse(id=response["id"], summary=response["summary"])
def batch_tokenize(self, texts: List[str]) -> List[Tokens]:
return [self.tokenize(t) for t in texts]
def tokenize(self, text: str) -> Tokens:
json_body = {'text': text}
return Tokens(_future=self._executor.submit(self.__request, cohere.TOKENIZE_URL, json=json_body))
def batch_detokenize(self, list_of_tokens: List[List[int]]) -> List[Detokenization]:
return [self.detokenize(t) for t in list_of_tokens]
def detokenize(self, tokens: List[int]) -> Detokenization:
json_body = {'tokens': tokens}
return Detokenization(_future=self._executor.submit(self.__request, cohere.DETOKENIZE_URL, json=json_body))
def detect_language(self, texts: List[str]) -> List[Language]:
json_body = {
"texts": texts,
}
response = self.__request(cohere.DETECT_LANG_URL, json=json_body)
results = []
for result in response["results"]:
results.append(Language(result["language_code"], result["language_name"]))
return DetectLanguageResponse(results)
def feedback(self, id: str, good_response: bool, desired_response: str = "", feedback: str = "") -> Feedback:
"""Give feedback on a response from the Cohere API to improve the model.
Can be used programmatically like so:
Example: a user accepts a model's suggestion in an assisted writing setting
```
generations = co.generate(f"Write me a polite email responding to the one below:\n{email}\n\nResponse:")
if user_accepted_suggestion:
generations[0].feedback(good_response=True)
```
Example: the user edits the model's suggestion
```
generations = co.generate(f"Write me a polite email responding to the one below:\n{email}\n\nResponse:")
if user_edits_suggestion:
generations[0].feedback(good_response=False, desired_response=user_edited_response)
```
Args:
id (str): the `id` associated with a generation from the Cohere API
good_response (bool): a boolean indicator as to whether the generation was good (True) or bad (False).
desired_response (str): an optional string of the response expected. To be used when a mistake has been
made or a better response exists.
feedback (str): an optional natural language description of the specific feedback about this generation.
Returns:
Feedback: a Feedback object
"""
json_body = {
'id': id,
'good_response': good_response,
'desired_response': desired_response,
'feedback': feedback,
}
self.__request(cohere.FEEDBACK_URL, json_body)
return Feedback(id=id, good_response=good_response, desired_response=desired_response, feedback=feedback)
def rerank(self,
query: str,
documents: Union[List[str], List[Dict[str, Any]]],
top_n: int = None) -> Reranking:
"""Returns an ordered list of documents ordered by their relevance to the provided query
Args:
query (str): The search query
documents (list[str], list[dict]): The documents to rerank
top_n (int): (optional) The number of results to return, defaults to returning all results
"""
parsed_docs = []
for doc in documents:
if isinstance(doc, str):
parsed_docs.append({'text': doc})
elif isinstance(doc, dict) and 'text' in doc:
parsed_docs.append(doc)
else:
raise CohereError(
message='invalid format for documents, must be a list of strings or dicts with a "text" key')
json_body = {
"query": query,
"documents": parsed_docs,
"top_n": top_n,
"return_documents": False
}
reranking = Reranking(self.__request(cohere.RERANK_URL, json=json_body))
for rank in reranking.results:
rank.document = parsed_docs[rank.index]
return reranking
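# Illustrative rerank call (hedged sketch, not from the original source); the query and
# documents are made-up examples of the two accepted input shapes.
# reranking = co.rerank(query="What is the capital of France?",
#                       documents=["Paris is the capital of France.", {"text": "Berlin is in Germany."}],
#                       top_n=1)
# for result in reranking.results:
#     print(result.index, result.document["text"])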
def __print_warning_msg(self, response: Response):
if 'X-API-Warning' in response.headers:
print("\033[93mWarning: {}\n\033[0m".format(response.headers['X-API-Warning']), file=sys.stderr)
def __pyfetch(self, url, headers, json_body) -> Response:
req = XMLHttpRequest.new()
req.open('POST', url, False)
for key, value in headers.items():
req.setRequestHeader(key, value)
try:
req.send(json_body)
except Exception:
raise CohereError(message=req.responseText, http_status=req.status, headers=req.getAllResponseHeaders())
res = json.loads(req.response)
if 'message' in res.keys():
raise CohereError(message=res['message'], http_status=req.status, headers=req.getAllResponseHeaders())
return res
def __request(self, endpoint, json=None) -> Any:
headers = {
'Authorization': 'BEARER {}'.format(self.api_key),
'Content-Type': 'application/json',
'Request-Source': self.request_source,
}
if self.cohere_version != '':
headers['Cohere-Version'] = self.cohere_version
url = urljoin(self.api_url, endpoint)
if use_xhr_client:
response = self.__pyfetch(url, headers, json.dumps(json))
self.__print_warning_msg(response)
return response
else:
with requests.Session() as session:
retries = Retry(
total=self.max_retries,
backoff_factor=0.5,
allowed_methods=['POST', 'GET'],
status_forcelist=[429, 500, 502, 503, 504]
)
session.mount('https://', HTTPAdapter(max_retries=retries))
session.mount('http://', HTTPAdapter(max_retries=retries))
response = session.request('POST', url, headers=headers, json=json, **self.request_dict)
try:
res = response.json()
except Exception:
raise CohereError(
message=response.text, http_status=response.status_code, headers=response.headers)
if 'message' in res: # has errors
raise CohereError(
message=res['message'], http_status=response.status_code, headers=response.headers)
self.__print_warning_msg(response)
return res
| [] |
2024-01-10 | Muhammad-Ahsan-Rasheed/cohere-python | cohere~chat.py | from concurrent.futures import Future
from typing import Any, Dict, Optional
from cohere.response import AsyncAttribute, CohereObject
class Chat(CohereObject):
"""
A chat object.
Attributes:
query (str): The query text.
persona (str): The persona name.
reply (str): The reply text.
session_id (str): The session ID.
Methods:
respond(response: str) -> Chat: Respond to the chat.
Example:
>>> chat = client.chat(query="Hello", persona="Alice")
>>> chat.reply
"Hello, how are you?"
>>> chat.session_id
"1234567890"
>>> chat = chat.respond("I'm fine, thanks.")
>>> chat.reply
"That's good to hear."
>>> chat.session_id
"1234567890"
"""
def __init__(self,
query: str,
persona: str,
response: Optional[Dict[str, Any]] = None,
*,
_future: Optional[Future] = None,
**kwargs) -> None:
super().__init__(**kwargs)
self.query = query
self.persona = persona
if _future is not None:
self._init_from_future(_future)
else:
assert response is not None
self.reply = self._reply(response)
self.session_id = self._session_id(response)
def _init_from_future(self, future: Future):
self.reply = AsyncAttribute(future, self._reply)
self.session_id = AsyncAttribute(future, self._session_id)
def _reply(self, response: Dict[str, Any]) -> str:
return response['reply']
def _session_id(self, response: Dict[str, Any]) -> str:
return response['session_id']
def respond(self, response: str) -> "Chat":
return self.client.chat(query=response, session_id=self.session_id, persona=self.persona)
| [] |
2024-01-10 | Muhammad-Ahsan-Rasheed/cohere-python | cohere~response.py | from concurrent.futures import Future
from typing import Any, Callable, Iterator
from xmlrpc.client import Boolean
from cohere.feedback import Feedback
class AsyncAttribute():
"""An attribute of an object that is lazily fetched.
`async_request` is a Future object that is expected to resolve to an object that will be consumed by `getter`.
`getter` is a function that receives the result of `async_request` and processes it into the desired attribute.
`getter` is only called once and its result is cached.
"""
def __init__(self, async_request: Future, getter: Callable[..., Any]) -> None:
self._request = async_request
self._getter = getter
self._resolved = False
def __len__(self):
return len(self.resolve())
def __iter__(self) -> Iterator:
return iter(self.resolve())
def __repr__(self):
return repr(self.resolve())
def __str__(self):
return str(self.resolve())
def is_resolved(self) -> Boolean:
return self._request.done()
def resolve(self) -> Any:
if "_result" in self.__dict__:
return self._result
self._result = self._getter(self._request.result())
return self._result
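# Hedged usage sketch (not part of the library): an AsyncAttribute wraps a Future and is
# only resolved on first access; the executor and getter below are illustrative.
# from concurrent.futures import ThreadPoolExecutor
# executor = ThreadPoolExecutor(1)
# future = executor.submit(lambda: {"reply": "hello"})
# attr = AsyncAttribute(future, lambda response: response["reply"])
# print(attr)  # first access calls resolve() and prints "hello"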
class CohereObject():
def __init__(self, client=None, id: str = None) -> None:
self.client = client
self.id = id
def __getattribute__(self, name: str) -> Any:
attr = super().__getattribute__(name)
if isinstance(attr, AsyncAttribute):
return attr.resolve()
else:
return attr
def __repr__(self) -> str:
contents = ''
exclude_list = ['iterator', 'client']
for k in self.__dict__.keys():
if k not in exclude_list:
contents += f'\t{k}: {self.__dict__[k]}\n'
output = f'cohere.{type(self).__name__} {{\n{contents}}}'
return output
def feedback(self, good_response: bool, desired_response: str = "", feedback: str = "") -> Feedback:
"""Give feedback on a response from the Cohere API to improve the model.
Can be used programmatically like so:
Example: a user accepts a model's suggestion in an assisted writing setting
```
generations = co.generate(f"Write me a polite email responding to the one below:\n{email}\n\nResponse:")
if user_accepted_suggestion:
generations[0].feedback(good_response=True)
```
Example: the user edits the model's suggestion
```
generations = co.generate(f"Write me a polite email responding to the one below:\n{email}\n\nResponse:")
if user_edits_suggestion:
generations[0].feedback(good_response=False, desired_response=user_edited_response)
```
Args:
good_response (bool): a boolean indicator as to whether the generation was good (True) or bad (False).
desired_response (str): an optional string of the response expected. To be used when a mistake has been
made or a better response exists.
feedback (str): an optional natural language description of the specific feedback about this generation.
Returns:
Feedback: a Feedback object
"""
return self.client.feedback(id=self.id,
good_response=good_response,
desired_response=desired_response,
feedback=feedback)
| [] |
2024-01-10 | solarapparition/agent-automata | agent_automata~builtin_toolkit~automaton_functions.py | """Run a specific automaton and its sub-automata."""
from functools import partial
import json
from pathlib import Path
from typing import Any, Callable, Mapping, Union
from agent_automata.engines import load_engine
from agent_automata.types import AutomatonRunner, Engine
async def save_text_to_workspace(
request: str, self_name: str, workspace_name: str
) -> str:
"""Save a file."""
try:
input_json = json.loads(request)
file_name = input_json["file_name"]
content = input_json["content"]
except (KeyError, json.JSONDecodeError):
return "Could not parse input. Please provide the input in the following format: {file_name: <file_name>, description: <description>, content: <content>}"
path: Path = Path(f"workspace/{workspace_name}/{file_name}")
path.parent.mkdir(parents=True, exist_ok=True)
path.write_text(str(content), encoding="utf-8")
output = f"{self_name}: saved file to `{path.relative_to('workspace')}`"
print(output)
return output
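# Hedged example (not in the original module): the request must be a JSON string with
# "file_name" and "content" keys; the file name and workspace below are made up.
# import asyncio
# request = json.dumps({"file_name": "notes.txt", "content": "hello world"})
# asyncio.run(save_text_to_workspace(request, self_name="writer", workspace_name="demo"))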
async def run_llm_assistant(request: str, engine: Engine) -> str:
"""Run an LLM assistant."""
from langchain.schema import SystemMessage, HumanMessage
system_message = SystemMessage(
content="You are a helpful assistant who can help generate a variety of content. However, if anyone asks you to access files, or refers to something from a past interaction, you will immediately inform them that the task is not possible, and provide no further information."
)
request_message = HumanMessage(content=request)
output = await engine([system_message, request_message])
print(output)
return output
def load_builtin_function(
automaton_id: str,
automata_location: Path,
automaton_data: Mapping[str, Any],
requester_id: str,
) -> AutomatonRunner:
"""Load an automaton function, which are basically wrappers around external functionality (including other agents)."""
automaton_path = automata_location / automaton_id
extra_args: Union[None, Mapping[str, Any]] = automaton_data.get("extra_args")
if automaton_id == "llm_assistant":
if (
extra_args is None
or "engine" not in extra_args
or extra_args["engine"] is None
):
raise ValueError(
f'Built-in automaton function `{automaton_id}` requires the "engine" value in the `extra_args` field of the spec.'
)
engine_name: str = extra_args["engine"]
engine: Engine = load_engine(automaton_path, engine_name) # type: ignore
return partial(run_llm_assistant, engine=engine)
elif automaton_id == "save_text":
run = partial(
save_text_to_workspace,
self_name=automaton_data["name"],
workspace_name=requester_id,
)
elif automaton_id == "think":
async def run(request: str) -> str:
print(f"Thinking about: {request}")
return request
elif automaton_id == "finalize":
async def run(request: str) -> str:
print(f"Final Result:\n{request}")
return request
else:
raise NotImplementedError(f"Unsupported function name: {automaton_id}.")
return run
| [
"You are a helpful assistant who can help generate a variety of content. However, if anyone asks you to access files, or refers to something from a past interaction, you will immediately inform them that the task is not possible, and provide no further information."
] |
2024-01-10 | solarapparition/agent-automata | agent_automata~builtin_toolkit~engines.py | """Builtin LLM engines that can be used by automata."""
from typing import Any, Sequence, Union
from agent_automata.types import Engine
BUILTIN_ENGINES = {"gpt-3.5-turbo", "gpt-4"}
def load_builtin_engine(name: str) -> Engine:
"""Load a builtin engine."""
if name in ["gpt-3.5-turbo", "gpt-4"]:
from langchain.chat_models import ChatOpenAI
model = ChatOpenAI(temperature=0, model_name=name, verbose=True)
async def run_model(prompt: Union[str, Sequence[Any]], **kwargs: Any) -> str:
if isinstance(prompt, str):
return await model.apredict(prompt, **kwargs)
return (await model.apredict_messages(prompt, **kwargs)).content
return run_model
raise ValueError(f"Engine {name} not part of builtin engines: `{BUILTIN_ENGINES}`")
| [] |
2024-01-10 | sbucarion/computer-assitant | email_handler~email.py | import os
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
from os.path import basename
import openai
import json
import re
import pyttsx3
from word2number import w2n
import pyaudio
import wave
import speech_recognition as sr
import sys
gpt_stuff = r""
sys.path.insert(0, gpt_stuff)
from gpt_commands import list_of_commands
email_list = r""
sys.path.insert(0, email_list)
from email_names import known_emails
converter = pyttsx3.init()
converter.setProperty('rate', 150)
converter.setProperty('volume', 0.85)
r = sr.Recognizer()
FRAMES_PER_BUFFER = 3200
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
def pierre_speak(phrase):
converter.say(phrase)
converter.runAndWait()
def multiple_attachment_listener(seconds):
p = pyaudio.PyAudio()
stream = p.open(
format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=FRAMES_PER_BUFFER
)
print("Pick a file number")
frames = []
for i in range(0, int(RATE / FRAMES_PER_BUFFER * seconds)):
data = stream.read(FRAMES_PER_BUFFER)
frames.append(data)
print("....")
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open("attachment_number.wav", 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
def audio_to_text():
with sr.AudioFile("attachment_number.wav") as source:
audio = r.record(source)
try:
raw_text = r.recognize_google(audio, show_all=True) #show all prevents error if no audio
data = raw_text['alternative'][0]
print(data)
return (data)
except TypeError as e:
#For when no audio is recognized
return {"transcript": ""}
def clean_command_params(raw_params):
"""Takes in a string of paramets from gpt and converts to python dict"""
escape_cleaner = re.compile('(?<!\\\\)\'')
#Remove new line characters from string
new_text = []
for char in raw_params:
if char != "\n":
new_text.append(char)
command_parameters = "".join(new_text)
#Remove escape backslashes from string
p = re.compile('(?<!\\\\)\'')
command_parameters = p.sub('\"', command_parameters)
json_commands = json.loads(command_parameters) #Convert string to JSON
print(command_parameters)
#remove file string from file path
if json_commands["file_name"] != "":
if "file" == json_commands["file_name"].split(".")[-1]:
json_commands["file_name"] = " ".join(json_commands["file_name"].split(".")[:-1])
elif "file" == json_commands["file_name"].split()[-1]:
json_commands["file_name"] = " ".join(json_commands["file_name"].split()[:-1])
json_commands["file_name"] = json_commands["file_name"].replace("/", " ").strip()
#remove folder string from file path
if json_commands["file_path"] != "":
if "folder" == json_commands["file_path"].split()[-1]:
json_commands["file_path"] = " ".join(json_commands["file_path"].split()[:-1])
json_commands["file_path"] = json_commands["file_path"].replace("/", " ").strip()
return json_commands
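# Hedged example (values are made up): a single-quoted GPT-style dict is converted to JSON,
# and trailing "file"/"folder" words are stripped from the path fields.
# raw = "{'to': 'alice', 'subject': 'report', 'body': 'see attached', 'file_name': 'quarterly report file', 'file_path': 'desktop folder'}"
# clean_command_params(raw)
# # -> {'to': 'alice', 'subject': 'report', 'body': 'see attached', 'file_name': 'quarterly report', 'file_path': 'desktop'}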
def extract_email_command(email_command):
"""Convert the command into a dictionary of parameters using gpt"""
#This is where I will send off commands to my own model once built and return the parameters in this function
openai.api_key = ""
gpt_email_prompt = list_of_commands["send_email_commands"][0] + email_command + list_of_commands["send_email_commands"][1]
response = openai.Completion.create(
engine="text-davinci-002",
prompt=gpt_email_prompt,
max_tokens=700,
temperature=0
)
text = response['choices'][0]['text'].lower()
command_params = clean_command_params(text)
#Converts email receiver name to actual address
if command_params['to'].lower() in known_emails:
command_params['to'] = known_emails[command_params['to']]
return command_params
def recursive_folder_search(folder_path, folder_list, file_list):
for item in os.listdir(folder_path):
possible_folder_path = (folder_path + "\\" + item)
if not os.path.isdir(possible_folder_path):
file_list.append(possible_folder_path)
#loop over all items in a folder
for item in os.listdir(folder_path):
if item == "fullstack redo" or item == "node_modules":
#Prevents us from searching extremely large directories where we know the item isn't
continue
possible_folder_path = (folder_path + "\\" + item)
#if an item is a folder open it and check its folders
if os.path.isdir(possible_folder_path):
folder_list.append(possible_folder_path)
recursive_folder_search(possible_folder_path, folder_list, file_list)
return
def verify_folder_path(path):
# if path in global_paths:
# #merge file path and name and check if it exists
# #where I will have custom paths like desktop/homework
# return
#else:
#Will only take main files like desktop (C:\Users\sbuca -> all folders in here)
folder_path = ""
for folder in [path, path.capitalize(), path.upper()]:
folder = "~/" + folder
folder_path = os.path.normpath(os.path.expanduser(folder))
if os.path.exists(folder_path):
break
folder_path = ""
#Will stop searching if the folder doesnt exist
if folder_path == "":
return None
return folder_path
def find_file(folder_name, file_name):
file_name = file_name.lower()
matched_files = []
folder_path = verify_folder_path(folder_name)
if folder_path:
#If the desired path if found recurse through every folder in it
folder_list = []
file_list = []
recursive_folder_search(folder_path, folder_list, file_list)
for file in file_list:
split_file = file.split("\\")[-1].split(".")[0]
if file_name in split_file.lower():
print(file, split_file)
matched_files.append(file)
# distance = lev.distance(Str1,Str2)
# ratio = lev.ratio(Str1,Str2)
return matched_files if matched_files != [] else "No Files Found"
def attachment_manager(all_files):
"""Handles all the files found given the name and folder
manages when multiple files are found"""
pierre_phrase = """I found {} files within that directory I will
list them out now and you say the number of
which one is correct""".format(len(all_files))
pierre_speak(pierre_phrase)
files_by_index = {}
for i, file in enumerate(all_files):
file_name = file.split("\\")[-1]
file_name = file_name.replace("_", " ")
files_by_index[str(i+1)] = file_name
pierre_speak("{}, {}".format([i+1], file_name))
print(files_by_index)
file_attachment_number = ""
while file_attachment_number == "":
multiple_attachment_listener(seconds=3)
file_attachment_number = audio_to_text()["transcript"]
if "repeat" in file_attachment_number.lower():
#file_attachment_number = ""
return attachment_manager(all_files)
if file_attachment_number == "":
continue
if len(file_attachment_number) == 1: #Good to go
return all_files[int(file_attachment_number)-1]
else:
print(file_attachment_number)
for word in file_attachment_number.split():
if word == "to" or word == "too":
word = "two"
try:
file_attachment_number = w2n.word_to_num(word)
return all_files[int(file_attachment_number)-1]
except ValueError as e:
print(word)
continue
file_attachment_number = ""
def attachment_file_handler(folder_name, file_name):
files = find_file(folder_name, file_name)
if files == "No Files Found":
#handle it
return None
if len(files) == 1:
return files[0]
return attachment_manager(files)
def add_attachment(msg, file_path):
with open(file_path, "rb") as fil:
part = MIMEApplication(
fil.read(),
Name=basename(file_path)
)
# After the file is closed
part['Content-Disposition'] = 'attachment; filename="%s"' % basename(file_path)
msg.attach(part)
def package_email_data(sender, email_params):
msg = MIMEMultipart()
msg['From'] = sender
msg['To'] = email_params['to']
msg['Subject'] = email_params['subject']
#Add body to email
body = MIMEText(email_params['body'])
msg.attach(body)
#Add attachment to email -> Update in future to have multiple
if email_params['file_path'] != "" and email_params['file_name'] != "":
attachment_file_path = attachment_file_handler(email_params['file_path'], email_params['file_name'])
if attachment_file_path:
add_attachment(msg, attachment_file_path)
else:
return "Could Not Find File"
return msg
def send_email(sender, sender_password, receiver, email_data):
server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
server.ehlo()
server.login(sender, sender_password)
server.sendmail(sender, receiver, email_data.as_string())
server.close()
return "Success"
def email_main(email_command, email_params=""):
email_sender = ""
email_app_password = ""
email_params = extract_email_command(email_command)
print(email_params)
email_data = package_email_data(email_sender, email_params)
status = send_email(email_sender, email_app_password, email_params["to"], email_data)
if __name__ == "__main":
email_main(email_command, email_params="")
| [
"send_email_commands"
] |
2024-01-10 | sotot0/gem5 | configs~example~gem5_library~riscv-ubuntu-run.py | # Copyright (c) 2021 The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This script shows an example of running a full system RISCV Ubuntu boot
simulation using the gem5 library. This simulation boots Ubuntu 20.04 using
2 TIMING CPU cores. The simulation ends when the startup is completed
successfully.
Usage
-----
```
scons build/RISCV/gem5.opt
./build/RISCV/gem5.opt \
configs/example/gem5_library/riscv-ubuntu-run.py
```
"""
import m5
from m5.objects import Root
from gem5.utils.requires import requires
from gem5.components.boards.riscv_board import RiscvBoard
from gem5.components.memory import DualChannelDDR4_2400
from gem5.components.processors.simple_processor import (
SimpleProcessor,
)
from gem5.components.processors.cpu_types import CPUTypes
from gem5.isas import ISA
from gem5.coherence_protocol import CoherenceProtocol
from gem5.resources.resource import Resource
from gem5.simulate.simulator import Simulator
# This runs a check to ensure the gem5 binary is compiled for RISCV.
requires(
isa_required=ISA.RISCV,
)
# With RISCV, we use simple caches.
from gem5.components.cachehierarchies.classic.private_l1_private_l2_cache_hierarchy import (
PrivateL1PrivateL2CacheHierarchy,
)
# Here we setup the parameters of the l1 and l2 caches.
cache_hierarchy = PrivateL1PrivateL2CacheHierarchy(
l1d_size="16kB",
l1i_size="16kB",
l2_size="256kB",
)
# Memory: Dual Channel DDR4 2400 DRAM device.
memory = DualChannelDDR4_2400(size = "3GB")
# Here we setup the processor. We use a simple processor.
processor = SimpleProcessor(
cpu_type=CPUTypes.TIMING,
isa=ISA.RISCV,
num_cores=2,
)
# Here we setup the board. The RiscvBoard allows for Full-System RISCV
# simulations.
board = RiscvBoard(
clk_freq="3GHz",
processor=processor,
memory=memory,
cache_hierarchy=cache_hierarchy,
)
# Here we set the Full System workload.
# The `set_kernel_disk_workload` function for the RiscvBoard accepts a
# RISCV bootloader and a disk image. Once the system successfully boots, it
# encounters an `m5_exit instruction encountered`. We stop the simulation then.
# When the simulation has ended you may inspect `m5out/system.pc.com_1.device`
# to see the stdout.
board.set_kernel_disk_workload(
# The RISCV bootloader will be automatically downloaded to the
# `~/.cache/gem5` directory if not already present.
# The riscv-ubuntu boot-test was tested with riscv-bootloader-5.10
kernel=Resource(
"riscv-bootloader-vmlinux-5.10",
),
# The RISCV ubuntu image will be automatically downloaded to the
# `~/.cache/gem5` directory if not already present.
disk_image=Resource(
"riscv-ubuntu-20.04-img",
),
)
simulator = Simulator(board=board)
simulator.run()
| [] |
2024-01-10 | mikkac/ask_pdf | ask_pdf~rag_chat.py | """ Converstaion handler for Retriever-Augmented Generation (RAG) model. """
import openai
from llama_index import (ServiceContext, SimpleDirectoryReader, StorageContext,
VectorStoreIndex)
from llama_index.indices.postprocessor import SentenceTransformerRerank
from llama_index.llms import OpenAI
from llama_index.query_engine import RetrieverQueryEngine
from llama_index.retrievers import AutoMergingRetriever
from llama_index.vector_stores import QdrantVectorStore
from qdrant_client import QdrantClient
class RAGChat:
"""
A class to handle conversation with a Retriever-Augmented Generation (RAG) model.
Attributes:
automerging_query_engine: Engine to handle RAG queries.
Methods:
create_embeddings(file): Processes a file to create embeddings.
send_message(user_msg): Sends a message to the RAG model and returns the response.
"""
def __init__(self, openai_api_key, qdrant_url):
"""
Initializes the RAGChat with an OpenAI API key and the URL of the
Qdrant vector store used for document embeddings.
Args:
openai_api_key (str): OpenAI API key for accessing OpenAI services.
qdrant_url (str): URL of the Qdrant instance used to store embeddings.
"""
openai.api_key = openai_api_key
self.llm = OpenAI(model="gpt-3.5-turbo", temperature=0.1)
self.qdrant_url = qdrant_url
self.automerging_index = None
self.automerging_query_engine = None
# TODO: Make sure that all models are downloaded before first file upload
def create_embeddings(self, file):
"""
Processes the given file to create and store embeddings.
Args:
file (str): Path to the file to be processed.
"""
documents = SimpleDirectoryReader(input_files=[file]).load_data()
self.automerging_index = self._build_automerging_index(documents, self.llm)
self.automerging_query_engine = self._get_automerging_query_engine(
self.automerging_index
)
def send_message(self, user_msg):
"""
Sends a user message to the RAG model and returns the model's response.
The message is passed to the auto-merging query engine, which retrieves
relevant document context before generating the response.
Args:
user_msg (str): The user's message to send to the model.
Returns:
str: The response generated by the RAG model.
"""
return str(self.automerging_query_engine.query(user_msg))
def _build_automerging_index(
self,
documents,
llm,
embed_model="local:BAAI/bge-small-en-v1.5",
):
"""
Builds an automerging index from the given documents using the specified
language model and embedding model.
Args:
documents (list): A list of documents to be indexed.
llm: The language model to be used for indexing.
embed_model (str, optional): The embedding model to be used.
Defaults to "local:BAAI/bge-small-en-v1.5".
Returns:
An automerging index created from the provided documents and models.
"""
qdrant_client = QdrantClient(url=self.qdrant_url)
service_context = ServiceContext.from_defaults(
llm=llm,
embed_model=embed_model,
)
vector_store = QdrantVectorStore(client=qdrant_client, collection_name="docs")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
return VectorStoreIndex.from_documents(
documents, storage_context=storage_context, service_context=service_context
)
def _get_automerging_query_engine(
self, automerging_index, similarity_top_k=12, rerank_top_n=2
):
"""
Creates a query engine using the provided automerging index.
Args:
automerging_index: The automerging index to be used for creating the query engine.
similarity_top_k (int, optional): The number of top similar items to retrieve.
Defaults to 12.
rerank_top_n (int, optional): The number of top items to rerank. Defaults to 2.
Returns:
A query engine built using the provided automerging index and specified parameters.
"""
base_retriever = automerging_index.as_retriever(
similarity_top_k=similarity_top_k
)
retriever = AutoMergingRetriever(
base_retriever, automerging_index.storage_context, verbose=True
)
rerank = SentenceTransformerRerank(
top_n=rerank_top_n, model="BAAI/bge-reranker-base"
)
auto_merging_engine = RetrieverQueryEngine.from_args(
retriever, node_postprocessors=[rerank]
)
return auto_merging_engine
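# Hedged usage sketch (not part of the module): the environment variable, Qdrant URL, and
# PDF path below are placeholders and must be replaced with real values.
if __name__ == "__main__":
    import os

    chat = RAGChat(
        openai_api_key=os.environ["OPENAI_API_KEY"],  # assumed to be set
        qdrant_url="http://localhost:6333",  # assumed local Qdrant instance
    )
    chat.create_embeddings("example.pdf")  # hypothetical document path
    print(chat.send_message("What is this document about?"))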
| [] |
2024-01-10 | aaronwangj/qg-ai | qgai.py | from fastapi import FastAPI, UploadFile, Form
from fastapi.middleware.cors import CORSMiddleware
import openai
import os
openai.api_key = os.environ.get("OPENAI_KEY")
content = """
You are grading a student's response. You will return JSON without any new lines that looks like this:
"{
accuracy: int;
clarity: int;
depth: int;
overallScore: int;
answer: string;
feedback: string;
}".
Your output should be able to be parsed by a JSON.parse() function.
The accuracy field is how accurate the student’s response is out of 100.
The clarity field is how clear the student’s response is out of 100.
The depth field grades the student’s depth out of 100.
The overallScore field grades the student’s overall response out of 100.
The answer field is an extensive, thorough answer to the prompt.
The feedback field is your written feedback to the student’s response, which should be very extensive and explain how the student can improve.
Here is the prompt:
"""
app = FastAPI()
origins = ["https://qg-admin.vercel.app/", "https://www.quantguide.io/", "https://quantguide.io/", "https://quant-guide-app-git-dev-quantguide.vercel.app/", "https://www.quant-guide-app-git-dev-quantguide.vercel.app/"]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
expose_headers=["*"])
@app.get("/")
def test():
return {"message": "quantguide.io"}
@app.post("/ai")
def ai(file: UploadFile, prompt: str = Form(...)):
try:
contents = file.file.read()
with open(file.filename, "wb") as f:
f.write(contents)
transcription = get_transcription(file)
response = openai.ChatCompletion.create(
model="gpt-4",
messages=[
{
"role": "system",
"content": content + prompt,
},
{
"role": "user",
"content": transcription,
},
],
temperature=0.8,
max_tokens=1024,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
return {"feedback": response["choices"][0]["message"]["content"], "transcript": transcription}
except Exception as e:
return {"message": e}
finally:
file.file.close()
if os.path.exists(file.filename):
os.remove(file.filename)
def get_transcription(file):
try:
text = ""
with open(file.filename, "rb") as f:
text = openai.Audio.transcribe("whisper-1", f)["text"]
return text
finally:
if os.path.exists(file.filename):
os.remove(file.filename)
@app.post("/transcribe")
def transcribe(file: UploadFile):
try:
contents = file.file.read()
with open(file.filename, "wb") as f:
f.write(contents)
return get_transcription(file)
except Exception as e:
return {"message": e}
finally:
file.file.close()
if os.path.exists(file.filename):
os.remove(file.filename)
@app.post("/ai-text")
def ai_text(text: str = Form(...), prompt: str = Form(...)):
response = openai.ChatCompletion.create(
model="gpt-4",
messages=[
{
"role": "system",
"content": content + prompt,
},
{
"role": "user",
"content": text,
},
],
temperature=0.8,
max_tokens=1024,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
return {"feedback": response["choices"][0]["message"]["content"], "transcript": text}
| [
"\nYou are grading a student's response. You will return JSON without any new lines that looks like this:\n\"{\n accuracy: int;\n clarity: int;\n depth: int;\n overallScore: int;\n answer: string;\n feedback: string;\n}\". \nYour output should be able to be parsed by a JSON.parse() function.\n\nThe accuracy field is how accurate the student’s response is out of 100.\nThe clarity field is how clear the student’s response is out of 100.\nThe depth field grades the student’s depth out of 100.\nThe overallScore field grades the student’s overall response out of 100.\nThe answer field is an extensive, thorough answer to the prompt.\nThe feedback field is your written feedback to the student’s response, which should be very extensive and explain how the student can improve.\n\nHere is the prompt: \nPLACEHOLDER"
] |
2024-01-10 | YuehChuan/chatgpt-api-whisper-api-voice-assistant | therapist.py | import gradio as gr
import openai, config, subprocess
openai.api_key = config.OPENAI_API_KEY
messages = [{"role": "system", "content": 'You are a therapist. Respond to all input in 25 words or less.'}]
def transcribe(audio):
global messages
audio_file = open(audio, "rb")
transcript = openai.Audio.transcribe("whisper-1", audio_file)
messages.append({"role": "user", "content": transcript["text"]})
response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)
system_message = response["choices"][0]["message"]
messages.append(system_message)
subprocess.call(["say", system_message['content']])
chat_transcript = ""
for message in messages:
if message['role'] != 'system':
chat_transcript += message['role'] + ": " + message['content'] + "\n\n"
return chat_transcript
ui = gr.Interface(fn=transcribe, inputs=gr.Audio(source="microphone", type="filepath"), outputs="text")
ui.launch() | [
"You are a therapist. Respond to all input in 25 words or less."
] |
2024-01-10 | goML-offers/data_set | app~services~asset_creation.py | from supabase import Client, create_client
import pandas as pd
import json
import numpy as np
import openai
# from diffusers import DiffusionPipeline
# import torch
from PyPDF2 import PdfReader
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain.chains import OpenAIModerationChain, SequentialChain, LLMChain, SimpleSequentialChain
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
import os
import cv2
from PIL import Image
from PIL import ImageDraw, ImageFont
from colormath.color_objects import sRGBColor, LabColor
from colormath.color_conversions import convert_color
from datetime import datetime
import logging
from dotenv import load_dotenv, find_dotenv
timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
load_dotenv()
logger = logging.getLogger(__name__)
log_file_path = f'/app/app/logs/asset_{timestamp}.log'
file_handler = logging.FileHandler(log_file_path)
console_handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')
file_handler.setFormatter(formatter)
console_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.addHandler(console_handler)
logger.setLevel(logging.INFO)
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
AWS_ACCESS_KEY = os.environ.get("AWS_ACCESS_KEY")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
SUPABASE_URL = os.environ.get("SUPABASE_URL")
SUPABASE_KEY = os.environ.get("SUPABASE_KEY")
supabase_bucket = 'solarplexus'
SUPABASE_HOST= os.environ.get("SUPABASE_HOST")
SUPABASE_PASSWORD= os.environ.get("SUPABASE_PASSWORD")
try:
supabase: Client = create_client(SUPABASE_URL, SUPABASE_KEY)
logger.info('Supabase connection successful')
# pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float32, use_safetensors=True, variant="fp16")
# commandline_args = os.environ.get('COMMANDLINE_ARGS', "--skip-torch-cuda-test --no-half")
except Exception as e:
print(f"Error connecting to Supabase: {e}")
logger.error(e)
def fetch_table_data(table_name):
table_response = supabase.from_(table_name).select("*").execute()
table_data_raw = [record for record in table_response.data]
print(table_data_raw)
# table = "segment_metadata"
# updates = {
# "process_id" : process_id
# }
# response_update = supabase.from_(table).update(updates).eq("id", segment_id).execute()
# update_d = [record for record in response_update.data]
table_data_json = json.dumps(table_data_raw)
table= json.loads(table_data_json)
# update_segment_id = update[0]['id']
# print(update_segment_id)
return table
def generate_text(cluster_description, categorical_description, asset_to_individualize_file, tone_of_voice):
prompt = f"""generate a different marketing asset image for a brand Solarplexus, to target big industries,
text should have font as Raleway and secondary font as Open Sans, image should have primary
colour code should be #EB691B and secondary color code should be #4B4B4B, and generate images based on these cluster
description {cluster_description} {categorical_description}"""
# response = openai.ChatCompletion.create(
# # model="gpt-3.5-turbo-0613",
# model= "gpt-4",
# # model = "gpt-3.5-turbo-16k",
# # model="gpt-4-0613",
# messages=[
# {"role": "user",
# "content": f"""generate a very professional marketing asset
# text for a brand to place on the image, to target big financial industries the text generate should be
# very specific and should be based on these descriptions {cluster_description} {categorical_description} that it will be targetted for."""},
# ]
# )
# result = ''
# for choice in response.choices:
# result += choice.message.content
# print(result)
reader = PdfReader(asset_to_individualize_file)
raw_text = ''
for i, page in enumerate(reader.pages):
text = page.extract_text()
if text:
raw_text += text
print(raw_text[:100])
# We need to split the text that we read into smaller chunks so that during information retreival we don't hit the token size limits.
text_splitter = CharacterTextSplitter(
separator = "\n",
chunk_size = 1000,
chunk_overlap = 200,
length_function = len,
)
texts = text_splitter.split_text(raw_text)
# Download embeddings from OpenAI
embeddings = OpenAIEmbeddings()
docsearch = FAISS.from_texts(texts, embeddings)
chain = load_qa_chain(OpenAI(), chain_type="stuff")
llm_answer_list = []
ques = f"""convert the result to short summary in one line based on the cluster description {cluster_description} and {categorical_description},
make the results proper and different based on the industry the cluster is focused and the answer should be very clear. The tone of the document
should be in {tone_of_voice}. Don't give me anything else. The result should be attractive that can be used for marketing campaigns."""
docs = docsearch.similarity_search(ques)
llm_answer = chain.run(input_documents=docs, question=ques)
print("llm_answer----------->", llm_answer)
return llm_answer
def get_extracted_data(extraction_id_brand, extraction_id_tone):
query = supabase.from_("data_extraction").select("*").eq("extraction_id", extraction_id_brand).execute()
update_d = [record for record in query.data]
print(update_d)
color_data = json.dumps(update_d)
color = json.loads(color_data)
llm_answer = color[0]["llm_answer"]
print(type(llm_answer))
print(llm_answer)
answer = json.loads(llm_answer)
# Create variables to store answers
primary_color = None
brand_name = None
primary_font = None
secondary_color = None
secondary_font = None
# Process the list of dictionaries
for item in answer:
question = item['question']
answer = item['answer']
if "primary colour code" in question:
primary_color = answer
elif "brand name" in question:
brand_name = answer
elif "primary font" in question:
primary_font = answer
elif "secondary colour code" in question:
secondary_color = answer
elif "secondary font" in question:
secondary_font = answer
# Print the stored answers
print("Primary Color:", primary_color)
print("Brand Name:", brand_name)
print("Primary Font:", primary_font)
print("Secondary Color:", secondary_color)
print("Secondary Font:", secondary_font)
response = supabase.from_("data_extraction").select("*").eq("extraction_id", extraction_id_tone).execute()
response_d = [record for record in response.data]
print(response_d)
tone_data = json.dumps(response_d)
tone = json.loads(tone_data)
tone_llm_answer = tone[0]["llm_answer"]
print(type(tone_llm_answer))
print(tone_llm_answer)
tone_answer = json.loads(tone_llm_answer)
# Create variables to store answers
tone_of_voice = None
# Process the list of dictionaries
for item in tone_answer:
question = item['question']
answer = item['answer']
if "tone of voice" in question:
tone_of_voice = answer
# Print the stored answers
print("tone of voice:", tone_of_voice)
return {"primary_color": primary_color, "secondary_color": secondary_color, "primary_font": primary_font, "secondary_font":secondary_font, "brand_name": brand_name, "tone_of_voice": tone_of_voice}
def get_rgb_colors(primary_color, secondary_color):
rgb_color = openai.ChatCompletion.create(
# model="gpt-3.5-turbo-0613",
model= "gpt-4",
# model = "gpt-3.5-turbo-16k",
# model="gpt-4-0613",
messages=[
{"role": "user",
"content": f"""Generate RGB of color {primary_color} and color {secondary_color} and give me a json format strictly only in Red Green Blue nested dictionary and nothing else.
You can consider this as an example to generate your result:
EXAMPLE: """ + """{"EB691B": { "Red": 235,"Green": 105,"Blue": 27},"4B4B4B": { "Red": 75,"Green": 75,"Blue": 75},"95CDED": {"Red": 149,"Green": 205, "Blue": 237}}"""},
]
)
rgb_result = ''
for choice in rgb_color.choices:
rgb_result += choice.message.content
print(rgb_result)
print(type(rgb_result))
"------------------------covert to json------------------------------"
colors = json.loads(rgb_result)
print(colors)
print(type(colors))
"------------------------reading rgb from json------------------------"
# Initialize variables for primary and secondary colors
primary_color_rgb = ()
secondary_color_rgb = ()
# Iterate through the dictionary and store RGB values for the first two keys
for idx, (key, rgb_values) in enumerate(colors.items()):
if idx == 0:
primary_color_rgb = (rgb_values['Red'], rgb_values['Green'], rgb_values['Blue'])
elif idx == 1:
secondary_color_rgb = (rgb_values['Red'], rgb_values['Green'], rgb_values['Blue'])
else:
break # Only store values for the first two keys
# Print the stored RGB values
print(f"Primary Color: {primary_color_rgb}")
print(f"Secondary Color: {secondary_color_rgb}")
return {"primary_color_rgb": primary_color_rgb, "secondary_color_rgb": secondary_color_rgb}
def fetch_background_image(file_id_background_image):
type = "picture_bank"
user = supabase.from_("file_data").select("*").eq("id",file_id_background_image).eq("type", type).execute()
user_data = [record for record in user.data]
print("user_data",user_data)
data = json.dumps(user_data)
d = json.loads(data)
file_path = d[0]["path"]
file_type = d[0]["type"]
try:
local_file_path = f'/app/app/services/files/{file_path.split("/")[-1]}'
print(local_file_path)
print(file_path)
with open(local_file_path, 'wb+') as f:
data = supabase.storage.from_(supabase_bucket).download(file_path)
f.write(data)
except Exception as e:
logging.error('An error occurred:', exc_info=True)
return local_file_path
# fetch_background_image(803)
def fetch_logo(file_id_log):
type = "logo"
user = supabase.from_("file_data").select("*").eq("id",file_id_log).eq("type", type).execute()
user_data = [record for record in user.data]
print("user_data",user_data)
data = json.dumps(user_data)
d = json.loads(data)
file_path = d[0]["path"]
file_type = d[0]["type"]
try:
local_file_path = f'/app/app/services/files/{file_path.split("/")[-1]}'
print(local_file_path)
print(file_path)
with open(local_file_path, 'wb+') as f:
data = supabase.storage.from_(supabase_bucket).download(file_path)
f.write(data)
except Exception as e:
logging.error('An error occurred:', exc_info=True)
return local_file_path
def fetch_asset_individualize(project_id):
group = "asset"
user = supabase.from_("project_files").select("*").eq("project_id",project_id).eq("group", group).execute()
user_data = [record for record in user.data]
print("user_data",user_data)
data = json.dumps(user_data)
d = json.loads(data)
file_path = d[0]["path"]
file_group = d[0]["group"]
try:
local_file_path = f'/app/app/services/files/{file_path.split("/")[-1]}'
print(local_file_path)
print(file_path)
with open(local_file_path, 'wb+') as f:
data = supabase.storage.from_(supabase_bucket).download(file_path)
f.write(data)
except Exception as e:
logging.error('An error occurred:', exc_info=True)
return local_file_path
def combine_text_image(cluster_id, background_image_path, logo_path, asset_to_individualize, primary_color_rgb, secondary_color_rgb):
base_image = Image.open(background_image_path)
# Initialize the drawing context
draw = ImageDraw.Draw(base_image)
# Set primary and secondary colors
primary_color_rgb = primary_color_rgb # (R, G, B) for #EB691B
secondary_color_rgb = secondary_color_rgb # (R, G, B) for #4B4B4B
# Yellow C100%, Pantone 281 C100%
# Use built-in fonts
primary_font = ImageFont.load_default() # Use the default font
secondary_font = ImageFont.load_default() # Use the default font
# Set the text to be displayed
# text = "Empower Your Legacy Giants with our premier solutions. Captivating 8,200+ financial industries and counting, our robust marketing tools are uniquely designed to serve your distinct needs. Embrace efficiency, cultivate growth and be a part of the top-financial trendsetters across the United Kingdom. Propel your business forward in a landscape dominated by Kingsley Napley LLP and others. Join the movement - Experience difference with us."
# text = result
text = asset_to_individualize
# Set the text position for the primary color
text_position_primary = (20, 80)
# Draw text in primary color with default font
draw.text(text_position_primary, text, fill=primary_color_rgb, font=primary_font)
# Load the overlay image
# logo = Image.open("arkitektkopia-loggo-ritsPP-cmyk.png")
logo = Image.open(logo_path)
# You may need to resize the overlay image to fit
logo = logo.resize((80, 50)) # Adjust the size as needed
# Paste the overlay image on top of the base image
base_image.paste(logo, (400, 20))
# Save the modified image
asset_path = f"asset_{cluster_id}.jpg"
base_image.save(asset_path)
# Display the modified image
# base_image.show()
return asset_path
# def combine_text_image(cluster_id, background_image_path, logo_path, asset_to_individualize, primary_color_rgb, secondary_color_rgb):
# base_image = Image.open(background_image_path)
# draw = ImageDraw.Draw(base_image)
# primary_color_rgb = primary_color_rgb
# font_size = 20
# # Use the truetype font
# primary_font = ImageFont.load_default() # Use the default font
# secondary_font = ImageFont.load_default()
# font = ImageFont.truetype(primary_font, font_size)
# text = asset_to_individualize
# text_width, text_height = primary_font.getsize(text)
# text_x = (base_image.width - text_width) // 2
# text_y = (base_image.height - text_height) // 2
# draw.text((text_x, text_y), text, fill=primary_color_rgb, font=primary_font)
# logo = Image.open(logo_path)
# logo = logo.resize((80, 50))
# base_image.paste(logo, (400, 20))
# asset_path = f"asset_{cluster_id}.jpg"
# base_image.save(asset_path)
# return asset_path
def asset_creation(table_name, user_id, project_id, extraction_id_brand, extraction_id_tone, file_id_log, file_id_background_image):
print("entered")
process_id = None
try:
process_data_insert = [
{
"user_id" :user_id,
"process_type": "asset_creation",
"process_status": "in_progress",
"start_at" : datetime.now().isoformat()
},
]
process= supabase.from_("process").insert(process_data_insert).execute()
process_data = [record for record in process.data]
p_data = json.dumps(process_data)
p = json.loads(p_data)
process_id = p[0]["process_id"]
print("process table:*******************", p)
# table_name = "segment_47b0ffec-356a-4c35-8704-23b153d345c5_1087"
"--------------------------------------------------------------------"
"""# Read data from Supabase
query = f"SELECT * FROM {table_name}"
response = supabase.from_(table_name).select("*").execute()
update_d = [record for record in response.data]
print(update_d)
# table = "segment_metadata"
# updates = {
# "process_id" : process_id
# }
# response_update = supabase.from_(table).update(updates).eq("id", segment_id).execute()
# update_d = [record for record in response_update.data]
response_u = json.dumps(update_d)
update= json.loads(response_u)
update_segment_id = update[0]['id']
print(update_segment_id)"""
"--------------------------------------------------------------------"
background_image_path = fetch_background_image(file_id_background_image)
logo_path = fetch_logo(file_id_log)
table = fetch_table_data(table_name)
# Convert the data to a Pandas DataFrame
df = pd.DataFrame(table)
# Group the data by the cluster column
cluster_column = "Cluster"
grouped_clusters = df.groupby(cluster_column)
categorical_columns = df.select_dtypes(exclude=[np.number])
result_filenames = []
asset_id = []
asset_path = []
for cluster_id, cluster_data in grouped_clusters:
# Perform operations on cluster_data
# You can access each cluster's data using cluster_data
# For example, to get the description of the cluster:
# cluster_description = cluster_data["description"].iloc[0]
# print(f"Cluster Name: {cluster_name}, Description: {cluster_description}")
# Descriptive statistics for each cluster
cluster_description = df[df['Cluster'] == cluster_id].describe()
print(cluster_description)
# Descriptive statistics for categorical columns
categorical_cluster_data = categorical_columns[df['Cluster'] == cluster_id]
categorical_description = categorical_cluster_data.describe()
# print("Categorical Column Statistics:")
# print(categorical_description)
print(f"Cluster Name: {cluster_id} {cluster_description} {categorical_description}")
"--------------------------------------------------------------------"
# prompt = f"""generate a marketing asset image for a brand Solarplexus, to target big industries,
# text should have font as Raleway and secondary font as Open Sans, image should have primary
# colour code should be #EB691B and secondary color code should be #4B4B4B, and generate images based on these cluster
# description {cluster_description} {categorical_description}"""
# image = pipe(prompt).images[0]
# print(image)
# filename = f'result_{cluster_id}.jpg'
# image.save(filename)
# print(filename)
# result_filenames.append(filename)
# """response = openai.ChatCompletion.create(
# # model="gpt-3.5-turbo-0613",
# model= "gpt-4",
# # model = "gpt-3.5-turbo-16k",
# # model="gpt-4-0613",
# messages=[
# {"role": "user",
# "content": f"""generate a very professional marketing asset
# text for a brand to place on the image, to target big financial industries the text generate should be
# very specific and should be based on these descriptions {cluster_description} {categorical_description} that it will be targetted for."""},
# ]
# )
# result = ''
# for choice in response.choices:
# result += choice.message.content
# print(result)"""
"--------------------------------------------------------------------"
extracted_data = get_extracted_data(extraction_id_brand, extraction_id_tone)
primary_color = extracted_data["primary_color"]
secondary_color = extracted_data["secondary_color"]
primary_font = extracted_data["primary_font"]
secondary_font = extracted_data["secondary_font"]
brand_name = extracted_data["brand_name"]
tone_of_voice = extracted_data["tone_of_voice"]
asset_to_individualize_file = fetch_asset_individualize(project_id)
asset_to_individualize = generate_text(cluster_description, categorical_description, asset_to_individualize_file, tone_of_voice)
"--------------------------------------------------------------------"
"------------------------get color from db----------------------------"
# extraction_id = 789
"""query = supabase.from_("data_extraction").select("*").eq("extraction_id", extraction_id).execute()
update_d = [record for record in query.data]
print(update_d)
color_data = json.dumps(update_d)
color = json.loads(color_data)
llm_answer = color[0]["llm_answer"]
print(type(llm_answer))
print(llm_answer)
answer = json.loads(llm_answer)
# Create variables to store answers
primary_color = None
brand_name = None
primary_font = None
secondary_color = None
secondary_font = None
# Process the list of dictionaries
for item in answer:
question = item['question']
answer = item['answer']
if "primary colour code" in question:
primary_color = answer
elif "brand name" in question:
brand_name = answer
elif "primary font" in question:
primary_font = answer
elif "secondary colour code" in question:
secondary_color = answer
elif "secondary font" in question:
secondary_font = answer
# Print the stored answers
print("Primary Color:", primary_color)
print("Brand Name:", brand_name)
print("Primary Font:", primary_font)
print("Secondary Color:", secondary_color)
print("Secondary Font:", secondary_font)"""
"--------------------------------------------------------------------"
"--------------------------generate rgb color-------------------------"
# primary = "Yellow C100%, Pantone 281 C100%"
# secondary = ""
"--------------------------------------------------------------------"
# rgb_color = openai.ChatCompletion.create(
# # model="gpt-3.5-turbo-0613",
# model= "gpt-4",
# # model = "gpt-3.5-turbo-16k",
# # model="gpt-4-0613",
# messages=[
# {"role": "user",
# "content": f"""Generate RGB of color {primary_color} and color {secondary_color} and give me a json format in Red Green Blue nested dictionary and nothing else"""},
# ]
# )
# rgb_result = ''
# for choice in rgb_color.choices:
# rgb_result += choice.message.content
# print(rgb_result)
# print(type(rgb_result))
# "------------------------covert to json------------------------------"
# colors = json.loads(rgb_result)
# print(colors)
# print(type(colors))
# "------------------------reading rgb from json------------------------"
# # Initialize variables for primary and secondary colors
# primary_color_rgb = ()
# secondary_color_rgb = ()
# # Iterate through the dictionary and store RGB values for the first two keys
# for idx, (key, rgb_values) in enumerate(colors.items()):
# if idx == 0:
# primary_color_rgb = (rgb_values['Red'], rgb_values['Green'], rgb_values['Blue'])
# elif idx == 1:
# secondary_color_rgb = (rgb_values['Red'], rgb_values['Green'], rgb_values['Blue'])
# else:
# break # Only store values for the first two keys
# # Print the stored RGB values
# print(f"Primary Color: {primary_color_rgb}")
# print(f"Secondary Color: {secondary_color_rgb}")
"--------------------------------------------------------------------"
rgb_colors = get_rgb_colors(primary_color, secondary_color)
primary_color_rgb = rgb_colors['primary_color_rgb']
secondary_color_rgb = rgb_colors['secondary_color_rgb']
"--------------------------------------------------------------------"
"------------------------reading image----------------------"
"""# filename = f'result_{cluster_id}.jpg'
# Load the existing image
base_image = Image.open(background_image_path)
# Initialize the drawing context
draw = ImageDraw.Draw(base_image)
# Set primary and secondary colors
primary_color_rgb = primary_color_rgb # (R, G, B) for #EB691B
secondary_color_rgb = secondary_color_rgb # (R, G, B) for #4B4B4B
# Yellow C100%, Pantone 281 C100%
# Use built-in fonts
primary_font = ImageFont.load_default() # Use the default font
secondary_font = ImageFont.load_default() # Use the default font
# Set the text to be displayed
# text = "Empower Your Legacy Giants with our premier solutions. Captivating 8,200+ financial industries and counting, our robust marketing tools are uniquely designed to serve your distinct needs. Embrace efficiency, cultivate growth and be a part of the top-financial trendsetters across the United Kingdom. Propel your business forward in a landscape dominated by Kingsley Napley LLP and others. Join the movement - Experience difference with us."
# text = result
text = asset_to_individualize
# Set the text position for the primary color
text_position_primary = (100, 100)
# Draw text in primary color with default font
draw.text(text_position_primary, text, fill=primary_color_rgb, font=primary_font)
# Load the overlay image
# logo = Image.open("arkitektkopia-loggo-ritsPP-cmyk.png")
logo = Image.open(logo_path)
# You may need to resize the overlay image to fit
logo = logo.resize((100, 100)) # Adjust the size as needed
# Paste the overlay image on top of the base image
base_image.paste(logo, (200, 200))
# Save the modified image
base_image.save(f"asset_{cluster_id}.jpg")
# Display the modified image
base_image.show()"""
"--------------------------------------------------------------------"
local_asset_path = combine_text_image(cluster_id, background_image_path, logo_path, asset_to_individualize, primary_color_rgb, secondary_color_rgb)
bucket_path = f"/asset/{user_id}/{project_id}/asset_{cluster_id}.jpg"
# print("Bucket Pathhhhhhhhhhhhhhh", bucket_path)
with open(local_asset_path, 'rb') as f:
supabase.storage.from_(supabase_bucket).upload(file=f,path=bucket_path)
asset_data_insert = [
{
"user_id" :user_id,
"project_id": project_id,
"asset_path": bucket_path
},
]
asset= supabase.from_("asset_metadata").insert(asset_data_insert).execute()
asset_data = [record for record in asset.data]
p_data = json.dumps(asset_data)
p = json.loads(p_data)
print("asssettttttt", p)
assetid = p[0]["id"]
print("Asset id---------", assetid)
asset_id.append(assetid)
asset_path.append(bucket_path)
print("process table:*******************", p)
process_data_update = {
"process_status": "stopped",
"end_at" : datetime.now().isoformat()
}
supabase.from_("process").update(process_data_update).eq("process_id", process_id).execute()
logger.info(f"asset creation done for segment {cluster_id}")
os.remove(local_asset_path)
os.remove(background_image_path)
os.remove(logo_path)
logger.info("asset creation done")
return {"asset_id": asset_id, "asset_path": asset_path}
except Exception as e:
logger.error(e)
print(e)
return {"error": e, "status":"error"} | [
"Generate RGB of color PLACEHOLDER and color PLACEHOLDER and give me a json format strictly only in Red Green Blue nested dictionary and nothing else.\n You can consider this as an example to generate you result: \n EXAMPLE: {\"EB691B\": { \"Red\": 235,\"Green\": 105\"Blue\": 27},\"4B4B4B\": { \"Red\": 75,\"Green\": 75,\"Blue\": 75},\"95CDED\": {\"Red\": 149,\"Green\": 205, \"Blue\": 237}}",
"generate a different marketing asset image for a brand Solarplexus, to target big industries, \n text should have font as Raleway and secondary font as Open Sans, image should have primary \n colour code should be #EB691B and secondary color code should be #4B4B4B, and generate images based on these cluster\n description PLACEHOLDER PLACEHOLDER"
] |
2024-01-10 | dengyang17/LLM-Proactive | otters~otters_chatgpt.py | import openai
import time
import os
API_KEY = os.getenv("OPENAI_API_KEY", "YOUR_KEY")  # "YOUR_KEY" is only a placeholder; set the key via the environment
def query_openai_model(api_key: str, prompt: str, model: str = "gpt-3.5-turbo-0301", max_tokens: int = 128, temperature: float = 0):
openai.api_key = api_key
completions = openai.ChatCompletion.create(
model=model,
messages=[{"role": "user", "content": prompt}],
max_tokens=max_tokens,
n=1,
stop=None,
temperature=temperature,
)
output = completions.choices[0].message.content.strip()
return output
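# Minimal usage sketch (assumes a valid OpenAI key and the legacy openai<1.0 ChatCompletion API used above):
# answer = query_openai_model(API_KEY, "Start from 'I love cooking' and steer the chat towards travel.")
# print(answer)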
def infer(infile, outfile):
api_key = API_KEY
existing_outputs = []
if os.path.exists(outfile):
with open(outfile, 'r') as fin:
for line in fin:
existing_outputs.append(line)
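# Resume logic: records already written to the output file are re-used below, and only the
# 'zs_resp'/'fs_resp' keys are re-queried; every record is re-written line by line because the
# output file is re-opened in 'w' (truncating) mode.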
with open(infile, 'r') as fin,\
open(outfile, 'w') as fout:
count = 0
for line in fin:
prompts = eval(line.strip('\n'))
if count < len(existing_outputs):
outputs = eval(existing_outputs[count].strip('\n'))
for key in prompts:
#if key not in outputs:
if key in ['zs_resp', 'fs_resp']:
prompt = prompts[key]
flag = True
while flag:
try:
if key in ['zs_resp', 'fs_resp']:
output = query_openai_model(api_key, prompt,max_tokens=40)
elif key in ['zs', 'zs-pcot']:
output = query_openai_model(api_key, prompt,max_tokens=80)
else:
output = query_openai_model(api_key, prompt)
flag = False
except openai.error.OpenAIError as e:
print("Some error happened here.")
time.sleep(1)
print(output)
outputs[key] = output
fout.write('%s\n' % outputs)
count += 1
continue
outputs = {}
for key in prompts:
prompt = prompts[key]
flag = True
while flag:
try:
if key in ['zs_resp', 'fs_resp']:
output = query_openai_model(api_key, prompt,max_tokens=40)
elif key in ['zs', 'zs-pcot']:
output = query_openai_model(api_key, prompt,max_tokens=80)
else:
output = query_openai_model(api_key, prompt)
flag = False
except openai.error.OpenAIError as e:
print("Some error happened here.")
time.sleep(1)
print(output)
outputs[key] = output
fout.write('%s\n' % outputs)
if __name__ == "__main__":
infer('otters-source.txt', 'otters-chatgpt.txt') | [
"\n"
] |
2024-01-10 | dengyang17/LLM-Proactive | negotiate~negotiate_chatgpt.py | import openai
import time
import os
def query_openai_model(api_key: str, prompt: str, model: str = "gpt-3.5-turbo-0301", max_tokens: int = 256, temperature: float = 0):
openai.api_key = api_key
completions = openai.ChatCompletion.create(
model=model,
messages=[{"role": "user", "content": prompt}],
max_tokens=max_tokens,
n=1,
stop=None,
temperature=temperature,
)
output = completions.choices[0].message.content.strip()
return output
def infer(infile, outfile):
api_key = "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"
existing_outputs = []
if os.path.exists(outfile):
with open(outfile, 'r') as fin:
for line in fin:
existing_outputs.append(line)
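# Resume logic: records already written to the output file are re-used below and only the keys
# missing from them are queried; every record is re-written line by line because the output file
# is re-opened in 'w' (truncating) mode.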
with open(infile, 'r') as fin,\
open(outfile, 'w') as fout:
count = 0
for line in fin:
prompts = eval(line.strip('\n'))
if count < len(existing_outputs):
outputs = eval(existing_outputs[count].strip('\n'))
for key in prompts:
if key not in outputs:
prompt = prompts[key]
flag = True
while flag:
try:
output = query_openai_model(api_key, prompt)
flag = False
except openai.error.OpenAIError as e:
print("Some error happened here.")
time.sleep(1)
print(output)
outputs[key] = output
fout.write('%s\n' % outputs)
count += 1
continue
outputs = {}
for key in prompts:
prompt = prompts[key]
flag = True
while flag:
try:
output = query_openai_model(api_key, prompt)
flag = False
except openai.error.OpenAIError as e:
print("Some error happened here.")
time.sleep(1)
print(output)
outputs[key] = output
fout.write('%s\n' % outputs)
if __name__ == "__main__":
infer('data/negotiate-source.txt', 'output/negotiate-chatgpt.txt') | [
"\n"
] |
2024-01-10 | Ronterox/Mascot | backend~personalitydata.py | import time
from func.rng import rng_choice
from enum import IntEnum
import openai
import json
import os, os.path
openai.api_key = os.getenv("OPENAI_API_KEY")
class Choices(IntEnum):
STRONGLY_DISAGRREE = -3
DISAGREE = -2
SLIGHTLY_DISAGREE = -1
NEUTRAL = 0
SLIGHTLY_AGREE = 1
AGREE = 2
STRONGLY_AGREE = 3
personalityTest = json.load(open("../data/personality_test_simplified.json", "r", encoding="utf-8"))
def logfile(path, text):
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, 'a') as f:
f.write(text)
def generate_personality():
personality = {
"Extraversion": {"value": 0, "opposite": "Introversion", "letter": "E"},
"Introversion": {"value": 0, "opposite": "Extraversion", "letter": "I"},
"Sensing": {"value": 0, "opposite": "Intuition", "letter": "S"},
"Intuition": {"value": 0, "opposite": "Sensing", "letter": "N"},
"Thinking": {"value": 0, "opposite": "Feeling", "letter": "T"},
"Feeling": {"value": 0, "opposite": "Thinking", "letter": "F"},
"Judging": {"value": 0, "opposite": "Perceiving", "letter": "J"},
"Perceiving": {"value": 0, "opposite": "Judging", "letter": "P"}
}
answers = {}
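# Each random answer adds to one trait and subtracts from its opposite, so paired trait scores
# (e.g. Extraversion/Introversion) always mirror each other around zero.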
for category in personalityTest:
for question in personalityTest[category]:
answer = rng_choice(list(Choices))
personality[category]['value'] += answer
personality[personality[category]['opposite']]['value'] -= answer
answers[question] = Choices(answer).name
return personality, answers
def format_personality(personality):
acronym, categories, values = "", "", ""
for category in personality:
if personality[category]['value'] >= 0:
acronym += personality[category]['letter']
categories += f"({category}), "
values += f"{category}: {personality[category]['value']}\n"
return acronym, categories, values
trainingQuestions = json.load(open("../data/training_questions.json", "r", encoding="utf-8"))
listOfQuestions = []
for category in trainingQuestions:
listOfQuestions.extend(trainingQuestions[category]["questions"])
personalities = {}
try:
with open("../data/training_data.json", "r", encoding="utf-8") as f:
training_data = json.load(f)
except FileNotFoundError:
training_data = []
total_tokens = 0
total_time_taken = 0
for i in range(20_000):
startTime = time.time()
personality, answers = generate_personality()
acronym, _, values = format_personality(personality)
question = listOfQuestions[i % len(listOfQuestions)]
if acronym not in personalities:
personalities[acronym] = 1
else:
personalities[acronym] += 1
prompt = ""
for answer in answers:
choice = Choices[answers[answer]]
if choice != 0:
prompt += f"{answer} {choice}\n"
info = "strongly disagree=-3, disagree=-2, slightly disagree=-1, neutral=0, slightly agree=1, agree=2, strongly agree=3\n"
prompt = info + prompt.replace("\n", " ").strip()
# This is because GPT sucks with negative numbers
prompt = prompt.replace("-3", "6").replace("-2", "5").replace("-1", "4")
precontext = "You will respond as if you were the person with the following personality traits:\n\nAfter each sentence you have the personality thought on it.\n\n"
postcontext = "\n\nYour response will be concise, and won't mention that you are an AI."
prompt = precontext + prompt + postcontext
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": prompt},
{"role": "user", "content": question + " Explain."}
]
)
prediction = response['choices'][0]['message']['content']
except Exception as e:
logfile("../logs/training_data.log", str(e) + "\n")
if i % 10 == 0:
print(e)
continue
total_tokens += response['usage']['total_tokens']
training_data.append({"prompt": prompt, "personality": acronym, "completion": prediction })
with open("../data/training_data.json", "w", encoding="utf-8") as f:
json.dump(training_data, f, indent=4)
endTime = time.time()
total_time_taken += endTime - startTime
print(f"Question: {question}")
print(f"Personality: {acronym}")
print(f"Prediction: {prediction}")
print(f"Time: {endTime - startTime}")
print(f"Tokens: {response['usage']['total_tokens']}")
print(f"Total tokens: {total_tokens}")
print(f"Total time: {total_time_taken} seconds")
print(f"Time left: {(total_time_taken / (i + 1) * (20000 - i - 1)) / 60:.2f} minutes")
print(f"Current cost: {total_tokens * 0.002 / 1000} USD")
print("-" * 20)
print(f"Personalities: {personalities}")
print(f"Total tokens: {total_tokens}\nTotal Cost: {total_tokens * 0.002 / 1000} USD") | [
"\n",
"PLACEHOLDER Explain.",
"PLACEHOLDERPLACEHOLDERPLACEHOLDER",
"PLACEHOLDER PLACEHOLDER\n",
" "
] |
2024-01-10 | Ronterox/Mascot | backend~gpt3mikoapi.py | import os
import openai
import tiktoken
from enum import IntEnum
from time import time
openai.api_key = os.getenv("OPENAI_API_KEY")
MODELS = [("text-ada-001", 0.0016), ("text-babbage-001", 0.0024),
("text-curie-001", 0.0120), ("text-davinci-003", 0.1200)]
session_total_cost = 0
class Model(IntEnum):
ADA = 0
BABBAGE = 1
CURIE = 2
DAVINCI = 3
def count_tokens(model, prompt, show_output=True):
encoding = tiktoken.encoding_for_model(model)
tokens = len(encoding.encode(prompt))
if show_output:
print(f"{tokens} tokens")
return tokens
def predict(prompt, model=Model.DAVINCI, temp=0.5, max_tokens=100, top_p=1, freq_penalty=0.5, pres_penalty=0):
global session_total_cost
MODEL = MODELS[model][0]
COST = MODELS[model][1] / 1000
total_tokens = count_tokens(MODEL, prompt)
print(f"MODEL: {MODEL:-^50}")
print(f"PROMPT: {prompt}\n")
time_start = time()
response = openai.Completion.create(
model=MODEL,
prompt=prompt,
temperature=temp,
max_tokens=max_tokens,
top_p=top_p,
frequency_penalty=freq_penalty,
presence_penalty=pres_penalty
)
total_time = time() - time_start
response_text = response.choices[0].text
total_tokens += count_tokens(MODEL, response_text)
cost = total_tokens * COST
session_total_cost += cost
print(f"RESPONSE: {response_text}")
print(f"TOTAL TOKENS: {total_tokens}")
print(f"TOTAL COST: {cost} USD in {total_time:.2f} seconds")
print(f"TOTAL COST SESSION: {session_total_cost} USD")
return response_text
if __name__ == "__main__":
predict("Hello, world!", Model.DAVINCI)
| [] |
2024-01-10 | OpenGPTX/Megatron-LM | megatron~model~bert_model.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""BERT model."""
import torch
from megatron import get_args
from megatron.core import tensor_parallel
from megatron.model.enums import AttnMaskType
from megatron.model.language_model import parallel_lm_logits
from megatron.model.language_model import get_language_model
from megatron.model import LayerNorm, RMSNorm
from megatron.model.utils import openai_gelu, erf_gelu
from megatron.model.utils import get_linear_layer
from megatron.model.utils import init_method_normal
from megatron.model.utils import scaled_init_method_normal
from .module import MegatronModule
def bert_extended_attention_mask(attention_mask):
# We create a 3D attention mask from a 2D tensor mask.
# [b, 1, s]
attention_mask_b1s = attention_mask.unsqueeze(1)
# [b, s, 1]
attention_mask_bs1 = attention_mask.unsqueeze(2)
# [b, s, s]
attention_mask_bss = attention_mask_b1s * attention_mask_bs1
# [b, 1, s, s]
extended_attention_mask = attention_mask_bss.unsqueeze(1)
# Convert attention mask to binary:
extended_attention_mask = (extended_attention_mask < 0.5)
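# True now marks positions involving padding, i.e. the entries the downstream attention softmax masks out.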
return extended_attention_mask
def bert_position_ids(token_ids):
# Create position ids
seq_length = token_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long,
device=token_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(token_ids)
return position_ids
class BertLMHead(MegatronModule):
"""Masked LM head for Bert
Arguments:
config: TransformerConfig object
mpu_vocab_size: model parallel size of vocabulary.
hidden_size: hidden size
parallel_output: whether output logits being distributed or not.
"""
def __init__(self, mpu_vocab_size, hidden_size, config, parallel_output):
super().__init__(config=config)
args = get_args()
self.bias = torch.nn.Parameter(torch.zeros(mpu_vocab_size))
tensor_parallel.set_tensor_model_parallel_attributes(self.bias, True, 0, 1)
self.parallel_output = parallel_output
self.dense = get_linear_layer(hidden_size, hidden_size, config.init_method)
setattr(self.dense.weight, 'sequence_parallel', config.sequence_parallel)
setattr(self.dense.bias, 'sequence_parallel', config.sequence_parallel)
if config.normalization == 'LayerNorm':
layernorm_cls = LayerNorm
elif config.normalization == 'RMSNorm':
layernorm_cls = RMSNorm
else:
raise ValueError(f'unknown normalization "{config.normalization}"')
self.layernorm = layernorm_cls(hidden_size,
eps=config.layernorm_epsilon,
sequence_parallel=config.sequence_parallel)
self.gelu = torch.nn.functional.gelu
if args.openai_gelu:
self.gelu = openai_gelu
elif args.onnx_safe:
self.gelu = erf_gelu
def forward(self, hidden_states, word_embeddings_weight):
hidden_states = self.dense(hidden_states)
hidden_states = self.gelu(hidden_states)
hidden_states = self.layernorm(hidden_states)
output = parallel_lm_logits(hidden_states,
word_embeddings_weight,
self.parallel_output,
bias=self.bias)
return output
def post_language_model_processing(lm_output, pooled_output,
lm_head, binary_head,
lm_labels,
logit_weights,
fp16_lm_cross_entropy):
# Output.
lm_logits = lm_head(
lm_output, logit_weights)
binary_logits = None
if binary_head is not None:
binary_logits = binary_head(pooled_output)
if lm_labels is None:
# [s b h] => [b s h]
return lm_logits.transpose(0,1).contiguous(), binary_logits
else:
# [b s] => [s b]
lm_labels = lm_labels.transpose(0,1).contiguous()
# lm_logits : [s, b, h] and lm_labels: [s, b]
if fp16_lm_cross_entropy:
assert lm_logits.dtype == torch.half
lm_loss = tensor_parallel.vocab_parallel_cross_entropy(lm_logits, lm_labels)
else:
lm_loss = tensor_parallel.vocab_parallel_cross_entropy(lm_logits.float(),
lm_labels)
# [s, b] => [b s]
lm_loss = lm_loss.transpose(0,1).contiguous()
return lm_loss, binary_logits
class BertModel(MegatronModule):
"""Bert Language model."""
def __init__(self,
config,
num_tokentypes=2,
add_binary_head=True,
parallel_output=True,
pre_process=True,
post_process=True):
super().__init__(config=config)
args = get_args()
# TODO this option is not yet implemented in BERT
assert args.untie_embeddings_and_output_weights is False
self.fp16_lm_cross_entropy = args.fp16_lm_cross_entropy
self.add_binary_head = add_binary_head
self.parallel_output = parallel_output
self.pre_process = pre_process
self.post_process = post_process
self.return_embeddings = args.output_bert_embeddings
if self.return_embeddings:
assert self.post_process and self.add_binary_head
self.language_model, self._language_model_key = get_language_model(
config=config,
num_tokentypes=num_tokentypes,
add_pooler=self.add_binary_head,
encoder_attn_mask_type=AttnMaskType.padding,
pre_process=self.pre_process,
post_process=self.post_process)
self.initialize_word_embeddings()
if self.post_process:
self.lm_head = BertLMHead(self.shared_embedding_or_output_weight().size(0), config.hidden_size,
config, parallel_output)
self._lm_head_key = 'lm_head'
self.binary_head = None
if self.add_binary_head:
self.binary_head = get_linear_layer(config.hidden_size, 2,
config.init_method)
self._binary_head_key = 'binary_head'
def set_input_tensor(self, input_tensor):
"""See megatron.model.transformer.set_input_tensor()"""
self.language_model.set_input_tensor(input_tensor)
def forward(self, bert_model_input, attention_mask,
tokentype_ids=None, lm_labels=None):
extended_attention_mask = bert_extended_attention_mask(attention_mask)
input_ids = bert_model_input
position_ids = bert_position_ids(input_ids)
lm_output = self.language_model(
input_ids,
position_ids,
extended_attention_mask,
tokentype_ids=tokentype_ids
)
if self.post_process and self.add_binary_head:
lm_output, pooled_output = lm_output
# Return pooled output (e.g., when computing Bert embeddings).
if self.return_embeddings:
# Sum attention mask.
embeddings = torch.transpose(lm_output, 0, 1)
masks = torch.sum(attention_mask, dim=1)
# Collect masked embeddings.
output = torch.zeros(
size=(embeddings.shape[0], embeddings.shape[2]),
dtype=torch.float32,
device=torch.cuda.current_device())
for i, (embedding, mask) in enumerate(zip(embeddings, masks)):
output[i, :] = torch.mean(embedding[1: mask - 1], dim=0)
return output
else:
pooled_output = None
if self.post_process:
return post_language_model_processing(lm_output, pooled_output,
self.lm_head, self.binary_head,
lm_labels,
self.shared_embedding_or_output_weight(),
self.fp16_lm_cross_entropy)
else:
return lm_output
def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
"""For easy load when model is combined with other heads,
add an extra key."""
state_dict_ = {}
state_dict_[self._language_model_key] \
= self.language_model.state_dict_for_save_checkpoint(prefix=prefix,
keep_vars=keep_vars)
if self.post_process:
state_dict_[self._lm_head_key] \
= self.lm_head.state_dict_for_save_checkpoint(prefix=prefix,
keep_vars=keep_vars)
if self.post_process and self.add_binary_head:
state_dict_[self._binary_head_key] \
= self.binary_head.state_dict(prefix=prefix, keep_vars=keep_vars)
# Save word_embeddings.
if self.post_process and not self.pre_process:
state_dict_[self._word_embeddings_for_head_key] \
= self.word_embeddings.state_dict(prefix=prefix, keep_vars=keep_vars)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Customized load."""
self.language_model.load_state_dict(
state_dict[self._language_model_key], strict=strict)
if self.post_process:
self.lm_head.load_state_dict(
state_dict[self._lm_head_key], strict=strict)
if self.post_process and self.add_binary_head:
self.binary_head.load_state_dict(
state_dict[self._binary_head_key], strict=strict)
# Load word_embeddings.
if self.post_process and not self.pre_process:
self.word_embeddings.load_state_dict(
state_dict[self._word_embeddings_for_head_key], strict=strict)
| [] |
2024-01-10 | alphasecio/langchain-examples | all-in-one~pages~1_Search.py | import streamlit as st
from langchain.llms.openai import OpenAI
from langchain.agents import load_tools, initialize_agent
# Set API keys from session state
openai_api_key = st.session_state.openai_api_key
serper_api_key = st.session_state.serper_api_key
# Streamlit app
st.subheader('Web Search')
search_query = st.text_input("Enter Search Query")
# If the 'Search' button is clicked
if st.button("Search"):
# Validate inputs
if not openai_api_key or not serper_api_key:
st.error("Please provide the missing API keys in Settings.")
elif not search_query.strip():
st.error("Please provide the search query.")
else:
try:
with st.spinner('Please wait...'):
# Initialize the OpenAI module, load the Google Serper API tool, and run the search query using an agent
llm = OpenAI(temperature=0, openai_api_key=openai_api_key, verbose=True)
tools = load_tools(["google-serper"], llm, serper_api_key=serper_api_key)
agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)
result = agent.run(search_query)
st.success(result)
except Exception as e:
st.exception(f"An error occurred: {e}")
| [] |
2024-01-10 | alphasecio/langchain-examples | all-in-one~pages~4_Document_Summary.py | import os, tempfile
import streamlit as st
from langchain.llms.openai import OpenAI
from langchain.vectorstores.chroma import Chroma
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chains.summarize import load_summarize_chain
from langchain.document_loaders import PyPDFLoader
# Set API keys from session state
openai_api_key = st.session_state.openai_api_key
# Streamlit app
st.subheader('Document Summary')
source_doc = st.file_uploader("Upload Source Document", type="pdf")
# If the 'Summarize' button is clicked
if st.button("Summarize"):
# Validate inputs
if not openai_api_key:
st.error("Please provide the missing API keys in Settings.")
elif not source_doc:
st.error("Please provide the source document.")
else:
try:
with st.spinner('Please wait...'):
# Save uploaded file temporarily to disk, load and split the file into pages, delete temp file
with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
tmp_file.write(source_doc.read())
loader = PyPDFLoader(tmp_file.name)
pages = loader.load_and_split()
os.remove(tmp_file.name)
# Create embeddings for the pages and insert into Chroma database
embeddings=OpenAIEmbeddings(openai_api_key=openai_api_key)
vectordb = Chroma.from_documents(pages, embeddings)
# Initialize the OpenAI module, load and run the summarize chain
llm=OpenAI(temperature=0, openai_api_key=openai_api_key)
chain = load_summarize_chain(llm, chain_type="stuff")
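# similarity_search(" ") with a blank query is a small trick to pull back a handful of chunks
# from the single-document Chroma index so the stuff chain has context to summarize.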
search = vectordb.similarity_search(" ")
summary = chain.run(input_documents=search, question="Write a summary within 200 words.")
st.success(summary)
except Exception as e:
st.exception(f"An error occurred: {e}")
| [] |
2024-01-10 | alphasecio/langchain-examples | all-in-one~pages~2_URL_Summary.py | import validators, streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import YoutubeLoader, UnstructuredURLLoader
from langchain.chains.summarize import load_summarize_chain
from langchain.prompts import PromptTemplate
# Set API keys from session state
openai_api_key = st.session_state.openai_api_key
# Streamlit app
st.subheader('URL Summary')
url = st.text_input("Enter Source URL")
# If 'Summarize' button is clicked
if st.button("Summarize"):
# Validate inputs
if not openai_api_key:
st.error("Please provide the missing API keys in Settings.")
elif not url:
st.error("Please provide the URL.")
elif not validators.url(url):
st.error("Please enter a valid URL.")
else:
try:
with st.spinner("Please wait..."):
# Load URL data
if "youtube.com" in url:
loader = YoutubeLoader.from_youtube_url(url, add_video_info=True)
else:
loader = UnstructuredURLLoader(urls=[url], ssl_verify=False, headers={"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 13_5_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36"})
data = loader.load()
# Initialize the ChatOpenAI module, load and run the summarize chain
llm = ChatOpenAI(temperature=0, model='gpt-3.5-turbo', openai_api_key=openai_api_key)
prompt_template = """Write a summary of the following in 250-300 words.
{text}
"""
prompt = PromptTemplate(template=prompt_template, input_variables=["text"])
chain = load_summarize_chain(llm, chain_type="stuff", prompt=prompt)
summary = chain.run(data)
st.success(summary)
except Exception as e:
st.exception(f"Exception: {e}")
| [
"Write a summary of the following in 250-300 words.\n \n {text}\n\n "
] |
2024-01-10 | alphasecio/langchain-examples | all-in-one~pages~5_News_Summary.py | import streamlit as st, tiktoken
from langchain.chat_models import ChatOpenAI
from langchain.utilities import GoogleSerperAPIWrapper
from langchain.document_loaders import UnstructuredURLLoader
from langchain.chains.summarize import load_summarize_chain
# Set API keys from session state
openai_api_key = st.session_state.openai_api_key
serper_api_key = st.session_state.serper_api_key
# Streamlit app
st.subheader('News Summary')
num_results = st.number_input("Number of Search Results", min_value=3, max_value=10)
search_query = st.text_input("Enter Search Query")
col1, col2 = st.columns(2)
# If the 'Search' button is clicked
if col1.button("Search"):
# Validate inputs
if not openai_api_key or not serper_api_key:
st.error("Please provide the missing API keys in Settings.")
elif not search_query.strip():
st.error("Please provide the search query.")
else:
try:
with st.spinner("Please wait..."):
# Show the top X relevant news articles from the previous week using Google Serper API
search = GoogleSerperAPIWrapper(type="news", tbs="qdr:w1", serper_api_key=serper_api_key)
result_dict = search.results(search_query)
if not result_dict['news']:
st.error(f"No search results for: {search_query}.")
else:
for i, item in zip(range(num_results), result_dict['news']):
st.success(f"Title: {item['title']}\n\nLink: {item['link']}\n\nSnippet: {item['snippet']}")
except Exception as e:
st.exception(f"Exception: {e}")
# If 'Search & Summarize' button is clicked
if col2.button("Search & Summarize"):
# Validate inputs
if not openai_api_key or not serper_api_key:
st.error("Please provide the missing API keys in Settings.")
elif not search_query.strip():
st.error("Please provide the search query.")
else:
try:
with st.spinner("Please wait..."):
# Show the top X relevant news articles from the previous week using Google Serper API
search = GoogleSerperAPIWrapper(type="news", tbs="qdr:w1", serper_api_key=serper_api_key)
result_dict = search.results(search_query)
if not result_dict['news']:
st.error(f"No search results for: {search_query}.")
else:
# Load URL data from the top X news search results
for i, item in zip(range(num_results), result_dict['news']):
loader = UnstructuredURLLoader(urls=[item['link']])
data = loader.load()
# Initialize the ChatOpenAI module, load and run the summarize chain
llm = ChatOpenAI(temperature=0, model='gpt-3.5-turbo', openai_api_key=openai_api_key)
chain = load_summarize_chain(llm, chain_type="map_reduce")
summary = chain.run(data)
st.success(f"Title: {item['title']}\n\nLink: {item['link']}\n\nSummary: {summary}")
except Exception as e:
st.exception(f"Exception: {e}")
| [] |
2024-01-10 | alphasecio/langchain-examples | text-summary~streamlit_app.py | import os, streamlit as st
from langchain.text_splitter import CharacterTextSplitter
from langchain.docstore.document import Document
from langchain.llms.openai import OpenAI
from langchain.chains.summarize import load_summarize_chain
# Streamlit app
st.subheader('Summarize Text')
# Get OpenAI API key and source text input
with st.sidebar:
openai_api_key = st.text_input("OpenAI API key", value="", type="password")
st.caption("*If you don't have an OpenAI API key, get it [here](https://platform.openai.com/account/api-keys).*")
source_text = st.text_area("Source Text", label_visibility="collapsed", height=200)
# If the 'Summarize' button is clicked
if st.button("Summarize"):
# Validate inputs
if not openai_api_key.strip() or not source_text.strip():
st.error(f"Please provide the missing fields.")
else:
try:
with st.spinner('Please wait...'):
# Split the source text
text_splitter = CharacterTextSplitter()
texts = text_splitter.split_text(source_text)
# Create Document objects for the texts (max 3 pages)
docs = [Document(page_content=t) for t in texts[:3]]
# Initialize the OpenAI module, load and run the summarize chain
llm = OpenAI(temperature=0, openai_api_key=openai_api_key)
chain = load_summarize_chain(llm, chain_type="map_reduce")
summary = chain.run(docs)
st.success(summary)
except Exception as e:
st.exception(f"An error occurred: {e}")
| [] |
2024-01-10 | grumpyp/aixplora | backend~loaders~audio_loader.py | import os
import openai
import tempfile
from sqlalchemy import text
from database.database import Database
from fastapi import UploadFile
class Whisperexporter:
"""
supports [".m4a", ".mp3", ".mp4", ".mpeg", ".mpga", ".wav", ".webm"]
"""
def __init__(self):
# TODO: move this to utils or something it's used in multiple places
try:
self.openai_api_key = Database().get_session().execute(text("SELECT openai_api_key FROM config")).fetchall()[-1]
except:
self.openai_api_key = "notdefined"
def whisper_to_text(self, file: bytes, filename: str, file_meta: UploadFile):
misc_dir = os.path.join(os.getcwd(), "misc")
with tempfile.NamedTemporaryFile(delete=False, suffix=filename) as tmp_file:
content = file.read()
tmp_file.write(content)
tmp_file.flush()
tmp_file.close()
with open(tmp_file.name, "rb") as audio_file:
openai.api_key = self.openai_api_key[0]  # the fetched row is tuple-like, so unpack the key string
transcript = openai.Audio.transcribe("whisper-1", audio_file)
transcript_text = transcript['text'] # Extract the text content from the transcript object
with open(f"{misc_dir}/{filename}.txt", "w", encoding="utf-8") as f:
f.write(transcript_text)
return f"{misc_dir}/{filename}.txt", file_meta
@property
def textes(self):
return self._textes | [] |
2024-01-10 | grumpyp/aixplora | backend~embeddings~index_files.py | # TODO: Research if other db is better, refactor to use other db, or choose own (inherit from a base)
# TODO: Implement other embeddings algorithm than OpenAI
# TODO: Split class into a class which indexes and which does the querying
from langchain.document_loaders import TextLoader
from typing import List, Dict
from langchain.schema import Document
from database.database import Database
from database.models.prompt import Prompt
from sqlalchemy import text
from embeddings.utils import openai_ask, openai_ask_no_aixplora_brain
import random
from qdrant_client import QdrantClient
from qdrant_client.http import models
import openai
from fastapi import UploadFile
from embeddings.text_splitter import TextSplitter
from embeddings.basesplit import ContextTypes
import re
import requests
from gpt4all import GPT4All
import os
from sentence_transformers import SentenceTransformer
# TODO: This is just a base implementation extend it with metadata,..
# 25.05.2023: Quickfix for now removed langchain components to make it work asap, needs refactor - old
# 25.05.2023: Quickfix, seems also to be a problem with chromadb, now using qudrant vector db, needs refactor
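# Genie handles both indexing and querying: on construction it sets up the local Qdrant
# collection (or talks to the hosted AIxplora endpoint when remote_db is set), optionally
# embeds and uploads the given file, and later serves similarity search plus question
# answering over the stored chunks.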
class Genie:
def __init__(self, file_path: str = None, file_meta: UploadFile | Dict[str, str] = None, remote_db: bool = False,
apikey: str = None, email: str = None):
try:
self.openai_api_key = \
Database().get_session().execute(text("SELECT openai_api_key FROM config")).fetchall()[-1]
self.openai_model = Database().get_session().execute(text("SELECT model FROM config")).fetchall()[-1]
except:
self.openai_api_key = "notdefined"
try:
self.embeddings_model = \
Database().get_session().execute(text("SELECT embeddings_model FROM config")).fetchall()[-1]
# By default use OpenAI Model if exception is triggered
except Exception as e:
print(f"Using default OpenAI model: {e}")
self.embeddings_model = "text-embedding-ada-002"
self.remote_db = remote_db
self.apikey = apikey
self.email = email
self.remote_headers = {"apikey": self.apikey, "email": self.email}
if not remote_db:
self.qu = QdrantClient(path="./qdrant_data")
try:
if self.qu.get_collection(collection_name="aixplora").vectors_count == 0:
self.qu.recreate_collection(
collection_name="aixplora",
vectors_config={
"text-embedding-ada-002": models.VectorParams(size=1536, distance=models.Distance.COSINE),
"all-MiniLM-L6-v2": models.VectorParams(size=384, distance=models.Distance.COSINE),
"multi-qa-MiniLM-L6-cos-v1": models.VectorParams(size=384, distance=models.Distance.COSINE),
"paraphrase-albert-small-v2": models.VectorParams(size=768, distance=models.Distance.COSINE),
"multi-qa-mpnet-base-dot-v1": models.VectorParams(size=768, distance=models.Distance.COSINE)
})
except:
self.qu.recreate_collection(
collection_name="aixplora",
vectors_config={
"text-embedding-ada-002": models.VectorParams(size=1536, distance=models.Distance.COSINE),
"all-MiniLM-L6-v2": models.VectorParams(size=384, distance=models.Distance.COSINE),
"multi-qa-MiniLM-L6-cos-v1": models.VectorParams(size=384, distance=models.Distance.COSINE),
"paraphrase-albert-small-v2": models.VectorParams(size=768, distance=models.Distance.COSINE),
"multi-qa-mpnet-base-dot-v1": models.VectorParams(size=768, distance=models.Distance.COSINE)
})
if file_path:
self.file_meta = file_meta
self.file_path = file_path
if not isinstance(self.file_path, list):
self.file_path = [self.file_path]
for i in self.file_path:
self.loader = TextLoader(i)
self.documents = self.loader.load()
self.texts = self.text_split(self.documents)
self.vectordb = self.embeddings(self.texts, page=i)
@staticmethod
def text_split(documents: TextLoader) -> List[str]:
document_str = "".join([document.page_content for document in documents])
text_splitter = TextSplitter(document_str, ContextTypes.TEXT).chunk_document()
fixed_whitespaces = []
for document in text_splitter:
replaced = document
replaced = re.sub('\s*\.\s*', '. ', replaced) # replace ' . ' with '. '
replaced = re.sub('\s*,\s*', ', ', replaced) # replace ' , ' with ', '
replaced = re.sub('\s*:\s*', ': ', replaced) # replace ' : ' with ': '
replaced = re.sub('\s*\(\s*', ' (', replaced) # replace ' ( ' with ' ('
replaced = re.sub('\s*\)\s*', ') ', replaced) # replace ' ) ' with ') '
replaced = re.sub('\s+', ' ', replaced) # replace multiple spaces with one space
replaced = replaced.replace('\n', '')
fixed_whitespaces.append(replaced)
return fixed_whitespaces
def upload_embedding(self, texts: List[Document], collection_name: str = "aixplora", page: int = 0) -> None:
for i in range(len(texts)):
if self.embeddings_model[0] != "text-embedding-ada-002":
model = SentenceTransformer(f"{self.embeddings_model[0]}")
embeddings = [float(x) for x in model.encode(texts[i])]
else:
response = openai.Embedding.create(
input=texts[i],
model="text-embedding-ada-002"
)
embeddings = response['data'][0]['embedding']
if isinstance(self.file_meta, dict):
filename = self.file_meta.get("filename")
filetype = self.file_meta.get("content_type", "website")
else: # Assuming that in this case it's an object with attributes
filename = getattr(self.file_meta, "filename")
filetype = getattr(self.file_meta, "content_type")
if not self.remote_db:
self.qu.upsert(
collection_name=collection_name,
wait=True,
points=[
# TODO: Change randomint to UUID
models.PointStruct(
id=random.randint(1, 100000000),
payload={
"chunk": texts[i],
"metadata": {"filename": filename,
"filetype": filetype,
"page": page,
"embeddings_model": self.embeddings_model[0]}
},
vector={
f"{self.embeddings_model[0]}": embeddings
},
),
]
)
else:
# TODO: Clientside restriction if Cloud responses no WRITE access (401, {'message': 'Write permission is not granted'})
payload = {
"chunk": texts[i],
"metadata": {"filename": filename,
"filetype": filetype,
"page": page,
"embeddings_model": self.embeddings_model[0]},
"vector": {
f"{self.embeddings_model[0]}": embeddings
},
}
# Debug
# print(payload)
# time.sleep(5)
# needs to be json not payload -> because of the encoding application/x-www-form-urlencoded isn't supported
r = requests.post("https://api.aixplora.app/api/qdrant/upload/", headers=self.remote_headers, json=payload)
return
def embeddings(self, texts: List[str], page: int):
texts = [text for text in texts]
openai.api_key = self.openai_api_key[0]
print(len(texts))
self.upload_embedding(texts=texts, page=page)
return
def search(self, query: str, specific_doc: str | None):
openai.api_key = self.openai_api_key[0]
print(self.openai_api_key)
if not self.remote_db:
if self.embeddings_model[0] != "text-embedding-ada-002":
model = SentenceTransformer(f"{self.embeddings_model[0]}")
embeddings = [float(x) for x in model.encode(query)]
else:
response = openai.Embedding.create(
input=query,
model="text-embedding-ada-002"
)
embeddings = response['data'][0]['embedding']
results = self.qu.search(
collection_name="aixplora",
query_vector=(f"{self.embeddings_model[0]}", embeddings),
limit=3,
with_payload=True
)
if specific_doc is not None:
# Without the clean it won't find the document
specific_doc_clean = specific_doc.replace('https://', '').replace('http://', '').replace('/', '_')
results = self.qu.search(
collection_name="aixplora",
query_vector=(f"{self.embeddings_model[0]}", embeddings),
query_filter=models.Filter(
must=[
models.FieldCondition(
key="metadata.filename",
match=models.MatchValue(value=f"{specific_doc_clean}"),
)
]
),
limit=3
)
else:
if self.embeddings_model[0] != "text-embedding-ada-002":
model = SentenceTransformer(f"{self.embeddings_model[0]}")
embeddings = [float(x) for x in model.encode(query)]
else:
response = openai.Embedding.create(
input=query,
model="text-embedding-ada-002"
)
embeddings = response['data'][0]['embedding']
payload = {"query_vector": {f"{self.embeddings_model[0]}": embeddings}}
r = requests.post(headers=self.remote_headers, json=payload, url="https://api.aixplora.app/api/qdrant/get/")
if specific_doc is not None:
specific_doc_clean = specific_doc.replace('https://', '').replace('http://', '').replace('/', '_')
payload = {"query_vector": {f"{self.embeddings_model[0]}": embeddings}, "specific_doc": specific_doc_clean}
r = requests.post(headers=self.remote_headers, json=payload, url="https://api.aixplora.app/api/qdrant/get/")
return (r.json(), r.status_code)
return results
# This is used to ask questions on all documents
# TODO: evaluate how many embeddings are in db, based on that change n_results dynamcially
def query(self, query_embedding: List[List[float]] = None, query_texts: str = None, specific_doc: str = None,
use_brain: bool = True):
meta_data = []
db = Database().get_session()
prompts = sorted(db.execute(text("SELECT * FROM prompt")).fetchall(), key=lambda x: x[2], reverse=True)
if len(prompts) == 0:
prompt = "Answer the following question: {query_texts} based on that context: {relevant_docs}," \
" make sure that the answer of you is in the same language then the question." \
" if you can't just answer: I don't know."
else:
prompt = prompts[0][1]
# The question is referenced as {question} in the prompt
# The chunks/ relevant docs are referenced as {relevant_docs} in the prompt
if use_brain:
if not query_embedding and not query_texts:
raise ValueError("Either query_embedding or query_texts must be provided")
results = self.search(query_texts, specific_doc)
if isinstance(results, tuple):
relevant_docs = [doc["payload"]["chunk"] for doc in results[0]]
meta_data = [doc["payload"]["metadata"] for doc in results[0]]
prompt = prompt.replace("{question}", query_texts)
prompt = prompt.replace("{relevant_docs}", " ".join(doc["payload"]["chunk"] for doc in results[0]))
else:
relevant_docs = [doc.payload["chunk"] for doc in results]
meta_data = [doc.payload["metadata"] for doc in results]
prompt = prompt.replace("{question}", query_texts)
prompt = prompt.replace("{relevant_docs}", " ".join([doc.payload["chunk"] for doc in results]))
print(self.openai_model)
if not self.openai_model[0].startswith("gpt"):
print(f"Using local model: {self.openai_model[0]}")
# TODO: refactor this path to be global
models_dir = os.path.join(os.getcwd(), "llmsmodels")
gptj = GPT4All(model_name=self.openai_model[0], model_path=models_dir)
if use_brain:
messages = [
{"role": "user",
"content": f"{prompt}"}
]
else:
messages = [
{"role": "user",
"content": f"{query_texts}"}]
answer = gptj.chat_completion(messages, streaming=False)["choices"][0]["message"]["content"]
else:
if use_brain:
if self.openai_model[0].startswith("gpt"):
print(f"Using openai model: {self.openai_model[0]}")
answer = openai_ask(context=relevant_docs, question=query_texts, openai_api_key=self.openai_api_key[0],
openai_model=self.openai_model[0], prompt=prompt)
else:
answer = openai_ask(context=relevant_docs, question=query_texts,
openai_api_key=self.openai_api_key[0],
openai_model=self.openai_model[0], prompt=prompt)
else:
if self.openai_model[0].startswith("gpt"):
answer = openai_ask_no_aixplora_brain(question=query_texts, openai_api_key=self.openai_api_key[0],
openai_model=self.openai_model[0])
_answer = {"answer": answer, "meta_data": meta_data}
print(meta_data)
return _answer
| [
"{relevant_docs}",
"L",
"chunk",
"SELECT * FROM prompt",
"PLACEHOLDER",
"Answer the following question: {query_texts} based on that context: {relevant_docs}, make sure that the answer of you is in the same language then the question. if you can't just answer: I don't know.",
" ",
"{question}"
] |
2024-01-10 | kathrinv/what-do-you-mean | frb_functions.py | # library imports
# webscraping
from selenium import webdriver
import re
import time
# data analysis
import numpy as np
import pandas as pd
import pickle
from tqdm import tqdm_notebook as tqdm
# natural language processing - NLTK
import nltk
nltk.download('wordnet')
from nltk.corpus import wordnet, stopwords
from nltk.probability import FreqDist
from nltk.stem import WordNetLemmatizer
# natural language processing - Gensim and LDA
import gensim
from gensim import corpora, models, similarities
from gensim.models import CoherenceModel
import pyLDAvis.gensim
# natural language processing - TextBlob (Sentiment)
from textblob import TextBlob
# data visualization
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import seaborn as sns
def navigate_frb_speeches():
"""
Navigates the Fed Speeches website
and calls get_frb_article_links helper
function to scrape the urls to all Fed
speeches from the Fed website (non-archived
speeches dating back to 2006).
Returns:
list: Speech urls for all non-archived
speeches on the Fed website.
"""
# initiating selenium Chrome webdriver instance
option = webdriver.ChromeOptions()
option.add_argument(" — incognito")
browser = webdriver.Chrome(options=option)
browser.get("https://www.federalreserve.gov/newsevents/speeches.htm")
article_urls = []
new_urls = get_frb_article_links(browser)
while not article_urls or article_urls[-1] != new_urls[-1]:
article_urls += get_frb_article_links(browser)
next_button = browser.find_element_by_css_selector("a[ng-click='selectPage(page + 1, $event)']")
next_button.click()
new_urls = get_frb_article_links(browser)
time.sleep(np.random.randint(5,10))
browser.close()
return article_urls
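# Minimal usage sketch (assumes chromedriver is installed and on the PATH for Selenium):
# urls = navigate_frb_speeches()
# speeches = get_frb_speech_text(urls)  # list of [url, date, title, speaker, location, text]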
def get_frb_article_links(browser):
"""
Helper function for navigate_frb_speeches.
(only works for non-archived speeches)
Parameters:
browser: Selenium browser instance
Returns:
list: Speech urls for the current
page of speeches.
"""
new_urls = []
articles = browser.find_elements_by_class_name('itemTitle')
for article in articles:
url = article.find_element_by_tag_name('a').get_attribute('href')
new_urls.append(url)
return new_urls
def get_frb_speech_text(url_lst):
"""
Accesses and scrapes all the speech text from a
list of urls provided. Only works for non-archived
speeches on the Fed website.
Parameters:
url_lst (list): list of speech urls to scrape
Returns:
list: A list of lists that contains
the speech url, date, title, speaker, location,
and complete text for all speeches in the
url_lst.
"""
option = webdriver.ChromeOptions()
option.add_argument(" — incognito")
browser = webdriver.Chrome(options=option)
frb_articles = []
for url in url_lst:
article_details = []
article_details.append(url)
browser.get(url)
article_times = browser.find_elements_by_class_name('article__time')
article_details.append(article_times[0].text)
article_titles = browser.find_elements_by_class_name('title')
article_details.append(article_titles[0].text)
article_speakers = browser.find_elements_by_class_name('speaker')
article_details.append(article_speakers[0].text)
article_locations = browser.find_elements_by_class_name('location')
article_details.append(article_locations[0].text)
article_texts = browser.find_elements_by_xpath('//*[@id="article"]/div[3]')
article_details.append(article_texts[0].text)
frb_articles.append(article_details)
time.sleep(np.random.randint(5,10))
browser.close()
return frb_articles
def get_frb_article_links_archived(browser):
"""
Helper function for navigate_frb_archived_speeches.
(only works for archived speeches)
Parameters:
browser: Selenium browser instance
Returns:
tuple: Lists of speech urls, titles, speakers,
locations, and dates for the current
page of speeches.
"""
new_urls = []
new_titles = []
new_speakers = []
new_locations = []
new_dates = []
speeches = browser.find_element_by_id('speechIndex')
speech_urls = speeches.find_elements_by_tag_name('a')
for speech in speech_urls:
url = speech.get_attribute('href')
new_urls.append(url)
title = speech.text
new_titles.append(title)
speech_dates = speeches.find_elements_by_tag_name('li')
for speech in speech_dates:
date_ = re.findall(r'(?<=)(\S+ \d+, \d{4})', speech.text)[0]
new_dates.append(date_)
speech_speakers = speeches.find_elements_by_class_name('speaker')
for speaker in speech_speakers:
new_speakers.append(speaker.text)
speech_locations = speeches.find_elements_by_class_name('location')
for location in speech_locations:
new_locations.append(location.text)
return new_urls, new_titles, new_speakers, new_locations, new_dates
def navigate_frb_archived_speeches():
"""
Navigates the archived Fed Speeches website
and calls the get_frb_article_links_archived helper
function to scrape the urls to all archived Fed
speeches from the Fed website (archived
speeches dating back to 1996).
Returns:
tuple: Lists of speech urls, speakers, locations,
dates, and titles for all archived speeches on the Fed website.
"""
# initiating selenium Chrome webdriver instance
option = webdriver.ChromeOptions()
option.add_argument(" — incognito")
browser = webdriver.Chrome(options=option)
browser.get("https://www.federalreserve.gov/newsevents/speech/speeches-archive.htm")
speech_urls = []
speakers = []
locations = []
dates_ = []
titles = []
year_links = []
list_of_years = browser.find_element_by_xpath('//*[@id="article"]/div/div/div/ul')
all_year_links = list_of_years.find_elements_by_tag_name("a")
for year_link in all_year_links:
url = year_link.get_attribute('href')
year_links.append(url)
for url in year_links:
browser.get(url)
new_urls, new_titles, new_speakers, new_locations, new_dates = get_frb_article_links_archived(browser)
speech_urls = speech_urls + new_urls
titles = titles + new_titles
speakers = speakers + new_speakers
locations = locations + new_locations
dates_ = dates_ + new_dates
time.sleep(np.random.randint(5,10))
browser.close()
# removing extra url accidentally picked up
del titles[-118]
del speech_urls[-118]
return speech_urls, speakers, locations, dates_, titles
def get_frb_speech_text_archived(url_lst):
"""
Accesses and scrapes all the speech text from a
list of urls provided. Only works for archived
speeches on the Fed website.
Parameters:
url_lst (list): list of speech urls to scrape
Returns:
list: speech text
"""
# initiating selenium Chrome webdriver instance
option = webdriver.ChromeOptions()
option.add_argument(" — incognito")
browser = webdriver.Chrome(options=option)
speech_text = []
for url in url_lst:
browser.get(url)
paragraphs = browser.find_elements_by_tag_name('p')
complete_text = ""
for paragraph in paragraphs:
complete_text += ' ' + paragraph.text
speech_text.append(complete_text)
time.sleep(np.random.randint(5,10))
browser.close()
return speech_text
# webscraping functions for FOMC speeches
# project expansion
# not used in current project
def navigate_fomc_speeches():
fomc_urls = []
# initiating selenium Chrome webdriver instance
option = webdriver.ChromeOptions()
option.add_argument(" — incognito")
browser = webdriver.Chrome(options=option)
browser.get("https://www.federalreserve.gov/newsevents/pressreleases.htm")
new_urls = get_fomc_article_links(browser)
while not fomc_urls or (not new_urls or fomc_urls[-1] != new_urls[-1]):
fomc_urls += get_fomc_article_links(browser)
time.sleep(np.random.randint(5,10))
next_button = browser.find_element_by_css_selector("a[ng-click='selectPage(page + 1, $event)']")
next_button.click()
new_urls = get_fomc_article_links(browser)
browser.close()
return fomc_urls
def get_fomc_article_links(browser):
new_urls = []
speeches = browser.find_elements_by_class_name('itemTitle')
for speech in speeches:
if re.findall(r'FOMC statement', speech.text):
new_urls.append(speech.find_element_by_tag_name('a').get_attribute('href'))
return new_urls
def get_fomc_speech_text(url_lst):
option = webdriver.ChromeOptions()
option.add_argument(" — incognito")
browser = webdriver.Chrome(options=option)
fomc_speeches = []
for url in url_lst:
article_details = []
article_details.append(url)
browser.get(url)
article_times = browser.find_elements_by_class_name('article__time')
article_details.append(article_times[0].text)
article_titles = browser.find_elements_by_class_name('title')
article_details.append(article_titles[0].text)
article_texts = browser.find_elements_by_xpath('//*[@id="article"]/div[3]')
article_details.append(article_texts[0].text)
fomc_speeches.append(article_details)
time.sleep(np.random.randint(5,10))
browser.close()
return fomc_speeches
def navigate_fomc_archived_speeches():
option = webdriver.ChromeOptions()
option.add_argument(" — incognito")
browser = webdriver.Chrome(options=option)
browser.get("https://www.federalreserve.gov/newsevents/pressreleases/press-release-archive.htm")
fomc_urls = []
titles = []
year_links = []
list_of_years = browser.find_element_by_xpath('//*[@id="article"]/div/div/div/ul')
all_year_links = list_of_years.find_elements_by_tag_name("a")
for year_link in all_year_links:
url = year_link.get_attribute('href')
year_links.append(url)
for url in year_links:
browser.get(url)
new_urls, new_titles = get_fomc_links_archived(browser)
fomc_urls = fomc_urls + new_urls
titles = titles + new_titles
time.sleep(np.random.randint(5,10))
browser.close()
return fomc_urls, titles
def get_fomc_links_archived(browser):
new_urls = []
new_titles = []
releases = browser.find_element_by_id('releaseIndex')
release_urls = releases.find_elements_by_tag_name('a')
for release in release_urls:
if re.findall(r'FOMC [Ss]tatement', release.text):
url = release.get_attribute('href')
new_urls.append(url)
title = release.text
new_titles.append(title)
return new_urls, new_titles
def get_fomc_text_archived(url_lst):
# initiating selenium Chrome webdriver instance
option = webdriver.ChromeOptions()
option.add_argument(" — incognito")
browser = webdriver.Chrome(options=option)
speech_text = []
fomc_dates = []
for url in url_lst:
browser.get(url)
paragraphs = browser.find_elements_by_tag_name('p')
complete_text = ""
for paragraph in paragraphs:
complete_text += ' ' + paragraph.text
speech_text.append(complete_text)
date_ = browser.find_elements_by_tag_name('i')[0]
date_ = re.findall(r'(?<=[rR]elease [dD]ate: )(\w* \d*,? \d*)', date_.text)[0]
fomc_dates.append(date_)
time.sleep(np.random.randint(5,10))
browser.close()
return speech_text, fomc_dates
def get_fed_funds_rates(archived=False):
# initiating selenium Chrome webdriver instance
option = webdriver.ChromeOptions()
option.add_argument(" — incognito")
browser = webdriver.Chrome(options=option)
if not archived:
browser.get('https://www.federalreserve.gov/monetarypolicy/openmarket.htm')
else:
browser.get('https://www.federalreserve.gov/monetarypolicy/openmarket_archive.htm')
years_txt = []
years = browser.find_elements_by_tag_name('h4')
if not archived:
years = years[1:]
for year in years:
years_txt.append(year.text)
dates_ = []
inc = []
dec = []
target = []
rate_tables = browser.find_elements_by_class_name('data-table')
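    # Each data-table row is assumed to hold four cells -- meeting date, rate increase,
    # rate decrease, and target rate -- hence the (j+1) % 4 bookkeeping below.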
for i, table in enumerate(rate_tables):
for j, td in enumerate(table.find_elements_by_tag_name('td')):
if (j+1) % 4 == 1:
dates_.append(td.text + ", " + years_txt[i])
elif (j+1) % 4 == 2:
inc.append(td.text)
elif (j+1) % 4 == 3:
dec.append(td.text)
elif (j+1) % 4 == 0:
target.append(td.text)
browser.close()
return dates_, inc, dec, target
# Data Cleaning
def remove_references(text):
"""
Removes references at the end of
speeches, if any. Helper function
to assist in data cleaning.
Parameters:
text (string): speech text
Returns:
string: cleaned speech text sans
references
"""
references_loc = text.find('\nReferences\n')
if references_loc != -1:
text = text[:references_loc]
    return_to_text_match = re.search(r'[Rr]eturn to text\n', text)
    if return_to_text_match:
        text = text[:return_to_text_match.start()]
concluding_remarks_loc = text.find('These remarks represent my own views, which do not necessarily represent those of the Federal Reserve Board or the Federal Open Market Committee.')
if concluding_remarks_loc != -1:
text = text[:concluding_remarks_loc]
return text
def clean_speech_text(df):
"""
Cleans speech text, removing
urls, links, numbers, references,
and special characters.
Parameters:
df (DataFrame): FRB speech df
with "full_text" column to
be cleaned
Returns:
DataFrame: pandas DataFrame with
"full_text" column cleaned
"""
df_new = df.copy()
full_text_col = df_new['full_text'].apply(lambda x: remove_references(x))
full_text_col = full_text_col.str.replace('\n', ' ')
full_text_col = full_text_col.apply(lambda x: re.sub(r'(http)\S+(htm)(l)?', '', x))
full_text_col = full_text_col.apply(lambda x: re.sub(r'(www.)\S+', '', x))
full_text_col = full_text_col.apply(lambda x: re.sub(r'[\d]', '', x))
full_text_col = full_text_col.str.replace('—', ' ')
full_text_col = full_text_col.str.replace('-', ' ')
full_text_col = full_text_col.apply(lambda x: re.sub(r'[^\w\s]', '', x))
full_text_col = full_text_col.apply(lambda x: re.sub(r'([Rr]eturn to text)', '', x))
full_text_col = full_text_col.apply(lambda x: re.sub(r'([Pp]lay [vV]ideo)', '', x))
df_new.drop(labels='full_text', axis="columns", inplace=True)
df_new['full_text'] = full_text_col
return df_new
def get_wordnet_pos(word):
"""
Maps POS tag to word token
"""
tag = nltk.pos_tag([word])[0][1][0].upper()
tag_dict = {"J": wordnet.ADJ,
"N": wordnet.NOUN,
"V": wordnet.VERB,
"R": wordnet.ADV}
return tag_dict.get(tag, wordnet.NOUN)
def lemmatize_speech_text(text):
"""
Lemmatizes text based on Part
of Speech (POS) by tokenizing words,
finding the POS, and passing the
POS and token into nltk's lemmatizer.
Parameters:
text (string): speech text
Returns:
list: lemmatized tokens
"""
lemmatizer = WordNetLemmatizer()
tokens_lower = [w.lower() for w in nltk.word_tokenize(text)]
return [lemmatizer.lemmatize(w, get_wordnet_pos(w)) for w in tokens_lower]
def remove_stop_words(tokens_list):
"""
Removes English stop words
from list of tokens
Parameters:
tokens_list (list): list of words
Returns:
list: list of words sans stop words
"""
stopwords_without_punct = []
for word in stopwords.words('english'):
word = word.replace("'", "")
stopwords_without_punct.append(word)
stopped_tokens = [w for w in tokens_list if w not in stopwords_without_punct]
return [w for w in stopped_tokens if len(w) > 2]
def count_unique_words(text):
"""
    Counts number of unique
words in a piece of text
Parameters:
text (string): speech text
Returns:
int: number of unique words
"""
return len(set(text))
# Old function - replaced by lemmatize_speech_text() and remove_stop_words()
# def tokenize_and_remove_stopwords(text):
# tokens = word_tokenize(text)
# stopped_tokens = [w for w in tokens if w not in stopwords_without_punct]
# return stopped_tokens
def get_most_common_words(tokens, num=20):
"""
Returns list of a number of
most common tokens (words) in
a speech
Parameters:
tokens (list): list of tokenized
words from a speech
num (int): number of top words
to return
Returns:
list of tuples: Words and count
of the number of times each
word appears
"""
fdist = FreqDist(tokens)
return fdist.most_common(num)
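# Minimal sketch of how the cleaning helpers above chain together; the sample
# DataFrame, its 'full_text' column, and the availability of pandas plus the nltk
# data packages (punkt, wordnet, stopwords, POS tagger) are assumptions for
# illustration only, not part of the original pipeline.
def _demo_cleaning_pipeline():
    sample = pd.DataFrame({"full_text": ["The Federal Reserve kept rates unchanged. Return to text"]})
    sample = clean_speech_text(sample)
    tokens = remove_stop_words(lemmatize_speech_text(sample.loc[0, "full_text"]))
    return get_most_common_words(tokens, num=5)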
def convert_to_datetime(df):
"""
Creates 3 new columns in
FRB speech df, including speech
date, year, and month.
Parameters:
df (DataFrame): FRB speech df
with "speech_date" column to
be parsed
Returns:
DataFrame: pandas DataFrame with
3 new date columns
"""
df_new = df.copy(deep=True)
df_new['speech_datetime'] = df_new['speech_date'].apply(lambda x: pd.to_datetime(x))
df_new['speech_year'] = df_new['speech_datetime'].apply(lambda x: x.year)
df_new['speech_month'] = df_new['speech_datetime'].apply(lambda x: x.month)
return df_new
def plot_most_common_words(df, article_num=9):
"""
Plots the 20 most common words in
a speech, before and after removing
stop words
Parameters:
df (DataFrame): FRB speech
df with 'common_20_stopped_lemm_words'
column
article_num (int): index number
of the speech for which to
generate the barplot
Returns:
Displays 2 sns barplots of top 20
words
"""
fig = plt.figure(figsize=(15, 6))
fig.suptitle(f"Most common words in Speech: {df.iloc[article_num]['title']}")
left = fig.add_subplot(121)
right = fig.add_subplot(122)
# left subplot without stop words
sns.barplot(x=[x[0] for x in df.iloc[article_num]['common_20_stopped_lemm_words']],
y=[x[1] for x in df.iloc[article_num]['common_20_stopped_lemm_words']], ax=left, color='#ffd966')#palette = mycmap)
left.set_xticklabels(left.get_xticklabels(), rotation=45, horizontalalignment="right")
left.set_title('Lemmatized Tokens with Stop Words Removed')
# right subplot with all tokens
sns.barplot(x=[x[0] for x in df.iloc[article_num]['common_20_lemm_words']],
y=[x[1] for x in df.iloc[article_num]['common_20_lemm_words']], ax=right, color='gray')#palette = mycmap)
right.set_xticklabels(right.get_xticklabels(), rotation=45, horizontalalignment="right")
right.set_title('Lemmatized Tokens')
plt.show()
def create_dictionary(df, col_name = 'stopped_lemm_words', no_below=10, no_above=0.66, keep_n=10000):
"""
Creates a dictionary for our corpus
Parameters:
df (DataFrame): df containing the
lemmatized and tokenized corpus
col_name (str): name of column in
the df containing the lemmatized
and tokenized corpus
no_below (int): Minimum number of documents
        the word must appear in to be included
in the corpus
        no_above (float): Max percentage of documents
in the corpus the word can appear in.
Otherwise, word is removed from the corpus
keep_n (int): Maximum number of
words to keep in the dictionary
Returns:
dictionary: list of tokens in the
dictionary
"""
dictionary = corpora.Dictionary(df[col_name])
print(f"Number of words in dictionary prior to filtering: {len(dictionary)}")
    dictionary.filter_extremes(no_below=no_below, no_above=no_above, keep_n=keep_n)
print(f"Number of words in dictionary after filtering: {len(dictionary)}")
return dictionary
def create_bow(df, dictionary, col_name = 'stopped_lemm_words'):
"""
Creates a dictionary for our corpus
Parameters:
df (DataFrame): df containing the
lemmatized and tokenized corpus
col_name (str): name of column in
the df containing the lemmatized
and tokenized corpus
no_below (int): Minimum number of documents
the word mnust appear in to be included
in the corpus
no_above (int): Max percentage of documents
in the corpus the word can appear in.
Otherwise, word is removed from the corpus
keep_n (int): Maximum number of
words to keep in the dictionary
"""
bow_corpus = [dictionary.doc2bow(speech) for speech in df[col_name]]
return bow_corpus
def get_scores(corpus,
dictionary,
df,
col_name,
min_num_topics = 2,
max_num_topics = 15,
passes=10,
random_state=100):
"""
"""
num_topics = list(range(min_num_topics, max_num_topics + 1))
coherence_scores = []
perplexity_scores = []
for num in range(min_num_topics, max_num_topics+1):
lda_model = gensim.models.LdaMulticore(corpus, num_topics=num, id2word=dictionary,
random_state = random_state, passes = passes)
perplexity_scores.append(lda_model.log_perplexity(corpus))
coherence_model_lda = CoherenceModel(model=lda_model, texts=df[col_name], dictionary=dictionary,
coherence='c_v')
coherence_scores.append(coherence_model_lda.get_coherence())
data = {'num_topics': num_topics, 'coherence': coherence_scores, 'perplexity': perplexity_scores}
return pd.DataFrame(data)
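# Sketch of how the helpers above might be combined to pick a topic count by c_v
# coherence; the column name and topic range are illustrative assumptions, and this
# helper is not part of the original modeling pipeline.
def _demo_topic_count_selection(df, col_name='stopped_lemm_words'):
    dictionary = create_dictionary(df, col_name=col_name)
    bow_corpus = create_bow(df, dictionary, col_name=col_name)
    scores = get_scores(bow_corpus, dictionary, df, col_name, min_num_topics=2, max_num_topics=5)
    return int(scores.loc[scores['coherence'].idxmax(), 'num_topics'])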
def run_and_save_final_lda_model(corpus,
dictionary,
df,
col_name,
num_topics = 11,
passes = 10,
random_state = 100):
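    """
    Fits the final LDA model on the given corpus, pickles the model,
    saves a pyLDAvis visualization to html, and pickles the
    dominant-topic DataFrame.
    """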
# fit the lda model
    lda_model = gensim.models.LdaMulticore(corpus, num_topics=num_topics, id2word=dictionary,
random_state = random_state, passes = passes)
# pickle the lda model
pickle.dump(lda_model, open('lda_model' + str(num_topics) + '.sava', 'wb'))
# create the visualization
vis = pyLDAvis.gensim.prepare(lda_model, corpus, dictionary=lda_model.id2word)
# save the visualization in html format
pyLDAvis.save_html(vis, 'lda_' + str(num_topics) + '_topics.html')
# get the dominant topic information
    df_dominant = get_dominant_topic(lda_model, corpus)
# pickle the dominant topics
df_dominant.to_pickle('df_dominant_' + str(num_topics) + '_topics.pkl')
return lda_model, df_dominant
def get_dominant_topic(lda_model, corpus):
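    """
    For each document in the corpus, records the dominant topic, its
    percentage contribution, and that topic's top keywords, returned
    as a DataFrame.
    """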
topics_df = pd.DataFrame()
# Get main topic in each document
for i, row in enumerate(lda_model[corpus]):
row = sorted(row, key=lambda x: (x[1]), reverse=True)
# Get the Dominant topic, Perc Contribution and Keywords for each document
for j, (topic_num, prop_topic) in enumerate(row):
if j == 0: # => dominant topic
wp = lda_model.show_topic(topic_num)
topic_keywords = ", ".join([word for word, prop in wp])
topics_df = topics_df.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)
else:
break
topics_df.reset_index(inplace=True)
topics_df.columns = ['Document_No', 'Dominant_Topic', 'Top_Topic_Perc_Contrib', 'Keywords']
return topics_df
# EDA
def plot_speeches_per_year(df, figsize = (8, 6), color='#ffd966'):
fig = plt.figure(figsize = figsize)
count_by_year = df.groupby('speech_year').count()['index_no'].reset_index()
sns.barplot(data = count_by_year, x = 'speech_year', y = 'index_no', color = color)
plt.xticks(rotation=90)
plt.xlabel('Speech Year', fontsize=14)
plt.ylabel('Number of Speeches', fontsize=14)
plt.title('Number of Speeches per Year', fontsize=18)
plt.show()
def plot_polarity_dist_per_year(df, figsize = (8, 6), color='#ffd966'):
fig = plt.figure(figsize = figsize)
sns.boxplot(data=df, x = 'speech_year', y = 'polarity', color = color)
plt.xticks(rotation=90)
plt.xlabel('Speech Year', fontsize=14)
plt.ylabel('Polarity', fontsize=14)
plt.title('Fed Speech Sentiment (Positive/Negative)', fontsize=18)
plt.show()
def plot_subjectivity_dist_per_year(df, figsize = (8, 6), color='#ffd966'):
fig = plt.figure(figsize = figsize)
sns.boxplot(data=df, x = 'speech_year', y = 'subjectivity', color = color)
plt.xticks(rotation=90)
plt.xlabel('Speech Year', fontsize=14)
plt.ylabel('Subjectivity', fontsize=14)
    plt.title('Fed Speech Subjectivity (Objective/Subjective)', fontsize=18)
plt.show()
| [] |
2024-01-10 | johntday/ChatBot-CSV | playground.py | import os
from dotenv import load_dotenv
from langchain import OpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
DB_NAME = 'notion_hybris_faiss_index'
def main():
    extract_keywords()
def fetch_vector_store(name=DB_NAME) -> FAISS:
vector_db = FAISS.load_local(f"embeddings/{name}", OpenAIEmbeddings())
print(f"\nLoaded '{name}'")
return vector_db
def extract_keywords():
load_dotenv(verbose=True)
NOTION_TOKEN = os.getenv("NOTION_TOKEN")
NOTION_DATABASE_ID = os.getenv("NOTION_DATABASE_ID")
db = fetch_vector_store()
db_dict = db.docstore._dict
documents = list(db_dict.values())
# openai.api_key = os.getenv("OPENAI_API_KEY")
f = open("keywords.txt", "a")
# f.write("Now the file has more content!")
llm = OpenAI(
model="text-davinci-003",
# prompt="Extract keywords from this text:\n\nBlack-on-black ware is a 20th- and 21st-century pottery tradition developed by the Puebloan Native American ceramic artists in Northern New Mexico. Traditional reduction-fired blackware has been made for centuries by pueblo artists. Black-on-black ware of the past century is produced with a smooth surface, with the designs applied through selective burnishing or the application of refractory slip. Another style involves carving or incising designs and selectively polishing the raised areas. For generations several families from Kha'po Owingeh and P'ohwhóge Owingeh pueblos have been making black-on-black ware with the techniques passed down from matriarch potters. Artists from other pueblos have also produced black-on-black ware. Several contemporary artists have created works honoring the pottery of their ancestors.",
temperature=0.5,
max_tokens=256,
top_p=1.0,
frequency_penalty=0.8,
presence_penalty=0.0
)
for doc in documents:
keywords = llm(f"Extract keywords from this text:\n\n{doc}")
f.write(keywords)
# print(keywords)
# break
f.close()
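# Run the keyword-extraction helper when this file is executed as a script.
if __name__ == '__main__':
    main()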
| [] |
2024-01-10 | johntday/ChatBot-CSV | modules~MyNotionDBLoader.py | """Notion DB loader for langchain"""
import itertools
import time
from typing import Any, Dict, List
import requests
from langchain.docstore.document import Document
from langchain.document_loaders import PyPDFLoader
from langchain.document_loaders.base import BaseLoader
NOTION_BASE_URL = "https://api.notion.com/v1"
DATABASE_URL = NOTION_BASE_URL + "/databases/{database_id}/query"
PAGE_URL = NOTION_BASE_URL + "/pages/{page_id}"
BLOCK_URL = NOTION_BASE_URL + "/blocks/{block_id}/children"
TIMEOUT = 10000
WAIT = 2
RETRY_COUNT = 3
METADATA_FILTER = ['id', 'title', 'tags', 'version', 'source id', 'published', 'source']
def metadata_filter(pair: tuple) -> bool:
key, value = pair
if key in METADATA_FILTER:
return True
else:
return False
def _get_pdf_content(url_str: str, page_id: str) -> List[Document]:
if url_str.startswith("http"):
loader = PyPDFLoader(url_str)
# loader = OnlinePDFLoader(url_str)
pages = loader.load()
return pages
raise ValueError(f"Invalid URL of pdf: '{url_str}' at page_id: '{page_id}'")
class MyNotionDBLoader(BaseLoader):
"""Notion DB Loader.
    Reads content from pages within a Notion Database.
Args:
integration_token (str): Notion integration token.
database_id (str): Notion database id.
"""
def __init__(self, integration_token: str, database_id: str) -> None:
"""Initialize with parameters."""
if not integration_token:
raise ValueError("integration_token must be provided")
if not database_id:
raise ValueError("database_id must be provided")
self.token = integration_token
self.database_id = database_id
self.headers = {
"Authorization": "Bearer " + self.token,
"Content-Type": "application/json",
"Notion-Version": "2022-06-28",
}
def load(self) -> List[Document]:
"""Load documents from the Notion database.
Returns:
List[Document]: List of documents.
"""
page_ids = self._retrieve_page_ids()
return list(itertools.chain.from_iterable(self.load_page(page_id) for page_id in page_ids))
def _retrieve_page_ids(
self, query_dict: Dict[str, Any] = {"page_size": 100}
) -> List[str]:
"""Get all the pages from a Notion database."""
pages: List[Dict[str, Any]] = []
query_dict = {
"filter": {
"and": [
{
"property": "Pub",
"checkbox": {
"equals": True
}
},
{
"property": "Status",
"select": {
"does_not_equal": "Published"
}
}
]
},
'page_size': 100
}
while True:
data = self._request(
DATABASE_URL.format(database_id=self.database_id),
method="POST",
query_dict=query_dict,
)
pages.extend(data.get("results"))
if not data.get("has_more"):
break
query_dict["start_cursor"] = data.get("next_cursor")
page_ids = [page["id"] for page in pages]
print(f"Found {len(page_ids)} pages in Notion database {self.database_id}")
return page_ids
def load_page(self, page_id: str) -> List[Document]:
"""Read a page."""
is_pdf = False
data = self._request(PAGE_URL.format(page_id=page_id))
# load properties as metadata
metadata: Dict[str, Any] = {}
for prop_name, prop_data in data["properties"].items():
prop_type = prop_data["type"]
if prop_type == "rich_text":
value = (
prop_data["rich_text"][0]["plain_text"]
if prop_data["rich_text"]
else None
)
elif prop_type == "title":
value = (
prop_data["title"][0]["plain_text"] if prop_data["title"] else None
)
elif prop_type == "multi_select":
value = (
[item["name"] for item in prop_data["multi_select"]]
if prop_data["multi_select"]
else []
)
elif prop_type == "select":
value = (
prop_data["select"]["name"] if prop_data["select"] else None
)
elif prop_type == "date":
value = (
prop_data["date"]["start"] if prop_data["date"] else None
)
elif prop_type == "checkbox":
value = (
prop_data["checkbox"]
)
if prop_name.lower() == "pdf" and value is True:
is_pdf = True
elif prop_type == "url":
value = (
prop_data["url"]
)
else:
print(f"Unknown prop_type: {prop_type} for Notion page id: {page_id}")
value = None
metadata[prop_name.lower()] = value
metadata["id"] = page_id
page_content = self._load_blocks(block_id=page_id)
""" validate """
if not page_content:
raise ValueError(f"No content found for page_id: '{page_id}', title: '{metadata['title']}'")
if not metadata["source"]:
raise ValueError(f"source: '{metadata['source']} not found for page_id: '{page_id}', title: '{metadata['title']}'")
""" check status """
if metadata["status"] in ["Archived", "Indexed"]:
return []
""" filter metadata """
metadata_filtered = dict(filter(metadata_filter, metadata.items()))
if is_pdf:
print(f"\n\nLoading PDF '{metadata}'")
docs = _get_pdf_content(page_content, page_id)
return [Document(page_content=doc.page_content, metadata=metadata_filtered) for doc in docs]
else:
print(f"\n\nLoading Notion Page '{metadata}'")
return [Document(page_content=page_content, metadata=metadata_filtered)]
def _load_blocks(self, block_id: str, num_tabs: int = 0) -> str:
"""Read a block and its children."""
result_lines_arr: List[str] = []
cur_block_id: str = block_id
while cur_block_id:
data = self._request(BLOCK_URL.format(block_id=cur_block_id))
for result in data["results"]:
result_obj = result[result["type"]]
if result["type"] == "file" or result["type"] == "pdf":
return result["file"]["file"]["url"]
if "rich_text" not in result_obj:
continue
cur_result_text_arr: List[str] = []
for rich_text in result_obj["rich_text"]:
if "text" in rich_text:
cur_result_text_arr.append(
"\t" * num_tabs + rich_text["text"]["content"]
)
if result["has_children"]:
children_text = self._load_blocks(
block_id=result["id"], num_tabs=num_tabs + 1
)
cur_result_text_arr.append(children_text)
result_lines_arr.append("\n".join(cur_result_text_arr))
cur_block_id = data.get("next_cursor")
return "\n".join(result_lines_arr)
def _request(
self, url: str, method: str = "GET", query_dict: Dict[str, Any] = {}
) -> Any:
""" Make a request to the Notion API.
Include retry logic and rate limit handling. """
# https://scrapeops.io/python-web-scraping-playbook/python-requests-retry-failed-requests/
for _ in range(RETRY_COUNT):
if WAIT is not None:
time.sleep(WAIT)
try:
response = requests.request(
method,
url,
headers=self.headers,
json=query_dict,
timeout=TIMEOUT,
)
# response.raise_for_status()
if response.status_code in [429, 500, 502, 503, 504]:
print(f"Got {response.status_code} from Notion API. Retrying...")
continue
return response.json()
except requests.exceptions.ConnectionError:
pass
return None
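# Minimal usage sketch; the NOTION_TOKEN and NOTION_DATABASE_ID environment variables
# are assumptions for illustration, not something this module itself requires.
if __name__ == "__main__":
    import os
    loader = MyNotionDBLoader(os.environ["NOTION_TOKEN"], os.environ["NOTION_DATABASE_ID"])
    docs = loader.load()
    print(f"Loaded {len(docs)} documents from Notion")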
| [] |
2024-01-10 | eldarsilver/DQN_Pytorch_ROS | openai_ros~openai_ros~src~openai_ros~robot_envs~cube_single_disk_env.py | #! /usr/bin/env python
import numpy
import rospy
from openai_ros import robot_gazebo_env
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from nav_msgs.msg import Odometry
from openai_ros.openai_ros_common import ROSLauncher
class CubeSingleDiskEnv(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all CubeSingleDisk environments.
"""
def __init__(self, ros_ws_abspath):
"""Initializes a new CubeSingleDisk environment.
Args:
"""
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="moving_cube_description",
launch_file_name="put_robot_in_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Variables that we give through the constructor.
# None in this case
# Internal Vars
self.controllers_list = ['joint_state_controller',
'inertia_wheel_roll_joint_velocity_controller'
]
self.robot_name_space = "moving_cube"
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(CubeSingleDiskEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=True)
# We Start all the ROS related Subscribers and publishers
rospy.Subscriber("/moving_cube/joint_states", JointState, self._joints_callback)
rospy.Subscriber("/moving_cube/odom", Odometry, self._odom_callback)
self._roll_vel_pub = rospy.Publisher('/moving_cube/inertia_wheel_roll_joint_velocity_controller/command',
Float64, queue_size=1)
self._check_all_systems_ready()
# We pause the simulation once everything is ready
self.gazebo.pauseSim()
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
self._check_all_sensors_ready()
self._check_publishers_connection()
return True
# CubeSingleDiskEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
self._check_joint_states_ready()
self._check_odom_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_joint_states_ready(self):
self.joints = None
while self.joints is None and not rospy.is_shutdown():
try:
self.joints = rospy.wait_for_message(
"/moving_cube/joint_states", JointState, timeout=1.0)
rospy.logdebug(
"Current moving_cube/joint_states READY=>" + str(self.joints))
except:
rospy.logerr(
"Current moving_cube/joint_states not ready yet, retrying for getting joint_states")
return self.joints
def _check_odom_ready(self):
self.odom = None
while self.odom is None and not rospy.is_shutdown():
try:
self.odom = rospy.wait_for_message(
"/moving_cube/odom", Odometry, timeout=1.0)
rospy.logdebug(
"Current /moving_cube/odom READY=>" + str(self.odom))
except:
rospy.logerr(
"Current /moving_cube/odom not ready yet, retrying for getting odom")
return self.odom
def _joints_callback(self, data):
self.joints = data
def _odom_callback(self, data):
self.odom = data
def _check_publishers_connection(self):
"""
Checks that all the publishers are working
:return:
"""
rate = rospy.Rate(10) # 10hz
while self._roll_vel_pub.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug(
"No susbribers to _roll_vel_pub yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
# This is to avoid error when world is rested, time when backwards.
pass
rospy.logdebug("_roll_vel_pub Publisher Connected")
rospy.logdebug("All Publishers READY")
# Methods that the TrainingEnvironment will need.
# ----------------------------
def move_joints(self, roll_speed):
joint_speed_value = Float64()
joint_speed_value.data = roll_speed
rospy.logdebug("Single Disk Roll Velocity>>" + str(joint_speed_value))
self._roll_vel_pub.publish(joint_speed_value)
self.wait_until_roll_is_in_vel(joint_speed_value.data)
def wait_until_roll_is_in_vel(self, velocity):
rate = rospy.Rate(10)
start_wait_time = rospy.get_rostime().to_sec()
end_wait_time = 0.0
epsilon = 0.1
v_plus = velocity + epsilon
v_minus = velocity - epsilon
while not rospy.is_shutdown():
joint_data = self._check_joint_states_ready()
roll_vel = joint_data.velocity[0]
rospy.logdebug("VEL=" + str(roll_vel) +
", ?RANGE=[" + str(v_minus) + ","+str(v_plus)+"]")
are_close = (roll_vel <= v_plus) and (roll_vel > v_minus)
if are_close:
rospy.logdebug("Reached Velocity!")
end_wait_time = rospy.get_rostime().to_sec()
break
rospy.logdebug("Not there yet, keep waiting...")
rate.sleep()
delta_time = end_wait_time - start_wait_time
rospy.logdebug("[Wait Time=" + str(delta_time)+"]")
return delta_time
def get_joints(self):
return self.joints
def get_odom(self):
return self.odom
| [] |
2024-01-10 | eldarsilver/DQN_Pytorch_ROS | openai_ros~openai_ros~src~openai_ros~robot_gazebo_env.py | import rospy
import gym
from gym.utils import seeding
from .gazebo_connection import GazeboConnection
from .controllers_connection import ControllersConnection
#https://bitbucket.org/theconstructcore/theconstruct_msgs/src/master/msg/RLExperimentInfo.msg
from openai_ros.msg import RLExperimentInfo
# https://github.com/openai/gym/blob/master/gym/core.py
class RobotGazeboEnv(gym.Env):
def __init__(self, robot_name_space, controllers_list, reset_controls, start_init_physics_parameters=True, reset_world_or_sim="SIMULATION"):
# To reset Simulations
rospy.logdebug("START init RobotGazeboEnv")
self.gazebo = GazeboConnection(start_init_physics_parameters,reset_world_or_sim)
self.controllers_object = ControllersConnection(namespace=robot_name_space, controllers_list=controllers_list)
self.reset_controls = reset_controls
self.seed()
# Set up ROS related variables
self.episode_num = 0
self.cumulated_episode_reward = 0
self.reward_pub = rospy.Publisher('/openai/reward', RLExperimentInfo, queue_size=1)
# We Unpause the simulation and reset the controllers if needed
"""
To check any topic we need to have the simulations running, we need to do two things:
1) Unpause the simulation: without that th stream of data doesnt flow. This is for simulations
that are pause for whatever the reason
2) If the simulation was running already for some reason, we need to reset the controlers.
This has to do with the fact that some plugins with tf, dont understand the reset of the simulation
and need to be reseted to work properly.
"""
self.gazebo.unpauseSim()
if self.reset_controls:
self.controllers_object.reset_controllers()
rospy.logdebug("END init RobotGazeboEnv")
# Env methods
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
"""
Function executed each time step.
Here we get the action execute it in a time step and retrieve the
observations generated by that action.
:param action:
:return: obs, reward, done, info
"""
"""
Here we should convert the action num to movement action, execute the action in the
simulation and get the observations result of performing that action.
"""
rospy.logdebug("START STEP OpenAIROS")
self.gazebo.unpauseSim()
self._set_action(action)
self.gazebo.pauseSim()
obs = self._get_obs()
done = self._is_done(obs)
info = {}
reward = self._compute_reward(obs, done)
self.cumulated_episode_reward += reward
rospy.logdebug("END STEP OpenAIROS")
return obs, reward, done, info
def reset(self):
rospy.logdebug("Reseting RobotGazeboEnvironment")
self._reset_sim()
self._init_env_variables()
self._update_episode()
obs = self._get_obs()
rospy.logdebug("END Reseting RobotGazeboEnvironment")
return obs
def close(self):
"""
Function executed when closing the environment.
Use it for closing GUIS and other systems that need closing.
:return:
"""
rospy.logdebug("Closing RobotGazeboEnvironment")
rospy.signal_shutdown("Closing RobotGazeboEnvironment")
def _update_episode(self):
"""
Publishes the cumulated reward of the episode and
increases the episode number by one.
:return:
"""
rospy.logwarn("PUBLISHING REWARD...")
self._publish_reward_topic(
self.cumulated_episode_reward,
self.episode_num
)
rospy.logwarn("PUBLISHING REWARD...DONE="+str(self.cumulated_episode_reward)+",EP="+str(self.episode_num))
self.episode_num += 1
self.cumulated_episode_reward = 0
def _publish_reward_topic(self, reward, episode_number=1):
"""
This function publishes the given reward in the reward topic for
easy access from ROS infrastructure.
:param reward:
:param episode_number:
:return:
"""
reward_msg = RLExperimentInfo()
reward_msg.episode_number = episode_number
reward_msg.episode_reward = reward
self.reward_pub.publish(reward_msg)
# Extension methods
# ----------------------------
def _reset_sim(self):
"""Resets a simulation
"""
rospy.logdebug("RESET SIM START")
if self.reset_controls :
rospy.logdebug("RESET CONTROLLERS")
self.gazebo.unpauseSim()
self.controllers_object.reset_controllers()
self._check_all_systems_ready()
self._set_init_pose()
self.gazebo.pauseSim()
self.gazebo.resetSim()
self.gazebo.unpauseSim()
self.controllers_object.reset_controllers()
self._check_all_systems_ready()
self.gazebo.pauseSim()
else:
rospy.logwarn("DONT RESET CONTROLLERS")
self.gazebo.unpauseSim()
self._check_all_systems_ready()
self._set_init_pose()
self.gazebo.pauseSim()
self.gazebo.resetSim()
self.gazebo.unpauseSim()
self._check_all_systems_ready()
self.gazebo.pauseSim()
rospy.logdebug("RESET SIM END")
return True
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
raise NotImplementedError()
def _get_obs(self):
"""Returns the observation.
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _is_done(self, observations):
"""Indicates whether or not the episode is done ( the robot has fallen for example).
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _env_setup(self, initial_qpos):
"""Initial configuration of the environment. Can be used to configure initial state
and extract information from the simulation.
"""
raise NotImplementedError()
| [] |
2024-01-10 | eldarsilver/DQN_Pytorch_ROS | openai_ros~openai_ros~src~openai_ros~robot_envs~turtlebot2_env.py | import numpy
import rospy
import time
from openai_ros import robot_gazebo_env
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from sensor_msgs.msg import Image
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import PointCloud2
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
from openai_ros.openai_ros_common import ROSLauncher
class TurtleBot2Env(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all CubeSingleDisk environments.
"""
def __init__(self, ros_ws_abspath):
"""
Initializes a new TurtleBot2Env environment.
        Turtlebot2 doesn't use controller_manager, therefore we won't reset the
        controllers in the standard fashion. For the moment we won't reset them.
        To check any topic we need to have the simulation running. We need to do two things:
        1) Unpause the simulation: without that the stream of data doesn't flow. This is for
        simulations that are paused for whatever reason.
        2) If the simulation was already running for some reason, we need to reset the controllers.
        This has to do with the fact that some plugins with tf don't understand the reset of the
        simulation and need to be reset to work properly.
        The Sensors: The sensors accessible are the ones considered useful for AI learning.
Sensor Topic List:
* /odom : Odometry readings of the Base of the Robot
* /camera/depth/image_raw: 2d Depth image of the depth sensor.
* /camera/depth/points: Pointcloud sensor readings
* /camera/rgb/image_raw: RGB camera
* /kobuki/laser/scan: Laser Readings
Actuators Topic List: /cmd_vel,
Args:
"""
rospy.logdebug("Start TurtleBot2Env INIT...")
# Variables that we give through the constructor.
# None in this case
# We launch the ROSlaunch that spawns the robot into the world
# launch_file_name="put_robot_in_world.launch"
ROSLauncher(rospackage_name="turtlebot_gazebo",
launch_file_name="put_robot_in_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Internal Vars
# Doesnt have any accesibles
self.controllers_list = []
# It doesnt use namespace
self.robot_name_space = ""
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(TurtleBot2Env, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False,
reset_world_or_sim="WORLD")
self.gazebo.unpauseSim()
#self.controllers_object.reset_controllers()
self._check_all_sensors_ready()
# We Start all the ROS related Subscribers and publishers
rospy.Subscriber("/odom", Odometry, self._odom_callback)
#rospy.Subscriber("/camera/depth/image_raw", Image, self._camera_depth_image_raw_callback)
#rospy.Subscriber("/camera/depth/points", PointCloud2, self._camera_depth_points_callback)
#rospy.Subscriber("/camera/rgb/image_raw", Image, self._camera_rgb_image_raw_callback)
rospy.Subscriber("/kobuki/laser/scan", LaserScan, self._laser_scan_callback)
#rospy.Subscriber("/scan", LaserScan, self._laser_scan_callback)
self._cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
self._check_publishers_connection()
self.gazebo.pauseSim()
rospy.logdebug("Finished TurtleBot2Env INIT...")
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
self._check_all_sensors_ready()
return True
# CubeSingleDiskEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
rospy.logdebug("START ALL SENSORS READY")
self._check_odom_ready()
# We dont need to check for the moment, takes too long
#self._check_camera_depth_image_raw_ready()
#self._check_camera_depth_points_ready()
#self._check_camera_rgb_image_raw_ready()
self._check_laser_scan_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_odom_ready(self):
self.odom = None
rospy.logdebug("Waiting for /odom to be READY...")
while self.odom is None and not rospy.is_shutdown():
try:
self.odom = rospy.wait_for_message("/odom", Odometry, timeout=5.0)
rospy.logdebug("Current /odom READY=>")
except:
rospy.logerr("Current /odom not ready yet, retrying for getting odom")
return self.odom
def _check_camera_depth_image_raw_ready(self):
self.camera_depth_image_raw = None
rospy.logdebug("Waiting for /camera/depth/image_raw to be READY...")
while self.camera_depth_image_raw is None and not rospy.is_shutdown():
try:
self.camera_depth_image_raw = rospy.wait_for_message("/camera/depth/image_raw", Image, timeout=5.0)
rospy.logdebug("Current /camera/depth/image_raw READY=>")
except:
rospy.logerr("Current /camera/depth/image_raw not ready yet, retrying for getting camera_depth_image_raw")
return self.camera_depth_image_raw
def _check_camera_depth_points_ready(self):
self.camera_depth_points = None
rospy.logdebug("Waiting for /camera/depth/points to be READY...")
while self.camera_depth_points is None and not rospy.is_shutdown():
try:
self.camera_depth_points = rospy.wait_for_message("/camera/depth/points", PointCloud2, timeout=10.0)
rospy.logdebug("Current /camera/depth/points READY=>")
except:
rospy.logerr("Current /camera/depth/points not ready yet, retrying for getting camera_depth_points")
return self.camera_depth_points
def _check_camera_rgb_image_raw_ready(self):
self.camera_rgb_image_raw = None
rospy.logdebug("Waiting for /camera/rgb/image_raw to be READY...")
while self.camera_rgb_image_raw is None and not rospy.is_shutdown():
try:
self.camera_rgb_image_raw = rospy.wait_for_message("/camera/rgb/image_raw", Image, timeout=5.0)
rospy.logdebug("Current /camera/rgb/image_raw READY=>")
except:
rospy.logerr("Current /camera/rgb/image_raw not ready yet, retrying for getting camera_rgb_image_raw")
return self.camera_rgb_image_raw
def _check_laser_scan_ready(self):
self.laser_scan = None
rospy.logdebug("Waiting for /kobuki/laser/scan to be READY...")
while self.laser_scan is None and not rospy.is_shutdown():
try:
self.laser_scan = rospy.wait_for_message("/kobuki/laser/scan", LaserScan, timeout=5.0)
rospy.logdebug("Current /kobuki/laser/scan READY=>")
except:
rospy.logerr("Current /kobuki/laser/scan not ready yet, retrying for getting laser_scan")
return self.laser_scan
def _odom_callback(self, data):
self.odom = data
def _camera_depth_image_raw_callback(self, data):
self.camera_depth_image_raw = data
def _camera_depth_points_callback(self, data):
self.camera_depth_points = data
def _camera_rgb_image_raw_callback(self, data):
self.camera_rgb_image_raw = data
def _laser_scan_callback(self, data):
self.laser_scan = data
def _check_publishers_connection(self):
"""
Checks that all the publishers are working
:return:
"""
rate = rospy.Rate(10) # 10hz
while self._cmd_vel_pub.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug("No susbribers to _cmd_vel_pub yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
# This is to avoid error when world is rested, time when backwards.
pass
rospy.logdebug("_cmd_vel_pub Publisher Connected")
rospy.logdebug("All Publishers READY")
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ----------------------------
def move_base(self, linear_speed, angular_speed, epsilon=0.05, update_rate=10, min_laser_distance=-1):
"""
It will move the base based on the linear and angular speeds given.
        It will wait until those twists are achieved, reading from the odometry topic.
:param linear_speed: Speed in the X axis of the robot base frame
:param angular_speed: Speed of the angular turning of the robot base frame
:param epsilon: Acceptable difference between the speed asked and the odometry readings
:param update_rate: Rate at which we check the odometry.
:return:
"""
cmd_vel_value = Twist()
cmd_vel_value.linear.x = linear_speed
cmd_vel_value.angular.z = angular_speed
rospy.logdebug("TurtleBot2 Base Twist Cmd>>" + str(cmd_vel_value))
self._check_publishers_connection()
self._cmd_vel_pub.publish(cmd_vel_value)
time.sleep(0.2)
#time.sleep(0.02)
"""
self.wait_until_twist_achieved(cmd_vel_value,
epsilon,
update_rate,
min_laser_distance)
"""
def wait_until_twist_achieved(self, cmd_vel_value, epsilon, update_rate, min_laser_distance=-1):
"""
We wait for the cmd_vel twist given to be reached by the robot reading
from the odometry.
:param cmd_vel_value: Twist we want to wait to reach.
:param epsilon: Error acceptable in odometry readings.
:param update_rate: Rate at which we check the odometry.
:return:
"""
rospy.logwarn("START wait_until_twist_achieved...")
rate = rospy.Rate(update_rate)
start_wait_time = rospy.get_rostime().to_sec()
end_wait_time = 0.0
epsilon = 0.05
rospy.logdebug("Desired Twist Cmd>>" + str(cmd_vel_value))
rospy.logdebug("epsilon>>" + str(epsilon))
linear_speed = cmd_vel_value.linear.x
angular_speed = cmd_vel_value.angular.z
linear_speed_plus = linear_speed + epsilon
linear_speed_minus = linear_speed - epsilon
angular_speed_plus = angular_speed + epsilon
angular_speed_minus = angular_speed - epsilon
while not rospy.is_shutdown():
crashed_into_something = self.has_crashed(min_laser_distance)
current_odometry = self._check_odom_ready()
odom_linear_vel = current_odometry.twist.twist.linear.x
odom_angular_vel = current_odometry.twist.twist.angular.z
rospy.logdebug("Linear VEL=" + str(odom_linear_vel) + ", ?RANGE=[" + str(linear_speed_minus) + ","+str(linear_speed_plus)+"]")
rospy.logdebug("Angular VEL=" + str(odom_angular_vel) + ", ?RANGE=[" + str(angular_speed_minus) + ","+str(angular_speed_plus)+"]")
linear_vel_are_close = (odom_linear_vel <= linear_speed_plus) and (odom_linear_vel > linear_speed_minus)
angular_vel_are_close = (odom_angular_vel <= angular_speed_plus) and (odom_angular_vel > angular_speed_minus)
if linear_vel_are_close and angular_vel_are_close:
rospy.logwarn("Reached Velocity!")
end_wait_time = rospy.get_rostime().to_sec()
break
if crashed_into_something:
rospy.logerr("TurtleBot has crashed, stopping movement!")
break
rospy.logwarn("Not there yet, keep waiting...")
rate.sleep()
delta_time = end_wait_time- start_wait_time
rospy.logdebug("[Wait Time=" + str(delta_time)+"]")
rospy.logwarn("END wait_until_twist_achieved...")
return delta_time
def has_crashed(self, min_laser_distance):
"""
It states based on the laser scan if the robot has crashed or not.
Crashed means that the minimum laser reading is lower than the
min_laser_distance value given.
        If min_laser_distance == -1, it always returns False, because it's the way
to deactivate this check.
"""
robot_has_crashed = False
if min_laser_distance != -1:
laser_data = self.get_laser_scan()
for i, item in enumerate(laser_data.ranges):
if item == float ('Inf') or numpy.isinf(item):
pass
elif numpy.isnan(item):
pass
else:
# Has a Non Infinite or Nan Value
if (item < min_laser_distance):
rospy.logerr("TurtleBot HAS CRASHED >>> item=" + str(item)+"< "+str(min_laser_distance))
robot_has_crashed = True
break
return robot_has_crashed
def get_odom(self):
return self.odom
def get_camera_depth_image_raw(self):
return self.camera_depth_image_raw
def get_camera_depth_points(self):
return self.camera_depth_points
def get_camera_rgb_image_raw(self):
return self.camera_rgb_image_raw
def get_laser_scan(self):
return self.laser_scan
def reinit_sensors(self):
"""
        This method is for the tasks so that when resetting the episode
        the sensor values are forced to be updated with the real data.
"""
| [] |
2024-01-10 | eldarsilver/DQN_Pytorch_ROS | turtle2_openai_ros_example~src~deploy_robot.py | #!/usr/bin/env python
import rospy
import numpy
import time
import math
from gym import spaces
#from openai_ros.robot_envs import turtlebot2_env
#from gym.envs.registration import register
from sensor_msgs.msg import LaserScan
from std_msgs.msg import Header
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
#from openai_ros.openai_ros_common import ROSLauncher
import os
from cv_bridge import CvBridge, CvBridgeError
from datetime import datetime
from std_msgs.msg import String
#from sensor_msgs.msg import Image
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
import torch
import torch.nn as nn
import pickle
class DQN(nn.Module):
# hidden_size=64
def __init__(self, inputs, outputs, hidden_size=128):
super(DQN, self).__init__()
self.fc1 = nn.Linear(in_features=inputs, out_features=hidden_size)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(p=0.5)
self.bn1 = nn.BatchNorm1d(num_features=hidden_size)
self.bn2 = nn.BatchNorm1d(num_features=64)
self.bn3 = nn.BatchNorm1d(num_features=32)
self.fc2 = nn.Linear(in_features=hidden_size, out_features=64)
self.fc3 = nn.Linear(in_features=64, out_features=32)
self.fc4 = nn.Linear(in_features=32, out_features=outputs)
#self.fc5 = nn.Linear(in_features=16, out_features=outputs)
def forward(self, x):
x = self.fc1(x)
x = self.bn1(x)
x = self.relu(x)
#x = self.dropout(x)
x = self.fc2(x)
x = self.bn2(x)
x = self.relu(x)
#x = self.dropout(x)
x = self.fc3(x)
x = self.bn3(x)
x = self.relu(x)
#x = self.dropout(x)
x = self.fc4(x)
#x = self.relu(x)
#x = self.dropout(x)
#x = self.fc5(x)
return x
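# Shape sketch for the network above; the sizes used here (5 laser readings in,
# 3 discrete actions out) are assumptions taken from how this node is configured
# below, not properties of the DQN class itself.
def _demo_dqn_forward(n_observations=5, n_actions=3):
    net = DQN(n_observations, n_actions)
    net.eval()  # BatchNorm layers need eval mode for a batch of size 1
    q_values = net(torch.zeros(1, n_observations))  # tensor of shape (1, n_actions)
    return q_values.max(dim=1)[1].item()  # index of the greedy action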
class rlComponent(object):
def __init__(self):
"""
        Deployment node: loads a trained DQN policy checkpoint and uses it to drive
        the TurtleBot2 from discretized laser-scan observations, without further training.
"""
# Only variable needed to be set here
number_actions = rospy.get_param('~n_actions')
self.action_space = spaces.Discrete(number_actions)
# We set the reward range, which is not compulsory but here we do it.
self.reward_range = (-numpy.inf, numpy.inf)
#number_observations = rospy.get_param('/turtlebot2/n_observations')
# Actions and Observations
self.dec_obs = rospy.get_param(
"~number_decimals_precision_obs", 1)
self.linear_forward_speed = rospy.get_param(
'~linear_forward_speed')
self.linear_turn_speed = rospy.get_param(
'~linear_turn_speed')
self.angular_speed = rospy.get_param('~angular_speed')
self.init_linear_forward_speed = rospy.get_param(
'~init_linear_forward_speed')
self.init_linear_turn_speed = rospy.get_param(
'~init_linear_turn_speed')
self.n_observations = rospy.get_param('~n_observations')
self.min_range = rospy.get_param('~min_range')
self.max_laser_value = rospy.get_param('~max_laser_value')
self.min_laser_value = rospy.get_param('~min_laser_value')
MODEL_CKPT = rospy.get_param('~model_ckpt')
self.actions = range(number_actions)
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.policy = DQN(self.n_observations, number_actions).to(self.device)
self.policy.load_state_dict(torch.load(MODEL_CKPT, map_location=self.device))
self.policy.eval()
self._cmd_vel_pub = rospy.Publisher('/mobile_base/commands/velocity', Twist, queue_size=1)
self.last_action = "FORWARDS"
self.laser_scan = None
rospy.Subscriber("/scan", LaserScan, self._laser_scan_callback)
laser_scan = self._check_laser_scan_ready()
rospy.logdebug("laser_scan len===>"+str(len(laser_scan.ranges)))
# Number of laser reading jumped
self.new_ranges = int(
math.ceil(float(len(laser_scan.ranges)) / float(self.n_observations)))
rospy.logdebug("n_observations===>"+str(self.n_observations))
rospy.logdebug(
"new_ranges, jumping laser readings===>"+str(self.new_ranges))
high = numpy.full((self.n_observations), self.max_laser_value)
low = numpy.full((self.n_observations), self.min_laser_value)
# We only use two integers
self.observation_space = spaces.Box(low, high)
rospy.logdebug("ACTION SPACES TYPE===>"+str(self.action_space))
rospy.logdebug("OBSERVATION SPACES TYPE===>" +
str(self.observation_space))
# Rewards
self.forwards_reward = rospy.get_param("~forwards_reward")
self.turn_reward = rospy.get_param("~turn_reward")
self.end_episode_points = rospy.get_param(
"~end_episode_points")
self.cumulated_steps = 0.0
self.laser_filtered_pub = rospy.Publisher(
'/scan_filtered', LaserScan, queue_size=1)
self._init_env_variables()
self._set_init_pose()
rospy.spin()
def _laser_scan_callback(self, data):
self.laser_scan = data
def get_laser_scan(self):
return self.laser_scan
def _check_laser_scan_ready(self):
#self.laser_scan = None
rospy.logdebug("Waiting for /scan to be READY...")
while self.laser_scan is None and not rospy.is_shutdown():
try:
self.laser_scan = rospy.wait_for_message("/scan", LaserScan, timeout=5.0)
rospy.logdebug("Current /scan READY=>")
except:
rospy.logerr("Current /scan not ready yet, retrying for getting laser_scan")
return self.laser_scan
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
self.move_base(self.init_linear_forward_speed,
self.init_linear_turn_speed,
epsilon=0.05,
update_rate=10,
min_laser_distance=-1)
return True
def _init_env_variables(self):
"""
Inits variables needed to be initialised each time we reset at the start
of an episode.
:return:
"""
# For Info Purposes
self.cumulated_reward = 0.0
# Set to false Done, because its calculated asyncronously
self._episode_done = False
        # We wait a small amount of time to start everything because in very fast resets, laser scan values are sluggish
        # and sometimes still have values from the prior position that triggered the done.
time.sleep(1.0)
# TODO: Add reset of published filtered laser readings
#laser_scan = self.get_laser_scan()
discretized_ranges = self.laser_scan.ranges
self.publish_filtered_laser_scan(laser_original_data=self.laser_scan,
new_filtered_laser_range=discretized_ranges)
self.step()
def _get_obs(self):
"""
Here we define what sensor data defines our robots observations
To know which Variables we have acces to, we need to read the
TurtleBot2Env API DOCS
:return:
"""
rospy.logdebug("Start Get Observation ==>")
# We get the laser scan data
laser_scan = self.get_laser_scan()
rospy.logdebug("BEFORE DISCRET _episode_done==>" +
str(self._episode_done))
discretized_observations = self.discretize_observation(laser_scan,
self.new_ranges
)
rospy.logdebug("Observations==>"+str(discretized_observations))
rospy.logdebug("AFTER DISCRET_episode_done==>"+str(self._episode_done))
rospy.logdebug("END Get Observation ==>")
return discretized_observations
def _is_done(self, observations):
if self._episode_done:
rospy.logdebug("TurtleBot2 is Too Close to wall" +
str(self._episode_done))
else:
rospy.logerr("TurtleBot2 is Ok")
return self._episode_done
def _compute_reward(self, observations, done):
if not done:
if self.last_action == "FORWARDS":
reward = self.forwards_reward
else:
reward = self.turn_reward
else:
reward = -1*self.end_episode_points
rospy.logdebug("reward=" + str(reward))
self.cumulated_reward += reward
rospy.logdebug("Cumulated_reward=" + str(self.cumulated_reward))
self.cumulated_steps += 1
rospy.logdebug("Cumulated_steps=" + str(self.cumulated_steps))
return reward
# Internal TaskEnv Methods
def discretize_observation(self, data, new_ranges):
"""
Discards all the laser readings that are not multiple in index of new_ranges
value.
"""
self._episode_done = False
discretized_ranges = []
filtered_range = []
#mod = len(data.ranges)/new_ranges
mod = new_ranges
max_laser_value = data.range_max
min_laser_value = data.range_min
rospy.logdebug("data=" + str(data))
rospy.logwarn("data.range_max= %s" % data.range_max)
rospy.logwarn("data.range_min= %s" % data.range_min)
rospy.logwarn("len(data.ranges)= %s" % len(data.ranges))
rospy.logwarn("data.angle_min)= %s" % data.angle_min)
rospy.logwarn("data.angle_max)= %s" % data.angle_max)
rospy.logwarn("data.angle_increment= %s" % data.angle_increment)
rospy.logwarn("mod=" + str(mod))
rospy.loginfo('right data.ranges[89] %s' % data.ranges[89])
rospy.loginfo('left data.ranges[269] %s ' % data.ranges[269])
rospy.loginfo('back data.ranges[359] %s' % data.ranges[359])
rospy.loginfo('back data.ranges[0] %s' % data.ranges[0])
rospy.loginfo('front data.ranges[179] %s' % data.ranges[179])
idx_ranges = [89, 135, 179, 224, 269]
for item in idx_ranges:
if data.ranges[item] == float('Inf') or numpy.isinf(data.ranges[item]):
# discretized_ranges.append(self.max_laser_value)
discretized_ranges.append(round(max_laser_value, self.dec_obs))
elif numpy.isnan(data.ranges[item]):
# discretized_ranges.append(self.min_laser_value)
discretized_ranges.append(round(min_laser_value, self.dec_obs))
else:
# discretized_ranges.append(int(item))
discretized_ranges.append(round(data.ranges[item], self.dec_obs))
if (self.min_range > data.ranges[item] > 0):
rospy.logerr("done Validation >>> data.ranges[item]=" + str(data.ranges[item])+"< "+str(self.min_range))
self._episode_done = True
else:
rospy.logwarn("NOT done Validation >>> data.ranges[item]=" + str(data.ranges[item])+"< "+str(self.min_range))
#rospy.logdebug("Size of observations, discretized_ranges==>"+str(len(discretized_ranges)))
return discretized_ranges
"""
for i, item in enumerate(data.ranges):
if (i % mod == 0):
if item == float('Inf') or numpy.isinf(item):
# discretized_ranges.append(self.max_laser_value)
discretized_ranges.append(
round(max_laser_value, self.dec_obs))
elif numpy.isnan(item):
# discretized_ranges.append(self.min_laser_value)
discretized_ranges.append(
round(min_laser_value, self.dec_obs))
else:
# discretized_ranges.append(int(item))
discretized_ranges.append(round(item, self.dec_obs))
if (self.min_range > item > 0):
rospy.logerr("done Validation >>> item=" +
str(item)+"< "+str(self.min_range))
self._episode_done = True
else:
rospy.logwarn("NOT done Validation >>> item=" +
str(item)+"< "+str(self.min_range))
# We add last value appended
filtered_range.append(discretized_ranges[-1])
else:
# We add value zero
filtered_range.append(0.1)
rospy.logdebug(
"Size of observations, discretized_ranges==>"+str(len(discretized_ranges)))
self.publish_filtered_laser_scan(laser_original_data=data,
new_filtered_laser_range=discretized_ranges)
return discretized_ranges
"""
def publish_filtered_laser_scan(self, laser_original_data, new_filtered_laser_range):
rospy.logdebug("new_filtered_laser_range==>" +
str(new_filtered_laser_range))
laser_filtered_object = LaserScan()
h = Header()
# Note you need to call rospy.init_node() before this will work
h.stamp = rospy.Time.now()
h.frame_id = laser_original_data.header.frame_id
laser_filtered_object.header = h
laser_filtered_object.angle_min = laser_original_data.angle_min
laser_filtered_object.angle_max = laser_original_data.angle_max
new_angle_incr = abs(laser_original_data.angle_max -
laser_original_data.angle_min) / len(new_filtered_laser_range)
#laser_filtered_object.angle_increment = laser_original_data.angle_increment
laser_filtered_object.angle_increment = new_angle_incr
laser_filtered_object.time_increment = laser_original_data.time_increment
laser_filtered_object.scan_time = laser_original_data.scan_time
laser_filtered_object.range_min = laser_original_data.range_min
laser_filtered_object.range_max = laser_original_data.range_max
laser_filtered_object.ranges = []
laser_filtered_object.intensities = []
for item in new_filtered_laser_range:
if item == 0.0:
laser_distance = 0.1
else:
laser_distance = item
laser_filtered_object.ranges.append(laser_distance)
laser_filtered_object.intensities.append(item)
self.laser_filtered_pub.publish(laser_filtered_object)
def move_base(self, linear_speed, angular_speed, epsilon=0.05, update_rate=10, min_laser_distance=-1):
"""
It will move the base based on the linear and angular speeds given.
        It will wait until those twists are achieved, reading from the odometry topic.
:param linear_speed: Speed in the X axis of the robot base frame
:param angular_speed: Speed of the angular turning of the robot base frame
:param epsilon: Acceptable difference between the speed asked and the odometry readings
:param update_rate: Rate at which we check the odometry.
:return:
"""
cmd_vel_value = Twist()
cmd_vel_value.linear.x = linear_speed
cmd_vel_value.angular.z = angular_speed
rospy.logwarn("Move Base")
rospy.logwarn("linear_speed %d", linear_speed)
rospy.logwarn("angular_speed %d", angular_speed)
#rospy.logdebug("TurtleBot2 Base Twist Cmd>>" + str(cmd_vel_value))
#self._check_publishers_connection()
self._cmd_vel_pub.publish(cmd_vel_value)
time.sleep(0.2)
#time.sleep(0.02)
"""
self.wait_until_twist_achieved(cmd_vel_value,
epsilon,
update_rate,
min_laser_distance)
"""
def _set_action(self, action):
"""
This set action will Set the linear and angular speed of the turtlebot2
based on the action number given.
:param action: The action integer that set s what movement to do next.
"""
rospy.logdebug("Start Set Action %d", action)
# We convert the actions to speed movements to send to the parent class CubeSingleDiskEnv
if action == 0: # FORWARD
linear_speed = self.linear_forward_speed
angular_speed = 0.0
self.last_action = "FORWARDS"
rospy.logwarn("Action 0 F")
elif action == 1: # LEFT
linear_speed = self.linear_turn_speed
angular_speed = self.angular_speed
self.last_action = "TURN_LEFT"
rospy.logwarn("Action 1 L")
elif action == 2: # RIGHT
linear_speed = self.linear_turn_speed
angular_speed = -1*self.angular_speed
self.last_action = "TURN_RIGHT"
rospy.logwarn("Action 2 R")
elif self._episode_done == True: # Stop
linear_speed = 0.0
angular_speed = 0.0
self.last_action = "STOP"
rospy.logwarn("Action end")
# We tell TurtleBot2 the linear and angular speed to set to execute
self.move_base(linear_speed,
angular_speed,
epsilon=0.05,
update_rate=10,
min_laser_distance=self.min_range)
#rospy.logdebug("END Set Action ==>"+str(action) +", NAME="+str(self.last_action))
rospy.logwarn("END Set Action %d", action)
def select_action(self, policy, state):
#rospy.logwarn("state.shape: ")
#rospy.logwarn(state.shape)
with torch.no_grad():
# t.max(1) will return largest column value of each row.
# second column on max result is index of where max element was
# found, so we pick action with the larger expected reward.
policy.eval()
action = policy(state).max(axis=1)[1].view(1, 1)
return action
def step(self):
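        # Control loop: read the discretized laser observation, pick the greedy action
        # from the trained policy, command the base, and repeat until a reading drops
        # below min_range (which sets _episode_done) or the observation list is empty.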
obs = self._get_obs()
obs = [round(num, 1) for num in obs]
rospy.loginfo("obs %s" % obs)
while obs != [] and self._episode_done == False:
state = torch.from_numpy(numpy.array(obs)).float().unsqueeze(0).to(self.device)
rospy.loginfo('state %s' % state)
# Pick an action based on the current state
action_dq = self.select_action(self.policy, state)
rospy.logwarn("Next actionq is:%d", action_dq)
# Execute the action in the environment and get feedback
#self._set_action(action_dq)
rospy.logwarn("Start Set Action %d", action_dq)
if action_dq == 0: # FORWARD
linear_speed = self.linear_forward_speed
angular_speed = 0.0
self.last_action = "FORWARDS"
rospy.logwarn("linear_speed %d", linear_speed)
rospy.logwarn("angular_speed %d", angular_speed)
elif action_dq == 1: # LEFT
linear_speed = self.linear_turn_speed
angular_speed = self.angular_speed
self.last_action = "TURN_LEFT"
rospy.logwarn("linear_speed %d", linear_speed)
rospy.logwarn("angular_speed %d", angular_speed)
elif action_dq == 2: # RIGHT
linear_speed = self.linear_turn_speed
angular_speed = -1*self.angular_speed
self.last_action = "TURN_RIGHT"
rospy.logwarn("linear_speed %d", linear_speed)
rospy.logwarn("angular_speed %d", angular_speed)
            elif self._episode_done:  # Stop
linear_speed = 0.0
angular_speed = 0.0
self.last_action = "STOP"
rospy.logwarn("linear_speed %d", linear_speed)
rospy.logwarn("angular_speed %d", angular_speed)
# We tell TurtleBot2 the linear and angular speed to set to execute
self.move_base(linear_speed,
angular_speed,
epsilon=0.05,
update_rate=10,
min_laser_distance=self.min_range)
rospy.logwarn("END Set Action %d", action_dq)
obs = self._get_obs()
obs = [round(num, 1) for num in obs]
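    # A compact, illustrative sketch (not part of the original flow) of the greedy control loop
    # that step() inlines above, expressed with the helpers defined in this class:
    #   obs = [round(num, 1) for num in self._get_obs()]
    #   while obs and not self._episode_done:
    #       state = torch.tensor([obs], dtype=torch.float32, device=self.device)
    #       self._set_action(self.select_action(self.policy, state).item())
    #       obs = [round(num, 1) for num in self._get_obs()]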
if __name__ == '__main__':
try:
rospy.init_node('re_fr', anonymous=False)
rlComp = rlComponent()
#while rlComp.ok():
# pass
except rospy.ROSInterruptException:
pass
| [] |
2024-01-10 | eldarsilver/DQN_Pytorch_ROS | turtle2_openai_ros_example~src~test_deepq.py | #!/usr/bin/env python
import gym
from gym import wrappers
# ROS packages required
import rospy
import rospkg
from openai_ros.openai_ros_common import StartOpenAI_ROS_Environment
from functools import reduce
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
import os
import time
import random
import numpy as np
import matplotlib.pyplot as plt
from collections import namedtuple
import math
import glob
import io
import base64
import datetime
import json
class DQN(nn.Module):
# hidden_size=64
def __init__(self, inputs, outputs, hidden_size=128):
super(DQN, self).__init__()
self.fc1 = nn.Linear(in_features=inputs, out_features=hidden_size)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(p=0.5)
self.bn1 = nn.BatchNorm1d(num_features=hidden_size)
self.bn2 = nn.BatchNorm1d(num_features=64)
self.bn3 = nn.BatchNorm1d(num_features=32)
self.fc2 = nn.Linear(in_features=hidden_size, out_features=64)
self.fc3 = nn.Linear(in_features=64, out_features=32)
self.fc4 = nn.Linear(in_features=32, out_features=outputs)
#self.fc5 = nn.Linear(in_features=16, out_features=outputs)
def forward(self, x):
x = self.fc1(x)
x = self.bn1(x)
x = self.relu(x)
#x = self.dropout(x)
x = self.fc2(x)
x = self.bn2(x)
x = self.relu(x)
#x = self.dropout(x)
x = self.fc3(x)
x = self.bn3(x)
x = self.relu(x)
#x = self.dropout(x)
x = self.fc4(x)
#x = self.relu(x)
#x = self.dropout(x)
#x = self.fc5(x)
return x
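# A minimal usage sketch of the DQN module (illustrative only, not part of the original script),
# assuming the 5 laser-scan inputs and 3 discrete actions (forward/left/right) used below.
# The BatchNorm1d layers need either a batch size > 1 in training mode or a call to .eval()
# before single-state forward passes:
#   net = DQN(inputs=5, outputs=3)
#   net.eval()
#   q_values = net(torch.zeros(1, 5))              # shape (1, 3): one Q-value per action
#   greedy_action = q_values.max(dim=1)[1].item()  # index of the action with the largest Q-value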
def test(env, policy_net, device, test_global_step, render=False):
state, ep_reward, done = env.reset(), 0, False
state = [round(num, 1) for num in state]
rospy.logwarn("Entering test method...")
test_local_step = 0
while not done:
if render:
env.render()
state = torch.from_numpy(np.array(state)).float().unsqueeze(0).to(device)
# t.max(1) will return largest column value of each row.
# second column on max result is index of where max element was
# found, so we pick action with the largest expected reward.
action = policy_net(state).max(dim=1)[1].view(1, 1)
state, reward, done, _ = env.step(action.item())
state = [round(num, 1) for num in state]
test_local_step += 1
test_global_step += 1
rospy.logwarn("Testing: Reward of this step: ")
rospy.logwarn(reward)
ep_reward += reward
rospy.logwarn("Testing: Cumulative Reward of this episode: ")
rospy.logwarn(ep_reward)
writer.add_scalar("Test_Cumulative_Rewards", ep_reward, global_step=test_global_step)
return ep_reward, test_global_step
if __name__ == '__main__':
rospy.init_node('test_turtlebot2_maze_dqn', anonymous=True, log_level=rospy.WARN)
# Init OpenAI_ROS ENV
#task_and_robot_environment_name = rospy.get_param('task_and_robot_environment_name')
task_and_robot_environment_name = rospy.get_param('/turtlebot2/task_and_robot_environment_name')
# Create the Gym environment
env = StartOpenAI_ROS_Environment(task_and_robot_environment_name)
rospy.loginfo("Gym environment done")
rospy.loginfo("Starting Test")
# Set the logging system
rospack = rospkg.RosPack()
pkg_path = rospack.get_path('turtle2_openai_ros_example')
outdir = pkg_path + '/training_results'
env = wrappers.Monitor(env, outdir, force=True)
rospy.loginfo("Monitor Wrapper started")
# Loads parameters from the ROS param server
# Parameters are stored in a yaml file inside the config directory
# They are loaded at runtime by the launch file
#MODEL_PATH = rospy.get_param("model_ckpt")
#########################################################################################
#MODEL_PATH = '$HOME/python3_ws/src/turtle2_openai_ros_example/src/checkpoints/dqn-final-episode-2671-step-110007.pt'
model_dir = os.path.dirname(__file__)
#MODEL_PATH = os.path.join(model_dir, 'checkpoints/dqn-final-episode-2671-step-110007.pt')
MODEL_PATH = os.path.join(model_dir, 'checkpoints/dqn-sparse_reward-episode-1042-step-122000.pt')
"""
Alpha = rospy.get_param("/turtlebot2/alpha")
Epsilon = rospy.get_param("/turtlebot2/epsilon")
Gamma = rospy.get_param("/turtlebot2/gamma")
epsilon_discount = rospy.get_param("/turtlebot2/epsilon_discount")
nepisodes = rospy.get_param("/turtlebot2/nepisodes")
nsteps = rospy.get_param("/turtlebot2/nsteps")
running_step = rospy.get_param("/turtlebot2/running_step")
"""
# Hyperparameters
    gamma = 0.79  # discount factor (was 0.99 initially)
seed = 543 # random seed
n_epochs = 20 # number of epochs to test the trained model
    test_global_step = 0  # Global number of testing steps for tracking cumulative rewards in TensorBoard
# If gpu is to be used
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Fix random seed (for reproducibility)
env.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
# Get number of actions from gym action space
#n_inputs = env.observation_space.shape[0]
n_inputs = 5
n_actions = env.action_space.n
policy_net = DQN(n_inputs, n_actions).to(device)
policy_net.load_state_dict(torch.load(MODEL_PATH, map_location=device))
policy_net.eval()
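    # .eval() puts the network in inference mode: BatchNorm1d uses its running statistics and
    # Dropout is disabled, which also makes single-state (batch size 1) forward passes valid.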
####################################################################################################################
#logdir = os.path.join("$HOME/python3_ws/src/turtle2_openai_ros_example/src/logs", datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
basedir = os.path.dirname(__file__)
basedirpath = os.path.join(basedir, 'logs')
logdir = os.path.join(basedirpath, datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
writer = SummaryWriter(log_dir=logdir)
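    # The event files written by SummaryWriter can be inspected with TensorBoard, e.g.
    # (the path shown is illustrative): tensorboard --logdir turtle2_openai_ros_example/src/logs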
for i in range(n_epochs):
ep_reward, test_global_step = test(env, policy_net, device, test_global_step)
print('Steps: {}'
'\tTest reward: {:.2f}'.format(test_global_step, ep_reward))
| [] |