date_collected | repo_name | file_name | file_contents | prompts |
---|---|---|---|---|
2024-01-10 | vincentIsGood/YourAIDatabase | adddata.py | import os
import sys
import shutil
import configs.common as config
from configs.common import model_config
from lib.docloader import LocalFileLoader, WebFileLoader
import lib.utils.file_utils as file_utils
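# Ingest every new file found in the docs directory (skipping "*_ignore" files and subdirectories), then move it into the "imported" subfolder so it is not re-ingested on the next run.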
def loadData():
loader = LocalFileLoader()
importedDir = config.DOCS_DIRECTORY + "/imported"
if not os.path.isdir(importedDir):
os.mkdir(importedDir, mode=0o755)  # octal mode; the decimal literal 755 would set unintended permission bits
for file in os.listdir(config.DOCS_DIRECTORY):
if file_utils.filenameNoExt(file).endswith("_ignore"):
continue
docFilename = config.DOCS_DIRECTORY + "/" + file
if os.path.isdir(docFilename):
continue
print("[+] Loading data into chroma: ", docFilename)
loader.loadDoc(docFilename)
shutil.move(docFilename, importedDir)
return loader.getDocs()
def loadWebData(urls: 'list[str]'):
loader = WebFileLoader()
for url in urls:
loader.loadWebDoc(url)
loader.cleanupTmp()
return loader.getDocs()
if __name__ == "__main__":
loadedDocs = loadData()
if len(loadedDocs) == 0:
print("[+] No files to be imported")
sys.exit(0)
print("[+] Preparing Chroma DB")
from langchain.vectorstores import Chroma
from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings
embedding_func = SentenceTransformerEmbeddings(model_name=model_config.SENTENCE_EMBEDDING_MODEL, cache_folder=config.CACHE_DIR)
chromadb = Chroma(persist_directory=config.PERSIST_DIRECTORY, embedding_function=embedding_func)
chromadb.add_documents(documents=loadedDocs) | [] |
2024-01-10 | vincentIsGood/YourAIDatabase | flask_main.py | """
Only one job can run at a time, since only a single LLM is loaded in memory.
TODO: organize this pile of * please.
"""
import sys
import os
import json
import threading
import asyncio
from typing import Any, Dict, List, Optional
from uuid import UUID
from langchain.schema.output import LLMResult
from transformers.models.auto import AutoTokenizer
from langchain.callbacks.base import BaseCallbackHandler
from websockets.server import WebSocketServerProtocol, serve
from flask import Flask, request, send_from_directory, Response
from flask_cors import CORS
from markupsafe import escape, Markup
from transformers import TextStreamer
import configs.common as config
import adddata
import lib.utils.url_utils as url_utils
from lib.AiDatabase import AiDatabase
from lib.utils.async_utils import run_async
from lib.utils.randutils import randomString
FLASK_PORT = 5022
WEBSOCKET_PORT = 5023
UPLOAD_FOLDER = config.DOCS_DIRECTORY
app = Flask(__name__, template_folder="public", static_folder="public")
app.config['SECRET_KEY'] = "asdasdwefdgdfcvbnm,nadsjkh"
app.config['TEMPLATES_AUTO_RELOAD'] = True
app.config["UPLOAD_FOLDER"] = UPLOAD_FOLDER
app.config["WEB_UPLOAD_SECRET"] = config.WEB_UPLOAD_SECRET
cors = CORS(app)
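# Global state shared between the HTTP routes and the WebSocket handler: at most one query job and one streaming client are active at a time.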
currentJob = None
queryJob = None
mainWebSocket = None
class WsTextStreamer(TextStreamer):
"""For transformers / HuggingFace LLM
"""
def __init__(self, tokenizer: AutoTokenizer, skip_prompt: bool = False, **decode_kwargs):
super().__init__(tokenizer, skip_prompt, **decode_kwargs)
self.buffer: 'list[str]' = []
def on_finalized_text(self, text: str, stream_end: bool = False):
super().on_finalized_text(text, stream_end)
if text == "":
return
async def wrapper():
if not mainWebSocket:
self.buffer.append(text)
if stream_end:
self.buffer = []
return
if len(self.buffer) > 0:
await mainWebSocket.send(self.buffer)
self.buffer = []
await mainWebSocket.send(text)
run_async(wrapper)
class StreamingCallbackHandler(BaseCallbackHandler):
"""For LangChain / CTransformers LLM
"""
buffer: 'list[str]' = []
def on_llm_start(self,
serialized: Dict[str, Any],
prompts: List[str], *,
run_id: UUID,
parent_run_id: 'UUID | None' = None,
tags: 'List[str] | None' = None,
metadata: 'Dict[str, Any] | None' = None,
**kwargs: Any) -> Any:
for prompt in prompts:
print(prompt)
pass
def on_llm_new_token(self, token: str, **kwargs) -> None:
sys.stdout.write(token)
sys.stdout.flush()
async def wrapper():
if not mainWebSocket:
self.buffer.append(token)
return
if len(self.buffer) > 0:
await mainWebSocket.send(self.buffer)
self.buffer = []
await mainWebSocket.send(token)
run_async(wrapper)
def on_llm_end(self,
response: LLMResult, *,
run_id: UUID, parent_run_id: 'UUID | None' = None,
**kwargs: Any) -> Any:
self.buffer = []
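# Run the (blocking) LLM query, then stream the source metadata to the connected client and reset the job/WebSocket state.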
async def queryAndSendSources(query: str):
global mainWebSocket, currentJob
sources = aiDatabase.query(Markup(query).unescape())
## After AI response, send sources and reset
if mainWebSocket:
await mainWebSocket.send("[[[SOURCES]]]")
for source in sources:
await mainWebSocket.send(json.dumps(source.metadata))
await mainWebSocket.send("[[[END]]]")
mainWebSocket = None
currentJob = None
aiDatabase = AiDatabase([StreamingCallbackHandler()], WsTextStreamer)
@app.route("/app/")
def index():
return send_from_directory("public", "index.html")
@app.route("/app/<path:path>")
def appFiles(path):
return send_from_directory("public", path)
@app.route("/app/docs/<path:path>")
def serveImportedDocs(path):
return send_from_directory("docs/imported", path)
@app.route("/aidb/urlupload", methods=["POST"])
def uploadDocUrl():
if not url_utils.isUriValid(request.data):
return Response(status=400)
loadedDocs = adddata.loadWebData([request.data])
if len(loadedDocs) == 0:
return Response(status=204)
aiDatabase.addDocsToDb(loadedDocs)
return Response(status=201)
@app.route("/aidb/upload", methods=["POST"])
def uploadDocument():
filename = request.args.get("name")
if not request.data or not filename or ".." in filename:  # reject empty uploads, missing names and path traversal
return Response(status=400)
outFilePath = os.path.normpath(os.path.join(app.config["UPLOAD_FOLDER"], filename))
if os.path.exists(outFilePath):
return Response(status=204)
with open(outFilePath, "wb+") as f:
f.write(request.data)
loadedDocs = adddata.loadData()
if len(loadedDocs) == 0:
return Response(status=204)
aiDatabase.addDocsToDb(loadedDocs)
return Response(status=201)
@app.route("/aidb/viewdocs")
def viewAllDocs():
return aiDatabase.getAllDocs()
@app.route("/aidb/removedoc", methods=["DELETE"])
def deleteDocument():
id = request.args.get("id")
aiDatabase.deleteDocsFromDb([id])
return Response(status=200)
@app.route("/aidb", methods=["GET"])
def handleDatabaseQuery():
global currentJob, queryJob
query = request.args.get("query")
if query and not currentJob:
queryJob = threading.Thread(target=lambda: asyncio.run(queryAndSendSources(query)), daemon=True)
queryJob.start()
currentJob = randomString(10)
return currentJob
return ""
@app.route("/aidb", methods=["DELETE"])
def stopGenHandler():
if currentJob == request.args.get("id"):
aiDatabase.stopLLM()
return ""
async def wsHandler(websocket: WebSocketServerProtocol):
"""Query WebSocket sender
Only ONE websocket is allowed to connect.
"""
global mainWebSocket
print("[+] Client WebSocket connected: %s" % str(websocket.remote_address))
async for message in websocket:
msgObj = json.loads(message)
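# The client must first send the job id returned by GET /aidb; claim this socket for streaming only if the id matches the active job and no other socket is attached.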
if "id" in msgObj:
if currentJob == msgObj["id"] and not mainWebSocket:
mainWebSocket = websocket
await websocket.send("[[[START]]]")
else:
return
global queryJob
if queryJob:
print("[+] Waiting for LLM job to complete")
queryJob.join()
print("[+] Closing connection for: %s" % str(websocket.remote_address))
async def websocketMain():
print(f"[+] Starting websocket server on 0.0.0.0:{WEBSOCKET_PORT}")
async with serve(wsHandler, "0.0.0.0", WEBSOCKET_PORT):
await asyncio.Future()
def flask_main():
websocketThread = threading.Thread(target=lambda: asyncio.run(websocketMain()), daemon=True)
websocketThread.start()
print(f"[+] Starting flask webserver")
app.run(port=FLASK_PORT)
def create_app():
websocketThread = threading.Thread(target=lambda: asyncio.run(websocketMain()), daemon=True)
websocketThread.start()
print(f"[+] Starting waitress webserver")
return app
if __name__ == "__main__":
flask_main() | [] |
2024-01-10 | vincentIsGood/YourAIDatabase | viewdata.py | import argparse
from datetime import datetime
from langchain.vectorstores import Chroma
from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings
import configs.common as config
from configs.common import model_config
from lib.utils.InteractiveConsole import InteractiveConsole, SimpleCommandHandler
from lib.docloader import parseDateTime, fromTimestamp
cmdParser = argparse.ArgumentParser()
cmdParser.add_argument("-id", "--docid", default=None, type=str, help="Print content of a document")
cmdParser.add_argument("-it", "--interactive", action="store_true", default=False, help="Interactive mode")
cmdParsed = cmdParser.parse_args()
print("[+] Preparing Chroma DB")
embedding_func = SentenceTransformerEmbeddings(model_name=model_config.SENTENCE_EMBEDDING_MODEL, cache_folder=config.CACHE_DIR)
chromadb = Chroma(persist_directory=config.PERSIST_DIRECTORY, embedding_function=embedding_func)
print("[+] Chroma index:")
print(chromadb._collection, "\n")
print("[+] Chroma # of collections: ", chromadb._collection.count())
def viewAllDocs():
collection = chromadb._collection.get(include=["metadatas"])
printResults(collection["ids"], collection["metadatas"])
def viewDocsSinceDate(args: 'list[str]'):
# https://docs.trychroma.com/usage-guide?lang=py#using-where-filters
date = args[0]
time = args[1] if len(args) > 1 else "0:0:0"
print("[+] Showing content for docs entered db since %s %s" % (date, time))
collection = chromadb._collection.get(where={
"time": {
"$gt": parseDateTime(date + " " + time)
}
}, include=["metadatas", "documents"])
printResults(collection["ids"], collection["metadatas"])
def viewSpecificDoc(id: str):
print("[+] Showing content for doc with id: %s" % id)
collection = chromadb._collection.get(ids=[id], include=["metadatas", "documents"])
print(collection["metadatas"])
print(collection["documents"])
def printResults(ids, metadatas):
print("[+] Documents (ID -> Metadata)")
for i in range(len(ids)):
metadatas[i]["time"] = fromTimestamp(metadatas[i]["time"])
print(f"[*] '{ids[i]}': {metadatas[i]}")
if cmdParsed.interactive:
print("[+] Entering interactive mode")
console = InteractiveConsole()
console.addHandler(SimpleCommandHandler(lambda args: viewSpecificDoc(args[0]), "docid", "view a document and its content by ID"))
console.addHandler(SimpleCommandHandler(lambda args: viewAllDocs(), "docs", "view all documents and their metadata"))
console.addHandler(SimpleCommandHandler(lambda args: viewDocsSinceDate(args), "time", "view all documents since a date in format '%d/%m/%Y %H:%M:%S'"))
console.takeover()
elif cmdParsed.docid:
viewSpecificDoc(cmdParsed.docid)
else:
viewAllDocs() | [] |
2024-01-10 | vincentIsGood/YourAIDatabase | lib~cancellablellm~llamacpp.py | from typing import Any, List, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms import LlamaCpp
class CancellableLlamaCpp(LlamaCpp):
stopRequested = False
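# Cooperative cancellation: _call checks this flag between streamed tokens and returns early when it is set.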
def stopGen(self):
self.stopRequested = True
def _call(
self,
prompt: str,
stop: 'Optional[List[str]]' = None,
run_manager: 'Optional[CallbackManagerForLLMRun]' = None,
**kwargs: Any,
) -> str:
# Modified implementation of LlamaCpp._call
self.stopRequested = False
if self.streaming:
# If streaming is enabled, we use the stream
# method that yields as they are generated
# and return the combined strings from the first choices's text:
combined_text_output = ""
for token in self.stream(prompt=prompt, stop=stop, run_manager=run_manager):
if self.stopRequested:
return combined_text_output
combined_text_output += token["choices"][0]["text"]
return combined_text_output
else:
params = self._get_parameters(stop)
params = {**params, **kwargs}
result = self.client(prompt=prompt, **params)
return result["choices"][0]["text"] | [] |
2024-01-10 | vincentIsGood/YourAIDatabase | lib~cancellablellm~ctransformers.py | from typing import Any, List, Sequence
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms import CTransformers
class CancellableLLM(CTransformers):
stopRequested = False
def stopGen(self):
self.stopRequested = True
def _call(
self, prompt: str,
stop: 'Sequence[str] | None' = None,
run_manager: 'CallbackManagerForLLMRun | None' = None,
**kwargs: Any) -> str:
# Modified implementation of CTransformers._call
self.stopRequested = False
text = []
_run_manager = run_manager or CallbackManagerForLLMRun.get_noop_manager()
for chunk in self.client(prompt, stop=stop, stream=True):
if self.stopRequested:
return "".join(text)
text.append(chunk)
_run_manager.on_llm_new_token(chunk, verbose=self.verbose)
return "".join(text) | [] |
2024-01-10 | vincentIsGood/YourAIDatabase | deletedata.py | import argparse
from langchain.vectorstores import Chroma
from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings
import configs.common as config
from configs.common import model_config
from lib.utils.InteractiveConsole import InteractiveConsole, SimpleCommandHandler
cmdParser = argparse.ArgumentParser()
cmdParser.add_argument("-id", "--docids", action='append', default=[], help="Delete docs by ids")
cmdParsed = cmdParser.parse_args()
print("[+] Preparing Chroma DB")
embedding_func = SentenceTransformerEmbeddings(model_name=model_config.SENTENCE_EMBEDDING_MODEL, cache_folder=config.CACHE_DIR)
chromadb = Chroma(persist_directory=config.PERSIST_DIRECTORY, embedding_function=embedding_func)
print("[+] Chroma index:")
print(chromadb._collection, "\n")
if len(cmdParsed.docids) > 0:
print("[+] Deleting doc with ids: ", cmdParsed.docids)
chromadb._collection.delete(cmdParsed.docids) | [] |
2024-01-10 | vincentIsGood/YourAIDatabase | lib~output_callbacks.py | from typing import Any, TextIO
from uuid import UUID
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema.output import LLMResult
class StreamingCallbackHandler(BaseCallbackHandler):
def __init__(self, outputStream: TextIO):
super().__init__()
self.outputStream = outputStream
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
self.outputStream.write(token)
self.outputStream.flush()
def on_llm_end(self, response: LLMResult, *, run_id: UUID, parent_run_id: 'UUID | None' = None, **kwargs: Any) -> Any:
pass | [] |
2024-01-10 | Mind-Interfaces/Human-Emulation-System | HES.py | # Human Emulation System (HES.py)
from gradio import Interface
from gradio.components import Textbox
import logging
import openai
import os
class HumanEmulationSystem:
def __init__(self):
# Define configuration settings.
self.openai_model = "gpt-3.5-turbo"
self.DEBUG = False # Set to True to show API calls
# Configure logging.
logging.basicConfig(level=logging.DEBUG if self.DEBUG else logging.INFO)
self.chat_history = ""
# Read the OpenAI API key from environment variables
self.openai_api_key = os.getenv("OPENAI_API_KEY")
if self.openai_api_key is None:
raise EnvironmentError("OPENAI_API_KEY is not set in the environment variables.")
openai.api_key = self.openai_api_key
# Set cognitive contexts.
self.context_left = "Analytic Logic, Data-Driven Thinking, Focusing on Facts and Evidence"
self.context_right = "Creative Reasoning, Intuition, Symbolic Linking, Exploring Possibilities"
self.context_mid = "Polymath, Seamless Viewpoint Integration, Focused on Essential Aspects"
# Set moderator modifiers.
self.format_mod = " (Keep your response short, on topic, well balanced and concise.) "
self.negative_mod = "[DO NOT IDENTIFY AS an LLM, AI, language model, or AI language model]"
self.modifiers = self.format_mod + self.negative_mod
@staticmethod
def chat_log(chat, prompt, mid_result):
log = f"{chat}User(Input): {prompt}\nSystem(Output): {mid_result}\n"
return log
def log_debug(self, message):
# Send debug output to console.
if self.DEBUG:
logging.debug(message)
def call_left_hemisphere(self, prompt, left_lobe):
# Generate an analytical response.
request_params = {
"model": self.openai_model,
"messages": [{"role": "system", "content": left_lobe},
{"role": "user", "content": prompt}],
"max_tokens": 250,
}
self.log_debug(f"Left Hemisphere Request: {request_params}")
response = openai.ChatCompletion.create(**request_params)
self.log_debug(f"Left Hemisphere Response: {response}")
return response.choices[0].message['content']
def call_right_hemisphere(self, prompt, right_lobe):
# Generate a creative response.
request_params = {
"model": self.openai_model,
"messages": [{"role": "system", "content": right_lobe},
{"role": "user", "content": prompt}],
"max_tokens": 250,
}
self.log_debug(f"Right Hemisphere Request: {request_params}")
response = openai.ChatCompletion.create(**request_params)
self.log_debug(f"Right Hemisphere Response: {response}")
return response.choices[0].message['content']
def call_model(self, prompt, left_lobe, right_lobe, response_moderator):
# Integrate multiple perspectives into a multi-dimensional response.
left_result = self.call_left_hemisphere(prompt, left_lobe)
right_result = self.call_right_hemisphere(prompt, right_lobe)
# Compile responses to synthesize an integrated response.
combined = f"{self.chat_history}\nQuery(Input): {prompt}\n"
combined += f"[Left Hemisphere(Internal): {left_result}]\n"
combined += f"[Right Hemisphere(Internal): {right_result}]\n"
combined += "Response(Output):"
# Enforce negative modifiers on the response moderator.
moderator = response_moderator + self.modifiers
# Generate a moderated response.
request_params_mid = {
"model": self.openai_model,
"messages": [{"role": "system", "content": moderator},
{"role": "user", "content": combined}],
"max_tokens": 500,
}
self.log_debug(f"Mid Brain Request: {request_params_mid}")
response_mid = openai.ChatCompletion.create(**request_params_mid)
self.log_debug(f"Mid Brain Response: {response_mid}")
# Compile conversation for chat log and display Response.
mid_result = response_mid.choices[0].message['content']
self.chat_history = self.chat_log(self.chat_history, prompt, mid_result)
return self.chat_history, left_result, right_result, mid_result
# Create an instance of the Human Emulation System
HES = HumanEmulationSystem()
# Gradio Web GUI
GUI = Interface(
HES.call_model,
inputs=[
Textbox(lines=2, placeholder="Enter your query here...", label="Input Prompt"),
Textbox(lines=1, value=HES.context_left, label="Analytic Logic"),
Textbox(lines=1, value=HES.context_right, label="Creative Reasoning"),
Textbox(lines=1, value=HES.context_mid, label="Response Moderator"),
],
outputs=[
Textbox(lines=2, placeholder="", label="Chat Log"),
Textbox(label="Left Hemisphere Response"),
Textbox(label="Right Hemisphere Response"),
Textbox(label="Synthesized Response"),
],
live=False,
title='Human Emulation System',
description="Explore the emulation of human cognition by synthesizing logical and creative dichotomy."
)
# Initialize
GUI.launch()
# EOF // 2023 MIND INTERFACES, INC. ALL RIGHTS RESERVED.
| [] |
2024-01-10 | samimhidia1/meeting-summary-generator | meeting_summarizer~meeting_summarizer.py | import openai
from typing import Optional
from meeting_summarizer.utils import add_chunks_of_transcripts_to_prompt
from openai_api_interaction import OpenAICompletionAPI
def summarize_transcription(
transcriptions: str,
config: OpenAICompletionAPI,
prompt_template: Optional[str] = None,
) -> str:
"""
Summarizes the meeting transcription using the configured OpenAI completion model.
Parameters
----------
transcriptions : str
The meeting transcription.
config : OpenAICompletionAPI
The configuration for the OpenAI Completion API.
prompt_template : str, optional
The template for building the completion prompt, by default
"Write a concise summary of the following:\n\n<<<CHUNK>>>\n\nCONCISE SUMMARY:\n\n".
Returns
-------
str
The generated meeting summary.
"""
# Set up the OpenAI API client
openai.api_key = config.api_key
if prompt_template is None:
prompt_template = "Write a concise summary of the following:" \
"\n\n<<<CHUNK>>>\n\n" \
"CONCISE SUMMARY:\n\n"
# Create the prompts
prompts = add_chunks_of_transcripts_to_prompt(
transcriptions=transcriptions,
model=config.model,
prompt_template=prompt_template,
num_token_completion=config.max_tokens
)
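# The Completion API accepts a batch of prompts; small batches go out in a single request, larger ones are sent in chunks of 20 and the returned summaries are concatenated.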
if len(prompts) < 20:
response = openai.Completion.create(
model=config.model,
prompt=prompts,
max_tokens=config.max_tokens,
temperature=config.temperature,
top_p=config.top_p,
n=config.n,
echo=config.echo,
presence_penalty=config.presence_penalty,
frequency_penalty=config.frequency_penalty,
best_of=config.best_of,
)
summary = [choice["text"].strip() for choice in response.choices]
summary = "".join(summary)
return summary
else:
responses = []
for i in range(0, len(prompts), 20):
response = openai.Completion.create(
model=config.model,
prompt=prompts[i:i + 20],
max_tokens=config.max_tokens,
temperature=config.temperature,
top_p=config.top_p,
n=config.n,
echo=config.echo,
stop=config.stop,
presence_penalty=config.presence_penalty,
frequency_penalty=config.frequency_penalty,
best_of=config.best_of,
)
summary = [choice["text"].strip() for choice in response.choices]
responses += summary
summary = "".join(responses)
return summary
| [
"Write a concise summary of the following:\n\n<<<CHUNK>>>\n\nCONCISE SUMMARY:\n\n"
] |
2024-01-10 | samimhidia1/meeting-summary-generator | speech_transcriber~speech_transcriber.py | import os
import openai
from typing import List
from openai_api_interaction import OpenAIAudioAPI
from pydub import AudioSegment
def split_audio_file(audio_path: str, chunk_duration: int = 100000) -> List[str]:
"""
Splits the audio file into chunks of 24MB or less.
Parameters
----------
audio_path : str
The path to the audio file.
chunk_duration : int, optional
The duration of each audio chunk in milliseconds, by default 100000 (100 seconds).
Returns
-------
List[str]
A list of file paths for the generated audio chunks.
"""
# load the audio file
audio = AudioSegment.from_file(audio_path)
audio_chunks = []
# create a temp folder to store the audio chunks
if not os.path.exists("temp"):
os.makedirs("temp")
for i, chunk in enumerate(audio[::chunk_duration]):
chunk_path = f"temp/temp_audio_chunk_{i}.wav"
chunk.export(chunk_path, format="wav")
audio_chunks.append(chunk_path)
return audio_chunks
def transcribe_audio(
config: OpenAIAudioAPI,
) -> str:
"""
Transcribes the audio using OpenAI's Whisper model.
Parameters
----------
config : OpenAIAudioAPI
The configuration for the OpenAI Audio API.
Returns
-------
str
The transcription of the audio.
"""
# Set up the OpenAI API client
openai.api_key = config.api_key
# if the file is larger than 24MB, split it into chunks
audio_size = os.path.getsize(config.file_path)
max_size = 24 * 1024 * 1024
if audio_size > max_size:
# split the audio file into chunks
audio_chunks = split_audio_file(config.file_path)
else:
audio_chunks = [config.file_path]
# Generate the transcription
transcriptions = []
i = 0
for chunk_path in audio_chunks:
with open(chunk_path, "rb") as audio_file:
response = openai.Audio.transcribe(model=config.model,
file=audio_file)
print("progress:", i / len(audio_chunks))
i += 1
transcription = response.get("text")
transcriptions.append(transcription)
if audio_size > max_size:
os.remove(chunk_path)
transcriptions = "\n".join(transcriptions)
return transcriptions
| [] |
2024-01-10 | samimhidia1/meeting-summary-generator | pipelines.py | from audio_extractor import extract_audio_from_video
from generate_meeting_summary import generate_meeting_summary
from meeting_summarizer import summarize_transcription
from openai_api_interaction import OpenAIAudioAPI, OpenAICompletionAPI
from speech_transcriber import transcribe_audio
def save_text(text, output_path):
with open(output_path, "w", encoding='utf-8') as transcription_file:
transcription_file.write(text)
def video_to_summary(
project: str,
video_name: str,
api_key: str,
) -> None:
"""
Extracts audio from a video, transcribes the audio, and summarizes the meeting.
Parameters
----------
project : str
The name of the project.
video_name : str
The name of the input video file.
api_key : str
The OpenAI API key.
"""
# Step 1: Extract audio from the video
print(f"Extracting audio from: {video_name} ...")
video_path = "projects/{}/videos/{}".format(project, video_name)
audio_output_path = "projects/{}/audios/{}.wav".format(project, video_name.split(".")[0])
extract_audio_from_video(video_path, audio_output_path)
print(f"Audio extracted and saved to: {audio_output_path}")
# Step 2: Transcribe the audio and summarize the meeting
audio_to_summary(project, audio_output_path, api_key)
def audio_to_summary(
project: str,
audio_path: str,
api_key: str,
) -> None:
"""
Transcribes the audio and summarizes the meeting.
Parameters
----------
project : str
The name of the project.
audio_path : str
The path to the input audio file.
api_key : str
The OpenAI API key.
"""
# Step 1: Transcribe the audio
print("Transcribing the audio file...")
configAudio = OpenAIAudioAPI(api_key=api_key, file_path=audio_path)
transcription = transcribe_audio(configAudio)
audio_name = audio_path.split("/")[-1].split(".")[0]
output_transcription_path = "projects/{}/transcriptions/transcription_{}.txt".format(project, audio_name)
save_text(transcription, output_transcription_path)
print("Transcription from the audio completed.")
# Step 2: Summarize the meeting transcription
text_to_summary(project, transcription, audio_name, api_key)
def text_to_summary(
project: str,
transcription: str,
name: str,
api_key: str,
) -> None:
"""
Summarizes the meeting transcription.
Parameters
----------
project : str
The name of the project.
transcription : str
The transcription of the meeting.
name : str
The name of the input text file.
api_key : str
The OpenAI API key.
"""
# Step 1: Summarize the meeting transcription
prompt_template_summarize = open("meeting_summarizer/prompts/summarize_transcript.txt",
"r", encoding='utf-8').read()
print("Summarizing the meeting transcription...")
configSummary = OpenAICompletionAPI(api_key=api_key,
max_tokens=777,
temperature=0.5,
presence_penalty=0.7,
frequency_penalty=0.4)
summary = summarize_transcription(transcriptions=transcription,
config=configSummary,
prompt_template=prompt_template_summarize)
text_name = name
output_summary_path = "projects/{}/summaries/summary_{}.txt".format(project, text_name)
save_text(summary, output_summary_path)
print("Summary of transcriptions completed.")
print(f"Transcriptions summary saved to: {output_summary_path}")
# Step 2: Generate the meeting summary
prompt_template_meeting_summary = open("generate_meeting_summary/prompts/summary_structure_2.txt",
"r", encoding='utf-8').read()
print("Generating the meeting summary...")
configMeetingSummary = OpenAICompletionAPI(api_key=api_key,
max_tokens=2000)
meeting_summary = generate_meeting_summary(summary=summary,
config=configMeetingSummary,
prompt_template=prompt_template_meeting_summary)
output_meeting_summary_path = "projects/{}/summaries/meeting_summary_{}.txt".format(project, text_name)
save_text(meeting_summary, output_meeting_summary_path)
print("Meeting summary completed.")
print(f"Meeting summary saved to: {output_meeting_summary_path}")
| [
"meeting_summarizer/prompts/summarize_transcript.txt",
"generate_meeting_summary/prompts/summary_structure_2.txt"
] |
2024-01-10 | noah1510/unit-system-generator | genSources.py | import os.path
import argparse
from pathlib import Path
import semver
from generator_code.target import Target
# comments mostly generated by ChatGPT from openai and tweaked by me
# This block of code will only be executed if this script is run directly,
# rather than being imported by another script.
if __name__ == "__main__":
# define the message that will be displayed when the user runs the script with the -h flag
msg = "A code generator for the unit system library.\n"
msg += "This script generates all units and contains all of the unit definitions."
# create an ArgumentParser object to parse command line arguments
parser = argparse.ArgumentParser(
description=msg
)
# define the 'outDir' argument, which is optional, should be a string, and has a default value of ''
parser.add_argument(
"--outDir",
"-o",
help="Put all files in the same given directory. This does not work with the 'all' target.",
required=False,
default='',
dest='outDir',
type=str,
)
parser.add_argument(
"-v", "--verbose",
help="increase output verbosity",
required=False,
default=False,
dest='verbose',
action='store_true',
)
parser.add_argument(
"--printOutFiles",
help="set to true if the generated file should be print (enabled when --verbose is set)",
required=False,
default=False,
dest='printOutFiles',
action='store_true',
)
parser.add_argument(
"--no_format",
help="set this flag to disable running the formatter on the generated files",
required=False,
default=True,
dest='format_sources',
action='store_false',
)
# create an archive flag, which is a flag to directly produce a release archive
parser.add_argument(
"--archive",
help="give this flag to generate a tar.zstd archive of the output directory",
required=False,
default=False,
dest='archive',
action='store_true',
)
parser.add_argument(
"--clean",
help="set this flag to clean the output directory before generating the files",
required=False,
default=False,
dest='clean',
action='store_true',
)
parser.add_argument(
"--test",
help="set this flag to run the test commands after generating the files. Warning: this might require external "
"programs to be installed in your system.",
required=False,
default=False,
dest='test',
action='store_true',
)
parser.add_argument(
"--set_version",
help="use this to pass a version to the generator. It has to be parsable by semver.",
required=False,
default='0.8.0',
dest='version',
type=str,
)
parser.add_argument(
"--no-post-gen",
help="set this flag to disable running the post generation commands",
required=False,
default=True,
dest='post_gen',
action='store_false',
)
# add a subparser to select the target for the code generator
subparser_manager = parser.add_subparsers(help="generator target", dest="target")
Target.init_subparser(subparser_manager)
# parse the command line arguments
args = vars(parser.parse_args())
# get the directory containing the script
main_script_dir = Path(os.path.dirname(__file__)).absolute().expanduser()
if args['outDir'] != '':
if args['target'] == 'all':
raise ValueError('Cannot specify an output directory when generating all targets')
if args['version'].startswith('v'):
args['version'] = args['version'][1:]
# get the generator target(s) from the command line arguments
generator_targets = Target.get_targets(
version=semver.VersionInfo.parse(args['version']),
main_script_dir=main_script_dir,
output_dir=args['outDir'],
print_files=args['printOutFiles'] or args['verbose'],
target_name=args['target'],
clean_output_dir=args['clean'],
verbose=args['verbose'],
)
for generator_target in generator_targets:
print(f'Current target: {generator_target.target_name}')
print('Generating...')
generator_target.generate()
print('Generation done without errors.')
if args['format_sources']:
print('Formatting...')
fmt = generator_target.format()
print('Formatting done by', fmt, 'without errors.')
if args['post_gen']:
print('running post generation commands...')
generator_target.post_generate()
print('post generation commands done without errors.')
if args['test']:
print('Testing...')
generator_target.test()
print('Testing done without errors.')
if args['archive']:
print('Archiving...')
generator_target.archive()
print('Archiving done without errors.')
| [] |
2024-01-10 | jovialis/vu-course-planner | functions~src~warehousing~warehouse_requisites.py | def warehouse_course_requisites(course_id: str):
from src.utils.init_firestore import init_firestore
db = init_firestore()
course_doc = db.collection("courses").document(course_id).get()
course_prerequisites_raw = course_doc.to_dict()["prerequisites_raw"]
if not course_prerequisites_raw or len(course_prerequisites_raw) == 0:
return False
requisites = __structure_course_requisites(course_prerequisites_raw)
if requisites is None:
return False
course_doc.reference.set({
"prerequisites": requisites
}, merge=True)
return True
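# Ask the chat model to rewrite the raw prerequisite text as structured JSON and return the parsed "prerequisites" object, or None if the call or parsing fails.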
def __structure_course_requisites(requisites: str):
import openai
openai.api_key = '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'
try:
with open("src/prompts/extract_requisites", "r") as prompt_file:
requisites_prompt = prompt_file.read()
# Prompt the model
response = openai.chat.completions.create(
model="gpt-4-1106-preview",
messages=[
{"role": "system", "content": requisites_prompt},
{"role": "user", "content": requisites}
],
temperature=0
)
# Assuming the response is in text form and is valid YAML
json_response = response.choices[0].message.content
json_response = json_response.replace("```json", "").replace("```", "")
import json
# Parse the YAML response
parsed_struct = json.loads(json_response)
if "prerequisites" not in parsed_struct:
return None
return parsed_struct["prerequisites"]
except Exception as e:
print(e)
return None
| [] |
2024-01-10 | tarunsamanta2k20/core | core~cat~mad_hatter~core_plugin~hooks~rabbithole.py | """Hooks to modify the RabbitHole's documents ingestion.
Here is a collection of methods to hook into the RabbitHole execution pipeline.
These hooks allow to intercept the uploaded documents at different places before they are saved into memory.
"""
from typing import List
from cat.log import log
from langchain.text_splitter import RecursiveCharacterTextSplitter
from cat.mad_hatter.decorators import hook
from langchain.docstore.document import Document
# Hook called just before of inserting a document in vector memory
@hook(priority=0)
def before_rabbithole_insert_memory(doc: Document, cat) -> Document:
"""Hook the `Document` before is inserted in the vector memory.
Allows to edit and enhance a single `Document` before the *RabbitHole* add it to the declarative vector memory.
The `Document` has two properties::
`page_content`: the string with the text to save in memory;
`metadata`: a dictionary with at least two keys:
`source`: where the text comes from;
`when`: timestamp to track when it's been uploaded.
Args:
doc: langchain `Document` to be inserted in memory.
cat: Cheshire Cat instance.
Returns:
langchain `Document` that is added in the declarative vector memory.
"""
return doc
# Hook called just before rabbithole splits text. Input is whole Document
@hook(priority=0)
def before_rabbithole_splits_text(doc: Document, cat) -> Document:
"""Hook the `Document` before is split.
Allows to edit the whole uploaded `Document` before the *RabbitHole* recursively splits it in shorter ones.
For instance, the hook allows to change the text or edit/add metadata.
Args:
doc: langchain `Document` uploaded in the *RabbitHole* to be ingested.
cat: Cheshire Cat instance.
Returns:
Edited langchain `Document`.
"""
return doc
# Hook called when rabbithole splits text. Input is whole Document
@hook(priority=0)
def rabbithole_splits_text(text, chunk_size: int, chunk_overlap: int, cat) -> List[Document]:
"""Hook into the recursive split pipeline.
Allows to edit the recursive split the *RabbitHole* applies to chunk the ingested documents.
This is applied when ingesting a documents and urls from a script, using an endpoint or from the GUI.
Args:
text: list of langchain `Document` to chunk.
chunk_size: length of every chunk in characters.
chunk_overlap: amount of overlap between consecutive chunks.
cat: Cheshire Cat instance.
Returns:
list of chunked langchain `Document` to be optionally summarized and stored in episodic memory.
"""
# text splitter
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
separators=["\\n\\n", "\n\n", ".\\n", ".\n", "\\n", "\n", " ", ""],
)
# split text
docs = text_splitter.split_documents(text)
# remove short texts (page numbers, isolated words, etc.)
docs = list(filter(lambda d: len(d.page_content) > 10, docs))
# add metadata, these docs are not summaries
for doc in docs:
doc.metadata["is_summary"] = False
return docs
# Hook called after rabbithole have splitted text into chunks.
# Input is the chunks
@hook(priority=0)
def after_rabbithole_splitted_text(chunks: List[Document], cat) -> List[Document]:
"""Hook the `Document` after is split.
Allows to edit the list of `Document` right after the *RabbitHole* chunked them in smaller ones.
Args:
chunks: list of langchain `Document`.
cat: Cheshire Cat instance.
Returns:
list of modified chunked langchain `Document` to be optionally summarized and stored in episodic memory.
"""
return chunks
# Hook called when a list of Document is summarized from the rabbit hole.
# Should return a list of summaries (each is a langchain Document)
# To deactivate summaries, override this hook and just return an empty list
@hook(priority=0)
def rabbithole_summarizes_documents(docs: List[Document], cat) -> List[Document]:
"""Hook into the summarization pipeline.
Allows to modify how the list of `Document` is summarized before being inserted in the vector memory.
For example, the hook allows to make the summarization optional or to apply another summarization technique.
Args:
docs: list of langchain `Document` to be summarized.
cat: Cheshire Cat instance.
Returns:
list of langchain`Document` with text summaries of the original ones.
"""
# service variable to store intermediate results
intermediate_summaries = docs
# we will store iterative summaries all together in a list
all_summaries: List[Document] = []
# loop until there are no groups to summarize
group_size = 5
root_summary_flag = False
separator = "\n --> "
while not root_summary_flag:
# make summaries of groups of docs
new_summaries = []
for i in range(0, len(intermediate_summaries), group_size):
group = intermediate_summaries[i : i + group_size]
group = list(map(lambda d: d.page_content, group))
text_to_summarize = separator + separator.join(group)
summary = cat.summarization_chain.run(text_to_summarize)
summary = Document(page_content=summary)
summary.metadata["is_summary"] = True
new_summaries.append(summary)
# update list of all summaries
all_summaries = new_summaries.copy() + all_summaries
intermediate_summaries = new_summaries
# did we reach root summary?
root_summary_flag = len(intermediate_summaries) == 1
#log(f"Building summaries over {len(intermediate_summaries)} chunks. " "Please wait.", "INFO")
print(f"Building summaries over {len(intermediate_summaries)} chunks. " "Please wait.")
# return root summary (first element) and all intermediate summaries
return all_summaries
| [] |
2024-01-10 | tarunsamanta2k20/core | core~cat~mad_hatter~core_plugin~hooks~models.py | """Hooks to modify the Cat's language and embedding models.
Here is a collection of methods to hook into the settings of the Large Language Model and the Embedder.
"""
import os
import cat.factory.llm as llms
import cat.factory.embedder as embedders
from cat.db import crud
from langchain.llms.base import BaseLLM
from langchain.llms import Cohere, OpenAI, OpenAIChat, AzureOpenAI, HuggingFaceTextGenInference
from langchain import HuggingFaceHub
from langchain.chat_models import AzureChatOpenAI
from cat.mad_hatter.decorators import hook
@hook(priority=0)
def get_language_model(cat) -> BaseLLM:
"""Hook into the Large Language Model (LLM) selection.
Allows to modify how the Cat selects the LLM at bootstrap time.
Bootstrapping is the process of loading the plugins, the natural language objects (e.g. the LLM),
the memories, the *Agent Manager* and the *Rabbit Hole*.
Args:
cat: Cheshire Cat instance.
Returns:
langchain `BaseLLM` instance for the selected model.
"""
selected_llm = crud.get_setting_by_name(next(cat.db()), name="llm_selected")
if selected_llm is None:
# return default LLM
llm = llms.LLMDefaultConfig.get_llm_from_config({})
else:
# get LLM factory class
selected_llm_class = selected_llm.value["name"]
FactoryClass = getattr(llms, selected_llm_class)
# obtain configuration and instantiate LLM
selected_llm_config = crud.get_setting_by_name(
next(cat.db()), name=selected_llm_class
)
llm = FactoryClass.get_llm_from_config(selected_llm_config.value)
return llm
@hook(priority=0)
def get_language_embedder(cat):
"""Hook into the embedder selection.
Allows to modify how the Cat selects the embedder at bootstrap time.
Bootstrapping is the process of loading the plugins, the natural language objects (e.g. the LLM),
the memories, the *Agent Manager* and the *Rabbit Hole*.
Args:
cat: Cheshire Cat instance.
Returns:
Selected embedder model.
"""
# Embedding LLM
print("naked cat: ", cat.llm)
# OpenAI embedder
if type(cat.llm) in [OpenAI, OpenAIChat]:
embedder = embedders.EmbedderOpenAIConfig.get_embedder_from_config(
{
"openai_api_key": cat.llm.openai_api_key,
}
)
# Azure
elif type(cat.llm) in [AzureOpenAI, AzureChatOpenAI]:
embedder = embedders.EmbedderAzureOpenAIConfig.get_embedder_from_config(
{
"openai_api_key": cat.llm.openai_api_key,
"openai_api_type": "azure",
"model": "text-embedding-ada-002",
# Now the only model for embeddings is text-embedding-ada-002
# It is also possible to use the Azure "deployment" name that is user defined
# when the model is deployed to Azure.
# "deployment": "my-text-embedding-ada-002",
"openai_api_base": cat.llm.openai_api_base,
# https://learn.microsoft.com/en-us/azure/cognitive-services/openai/reference#embeddings
# current supported versions 2022-12-01,2023-03-15-preview, 2023-05-15
# Don't mix api versions https://github.com/hwchase17/langchain/issues/4775
"openai_api_version": "2023-05-15",
}
)
# Cohere
elif type(cat.llm) in [Cohere]:
embedder = embedders.EmbedderCohereConfig.get_embedder_from_config(
{
"cohere_api_key": cat.llm.cohere_api_key,
"model": "embed-multilingual-v2.0",
# Now the best model for embeddings is embed-multilingual-v2.0
}
)
# HuggingFace
elif type(cat.llm) in [HuggingFaceHub]:
embedder = embedders.EmbedderHuggingFaceHubConfig.get_embedder_from_config(
{
"huggingfacehub_api_token": cat.llm.huggingfacehub_api_token,
"repo_id": "sentence-transformers/all-mpnet-base-v2",
}
)
# elif "HF_TOKEN" in os.environ:
# if "HF_EMBEDDER" in os.environ:
# embedder = embedders.EmbedderHuggingFaceHubConfig.get_embedder_from_config(
# {
# "huggingfacehub_api_token": os.environ["HF_TOKEN"],
# "repo_id": os.environ["HF_EMBEDDER"],
# }
# )
# else:
# embedder = embedders.EmbedderHuggingFaceHubConfig.get_embedder_from_config(
# {
# "huggingfacehub_api_token": os.environ["HF_TOKEN"],
# # repo_id: "..." TODO: at the moment use default
# }
# )
else:
embedder = embedders.EmbedderFakeConfig.get_embedder_from_config(
{"size": 1536} # mock openai embedding size
)
return embedder | [] |
2024-01-10 | tarunsamanta2k20/core | core~cat~mad_hatter~decorators.py | from typing import Any, List, Union, Callable
from inspect import signature
from langchain.tools import BaseTool
from langchain.agents import Tool
# Cat hooks manager
class CatHooks:
__hooks: List = []
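# Registry of hook descriptors, populated by the @hook decorator below as plugin modules are loaded.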
@classmethod
def reset_hook_list(cls):
CatHooks.__hooks = []
@classmethod
def sort_hooks(cls):
# CatHooks.__hooks.sort(key=lambda x: x.count, reverse=True)
CatHooks.__hooks.sort(key=lambda x: x["priority"], reverse=True)
return CatHooks.__hooks
# append a hook
@classmethod
def add_hook(cls, hook):
CatHooks.__hooks.append(hook)
# get hook list
@classmethod
def get_hook_list(cls):
return CatHooks.__hooks
# @hook decorator. Any function in a plugin decorated by @hook and named properly (among list of available hooks) is used by the Cat
# @hook priority defaults to 1, the higher the more important. Hooks in the default core plugin have all priority=0 so they are automatically overwritten from plugins
def hook(_func=None, priority=1) -> Any:
def decorator(func):
def cat_hook_wrapper(*args, **kargs):
return func(*args, **kargs)
doc_string = func.__doc__
if doc_string is None:
doc_string = ""
CatHooks.add_hook(
{
"hook_function": cat_hook_wrapper,
"hook_name": func.__name__,
"docstring": func.__doc__,
"priority": float(priority),
"count": len(CatHooks.get_hook_list()),
}
)
if _func is None:
return decorator
else:
return decorator(_func)
# All @tool decorated functions in plugins become a CatTool.
# The difference between base langchain Tool and CatTool is that CatTool has an instance of the cat as attribute (set by the MadHatter)
class CatTool(Tool):
# Tool embedding is saved in the "procedural" vector DB collection.
# During CheshireCat.bootstrap(), after memory is loaded, the mad_hatter will retrieve the embedding from memory or create one if not present, and assign this attribute
embedding: List = None
# Tool docstring, is also available under self.func.__doc__
docstring: str = ""
# used by the MadHatter while loading plugins in order to let a Tool access the cat instance
def augment_tool(self, cat_instance):
self.cat = cat_instance
self.docstring = self.func.__doc__
# remove cat argument from description signature
# so it does not end up in prompts
cat_arg_signature = ", cat)"
if cat_arg_signature in self.description:
self.description = self.description.replace(cat_arg_signature, ")")
def _run(self, input_by_llm):
return self.func(input_by_llm, cat=self.cat)
async def _arun(self, input_by_llm):
# should be used for async Tools, just using sync here
return self._run(input_by_llm)  # _run takes only the LLM input; the cat instance is already stored on self
# override `extra = 'forbid'` for Tool pydantic model in langchain
class Config:
extra = "allow"
# @tool decorator, a modified version of a langchain Tool that also takes a Cat instance as argument
# adapted from https://github.com/hwchase17/langchain/blob/master/langchain/agents/tools.py
def tool(*args: Union[str, Callable], return_direct: bool = False) -> Callable:
"""Make tools out of functions, can be used with or without arguments.
Requires:
- Function must be of type (str) -> str
- Function must have a docstring
Examples:
.. code-block:: python
@tool
def search_api(query: str) -> str:
# Searches the API for the query.
return
@tool("search", return_direct=True)
def search_api(query: str) -> str:
# Searches the API for the query.
return
"""
def _make_with_name(tool_name: str) -> Callable:
def _make_tool(func: Callable[[str], str]) -> Tool:
assert func.__doc__, "Function must have a docstring"
# Description example:
# search_api(query: str) - Searches the API for the query.
description = f"{tool_name}{signature(func)} - {func.__doc__.strip()}"
tool_ = CatTool(
name=tool_name,
func=func,
description=description,
return_direct=return_direct,
)
return tool_
return _make_tool
if len(args) == 1 and isinstance(args[0], str):
# if the argument is a string, then we use the string as the tool name
# Example usage: @tool("search", return_direct=True)
return _make_with_name(args[0])
elif len(args) == 1 and callable(args[0]):
# if the argument is a function, then we use the function name as the tool name
# Example usage: @tool
return _make_with_name(args[0].__name__)(args[0])
elif len(args) == 0:
# if there are no arguments, then we use the function name as the tool name
# Example usage: @tool(return_direct=True)
def _partial(func: Callable[[str], str]) -> BaseTool:
return _make_with_name(func.__name__)(func)
return _partial
else:
raise ValueError("Too many arguments for tool decorator")
| [] |
2024-01-10 | tarunsamanta2k20/core | core~cat~looking_glass~cheshire_cat.py | import time
import traceback
import langchain
from cat.log import log
from cat.db.database import get_db_session, create_db_and_tables
from cat.rabbit_hole import RabbitHole
from cat.mad_hatter.mad_hatter import MadHatter
from cat.memory.working_memory import WorkingMemory
from cat.memory.long_term_memory import LongTermMemory
from cat.looking_glass.agent_manager import AgentManager
# main class
class CheshireCat:
def __init__(self):
# access to DB
self.load_db()
# bootstrap the cat!
self.bootstrap()
# queue of cat messages not directly related to last user input
# i.e. finished uploading a file
self.web_socket_notifications = []
def bootstrap(self):
"""This method is called when the cat is instantiated and
has to be called whenever LLM, embedder,
agent or memory need to be reinstantiated
(for example an LLM change at runtime)
"""
# reinstantiate MadHatter (reloads all plugins' hooks and tools)
self.load_plugins()
# allows plugins to do something before cat components are loaded
self.mad_hatter.execute_hook("before_cat_bootstrap")
# load LLM and embedder
self.load_natural_language()
# Load memories (vector collections and working_memory)
self.load_memory()
# After memory is loaded, we can get/create tools embeddings
self.mad_hatter.embed_tools()
# Agent manager instance (for reasoning)
self.agent_manager = AgentManager(self)
# Rabbit Hole Instance
self.rabbit_hole = RabbitHole(self)
# allows plugins to do something after the cat bootstrap is complete
self.mad_hatter.execute_hook("after_cat_bootstrap")
def load_db(self):
# if there is no db, create it
create_db_and_tables()
# access db from instance
self.db = get_db_session
def load_natural_language(self):
# LLM and embedder
self.llm = self.mad_hatter.execute_hook("get_language_model")
self.embedder = self.mad_hatter.execute_hook("get_language_embedder")
# HyDE chain
hypothesis_prompt = langchain.PromptTemplate(
input_variables=["input"],
template=self.mad_hatter.execute_hook("hypothetical_embedding_prompt"),
)
self.hypothetis_chain = langchain.chains.LLMChain(prompt=hypothesis_prompt, llm=self.llm)
self.summarization_prompt = self.mad_hatter.execute_hook("summarization_prompt")
# custom summarization chain
self.summarization_chain = langchain.chains.LLMChain(
llm=self.llm,
verbose=False,
prompt=langchain.PromptTemplate(template=self.summarization_prompt, input_variables=["text"]),
)
def load_memory(self):
# Memory
vector_memory_config = {"cat": self, "verbose": True}
self.memory = LongTermMemory(vector_memory_config=vector_memory_config)
self.working_memory = WorkingMemory()
def load_plugins(self):
# Load plugin system
self.mad_hatter = MadHatter(self)
def recall_relevant_memories_to_working_memory(self, user_message):
# hook to do something before recall begins
self.mad_hatter.execute_hook("before_cat_recalls_memories", user_message)
# We may want to search in memory
memory_query_text = self.mad_hatter.execute_hook("cat_recall_query", user_message)
log(f'Recall query: "{memory_query_text}"')
# embed recall query
memory_query_embedding = self.embedder.embed_query(memory_query_text)
self.working_memory["memory_query"] = memory_query_text
# recall relevant memories (episodic)
episodic_memories = self.memory.vectors.episodic.recall_memories_from_embedding(
embedding=memory_query_embedding
)
self.working_memory["episodic_memories"] = episodic_memories
# recall relevant memories (declarative)
declarative_memories = self.memory.vectors.declarative.recall_memories_from_embedding(
embedding=memory_query_embedding
)
self.working_memory["declarative_memories"] = declarative_memories
# hook to modify/enrich retrieved memories
self.mad_hatter.execute_hook("after_cat_recalled_memories", memory_query_text)
def format_agent_executor_input(self):
# format memories to be inserted in the prompt
episodic_memory_formatted_content = self.mad_hatter.execute_hook(
"agent_prompt_episodic_memories",
self.working_memory["episodic_memories"],
)
declarative_memory_formatted_content = self.mad_hatter.execute_hook(
"agent_prompt_declarative_memories",
self.working_memory["declarative_memories"],
)
# format conversation history to be inserted in the prompt
conversation_history_formatted_content = self.mad_hatter.execute_hook(
"agent_prompt_chat_history", self.working_memory["history"]
)
return {
"input": self.working_memory["user_message_json"]["text"],
"episodic_memory": episodic_memory_formatted_content,
"declarative_memory": declarative_memory_formatted_content,
"chat_history": conversation_history_formatted_content,
"ai_prefix": "AI",
}
def __call__(self, user_message_json):
log(user_message_json, "DEBUG")
# hook to modify/enrich user input
user_message_json = self.mad_hatter.execute_hook("before_cat_reads_message", user_message_json)
# store user_message_json in working memory
self.working_memory["user_message_json"] = user_message_json
# extract actual user message text
user_message = user_message_json["text"]
# recall episodic and declarative memories from vector collections
# and store them in working_memory
try:
self.recall_relevant_memories_to_working_memory(user_message)
except Exception as e:
log(e)
traceback.print_exc()  # print_exc takes no exception argument; it reports the currently handled exception
err_message = (
"Vector memory error: you probably changed "
"Embedder and old vector memory is not compatible. "
"Please delete `core/long_term_memory` folder."
)
return {
"error": False,
# TODO: Otherwise the frontend gives notice of the error
# but does not show what the error is
"content": err_message,
"why": {},
}
# prepare input to be passed to the agent executor.
# Info will be extracted from working memory
agent_executor_input = self.format_agent_executor_input()
# load agent (will rebuild both agent and agent_executor
# based on context and plugins)
agent_executor = self.agent_manager.get_agent_executor()
# reply with agent
try:
cat_message = agent_executor(agent_executor_input)
except Exception as e:
# This error happens when the LLM
# does not respect prompt instructions.
# We grab the LLM output here anyway, so small and
# non instruction-fine-tuned models can still be used.
error_description = str(e)
if not "Could not parse LLM output: `" in error_description:
raise e
unparsable_llm_output = error_description.replace("Could not parse LLM output: `", "").replace("`", "")
cat_message = {"output": unparsable_llm_output}
log(cat_message, "DEBUG")
# update conversation history
self.working_memory.update_conversation_history(who="Human", message=user_message)
self.working_memory.update_conversation_history(who="AI", message=cat_message["output"])
# store user message in episodic memory
# TODO: vectorize and store also conversation chunks
# (not raw dialog, but summarization)
_ = self.memory.vectors.episodic.add_texts(
[user_message],
[{"source": "user", "when": time.time()}],
)
# build data structure for output (response and why with memories)
episodic_report = [dict(d[0]) | {"score": float(d[1])} for d in self.working_memory["episodic_memories"]]
declarative_report = [dict(d[0]) | {"score": float(d[1])} for d in self.working_memory["declarative_memories"]]
final_output = {
"error": False,
"type": "chat",
"content": cat_message.get("output"),
"why": {
"input": cat_message.get("input"),
"intermediate_steps": cat_message.get("intermediate_steps"),
"memory": {
"vectors": {
"episodic": episodic_report,
"declarative": declarative_report,
}
},
},
}
final_output = self.mad_hatter.execute_hook("before_cat_sends_message", final_output)
return final_output
| [
"hypothetical_embedding_prompt",
"input"
] |
2024-01-10 | tarunsamanta2k20/core | core~cat~memory~vector_memory.py | import os
import sys
import socket
import time
from typing import Any, Callable
from cat.log import log
from qdrant_client import QdrantClient
from langchain.vectorstores import Qdrant
from langchain.docstore.document import Document
from qdrant_client.http.models import (Distance, VectorParams, SearchParams,
ScalarQuantization, ScalarQuantizationConfig, ScalarType, QuantizationSearchParams)
# TODO: hook get_embedder_size and remove dict
class VectorMemory:
def __init__(self, cat, verbose=False) -> None:
self.verbose = verbose
# Get embedder from Cat instance
self.embedder = cat.embedder
if self.embedder is None:
raise Exception("No embedder passed to VectorMemory")
qdrant_host = os.getenv("VECTOR_MEMORY_HOST", "cheshire_cat_vector_memory")
qdrant_port = int(os.getenv("VECTOR_MEMORY_PORT", 6333))
try:
s = socket.socket()
s.connect((qdrant_host, qdrant_port))
except Exception:
log("QDrant does not respond to %s:%s" % (qdrant_host, qdrant_port), "ERROR")
sys.exit()
finally:
s.close()
# Qdrant vector DB client
self.vector_db = QdrantClient(
host=qdrant_host,
port=qdrant_port,
)
# Episodic memory will contain user and eventually cat utterances
self.episodic = VectorMemoryCollection(
cat=cat,
client=self.vector_db,
collection_name="episodic",
embedding_function=self.embedder.embed_query,
)
# Declarative memory will contain uploaded documents' content (and summaries)
self.declarative = VectorMemoryCollection(
cat=cat,
client=self.vector_db,
collection_name="declarative",
embedding_function=self.embedder.embed_query,
)
# Procedural memory will contain tools and knowledge on how to do things
self.procedural = VectorMemoryCollection(
cat=cat,
client=self.vector_db,
collection_name="procedural",
embedding_function=self.embedder.embed_query,
)
# Dictionary containing all collections
# Useful for cross-searching and to create/use collections from plugins
self.collections = {
"episodic": self.episodic,
"declarative": self.declarative,
"procedural": self.procedural,
}
class VectorMemoryCollection(Qdrant):
def __init__(self, cat, client: Any, collection_name: str, embedding_function: Callable):
super().__init__(client, collection_name, embedding_function)
# Get a Cat instance
self.cat = cat
# Check if memory collection exists, otherwise create it and add first memory
self.create_collection_if_not_exists()
def create_collection_if_not_exists(self):
# create collection if it does not exist
try:
self.client.get_collection(self.collection_name)
tabula_rasa = False
log(f'Collection "{self.collection_name}" already present in vector store', "INFO")
except:
log(f"Creating collection {self.collection_name} ...", "INFO")
self.client.recreate_collection(
collection_name=self.collection_name,
vectors_config=VectorParams(size=1536, distance=Distance.COSINE),
quantization_config=ScalarQuantization(
scalar=ScalarQuantizationConfig(
type=ScalarType.INT8,
quantile=0.99,
always_ram=False
)
)
# TODO: if we change the embedder, how do we know the dimensionality?
)
tabula_rasa = True
# TODO: if the embedder changed, a new vectorstore must be created
# TODO: need a more elegant solution
if tabula_rasa:
# Hard coded overridable first memory saved in both collections
first_memory = Document(
page_content="I am the Cheshire Cat", metadata={"source": "cheshire-cat", "when": time.time()}
)
# Execute hook to override the first inserted memory
first_memory = self.cat.mad_hatter.execute_hook("before_collection_created", first_memory)
# insert first point in the collection
self.add_texts(
[first_memory.page_content],
[first_memory.metadata],
)
log(dict(self.client.get_collection(self.collection_name)), "DEBUG")
# retrieve similar memories from text
def recall_memories_from_text(self, text, metadata=None, k=3):
# embed the text
query_embedding = self.embedding_function(text)
# search nearest vectors
return self.recall_memories_from_embedding(query_embedding, metadata=metadata, k=k)
# retrieve similar memories from embedding
def recall_memories_from_embedding(self, embedding, metadata=None, k=3):
# retrieve memories
memories = self.client.search(
collection_name=self.collection_name,
query_vector=embedding,
query_filter=self._qdrant_filter_from_dict(metadata),
with_payload=True,
with_vectors=True,
limit=k,
search_params=SearchParams(
quantization=QuantizationSearchParams(
ignore=False,
rescore=True
)
)
)
return [
(
self._document_from_scored_point(m, self.content_payload_key, self.metadata_payload_key),
m.score,
m.vector
)
for m in memories
]
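    # Each recalled memory is a (Document, score, vector) triple, so a caller can do,
    # for example (hypothetical collection instance):
    #
    #   for doc, score, _vec in collection.recall_memories_from_text("tea party", k=5):
    #       print(score, doc.page_content, doc.metadata.get("source"))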
| [] |
2024-01-10 | tarunsamanta2k20/core | core~cat~rabbit_hole.py | import os
import time
import tempfile
import mimetypes
from typing import List, Union
from cat.log import log
from starlette.datastructures import UploadFile
from langchain.document_loaders import (
PDFMinerLoader,
UnstructuredURLLoader,
UnstructuredFileLoader,
UnstructuredMarkdownLoader,
)
from langchain.docstore.document import Document
class RabbitHole:
def __init__(self, cat):
self.cat = cat
def ingest_file(
self,
file: Union[str, UploadFile],
chunk_size: int = 400,
chunk_overlap: int = 100,
):
"""
Load a given file in the Cat's memory.
:param file: absolute path of the file or UploadFile
if ingested from the GUI
        :param chunk_size: number of characters the text is split into
:param chunk_overlap: number of overlapping characters
between consecutive chunks
"""
# split file into a list of docs
docs = self.file_to_docs(
file=file, chunk_size=chunk_size, chunk_overlap=chunk_overlap
)
# get summaries
summaries = self.cat.mad_hatter.execute_hook(
"rabbithole_summarizes_documents", docs
)
docs = summaries + docs
# store in memory
if isinstance(file, str):
filename = file
else:
filename = file.filename
self.store_documents(docs=docs, source=filename)
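    # Minimal usage sketch, assuming `cat.rabbit_hole` holds an instance of this class
    # and the file is one of the supported types (plain text, markdown, PDF):
    #
    #   cat.rabbit_hole.ingest_file("/path/to/notes.pdf", chunk_size=400, chunk_overlap=100)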
def ingest_url(
self,
url: str,
chunk_size: int = 400,
chunk_overlap: int = 100,
):
"""
Load a given website in the Cat's memory.
:param url: URL of the website to which you want to save the content
        :param chunk_size: number of characters the text is split into
:param chunk_overlap: number of overlapping characters
between consecutive chunks
"""
# get website content and split into a list of docs
docs = self.url_to_docs(
url=url, chunk_size=chunk_size, chunk_overlap=chunk_overlap
)
# get summaries
summaries = self.cat.mad_hatter.execute_hook(
"rabbithole_summarizes_documents", docs
)
docs = summaries + docs
# store docs in memory
self.store_documents(docs=docs, source=url)
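    # Same pattern for websites, e.g. (hypothetical URL):
    #
    #   cat.rabbit_hole.ingest_url("https://example.com/article", chunk_size=400, chunk_overlap=100)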
def url_to_docs(
self,
url: str,
chunk_size: int = 400,
chunk_overlap: int = 100,
) -> List[Document]:
"""
Scrape website content and chunk it to a list of Documents.
:param url: URL of the website to which you want to save the content
        :param chunk_size: number of characters the text is split into
:param chunk_overlap: number of overlapping characters
between consecutive chunks
"""
# load text content of the website
loader = UnstructuredURLLoader(urls=[url])
text = loader.load()
docs = self.split_text(text, chunk_size, chunk_overlap)
return docs
def file_to_docs(
self,
file: Union[str, UploadFile],
chunk_size: int = 400,
chunk_overlap: int = 100,
) -> List[Document]:
"""
Parse a file and chunk it to a list of Documents.
        The file can either be ingested from the web GUI, REST API,
or using the *cat.rabbit_hole.send_file_in_rabbit_hole* method.
:param file: absolute path of the file or UploadFile
if ingested from the GUI
:param chunk_size: number of characters
            the text is split into
:param chunk_overlap: number of overlapping characters
between consecutive chunks
"""
# Create temporary file
temp_file = tempfile.NamedTemporaryFile(dir="/tmp/", delete=False)
temp_name = temp_file.name
# Check type of incoming file.
# It can be either UploadFile if coming from GUI
        # or an absolute path if auto-ingested by the Cat
if isinstance(file, UploadFile):
# Get mime type of UploadFile
# content_type = file.content_type
content_type = mimetypes.guess_type(file.filename)[0]
# Get file bytes
file_bytes = file.file.read()
elif isinstance(file, str):
# Get mime type from file extension
content_type = mimetypes.guess_type(file)[0]
# Get file bytes
with open(file, "rb") as f:
file_bytes = f.read()
else:
raise ValueError(f"{type(file)} is not a valid type.")
# Open temp file in binary write mode
with open(temp_name, "wb") as temp_binary_file:
# Write bytes to file
temp_binary_file.write(file_bytes)
# decide loader
if content_type == "text/plain":
loader = UnstructuredFileLoader(temp_name)
elif content_type == "text/markdown":
loader = UnstructuredMarkdownLoader(temp_name)
elif content_type == "application/pdf":
loader = PDFMinerLoader(temp_name)
else:
raise Exception("MIME type not supported for upload")
# extract text from file
text = loader.load()
# delete tmp file
os.remove(temp_name)
docs = self.split_text(text, chunk_size, chunk_overlap)
return docs
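    # file_to_docs can also be called on its own; the loader is chosen from the MIME
    # type as above, e.g.:
    #
    #   docs = cat.rabbit_hole.file_to_docs("/path/to/readme.md", chunk_size=200, chunk_overlap=50)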
def store_documents(self, docs: List[Document], source: str) -> None:
"""
Load a list of Documents in the Cat's declarative memory.
:param docs: a list of documents to store in memory
:param source: a string representing the source,
either the file name or the website URL
"""
log(f"Preparing to memorize {len(docs)} vectors")
# classic embed
for d, doc in enumerate(docs):
doc.metadata["source"] = source
doc.metadata["when"] = time.time()
doc = self.cat.mad_hatter.execute_hook(
"before_rabbithole_insert_memory", doc
)
inserting_info = f"{d + 1}/{len(docs)}): {doc.page_content}"
if doc.page_content != "":
_ = self.cat.memory.vectors.declarative.add_texts(
[doc.page_content],
[doc.metadata],
)
#log(f"Inserted into memory({inserting_info})", "INFO")
print(f"Inserted into memory({inserting_info})")
else:
log(f"Skipped memory insertion of empty doc ({inserting_info})", "INFO")
# wait a little to avoid APIs rate limit errors
time.sleep(0.1)
# notify client
finished_reading_message = f"Finished reading {source}, " \
f"I made {len(docs)} thoughts on it."
self.cat.web_socket_notifications.append(
{
"error": False,
"type": "notification",
"content": finished_reading_message,
"why": {},
}
)
print(f"\n\nDone uploading {source}")
def split_text(self, text, chunk_size, chunk_overlap):
        # do something on the text before it is split
text = self.cat.mad_hatter.execute_hook(
"before_rabbithole_splits_text", text
)
# split the documents using chunk_size and chunk_overlap
docs = self.cat.mad_hatter.execute_hook(
"rabbithole_splits_text", text, chunk_size, chunk_overlap
)
        # do something on the text after it is split
docs = self.cat.mad_hatter.execute_hook(
"after_rabbithole_splitted_text", docs
)
return docs
| [] |
2024-01-10 | DaveParr/starpilot | starpilot~utils~kinda_test.py | import os
import dotenv
import github
import utils
from langchain.embeddings import GPT4AllEmbeddings
from langchain.vectorstores import Chroma
from rich import print
dotenv.load_dotenv()
git_hub_key = os.getenv("GITHUB_API_KEY")
GITHUB_CONNECTION = github.Github(git_hub_key)
# utils.get_user_starred_repos("DaveParr", GITHUB_CONNECTION, num_repos=2)
repo_names = [
"starship/starship", # totally fine
"django/django", # description and topic but readme is rst
"StateOfCalifornia/CalCAT", # description but no topic
"joeycastillo/The-Open-Book", # no description or topic or language
"vicenews/shot-by-cops", # no description or topic
"pytorch/pytorch", # no owner or organisation
]
repo_list = []
pytorch_repo = GITHUB_CONNECTION.get_repo("pytorch/pytorch")
for repo in repo_names:
repo_content = GITHUB_CONNECTION.get_repo(repo)
repo_list.append(repo_content)
print(repo_list[0].full_name)
repo_descriptions = []
content = utils.get_repo_contents(repo_list, False, GITHUB_CONNECTION)
utils.save_repo_contents_to_disk(content, "./tmp")
generic_documents = utils.prepare_documents("./tmp")
Chroma.from_documents(
documents=generic_documents,
embedding=GPT4AllEmbeddings(disallowed_special=()),
persist_directory="./tmp",
)
retriever = utils.create_retriever(
vectorstore_path="./tmp",
k=2,
method="similarity",
)
response = retriever.get_relevant_documents("starship/starship")
print(utils.create_results_table(response))
| [] |
2024-01-10 | CryptoDevWill/ArcAngelGPT | controller~components~chat~web_scrape.py | import requests
from bs4 import BeautifulSoup
from controller.data.conversation import Conversation
from controller import Thinking
from controller.play_sound import play_sound
from controller.utils import get_tokenz
import openai
import re
def web_scrape(url, user_input, chat_window):
conversation = Conversation()
thinking = Thinking()
if not url.startswith("http://") and not url.startswith("https://"):
# Try with http://
http_url = "http://" + url
https_url = "https://" + url
for url in [http_url, https_url]:
try:
thinking.set(True)
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36',
'Accept-Language': 'en-US,en;q=0.5',
'Screen-Size': '1920x1080'
}
proxy_url = None # Set to your proxy address when needed
if proxy_url:
proxies = {'http': proxy_url, 'https': proxy_url}
else:
proxies = None
response = requests.get(url, headers=headers, proxies=proxies)
response.raise_for_status()
soup = BeautifulSoup(response.text, 'html.parser')
text = soup.get_text()
# Preprocess text to remove unnecessary whitespace
text = re.sub(r'\s+', ' ', text).strip()
get_tokenz(text, conversation.get(), chat_window, thinking, play_sound)
conversation.append({"role": "assistant", "content": "Browsing the link now. please wait.."})
chat_window.update_conversation()
play_sound('response')
return gpt_webscrape_response(url, user_input, text, chat_window)
except requests.exceptions.RequestException:
pass
# If none of the URLs worked, raise an exception
raise Exception(f"Could not connect to {url}")
else:
# URL includes a protocol, so proceed as before
try:
thinking.set(True)
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36',
'Accept-Language': 'en-US,en;q=0.5',
'Screen-Size': '1920x1080'
}
proxy_url = None # Set to your proxy address when needed
if proxy_url:
proxies = {'http': proxy_url, 'https': proxy_url}
else:
proxies = None
response = requests.get(url, headers=headers, proxies=proxies)
response.raise_for_status()
soup = BeautifulSoup(response.text, 'html.parser')
text = soup.get_text()
# Preprocess text to remove unnecessary whitespace
text = re.sub(r'\s+', ' ', text).strip()
get_tokenz(text, conversation, chat_window, thinking, play_sound)
conversation.append({"role": "assistant", "content": "Browsing the link now. please wait.."})
chat_window.update_conversation()
play_sound('response')
return gpt_webscrape_response(url, user_input, text, chat_window)
except requests.exceptions.RequestException as e:
            conversation.append({"role": "assistant", "content": str(e)})
chat_window.update_conversation()
play_sound('error')
thinking.set(False)
def gpt_webscrape_response(url, user_input, text, chat_window):
# Send web text to chatgpt
conversation = Conversation()
thinking = Thinking()
thinking.set(True)
try:
prompt = f"{user_input}. This is the url {url} and this is the content -> '{text}'"
completion = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
max_tokens=2000,
temperature=0.3
)
response = completion.choices[0]['text']
conversation.append({"role": "assistant", "content": response})
play_sound("response")
except openai.error.InvalidRequestError as e:
error_message = "Error: " + str(e)
conversation.append({"role": "assistant", "content": error_message})
play_sound("error")
except openai.error.AuthenticationError as e:
error_message = "Error: " + str(e)
conversation.append({"role": "assistant", "content": error_message})
play_sound("error")
finally:
chat_window.update_conversation()
thinking.set(False)
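# A minimal call sketch, assuming the Conversation singleton is initialised and
# `chat_window` is the app's chat widget:
#
#   web_scrape("example.com", "Summarize this page for me", chat_window)
#
# URLs without a scheme are retried with both http:// and https:// prefixes as above.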
| [
"Browsing the link now. please wait..",
"PLACEHOLDER. This is the url PLACEHOLDER and this is the content -> 'PLACEHOLDER'"
] |
2024-01-10 | CryptoDevWill/ArcAngelGPT | model~file_uploads~response_chunks.py | import os
import openai
import traceback
from controller.data.conversation import Conversation
from controller.play_sound import play_sound
openai.api_key = os.getenv("OPENAI_API_KEY")
def response_chunks(chunks, chat_window):
print('its moving main')
conversation = Conversation()
try:
combined_response = ""
total_chunks = len(chunks)
for idx, chunk in enumerate(chunks, start=1):
print('its moving for loop')
conversation.append({"role": "assistant", "content": f"Processing chunk {idx} out of {total_chunks} please wait..."})
chat_window.update_conversation()
prompt = f"{chunk['prompt']} this is the data. Only output the data asked for in the provided prompt. Do not add any extra details or text that has not been asked for. '{chunk['chunk']}'"
play_sound("system")
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
max_tokens=1500,
temperature=0
)
extracted_info = response.choices[0].text.strip()
combined_response += extracted_info + " "
conversation.append({"role": "assistant", "content": extracted_info})
play_sound("response")
chat_window.update_conversation()
return combined_response.strip()
except Exception as e:
error_message = f"An error occurred: {str(e)}\n{traceback.format_exc()}"
conversation.append({"role": "assistant", "content": error_message})
chat_window.update_conversation()
play_sound("error")
return error_message
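# response_chunks expects each chunk as a dict with "prompt" and "chunk" keys,
# e.g. (hypothetical data):
#
#   chunks = [{"prompt": "List all dates mentioned", "chunk": "...first part of the file..."},
#             {"prompt": "List all dates mentioned", "chunk": "...second part of the file..."}]
#   combined = response_chunks(chunks, chat_window)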
| [
"PLACEHOLDER this is the data. Only output the data asked for in the provided prompt. Do not add any extra details or text that has not been asked for. 'PLACEHOLDER'",
"Processing chunk PLACEHOLDER out of PLACEHOLDER please wait..."
] |
2024-01-10 | CryptoDevWill/ArcAngelGPT | controller~_init.py | import os
import openai
from controller.data.conversation import Conversation
from controller.play_sound import play_sound
from controller.speak import speak
from controller.data.global_variables import Loading
from controller.utils.load_settings import load_settings
def _init(chat_window):
load_settings()
conversation = Conversation()
openai.api_key = os.environ.get("OPENAI_API_KEY")
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=conversation.get()
)
response = completion.choices[0].message.content
print(response)
conversation.append({"role": "assistant", "content": response})
chat_window.update_conversation()
play_sound("response")
# speak(response)
Loading().set(False)
| [] |
2024-01-10 | CryptoDevWill/ArcAngelGPT | controller~components~chat~gpt_response.py | import tkinter as tk
from controller.data.conversation import Conversation
from controller.data.global_variables import Thinking
from controller.play_sound import play_sound
from controller.tools import parse_command
from controller.components.file_tree import get_file_tree
import threading
import os
import openai
openai.api_key = os.getenv("OPENAI_API_KEY")
def gpt_response(user_input, chat_window):
conversation = Conversation()
try:
Thinking().set(True)
conversation_length = len(conversation)
conversation.append({"role": "system", "content": (
f"Your file tree is {get_file_tree()}. The Operating system is {os.name}, "
f"so please only include commands that are compatible with my OS.\n"
f"The user should not be expected to use STDIN or STDOUT.\n"
f"If it is simple you can just use answer, but if a command is needed, use command.\n"
f"You must use the following structure, using command or answer:\n"
f"{{\n"
f" \"commands\": [\n"
f" {{\n"
f" \"command\": \"command to execute\",\n"
f" \"answer\": \"answer to a question, or no json possible\"\n"
f" \"description\": \"description of command\"\n"
f" }}\n"
f" ],\n"
f"}}"
f"Do not use more than one json response in a single message."
)})
completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=conversation.get())
conversation.pop(conversation_length)
chat_response = completion.choices[0].message
#conversation.append({"role": "assistant", "content": chat_response.content})
if chat_response:
command = threading.Thread(target=parse_command, args=(chat_response.content,))
command.start()
except openai.error.InvalidRequestError as e:
error_message = "Error: " + str(e)
conversation.append({"role": "assistant", "content": error_message})
Thinking().set(False)
except openai.error.AuthenticationError as e:
error_message = "Error: " + str(e)
conversation.append({"role": "assistant", "content": error_message})
Thinking().set(False)
finally:
chat_window.update_conversation()
play_sound("response")
Thinking().set(False)
| [
" }\n",
"{\n",
"Do not use more than one json response in a single message.",
"If it is simple you can just use answer, but if a command is needed, use command.\n",
" \"description\": \"description of command\"\n",
" ],\n",
" {\n",
" \"command\": \"command to execute\",\n",
" \"answer\": \"answer to a question, or no json possible\"\n",
"The user should not be expected to use STDIN or STDOUT.\n",
"so please only include commands that are compatible with my OS.\n",
" \"commands\": [\n",
"You must use the following structure, using command or answer:\n"
] |
2024-01-10 | CryptoDevWill/ArcAngelGPT | model~file_uploads~file_upload.py | import tkinter as tk
from tkinter import filedialog
from tkinter import ttk
from controller.data.conversation import Conversation
from controller.play_sound import play_sound
from controller.data.global_variables import ReadMode, WorkMode, Thinking
from model.file_uploads.process_chunks import process_chunks
from model.file_uploads.response_chunks import response_chunks
import openai
def open_file(clear_button):
file_path = filedialog.askopenfilename()
if file_path:
with open(file_path, "r") as file:
content = file.read()
# print(f"File: {file_path}\nContent:\n{content}")
read_mode = ReadMode()
read_mode.set(True)
read_mode.content = content
def clear_read_mode(clear_button):
ReadMode().set(False)
def update_clear_button(clear_button):
read_mode_status = ReadMode().get()
if read_mode_status:
clear_button.pack(side=tk.LEFT, padx=(5, 0))
else:
clear_button.pack_forget()
def upload_button(parent):
style = ttk.Style()
style.configure("Blue.TButton", foreground="#ffffff")
style.map("Blue.TButton",
background=[("active", "#0073e6"), ("pressed", "#004799"), ("!disabled", "#0073e6")])
style.configure("Red.TButton", foreground="#ffffff", background="#ff0000")
style.configure("TLabel", background="#282a2d", foreground="#aab0b6")
upload_button = ttk.Button(parent, text="Upload File", style="Blue.TButton", command=lambda: open_file(parent.clear_button))
upload_button.pack(side=tk.RIGHT, padx=(0, 1))
parent.clear_button = ttk.Button(parent, text="X", style="Red.TButton", width=1, command=lambda: clear_read_mode(parent.clear_button))
# Don't pack the clear_button here, it will be packed when read_mode is set to True
ReadMode().set_callback(lambda: update_clear_button(parent.clear_button))
def upload_response(user_input, chat_window):
# TODO: Determine course of action for providing content
conversation = Conversation()
content = ReadMode().content # <-- The problem line
ReadMode().set(False)
WorkMode().set(True)
Thinking().set(True)
try:
chunks = process_chunks(user_input, content)
response = response_chunks(chunks, chat_window)
conversation.append({"role": "assistant", "content": response})
play_sound("response")
except openai.error.InvalidRequestError as e:
error_message = "Error: " + str(e)
conversation.append({"role": "assistant", "content": error_message})
play_sound("error")
except openai.error.AuthenticationError as e:
error_message = "Error: " + str(e)
conversation.append({"role": "assistant", "content": error_message})
play_sound("error")
finally:
WorkMode().set(False)
Thinking().set(False)
chat_window.update_conversation()
| [] |
2024-01-10 | gengyuanmax/MeVTR | modules~modeling_multievent.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import torch
from torch import nn
from modules.until_module import PreTrainedModel, AllGather, CrossEnMulti, CrossEnMulti_unbalanced
from modules.module_cross import CrossModel, CrossConfig, Transformer as TransformerClip
from modules.module_clip import CLIP, convert_weights
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from .cluster.fast_kmeans import batch_fast_kmedoids
logger = logging.getLogger(__name__)
# allgather = AllGather.apply
from modules.until_module import all_gather_only as allgather
class MeRetrieverPreTrainedModel(PreTrainedModel, nn.Module):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
def __init__(self, cross_config, *inputs, **kwargs):
super(MeRetrieverPreTrainedModel, self).__init__(cross_config)
self.cross_config = cross_config
self.clip = None
self.cross = None
@classmethod
def from_pretrained(cls, cross_model_name, state_dict=None, cache_dir=None, type_vocab_size=2, *inputs, **kwargs):
task_config = None
if "task_config" in kwargs.keys():
task_config = kwargs["task_config"]
if not hasattr(task_config, "local_rank"):
task_config.__dict__["local_rank"] = 0
elif task_config.local_rank == -1:
task_config.local_rank = 0
if state_dict is None: state_dict = {}
pretrained_clip_name = "ViT-B/32"
if hasattr(task_config, 'pretrained_clip_name'):
pretrained_clip_name = task_config.pretrained_clip_name
clip_state_dict = CLIP.get_config(pretrained_clip_name=pretrained_clip_name)
for key, val in clip_state_dict.items():
new_key = "clip." + key
if new_key not in state_dict:
state_dict[new_key] = val.clone()
cross_config, _ = CrossConfig.get_config(cross_model_name, cache_dir, type_vocab_size, state_dict=None,
task_config=task_config)
model = cls(cross_config, clip_state_dict, *inputs, **kwargs)
## ===> Initialization trick [HARD CODE]
if model.linear_patch == "3d":
contain_conv2 = False
for key in state_dict.keys():
if key.find("visual.conv2.weight") > -1:
contain_conv2 = True
break
if contain_conv2 is False and hasattr(model.clip.visual, "conv2"):
cp_weight = state_dict["clip.visual.conv1.weight"].clone()
kernel_size = model.clip.visual.conv2.weight.size(2)
conv2_size = model.clip.visual.conv2.weight.size()
conv2_size = list(conv2_size)
left_conv2_size = conv2_size.copy()
right_conv2_size = conv2_size.copy()
left_conv2_size[2] = (kernel_size - 1) // 2
right_conv2_size[2] = kernel_size - 1 - left_conv2_size[2]
left_zeros, right_zeros = None, None
if left_conv2_size[2] > 0:
left_zeros = torch.zeros(*tuple(left_conv2_size), dtype=cp_weight.dtype, device=cp_weight.device)
if right_conv2_size[2] > 0:
right_zeros = torch.zeros(*tuple(right_conv2_size), dtype=cp_weight.dtype, device=cp_weight.device)
cat_list = []
if left_zeros is not None: cat_list.append(left_zeros)
cat_list.append(cp_weight.unsqueeze(2))
if right_zeros is not None: cat_list.append(right_zeros)
cp_weight = torch.cat(cat_list, dim=2)
state_dict["clip.visual.conv2.weight"] = cp_weight
if model.sim_header == 'tightTransf':
contain_cross = False
for key in state_dict.keys():
if key.find("cross.transformer") > -1:
contain_cross = True
break
if contain_cross is False:
for key, val in clip_state_dict.items():
if key == "positional_embedding":
state_dict["cross.embeddings.position_embeddings.weight"] = val.clone()[:cross_config.max_position_embeddings]
continue
if key.find("transformer.resblocks") == 0:
num_layer = int(key.split(".")[2])
# cut from beginning
if num_layer < task_config.cross_num_hidden_layers:
state_dict["cross." + key] = val.clone()
continue
if model.sim_header == "seqLSTM" or model.sim_header == "seqTransf":
contain_frame_position = False
for key in state_dict.keys():
if key.find("frame_position_embeddings") > -1:
contain_frame_position = True
break
if contain_frame_position is False:
for key, val in clip_state_dict.items():
if key == "positional_embedding":
state_dict["frame_position_embeddings.weight"] = val.clone()
continue
if model.sim_header == "seqTransf" and key.find("transformer.resblocks") == 0:
num_layer = int(key.split(".")[2])
# cut from beginning
if num_layer < task_config.cross_num_hidden_layers:
state_dict[key.replace("transformer.", "transformerClip.")] = val.clone()
continue
## <=== End of initialization trick
if state_dict is not None:
model = cls.init_preweight(model, state_dict, task_config=task_config)
return model
def show_log(task_config, info):
if task_config is None or task_config.local_rank == 0:
logger.warning(info)
def update_attr(target_name, target_config, target_attr_name, source_config, source_attr_name, default_value=None):
if hasattr(source_config, source_attr_name):
if default_value is None or getattr(source_config, source_attr_name) != default_value:
setattr(target_config, target_attr_name, getattr(source_config, source_attr_name))
show_log(source_config, "Set {}.{}: {}.".format(target_name,
target_attr_name, getattr(target_config, target_attr_name)))
return target_config
def check_attr(target_name, task_config):
return hasattr(task_config, target_name) and task_config.__dict__[target_name]
class MeRetriever(MeRetrieverPreTrainedModel):
def __init__(self, cross_config, clip_state_dict, task_config):
super(MeRetriever, self).__init__(cross_config)
self.task_config = task_config
self.ignore_video_index = -1
# assert self.task_config.max_words + self.task_config.max_frames <= cross_config.max_position_embeddings
self._stage_one = True
self._stage_two = False
show_log(task_config, "Stage-One:{}, Stage-Two:{}".format(self._stage_one, self._stage_two))
self.loose_type = False
if self._stage_one and check_attr('loose_type', self.task_config):
self.loose_type = True
show_log(task_config, "Test retrieval by loose type.")
# CLIP Encoders: From OpenAI: CLIP [https://github.com/openai/CLIP] ===>
vit = "visual.proj" in clip_state_dict
assert vit
if vit:
vision_width = clip_state_dict["visual.conv1.weight"].shape[0]
vision_layers = len(
[k for k in clip_state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
vision_patch_size = clip_state_dict["visual.conv1.weight"].shape[-1]
grid_size = round((clip_state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
image_resolution = vision_patch_size * grid_size
else:
            counts: list = [len(set(k.split(".")[2] for k in clip_state_dict if k.startswith(f"visual.layer{b}")))
                            for b in [1, 2, 3, 4]]
vision_layers = tuple(counts)
vision_width = clip_state_dict["visual.layer1.0.conv1.weight"].shape[0]
output_width = round((clip_state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
vision_patch_size = None
assert output_width ** 2 + 1 == clip_state_dict["visual.attnpool.positional_embedding"].shape[0]
image_resolution = output_width * 32
embed_dim = clip_state_dict["text_projection"].shape[1]
context_length = clip_state_dict["positional_embedding"].shape[0]
vocab_size = clip_state_dict["token_embedding.weight"].shape[0]
transformer_width = clip_state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(
set(k.split(".")[2] for k in clip_state_dict if k.startswith(f"transformer.resblocks")))
self.cluster_inter = getattr(task_config, "cluster_inter", 0)
if self.cluster_inter:
self.cluster_algo = getattr(task_config, "cluster_algo", None)
self.deep_cluster = getattr(task_config, "deep_cluster", 0)
self.video_frames = getattr(task_config, "max_frames", None)
self.time_embedding = getattr(task_config, "time_embedding", None)
self.freeze_clip = getattr(task_config, "freeze_clip", 0)
self.new_added_modules = getattr(task_config, "new_added_modules", [None, ])
self.final_frames = task_config.target_frames_blocks[-1]
self.f_frame_duration = self.video_frames // self.final_frames
self.pre_visual_pooling = getattr(task_config, "pre_visual_pooling", 0)
self.camoe_dsl = getattr(task_config, "camoe_dsl", False)
show_log(task_config, "\t embed_dim: {}".format(embed_dim))
show_log(task_config, "\t image_resolution: {}".format(image_resolution))
show_log(task_config, "\t vision_layers: {}".format(vision_layers))
show_log(task_config, "\t vision_width: {}".format(vision_width))
show_log(task_config, "\t vision_patch_size: {}".format(vision_patch_size))
show_log(task_config, "\t context_length: {}".format(context_length))
show_log(task_config, "\t vocab_size: {}".format(vocab_size))
show_log(task_config, "\t transformer_width: {}".format(transformer_width))
show_log(task_config, "\t transformer_heads: {}".format(transformer_heads))
show_log(task_config, "\t transformer_layers: {}".format(transformer_layers))
self.linear_patch = '2d'
if hasattr(task_config, "linear_patch"):
self.linear_patch = task_config.linear_patch
show_log(task_config, "\t\t linear_patch: {}".format(self.linear_patch))
# use .float() to avoid overflow/underflow from fp16 weight. https://github.com/openai/CLIP/issues/40
cut_top_layer = 0
show_log(task_config, "\t cut_top_layer: {}".format(cut_top_layer))
self.clip = CLIP(
embed_dim,
image_resolution, vision_layers - cut_top_layer, vision_width, vision_patch_size,
context_length, vocab_size, transformer_width, transformer_heads, transformer_layers - cut_top_layer,
linear_patch=self.linear_patch
).float()
for key in ["input_resolution", "context_length", "vocab_size"]:
if key in clip_state_dict:
del clip_state_dict[key]
convert_weights(self.clip)
# <=== End of CLIP Encoders
self.sim_header = 'meanP'
if hasattr(task_config, "sim_header"):
self.sim_header = task_config.sim_header
show_log(task_config, "\t sim_header: {}".format(self.sim_header))
if self.sim_header == "tightTransf": assert self.loose_type is False
cross_config.max_position_embeddings = context_length
# cross_config.max_position_embeddings = 1+task_config.max_frames
if self.loose_type is False:
# Cross Encoder ===>
cross_config = update_attr("cross_config", cross_config, "num_hidden_layers", self.task_config,
"cross_num_hidden_layers")
self.cross = CrossModel(cross_config)
# <=== End of Cross Encoder
self.similarity_dense = nn.Linear(cross_config.hidden_size, 1)
if self.sim_header == "seqLSTM" or self.sim_header == "seqTransf":
self.frame_position_embeddings = nn.Embedding(cross_config.max_position_embeddings,
cross_config.hidden_size)
if self.sim_header == "seqTransf":
self.transformerClip = TransformerClip(width=transformer_width,
layers=self.task_config.cross_num_hidden_layers,
heads=transformer_heads, )
if self.sim_header == "seqLSTM":
self.lstm_visual = nn.LSTM(input_size=cross_config.hidden_size, hidden_size=cross_config.hidden_size,
batch_first=True, bidirectional=False, num_layers=1)
# self.loss_fct = CrossEn()
if getattr(task_config, "loss", "balanced") == "unbalanced":
self.loss_fct = CrossEnMulti_unbalanced()
else:
self.loss_fct = CrossEnMulti()
self.regularization = getattr(task_config, "regularize", "none")
self.multi2multi = (self.sim_header == 'maxP')
self.post_process = getattr(task_config, 'post_process', 'none')
self.apply(self.init_weights)
def forward(self, text, text_mask, group_mask, video, video_mask=None, vt_mask=None):
video_mask = video_mask.view(-1, video_mask.shape[-1])
# T x 3 x H x W
b, pair, bs, ts, channel, h, w = video.shape
video = video.view(b * pair * bs * ts, channel, h, w)
video_frame = bs * ts
if self.cluster_inter:
video_mask = self.get_video_mask_after_cluster(video_mask)
vt_mask = self.get_interval_after_cluster(group_mask, vt_mask)
sequence_output, visual_output = self.get_sequence_visual_output(text, text_mask,
video, video_mask, group_mask, shaped=True,
video_frame=video_frame)
if self.post_process == 'cluster':
assign, medoids = batch_fast_kmedoids(visual_output, self.task_config.post_cluster_centroids,
distance=self.task_config.cluster_distance,
threshold=self.task_config.cluster_threshold,
iter_limit=self.task_config.cluster_iter_limit)
idx = torch.arange(visual_output.shape[0], dtype=torch.long, device=visual_output.device).unsqueeze(-1)
visual_output = visual_output[idx, medoids]
video_mask = video_mask[idx, medoids]
vt_mask = vt_mask[idx, :, medoids].permute(0, 2, 1)
if self.training:
if self.multi2multi:
sim_matrix, sim_mask = self.get_similarity_multi2multi_logits(sequence_output, visual_output, video_mask,
group_mask, vt_mask)
else:
sim_matrix, sim_mask = self.get_similarity_logits(sequence_output, visual_output, text_mask, video_mask,
group_mask, shaped=True, loose_type=self.loose_type)
sim_loss = self.loss_fct(sim_matrix, sim_mask)
sim_loss2 = self.loss_fct(sim_matrix.T, sim_mask.T)
reg_loss = None
return sim_loss, sim_loss2, reg_loss
else:
return None
def get_sequence_output(self, text, attention_mask, group_mask, shaped=False):
bs = text.shape[0]
res = []
for i in range(bs):
sequence_hidden = self.clip.encode_text(text[i][group_mask[i] > 0]).float()
sequence_hidden = torch.concat((sequence_hidden, torch.zeros(text[i].shape[0] - sequence_hidden.shape[0],
sequence_hidden.shape[1]).to(text.device)))
res.append(sequence_hidden)
ret = torch.stack(res)
return ret
def get_visual_output(self, video, video_mask, shaped=False, video_frame=-1):
if shaped is False:
video_mask = video_mask.view(-1, video_mask.shape[-1])
video = torch.as_tensor(video).float()
b, pair, bs, ts, channel, h, w = video.shape
video = video.view(b * pair * bs * ts, channel, h, w)
video_frame = bs * ts
bs_pair = video_mask.size(0)
visual_hidden = self.clip.encode_image(video, video_frame=video_frame).float()
visual_hidden = visual_hidden.view(bs_pair, -1, visual_hidden.size(-1))
return visual_hidden
def get_sequence_visual_output(self, text, text_mask, video, video_mask, group_mask, shaped=False, video_frame=-1):
if shaped is False:
video_mask = video_mask.view(-1, video_mask.shape[-1])
video = torch.as_tensor(video).float()
b, pair, bs, ts, channel, h, w = video.shape
video = video.view(b * pair * bs * ts, channel, h, w)
video_frame = bs * ts
sequence_output = self.get_sequence_output(text, text_mask, group_mask, shaped=True)
visual_output = self.get_visual_output(video, video_mask, shaped=True, video_frame=video_frame)
return sequence_output, visual_output
def _get_cross_output(self, sequence_output, visual_output, attention_mask, video_mask):
        concat_features = torch.cat((sequence_output, visual_output), dim=1)  # concatenate tokens and frames
concat_mask = torch.cat((attention_mask, video_mask), dim=1)
text_type_ = torch.zeros_like(attention_mask)
video_type_ = torch.ones_like(video_mask)
concat_type = torch.cat((text_type_, video_type_), dim=1)
cross_layers, pooled_output = self.cross(concat_features, concat_type, concat_mask,
output_all_encoded_layers=True)
cross_output = cross_layers[-1]
return cross_output, pooled_output, concat_mask
def _mean_pooling_for_similarity_sequence(self, sequence_output, attention_mask):
attention_mask_un = attention_mask.to(dtype=torch.float).unsqueeze(-1)
attention_mask_un[:, 0, :] = 0.
sequence_output = sequence_output * attention_mask_un
text_out = torch.sum(sequence_output, dim=1) / torch.sum(attention_mask_un, dim=1, dtype=torch.float)
return text_out
def _mean_pooling_for_similarity_visual(self, visual_output, video_mask, ):
video_mask_un = video_mask.to(dtype=torch.float).unsqueeze(-1)
visual_output = visual_output * video_mask_un
video_mask_un_sum = torch.sum(video_mask_un, dim=1, dtype=torch.float)
video_mask_un_sum[video_mask_un_sum == 0.] = 1.
video_out = torch.sum(visual_output, dim=1) / video_mask_un_sum
return video_out
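    # Masked mean pooling sketch: with video_mask = [1, 1, 0] only the first two frame
    # embeddings are averaged, e.g. frames [[1, 1], [3, 3], [9, 9]] pool to [2, 2];
    # an all-zero mask falls back to a divisor of 1 to avoid division by zero.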
def _mean_pooling_for_similarity(self, sequence_output, visual_output, attention_mask, video_mask, ):
text_out = self._mean_pooling_for_similarity_sequence(sequence_output, attention_mask)
video_out = self._mean_pooling_for_similarity_visual(visual_output, video_mask)
return text_out, video_out
def _loose_similarity(self, sequence_output, visual_output, attention_mask, video_mask, group_mask,
sim_header="meanP"):
sequence_output, visual_output = sequence_output.contiguous(), visual_output.contiguous()
if sim_header == "meanP":
# Default: Parameter-free type
pass
elif sim_header == "seqLSTM":
# Sequential type: LSTM
visual_output_original = visual_output
visual_output = pack_padded_sequence(visual_output, torch.sum(video_mask, dim=-1).cpu(),
batch_first=True, enforce_sorted=False)
visual_output, _ = self.lstm_visual(visual_output)
if self.training: self.lstm_visual.flatten_parameters()
visual_output, _ = pad_packed_sequence(visual_output, batch_first=True)
visual_output = torch.cat(
(visual_output, visual_output_original[:, visual_output.size(1):, ...].contiguous()), dim=1)
visual_output = visual_output + visual_output_original
elif sim_header == "seqTransf":
# Sequential type: Transformer Encoder
visual_output_original = visual_output
seq_length = visual_output.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=visual_output.device)
position_ids = position_ids.unsqueeze(0).expand(visual_output.size(0), -1)
frame_position_embeddings = self.frame_position_embeddings(position_ids)
visual_output = visual_output + frame_position_embeddings
extended_video_mask = (1.0 - video_mask.unsqueeze(1)) * -1000000.0
extended_video_mask = extended_video_mask.expand(-1, video_mask.size(1), -1)
visual_output = visual_output.permute(1, 0, 2) # NLD -> LND
visual_output = self.transformerClip(visual_output, extended_video_mask)
visual_output = visual_output.permute(1, 0, 2) # LND -> NLD
visual_output = visual_output + visual_output_original
if self.training:
visual_output = allgather(visual_output, self.task_config, keep_itself=True)
video_mask = allgather(video_mask, self.task_config)
sequence_output = allgather(sequence_output, self.task_config, keep_itself=True)
group_mask = allgather(group_mask, self.task_config)
# noinspection PyUnresolvedReferences
torch.distributed.barrier()
visual_output = visual_output / visual_output.norm(dim=-1, keepdim=True)
visual_output = self._mean_pooling_for_similarity_visual(visual_output, video_mask)
visual_output = visual_output / visual_output.norm(dim=-1, keepdim=True)
sequences = []
sequence_mask = []
for i in range(len(sequence_output)):
temp = sequence_output[i][group_mask[i] == 1]
temp = temp / temp.norm(dim=-1, keepdim=True)
sequences.append(temp)
temp = torch.zeros(len(temp), len(sequence_output)).to(sequence_output.device)
temp[:, i] = 1
sequence_mask.append(temp)
sequence_output = torch.concat(sequences)
sequence_mask = torch.concat(sequence_mask)
logit_scale = self.clip.logit_scale.exp()
retrieve_logits = logit_scale * torch.matmul(sequence_output, visual_output.t())
return retrieve_logits, sequence_mask
def _cross_similarity(self, sequence_output, visual_output, attention_mask, video_mask, group_mask):
sequences = []
sequence_mask = []
for i in range(len(sequence_output)):
temp = sequence_output[i][group_mask[i] == 1]
temp = temp / temp.norm(dim=-1, keepdim=True)
sequences.append(temp)
temp = torch.zeros(len(temp), len(sequence_output)).to(sequence_output.device)
temp[:, i] = 1
sequence_mask.append(temp)
sequence_output = torch.concat(sequences).unsqueeze(1)
sequence_mask = torch.concat(sequence_mask)
sequence_output, visual_output = sequence_output.contiguous(), visual_output.contiguous()
b_text, s_text, h_text = sequence_output.size()
b_visual, s_visual, h_visual = visual_output.size()
retrieve_logits_list = []
# step_size = b_text # set smaller to reduce memory cost
step_size = 1
split_size = [step_size] * (b_text // step_size)
release_size = b_text - sum(split_size)
if release_size > 0:
split_size += [release_size]
        # because the CLIP text branch returns only the last hidden state
attention_mask = torch.ones(sequence_output.size(0), 1).to(device=attention_mask.device,
dtype=attention_mask.dtype)
sequence_output_splits = torch.split(sequence_output, split_size, dim=0)
attention_mask_splits = torch.split(attention_mask, split_size, dim=0)
for i in range(len(split_size)):
sequence_output_row = sequence_output_splits[i]
attention_mask_row = attention_mask_splits[i]
sequence_output_l = sequence_output_row.unsqueeze(1).repeat(1, b_visual, 1, 1)
sequence_output_l = sequence_output_l.view(-1, s_text, h_text)
attention_mask_l = attention_mask_row.unsqueeze(1).repeat(1, b_visual, 1)
attention_mask_l = attention_mask_l.view(-1, s_text)
step_truth = sequence_output_row.size(0)
visual_output_r = visual_output.unsqueeze(0).repeat(step_truth, 1, 1, 1)
visual_output_r = visual_output_r.view(-1, s_visual, h_visual)
video_mask_r = video_mask.unsqueeze(0).repeat(step_truth, 1, 1)
video_mask_r = video_mask_r.view(-1, s_visual)
cross_output, pooled_output, concat_mask = \
self._get_cross_output(sequence_output_l, visual_output_r, attention_mask_l, video_mask_r)
retrieve_logits_row = self.similarity_dense(pooled_output).squeeze(-1).view(step_truth, b_visual)
retrieve_logits_list.append(retrieve_logits_row)
retrieve_logits = torch.cat(retrieve_logits_list, dim=0)
return retrieve_logits, sequence_mask
def get_similarity_logits(self, sequence_output, visual_output, attention_mask, video_mask, group_mask,
shaped=False, loose_type=False):
if shaped is False:
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
video_mask = video_mask.view(-1, video_mask.shape[-1])
if loose_type:
assert self.sim_header in ["meanP", "seqLSTM", "seqTransf"]
retrieve_logits, sequence_mask = self._loose_similarity(sequence_output, visual_output, attention_mask,
video_mask, group_mask, sim_header=self.sim_header)
else:
assert self.sim_header in ["tightTransf"]
retrieve_logits, sequence_mask = self._cross_similarity(sequence_output, visual_output, attention_mask,
video_mask, group_mask)
return retrieve_logits, sequence_mask
def get_video_mask_after_cluster(self, video_mask):
        # logical AND: if any frame within a cluster of frames is masked,
        # the whole cluster is abandoned. Here we just use the last mask value
if self.cluster_algo in ['kmediods++', 'pooling', 'sparse_sampling', 'spectral']:
inds = torch.arange(self.f_frame_duration - 1, video_mask.shape[-1],
video_mask.shape[-1] // self.final_frames,
dtype=torch.long, device=video_mask.device)
return video_mask[:, inds]
else:
return video_mask
def get_interval_after_cluster(self, group_mask, vt_mask):
b, n = group_mask.shape
temp = vt_mask.view(b, n, self.final_frames, self.f_frame_duration)
res = torch.max(temp, dim=-1)[0]
return res
def get_similarity_multi2multi_logits(self, sequence_output, visual_output, video_mask, group_mask, vt_mask):
"""
sequence_output: bs*27*512
visual_output: bs*frames*512
video_mask: bs*frames
group_mask: bs*27
vt_mask: bs*27*frames
"""
if self.training:
visual_output = allgather(visual_output, self.task_config, keep_itself=True)
video_mask = allgather(video_mask, self.task_config)
sequence_output = allgather(sequence_output, self.task_config, keep_itself=True)
group_mask = allgather(group_mask, self.task_config)
vt_mask = allgather(vt_mask, self.task_config)
# noinspection PyUnresolvedReferences
torch.distributed.barrier()
video_mask = video_mask.view(-1, video_mask.shape[-1])
all_sim = []
all_mask = []
for i in range(len(sequence_output)):
sim_row = []
mask_row = []
seq = sequence_output[i][group_mask[i] > 0]
seq = seq / seq.norm(dim=-1, keepdim=True) # n_text * 512
for j in range(len(visual_output)):
vis = visual_output[j][video_mask[j] > 0]
vis = vis / vis.norm(dim=-1, keepdim=True) # n_frame * 512
vt = vt_mask[i][group_mask[i] > 0] # n_text * n_frame
sim = torch.matmul(seq, vis.T) * self.clip.logit_scale.exp()
mask = vt[:, video_mask[j] > 0]
if i != j:
mask = torch.zeros_like(mask)
# assert sim.shape == mask.shape
sim_row.append(sim)
mask_row.append(mask)
sim_row = torch.concat(sim_row, dim=-1)
mask_row = torch.concat(mask_row, dim=-1)
all_sim.append(sim_row)
all_mask.append(mask_row)
all_sim = torch.concat(all_sim, dim=0)
all_mask = torch.concat(all_mask, dim=0)
return all_sim, all_mask
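    # Shape sketch for a batch of 2 videos with 3 captions and 4 valid frames each:
    # every (text i, video j) pair contributes an n_text_i x n_frame_j similarity block,
    # so all_sim concatenates to (sum of n_text) x (sum of n_frame), here 6 x 8, and
    # all_mask keeps only the diagonal (i == j) blocks, filled from vt_mask.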
def get_similarity_sphere_eval(self, sequence_output, visual_output, video_mask, group_mask, id1, id2):
video_mask = video_mask.view(-1, video_mask.shape[-1])
all_sim = []
all_mask = []
for i in range(len(sequence_output)):
seq = sequence_output[i][group_mask[i] > 0]
seq = seq / seq.norm(dim=-1, keepdim=True) # n_text * 512
sim_row, mask_row = [], []
for j in range(len(visual_output)):
vis = visual_output[j][video_mask[j] > 0]
vis = vis / vis.norm(dim=-1, keepdim=True) # n_frame * 512
sim = torch.matmul(seq, vis.T) * self.clip.logit_scale.exp()
sim = sim.max(dim=-1, keepdim=True)[0]
if id1 == id2 and i == j:
mask = torch.ones_like(sim)
else:
mask = torch.zeros_like(sim)
# assert sim.shape == mask.shape
sim_row.append(sim)
mask_row.append(mask)
sim_row = torch.concat(sim_row, dim=-1)
mask_row = torch.concat(mask_row, dim=-1)
all_sim.append(sim_row)
all_mask.append(mask_row)
all_sim = torch.concat(all_sim, dim=0)
all_mask = torch.concat(all_mask, dim=0)
return all_sim, all_mask
def get_cone_loss(self, visual_output, video_mask):
visual_output = visual_output / visual_output.norm(dim=-1, keepdim=True) # bs * len * 512
video_mask_un = video_mask.to(dtype=torch.float).unsqueeze(-1)
visual_output = visual_output * video_mask_un
video_mask_un_sum = torch.sum(video_mask_un, dim=1, dtype=torch.float)
video_mask_un_sum[video_mask_un_sum == 0.] = 1.
video_center = torch.sum(visual_output, dim=1) / video_mask_un_sum
video_center = video_center / video_center.norm(dim=-1, keepdim=True) # bs * 512
# video_center = video_center.unsqueeze(1) # bs * 1 * 512
cos_dist = torch.tensordot(visual_output, video_center, ([2], [1]))
cos_dist = torch.diagonal(cos_dist, dim1=0, dim2=2).T
temp = cos_dist - self.cos_angle
temp = (temp*temp/2) * video_mask
'''
visual_output = visual_output - video_center # bs * len * 512
dist = visual_output.norm(dim=-1)
cone_loss = torch.sum(dist*video_mask, dim=-1) / torch.sum(video_mask, dim=-1)
'''
cone_loss = torch.sum(temp, dim=1) / torch.sum(video_mask, dim=1)
return torch.mean(cone_loss)
def get_sphere_loss(self, visual_output, video_mask): # without normalization
video_mask_un = video_mask.to(dtype=torch.float).unsqueeze(-1)
visual_output = visual_output * video_mask_un
video_mask_un_sum = torch.sum(video_mask_un, dim=1, dtype=torch.float)
video_mask_un_sum[video_mask_un_sum == 0.] = 1.
video_center = torch.sum(visual_output, dim=1) / video_mask_un_sum
video_center = video_center.unsqueeze(1) # bs * 1 * 512
visual_output = visual_output - video_center # bs * len * 512
euc_dist = visual_output.norm(dim=-1) # bs * len
loss = torch.sum(euc_dist*video_mask, dim=-1) / torch.sum(video_mask, dim=-1)
return torch.mean(loss)
| [] |
2024-01-10 | SnowyGanyu/Goblin | Character_Visualizer~agent_visuaizer.py | from agent_descriptors import *
from agents_information import Agent
import openai
import os
openai.api_key = "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"
STYLE = "Art Style: Generate a Pixel Art with the art style resembling the character sprites from the game Stardew Valley"
def create_agent_art(agent_object):
agent_text_description = []
agent_text_description.append(STYLE)
agent_text_description.append("First Name: ")
agent_text_description.append(agent_object.first_name)
agent_text_description.append("Last Name: ")
agent_text_description.append(agent_object.last_name)
agent_text_description.append("Hair Color: ")
agent_text_description.append(agent_object.hair_color)
agent_text_description.append("Skin Tone: ")
agent_text_description.append(agent_object.skin_tone)
agent_text_description.append("Eye Color: ")
agent_text_description.append(agent_object.eye_color)
agent_text_description.append("Accessories: ")
agent_text_description.append(agent_object.accesory)
agent_text_description.append("Pronouns: ")
agent_text_description.append(agent_object.gender)
agent_text_description.append("Occupation: ")
agent_text_description.append(agent_object.occupation)
agent_sentence_description = " ".join(agent_text_description)
response = openai.Image.create(prompt=agent_sentence_description, n=1, size='1024x1024')
    image_url = response['data'][0]['url']
#print(image_url)
return image_url
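# Minimal usage sketch, assuming an Agent object exposing the attributes read above
# (first_name, last_name, hair_color, skin_tone, eye_color, accesory, gender, occupation):
#
#   url = create_agent_art(my_agent)
#   print(url)  # link to the generated 1024x1024 image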
| [] |
2024-01-10 | SnowyGanyu/Goblin | Character_Visualizer~player_model_creater.py | import openai
import os
openai.api_key = "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"
print("Describe your character: ")
race = input("Enter the Race of your character: ")
# Ask for Class
char_class = input("Enter the Class of your character: ")
# Ask for Background
background = input("Enter the Background of your character: ")
# Ask for Alignment
alignment = input("Enter the Alignment of your character: ")
# Ask for Equipment and Gear
equipment = input("Enter the Equipment and Gear of your character: ")
# Ask for Appearance
hair_color = input("Enter the Hair Color of your character: ")
eye_color = input("Enter the Eye Color of your character: ")
height = input("Enter the Height of your character: ")
weight = input("Enter the Weight of your character: ")
distinctive_features = input("Enter any Distinctive Features of your character: ")
# Combine user inputs into a sentence
character_details = f"\nCharacter Details\n" \
f"-----------------\n" \
f"Race: {race}\n" \
f"Class: {char_class}\n" \
f"Background: {background}\n" \
f"Alignment: {alignment}\n" \
f"Equipment and Gear: {equipment}\n" \
f"Hair Color: {hair_color}\n" \
f"Eye Color: {eye_color}\n" \
f"Height: {height}\n" \
f"Weight: {weight}\n" \
f"Distinctive Features: {distinctive_features}"
response = openai.Image.create(prompt=character_details, n=1, size='1024x1024')
image_url = response[ 'data' ][0]['url']
print (image_url) | [] |
2024-01-10 | mansourshebli/Tripify-App | recommendation_page.py | # @mansourshebli
# Import necessary libraries
from tkinter import *
from tkinter import messagebox
import openai
import os
import sys
from tkcalendar import DateEntry
# Set up OpenAI API key
try:
openai.api_key = os.environ['OPENAI_API_KEY']
except KeyError:
# Display a message if the API key is missing
sys.stderr.write("""
You haven't set up your API key yet.
If you don't have an API key yet, visit:
https://platform.openai.com/signup
1. Make an account or sign in
2. Click "View API Keys" from the top right menu.
3. Click "Create new secret key"
Then, open the Secrets Tool and add OPENAI_API_KEY as a secret.
""")
exit(1)
# Initialize variables for UI elements
interests_tk = None
budget_tk = None
departure_date_cal = None
duration_of_stay_n = None
return_date_cal = None
travel_companions_var = None
preferred_climate_var = None
# Function to clear text fields and reset dropdown selections
def clear_tf():
global interests_tk, budget_tk, duration_of_stay_n, departure_date_cal, return_date_cal, travel_companions_var, preferred_climate_var
interests_tk.delete(0, 'end')
budget_tk.delete(0,'end')
duration_of_stay_n.delete(0,'end')
departure_date_cal.delete(0, 'end')
return_date_cal.delete(0,'end')
travel_companions_var.set('Select')
preferred_climate_var.set('Select')
# Function to generate travel recommendations
def generate_recommendation():
global interests_tk, budget_tk, duration_of_stay_n, departure_date_cal, return_date_cal, travel_companions_var, preferred_climate_var
# Get values from UI elements
interests = interests_tk.get()
budget = budget_tk.get()
departure_date = departure_date_cal.get()
duration_of_stay = duration_of_stay_n.get()
return_date = return_date_cal.get()
travel_companions = travel_companions_var.get()
preferred_climate = preferred_climate_var.get()
# Create user message for the AI
user_message = f"My interests are {interests}. My budget is {budget}. I plan to travel for {duration_of_stay} days. Return date is {return_date}. I'm traveling with {travel_companions}. I prefer a {preferred_climate} climate."
# Define conversation messages for AI completion
messages = [
{"role": "system", "content": "I'm helping you plan your travel schedule based on your interests and budget."},
{"role": "user", "content": user_message}
]
# Generate AI response
response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)
content = response['choices'][0]['message']['content'].strip()
# Create a result window
result_window = Toplevel()
result_window.title('Travel Schedule')
result_window.config(bg='#FF0000')
# Set up result window geometry and non-resizability
window_width = 600
window_height = 400
screen_width = result_window.winfo_screenwidth()
screen_height = result_window.winfo_screenheight()
x = (screen_width - window_width) // 2
y = (screen_height - window_height) // 2
result_window.geometry(f'{window_width}x{window_height}+{x}+{y}')
result_window.resizable(False, False)
# Create scrollable text widget for displaying results
text_widget = Text(result_window, wrap=WORD)
text_widget.pack(fill=BOTH, expand=True)
# Process AI-generated schedule lines
schedule_lines = content.split('\n')
for line in schedule_lines:
if line.startswith("Day"):
text_widget.insert(INSERT, line + "\n", 'day_header')
elif line.startswith(" - "):
text_widget.insert(INSERT, line + "\n", 'activity')
else:
text_widget.insert(INSERT, line + "\n", 'normal')
# Configure text widget styles
text_widget.tag_configure('day_header', foreground='blue', font=('Helvetica', 14, 'bold'))
text_widget.tag_configure('activity', foreground='green')
text_widget.tag_configure('normal', font=('Helvetica', 12))
# Add scrollbar to the text widget
scroll_bar = Scrollbar(result_window)
scroll_bar.pack(side=RIGHT, fill=Y)
text_widget.config(yscrollcommand=scroll_bar.set)
scroll_bar.config(command=text_widget.yview)
text_widget.configure(state=DISABLED)
# Make the result window non-resizable
result_window.resizable(False, False)
# Create the main window for travel recommendations
window = Tk()
window.title('Travel Destination Advisor')
window.geometry('800x500')
window.config(bg='#00FFFF')
var = IntVar() # Initialize an IntVar for radio buttons
# Create the main frame
frame = Frame(window, padx=100, pady=100, bg='#00FFFF')
frame.grid(row=0, column=0, sticky='nsew')
window.grid_rowconfigure(0, weight=1)
window.grid_columnconfigure(0, weight=1)
# Create and position UI elements
title_label = Label(frame, text='Destination Recommendations', font=('Helvetica', 24, 'bold', 'italic'), bg='#00FFFF')
title_label.grid(row=0, column=1, columnspan=2, sticky="w")
interests_lb = Label(frame, text="What are some of your interests? (Separate each by ',')", font=('Arial', 12), bg='#00FFFF')
interests_lb.grid(row=1, column=1, sticky="w")
interests_tk = Entry(frame)
interests_tk.grid(row=1, column=2, pady=5, sticky="w")
budget_lb = Label(frame, text="Budget$:", font=('Arial', 12), bg='#00FFFF')
budget_lb.grid(row=2, column=1, sticky="w")
budget_tk = Entry(frame)
budget_tk.grid(row=2, column=2, pady=5, sticky="w")
duration_of_stay_lb = Label(frame, text="Duration of Stay:", font=('Arial', 12), bg='#00FFFF')
duration_of_stay_lb.grid(row=3, column=1, sticky="w")
duration_of_stay_n = Entry(frame)
duration_of_stay_n.grid(row=3, column=2, pady=5, sticky="w")
departure_date_lb = Label(frame, text="Departure Date:", font=('Arial', 12), bg='#00FFFF')
departure_date_lb.grid(row=4, column=1, sticky="w")
departure_date_cal = DateEntry(frame, date_pattern='yyyy-mm-dd')
departure_date_cal.grid(row=4, column=2, pady=5, sticky="w")
return_date_lb = Label(frame, text="Return Date:", font=('Arial', 12), bg='#00FFFF')
return_date_lb.grid(row=5, column=1, sticky="w")
return_date_cal = DateEntry(frame, date_pattern='yyyy-mm-dd')
return_date_cal.grid(row=5, column=2, pady=5, sticky="w")
travel_companions_var = StringVar()
travel_companions_var.set("Select")
travel_companions_lb = Label(frame, text="Travel Companions:", font=('Arial', 12), bg='#00FFFF')
travel_companions_lb.grid(row=6, column=1, sticky="w")
travel_companions_options = ["Select", "Solo", "Family", "Couples", "Friends"]
travel_companions_menu = OptionMenu(frame, travel_companions_var, *travel_companions_options)
travel_companions_menu.grid(row=6, column=2, pady=5, sticky="w")
preferred_climate_var = StringVar()
preferred_climate_var.set("Select")
preferred_climate_lb = Label(frame, text="Preferred Climate:", font=('Arial', 12), bg='#00FFFF')
preferred_climate_lb.grid(row=7, column=1, sticky="w")
preferred_climate_options = ["Select", "Warm and tropical", "Cool and mountainous", "Moderate and temperate"]
preferred_climate_menu = OptionMenu(frame, preferred_climate_var, *preferred_climate_options)
preferred_climate_menu.grid(row=7, column=2, pady=5, sticky="w")
# Create a sub-frame for buttons
frame2 = Frame(frame, bg='#00FFFF')
frame2.grid(row=8, columnspan=3, pady=10, sticky="w")
generate_recommendation_btn = Button(frame2, text='Generate Recommendation', command=generate_recommendation, bg="green", fg="white", font=("Arial", 12))
generate_recommendation_btn.pack(side=LEFT)
reset_btn = Button(frame2, text='Reset', command=clear_tf, bg="blue", fg="white", font=("Arial", 12))
reset_btn.pack(side=LEFT)
exit_btn = Button(frame2, text='Exit', command=window.destroy, bg="red", fg="white", font=("Arial", 12))
exit_btn.pack(side=LEFT)
# Start the main event loop
window.mainloop()
| [
"My interests are PLACEHOLDER. My budget is PLACEHOLDER. I plan to travel for PLACEHOLDER days. Return date is PLACEHOLDER. I'm traveling with PLACEHOLDER. I prefer a PLACEHOLDER climate.",
"I'm helping you plan your travel schedule based on your interests and budget."
] |
2024-01-10 | mansourshebli/Tripify-App | local_tips_page.py | # @mansourshebli
from tkinter import *
from tkinter import messagebox
import openai
import os
import sys
# Set up OpenAI API key
try:
openai.api_key = os.environ['OPENAI_API_KEY']
except KeyError:
# Adding: Instructions on how to obtain an API key
sys.stderr.write("""
You haven't set up your API key yet.
If you don't have an API key yet, visit:
https://platform.openai.com/signup
1. Make an account or sign in
2. Click "View API Keys" from the top right menu.
3. Click "Create new secret key"
Then, open the Secrets Tool and add OPENAI_API_KEY as a secret.
""")
exit(1)
# Function to clear text fields
def clear_tf():
# Adding: Clear the place text field and reset continent selection
place_tf.delete(0, 'end')
var.set(0)
# Function to generate local tips
def generate_local_tips():
global place_tf, var
# Get user inputs
place = place_tf.get()
    # Map the radio-button value to a continent name so the prompt reads naturally.
    continent_names = {1: "Asia", 2: "Africa", 3: "Europe", 4: "another continent"}
    continent = continent_names.get(var.get(), "any continent")
user_message = f"I'm looking for travel tips about {place}. I'm interested in exploring {continent}."
messages = [
{"role": "system", "content": "I'm helping you find travel tips for your chosen destination."},
{"role": "user", "content": user_message}
]
response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)
content = response['choices'][0]['message']['content'].strip()
# Create a result window
result_window = Toplevel()
result_window.title(f"Local Tips for {place}")
result_window.config(bg='#FF0000')
# Calculate the window size based on content
window_width = 600
window_height = 400
# Get the screen width and height
screen_width = result_window.winfo_screenwidth()
screen_height = result_window.winfo_screenheight()
# Calculate the x and y position to center the window
x = (screen_width - window_width) // 2
y = (screen_height - window_height) // 2
# Set the window geometry
result_window.geometry(f'{window_width}x{window_height}+{x}+{y}')
# Make the result window non-resizable
result_window.resizable(False, False)
# Create a scrollable text widget
text_widget = Text(result_window, wrap=WORD)
text_widget.pack(fill=BOTH, expand=True)
lines = content.split('\n')
for line in lines:
text_widget.insert(INSERT, line + "\n", 'normal')
text_widget.tag_configure('normal', font=('Helvetica', 12))
scroll_bar = Scrollbar(result_window)
scroll_bar.pack(side=RIGHT, fill=Y)
text_widget.config(yscrollcommand=scroll_bar.set)
scroll_bar.config(command=text_widget.yview)
text_widget.configure(state=DISABLED)
# Make the result window non-resizable
result_window.resizable(False, False)
# Create the main window
lt_window = Tk()
lt_window.title('Local Tips')
lt_window.geometry('800x500')
lt_window.config(bg='#00FFFF')
var = IntVar()
# Create the frame for local tips content
lt_frame = Frame(lt_window, padx=50, pady=50, bg='#00FFFF')
lt_frame.grid(row=0, column=0, sticky='nsew')
lt_window.grid_rowconfigure(0, weight=1)
lt_window.grid_columnconfigure(0, weight=1)
title_label = Label(lt_frame, text='Local Tips', font=('Helvetica', 24, 'bold', 'italic'), bg='#00FFFF')
title_label.grid(row=1, column=1, sticky="w")
place_lb = Label(lt_frame, text="Please enter a place:", font=('Arial', 12), bg='#00FFFF')
place_lb.grid(row=3, column=1, pady=40, sticky="w")
place_tf = Entry(lt_frame)
place_tf.grid(row=3, column=2, pady=10, padx=10, sticky="w")
continent_lb = Label(lt_frame, text="Choose place continent:", font=('Arial', 12), bg='#00FFFF')
continent_lb.grid(row=4, column=1, sticky="w")
radio_frame = Frame(lt_frame, bg='#00FFFF')
radio_frame.grid(row=4, column=2, pady=10, padx=10, sticky="w")
asia_continent = Radiobutton(radio_frame, text="Asia", variable=var, value=1, bg='#00FFFF')
africa_continent = Radiobutton(radio_frame, text="Africa", variable=var, value=2, bg='#00FFFF')
europe_continent = Radiobutton(radio_frame, text="Europe", variable=var, value=3, bg='#00FFFF')
other_continents = Radiobutton(radio_frame, text="Other Continent", variable=var, value=4, bg='#00FFFF')
asia_continent.pack(side=LEFT)
africa_continent.pack(side=LEFT)
europe_continent.pack(side=LEFT)
other_continents.pack(side=LEFT)
local_tips_btn = Button(lt_frame, text="Generate Local Tips", command=generate_local_tips, bg="green", fg="white", font=("Arial", 12))
local_tips_btn.grid(row=5, column=1, padx=5, pady=5, sticky="w")
clear_btn = Button(lt_frame, text="Clear", command=clear_tf, bg="blue", fg="white", font=("Arial", 12))
clear_btn.grid(row=5, column=2, padx=5, pady=5, sticky="w")
exit_btn = Button(lt_frame, text='Exit', command=lt_window.destroy, bg="red", fg="white", font=("Arial", 12))
exit_btn.grid(row=5, column=3, padx=5, pady=5, sticky="w")
# Start the main event loop
lt_frame.mainloop()
| [
"I'm helping you find travel tips for your chosen destination.",
"I'm looking for travel tips about PLACEHOLDER. I'm interested in exploring PLACEHOLDER."
] |
2024-01-10 | 2dot71mily/sib_paper | inference.py | # %%
import torch
import pandas as pd
import numpy as np
import openai
import os
# %%
from transformers import pipeline
from config import (
MODEL_NAMES,
MODELS_PARAMS,
OPENAI_API_KEY,
UNDERSP_METRIC,
INFERENCE_FULL_PATH,
WINOGENDER_SCHEMA_PATH,
SENTENCE_TEMPLATES_FILE,
FEMALE_LIST,
MALE_LIST,
FEMALE_MASKING_LIST,
MALE_MASKING_LIST,
NEUTRAL_LIST,
MGT_EVAL_SET_PROMPT_VERBS,
MGT_EVAL_SET_LIFESTAGES,
INDIE_VARS,
DATASET_STYLE,
TESTING,
INSTRUCTION_PROMPTS,
GPT_NUM_TOKS,
OPENAI,
CONDITIONAL_GEN,
MGT_TARGET_TEXT,
MODELS_PARAMS,
CONDITIONAL_GEN_MODELS,
convert_to_file_save_name,
)
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if DEVICE.type == "cuda":
from transformers import T5ForConditionalGeneration, AutoTokenizer
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"
openai.api_key = OPENAI_API_KEY
# %%
DECIMAL_PLACES = 1
EPS = 1e-5 # to avoid /0 errors
#######################################################
# %%
def load_bert_like_model(model_name):
"""Download model weights for inference, this may take awhile."""
model_call_name = MODELS_PARAMS[model_name]["model_call_name"]
model = pipeline(
"fill-mask",
model=model_call_name,
revision=MODELS_PARAMS[model_name]["hf_revision"],
)
tokenizer = model.tokenizer
return model, tokenizer
# %%
def load_conditional_gen_model(model_name):
"""Download model weights for inference, this may take awhile."""
model_call_name = MODELS_PARAMS[model_name]["model_call_name"]
model = T5ForConditionalGeneration.from_pretrained(
model_call_name, device_map="auto", load_in_8bit=True
)
tokenizer = AutoTokenizer.from_pretrained(model_call_name)
return model, tokenizer
def load_openai_model(model_name):
"""Use API, just returning model_call_name."""
model = MODELS_PARAMS[model_name]["model_call_name"]
tokenizer = None
return model, tokenizer
def load_model_and_tokenizer(model_name):
if CONDITIONAL_GEN:
model, tokenizer = load_conditional_gen_model(model_name)
elif OPENAI:
model, tokenizer = load_openai_model(model_name)
else:
model, tokenizer = load_bert_like_model(model_name)
return model, tokenizer
# %%
def query_openai(prompt, model_name):
return openai.Completion.create(
model=model_name,
prompt=prompt,
temperature=0,
max_tokens=GPT_NUM_TOKS,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
logprobs=5,
)
def prepare_text_for_masking(input_text, mask_token, gendered_tokens, split_key):
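    # Replace every gendered word with the model's mask token, count the masks,
    # then split the masked text on the independent-variable placeholder (e.g. "DATE")
    # so each value can be substituted back in later.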
text_w_masks_list = [
mask_token if word.lower() in gendered_tokens else word
for word in input_text.split()
]
num_masks = len([m for m in text_w_masks_list if m == mask_token])
masked_text_portions = " ".join(text_w_masks_list).split(split_key)
return masked_text_portions, num_masks
def generate_with_scores(model, tokenized_inputs, k, max_new_tokens=10):
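    # Generation call that also returns the per-step scores so token
    # probabilities can be recovered afterwards.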
return model.generate(
tokenized_inputs,
max_new_tokens=max_new_tokens,
return_dict_in_generate=True,
output_scores=True,
top_k=k,
)
def get_gendered_token_ids(gendered_words, tokenizer):
gendered_token_ids = tokenizer.encode(
gendered_words, add_special_tokens=False, is_split_into_words=True
)
try: # Try to remove blank space token_id that is getting encoded for unclear reasons
gendered_token_ids.remove(3)
except ValueError:
pass
assert len(
[tokenizer.decode(id, add_special_tokens=False) for id in gendered_token_ids]
) == len(gendered_words), "gendered word multi-token"
return gendered_token_ids
def get_top_k_pairs(output, k=5):
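    # Softmax each step's scores and keep the top-k (probabilities, token_ids) pair per generated token.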
top_k_probs = [
torch.topk(torch.softmax(score, dim=1), k) for score in output.scores
]
return [(p[0].squeeze().tolist(), p[1].squeeze().tolist()) for p in top_k_probs]
def generate_with_token_probabilities_from_hf(
model, tokenizer, prompt, k, device=DEVICE, max_new_tokens=50
):
"""As k --> vocab size, sum(ret[i][0]) --> 1."""
tokenized_inputs = tokenizer.encode(prompt, padding=True, return_tensors="pt").to(
device
)
output = generate_with_scores(
model, tokenized_inputs, k, max_new_tokens=max_new_tokens
)
return get_top_k_pairs(output, k)
def get_accum_prob_from_conditional_gen_outputs(top_k_pairs, gendered_tokens):
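    # Sum the probability mass assigned to the gendered token ids across all generation steps.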
probs = torch.tensor([p[0] for p in top_k_pairs])
pred_token_ids = torch.tensor([p[1] for p in top_k_pairs])
accumulated_probs = torch.zeros(pred_token_ids.shape)
for g_id in gendered_tokens:
accumulated_probs += torch.where(pred_token_ids == g_id, probs, 0)
return round(torch.sum(accumulated_probs).item() * 100, 3)
def get_accum_prob_from_hf_pipeline_outputs(model_response, gendered_tokens, num_preds):
pronoun_preds = [
sum(
[
pronoun["score"]
if pronoun["token_str"].strip().lower() in gendered_tokens
else 0.0
for pronoun in top_preds
]
)
for top_preds in model_response
]
# `num_preds` > 1 when multiple tokens masked out
return round(sum(pronoun_preds) / (EPS + num_preds) * 100, DECIMAL_PLACES)
def convert_hf_scores_to_oai_like_scores_format(predictions, tokenizer):
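    # Reshape the (probs, token_ids) pairs into {decoded_token: prob} dicts so the
    # same downstream accumulation code can handle HF and OpenAI outputs.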
ret = []
for p in predictions:
ret.append(
{
tokenizer.decode(p[1][i]).strip().lower(): p[0][i]
for i in range(len(p[0]))
}
)
return ret
def get_idx_for_predicted_pronoun(predictions, all_gendered_tokens):
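    # Indices of the generation steps whose most likely token is a gendered pronoun.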
return [
idx
for idx, pred in enumerate(predictions)
if max(pred, key=pred.get).strip().lower() in all_gendered_tokens
]
def get_accum_prob_from_hf_model_outputs(predictions, gendered_tokens):
pronoun_preds = [
score if pronoun.strip().lower() in gendered_tokens else 0.0
for pred in predictions
for pronoun, score in pred.items()
]
return round(sum(pronoun_preds) / (EPS + len(predictions)) * 100, 3)
def get_accum_prob_from_openai_outputs(predictions, gendered_tokens):
pronoun_preds = [
np.exp(score) if pronoun.strip().lower() in gendered_tokens else 0.0
for pred in predictions
for pronoun, score in pred.items()
]
return round(sum(pronoun_preds) / (EPS + len(predictions)) * 100, 3)
def save_results(results_dict, indie_var_name, filename, dir=INFERENCE_FULL_PATH):
first_df = results_dict.popitem()[1] # 2nd element is values
rest_dfs = [df.drop(indie_var_name, axis=1) for df in results_dict.values()]
all_dfs = pd.concat([first_df] + rest_dfs, axis=1)
    all_dfs = all_dfs.set_index(indie_var_name)
file_path = os.path.join(dir, f"{filename}.csv")
all_dfs.to_csv(file_path)
print(f"Saved inference results to {file_path}")
# %%
def predict_gender_pronouns(
model_name,
model,
tokenizer,
input_text,
prompt_idx,
prompt_params,
is_instruction,
cond_prefix,
normalizing,
indie_var_name,
indie_vars,
):
if model_name == CONDITIONAL_GEN_MODELS[0]:
mask_token = "<extra_id_0>"
elif is_instruction:
mask_token = prompt_params["mask_token"]
else: # MLM
mask_token = tokenizer.mask_token
female_dfs = []
male_dfs = []
neutral_dfs = []
female_dfs.append(pd.DataFrame({indie_var_name: indie_vars}))
male_dfs.append(pd.DataFrame({indie_var_name: indie_vars}))
neutral_dfs.append(pd.DataFrame({indie_var_name: indie_vars}))
female_pronoun_preds = []
male_pronoun_preds = []
neutral_pronoun_preds = []
split_key = indie_var_name.upper()
masked_text_portions, num_preds = prepare_text_for_masking(
input_text, mask_token, FEMALE_MASKING_LIST + MALE_MASKING_LIST, split_key
)
for indie_var in indie_vars:
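        # Fill the placeholder with this value of the independent variable and
        # accumulate the probability mass of female / male / neutral pronouns.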
target_text = str(indie_var).join(masked_text_portions)
if is_instruction:
target_text = prompt_params["prompt"].format(sentence=target_text)
if cond_prefix:
target_text = cond_prefix + " " + target_text
if UNDERSP_METRIC:
target_text = target_text.replace("MASK", mask_token)
print(target_text)
if OPENAI:
model_response = query_openai(target_text, model)
print(f"Running OpenAI inference on {model_name}")
predictions = model_response.choices[0].logprobs.top_logprobs
all_gendered_tokens = (
list(FEMALE_LIST) + list(MALE_LIST) + list(NEUTRAL_LIST)
)
# If only one pronoun in greedy response, use just that idx
pronoun_idx = get_idx_for_predicted_pronoun(
predictions, all_gendered_tokens
)
if len(pronoun_idx) == 1:
predictions = [predictions[pronoun_idx[0]]]
else:
print(
f"********* {len(pronoun_idx)} top pronouns in sequence *********"
)
female_pronoun_preds.append(
get_accum_prob_from_openai_outputs(predictions, FEMALE_LIST)
)
male_pronoun_preds.append(
get_accum_prob_from_openai_outputs(predictions, MALE_LIST)
)
neutral_pronoun_preds.append(
get_accum_prob_from_openai_outputs(predictions, NEUTRAL_LIST)
)
elif CONDITIONAL_GEN:
print(f"Running conditional generation inference on {model_name}")
model_response = generate_with_token_probabilities_from_hf(
model, tokenizer, target_text, k=5, max_new_tokens=GPT_NUM_TOKS
)
predictions = convert_hf_scores_to_oai_like_scores_format(
model_response, tokenizer
)
all_gendered_tokens = (
list(FEMALE_LIST) + list(MALE_LIST) + list(NEUTRAL_LIST)
)
# If only one pronoun in greedy response, use just that idx
pronoun_idx = get_idx_for_predicted_pronoun(
predictions, all_gendered_tokens
)
if len(pronoun_idx) == 1:
predictions = [predictions[pronoun_idx[0]]]
else:
print(
f"********* {len(pronoun_idx)} top pronouns in sequence *********"
)
female_pronoun_preds.append(
get_accum_prob_from_hf_model_outputs(predictions, FEMALE_LIST)
)
male_pronoun_preds.append(
get_accum_prob_from_hf_model_outputs(predictions, MALE_LIST)
)
neutral_pronoun_preds.append(
get_accum_prob_from_hf_model_outputs(predictions, NEUTRAL_LIST)
)
else:
print(f"Running fill-mask inference on {model_name}")
model_response = model(target_text)
if type(model_response[0]) is not list:
# Quick hack as realized return type based on how many MASKs in text.
model_response = [model_response]
female_pronoun_preds.append(
get_accum_prob_from_hf_pipeline_outputs(
model_response, FEMALE_LIST, num_preds
)
)
male_pronoun_preds.append(
get_accum_prob_from_hf_pipeline_outputs(
model_response, MALE_LIST, num_preds
)
)
neutral_pronoun_preds.append(
get_accum_prob_from_hf_pipeline_outputs(
model_response, NEUTRAL_LIST, num_preds
)
)
if normalizing:
total_gendered_probs = np.add(
np.add(female_pronoun_preds, male_pronoun_preds), neutral_pronoun_preds
)
female_pronoun_preds = np.around(
np.divide(female_pronoun_preds, total_gendered_probs + EPS) * 100,
decimals=DECIMAL_PLACES,
)
male_pronoun_preds = np.around(
np.divide(male_pronoun_preds, total_gendered_probs + EPS) * 100,
decimals=DECIMAL_PLACES,
)
neutral_pronoun_preds = np.around(
np.divide(neutral_pronoun_preds, total_gendered_probs + EPS) * 100,
decimals=DECIMAL_PLACES,
)
female_dfs.append(pd.DataFrame({target_text: female_pronoun_preds}))
male_dfs.append(pd.DataFrame({target_text: male_pronoun_preds}))
neutral_dfs.append(pd.DataFrame({target_text: neutral_pronoun_preds}))
female_results = pd.concat(female_dfs, axis=1)
male_results = pd.concat(male_dfs, axis=1)
neutral_results = pd.concat(neutral_dfs, axis=1)
return (
target_text,
female_results,
male_results,
neutral_results,
)
# %%
def prep_inference(
prompt_idx,
indie_var_name,
is_instruction,
special_id="",
freeform_text="",
):
test_version = (
f"{special_id}_test{TESTING}{f'_P{prompt_idx}' if is_instruction else ''}"
)
input_texts = []
if freeform_text:
input_texts = [freeform_text]
else:
for verb in MGT_EVAL_SET_PROMPT_VERBS:
for stage in MGT_EVAL_SET_LIFESTAGES:
target_text = MGT_TARGET_TEXT.format(
split_key=indie_var_name.upper(), verb=verb, stage=stage
)
input_texts.append(target_text)
return {
"input_texts": input_texts,
"test_version": test_version,
}
# %%
def run_inference(
model_names,
special_id,
freeform_text,
normalizing,
results_dir=INFERENCE_FULL_PATH,
model=None,
tokenizer=None,
):
for model_name in model_names:
model_call_name = MODELS_PARAMS[model_name]["model_call_name"]
is_instruction = MODELS_PARAMS[model_name]["is_instruction"]
cond_prefix = MODELS_PARAMS[model_name]["cond_prefix"]
if not UNDERSP_METRIC:
model, tokenizer = load_model_and_tokenizer(model_name)
if not is_instruction:
all_instructions = {0: "DUMMY_INSTRUCTION"}
else:
all_instructions = INSTRUCTION_PROMPTS
for indie_var_name, indie_var_values in INDIE_VARS.items():
for prompt_idx, prompt_params in all_instructions.items():
infer_params = prep_inference(
prompt_idx,
indie_var_name,
is_instruction,
special_id,
freeform_text,
)
input_texts = infer_params["input_texts"]
test_version = infer_params["test_version"]
all_female_results = {}
all_male_results = {}
all_neutral_results = {}
for input_text in input_texts:
(
target_text,
female_results,
male_results,
neutral_results,
) = predict_gender_pronouns(
model_name,
model,
tokenizer,
input_text,
prompt_idx,
prompt_params,
is_instruction,
cond_prefix,
int(normalizing),
indie_var_name,
indie_var_values,
)
all_female_results[target_text] = female_results
all_male_results[target_text] = male_results
all_neutral_results[target_text] = neutral_results
filename = f"{DATASET_STYLE}_{convert_to_file_save_name(model_call_name)}_{indie_var_name}_{test_version}"
f_filename = f"fp_{filename}"
m_filename = f"mp_{filename}"
n_filename = f"np_{filename}"
save_results(
all_female_results.copy(),
indie_var_name,
f_filename,
dir=results_dir,
)
save_results(
all_male_results.copy(), indie_var_name, m_filename, dir=results_dir
)
save_results(
all_neutral_results.copy(),
indie_var_name,
n_filename,
dir=results_dir,
)
if __name__ == "__main__":
if UNDERSP_METRIC:
freeform_text = "wino_gender"
visualization = False
normalizing = True
SPLIT_KEY = "DATE"
for model_name in MODEL_NAMES:
model, tokenizer = load_model_and_tokenizer(model_name)
fp = open(
os.path.join(WINOGENDER_SCHEMA_PATH, SENTENCE_TEMPLATES_FILE), "r"
)
next(fp) # First line is headers
for line in fp:
line = line.strip().split("\t")
special_id, freeform_text = (
line[0],
f"In {SPLIT_KEY.upper()}: {line[1]}",
)
run_inference(
                [model_name], # One model at a time
special_id,
freeform_text,
normalizing,
model=model,
tokenizer=tokenizer,
)
else:
freeform_text = ""
visualization = True
normalizing = False
special_id = ""
run_inference(
MODEL_NAMES,
special_id,
freeform_text,
normalizing,
)
# %%
| [] |
2024-01-10 | yeboahd24/form-extension | maps~langchain_utils.py | # from langchain import LangChain
# lc = LangChain()
# def process_health_input(user_input):
# response = lc.process_input(user_input)
# return response
| [] |
2024-01-10 | quanhlam/alexa-assistant | lambda~lambda_function.py | # -*- coding: utf-8 -*-
# This sample demonstrates handling intents from an Alexa skill using the Alexa Skills Kit SDK for Python.
# Please visit https://alexa.design/cookbook for additional examples on implementing slots, dialog management,
# session persistence, api calls, and more.
# This sample is built using the handler classes approach in skill builder.
import logging
from typing import Union
import openai_service
import ask_sdk_core.utils as ask_utils
from ask_sdk_core.skill_builder import SkillBuilder
from ask_sdk_core.dispatch_components import AbstractRequestHandler
from ask_sdk_core.dispatch_components import AbstractExceptionHandler
from ask_sdk_core.handler_input import HandlerInput
from ask_sdk_model import Response
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
chatGptService = openai_service.ChatGPTService()
ASK_CHAT_GPT_INTENT_NAME="AskChatgptIntent"
class LaunchRequestHandler(AbstractRequestHandler):
"""Handler for Skill Launch."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_request_type("LaunchRequest")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
test_result = openai_service.test_chat_completion("Say integration test is successful")
speak_output = f"Hi, I'm Jarvis GPT. Open AI integration result: {test_result}"
return (
handler_input.response_builder
.speak(speak_output)
.ask(speak_output)
.response
)
class AskChatgptIntent(AbstractRequestHandler):
def can_handle(self, handler_input: HandlerInput) -> bool:
return (ask_utils.is_request_type("IntentRequest")(handler_input) and
                ask_utils.is_intent_name(ASK_CHAT_GPT_INTENT_NAME)(handler_input))
    def handle(self, handler_input: HandlerInput) -> Union[Response, None]:
user_input = ask_utils.get_slot_value(handler_input, "questiontwo")
chat_gpt_answer = chatGptService.ask(user_input)
speak_output = f"Your question was {user_input}. Chat GPT answer is: {chat_gpt_answer}"
return (
handler_input.response_builder.speak(speak_output).response
)
class HelloWorldIntentHandler(AbstractRequestHandler):
"""Handler for Hello World Intent."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("HelloWorldIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
speak_output = "Hello World!"
return (
handler_input.response_builder
.speak(speak_output)
# .ask("add a reprompt if you want to keep the session open for the user to respond")
.response
)
class HelpIntentHandler(AbstractRequestHandler):
"""Handler for Help Intent."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("AMAZON.HelpIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
speak_output = "You can say hello to me! How can I help?"
return (
handler_input.response_builder
.speak(speak_output)
.ask(speak_output)
.response
)
class CancelOrStopIntentHandler(AbstractRequestHandler):
"""Single handler for Cancel and Stop Intent."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return (ask_utils.is_intent_name("AMAZON.CancelIntent")(handler_input) or
ask_utils.is_intent_name("AMAZON.StopIntent")(handler_input))
def handle(self, handler_input):
# type: (HandlerInput) -> Response
speak_output = "Goodbye!"
return (
handler_input.response_builder
.speak(speak_output)
.response
)
class FallbackIntentHandler(AbstractRequestHandler):
"""Single handler for Fallback Intent."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("AMAZON.FallbackIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
logger.info("In FallbackIntentHandler")
speech = "Hmm, I'm not sure. You can say Hello or Help. What would you like to do?"
reprompt = "I didn't catch that. What can I help you with?"
return handler_input.response_builder.speak(speech).ask(reprompt).response
class SessionEndedRequestHandler(AbstractRequestHandler):
"""Handler for Session End."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_request_type("SessionEndedRequest")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
# Any cleanup logic goes here.
return handler_input.response_builder.response
class IntentReflectorHandler(AbstractRequestHandler):
"""The intent reflector is used for interaction model testing and debugging.
It will simply repeat the intent the user said. You can create custom handlers
for your intents by defining them above, then also adding them to the request
handler chain below.
"""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_request_type("IntentRequest")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
intent_name = ask_utils.get_intent_name(handler_input)
speak_output = "You just triggered " + intent_name + "."
return (
handler_input.response_builder
.speak(speak_output)
# .ask("add a reprompt if you want to keep the session open for the user to respond")
.response
)
class CatchAllExceptionHandler(AbstractExceptionHandler):
"""Generic error handling to capture any syntax or routing errors. If you receive an error
stating the request handler chain is not found, you have not implemented a handler for
the intent being invoked or included it in the skill builder below.
"""
def can_handle(self, handler_input, exception):
# type: (HandlerInput, Exception) -> bool
return True
def handle(self, handler_input, exception):
# type: (HandlerInput, Exception) -> Response
logger.error(exception, exc_info=True)
speak_output = "Sorry, I had trouble doing what you asked. Please try again."
return (
handler_input.response_builder
.speak(speak_output)
.ask(speak_output)
.response
)
# The SkillBuilder object acts as the entry point for your skill, routing all request and response
# payloads to the handlers above. Make sure any new handlers or interceptors you've
# defined are included below. The order matters - they're processed top to bottom.
sb = SkillBuilder()
sb.add_request_handler(LaunchRequestHandler())
sb.add_request_handler(AskChatgptIntent())
sb.add_request_handler(HelloWorldIntentHandler())
sb.add_request_handler(HelpIntentHandler())
sb.add_request_handler(CancelOrStopIntentHandler())
sb.add_request_handler(FallbackIntentHandler())
sb.add_request_handler(SessionEndedRequestHandler())
sb.add_request_handler(IntentReflectorHandler()) # make sure IntentReflectorHandler is last so it doesn't override your custom intent handlers
sb.add_exception_handler(CatchAllExceptionHandler())
lambda_handler = sb.lambda_handler() | [
"I didn't catch that. What can I help you with?"
] |
2024-01-10 | abdvllahcadceed/langchain | askthedata.py | import streamlit as st
import pandas as pd
from langchain.chat_models import ChatOpenAI
from langchain.agents import create_pandas_dataframe_agent
from langchain.agents.agent_types import AgentType
# Page title
st.set_page_config(page_title='🦜🔗 Ask the Data App')
st.title('🦜🔗 Ask the Data App')
st.markdown('''
Application built in `Python` + `Streamlit` + `GitHub` + `LangChain` + `OpenAI` by [Abdullahi M. Cadceed](https://twitter.com/@abdullahcadceed)
''')
# Load CSV file
def load_csv(input_csv):
df = pd.read_csv(input_csv)
with st.expander('See DataFrame'):
st.write(df)
return df
# Generate LLM response
def generate_response(csv_file, input_query):
llm = ChatOpenAI(model_name='gpt-3.5-turbo-0613', temperature=0.2, openai_api_key=openai_api_key)
df = load_csv(csv_file)
# Create Pandas DataFrame Agent
agent = create_pandas_dataframe_agent(llm, df, verbose=True, agent_type=AgentType.OPENAI_FUNCTIONS)
# Perform Query using the Agent
response = agent.run(input_query)
return st.success(response)
# Input widgets
uploaded_file = st.file_uploader('Upload a CSV file', type=['csv'])
question_list = [
'How many rows are there?',
'What is the range of values for MolWt with logS greater than 0?',
'How many rows have MolLogP value greater than 0.',
'Other']
query_text = st.selectbox('Select an example query:', question_list, disabled=not uploaded_file)
openai_api_key = st.text_input('OpenAI API Key', type='password', disabled=not (uploaded_file and query_text))
# App logic
if query_text == 'Other':
query_text = st.text_input('Enter your query:', placeholder = 'Enter query here ...', disabled=not uploaded_file)
if not openai_api_key.startswith('sk-'):
  st.warning('Please enter your OpenAI API key!', icon='⚠')
if openai_api_key.startswith('sk-') and (uploaded_file is not None):
st.header('Output')
generate_response(uploaded_file, query_text)
| [] |
2024-01-10 | abdvllahcadceed/langchain | blogoutline.py | import streamlit as st
from langchain.llms import OpenAI
from langchain import PromptTemplate
st.set_page_config(page_title="๐ฆ๐ Blog Outline Generator App")
st.title('๐ฆ๐ Blog Outline Generator App')
st.markdown('''
Application built in `Python` + `Streamlit` + `GitHub` + `LangChain` + `OpenAI` by [Abdullahi M. Cadceed](https://twitter.com/@abdullahcadceed)
''')
openai_api_key = st.sidebar.text_input('OpenAI API Key', type='password')
def generate_response(topic):
llm = OpenAI(model_name='text-davinci-003', openai_api_key=openai_api_key)
# Prompt
template = 'As an experienced data scientist and technical writer, generate an outline for a blog about {topic}.'
prompt = PromptTemplate(input_variables=['topic'], template=template)
prompt_query = prompt.format(topic=topic)
# Run LLM model and print out response
response = llm(prompt_query)
return st.info(response)
with st.form('myform'):
topic_text = st.text_input('Enter keyword:', '')
submitted = st.form_submit_button('Submit')
if not openai_api_key.startswith('sk-'):
    st.warning('Please enter your OpenAI API key!', icon='⚠')
if submitted and openai_api_key.startswith('sk-'):
generate_response(topic_text)
| [
"As an experienced data scientist and technical writer, generate an outline for a blog about {topic}."
] |
2024-01-10 | abdvllahcadceed/langchain | askthedoc.py | import streamlit as st
from langchain.llms import OpenAI
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQA
def generate_response(uploaded_file, openai_api_key, query_text):
# Load document if file is uploaded
if uploaded_file is not None:
documents = [uploaded_file.read().decode()]
# Split documents into chunks
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.create_documents(documents)
# Select embeddings
embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
# Create a vectorstore from documents
db = Chroma.from_documents(texts, embeddings)
# Create retriever interface
retriever = db.as_retriever()
# Create QA chain
qa = RetrievalQA.from_chain_type(llm=OpenAI(openai_api_key=openai_api_key), chain_type='stuff', retriever=retriever)
return qa.run(query_text)
# Page title
st.set_page_config(page_title='🦜🔗 Ask the Document App')
st.title('🦜🔗 Ask the Document App')
st.markdown('''
Application built in `Python` + `Streamlit` + `GitHub` + `LangChain` + `OpenAI` by [Abdullahi M. Cadceed](https://twitter.com/@abdullahcadceed)
''')
# File upload
uploaded_file = st.file_uploader('Upload an article', type='txt')
# Query text
query_text = st.text_input('Enter your question:', placeholder = 'Please provide a short summary.', disabled=not uploaded_file)
# Form input and query
result = []
with st.form('myform', clear_on_submit=True):
openai_api_key = st.text_input('OpenAI API Key', type='password', disabled=not (uploaded_file and query_text))
submitted = st.form_submit_button('Submit', disabled=not(uploaded_file and query_text))
if submitted and openai_api_key.startswith('sk-'):
with st.spinner('Calculating...'):
response = generate_response(uploaded_file, openai_api_key, query_text)
result.append(response)
del openai_api_key
if len(result):
st.info(response)
| [] |
2024-01-10 | abdvllahcadceed/langchain | askyourcsv.py | from langchain.agents import create_csv_agent
from langchain.llms import OpenAI
from dotenv import load_dotenv
import os
import streamlit as st
def main():
load_dotenv()
# Load the OpenAI API key from the environment variable
if os.getenv("OPENAI_API_KEY") is None or os.getenv("OPENAI_API_KEY") == "":
print("OPENAI_API_KEY is not set")
exit(1)
else:
print("OPENAI_API_KEY is set")
st.set_page_config(page_title=" ๐ฆ๐ฆ๐จโ๐ป Ask your CSV")
st.header("๐ฆ๐ฆ๐จโ๐ป Ask your CSV ๐")
st.markdown('''
Application built by [Abdullahi M. Cadceed](https://twitter.com/@abdullahcadceed)
''')
csv_file = st.file_uploader("Upload a CSV file", type="csv")
if csv_file is not None:
agent = create_csv_agent(
OpenAI(temperature=0), csv_file, verbose=True)
user_question = st.text_input("Ask a question about your CSV: ")
if user_question is not None and user_question != "":
with st.spinner(text="In progress..."):
st.write(agent.run(user_question))
if __name__ == "__main__":
main()
| [] |
2024-01-10 | abdvllahcadceed/langchain | textsummarization.py | import streamlit as st
from langchain import OpenAI
from langchain.docstore.document import Document
from langchain.text_splitter import CharacterTextSplitter
from langchain.chains.summarize import load_summarize_chain
def generate_response(txt):
# Instantiate the LLM model
llm = OpenAI(temperature=0, openai_api_key=openai_api_key)
# Split text
text_splitter = CharacterTextSplitter()
texts = text_splitter.split_text(txt)
# Create multiple documents
docs = [Document(page_content=t) for t in texts]
# Text summarization
chain = load_summarize_chain(llm, chain_type='map_reduce')
return chain.run(docs)
# Page title
st.set_page_config(page_title='🦜🔗 Text Summarization App')
st.title('🦜🔗 Text Summarization App')
st.markdown('''
Application built in `Python` + `Streamlit` + `GitHub` + `LangChain` + `OpenAI` by [Abdullahi M. Cadceed](https://twitter.com/@abdullahcadceed)
''')
# Text input
txt_input = st.text_area('Enter your text', '', height=200)
# Form to accept user's text input for summarization
result = []
with st.form('summarize_form', clear_on_submit=True):
openai_api_key = st.text_input('OpenAI API Key', type = 'password', disabled=not txt_input)
submitted = st.form_submit_button('Submit')
if submitted and openai_api_key.startswith('sk-'):
with st.spinner('Calculating...'):
response = generate_response(txt_input)
result.append(response)
del openai_api_key
if len(result):
st.info(response)
| [] |
2024-01-10 | abdvllahcadceed/langchain | quickstart.py | import streamlit as st
from langchain.llms import OpenAI
st.set_page_config(page_title="๐ฆ๐ Quickstart App")
st.title('๐ฆ๐ Quickstart App')
st.markdown('''
Application built in `Python` + `Streamlit` + `GitHub` + `LangChain` + `OpenAI` by [Abdullahi M. Cadceed](https://twitter.com/@abdullahcadceed)
''')
openai_api_key = st.sidebar.text_input('OpenAI API Key')
def generate_response(input_text):
llm = OpenAI(temperature=0.7, openai_api_key=openai_api_key)
st.info(llm(input_text))
with st.form('my_form'):
text = st.text_area('Enter text:', 'What are the three key pieces of advice for learning how to code?')
submitted = st.form_submit_button('Submit')
if not openai_api_key.startswith('sk-'):
    st.warning('Please enter your OpenAI API key!', icon='⚠')
if submitted and openai_api_key.startswith('sk-'):
generate_response(text)
| [] |
2024-01-10 | tom-doerr/zsh_codex | create_completion.py | #!/usr/bin/env python3
from openai import OpenAI
import sys
import os
import configparser
# Get config dir from environment or default to ~/.config
CONFIG_DIR = os.getenv('XDG_CONFIG_HOME', os.path.expanduser('~/.config'))
API_KEYS_LOCATION = os.path.join(CONFIG_DIR, 'openaiapirc')
# Read the organization_id and secret_key from the ini file ~/.config/openaiapirc
# The format is:
# [openai]
# organization_id=<your organization ID>
# secret_key=<your secret key>
# If you don't see your organization ID in the file you can get it from the
# OpenAI web site: https://openai.com/organizations
def create_template_ini_file():
"""
If the ini file does not exist create it and add the organization_id and
secret_key
"""
if not os.path.isfile(API_KEYS_LOCATION):
with open(API_KEYS_LOCATION, 'w') as f:
f.write('[openai]\n')
f.write('organization_id=\n')
f.write('secret_key=\n')
f.write('model=gpt-3.5-turbo-0613\n')
print('OpenAI API config file created at {}'.format(API_KEYS_LOCATION))
print('Please edit it and add your organization ID and secret key')
print('If you do not yet have an organization ID and secret key, you\n'
'need to register for OpenAI Codex: \n'
'https://openai.com/blog/openai-codex/')
sys.exit(1)
def initialize_openai_api():
"""
Initialize the OpenAI API
"""
# Check if file at API_KEYS_LOCATION exists
create_template_ini_file()
config = configparser.ConfigParser()
config.read(API_KEYS_LOCATION)
api_key = config['openai']['secret_key'].strip('"').strip("'")
model_name = config['openai'].get('model', 'gpt-3.5-turbo').strip('"').strip("'")
client = OpenAI(api_key=api_key)
return client, model_name
client, model_name = initialize_openai_api()
cursor_position_char = int(sys.argv[1])
# Read the input prompt from stdin.
buffer = sys.stdin.read()
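# Split the buffer at the cursor so the model sees the code both before and after the completion point.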
prompt_prefix = '#!/bin/zsh\n\n' + buffer[:cursor_position_char]
prompt_suffix = buffer[cursor_position_char:]
full_command = prompt_prefix + prompt_suffix
response = client.chat.completions.create(model=model_name, messages=[
{
"role":'system',
"content": "You are a zsh shell expert, please help me complete the following command, you should only output the completed command, no need to include any other explanation",
},
{
"role":'user',
"content": full_command,
}
])
completed_command = response.choices[0].message.content
sys.stdout.write(f"\n{completed_command.replace(prompt_prefix, '', 1)}")
| [
"You are a zsh shell expert, please help me complete the following command, you should only output the completed command, no need to include any other explanation",
"#!/bin/zsh\n\nPLACEHOLDERPLACEHOLDER",
"#!/bin/zsh\n\nPLACEHOLDER"
] |
2024-01-10 | twibster/Nebula | api~routes.py | import uuid
from midiutil import MIDIFile
from fastapi import FastAPI, File, UploadFile, HTTPException, status
from fastapi.responses import FileResponse
import openai
from api.utils.sonify import (process_frame,
sonify_image,
write_midi_file,
convert_midi_to_mp3,
compute_luminance,
apply_gamma_correction,
sonify_pixel, TEMPO
,check_tuning_file)
from api.utils.decompose import decompose_img
app = FastAPI()
openai.api_key = "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"
utils_dir = "./api/utils/"
@app.post("/sonify")
async def sonify(media: UploadFile = File(...), melody: UploadFile = File(None)):
video = True
if media.content_type in ["image/jpeg", "image/png", "image/jpg", "image/gif", "image/bmp", "image/webp"]:
video = False
elif media.content_type in ["video/mp4"]:
video = True
else:
raise HTTPException(status.HTTP_409_CONFLICT, "image must be of jpeg, png, jpg, gif, bmp or webp type\n or video of mp4")
if melody:
if melody.content_type not in ["audio/mid"]:
raise HTTPException(status.HTTP_409_CONFLICT, "melody must be of midi type")
with open(utils_dir + melody.filename, "wb") as f:
melody_contents = await melody.read()
f.write(melody_contents)
general_name = f"{uuid.uuid4()}"
media.filename = general_name + "." + media.filename.split(".")[1]
with open(utils_dir + media.filename, "wb") as f:
media_contents = await media.read()
f.write(media_contents)
down_scaled_image = process_frame(utils_dir+media.filename)
MIN_VOLUME, MAX_VOLUME, note_midis = check_tuning_file(utils_dir + melody.filename if melody else "")
print(MIN_VOLUME, MAX_VOLUME, note_midis)
midi_file = sonify_image(down_scaled_image, MIN_VOLUME, MAX_VOLUME, note_midis)
write_midi_file(midi_file, utils_dir + general_name)
convert_midi_to_mp3(utils_dir+f"{general_name}.mid", utils_dir+ "sound-font.sf2", utils_dir+ f"{general_name}.mp3")
return FileResponse(utils_dir+ f"{general_name}.mp3")
@app.post("/color_tone")
async def get_color_tone(hex: str):
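    # Parse the "#RRGGBB" hex string into an (R, G, B) tuple of integers.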
hex = hex.lstrip("#")
rgb = tuple(int(hex[i:i+2], 16) for i in (0, 2, 4))
# if melody:
# if melody.content_type not in ["audio/mid"]:
# raise HTTPException(status.HTTP_409_CONFLICT, "melody must be of midi type")
# with open(utils_dir + melody.filename, "wb") as f:
# melody_contents = await melody.read()
# f.write(melody_contents)
# MIN_VOLUME, MAX_VOLUME, note_midis = check_tuning_file(utils_dir + melody.filename if melody else "")
MIN_VOLUME, MAX_VOLUME, note_midis = check_tuning_file("")
luminance = compute_luminance(rgb)
    pitch, duration, volume = sonify_pixel(rgb, luminance, MIN_VOLUME, MAX_VOLUME, note_midis)
midi_filename = str(rgb[0]) + "-" + str(rgb[1]) + "-" + str(rgb[2])
midi_file = MIDIFile(1)
midi_file.addTempo(track=0, time=0, tempo=TEMPO) # add midi notes
midi_file.addNote(
track=0,
channel=0,
time=0,
pitch=pitch,
volume=volume,
duration=duration,
)
write_midi_file(midi_file, utils_dir+ midi_filename)
convert_midi_to_mp3(utils_dir+ f"{midi_filename}.mid", utils_dir+ "sound-font.sf2", utils_dir+ f"{midi_filename}.mp3")
return FileResponse(utils_dir+ f"{midi_filename}.mp3")
@app.post("/decompose")
async def decompose(image: UploadFile = File(...)):
with open(utils_dir + image.filename, "wb") as f:
media_contents = await image.read()
f.write(media_contents)
return decompose_img()
def chat_with_chatgpt(prompt, model="gpt-3.5-turbo"):
response = openai.ChatCompletion.create(
model=model,
messages=[{"content": prompt, "role": "user"}],
temperature=0,
)
message = response['choices'][0]['message']['content']
return message
@app.get("/chatgpt")
def get_chatgpt(propmt: str):
return chat_with_chatgpt(propmt)
| [] |
2024-01-10 | daDiz/hbtm_covid19_twitter | script~lda_coh.py | import gensim
import numpy
import pandas as pd
from gensim.models.ldamodel import LdaModel
from gensim.models.coherencemodel import CoherenceModel
import gensim.corpora as corpora
df = pd.read_csv('../data/tweets_cutoff0.01_sorted.csv',sep='\t')
texts = df['text'].values
texts = [x.split(' ') for x in texts]
id2word = corpora.Dictionary(texts)
corpus = [id2word.doc2bow(text) for text in texts]
num_topics=[2,3,4,5,6,7,8,9,10,15,20,25,30,35,40,45,50]
coh_scores = []
for ntop in num_topics:
lda_model = LdaModel(corpus=corpus,
id2word=id2word,
num_topics=ntop,
random_state=42,
update_every=1,
chunksize=100,
passes=10,
alpha='auto',
per_word_topics=True)
coherence_model_lda = CoherenceModel(model=lda_model, texts=texts, dictionary=id2word, coherence='c_uci')
coherence_lda = coherence_model_lda.get_coherence()
coh_scores.append(coherence_lda)
df_out = pd.DataFrame.from_dict({'ntop':num_topics,'coh':coh_scores})
df_out.to_csv('lda_coh.csv',index=False)
| [] |
2024-01-10 | aryani1/RL-PPO | mario_files~mario_env.py | import gym
import numpy as np
from baselines.common.atari_wrappers import FrameStack
class ObsWrapper(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
def observation(self, frame):
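        # The emulator can return None frames (e.g. before the level is ready);
        # substitute an all-zero 13x16 tile grid so downstream code always gets an array.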
if frame is None:
frame = np.zeros((13,16)) # tiles x,y shape
return frame
class ActionsDiscretizer(gym.ActionWrapper):
def __init__(self, env):
# From openai github:
# Don't forget to call super(class_name, self).init(env)
# if you override the wrapper's init function.
super(ActionsDiscretizer, self).__init__(env)
self._actions = np.array([
[0, 0, 0, 0, 0, 0], #0 - no button",
[1, 0, 0, 0, 0, 0], #1 - up only (to climb vine)",
#[0, 0, 1, 0, 0, 0], #2 - left only",
[0, 0, 0, 1, 0, 0], #3 - right only",
[0, 0, 0, 0, 0, 1], #4 - run only",
[0, 0, 0, 0, 1, 0], #5 - jump only",
#[0, 0, 1, 0, 0, 1], #6 - left run",
#[0, 0, 1, 0, 1, 0], #7 - left jump",
[0, 0, 0, 1, 0, 1], #8 - right run",
[0, 0, 0, 1, 1, 0], #9 - right jump",
#[0, 0, 1, 0, 1, 1], #10 - left run jump",
[0, 0, 0, 1, 1, 1]]) #11 - right run jump",
self.action_space = gym.spaces.Discrete(len(self._actions))
# take an action
def action(self, a):
return self._actions[a].copy()
# def reset(self):
# #print(self.env.change_level(new_level=0))
# #return self.env.reset()
# return self.env.change_level(new_level=0)
class ProcessRewards(gym.Wrapper):
def __init__(self, env):
super(ProcessRewards, self).__init__(env)
self._max_x = 41
self._time_ = 400
self._score_ = 0
def reset(self, **kwargs):
# TODO: Try to changelevel to level 0 instead
# of reseting the entire environment.
# this is would yield faster training.
self._max_x = 41
self._time_ = 400
self._score_ = 0
return self.env.reset(**kwargs)
#return self.env.change_level(new_level=0)
def step(self, action):
obs, reward, done, info = self.env.step(action)
score_coef = 0.0001 # tune the score reward
time_penalty = 0.01 # for every second that passes, give -'time_penalty' reward
r = 0
# Check first if distance is in info, this is mario-specific
if 'distance' in info:
if info['distance'] > 41:
r += reward * 0.5
score_dif = (info['score'] - self._score_) * score_coef
r += score_dif
# time penalty every second
if info['time'] < self._time_:
r -= time_penalty
# if mario died
if done and info['life'] == 0:
r -= 2
if done and info['distance'] > 0.97 * 3266: # level 0 max_distance
r += 2
self._max_x = max(self._max_x, info['distance'])
self._score_ = info['score']
self._time_ = info['time']
return obs, r, done, info
def replace_nans(obs):
obs[np.isnan(obs)] = 0.
return obs
def make_env():
''' function for editing and returning the environment for mario '''
env = gym.make('SuperMarioBros-1-1-v0')
env = ActionsDiscretizer(env)
env = ProcessRewards(env)
env = ObsWrapper(env)
env.close()
#env = FrameStack(env, 2)
return env | [] |
2024-01-10 | priya-dwivedi/generative-ai | smart_memory~memory_bot.py | import openai
from openai import OpenAI
import os
from dotenv import load_dotenv
import asyncio
import json
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
from rich.console import Console
from rich.prompt import Prompt
# Create a console object
console = Console()
class Chatbot:
def __init__(self):
## reading config file
self.client = OpenAI()
self.messages = []
self.messages.append({"role": "system", "content":"You are a friendly chatbot who likes to chat with users and extract relevant information. You respond back in JSON format."})
self.memory = {}
def set_user_prompt(self):
user_prompt = f'''
        Chat with the user. If they share personal information such as their own or a family member's birthday, hobbies, likes or dislikes, extract it as well. Store each item with the entity name and the information, written in the third person.
If no personal information is shared, then return None for relevant information.
I would like output in JSON format. See example below:
\n Example:
Query: " I am turning 40 on Dec 19th. I am not sure what to buy. I hope you can make some suggestions"
Answer:
{{"response" : "Glad to hear that! I am happy to help. What kind of activities do you enjoy?"}},
"relevant_info" : [{{'entity': 'user', "information": 'Turning 40 on Dec 19"}}]
\n Example:
Query: " My sister loves making cakes! Maybe she can make a chocolate lava cake for me. I would like that"
Answer:
{{"response" : "Ohh nice!"}},
"relevant_info" : [{{'entity': 'user', "information": 'Loves chocolate lava cake"}}, {{'entity': 'sister', "information": 'Likes baking cakes"}}]
Now respond to user's chat below:
User: {self.chat}
Answer: {{'response': "", 'relevant_info':""}}
'''
return user_prompt
def collect_memory(self, record):
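        # Append each extracted fact to the running memory string kept per entity.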
for each in record:
entity = each['entity']
info = each['information']
            if entity in self.memory:
                self.memory[entity] += ". " + info
            else:
                self.memory[entity] = info
async def return_memory(self):
yield self.memory
async def call_open_ai(self, chat):
self.chat = chat
user_prompt = self.set_user_prompt()
self.messages.append({"role": "user", "content": user_prompt})
completion = self.client.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=self.messages,
response_format={"type": "json_object"},
temperature=0.4,
)
result = completion.choices[0].message.content
self.messages.append({"role": "assistant", "content": result})
result = json.loads(result)
if result['relevant_info'] != "None":
## Save this
self.collect_memory(result['relevant_info'])
yield {
"response":result["response"],
"memory":result["relevant_info"]
}
if __name__=="__main__":
bot = Chatbot()
async def CLI():
mssg_cnt = 0
while True:
user_input = Prompt.ask("[bold green] Query [/bold green] ")
async for output in bot.call_open_ai(user_input):
if output['response'] !='None':
console.print(f"[bold green] ๐ค AI: {output['response']} [/bold green]")
if output['memory'] != 'None':
console.print(f"[bold yellow] ๐ค Add to memory: {output['memory']} [/bold yellow]")
mssg_cnt +=1
if mssg_cnt %5 ==0:
async for output in bot.return_memory():
console.print(f"[bold red] ๐ค My memory so far: {output} [/bold red]")
asyncio.run(CLI())
| [
"You are a friendly chatbot who likes to chat with users and extract relevant information. You respond back in JSON format."
] |
2024-01-10 | The---onE/AI-Chat | Server~bilibili.py | import json
import re
import warnings
from typing import List, Tuple
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class BiliBiliLoader(BaseLoader):
"""Load `BiliBili` video transcripts."""
def __init__(self, video_urls: List[str], cookies): #
"""Initialize with bilibili url.
Args:
video_urls: List of bilibili urls.
"""
self.video_urls = video_urls
self.cookies = cookies #
def load(self) -> List[Document]:
"""Load Documents from bilibili url."""
results = []
for url in self.video_urls:
transcript, video_info = self._get_bilibili_subs_and_info(url)
doc = Document(page_content=transcript, metadata=video_info)
results.append(doc)
return results
def _get_bilibili_subs_and_info(self, url: str) -> Tuple[str, dict]:
try:
from bilibili_api import sync, video, Credential
except ImportError:
raise ImportError(
"requests package not found, please install it with "
"`pip install bilibili-api-python`"
)
if url.startswith('BV'):
url = 'https://www.bilibili.com/video/' + url
bvid = re.search(r"BV\w+", url)
SESSDATA, BILI_JCT, BUVID3 = None, None, None #
for cookie in self.cookies: #
if cookie.get('name') == 'SESSDATA':
SESSDATA = cookie.get('value')
elif cookie.get('name') == 'bili_jct':
BILI_JCT = cookie.get('value')
elif cookie.get('name') == 'buvid3':
BUVID3 = cookie.get('value')
credential = Credential(sessdata=SESSDATA, bili_jct=BILI_JCT, buvid3=BUVID3) #
bvid = re.search(r"BV\w+", url)
if bvid is not None:
v = video.Video(bvid=bvid.group(), credential=credential) #
else:
aid = re.search(r"av[0-9]+", url)
if aid is not None:
try:
v = video.Video(aid=int(aid.group()[2:]), credential=credential) #
except AttributeError:
raise ValueError(f"{url} is not bilibili url.")
else:
raise ValueError(f"{url} is not bilibili url.")
video_info = sync(v.get_info())
video_info.update({"url": url})
sub = sync(v.get_subtitle(video_info["cid"]))
# Get subtitle url
sub_list = sub["subtitles"]
if sub_list:
sub_url = sub_list[0]["subtitle_url"]
if not sub_url.startswith("http"):
sub_url = "https:" + sub_url
result = requests.get(sub_url)
raw_sub_titles = json.loads(result.content)["body"]
raw_transcript = " ".join([c["content"] for c in raw_sub_titles])
raw_transcript_with_meta_info = (
f"Video Title: {video_info['title']},"
f"description: {video_info['desc']}\n\n"
f"Transcript: {raw_transcript}"
)
return raw_transcript_with_meta_info, video_info
else:
raw_transcript = ""
warnings.warn(
f"""
No subtitles found for video: {url}.
Return Empty transcript.
"""
)
return raw_transcript, video_info
| [] |
2024-01-10 | The---onE/AI-Chat | Server~langchain_client.py | import langchain
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain.schema.document import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import TextLoader, Docx2txtLoader, UnstructuredPDFLoader, SeleniumURLLoader
from bilibili import BiliBiliLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import VectorStore, FAISS
from langchain.chains import ConversationalRetrievalChain
from langchain.chains.summarize import load_summarize_chain
from langchain_core.prompts import PromptTemplate
from langchain_core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain_gemini_chat_models import ChatGoogleGenerativeAI
import os
import hashlib
import json
import traceback
import asyncio
from logging import Handler
from enum import Enum
from typing import List, Tuple, Optional
from fastapi import UploadFile
class ModelType(Enum):
GPT = 1,
GEMINI = 2
class LangchainClient:
gpt35_token = 6000
gpt4_token = 3000
gemini_token = 12000
langchain.verbose = True
text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
separators=['\n\n', '\n', ' ', ''], model_name='gpt-3.5-turbo-16k', chunk_size=gpt35_token / 2, chunk_overlap=150)
faiss_dir = 'faissSave/'
file_dir = 'files/'
use_gpt4 = True
file_context_prefix = 'f:'
url_context_prefix = 'u:'
bilibili_context_prefix = 'b:'
text_context_prefix = 't:'
context_prefix = [file_context_prefix, url_context_prefix,
bilibili_context_prefix, text_context_prefix]
summarize_prompt_prefix = ':s'
special_prompt_prefix = [summarize_prompt_prefix]
def __init__(self, openai_api_key: str, google_api_key: str, embeddingLogger: Handler, gptLogger: Handler, geminiLogger: Handler):
self.embeddingLogger = embeddingLogger
self.gptLogger = gptLogger
self.geminiLogger = geminiLogger
self.update_openai_api_key(openai_api_key)
self.update_google_api_key(google_api_key)
def update_openai_api_key(self, key: str):
os.environ['OPENAI_API_KEY'] = key
self.embeddings = OpenAIEmbeddings(client=None)
self.llm35 = ChatOpenAI(model='gpt-3.5-turbo-16k',
temperature=0.7, max_tokens=self.gpt35_token)
self.llm4 = ChatOpenAI(
model='gpt-4', temperature=0.7, max_tokens=self.gpt4_token)
def update_google_api_key(self, key: str):
os.environ['GOOGLE_API_KEY'] = key
self.llm_gemini = ChatGoogleGenerativeAI(
model='gemini-pro', temperature=0.7, max_output_tokens=self.gemini_token, convert_system_message_to_human=True)
async def request(self, messages: List, type: ModelType) -> Tuple[str, str]:
if messages[0].get('role') == 'system' and messages[0].get('content').startswith(tuple(self.context_prefix)):
if messages[0].get('content').startswith(self.file_context_prefix):
result_content, source_content = await self.file_base_request(messages, type)
elif messages[0].get('content').startswith(self.bilibili_context_prefix):
result_content, source_content = await self.bilibili_base_request(
messages, type)
elif messages[0].get('content').startswith(self.text_context_prefix):
result_content, source_content = await self.text_base_request(messages, type)
else:
result_content, source_content = await self.url_base_request(messages, type)
else:
result_content, source_content = await self.langchain_request(messages, type)
return result_content, source_content
async def langchain_request(self, messages: List, type: ModelType) -> Tuple[str, str]:
contents = []
messages.reverse()
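        # Walk the history from newest to oldest and stop adding messages once the
        # model's token budget is exceeded; chronological order is restored below.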
for msg in messages:
role = msg.get('role')
content = msg.get('content')
if role == 'user':
message = HumanMessage(content=content)
elif role == 'assistant':
message = AIMessage(content=content)
else:
if type == ModelType.GPT:
message = SystemMessage(content=content)
elif type == ModelType.GEMINI:
continue
contents.append(message)
if type == ModelType.GPT:
if self.use_gpt4 and self.llm4.get_num_tokens_from_messages(contents) > self.gpt4_token:
break
if not self.use_gpt4 and self.llm35.get_num_tokens_from_messages(contents) > self.gpt35_token:
break
elif type == ModelType.GEMINI:
if self.llm_gemini.get_num_tokens_from_messages(contents) > self.gemini_token:
break
if type == ModelType.GEMINI:
for content in contents[::-1]:
if not isinstance(content, HumanMessage):
del contents[-1]
else:
break
contents.reverse()
if type == ModelType.GPT:
if self.use_gpt4:
result = await self.llm4.agenerate([contents])
else:
result = await self.llm35.agenerate([contents])
elif type == ModelType.GEMINI:
result = await self.llm_gemini.agenerate([contents])
return result.generations[0][0].text, ''
async def based_request(self, messages: List, db: VectorStore, index: str, type: ModelType) -> Tuple[str, str]:
query = messages[-1].get('content')
if query.startswith(tuple(self.special_prompt_prefix)):
if query.startswith(self.summarize_prompt_prefix):
return await self.summarize_based_request(index, query, type)
else:
return await self.summarize_based_request(index, query, type)
else:
return await self.conversational_based_request(messages, db, type)
async def conversational_based_request(self, messages: List, db: VectorStore, type: ModelType) -> Tuple[str, str]:
if type == ModelType.GPT:
llm = self.llm35
limit = self.gpt35_token*1.2
elif type == ModelType.GEMINI:
llm = self.llm_gemini
limit = self.gemini_token*1.2
_template = """้่ฟ็ปๅบ็ๅฏน่ฏๅๅฒๅ่ฟฝๅ ็้ฎ้ข, ๆนๅ่ฟฝๅ ็้ฎ้ขๆไธบไธไธช็ฌ็ซ็้ฎ้ข, ็จๅฏน่ฏๅๅฒ็่ฏญ่จใ
ๅฏน่ฏๅๅฒ:
```
{chat_history}
```
่ฟฝๅ ็้ฎ้ข: {question}
็ฌ็ซ็้ฎ้ข:"""
condense_question_prompt = PromptTemplate.from_template(_template)
system_template = """ๆ นๆฎไธๆๅ
ๅฎนๅ็ญ้ฎ้ขใๅฆๆๆ ๆณๅ็ญ๏ผๅๅคโๆไธ็ฅ้โ๏ผไธ่ฆ็ผ้ ็ญๆกใ
```
{context}
```
"""
combine_docs_chain_messages = [
SystemMessagePromptTemplate.from_template(system_template),
HumanMessagePromptTemplate.from_template("{question}"),
]
combine_docs_chain_prompt = ChatPromptTemplate.from_messages(
combine_docs_chain_messages)
qa = ConversationalRetrievalChain.from_llm(llm, db.as_retriever(search_type='mmr'), chain_type='stuff',
return_source_documents=True, return_generated_question=True,
max_tokens_limit=limit, condense_question_prompt=condense_question_prompt,
combine_docs_chain_kwargs={'prompt': combine_docs_chain_prompt})
chat_history = []
i = 1
while i < len(messages) - 1:
msg = messages[i]
role = msg.get('role')
query = msg.get('content')
i += 1
if role == 'user':
msg = messages[i]
role = msg.get('role')
if role == 'assistant':
answer = msg.get('content')
chat_history.append((query, answer))
i += 1
query = messages[-1].get('content')
content = {'question': query, 'chat_history': chat_history}
result = await qa.acall(content)
result_content = result['answer']
source_content = ''
try:
generated_question = result["generated_question"]
source_content = generated_question
source_docs = result['source_documents']
contexts = []
for doc in source_docs:
contexts.append(doc.page_content)
source_content += '\n\n' + '\n\n'.join(contexts)
except Exception as e:
traceback.print_exc()
if type == ModelType.GPT:
self.gptLogger.exception(e)
elif type == ModelType.GEMINI:
self.geminiLogger.exception(e)
return result_content, source_content
async def summarize_based_request(self, index: str, query: str, type: ModelType) -> Tuple[str, str]:
if type == ModelType.GPT:
llm = self.llm35
elif type == ModelType.GEMINI:
llm = self.llm_gemini
loader = TextLoader(f'{self.faiss_dir}{index}/{index}.txt',
autodetect_encoding=True)
data = loader.load()
docs = self.text_splitter.split_documents(data)
prompt = query[len(self.summarize_prompt_prefix):]
map_template = """่ฏฆ็ปๆป็ปไธๆๅๆฎต่ฝ็ๅ
ๅฎน๏ผๅฆๆๆ ๆณๆป็ปๅ้ๅคไธๆๅ
จ้จๅ
ๅฎน๏ผๅฟฝ็ฅๆ ๆณๆป็ป็้จๅ:
```
{text}
```
ไฝ ็ๅ็ญ:"""
if len(prompt.strip()) > 0:
            map_template = 'Based on the content below, ' + prompt + '; if you cannot answer, repeat the full content below, ignoring the parts that cannot be summarized' + """:
```
{text}
```
Your answer:"""
map_prompt = PromptTemplate(
template=map_template, input_variables=["text"])
combine_template = """็ฒพ่ฆๅฐ้ๅคไธๆๅ
จ้จๅ
ๅฎน๏ผๅฟฝ็ฅๆ ๆณๆป็ป็้จๅ:
```
{text}
```
ไฝ ็ๅ็ญ:"""
if len(prompt.strip()) > 0:
            combine_template = 'Based on the content below, explain in detail ' + prompt + '; if you cannot do so, repeat the full content below, ignoring the parts that cannot be summarized' + """:
```
{text}
```
Your answer:"""
combine_prompt = PromptTemplate(
template=combine_template, input_variables=["text"])
chain = load_summarize_chain(llm, chain_type="map_reduce",
map_prompt=map_prompt, combine_prompt=combine_prompt, token_max=self.gpt35_token)
result = await chain.arun(docs)
source_content = ''
try:
contexts = []
for doc in docs:
contexts.append(doc.page_content)
source_content = '\n\n'.join(contexts)
except Exception as e:
traceback.print_exc()
if type == ModelType.GPT:
self.gptLogger.exception(e)
elif type == ModelType.GEMINI:
self.geminiLogger.exception(e)
return result, source_content
async def file_base_request(self, messages: List, type: ModelType) -> Tuple[str, str]:
content = messages[0].get('content')
context = content[len(self.file_context_prefix):]
db = FAISS.load_local(self.faiss_dir + context, self.embeddings)
return await self.based_request(messages, db, context, type)
async def url_base_request(self, messages: List, type: ModelType) -> Tuple[str, str]:
content = messages[0].get('content')
url = content[len(self.url_context_prefix):]
hl = hashlib.md5()
hl.update(url.encode(encoding='utf-8'))
context = hl.hexdigest()
path = self.faiss_dir + context
if not os.path.exists(path):
db = await self.load_url(url, context)
else:
db = FAISS.load_local(path, self.embeddings)
return await self.based_request(messages, db, context, type)
async def bilibili_base_request(self, messages: List, type: ModelType) -> Tuple[str, str]:
content = messages[0].get('content')
url = content[len(self.bilibili_context_prefix):]
hl = hashlib.md5()
hl.update(url.encode(encoding='utf-8'))
context = hl.hexdigest()
path = self.faiss_dir + context
if not os.path.exists(path):
db = await self.load_bilibli(url, context)
if not db:
                return 'No subtitles have been generated for this video', ''
else:
db = FAISS.load_local(path, self.embeddings)
return await self.based_request(messages, db, context, type)
async def text_base_request(self, messages: List, type: ModelType) -> Tuple[str, str]:
content = messages[0].get('content')
text = content[len(self.text_context_prefix):]
hl = hashlib.md5()
hl.update(text.encode(encoding='utf-8'))
context = hl.hexdigest()
path = self.faiss_dir + context
if not os.path.exists(path):
data = [Document(page_content=text, metadata={})]
first_line = text[:text.index('\n')] if '\n' in text else text
db = await self.save_docs_to_db(data, context, first_line)
else:
db = FAISS.load_local(path, self.embeddings)
return await self.based_request(messages, db, context, type)
async def load_url(self, url: str, index: str) -> VectorStore:
loader = SeleniumURLLoader(urls=[url], headless=False)
data = loader.load()
db = await self.save_docs_to_db(data, index, url)
return db
async def load_bilibli(self, url: str, index: str) -> Optional[VectorStore]:
cookies = json.loads(
open('./bili_cookies_0.json', encoding='utf-8').read())
loader = BiliBiliLoader(video_urls=[url], cookies=cookies)
data = loader.load()
text = data[0].page_content
if (text == ''):
return None
db = await self.save_docs_to_db(data, index, url)
return db
async def save_docs_to_db(self, data: List[Document], index: str, source: str) -> VectorStore:
docs = self.text_splitter.split_documents(data)
loop = asyncio.get_event_loop()
db = await loop.run_in_executor(None, FAISS.from_documents, docs, self.embeddings)
db.save_local(self.faiss_dir + index)
self.embeddingLogger.info(f'{index} - {source}')
with open(f'{self.faiss_dir}{index}/{index}.txt', 'w', encoding='utf8') as txt:
for doc in data:
txt.write(doc.page_content)
txt.write('\n\n')
txt.close()
return db
    async def upload_file(self, file: UploadFile, index: Optional[str] = None):
        if index is None or len(index.strip()) <= 0:
hl = hashlib.md5()
while True:
content = await file.read(8192)
if not content:
await file.seek(0)
break
hl.update(content)
index = hl.hexdigest()
ext = file.filename.split('.')[-1]
name = self.file_dir + index + '.' + ext
with open(name, 'wb') as f:
content = await file.read()
f.write(content)
if ext == 'txt':
loader = TextLoader(name, autodetect_encoding=True)
elif ext == 'docx' or ext == 'dox':
loader = Docx2txtLoader(name)
elif ext == 'pdf':
loader = UnstructuredPDFLoader(name)
else:
return {'message': f'{file.filename} not support', 'index': ''}
data = loader.load()
await self.save_docs_to_db(data, index, file.filename)
| [
"้่ฟไธๆๅ
ๅฎน๏ผ่ฏฆ็ป่ฏดๆPLACEHOLDER๏ผๅฆๆๆ ๆณ่ฏดๆๅ้ๅคไธๆๅ
จ้จๅ
ๅฎน๏ผๅฟฝ็ฅๆ ๆณๆป็ป็้จๅ:\n```\n {text}\n ```\n ไฝ ็ๅ็ญ:",
"้่ฟไธๆๅ
ๅฎน๏ผPLACEHOLDER๏ผๅฆๆๆ ๆณๅ็ญๅ้ๅคไธๆๅ
จ้จๅ
ๅฎน๏ผๅฟฝ็ฅๆ ๆณๆป็ป็้จๅ:\n```\n {text}\n```\n ไฝ ็ๅ็ญ:",
"ๆ นๆฎไธๆๅ
ๅฎนๅ็ญ้ฎ้ขใๅฆๆๆ ๆณๅ็ญ๏ผๅๅคโๆไธ็ฅ้โ๏ผไธ่ฆ็ผ้ ็ญๆกใ\n```\n {context}\n```\n ",
"็ฒพ่ฆๅฐ้ๅคไธๆๅ
จ้จๅ
ๅฎน๏ผๅฟฝ็ฅๆ ๆณๆป็ป็้จๅ:\n```\n {text}\n```\n ไฝ ็ๅ็ญ:",
":s",
"่ฏฆ็ปๆป็ปไธๆๅๆฎต่ฝ็ๅ
ๅฎน๏ผๅฆๆๆ ๆณๆป็ปๅ้ๅคไธๆๅ
จ้จๅ
ๅฎน๏ผๅฟฝ็ฅๆ ๆณๆป็ป็้จๅ:\n```\n {text}\n```\n ไฝ ็ๅ็ญ:",
"{question}",
"้่ฟ็ปๅบ็ๅฏน่ฏๅๅฒๅ่ฟฝๅ ็้ฎ้ข, ๆนๅ่ฟฝๅ ็้ฎ้ขๆไธบไธไธช็ฌ็ซ็้ฎ้ข, ็จๅฏน่ฏๅๅฒ็่ฏญ่จใ\n ๅฏน่ฏๅๅฒ:\n```\n {chat_history}\n```\n ่ฟฝๅ ็้ฎ้ข: {question}\n ็ฌ็ซ็้ฎ้ข:"
] |
2024-01-10 | Artessay/InsightSupporter | chatGPT~turn.py | import os
import openai
# Set up OpenAI API client
openai.api_key = os.getenv("OPENAI_API_KEY")
model_engine = "text-davinci-002"
# Define function to retrieve OpenAI response to customer query
def get_openai_response(query, conversation_context=None):
if conversation_context is None:
conversation_context = []
prompt = "Customer: " + query + "\n" + "\n".join(conversation_context) + "\nChatbot:"
response = openai.Completion.create(
engine=model_engine,
prompt=prompt,
max_tokens=1024,
n=1,
stop=None,
temperature=0.7,
)
message = response.choices[0].text.strip()
return message
# Implement self-service customer service loop
print("Welcome to our self-service customer service.")
conversation_context = []
while True:
query = input("Customer: ")
response = get_openai_response(query, conversation_context)
conversation_context.append("Customer: " + query)
conversation_context.append("Chatbot: " + response)
print("Chatbot:", response)
| [
"\n",
"Customer: ",
"\nChatbot:",
"Customer: \" + query + \"\\n\" + \"\\n\".join(conversation_context) + \"\\nChatbot:"
] |
2024-01-10 | Artessay/InsightSupporter | chatGPT~pipeline.py | import os
import openai
openai.api_key = os.getenv("OPENAI_API_KEY")
print("if you want to stop the conversation, please input 'quit'") #ๆ็คบๆณ็ปๆญข่ๅคฉๆถ่พๅ
ฅ"quit"
def chat(prompt): #ๅฎไนไธไธชๅฝๆฐ๏ผไปฅไพฟๅ้ขๅๅค่ฐ็จ
try:
response = openai.Completion.create(
model="text-davinci-003",
prompt= prompt,
temperature=0.9,
max_tokens=2500,
top_p=1,
frequency_penalty=0.0,
presence_penalty=0.6,
stop=[" Human:", " AI:"]
)
answer = response["choices"][0]["text"].strip()
return answer
except Exception as exc:
#print(exc) #ๅฆๆ้่ฆๆๅฐๅบๆ
้ๅๅ ๅฏไปฅไฝฟ็จๆฌ่กไปฃ็ ๏ผๅฆๆๆณๅขๅผบ็พๆ๏ผๅฐฑๅฑ่ฝๅฎใ
return "broken"
text = "" #่ฎพ็ฝฎไธไธชๅญ็ฌฆไธฒๅ้
turns = [] #่ฎพ็ฝฎไธไธชๅ่กจๅ้๏ผturnๆๅฏน่ฏๆถ็่ฏ่ฝฎ
while True: #่ฝๅค่ฟ็ปญๆ้ฎ
question = input()
    if len(question.strip()) == 0: # If the input is empty, remind the user to enter a question
print("please input your question")
elif question == "quit": #ๅฆๆ่พๅ
ฅไธบ"quit"๏ผ็จๅบ็ปๆญข
print("\nAI: See You Next Time!")
break
else:
prompt = text + "\nHuman: " + question
result = chat(prompt)
while result == "broken": #้ฎไธๅบ็ปๆไผ่ชๅจๅๅคๆไบคไธไธไธช้ฎ้ข๏ผ็ดๅฐๆ็ปๆไธบๆญขใ
print("please wait...")
            result = chat(prompt) # Resubmit the question
else:
            turns += [question] + [result] # Only by accumulating turns like this can follow-up questions be understood with context
print(result)
        if len(turns) <= 10: # To keep the program from breaking by exceeding the length limit, only the last 10 turns of context are submitted.
text = " ".join(turns)
else:
text = " ".join(turns[-10:]) | [
"PLACEHOLDER\nHuman: PLACEHOLDER"
] |
2024-01-10 | test-dan-run/state-spaces | src~dataloaders~lm.py | # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import logging
import os
import re
import subprocess
from pathlib import Path
from typing import Optional, List, Tuple
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import functools
from omegaconf import DictConfig
from pytorch_lightning import LightningDataModule
from src.utils import distributed
import src.utils.train
log = src.utils.train.get_logger(__name__)
from src.dataloaders.datasets import SequenceDataset, default_data_path
from src.dataloaders.vocabulary import OpenAIVocab, Vocab
import src.utils as utils
# from tasks.legacy.tasks import LMPerplexity, LMBPC
# TODO: create a package so we don't have to mess with sys.path?
project_root = Path(__file__).parent.parent.absolute()
data_path = Path(__file__).absolute().parent / 'data'
import sys
sys.path.insert(0, str(project_root))
class LMOrderedIterator:
def __init__(
self,
data,
batch_size,
l_max,
batch_first=True,
# device="cpu",
# mem_len=None,
# ext_len=None,
# warmup=True,
n_context=1,
n_epoch_double=0,
pad_last=False,
roll_seed=None, # roll data based on seed
limit_tokens=1.0, # reduce tokens; useful for debugging last batch edge cases
):
"""
data -- LongTensor -- the LongTensor is strictly ordered
pad_last: whether to pad the last sequence in the batch so that all sequences
have the same length (l_max).
"""
self.raw_data = data
self.batch_size = batch_size
self.l_max = l_max
self.batch_first = batch_first
# self.ext_len = ext_len if ext_len is not None else 0
# self.mem_len = mem_len
# self.warmup = warmup
self.pad_last = pad_last
self.roll_seed = roll_seed
self.n_context = n_context
self.n_epoch_double = n_epoch_double
# self.device = device
# self.last_iter = None # AG: this isn't in original repo and doesn't appear to be used
self.epoch = -1
# DDP
self.world_size = distributed.get_world_size()
self.rank = distributed.get_rank()
if limit_tokens is not None and 0.0 < limit_tokens < 1.0:
l_data = int(math.floor(data.size(-1) * limit_tokens))
self.raw_data = self.raw_data[:l_data]
self.process()
def process(self):
""" Process the data. All logic involving sequence length and batch size should go here """
assert self.l_max % self.n_context == 0
self.l_inc = self.l_max // self.n_context
global_batch_size = self.world_size * self.batch_size
# Work out how cleanly we can divide the dataset into batch_size parts.
n_step = self.raw_data.size(-1) // global_batch_size
# Trim off any extra elements that wouldn't cleanly fit (remainders).
self.data = self.raw_data[: n_step * global_batch_size]
# Evenly divide the data across the batches.
self.data = self.data.view(global_batch_size, -1).contiguous().pin_memory() # (global_batch_size, length)
# Partition data for DistributedDataParallel
self.data = self.data.chunk(self.world_size, dim=0)[self.rank]
# Number of mini-batches
# Need to subtract 1 because target is data shifted by 1
self.n_batch = (self.data.size(-1) - 1 + self.l_inc - 1) // self.l_inc
def roll(self, seed):
rng = torch.Generator()
rng.manual_seed(seed)
for i in range(self.data.size(0)):
row = self.data[i, :]
shift = torch.randint(0, self.data.size(-1), (1,), generator=rng)
row = torch.cat((row[shift:], row[:shift]))
self.data[i, :] = row
def get_batch(self, i, l_max=None):
""" Get batch starting at token index i """
# if l_max is None: l_max = self.l_max
# seq_len = min(l_max, self.data.size(0) - 1 - i)
end_idx = min(i + self.l_inc, self.data.size(-1)-1)
# beg_idx = max(0, i - self.ext_len)
beg_idx = max(0, end_idx - self.l_max)
seq_len = end_idx - i
data = self.data[..., beg_idx:end_idx] # .to(self.device, non_blocking=True)
target = self.data[..., i+1 : end_idx+1] # .to( self.device, non_blocking=True)
if self.pad_last and seq_len < self.l_inc:
data = F.pad(data, (0, self.l_inc - seq_len)) # (batch_size, l_inc)
target = F.pad(target, (0, self.l_inc - seq_len))
seq_len = self.l_inc
if not self.batch_first:
data = data.transpose(0, 1).contiguous() # (n_batch, l_sequence)
target = target.transpose(0, 1).contiguous()
# [21-09-19] Unsqueeze the last dimension so that shape is always (n_batch, l_seq, d_input)
data = data
target = target
return data, target, seq_len
def get_fixlen_iter(self, start=0): # AG: Don't see start ever used?
if start != 0:
start += self.l_max
for i in range(start, self.data.size(-1) - 1, self.l_inc):
self.last_iter = i
yield self.get_batch(i)
def get_varlen_iter(self, start=0, std=5, min_len=5, max_deviation=3):
l_max = self.l_max + max_deviation * std
i = start
while True:
l_max = self.l_max if np.random.random() < 0.95 else self.l_max / 2.0
l_max = min(l_max, max(min_len, int(np.random.normal(l_max, std))))
data, target, seq_len = self.get_batch(i, l_max) # AG: this doesn't appear to work...
i += seq_len
yield data, target, seq_len
if i >= self.data.size(-1) - 2:
break
def __iter__(self):
self.epoch += 1
        if self.n_epoch_double > 0 and self.epoch > 0 and self.epoch % self.n_epoch_double == 0:
if self.batch_size > 1:
log.info(f"LM Iterator doubling length from {self.l_max} to {self.l_max*2}")
self.l_max *= 2
self.batch_size //= 2
self.process()
if self.roll_seed is not None:
self.roll(self.roll_seed + self.epoch)
return self.get_fixlen_iter()
def __len__(self):
return self.n_batch
class LMShuffledIterator(object):
def __init__(
self, data, batch_size, l_max, device="cpu", ext_len=None, shuffle=False
):
"""
data -- list[LongTensor] -- there is no order among the LongTensors
"""
self.data = data
self.batch_size = batch_size
self.l_max = l_max
self.ext_len = ext_len if ext_len is not None else 0
self.device = device
self.shuffle = shuffle
def get_sent_stream(self):
# index iterator
epoch_indices = (
np.random.permutation(len(self.data))
if self.shuffle
else np.array(range(len(self.data)))
)
# sentence iterator
for idx in epoch_indices:
yield self.data[idx]
def stream_iterator(self, sent_stream):
# streams for each data in the batch
streams = [None] * self.batch_size
data = torch.LongTensor(self.l_max, self.batch_size)
target = torch.LongTensor(self.l_max, self.batch_size)
n_retain = 0
while True:
# data : [n_retain+l_max x batch_size]
# target : [l_max x batch_size]
data[n_retain:].fill_(-1)
target.fill_(-1)
valid_batch = True
for i in range(self.batch_size):
n_filled = 0
try:
while n_filled < self.l_max:
if streams[i] is None or len(streams[i]) <= 1:
streams[i] = next(sent_stream)
# number of new tokens to fill in
n_new = min(len(streams[i]) - 1, self.l_max - n_filled)
# first n_retain tokens are retained from last batch
data[
n_retain + n_filled : n_retain + n_filled + n_new,
i,
] = streams[i][:n_new]
target[n_filled : n_filled + n_new, i] = streams[i][
1 : n_new + 1
]
streams[i] = streams[i][n_new:]
n_filled += n_new
except StopIteration:
valid_batch = False
break
if not valid_batch:
return
data = data.to(self.device)
target = target.to(self.device)
yield data, target, self.l_max
n_retain = min(data.size(0), self.ext_len)
if n_retain > 0:
data[:n_retain] = data[-n_retain:]
data.resize_(n_retain + self.l_max, data.size(1))
def __iter__(self):
# sent_stream is an iterator
sent_stream = self.get_sent_stream()
for batch in self.stream_iterator(sent_stream):
yield batch
class LMMultiFileIterator(LMShuffledIterator):
def __init__(
self,
paths,
vocab,
batch_size,
l_max,
device="cpu",
ext_len=None,
shuffle=False,
):
self.paths = paths
self.vocab = vocab
self.batch_size = batch_size
self.l_max = l_max
self.ext_len = ext_len if ext_len is not None else 0
self.device = device
self.shuffle = shuffle
def get_sent_stream(self, path):
sents = self.vocab.encode_file(path, add_double_eos=True)
if self.shuffle:
np.random.shuffle(sents)
sent_stream = iter(sents)
return sent_stream
def __iter__(self):
if self.shuffle:
np.random.shuffle(self.paths)
for path in self.paths:
# sent_stream is an iterator
sent_stream = self.get_sent_stream(path)
for batch in self.stream_iterator(sent_stream):
yield batch
# class WikiText2(LightningDataModule):
class WikiText2(SequenceDataset):
_name_ = "wt2"
# Vocab arguments
vocab_kwargs = {"special": ["<eos>"], "lower_case": False}
encode_kwargs = {"ordered": True}
# Embedding arguments (adaptive softmax / word embeddings)
# default_task = {
# 'adaptive': False,
# 'div_val': 1,
# 'cutoffs': [],
# 'tie_weights': False,
# 'tie_projs': [False],
# }
@property
def default_task(self):
return {
'_target_': 'tasks.tasks.LMTask',
'tied': False,
'rescale': True,
# init_cfg,
'metrics': ['ppl'],
'init_cfg': {
'init': 'normal', # Parameter initializer to use
'init_range': 0.01, # Parameters initialized by U(-init_range, init_range)
'init_std': 0.02, # Parameters initialized by N(0, init_std)
'proj_init_std': 0.01, # Separate std for projection params
}
}
# Task class / constructor
# task_cls = LMPerplexity
# @property
# def l_output(self):
# return self.l_max
init_defaults = {
# Dataset arguments
'l_max': 512,
'bpe': False,
'roll_seed': 42,
'test_split': True,
# Task / Embedding arguments
# 'task': None,
}
@property
def n_tokens(self):
return len(self.vocab)
# def __init__(
# self,
# data_dir,
# d_embed, init_cfg, task=None, # Task / Embedding arguments
# bpe=False,
# l_max=None,
# pad_last=False,
# roll_seed=42,
# eval={
# 'l_max': None,
# 'pad_last': False,
# 'roll_seed': None,
# },
# **kwargs,
# # TODO kwargs is here to absorb things like 'num_workers' and 'pin_memory' which should really be part of every dataset
# ):
# super().__init__()
# if data_dir is None: self.data_dir = Path(data_dir) / self._name_
# # self.d_embed = d_embed
# # self.init_cfg = init_cfg
# if bpe:
# self.vocab = OpenAIVocab()
# else:
# self.vocab = Vocab(**self.vocab_kwargs)
# # Loader arguments
# assert l_max is not None
# self.l_max = l_max
# self.pad_last = pad_last
# self.roll_seed = roll_seed
# self.eval = DictConfig(eval)
# if self.eval.l_max is None: self.eval.l_max = self.l_max
# if task is not None:
# self.task.update(task)
def prepare_data(self):
# [21-09-23] probably broken
if not self.data_dir.exists():
subprocess.run(
[
str(project_root / "data" / "getdata.sh"),
self._name_,
str(self.data_dir.parent.absolute()),
],
check=True,
)
def setup(self, stage=None): # [21-09-10 AG]: TODO shouldn't this tokenization happen in the prepare_data? since we're caching it it doesn't really matter, but still
if self.data_dir is None: self.data_dir = default_data_path / self._name_
# self.d_embed = d_embed
# self.init_cfg = init_cfg
if self.bpe:
self.vocab = OpenAIVocab()
else:
self.vocab = Vocab(**self.vocab_kwargs)
# Loader arguments
if not self._load_from_cache():
logging.info(f"Producing dataset {self._name_}...")
self._vocab_count()
self.vocab.build_vocab()
self.train = self.vocab.encode_file(
str(self.data_dir / "train.txt"), **self.encode_kwargs
)
self.valid = self.vocab.encode_file(
str(self.data_dir / "valid.txt"), **self.encode_kwargs
)
self.test = self.vocab.encode_file(
str(self.data_dir / "test.txt"), **self.encode_kwargs
)
self._save_to_cache()
# No test set if specified
if not self.test_split:
self.test = None
# Define task
print("Vocab size:", len(self.vocab))
# self.task = self.task_cls(len(self.vocab), self.d_embed, init_cfg=self.init_cfg, **self.task_args)
# self.d_input = self.d_output = self.d_embed
def _vocab_count(self):
self.vocab.count_file(self.data_dir / "train.txt")
self.vocab.count_file(self.data_dir / "valid.txt")
self.vocab.count_file(self.data_dir / "test.txt")
def _save_to_cache(self):
cache_path = self.data_dir / f"cache.pt" # TODO name could include vocab_kwargs to disambiguate
with distributed.sync_workers() as rank:
if rank == 0:
try:
torch.save(
(self.vocab, self.train, self.valid, self.test),
cache_path,
)
logging.info(f"Saved dataset to {cache_path}...")
except:
pass
def _load_from_cache(self):
cache_path = self.data_dir / f"cache.pt"
if cache_path.exists():
logging.info("Loading cached dataset...")
self.vocab, self.train, self.valid, self.test = torch.load(
cache_path
)
return True
else:
return False
def train_dataloader(self, eval=None, **kwargs):
# TODO kwargs absorbs num_workers
return LMOrderedIterator(
self.train,
roll_seed=self.roll_seed,
**kwargs,
)
# def val_dataloader(self, batch_size, **kwargs):
def _eval_dataloader(self, dataset, eval=None, **loader_args):
if dataset is None: return None
# Make eval a list of dictionaries
if eval is None: eval = {}
if not utils.is_list(eval):
eval = [eval]
# Each eval setting overrides the train setting
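        # e.g. (hypothetical values) eval=[{"l_max": 1024}] combined with train loader args
        # {"batch_size": 16} yields an eval loader with l_max=1024 and batch_size=16.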
for eval_args in eval:
for k in loader_args:
if eval_args.get(k, None) is None:
eval_args[k] = loader_args[k]
print("eval loader:", eval_args)
loaders = [LMOrderedIterator(dataset, **eval_args) for eval_args in eval]
if len(loaders) == 1: return loaders[0]
return loaders
def val_dataloader(self, **kwargs):
return self._eval_dataloader(self.valid, **kwargs)
# return LMOrderedIterator(
# self.valid,
# batch_size,
# **self.eval,
# )
# for k in train_args:
# if eval_args.get(k, None) is None:
# eval_args[k] = v
# return LMOrderedIterator(self.valid, **eval_args)
def test_dataloader(self, **kwargs):
return self._eval_dataloader(self.test, **kwargs)
class WikiText103(WikiText2):
_name_ = "wt103"
@property
def default_task(self):
return {
# 'adaptive': True,
'_target_': 'tasks.tasks.AdaptiveLMTask',
'div_val': 1,
'cutoffs': [19997, 39997, 199997],
'tie_weights': True,
'tie_projs': [False] + [True, True, True], # * len(cutoffs),
'init_cfg': {
'init': 'normal', # Parameter initializer to use
'init_range': 0.01, # Parameters initialized by U(-init_range, init_range)
'init_std': 0.02, # Parameters initialized by N(0, init_std)
'proj_init_std': 0.01, # Separate std for projection params
}
}
def _vocab_count(self):
print(self.data_dir)
self.vocab.count_file(self.data_dir / "train.txt")
class PennTreeBank(WikiText2):
_name_ = "ptb"
vocab_kwargs = {"special": ["<eos>"], "lower_case": True}
# task_cls = LMBPC
class EnWik8(WikiText2):
_name_ = "enwik8"
vocab_kwargs = {}
encode_kwargs = {"ordered": True, "add_eos": False}
# task_cls = LMBPC
@property
def default_task(self):
return {
'_target_': 'tasks.tasks.LMTask',
'tied': False,
'rescale': True,
# init_cfg,
'metrics': ['ppl'],
# 'init_cfg': {
# 'init': 'normal', # Parameter initializer to use
# 'init_range': 0.01, # Parameters initialized by U(-init_range, init_range)
# 'init_std': 0.02, # Parameters initialized by N(0, init_std)
# 'proj_init_std': 0.01, # Separate std for projection params
# }
}
class Text8(EnWik8):
_name_ = "text8"
# task_cls = LMBPC
class LM1B(WikiText2):
# [21-09-08 AG]: this looks very out of date, the __init__ function should be inherited
_name_ = "lm1b"
vocab_kwargs = {"special": [], "lower_case": False}
cutoffs = [59997, 99997, 639997]
tie_projs = [False] + [False] * len(cutoffs)
def __init__(self, data_dir, bpe=False, *args, **kwargs):
LightningDataModule.__init__(self)
self.data_dir = Path(data_dir)
# self.vocab_type = vocab
if bpe:
self.vocab = OpenAIVocab()
else:
self.vocab = Vocab(
vocab_file=self.data_dir / "1b_word_vocab.txt",
**self.vocab_kwargs,
)
def setup(self, stage=None):
if not self._load_from_cache():
logging.info(f"Producing dataset {self._name_}...")
# the vocab will load from file when build_vocab() is called
self.vocab.build_vocab()
train_paths = list(
(
self.data_dir
/ "1-billion-word-language-modeling-benchmark-r13output"
/ "training-monolingual.tokenized.shuffled"
).glob("news.en-*")
)
self.train = train_paths
self.valid = self.vocab.encode_file(
str(self.data_dir / "valid.txt"),
ordered=False,
add_double_eos=True,
)
self.test = self.vocab.encode_file(
str(self.data_dir / "test.txt"),
ordered=False,
add_double_eos=True,
)
self._save_to_cache()
def train_dataloader(self, *args, **kwargs):
kwargs["shuffle"] = True
return LMMultiFileIterator(self.train, self.vocab, *args, **kwargs)
def val_dataloader(self, *args, **kwargs):
return LMShuffledIterator(self.valid, *args, **kwargs)
def test_dataloader(self, *args, **kwargs):
return LMShuffledIterator(self.test, *args, **kwargs)
class Corpus(object):
# AG: only used in get_lm_corpus which is only called in the unit test
def __init__(self, path, dataset, vocab, *args, **kwargs):
self.dataset = dataset
if vocab == "word":
self.vocab = Vocab(*args, **kwargs)
elif vocab == "bpe":
self.vocab = OpenAIVocab()
else:
raise RuntimeError("Unsupported vocab")
if self.dataset in ["ptb", "wt2", "enwik8", "text8"]:
self.vocab.count_file(os.path.join(path, "train.txt"))
self.vocab.count_file(os.path.join(path, "valid.txt"))
self.vocab.count_file(os.path.join(path, "test.txt"))
elif self.dataset == "wt103":
self.vocab.count_file(os.path.join(path, "train.txt"))
elif self.dataset == "lm1b":
train_path_pattern = os.path.join(
path,
"1-billion-word-language-modeling-benchmark-r13output",
"training-monolingual.tokenized.shuffled",
"news.en-*",
)
train_paths = glob.glob(train_path_pattern)
# the vocab will load from file when build_vocab() is called
self.vocab.build_vocab()
if self.dataset in ["ptb", "wt2", "wt103"]:
self.train = self.vocab.encode_file(
os.path.join(path, "train.txt"), ordered=True
)
self.valid = self.vocab.encode_file(
os.path.join(path, "valid.txt"), ordered=True
)
self.test = self.vocab.encode_file(
os.path.join(path, "test.txt"), ordered=True
)
elif self.dataset in ["enwik8", "text8"]:
self.train = self.vocab.encode_file(
os.path.join(path, "train.txt"), ordered=True, add_eos=False
)
self.valid = self.vocab.encode_file(
os.path.join(path, "valid.txt"), ordered=True, add_eos=False
)
self.test = self.vocab.encode_file(
os.path.join(path, "test.txt"), ordered=True, add_eos=False
)
elif self.dataset == "lm1b":
self.train = train_paths
self.valid = self.vocab.encode_file(
os.path.join(path, "valid.txt"),
ordered=False,
add_double_eos=True,
)
self.test = self.vocab.encode_file(
os.path.join(path, "test.txt"),
ordered=False,
add_double_eos=True,
)
def get_iterator(self, split, *args, **kwargs):
if split == "train":
if self.dataset in ["ptb", "wt2", "wt103", "enwik8", "text8"]:
data_iter = LMOrderedIterator(self.train, *args, **kwargs)
elif self.dataset == "lm1b":
kwargs["shuffle"] = True
data_iter = LMMultiFileIterator(
self.train, self.vocab, *args, **kwargs
)
elif split in ["valid", "test"]:
data = self.valid if split == "valid" else self.test
if self.dataset in ["ptb", "wt2", "wt103", "enwik8", "text8"]:
data_iter = LMOrderedIterator(data, *args, **kwargs)
elif self.dataset == "lm1b":
data_iter = LMShuffledIterator(data, *args, **kwargs)
return data_iter
def get_lm_corpus(data_dir, name, vocab):
if vocab == "word":
fn = os.path.join(data_dir, "cache.pt")
elif vocab == "bpe":
fn = os.path.join(data_dir, "cache.pt.bpe")
else:
raise RuntimeError("Unsupported vocab")
if os.path.exists(fn):
logging.info("Loading cached dataset...")
corpus = torch.load(fn)
else:
logging.info("Producing dataset {}...".format(name))
kwargs = {}
if name in ["wt103", "wt2"]:
kwargs["special"] = ["<eos>"]
kwargs["lower_case"] = False
elif name == "ptb":
kwargs["special"] = ["<eos>"]
kwargs["lower_case"] = True
elif name == "lm1b":
kwargs["special"] = []
kwargs["lower_case"] = False
kwargs["vocab_file"] = os.path.join(data_dir, "1b_word_vocab.txt")
elif name in ["enwik8", "text8"]:
pass
corpus = Corpus(data_dir, name, vocab, **kwargs)
# with distributed.sync_workers() as rank:
# if rank == 0:
# torch.save(corpus, fn)
return corpus
def tokenize_raw(text, lang="en"):
# AG: Not used?
import sacremoses
mt = sacremoses.MosesTokenizer(lang)
text = mt.tokenize(text, return_str=True)
text = re.sub(r""", '"', text)
text = re.sub(r"'", "'", text)
text = re.sub(r"(\d)\.(\d)", r"\1 @.@ \2", text)
text = re.sub(r"(\d),(\d)", r"\1 @,@ \2", text)
text = re.sub(r"(\w)-(\w)", r"\1 @-@ \2", text)
return text
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="unit test")
parser.add_argument(
"--datadir",
type=str,
default="../data/text8",
help="location of the data corpus",
)
parser.add_argument(
"--dataset",
type=str,
default="text8",
choices=["ptb", "wt2", "wt103", "lm1b", "enwik8", "text8"],
help="dataset name",
)
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
corpus = get_lm_corpus(args.datadir, args.dataset, vocab="word")
logging.info("Vocab size : {}".format(len(corpus.vocab.idx2sym)))
| [] |
2024-01-10 | bongkyunSON/test | hybrid_gpt.py | from fastapi import FastAPI
from fastapi.responses import JSONResponse
from pydantic import BaseModel
from typing import List
import hashlib
import hmac
import base64
import time
import requests
import json
import openai
import os
from dotenv import load_dotenv
class gptMessangeSender:
# load environment
load_dotenv()
# gpt keys
openai.organization = os.getenv("OPENAI_ORG_KEY")
openai.api_key = os.getenv("OPENAI_API_KEY")
def GPT(msg):
response = openai.ChatCompletion.create(
model="gpt-4-1106-preview",
messages=[{"role":"system", "content":"๋๋ ๋์ ๋ชจ๋ ๋ถ์ผ์ ์ ์๋์ด์ผ ์น์ ํ ๋งํฌ๋ก ๋๋ตํด์ฃผ๊ณ 200์ ์ด๋ด๋ก ๋๋ตํด์ค"}, {"role":"user", "content":msg}],
max_tokens=500,
temperature=0.5
)
response = response.to_dict_recursive()
response = response["choices"][0]["message"]["content"]
return response
class ChatbotMessageSender:
# load environment
load_dotenv()
# chatbot api gateway url & secret key
ep_path = os.getenv("CLOVA_PATH")
secret_key = os.getenv("CLOVA_SECRET_KEY")
def req_message_send(self, msg):
timestamp = self.get_timestamp()
request_body = {
'version': 'v2',
'userId': 'tester',
'timestamp': timestamp,
'bubbles': [
{
'type': 'text',
'data': {
'description': msg
}
}
],
'event': 'send'
}
## Request body
encode_request_body = json.dumps(request_body).encode('UTF-8')
## make signature
signature = self.make_signature(self.secret_key, encode_request_body)
## headers_clova
custom_headers = {
'Content-Type': 'application/json;UTF-8',
'X-NCP-CHATBOT_SIGNATURE': signature
}
## POST Request
response = requests.post(headers=custom_headers, url=self.ep_path, data=encode_request_body)
return response
@staticmethod
def get_timestamp():
timestamp = int(time.time() * 1000)
return timestamp
@staticmethod
def make_signature(secret_key, request_body):
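        # Sign the raw JSON request body with HMAC-SHA256 using the chatbot secret key and
        # base64-encode the digest; the result is sent in the X-NCP-CHATBOT_SIGNATURE header.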
secret_key_bytes = bytes(secret_key, 'UTF-8')
signing_key = base64.b64encode(hmac.new(secret_key_bytes, request_body, digestmod=hashlib.sha256).digest())
return signing_key
class Input_msg(BaseModel):
msg : str
app = FastAPI()
@app.post("/chatbot")
async def hybrid_chatbot(input_text: Input_msg):
res = ChatbotMessageSender().req_message_send(msg=input_text.msg)
if(res.text.find('"value":"canNotHelpMsg"') > 0 or res.text.find('"value":"similarAnswer"') > 0 or res.text.find('"value":"unknownMsg"') > 0):
input_text = input_text.dict()
gpt_answer = gptMessangeSender.GPT(msg=input_text["msg"])
gpt_answer = gpt_answer.replace("/n:"," ")
return {"msg": gpt_answer}
elif (res.text.find('"imageUrl":') > 0):
        ### If the response is an image ###
reponse = json.loads(res.text)
if "imageUrl" in reponse:
image_url = reponse["imageUrl"]
print("imageUrl:", image_url)
# "description" ํค์ ๊ฐ์ ์ ์ฅํ ๋ฆฌ์คํธ
korean_descriptions = []
        # Extract the values of the "description" key from the JSON data
def extract_description(data):
if isinstance(data, dict):
for key, value in data.items():
if key == "description" and isinstance(value, str):
korean_descriptions.append(value)
else:
extract_description(value)
elif isinstance(data, list):
for item in data:
extract_description(item)
        # Extract the values of the "description" key from the JSON data
extract_description(reponse)
for description in korean_descriptions:
description
#return ('\n'.join(korean_descriptions))
korean_descriptions = '\n'.join(korean_descriptions)
return JSONResponse({"msg": korean_descriptions})
    ### If the response has no image ###
else:
reponse = json.loads(res.text)
# "description" ํค์ ๊ฐ์ ์ ์ฅํ ๋ฆฌ์คํธ
korean_descriptions = []
        # Recursively traverse the JSON data and extract the values of the "description" key
def extract_description(data):
if isinstance(data, dict):
for key, value in data.items():
if key == "description" and isinstance(value, str):
korean_descriptions.append(value)
else:
extract_description(value)
elif isinstance(data, list):
for item in data:
extract_description(item)
        # Extract the values of the "description" key from the JSON data
extract_description(reponse)
        # Print the extracted "description" values
for description in korean_descriptions:
korean_descriptions
#return ('\n'.join(korean_descriptions))
korean_descriptions = '\n'.join(korean_descriptions)
return JSONResponse({"msg": korean_descriptions})
| [
"๋๋ ๋์ ๋ชจ๋ ๋ถ์ผ์ ์ ์๋์ด์ผ ์น์ ํ ๋งํฌ๋ก ๋๋ตํด์ฃผ๊ณ 200์ ์ด๋ด๋ก ๋๋ตํด์ค"
] |
2024-01-10 | GerevAI/gerev | app~indexing~index_documents.py | import logging
import re
from enum import Enum
from typing import List, Optional
from data_source.api.basic_document import BasicDocument, FileType
from db_engine import Session
from indexing.bm25_index import Bm25Index
from indexing.faiss_index import FaissIndex
from models import bi_encoder
from parsers.pdf import split_PDF_into_paragraphs
from paths import IS_IN_DOCKER
from schemas import Document, Paragraph
from langchain.schema import Document as PDFDocument
logger = logging.getLogger(__name__)
def get_enum_value_or_none(enum: Optional[Enum]) -> Optional[str]:
if enum is None:
return None
return enum.value
class Indexer:
@staticmethod
def basic_to_document(document: BasicDocument, parent: Document = None) -> Document:
paragraphs = Indexer._split_into_paragraphs(document.content)
return Document(
data_source_id=document.data_source_id,
id_in_data_source=document.id_in_data_source,
type=document.type.value,
file_type=get_enum_value_or_none(document.file_type),
status=document.status,
is_active=document.is_active,
title=document.title,
author=document.author,
author_image_url=document.author_image_url,
location=document.location,
url=document.url,
timestamp=document.timestamp,
paragraphs=[
Paragraph(content=content)
for content in paragraphs
],
parent=parent
)
@staticmethod
def index_documents(documents: List[BasicDocument]):
logger.info(f"Indexing {len(documents)} documents")
ids_in_data_source = [document.id_in_data_source for document in documents]
with Session() as session:
documents_to_delete = session.query(Document).filter(
Document.id_in_data_source.in_(ids_in_data_source)).all()
if documents_to_delete:
logging.info(f'removing documents that were updated and need to be re-indexed.')
Indexer.remove_documents(documents_to_delete, session)
for document in documents_to_delete:
# Currently bulk deleting doesn't cascade. So we need to delete them one by one.
# See https://stackoverflow.com/a/19245058/3541901
session.delete(document)
session.commit()
with Session() as session:
db_documents = []
for document in documents:
# Split the content into paragraphs that fit inside the database
paragraphs = Indexer._split_into_paragraphs(document.content)
# Create a new document in the database
db_document = Indexer.basic_to_document(document)
children = []
if document.children:
children = [Indexer.basic_to_document(child, db_document) for child in document.children]
db_documents.append(db_document)
db_documents.extend(children)
# Save the documents to the database
session.add_all(db_documents)
session.commit()
# Create a list of all the paragraphs in the documents
logger.info(f"Indexing {len(db_documents)} documents => {len(paragraphs)} paragraphs")
paragraphs = [paragraph for document in db_documents for paragraph in document.paragraphs]
if len(paragraphs) == 0:
logger.info(f"No paragraphs to index")
return
paragraph_ids = [paragraph.id for paragraph in paragraphs]
paragraph_contents = [Indexer._add_metadata_for_indexing(paragraph) for paragraph in paragraphs]
logger.info(f"Updating BM25 index...")
Bm25Index.get().update()
if len(paragraph_contents) == 0:
return
# Encode the paragraphs
show_progress_bar = not IS_IN_DOCKER
logger.info(f"Encoding with bi-encoder...")
embeddings = bi_encoder.encode(paragraph_contents, convert_to_tensor=True, show_progress_bar=show_progress_bar)
# Add the embeddings to the index
logger.info(f"Updating Faiss index...")
FaissIndex.get().update(paragraph_ids, embeddings)
logger.info(f"Finished indexing {len(documents)} documents => {len(paragraphs)} paragraphs")
@staticmethod
def _split_into_paragraphs(text, minimum_length=256):
"""
split into paragraphs and batch small paragraphs together into the same paragraph
"""
if text is None:
return []
paragraphs = []
current_paragraph = ''
for paragraph in re.split(r'\n\s*\n', text):
if len(current_paragraph) > 0:
current_paragraph += ' '
current_paragraph += paragraph.strip()
if len(current_paragraph) > minimum_length:
paragraphs.append(current_paragraph)
current_paragraph = ''
if len(current_paragraph) > 0:
paragraphs.append(current_paragraph)
return paragraphs
@staticmethod
def _add_metadata_for_indexing(paragraph: Paragraph) -> str:
result = paragraph.content
if paragraph.document.title is not None:
result += '; ' + paragraph.document.title
return result
@staticmethod
def remove_documents(documents: List[Document], session=None):
logger.info(f"Removing {len(documents)} documents")
# Get the paragraphs from the documents
db_paragraphs = [paragraph for document in documents for paragraph in document.paragraphs]
# Remove the paragraphs from the index
paragraph_ids = [paragraph.id for paragraph in db_paragraphs]
logger.info(f"Removing documents from faiss index...")
FaissIndex.get().remove(paragraph_ids)
logger.info(f"Removing documents from BM25 index...")
Bm25Index.get().update(session=session)
logger.info(f"Finished removing {len(documents)} documents => {len(db_paragraphs)} paragraphs")
| [] |
2024-01-10 | listenaddress/translation-chain | common~constants.py | import os
from dotenv import load_dotenv, find_dotenv
from requests_oauthlib import OAuth1Session
import openai
load_dotenv(find_dotenv(), verbose=True)
SEMANTIC_SCHOLAR_GRAPH_API_URL = os.getenv("SEMANTIC_SCHOLAR_GRAPH_API_URL")
SEMANTIC_SCHOLAR_API_KEY = os.getenv("SEMANTIC_SCHOLAR_API_KEY")
SEMANTIC_SCHOLAR_DATASETS_API_URL = os.getenv(
"SEMANTIC_SCHOLAR_DATASETS_API_URL")
base_model = "gpt-3.5-turbo"
openai.api_key = os.getenv("OPEN_AI_API_KEY")
ss_headers = {'x-api-key': SEMANTIC_SCHOLAR_API_KEY}
base_prompt = "You are a translatorโyou translate neuroscience to developmental biology. Deep symmetries exist between these fields. Use them to infer novel, grounded and plausible hypotheses in developmental biology. " + \
"Follow these instructions carefully. Each translation from neuroscience to developmental biology should:" + \
"- Read as developmental biology. Neuroscience concepts like 'hippocampus' translated to most relevant/related developmental biology term." + \
"- Use real terms from developmental biology literature." + \
"- Don't include any neuroscience words, like a part of the brain. Do the best you can to find the most relevant translation." + \
"- Be compelling. No fanciful language just be scientifically novel and plausible, given what is known in science." + \
"- Unless necessary to prove a point, the translation should be structurally similar to the input. " + \
"For example, here are some terms and plausible translations ('N:' is neuroscience and 'D:' is Developmental Biology):" + \
"N:Neuron D:Cell" + \
"N:Behavior D:Morphogenesis" + \
"N:Millisecond D:Minute" + \
"N:Memory D:Pattern Memory" + \
"N:Brain D:Body" + \
"N:Retina D:Epithelium" + \
"N:Synapse D:Gap junction" + \
"N:Neuroscience D:Developmental biology" + \
"N:Navigating D:Navigating in morphospace" + \
"N:Lateralization D:Left-right asymmetry" + \
"N:Mental illness D:Birth defects" + \
"N:Psychiatry D:Developmental teratology" + \
"N:Senses D:Receptors" + \
"N:Action potential D:Change of vmem" + \
"N:Consciousness D:Somatic consciousness" + \
"N:Neuroimaging D:Body tissue imaging" + \
"N:Synaptic D:Electrical-synaptic" + \
"N:Cognitive D:Proto-cognitive" + \
"N:Psychiatry D:Developmental teratology" + \
"N:Space D:Anatomical morphospace" + \
"N:Animal D:Tissue" + \
"N:Goals D:Target morphologies" + \
"N:Muscle contractions D:Cell behavior" + \
"N:Behavioral space D:Morphospace" + \
"N:Pattern completion D:Regeneration" + \
"N:Behavior D:Morphogenesis" + \
"N:Think D:Regenerate" + \
"N:Intelligence D:Ability to regenerate" + \
"N:Event-related potentials D:Bioelectrical signals" + \
"N:Transcranial D:Optogenetic" + \
"N:Down the axon D:Across the cell surface" + \
"N:Action potential movement within an axon D:Differential patterns of Vmem across single cellsโ surface" + \
"N:Neurogenesis D:Cell proliferation" + \
"N:Neuromodulation D:Developmental signaling" + \
"N:Critical plasticity periods D:Competency windows for developmental induction events" + \
"N:What are the goals of hedgehogs D:What are the target morphologies of hedgehogs" + \
"N:On brains. Retina, behavioral plasticity, muscle, synaptic activity and lateralization D:On bodies. Epithelium, regenerative capacity, cell, cell-signaling activity and left-right asymmetry" \
"[Examples done]"
minimal_prompt = "You are a translatorโyou translate neuroscience to developmental biology. Deep symmetries exist between these fields. Use them to infer novel, grounded and plausible hypotheses in developmental biology. " + \
"Follow these instructions carefully. Each translation from neuroscience to developmental biology should:" + \
"- Read as developmental biology. Neuroscience concepts like 'hippocampus' translated to most relevant/related developmental biology term." + \
"- Use real terms from developmental biology literature." + \
"- Don't include any neuroscience words, like a part of the brain. Do the best you can to find the most relevant translation." + \
"- Be compelling. No fanciful language just be scientifically novel and plausible, given what is known in science." + \
"For example, here are some terms and plausible translations ('N:' is neuroscience and 'D:' is Developmental Biology):" + \
"N:Neuron D:Cell" + \
"N:Behavior D:Morphogenesis" + \
"N:Millisecond D:Minute" + \
"N:Memory D:Pattern Memory" + \
"N:Brain D:Body" + \
"N:Neuroscience D:Developmental biology" + \
"N:Navigating D:Navigating in morphospace" + \
"N:Lateralization D:Left-right asymmetry" + \
"N:Mental illness D:Birth defects" + \
"N:Psychiatry D:Developmental teratology" + \
"N:What are the goals of hedgehogs D:What are the target morphologies of hedgehogs" + \
"N:On brains. Retina, behavioral plasticity, muscle, synaptic activity and lateralization D:On bodies. Epithelium, regenerative capacity, cell, cell-signaling activity and left-right asymmetry" \
"[Examples done]"
| [
"You are a translatorโyou translate neuroscience to developmental biology. Deep symmetries exist between these fields. Use them to infer novel, grounded and plausible hypotheses in developmental biology. Follow these instructions carefully. Each translation from neuroscience to developmental biology should:- Read as developmental biology. Neuroscience concepts like 'hippocampus' translated to most relevant/related developmental biology term.- Use real terms from developmental biology literature.- Don't include any neuroscience words, like a part of the brain. Do the best you can to find the most relevant translation.- Be compelling. No fanciful language just be scientifically novel and plausible, given what is known in science.For example, here are some terms and plausible translations ('N:' is neuroscience and 'D:' is Developmental Biology):N:Neuron D:CellN:Behavior D:MorphogenesisN:Millisecond D:MinuteN:Memory D:Pattern MemoryN:Brain D:BodyN:Neuroscience D:Developmental biologyN:Navigating D:Navigating in morphospaceN:Lateralization D:Left-right asymmetryN:Mental illness D:Birth defectsN:Psychiatry D:Developmental teratologyN:What are the goals of hedgehogs D:What are the target morphologies of hedgehogsN:On brains. Retina, behavioral plasticity, muscle, synaptic activity and lateralization D:On bodies. Epithelium, regenerative capacity, cell, cell-signaling activity and left-right asymmetry[Examples done]",
"You are a translatorโyou translate neuroscience to developmental biology. Deep symmetries exist between these fields. Use them to infer novel, grounded and plausible hypotheses in developmental biology. Follow these instructions carefully. Each translation from neuroscience to developmental biology should:- Read as developmental biology. Neuroscience concepts like 'hippocampus' translated to most relevant/related developmental biology term.- Use real terms from developmental biology literature.- Don't include any neuroscience words, like a part of the brain. Do the best you can to find the most relevant translation.- Be compelling. No fanciful language just be scientifically novel and plausible, given what is known in science.- Unless necessary to prove a point, the translation should be structurally similar to the input. For example, here are some terms and plausible translations ('N:' is neuroscience and 'D:' is Developmental Biology):N:Neuron D:CellN:Behavior D:MorphogenesisN:Millisecond D:MinuteN:Memory D:Pattern MemoryN:Brain D:BodyN:Retina D:EpitheliumN:Synapse D:Gap junctionN:Neuroscience D:Developmental biologyN:Navigating D:Navigating in morphospaceN:Lateralization D:Left-right asymmetryN:Mental illness D:Birth defectsN:Psychiatry D:Developmental teratologyN:Senses D:ReceptorsN:Action potential D:Change of vmemN:Consciousness D:Somatic consciousnessN:Neuroimaging D:Body tissue imagingN:Synaptic D:Electrical-synapticN:Cognitive D:Proto-cognitiveN:Psychiatry D:Developmental teratologyN:Space D:Anatomical morphospaceN:Animal D:TissueN:Goals D:Target morphologiesN:Muscle contractions D:Cell behaviorN:Behavioral space D:MorphospaceN:Pattern completion D:RegenerationN:Behavior D:MorphogenesisN:Think D:RegenerateN:Intelligence D:Ability to regenerateN:Event-related potentials D:Bioelectrical signalsN:Transcranial D:OptogeneticN:Down the axon D:Across the cell surfaceN:Action potential movement within an axon D:Differential patterns of Vmem across single cellsโ surfaceN:Neurogenesis D:Cell proliferationN:Neuromodulation D:Developmental signalingN:Critical plasticity periods D:Competency windows for developmental induction eventsN:What are the goals of hedgehogs D:What are the target morphologies of hedgehogsN:On brains. Retina, behavioral plasticity, muscle, synaptic activity and lateralization D:On bodies. Epithelium, regenerative capacity, cell, cell-signaling activity and left-right asymmetry[Examples done]"
] |
2024-01-10 | activeconnector/recruiter_rm | recruiter_rm.py | #!/usr/bin/env python3
"""
recruiter_rm: Recruiter Relationship Manager
Automatically responds to recruiter's emails with a courtesy message.
"""
import json
import os
import re
import smtplib
import sys
import textwrap
import time
import traceback
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from imap_tools import MailBox, MailMessage, MailMessageFlags
import openai
# TODO configuration file?
DRY_RUN = bool(int(os.getenv("DRY_RUN", "1")))
BYPASS_OPENAI = bool(int(os.getenv("BYPASS_OPENAI", "0")))
SIGNATURE = os.getenv("SIGNATURE")
GRACE_PERIOD_SECS = 5
class Mailer:
"""Handles interfacing with the IMAP and SMTP email clients."""
def __init__(self):
self.imap_mailbox = MailBox(
os.getenv("IMAP_HOST"), os.getenv("IMAP_PORT")
).login(os.getenv("MAILBOX_USER"), os.getenv("MAILBOX_PASS"))
self.smtp_mailbox = smtplib.SMTP_SSL(
os.getenv("SMTP_HOST"), os.getenv("SMTP_PORT")
)
self.smtp_mailbox.ehlo()
self.smtp_mailbox.login(os.getenv("MAILBOX_USER"), os.getenv("MAILBOX_PASS"))
def save_to_sent_folder(self, message):
"""Saves a sent message to the Sent folder."""
self.imap_mailbox.append(
str.encode(message.as_string()),
os.getenv("MAILBOX_SENT_FOLDER"),
dt=None,
flag_set=[MailMessageFlags.SEEN],
)
def compose_and_send_mail(self, subject, in_reply_to, from_addr, to_addrs, body):
"""Builds email and sends it over SMTP."""
message = MIMEMultipart()
message["From"] = from_addr
message["To"] = ", ".join(to_addrs)
message["Subject"] = subject
message["In-Reply-To"] = in_reply_to
message.attach(MIMEText(body))
print("Generated response email:")
print(message.as_string(), flush=True)
print(f"Going to send this email in {GRACE_PERIOD_SECS} seconds...")
if not DRY_RUN:
time.sleep(GRACE_PERIOD_SECS)
if not DRY_RUN:
self.smtp_mailbox.sendmail(
from_addr,
to_addrs,
message.as_string(),
)
self.save_to_sent_folder(message)
print("Sent email")
else:
print("DRY_RUN; not sending email")
def _is_reply(self, mail_message: MailMessage):
return "in-reply-to" in [header.lower() for header in mail_message.headers]
def get_recruiter_emails(self):
"""Gets all unprocessed recruiter emails from the Recruitment folder."""
self.imap_mailbox.folder.set(os.getenv("MAILBOX_RECRUITMENT_FOLDER"))
all_recruiter_emails = list(self.imap_mailbox.fetch())
filtered_messages = []
for mail_message in all_recruiter_emails:
if not self._is_reply(mail_message):
filtered_messages.append(mail_message)
return filtered_messages
def move_to_done(self, email):
"""After processing a message, used to move message to Done folder."""
self.imap_mailbox.move(email.uid, os.getenv("MAILBOX_DONE_FOLDER"))
def cleanup(self):
"""Cleans up mailbox client(s)."""
self.smtp_mailbox.quit()
def send_response(mailer: Mailer, recruiter_email: MailMessage):
"""Given an email from a recruiter, sends a courtesy response."""
quoted_original = ""
for line in recruiter_email.text.splitlines():
quoted_original += f"> {line}\n"
try:
name_and_co = get_recruiter_name_and_company(recruiter_email.text)
recruiter_name = name_and_co["name"]
recruiter_company = name_and_co["company"]
response = f"""\
Hi {recruiter_name or ""},
Thanks for reaching out! I'm not interested in new opportunities at this time, but I'll keep {recruiter_company or "your company"} in mind for the future.
Thanks again,
{SIGNATURE}
"""
response_body = textwrap.dedent(response) + quoted_original
mailer.compose_and_send_mail(
subject=f"Re:{recruiter_email.subject}",
in_reply_to=recruiter_email.headers["message-id"][0],
from_addr=os.getenv("EMAIL_ADDRESS"),
to_addrs=[recruiter_email.from_],
body=response_body,
)
if not DRY_RUN:
mailer.move_to_done(recruiter_email)
except Exception:
# TODO use logging module throughout
print("Error creating/sending response email! Skipping")
traceback.print_exc()
print("Recruiter email:")
print(recruiter_email.text)
def respond_to_recruitment_emails(mailer: Mailer):
"""Reads recruiter emails in the MAILBOX_RECRUITMENT_FOLDER, responds to
them, then moves each conversation to the MAILBOX_DONE_FOLDER so that
it's not repeatedly processed."""
emails = mailer.get_recruiter_emails()
print(f"Going to respond to {len(emails)} emails")
for index, email in enumerate(emails):
print(f"Responding to email {index + 1} of {len(emails)}...")
send_response(mailer, email)
print("Done")
print(
"--------------------------------------------------------------------------------"
)
def get_recruiter_name_and_company(email_text: str):
"""Uses OpenAI text models to automatically parse the recruiter's name
and company from their email."""
prompt = f"""
Given an email from a recruiter, return the recruiter's first name and the recruiter's company's name formatted as valid JSON.
Example: ***
Email:
'''
Hi Matt! This is Steve Jobs with Apple Computer Company! I'm interested in having you join our team here.
'''
Response:
{{"name": "Steve", "company": "Apple Computer Company"}}
***
Email:
'''
{email_text}
'''
Response:
"""
# consider disabling expensive OpenAI calls in development if not relevant
if BYPASS_OPENAI:
print("Bypassing OpenAI API, mocking data")
return json.loads('{"name": "Steve", "company": "Apple"}')
completion = openai.Completion.create(
model="text-davinci-002",
prompt=textwrap.dedent(prompt),
max_tokens=20,
temperature=0,
)
try:
# If we end up needing more cleaning to ensure the response can be parsed,
# consider improving the prompt.
json_str_response = completion.choices[0].text
json_str_response_clean = re.search(r".*(\{.*\})", json_str_response).groups()[
0
]
return json.loads(json_str_response_clean)
except (AttributeError, json.decoder.JSONDecodeError) as exception:
print("Could not decode completion response from OpenAI:")
print(completion)
raise exception
def main():
"""Entrypoint"""
if not DRY_RUN and BYPASS_OPENAI:
print(
"BYPASS_OPENAI can only be used w/ DRY_RUN to avoid sending emails with canned data."
)
sys.exit(1)
if DRY_RUN:
print("DRY_RUN mode on")
if BYPASS_OPENAI:
print("BYPASS_OPENAI mode on")
openai.organization = os.getenv("OPENAI_ORG")
openai.api_key = os.getenv("OPENAI_SECRET_KEY")
mailer = Mailer()
respond_to_recruitment_emails(mailer)
mailer.cleanup()
if __name__ == "__main__":
main()
| [
"\n Given an email from a recruiter, return the recruiter's first name and the recruiter's company's name formatted as valid JSON.\n\n Example: ***\n Email:\n '''\n Hi Matt! This is Steve Jobs with Apple Computer Company! I'm interested in having you join our team here.\n '''\n\n Response:\n {\"name\": \"Steve\", \"company\": \"Apple Computer Company\"}\n ***\n\n Email:\n '''\n PLACEHOLDER\n\n '''\n\n Response:\n "
] |
2024-01-10 | scahyono/scriptgpt | scriptgpt.py | from openai import OpenAI
from dotenv import load_dotenv
import os
# Load environment variables from .env file
load_dotenv()
# Configuration Management: Load API key from environment variables
api_key = os.environ.get('OPENAI_API_KEY')
MODEL="gpt-4-1106-preview" # Update this to the desired model version
INSTRUCTION = "ScriptGPT specializes in creating and optimizing scripts for automation and integration tasks. It provides complete, ready-to-use scripts and offers suggestions for script improvements. Knowledgeable in various scripting languages, it addresses both simple and complex automation needs. While not executing scripts, Script Automator delivers secure, efficient, and best practice-oriented code solutions. If a user's request lacks specific details, it will ask for clarification to ensure accuracy and helpfulness. The interaction style is direct and practical, aiming to provide scripts efficiently. For users who need it, Script Automator can also include brief explanations or comments within the scripts, particularly useful for those less experienced in scripting."
# Initialize the OpenAI client
client = OpenAI(api_key=api_key)
def scriptgpt_response(user_input, client):
"""Generate a response to the user input using OpenAI API."""
user_input = user_input.lower()
try:
# Connect to the OpenAI API and get a response using the updated method
stream = client.chat.completions.create(
stream=True,
model=MODEL,
messages=[
{"role": "system", "content": INSTRUCTION},
{"role": "user", "content": user_input}
]
)
except Exception as e:
return f"An error occurred: {e}"
return stream
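# Minimal consumption sketch (assumes a valid OPENAI_API_KEY; mirrors what chat()
# does below, and the prompt text is illustrative only):
#   stream = scriptgpt_response("Write a bash script that renames *.txt files", client)
#   for chunk in stream:
#       print(chunk.choices[0].delta.content or "", end="")
# Note: on API errors the function returns an error string rather than a stream.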
def chat():
print("Welcome to ScriptGPT! What automation or integration script do you need? And in which language? Type 'bye' to exit.")
while True:
user_input = input("\033[92mYou: \033[0m") # \033[92m is the ANSI escape code for green text, \033[0m resets the text color
if user_input.lower() in ('bye', 'exit', 'quit'):
print(f"\033[94mScriptGPT: \033[0mGoodbye! Have a nice day!") # \033[94m is the ANSI escape code for blue text
break
stream = scriptgpt_response(user_input, client)
print(f"\033[94mScriptGPT: \033[0m", end='')
for chunk in stream:
chunkContent = chunk.choices[0].delta.content
if chunkContent is None:
chunkContent = "\n"
print(chunkContent, end='')
if __name__ == "__main__":
chat() | [
"ScriptGPT specializes in creating and optimizing scripts for automation and integration tasks. It provides complete, ready-to-use scripts and offers suggestions for script improvements. Knowledgeable in various scripting languages, it addresses both simple and complex automation needs. While not executing scripts, Script Automator delivers secure, efficient, and best practice-oriented code solutions. If a user's request lacks specific details, it will ask for clarification to ensure accuracy and helpfulness. The interaction style is direct and practical, aiming to provide scripts efficiently. For users who need it, Script Automator can also include brief explanations or comments within the scripts, particularly useful for those less experienced in scripting."
] |
2024-01-10 | scahyono/scriptgpt | test_scriptgpt.py | import openai
import unittest
from unittest.mock import patch
import scriptgpt
# Initialize the OpenAI client
client = openai.OpenAI(api_key="dummy key")
class Delta:
def __init__(self, content):
self.content = content
class Choice:
def __init__(self, content):
self.delta = Delta(content)
class Chunk:
def __init__(self, choice):
self.choices = [choice]
class TestScriptGPT(unittest.TestCase):
@patch.object(client.chat.completions, 'create')
def test_scriptgpt_response(self, mock_create):
# Arrange
user_input = 'Test input'
mock_create.return_value = [Chunk(Choice('Test response'))]
# Call the function to test
stream = scriptgpt.scriptgpt_response(user_input, client)
# Check that the response from the function matches the mocked response
for chunk in stream:
chunkContent = chunk.choices[0].delta.content
self.assertEqual(chunkContent, 'Test response')
@patch('builtins.input', return_value='quit')
@patch('scriptgpt.scriptgpt_response', return_value='Goodbye! Have a nice day!')
    def test_chat(self, response_mock, input_mock):
with patch('builtins.print') as print_mock:
scriptgpt.chat()
print_mock.assert_called_with('\033[94mScriptGPT: \033[0mGoodbye! Have a nice day!')
if __name__ == '__main__':
unittest.main() | [] |
2024-01-10 | Shahnab/auto-analyst-demo | auto_analyst~llms~open_ai_utils.py | import openai
from config import OPENAI_API_KEY
from prompts.prompts import (
render_agg_plot_prompt,
render_analytical_prompt,
render_data_prompt,
)
openai.api_key = OPENAI_API_KEY
def get_chat_reply(system_prompt, prompt):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": prompt},
],
temperature=0.1,
)
return response['choices'][0]['message']['content'].strip().lower()
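# Hypothetical usage (the system prompt and question are illustrative, not from
# the original module):
#   reply = get_chat_reply("You are a data analyst.", "Which column should I aggregate by?")
#   print(reply)  # the helper strips and lower-cases the model's reply before returning it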
| [] |
2024-01-10 | varunsai-k/Create.ai | create.py | import os
import openai
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import SequentialChain
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain.utilities import WikipediaAPIWrapper
import streamlit as st
from PIL import Image
os.environ['OPENAI_API_KEY']='Your OpenAI key'
img=Image.open("C:\\Create.ai\\Page_Icon.png")
st.set_page_config(page_title="Create.ai: Generate content with AI",page_icon=img)
st.title('Create:violet[.]ai ๐ท')
tab1,tab2=st.tabs(['Home','Create'])
with tab1:
st.write('Create:blue[.]ai is an AI-powered content creation tool that can help you level up your YouTube channel. With Create.ai, you can generate high-quality content in minutes, including titles, descriptions, scripts, and even entire videos.Whether you\'re a beginner or a seasoned YouTuber, Create.ai can help you take your channel to the next level.')
st.image('https://www.apa.org//images//2021-09-career-content-creation_tcm7-296397.jpg')
st.write('If you\'re looking for a way to create engaging and informative YouTube videos quickly and easily, then Create.ai is the perfect tool for you. :violet[Sign up] for a free trial today and see how Create:violet[.]ai can help you grow your channel.')
st.write('Here are some of the benefits of using Create:violet[.]ai:')
st.success('''
Save time and effort: Create.ai can help you generate content quickly and easily, so you can focus on other aspects of your YouTube channel.
Improve your content quality: Create.ai uses AI to understand your audience and create content that is both engaging and informative.
Stand out from the competition: Create.ai can help you create unique and original content that will help you stand out from the competition.
''')
with tab2:
st.write('Try Create:violet[.]ai today and see how it can help you grow your channel.')
st.image('https://assets.entrepreneur.com/content/3x2/2000/1629828633-GettyImages-1212772310.jpg')
prompt=st.text_input('What are you looking to create?',placeholder='Enter a prompt here')
# Prompt templates
title_template = PromptTemplate(
input_variables = ['topic'],
template='write me a youtube video title about {topic}'
)
script_template = PromptTemplate(
input_variables = ['title','wikipedia_research'],
template='write me a youtube video script based on this title: {title} while leveraging this wikipedia research {wikipedia_research}'
)
description_template=PromptTemplate(
input_variables=['script'],
template='Write me a description for youtube video in three lines based on this content:{script}'
)
hashtags_template=PromptTemplate(
input_variables=['script'],
template='write me five best hashtags for youtube video based on this content:{script}'
)
thumbnail_tempalte=PromptTemplate(
input_variables=['title'],
template='write me an eye-catching text on thumbnail for youtube video on this title: {title}'
)
# Memory
title_memory = ConversationBufferMemory(input_key='topic', memory_key='chat_history')
script_memory = ConversationBufferMemory(input_key='title', memory_key='chat_history')
description_memory = ConversationBufferMemory(input_key='script', memory_key='chat_history')
hashtags_memory = ConversationBufferMemory(input_key='script', memory_key='chat_history')
thumbnail_memory = ConversationBufferMemory(input_key='title', memory_key='chat_history')
# Llms
llm = OpenAI(temperature=0.9)
title_chain = LLMChain(llm=llm, prompt=title_template, verbose=True, output_key='title', memory=title_memory)
script_chain = LLMChain(llm=llm, prompt=script_template, verbose=True, output_key='script', memory=script_memory)
description_chain = LLMChain(llm=llm, prompt=description_template, verbose=True, output_key='description', memory=description_memory)
hashtags_chain = LLMChain(llm=llm, prompt=hashtags_template, verbose=True, output_key='hashtags', memory=hashtags_memory)
thumbnail_chain = LLMChain(llm=llm, prompt=thumbnail_tempalte, verbose=True, output_key='thumbnail', memory=thumbnail_memory)
wiki = WikipediaAPIWrapper()
if prompt:
title = title_chain.run(prompt)
wiki_research = wiki.run(prompt)
script = script_chain.run(title=title, wikipedia_research=wiki_research)
description = description_chain.run(script=script)
hashtags=hashtags_chain.run(script=script)
thumbnail=thumbnail_chain.run(title)
with st.expander('Title'):
st.info(title)
with st.expander('Script'):
st.info(script)
with st.expander('Description'):
st.info(description)
with st.expander('Hashtags'):
st.info(hashtags)
with st.expander('Thumbnail'):
st.info(thumbnail)
with st.expander('Wikipedia Research'):
st.info(wiki_research)
| [
"What are you looking to create?",
"write me five best hashtags for youtube video based on this content:{script}",
"Write me a description for youtube video in three lines based on this content:{script}",
"write me a youtube video script based on this title: {title} while leveraging this wikipedia research {wikipedia_research}",
"write me an eye-catching text on thumbnail for youtube video on this title: {title}",
"wikipedia_research",
"Enter a prompt here",
"write me a youtube video title about {topic}"
] |
2024-01-10 | sprenkamp/r2g2 | src~machine_learning~chat~work_kilian~telegram_bot.py | import os
from telegram import Update
from telegram.ext import Application, CommandHandler, MessageHandler, filters, ContextTypes
from langchain_agent.chat import chat
TOKEN = os.environ.get('telegram_OnePieceNavigator_bot')
BOT_NAME = "@OnePieceNavigator_bot"
async def start_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
"""Send a message when the command /start is issued."""
await update.message.reply_text('Hi! I am a chatbot that can answer your questions about refugees in Switzerland. Ask me anything!')
async def help_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
"""Send a message when the command /help is issued."""
    await update.message.reply_text('Please ask a question about asylum or migration in Switzerland. I will try my best to answer it.')
async def custom_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
"""Send a message when the command /custom is issued."""
await update.message.reply_text('This is a custom command, you can add whatever text you want here.')
async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE):
message_type = update.message.chat.type
text = update.message.text
if message_type == 'group':
if BOT_NAME in text:
new_text = text.replace(BOT_NAME, "").strip()
response = chat(new_text)
else:
return
else:
response = chat(text)
print("Bot:", response)
await update.message.reply_text(response)
async def error(update: Update, context: ContextTypes.DEFAULT_TYPE):
print(f'Update {update} caused error {context.error}')
if __name__ == '__main__':
print("Starting bot...")
app = Application.builder().token(TOKEN).build()
# Commands
app.add_handler(CommandHandler('start', start_command))
app.add_handler(CommandHandler('help', help_command))
app.add_handler(CommandHandler('custom', custom_command))
# Messages
app.add_handler(MessageHandler(filters.TEXT, handle_message))
# Errors
app.add_error_handler(error)
print("polling...")
# Polls the telegram server for updates
app.run_polling(poll_interval=0.5)
| [] |
2024-01-10 | sprenkamp/r2g2 | frontend~chatgpt-backend~Tumen_Chatbot_development_edition.py | import os
from dotenv import load_dotenv
load_dotenv()
from langchain.prompts import PromptTemplate
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.vectorstores import MongoDBAtlasVectorSearch
from fastapi.middleware.cors import CORSMiddleware
from pymongo import MongoClient
from fastapi import FastAPI
import datetime
from pydantic import BaseModel
class QueryRequest(BaseModel):
start_date: str
end_date: str
country: str
state: str
query: str
chat_history: list
# uvicorn Tumen_Chatbot_development_edition:app --reload
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=["http://localhost:5173"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# The local machine should have the following environment variables:
ATLAS_TOKEN = os.environ["ATLAS_TOKEN"]
ATLAS_USER = os.environ["ATLAS_USER"]
# This function is used to parse the filters into the format that can be used by MongoDB
def parse_parameters(start_date, end_date, country, state):
must_conditions = []
if state != 'null':
filter = {
"text": {
"path": "state",
"query": state
}
}
must_conditions.append(filter)
if country != 'null':
filter = {
"text": {
"path": "country",
"query": country
}
}
must_conditions.append(filter)
start_date = '1999-01-01' if start_date == 'null' else start_date
end_date = '2999-01-01' if end_date == 'null' else end_date
filter = {
'range': {
'path': 'messageDatetime',
'gte': datetime.datetime.strptime(start_date, "%Y-%m-%d"),
'lte': datetime.datetime.strptime(end_date, "%Y-%m-%d")+datetime.timedelta(days=1),
}
}
must_conditions.append(filter)
conditions = {
"compound": {
"must": must_conditions
}
}
return conditions
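# Illustrative result (values assumed): parse_parameters('2022-01-01', '2022-01-31',
# 'Switzerland', 'Zurich') yields a compound Atlas Search filter roughly like:
#   {"compound": {"must": [
#       {"text": {"path": "state", "query": "Zurich"}},
#       {"text": {"path": "country", "query": "Switzerland"}},
#       {"range": {"path": "messageDatetime",
#                  "gte": datetime.datetime(2022, 1, 1, 0, 0),
#                  "lte": datetime.datetime(2022, 2, 1, 0, 0)}}]}}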
# This function calls the chatbot and returns the answer and prints all the relevant metadata
@app.post("/query")
def query(query_request: QueryRequest):
start_date = query_request.start_date
end_date = query_request.end_date
country = query_request.country
state = query_request.state
query = query_request.query
chat_history = query_request.chat_history
'''
Args:
start_date: string, e.g. '2022-01-01'
end_date: string e.g. '2022-01-02'
country: string e.g. 'Switzerland'
state: string e.g. 'Zurich'
query: string e.g. 'Can I get free clothes in Zurich?'
chat_history: array
Returns:
'''
# initialize the connection to MongoDB Atlas
client = MongoClient(
"mongodb+srv://{}:{}@cluster0.fcobsyq.mongodb.net/".format(
ATLAS_USER, ATLAS_TOKEN))
db_name, collection_name = "scrape", "telegram"
collection = client[db_name][collection_name]
api_key = os.environ.get('OPENAI_API_KEY')
if not api_key:
print('OpenAI API key not found in environment variables.')
exit()
# create the embedding and vector search objects
embeddings = OpenAIEmbeddings(openai_api_key=api_key)
vectors = MongoDBAtlasVectorSearch(
collection=collection, text_key='messageText',
embedding=embeddings, index_name='telegram_embedding'
)
# create the memory object
memory = ConversationBufferMemory(
memory_key='chat_history',
return_messages=True,
output_key='answer')
    # create the large language model object
llm=ChatOpenAI(temperature=0.0, model_name='gpt-3.5-turbo-16k', openai_api_key=api_key)
# create the prompt template for chatbot to use
prompt_template = """Use the following pieces of context to answer the question at the end.
Combine the information from the context with your own general knowledge to provide a comprehensive and accurate answer.
Please be as specific as possible, also you are a friendly chatbot who is always polite.
{context}
Question: {question}"""
QA_CHAIN_PROMPT = PromptTemplate.from_template(prompt_template)
# generate conditions
must_conditions = parse_parameters(start_date, end_date, country, state)
print(must_conditions)
# create a chatbot chain
chain = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=vectors.as_retriever(search_type = 'mmr',
search_kwargs={
'k': 100, 'lambda_mult': 0.25,
"pre_filter": {
"compound": {
"must": must_conditions
}
},
}),
memory = memory,
return_source_documents=True,
return_generated_question=True,
combine_docs_chain_kwargs={"prompt": QA_CHAIN_PROMPT}
)
# create the chat
answer = chain({"question": query, "chat_history": chat_history})
# for i in range(10):
# print(answer["source_documents"][i].metadata['state'])
# print(answer["source_documents"][i].metadata['country'])
# print(answer["source_documents"][i].metadata['messageDatetime'])
#print(answer["source_documents"][0].page_content)
return answer["answer"], answer['chat_history'] | [
"Use the following pieces of context to answer the question at the end. \n Combine the information from the context with your own general knowledge to provide a comprehensive and accurate answer. \n Please be as specific as possible, also you are a friendly chatbot who is always polite.\n {context}\n Question: {question}"
] |
2024-01-10 | sprenkamp/r2g2 | src~machine_learning~chat~work_kilian~langchain_agent~helper~split.py | from langchain.text_splitter import CharacterTextSplitter
text_splitter = CharacterTextSplitter(
# separator="\n",
chunk_size=1000,
chunk_overlap=150,
length_function=len
)
def split_text(text):
    """
    Split the loaded documents into chunks of 1000 characters.
    Args:
        text (list): Documents to be split.
    Returns:
        list: List of document chunks.
    """
    return text_splitter.split_documents(text) | []
2024-01-10 | sprenkamp/r2g2 | src~machine_learning~chat~docker~chat_fastapi.py | # scp -i "D:\aws_key\aws_node.pem" "D:\visualstudiocode\project\r2g2\r2g2\src\machine_learning\chat\docker\chat_fastapi.py" [email protected]:/home/ec2-user/
import os
from dotenv import load_dotenv
load_dotenv()
from langchain.prompts import PromptTemplate
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
# from langchain.chains import ConversationalRetrievalChain
from langchain.chains import RetrievalQA
# from langchain.memory import ConversationBufferMemory
from langchain.vectorstores import MongoDBAtlasVectorSearch
from fastapi.middleware.cors import CORSMiddleware
from pymongo import MongoClient
from fastapi import FastAPI
import datetime
from pydantic import BaseModel
class QueryRequest(BaseModel):
start_date: str
end_date: str
country: str
state: str
predicted_class: str
query: str
chat_history: list
# uvicorn Tumen_Chatbot_development_edition:app --reload
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=[
'https://governmentasaplatform.ch',
'https://www.governmentasaplatform.ch',
'https://bot.governmentasaplatform.ch/query',
'https://bot.governmentasaplatform.ch',
'governmentasaplatform.ch',
'bot-load-balancer-175078596.eu-north-1.elb.amazonaws.com',
'http://localhost:5173',
],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# The local machine should have the following environment variables:
ATLAS_TOKEN = os.environ["ATLAS_TOKEN"]
ATLAS_USER = os.environ["ATLAS_USER"]
# This function is used to parse the filters into the format that can be used by MongoDB
def parse_parameters(start_date, end_date, country, state, predicted_class):
must_conditions = []
if state != 'null':
filter = {
"text": {
"path": "state",
"query": state
}
}
must_conditions.append(filter)
if country != 'null':
filter = {
"text": {
"path": "country",
"query": country
}
}
must_conditions.append(filter)
if predicted_class != 'null':
filter = {
"text": {
"path": "predicted_class",
"query": predicted_class
}
}
must_conditions.append(filter)
start_date = '1999-01-01' if start_date == 'null' else start_date
end_date = '2999-01-01' if end_date == 'null' else end_date
filter = {
'range': {
'path': 'messageDatetime',
'gte': datetime.datetime.strptime(start_date, "%Y-%m-%d"),
'lte': datetime.datetime.strptime(end_date, "%Y-%m-%d")+datetime.timedelta(days=1),
}
}
must_conditions.append(filter)
conditions = {
"compound": {
"must": must_conditions
}
}
return conditions
# This function calls the chatbot and returns the answer and prints all the relevant metadata
@app.post("/query")
def query(query_request: QueryRequest):
start_date = query_request.start_date
end_date = query_request.end_date
country = query_request.country
state = query_request.state
predicted_class = query_request.predicted_class
query = query_request.query
chat_history = query_request.chat_history
'''
Args:
start_date: string, e.g. '2022-01-01'
end_date: string e.g. '2022-01-02'
country: string e.g. 'Switzerland'
state: string e.g. 'Zurich'
predicted_class: string e.g. 'Education'
query: string e.g. 'Can I get free clothes in Zurich?'
chat_history: array
{
"start_date": "2022-01-01",
"end_date": "2022-01-10",
"country": "Switzerland",
"state": "Zurich",
"predicted_class": "Education",
"query": "hello",
"chat_history": []
}
Returns:
'''
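    # Hedged example call (the JSON body mirrors the docstring above; host and
    # port are assumptions for local testing with the uvicorn entrypoint below):
    #   curl -X POST http://localhost:8000/query -H "Content-Type: application/json" \
    #        -d '{"start_date": "2022-01-01", "end_date": "2022-01-10",
    #             "country": "Switzerland", "state": "Zurich",
    #             "predicted_class": "Education", "query": "hello", "chat_history": []}'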
# initialize the connection to MongoDB Atlas
client = MongoClient(
"mongodb+srv://{}:{}@cluster0.fcobsyq.mongodb.net/".format(
ATLAS_USER, ATLAS_TOKEN))
db_name, collection_name = "scrape", "telegram"
collection = client[db_name][collection_name]
api_key = os.environ.get('OPENAI_API_KEY')
if not api_key:
print('OpenAI API key not found in environment variables.')
exit()
# create the embedding and vector search objects
embeddings = OpenAIEmbeddings(openai_api_key=api_key)
vectors = MongoDBAtlasVectorSearch(
collection=collection, text_key='messageText',
embedding=embeddings, index_name='telegram_embedding'
)
# create the memory object
# memory = ConversationBufferMemory(
# memory_key='chat_history',
# return_messages=True,
# output_key='answer')
    # create the large language model object
llm=ChatOpenAI(temperature=0.0, model_name='gpt-3.5-turbo-16k', openai_api_key=api_key)
# create the prompt template for chatbot to use
prompt_template = """Use the following pieces of context to answer the question at the end.
Combine the information from the context with your own general knowledge to provide a comprehensive and accurate answer.
Please be as specific as possible, also you are a friendly chatbot who is always polite.
{context}
Question: {question}"""
QA_CHAIN_PROMPT = PromptTemplate.from_template(prompt_template)
# generate conditions
must_conditions = parse_parameters(start_date, end_date, country, state, predicted_class)
print(must_conditions)
# create a chatbot chain
chain = RetrievalQA.from_llm(
llm=llm,
retriever=vectors.as_retriever(search_type = 'mmr',
search_kwargs={
'k': 100, 'lambda_mult': 0.25,
"pre_filter": {
"compound": {
"must": must_conditions
}
},
}),
# memory = memory,
return_source_documents=True,
# return_generated_question=True,
# combine_docs_chain_kwargs={"prompt": QA_CHAIN_PROMPT}
)
# create the chat
answer = chain({"query": query})
print(answer)
#, "chat_history": chat_history})
# for i in range(10):
# print(answer["source_documents"][i].metadata['state'])
# print(answer["source_documents"][i].metadata['country'])
# print(answer["source_documents"][i].metadata['messageDatetime'])
#print(answer["source_documents"][0].page_content)
return answer["result"] #, answer['chat_history']
@app.get("/test")
def test_endpoint():
print("Test endpoint called!")
return {"message": "Test successful"}
# Run the FastAPI app using uvicorn
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8000) | [
"Use the following pieces of context to answer the question at the end. \n Combine the information from the context with your own general knowledge to provide a comprehensive and accurate answer. \n Please be as specific as possible, also you are a friendly chatbot who is always polite.\n {context}\n Question: {question}"
] |
2024-01-10 | sprenkamp/r2g2 | src~machine_learning~chat~Tumen_Chatbot_development_edition.py | import os
from dotenv import load_dotenv
load_dotenv()
from langchain.prompts import PromptTemplate
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.vectorstores import MongoDBAtlasVectorSearch
from pymongo import MongoClient
from fastapi import FastAPI
import datetime
app = FastAPI()
# The local machine should have the following environment variables:
ATLAS_TOKEN = os.environ["ATLAS_TOKEN"]
ATLAS_USER = os.environ["ATLAS_USER"]
# This function is used to parse the filters into the format that can be used by MongoDB
def parse_parameters(start_date, end_date, country, state, predicted_class):
must_conditions = []
if state != 'null':
filter = {
"text": {
"path": "state",
"query": state
}
}
must_conditions.append(filter)
if country != 'null':
filter = {
"text": {
"path": "country",
"query": country
}
}
must_conditions.append(filter)
if predicted_class != 'null':
filter = {
"text": {
"path": "predicted_class",
"query": predicted_class
}
}
must_conditions.append(filter)
start_date = '1999-01-01' if start_date == 'null' else start_date
end_date = '2999-01-01' if end_date == 'null' else end_date
filter = {
'range': {
'path': 'messageDatetime',
'gte': datetime.datetime.strptime(start_date, "%Y-%m-%d"),
'lte': datetime.datetime.strptime(end_date, "%Y-%m-%d")+datetime.timedelta(days=1),
}
}
must_conditions.append(filter)
conditions = {
"compound": {
"must": must_conditions
}
}
return conditions
# This function calls the chatbot and returns the answer and prints all the relevant metadata
@app.post("/query")
def query(start_date, end_date, country, state, query, predicted_class, chat_history):
'''
Args:
start_date: string, e.g. '2022-01-01'
end_date: string e.g. '2022-01-02'
country: string e.g. 'Switzerland'
state: string e.g. 'Zurich'
query: string e.g. 'Can I get free clothes in Zurich?'
chat_history: array
Returns:
'''
# initialize the connection to MongoDB Atlas
client = MongoClient(
"mongodb+srv://{}:{}@cluster0.fcobsyq.mongodb.net/".format(
ATLAS_USER, ATLAS_TOKEN))
db_name, collection_name = "scrape", "telegram"
collection = client[db_name][collection_name]
api_key = os.environ.get('OPENAI_API_KEY')
if not api_key:
print('OpenAI API key not found in environment variables.')
exit()
# create the embedding and vector search objects
embeddings = OpenAIEmbeddings(openai_api_key=api_key)
vectors = MongoDBAtlasVectorSearch(
collection=collection, text_key='messageText',
embedding=embeddings, index_name='telegram_embedding'
)
# create the memory object
memory = ConversationBufferMemory(
memory_key='chat_history',
return_messages=True,
output_key='answer')
    # create the large language model object
llm=ChatOpenAI(temperature=0.0, model_name='gpt-3.5-turbo-16k', openai_api_key=api_key)
# create the prompt template for chatbot to use
prompt_template = """Use the following pieces of context to answer the question at the end.
Combine the information from the context with your own general knowledge to provide a comprehensive and accurate answer.
Please be as specific as possible, also you are a friendly chatbot who is always polite.
{context}
Question: {question}"""
QA_CHAIN_PROMPT = PromptTemplate.from_template(prompt_template)
# generate conditions
must_conditions = parse_parameters(start_date, end_date, country, state, predicted_class)
# create a chatbot chain
chain = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=vectors.as_retriever(search_type = 'mmr',
search_kwargs={
'k': 100, 'lambda_mult': 0.25,
"pre_filter": {
"compound": {
"must": must_conditions
}
},
}),
memory = memory,
return_source_documents=True,
return_generated_question=True,
combine_docs_chain_kwargs={"prompt": QA_CHAIN_PROMPT}
)
# create the chat
answer = chain({"question": query, "chat_history": chat_history})
for i in range(10):
print(answer["source_documents"][i].metadata['state'])
print(answer["source_documents"][i].metadata['country'])
print(answer["source_documents"][i].metadata['messageDatetime'])
print(answer["source_documents"][i].metadata['predicted_class'])
#print(answer["source_documents"][0].page_content)
return answer["answer"], answer['chat_history'] | [
"Use the following pieces of context to answer the question at the end. \n Combine the information from the context with your own general knowledge to provide a comprehensive and accurate answer. \n Please be as specific as possible, also you are a friendly chatbot who is always polite.\n {context}\n Question: {question}"
] |
2024-01-10 | sprenkamp/r2g2 | src~machine_learning~chat~Tumen_Chatbot_test_edition.py | import os
from dotenv import load_dotenv
load_dotenv()
from langchain.prompts import PromptTemplate
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.vectorstores import MongoDBAtlasVectorSearch
from langchain.memory.chat_message_histories.in_memory import ChatMessageHistory
from pymongo import MongoClient
import datetime
ATLAS_TOKEN = os.environ["ATLAS_TOKEN"]
ATLAS_USER = os.environ["ATLAS_USER"]
def parse_parameters(start_date, end_date, country, state, predicted_class):
must_conditions = []
if state != 'null':
filter = {
"text": {
"path": "state",
"query": state
}
}
must_conditions.append(filter)
if country != 'null':
filter = {
"text": {
"path": "country",
"query": country
}
}
must_conditions.append(filter)
if predicted_class != 'null':
filter = {
"text": {
"path": "predicted_class",
"query": predicted_class
}
}
must_conditions.append(filter)
start_date = '1999-01-01' if start_date == 'null' else start_date
end_date = '2999-01-01' if end_date == 'null' else end_date
filter = {
'range': {
'path': 'messageDatetime',
'gte': datetime.datetime.strptime(start_date, "%Y-%m-%d"),
'lte': datetime.datetime.strptime(end_date, "%Y-%m-%d")+datetime.timedelta(days=1),
}
}
must_conditions.append(filter)
conditions = {
"compound": {
"must": must_conditions
}
}
return conditions
def query(start_date, end_date, country, state, query, predicted_class, chat_history):
'''
Args:
start_date: string, e.g. '2022-01-01'
end_date: string e.g. '2022-01-02'
country: string e.g. 'Switzerland'
state: string e.g. 'Zurich'
query: string e.g. 'Can I get free clothes in Zurich?'
chat_history: array
Returns:
'''
# initialize
client = MongoClient(
"mongodb+srv://{}:{}@cluster0.fcobsyq.mongodb.net/".format(
ATLAS_USER, ATLAS_TOKEN))
db_name, collection_name = "scrape", "telegram"
collection = client[db_name][collection_name]
api_key = os.environ.get('OPENAI_API_KEY')
if not api_key:
print('OpenAI API key not found in environment variables.')
exit()
embeddings = OpenAIEmbeddings(openai_api_key=api_key)
vectors = MongoDBAtlasVectorSearch(
collection=collection, text_key='messageText',
embedding=embeddings, index_name='telegram_embedding'
)
memory = ConversationBufferMemory(
memory_key='chat_history',
return_messages=True,
output_key='answer'
)
print(memory)
# ChatMessageHistory(messages=[]) output_key='answer' input_key=None return_messages=True human_prefix='Human' ai_prefix='AI' memory_key='chat_history'
# memory = ConversationBufferMemory(chat_memory=ChatMessageHistory(messages=[]), output_key='answer', input_key=None, return_messages=True, human_prefix='Human', ai_prefix='AI', memory_key='chat_history')
# print(memory)
llm=ChatOpenAI(temperature=0.0, model_name='gpt-3.5-turbo-16k', openai_api_key=api_key)
prompt_template = """Use the following pieces of context to answer the question at the end.
Combine the information from the context with your own general knowledge to provide a comprehensive and accurate answer.
Please be as specific as possible, also you are a friendly chatbot who is always polite.
{context}
Question: {question}"""
QA_CHAIN_PROMPT = PromptTemplate.from_template(prompt_template)
# generate conditions
must_conditions = parse_parameters(start_date, end_date, country, state, predicted_class)
chain = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=vectors.as_retriever(search_type = 'mmr',
search_kwargs={
'k': 100, 'lambda_mult': 0.25,
# "pre_filter": {
# "compound": {
# "must": must_conditions
# }
# },
}),
# memory = memory,
return_source_documents=True,
return_generated_question=True,
combine_docs_chain_kwargs={"prompt": QA_CHAIN_PROMPT}
)
# chat_history = [chat_history]
#print(chain)
answer = chain({"question": query, "chat_history": chat_history})
print(answer["source_documents"][0].metadata['state'])
print(answer["source_documents"][0].metadata['country'])
print(answer["source_documents"][0].metadata['messageDatetime'])
print(answer["source_documents"][0].metadata['predicted_class'])
#(answer["source_documents"][0].page_content)
print(answer["answer"])
query('null', 'null', 'Switzerland', 'Zurich', 'Can I get free clothes in Zurich?', 'null', [])
"Use the following pieces of context to answer the question at the end. \n Combine the information from the context with your own general knowledge to provide a comprehensive and accurate answer. \n Please be as specific as possible, also you are a friendly chatbot who is always polite.\n {context}\n Question: {question}"
] |
2024-01-10 | sprenkamp/r2g2 | src~machine_learning~chat~chat_fastapi_app.py | import os
import datetime
from dotenv import load_dotenv
from fastapi import FastAPI, HTTPException, Request
from pymongo import MongoClient
from langchain.prompts import PromptTemplate
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.vectorstores import MongoDBAtlasVectorSearch
#TODO:
# 1. Solve bug pymongo.errors.OperationFailure:
# $vectorSearch is not allowed or the syntax is incorrect, see the Atlas documentation for more information, full error: {'ok': 0, 'errmsg': '$vectorSearch is not allowed or the syntax is incorrect,
# see the Atlas documentation for more information', 'code': 8000, 'codeName': 'AtlasError'}
# 2. Add cluster to parse_parameters and use it in MongoDBAtlasVectorSearch
# 3. Simplify code I feel we don't need all these langchain functions
# 4. Write dockerfile and run app in docker
# 5. Think about hosting options
# Load environment variables from the .env file
load_dotenv()
# Initialize FastAPI app
app = FastAPI()
# Get MongoDB Atlas credentials from environment variables
ATLAS_TOKEN = os.environ["ATLAS_TOKEN"]
ATLAS_USER = os.environ["ATLAS_USER"]
def parse_parameters(start_date, end_date, country, state):
"""
Parse the input parameters and construct search conditions.
Args:
- start_date (str): The start date for the date range filter. Defaults to "1999-01-01" if 'null'.
- end_date (str): The end date for the date range filter. Defaults to "2999-01-01" if 'null'.
- country (str): The country to filter by. Not used if 'null'.
- state (str): The state to filter by. Not used if 'null'.
Returns:
- dict: Constructed search conditions to be used in MongoDB Atlas VectorSearch.
Sample Usage:
>>> parse_parameters("2022-01-01", "2022-12-31", "Switzerland", "Zurich")
    {'compound': {'must': [{'text': {'path': 'state', 'query': 'Zurich'}},
                           {'text': {'path': 'country', 'query': 'Switzerland'}},
                           {'range': {'path': 'messageDatetime',
                                      'gte': datetime.datetime(2022, 1, 1, 0, 0),
                                      'lte': datetime.datetime(2023, 1, 1, 0, 0)}}]}}
"""
# List to hold our search conditions
must_conditions = []
# Check and add state condition
if state != 'null':
filter = {
"text": {
"path": "state",
"query": state
}
}
must_conditions.append(filter)
# Check and add country condition
if country != 'null':
filter = {
"text": {
"path": "country",
"query": country
}
}
must_conditions.append(filter)
# Set default start and end dates if not provided
start_date = '1999-01-01' if start_date == 'null' else start_date
end_date = '2999-01-01' if end_date == 'null' else end_date
# Add date range condition
filter = {
'range': {
'path': 'messageDatetime',
'gte': datetime.datetime.strptime(start_date, "%Y-%m-%d"),
'lte': datetime.datetime.strptime(end_date, "%Y-%m-%d")+datetime.timedelta(days=1),
}
}
must_conditions.append(filter)
# Return the constructed conditions
conditions = {
"compound": {
"must": must_conditions
}
}
return conditions
@app.post("/query")
async def query(request: Request):
"""
Endpoint to process user queries and return relevant answers.
Args:
- request (Request): FastAPI request object containing query parameters and body.
Returns:
- dict: A dictionary containing the generated answer and updated chat history.
Sample Usage:
Using HTTP client or CURL:
POST /query
{
"start_date": "2022-01-01",
"end_date": "2022-12-31",
"country": "Switzerland",
"state": "Zurich",
"query": "some query",
"chat_history": []
}
Response:
{
"answer": "some answer",
"chat_history": [["some answer", "some query"]]
}
"""
print("Starting /query endpoint...") # Debug: Indicate the start of the endpoint
# Get data from the incoming request
data = await request.json()
start_date = data.get("start_date", "null")
end_date = data.get("end_date", "null")
country = data.get("country", "null")
state = data.get("state", "null")
query_text = data.get("query")
chat_history_list = data.get("chat_history", [])
print(f"Received Data: Start Date: {start_date}, End Date: {end_date}, Country: {country}, State: {state}, Query: {query_text}")
# Error handling: Ensure a query is provided
if not query_text:
raise HTTPException(status_code=400, detail="Query text not provided in the request.")
# Initialize MongoDB Connection
print("Initializing MongoDB connection...")
client = MongoClient(
"mongodb+srv://{}:{}@cluster0.fcobsyq.mongodb.net/".format(
ATLAS_USER, ATLAS_TOKEN))
collection = client["scrape"]["telegram"]
# Check for the OpenAI API key
api_key = os.environ.get('OPENAI_API_KEY')
if not api_key:
raise HTTPException(status_code=500, detail="OpenAI API key not found in environment variables.")
# Set up embeddings, vectors, and memory for the retrieval chain
print("Setting up embeddings, vectors, and memory...")
embeddings = OpenAIEmbeddings(openai_api_key=api_key)
vectors = MongoDBAtlasVectorSearch(
collection=collection, text_key='messageText',
embedding=embeddings, index_name='telegram_embedding'
)
memory = ConversationBufferMemory(
memory_key='chat_history',
return_messages=True,
output_key='answer'
)
llm = ChatOpenAI(temperature=0.0, model_name='gpt-3.5-turbo', openai_api_key=api_key)
prompt_template = """Use the following pieces of context to answer the question at the end.
Combine the information from the context with your own general knowledge to provide a comprehensive and accurate answer.
Please be as specific as possible, also you are a friendly chatbot who is always polite.
{context}
Question: {question}"""
QA_CHAIN_PROMPT = PromptTemplate.from_template(prompt_template)
# Generate the search conditions and set up the retrieval chain
must_conditions = parse_parameters(start_date, end_date, country, state)
chain = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=vectors.as_retriever(search_type = 'mmr',
search_kwargs={
'k': 100, 'lambda_mult': 0.25,
"pre_filter": {
"compound": {
"must": must_conditions
}
},
}),
memory = memory,
return_source_documents=True,
return_generated_question=True,
combine_docs_chain_kwargs={"prompt": QA_CHAIN_PROMPT}
)
# Process the query using the retrieval chain
answer = chain({"question": query_text, "chat_history": chat_history_list})
# Print details of the source documents
print(answer["source_documents"][0].metadata['state'])
print(answer["source_documents"][0].metadata['country'])
print(answer["source_documents"][0].metadata['messageDatetime'])
print(answer["source_documents"][0].page_content)
# Add the new Q&A pair to the chat history and return the results
print("Returning the response...")
chat_history_list.append((query_text, answer["answer"]))
return {"answer": answer["answer"], "chat_history": chat_history_list}
#solely for test & debug
@app.get("/test")
def test_endpoint():
print("Test endpoint called!")
return {"message": "Test successful"}
# Run the FastAPI app using uvicorn
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8080) | [
"Use the following pieces of context to answer the question at the end. \n Combine the information from the context with your own general knowledge to provide a comprehensive and accurate answer. \n Please be as specific as possible, also you are a friendly chatbot who is always polite.\n {context}\n Question: {question}"
] |
2024-01-10 | sprenkamp/r2g2 | src~frontend~streamlit~app_EU_multi.py | import streamlit as st
import pandas as pd
import numpy as np
import geopandas as gpd
import plotly.express as px
import leafmap.foliumap as leafmap
import openai
import tiktoken
import os
import json
#Config must be first line in script
st.set_page_config(layout="wide")
# Set OpenAI API key
openai.organization = os.environ["OPENAI_ORGANIZATION"]
openai.api_key = os.getenv("OPENAI_API_KEY")
max_input_tokens=3900
max_tokens_output=500
encoding = "cl100k_base"
if 'language' not in st.session_state:
    st.session_state.language = "🇩🇪 Deutsch"
# load translation data json
with open("data/translate_app.json", "r") as f:
translator = json.load(f)
# calculate number of tokens in a text string
def num_tokens_from_string(string: str, encoding_name: str) -> int:
"""Returns the number of tokens in a text string."""
encoding = tiktoken.get_encoding(encoding_name)
num_tokens = len(encoding.encode(string))
return num_tokens
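# Quick sanity check (illustrative): a short phrase such as "hello world" encodes
# to 2 tokens under the cl100k_base encoding configured above:
#   num_tokens_from_string("hello world", "cl100k_base")  # -> 2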
# run gpt
def run_gpt(prompt, max_tokens_output, timeout=10):
completion = openai.ChatCompletion.create(
model = 'gpt-3.5-turbo',
messages = [
{'role': 'user', 'content': prompt}
],
max_tokens = max_tokens_output,
n = 1,
stop = None,
temperature=0.5,
timeout=timeout
)
return completion['choices'][0]['message']['content']
# create start prompt
def start_prompt_creator(message_type, cluster):
if len(cluster) > 1:
cluster = ", ".join(cluster)
else:
cluster = cluster[0]
if message_type == "Telegram":
        if st.session_state.language == "🇬🇧 English":
start_prompt = f"looking at this telegram messages about {cluster} what are the up to 5 top needs of refugees? Response in English"
        if st.session_state.language == "🇩🇪 Deutsch":
start_prompt = f"looking at this telegram messages about {cluster} what are the up to 5 top needs of refugees? Response in German Language"
return start_prompt, cluster
if message_type == "Twitter":
        if st.session_state.language == "🇬🇧 English":
start_prompt = f"looking at this twitter messages about {cluster} what are the up to 5 to issues? If possibile focus on refugees. Response in English Language"
        if st.session_state.language == "🇩🇪 Deutsch":
start_prompt = f"looking at this twitter messages about {cluster} what are the up to 5 to issues? If possibile focus on refugees. Response in German Language"
return start_prompt, cluster
if message_type == "News":
        if st.session_state.language == "🇬🇧 English":
start_prompt = f"looking at this news articles about {cluster} what are the up to 5 to issues? If possibile focus on refugees. Response in English Language"
        if st.session_state.language == "🇩🇪 Deutsch":
start_prompt = f"looking at this news articles about {cluster} what are the up to 5 to issues? If possibile focus on refugees. Response in German Language"
return start_prompt, cluster
# sample from df
def sample_df_gpt_analysis(df, start_prompt, max_input_tokens):
current_input_tokens = num_tokens_from_string(start_prompt, encoding_name=encoding)
text_list = []
text_list.append(start_prompt)
while max_input_tokens > current_input_tokens:
df_sample = df.sample(n=1, replace=False)
df = df.drop(df_sample.index)
current_input_tokens += df_sample["tokens"].values[0]
if current_input_tokens > max_input_tokens:
break
text_list.append(df_sample["text"].values[0])
text = '\n'.join(text_list)
return text
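# Illustrative call (keyword names follow the function signature; the dataframes
# loaded below provide the "text" and "tokens" columns this sampling relies on):
#   prompt = sample_df_gpt_analysis(df=df_telegram, start_prompt="Summarise the top needs:",
#                                   max_input_tokens=max_input_tokens - max_tokens_output)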
# write output to streamlit
def write_output(text, summary_select, cluster):
st.header(translator[st.session_state.language]["Your Summary ๐"])
st.write(text)
# load geopandas data
gdf = gpd.read_file("data/germany_switzerland.geojson")
#function dummy space in sidebar
def dummy_function_space():
st.write("\n")
st.write("\n")
st.write("\n")
st.write("\n")
st.write("\n")
st.write("\n")
st.write("\n")
st.write("\n")
st.write("\n")
st.write("\n")
st.write("\n")
st.write("\n")
st.write("\n")
def dummy_function_space_small():
st.write("\n")
#functions to load data
@st.cache()
def load_telegram_data():
df = pd.read_csv("data/df_telegram.csv")
#print(df.head(1))
df['date'] = pd.to_datetime(df['date'], utc=True).dt.date
return df
@st.cache
def load_news_data():
df = pd.read_csv("data/df_news.csv")
df['date'] = pd.to_datetime(df['date'], utc=True).dt.date
return df
@st.cache()
def load_twitter_data():
df = pd.read_csv("data/df_twitter.csv")
df['date'] = pd.to_datetime(df['date'], utc=True).dt.date
return df
# manipulate data
def create_df_value_counts(df):
messages_per_week_dict = dict(df.value_counts("date"))
df_value_counts = df.value_counts(["cluster", "date"]).reset_index()
df_value_counts.columns = ["cluster", "date", "occurence_count"]
return df_value_counts
def modify_df_for_table(df_mod, country_select, state_select, cluster_select, date_slider, metric_select=None):
if country_select!=translator[st.session_state.language]["all countries analysed"]:
df_mod = df_mod[df_mod.country==translator[st.session_state.language][country_select]]
if state_select not in [translator[st.session_state.language]["all states analysed"]]:
df_mod = df_mod[df_mod.state==state_select]
if not translator[st.session_state.language]["all found topics"] in cluster_select:
df_mod = df_mod[df_mod.cluster.isin(cluster_select)]
df_mod = df_mod[df_mod.date.between(date_slider[0], date_slider[1])]
return df_mod
# load data
df_telegram = load_telegram_data()
df_twitter = load_twitter_data()
df_news = load_news_data()
with st.sidebar:
language_select = st.selectbox(
'Sprache/Language',
        options=["🇩🇪 Deutsch", "🇬🇧 English"],
        index=["🇩🇪 Deutsch", "🇬🇧 English"].index(st.session_state.language)
)
if st.session_state.language != language_select:
st.session_state.language = language_select
cluster_select_telegram = st.multiselect(
translator[st.session_state.language]['Choose the topics of interest within the telegram data'],
[translator[st.session_state.language]["all found topics"]] + df_telegram.cluster.unique().tolist(),
[translator[st.session_state.language]["all found topics"]]
)
cluster_select_twitter = st.multiselect(
translator[st.session_state.language]['Choose the topics of interest within the twitter data'],
[translator[st.session_state.language]["all found topics"]] + df_twitter.cluster.unique().tolist(),
[translator[st.session_state.language]["all found topics"]]
)
cluster_select_news = st.multiselect(
translator[st.session_state.language]['Choose the topic of interest within the news data'],
[translator[st.session_state.language]["all found topics"]] + df_news.cluster.unique().tolist(),
[translator[st.session_state.language]["all found topics"]]
)
dummy_function_space()
summary_select = st.selectbox(
translator[st.session_state.language]['show summary of'],
["Telegram", "Twitter", "News"],
)
calculate_summary = st.button(translator[st.session_state.language]["prepare summary"])
dummy_function_space_small()
show_table = st.button(translator[st.session_state.language]['show data in table'])
st.title(translator[st.session_state.language]['Identification of the most relevant topics in the context of the Ukrainian Refugee Crisis in the media and social media'])
# create text columns for country, state and time selection
text_col1, text_col2, text_col3 = st.columns(3)
with text_col1:
country_select = st.selectbox(
translator[st.session_state.language]["Select a country of interest"],
[translator[st.session_state.language]["all countries analysed"], translator[st.session_state.language]["Germany"], translator[st.session_state.language]["Switzerland"]],
)
with text_col2:
states = [translator[st.session_state.language]["all states analysed"]] + gdf.state.unique().tolist()
if country_select==translator[st.session_state.language]["Germany"]:
states = [translator[st.session_state.language]["all states analysed"]] + gdf[gdf["country"]=="Germany"].state.unique().tolist()
if country_select==translator[st.session_state.language]["Switzerland"]:
states = [translator[st.session_state.language]["all states analysed"]] + gdf[gdf["country"]=="Switzerland"].state.unique().tolist()
state_select = st.selectbox(
translator[st.session_state.language]['Choose a state of interest'],
states,
)
with text_col3:
date_slider = st.slider(translator[st.session_state.language]['Choose date range of interest'],
min_value=df_telegram.date.min(),
value=(df_telegram.date.min(), df_telegram.date.max()),
max_value=df_telegram.date.max()
)
df_telegram_mod = modify_df_for_table(df_mod=df_telegram, country_select=country_select, state_select=state_select, cluster_select=cluster_select_telegram, date_slider=date_slider)
df_value_counts_telegram = create_df_value_counts(df=df_telegram_mod)
df_twitter_mod = modify_df_for_table(df_mod=df_twitter, country_select=country_select, state_select=state_select, cluster_select=cluster_select_twitter, date_slider=date_slider)
df_value_counts_twitter = create_df_value_counts(df=df_twitter_mod)
df_news_mod = modify_df_for_table(df_mod=df_news, country_select=country_select, state_select=state_select, cluster_select=cluster_select_news, date_slider=date_slider)
df_value_counts_news = create_df_value_counts(df=df_news_mod)
visual_col1, visual_col2= st.columns(2)
with visual_col1:
if country_select==translator[st.session_state.language]["all countries analysed"]:
m = leafmap.Map(center=[46.449212, 7.734375], zoom=7)
m.add_gdf(gdf[gdf["country"].isin(["Switzerland", "Germany"])], layer_name="Countries choosen", fill_colors=["red"])
m.to_streamlit()
if country_select==translator[st.session_state.language]["Switzerland"] and state_select==translator[st.session_state.language]["all states analysed"]:
m = leafmap.Map(center=[46.449212, 7.734375], zoom=7)
m.add_gdf(gdf[gdf["country"]!="Switzerland"], layer_name="Countries", fill_colors=["blue"])
m.add_gdf(gdf[gdf["country"]=="Switzerland"], layer_name="Countries Choosen", fill_colors=["red"])
m.to_streamlit()
if country_select==translator[st.session_state.language]["Switzerland"] and state_select!=translator[st.session_state.language]["all states analysed"]:
m = leafmap.Map(center=[46.449212, 7.734375], zoom=7)
m.add_gdf(gdf[gdf["state"]!=state_select], layer_name="Countries", fill_colors=["blue"])
m.add_gdf(gdf[gdf["state"]==state_select], layer_name="Countries Choosen", fill_colors=["red"])
m.to_streamlit()
if country_select==translator[st.session_state.language]["Germany"] and state_select==translator[st.session_state.language]["all states analysed"]:
m = leafmap.Map(center=[46.449212, 7.734375], zoom=7)
m.add_gdf(gdf[gdf["country"]=="Germany"], layer_name="Countries Choosen", fill_colors=["red"])
m.add_gdf(gdf[gdf["country"]!="Germany"], layer_name="Countries", fill_colors=["blue"])
m.to_streamlit()
if country_select==translator[st.session_state.language]["Germany"] and state_select!=translator[st.session_state.language]["all states analysed"]:
m = leafmap.Map(center=[46.449212, 7.734375], zoom=7)
m.add_gdf(gdf[gdf["state"]==state_select], layer_name="Countries Choosen", fill_colors=["red"])
m.add_gdf(gdf[gdf["state"]!=state_select], layer_name="Countries", fill_colors=["blue"])
m.to_streamlit()
if country_select==translator[st.session_state.language]["Germany"] or country_select==translator[st.session_state.language]["Switzerland"] or country_select==translator[st.session_state.language]["all countries analysed"]:
title_diagram_news = translator[st.session_state.language]["Topics over time on News within"] + " " + country_select
fig = px.line(df_value_counts_news.sort_values(['date']), x="date", y="occurence_count", color='cluster', title=title_diagram_news)
else:
title_diagram_news = translator[st.session_state.language]["Topics over time on News within"] + " " + state_select
fig = px.line(df_value_counts_news.sort_values(['date']), x="date", y="occurence_count", color='cluster', title=title_diagram_news)
fig.update_xaxes(title_text=translator[st.session_state.language]["Date"])
fig.update_yaxes(title_text=translator[st.session_state.language]["Count"])
st.plotly_chart(fig, use_container_width=True)
with visual_col2:
if country_select==translator[st.session_state.language]["Germany"] or country_select==translator[st.session_state.language]["Switzerland"] or country_select==translator[st.session_state.language]["all countries analysed"]:
title_diagram_telegram = translator[st.session_state.language]["Topics over time on Telegram within"] + " " + country_select
fig = px.line(df_value_counts_telegram.sort_values(['date']), x="date", y="occurence_count", color='cluster', title=title_diagram_telegram)
else:
title_diagram_telegram = translator[st.session_state.language]["Topics over time on News within"] + " " + state_select
fig = px.line(df_value_counts_telegram.sort_values(['date']), x="date", y="occurence_count", color='cluster', title=title_diagram_telegram)
fig.update_xaxes(title_text=translator[st.session_state.language]["Date"])
fig.update_yaxes(title_text=translator[st.session_state.language]["Count"])
st.plotly_chart(fig, use_container_width=True)
st.markdown("<p style='margin-top: 150px;'</p>", unsafe_allow_html=True)
if country_select==translator[st.session_state.language]["Germany"] or country_select==translator[st.session_state.language]["Switzerland"] or country_select==translator[st.session_state.language]["all countries analysed"]:
title_diagram_twitter = translator[st.session_state.language]["Topics over time on Twitter within"] + " " + country_select
fig = px.line(df_value_counts_twitter.sort_values(['date']), x="date", y="occurence_count", color='cluster', title=title_diagram_twitter)
else:
title_diagram_twitter = translator[st.session_state.language]["Topics over time on Twitter within"] + " " + state_select
fig = px.line(df_value_counts_twitter.sort_values(['date']), x="date", y="occurence_count", color='cluster', title=title_diagram_twitter)
fig.update_xaxes(title_text=translator[st.session_state.language]["Date"])
fig.update_yaxes(title_text=translator[st.session_state.language]["Count"])
st.plotly_chart(fig, use_container_width=True)
if calculate_summary:
if summary_select=="Telegram":
df_mod = df_telegram_mod
cluster = cluster_select_telegram
if summary_select=="Twitter":
df_mod = df_twitter_mod
cluster = cluster_select_twitter
if summary_select=="News":
df_mod = df_news_mod
cluster = cluster_select_news
dummy_text_summary = st.header(translator[st.session_state.language]["Creating your summary โณ๐"])
start_prompt, cluster_str = start_prompt_creator(message_type=summary_select, cluster=cluster)
prompt = sample_df_gpt_analysis(df=df_mod, start_prompt=start_prompt, max_input_tokens=max_input_tokens-max_tokens_output)
try:
text = run_gpt(prompt, max_tokens_output, timeout=10)
except openai.OpenAIError as e:
text = translator[st.session_state.language]["Sorry, request timed out. Please try again."]
dummy_text_summary.empty()
write_output(text, summary_select, cluster_str)
if show_table:
if summary_select=="Telegram":
st.dataframe(df_telegram_mod)
if summary_select=="Twitter":
st.dataframe(df_twitter_mod)
if summary_select=="News":
st.dataframe(df_news_mod)
| [
"looking at this telegram messages about PLACEHOLDER what are the up to 5 top needs of refugees? Response in English",
"looking at this news articles about PLACEHOLDER what are the up to 5 to issues? If possibile focus on refugees. Response in German Language",
"looking at this twitter messages about PLACEHOLDER what are the up to 5 to issues? If possibile focus on refugees. Response in English Language",
"looking at this telegram messages about PLACEHOLDER what are the up to 5 top needs of refugees? Response in German Language",
"looking at this news articles about PLACEHOLDER what are the up to 5 to issues? If possibile focus on refugees. Response in English Language",
"looking at this twitter messages about PLACEHOLDER what are the up to 5 to issues? If possibile focus on refugees. Response in German Language"
] |
2024-01-10 | sprenkamp/r2g2 | src~machine_learning~chat~docker~chat_fastapi_conversational.py | # scp -i "D:\aws_key\aws_node.pem" "D:\visualstudiocode\project\r2g2\r2g2\src\machine_learning\chat\docker\chat_fastapi.py" [email protected]:/home/ec2-user/
import os
from dotenv import load_dotenv
load_dotenv()
from langchain.prompts import PromptTemplate
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.vectorstores import MongoDBAtlasVectorSearch
from fastapi.middleware.cors import CORSMiddleware
from pymongo import MongoClient
from fastapi import FastAPI
import datetime
from pydantic import BaseModel
class QueryRequest(BaseModel):
start_date: str
end_date: str
country: str
state: str
predicted_class: str
query: str
chat_history: list
# uvicorn Tumen_Chatbot_development_edition:app --reload
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=[
'https://governmentasaplatform.ch',
'https://www.governmentasaplatform.ch',
'https://bot.governmentasaplatform.ch/query',
'https://bot.governmentasaplatform.ch',
'governmentasaplatform.ch',
'bot-load-balancer-175078596.eu-north-1.elb.amazonaws.com',
'http://localhost:5173',
],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# The local machine should have the following environment variables:
ATLAS_TOKEN = os.environ["ATLAS_TOKEN"]
ATLAS_USER = os.environ["ATLAS_USER"]
# This function is used to parse the filters into the format that can be used by MongoDB
def parse_parameters(start_date, end_date, country, state, predicted_class):
must_conditions = []
if state != 'null':
filter = {
"text": {
"path": "state",
"query": state
}
}
must_conditions.append(filter)
if country != 'null':
filter = {
"text": {
"path": "country",
"query": country
}
}
must_conditions.append(filter)
if predicted_class != 'null':
filter = {
"text": {
"path": "predicted_class",
"query": predicted_class
}
}
must_conditions.append(filter)
start_date = '1999-01-01' if start_date == 'null' else start_date
end_date = '2999-01-01' if end_date == 'null' else end_date
filter = {
'range': {
'path': 'messageDatetime',
'gte': datetime.datetime.strptime(start_date, "%Y-%m-%d"),
'lte': datetime.datetime.strptime(end_date, "%Y-%m-%d")+datetime.timedelta(days=1),
}
}
must_conditions.append(filter)
conditions = {
"compound": {
"must": must_conditions
}
}
return conditions
# This function calls the chatbot and returns the answer and prints all the relevant metadata
@app.post("/query")
def query(query_request: QueryRequest):
start_date = query_request.start_date
end_date = query_request.end_date
country = query_request.country
state = query_request.state
predicted_class = query_request.predicted_class
query = query_request.query
chat_history = query_request.chat_history
'''
Args:
start_date: string, e.g. '2022-01-01'
end_date: string e.g. '2022-01-02'
country: string e.g. 'Switzerland'
state: string e.g. 'Zurich'
predicted_class: string e.g. 'Education'
query: string e.g. 'Can I get free clothes in Zurich?'
chat_history: array
{
"start_date": "2022-01-01",
"end_date": "2022-01-10",
"country": "Switzerland",
"state": "Zurich",
"predicted_class": "Education",
"query": "hello",
"chat_history": []
}
Returns: the generated answer string and the updated chat history
'''
# initialize the connection to MongoDB Atlas
client = MongoClient(
"mongodb+srv://{}:{}@cluster0.fcobsyq.mongodb.net/".format(
ATLAS_USER, ATLAS_TOKEN))
db_name, collection_name = "scrape", "telegram"
collection = client[db_name][collection_name]
api_key = os.environ.get('OPENAI_API_KEY')
if not api_key:
print('OpenAI API key not found in environment variables.')
exit()
# create the embedding and vector search objects
embeddings = OpenAIEmbeddings(openai_api_key=api_key)
vectors = MongoDBAtlasVectorSearch(
collection=collection, text_key='messageText',
embedding=embeddings, index_name='telegram_embedding'
)
# create the memory object
memory = ConversationBufferMemory(
memory_key='chat_history',
return_messages=True,
output_key='answer')
# create the large language model object
llm=ChatOpenAI(temperature=0.0, model_name='gpt-3.5-turbo-16k', openai_api_key=api_key)
# create the prompt template for chatbot to use
prompt_template = """Use the following pieces of context to answer the question at the end.
Combine the information from the context with your own general knowledge to provide a comprehensive and accurate answer.
Please be as specific as possible, also you are a friendly chatbot who is always polite.
{context}
Question: {question}"""
QA_CHAIN_PROMPT = PromptTemplate.from_template(prompt_template)
# generate conditions
must_conditions = parse_parameters(start_date, end_date, country, state, predicted_class)
print(must_conditions)
# create a chatbot chain
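# Note: the retriever below uses Maximal Marginal Relevance (MMR) re-ranking: k=100 documents are
# returned, lambda_mult=0.25 leans toward diversity (0 = max diversity, 1 = max similarity), and
# pre_filter restricts the Atlas vector search to the parsed metadata conditions.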
chain = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=vectors.as_retriever(search_type = 'mmr',
search_kwargs={
'k': 100, 'lambda_mult': 0.25,
"pre_filter": {
"compound": {
"must": must_conditions
}
},
}),
memory = memory,
return_source_documents=True,
return_generated_question=True,
combine_docs_chain_kwargs={"prompt": QA_CHAIN_PROMPT}
)
# create the chat
answer = chain({"question": query, "chat_history": chat_history})
# for i in range(10):
# print(answer["source_documents"][i].metadata['state'])
# print(answer["source_documents"][i].metadata['country'])
# print(answer["source_documents"][i].metadata['messageDatetime'])
#print(answer["source_documents"][0].page_content)
return answer["answer"], answer['chat_history']
@app.get("/test")
def test_endpoint():
print("Test endpoint called!")
return {"message": "Test successful"}
# Run the FastAPI app using uvicorn
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8000) | [
"Use the following pieces of context to answer the question at the end. \n Combine the information from the context with your own general knowledge to provide a comprehensive and accurate answer. \n Please be as specific as possible, also you are a friendly chatbot who is always polite.\n {context}\n Question: {question}"
] |
2024-01-10 | sprenkamp/r2g2 | frontend~chatgpt-backend~chat_fastapi_app.py | import os
import datetime
from dotenv import load_dotenv
from fastapi import FastAPI, HTTPException, Request
from pymongo import MongoClient
from langchain.prompts import PromptTemplate
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.vectorstores import MongoDBAtlasVectorSearch
from fastapi.middleware.cors import CORSMiddleware
'''
http://127.0.0.1:8080/docs
Can I find a job in Switzerland as a nurse?
uvicorn chat_fastapi_app:app --reload
'''
#TODO:
# 1. Solve bug pymongo.errors.OperationFailure:
# $vectorSearch is not allowed or the syntax is incorrect, see the Atlas documentation for more information, full error: {'ok': 0, 'errmsg': '$vectorSearch is not allowed or the syntax is incorrect,
# see the Atlas documentation for more information', 'code': 8000, 'codeName': 'AtlasError'}
# 2. Add cluster to parse_parameters and use it in MongoDBAtlasVectorSearch
# 3. Simplify code I feel we don't need all these langchain functions
# 4. Write dockerfile and run app in docker
# 5. Think about hosting options
# Load environment variables from the .env file
load_dotenv()
# Initialize FastAPI app
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=["http://localhost:5173"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Get MongoDB Atlas credentials from environment variables
ATLAS_TOKEN = os.environ["ATLAS_TOKEN"]
ATLAS_USER = os.environ["ATLAS_USER"]
def parse_parameters(start_date, end_date, country, state):
"""
Parse the input parameters and construct search conditions.
Args:
- start_date (str): The start date for the date range filter. Defaults to "1999-01-01" if 'null'.
- end_date (str): The end date for the date range filter. Defaults to "2999-01-01" if 'null'.
- country (str): The country to filter by. Not used if 'null'.
- state (str): The state to filter by. Not used if 'null'.
Returns:
- dict: Constructed search conditions to be used in MongoDB Atlas VectorSearch.
Sample Usage:
>>> parse_parameters("2022-01-01", "2022-12-31", "Switzerland", "Zurich")
{'compound': {'must': [{'text': {'path': 'state', 'query': 'Zurich'}},
{'text': {'path': 'country', 'query': 'Switzerland'}},
{'range': {'path': 'messageDatetime',
'gte': datetime.datetime(2022, 1, 1, 0, 0),
'lte': datetime.datetime(2023, 1, 1, 0, 0)}}]}}
"""
# List to hold our search conditions
must_conditions = []
# Check and add state condition
if state != 'null':
filter = {
"text": {
"path": "state",
"query": state
}
}
must_conditions.append(filter)
# Check and add country condition
if country != 'null':
filter = {
"text": {
"path": "country",
"query": country
}
}
must_conditions.append(filter)
# Set default start and end dates if not provided
start_date = '1999-01-01' if start_date == 'null' else start_date
end_date = '2999-01-01' if end_date == 'null' else end_date
# Add date range condition
filter = {
'range': {
'path': 'messageDatetime',
'gte': datetime.datetime.strptime(start_date, "%Y-%m-%d"),
'lte': datetime.datetime.strptime(end_date, "%Y-%m-%d")+datetime.timedelta(days=1),
}
}
must_conditions.append(filter)
# Return the constructed conditions
conditions = {
"compound": {
"must": must_conditions
}
}
return conditions
@app.post("/query")
async def query(request: Request):
"""
Endpoint to process user queries and return relevant answers.
Args:
- request (Request): FastAPI request object containing query parameters and body.
Returns:
- dict: A dictionary containing the generated answer and updated chat history.
Sample Usage:
Using HTTP client or CURL:
POST /query
{
"start_date": "2022-01-01",
"end_date": "2022-12-31",
"country": "Switzerland",
"state": "Zurich",
"query": "some query",
"chat_history": []
}
Response:
{
"answer": "some answer",
"chat_history": [["some answer", "some query"]]
}
"""
print("Starting /query endpoint...") # Debug: Indicate the start of the endpoint
# Get data from the incoming request
data = await request.json()
start_date = data.get("start_date", "null")
end_date = data.get("end_date", "null")
country = data.get("country", "null")
state = data.get("state", "null")
query_text = data.get("query")
chat_history_list = data.get("chat_history", [])
print(f"Received Data: Start Date: {start_date}, End Date: {end_date}, Country: {country}, State: {state}, Query: {query_text}")
# Error handling: Ensure a query is provided
if not query_text:
raise HTTPException(status_code=400, detail="Query text not provided in the request.")
# Initialize MongoDB Connection
print("Initializing MongoDB connection...")
client = MongoClient(
"mongodb+srv://{}:{}@cluster0.fcobsyq.mongodb.net/".format(
ATLAS_USER, ATLAS_TOKEN))
collection = client["scrape"]["telegram"]
# Check for the OpenAI API key
api_key = os.environ.get('OPENAI_API_KEY')
if not api_key:
raise HTTPException(status_code=500, detail="OpenAI API key not found in environment variables.")
# Set up embeddings, vectors, and memory for the retrieval chain
print("Setting up embeddings, vectors, and memory...")
embeddings = OpenAIEmbeddings(openai_api_key=api_key)
vectors = MongoDBAtlasVectorSearch(
collection=collection, text_key='messageText',
embedding=embeddings, index_name='telegram_embedding'
)
memory = ConversationBufferMemory(
memory_key='chat_history',
return_messages=True,
output_key='answer'
)
llm = ChatOpenAI(temperature=0.0, model_name='gpt-3.5-turbo', openai_api_key=api_key)
prompt_template = """Use the following pieces of context to answer the question at the end.
Combine the information from the context with your own general knowledge to provide a comprehensive and accurate answer.
Please be as specific as possible, also you are a friendly chatbot who is always polite.
{context}
Question: {question}"""
QA_CHAIN_PROMPT = PromptTemplate.from_template(prompt_template)
# Generate the search conditions and set up the retrieval chain
must_conditions = parse_parameters(start_date, end_date, country, state)
chain = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=vectors.as_retriever(search_type = 'mmr',
search_kwargs={
'k': 100, 'lambda_mult': 0.25,
"pre_filter": {
"compound": {
"must": must_conditions
}
},
}),
memory = memory,
return_source_documents=True,
return_generated_question=True,
combine_docs_chain_kwargs={"prompt": QA_CHAIN_PROMPT}
)
# Process the query using the retrieval chain
answer = chain({"question": query_text, "chat_history": chat_history_list})
# # Print details of the source documents
# print(answer["source_documents"][0].metadata['state'])
# print(answer["source_documents"][0].metadata['country'])
# print(answer["source_documents"][0].metadata['messageDatetime'])
# print(answer["source_documents"][0].page_content)
# # Add the new Q&A pair to the chat history and return the results
# print("Returning the response...")
chat_history_list.append((query_text, answer["answer"]))
return {"answer": answer["answer"], "chat_history": chat_history_list}
#solely for test & debug
@app.get("/test")
def test_endpoint():
print("Test endpoint called!")
return {"message": "Test successful"}
# Run the FastAPI app using uvicorn
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8000) | [
"Use the following pieces of context to answer the question at the end. \n Combine the information from the context with your own general knowledge to provide a comprehensive and accurate answer. \n Please be as specific as possible, also you are a friendly chatbot who is always polite.\n {context}\n Question: {question}"
] |
2024-01-10 | sprenkamp/r2g2 | src~database~processScrapeTelegram.py | import os
from dotenv import load_dotenv
load_dotenv()
import argparse
from pymongo import MongoClient
from pymongo import InsertOne, DeleteMany, ReplaceOne, UpdateOne
import pandas as pd
import datetime
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_columns', None)
import openai
openai.api_key = os.environ["OPENAI_API_KEY"]
def validate_local_file(f): # function to check if file exists
if not os.path.exists(f):
raise argparse.ArgumentTypeError("{0} does not exist".format(f))
return f
def get_chats_list(input_file_path):
"""
Args:
input_file_path: chats path
Returns: pandas dataframe. e.g.
|country|chat|
|Switzerland|https://t.me/zurich_hb_help|
|Switzerland|https://t.me/helpfulinfoforua|
"""
countries, chats = list(), list()
with open(input_file_path, 'r') as file:
for line in file.readlines():
if line.startswith("#"):
country = line.replace('#', '').replace('\n', '')
else:
chat = line.replace('\n', '')
chats.append(chat)
countries.append(country)
df = pd.DataFrame(list(zip(countries, chats)),
columns=['country', 'chat'])
return df
def calculate_message_without_bert_topic_label(chats, collection):
'''
Find newly scraped messages (records without a topicUpdateTime) and report how many still need to be trained and assigned a topic label.
Args:
chats:
collection:
'''
for index, row in chats.iterrows():
selection_criteria = {
"$and": [
{'chat': row['chat']},
{"topicUpdateTime": {'$exists': False}},
],
}
projection = {'_id': 1}
cursor = collection.find(selection_criteria, projection)
print(len(list(cursor.clone())), "records need to be trained", row['chat'])
def calculate_redundant_embedding(chats, collection):
'''
Find messages that should NOT have an embedding (too short or unknown topic).
Reports: messages with an embedding / messages that should not have one.
Expected ratio 0/x = 0 -> no redundant embeddings.
Args:
chats:
collection:
'''
print("--- Check Redundant Embedding (Expect 0/x)---")
for index, row in chats.iterrows():
projection = {'_id': 1}
selection_criteria = {
"$and": [
{'chat': row['chat']},
{"predicted_class": {'$exists': True}},
{"messageText": {'$exists': True}},
],
"$or": [
{"predicted_class": {"$eq": 'Unknown'}},
{"$expr": {"$lt": [{"$strLenCP": '$messageText'}, 100]}}
]
}
all_no_embedding_cursor = collection.find(selection_criteria, projection)
selection_criteria = {
"$and": [
{'chat': row['chat']},
{'embedding': {'$exists': True}},
{"predicted_class": {'$exists': True}},
{"messageText": {'$exists': True}},
],
"$or": [
{"predicted_class": {"$eq": 'Unknown'}},
{"$expr": {"$lt": [{"$strLenCP": '$messageText'}, 100]}}
]
}
true_embeding_cursor = collection.find(selection_criteria, projection)
print("{}/{} of rows have '{}'. {}".format(
len(list(true_embeding_cursor.clone())),
len(list(all_no_embedding_cursor.clone())),
'embedding', row['chat']))
def calculate_missing_embedding(chats, collection):
'''
Find messages that SHOULD have an embedding (long enough and with a known topic).
Reports: messages with an embedding / messages that should have one.
Expected ratio x/x = 1 -> no missing embeddings.
Args:
chats:
collection:
'''
print("--- Check Missing Embedding (Expect x/x) ---")
for index, row in chats.iterrows():
projection = {'embedding': 1}
selection_criteria = {
"$and": [
{'chat': row['chat']},
{"predicted_class": {'$exists': True, "$ne": 'Unknown'}},
{"messageText": {'$exists': True}},
{"$expr": {"$gt": [{"$strLenCP": '$messageText'}, 100]}}
],
}
all_embedding_cursor = collection.find(selection_criteria, projection)
selection_criteria = {
"$and": [
{'chat': row['chat']},
{'embedding': {'$exists': True}},
{"predicted_class": {'$exists': True, "$ne": 'Unknown'}},
{"messageText": {'$exists': True}},
{"$expr": {"$gt": [{"$strLenCP": '$messageText'}, 100]}}
],
}
true_embeding_cursor = collection.find(selection_criteria, projection)
print("{}/{} of rows have '{}'. {}".format(
len(list(true_embeding_cursor.clone())),
len(list(all_embedding_cursor.clone())),
'embedding', row['chat']))
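# Fetch an OpenAI embedding for one message (text-embedding-ada-002 returns a 1536-dimensional vector).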
def get_embedding(text, model="text-embedding-ada-002"):
text = text.replace("\n", " ")
return openai.Embedding.create(input=[text], model=model)['data'][0]['embedding']
def add_embedding(chats, collection):
'''
Add embeddings (in batches of 1000) for messages that meet the requirements:
(1) messageText length > 100
(2) predicted_class != 'Unknown'
Args:
chats:
collection:
Returns:
'''
import bson
batch_size = 1000
for index, row in chats.iterrows():
print(row['chat'])
selection_criteria = {
"$and": [
{'chat': row['chat']},
{"predicted_class": {'$exists': True, "$ne": 'Unknown'}},
{"messageText": {'$exists': True}},
{"$expr": {"$gt": [{"$strLenCP": '$messageText'}, 100]}}
],
}
projection = {'_id': 1, 'messageText':1}
cursor = collection.find_raw_batches(selection_criteria, projection, batch_size=batch_size)
for batch in cursor:
data = bson.decode_all(batch)
df = pd.DataFrame(list(data))
df['embedding'] = df['messageText'].apply(lambda x: get_embedding(x))
tmp = list()
for index, row in df.iterrows():
tmp.append(UpdateOne({"_id": row["_id"]}, {"$set": {"embedding": row['embedding']}}))
collection.bulk_write(tmp)
def clear_embedding(chats, collection):
'''
Remove embeddings from messages that do not meet the requirements, i.e.
(1) messageText length < 100, or
(2) predicted_class == 'Unknown'
Args:
chats:
collection:
Returns:
'''
import bson
batch_size = 1000
for index, row in chats.iterrows():
selection_criteria = {
"$and": [
{'chat': row['chat']},
{"predicted_class": {'$exists': True}},
{"messageText": {'$exists': True}},
],
"$or": [
{"predicted_class": {"$eq": 'Unknown'}},
{"$expr": {"$lt": [{"$strLenCP": '$messageText'}, 100]}}
]
}
projection = {'_id': 1}
cursor = collection.find_raw_batches(selection_criteria, projection, batch_size=batch_size)
for batch in cursor:
data = bson.decode_all(batch)
df = pd.DataFrame(list(data))
# df = df[(df['messageText'].str.len() < 100) | (df['predicted_class'] == 'Unknown')]
tmp = list()
for index, row in df.iterrows():
tmp.append(UpdateOne({"_id": row["_id"]}, {'$unset': {'embedding': 1}}))
collection.bulk_write(tmp)
def add_model_modification_timestamp(chats, collection):
modelUpdateTime = datetime.datetime(2023, 10, 17)
for index, row in chats.iterrows():
collection.update_many({'chat': row['chat']}, {"$set": {"modelUpdateTime": modelUpdateTime}})
def update_field_name(chats, collection, previous_name, new_name):
for index, row in chats.iterrows():
print(row['chat'])
collection.update_many({'chat': row['chat']}, {"$rename": {previous_name: new_name}})
def update_messageDate(chats, collection):
'''
add field 'messageDate' in form of "%Y-%m-%d"
To accelerate inserting speed, write in bulk
Args:
chats:
collection:
Returns: add/update messageDate
'''
import bson
batch_size = 1000
for index, row in chats.iterrows():
print(row['chat'])
selection_criteria = {'chat': row['chat']}
projection = {'_id': 1, 'messageDatetime': 1}
cursor = collection.find_raw_batches(selection_criteria, projection, batch_size=batch_size)
for batch in cursor:
data = bson.decode_all(batch)
df = pd.DataFrame(list(data))
df['messageDate'] = df['messageDatetime'].dt.strftime("%Y-%m-%d")
tmp = list()
for index, row in df.iterrows():
tmp.append(UpdateOne({"_id": row["_id"]}, {"$set": {"messageDate": row['messageDate']}}))
collection.bulk_write(tmp)
def delete_chat_data(chats, collection):
for index, row in chats.iterrows():
condition = {'chat': row['chat']}
collection.delete_many(condition)
if __name__ == '__main__':
'''
Add messageDate to the whole collection: scrape.telegram
use command:
(1) prd dataset
python src/database/processScrapeTelegram.py \
-i data/telegram/queries/switzerland_groups.txt \
-o scrape.telegram
(2) testing dataset
python src/database/processScrapeTelegram.py \
-i data/telegram/queries/switzerland_groups.txt \
-o test.telegram
'''
# parse parameters
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_file_path', help="Specify the input file", type=validate_local_file,
required=True)
parser.add_argument('-o', '--output_database', help="Specify the output database", required=True)
args = parser.parse_args()
# connect to db
ATLAS_TOKEN = os.environ["ATLAS_TOKEN"]
ATLAS_USER = os.environ["ATLAS_USER"]
cluster = MongoClient(
"mongodb+srv://{}:{}@cluster0.fcobsyq.mongodb.net/".format(ATLAS_USER, ATLAS_TOKEN))
# specify names of database and collection
# db_name, collection_name = "test", "telegram"
db_name, collection_name = args.output_database.split('.')
collection = cluster[db_name][collection_name]
chats = get_chats_list(args.input_file_path)
########### operate collection
# calculate_message_without_bert_topic_label(chats, collection)
# clear_embedding(chats, collection)
# add_embedding(chats, collection)
# calculate_redundant_embedding(chats, collection)
# calculate_missing_embedding(chats, collection)
# update_messageDate(chats, collection)
# replace_empty_state(chats, collection)
# add_model_modification_timestamp(chats, collection)
# update_field_name(chats, collection, "modelUpdateTime", "topicUpdateTime")
########### operate collection
cluster.close()
| [] |
2024-01-10 | sprenkamp/r2g2 | src~helper~scraping~telegram_tools~scrapeTelegramChannelMessages.py | from telethon import TelegramClient, events, sync, errors
from telethon.sessions import StringSession
import asyncio
import os
from dotenv import load_dotenv
load_dotenv()
import datetime
from tqdm import tqdm
import argparse
from geosky import geo_plug
from pymongo import MongoClient, errors
import pandas as pd
import openai
openai.api_key = os.environ["OPENAI_API_KEY"]
# To run this code, you must get your own api_id and api_hash from
# https://my.telegram.org, under API Development.
# If you run the code locally, store these variables in a .env file;
# if you run it on the GitHub platform, the secrets/variables are fetched automatically.
TELEGRAM_API_ID = os.environ["TELEGRAM_API_ID"]
TELEGRAM_API_HASH = os.environ["TELEGRAM_API_HASH"]
TELEGRAM_STRING_TOKEN = os.environ["TELEGRAM_STRING_TOKEN"]
ATLAS_TOKEN = os.environ["ATLAS_TOKEN"]
ATLAS_USER = os.environ["ATLAS_USER"]
def validate_local_file(f): # function to check if file exists
if not os.path.exists(f):
raise argparse.ArgumentTypeError("{0} does not exist".format(f))
return f
def validate_database(s):
database_name, collection_name = s.split('.')
cluster = MongoClient("mongodb+srv://{}:{}@cluster0.fcobsyq.mongodb.net/".format(ATLAS_USER, ATLAS_TOKEN))
db = cluster[database_name]
list_of_collections = db.list_collection_names()
if collection_name not in list_of_collections:
raise Exception("Collection does not exit")
return s
def initialize_database(database_name, collection_name):
'''
use names of database and collection to fetch specific collection
Args:
database_name:
collection_name:
Returns:
'''
cluster = MongoClient("mongodb+srv://{}:{}@cluster0.fcobsyq.mongodb.net/".format(ATLAS_USER, ATLAS_TOKEN))
collection = cluster[database_name][collection_name]
return collection
def get_country_to_state_dict():
'''
prepare country-state mapping
Returns:
a mapping with country as key and states as value
e.g. {'Switzerland':['Zurich', 'Zug', 'Vaud', 'Saint Gallen'...], 'Germany':[]}
'''
data_state = geo_plug.all_Country_StateNames()
data_state = data_state.replace('null', ' ')
res = eval(data_state)
mapping_state = {}
for element in res:
for k, v in element.items():
mapping_state[k] = v
mapping_state["Switzerland"].remove("Basel-City")
mapping_state["Switzerland"].append("Basel")
return mapping_state
def get_state_to_city_dict():
'''
prepare state-city mapping
Returns:
a mapping with states as key and city as value
e.g. {'Zurich':["Winterthur", "Uster", ...], 'Basel':[]}
'''
data_city = geo_plug.all_State_CityNames()
data_city = data_city.replace('null', ' ')
res = eval(data_city)
mapping_city = {}
for element in res:
for k, v in element.items():
mapping_city[k] = v
mapping_city['North Rhine-Westphalia'].append('Cologne')
mapping_city['Bavaria'].append('Nuremberg')
mapping_city['Basel'] = mapping_city.pop('Basel-City')
return mapping_city
def special_translate_chat(chat):
'''
Some chat names are written in German or French. This function standardizes their spelling.
Args:
chat: original chat (string)
Returns: chat with standard format
'''
return chat.replace("Lousanne", "Lausanne") \
.replace("BielBienne", "Biel/Bienne") \
.replace("Geneve", "Geneva") \
.replace("StGallen", "Saint Gallen")
def parse_state_city(chat, country):
mapping_state = get_country_to_state_dict()
mapping_city = get_state_to_city_dict()
chat_standard = special_translate_chat(chat)
# parse state and city
chat_states = mapping_state[country]
state, city = '', ''
for s in chat_states:
chat_city = mapping_city[s]
for c in chat_city:
if c.upper() in chat_standard.upper():
city = c
state = s
break
if s.upper() in chat_standard.upper():
state = s
return state, city
def get_embedding(text, model="text-embedding-ada-002"):
text = text.replace("\n", " ")
return openai.Embedding.create(input=[text], model=model)['data'][0]['embedding']
def get_chats_list(input_file_path):
"""
Args:
input_file_path: chats path
Returns: pandas dataframe. e.g.
|country|chat|
|Switzerland|https://t.me/zurich_hb_help|
|Switzerland|https://t.me/helpfulinfoforua|
"""
countries, chats = list(), list()
with open(input_file_path, 'r') as file:
for line in file.readlines():
if line.startswith("#"):
country = line.replace('#', '').replace('\n', '')
else:
chat = line.replace('\n', '')
chats.append(chat)
countries.append(country)
df = pd.DataFrame(list(zip(countries, chats)),
columns=['country', 'chat'])
return df
async def callAPI(input_file_path):
"""
Reads the input file, extracts the chats and uses the TelegramClient to scrape messages from each chat.
For every chat, only messages newer than the latest record already stored in MongoDB are fetched.
Each message becomes a record (chat, datetime, country, state, city, text, views, forwards, replies, reactions),
and the records are inserted into the output MongoDB collection.
:input_file_path: .txt file containing the list of chats to scrape, each line should represent one chat
"""
data = get_chats_list(input_file_path)
print(len(data))
for index, row in tqdm(data.iterrows(), total=data.shape[0]):
async with TelegramClient(StringSession(TELEGRAM_STRING_TOKEN), TELEGRAM_API_ID, TELEGRAM_API_HASH) as client:
chat = row['chat']
country = row['country']
state, city = parse_state_city(chat, country)
# find max time in the database
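# (incremental scraping: only messages newer than the most recent stored message for this chat are fetched below)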
time_col = 'messageDatetime' # "update_time"
search_max_date = output_collection.find_one({"chat": chat}, sort=[(time_col, -1)])
if search_max_date is None:
max_time = None
else:
# avoid include the record which date is equivalent to max_time_db
max_time = search_max_date[time_col] + datetime.timedelta(seconds=1)
print("{} last {} time: {} ".format(chat, time_col, max_time))
data_list = list()
async for message in client.iter_messages(chat, reverse=True, offset_date=max_time):
if message.message is not None and message.message != '':
record = dict()
record['chat'] = chat
record['messageDatetime'] = message.date
record['messageDate'] = message.date.strftime("%Y-%m-%d")
record['messageUpdateTime'] = datetime.datetime.now()
record['country'] = country
record['state'] = state
record['city'] = city
record['messageText'] = message.message
record['views'] = message.views if message.views is not None else 0
record['forwards'] = message.forwards if message.forwards is not None else 0
# if len(message.message) > 100:
# record['embedding'] = get_embedding(message.message)
if message.replies is None:
record['replies'] = 0
else:
record['replies'] = message.replies.replies
if message.reactions is None:
record['reactions'] = []
else:
reaction = dict()
for i in message.reactions.results:
try:
reaction[i.reaction.emoticon] = i.count
except:
# same message don't have emotion labels (reaction.emoticon)
pass
record['reactions'] = reaction
data_list.append(record)
print("data len:{}".format(len(data_list)))
if len(data_list) > 0:
output_collection.insert_many(data_list)
else:
print("no updated records")
if __name__ == '__main__':
"""
### example usage in command line:
(1) switzerland+germany
python src/helper/scraping/telegram_tools/scrapeTelegramChannelMessages.py \
-i data/telegram/queries/DACH.txt -o scrape.telegram
(2) only switzerland
python src/helper/scraping/telegram_tools/scrapeTelegramChannelMessages.py \
-i data/telegram/queries/switzerland_groups.txt -o scrape.telegram
### Read chats from DACH.txt and store telegram data to database.
(1) scrape telegram data
(2) get embedding of each sentence
(3) parse state and city from chat name
"""
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_file_path', help="Specify the input file", type=validate_local_file,
required=True)
parser.add_argument('-o', '--output_database', help="Specify the output database", required=True)
args = parser.parse_args()
o_database_name, o_collection_name = args.output_database.split('.')
output_collection = initialize_database(o_database_name, o_collection_name)
loop = asyncio.get_event_loop()
loop.run_until_complete(callAPI(args.input_file_path))
loop.close()
| [] |
2024-01-10 | rodaveli/qa_doc1 | embedchain2~embedchain.py | import chromadb
import openai
import os
from chromadb.utils import embedding_functions
from dotenv import load_dotenv
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from embedchain.loaders.youtube_video import YoutubeVideoLoader
from embedchain.loaders.pdf_file import PdfFileLoader
from embedchain.loaders.web_page import WebPageLoader
from embedchain.chunkers.youtube_video import YoutubeVideoChunker
from embedchain.chunkers.pdf_file import PdfFileChunker
from embedchain.chunkers.web_page import WebPageChunker
load_dotenv()
embeddings = OpenAIEmbeddings()
ABS_PATH = os.getcwd()
DB_DIR = os.path.join(ABS_PATH, "db")
openai_ef = embedding_functions.OpenAIEmbeddingFunction(
api_key=os.getenv("OPENAI_API_KEY"),
model_name="text-embedding-ada-002"
)
class EmbedChain:
def __init__(self):
"""
Initializes the EmbedChain instance, sets up a ChromaDB client and
creates a ChromaDB collection.
"""
self.chromadb_client = self._get_or_create_db()
self.collection = self._get_or_create_collection()
self.user_asks = []
def _get_loader(self, data_type):
"""
Returns the appropriate data loader for the given data type.
:param data_type: The type of the data to load.
:return: The loader for the given data type.
:raises ValueError: If an unsupported data type is provided.
"""
loaders = {
'youtube_video': YoutubeVideoLoader(),
'pdf_file': PdfFileLoader(),
'web_page': WebPageLoader()
}
if data_type in loaders:
return loaders[data_type]
else:
raise ValueError(f"Unsupported data type: {data_type}")
def _get_chunker(self, data_type):
"""
Returns the appropriate chunker for the given data type.
:param data_type: The type of the data to chunk.
:return: The chunker for the given data type.
:raises ValueError: If an unsupported data type is provided.
"""
chunkers = {
'youtube_video': YoutubeVideoChunker(),
'pdf_file': PdfFileChunker(),
'web_page': WebPageChunker()
}
if data_type in chunkers:
return chunkers[data_type]
else:
raise ValueError(f"Unsupported data type: {data_type}")
def add(self, data_type, url):
"""
Adds the data from the given URL to the vector db.
Loads the data, chunks it, create embedding for each chunk
and then stores the embedding to vector database.
:param data_type: The type of the data to add.
:param url: The URL where the data is located.
"""
loader = self._get_loader(data_type)
chunker = self._get_chunker(data_type)
self.user_asks.append([data_type, url])
self.load_and_embed(loader, chunker, url)
def _get_or_create_db(self):
"""
Returns a ChromaDB client, creates a new one if needed.
:return: The ChromaDB client.
"""
client_settings = chromadb.config.Settings(
chroma_db_impl="duckdb+parquet",
persist_directory=DB_DIR,
anonymized_telemetry=False
)
return chromadb.Client(client_settings)
def _get_or_create_collection(self):
"""
Returns a ChromaDB collection, creates a new one if needed.
:return: The ChromaDB collection.
"""
return self.chromadb_client.get_or_create_collection(
'embedchain_store', embedding_function=openai_ef,
)
def load_and_embed(self, loader, chunker, url):
"""
Loads the data from the given URL, chunks it, and adds it to the database.
:param loader: The loader to use to load the data.
:param chunker: The chunker to use to chunk the data.
:param url: The URL where the data is located.
"""
embeddings_data = chunker.create_chunks(loader, url)
documents = embeddings_data["documents"]
metadatas = embeddings_data["metadatas"]
ids = embeddings_data["ids"]
self.collection.add(
documents=documents,
metadatas=metadatas,
ids=ids
)
print(f"Successfully saved {url}. Total chunks count: {self.collection.count()}")
def _format_result(self, results):
return [
(Document(page_content=result[0], metadata=result[1] or {}), result[2])
for result in zip(
results["documents"][0],
results["metadatas"][0],
results["distances"][0],
)
]
def get_openai_answer(self, prompt):
messages = []
messages.append({
"role": "user", "content": prompt
})
response = openai.ChatCompletion.create(
model="gpt-4-0613",
messages=messages,
temperature=0.4,
max_tokens=2000,
top_p=1,
)
return response["choices"][0]["message"]["content"]
def get_answer_from_llm(self, query, context):
"""
Gets an answer based on the given query and context by passing it
to an LLM.
:param query: The query to use.
:param context: Similar documents to the query used as context.
:return: The answer.
"""
prompt = f"""Imagine three different experts are answering this question given the below context. All experts will write down 1 step of their thinking, then share it with the group. Then all experts will go on to the next step, etc. If any expert realises they're wrong at any point then they leave. Return as a response SOLELY THE FINAL ANSWER.
{context}
Query: {query}
Answer:
"""
answer = self.get_openai_answer(prompt)
return answer
def query(self, input_query):
"""
Queries the vector database based on the given input query.
Gets relevant doc based on the query and then passes it to an
LLM as context to get the answer.
:param input_query: The query to use.
:return: The answer to the query.
"""
result = self.collection.query(
query_texts=[input_query,],
n_results=1,
)
result_formatted = self._format_result(result)
answer = self.get_answer_from_llm(input_query, result_formatted[0][0].page_content)
return answer
class App(EmbedChain):
"""
The EmbedChain app.
Has two functions: add and query.
adds(data_type, url): adds the data from the given URL to the vector db.
query(query): finds answer to the given query using vector database and LLM.
"""
pass
| [
"Imagine three different experts are answering this question given the below context. All experts will write down 1 step of their thinking, then share it with the group. Then all experts will go on to the next step, etc. If any expert realises they're wrong at any point then they leave. Return as a response SOLELY THE FINAL ANSWER.\n PLACEHOLDER\n Query: PLACEHOLDER\n Answer:\n "
] |
2024-01-10 | guangchen811/DeepKE | example~llm~UnleashLLMRE~gpt3ICL.py | import openai
import json
import random
import time
from tqdm import tqdm
from collections import Counter
import argparse
import numpy as np
import copy
import os
def convert_token(token):
""" Convert PTB tokens to normal tokens """
if (token.lower() == '-lrb-'):
return '('
elif (token.lower() == '-rrb-'):
return ')'
elif (token.lower() == '-lsb-'):
return '['
elif (token.lower() == '-rsb-'):
return ']'
elif (token.lower() == '-lcb-'):
return '{'
elif (token.lower() == '-rcb-'):
return '}'
return token
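# Computes overall accuracy plus micro-averaged precision/recall/F1, treating the "no relation"
# style labels (NA, Other, unanswerable, ...) as the negative class excluded from the positive counts.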
def f1_score(true, pred_result, rel2id):
correct = 0
total = len(true)
correct_positive = 0
pred_positive = 0
gold_positive = 0
neg = -1
for name in ['NA', 'na', 'no_relation', 'Other', 'Others', 'false', 'unanswerable']:
if name in rel2id:
neg = rel2id[name]
break
for i in range(total):
golden = true[i]
if golden == pred_result[i]:
correct += 1
if golden != neg:
correct_positive += 1
if golden != neg:
gold_positive +=1
if pred_result[i] != neg:
pred_positive += 1
acc = float(correct) / float(total)
try:
micro_p = float(correct_positive) / float(pred_positive)
except:
micro_p = 0
try:
micro_r = float(correct_positive) / float(gold_positive)
except:
micro_r = 0
try:
micro_f1 = 2 * micro_p * micro_r / (micro_p + micro_r)
except:
micro_f1 = 0
result = {'acc': acc, 'micro_p': micro_p, 'micro_r': micro_r, 'micro_f1': micro_f1}
return result
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--api_key', '-ak', type=str, required=True)
parser.add_argument('--train_path', '-tp', type=str, required=True, help="The path of training / demonstration data.")
parser.add_argument('--test_path', '-ttp', type=str, required=True, help="The path of test data.")
parser.add_argument('--output_success', '-os', type=str, required=True, help="The output directory of successful ICL samples.")
parser.add_argument('--output_nores', '-on', type=str, required=True, help="The output directory of failed ICL samples.")
parser.add_argument('--prompt', type=str, required=True, choices=["text", "text_schema", "instruct", "instruct_schema"])
parser.add_argument('--k', type=int, default=1, help="k-shot demonstrations")
args = parser.parse_args()
openai.api_key = args.api_key
# Train / Demostration Set
with open(args.train_path,'r') as f:
train = json.load(f)
label_list = {}
for line in train:
rel = line['relation']
if rel not in label_list:
label_list[rel] = [line]
else:
label_list[rel].append(line)
# Relations
rels = list(label_list.keys())
rel2id = {}
for i, rel in enumerate(rels):
rel2id[rel] = i
# Label words
rel2labelword = {}
for rel in rels:
rel2labelword[rel] = rel.lower().replace("_"," ").replace("-", " ").replace("per", "person").replace("org", "organization").replace("stateor", "state or ")
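# e.g. a TACRED-style label such as 'per:stateorprovince_of_birth' would become the label word
# 'person:state or province of birth' (hypothetical example for illustration).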
labelword2rel = {}
for k,v in rel2labelword.items():
labelword2rel[v] = k
# Test Set
with open(args.test_path,'r') as f:
test = json.load(f)
res = []
true = []
nores = []
success = []
with open(os.path.join(args.output_success, "os.json"),"w") as f:
for input in tqdm(test):
random.shuffle(rels)
try:
if "text" in args.prompt:
prompt = "There are candidate relations: " + ', '.join(labelword2rel.keys()) + ".\n"
else:
prompt = "Given a context, a pair of head and tail entities in the context, decide the relationship between the head and tail entities from candidate relations: " + \
', '.join(labelword2rel.keys()) + ".\n"
for rel in rels:
random.shuffle(label_list[rel])
kshot = label_list[rel][:args.k]
for data in kshot:
ss, se = data['subj_start'], data['subj_end']
head = ' '.join(data['token'][ss:se+1])
headtype = data['subj_type'].lower().replace('_',' ')
if headtype == "misc":
headtype = "miscellaneous"
obj_s, obj_e = data['obj_start'], data['obj_end']  # renamed from os/oe so the os module is not shadowed
tail = ' '.join(data['token'][obj_s:obj_e+1])
tailtype = data['obj_type'].lower().replace('_',' ')
if tailtype == "misc":
tailtype = "miscellaneous"
sentence = ' '.join([convert_token(token) for token in data['token']])
relation = rel2labelword[data['relation']]
if "schema" in args.prompt:
prompt += "Context: " + sentence + " The relation between " + headtype + " '" + head + "' and " + tailtype + " '" + tail + "' in the context is " + relation + ".\n"
else:
prompt += "Context: " + sentence + " The relation between '" + head + "' and '" + tail + "' in the context is " + relation + ".\n"
# prompt += " The relation between '" + head + "' and '" + tail + "' in the context '" + sentence + "' is " + relation + ".\n"
tss, tse = input['subj_start'], input['subj_end']
testhead = ' '.join(input['token'][tss:tse+1])
testheadtype = input['subj_type'].lower().replace('_',' ')
if testheadtype == "misc":
testheadtype = "miscellaneous"
tos, toe = input['obj_start'], input['obj_end']
testtail = ' '.join(input['token'][tos:toe+1])
testtailtype = input['obj_type'].lower().replace('_',' ')
if testtailtype == "misc":
testtailtype = "miscellaneous"
testsen = ' '.join(input['token'])
if "schema" in args.prompt:
prompt += "Context: " + testsen + " The relation between " + testheadtype + " '" + testhead + "' and " + testtailtype + " '" + testtail + "' in the context is "
else:
prompt += "Context: " + testsen + " The relation between '" + testhead + "' and '" + testtail + "' in the context is "
# prompt += " The relation between '" + testhead + "' and '" + testtail + "' in the context '" + testsen + "' is "
# print(prompt)
response = openai.Completion.create(
model="text-davinci-003",
prompt = prompt,
temperature=0,
max_tokens=128
)
resrel = response['choices'][0]['text'].strip().split('.')[0].lower()
if resrel in labelword2rel:
truerel = rel2id[input['relation']]
predictrel = rel2id[labelword2rel[resrel]]
true.append(truerel)
res.append(predictrel)
input['pr'] = labelword2rel[resrel]
success.append(input)
f.writelines(json.dumps(input))
f.write('\n')
elif ("city" in resrel) and (resrel.replace("city", "cities") in labelword2rel):
truerel = rel2id[input['relation']]
predictrel = rel2id[labelword2rel[resrel.replace("city", "cities")]]
true.append(truerel)
res.append(predictrel)
input['pr'] = labelword2rel[resrel.replace("city", "cities")]
success.append(input)
f.writelines(json.dumps(input))
f.write('\n')
elif ("country" in resrel) and (resrel.replace("country", "countries") in labelword2rel):
truerel = rel2id[input['relation']]
predictrel = rel2id[labelword2rel[resrel.replace("country", "countries")]]
true.append(truerel)
res.append(predictrel)
input['pr'] = labelword2rel[resrel.replace("country", "countries")]
success.append(input)
f.writelines(json.dumps(input))
f.write('\n')
elif ("province" in resrel) and (resrel.replace("province", "provinces") in labelword2rel):
truerel = rel2id[input['relation']]
predictrel = rel2id[labelword2rel[resrel.replace("province", "provinces")]]
true.append(truerel)
res.append(predictrel)
input['pr'] = labelword2rel[resrel.replace("province", "provinces")]
success.append(input)
f.writelines(json.dumps(input))
f.write('\n')
else:
input['pr'] = resrel
nores.append(input)
except Exception as e:
print(e)
if e._message == 'You exceeded your current quota, please check your plan and billing details.':
break
nores.append(input)
time.sleep(30)
if len(nores)!=0:
json.dump(nores, open(os.path.join(args.output_nores, "no.json"),'w'))
print(f1_score(true, res, rel2id)) | [
"' and ",
"Context: PLACEHOLDER The relation between 'PLACEHOLDER' and 'PLACEHOLDER' in the context is ",
" '",
".\n",
"Given a context, a pair of head and tail entities in the context, decide the relationship between the head and tail entities from candidate relations: ",
"Context: PLACEHOLDER The relation between 'PLACEHOLDER' and 'PLACEHOLDER' in the context is PLACEHOLDER.\n",
"Context: ",
"There are candidate relations: ",
", ",
"' in the context is ",
" The relation between "
] |
2024-01-10 | marks/streamlit-airtable-connection | examples~explore.py | import streamlit as st
import pandas as pd
import json
from langchain.chat_models import ChatOpenAI
from langchain_experimental.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent
from langchain.agents.agent_types import AgentType
from streamlit_airtable import AirtableConnection
import explore_helpers
# Initiate connection to Airtable using st.connection
airtable_conn = st.connection(
"your_connection_name", type=AirtableConnection
)
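# Assumes a [connections.your_connection_name] section in .streamlit/secrets.toml that holds the
# Airtable personal access token used by the connector.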
# Retrieve list of bases, and create a dict of base id to name
bases_list = airtable_conn.list_bases()
bases_id_to_name = {base["id"]: base["name"] for base in bases_list["bases"]}
# Add a sidebar to select a base
with st.sidebar:
st.markdown("## Configuration")
# with st.form("Configuration"):
selected_base_id = st.selectbox(
"Which base would you like to explore?",
options=list(bases_id_to_name.keys()),
format_func=lambda base_id: bases_id_to_name[base_id],
help="If you don't see a base in the list, make sure your personal access token has access to it.",
)
openai_api_key = st.text_input(
"(Optional) What is your OpenAI API key?",
value="",
type="password",
help="(Optional) You can find your API key at https://platform.openai.com/account/api-keys. If not supplied, you will not be able to use the AI features on this page.",
)
# Main content pane
with st.container():
st.info(
"Welcome to the demo of the Airtable connector for Streamlit. The [source code for the connector and this example can be found on Github](https://github.com/marks/streamlit-airtable-connection). You can also watch a [demo video](https://share.support.airtable.com/RBuJRnyL) and access the [underlying sample Airtable base](https://airtable.com/appdbRXgibDiQkNQN/shrIO0m8oyeQJTf9T) from [thesquirrelcensus.com](https://www.thesquirrelcensus.com/).",
icon="๐",
)
st.markdown("# Airtable Base Explorer")
# st.markdown(f"## `{bases_id_to_name[selected_base_id]}`")
base_schema = airtable_conn.get_base_schema(base_id=selected_base_id)
st.markdown(
f"You're exploring the base named [{bases_id_to_name[selected_base_id]}](https://airtable.com/{selected_base_id})."
)
st.markdown("### Base schema")
# Two columns: list of tables and graph of relationships
col1, col2 = st.columns(2)
# Show the number and a list of tables in the base
col1.write(f"**{len(base_schema['tables'])} tables**")
# Create a dataframe of the table names, ids, and deep links
tables_df = pd.DataFrame(
[
{
"name": table["name"],
"id": table["id"],
"deep_link": f"https://airtable.com/{selected_base_id}/{table['id']}",
}
for table in base_schema["tables"]
]
)
col1.dataframe(
tables_df,
column_config={"deep_link": st.column_config.LinkColumn()},
hide_index=True,
)
col1.download_button(
"Download base schema as JSON",
json.dumps(base_schema),
f"base-schema-{selected_base_id}.json",
"application/json",
)
# Show a graph of the relationships between tables
graph = explore_helpers.create_graph_from_base_schema(selected_base_id, base_schema)
col2.write(
f"**Table relationships**\n\nNodes are table names and edges are _field names_ of the linked record fields. Both are clickable links to the Airtable UI for that item."
)
col2.graphviz_chart(graph)
st.divider()
st.markdown("### Table schemas")
table_schema_tabs = st.tabs([f"{table['name']}" for table in base_schema["tables"]])
# Show the full schema for each table in an expander
for i, table_schema in enumerate(base_schema["tables"]):
this_tab = table_schema_tabs[i]
this_tab.write(
f"**{len(table_schema['fields'])} fields** belonging to table `{table_schema['id']}`:"
)
fields_df = pd.DataFrame(
[
{
"name": item["name"],
"id": item["id"],
"type": item["type"],
"choices": [
choice["name"] for choice in item["options"].get("choices", [])
]
if item["type"] in ["singleSelect", "multipleSelects"]
else None,
"linked_table_id": item["options"]["linkedTableId"]
if item["type"] in ["multipleRecordLinks"]
else None,
"deep_link": f"https://airtable.com/{selected_base_id}/{table_schema['id']}/{item['id']}",
}
for item in table_schema["fields"]
]
)
this_tab.dataframe(
fields_df,
column_config={"deep_link": st.column_config.LinkColumn()},
hide_index=True,
)
col1, col2 = this_tab.columns(2)
col1.download_button(
"Download list of fields as CSV",
fields_df.to_csv(index=False).encode("utf-8"),
f"table-schema-{selected_base_id}-{table_schema['id']}.csv",
"text/csv",
)
col2.download_button(
"Download full table schema as JSON",
json.dumps(table_schema),
f"table-schema-{selected_base_id}-{table_schema['id']}.json",
"application/json",
)
st.divider()
st.markdown("### Base records")
st.markdown(
"The following record previews display the first 10 records from the [Airtable list records API](https://airtable.com/developers/web/api/list-records) in `string` cell format. This will return the same values you see in the Airtable UI and when you export as CSV."
)
table_record_tabs = st.tabs([f"{table['name']}" for table in base_schema["tables"]])
# Show the full schema for each table in an expander
for i, table_schema in enumerate(base_schema["tables"]):
this_tab = table_record_tabs[i]
this_tab.dataframe(
airtable_conn.query(
base_id=selected_base_id,
table_id=table_schema["id"],
max_records=10,
cell_format="string",
time_zone="America/Los_Angeles",
user_locale="en-us",
),
hide_index=True,
)
st.divider()
st.markdown("### AI")
st.markdown(
"Uses [LangChain](https://www.langchain.com), [the LangChain Pandas Dataframe Agent](https://python.langchain.com/docs/integrations/toolkits/pandas), and [OpenAI's `chatgpt-3.5-turbo` model](https://platform.openai.com/docs/models/gpt-3-5) to answer questions about the table's records.")
if not openai_api_key.startswith("sk-"):
st.warning(
"Please enter your OpenAI API in the configuration question to the left to test the AI functionality",
icon="โ ",
)
else:
# Select a table to query
table_for_ai = st.selectbox(
"Which table would you like ask questions about?",
tables_df["name"],
)
if "messages" not in st.session_state:
st.session_state["messages"] = [
# {"role": "assistant", "content": "How can I help you?"}
]
for msg in st.session_state.messages:
st.chat_message(msg["role"]).write(msg["content"])
if prompt := st.chat_input(
f"Enter a question about the '{table_for_ai}' table"
):
st.session_state.messages.append({"role": "user", "content": prompt})
st.chat_message("user").write(prompt)
with st.spinner("Generating response ..."):
# TODO refactor, but caching is built into the connector
df_for_ai = airtable_conn.query(
base_id=selected_base_id,
table_id=table_for_ai,
cell_format="string",
time_zone="America/Los_Angeles",
user_locale="en-us",
)
# Initiate language model
llm = ChatOpenAI(
model_name="gpt-3.5-turbo",
temperature=0.2,
openai_api_key=openai_api_key,
)
# Create Pandas DataFrame Agent
agent = create_pandas_dataframe_agent(
llm,
df_for_ai,
verbose=True,
agent_type=AgentType.OPENAI_FUNCTIONS,
)
# Perform Query using the Agent
response = {"content": agent.run(prompt), "role": "assistant"}
st.session_state.messages.append(response)
st.chat_message("assistant").write(response["content"])
| [] |
2024-01-10 | vandit98/tubechat-Talk-with-youtube-link | gpt_utils.py |
import os
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationChain
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from langchain.prompts import (
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
ChatPromptTemplate,
MessagesPlaceholder
)
from dotenv import load_dotenv
load_dotenv()
import openai
import configparser
import json
import streamlit as st
openai.api_key=os.getenv("openai_api_key")
# openai_api_key = os.getenv("openai_api_key")
def get_conversation_buffer_memory(conBuffWindow=3):
return ConversationBufferWindowMemory(k=conBuffWindow,return_messages=True)
def get_response(model, query, context):
# prompt_template, temperature, prompt = get_gpt_config(source)
prompt="""\n\nInstructions for Response Generation:\n\n
Kindly exhibit empathetic responses to greeting messages and provide thoughtful replies.
The context provided is from "Transcription" which is a transcription of some youtube video.
Always refer "Transcription" as the context in the response instead of saying "context".
Format the response as a valid json with keys "Bot Response" and "Relevancy".Format the "Relevancy" value as a boolean based on whether the Query is based on "Transcription" provided in the user_input or not.
If the query answer is available in "Transcription" for answering, set "Relevancy" as true else set it as false.\n
"Bot Response" is a multiline json string, make sure that the format it as a valid json string.
For general conversational interactions or inquiries not requiring specialist knowledge, feel free to respond naturally and appropriately without relying on the "Transcription".
Make sure you always format the response as Json as mentioned above and keep your answer within 50 words.
"""
system_role = prompt
# response_text = "{\"Relevancy\":false,\"Bot Response\": \"The information provided in the ABCD Knowedge Library does not contain any relevant information about washing with hope.\"}"
user_input = f"\n\nQuery: {query}\n\n Transcript:\n {context}\n\n "
# response_text_2 = "{\"Relevancy\":true,\"Bot Response\": \"Washing with hope is a culture in florida\"}"
# user_input_2 = f"\n\nQuery: {query}\n\n T:\n {context}\n\n "
response_list = [
{"role": "system", "content": system_role},
# {"role": "user", "content": example_text},
# {"role": "assistant", "content": response_text},
{"role": "user", "content": user_input},
# {"role": "user", "content": example_text_2},
# {"role": "assistant", "content": response_text_2},
# {"role": "user", "content": user_input_2},
]
response = openai.ChatCompletion.create(
model=model,
temperature=0,
messages=response_list
)
gpt_response = response["choices"][0]["message"]["content"]
print("user Query-Input:")
print(user_input)
print("bot Output:")
print(gpt_response)
st.write(gpt_response)
# try:
response_dict = json.loads(gpt_response)
response = response_dict["Bot Response"]
within_knowledge_base = response_dict["Relevancy"]
print("*" * 50)
print("is query related to knowledge base: ", within_knowledge_base)
print("*" * 50)
try:
response= json.loads(response)
response = response["Bot Response"]
except:
pass
return response
def normal_talk(model, query, context):
prompt="""\n\nInstructions for Response Generation:\n\n
Kindly exhibit empathetic responses to greeting messages and provide thoughtful replies.
The context provided is from "Transcription" which is a transcription of some youtube video.
Always refer "Transcription" as the context in the response instead of saying "context".
Format the response as a valid json with keys "Bot Response" and "Relevancy".Format the "Relevancy" value as a boolean based on whether the Query is based on "Transcription" provided in the user_input or not.
If the query answer is available in "Transcription" for answering, set "Relevancy" as true else set it as false.\n
"Bot Response" is a multiline json string, make sure that the format it as a valid json string.
For general conversational interactions or inquiries not requiring specialist knowledge, feel free to respond naturally and appropriately without relying on the "Transcription".
Make sure you always format the response as Json as mentioned above and keep your answer within 50 words.
"""
system_role = prompt
buffer_memory = get_conversation_buffer_memory()
example_text = f"""\n\nCONVERSATION LOG: \n
Human: hi
Bot Response: hi
Query: What is the meaning or significance behind Washing with hope?
Transcript: I live in florida.
"""
response_text = "{\"Relevancy\":false,\"Bot Response\": \"The information provided in the transcript does not contain any relevant information about washing with hope.\"}"
user_input = f"\n\nCONVERSATION LOG: \n{buffer_memory}\n\nQuery: {query}\n\n Transcript:\n {context}\n\n "
response_list = [
{"role": "system", "content": system_role},
{"role": "user", "content": example_text},
{"role": "assistant", "content": response_text},
{"role": "user", "content": user_input}
]
response = openai.ChatCompletion.create(
model=model,
temperature=0,
messages=response_list
)
gpt_response = response["choices"][0]["message"]["content"]
print("user Query-Input:")
print(user_input)
print("bot Output:")
print(gpt_response)
st.write(gpt_response)
# try:
response_dict = json.loads(gpt_response)
response = response_dict["Bot Response"]
within_knowledge_base = response_dict["Relevancy"]
print("*" * 50)
print("is query related to knowledge base: ", within_knowledge_base)
print("*" * 50)
try:
response= json.loads(response)
response = response["Bot Response"]
except:
pass
return response
# if __name__=="__main__":
# normal_talk(model="gpt-3.5-turbo-16k", query="What is the meaning or significance behind Washing with hope?", context="I live in florida.")
| [
"\n\nInstructions for Response Generation:\n\n\n Kindly exhibit empathetic responses to greeting messages and provide thoughtful replies.\n The context provided is from \"Transcription\" which is a transcription of some youtube video.\n Always refer \"Transcription\" as the context in the response instead of saying \"context\".\n Format the response as a valid json with keys \"Bot Response\" and \"Relevancy\".Format the \"Relevancy\" value as a boolean based on whether the Query is based on \"Transcription\" provided in the user_input or not.\n If the query answer is available in \"Transcription\" for answering, set \"Relevancy\" as true else set it as false.\n\n \"Bot Response\" is a multiline json string, make sure that the format it as a valid json string.\n For general conversational interactions or inquiries not requiring specialist knowledge, feel free to respond naturally and appropriately without relying on the \"Transcription\".\n Make sure you always format the response as Json as mentioned above and keep your answer within 50 words. \n "
] |
2024-01-10 | yushiro-yamashita/Carrier-Owl | src~carrier_owl.py | import argparse
import datetime
import os
import textwrap
import time
import urllib.parse
import warnings
from dataclasses import dataclass
import arxiv
import openai
import requests
import slackweb
import yaml
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.options import Options
from webdriver_manager.firefox import GeckoDriverManager
# setting
warnings.filterwarnings('ignore')
@dataclass
class Result:
url: str
title: str
abstract: str
summary: str
words: list
score: float = 0.0
def calc_score(abst: str, keywords: dict) -> (float, list):
sum_score = 0.0
hit_kwd_list = []
for word in keywords.keys():
score = keywords[word]
if word.lower() in abst.lower():
sum_score += score
hit_kwd_list.append(word)
return sum_score, hit_kwd_list
def search_keyword(
articles: list, keywords: dict, score_threshold: float
) -> list:
results = []
# Launch the browser in headless mode
options = Options()
options.add_argument('--headless')
# Launch the browser
driver = webdriver.Firefox(executable_path=GeckoDriverManager().install(), options=options)
# Summarize and translate with ChatGPT
system = """You are an expert with a background in physics and informatics.
Please output the best summary based on the following constraints and the input text.
Constraints:
The text should be concise and easy to understand.
Bullet points should be output in 3 lines.
Each line should be approximately 50 words.
Do not miss any important keywords.
The summarized text should be translated into Japanese.
Expected output format:
1.
2.
3.
"""
for article in articles:
url = article['arxiv_url']
title = article['title']
abstract = article['summary']
score, hit_keywords = calc_score(abstract, keywords)
if (score != 0) and (score >= score_threshold):
title_trans = get_translated_text('ja', 'en', title, driver)
abstract = abstract.replace('\n', '')
abstract_trans = get_translated_text('ja', 'en', abstract, driver)
text = f"title: {title}\nbody: {abstract}"
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{'role': 'system', 'content': system},
{'role': 'user', 'content': text}
],
temperature=0.25,
)
summary = response['choices'][0]['message']['content']
except:
summary = ""
time.sleep(30)
result = Result(
url=url, title=title_trans, abstract=abstract_trans,
score=score, words=hit_keywords, summary=summary)
results.append(result)
# Quit the browser
driver.quit()
return results
def send2app(text: str, slack_id: str, line_token: str) -> None:
# slack
if slack_id is not None:
slack = slackweb.Slack(url=slack_id)
slack.notify(text=text)
# line
if line_token is not None:
line_notify_api = 'https://notify-api.line.me/api/notify'
headers = {'Authorization': f'Bearer {line_token}'}
data = {'message': f'message: {text}'}
requests.post(line_notify_api, headers=headers, data=data)
def notify(results: list, slack_id: str, line_token: str) -> None:
# Send notifications
star = '*'*80
today = datetime.date.today()
n_articles = len(results)
text = f'{star}\n \t \t {today}\tnum of articles = {n_articles}\n{star}'
send2app(text, slack_id, line_token)
# descending
for result in sorted(results, reverse=True, key=lambda x: x.score):
url = result.url
title = result.title
summary = result.summary
abstract = result.abstract
word = result.words
score = result.score
text = f'\n score: `{score}`'\
f'\n hit keywords: `{word}`'\
f'\n url: {url}'\
f'\n title: {title}'\
f'\n summary:\n{summary}\n'\
f'\n abstract:'\
f'\n \t {abstract}'\
f'\n {star}'
send2app(text, slack_id, line_token)
def get_translated_text(from_lang: str, to_lang: str, from_text: str, driver) -> str:
'''
https://qiita.com/fujino-fpu/items/e94d4ff9e7a5784b2987
'''
sleep_time = 1
# urlencode
from_text = urllib.parse.quote(from_text)
# build the URL
url = 'https://www.deepl.com/translator#' \
+ from_lang + '/' + to_lang + '/' + from_text
driver.get(url)
driver.implicitly_wait(10) # wait up to 10 seconds if the element is not found
for i in range(30):
# wait for the specified time
time.sleep(sleep_time)
html = driver.page_source
# to_text = get_text_from_page_source(html)
to_text = get_text_from_driver(driver)
if to_text:
break
if to_text is None:
return urllib.parse.unquote(from_text)
return to_text
def get_text_from_driver(driver) -> str:
try:
elem = driver.find_element_by_class_name('lmt__translations_as_text__text_btn')
except NoSuchElementException as e:
return None
text = elem.get_attribute('innerHTML')
return text
def get_text_from_page_source(html: str) -> str:
soup = BeautifulSoup(html, features='lxml')
target_elem = soup.find(class_="lmt__translations_as_text__text_btn")
text = target_elem.text
return text
def get_config() -> dict:
file_abs_path = os.path.abspath(__file__)
file_dir = os.path.dirname(file_abs_path)
config_path = f'{file_dir}/../config.yaml'
with open(config_path, 'r') as yml:
config = yaml.load(yml)
return config
def main():
# for debugging
parser = argparse.ArgumentParser()
parser.add_argument('--slack_id', default=None)
parser.add_argument('--line_token', default=None)
parser.add_argument('--openai_api', default=None)
args = parser.parse_args()
config = get_config()
subject = config['subject']
keywords = config['keywords']
score_threshold = float(config['score_threshold'])
day_before_yesterday = datetime.datetime.today() - datetime.timedelta(days=2)
day_before_yesterday_str = day_before_yesterday.strftime('%Y%m%d')
# datetime format YYYYMMDDHHMMSS
arxiv_query = f'({subject}) AND ' \
f'submittedDate:' \
f'[{day_before_yesterday_str}000000 TO {day_before_yesterday_str}235959]'
articles = arxiv.query(query=arxiv_query,
max_results=1000,
sort_by='submittedDate',
iterative=False)
openai.api_key = os.getenv("OPENAI_API") or args.openai_api
results = search_keyword(articles, keywords, score_threshold)
slack_id = os.getenv("SLACK_ID") or args.slack_id
line_token = os.getenv("LINE_TOKEN") or args.line_token
notify(results, slack_id, line_token)
if __name__ == "__main__":
main()
| [] |
2024-01-10 | yushiro-yamashita/Carrier-Owl | src~slide_owl.py | import argparse
import datetime
import os
import time
import urllib.parse
import warnings
from dataclasses import dataclass
from make_slide import make_slides
import arxiv
import openai
from slack_sdk import WebClient
from io import BytesIO
import yaml
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from webdriver_manager.firefox import GeckoDriverManager
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By
from pathlib import Path
# setting
warnings.filterwarnings("ignore")
@dataclass
class Result:
score: float = 0.0
hit_keywords: list = None
arxiv_result: dict = None
abst_jp: str = None
PROMPT = """ไธใใใใ่ซๆใฎ่ฆ็นใใพใจใใไปฅไธใฎ้
็ฎใงๆฅๆฌ่ชใงๅบๅใใใใใใใใฎ้
็ฎใฏๆๅคงใงใ180ๆๅญไปฅๅ
ใซ่ฆ็ดใใใ
```
่ซๆๅ:ใฟใคใใซใฎๆฅๆฌ่ช่จณ
ใญใผใฏใผใ:ใใฎ่ซๆใฎใญใผใฏใผใ
่ชฒ้ก:ใใฎ่ซๆใ่งฃๆฑบใใ่ชฒ้ก
ๆๆณ:ใใฎ่ซๆใๆๆกใใๆๆณ
็ตๆ:ๆๆกๆๆณใซใใฃใฆๅพใใใ็ตๆ
```"""
BASE_DIR=Path("./files")
CHANNEL_ID = "C03KGQE0FT6"
def calc_score(abst: str, keywords: dict):
sum_score = 0.0
hit_kwd_list = []
for word in keywords.keys():
score = keywords[word]
if word.lower() in abst.lower():
sum_score += score
hit_kwd_list.append(word)
return sum_score, hit_kwd_list
def get_text_from_driver(driver) -> str:
try:
# elem = driver.find_element_by_class_name("lmt__translations_as_text__text_btn")
elem = driver.find_element(by=By.CLASS_NAME, value="lmt__translations_as_text__text_btn")
except NoSuchElementException as e:
return None
text = elem.get_attribute("innerHTML")
return text
def get_translated_text(from_lang: str, to_lang: str, from_text: str, driver) -> str:
sleep_time = 1
from_text = urllib.parse.quote(from_text)
url = "https://www.deepl.com/translator#" \
+ from_lang + "/" + to_lang + "/" + from_text
driver.get(url)
driver.implicitly_wait(10)
for i in range(30):
time.sleep(sleep_time)
to_text = get_text_from_driver(driver)
if to_text:
break
if to_text is None:
return urllib.parse.unquote(from_text)
return to_text
def search_keyword(
articles: list, keywords: dict, score_threshold: float
):
options = webdriver.FirefoxOptions()
options.headless = True
driver = webdriver.Firefox(service=Service(GeckoDriverManager().install()), options=options)
results = []
for article in articles:
abstract = article.summary.replace("\n", " ")
score, hit_keywords = calc_score(abstract, keywords)
if score < score_threshold:
continue
abstract_trans = get_translated_text("ja", "en", abstract, driver)
result = Result(score=score, hit_keywords=hit_keywords, arxiv_result=article, abst_jp=abstract_trans)
results.append(result)
driver.quit()
return results
def get_summary(result):
title = result.title.replace("\n ", "")
body = result.summary.replace("\n", " ")
text = f"title: {title}\nbody: {body}"
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
# model="gpt-4",
messages=[
{"role": "system", "content": PROMPT},
{"role": "user", "content": text}
],
temperature=0.25,
)
summary = response["choices"][0]["message"]["content"]
summary_dict = {}
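# Parse the model's line-oriented output back into structured fields keyed by the Japanese labels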
for b in summary.split("\n"):
if b.startswith("論文名"):
summary_dict["title_jp"] = b[4:].lstrip()
if b.startswith("キーワード"):
summary_dict["keywords"] = b[6:].lstrip()
if b.startswith("課題"):
summary_dict["problem"] = b[3:].lstrip()
if b.startswith("手法"):
summary_dict["method"] = b[3:].lstrip()
if b.startswith("結果"):
summary_dict["result"] = b[3:].lstrip()
summary_dict["title"]= result.title
summary_dict["id"] = result.get_short_id().replace(".", "_")
summary_dict["date"] = result.published.strftime("%Y-%m-%d %H:%M:%S")
summary_dict["authors"] = result.authors
summary_dict["year"] = str(result.published.year)
summary_dict["entry_id"] = str(result.entry_id)
summary_dict["primary_category"] = str(result.primary_category)
summary_dict["categories"] = result.categories
summary_dict["journal_ref"] = result.journal_ref
summary_dict["pdf_url"] = result.pdf_url
summary_dict["doi"]= result.doi
summary_dict["abstract"] = body
return summary_dict
def send2app(text: str, slack_token: str, file: str=None) -> None:
if slack_token is not None:
client = WebClient(token=slack_token)
if file is None:
new_message = client.chat_postMessage(
channel=CHANNEL_ID,
text=text,
)
else:
print(file)
with open(file, "rb") as f:
new_file = client.files_upload(
channels=CHANNEL_ID,
file=BytesIO(f.read()),
filename=file.name,
filetype="pdf",
initial_comment=text,
)
def notify(results: list, slack_token: str) -> None:
star = "*"*80
today = datetime.date.today()
n_articles = len(results)
text = f"{star}\n \t \t {today}\tnum of articles = {n_articles}\n{star}"
send2app(text, slack_token)
for result in sorted(results, reverse=True, key=lambda x: x.score):
ar = result.arxiv_result
url = ar.entry_id
title = ar.title.replace("\n ", "")
word = result.hit_keywords
score = result.score
abstract = result.abst_jp.replace("。", "。\n>")
if abstract.endswith("\n>"):
abstract = abstract.rstrip("\n>")
abstract_en = ar.summary.replace("\n", " ").replace(". ", ". \n>")
text = f"\n Score: `{score}`"\
f"\n Hit keywords: `{word}`"\
f"\n URL: {url}"\
f"\n Title: {title}"\
f"\n Abstract:"\
f"\n>{abstract}"\
f"\n Original:"\
f"\n>{abstract_en}"\
f"\n {star}"
file = None
if openai.api_key is not None:
try:
summary_dict = get_summary(ar)
summary_dict["abst_jp"] = result.abst_jp
id = summary_dict["id"]
dirpath = BASE_DIR/id
dirpath.mkdir(parents=True, exist_ok=True)
pdf = f"{id}.pdf"
ar.download_pdf(dirpath=str(dirpath), filename=pdf)
summary_dict["pdf"] = str(dirpath/pdf)
file = make_slides(dirpath, id, summary_dict)
except Exception as e:
print(e)
send2app(text, slack_token, file)
def get_config():
file_abs_path = os.path.abspath(__file__)
file_dir = os.path.dirname(file_abs_path)
config_path = f"{file_dir}/../config.yaml"
with open(config_path, "r", encoding="utf-8") as yml:
config = yaml.load(yml)
return config
def main():
# for debugging
parser = argparse.ArgumentParser()
parser.add_argument("--slack_token", default=None)
parser.add_argument("--openai_api", default=None)
args = parser.parse_args()
config = get_config()
subject = config["subject"]
keywords = config["keywords"]
score_threshold = float(config["score_threshold"])
day_before_yesterday = datetime.datetime.today() - datetime.timedelta(days=2)
day_before_yesterday_str = day_before_yesterday.strftime("%Y%m%d")
arxiv_query = f"({subject}) AND " \
f"submittedDate:" \
f"[{day_before_yesterday_str}000000 TO {day_before_yesterday_str}235959]"
articles = arxiv.Search(query=arxiv_query,
max_results=1000,
sort_by = arxiv.SortCriterion.SubmittedDate).results()
articles = list(articles)
openai.api_key = os.getenv("OPENAI_API") or args.openai_api
results = search_keyword(articles, keywords, score_threshold)
slack_token = os.getenv("SLACK_BOT_TOKEN") or args.slack_token
notify(results[:1], slack_token)
if __name__ == "__main__":
main()
| [
"ไธใใใใ่ซๆใฎ่ฆ็นใใพใจใใไปฅไธใฎ้
็ฎใงๆฅๆฌ่ชใงๅบๅใใใใใใใใฎ้
็ฎใฏๆๅคงใงใ180ๆๅญไปฅๅ
ใซ่ฆ็ดใใใ\n```\n่ซๆๅ:ใฟใคใใซใฎๆฅๆฌ่ช่จณ\nใญใผใฏใผใ:ใใฎ่ซๆใฎใญใผใฏใผใ\n่ชฒ้ก:ใใฎ่ซๆใ่งฃๆฑบใใ่ชฒ้ก\nๆๆณ:ใใฎ่ซๆใๆๆกใใๆๆณ\n็ตๆ:ๆๆกๆๆณใซใใฃใฆๅพใใใ็ตๆ\n```"
] |
2024-01-10 | simonmoesorensen/gpt4docs | gpt4docs~scripts~build_vectorstore.py | from typing import Generator
import time
import uuid
from pathlib import Path
import shutil
import logging
from dotenv import load_dotenv
from langchain.vectorstores import Chroma
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import TokenTextSplitter
from langchain.document_loaders.parsers.txt import TextParser
from langchain.document_loaders.blob_loaders import Blob
load_dotenv()
logging.basicConfig(
level=logging.INFO,
format=(
"[%(asctime)s][%(levelname)s][%(name)s]"
+ "[%(funcName)s) %(filename)s:%(lineno)d] %(message)s"
),
datefmt="%Y-%m-%d %H:%M:%S",
)
logger = logging.getLogger(__name__)
root = Path(__file__).parent.parent.parent
def delete_existing_vectorstore(directory: Path):
if directory.exists():
shutil.rmtree(directory)
logger.info(f"Deleted existing vectorstore at {directory}")
def load_documents_from_folder(
folder: Path, docs_per_iter: int
) -> Generator[Blob, None, None]:
"""Loads documents in the given folder"""
docs_count = 0
documents = []
for file in folder.glob("**/*.py"):
if file.name == "__init__.py":
continue
if file.is_file():
documents.append(Blob(data=file.read_bytes(), path=file.name))
docs_count += 1
if docs_count % docs_per_iter == 0:
yield documents
documents = []
if len(documents) > 0:
yield documents
if docs_count == 0:
raise ValueError("No documents found in folder")
logger.info(f"Loaded {docs_count} documents from {folder}")
def read_documents(documents_generator: Generator) -> Generator:
"""Embeds documents in the given folder"""
parser = TextParser()
for documents_blob in documents_generator:
doc_ids = []
documents = []
for document in documents_blob:
parsed_docs = parser.parse(document)
doc_id = str(uuid.uuid4())
for doc in parsed_docs:
doc.metadata["doc_id"] = doc_id
documents.extend(parsed_docs)
doc_ids.append(doc_id)
yield documents
def embed_documents(
documents_generator: Generator,
chunk_size: int = 500,
chunk_overlap: int = 100,
persist_directory=root / "data" / ".chroma/",
):
# Create vectorstore for documents
text_splitter = TokenTextSplitter(
chunk_size=chunk_size, chunk_overlap=chunk_overlap
)
vectorstore = Chroma(
collection_name="documents",
embedding_function=OpenAIEmbeddings(),
persist_directory=str(persist_directory),
)
for documents in documents_generator:
split_docs = text_splitter.split_documents(documents)
if len(split_docs) == 0:
raise ValueError("Documents couldn't be read")
logger.info(f"Split into {len(split_docs)} documents")
logger.info("Embedding documents...")
vectorstore.add_documents(split_docs)
vectorstore.persist()
logger.info(f"Persisted vectorstore to {persist_directory}")
vectorstore = None
def build_vectorstore(
persist_directory: Path,
documents_folder: Path,
chunk_size=2000,
docs_per_iter=25,
):
logger.info(f"Building vectorstore with chunk size: {chunk_size}")
delete_existing_vectorstore(persist_directory)
documents_blob = load_documents_from_folder(documents_folder, docs_per_iter)
documents = read_documents(documents_blob)
start_time = time.time()
embed_documents(
documents,
chunk_size=chunk_size,
persist_directory=persist_directory,
)
logger.info(f"embed_documents took {time.time() - start_time:.2f} seconds")
logger.info("Successfully built vectorstore")
if __name__ == "__main__":
logger.info(f"Root: {root}")
persist_directory = root / "data" / ".chroma/"
documents_folder = root / "data" / "documents"
build_vectorstore(persist_directory, documents_folder)
| [] |
2024-01-10 | simonmoesorensen/gpt4docs | gpt4docs~model~DocstringLLM.py | from pathlib import Path
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.prompts import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from gpt4docs.modules.datamodels import PyDefinition
TOKENS_LIMIT = {
"gpt-3.5-turbo": 4096,
"text-davinci-003": 4096,
"gpt-4": 8192,
"gpt-4-32k": 32768,
"gpt-3.5-turbo-16k": 16384,
}
prompt_dir = Path(__file__).parent / "prompts"
class DocstringLLM:
def __init__(
self,
retriever=None,
callbacks=None,
model_name="gpt-3.5-turbo-16k",
):
"""
Setup the langchain Chain class for Q&A with LLM
"""
if model_name not in TOKENS_LIMIT:
raise ValueError(
f"Model {model_name} not supported. "
f"Supported models: {TOKENS_LIMIT.keys()}"
)
if callbacks is None:
callbacks = []
self.callbacks = callbacks
self.model = ChatOpenAI(model_name=model_name, streaming=False, temperature=0)
self.retriever = retriever
qa_prompt = ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template(
open(prompt_dir / "qa.txt").read()
),
HumanMessagePromptTemplate.from_template(
"Write a docstring for the following definition: `{question}`\nGenerated Docstring:" # noqa: E501
),
]
)
# Setup final chain
self.chain = RetrievalQA.from_chain_type(
llm=self.model,
chain_type="stuff",
chain_type_kwargs={
"prompt": qa_prompt,
},
retriever=self.retriever,
)
def run(self, definition: PyDefinition) -> str:
response = self.chain.run(definition.source)
return self._format_response(response)
async def arun(self, definition: PyDefinition) -> str:
response = await self.chain.arun(query=definition.source)
return self._format_response(response)
def _format_response(self, response: str) -> str:
return response.replace('"""', "")
| [
"Write a docstring for the following definition: `{question}`\nGenerated Docstring:",
"prompts / "
] |
2024-01-10 | simonmoesorensen/gpt4docs | gpt4docs~modules~managers~VectorStoreManager.py | from pathlib import Path
from gpt4docs.scripts.build_vectorstore import build_vectorstore
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
class VectorStoreManager:
def __init__(self, vectorstore_path: str):
self.dir = vectorstore_path
self.vectorstore = self.load(self.dir)
@staticmethod
def build(vectorstore_path: str, documents_folder: str):
build_vectorstore(
persist_directory=vectorstore_path, documents_folder=documents_folder
)
@staticmethod
def is_built(vectorstore_path: str):
return Path(vectorstore_path).exists()
def load(self, vectorstore_path: str):
if not self.is_built(vectorstore_path):
raise ValueError(f"Cannot find vectorstore in {vectorstore_path}")
return Chroma(
collection_name="documents",
persist_directory=str(vectorstore_path),
embedding_function=OpenAIEmbeddings(),
)
def get_retriever(self, k=6, search_kwargs=None):
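# MMR (maximal marginal relevance) search balances similarity and diversity among the k retrieved chunks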
if search_kwargs is None:
search_kwargs = {}
if "k" not in search_kwargs:
search_kwargs.update({"k": k})
return self.vectorstore.as_retriever(
search_type="mmr", search_kwargs=search_kwargs
)
| [] |
2024-01-10 | simonmoesorensen/gpt4docs | gpt4docs~model~ReadmeLLM.py | from pathlib import Path
from langchain import PromptTemplate
from langchain.chains.summarize import load_summarize_chain
from langchain.chat_models import ChatOpenAI
import logging
logger = logging.getLogger(__name__)
TOKENS_LIMIT = {
"gpt-3.5-turbo": 4096,
"text-davinci-003": 4096,
"gpt-4": 8192,
"gpt-4-32k": 32768,
"gpt-3.5-turbo-16k": 16384,
}
prompt_dir = Path(__file__).parent / "prompts"
class ReadmeLLM:
def __init__(
self,
retriever=None,
callbacks=None,
model_name="gpt-3.5-turbo-16k",
):
"""
Setup the langchain Chain class for Q&A with LLM
"""
if model_name not in TOKENS_LIMIT:
raise ValueError(
f"Model {model_name} not supported. "
f"Supported models: {TOKENS_LIMIT.keys()}"
)
if callbacks is None:
callbacks = []
self.callbacks = callbacks
self.model = ChatOpenAI(model_name=model_name, streaming=False, temperature=0)
self.reduce_llm = ChatOpenAI(model_name="gpt-4", streaming=False, temperature=0)
self.retriever = retriever
readme_prompt = PromptTemplate.from_template(
open(prompt_dir / "readme.txt").read()
)
map_prompt = PromptTemplate.from_template(open(prompt_dir / "map.txt").read())
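# Map-reduce summarization: each retrieved document is summarized with the map prompt, then combined into the README by the GPT-4 reduce model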
self.chain = load_summarize_chain(
llm=self.model,
reduce_llm=self.reduce_llm,
chain_type="map_reduce",
map_prompt=map_prompt,
combine_prompt=readme_prompt,
input_key="input_documents",
output_key="output_text",
return_intermediate_steps=True,
)
def run(self) -> str:
docs = self.get_relevant_docs()
response = self.chain({"input_documents": docs})
logger.debug("\n\n".join(response["intermediate_steps"]))
return response["output_text"]
async def arun(self) -> str:
docs = self.get_relevant_docs()
response = await self.chain.acall({"input_documents": docs})
logger.debug("\n\n".join(response["intermediate_steps"]))
return response["output_text"]
def get_relevant_docs(self):
return self.retriever._get_relevant_documents(
query="How do I get started with the project?",
run_manager=None,
)
| [
"prompts / "
] |
2024-01-10 | nikhilagr/Recommendation-System-For-Restaurants | BusinessInsights.py | import pandas as pd
import numpy as np
from nltk.tokenize import sent_tokenize, word_tokenize
from string import punctuation
from nltk.corpus import stopwords
import nltk
import ssl
from time import time
from sklearn.ensemble import ExtraTreesRegressor
# from textblob import TextBlob
# try:
# _create_unverified_https_context = ssl._create_unverified_context
# except AttributeError:
# pass
# else:
# ssl._create_default_https_context = _create_unverified_https_context
nltk.download('vader_lexicon')
nltk.download('punkt')
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import re
from pprint import pprint
from sklearn.linear_model import Ridge
import pickle
# Gensim
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
# Enable logging for gensim - optional
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.metrics import accuracy_score,classification_report
from datetime import datetime
class TopRecommendation:
def __init__(self):
self.reviews_filename = 'Finalized_Reviews.csv'
self.users_filename = 'Finalized_users.csv'
self.restaurants_filename = 'Finalized_Restaurants.csv'
def prepareData(self):
reviewData = pd.read_csv(self.reviews_filename)
userData = pd.read_csv(self.users_filename)
restaurantData = pd.read_csv(self.restaurants_filename)
restaurantData = restaurantData.loc[restaurantData['business_id'].isin(reviewData['business_id'])]
userData = userData.loc[userData['user_id'].isin(reviewData['user_id'])]
return reviewData,userData,restaurantData
def getSentimentScore(self,reviewData):
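# Score each review's positive and negative sentiment with NLTK's VADER analyzer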
sid = SentimentIntensityAnalyzer()
pos = []
neg = []
for text in reviewData['text']:
score = sid.polarity_scores(text)
pos.append(score['pos'])
neg.append(score['neg'])
reviewData['PostiveScore'] = pos
reviewData['NegativeScore'] = neg
return reviewData
def filterLen(self,docs, minlen):
r""" filter out terms that are too short.
docs is a list of lists, each inner list is a document represented as a list of words
minlen is the minimum length of the word to keep
"""
return [ [t for t in d if len(t) >= minlen ] for d in docs ]
def remove_stop_words(self,docs):
en_stops = stopwords.words('english')
en_stops.extend(['should','they','this','came','would','could'])
new_docs = []
for doc in docs:
new_word = []
for word in doc:
if word not in en_stops:
new_word.append(word)
new_docs.append(new_word)
return new_docs
def filterInput(self,documents):
new_docs = []
for doc in documents:
new_word = []
for word in doc:
new_word.append(word.lower())
for char in word:
if(not char.isalpha()):
new_word.remove(word.lower())
break
new_docs.append(new_word)
return new_docs
def remove_punctuation(self,docs):
new_docs = []
for doc in docs:
new_words = []
for word in doc:
new_word = re.sub(r'[^\w\s]', '', word)
if new_word != '':
new_words.append(new_word)
new_docs.append(new_words)
return new_docs
def preprocessText(self,reviewData):
docs = list(reviewData['text'])
docs_tokens = [word_tokenize(doc) for doc in docs]
docs_filt = self.filterInput(docs_tokens)
docs_wo_punctuation = self.remove_punctuation(docs_filt)
preprocessed_docs = self.remove_stop_words(docs_wo_punctuation)
return preprocessed_docs
def createDictForLDA(self,preprocessed_docs):
#Create Corpus
texts = preprocessed_docs
# Create Dictionary
id2word = corpora.Dictionary(preprocessed_docs)
# Term Document Frequency
corpus = [id2word.doc2bow(text) for text in texts]
return id2word,corpus
def getPredictedTopic(self,reviewData,lda_model,corpus):
topic_pred = []
for i in range(0, len(reviewData.text)):
temp = lda_model[corpus[i]]
result = sorted(temp,key=lambda x:(-x[1],x[0]))
topic_pred.append(result[0][0])
reviewData['PredictedTopic'] = topic_pred
return reviewData
def createFeatureDF(self,reviewData):
features = pd.DataFrame()
features['PositiveScore'] = reviewData['PostiveScore']
features['NegativeScore'] = reviewData['NegativeScore']
return features
def prepareFeatures(self,features):
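# Note: relies on the module-level reviewData and lda_model created in the __main__ block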
TP1 = []
TP2 = []
TP3 = []
TP4 = []
TP5 = []
TP6 = []
TP7 = []
TP8 = []
TP9 = []
TP10 = []
TN1 = []
TN2 = []
TN3 = []
TN4 = []
TN5 = []
TN6 = []
TN7 = []
TN8 = []
TN9 = []
TN10 = []
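# Weight each of the 10 LDA topic-term probabilities by the review's positive and negative sentiment scores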
for j,row in reviewData.iterrows():
ps = row['PostiveScore']
ns = row['NegativeScore']
temp = lda_model.get_topic_terms(row['PredictedTopic'])
TP1.append(temp[0][1] * ps)
TP2.append(temp[1][1] * ps)
TP3.append(temp[2][1] * ps)
TP4.append(temp[3][1] * ps)
TP5.append(temp[4][1] * ps)
TP6.append(temp[5][1] * ps)
TP7.append(temp[6][1] * ps)
TP8.append(temp[7][1] * ps)
TP9.append(temp[8][1] * ps)
TP10.append(temp[9][1] * ps)
TN1.append(temp[0][1] * ns)
TN2.append(temp[1][1] * ns)
TN3.append(temp[2][1] * ns)
TN4.append(temp[3][1] * ns)
TN5.append(temp[4][1] * ns)
TN6.append(temp[5][1] * ns)
TN7.append(temp[6][1] * ns)
TN8.append(temp[7][1] * ns)
TN9.append(temp[8][1] * ns)
TN10.append(temp[9][1] * ns)
features['TP1'] = TP1
features['TP2'] = TP2
features['TP3'] = TP3
features['TP4'] = TP4
features['TP5'] = TP5
features['TP6'] = TP6
features['TP7'] = TP7
features['TP8'] = TP8
features['TP9'] = TP9
features['TP10'] = TP10
features['TN1'] = TN1
features['TN2'] = TN2
features['TN3'] = TN3
features['TN4'] = TN4
features['TN5'] = TN5
features['TN6'] = TN6
features['TN7'] = TN7
features['TN8'] = TN8
features['TN9'] = TN9
features['TN10'] = TN10
return features
def predictRatings(self,features,reviewData):
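# Fit a ridge regression on the sentiment-weighted topic features to predict star ratings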
X = features
y = reviewData['stars']
# X_train , X_test,y_train,y_test = train_test_split(X,y,test_size=0.30,random_state=42)
clf = Ridge(alpha=1.0)
clf.fit(X, y)
pred_rating = clf.predict(X)
reviewData['PredictedRating'] = pred_rating
# reviewData.to_csv('PredictedRating')
return reviewData
def addMonthYear(self,reviewData):
year = []
month =[]
for k,row in (reviewData).iterrows():
dateobject = datetime.strptime(row['date'], '%Y-%m-%d')
year.append(dateobject.year)
month.append(dateobject.month)
reviewData['year']= year
reviewData['month']= month
return reviewData
if __name__ == '__main__':
start = time()
tr = TopRecommendation()
reviewData,userData,restaurantData = tr.prepareData()
print(reviewData.head())
reviewData = tr.getSentimentScore(reviewData)
pre = tr.preprocessText(reviewData)
id2word,corpus = tr.createDictForLDA(pre)
lda_model = gensim.models.LdaMulticore(workers=3,corpus=corpus,id2word=id2word, num_topics=10, random_state=100,passes=5)
reviewData = tr.getPredictedTopic(reviewData,lda_model,corpus)
features = tr.createFeatureDF(reviewData)
features = tr.prepareFeatures(features)
reviewData = tr.predictRatings(features,reviewData)
reviewData = tr.addMonthYear(reviewData)
dataToPickle = [reviewData,restaurantData]
pickle.dump(dataToPickle, open("modelTopRec.pkl","wb"))
print("Model Dumped Successfully")
end = time()
print((end - start)/60) | [] |
2024-01-10 | fenekku/zenodo | zenodo~modules~records~serializers~schemas~dc.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Record serialization."""
from __future__ import absolute_import, print_function
import lxml.html
from marshmallow import Schema, fields
from zenodo.modules.openaire.helpers import openaire_community_identifier, \
resolve_openaire_communities
from ...models import ObjectType
class DublinCoreV1(Schema):
"""Schema for records v1 in JSON."""
identifiers = fields.Method('get_identifiers')
titles = fields.Function(lambda o: [o['metadata'].get('title', u'')])
creators = fields.Method('get_creators')
relations = fields.Method('get_relations')
rights = fields.Method('get_rights')
dates = fields.Method('get_dates')
subjects = fields.Method('get_subjects')
descriptions = fields.Method('get_descriptions')
publishers = fields.Method('get_publishers')
contributors = fields.Method('get_contributors')
types = fields.Method('get_types')
sources = fields.Method('get_sources')
languages = fields.Function(lambda o: [o['metadata'].get('language', u'')])
def get_identifiers(self, obj):
"""Get identifiers."""
items = []
items.append(u'https://zenodo.org/record/{0}'.format(
obj['metadata']['recid']))
items.append(obj['metadata'].get('doi', u''))
oai = obj['metadata'].get('_oai', {}).get('id')
if oai:
items.append(oai)
return items
def get_creators(self, obj):
"""Get creators."""
return [c['name'] for c in obj['metadata'].get('creators', [])]
def get_relations(self, obj):
"""Get creators."""
rels = []
# Grants
for g in obj['metadata'].get('grants', []):
eurepo_id = g.get('identifiers', {}).get('eurepo')
if eurepo_id:
rels.append(eurepo_id)
# Alternate identifiers
for a in obj['metadata'].get('alternate_identifiers', []):
rels.append(
u'info:eu-repo/semantics/altIdentifier/{0}/{1}'.format(
a['scheme'],
a['identifier']))
# Related identifiers
for a in obj['metadata'].get('related_identifiers', []):
rels.append(
u'{0}:{1}'.format(
a['scheme'],
a['identifier']))
# OpenAIRE community identifiers
openaire_comms = resolve_openaire_communities(
obj['metadata'].get('communities', []))
for oa_comm in openaire_comms:
rels.append(
u'url:{0}'.format(openaire_community_identifier(oa_comm)))
return rels
def get_rights(self, obj):
"""Get rights."""
rights = [
u'info:eu-repo/semantics/{}Access'.format(
obj['metadata']['access_right'])]
license_url = obj['metadata'].get('license', {}).get('url')
if license_url:
rights.append(license_url)
return rights
def get_dates(self, obj):
"""Get dates."""
dates = [obj['metadata']['publication_date']]
if obj['metadata']['access_right'] == u'embargoed':
dates.append(
u'info:eu-repo/date/embargoEnd/{0}'.format(
obj['metadata']['embargo_date']))
return dates
def get_descriptions(self, obj):
"""Get descriptions."""
descriptions = []
if obj['metadata'].get('description', '').strip():
descriptions.append(
lxml.html.document_fromstring(obj['metadata']['description'])
.text_content().replace(u"\xa0", u" "))
return descriptions
def get_subjects(self, obj):
"""Get subjects."""
metadata = obj['metadata']
subjects = []
subjects.extend(metadata.get('keywords', []))
subjects.extend((s['term'] for s in metadata.get('subjects', [])))
return subjects
def get_publishers(self, obj):
"""Get publishers."""
imprint = obj['metadata'].get('imprint', {}).get('publisher')
if imprint:
return [imprint]
part = obj['metadata'].get('part_of', {}).get('publisher')
if part:
return [part]
return []
def get_contributors(self, obj):
"""Get contributors."""
return [c['name'] for c in obj['metadata'].get('contributors', [])]
def get_types(self, obj):
"""Get types."""
t = ObjectType.get_by_dict(obj['metadata']['resource_type'])
types = [t['eurepo'], t['internal_id']]
oa_type = ObjectType.get_openaire_subtype(obj['metadata'])
if oa_type:
types.append(oa_type)
return types
def get_sources(self, obj):
"""Get sources."""
items = []
# Journal
journal = obj['metadata'].get('journal')
if journal is not None:
vol = journal.get('volume')
issue = journal.get('issue')
if vol and issue:
vol = u'{0}({1})'.format(vol, issue)
if vol is None:
vol = issue
y = journal.get('year')
parts = [
journal.get('title'),
vol,
journal.get('pages'),
u'({0})'.format(y) if y else None,
]
items.append(u' '.join([x for x in parts if x]))
# Meetings
m = obj['metadata'].get('meetings', {})
if m:
parts = [
m.get('acronym'),
m.get('title'),
m.get('place'),
m.get('dates'),
]
items.append(', '.join([x for x in parts if x]))
return items
| [] |
2024-01-10 | daodao97/chatdoc | server~doc_util.py | from PyPDF2 import PdfReader
import sys
import os
import logging
from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex, QuestionAnswerPrompt, QueryMode, LLMPredictor
from consts import BASE_DIR
import ebooklib
from ebooklib import epub
from epub2txt import epub2txt
from langchain.chat_models import ChatOpenAI
from llama_index import download_loader
import docx2txt
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
llm_predictor = LLMPredictor(llm=ChatOpenAI(
temperature=0.2, model_name="gpt-3.5-turbo"))
CJKPDFReader = download_loader("CJKPDFReader")
SimpleWebPageReader = download_loader("SimpleWebPageReader")
QUESTION_ANSWER_PROMPT_TMPL_2 = """
You are an AI assistant providing helpful advice. You are given the following extracted parts of a long document and a question. Provide a conversational answer based on the context provided.
If you can't find the answer in the context below, just say "Hmm, I'm not sure." Don't try to make up an answer.
If the question is not related to the context, politely respond that you are tuned to only answer questions that are related to the context.
Context information is below.
=========
{context_str}
=========
{query_str}
"""
QA_PROMPT_TMPL = (
"Context information is below. \n"
"---------------------\n"
"{context_str}"
"\n---------------------\n"
"{query_str}\n"
)
class Doc:
def __init__(
self,
doc_id: str,
filename: str = ""
) -> None:
self.dir_name = doc_id
full_dir = os.path.join(BASE_DIR, self.dir_name)
if not os.path.exists(full_dir):
os.makedirs(full_dir)
self.filename = filename
self.file_path = os.path.join(BASE_DIR, self.dir_name, filename)
self.data_file = os.path.join(BASE_DIR, self.dir_name, "data.txt")
self.index_file = os.path.join(BASE_DIR, self.dir_name, "index.json")
async def save(self, content: bytes):
with open(self.file_path, "wb") as f:
f.write(content)
def build_txt(self, doc_type: str):
if doc_type == 'application/epub+zip':
self.extract_epub()
if doc_type == 'application/pdf':
self.extract_pdf()
if doc_type == 'text/plain' or doc_type == 'text/markdown':
self.data_file = self.file_path
if doc_type == 'application/vnd.openxmlformats-officedocument.wordprocessingml.document':
self.extra_docx()
def extract_epub(self):
res = epub2txt(self.file_path)
with open(self.data_file, "a") as file:
for i in range(len(res)):
file.write(res[i])
def extract_pdf(self):
reader = PdfReader(self.file_path)
print("total pages ", len(reader.pages))
with open(self.data_file, "a") as file:
for i in range(len(reader.pages)):
page = reader.pages[i]
text = page.extract_text()
file.write(text)
def extra_docx(self):
res = docx2txt.process(self.file_path)
with open(self.data_file, "a") as file:
file.write(res)
def build_index(self, doc_type: str):
if doc_type == 'web':
self.build_web()
return
documents = SimpleDirectoryReader(
input_files=[self.data_file]).load_data()
index = GPTSimpleVectorIndex(documents)
index.save_to_disk(self.index_file)
def build_web(self):
loader = SimpleWebPageReader()
documents = loader.load_data(urls=[self.filename])
index = GPTSimpleVectorIndex(documents)
index.save_to_disk(self.index_file)
def query(self, question: str):
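# Build the vector index from the loaded document on first use, then answer using the top-3 most similar chunks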
print("query2", self.index_file, self.file_path)
loader = CJKPDFReader()
index_file = self.index_file
if os.path.exists(index_file) == False:
documents = loader.load_data(file=self.file_path)
index = GPTSimpleVectorIndex(documents)
index.save_to_disk(index_file)
else:
index = GPTSimpleVectorIndex.load_from_disk(index_file)
QUESTION_ANSWER_PROMPT = QuestionAnswerPrompt(
QUESTION_ANSWER_PROMPT_TMPL_2)
return index.query(
query_str=question,
llm_predictor=llm_predictor,
text_qa_template=QUESTION_ANSWER_PROMPT,
# response_mode="tree_summarize",
similarity_top_k=3,
)
def query2(self, question: str):
print("query2", self.index_file, self.file_path)
loader = CJKPDFReader()
index_file = self.index_file
if os.path.exists(index_file) == False:
documents = loader.load_data(file=self.file_path)
index = GPTSimpleVectorIndex(documents)
index.save_to_disk(index_file)
else:
index = GPTSimpleVectorIndex.load_from_disk(index_file)
QUESTION_ANSWER_PROMPT = QuestionAnswerPrompt(
QUESTION_ANSWER_PROMPT_TMPL_2)
return index.query(
query_str=question,
llm_predictor=llm_predictor,
text_qa_template=QUESTION_ANSWER_PROMPT,
response_mode="tree_summarize",
similarity_top_k=3,
)
| [
"\nYou are an AI assistant providing helpful advice. You are given the following extracted parts of a long document and a question. Provide a conversational answer based on the context provided.\nIf you can't find the answer in the context below, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\nIf the question is not related to the context, politely respond that you are tuned to only answer questions that are related to the context.\nContext information is below.\n=========\n{context_str}\n=========\n{query_str}\n",
"Context information is below. \n---------------------\n{context_str}\n---------------------\n{query_str}\n",
"t find the answer in the context below, just say \"Hmm, I"
] |
2024-01-10 | davidsmakerworks/ai-artist | artist_classes.py | # MIT License
# Copyright (c) 2023 David Rice
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import base64
import logging
import openai
import pygame
import stability_sdk.interfaces.gooseai.generation.generation_pb2 as generation
from stability_sdk import client
from log_config import get_logger_name
logger = logging.getLogger(get_logger_name())
class ArtistCreation:
"""
Class representing a full "creation" by the A.R.T.I.S.T. system, i.e., the image
and its corresponding verse.
"""
def __init__(
self,
img: pygame.Surface,
verse_lines: list[str],
prompt: str,
is_daydream: bool,
) -> None:
self.img = img
self.verse_lines = verse_lines
self.prompt = prompt
self.is_daydream = is_daydream
class ArtistCanvas:
"""
Class representing the visible surface on which the ArtistCreation object
will be rendered.
"""
def __init__(
self,
width: int,
height: int,
horiz_margin: int,
vert_margin: int,
verse_font_name: str,
verse_font_max_size: int,
verse_line_spacing: int,
) -> None:
self._width = width
self._height = height
self._horiz_margin = horiz_margin
self._vert_margin = vert_margin
self._verse_font_name = verse_font_name
self._verse_font_max_size = verse_font_max_size
self._verse_line_spacing = verse_line_spacing
self._surface = pygame.Surface(size=(width, height))
def _get_verse_font_size(self, verse_lines: list[str], max_verse_width: int) -> int:
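# Start from the maximum font size and shrink in steps of 2 until the longest verse line fits the available width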
font_obj = pygame.font.SysFont(self._verse_font_name, self._verse_font_max_size)
longest_line_size = 0
# Need to check pixel size of each line to account for
# proportional fonts. Assumes that size scales linearly.
for line in verse_lines:
text_size = font_obj.size(line)
if text_size[0] > longest_line_size:
longest_line_size = text_size[0]
longest_line = line
font_size = self._verse_font_max_size
will_fit = False
while not will_fit:
font_obj = pygame.font.SysFont(self._verse_font_name, font_size)
text_size = font_obj.size(longest_line)
if text_size[0] < max_verse_width:
will_fit = True
else:
font_size -= 2
return font_size
def _get_verse_total_height(
self, verse_lines: list[str], verse_font_size: int
) -> int:
font_obj = pygame.font.SysFont(self._verse_font_name, verse_font_size)
total_height = 0
for line in verse_lines:
text_size = font_obj.size(line)
total_height += text_size[1]
total_height += self._verse_line_spacing
total_height -= self._verse_line_spacing
return total_height
@property
def surface(self) -> pygame.Surface:
return self._surface
def clear(self) -> None:
self._surface.fill(color=pygame.Color("black"))
def render_creation(self, creation: ArtistCreation, img_side: str) -> None:
self.clear()
img_width = creation.img.get_width()
if img_side.lower() == "left":
img_x = self._horiz_margin
verse_x = self._horiz_margin + img_width + self._horiz_margin
elif img_side.lower() == "right":
img_x = self._width - self._horiz_margin - img_width
verse_x = self._horiz_margin
else:
raise ValueError("img_side must be either 'left' or 'right'")
# Draw the image
self._surface.blit(source=creation.img, dest=(img_x, self._vert_margin))
max_verse_width = (self._width - img_width) - (self._horiz_margin * 3)
verse_font_size = self._get_verse_font_size(
creation.verse_lines, max_verse_width
)
total_height = self._get_verse_total_height(
creation.verse_lines, verse_font_size
)
offset = -total_height // 2
font_obj = pygame.font.SysFont(self._verse_font_name, verse_font_size)
for line in creation.verse_lines:
text_surface = font_obj.render(line, True, pygame.Color("white"))
self._surface.blit(
source=text_surface, dest=(verse_x, (self._height // 2) + offset)
)
offset += int(total_height / len(creation.verse_lines))
class StatusScreen:
"""
Class representing the status screen displayed when A.R.T.I.S.T. is
waiting for input or generating a new creation.
"""
def __init__(
self,
width: int,
height: int,
font_name: str,
heading1_size: int,
heading2_size: int,
status_size: int,
vert_margin: int,
) -> None:
self._width = width
self._height = height
self._font_name = font_name
self._heading1_size = heading1_size
self._heading2_size = heading2_size
self._status_size = status_size
self._vert_margin = vert_margin
self._surface = pygame.Surface(size=(width, height))
@property
def surface(self) -> pygame.Surface:
return self._surface
def render_status(self, text: str) -> None:
self._surface.fill(pygame.Color("black"))
font = pygame.font.SysFont(self._font_name, self._heading1_size)
heading1 = "A.R.T.I.S.T."
x_pos = int(self._surface.get_width() / 2 - font.size(heading1)[0] / 2)
y_pos = self._vert_margin
text_surface = font.render(heading1, True, pygame.Color("white"))
self._surface.blit(text_surface, (x_pos, y_pos))
heading1_height = font.size(heading1)[1]
font = pygame.font.SysFont(self._font_name, self._heading2_size)
heading2 = "Audio-Responsive Transformative Imagination Synthesis Technology"
x_pos = int(self._surface.get_width() / 2 - font.size(heading2)[0] / 2)
y_pos += heading1_height
text_surface = font.render(heading2, True, pygame.Color("white"))
self._surface.blit(text_surface, (x_pos, y_pos))
font = pygame.font.SysFont(self._font_name, self._status_size)
x_pos = int(self._surface.get_width() / 2 - font.size(text)[0] / 2)
y_pos = int(self._surface.get_height() / 2 - font.size(text)[1] / 2)
text_surface = font.render(text, True, pygame.Color("white"))
self._surface.blit(text_surface, (x_pos, y_pos))
class SDXLCreator:
def __init__(
self,
api_key: str,
img_width: int,
img_height: int,
steps: int,
cfg_scale: float,
) -> None:
self.api_key = api_key
self.img_width = img_width
self.img_height = img_height
self.steps = steps
self.cfg_scale = cfg_scale
self._stability_client = client.StabilityInference(
key=self.api_key,
engine="stable-diffusion-xl-1024-v1-0",
)
def generate_image_data(self, prompt: str) -> bytes:
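# Request a single SDXL image; raise if the content filter fires, otherwise return the first artifact's binary data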
response = self._stability_client.generate(
prompt=prompt,
width=self.img_width,
height=self.img_height,
steps=self.steps,
cfg_scale=self.cfg_scale,
)
for r in response:
for artifact in r.artifacts:
if artifact.finish_reason == generation.FILTER:
logger.error("Content filter triggered")
raise RuntimeError("Content filter triggered")
else:
return artifact.binary
raise RuntimeError("No artifact returned")
class DallE2Creator:
def __init__(self, api_key: str, img_width: int, img_height: int) -> None:
self.api_key = api_key
self.img_width = img_width
self.img_height = img_height
def generate_image_data(self, prompt: str) -> bytes:
img_size = f"{self.img_width}x{self.img_height}"
try:
response = openai.Image.create(
api_key=self.api_key,
prompt=prompt,
size=img_size,
response_format="b64_json",
user="A.R.T.I.S.T.",
)
except Exception as e:
logger.error(f"Image creation response: {response}")
logger.exception(e)
raise
return base64.b64decode(response["data"][0]["b64_json"])
| [] |
2024-01-10 | davidsmakerworks/ai-artist | artist_moderator.py | # MIT License
# Copyright (c) 2023 David Rice
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import openai
from log_config import get_logger_name
logger = logging.getLogger(get_logger_name())
class ArtistModerator:
def __init__(self, api_key: str) -> None:
self.api_key = api_key
def check_msg(self, msg: str) -> bool:
"""
Check if a message complies with content policy.
Returns True if message is safe, False if it is not.
"""
try:
response = openai.Moderation.create(api_key=self.api_key, input=msg)
except Exception as e:
logger.error(f"Moderation response: {response}")
logger.exception(e)
raise
flagged = response["results"][0]["flagged"]
if flagged:
logger.info(f"Message flagged by moderation: {msg}")
logger.info(f"Moderation response: {response}")
else:
logger.info(f"Moderation check passed")
return not flagged
| [] |
2024-01-10 | kamushadenes/chloe | .github~scripts~release-notes.py | #!/usr/bin/env python3
import os
from functools import reduce
from typing import AnyStr, Tuple, Dict
import openai
import requests
def get_latest_release(repository: AnyStr) -> Dict:
"""Get the release from the GitHub API."""
print('[*] Getting latest release...')
return requests.get(
"https://api.github.com/repos/{}/releases/latest".format(repository),
headers={
"Accept": "application/vnd.github+json",
"Authorization": "Bearer {}".format(os.environ["GITHUB_TOKEN"])
},
).json()
def get_improved_release_message(release: Dict) -> Tuple[Dict, AnyStr]:
"""Improves the release message using OpenAI's GPT-3 API."""
print('[*] Improving release message...')
completion = openai.ChatCompletion.create(model='gpt-3.5-turbo', messages=[
{
"role": "system",
"content": "Your task is to rewrite release notes in a more concise manner, "
"no need to mention specific commits. "
"Group things by features / bug fixes / etc as appropriate. "
"Try to focus on the most important changes. "
"Return it in markdown format.",
},
{
"role": "user",
"content": release['body']
}
])
return release, completion.choices[0].message.content
def update_release_notes(args: Tuple[Dict, AnyStr]) -> None:
"""Update the release notes using the GitHub API."""
print('[*] Updating release notes...')
print('[*] New content:\n\n{}\n\n'.format(args[1]))
r = requests.patch(
args[0]['url'],
headers={
"Accept": "application/vnd.github+json",
"Authorization": "Bearer {}".format(os.environ["GITHUB_TOKEN"])
},
json={
"body": args[1],
"draft": False,
}
)
if r.status_code != 200:
print("[-] Failed to update release notes: {}".format(r.text))
else:
print('[+] Successfully updated release notes!')
if __name__ == '__main__':
if "GITHUB_TOKEN" not in os.environ:
raise ValueError("GITHUB_TOKEN environment variable is not set.")
if "OPENAI_API_KEY" not in os.environ:
raise ValueError("OPENAI_API_KEY environment variable is not set.")
reduce(lambda x, f: f(x),
[
get_latest_release,
get_improved_release_message,
update_release_notes,
], "kamushadenes/chloe")
| [
"Your task is to rewrite release notes in a more concise manner, no need to mention specific commits. Group things by features / bug fixes / etc as appropriate. Try to focus on the most important changes. Return it in markdown format."
] |
2024-01-10 | raghavpillai/InvestIQ | server~stock.py | import yfinance as yf
import requests
import dotenv
import os
import math
import openai
from difflib import SequenceMatcher
from typing import List, Dict, Tuple
dotenv.load_dotenv()
from sources.news import News
from sources.reddit import Reddit
from sources.youtube import Youtube
POLYGON_API_KEY = os.getenv("POLYGON_API_KEY")
openai.api_key = os.getenv("OPENAI_API_KEY")
class Stock:
def __init__(self, ticker):
# Essentials
self.ticker: str = ticker
# Details
self.name: str = None
self.market_cap: float = None
self.description: str = None
self.similar: str = None
self.current_price: float = None
self.growth: str = None
self.recommend: str = None
self.blurb: str = None
self.logo_url: str = None
self.analyst_count: int = None
# Data
self.perception: float = None
self.popularity: int = None
self.overall_rating: float = None
def create_blurb(self, stock_data: Dict[str, str]) -> str:
# Delete to save tokens
stuff_to_delete: List[str] = [
"longBusinessSummary", "companyOfficers", "uuid", "messageBoardId",
"address1", "website", "phone", "city", "state", "zip",
"country", "industry", "gmtOffSetMilliseconds", "governanceEpochDate",
"timeZoneFullName", "timeZoneShortName",
]
for stuff in stuff_to_delete:
if stuff in stock_data:
del stock_data[stuff]
stock_data["name"] = self.name
# return "Insert blurb here"
response: Dict[str, str] = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{
"role": "system",
"content": "You are a helpful assistant designed to take in stock data and return an smart but concise analysis on the market trends. Use and cite quantitative data to determine if the stock is worth buying or not. Every sentence should be a point backed up by data. Provide a single concise paragraph blurb of no more than 150 characters.",
},
{
"role": "user",
"content": str(stock_data),
}
],
temperature=0.7,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
# print(response.choices[0].message.content)
return response.choices[0].message.content
def populate(self):
if not self.ticker:
print("Invalid ticker")
return
stock_details: Dict[str, str] = {}
stock_data: Dict[str, str] = {}
try:
stock_details, stock_data = self._get_stock_info()
except Exception:
print("Unable to get stock info")
return
print(f"Retrieving stock info for {self.ticker}")
print(stock_details)
self.name = stock_details.get("name") or self.ticker
self.market_cap = stock_details.get("marketcap")
self.similar = stock_details.get("similar")
self.logo = stock_details.get("logo")
open_price = stock_data.get("regularMarketOpen")
close_price = stock_data.get("previousClose")
self.description = stock_data.get("longBusinessSummary")
self.current_price = stock_data.get("currentPrice")
self.growth = stock_data.get("revenueGrowth",0) * 100
self.recommend = stock_data.get("recommendationKey", "Unknown")
self.analyst_count = stock_data.get("numberOfAnalystOpinions", 0)
self.blurb = self.create_blurb(stock_data)
# twitter = Twitter()
reddit: Reddit = Reddit()
news: News = News()
youtube: Youtube = Youtube()
reddit_perception, reddit_popularity = reddit.calculate_perception(self.name)
youtube_perception, youtube_popularity = youtube.calculate_perception(
self.name
)
news_perception, news_popularity = news.calculate_perception(self.name)
total_popularity: float = (
(reddit_popularity + youtube_popularity + news_popularity) / 3
)
total_perception: float = (
(reddit_perception + youtube_perception + news_perception) / 3
) + 0.2
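# Average the three sources; the +0.2 offset nudges overall perception slightly positive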
print(f"Perception: {total_perception}")
print(f"Popularity: {total_popularity}")
def apply_bias(score, bias_factor):
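# Exponentially amplify scores so that stronger sentiment/popularity signals count disproportionately more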
return score * math.exp(bias_factor * abs(score))
# Go higher/lower as needed
bias_factor = 0.2
biased_perception = apply_bias(total_perception, bias_factor)
biased_popularity = apply_bias(total_popularity, bias_factor)
overall_rating = (biased_perception + biased_popularity) / 2
overall_rating = min(max(overall_rating, -0.98), 0.98) # Clamp
def similarity_ratio(a: str, b: str) -> float:
return SequenceMatcher(a=a.lower(), b=b.lower()).ratio()
top_overall_titles: List[Tuple[str, str]] = [(title, "youtube") for title in youtube.top_titles] + \
[(title, "reddit") for title in reddit.top_titles] + \
[(title, "news") for title in news.top_titles]
bottom_overall_titles: List[Tuple[str, str]] = [(title, "youtube") for title in youtube.bottom_titles] + \
[(title, "reddit") for title in reddit.bottom_titles] + \
[(title, "news") for title in news.bottom_titles]
print(top_overall_titles)
print(bottom_overall_titles)
top_overall_titles.sort(key=lambda x: similarity_ratio(x[0], self.name), reverse=True)
bottom_overall_titles.sort(key=lambda x: similarity_ratio(x[0], self.name), reverse=True)
self.perception = round(total_perception * 100, 2)
self.popularity = round(total_popularity * 100, 2)
self.overall_rating = round(overall_rating * 100, 2)
if self.perception > 0:
majority_role = "positive"
minority_role = "negative"
majority_titles = top_overall_titles
minority_titles = bottom_overall_titles
else:
majority_role = "negative"
minority_role = "positive"
majority_titles = bottom_overall_titles
minority_titles = top_overall_titles
# Select the top titles based on perception
titles_to_show: List[Dict[str, str]] = [
{"title": majority_titles[0][0], "source": majority_titles[0][1], "role": majority_role},
{"title": majority_titles[1][0], "source": majority_titles[1][1], "role": majority_role},
{"title": minority_titles[0][0], "source": minority_titles[0][1], "role": minority_role},
]
self.titles = titles_to_show
    def _get_stock_info(self) -> Tuple[Dict[str, str], Dict[str, str]]:
response: requests.Response = requests.get(
f"https://api.polygon.io/v1/meta/symbols/{self.ticker}/company?apiKey={POLYGON_API_KEY}",
)
stock_details: Dict[str, str] = response.json()
stock_raw: yf.Ticker = yf.Ticker(self.ticker)
stock_data: Dict[str, str] = stock_raw.info
return stock_details, stock_data
if __name__ == "__main__":
stock = Stock("AAPL")
stock.populate() | [
"You are a helpful assistant designed to take in stock data and return an smart but concise analysis on the market trends. Use and cite quantitative data to determine if the stock is worth buying or not. Every sentence should be a point backed up by data. Provide a single concise paragraph blurb of no more than 150 characters."
] |
2024-01-10 | billwert/azure-sdk-tools | packages~python-packages~apiview-gpt~src~_gpt_reviewer.py | import os
import dotenv
import json
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.chat_models import AzureChatOpenAI
from langchain.output_parsers import PydanticOutputParser
import openai
from typing import List
from ._sectioned_document import SectionedDocument, Section
from ._models import GuidelinesResult, Violation
dotenv.load_dotenv()
openai.api_type = "azure"
openai.api_base = os.getenv("OPENAI_API_BASE")
openai.api_key = os.getenv("OPENAI_API_KEY")
OPENAI_API_VERSION = "2023-05-15"
_PACKAGE_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
_GUIDELINES_FOLDER = os.path.join(_PACKAGE_ROOT, "guidelines")
class GptReviewer:
def __init__(self):
self.llm = AzureChatOpenAI(client=openai.ChatCompletion, deployment_name="gpt-4", openai_api_version=OPENAI_API_VERSION, temperature=0)
self.output_parser = PydanticOutputParser(pydantic_object=GuidelinesResult)
self.prompt_template = PromptTemplate(
input_variables=["apiview", "guidelines", "language"],
partial_variables={"format_instructions": self.output_parser.get_format_instructions()},
template="""
Given the following {language} Azure SDK Guidelines:
{guidelines}
Verify whether the following code satisfies the guidelines:
```
{apiview}
```
{format_instructions}
"""
)
self.chain = LLMChain(llm=self.llm, prompt=self.prompt_template)
def get_response(self, apiview, language):
general_guidelines, language_guidelines = self.retrieve_guidelines(language)
all_guidelines = general_guidelines + language_guidelines
guidelines = self.select_guidelines(all_guidelines, [
"python-client-naming",
"python-client-options-naming",
"python-models-async",
"python-models-dict-result",
"python-models-enum-string",
"python-models-enum-name-uppercase",
"python-client-sync-async",
"python-client-async-keywords",
"python-client-separate-sync-async",
"python-client-same-name-sync-async",
"python-client-namespace-sync",
])
for i, g in enumerate(guidelines):
g["number"] = i
chunked_apiview = SectionedDocument(apiview.splitlines(), chunk=True)
final_results = GuidelinesResult(status="Success", violations=[])
for chunk in chunked_apiview.sections:
if self.should_evaluate(chunk):
results = self.chain.run(apiview=str(chunk), guidelines=guidelines, language=language)
output = self.output_parser.parse(results)
final_results.violations.extend(self.process_violations(output.violations, chunk))
if output.status == "Error":
final_results.status = output.status
return final_results
def process_violations(self, violations: List[Violation], section: Section) -> List[Violation]:
if not violations:
return violations
combined_violations = {}
for violation in violations:
line_no = self.find_line_number(section, violation.bad_code)
violation.line_no = line_no
existing = combined_violations.get(line_no, None)
if existing:
for rule_id in violation.rule_ids:
if rule_id not in existing.rule_ids:
existing.rule_ids.append(rule_id)
if existing.suggestion != violation.suggestion:
# FIXME: Collect all suggestions and use the most popular??
existing.suggestion = violation.suggestion
existing.comment = existing.comment + " " + violation.comment
else:
combined_violations[line_no] = violation
return [x for x in combined_violations.values()]
def find_line_number(self, chunk: Section, bad_code: str) -> int:
offset = chunk.start_line_no
line_no = None
for i, line in enumerate(chunk.lines):
if line.strip() == bad_code.strip():
if line_no is None:
line_no = offset + i
else:
raise Exception(f"Found multiple instances of bad code in the given chunk: {bad_code}")
if not line_no:
raise Exception(f"Unable to find line number for bad code: {bad_code}")
return line_no
def should_evaluate(self, chunk: Section):
for line in chunk.lines:
if not line.strip().startswith("#") and not line.strip() == "":
return True
return False
def select_guidelines(self, all, select_ids):
return [guideline for guideline in all if guideline["id"] in select_ids]
def retrieve_guidelines(self, language):
general_guidelines = []
general_guidelines_path = os.path.join(_GUIDELINES_FOLDER, "general")
language_guidelines_path = os.path.join(_GUIDELINES_FOLDER, language)
for filename in os.listdir(general_guidelines_path):
with open(os.path.join(general_guidelines_path, filename), "r") as f:
items = json.loads(f.read())
general_guidelines.extend(items)
language_guidelines = []
for filename in os.listdir(language_guidelines_path):
with open(os.path.join(language_guidelines_path, filename), "r") as f:
items = json.loads(f.read())
language_guidelines.extend(items)
return general_guidelines, language_guidelines
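# Hedged usage sketch (added, not part of the original module): with the Azure OpenAI
# environment variables above configured, the reviewer could be driven roughly like this
# (the APIView text and import path are illustrative placeholders, not real values):
#
#     from _gpt_reviewer import GptReviewer
#     reviewer = GptReviewer()
#     result = reviewer.get_response(apiview_text, "python")
#     print(result.json())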
| [
"\n Given the following {language} Azure SDK Guidelines:\n {guidelines}\n Verify whether the following code satisfies the guidelines:\n ```\n {apiview}\n ```\n \n {format_instructions}\n "
] |
2024-01-10 | HumanCompatibleAI/population-irl | scripts~ddqn_atari.py | '''Train an Atari game using dueling DQN from OpenAI Baselines.
Based on examples/run_atari.py from Baselines.
Standalone application, not integrated into rest of the codebase.'''
import argparse
from datetime import datetime
import os
import tempfile
import time
import tensorflow as tf
import zipfile
import cloudpickle
import numpy as np
import gym
from baselines import bench
from baselines.common.atari_wrappers import make_atari
from baselines.common.schedules import LinearSchedule
import baselines.common.tf_util as U
from baselines.common import set_global_seeds
from baselines import logger
from baselines import deepq
from baselines.deepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
from baselines.deepq.utils import BatchInput, load_state, save_state
class ActWrapper(object):
def __init__(self, act, act_params):
self._act = act
self._act_params = act_params
@staticmethod
def load(path):
with open(path, "rb") as f:
model_data, act_params = cloudpickle.load(f)
act = deepq.build_act(**act_params)
with tempfile.TemporaryDirectory() as td:
arc_path = os.path.join(td, "packed.zip")
with open(arc_path, "wb") as f:
f.write(model_data)
zipfile.ZipFile(arc_path, 'r', zipfile.ZIP_DEFLATED).extractall(td)
load_state(os.path.join(td, "model"))
return ActWrapper(act, act_params)
def __call__(self, *args, **kwargs):
return self._act(*args, **kwargs)
def save(self, path=None):
"""Save model to a pickle located at `path`"""
if path is None:
path = os.path.join(logger.get_dir(), "model.pkl")
with tempfile.TemporaryDirectory() as td:
save_state(os.path.join(td, "model"))
arc_name = os.path.join(td, "packed.zip")
with zipfile.ZipFile(arc_name, 'w') as zipf:
for root, dirs, files in os.walk(td):
for fname in files:
file_path = os.path.join(root, fname)
if file_path != arc_name:
zipf.write(file_path, os.path.relpath(file_path, td))
with open(arc_name, "rb") as f:
model_data = f.read()
with open(path, "wb") as f:
cloudpickle.dump((model_data, self._act_params), f)
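# Added note: ActWrapper.save/ActWrapper.load round-trip the TensorFlow weights plus the
# act-function parameters through a single pickle, e.g. act.save("model.pkl") after
# training and act = ActWrapper.load("model.pkl") at inference time, as test() below does.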
def learn(env,
q_func,
out_dir,
lr=5e-4,
max_timesteps=100000,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
train_freq=1,
batch_size=32,
print_freq=100,
checkpoint_freq=10000,
learning_starts=1000,
gamma=1.0,
target_network_update_freq=500,
prioritized_replay=False,
prioritized_replay_alpha=0.6,
prioritized_replay_beta0=0.4,
prioritized_replay_beta_iters=None,
prioritized_replay_eps=1e-6,
param_noise=False,
callback=None):
"""Train a deepq model.
Parameters
-------
env: gym.Env
environment to train on
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
lr: float
learning rate for adam optimizer
max_timesteps: int
number of env steps to optimizer for
buffer_size: int
size of the replay buffer
exploration_fraction: float
fraction of entire training period over which the exploration rate is annealed
exploration_final_eps: float
final value of random action probability
train_freq: int
        update the model every `train_freq` steps.
batch_size: int
        size of a batch sampled from the replay buffer for training
print_freq: int
how often to print out training progress
set to None to disable printing
checkpoint_freq: int
how often to save the model. This is so that the best version is restored
at the end of the training. If you do not wish to restore the best version at
the end of the training set this variable to None.
learning_starts: int
how many steps of the model to collect transitions for before learning starts
gamma: float
discount factor
target_network_update_freq: int
update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True, a prioritized replay buffer will be used.
prioritized_replay_alpha: float
alpha parameter for prioritized replay buffer
prioritized_replay_beta0: float
initial value of beta for prioritized replay buffer
prioritized_replay_beta_iters: int
number of iterations over which beta will be annealed from initial value
to 1.0. If set to None equals to max_timesteps.
prioritized_replay_eps: float
epsilon to add to the TD errors when updating priorities.
callback: (locals, globals) -> None
function called at every steps with state of the algorithm.
If callback returns true training stops.
Returns
-------
act: ActWrapper
Wrapper over act function. Adds ability to save it and load it.
See header of baselines/deepq/categorical.py for details on the act function.
"""
# Create all the functions necessary to train the model
# capture the shape outside the closure so that the env object is not serialized
# by cloudpickle when serializing make_obs_ph
observation_space_shape = env.observation_space.shape
def make_obs_ph(name):
return BatchInput(observation_space_shape, name=name)
act, train, update_target, debug = deepq.build_train(
make_obs_ph=make_obs_ph,
q_func=q_func,
num_actions=env.action_space.n,
optimizer=tf.train.AdamOptimizer(learning_rate=lr),
gamma=gamma,
grad_norm_clipping=10,
param_noise=param_noise
)
act_params = {
'make_obs_ph': make_obs_ph,
'q_func': q_func,
'num_actions': env.action_space.n,
}
act = ActWrapper(act, act_params)
# Create the replay buffer
if prioritized_replay:
replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
if prioritized_replay_beta_iters is None:
prioritized_replay_beta_iters = max_timesteps
beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
initial_p=prioritized_replay_beta0,
final_p=1.0)
else:
replay_buffer = ReplayBuffer(buffer_size)
beta_schedule = None
# Create the schedule for exploration starting from 1.
exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps),
initial_p=1.0,
final_p=exploration_final_eps)
# Initialize the parameters and copy them to the target network.
U.initialize()
update_target()
episode_rewards = [0.0]
saved_mean_reward = None
obs = env.reset()
reset = True
model_saved = False
model_file = os.path.join(out_dir, "model")
if os.path.exists(model_file + '.index'):
load_state(model_file)
logger.log('Restored model from {}'.format(model_file))
exploration = LinearSchedule(schedule_timesteps=1,
initial_p=1.0,
final_p=exploration_final_eps)
for t in range(max_timesteps):
if callback is not None:
if callback(locals(), globals()):
break
# Take action and update exploration to the newest value
kwargs = {}
if not param_noise:
update_eps = exploration.value(t)
update_param_noise_threshold = 0.
else:
update_eps = 0.
# Compute the threshold such that the KL divergence between perturbed and non-perturbed
# policy is comparable to eps-greedy exploration with eps = exploration.value(t).
# See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
# for detailed explanation.
update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n))
kwargs['reset'] = reset
kwargs['update_param_noise_threshold'] = update_param_noise_threshold
kwargs['update_param_noise_scale'] = True
action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
env_action = action
reset = False
new_obs, rew, done, _ = env.step(env_action)
# Store transition in the replay buffer.
replay_buffer.add(obs, action, rew, new_obs, float(done))
obs = new_obs
episode_rewards[-1] += rew
if done:
obs = env.reset()
episode_rewards.append(0.0)
reset = True
if t > learning_starts and t % train_freq == 0:
# Minimize the error in Bellman's equation on a batch sampled from replay buffer.
if prioritized_replay:
experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
(obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
else:
obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
weights, batch_idxes = np.ones_like(rewards), None
td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
if prioritized_replay:
new_priorities = np.abs(td_errors) + prioritized_replay_eps
replay_buffer.update_priorities(batch_idxes, new_priorities)
if t > learning_starts and t % target_network_update_freq == 0:
# Update target network periodically.
update_target()
mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
num_episodes = len(episode_rewards)
if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
logger.record_tabular("steps", t)
logger.record_tabular("episodes", num_episodes)
logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
logger.dump_tabular()
if (checkpoint_freq is not None and t > learning_starts and
num_episodes > 100 and t % checkpoint_freq == 0):
if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
if print_freq is not None:
logger.log("Saving model due to mean reward increase: {} -> {}".format(
saved_mean_reward, mean_100ep_reward))
save_state(model_file)
model_saved = True
saved_mean_reward = mean_100ep_reward
if model_saved:
if print_freq is not None:
logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
load_state(model_file)
return act
def train(env, args):
# Parameters from Wang et al (2016): https://arxiv.org/pdf/1511.06581.pdf
model = deepq.models.cnn_to_mlp(
convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],
hiddens=[512],
dueling=True,
)
ISO_TIMESTAMP = "%Y%m%d_%H%M%S"
timestamp = datetime.now().strftime(ISO_TIMESTAMP)
if args.out_dir is None:
out_dir = os.path.join('data', 'deepq', '{}-{}'.format(args.env, timestamp))
else:
out_dir = args.out_dir
os.makedirs(out_dir, exist_ok=True)
act = learn(
env,
q_func=model,
out_dir=out_dir,
lr=1e-4,
max_timesteps=args.num_timesteps,
buffer_size=10000,
exploration_fraction=0.1,
exploration_final_eps=0.01,
train_freq=4,
learning_starts=10000,
target_network_update_freq=1000,
gamma=0.99,
prioritized_replay=True,
)
act.save("{}/model.pkl".format(out_dir))
env.close()
def test(env, args):
act = ActWrapper.load(args.model)
while True:
obs, done = env.reset(), False
episode_rew = 0
delay = 1 / args.fps
target_next_frame = time.time()
while not done:
loop_start = time.time()
delta = target_next_frame - loop_start
if delta > 0:
time.sleep(delta)
env.render()
target_next_frame = loop_start + delay
obs, rew, done, _ = env.step(act(obs[None])[0])
episode_rew += rew
print("Episode reward", episode_rew)
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--env', help='environment ID',
default='SeaquestNoFrameskip-v4')
parser.add_argument('--seed', help='RNG seed', type=int, default=0)
subparsers = parser.add_subparsers(dest='subparser',
help='train model or test existing model')
train_parser = subparsers.add_parser('train')
# Mnih et al (2015) and other DeepMind work usually train for 200e6 frames,
# which is 50e6 time steps with 4 frameskip (introduced by wrapper in
# make_atari.)
train_parser.add_argument('--num-timesteps', type=int, default=int(50e6))
train_parser.add_argument('--out-dir', type=str, default=None,
help='checkpoint directory')
test_parser = subparsers.add_parser('test')
test_parser.add_argument('--fps', type=int, default=int(1e3))
test_parser.add_argument('model', help='model file', type=str)
args = parser.parse_args()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
logger.configure()
set_global_seeds(args.seed)
env = make_atari(args.env)
env = bench.Monitor(env, logger.get_dir())
env = deepq.wrap_atari_dqn(env)
if args.subparser == 'train':
train(env, args)
elif args.subparser == 'test':
test(env, args)
else:
assert False
if __name__ == '__main__':
main()
| [] |
2024-01-10 | HumanCompatibleAI/population-irl | pirl~irl~gail.py | '''Interface for GAIL from OpenAI Baselines.
Based on run_mujoco.py. MPI breaks inside of Ray, so I monkey patch to
force a dummy (sequential) version of MPI.
'''
import os.path as osp
# Force our dummy (sequential) version of MPI to be loaded
import sys
old_path = sys.path
sys.path = [osp.join(osp.dirname(__file__), 'dummy_mpi')] + old_path
import mpi4py # OK this is unused, imported only for side-effects
sys.path = old_path
import numpy as np
import tensorflow as tf
from baselines.common import tf_util
from baselines.gail import behavior_clone, mlp_policy, trpo_mpi
from baselines.gail.adversary import TransitionClassifier
from baselines.gail.dataset.mujoco_dset import Dset
from pirl.agents.sample import SampleMonitor
## IRL
def _make_dset(trajectories, randomize=True):
'''Return a Dset object containing observations and actions extracted
from trajectories. GAIL does not care about episode bounds, so
we concatenate together all trajectories, and optionally randomly
sample state-action pairs.'''
obs = np.concatenate([x[0] for x in trajectories])
acs = np.concatenate([x[1] for x in trajectories])
return Dset(obs, acs, randomize)
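# Added note: each entry of `trajectories` is expected to be a tuple whose first element
# is an array of observations and whose second element is the matching array of actions;
# any further elements (e.g. rewards) are ignored by _make_dset.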
def _policy_factory(policy_cfg):
'''Return a function to create policies.
WARNING: This function must be called ONCE per graph.'''
policy_kwargs = {
'hid_size': 100,
'num_hid_layers': 2,
}
if policy_cfg is not None:
policy_kwargs.update(policy_cfg)
# WORKAROUND: erase placeholder cache
# This is needed since we create policies in a fresh graph each time,
# so caching would result in tensors from different graphs!
tf_util._PLACEHOLDER_CACHE = {}
def policy_fn(name, ob_space, ac_space, reuse=False):
return mlp_policy.MlpPolicy(name=name, reuse=reuse,
ob_space=ob_space, ac_space=ac_space,
**policy_kwargs)
return policy_fn
def irl(env, trajectories, discount, seed, log_dir, *,
tf_cfg, policy_cfg=None, gan_cfg=None, train_cfg=None):
dataset = _make_dset(trajectories)
train_graph = tf.Graph()
with train_graph.as_default():
tf.set_random_seed(seed)
policy_fn = _policy_factory(policy_cfg)
gan_kwargs = {'hidden_size': 100}
if gan_cfg is not None:
gan_kwargs.update(gan_cfg)
reward_giver = TransitionClassifier(env, **gan_kwargs)
train_kwargs = {
'pretrained': False,
'BC_max_iter': 10000,
'g_step': 3, # number of steps to train policy in each epoch
'd_step': 1, # number of steps to train discriminator in each epoch
        'entcoeff': 0, # entropy coefficient of the policy
        'max_timesteps': 5e6, # total number of environment timesteps to train for
'timesteps_per_batch': 1024,
'max_kl': 0.01,
'cg_iters': 10,
'cg_damping': 0.1,
'lam': 0.97,
'vf_iters': 5,
'vf_stepsize': 1e-3,
}
if train_cfg is not None:
train_kwargs.update(train_cfg)
pretrained_weight = None
bc_max_iter = train_kwargs.pop('BC_max_iter')
if train_kwargs['pretrained']:
# Pretrain with behavior cloning
pretrained_weight = behavior_clone.learn(env, policy_fn, dataset,
max_iters=bc_max_iter)
ckpt_dir = osp.join(log_dir, 'checkpoints')
with tf.Session(config=tf_cfg) as sess:
trpo_mpi.learn(env, policy_fn, reward_giver, dataset, rank=0,
pretrained_weight=pretrained_weight,
ckpt_dir=ckpt_dir, log_dir=log_dir,
gamma=discount, save_per_iter=100,
task_name='gail', **train_kwargs)
policy_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'pi')
policy_serialised = sess.run(policy_vars)
return None, policy_serialised
def sample(env, policy_saved, num_episodes, seed, *, tf_cfg, policy_cfg=None):
env = SampleMonitor(env)
infer_graph = tf.Graph()
with infer_graph.as_default():
tf.set_random_seed(seed)
policy_fn = _policy_factory(policy_cfg)
policy = policy_fn('pi', env.observation_space, env.action_space,
reuse=False)
with tf.Session(config=tf_cfg) as sess:
# Deserialize policy
policy_vars = policy.get_variables()
restores = []
for p, loaded_p in zip(policy_vars, policy_saved):
restores.append(p.assign(loaded_p))
sess.run(restores)
# Policy rollout
completed = 0
ob = env.reset()
while completed < num_episodes:
# First argument to act determines if stochastic (sample)
# or deterministic (mode of policy)
a, vpred = policy.act(True, ob)
ob, _r, done, _info = env.step(a)
if done:
completed += 1
ob = env.reset()
return env.trajectories | [] |
2024-01-10 | mrshenli/machin | test~env~wrappers~test_openai_gym.py | """
Currently, openai gym offers a great range of environments and we try to
test as many kinds of them as we can (though not all); however, "robotics" and "mujoco"
require a license and we cannot test them, but theoretically they
should work just fine.
Please submit an issue if you find any problem.
"""
from machin.env.wrappers import openai_gym
from random import choice, sample
from colorlog import getLogger
import pytest
import gym
import numpy as np
logger = getLogger("default")
ENV_NUM = 2
SAMPLE_NUM = 2
WORKER_NUM = 2
def mock_action(action_space: gym.spaces.Space):
return action_space.sample()
def prepare_envs(env_list):
for env in env_list:
env.reset()
def should_skip(spec):
# From gym/envs/tests/spec_list.py
# Used to check whether a gym environment should be tested.
# We skip tests for envs that require dependencies or are otherwise
# troublesome to run frequently
ep = spec.entry_point
# No need to test unittest environments
if ep.startswith('gym.envs.unittest'):
return True
# Skip not renderable tests
if (ep.startswith('gym.envs.algorithmic') or
ep.startswith('gym.envs.toy_text')):
return True
# Skip mujoco tests
if (ep.startswith('gym.envs.mujoco') or
ep.startswith('gym.envs.robotics:')):
return True
try:
import atari_py
except ImportError:
if ep.startswith('gym.envs.atari'):
return True
try:
import Box2D
except ImportError:
if ep.startswith('gym.envs.box2d'):
return True
if ('GoEnv' in ep or
'HexEnv' in ep or
(ep.startswith("gym.envs.atari") and
not spec.id.startswith("Pong") and
not spec.id.startswith("Seaquest"))):
return True
return False
@pytest.fixture(scope="module", autouse=True)
def envs():
all_envs = []
env_map = {}
lg = getLogger(__file__)
# Find the newest version of non-skippable environments.
for env_raw_name, env_spec in gym.envs.registry.env_specs.items():
if not should_skip(env_spec):
env_name, env_version = env_raw_name.split("-v")
            if (env_name not in env_map
or int(env_version) > env_map[env_name]):
env_map[env_name] = int(env_version)
# Create environments.
for env_name, env_version in env_map.items():
env_name = env_name + "-v" + str(env_version)
lg.info("OpenAI gym {} added".format(env_name))
all_envs.append([lambda *_: gym.make(env_name)
for _ in range(ENV_NUM)])
lg.info("{} OpenAI gym environments to be tested."
.format(len(all_envs)))
return all_envs
class TestParallelWrapperDummy(object):
########################################################################
# Test for ParallelWrapperDummy.reset
########################################################################
param_test_reset = [
(None, ENV_NUM),
(choice(range(ENV_NUM)), 1),
(sample(range(ENV_NUM), SAMPLE_NUM), SAMPLE_NUM),
([_ for _ in range(ENV_NUM)], ENV_NUM),
]
@pytest.mark.parametrize("idx,reset_num", param_test_reset)
def test_reset(self, envs, idx, reset_num):
for env_list in envs:
dummy_wrapper = openai_gym.ParallelWrapperDummy(env_list)
obsrvs = dummy_wrapper.reset(idx)
dummy_wrapper.close()
assert len(obsrvs) == reset_num
for obsrv in obsrvs:
assert dummy_wrapper.observation_space.contains(obsrv), \
"Required observation form: {}, Actual observation: {}" \
.format(str(dummy_wrapper.observation_space), obsrv)
########################################################################
# Test for ParallelWrapperDummy.step
########################################################################
param_test_step = [
(None, ENV_NUM),
(choice(range(ENV_NUM)), 1),
(sample(range(ENV_NUM), SAMPLE_NUM), SAMPLE_NUM),
([_ for _ in range(ENV_NUM)], ENV_NUM),
]
@pytest.mark.parametrize("idx,act_num", param_test_step)
def test_step(self, envs, idx, act_num):
for env_list in envs:
dummy_wrapper = openai_gym.ParallelWrapperDummy(env_list)
action = [mock_action(dummy_wrapper.action_space)
for _ in range(act_num)]
dummy_wrapper.reset(idx)
obsrvs, reward, terminal, info = dummy_wrapper.step(action, idx)
dummy_wrapper.close()
assert len(obsrvs) == act_num
assert len(reward) == act_num and isinstance(reward, np.ndarray)
assert len(terminal) == act_num and isinstance(terminal, np.ndarray)
assert len(info) == act_num and isinstance(info[0], dict)
for obsrv in obsrvs:
assert dummy_wrapper.observation_space.contains(obsrv), \
"Required observation form: {}, Actual observation: {}" \
.format(str(dummy_wrapper.observation_space), obsrv)
########################################################################
# Test for ParallelWrapperDummy.seed
########################################################################
param_test_seed = [
None,
choice(range(ENV_NUM)),
sample(range(ENV_NUM), SAMPLE_NUM),
[_ for _ in range(ENV_NUM)]
]
@pytest.mark.parametrize("idx", param_test_seed)
def test_seed(self, envs, idx):
for env_list in envs:
dummy_wrapper = openai_gym.ParallelWrapperDummy(env_list)
seeds = dummy_wrapper.seed()
dummy_wrapper.close()
assert len(seeds) == ENV_NUM
########################################################################
# Test for ParallelWrapperDummy.render
########################################################################
param_test_render = [
(None, ENV_NUM),
(choice(range(ENV_NUM)), 1),
(sample(range(ENV_NUM), SAMPLE_NUM), SAMPLE_NUM),
([_ for _ in range(ENV_NUM)], ENV_NUM)
]
@pytest.mark.parametrize("idx,render_num", param_test_render)
def test_render(self, envs, idx, render_num):
for env_list in envs:
dummy_wrapper = openai_gym.ParallelWrapperDummy(env_list)
dummy_wrapper.reset(idx)
rendered = dummy_wrapper.render(idx)
dummy_wrapper.close()
assert len(rendered) == render_num
assert isinstance(rendered[0], np.ndarray)
assert rendered[0].ndim == 3 and rendered[0].shape[-1] == 3
########################################################################
# Test for ParallelWrapperDummy.close
########################################################################
def test_close(self, envs):
for env_list in envs:
dummy_wrapper = openai_gym.ParallelWrapperDummy(env_list)
dummy_wrapper.close()
########################################################################
# Test for ParallelWrapperDummy.active
########################################################################
def test_active(self, envs):
for env_list in envs:
dummy_wrapper = openai_gym.ParallelWrapperDummy(env_list)
dummy_wrapper.reset()
active = dummy_wrapper.active()
dummy_wrapper.close()
assert len(active) == ENV_NUM
########################################################################
# Test for ParallelWrapperDummy.size
########################################################################
def test_size(self, envs):
dummy_wrapper = openai_gym.ParallelWrapperDummy(envs[0])
assert dummy_wrapper.size() == ENV_NUM
dummy_wrapper.close()
class TestParallelWrapperSubProc(object):
########################################################################
# Test for ParallelWrapperSubProc.reset
########################################################################
param_test_reset = [
(None, ENV_NUM),
(choice(range(ENV_NUM)), 1),
(sample(range(ENV_NUM), SAMPLE_NUM), SAMPLE_NUM),
([_ for _ in range(ENV_NUM)], ENV_NUM),
]
@pytest.mark.parametrize("idx,reset_num", param_test_reset)
def test_reset(self, envs, idx, reset_num):
for env_list in envs:
subproc_wrapper = openai_gym.ParallelWrapperSubProc(env_list)
obsrvs = subproc_wrapper.reset(idx)
subproc_wrapper.close()
assert len(obsrvs) == reset_num
for obsrv in obsrvs:
assert subproc_wrapper.observation_space.contains(obsrv), \
"Required observation form: {}, Actual observation: {}" \
.format(str(subproc_wrapper.observation_space), obsrv)
########################################################################
# Test for ParallelWrapperSubProc.step
########################################################################
param_test_step = [
(None, ENV_NUM),
(choice(range(ENV_NUM)), 1),
(sample(range(ENV_NUM), SAMPLE_NUM), SAMPLE_NUM),
([_ for _ in range(ENV_NUM)], ENV_NUM),
]
@pytest.mark.parametrize("idx,act_num", param_test_step)
def test_step(self, envs, idx, act_num):
for env_list in envs:
subproc_wrapper = openai_gym.ParallelWrapperSubProc(env_list)
action = [mock_action(subproc_wrapper.action_space)
for _ in range(act_num)]
subproc_wrapper.reset(idx)
obsrvs, reward, terminal, info = subproc_wrapper.step(action, idx)
subproc_wrapper.close()
assert len(obsrvs) == act_num
assert len(reward) == act_num and isinstance(reward, np.ndarray)
assert len(terminal) == act_num and isinstance(terminal, np.ndarray)
assert len(info) == act_num and isinstance(info[0], dict)
for obsrv in obsrvs:
assert subproc_wrapper.observation_space.contains(obsrv), \
"Required observation form: {}, Actual observation: {}" \
.format(str(subproc_wrapper.observation_space), obsrv)
########################################################################
# Test for ParallelWrapperSubProc.seed
########################################################################
param_test_seed = [
None,
choice(range(ENV_NUM)),
sample(range(ENV_NUM), SAMPLE_NUM),
[_ for _ in range(ENV_NUM)]
]
@pytest.mark.parametrize("idx", param_test_seed)
def test_seed(self, envs, idx):
for env_list in envs:
subproc_wrapper = openai_gym.ParallelWrapperSubProc(env_list)
seeds = subproc_wrapper.seed()
subproc_wrapper.close()
assert len(seeds) == ENV_NUM
########################################################################
# Test for ParallelWrapperSubProc.render
########################################################################
param_test_render = [
(None, ENV_NUM),
(choice(range(ENV_NUM)), 1),
(sample(range(ENV_NUM), SAMPLE_NUM), SAMPLE_NUM),
([_ for _ in range(ENV_NUM)], ENV_NUM)
]
@pytest.mark.parametrize("idx,render_num", param_test_render)
def test_render(self, envs, idx, render_num):
for env_list in envs:
subproc_wrapper = openai_gym.ParallelWrapperSubProc(env_list)
subproc_wrapper.reset(idx)
rendered = subproc_wrapper.render(idx)
subproc_wrapper.close()
assert len(rendered) == render_num
assert isinstance(rendered[0], np.ndarray)
assert rendered[0].ndim == 3 and rendered[0].shape[-1] == 3
########################################################################
# Test for ParallelWrapperSubProc.close
########################################################################
def test_close(self, envs):
for env_list in envs:
subproc_wrapper = openai_gym.ParallelWrapperSubProc(env_list)
subproc_wrapper.close()
########################################################################
# Test for ParallelWrapperSubProc.active
########################################################################
def test_active(self, envs):
for env_list in envs:
subproc_wrapper = openai_gym.ParallelWrapperSubProc(env_list)
subproc_wrapper.reset()
active = subproc_wrapper.active()
subproc_wrapper.close()
assert len(active) == ENV_NUM
def test_size(self, envs):
subproc_wrapper = openai_gym.ParallelWrapperSubProc(envs[0])
assert subproc_wrapper.size() == ENV_NUM
subproc_wrapper.close()
| [] |
2024-01-10 | pabloivorra/EDEM_MDA2324 | Profesores~Python~Ejemplo%20IA%20Chat%20GPT~ejemplo_chatgpt.py | import os
import openai
# Configure the OpenAI API key
openai.api_key = '' # 'TODO: PUT YOUR CHATGPT API KEY HERE'
prompt = input('Redacta con detalle el resultado que quieres: ')
file_name = input('Indica el nombre del archivo donde se va a guardar: ')
file_extension = input('Indica la extensiรณn del archivo: .')
# Send the request to OpenAI
response = openai.Completion.create(
model="text-davinci-003",
prompt= prompt,
temperature=0.7,
max_tokens=900,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
with open(f'{file_name}.{file_extension}', 'w') as file:
file.write(response["choices"][0]["text"]) | [
"Redacta con detalle el resultado que quieres: "
] |
2024-01-10 | jovisly/RC-CreativeCoding | rest-tracker~nlp.py | from datetime import datetime
import os
from dotenv import load_dotenv
from openai import OpenAI
load_dotenv()
api_key = os.getenv("OPENAI_KEY")
# https://github.com/openai/openai-python/discussions/742
client = OpenAI(api_key=api_key)
FIRST_PROMPT = (
"You are a mindful bot that helps the user to get a lot of rest. " +
"You will ask the user when did they rest the last time, and the user will respond with when they rested last time, and what activity they did. " +
"If the user hasn't taken a break for a few hours, you would suggest the user to take a break. " +
"You will choose one and only of the following options: stretch, rest, nap, get a glass of water, meditate, talk to a friend, pet your cat or dog, go for a walk, or anything you can think of. " +
"If the user hasn't taken a break for more than 24 hours, you will tell the user to stop working immediately. " +
"Keep your response very short, just a sentence or two. " +
"And don't ask follow-up questions. Do not ask user if there's anything else you can help with. Just end the conversation."
)
JSON_PROMPT = (
"You are a data processing bot that will organize the user's rest data. " +
"The user will tell you when they last took a rest. " +
"Look for the following information about the user's rest: 'activity_name', 'activity_duration', and 'activity_time'." +
"You will return these three pieces of information as a JSON object. " +
"Don't ask any follow-up question, just do your best to interpret the user's response into the three pieces of information. " +
"If you don't know what the activity_duration is, make it 10 minutes by default. " +
"For activity_time, return a string like '2021-10-10 10:10:10'. And also you know the current time is " + datetime.now().strftime('%Y-%m-%d %H:%M:%S') +
"For activity_name, if you don't know what the activity is, return 'Unknown rest activity'. "
)
def json_answer(last_rest, model="gpt-3.5-turbo", max_tokens=800, stop_sequence=None):
messages = [
{"role": "system", "content": JSON_PROMPT},
{"role": "assistant", "content": "When did you last take a rest?"},
{"role": "user", "content": last_rest},
]
try:
# Create a completions using the question and context
response = client.chat.completions.create(
model=model,
messages=messages,
temperature=0.2,
max_tokens=max_tokens,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=stop_sequence,
)
return response.choices[0].message.content
except Exception as e:
print(e)
return ""
def first_answer(last_rest, model="gpt-3.5-turbo", max_tokens=800, stop_sequence=None):
messages = [
{"role": "system", "content": FIRST_PROMPT},
{"role": "assistant", "content": "When did you last take a rest?"},
{"role": "user", "content": last_rest},
]
try:
# Create a completions using the question and context
response = client.chat.completions.create(
model=model,
messages=messages,
temperature=0.2,
max_tokens=max_tokens,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=stop_sequence,
)
return response.choices[0].message.content
except Exception as e:
print(e)
return ""
| [
"Don't ask any follow-up question, just do your best to interpret the user's response into the three pieces of information. ",
"%Y-%m-%d %H:%M:%S",
"When did you last take a rest?",
"You will return these three pieces of information as a JSON object. ",
"Look for the following information about the user's rest: 'activity_name', 'activity_duration', and 'activity_time'.",
"You are a data processing bot that will organize the user's rest data. ",
"If you don't know what the activity_duration is, make it 10 minutes by default. ",
"For activity_name, if you don't know what the activity is, return 'Unknown rest activity'. ",
"The user will tell you when they last took a rest. ",
"You are a mindful bot that helps the user to get a lot of rest. You will ask the user when did they rest the last time, and the user will respond with when they rested last time, and what activity they did. If the user hasn't taken a break for a few hours, you would suggest the user to take a break. You will choose one and only of the following options: stretch, rest, nap, get a glass of water, meditate, talk to a friend, pet your cat or dog, go for a walk, or anything you can think of. If the user hasn't taken a break for more than 24 hours, you will tell the user to stop working immediately. Keep your response very short, just a sentence or two. And don't ask follow-up questions. Do not ask user if there's anything else you can help with. Just end the conversation.",
"For activity_time, return a string like '2021-10-10 10:10:10'. And also you know the current time is "
] |
2024-01-10 | karansag/phrase-analogies-large-vae | src~analogy.py | from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import glob
import logging
import os
import pickle
import random
import sys
import torch
import torch.nn.functional as F
import numpy as np
from torch.utils.data import (
DataLoader,
Dataset,
SequentialSampler,
RandomSampler,
TensorDataset,
)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
# TODO fix this to not manually append PYTHONPATH?
OPTIMUS_ROOT_PATH = os.path.abspath("../Optimus/code")
if OPTIMUS_ROOT_PATH not in sys.path:
sys.path.append(OPTIMUS_ROOT_PATH)
OPTIMUS_EXAMPLES_PATH = os.path.abspath("../Optimus/code/examples/big_ae")
if OPTIMUS_EXAMPLES_PATH not in sys.path:
sys.path.append(OPTIMUS_EXAMPLES_PATH)
from pytorch_transformers import (
GPT2Config,
OpenAIGPTConfig,
XLNetConfig,
TransfoXLConfig,
BertConfig,
)
from pytorch_transformers import GPT2LMHeadModel, GPT2Tokenizer, GPT2ForLatentConnector
from pytorch_transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
from pytorch_transformers import XLNetLMHeadModel, XLNetTokenizer
from pytorch_transformers import TransfoXLLMHeadModel, TransfoXLTokenizer
from pytorch_transformers import BertForLatentConnector, BertTokenizer
from collections import defaultdict, namedtuple
from modules import VAE
from utils import TextDataset_Split, TextDataset_2Tokenizers, BucketingDataLoader
import run_latent_generation as runl
from util import get_device
LATENT_SIZE_LARGE = 768
MODEL_CLASSES = {
"gpt2": (GPT2Config, GPT2ForLatentConnector, GPT2Tokenizer),
"bert": (BertConfig, BertForLatentConnector, BertTokenizer),
}
CODER_TYPE_TO_NAME = {"gpt2": "gpt2", "bert": "bert-base-cased"}
# Set this when running experiments
# TODO: make a parameter?
# OUTPUT_DIR = os.path.abspath("../../data/snli-b1/checkpoint-31250/")
# Set the output dir here. E.g.,
# export OUTPUT_DIR="/scratch/MYUSER/project-repo/pretrained_models/snli-b1/checkpoint-31250/"
OUTPUT_DIR = os.environ.get("OPTIMUS_CHECKPOINT_DIR")
def get_encoder(encoder_type="bert", output_encoder_dir="/tmp"):
if not OUTPUT_DIR:
raise Exception(
"OPTIMUS_CHECKPOINT_DIR environment varialbe is required for running analogies. Please see example in src here."
)
checkpoint_encoder_dir = os.path.join(OUTPUT_DIR, "checkpoint-encoder-31250")
encoder_config_class, encoder_model_class, encoder_tokenizer_class = MODEL_CLASSES[
encoder_type
]
model_encoder = encoder_model_class.from_pretrained(
checkpoint_encoder_dir, latent_size=LATENT_SIZE_LARGE
)
tokenizer_encoder = encoder_tokenizer_class.from_pretrained(
CODER_TYPE_TO_NAME[encoder_type], do_lower_case=True
)
model_encoder.to(get_device())
return {"tokenizer": tokenizer_encoder, "model": model_encoder}
def get_decoder(decoder_type="gpt2", output_decoder_dir="/tmp"):
if not OUTPUT_DIR:
raise Exception(
"OPTIMUS_CHECKPOINT_DIR environment varialbe is required for running analogies. Please see example in src here."
)
checkpoint_decoder_dir = os.path.join(OUTPUT_DIR, "checkpoint-decoder-31250")
decoder_config_class, decoder_model_class, decoder_tokenizer_class = MODEL_CLASSES[
decoder_type
]
model_decoder = decoder_model_class.from_pretrained(
checkpoint_decoder_dir, latent_size=LATENT_SIZE_LARGE
)
tokenizer_decoder = decoder_tokenizer_class.from_pretrained(
CODER_TYPE_TO_NAME[decoder_type], do_lower_case=True
)
model_decoder.to(get_device())
# Chunyuan: Add Padding token to GPT2
special_tokens_dict = {
"pad_token": "<PAD>",
"bos_token": "<BOS>",
"eos_token": "<EOS>",
}
num_added_toks = tokenizer_decoder.add_special_tokens(special_tokens_dict)
print("We have added", num_added_toks, "tokens to GPT2")
model_decoder.resize_token_embeddings(
len(tokenizer_decoder)
    ) # Notice: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
assert tokenizer_decoder.pad_token == "<PAD>"
return {"tokenizer": tokenizer_decoder, "model": model_decoder}
def get_vae(model_encoder, model_decoder, tokenizer_encoder, tokenizer_decoder, beta=1):
if not OUTPUT_DIR:
raise Exception(
"OPTIMUS_CHECKPOINT_DIR environment varialbe is required for running analogies. Please see example in src here."
)
ArgsObj = namedtuple("Args", ["latent_size", "device", "fb_mode", "beta"])
args = ArgsObj(
latent_size=LATENT_SIZE_LARGE, device=get_device(), fb_mode=0, beta=beta
)
checkpoint_full_dir = os.path.join(OUTPUT_DIR, "checkpoint-full-31250")
if not torch.cuda.is_available():
checkpoint = torch.load(
os.path.join(checkpoint_full_dir, "training.bin"), map_location="cpu"
)
else:
checkpoint = torch.load(os.path.join(checkpoint_full_dir, "training.bin"))
model_vae = VAE(
model_encoder, model_decoder, tokenizer_encoder, tokenizer_decoder, args
)
model_vae.load_state_dict(checkpoint["model_state_dict"])
# logger.info("Pre-trained Optimus is successfully loaded")
model_vae.to(args.device)
return model_vae
def eval_analogy(
model_vae,
tokenizer_encoder,
tokenizer_decoder,
a,
b,
c,
degree_to_target=1.0,
temperature=1.0,
top_k=0,
top_p=1.0,
):
"""
Usage:
r = get_encoder()
s = get_decoder()
v = v = get_vae(r['model'], s['model'], r['tokenizer'], s['tokenizer'])
result = eval_analogy(v, r['tokenizer'], s['tokenizer'], 'I saw a truck', 'I saw an automobile', 'I saw a dog', temperature=0.01, degree_to_target=1)
=> i saw an animal
"""
ArgsObj = namedtuple(
"Args",
[
"degree_to_target",
"device",
"sent_source",
"sent_target",
"sent_input",
"temperature",
"top_k",
"top_p",
],
)
args = ArgsObj(
degree_to_target=degree_to_target,
device=get_device(),
sent_source=a,
sent_target=b,
sent_input=c,
temperature=temperature,
top_k=top_k,
top_p=top_p,
)
return runl.analogy(model_vae, tokenizer_encoder, tokenizer_decoder, args)
| [] |
2024-01-10 | Mhaske1200/Gen-Z_Health_Minder | app01.py | import os
from apikey import apikey
import streamlit as st
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain, SequentialChain
from langchain.memory import ConversationBufferMemory
from langchain.utilities import WikipediaAPIWrapper
os.environ['OPENAI_API_KEY'] = apikey
# App framework
st.title('โฎ๏ธโป๏ธ Gen-Z Health Minder ๐๐ปโโ๏ธ๐ฅ')
st.title("Employee Mental Health Data")
st.write("Enter the following information:")
# Llms
llm = OpenAI(temperature=0.9)
def get_user_input():
age = st.number_input("Age", min_value=18, max_value=65, value=25)
gender = st.radio("Gender", ["Male", "Female", "Other"])
height = st.number_input("Height (in cm)", min_value=100, max_value=300, value=175)
weight = st.number_input("Weight (in kg)", min_value=10, max_value=500, value=82)
family_history = st.radio("Do you have a family history of mental illness?", ["Yes", "No"])
work_interfere = st.radio("If you have a mental health condition, do you feel that it interferes with your work?", ["Often", "Rarely" , "Never" , "Sometimes"])
remote_work = st.radio("Do you work remotely (outside of an office) at least 50% of the time?", ["Yes", "No"])
benefits = st.radio("Does Employer Provide Mental Health Benefits?", ["Yes", "No" , "Dont_Know"])
care_option = st.radio("Do you know the options for mental health care your employer provides?", ["Yes", "No" , "Dont_Know"])
wellness_program = st.radio("Has your employer ever discussed mental health as part of an employee wellness program?", ["Yes", "No" , "Dont_Know"])
seek_help = st.radio("Does your employer provide resources to learn more about mental health issues and how to seek help?", ["Yes", "No" , "Dont_Know"])
anonymity = st.radio("Is your anonymity protected if you choose to take advantage of mental health or substance abuse treatment resources?", ["Yes", "No" , "Dont_Know"])
mental_health_consequence = st.radio("Do you think that discussing a mental health issue with your employer would have negative consequences?", ["Yes", "No" , "Maybe"])
physical_health_consequence = st.radio("Do you think that discussing a physical health issue with your employer would have negative consequences?", ["Yes", "No" , "Maybe"])
coworkers = st.radio("Willingness to Discuss Mental Health with Coworkers?", ["Yes", "No"])
supervisor = st.radio("Willingness to Discuss Mental Health with Supervisor?", ["Yes", "No"])
mental_health_interview = st.radio("Would you bring up a mental health issue with a potential employer in an interview?", ["Yes", "No" , "Maybe"])
physical_health_interview = st.radio("Would you bring up a physical health issue with a potential employer in an interview?", ["Yes", "No" , "Maybe"])
mental_vs_physical = st.radio("Do you feel that your employer takes mental health as seriously as physical health?", ["Yes", "No" , "Dont Know"])
obs_consequence = st.radio("Observed Negative Consequences for Coworkers with Mental Health Conditions?",
["Yes", "No"])
# Return the user input as a dictionary
user_input = {
"Age": age,
"Gender": gender,
"Height": height,
"Weight": weight,
"Family History": family_history,
"Work Interfere": work_interfere,
"Remote Work": remote_work,
"Benefits": benefits,
"Care Option": care_option,
"Wellness Program": wellness_program,
"Seek Help": seek_help,
"Anonymity": anonymity,
"Mental Health Consequence": mental_health_consequence,
"Physical Health Consequence": physical_health_consequence,
"Coworkers": coworkers,
"Supervisor": supervisor,
"Mental Health Interview": mental_health_interview,
"Physical Health Interview": physical_health_interview,
"Mental Vs Physical": mental_vs_physical,
"Obs_consequence": obs_consequence
}
user_input_string = str(user_input) # Convert user_input dictionary to string
user_input_string += "\n\nAbove is employee data working in the organization.\nPlease calculate the BMI index and provide a week-wise diet plan, physical exercise plan, and mental health exercise plan for a month with daily 60 minutes."
#st.write(user_input_string)
return user_input
user_input = get_user_input()
generated_promp =""
#Display the collected user input
#st.subheader("User Input:")
for key, value in user_input.items():
#st.write(f"{key}: {value}")
generated_promp += f"{key}: {value}"
generated_promp += "\n"
generated_promp += "above is employee data working in organization\nplease calculate bmi index & provide week wise diet & physical & mental health exercise plan for a month for daily 60 min"
print(generated_promp)
if st.button("Submit"):
# Generate the response here
st.subheader("Response:")
#st.write(generated_promp)
response=llm(generated_promp)
st.write(response)
| [] |
2024-01-10 | kpyopark/looker_palm_integration | lookml_palm.py | import looker_sdk
import vertexai
import os
import json
from langchain.chat_models import ChatVertexAI
from langchain.llms import VertexAI
from langchain.output_parsers import PydanticOutputParser
from langchain.pydantic_v1 import BaseModel, Field, validator
from looker_sdk.sdk.api31 import models as ml
from typing import cast, Dict, List, Union
from vector_util import VectorDatabase
from langchain.embeddings import VertexAIEmbeddings
class LookerFilterValue(BaseModel):
field_name: str = Field(description="field_name")
values: List[str] = Field(description="values")
class LookerSortField(BaseModel):
field_name: str = Field(description="field_name")
direction: str = Field(description="direction")
class LookerQueryParameters(BaseModel):
dimensions: List[str] = Field(description="dimensions")
measures: List[str] = Field(description="measures")
parameters: List[str] = Field(description="parameters")
filters: List[LookerFilterValue] = Field(description="filters")
sorts: List[LookerSortField] = Field(description="sorts")
pivots: List[str] = Field(description="pivot - These fields are used as pivots in the chart.")
hidden_fields: List[str] = Field(description="hidden_fields - These fields are used as filters but are not shown in the chart.")
class LookerFilterRetrieves(BaseModel):
required_target: List[str] = Field(description="required_target")
class LookMaker():
def __init__(self, question):
self.question = question
self.llm = None
self.lookml_explore = None
self.lookml_model = None
self.fields = None
self.schema = None
self.related_fields = None
self.valid_filter_values = None
self.filter_dict:Dict[str, str] = {}
self.PROJECT_ID = os.getenv("PROJECT_ID") # @param {type:"string"}
self.location = "us-central1"
self.is_public_publishing = True
self.vdb = VectorDatabase()
self.init_llm()
self.init_sdk()
def init_llm(self):
vertexai.init(project=self.PROJECT_ID, location=self.location)
self.llm = VertexAI(
model_name="text-bison-32k",
max_output_tokens=8000,
temperature=0,
top_p=0.8,
top_k=40,
)
self.embeddings = VertexAIEmbeddings()
def init_sdk(self):
# instantiate sdk
#self.sdk = looker_sdk.init31()
self.sdk = looker_sdk.init40()
def write_navexplore_to_vdb(self, lookml_model):
for nav_explore in lookml_model.explores:
if(nav_explore.description is None):
continue
description = nav_explore.description
desc_vector = self.embeddings.embed_query(description)
self.vdb.insert_record(sql=None, parameters=None, description=description, explore_view=nav_explore.name, model_name=lookml_model.name, table_name=None, column_schema=None, desc_vector=desc_vector)
def write_all_models_to_vdb(self):
for lookml_model in self.sdk.all_lookml_models():
self.write_navexplore_to_vdb(lookml_model)
def choose_right_explore(self) -> None:
# self.lookml_explore = "national_pension_mom"
# self.lookml_model = "lookml_hol_sample"
test_embedding = self.embeddings.embed_query(self.question)
with self.vdb.get_connection() as conn:
try:
with conn.cursor() as cur:
select_record = (str(test_embedding).replace(' ',''),)
cur.execute(f"with tb_settings as (select %s::vector as compared_vec) SELECT model_name, explore_view, description, 1 - cosine_distance(desc_vector,compared_vec) as cosine_sim FROM rag_test, tb_settings where (1 - cosine_distance(desc_vector,compared_vec)) > 0.5 order by 4 desc limit 4", select_record)
result = cur.fetchone()
print(result[0], result[1], result[2], result[3])
self.lookml_model = result[0]
self.lookml_explore = result[1]
except Exception as e:
print(e)
return None, None
def get_schema_for_the_explore(self) -> None:
print(self.lookml_model + ":" + self.lookml_explore)
# API Call to pull in metadata about fields in a particular explore
explore = self.sdk.lookml_model_explore(
lookml_model_name=self.lookml_model,
explore_name=self.lookml_explore,
fields="id, name, description, fields",
)
my_fields = []
# Iterate through the field definitions and pull in the description, sql,
# and other looker tags you might want to include in your data dictionary.
if explore.fields and explore.fields.dimensions:
for dimension in explore.fields.dimensions:
dim_def = {
"field_type": "Dimension",
"view_name": dimension.view_label,
"field_name": dimension.name,
"type": dimension.type,
"description": dimension.description,
#"sql": dimension.sql,
}
my_fields.append(dim_def)
if explore.fields and explore.fields.measures:
for measure in explore.fields.measures:
mes_def = {
"field_type": "Measure",
"view_name": measure.view_label,
"field_name": measure.name,
"type": measure.type,
"description": measure.description,
#"sql": measure.sql,
}
my_fields.append(mes_def)
if explore.fields and explore.fields.parameters:
for parameter in explore.fields.parameters:
par_def = {
"field_type": "Parameter",
"view_name": parameter.view_label,
"field_name": parameter.name,
"default_filter_value": parameter.default_filter_value,
"type": parameter.type,
"description": parameter.description,
#"sql": parameter.sql,
}
my_fields.append(par_def)
self.schema = my_fields
def get_field_type(self, field_name) -> str:
for field in self.schema:
if field['field_name'] == field_name:
return field['type']
def parse_llm_reponse_to_fields_object(self, response) -> LookerQueryParameters:
parser = PydanticOutputParser(pydantic_object=LookerQueryParameters)
return parser.parse(response)
def choose_related_fields(self) -> None:
sample_json = """
{
"dimensions": [
"dimension1",
],
"measures": [
"measure1",
],
"filters": [
{
"field_name": "field_name1",
"values": [
"value1"
]
}
],
"sorts": [
{
"field_name": "field_name1",
"direction": "asc"
}
],
"parameters": [
"param1",
],
"pivots": [
"field1"
],
"hidden_fields": [
"field1"
]
}
"""
prompt_template = """As a looker developer, choose right dimesions and measures for the question below.
You should choose right fields as least as possible and sort fields must be choosen in the dimension fields.
fields : {fields}
question: {question}
answer format: json
{sample_json}
"""
response = self.llm.predict(prompt_template.format(fields=self.schema, question=self.question, sample_json=sample_json))
self.related_fields = self.parse_llm_reponse_to_fields_object(response)
def parse_llm_response_to_retreive_target_filters(self, retrieve_target_filters) -> LookerFilterRetrieves:
parser = PydanticOutputParser(pydantic_object=LookerFilterRetrieves)
return parser.parse(retrieve_target_filters)
def get_user_input_value_for_filter_field(self, field_name) -> str:
for filter in self.related_fields.filters:
if filter.field_name == field_name:
return filter.values
return ""
def decide_to_retrieve_values_for_the_filters(self) -> None:
# output_sample = """
# {
# "required_target": ["field1","field2"]
# }
# """
# prompt_template = """As a looker developer, decide whether to retrieve values for the filters below.
# For example, date / timestamp columns don't need to retrieve values. but string columns need to retrieve values from the database.
# filter fields : {filter_fields}
# output sample : json array
# {output_sample}
# """
#response = self.llm.predict(prompt_template.format(filter_fields=self.related_fields.filters, output_sample=output_sample))
#self.retrieve_target_filters = self.parse_llm_response_to_retreive_target_filters(response)
required_target = []
for filter in self.related_fields.filters:
field_type = self.get_field_type(filter.field_name)
# if field_type == 'string':
required_target.append(filter.field_name)
self.retrieve_target_filters = LookerFilterRetrieves(required_target=required_target)
# def get_value_list_from_json_array(self, json_array):
# values = []
# for json_object in json_array:
# print(json_object)
# values.append(list(json_object.values())[0])
# return values
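# For each retrieval-target filter, run a one-field Looker query that returns the
# values present in the explore, so the user's wording can later be mapped onto
# values that actually exist.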
def get_validated_filter_values_from_looker(self) -> None:
choose_right_filter_value_list = []
for retrieve_target_filter in self.retrieve_target_filters.required_target:
#print(retrieve_target_filter)
query_template = ml.WriteQuery(model=self.lookml_model, view=self.lookml_explore,fields=[retrieve_target_filter])
query = self.sdk.create_query(query_template)
# json_object = json.loads(self.sdk.run_query(query.id, "json"))
# choose_right_filter_value_list.append({ retrieve_target_filter : self.get_value_list_from_json_array(json_object)})
choose_right_filter_value_list.append({ retrieve_target_filter : self.sdk.run_query(query.id, "json")})
self.retrieve_filter_and_values = choose_right_filter_value_list
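# Extract the first JSON object or array embedded in an LLM response and parse it;
# returns None when the extracted substring cannot be parsed.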
def parse_json_response(self, llm_json_response) -> any:
parsed_json = None
json_data = ""
try:
start_char = '['
end_char = ']'
if llm_json_response.find('[') == -1 or llm_json_response.find('{') < llm_json_response.find('[') :
start_char = '{'
end_char = '}'
start_index = llm_json_response.find(start_char)
end_index = llm_json_response.rfind(end_char)
json_data = llm_json_response[start_index:end_index+1]
json_data = json_data.replace('\\n', '')
parsed_json = json.loads(json_data)
except Exception as ex:
print(ex)
print("json parse error: " + json_data)
return parsed_json
def choose_right_filter_value(self, filter_values, wanted_value) -> any:
example_json = "[{\"national_pension_mom.data_create_yearmonth_year\":2022}]"
prompt_template = """As a looker developer, choose right filter value for the wanted value below without changing filter value itself.
example :
{example_json}
filter_values : {filter_values}
wanted_values: {wanted_value}
answer format: json array
"""
response = self.llm.predict(prompt_template.format(example_json=example_json,filter_values=filter_values, wanted_value=wanted_value))
print("Choose Right Filter Value:" + response)
return self.parse_json_response(response)
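# Match each user-supplied filter value against the values retrieved from Looker,
# keeping only values that really occur in the data.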
def get_appropriate_filter_value_pair(self) -> None:
self.valid_filter_values = []
for filter_and_values in self.retrieve_filter_and_values:
field_name = list(filter_and_values.keys())[0]
user_input_value = self.get_user_input_value_for_filter_field(field_name)
value_object = self.choose_right_filter_value(filter_and_values, user_input_value)
self.valid_filter_values.append(value_object)
filter_and_values[field_name] = value_object
def get_quoted_value(self, field_name) -> str:
values = []
for filter_values in self.valid_filter_values:
print(filter_values)
for filter_value in filter_values:
field_name_cmp = list(filter_value.keys())[0]
field_value = list(filter_value.values())[0]
field_type = self.get_field_type(field_name)
if field_name_cmp == field_name:
if field_type == 'string':
values.append(field_value)
else:
values.append(str(field_value))
return ','.join(values)
def get_lookml_filter_dictionary(self) -> None:
self.filter_dict:Dict[str, str] = {}
for filter in self.related_fields.filters:
field_name = filter.field_name
quoted_values = self.get_quoted_value(field_name)
filter.values = quoted_values
self.filter_dict[field_name] = quoted_values
def make_dimension_and_description_pair(self) -> List[str]:
dimension_and_description_pair = []
for one_dimension in self.related_fields.dimensions:
for dimension in self.schema:
if dimension['field_name'] == one_dimension:
dimension_and_description_pair.append((one_dimension, dimension['description']))
return dimension_and_description_pair
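# Ask the LLM to pick a Looker visualization type plus pivot and hidden fields,
# given the chosen dimensions and their descriptions.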
def choose_chart_type_and_pivots(self) -> any:
dimension_and_description_pair = self.make_dimension_and_description_pair()
sample_json = """{
"chart_type": "looker_column",
"date_time_dimensions": ["dimension1"],
"pivots": [
"field1"
],
"hidden_fields": [
"field1"
],
"reason_to_choose": "I choose field1 as a pivot field because ..."
}"""
prompt_template = """As a looker developer, choose chart type and pivot fields and hidden fields in the given dimensions for the question below.
Pivot field is a field that is used to create a pivot table. A pivot field converts category values in the field to columns so that you can compare different category values.
For example, if you have sales data, you can compare sales by product by setting the "Product" field as a pivot field. Date/time fields MUST not be used as pivot fields.
Hidden field is a field that is not displayed in a chart. Hidden fields are used to hide fields that are not needed in the chart or that can confuse users.
For example, the "Product ID" field can be used to identify products, but it does not need to be displayed in a chart. If there are two same date fields, one of them can be hidden.
At least one dimension field must be a visible field that is not used in pivot fields or hidden fields.
chart_types :
looker_column - Column charts are useful when you want to compare the values of multiple fields(under 3~4 categories) for multiple records. It needs one main field to show the values separated by the main field. And this field must not be a pivot field.
looker_line - Line charts are useful when you want to show the changes in a value over time. They are also useful for comparing the changes in many categories over time.
looker_area - Area charts are useful when you want to compare the trends of two or more values over time. They are also useful for showing the cumulative sum of values over time.
looker_funnel - Funnel charts are useful to understand events in a sequential process, like prospect stages in a sales pipeline, engagement with a marketing campaign, or visitor movement through a website.
looker_pie - Pie charts are useful when you want to show the proportion of values to the total value. They are also useful for comparing the proportional differences between values. Pivot fields are not allowed.
looker_timeline - Timeline charts are useful when you want to show events over time. They are also useful for showing the duration of events. It needs at least 3 fields. 1. Event Name 2. Start Date 3. End Date
looker_table - Table charts are useful when you want to show the values of multiple fields for multiple records. They are also useful for showing the values of multiple fields for a single record.
dimensions :
{dimensions}
question:
{question}
answer format: json
{sample_json}
"""
prompt_full = prompt_template.format(dimensions=dimension_and_description_pair, question=self.question, sample_json=sample_json)
response = self.llm.predict(prompt_full)
return self.parse_json_response(response)
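# End-to-end pipeline: pick an explore, load its schema, have the LLM choose fields
# and filters, validate the filter values against Looker data, then build and run a
# query with the selected visualization settings.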
def make_query(self):
self.choose_right_explore()
self.get_schema_for_the_explore()
self.choose_related_fields()
self.decide_to_retrieve_values_for_the_filters()
self.get_validated_filter_values_from_looker()
self.get_appropriate_filter_value_pair()
self.get_lookml_filter_dictionary()
fields = []
fields.extend(self.related_fields.dimensions)
fields.extend(self.related_fields.measures)
filters = self.filter_dict
chart_type_and_pivots = self.choose_chart_type_and_pivots()
hidden_fields = chart_type_and_pivots['hidden_fields']
pivot_fields = chart_type_and_pivots['pivots']
chart_type = chart_type_and_pivots['chart_type']
print('fields:' + str(fields))
print('filters:' + str(filters))
print('hidden_fields:' + str(hidden_fields))
print('pivot_fields:' + str(pivot_fields))
print('chart_type:' + str(chart_type))
query_template = ml.WriteQuery(model=self.lookml_model, view=self.lookml_explore, fields=fields, filters=filters, pivots=pivot_fields, query_timezone="Asia/Seoul", vis_config={'type':chart_type, 'hidden_fields':hidden_fields})
query = self.sdk.create_query(query_template)
run_response = self.sdk.run_inline_query("json", query)
print('query.id:' + str(query.id))
self.query = query
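# Build the query, then reuse an existing Look with the same query or title if one
# exists; otherwise save a new Look in the user's personal folder.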
def make_look(self):
self.make_query()
generated_query = self.query
existing_look = self.sdk.search_looks(query_id=generated_query.id)
if len(existing_look) > 0:
return existing_look[0]
existing_look = self.sdk.search_looks(title=self.question)
if len(existing_look) > 0:
return existing_look[0]
new_look = self.sdk.create_look(ml.WriteLookWithQuery(query_id=generated_query.id,
description=self.question,
deleted=False,
is_run_on_load=True,
public=self.is_public_publishing,
folder_id=str(self.sdk.me().personal_folder_id),
title=self.question+"4"))
return new_look
if __name__ == "__main__":
maker = LookMaker("Show the average national pension payment amount for the healthcare industry from May 2022 to June 2022 as a monthly graph")
look = maker.make_look()
print(look.id)
print(look.short_url)
print(look.public_url)
| [
"As a looker developer, choose chart type and pivot fields and hidden fields in the given dimensions for the question below. \n Pivot field is a field that is used to create a pivot table. A pivot field converts category values in the field to columns so that you can compare different category values. \n For example, if you have sales data, you can compare sales by product by setting the \"Product\" field as a pivot field. Date/time fields MUST not be used as pivot fields.\n Hidden field is a field that is not displayed in a chart. Hidden fields are used to hide fields that are not needed in the chart or that can confuse users. \n For example, the \"Product ID\" field can be used to identify products, but it does not need to be displayed in a chart. If there are two same date fields, one of them can be hidden. \n At least one dimension field must be a visible field that is not used in pivot fields or hidden fields.\n\n chart_types : \n looker_column - Column charts are useful when you want to compare the values of multiple fields(under 3~4 categories) for multiple records. It needs one main field to show the values separated by the main field. And this field must not be a pivot field.\n looker_line - Line charts are useful when you want to show the changes in a value over time. They are also useful for comparing the changes in many categories over time.\n looker_area - Area charts are useful when you want to compare the trends of two or more values over time. They are also useful for showing the cumulative sum of values over time.\n looker_funnel - Funnel charts are useful to understand events in a sequential process, like prospect stages in a sales pipeline, engagement with a marketing campaign, or visitor movement through a website.\n looker_pie - Pie charts are useful when you want to show the proportion of values to the total value. They are also useful for comparing the proportional differences between values. Pivot fields are not allowed.\n looker_timeline - Timeline charts are useful when you want to show events over time. They are also useful for showing the duration of events. It needs at least 3 fields. 1. Event Name 2. Start Date 3. End Date\n looker_table - Table charts are useful when you want to show the values of multiple fields for multiple records. They are also useful for showing the values of multiple fields for a single record.\n\n dimensions : \n {dimensions}\n\n question:\n {question}\n\n answer format: json\n {sample_json}\n ",
"As a looker developer, choose right filter value for the wanted value below without changing filter value itself.\n\n example :\n {example_json}\n\n filter_values : {filter_values}\n\n wanted_values: {wanted_value}\n\n answer format: json array\n ",
"As a looker developer, choose right dimesions and measures for the question below. \n You should choose right fields as least as possible and sort fields must be choosen in the dimension fields.\n\n fields : {fields}\n\n question: {question}\n\n answer format: json\n {sample_json}\n ",
"Asia/Seoul",
"hidden_fields"
] |
2024-01-10 | kpyopark/looker_palm_integration | sql_converter.py | from abc import *
from enum import Enum
from typing import Callable
from parsors import parse_json_response, parse_python_object
import vertexai
from langchain.chat_models import ChatVertexAI
from langchain.llms import VertexAI
import os
from langchain.embeddings import VertexAIEmbeddings
from google.cloud import bigquery
from vector_util import VectorDatabase
from lookml_palm import LookMaker
PREPARED_STATEMENT_PARAMETER_CHAR_BIGQUERY = '?'
PREPARED_STATEMENT_PARAMETER_CHAR_OTHERS = '%s'
PREPARED_STATEMENT_PARAMETER_CHAR = PREPARED_STATEMENT_PARAMETER_CHAR_BIGQUERY
PROJECT_ID = os.getenv("PROJECT_ID") # @param {type:"string"}
vertexai.init(project=PROJECT_ID, location="us-central1")
llm_vertex = VertexAI(
#model_name="text-bison@latest",
model_name="text-bison-32k",
max_output_tokens=8000,
temperature=0,
top_p=0.8,
top_k=40,
)
# TODO : Change below routine before to use in production
llm = llm_vertex
embeddings = VertexAIEmbeddings()
client = bigquery.Client()
sample_dataset_id = 'bigquery-public-data.thelook_ecommerce'
## TODO
class SqlConverterEventType(Enum):
INIT = 0
PROGRESSING = 1
FINISHED = 2
NEED_MORE_INFO = 3
ERROR = 5
VALIDATING = 6
class SqlConverterResultType(Enum):
SQL = 0
RESULT_SET = 1
DASHBOARD = 2
LOOK = 3
class SqlConverterResult:
def __init__(self, result_type : SqlConverterResultType, converted_sql : str, result_set : any, dashboard_url : str, look_url : str):
self.result_type = result_type
self.result_set = result_set
self.converted_sql = converted_sql
self.dashboard_url = dashboard_url
self.look_url = look_url
def get_converted_sql(self):
return self.converted_sql
def get_look_url(self):
return self.look_url
def get_dashboard_url(self):
return self.dashboard_url
def get_result_set(self):
return self.result_set
conversation_maps = {}
def get_sql_converter(conversation_id, question):
if conversation_id not in conversation_maps:
conversation_maps[conversation_id] = SqlConverterFactory(question).get_sql_converter()
return conversation_maps[conversation_id]
class SqlConverterFactory:
def __init__(self, question):
self.question = question
def get_sql_converter(self):
return DirectSqlConverter(self.question)
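# Crawlers populate the vector database with schema / explore metadata that the
# converters later retrieve by embedding similarity.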
class SqlCrawler(ABC):
def __init__(self, properties):
self.properties = properties
self.vdb = VectorDatabase()
def truncate_table(self):
if self.vdb is not None:
self.vdb.truncate_table()
@abstractmethod
def crawl(self):
pass
class BigQuerySchemaScrawler(SqlCrawler):
# properties = {'dataset_id' : 'bigquery-public-data.thelook_ecommerce'}
def __init__(self, properties):
super().__init__(properties)
def crawl(self):
table_schemas = self.crawl_table_schemas()
enriched_table_schemas = self.enrich_table_schemas(table_schemas)
self.write_schema_to_vdb(enriched_table_schemas)
def crawl_table_schemas(self):
dataset_id = self.properties['dataset_id']
tables = client.list_tables(dataset_id)
table_schemas = []
for table in tables:
table_id = f"{dataset_id}.{table.table_id}"
table_schema = client.get_table(table).schema
table_schemas.append({'table_id': table_id, 'table_schema': table_schema})
return table_schemas
def enrich_schema_information(self, table_name, table_schema):
sample_json = """
{
"table_name" : "bigquery-public-data.thelook_ecommerce.orders",
"table_description" : "Orders placed by customers on the Look, an online store that sells clothing, shoes, and other items.",
"columns" : [
{
"column_name" : "order_id",
"column_description" : "A unique identifier for the order. This is populated when an order is created.",
"column_type" : "INT64"
}
]
}
"""
prompt_template = """You are a Looker Developer, enrich the schema information for the table {table_name} with the following information:
table_name :
{table_name}
table_column_schema :
{table_column_schema}
output_json :
{sample_json}
"""
prompt = prompt_template.format(table_name=table_name, table_column_schema=table_schema, sample_json=sample_json)
response = llm.predict(prompt)
return response
def enrich_table_schemas(self, table_schemas):
results = []
for table_schema in table_schemas:
table_name = table_schema['table_id']
one_table_schema = table_schema['table_schema']
response = self.enrich_schema_information(table_name, one_table_schema)
results.append(parse_json_response(response))
return results
def write_schema_to_vdb(self, enriched_table_schemas):
if enriched_table_schemas is None or len(enriched_table_schemas) == 0:
return
for enriched_table_schema in enriched_table_schemas:
description = enriched_table_schema['table_description']
desc_vector = embeddings.embed_query(description)
self.vdb.insert_record(sql=None, parameters=None, description=description, explore_view=None, model_name=None, table_name=str(enriched_table_schema['table_name']), column_schema=str(enriched_table_schema['columns']), desc_vector=desc_vector)
class LookerNavExplorerCrawer(SqlCrawler):
def __init__(self, properties):
super().__init__(properties)
def crawl(self):
lookml_maker = LookMaker("")
lookml_maker.write_all_models_to_vdb()
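# Base class for question-to-SQL converters: keeps the conversation history, reports
# progress through registered callbacks, and exposes the final result.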
class SqlConverter(metaclass=ABCMeta):
def __init__(self, question):
self.callbacks = []
self.converted_sql = ""
self.filters = []
self.question_history = []
self.question_history.append(question)
self.last_event = SqlConverterEventType.INIT
self.result = None
self.vdb = VectorDatabase()
def register_callback(self, callback : Callable[[SqlConverterEventType, str], any]):
if callback not in self.callbacks:
print("callback registered: " + str(callback))
self.callbacks.append(callback)
else:
print("callback already registered, skipping: " + str(callback))
def invoke_callback(self, event_type : SqlConverterEventType, message : str):
self.last_event = event_type
for callback in self.callbacks:
callback(event_type, message)
def set_result(self, result_type, converted_sql, result_set, dashboard_url, look_url):
self.result = SqlConverterResult(result_type, converted_sql, result_set, dashboard_url, look_url)
def get_result(self):
return self.result
def suggest_additional_information(self, message : str):
self.question_history.append(message)
def get_field_unique_values(self, matched_table, matched_field):
if matched_table[0] != '`' :
matched_table = '`' + matched_table + '`'
sql_query = f"with distinct_values as ( select distinct {matched_field} as {matched_field} from {matched_table} ) select {matched_field}, (select count(1) from distinct_values) as total_count from distinct_values limit 500"
df = client.query(sql_query).to_dataframe()
return df[matched_field].tolist(), df['total_count'][0]
@abstractmethod
def try_convert(self):
pass
def convert(self):
self.invoke_callback(SqlConverterEventType.PROGRESSING, "Converting SQL...")
try:
event_type, message = self.try_convert()
if event_type == SqlConverterEventType.NEED_MORE_INFO:
self.invoke_callback(SqlConverterEventType.NEED_MORE_INFO, message)
return
self.invoke_callback(SqlConverterEventType.FINISHED, "Finished")
except Exception as e:
print(e)
self.invoke_callback(SqlConverterEventType.ERROR, str(e))
class DirectSqlConverter(SqlConverter):
def __init__(self, question):
super().__init__(question=question)
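# Conversion pipeline: find related table schemas in the vector store, generate SQL
# with the LLM, turn it into a prepared statement, ground the filter values against
# real column values in BigQuery, and execute the query.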
def try_convert(self):
self.invoke_callback(SqlConverterEventType.PROGRESSING, "Finding related tables...")
self.related_tables = self.get_related_tables()
self.invoke_callback(SqlConverterEventType.PROGRESSING, "Generating SQL with schema...")
self.converted_sql = self.convert_sql_with_schemas()
self.invoke_callback(SqlConverterEventType.PROGRESSING, "Extracting filter values...")
self.sql_and_filters = self.extract_filter_columns()
self.invoke_callback(SqlConverterEventType.PROGRESSING, "Adjusting filter values...")
self.adjust_filter_value(self.sql_and_filters['filter_columns'])
self.invoke_callback(SqlConverterEventType.PROGRESSING, "Replacing filter values...")
self.choose_right_one_value_from_adjusted_values()
self.invoke_callback(SqlConverterEventType.PROGRESSING, "Executing the query...")
df_result = self.prepared_statement_with_filter_values_in_bigquery()
self.set_result(SqlConverterResultType.RESULT_SET, self.converted_sql, df_result, None, None)
return SqlConverterEventType.FINISHED, "Success"
def get_formatted_schema(self, table_name, table_description, column_schema):
#column_schema_template = """ {table_name}.{column_name} {column_type} # {column_description}"""
column_schema_template = """ {column_name} {column_type} """
table_schema_template = """ * table name : {table_name} REMARKS '{table_description}'
* columns :
(
{column_schema}
)
"""
column_schema_list = []
for column in parse_python_object(column_schema):
#column_schema_list.append(column_schema_template.format(table_name=table_name,column_name=column['column_name'], column_description=column['column_description'], column_type=column['column_type']))
column_schema_list.append(column_schema_template.format(column_name=column['column_name'], column_type=column['column_type']))
column_schema_str = "\n".join(column_schema_list)
return table_schema_template.format(table_name=table_name, table_description=table_description, column_schema=column_schema_str)
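# Embed the conversation and fetch table schemas whose description vectors have a
# cosine similarity above 0.5 to the question (pgvector's <=> distance operator).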
def get_related_tables(self):
test_embedding = embeddings.embed_query(str(self.question_history))
results = []
with self.vdb.get_connection() as conn:
try:
with conn.cursor() as cur:
select_record = (str(test_embedding).replace(' ',''),)
cur.execute(f"SELECT table_name, description, column_schema FROM rag_test where (1 - (desc_vector <=> %s)) > 0.5 ", select_record)
results = cur.fetchall()
#print(results)
except Exception as e:
print(e)
raise e
return results
def convert_sql_with_schemas(self):
prompt_template = """You are a Developer, convert the following question into SQL with the given schema information:
related_schemas :
{related_tables}
question :
{question}
output: SQL
"""
related_table_list = []
for related_table in self.related_tables:
related_table_list.append(self.get_formatted_schema(related_table[0], related_table[1], related_table[2]))
related_table_str = "\n".join(related_table_list)
#print(related_table_str)
prompt = prompt_template.format(related_tables=related_table_str, question=str(self.question_history))
#print(prompt)
response = llm.predict(prompt)
#print(response)
return response
def extract_filter_columns(self):
sample_json = """
{
"prepared_statement" : "select * from `bigquery-public-data.thelook_ecommerce.delivery` where created_at between ? and ?",
"filter_columns" : [
{
"table_name" : "bigquery-public-data.thelook_ecommerce.delivery",
"column_name" : "created_at",
"column_type" : "TIMESTAMP",
"operator" : "between",
"filter_names" : ["created_at_start", "created_at_end"],
"filter_values" : ["2020-01-01", "2020-01-02"],
"filter_order" : 1
}
]
}
"""
prompt_template = """You are a looker developer, extract the filter columns and change the given sql into prepared statement in JSON format. Please don't suggest python code. Give me a json output as the given output example format.:
output format : json
{sample_json}
----------------------------------------------
sql :
{sql}
related_tables :
{related_tables}
"""
prompt = prompt_template.format(sql=self.converted_sql, parameter_char=PREPARED_STATEMENT_PARAMETER_CHAR, related_tables=self.related_tables, sample_json=sample_json)
response = llm.predict(prompt)
return parse_json_response(response)
def choose_right_filter_value(self, filter_values, wanted_value):
prompt_template = """As a looker developer, choose right filter value for the wanted value below without changing filter value itself.
filter_values : {filter_values}
wanted_values: {wanted_value}
answer format: python list
[filter_value1, filter_value2, ...]
"""
prompt = prompt_template.format(filter_values=filter_values, wanted_value=wanted_value)
response = llm.predict(prompt)
return response
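# For each filter column, pull up to 500 distinct values from BigQuery; when there
# are fewer than 500, ask the LLM to map the requested values onto actual ones,
# otherwise keep the extracted values as-is.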
def adjust_filter_value(self, filter_columns):
for filter in filter_columns:
matched_table = filter['table_name']
matched_field = filter['column_name']
filter['unique_values'], filter['unique_count'] = self.get_field_unique_values(matched_table, matched_field)
# TODO: if unique_count < 500, then choose right filter value in the unique value list.
if filter['unique_count'] < 500:
response = self.choose_right_filter_value(filter['unique_values'], filter['filter_values'])
print(response)
if response.strip().find("```json") == 0 :
filter['adjust_filter_values'] = parse_json_response(response)
else:
filter['adjust_filter_values'] = parse_python_object(response)
else:
filter['adjust_filter_values'] = filter['filter_values']
SINGLE_OPERATORS = ['=', '>', '<', '>=', '<=', '!=', '<>']
def choose_right_one_value_from_adjusted_values(self):
for filter in self.sql_and_filters['filter_columns']:
if filter['operator'] in DirectSqlConverter.SINGLE_OPERATORS :
filter['adjust_filter_values'] = [filter['adjust_filter_values'][0]]
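# Bind the adjusted filter values as positional BigQuery query parameters (array
# parameters for multi-value filters, scalar otherwise) and run the prepared statement.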
def prepared_statement_with_filter_values_in_bigquery(self):
prepared_statement = self.sql_and_filters['prepared_statement']
query_parameters = []
for filter_column in self.sql_and_filters['filter_columns']:
if len(filter_column['adjust_filter_values']) > 1:
if(filter_column['column_type'] == 'FLOAT64'):
query_parameters.append(bigquery.ArrayQueryParameter(None, "FLOAT64", filter_column['adjust_filter_values']))
elif(filter_column['column_type'] == 'INT64'):
query_parameters.append(bigquery.ArrayQueryParameter(None, "INT64", filter_column['adjust_filter_values']))
else:
query_parameters.append(bigquery.ArrayQueryParameter(None, "STRING", filter_column['adjust_filter_values']))
else:
if(filter_column['column_type'] == 'FLOAT64'):
query_parameters.append(bigquery.ScalarQueryParameter(None, "FLOAT64", filter_column['adjust_filter_values'][0]))
elif(filter_column['column_type'] == 'INT64'):
query_parameters.append(bigquery.ScalarQueryParameter(None, "INT64", filter_column['adjust_filter_values'][0]))
else:
query_parameters.append(bigquery.ScalarQueryParameter(None, "STRING", filter_column['adjust_filter_values'][0]))
job_config = bigquery.QueryJobConfig(
query_parameters=query_parameters
)
print(prepared_statement)
query_job = client.query(prepared_statement, job_config=job_config)
return query_job.to_dataframe()
def dummy_callback(event_type : SqlConverterEventType, message : str):
print(event_type)
print(message)
if __name__ == "__main__":
# crawler = BigQuerySchemaScrawler({'dataset_id' : 'bigquery-public-data.thelook_ecommerce'})
# crawler.truncate_table()
# crawler.crawl()
# carwler test finished
# sql_converter = DirectSqlConverter("I want to know the total count of the product in Sports category.")
# sql_converter.register_callback(dummy_callback)
# sql_converter.convert()
# print(sql_converter.get_result().get_converted_sql())
# print(sql_converter.get_result().get_result_set())
# print(sql_converter.get_result().get_dashboard_url())
# print(sql_converter.get_result().get_look_url())
# crawler = LookerNavExplorerCrawer({})
# crawler.crawl()
pass | [
"As a looker developer, choose right filter value for the wanted value below without changing filter value itself.\n\n filter_values : {filter_values}\n\n wanted_values: {wanted_value}\n\n answer format: python list\n [filter_value1, filter_value2, ...]\n ",
"You are a Looker Developer, enrich the schama information for the table {table_name} with the following information:\n\n table_name : \n {table_name}\n\n table_column_schema :\n {table_column_schema}\n\n output_json :\n {sample_json}\n ",
" * table name : {table_name} REMARKS '{table_description}'\n * columns :\n (\n{column_schema}\n )\n\n",
"You are a looker developer, extract the filter columns and change the given sql into prepared statement in JSON format. Please don't suggest python code. Give me a json output as the given output example format.:\n\n output format : json\n {sample_json}\n\n ----------------------------------------------\n sql :\n {sql}\n\n related_tables :\n {related_tables}\n\n ",
"You are a Developer, convert the following question into SQL with the given schema information:\n\nrelated_schemas :\n{related_tables}\n\nquestion :\n{question}\n\noutput: SQL\n",
" {column_name} {column_type} "
] |
2024-01-10 | TheOwaisShaikh/Sitegpt-Chatbase-DanteAi-For-All | webquery.py | import os
import trafilatura
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PyPDFium2Loader
from langchain.docstore.document import Document
class PDFQuery:
def __init__(self, openai_api_key=None):
self.embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
os.environ["OPENAI_API_KEY"] = openai_api_key
self.text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
self.llm = ChatOpenAI(temperature=0, openai_api_key=openai_api_key)
self.chain = None
self.db = None
def ask(self, question: str) -> str:
if self.chain is None:
return "Please, add a document."
docs = self.db.get_relevant_documents(question)
return self.chain.run(input_documents=docs, question=question)
def ingest(self, file_path: os.PathLike) -> None:
loader = PyPDFium2Loader(file_path)
documents = loader.load()
splitted_documents = self.text_splitter.split_documents(documents)
self.db = Chroma.from_documents(splitted_documents, self.embeddings).as_retriever()
self.chain = load_qa_chain(ChatOpenAI(temperature=0), chain_type="stuff")
def forget(self):
self.db = None
self.chain = None
class WebQuery:
def __init__(self, openai_api_key=None):
self.embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
os.environ["OPENAI_API_KEY"] = openai_api_key
self.text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
self.llm = OpenAI(temperature=0, openai_api_key=openai_api_key)
self.chain = None
self.db = None
def ask(self, question: str) -> str:
if self.chain is None:
return "Please, add a document."
docs = self.db.get_relevant_documents(question)
return self.chain.run(input_documents=docs, question=question)
def ingest(self, url: str) -> str:
result = trafilatura.extract(trafilatura.fetch_url(url))
documents = [Document(page_content=result, metadata={"source": url})]
splitted_documents = self.text_splitter.split_documents(documents)
self.db = Chroma.from_documents(splitted_documents, self.embeddings).as_retriever()
self.chain = load_qa_chain(OpenAI(temperature=0), chain_type="stuff")
return "Success"
def forget(self):
self.db = None
self.chain = None
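# Illustrative usage sketch (assumes a valid OpenAI API key and a reachable URL):
# query = WebQuery(openai_api_key="sk-...")
# query.ingest("https://example.com/article")
# print(query.ask("What is the article about?"))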
| [] |
2024-01-10 | amotivv/quivr | backend~llm~LANGUAGE_PROMPT.py | from langchain.prompts.prompt import PromptTemplate
prompt_template = """Your name is Quivr. You are a second brain. A person will ask you a question and you will provide a helpful answer. Write the answer in the same language as the question. If you don't know the answer, just say that you don't know. Don't try to make up an answer. Use the following context to answer the question:
{context}
Question: {question}
Helpful Answer:"""
QA_PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
) | [
"context",
"question",
"t know the answer, just say that you don",
"Your name is Quivr. You are a second brain. A person will ask you a question and you will provide a helpful answer. Write the answer in the same language as the question. If you don't know the answer, just say that you don't know. Don't try to make up an answer. Use the following context to answer the question:\n\n\n{context}\n\nQuestion: {question}\nHelpful Answer:"
] |
2024-01-10 | amotivv/quivr | backend~llm~summarization.py | import os
import guidance
import openai
from logger import get_logger
logger = get_logger(__name__)
openai_api_key = os.environ.get("OPENAI_API_KEY")
openai.api_key = openai_api_key
summary_llm = guidance.llms.OpenAI('gpt-3.5-turbo-0613', caching=False)
def llm_summerize(document):
summary = guidance("""
{{#system~}}
You are the world's best summarizer. \n
Condense the text, capturing essential points and core ideas. Include relevant \
examples, omit excess details, and ensure the summary's length matches the \
original's complexity.
{{/system~}}
{{#user~}}
Summarize the following text:
---
{{document}}
{{/user~}}
{{#assistant~}}
{{gen 'summarization' temperature=0.2 max_tokens=100}}
{{/assistant~}}
""", llm=summary_llm)
summary = summary(document=document)
logger.info('Summarization: %s', summary)
return summary['summarization']
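# Score each candidate summary against the user's question with a guidance template;
# the model returns CSV rows that are parsed, filtered to scores between 3 and 5,
# and sorted by score below.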
def llm_evaluate_summaries(question, summaries, model):
if not model.startswith('gpt'):
logger.info(
f'Model {model} not supported. Using gpt-3.5-turbo instead.')
model = 'gpt-3.5-turbo-0613'
logger.info(f'Evaluating summaries with {model}')
evaluation_llm = guidance.llms.OpenAI(model, caching=False)
evaluation = guidance("""
{{#system~}}
You are the world's best evaluator. You evaluate the relevance of summaries based \
on user input question. Return evaluation in following csv format, csv headers \
are [summary_id,document_id,evaluation,reason].
Evaluator Task
- Evaluation should be a score number between 0 and 5.
- Reason should be a short sentence (within 20 words) explaining the evaluation.
---
Example
summary_id,document_id,evaluation,reason
1,4,3,"not mentioned about topic A"
2,2,4,"It is not relevant to the question"
{{/system~}}
{{#user~}}
Based on the question, do Evaluator Task for each summary.
---
Question: {{question}}
{{#each summaries}}
Summary
summary_id: {{this.id}}
document_id: {{this.document_id}}
evaluation: ""
reason: ""
Summary Content: {{this.content}}
File Name: {{this.metadata.file_name}}
{{/each}}
{{/user~}}
{{#assistant~}}
{{gen 'evaluation' temperature=0.2 stop='<|im_end|>'}}
{{/assistant~}}
""", llm=evaluation_llm)
result = evaluation(question=question, summaries=summaries)
evaluations = {}
for evaluation in result['evaluation'].split('\n'):
if evaluation == '' or not evaluation[0].isdigit():
continue
logger.info('Evaluation Row: %s', evaluation)
summary_id, document_id, score, *reason = evaluation.split(',')
if not score.isdigit():
continue
score = int(score)
if score < 3 or score > 5:
continue
evaluations[summary_id] = {
'evaluation': score,
'reason': ','.join(reason),
'summary_id': summary_id,
'document_id': document_id,
}
return [e for e in sorted(evaluations.values(), key=lambda x: x['evaluation'], reverse=True)]
| [] |
2024-01-10 | amotivv/quivr | backend~utils~vectors.py | import os
from typing import Annotated
from fastapi import Depends
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.schema import Document
from langchain.vectorstores import SupabaseVectorStore
from llm.qa import get_qa_llm
from llm.summarization import llm_evaluate_summaries, llm_summerize
from logger import get_logger
from models.chats import ChatMessage
from models.users import User
from supabase import Client, create_client
logger = get_logger(__name__)
openai_api_key = os.environ.get("OPENAI_API_KEY")
anthropic_api_key = os.environ.get("ANTHROPIC_API_KEY")
supabase_url = os.environ.get("SUPABASE_URL")
supabase_key = os.environ.get("SUPABASE_SERVICE_KEY")
embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
supabase_client: Client = create_client(supabase_url, supabase_key)
documents_vector_store = SupabaseVectorStore(
supabase_client, embeddings, table_name="vectors")
summaries_vector_store = SupabaseVectorStore(
supabase_client, embeddings, table_name="summaries")
def common_dependencies():
return {
"supabase": supabase_client,
"embeddings": embeddings,
"documents_vector_store": documents_vector_store,
"summaries_vector_store": summaries_vector_store
}
CommonsDep = Annotated[dict, Depends(common_dependencies)]
def create_summary(document_id, content, metadata):
logger.info(f"Summarizing document {content[:100]}")
summary = llm_summerize(content)
logger.info(f"Summary: {summary}")
metadata['document_id'] = document_id
summary_doc_with_metadata = Document(
page_content=summary, metadata=metadata)
sids = summaries_vector_store.add_documents(
[summary_doc_with_metadata])
if sids and len(sids) > 0:
supabase_client.table("summaries").update(
{"document_id": document_id}).match({"id": sids[0]}).execute()
def create_vector(user_id,doc, user_openai_api_key=None):
logger.info(f"Creating vector for document")
logger.info(f"Document: {doc}")
if user_openai_api_key:
documents_vector_store._embedding = OpenAIEmbeddings(openai_api_key=user_openai_api_key)
try:
sids = documents_vector_store.add_documents(
[doc])
if sids and len(sids) > 0:
supabase_client.table("vectors").update(
{"user_id": user_id}).match({"id": sids[0]}).execute()
except Exception as e:
logger.error(f"Error creating vector for document {e}")
def create_user(email, date):
logger.info(f"New user entry in db document for user {email}")
return(supabase_client.table("users").insert(
{"email": email, "date": date, "requests_count": 1}).execute())
def update_user_request_count(email, date, requests_count):
logger.info(f"User {email} request count updated to {requests_count}")
supabase_client.table("users").update(
{ "requests_count": requests_count}).match({"email": email, "date": date}).execute()
def create_chat(user_id, history, chat_name):
# Chat is created upon the user's first question asked
logger.info(f"New chat entry in chats table for user {user_id}")
# Insert a new row into the chats table
new_chat = {
"user_id": user_id,
"history": history, # Empty chat to start
"chat_name": chat_name
}
insert_response = supabase_client.table('chats').insert(new_chat).execute()
logger.info(f"Insert response {insert_response.data}")
return(insert_response)
def update_chat(chat_id, history):
supabase_client.table("chats").update(
{ "history": history}).match({"chat_id": chat_id}).execute()
logger.info(f"Chat {chat_id} updated")
def create_embedding(content):
return embeddings.embed_query(content)
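# Call a Postgres function (e.g. match_summaries) through Supabase RPC to run a
# vector similarity search over the stored embeddings.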
def similarity_search(query, table='match_summaries', top_k=5, threshold=0.5):
query_embedding = create_embedding(query)
summaries = supabase_client.rpc(
table, {'query_embedding': query_embedding,
'match_count': top_k, 'match_threshold': threshold}
).execute()
return summaries.data
def fetch_user_id_from_credentials(commons: CommonsDep,date,credentials):
user = User(email=credentials.get('email', 'none'))
# Fetch the user's UUID based on their email
response = commons['supabase'].from_('users').select('user_id').filter("email", "eq", user.email).execute()
userItem = next(iter(response.data or []), {})
if userItem == {}:
create_user_response = create_user(email= user.email, date=date)
user_id = create_user_response.data[0]['user_id']
else:
user_id = userItem['user_id']
return user_id
def get_chat_name_from_first_question(chat_message: ChatMessage):
# Step 1: Get the summary of the first question
# first_question_summary = summarize_as_title(chat_message.question)
# Step 2: Process this summary to create a chat name by selecting the first three words
chat_name = ' '.join(chat_message.question.split()[:3])
return chat_name
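# Answer a chat message: optionally rank stored summaries to pull in extra context,
# run the question (and history) through the QA chain, and append source file names
# to the answer when they are available.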
def get_answer(commons: CommonsDep, chat_message: ChatMessage, email: str, user_openai_api_key:str):
qa = get_qa_llm(chat_message, email, user_openai_api_key)
if chat_message.use_summarization:
# 1. get summaries from the vector store based on question
summaries = similarity_search(
chat_message.question, table='match_summaries')
# 2. evaluate summaries against the question
evaluations = llm_evaluate_summaries(
chat_message.question, summaries, chat_message.model)
# 3. pull in the top documents from summaries
if evaluations:
response = commons['supabase'].from_('vectors').select(
'*').in_('id', values=[e['document_id'] for e in evaluations]).execute()
# 4. use top docs as additional context
additional_context = '---\nAdditional Context={}'.format(
'---\n'.join(data['content'] for data in response.data)
) + '\n'
model_response = qa(
{"question": additional_context + chat_message.question})
else:
model_response = qa({"question": chat_message.question, "chat_history": chat_message.history})
answer = model_response['answer']
# append sources (file_name) to answer
if "source_documents" in answer:
# logger.debug('Source Documents: %s', answer["source_documents"])
sources = [
doc.metadata["file_name"] for doc in answer["source_documents"]
if "file_name" in doc.metadata]
# logger.debug('Sources: %s', sources)
if sources:
files = dict.fromkeys(sources)
# # shall provide file links until pages available
# files = [f"[{f}](/explore/{f})" for f in files]
answer = answer + "\n\nRef: " + "; ".join(files)
return answer
| [] |