date_collected
stringclasses 1
value | repo_name
stringlengths 6
116
| file_name
stringlengths 2
220
| file_contents
stringlengths 13
357k
| prompts
sequence |
---|---|---|---|---|
2024-01-10 | markingle/assistant_demo | assistant_demo_wx.py | from openai import OpenAI
import time
import sys
import wx
client = OpenAI()
class MyFrame(wx.Frame):
def __init__(self):
super().__init__(parent=None, title='OpenAI Assistant Creator', size = wx.Size(600, 375))
panel = wx.Panel(self)
my_sizer = wx.BoxSizer(wx.VERTICAL)
self.text_ctrl = wx.TextCtrl(panel)
my_sizer.Add(self.text_ctrl, 0, wx.ALL | wx.EXPAND, 15)
self.text_reply = wx.TextCtrl(panel, id=-1, size=(50,75), style=wx.TE_READONLY | wx.TE_MULTILINE | wx.TE_RICH)
my_sizer.Add(self.text_reply, 0, wx.ALL | wx.EXPAND, 30)
my_btn = wx.Button(panel, label='Press Me')
my_btn.Bind(wx.EVT_BUTTON, self.on_press)
my_sizer.Add(my_btn, 0, wx.ALL | wx.CENTER, 5)
self.text_status = wx.TextCtrl(panel, id=-1, size=(250,-1), style=wx.TE_READONLY)
my_sizer.Add(self.text_status, 0, wx.ALL | wx.LEFT, 40)
panel.SetSizer(my_sizer)
self.Show()
def set_status_text(self, text):
wx.CallAfter(self.text_status.SetValue, text)
def set_reply_text(self, text):
wx.CallAfter(self.text_reply.SetValue, text)
def on_press(self, event):
value = self.text_ctrl.GetValue()
if not value:
self.set_status_text("You didn't enter anything! Ya big dummy!!! :)")
else:
#print(f'You typed: "{value}"')
#self.set_status_text(value)
thread = client.beta.threads.create()
input_message = client.beta.threads.messages.create(
thread_id = thread.id,
role = "user",
content = value
)
if input_message is None:
self.set_status_text("Failed to create input message")
else:
run = client.beta.threads.runs.create(
thread_id = thread.id,
assistant_id = assistant.id
)
if run is None:
self.set_status_text("Failed to create run for message")
#Per the OpenAI architecture get a thread and message going here to prepare for user interaction
#while run.status != "completed":
# # Be nice to the API
# time.sleep(0.5)
# run = client.beta.threads.runs.retrieve(thread_id=thread.id, run_id=run.id)
#try:
# messages = client.beta.threads.messages.list(
# thread_id = thread.id
# )
#except:
# print("message list failed")
# exit()
run_done = False
while not run_done:
#Give the API a break
time.sleep(0.5)
#retrieve the runs
run = client.beta.threads.runs.retrieve(
thread_id = thread.id,
run_id = run.id
)
if run.status in ["queued", "in_progress", "cancelling"]:
run_done = False
elif run.status in ["cancelled", "failed", "completed", "expired"]:
run_done = True
elif run.status in ["requires_action"]:
self.set_status_text("Required Action...need to do more coding!!")
run_done = False
else:
self.set_status_text("chat: unrecognised run status" + run.status)
run_done = True
# send status to status callbackxs
self.set_status_text(run.status)
# retrieve messages on the thread
reply_messages = client.beta.threads.messages.list(thread_id=thread.id, order = "asc", after=input_message.id)
if reply_messages is None:
self.set_status_text("chat: failed to retrieve messages")
# concatenate all messages into a single reply skipping the first which is our question
reply = ""
need_newline = False
for message in reply_messages.data:
reply = reply + message.content[0].text.value
if need_newline:
reply = reply + "\n"
need_newline = True
if reply is None or reply == "":
self.set_status_text("chat: failed to retrieve latest reply")
else:
self.set_reply_text(reply)
assistant_name=None
module_name=None
assistant_created=False
#https://www.tutorialspoint.com/python/index.html
#TODO: Provide for running script without assistant name provided
if assistant_name is None:
assistant_name = str(sys.argv[1])
if module_name is None:
module_name = "gpt-4-1106-preview"
# if assistant exists, use it
assistants_list = client.beta.assistants.list()
for existing_assistant in assistants_list.data:
if existing_assistant.name == assistant_name:
print("setup_assistant: using existing assistant: " + existing_assistant.name + " id:" + existing_assistant.id)
assistant = client.beta.assistants.retrieve(existing_assistant.id)
assistant_created = True
if assistant_created == False:
assistant = client.beta.assistants.create(
name = assistant_name,
instructions = "You are a math tutor. Write and run code to answer math questions",
tools = [{"type": "code_interpreter"}],
model = "gpt-4-1106-preview"
)
print("Assistant " + assistant_name + " was created")
#run = client.beta.threads.runs.create(
# thread_id = thread.id,
# assistant_id = assistant.id
# )
if __name__ == '__main__':
app = wx.App()
frame = MyFrame()
app.MainLoop() | [] |
2024-01-10 | vasanthsarathy/mpep-qa | scripts~build_indexes.py | # This module includes code for building Llama indexes
import os
import openai
#openai.api_key = os.environ["OPENAI_API_KEY"]
| [] |
2024-01-10 | saurabh111233212/JustTheFacts | src~extract_facts.py | import openai
import os
import re
import json
def load_json(filename):
if os.path.isfile(filename):
with open(filename) as json_file:
data = json.load(json_file)
return data
return {}
def save_json(data, filename):
with open(filename, "w") as outfile:
json.dump(data, outfile)
def extract_statements(text):
""" Extract statements from a numbered list, skipping any lines that don't start with a number"""
pattern = r'^\s*\d+\.\s*(.*)\s*$'
lines = text.split('\n')
statements = []
for line in lines:
match = re.match(pattern, line)
if match:
statements.append(match.group(1))
return statements
def ask_question_about_article(article_text, question):
prompt_messages = [
{
"role": "system",
"content": "You are a fact checker." # The system description tells GPT what persona it should take on
},
{
"role": "user",
"content": "Here is a newspaper article:"
},
{
"role": "user",
"content": article_text
},
{
"role": "user",
"content": question
},
]
response = openai.ChatCompletion.create(model="gpt-4", messages=prompt_messages)
return response["choices"][0]["message"]["content"]
def ask_question(question):
prompt_messages = [
{
"role": "user",
"content": question
},
]
response = openai.ChatCompletion.create(model="gpt-4", messages=prompt_messages)
return response["choices"][0]["message"]["content"]
def extract_facts_from_article(article_text_1):
question = "Please extract a list of simple, declarative sentences that enumerate just the facts from the article. When you extract the facts, please rephrase the sentences that you extract them from."
response = ask_question_about_article(article_text_1, question)
return response
def fact_set_to_str(fact_sets):
fact_set_str = ""
for i in range(len(fact_sets)):
fact_set_str += "Set {set_num}: {set_contents}\n".format(set_num=i, set_contents=str(fact_sets[i]))
return fact_set_str
def get_closest_fact_set(fact_sets, fact):
prompt_messages = [
{
"role": "system",
"content": "You help group statement into sets of equivalent facts." # The system description tells GPT what persona it should take on
},
{
"role": "user",
"content": "Here are the fact sets:"
},
{
"role": "user",
"content": fact_set_to_str(fact_sets)
},
{
"role": "user",
"content": "Here's a fact to assign to a fact set: " + fact
},
{
"role": "user",
"content": "Which fact set does it most closely correspond to? "
"Give the number, or say 'None' if it doesn't closely correspond to any."
},
]
response = openai.ChatCompletion.create(model="gpt-4", messages=prompt_messages)
return response["choices"][0]["message"]["content"]
def get_facts(url):
import requests
res = requests.get("http://just-the-facts.apps.allenai.org/api/get-facts", params={"url": url, "method": "gpt-4"})
print(res.text)
def ask_openai_question():
openai.api_key = os.environ["OPENAI_API_KEY"]
q = "Can you write the code of a browser plugin that takes as input 2 lists of strings and display them side by side?"
response = ask_question(q)
print(response)
if __name__ == "__main__":
#get_facts(url='https://www.breitbart.com/news/wind-whipped-hawaii-wildfires-force-evacuations-water-rescues/')
get_facts(url='https://www.nbcnews.com/news/world/americans-imprisoned-iran-prisoner-exchange-deal-rcna99105')
| [
"Here is a newspaper article:",
"Here's a fact to assign to a fact set: PLACEHOLDER",
"You help group statement into sets of equivalent facts.",
"Here are the fact sets:",
"Which fact set does it most closely correspond to? Give the number, or say 'None' if it doesn't closely correspond to any.",
"You are a fact checker.",
"Please extract a list of simple, declarative sentences that enumerate just the facts from the article. When you extract the facts, please rephrase the sentences that you extract them from."
] |
2024-01-10 | saurabh111233212/JustTheFacts | src~get_openai_embeddings.py | import openai
import numpy as np
def get_embedding(text, model="text-embedding-ada-002"):
text = text.replace("\n", " ")
response = openai.Embedding.create(input = [text], model=model)['data'][0]['embedding']
return np.array(response) | [] |
2024-01-10 | AngryBacteria/lc2_ambispeech | backend_fastapi~app~langchain~gpt4all_helper.py | from __future__ import annotations
import os
from dotenv import load_dotenv
from langchain.llms.gpt4all import GPT4All
from langchain_core.language_models import BaseLanguageModel
from app.data.data import GenericLangChainModel
from app.utils.logging_util import logger
class GPT4AllHelper(GenericLangChainModel):
_instance = None
llm = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(GPT4AllHelper, cls).__new__(cls)
return cls._instance
def __init__(self):
if hasattr(self, "_initialized"):
return
load_dotenv()
model_path = os.getenv("GPT4ALL_MODEL_PATH")
if os.getenv("GPT4ALL_MODEL_PATH") is None:
logger.warning(
f"Model path not found: {model_path}. The GPT4All LLM will not be functional until a valid path is "
f"provided in .env File."
)
else:
self.llm = GPT4All(model=os.getenv("GPT4ALL_MODEL_PATH"))
logger.info("Created GPT4AllHelper")
self._initialized = True
def get_llm(self):
if self.llm is None:
raise RuntimeError(
"GPT4All LLM is not properly initialized. Please check the model path."
)
return self.llm
| [] |
2024-01-10 | HombreOso/aider | aider~coders~base_coder.py | #!/usr/bin/env python
import hashlib
import json
import os
import sys
import traceback
from json.decoder import JSONDecodeError
from pathlib import Path, PurePosixPath
import backoff
import git
import openai
import requests
from jsonschema import Draft7Validator
from openai.error import APIError, RateLimitError, ServiceUnavailableError, Timeout
from rich.console import Console, Text
from rich.live import Live
from rich.markdown import Markdown
# ------------------------------
## temporary make aider a package not a library
## after developing needed functionality -> package it again and publish to PyPI
## do not forget to attribute the original library aider from Paul Gauthier
# ------------------------------
## temporary commented out
# from aider import models, prompts, utils
# from aider.commands import Commands
# from aider.repomap import RepoMap
# ------------------------------
# ------------------------------
## temporary added
import models, prompts, utils
from commands import Commands
from repomap import RepoMap
# ------------------------------
## temporary commented out
# from ..dump import dump # noqa: F401
# ------------------------------
class MissingAPIKeyError(ValueError):
pass
class ExhaustedContextWindow(Exception):
pass
def wrap_fence(name):
return f"<{name}>", f"</{name}>"
class Coder:
abs_fnames = None
repo = None
last_aider_commit_hash = None
last_asked_for_commit_time = 0
repo_map = None
functions = None
total_cost = 0.0
num_exhausted_context_windows = 0
@classmethod
def create(
self,
main_model,
edit_format,
io,
**kwargs,
):
from . import (
EditBlockCoder,
EditBlockFunctionCoder,
SingleWholeFileFunctionCoder,
WholeFileCoder,
WholeFileFunctionCoder,
)
if not main_model:
main_model = models.GPT35_16k
if not main_model.always_available:
if not check_model_availability(main_model):
if main_model != models.GPT4:
io.tool_error(
f"API key does not support {main_model.name}, falling back to"
f" {models.GPT35_16k.name}"
)
main_model = models.GPT35_16k
if edit_format is None:
edit_format = main_model.edit_format
if edit_format == "diff":
return EditBlockCoder(main_model, io, **kwargs)
elif edit_format == "whole":
return WholeFileCoder(main_model, io, **kwargs)
elif edit_format == "whole-func":
return WholeFileFunctionCoder(main_model, io, **kwargs)
elif edit_format == "single-whole-func":
return SingleWholeFileFunctionCoder(main_model, io, **kwargs)
elif edit_format == "diff-func-list":
return EditBlockFunctionCoder("list", main_model, io, **kwargs)
elif edit_format in ("diff-func", "diff-func-string"):
return EditBlockFunctionCoder("string", main_model, io, **kwargs)
else:
raise ValueError(f"Unknown edit format {edit_format}")
def __init__(
self,
main_model,
io,
fnames=None,
pretty=True,
show_diffs=False,
auto_commits=True,
dirty_commits=True,
dry_run=False,
map_tokens=1024,
verbose=False,
assistant_output_color="blue",
code_theme="default",
stream=True,
use_git=True,
):
if not fnames:
fnames = []
self.chat_completion_call_hashes = []
self.chat_completion_response_hashes = []
self.verbose = verbose
self.abs_fnames = set()
self.cur_messages = []
self.done_messages = []
self.num_control_c = 0
self.io = io
self.stream = stream
if not auto_commits:
dirty_commits = False
self.auto_commits = auto_commits
self.dirty_commits = dirty_commits
self.assistant_output_color = assistant_output_color
self.code_theme = code_theme
self.dry_run = dry_run
self.pretty = pretty
if pretty:
self.console = Console()
else:
self.console = Console(force_terminal=False, no_color=True)
self.main_model = main_model
self.io.tool_output(f"Model: {main_model.name}")
self.show_diffs = show_diffs
self.commands = Commands(self.io, self)
if use_git:
self.set_repo(fnames)
else:
self.abs_fnames = set([str(Path(fname).resolve()) for fname in fnames])
if self.repo:
rel_repo_dir = self.get_rel_repo_dir()
self.io.tool_output(f"Git repo: {rel_repo_dir}")
else:
self.io.tool_output("Git repo: none")
self.find_common_root()
if main_model.use_repo_map and self.repo and self.gpt_prompts.repo_content_prefix:
self.repo_map = RepoMap(
map_tokens,
self.root,
self.main_model,
io,
self.gpt_prompts.repo_content_prefix,
self.verbose,
)
if self.repo_map.use_ctags:
self.io.tool_output(f"Repo-map: universal-ctags using {map_tokens} tokens")
elif not self.repo_map.has_ctags and map_tokens > 0:
self.io.tool_output(
f"Repo-map: basic using {map_tokens} tokens"
f" ({self.repo_map.ctags_disabled_reason})"
)
else:
self.io.tool_output("Repo-map: disabled because map_tokens == 0")
else:
self.io.tool_output("Repo-map: disabled")
for fname in self.get_inchat_relative_files():
self.io.tool_output(f"Added {fname} to the chat.")
# validate the functions jsonschema
if self.functions:
for function in self.functions:
Draft7Validator.check_schema(function)
if self.verbose:
self.io.tool_output("JSON Schema:")
self.io.tool_output(json.dumps(self.functions, indent=4))
def find_common_root(self):
if len(self.abs_fnames) == 1:
self.root = os.path.dirname(list(self.abs_fnames)[0])
elif self.abs_fnames:
self.root = os.path.commonpath(list(self.abs_fnames))
else:
self.root = os.getcwd()
self.root = utils.safe_abs_path(self.root)
def get_rel_repo_dir(self):
try:
return os.path.relpath(self.repo.git_dir, os.getcwd())
except ValueError:
return self.repo.git_dir
def add_rel_fname(self, rel_fname):
self.abs_fnames.add(self.abs_root_path(rel_fname))
def abs_root_path(self, path):
res = Path(self.root) / path
return utils.safe_abs_path(res)
def set_repo(self, cmd_line_fnames):
if not cmd_line_fnames:
cmd_line_fnames = ["."]
repo_paths = []
for fname in cmd_line_fnames:
fname = Path(fname)
if not fname.exists():
self.io.tool_output(f"Creating empty file {fname}")
fname.parent.mkdir(parents=True, exist_ok=True)
fname.touch()
fname = fname.resolve()
try:
repo_path = git.Repo(fname, search_parent_directories=True).working_dir
repo_path = utils.safe_abs_path(repo_path)
repo_paths.append(repo_path)
except git.exc.InvalidGitRepositoryError:
pass
if fname.is_dir():
continue
self.abs_fnames.add(str(fname))
num_repos = len(set(repo_paths))
if num_repos == 0:
return
if num_repos > 1:
self.io.tool_error("Files are in different git repos.")
return
# https://github.com/gitpython-developers/GitPython/issues/427
self.repo = git.Repo(repo_paths.pop(), odbt=git.GitDB)
self.root = utils.safe_abs_path(self.repo.working_tree_dir)
new_files = []
for fname in self.abs_fnames:
relative_fname = self.get_rel_fname(fname)
tracked_files = set(self.get_tracked_files())
if relative_fname not in tracked_files:
new_files.append(relative_fname)
if new_files:
rel_repo_dir = self.get_rel_repo_dir()
self.io.tool_output(f"Files not tracked in {rel_repo_dir}:")
for fn in new_files:
self.io.tool_output(f" - {fn}")
if self.io.confirm_ask("Add them?"):
for relative_fname in new_files:
self.repo.git.add(relative_fname)
self.io.tool_output(f"Added {relative_fname} to the git repo")
show_files = ", ".join(new_files)
commit_message = f"Added new files to the git repo: {show_files}"
self.repo.git.commit("-m", commit_message, "--no-verify")
commit_hash = self.repo.head.commit.hexsha[:7]
self.io.tool_output(f"Commit {commit_hash} {commit_message}")
else:
self.io.tool_error("Skipped adding new files to the git repo.")
return
# fences are obfuscated so aider can modify this file!
fences = [
("``" + "`", "``" + "`"),
wrap_fence("source"),
wrap_fence("code"),
wrap_fence("pre"),
wrap_fence("codeblock"),
wrap_fence("sourcecode"),
]
fence = fences[0]
def get_abs_fnames_content(self):
for fname in list(self.abs_fnames):
content = self.io.read_text(fname)
if content is None:
relative_fname = self.get_rel_fname(fname)
self.io.tool_error(f"Dropping {relative_fname} from the chat.")
self.abs_fnames.remove(fname)
else:
yield fname, content
def choose_fence(self):
all_content = ""
for _fname, content in self.get_abs_fnames_content():
all_content += content + "\n"
good = False
for fence_open, fence_close in self.fences:
if fence_open in all_content or fence_close in all_content:
continue
good = True
break
if good:
self.fence = (fence_open, fence_close)
else:
self.fence = self.fences[0]
self.io.tool_error(
"Unable to find a fencing strategy! Falling back to:"
" {self.fence[0]}...{self.fence[1]}"
)
return
def get_files_content(self, fnames=None):
if not fnames:
fnames = self.abs_fnames
prompt = ""
for fname, content in self.get_abs_fnames_content():
relative_fname = self.get_rel_fname(fname)
prompt += "\n"
prompt += relative_fname
prompt += f"\n{self.fence[0]}\n"
prompt += content
prompt += f"{self.fence[1]}\n"
return prompt
def get_repo_map(self):
if not self.repo_map:
return
other_files = set(self.get_all_abs_files()) - set(self.abs_fnames)
repo_content = self.repo_map.get_repo_map(self.abs_fnames, other_files)
return repo_content
def get_files_messages(self):
all_content = ""
if self.abs_fnames:
files_content = self.gpt_prompts.files_content_prefix
files_content += self.get_files_content()
else:
files_content = self.gpt_prompts.files_no_full_files
all_content += files_content
repo_content = self.get_repo_map()
if repo_content:
if all_content:
all_content += "\n"
all_content += repo_content
files_messages = [
dict(role="user", content=all_content),
dict(role="assistant", content="Ok."),
]
if self.abs_fnames:
files_messages += [
dict(role="system", content=self.fmt_system_reminder()),
]
return files_messages
def run(self, with_message=None):
while True:
try:
if with_message:
new_user_message = with_message
self.io.user_input(with_message)
else:
new_user_message = self.run_loop()
while new_user_message:
new_user_message = self.send_new_user_message(new_user_message)
if with_message:
return
except KeyboardInterrupt:
self.num_control_c += 1
if self.num_control_c >= 2:
break
self.io.tool_error("^C again or /exit to quit")
except EOFError:
return
def should_dirty_commit(self, inp):
cmds = self.commands.matching_commands(inp)
if cmds:
matching_commands, _, _ = cmds
if len(matching_commands) == 1:
cmd = matching_commands[0]
if cmd in ("/exit", "/commit"):
return
if not self.dirty_commits:
return
if not self.repo:
return
if not self.repo.is_dirty():
return
if self.last_asked_for_commit_time >= self.get_last_modified():
return
return True
def move_back_cur_messages(self, message):
self.done_messages += self.cur_messages
if message:
self.done_messages += [
dict(role="user", content=message),
dict(role="assistant", content="Ok."),
]
self.cur_messages = []
def run_loop(self):
try:
inp = self.io.get_input(
self.root,
self.get_inchat_relative_files(),
self.get_addable_relative_files(),
self.commands,
)
except UnicodeEncodeError:
print("self.root: ", self.root),
print("self.get_inchat_relative_files(): ", self.get_inchat_relative_files())
print("self.get_addable_relative_files(): ",self.get_addable_relative_files())
print("self.commands", self.commands)
self.num_control_c = 0
if self.should_dirty_commit(inp):
self.io.tool_output("Git repo has uncommitted changes, preparing commit...")
self.commit(ask=True, which="repo_files")
# files changed, move cur messages back behind the files messages
self.move_back_cur_messages(self.gpt_prompts.files_content_local_edits)
if inp.strip():
self.io.tool_output("Use up-arrow to retry previous command:", inp)
return
if not inp:
return
if self.commands.is_command(inp):
return self.commands.run(inp)
self.check_for_file_mentions(inp)
return self.send_new_user_message(inp)
def fmt_system_reminder(self):
prompt = self.gpt_prompts.system_reminder
prompt = prompt.format(fence=self.fence)
return prompt
def send_new_user_message(self, inp):
self.choose_fence()
self.cur_messages += [
dict(role="user", content=inp),
]
main_sys = self.gpt_prompts.main_system
# if self.main_model.max_context_tokens > 4 * 1024:
main_sys += "\n" + self.fmt_system_reminder()
messages = [
dict(role="system", content=main_sys),
]
messages += self.done_messages
messages += self.get_files_messages()
messages += self.cur_messages
if self.verbose:
utils.show_messages(messages, functions=self.functions)
exhausted = False
interrupted = False
try:
interrupted = self.send(messages, functions=self.functions)
except ExhaustedContextWindow:
exhausted = True
except openai.error.InvalidRequestError as err:
if "maximum context length" in str(err):
exhausted = True
else:
raise err
if exhausted:
self.num_exhausted_context_windows += 1
self.io.tool_error("The chat session is larger than the context window!\n")
self.commands.cmd_tokens("")
self.io.tool_error("\nTo reduce token usage:")
self.io.tool_error(" - Use /drop to remove unneeded files from the chat session.")
self.io.tool_error(" - Use /clear to clear chat history.")
return
if self.partial_response_function_call:
args = self.parse_partial_args()
if args:
content = args["explanation"]
else:
content = ""
elif self.partial_response_content:
content = self.partial_response_content
else:
content = ""
if interrupted:
self.io.tool_error("\n\n^C KeyboardInterrupt")
self.num_control_c += 1
content += "\n^C KeyboardInterrupt"
self.io.tool_output()
if interrupted:
self.cur_messages += [dict(role="assistant", content=content)]
return
edited, edit_error = self.apply_updates()
if edit_error:
return edit_error
# TODO: this shouldn't use content, should use self.partial_....
self.update_cur_messages(content, edited)
if edited:
if self.repo and self.auto_commits and not self.dry_run:
saved_message = self.auto_commit()
elif hasattr(self.gpt_prompts, "files_content_gpt_edits_no_repo"):
saved_message = self.gpt_prompts.files_content_gpt_edits_no_repo
else:
saved_message = None
self.move_back_cur_messages(saved_message)
add_rel_files_message = self.check_for_file_mentions(content)
if add_rel_files_message:
return add_rel_files_message
def update_cur_messages(self, content, edited):
self.cur_messages += [dict(role="assistant", content=content)]
def auto_commit(self):
res = self.commit(history=self.cur_messages, prefix="aider: ")
if res:
commit_hash, commit_message = res
self.last_aider_commit_hash = commit_hash
saved_message = self.gpt_prompts.files_content_gpt_edits.format(
hash=commit_hash,
message=commit_message,
)
else:
if self.repo:
self.io.tool_output("No changes made to git tracked files.")
saved_message = self.gpt_prompts.files_content_gpt_no_edits
return saved_message
def check_for_file_mentions(self, content):
words = set(word for word in content.split())
# drop sentence punctuation from the end
words = set(word.rstrip(",.!;") for word in words)
# strip away all kinds of quotes
quotes = "".join(['"', "'", "`"])
words = set(word.strip(quotes) for word in words)
addable_rel_fnames = self.get_addable_relative_files()
mentioned_rel_fnames = set()
fname_to_rel_fnames = {}
for rel_fname in addable_rel_fnames:
if rel_fname in words:
mentioned_rel_fnames.add(str(rel_fname))
fname = os.path.basename(rel_fname)
if fname not in fname_to_rel_fnames:
fname_to_rel_fnames[fname] = []
fname_to_rel_fnames[fname].append(rel_fname)
for fname, rel_fnames in fname_to_rel_fnames.items():
if len(rel_fnames) == 1 and fname in words:
mentioned_rel_fnames.add(rel_fnames[0])
if not mentioned_rel_fnames:
return
for rel_fname in mentioned_rel_fnames:
self.io.tool_output(rel_fname)
if not self.io.confirm_ask("Add these files to the chat?"):
return
for rel_fname in mentioned_rel_fnames:
self.add_rel_fname(rel_fname)
return prompts.added_files.format(fnames=", ".join(mentioned_rel_fnames))
@backoff.on_exception(
backoff.expo,
(
Timeout,
APIError,
ServiceUnavailableError,
RateLimitError,
requests.exceptions.ConnectionError,
),
max_tries=10,
on_backoff=lambda details: print(f"Retry in {details['wait']} seconds."),
)
def send_with_retries(self, model, messages, functions):
kwargs = dict(
model=model,
messages=messages,
temperature=0,
stream=self.stream,
)
if functions is not None:
kwargs["functions"] = self.functions
# we are abusing the openai object to stash these values
if hasattr(openai, "api_deployment_id"):
kwargs["deployment_id"] = openai.api_deployment_id
if hasattr(openai, "api_engine"):
kwargs["engine"] = openai.api_engine
# Generate SHA1 hash of kwargs and append it to chat_completion_call_hashes
hash_object = hashlib.sha1(json.dumps(kwargs, sort_keys=True).encode())
self.chat_completion_call_hashes.append(hash_object.hexdigest())
res = openai.ChatCompletion.create(**kwargs)
return res
def send(self, messages, model=None, silent=False, functions=None):
if not model:
model = self.main_model.name
self.partial_response_content = ""
self.partial_response_function_call = dict()
interrupted = False
try:
completion = self.send_with_retries(model, messages, functions)
if self.stream:
self.show_send_output_stream(completion, silent)
else:
self.show_send_output(completion, silent)
except KeyboardInterrupt:
interrupted = True
if not silent:
if self.partial_response_content:
self.io.ai_output(self.partial_response_content)
elif self.partial_response_function_call:
# TODO: push this into subclasses
args = self.parse_partial_args()
if args:
self.io.ai_output(json.dumps(args, indent=4))
return interrupted
def show_send_output(self, completion, silent):
if self.verbose:
print(completion)
show_func_err = None
show_content_err = None
try:
self.partial_response_function_call = completion.choices[0].message.function_call
except AttributeError as func_err:
show_func_err = func_err
try:
self.partial_response_content = completion.choices[0].message.content
except AttributeError as content_err:
show_content_err = content_err
resp_hash = dict(
function_call=self.partial_response_function_call,
content=self.partial_response_content,
)
resp_hash = hashlib.sha1(json.dumps(resp_hash, sort_keys=True).encode())
self.chat_completion_response_hashes.append(resp_hash.hexdigest())
if show_func_err and show_content_err:
self.io.tool_error(show_func_err)
self.io.tool_error(show_content_err)
raise Exception("No data found in openai response!")
prompt_tokens = completion.usage.prompt_tokens
completion_tokens = completion.usage.completion_tokens
tokens = f"{prompt_tokens} prompt tokens, {completion_tokens} completion tokens"
if self.main_model.prompt_price:
cost = prompt_tokens * self.main_model.prompt_price / 1000
cost += completion_tokens * self.main_model.completion_price / 1000
tokens += f", ${cost:.6f} cost"
self.total_cost += cost
show_resp = self.render_incremental_response(True)
if self.pretty:
show_resp = Markdown(
show_resp, style=self.assistant_output_color, code_theme=self.code_theme
)
else:
show_resp = Text(show_resp or "<no response>")
self.io.console.print(show_resp)
self.io.console.print(tokens)
def show_send_output_stream(self, completion, silent):
live = None
if self.pretty and not silent:
live = Live(vertical_overflow="scroll")
try:
if live:
live.start()
for chunk in completion:
if chunk.choices[0].finish_reason == "length":
raise ExhaustedContextWindow()
try:
func = chunk.choices[0].delta.function_call
# dump(func)
for k, v in func.items():
if k in self.partial_response_function_call:
self.partial_response_function_call[k] += v
else:
self.partial_response_function_call[k] = v
except AttributeError:
pass
try:
text = chunk.choices[0].delta.content
if text:
self.partial_response_content += text
except AttributeError:
pass
if silent:
continue
if self.pretty:
self.live_incremental_response(live, False)
else:
sys.stdout.write(text)
sys.stdout.flush()
finally:
if live:
self.live_incremental_response(live, True)
live.stop()
def live_incremental_response(self, live, final):
show_resp = self.render_incremental_response(final)
if not show_resp:
return
md = Markdown(show_resp, style=self.assistant_output_color, code_theme=self.code_theme)
live.update(md)
def render_incremental_response(self, final):
return self.partial_response_content
def get_context_from_history(self, history):
context = ""
if history:
for msg in history:
context += "\n" + msg["role"].upper() + ": " + msg["content"] + "\n"
return context
def get_commit_message(self, diffs, context):
if len(diffs) >= 4 * 1024 * 4:
self.io.tool_error(
f"Diff is too large for {models.GPT35.name} to generate a commit message."
)
return
diffs = "# Diffs:\n" + diffs
messages = [
dict(role="system", content=prompts.commit_system),
dict(role="user", content=context + diffs),
]
try:
interrupted = self.send(
messages,
model=models.GPT35.name,
silent=True,
)
except openai.error.InvalidRequestError:
self.io.tool_error(
f"Failed to generate commit message using {models.GPT35.name} due to an invalid"
" request."
)
return
commit_message = self.partial_response_content
commit_message = commit_message.strip()
if commit_message and commit_message[0] == '"' and commit_message[-1] == '"':
commit_message = commit_message[1:-1].strip()
if interrupted:
self.io.tool_error(
f"Unable to get commit message from {models.GPT35.name}. Use /commit to try again."
)
return
return commit_message
def get_diffs(self, *args):
if self.pretty:
args = ["--color"] + list(args)
diffs = self.repo.git.diff(*args)
return diffs
def commit(self, history=None, prefix=None, ask=False, message=None, which="chat_files"):
repo = self.repo
if not repo:
return
if not repo.is_dirty():
return
def get_dirty_files_and_diffs(file_list):
diffs = ""
relative_dirty_files = []
for fname in file_list:
relative_fname = self.get_rel_fname(fname)
relative_dirty_files.append(relative_fname)
try:
current_branch_commit_count = len(
list(self.repo.iter_commits(self.repo.active_branch))
)
except git.exc.GitCommandError:
current_branch_commit_count = None
if not current_branch_commit_count:
continue
these_diffs = self.get_diffs("HEAD", "--", relative_fname)
if these_diffs:
diffs += these_diffs + "\n"
return relative_dirty_files, diffs
if which == "repo_files":
all_files = [os.path.join(self.root, f) for f in self.get_all_relative_files()]
relative_dirty_fnames, diffs = get_dirty_files_and_diffs(all_files)
elif which == "chat_files":
relative_dirty_fnames, diffs = get_dirty_files_and_diffs(self.abs_fnames)
else:
raise ValueError(f"Invalid value for 'which': {which}")
if self.show_diffs or ask:
# don't use io.tool_output() because we don't want to log or further colorize
print(diffs)
context = self.get_context_from_history(history)
if message:
commit_message = message
else:
commit_message = self.get_commit_message(diffs, context)
if not commit_message:
commit_message = "work in progress"
if prefix:
commit_message = prefix + commit_message
if ask:
if which == "repo_files":
self.io.tool_output("Git repo has uncommitted changes.")
else:
self.io.tool_output("Files have uncommitted changes.")
res = self.io.prompt_ask(
"Commit before the chat proceeds [y/n/commit message]?",
default=commit_message,
).strip()
self.last_asked_for_commit_time = self.get_last_modified()
self.io.tool_output()
if res.lower() in ["n", "no"]:
self.io.tool_error("Skipped commmit.")
return
if res.lower() not in ["y", "yes"] and res:
commit_message = res
repo.git.add(*relative_dirty_fnames)
full_commit_message = commit_message + "\n\n# Aider chat conversation:\n\n" + context
repo.git.commit("-m", full_commit_message, "--no-verify")
commit_hash = repo.head.commit.hexsha[:7]
self.io.tool_output(f"Commit {commit_hash} {commit_message}")
return commit_hash, commit_message
def get_rel_fname(self, fname):
return os.path.relpath(fname, self.root)
def get_inchat_relative_files(self):
files = [self.get_rel_fname(fname) for fname in self.abs_fnames]
return sorted(set(files))
def get_all_relative_files(self):
if self.repo:
files = self.get_tracked_files()
else:
files = self.get_inchat_relative_files()
return sorted(set(files))
def get_all_abs_files(self):
files = self.get_all_relative_files()
files = [self.abs_root_path(path) for path in files]
return files
def get_last_modified(self):
files = self.get_all_abs_files()
if not files:
return 0
return max(Path(path).stat().st_mtime for path in files)
def get_addable_relative_files(self):
return set(self.get_all_relative_files()) - set(self.get_inchat_relative_files())
def allowed_to_edit(self, path, write_content=None):
full_path = self.abs_root_path(path)
if full_path in self.abs_fnames:
if write_content:
self.io.write_text(full_path, write_content)
return full_path
if not Path(full_path).exists():
question = f"Allow creation of new file {path}?" # noqa: E501
else:
question = f"Allow edits to {path} which was not previously provided?" # noqa: E501
if not self.io.confirm_ask(question):
self.io.tool_error(f"Skipping edit to {path}")
return
if not Path(full_path).exists() and not self.dry_run:
Path(full_path).parent.mkdir(parents=True, exist_ok=True)
Path(full_path).touch()
self.abs_fnames.add(full_path)
# Check if the file is already in the repo
if self.repo:
tracked_files = set(self.get_tracked_files())
relative_fname = self.get_rel_fname(full_path)
if relative_fname not in tracked_files and self.io.confirm_ask(f"Add {path} to git?"):
if not self.dry_run:
self.repo.git.add(full_path)
if write_content:
self.io.write_text(full_path, write_content)
return full_path
def get_tracked_files(self):
if not self.repo:
return []
try:
commit = self.repo.head.commit
except ValueError:
return set()
files = []
for blob in commit.tree.traverse():
if blob.type == "blob": # blob is a file
files.append(blob.path)
# convert to appropriate os.sep, since git always normalizes to /
res = set(str(Path(PurePosixPath(path))) for path in files)
return res
apply_update_errors = 0
def apply_updates(self):
max_apply_update_errors = 2
try:
edited = self.update_files()
except ValueError as err:
err = err.args[0]
self.apply_update_errors += 1
if self.apply_update_errors < max_apply_update_errors:
self.io.tool_error(f"Malformed response #{self.apply_update_errors}, retrying...")
self.io.tool_error(str(err))
return None, err
else:
self.io.tool_error(f"Malformed response #{self.apply_update_errors}, aborting.")
return False, None
except Exception as err:
print(err)
print()
traceback.print_exc()
self.apply_update_errors += 1
if self.apply_update_errors < max_apply_update_errors:
self.io.tool_error(f"Update exception #{self.apply_update_errors}, retrying...")
return None, str(err)
else:
self.io.tool_error(f"Update exception #{self.apply_update_errors}, aborting")
return False, None
self.apply_update_errors = 0
if edited:
for path in sorted(edited):
if self.dry_run:
self.io.tool_output(f"Did not apply edit to {path} (--dry-run)")
else:
self.io.tool_output(f"Applied edit to {path}")
return edited, None
def parse_partial_args(self):
# dump(self.partial_response_function_call)
data = self.partial_response_function_call.get("arguments")
if not data:
return
try:
return json.loads(data)
except JSONDecodeError:
pass
try:
return json.loads(data + "]}")
except JSONDecodeError:
pass
try:
return json.loads(data + "}]}")
except JSONDecodeError:
pass
try:
return json.loads(data + '"}]}')
except JSONDecodeError:
pass
def check_model_availability(main_model):
available_models = openai.Model.list()
model_ids = [model.id for model in available_models["data"]]
return main_model.name in model_ids
| [
"\n"
] |
2024-01-10 | HombreOso/aider | benchmark~benchmark.py | #!/usr/bin/env python
import datetime
import json
import os
import random
import re
import shutil
import subprocess
import time
from collections import defaultdict
from json.decoder import JSONDecodeError
from pathlib import Path
from types import SimpleNamespace
from typing import List
import git
import lox
import matplotlib.pyplot as plt
import numpy as np
import openai
import pandas as pd
import prompts
import typer
from imgcat import imgcat
from rich.console import Console
from aider import models
from aider.coders import Coder
from aider.dump import dump # noqa: F401
from aider.io import InputOutput
BENCHMARK_DNAME = Path(os.environ["AIDER_BENCHMARK_DIR"])
ORIGINAL_DNAME = BENCHMARK_DNAME / "exercism-python"
app = typer.Typer(add_completion=False, pretty_exceptions_enable=False)
def show_stats(dirnames):
raw_rows = []
for dirname in dirnames:
row = summarize_results(dirname)
raw_rows.append(row)
return
repeats = []
seen = dict()
rows = []
for row in raw_rows:
if not row:
continue
if row.model == "gpt-3.5-turbo":
row.model = "gpt-3.5-turbo-0613"
if row.edit_format == "diff-func-string":
row.edit_format = "diff-func"
if (
row.model == "gpt-3.5-turbo-0613"
and row.edit_format == "whole"
and "repeat" not in row.dir_name
):
# remember this row, so we can update it with the repeat_avg
repeat_row = len(rows)
pieces = row.model.split("-")
row.model = "-".join(pieces[:3])
if pieces[3:]:
row.model += "\n-" + "-".join(pieces[3:])
if row.completed_tests < 133:
print(f"Warning: {row.dir_name} is incomplete: {row.completed_tests}")
if "repeat" in row.dir_name:
repeats.append(vars(row))
continue
kind = (row.model, row.edit_format)
if kind in seen:
dump(row.dir_name)
dump(seen[kind])
return
seen[kind] = row.dir_name
rows.append(vars(row))
if repeats:
extra = rows[repeat_row]
dump(extra)
repeats.append(extra)
repeats = pd.DataFrame.from_records(repeats)
repeat_max = repeats["pass_rate_2"].max()
repeat_min = repeats["pass_rate_2"].min()
repeat_avg = repeats["pass_rate_2"].mean()
repeat_lo = repeat_avg - repeat_min
repeat_hi = repeat_max - repeat_avg
dump(repeat_max)
dump(repeat_min)
dump(repeat_avg)
# use the average in the main bar
rows[repeat_row]["pass_rate_2"] = repeat_avg
df = pd.DataFrame.from_records(rows)
df.sort_values(by=["model", "edit_format"], inplace=True)
tries = [df.groupby(["model", "edit_format"])["pass_rate_2"].mean()]
if True:
tries += [df.groupby(["model", "edit_format"])["pass_rate_1"].mean()]
plt.rcParams["hatch.linewidth"] = 0.5
plt.rcParams["hatch.color"] = "#444444"
from matplotlib import rc
rc("font", **{"family": "sans-serif", "sans-serif": ["Helvetica"], "size": 10})
fig, ax = plt.subplots(figsize=(6, 4))
ax.grid(axis="y", zorder=0, lw=0.2)
zorder = 1
for grouped in tries:
zorder += 1
df = grouped.unstack()
num_models, num_formats = df.shape
pos = np.array(range(num_models))
width = 0.8 / num_formats
formats = df.columns
models = df.index
for i, fmt in enumerate(formats):
if zorder > 1:
edge = dict(
edgecolor="#ffffff",
linewidth=1.5,
)
else:
edge = dict()
if zorder == 2:
edge["label"] = fmt
color = "#b3e6a8" if "diff" in fmt else "#b3d1e6"
hatch = "////" if "func" in fmt else ""
rects = ax.bar(
pos + i * width,
df[fmt],
width * 0.95,
color=color,
hatch=hatch,
zorder=zorder,
**edge,
)
if zorder == 2:
ax.bar_label(rects, padding=4, labels=[f"{v:.0f}%" for v in df[fmt]], size=6)
if len(repeats):
ax.errorbar(
1.4,
repeat_avg,
yerr=[[repeat_lo], [repeat_hi]],
fmt="none",
zorder=5,
capsize=2.5,
elinewidth=1,
markeredgewidth=1,
)
ax.set_xticks([p + 1.5 * width for p in pos])
ax.set_xticklabels(models)
top = 95
ax.annotate(
"First attempt,\nbased on\ninstructions",
xy=(2.9, 51),
xytext=(2.5, top),
horizontalalignment="center",
verticalalignment="top",
arrowprops={"arrowstyle": "->", "connectionstyle": "arc3,rad=0.3"},
)
ax.annotate(
"Second attempt,\nbased on\nunit test errors",
xy=(3.1, 68),
xytext=(4.25, top),
horizontalalignment="center",
verticalalignment="top",
arrowprops={"arrowstyle": "->", "connectionstyle": "arc3,rad=0.3"},
)
ax.set_ylabel("Percent of exercises completed successfully")
# ax.set_xlabel("Model")
ax.set_title("GPT Code Editing")
ax.legend(
title="Edit Format",
loc="upper left",
# bbox_to_anchor=(0.95, 0.95),
)
ax.set_ylim(top=100)
plt.tight_layout()
plt.savefig("tmp.svg")
imgcat(fig)
# df.to_csv("tmp.benchmarks.csv")
def resolve_dirname(dirname, use_single_prior, make_new):
if len(dirname.parts) > 1:
return dirname
priors = list(BENCHMARK_DNAME.glob(f"*--{dirname}"))
if len(priors) == 1 and use_single_prior:
dirname = priors[0].name
print(f"Using pre-existing {dirname}")
elif len(priors):
if not make_new:
print(f"Prior runs of {dirname} exist, use --new or name one explicitly")
print()
for prior in priors:
print(prior)
return
if not re.match(r"\d\d\d\d-\d\d-\d\d-", str(dirname)):
now = datetime.datetime.now()
now = now.strftime("%Y-%m-%d-%H-%M-%S--")
dirname = now + dirname.name
dirname = BENCHMARK_DNAME / dirname
return dirname
@app.command()
def main(
dirnames: List[str] = typer.Argument(..., help="Directory names"),
model: str = typer.Option("gpt-3.5-turbo", "--model", "-m", help="Model name"),
edit_format: str = typer.Option(None, "--edit-format", "-e", help="Edit format"),
keyword: str = typer.Option(
None, "--keyword", "-k", help="Only run tests that contain keyword"
),
clean: bool = typer.Option(
False, "--clean", "-c", help="Discard the existing testdir and make a clean copy"
),
cont: bool = typer.Option(False, "--cont", help="Continue the (single) matching testdir"),
make_new: bool = typer.Option(False, "--new", "-n", help="Make a new dated testdir"),
no_unit_tests: bool = typer.Option(False, "--no-unit-tests", help="Do not run unit tests"),
no_aider: bool = typer.Option(False, "--no-aider", help="Do not run aider"),
verbose: bool = typer.Option(False, "--verbose", "-v", help="Verbose output"),
stats_only: bool = typer.Option(
False, "--stats", "-s", help="Do not run tests, just collect stats on completed tests"
),
tries: int = typer.Option(2, "--tries", "-r", help="Number of tries for running tests"),
threads: int = typer.Option(1, "--threads", "-t", help="Number of threads to run in parallel"),
num_tests: int = typer.Option(-1, "--num-tests", "-n", help="Number of tests to run"),
):
repo = git.Repo(search_parent_directories=True)
commit_hash = repo.head.object.hexsha[:7]
if repo.is_dirty():
commit_hash += "-dirty"
if len(dirnames) > 1 and not stats_only:
print("Only provide 1 dirname unless running with --stats")
return 1
updated_dirnames = []
for dirname in dirnames:
dirname = Path(dirname)
dirname = resolve_dirname(dirname, stats_only or cont, make_new)
if not dirname:
return 1
updated_dirnames.append(dirname)
if stats_only:
return show_stats(updated_dirnames)
assert len(updated_dirnames) == 1, updated_dirnames
dirname = updated_dirnames[0]
if "AIDER_DOCKER" not in os.environ:
print("Warning: benchmarking runs unvetted code from GPT, run in a docker container")
return
assert BENCHMARK_DNAME.exists() and BENCHMARK_DNAME.is_dir(), BENCHMARK_DNAME
assert ORIGINAL_DNAME.exists() and ORIGINAL_DNAME.is_dir(), ORIGINAL_DNAME
if clean and dirname.exists():
print("Cleaning up and replacing", dirname)
dir_files = set(fn.name for fn in dirname.glob("*"))
original_files = set(fn.name for fn in ORIGINAL_DNAME.glob("*"))
if dir_files != original_files:
print("ERROR: will not delete dir that does not look like original tests", dirname)
return
dest = dirname.parent / "OLD" / dirname.name
if dest.exists():
old_now = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
dest = dirname.parent / "OLD" / (old_now + dirname.name)
dirname.rename(dest)
if not dirname.exists():
shutil.copytree(ORIGINAL_DNAME, dirname)
test_dnames = sorted(os.listdir(dirname))
if keyword:
test_dnames = [dn for dn in test_dnames if keyword in dn]
random.shuffle(test_dnames)
if num_tests > 0:
test_dnames = test_dnames[:num_tests]
if threads == 1:
all_results = []
for testname in test_dnames:
results = run_test(
dirname / testname,
model,
edit_format,
tries,
no_unit_tests,
no_aider,
verbose,
commit_hash,
)
all_results.append(results)
summarize_results(dirname)
else:
run_test_threaded = lox.thread(threads)(run_test)
for testname in test_dnames:
run_test_threaded.scatter(
dirname / testname,
model,
edit_format,
tries,
no_unit_tests,
no_aider,
verbose,
commit_hash,
)
all_results = run_test_threaded.gather(tqdm=True)
print()
print()
print()
summarize_results(dirname)
return 0
def summarize_results(dirname):
res = SimpleNamespace()
dirname = Path(dirname)
res.total_tests = len(list(dirname.glob("*")))
all_results = [json.loads(fname.read_text()) for fname in dirname.glob("*/.aider.results.json")]
try:
tries = max(len(results["tests_outcomes"]) for results in all_results if results)
except ValueError:
tries = 0
res.dir_name = str(dirname)
passed_tests = [0] * tries
res.completed_tests = 0
res.duration = 0
res.cost = 0
res.error_outputs = 0
res.user_asks = 0
res.test_timeouts = 0
res.exhausted_context_windows = 0
variants = defaultdict(set)
for results in all_results:
if not results:
continue
res.completed_tests += 1
passed = results["tests_outcomes"][-1]
if passed:
for i in range(len(results["tests_outcomes"]) - 1, tries):
passed_tests[i] += 1
res.cost += results["cost"]
res.duration += results["duration"]
res.test_timeouts += results.get("test_timeouts", 0)
res.error_outputs += results.get("num_error_outputs", 0)
res.user_asks += results.get("num_user_asks", 0)
res.exhausted_context_windows += results.get("num_exhausted_context_windows", 0)
for key in "model edit_format commit_hash".split():
val = results.get(key)
variants[key].add(val)
if not res.completed_tests:
return
console = Console(highlight=False)
console.rule(title=str(dirname))
console.print(f"test-cases: {res.completed_tests}")
for key, val in variants.items():
if len(val) > 1:
style = "red"
else:
style = None
val = ", ".join(map(str, val))
setattr(res, key, val)
console.print(f"{key}: {val}", style=style)
print("num_error_outputs:", res.error_outputs)
print("num_user_asks:", res.user_asks)
style = "red" if res.exhausted_context_windows else None
console.print("num_exhausted_context_windows", res.exhausted_context_windows, style=style)
style = "red" if res.test_timeouts else None
console.print("test_timeouts:", res.test_timeouts, style=style)
console.print()
for i in range(tries):
pass_rate = 100 * passed_tests[i] / res.completed_tests
console.print(f"{pass_rate:.1f}% correct after try {i}")
setattr(res, f"pass_rate_{i+1}", pass_rate)
console.print()
res.avg_duration = res.duration / res.completed_tests
console.print(f"duration: {res.avg_duration:.1f} sec/test-case")
res.avg_cost = res.cost / res.completed_tests
projected_cost = res.avg_cost * res.total_tests
console.print(
f"costs: ${res.avg_cost:.4f}/test-case, ${res.cost:.2f} total,"
f" ${projected_cost:.2f} projected"
)
console.rule()
# print(json.dumps(vars(res), indent=4, sort_keys=True))
return res
def run_test(
testdir, model_name, edit_format, tries, no_unit_tests, no_aider, verbose, commit_hash
):
if not os.path.isdir(testdir):
print("Not a dir:", testdir)
return
testdir = Path(testdir)
history_fname = testdir / ".aider.chat.history.md"
results_fname = testdir / ".aider.results.json"
if results_fname.exists():
try:
res = json.loads(results_fname.read_text())
return res
except JSONDecodeError:
print(f"{results_fname} failed to parse, skipping")
return
fnames = []
for fname in testdir.glob("*"):
if "test" not in fname.name and fname.is_file() and fname.name[0] != ".":
fnames.append(fname)
# restore the original file, in case we interrupted a prev run
# after it had saved changes
original_fname = ORIGINAL_DNAME / testdir.name / fname.name
shutil.copy(original_fname, fname)
file_list = " ".join(fname.name for fname in fnames)
instructions = ""
introduction = testdir / ".docs/introduction.md"
if introduction.exists():
instructions += introduction.read_text()
instructions += (testdir / ".docs/instructions.md").read_text()
instructions_append = testdir / ".docs/instructions.append.md"
if instructions_append.exists():
instructions += instructions_append.read_text()
instructions += prompts.instructions_addendum.format(file_list=file_list)
io = InputOutput(
pretty=True,
yes=False,
chat_history_file=history_fname,
)
main_model = models.Model(model_name)
edit_format = edit_format or main_model.edit_format
dump(main_model)
dump(edit_format)
show_fnames = ",".join(map(str, fnames))
print("fnames:", show_fnames)
openai.api_key = os.environ["OPENAI_API_KEY"]
coder = Coder.create(
main_model,
edit_format,
io,
fnames=fnames,
use_git=False,
stream=False,
pretty=False,
verbose=verbose,
)
timeouts = 0
dur = 0
test_outcomes = []
for i in range(tries):
start = time.time()
if not no_aider:
coder.run(with_message=instructions)
dur += time.time() - start
if coder.num_control_c:
raise KeyboardInterrupt
if no_unit_tests:
break
try:
errors = run_unit_tests(testdir, history_fname)
except subprocess.TimeoutExpired:
errors = "Tests timed out!"
timeouts += 1
if errors:
test_outcomes.append(False)
else:
test_outcomes.append(True)
break
errors = errors.splitlines()
print(errors[-1])
errors = errors[:50]
errors = "\n".join(errors)
instructions = errors
instructions += prompts.test_failures.format(file_list=file_list)
results = dict(
testdir=str(testdir),
testcase=testdir.name,
model=main_model.name,
edit_format=edit_format,
tests_outcomes=test_outcomes,
cost=coder.total_cost,
duration=dur,
test_timeouts=timeouts,
commit_hash=commit_hash,
num_error_outputs=io.num_error_outputs,
num_user_asks=io.num_user_asks,
num_exhausted_context_windows=coder.num_exhausted_context_windows,
chat_hashes=list(
zip(
coder.chat_completion_call_hashes,
coder.chat_completion_response_hashes,
)
),
)
dump(results)
results_fname.write_text(json.dumps(results, indent=4))
return results
def run_unit_tests(testdir, history_fname):
command = [
"python",
"-m",
"unittest",
"discover",
"-s",
str(testdir),
"-t",
str(testdir),
"-p",
"*_test.py",
]
print(" ".join(command))
timeout = 60
result = subprocess.run(
command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True,
timeout=timeout,
)
success = result.returncode == 0
res = result.stdout
res = cleanup_test_output(res)
with history_fname.open("a") as fh:
fh.write(f"```\n{res}\n```")
if not success:
print(f"Tests failed: {testdir}")
return res
def cleanup_test_output(output):
# remove timing info, to avoid randomizing the response to GPT
res = re.sub(
r"^Ran \d+ tests in \d+\.\d+s$",
"",
output,
flags=re.MULTILINE,
)
res = re.sub(
r"^====*$",
"====",
res,
flags=re.MULTILINE,
)
res = re.sub(
r"^----*$",
"----",
res,
flags=re.MULTILINE,
)
return res
if __name__ == "__main__":
app()
| [] |
2024-01-10 | FredGoo/langchain-chinese-chat-models | langchain_c~chat_models~zhipuai.py | """ZhiPu chat wrapper."""
from __future__ import annotations
import logging
import sys
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Mapping,
Optional,
Tuple,
Union,
)
from pydantic import Extra, Field, root_validator
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
AIMessage,
BaseMessage,
ChatGeneration,
ChatMessage,
ChatResult,
HumanMessage,
SystemMessage,
)
from langchain.utils import get_from_dict_or_env
if TYPE_CHECKING:
import tiktoken
logger = logging.getLogger(__name__)
def _import_tiktoken() -> Any:
try:
import tiktoken
except ImportError:
raise ValueError(
"Could not import tiktoken python package. "
"This is needed in order to calculate get_token_ids. "
"Please install it with `pip install tiktoken`."
)
return tiktoken
def _create_retry_decorator(llm: ChatZhiPu) -> Callable[[Any], Any]:
import zhipuai
min_seconds = 1
max_seconds = 60
# Wait 2^x * 1 second between each retry starting with
# 4 seconds, then up to 10 seconds, then 10 seconds afterwards
return retry(
reraise=True,
stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
# retry=(
# retry_if_exception_type(zhipuai..Timeout)
# | retry_if_exception_type(openai.error.APIError)
# | retry_if_exception_type(openai.error.APIConnectionError)
# | retry_if_exception_type(openai.error.RateLimitError)
# | retry_if_exception_type(openai.error.ServiceUnavailableError)
# ),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
async def acompletion_with_retry(llm: ChatZhiPu, **kwargs: Any) -> Any:
"""Use tenacity to retry the async completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
# Use OpenAI's async api https://github.com/openai/openai-python#async-api
return await llm.client.acreate(**kwargs)
return await _completion_with_retry(**kwargs)
def _convert_dict_to_message(_dict: dict) -> BaseMessage:
role = _dict["role"]
if role == "user":
return HumanMessage(content=_dict["content"])
elif role == "assistant":
return AIMessage(content=_dict["content"])
elif role == "system":
return SystemMessage(content=_dict["content"])
else:
return ChatMessage(content=_dict["content"], role=role)
def _convert_message_to_dict(message: BaseMessage) -> dict:
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
else:
raise ValueError(f"Got unknown type {message}")
if "name" in message.additional_kwargs:
message_dict["name"] = message.additional_kwargs["name"]
return message_dict
class ChatZhiPu(BaseChatModel):
"""Wrapper around ZhiPu Chat large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.chat_models import ChatOpenAI
openai = ChatOpenAI(model_name="gpt-3.5-turbo")
"""
@property
def lc_serializable(self) -> bool:
return True
client: Any #: :meta private:
model_name: str = Field(default="chatglm_130b", alias="model")
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
zhipuai_api_key: Optional[str] = None
"""Base URL path for API requests,
leave blank if not using a proxy or service emulator."""
zhipuai_api_base: Optional[str] = None
zhipuai_organization: Optional[str] = None
# to support explicit proxy for OpenAI
thu_proxy: Optional[str] = None
request_timeout: Optional[Union[float, Tuple[float, float]]] = None
"""Timeout for requests to OpenAI completion API. Default is 600 seconds."""
max_retries: int = 6
"""Maximum number of retries to make when generating."""
streaming: bool = False
"""Whether to stream the results or not."""
n: int = 1
"""Number of chat completions to generate for each prompt."""
max_tokens: Optional[int] = None
"""Maximum number of tokens to generate."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.ignore
allow_population_by_field_name = True
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = cls.all_required_field_names()
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["zhipuai_api_key"] = get_from_dict_or_env(
values, "zhipuai_api_key", "ZHIPUAI_API_KEY"
)
values["zhipuai_organization"] = get_from_dict_or_env(
values,
"zhipuai_organization",
"ZHIPUAI_ORGANIZATION",
default="",
)
values["zhipuai_api_base"] = get_from_dict_or_env(
values,
"zhipuai_api_base",
"ZHIPUAI_API_BASE",
default="",
)
values["zhipuai_proxy"] = get_from_dict_or_env(
values,
"zhipuai_proxy",
"ZHIPUAI_PROXY",
default="",
)
try:
import zhipuai
except ImportError:
raise ValueError(
"Could not import openai python package. "
"Please install it with `pip install zhipuai`."
)
try:
zhipuai.api_key = values["zhipuai_api_key"]
values["client"] = zhipuai.model_api
except AttributeError:
raise ValueError(
"`zhipuai` has no `model_api` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade zhipuai`."
)
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["n"] > 1 and values["streaming"]:
raise ValueError("n must be 1 when streaming.")
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
return {
"model": self.model_name,
"request_timeout": self.request_timeout,
"max_tokens": self.max_tokens,
"stream": self.streaming,
"n": self.n,
"temperature": self.temperature,
**self.model_kwargs,
}
def _create_retry_decorator(self) -> Callable[[Any], Any]:
import openai
min_seconds = 1
max_seconds = 60
# Wait 2^x * 1 second between each retry starting with
# 4 seconds, then up to 10 seconds, then 10 seconds afterwards
return retry(
reraise=True,
stop=stop_after_attempt(self.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
# retry=(
# retry_if_exception_type(openai.error.Timeout)
# | retry_if_exception_type(openai.error.APIError)
# | retry_if_exception_type(openai.error.APIConnectionError)
# | retry_if_exception_type(openai.error.RateLimitError)
# | retry_if_exception_type(openai.error.ServiceUnavailableError)
# ),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def completion_with_retry(self, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = self._create_retry_decorator()
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
return self.client.invoke(**kwargs)
return _completion_with_retry(**kwargs)
def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
overall_token_usage: dict = {}
for output in llm_outputs:
if output is None:
# Happens in streaming
continue
token_usage = output["token_usage"]
for k, v in token_usage.items():
if k in overall_token_usage:
overall_token_usage[k] += v
else:
overall_token_usage[k] = v
return {"token_usage": overall_token_usage, "model_name": self.model_name}
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
if self.streaming:
inner_completion = ""
role = "assistant"
params["stream"] = True
for stream_resp in self.completion_with_retry(
messages=message_dicts, **params
):
role = stream_resp["choices"][0]["delta"].get("role", role)
token = stream_resp["choices"][0]["delta"].get("content", "")
inner_completion += token
if run_manager:
run_manager.on_llm_new_token(token)
message = _convert_dict_to_message(
{"content": inner_completion, "role": role}
)
return ChatResult(generations=[ChatGeneration(message=message)])
response = self.completion_with_retry(prompt=message_dicts, **params)
return self._create_chat_result(response)
def _create_message_dicts(
self, messages: List[BaseMessage], stop: Optional[List[str]]
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
params = dict(self._invocation_params)
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
message_dicts = [_convert_message_to_dict(m) for m in messages]
return message_dicts, params
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
generations = []
for res in response["data"]["choices"]:
message = _convert_dict_to_message(res)
gen = ChatGeneration(message=message)
generations.append(gen)
llm_output = {"token_usage": response["data"]["usage"], "model_name": self.model_name}
return ChatResult(generations=generations, llm_output=llm_output)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
if self.streaming:
inner_completion = ""
role = "assistant"
params["stream"] = True
async for stream_resp in await acompletion_with_retry(
self, messages=message_dicts, **params
):
role = stream_resp["choices"][0]["delta"].get("role", role)
token = stream_resp["choices"][0]["delta"].get("content", "")
inner_completion += token
if run_manager:
await run_manager.on_llm_new_token(token)
message = _convert_dict_to_message(
{"content": inner_completion, "role": role}
)
return ChatResult(generations=[ChatGeneration(message=message)])
else:
response = await acompletion_with_retry(
self, messages=message_dicts, **params
)
return self._create_chat_result(response)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _invocation_params(self) -> Mapping[str, Any]:
"""Get the parameters used to invoke the model."""
zhipuai_creds: Dict[str, Any] = {
"api_key": self.zhipuai_api_key,
"api_base": self.zhipuai_api_base,
"organization": self.zhipuai_organization,
"model": self.model_name,
}
# if self.zhipuai_proxy:
# import openai
#
# openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # type: ignore[assignment] # noqa: E501
return {**zhipuai_creds, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "zhipuai-chat"
def _get_encoding_model(self) -> Tuple[str, tiktoken.Encoding]:
tiktoken_ = _import_tiktoken()
model = self.model_name
if model == "gpt-3.5-turbo":
# gpt-3.5-turbo may change over time.
# Returning num tokens assuming gpt-3.5-turbo-0301.
model = "gpt-3.5-turbo-0301"
elif model == "gpt-4":
# gpt-4 may change over time.
# Returning num tokens assuming gpt-4-0314.
model = "gpt-4-0314"
# Returns the number of tokens used by a list of messages.
try:
encoding = tiktoken_.encoding_for_model(model)
except KeyError:
logger.warning("Warning: model not found. Using cl100k_base encoding.")
model = "cl100k_base"
encoding = tiktoken_.get_encoding(model)
return model, encoding
def get_token_ids(self, text: str) -> List[int]:
"""Get the tokens present in the text with tiktoken package."""
# tiktoken NOT supported for Python 3.7 or below
if sys.version_info[1] <= 7:
return super().get_token_ids(text)
_, encoding_model = self._get_encoding_model()
return encoding_model.encode(text)
def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
"""Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package.
Official documentation: https://github.com/openai/openai-cookbook/blob/
main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb"""
if sys.version_info[1] <= 7:
return super().get_num_tokens_from_messages(messages)
model, encoding = self._get_encoding_model()
if model == "gpt-3.5-turbo-0301":
# every message follows <im_start>{role/name}\n{content}<im_end>\n
tokens_per_message = 4
# if there's a name, the role is omitted
tokens_per_name = -1
elif model == "gpt-4-0314":
tokens_per_message = 3
tokens_per_name = 1
else:
raise NotImplementedError(
f"get_num_tokens_from_messages() is not presently implemented "
f"for model {model}."
"See https://github.com/openai/openai-python/blob/main/chatml.md for "
"information on how messages are converted to tokens."
)
num_tokens = 0
messages_dict = [_convert_message_to_dict(m) for m in messages]
for message in messages_dict:
num_tokens += tokens_per_message
for key, value in message.items():
num_tokens += len(encoding.encode(value))
if key == "name":
num_tokens += tokens_per_name
# every reply is primed with <im_start>assistant
num_tokens += 3
return num_tokens | [
"content"
] |
2024-01-10 | FredGoo/langchain-chinese-chat-models | test~zhipuai_test.py | import os
from dotenv import load_dotenv, find_dotenv
from langchain_c.chat_models import ChatZhiPu
from langchain.prompts import ChatPromptTemplate
# read local .env file
_ = load_dotenv(find_dotenv())
gpt_api_key = os.environ['ZHIPUAI_API_KEY']
chat = ChatZhiPu(
temperature=0.9,
model_name="chatglm_130b",
max_tokens=1000,
zhipuai_api_key=gpt_api_key
)
# Prompt 编写
review_template = """
{text}\n
请你提取包含“人”(name, position),“时间”,“事件“,“地点”(location)类型的所有信息,并输出JSON格式
"""
# 创建 ChatPromptTemplate
prompt_template = ChatPromptTemplate.from_template(review_template)
# 用户的商品评价
customer_review = """
2022年11月4日,计算机系通过线上线下相结合的方式在东主楼10-103会议室召开博士研究生导师交流会。\
计算机学科学位分委员会主席吴空,计算机系副主任张建、党委副书记李伟出席会议,博士生研究生导师和教学办工作人员等30余人参加会议,会议由张建主持。
"""
messages = prompt_template.format_messages(text=customer_review)
# 请求
response = chat(messages)
print(response.content)
| [
"\n{text}\n\n请你提取包含“人”(name, position),“时间”,“事件“,“地点”(location)类型的所有信息,并输出JSON格式\n"
] |
2024-01-10 | FredGoo/langchain-chinese-chat-models | langchain_c~chat_models~sample.py | """OpenAI chat wrapper."""
from __future__ import annotations
import logging
import sys
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Mapping,
Optional,
Tuple,
Union,
)
from pydantic import Extra, Field, root_validator
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
AIMessage,
BaseMessage,
ChatGeneration,
ChatMessage,
ChatResult,
HumanMessage,
SystemMessage,
)
from langchain.utils import get_from_dict_or_env
if TYPE_CHECKING:
import tiktoken
logger = logging.getLogger(__name__)
def _import_tiktoken() -> Any:
try:
import tiktoken
except ImportError:
raise ValueError(
"Could not import tiktoken python package. "
"This is needed in order to calculate get_token_ids. "
"Please install it with `pip install tiktoken`."
)
return tiktoken
def _create_retry_decorator(llm: ChatOpenAI) -> Callable[[Any], Any]:
import openai
min_seconds = 1
max_seconds = 60
# Wait 2^x * 1 second between each retry starting with
# 4 seconds, then up to 10 seconds, then 10 seconds afterwards
return retry(
reraise=True,
stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout)
| retry_if_exception_type(openai.error.APIError)
| retry_if_exception_type(openai.error.APIConnectionError)
| retry_if_exception_type(openai.error.RateLimitError)
| retry_if_exception_type(openai.error.ServiceUnavailableError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
async def acompletion_with_retry(llm: ChatOpenAI, **kwargs: Any) -> Any:
"""Use tenacity to retry the async completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
# Use OpenAI's async api https://github.com/openai/openai-python#async-api
return await llm.client.acreate(**kwargs)
return await _completion_with_retry(**kwargs)
def _convert_dict_to_message(_dict: dict) -> BaseMessage:
role = _dict["role"]
if role == "user":
return HumanMessage(content=_dict["content"])
elif role == "assistant":
return AIMessage(content=_dict["content"])
elif role == "system":
return SystemMessage(content=_dict["content"])
else:
return ChatMessage(content=_dict["content"], role=role)
def _convert_message_to_dict(message: BaseMessage) -> dict:
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
else:
raise ValueError(f"Got unknown type {message}")
if "name" in message.additional_kwargs:
message_dict["name"] = message.additional_kwargs["name"]
return message_dict
class ChatOpenAI(BaseChatModel):
"""Wrapper around OpenAI Chat large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.chat_models import ChatOpenAI
openai = ChatOpenAI(model_name="gpt-3.5-turbo")
"""
@property
def lc_serializable(self) -> bool:
return True
client: Any #: :meta private:
model_name: str = Field(default="gpt-3.5-turbo", alias="model")
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
openai_api_key: Optional[str] = None
"""Base URL path for API requests,
leave blank if not using a proxy or service emulator."""
openai_api_base: Optional[str] = None
openai_organization: Optional[str] = None
# to support explicit proxy for OpenAI
openai_proxy: Optional[str] = None
request_timeout: Optional[Union[float, Tuple[float, float]]] = None
"""Timeout for requests to OpenAI completion API. Default is 600 seconds."""
max_retries: int = 6
"""Maximum number of retries to make when generating."""
streaming: bool = False
"""Whether to stream the results or not."""
n: int = 1
"""Number of chat completions to generate for each prompt."""
max_tokens: Optional[int] = None
"""Maximum number of tokens to generate."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.ignore
allow_population_by_field_name = True
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = cls.all_required_field_names()
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["openai_api_key"] = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
values["openai_organization"] = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
values["openai_api_base"] = get_from_dict_or_env(
values,
"openai_api_base",
"OPENAI_API_BASE",
default="",
)
values["openai_proxy"] = get_from_dict_or_env(
values,
"openai_proxy",
"OPENAI_PROXY",
default="",
)
try:
import openai
except ImportError:
raise ValueError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
try:
values["client"] = openai.ChatCompletion
except AttributeError:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`."
)
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["n"] > 1 and values["streaming"]:
raise ValueError("n must be 1 when streaming.")
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
return {
"model": self.model_name,
"request_timeout": self.request_timeout,
"max_tokens": self.max_tokens,
"stream": self.streaming,
"n": self.n,
"temperature": self.temperature,
**self.model_kwargs,
}
def _create_retry_decorator(self) -> Callable[[Any], Any]:
import openai
min_seconds = 1
max_seconds = 60
# Wait 2^x * 1 second between each retry starting with
# 4 seconds, then up to 10 seconds, then 10 seconds afterwards
return retry(
reraise=True,
stop=stop_after_attempt(self.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout)
| retry_if_exception_type(openai.error.APIError)
| retry_if_exception_type(openai.error.APIConnectionError)
| retry_if_exception_type(openai.error.RateLimitError)
| retry_if_exception_type(openai.error.ServiceUnavailableError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def completion_with_retry(self, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = self._create_retry_decorator()
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
return self.client.create(**kwargs)
return _completion_with_retry(**kwargs)
def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
overall_token_usage: dict = {}
for output in llm_outputs:
if output is None:
# Happens in streaming
continue
token_usage = output["token_usage"]
for k, v in token_usage.items():
if k in overall_token_usage:
overall_token_usage[k] += v
else:
overall_token_usage[k] = v
return {"token_usage": overall_token_usage, "model_name": self.model_name}
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
if self.streaming:
inner_completion = ""
role = "assistant"
params["stream"] = True
for stream_resp in self.completion_with_retry(
messages=message_dicts, **params
):
role = stream_resp["choices"][0]["delta"].get("role", role)
token = stream_resp["choices"][0]["delta"].get("content", "")
inner_completion += token
if run_manager:
run_manager.on_llm_new_token(token)
message = _convert_dict_to_message(
{"content": inner_completion, "role": role}
)
return ChatResult(generations=[ChatGeneration(message=message)])
response = self.completion_with_retry(messages=message_dicts, **params)
return self._create_chat_result(response)
def _create_message_dicts(
self, messages: List[BaseMessage], stop: Optional[List[str]]
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
params = dict(self._invocation_params)
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
message_dicts = [_convert_message_to_dict(m) for m in messages]
return message_dicts, params
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
generations = []
for res in response["choices"]:
message = _convert_dict_to_message(res["message"])
gen = ChatGeneration(message=message)
generations.append(gen)
llm_output = {"token_usage": response["usage"], "model_name": self.model_name}
return ChatResult(generations=generations, llm_output=llm_output)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
if self.streaming:
inner_completion = ""
role = "assistant"
params["stream"] = True
async for stream_resp in await acompletion_with_retry(
self, messages=message_dicts, **params
):
role = stream_resp["choices"][0]["delta"].get("role", role)
token = stream_resp["choices"][0]["delta"].get("content", "")
inner_completion += token
if run_manager:
await run_manager.on_llm_new_token(token)
message = _convert_dict_to_message(
{"content": inner_completion, "role": role}
)
return ChatResult(generations=[ChatGeneration(message=message)])
else:
response = await acompletion_with_retry(
self, messages=message_dicts, **params
)
return self._create_chat_result(response)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _invocation_params(self) -> Mapping[str, Any]:
"""Get the parameters used to invoke the model."""
openai_creds: Dict[str, Any] = {
"api_key": self.openai_api_key,
"api_base": self.openai_api_base,
"organization": self.openai_organization,
"model": self.model_name,
}
if self.openai_proxy:
import openai
openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # type: ignore[assignment] # noqa: E501
return {**openai_creds, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "openai-chat"
def _get_encoding_model(self) -> Tuple[str, tiktoken.Encoding]:
tiktoken_ = _import_tiktoken()
model = self.model_name
if model == "gpt-3.5-turbo":
# gpt-3.5-turbo may change over time.
# Returning num tokens assuming gpt-3.5-turbo-0301.
model = "gpt-3.5-turbo-0301"
elif model == "gpt-4":
# gpt-4 may change over time.
# Returning num tokens assuming gpt-4-0314.
model = "gpt-4-0314"
# Returns the number of tokens used by a list of messages.
try:
encoding = tiktoken_.encoding_for_model(model)
except KeyError:
logger.warning("Warning: model not found. Using cl100k_base encoding.")
model = "cl100k_base"
encoding = tiktoken_.get_encoding(model)
return model, encoding
def get_token_ids(self, text: str) -> List[int]:
"""Get the tokens present in the text with tiktoken package."""
# tiktoken NOT supported for Python 3.7 or below
if sys.version_info[1] <= 7:
return super().get_token_ids(text)
_, encoding_model = self._get_encoding_model()
return encoding_model.encode(text)
def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
"""Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package.
Official documentation: https://github.com/openai/openai-cookbook/blob/
main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb"""
if sys.version_info[1] <= 7:
return super().get_num_tokens_from_messages(messages)
model, encoding = self._get_encoding_model()
if model == "gpt-3.5-turbo-0301":
# every message follows <im_start>{role/name}\n{content}<im_end>\n
tokens_per_message = 4
# if there's a name, the role is omitted
tokens_per_name = -1
elif model == "gpt-4-0314":
tokens_per_message = 3
tokens_per_name = 1
else:
raise NotImplementedError(
f"get_num_tokens_from_messages() is not presently implemented "
f"for model {model}."
"See https://github.com/openai/openai-python/blob/main/chatml.md for "
"information on how messages are converted to tokens."
)
num_tokens = 0
messages_dict = [_convert_message_to_dict(m) for m in messages]
for message in messages_dict:
num_tokens += tokens_per_message
for key, value in message.items():
num_tokens += len(encoding.encode(value))
if key == "name":
num_tokens += tokens_per_name
# every reply is primed with <im_start>assistant
num_tokens += 3
return num_tokens | [
"content"
] |
2024-01-10 | FredGoo/langchain-chinese-chat-models | test~xfyun_test.py | import os
from dotenv import load_dotenv, find_dotenv
from langchain.prompts import ChatPromptTemplate
from langchain_c.chat_models import ChatXfyun
# read local .env file
_ = load_dotenv(find_dotenv())
app_id = os.environ['XFYUN_APP_ID']
api_key = os.environ['XFYUN_API_KEY']
api_secret = os.environ['XFYUN_API_SECRET']
chat = ChatXfyun(
temperature=0.9,
max_tokens=1000,
xfyun_app_id=app_id,
xfyun_api_key=api_key,
xfyun_api_secret=api_secret
)
# Prompt 编写
review_template = """
{text}\
请你提取包含“人”(name, position),“时间”,“事件“,“地点”(location)类型的所有信息,并输出JSON格式,人的键值为people
"""
# 创建 ChatPromptTemplate
prompt_template = ChatPromptTemplate.from_template(review_template)
# 用户的商品评价
customer_review = """
2022年11月4日,计算机系通过线上线下相结合的方式在东主楼10-103会议室召开博士研究生导师交流会。\
计算机学科学位分委员会主席吴空,计算机系副主任张建、党委副书记李伟出席会议,博士生研究生导师和教学办工作人员等30余人参加会议,会议由张建主持。\n
"""
messages = prompt_template.format_messages(text=customer_review)
# 请求
response = chat(messages)
print(response.content)
| [
"\n{text}请你提取包含“人”(name, position),“时间”,“事件“,“地点”(location)类型的所有信息,并输出JSON格式,人的键值为people\n"
] |
2024-01-10 | FredGoo/langchain-chinese-chat-models | langchain_c~chat_models~xfyun.py | """Xfyun chat wrapper."""
from __future__ import annotations
import _thread as thread
import base64
import datetime
import hashlib
import hmac
import json
import logging
import ssl
import sys
from datetime import datetime
from time import mktime
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Mapping,
Optional,
Tuple,
Union,
)
from urllib.parse import urlencode
from urllib.parse import urlparse
from wsgiref.handlers import format_date_time
import websocket
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
AIMessage,
BaseMessage,
ChatGeneration,
ChatMessage,
ChatResult,
HumanMessage,
SystemMessage,
)
from langchain.utils import get_from_dict_or_env
from pydantic import Extra, Field, root_validator
from tenacity import (
before_sleep_log,
retry,
stop_after_attempt,
wait_exponential,
)
if TYPE_CHECKING:
import tiktoken
logger = logging.getLogger(__name__)
def _import_tiktoken() -> Any:
try:
import tiktoken
except ImportError:
raise ValueError(
"Could not import tiktoken python package. "
"This is needed in order to calculate get_token_ids. "
"Please install it with `pip install tiktoken`."
)
return tiktoken
def _create_retry_decorator(llm: ChatXfyun) -> Callable[[Any], Any]:
min_seconds = 1
max_seconds = 60
# Wait 2^x * 1 second between each retry starting with
# 4 seconds, then up to 10 seconds, then 10 seconds afterwards
return retry(
reraise=True,
stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
# retry=(
# retry_if_exception_type(openai.error.Timeout)
# | retry_if_exception_type(openai.error.APIError)
# | retry_if_exception_type(openai.error.APIConnectionError)
# | retry_if_exception_type(openai.error.RateLimitError)
# | retry_if_exception_type(openai.error.ServiceUnavailableError)
# ),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
async def acompletion_with_retry(llm: ChatXfyun, **kwargs: Any) -> Any:
"""Use tenacity to retry the async completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
# Use OpenAI's async api https://github.com/openai/openai-python#async-api
return await llm.client.acreate(**kwargs)
return await _completion_with_retry(**kwargs)
def _convert_dict_to_message(_dict: dict) -> BaseMessage:
role = _dict["role"]
if role == "user":
return HumanMessage(content=_dict["content"])
elif role == "assistant":
return AIMessage(content=_dict["content"])
elif role == "system":
return SystemMessage(content=_dict["content"])
else:
return ChatMessage(content=_dict["content"], role=role)
def _convert_message_to_dict(message: BaseMessage) -> dict:
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
else:
raise ValueError(f"Got unknown type {message}")
if "name" in message.additional_kwargs:
message_dict["name"] = message.additional_kwargs["name"]
return message_dict
class ChatXfyun(BaseChatModel):
"""Wrapper around Xfyun Chat large language models.
environment variable ``XFYUN_APP_ID`` set with your app id.
environment variable ``XFYUN_API_KEY`` set with your API key.
environment variable ``XFYUN_API_SECRET`` set with your API Secret.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.chat_models import ChatOpenAI
openai = ChatOpenAI(model_name="gpt-3.5-turbo")
"""
@property
def lc_serializable(self) -> bool:
return True
client: Any #: :meta private:
model_name: str = Field(default="gpt-3.5-turbo", alias="model")
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
xfyun_app_id: Optional[str] = None
"""Holds any model parameters valid for `create` call not explicitly specified."""
xfyun_api_key: Optional[str] = None
xfyun_api_secret: Optional[str] = None
"""Base URL path for API requests,
leave blank if not using a proxy or service emulator."""
xfyun_api_base: Optional[str] = None
xfyun_organization: Optional[str] = None
# to support explicit proxy for OpenAI
xfyun_proxy: Optional[str] = None
request_timeout: Optional[Union[float, Tuple[float, float]]] = None
"""Timeout for requests to OpenAI completion API. Default is 600 seconds."""
max_retries: int = 6
"""Maximum number of retries to make when generating."""
streaming: bool = False
"""Whether to stream the results or not."""
n: int = 1
"""Number of chat completions to generate for each prompt."""
max_tokens: Optional[int] = None
"""Maximum number of tokens to generate."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.ignore
allow_population_by_field_name = True
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = cls.all_required_field_names()
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["xfyun_app_id"] = get_from_dict_or_env(
values, "xfyun_app_id", "XFYUN_APP_ID"
)
values["xfyun_api_key"] = get_from_dict_or_env(
values, "xfyun_api_key", "XFYUN_API_KEY"
)
values["xfyun_api_secret"] = get_from_dict_or_env(
values, "xfyun_api_secret", "XFYUN_API_SECRET"
)
values["xfyun_organization"] = get_from_dict_or_env(
values,
"xfyun_organization",
"XFYUN_ORGANIZATION",
default="",
)
values["xfyun_api_base"] = get_from_dict_or_env(
values,
"xfyun_api_base",
"XFYUN_API_BASE",
default="wss://spark-api.xf-yun.com/v1.1/chat",
)
values["xfyun_proxy"] = get_from_dict_or_env(
values,
"xfyun_proxy",
"XFYUN_PROXY",
default="",
)
# try:
# values["client"] = None
# except AttributeError:
# raise ValueError(
# "`xfyun` client not found "
# )
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["n"] > 1 and values["streaming"]:
raise ValueError("n must be 1 when streaming.")
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
return {
"model": self.model_name,
"request_timeout": self.request_timeout,
"max_tokens": self.max_tokens,
"stream": self.streaming,
"n": self.n,
"temperature": self.temperature,
**self.model_kwargs,
}
def _create_retry_decorator(self) -> Callable[[Any], Any]:
min_seconds = 1
max_seconds = 60
# Wait 2^x * 1 second between each retry starting with
# 4 seconds, then up to 10 seconds, then 10 seconds afterwards
return retry(
reraise=True,
stop=stop_after_attempt(self.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
# retry=(
# retry_if_exception_type(openai.error.Timeout)
# | retry_if_exception_type(openai.error.APIError)
# | retry_if_exception_type(openai.error.APIConnectionError)
# | retry_if_exception_type(openai.error.RateLimitError)
# | retry_if_exception_type(openai.error.ServiceUnavailableError)
# ),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def create_url(self):
# 生成RFC1123格式的时间戳
now = datetime.now()
date = format_date_time(mktime(now.timetuple()))
host = urlparse(self.xfyun_api_base).netloc
path = urlparse(self.xfyun_api_base).path
gpt_url = self.xfyun_api_base
# 拼接字符串
signature_origin = "host: " + host + "\n"
signature_origin += "date: " + date + "\n"
signature_origin += "GET " + path + " HTTP/1.1"
# 进行hmac-sha256进行加密
signature_sha = hmac.new(self.xfyun_api_secret.encode('utf-8'), signature_origin.encode('utf-8'),
digestmod=hashlib.sha256).digest()
signature_sha_base64 = base64.b64encode(signature_sha).decode(encoding='utf-8')
authorization_origin = f'api_key="{self.xfyun_api_key}", algorithm="hmac-sha256", headers="host date request-line", signature="{signature_sha_base64}"'
authorization = base64.b64encode(authorization_origin.encode('utf-8')).decode(encoding='utf-8')
# 将请求的鉴权参数组合为字典
v = {
"authorization": authorization,
"date": date,
"host": host
}
# 拼接鉴权参数,生成url
url = gpt_url + '?' + urlencode(v)
# 此处打印出建立连接时候的url,参考本demo的时候可取消上方打印的注释,比对相同参数时生成的url与自己代码生成的url是否一致
return url
content = ""
ws_status = 1
def completion_with_retry(self, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = self._create_retry_decorator()
def on_open(ws):
thread.start_new_thread(run, (ws,))
def run(ws, *args):
data = json.dumps(gen_params(appid=ws.appid, messages=ws.messages))
ws.send(data)
def gen_params(appid, messages):
"""
通过appid和用户的提问来生成请参数
"""
data = {
"header": {
"app_id": appid,
"uid": "1234"
},
"parameter": {
"chat": {
"domain": "general",
"random_threshold": 0.5,
"max_tokens": 2048,
"auditing": "default"
}
},
"payload": {
"message": {
"text": messages['messages']
}
}
}
return data
def on_message(ws, message):
data = json.loads(message)
code = data['header']['code']
if code != 0:
print(f'请求错误: {code}, {data}')
ws.close()
else:
choices = data["payload"]["choices"]
status = choices["status"]
self.content += choices["text"][0]["content"]
if status == 2:
print(self.content, end='')
self.ws_status = 2
ws.close()
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
websocket.enableTrace(False)
ws_url = self.create_url()
ws = websocket.WebSocketApp(ws_url, on_message=on_message, on_open=on_open)
ws.appid = self.xfyun_app_id
ws.messages = kwargs
ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})
while self.ws_status == 1:
continue
return self.content
return _completion_with_retry(**kwargs)
def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
overall_token_usage: dict = {}
for output in llm_outputs:
if output is None:
# Happens in streaming
continue
token_usage = output["token_usage"]
for k, v in token_usage.items():
if k in overall_token_usage:
overall_token_usage[k] += v
else:
overall_token_usage[k] = v
return {"token_usage": overall_token_usage, "model_name": self.model_name}
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
if self.streaming:
inner_completion = ""
role = "assistant"
params["stream"] = True
for stream_resp in self.completion_with_retry(
messages=message_dicts, **params
):
role = stream_resp["choices"][0]["delta"].get("role", role)
token = stream_resp["choices"][0]["delta"].get("content", "")
inner_completion += token
if run_manager:
run_manager.on_llm_new_token(token)
message = _convert_dict_to_message(
{"content": inner_completion, "role": role}
)
return ChatResult(generations=[ChatGeneration(message=message)])
response = self.completion_with_retry(messages=message_dicts, **params)
return self._create_chat_result(response)
def _create_message_dicts(
self, messages: List[BaseMessage], stop: Optional[List[str]]
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
params = dict(self._invocation_params)
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
message_dicts = [_convert_message_to_dict(m) for m in messages]
return message_dicts, params
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
generations = []
for res in response["choices"]:
message = _convert_dict_to_message(res["message"])
gen = ChatGeneration(message=message)
generations.append(gen)
llm_output = {"token_usage": response["usage"], "model_name": self.model_name}
return ChatResult(generations=generations, llm_output=llm_output)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
if self.streaming:
inner_completion = ""
role = "assistant"
params["stream"] = True
async for stream_resp in await acompletion_with_retry(
self, messages=message_dicts, **params
):
role = stream_resp["choices"][0]["delta"].get("role", role)
token = stream_resp["choices"][0]["delta"].get("content", "")
inner_completion += token
if run_manager:
await run_manager.on_llm_new_token(token)
message = _convert_dict_to_message(
{"content": inner_completion, "role": role}
)
return ChatResult(generations=[ChatGeneration(message=message)])
else:
response = await acompletion_with_retry(
self, messages=message_dicts, **params
)
return self._create_chat_result(response)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _invocation_params(self) -> Mapping[str, Any]:
"""Get the parameters used to invoke the model."""
openai_creds: Dict[str, Any] = {
"app_id": self.xfyun_app_id,
"api_key": self.xfyun_api_key,
"api_secret": self.xfyun_api_secret,
"api_base": self.xfyun_api_base,
"organization": self.xfyun_organization,
"model": self.model_name,
}
# todo
# if self.xfyun_proxy:
# import openai
#
# openai.proxy = {"http": self.openai_proxy,
# "https": self.openai_proxy} # type: ignore[assignment] # noqa: E501
return {**openai_creds, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "xfyun-chat"
def _get_encoding_model(self) -> Tuple[str, tiktoken.Encoding]:
tiktoken_ = _import_tiktoken()
model = self.model_name
if model == "gpt-3.5-turbo":
# gpt-3.5-turbo may change over time.
# Returning num tokens assuming gpt-3.5-turbo-0301.
model = "gpt-3.5-turbo-0301"
elif model == "gpt-4":
# gpt-4 may change over time.
# Returning num tokens assuming gpt-4-0314.
model = "gpt-4-0314"
# Returns the number of tokens used by a list of messages.
try:
encoding = tiktoken_.encoding_for_model(model)
except KeyError:
logger.warning("Warning: model not found. Using cl100k_base encoding.")
model = "cl100k_base"
encoding = tiktoken_.get_encoding(model)
return model, encoding
def get_token_ids(self, text: str) -> List[int]:
"""Get the tokens present in the text with tiktoken package."""
# tiktoken NOT supported for Python 3.7 or below
if sys.version_info[1] <= 7:
return super().get_token_ids(text)
_, encoding_model = self._get_encoding_model()
return encoding_model.encode(text)
def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
"""Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package.
Official documentation: https://github.com/openai/openai-cookbook/blob/
main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb"""
if sys.version_info[1] <= 7:
return super().get_num_tokens_from_messages(messages)
model, encoding = self._get_encoding_model()
if model == "gpt-3.5-turbo-0301":
# every message follows <im_start>{role/name}\n{content}<im_end>\n
tokens_per_message = 4
# if there's a name, the role is omitted
tokens_per_name = -1
elif model == "gpt-4-0314":
tokens_per_message = 3
tokens_per_name = 1
else:
raise NotImplementedError(
f"get_num_tokens_from_messages() is not presently implemented "
f"for model {model}."
"See https://github.com/openai/openai-python/blob/main/chatml.md for "
"information on how messages are converted to tokens."
)
num_tokens = 0
messages_dict = [_convert_message_to_dict(m) for m in messages]
for message in messages_dict:
num_tokens += tokens_per_message
for key, value in message.items():
num_tokens += len(encoding.encode(value))
if key == "name":
num_tokens += tokens_per_name
# every reply is primed with <im_start>assistant
num_tokens += 3
return num_tokens
| [
"content"
] |
2024-01-10 | Audio-AGI/AudioSep | models~CLAP~open_clip~factory.py | import json
import logging
import os
import pathlib
import re
from copy import deepcopy
from pathlib import Path
import torch
from .model import CLAP, convert_weights_to_fp16
from .openai import load_openai_model
from .pretrained import get_pretrained_url, download_pretrained
from .transform import image_transform
_MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
_MODEL_CONFIGS = {} # directory (model_name: config) of model architecture configs
def _natural_key(string_):
return [int(s) if s.isdigit() else s for s in re.split(r"(\d+)", string_.lower())]
def _rescan_model_configs():
global _MODEL_CONFIGS
config_ext = (".json",)
config_files = []
for config_path in _MODEL_CONFIG_PATHS:
if config_path.is_file() and config_path.suffix in config_ext:
config_files.append(config_path)
elif config_path.is_dir():
for ext in config_ext:
config_files.extend(config_path.glob(f"*{ext}"))
for cf in config_files:
if os.path.basename(cf)[0] == ".":
continue # Ignore hidden files
with open(cf, "r") as f:
model_cfg = json.load(f)
if all(a in model_cfg for a in ("embed_dim", "audio_cfg", "text_cfg")):
_MODEL_CONFIGS[cf.stem] = model_cfg
_MODEL_CONFIGS = {
k: v
for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))
}
_rescan_model_configs() # initial populate of model config registry
def load_state_dict(checkpoint_path: str, map_location="cpu", skip_params=True):
checkpoint = torch.load(checkpoint_path, map_location=map_location)
if isinstance(checkpoint, dict) and "state_dict" in checkpoint:
state_dict = checkpoint["state_dict"]
else:
state_dict = checkpoint
if skip_params:
if next(iter(state_dict.items()))[0].startswith("module"):
state_dict = {k[7:]: v for k, v in state_dict.items()}
# for k in state_dict:
# if k.startswith('transformer'):
# v = state_dict.pop(k)
# state_dict['text_branch.' + k[12:]] = v
return state_dict
def create_model(
amodel_name: str,
tmodel_name: str,
pretrained: str = "",
precision: str = "fp32",
device: torch.device = torch.device("cpu"),
jit: bool = False,
force_quick_gelu: bool = False,
openai_model_cache_dir: str = os.path.expanduser("~/.cache/clip"),
skip_params=True,
pretrained_audio: str = "",
pretrained_text: str = "",
enable_fusion: bool = False,
fusion_type: str = "None"
# pretrained_image: bool = False,
):
amodel_name = amodel_name.replace(
"/", "-"
) # for callers using old naming with / in ViT names
pretrained_orig = pretrained
pretrained = pretrained.lower()
if pretrained == "openai":
if amodel_name in _MODEL_CONFIGS:
logging.info(f"Loading {amodel_name} model config.")
model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name])
else:
logging.error(
f"Model config for {amodel_name} not found; available models {list_models()}."
)
raise RuntimeError(f"Model config for {amodel_name} not found.")
logging.info(f"Loading pretrained ViT-B-16 text encoder from OpenAI.")
# Hard Code in model name
model_cfg["text_cfg"]["model_type"] = tmodel_name
model = load_openai_model(
"ViT-B-16",
model_cfg,
device=device,
jit=jit,
cache_dir=openai_model_cache_dir,
enable_fusion=enable_fusion,
fusion_type=fusion_type,
)
# See https://discuss.pytorch.org/t/valueerror-attemting-to-unscale-fp16-gradients/81372
if precision == "amp" or precision == "fp32":
model = model.float()
else:
if amodel_name in _MODEL_CONFIGS:
logging.info(f"Loading {amodel_name} model config.")
model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name])
else:
logging.error(
f"Model config for {amodel_name} not found; available models {list_models()}."
)
raise RuntimeError(f"Model config for {amodel_name} not found.")
if force_quick_gelu:
# override for use of QuickGELU on non-OpenAI transformer models
model_cfg["quick_gelu"] = True
# if pretrained_image:
# if 'timm_amodel_name' in model_cfg.get('vision_cfg', {}):
# # pretrained weight loading for timm models set via vision_cfg
# model_cfg['vision_cfg']['timm_model_pretrained'] = True
# else:
# assert False, 'pretrained image towers currently only supported for timm models'
model_cfg["text_cfg"]["model_type"] = tmodel_name
model_cfg["enable_fusion"] = enable_fusion
model_cfg["fusion_type"] = fusion_type
model = CLAP(**model_cfg)
if pretrained:
checkpoint_path = ""
url = get_pretrained_url(amodel_name, pretrained)
if url:
checkpoint_path = download_pretrained(url, root=openai_model_cache_dir)
elif os.path.exists(pretrained_orig):
checkpoint_path = pretrained_orig
if checkpoint_path:
logging.info(
f"Loading pretrained {amodel_name}-{tmodel_name} weights ({pretrained})."
)
ckpt = load_state_dict(checkpoint_path, skip_params=True)
model.load_state_dict(ckpt)
param_names = [n for n, p in model.named_parameters()]
# for n in param_names:
# print(n, "\t", "Loaded" if n in ckpt else "Unloaded")
else:
logging.warning(
f"Pretrained weights ({pretrained}) not found for model {amodel_name}."
)
raise RuntimeError(
f"Pretrained weights ({pretrained}) not found for model {amodel_name}."
)
if pretrained_audio:
if amodel_name.startswith("PANN"):
if "Cnn14_mAP" in pretrained_audio: # official checkpoint
audio_ckpt = torch.load(pretrained_audio, map_location="cpu")
audio_ckpt = audio_ckpt["model"]
keys = list(audio_ckpt.keys())
for key in keys:
if (
"spectrogram_extractor" not in key
and "logmel_extractor" not in key
):
v = audio_ckpt.pop(key)
audio_ckpt["audio_branch." + key] = v
elif os.path.basename(pretrained_audio).startswith(
"PANN"
): # checkpoint trained via HTSAT codebase
audio_ckpt = torch.load(pretrained_audio, map_location="cpu")
audio_ckpt = audio_ckpt["state_dict"]
keys = list(audio_ckpt.keys())
for key in keys:
if key.startswith("sed_model"):
v = audio_ckpt.pop(key)
audio_ckpt["audio_branch." + key[10:]] = v
elif os.path.basename(pretrained_audio).startswith(
"finetuned"
): # checkpoint trained via linear probe codebase
audio_ckpt = torch.load(pretrained_audio, map_location="cpu")
else:
raise ValueError("Unknown audio checkpoint")
elif amodel_name.startswith("HTSAT"):
if "HTSAT_AudioSet_Saved" in pretrained_audio: # official checkpoint
audio_ckpt = torch.load(pretrained_audio, map_location="cpu")
audio_ckpt = audio_ckpt["state_dict"]
keys = list(audio_ckpt.keys())
for key in keys:
if key.startswith("sed_model") and (
"spectrogram_extractor" not in key
and "logmel_extractor" not in key
):
v = audio_ckpt.pop(key)
audio_ckpt["audio_branch." + key[10:]] = v
elif os.path.basename(pretrained_audio).startswith(
"HTSAT"
): # checkpoint trained via HTSAT codebase
audio_ckpt = torch.load(pretrained_audio, map_location="cpu")
audio_ckpt = audio_ckpt["state_dict"]
keys = list(audio_ckpt.keys())
for key in keys:
if key.startswith("sed_model"):
v = audio_ckpt.pop(key)
audio_ckpt["audio_branch." + key[10:]] = v
elif os.path.basename(pretrained_audio).startswith(
"finetuned"
): # checkpoint trained via linear probe codebase
audio_ckpt = torch.load(pretrained_audio, map_location="cpu")
else:
raise ValueError("Unknown audio checkpoint")
else:
raise f"this audio encoder pretrained checkpoint is not support"
model.load_state_dict(audio_ckpt, strict=False)
logging.info(
f"Loading pretrained {amodel_name} weights ({pretrained_audio})."
)
param_names = [n for n, p in model.named_parameters()]
for n in param_names:
print(n, "\t", "Loaded" if n in audio_ckpt else "Unloaded")
model.to(device=device)
if precision == "fp16":
assert device.type != "cpu"
convert_weights_to_fp16(model)
if jit:
model = torch.jit.script(model)
return model, model_cfg
def create_model_and_transforms(
model_name: str,
pretrained: str = "",
precision: str = "fp32",
device: torch.device = torch.device("cpu"),
jit: bool = False,
force_quick_gelu: bool = False,
# pretrained_image: bool = False,
):
model = create_model(
model_name,
pretrained,
precision,
device,
jit,
force_quick_gelu=force_quick_gelu,
# pretrained_image=pretrained_image
)
preprocess_train = image_transform(model.visual.image_size, is_train=True)
preprocess_val = image_transform(model.visual.image_size, is_train=False)
return model, preprocess_train, preprocess_val
def list_models():
"""enumerate available model architectures based on config files"""
return list(_MODEL_CONFIGS.keys())
def add_model_config(path):
"""add model config path or file and update registry"""
if not isinstance(path, Path):
path = Path(path)
_MODEL_CONFIG_PATHS.append(path)
_rescan_model_configs()
| [] |
2024-01-10 | duxton/CarInsuranceChatbotWithLLM | UIChatbot.py | from tkinter import *
import json
import openai
# GUI
root = Tk()
root.title("Chatbot")
BG_GRAY = "#FFFFFF"
BG_COLOR = "#FFFFFF"
TEXT_COLOR = "#000000"
FONT = "Helvetica 14"
FONT_BOLD = "Helvetica 13 bold"
api_key = "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"
openai.api_key = api_key
# Send function
def send():
# send is user input
send = "Me: " + e.get()
# txt is respond from gpt
txt.insert(END, "\n" + send)
# Call GPT3 model and store the response to a variable and return
# Replace with GPT response
#new_prompt = "My Car proton model x70 year 2021 ncd 25%, how much would my car insurance be?"
new_prompt = e.get() + " ->"
if "FAQ-> " in e.get():
# for FAQ questions
replaced_prompt = new_prompt.replace("FAQ-> ", '')
answer = openai.Completion.create(
model="davinci:ft-pogtdev-2023-06-11-07-19-08",
prompt=replaced_prompt,
temperature=1,
max_tokens=100,
top_p=1,
best_of=5,
frequency_penalty=1,
presence_penalty=1
)
else:
# For car price
replaced_prompt = new_prompt.replace("Car-> ", '')
answer = openai.Completion.create(
model="davinci:ft-pogtdev-2023-06-11-07-19-08",
prompt=replaced_prompt,
temperature=1,
max_tokens=20,
top_p=1,
best_of=5,
frequency_penalty=1,
presence_penalty=1
)
txt.insert(END, "\n" + "POIBOT: " + answer['choices'][0]['text'])
# https://www.geeksforgeeks.org/gui-chat-application-using-tkinter-in-python/
# get user input
user = e.get().lower()
e.delete(0, END)
# Welcome on the top
lable1 = Label(root, bg=BG_COLOR, fg=TEXT_COLOR, text="Welcome", font=FONT_BOLD, pady=10, width=55, height=1).grid(
row=0)
# Display text
txt = Text(root, bg=BG_COLOR, fg=TEXT_COLOR, font=FONT, width=60)
txt.grid(row=1, column=0, columnspan=2)
txt.insert(END, "\n" + "POIBOT: Hi Welcome, how may I help you? Please include FAQ or Car to get a better result")
# Scroll bar
scrollbar = Scrollbar(txt)
scrollbar.place(relheight=1, relx=0.974)
# Input from user
e = Entry(root, bg="#FFFFFF", fg=TEXT_COLOR, font=FONT, width=55)
e.grid(row=2, column=0)
# Sent Btn
send = Button(root, text="Send", font=FONT_BOLD, bg=BG_GRAY,
command=send).grid(row=2, column=1)
root.mainloop()
| [
" ->",
"e.get() + \" ->"
] |
2024-01-10 | martincooperbiz/aipif-kids | src~text~Gpt4t8kTextMaker.py |
import os
import openai
from text.TextMaker import TextMaker
openai.api_key = os.getenv("OPENAI_API_KEY")
class Gpt4t8kTextMaker(TextMaker):
def make_text(self, prompt_dict:dict):
completion = openai.ChatCompletion.create(
model = 'gpt-4',
messages = [
{'role': 'user', 'content': prompt_dict['positive_prompt_text']}
],
temperature = 0.6
# temperature = 0.7
# temperature = 0.55
)
return completion['choices'][0]['message']['content']
# def generate_prompt(animal):
# return """Suggest three names for an animal that is a superhero.
# Animal: Cat
# Names: Captain Sharpclaw, Agent Fluffball, The Incredible Feline
# Animal: Dog
# Names: Ruff the Protector, Wonder Canine, Sir Barks-a-Lot
# Animal: {}
# Names:""".format(
# animal.capitalize()
# )
| [
"positive_prompt_text"
] |
2024-01-10 | martincooperbiz/aipif-kids | src~text~Gpt35TextMaker.py |
import os
import openai
from text.TextMaker import TextMaker
openai.api_key = os.getenv("OPENAI_API_KEY")
class Gpt35TextMaker(TextMaker):
def make_text(self, prompt_dict:dict):
completion = openai.ChatCompletion.create(
model = 'gpt-3.5-turbo',
messages = [
{'role': 'user', 'content': prompt_dict['positive_prompt_text']}
],
temperature = 0.6
# temperature = 0.7
# temperature = 0.55
)
return completion['choices'][0]['message']['content']
# def generate_prompt(animal):
# return """Suggest three names for an animal that is a superhero.
# Animal: Cat
# Names: Captain Sharpclaw, Agent Fluffball, The Incredible Feline
# Animal: Dog
# Names: Ruff the Protector, Wonder Canine, Sir Barks-a-Lot
# Animal: {}
# Names:""".format(
# animal.capitalize()
# )
| [
"positive_prompt_text"
] |
2024-01-10 | martincooperbiz/aipif-kids | src~text~Davinci003TextMaker.py | import os
import openai
from text.TextMaker import TextMaker
openai.api_key = os.getenv("OPENAI_API_KEY")
class Davinci003TextMaker(TextMaker):
def make_text(self, prompt_dict:dict):
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt_dict['positive_prompt_text'],
temperature=0.6,
)
return response
| [
"positive_prompt_text"
] |
2024-01-10 | BYZANTINE26/DocAI | creating_vector_store.py | from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import ElasticKnnSearch, Pinecone, Weaviate, FAISS
from langchain.document_loaders import DirectoryLoader
from langchain.document_loaders import UnstructuredHTMLLoader
import os
import pinecone
from langchain.llms import OpenAI
from langchain.callbacks import get_openai_callback
import time
from keys import OPENAI_API_KEY
# PINECONE_API_KEY = PINECONE_API_KEY
# PINECONE_ENV = PINECONE_ENV
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
def pinecone_create_vector_store(software):
print(software)
loader = DirectoryLoader(f'/home/yuvraj/projects/docai/test/{software}', glob="**/*.html", use_multithreading=True, loader_cls=UnstructuredHTMLLoader, show_progress=True)
data = loader.load()
print(data)
text_splitter = CharacterTextSplitter(separator="\n", chunk_size=1000, chunk_overlap=200, length_function=len)
docs = text_splitter.split_documents(data)
# file = open('docs.txt', 'w')
# for item in docs:
# file.write(item.page_content+"\n")
# file.close()
embeddings = OpenAIEmbeddings()
# pinecone.init(api_key=PINECONE_API_KEY, environment=PINECONE_ENV)
# index_name = software
# pinecone.create_index(name=index_name, metric="cosine", shards=1, dimension=16000)
print(len(docs))
vector_store = None
token_count = 0
pre_tokens = 0
with get_openai_callback() as cb:
for i, doc in enumerate(docs):
print(i)
if vector_store is None:
vector_store = FAISS.from_documents([doc], embeddings)
else:
vector_store.add_documents([doc])
print(f'added to vector store doc {i}')
print(cb.total_tokens)
print(pre_tokens)
token_count += cb.total_tokens - pre_tokens
pre_tokens = cb.total_tokens
print(token_count)
if token_count > 995000:
time.sleep(45)
token_count = 0
print(token_count)
vector_store.save_local(f'/home/yuvraj/projects/docai/vector_stores/{software}', index_name=software)
return(f'uploaded vector {software}')
# vector = FAISS.from_documents(docs, embeddings, index_name=index_name)
# vector.save_local()
| [] |
2024-01-10 | Kiash254/QA-chatbot-with-Gemini-streamlit | chatpdf.py | import streamlit as st
from PyPDF2 import PdfReader
from langchain.text_splitter import RecursiveCharacterTextSplitter
import os
from langchain_google_genai import GoogleGenerativeAIEmbeddings
import google.generativeai as genai
from langchain.vectorstores import FAISS
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.chains.question_answering import load_qa_chain
from langchain.prompts import PromptTemplate
from dotenv import load_dotenv
# Load the environment variables from a .env file
load_dotenv()
os.getenv("GOOGLE_API_KEY")
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
# Function to extract text from PDFs and return a list of strings
def get_pdf_text(pdf_docs):
text = ""
for pdf in pdf_docs:
pdf_reader = PdfReader(pdf)
for page in pdf_reader.pages:
text += page.extract_text()
return text
# Function to split the text into chunks
def get_text_chunks(text):
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
chunks = text_splitter.split_text(text)
return chunks
# Function to create and save the vector store database
def create_vector_store(text_chunks):
embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
vector_store.save_local("faiss_index")
# Function to load the vector store database
def load_vector_store():
embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
return FAISS.load_local("faiss_index", embeddings)
# Function to initiate the conversational chain
def initiate_conversational_chain():
prompt_template = """
Answer the question as detailed as possible from the provided context. If the answer is not available in
provided context, indicate that the information is not present. Don't provide a wrong answer.\n\n
Context:\n{context}?\n
Question:\n{question}\n
Answer:
"""
model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
return load_qa_chain(model, chain_type="stuff", prompt=prompt)
# Function to interact with the PDFs based on user input
def interact_with_pdfs(user_question, db):
chain = initiate_conversational_chain()
docs = db.similarity_search(user_question)
response = chain({"input_documents": docs, "question": user_question}, return_only_outputs=True)
return response["output_text"]
# Main function to run the Streamlit app
def main():
st.set_page_config("Chat PDF")
st.header("Chat with PDF using Gemini💁")
user_question = st.text_input("Ask a Question from the PDF Files")
if user_question:
vector_store = load_vector_store()
st.write("Reply: ", interact_with_pdfs(user_question, vector_store))
with st.sidebar:
st.title("Menu:")
pdf_docs = st.file_uploader("Upload your PDF Files and Click on the Submit & Process Button", accept_multiple_files=True)
if st.button("Submit & Process"):
with st.spinner("Processing..."):
raw_text = get_pdf_text(pdf_docs)
text_chunks = get_text_chunks(raw_text)
create_vector_store(text_chunks)
st.success("Done")
# Execute the main function
if __name__ == "__main__":
main()
| [
"\n Answer the question as detailed as possible from the provided context. If the answer is not available in\n provided context, indicate that the information is not present. Don't provide a wrong answer.\n\n\n Context:\n{context}?\n\n Question:\n{question}\n\n\n Answer:\n ",
"question",
"context"
] |
2024-01-10 | Kiash254/QA-chatbot-with-Gemini-streamlit | csv_chat.py | import streamlit as st
from streamlit_chat import message
import tempfile
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.llms import CTransformers
from langchain.chains import ConversationalRetrievalChain
DB_FAISS_PATH = 'vectorstore/db_faiss'
#Loading the model
def load_llm():
# Load the locally downloaded model here
llm = CTransformers(
model = "llama-2-7b-chat.ggmlv3.q8_0.bin",
model_type="llama",
max_new_tokens = 512,
temperature = 0.5
)
return llm
st.title("Chat with CSV using Llama2 🦙🦜")
st.markdown("<h3 style='text-align: center; color: white;'>Built by <a href='https://github.com/AIAnytime'>AI Anytime with ❤️ </a></h3>", unsafe_allow_html=True)
uploaded_file = st.sidebar.file_uploader("Upload your Data", type="csv")
if uploaded_file :
#use tempfile because CSVLoader only accepts a file_path
with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
tmp_file.write(uploaded_file.getvalue())
tmp_file_path = tmp_file.name
loader = CSVLoader(file_path=tmp_file_path, encoding="utf-8", csv_args={
'delimiter': ','})
data = loader.load()
#st.json(data)
embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2',
model_kwargs={'device': 'cpu'})
db = FAISS.from_documents(data, embeddings)
db.save_local(DB_FAISS_PATH)
llm = load_llm()
chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=db.as_retriever())
def conversational_chat(query):
result = chain({"question": query, "chat_history": st.session_state['history']})
st.session_state['history'].append((query, result["answer"]))
return result["answer"]
if 'history' not in st.session_state:
st.session_state['history'] = []
if 'generated' not in st.session_state:
st.session_state['generated'] = ["Hello ! Ask me anything about " + uploaded_file.name + " 🤗"]
if 'past' not in st.session_state:
st.session_state['past'] = ["Hey ! 👋"]
#container for the chat history
response_container = st.container()
#container for the user's text input
container = st.container()
with container:
with st.form(key='my_form', clear_on_submit=True):
user_input = st.text_input("Query:", placeholder="Talk to your csv data here (:", key='input')
submit_button = st.form_submit_button(label='Send')
if submit_button and user_input:
output = conversational_chat(user_input)
st.session_state['past'].append(user_input)
st.session_state['generated'].append(output)
if st.session_state['generated']:
with response_container:
for i in range(len(st.session_state['generated'])):
message(st.session_state["past"][i], is_user=True, key=str(i) + '_user', avatar_style="big-smile")
message(st.session_state["generated"][i], key=str(i), avatar_style="thumbs")
| [] |
2024-01-10 | Kiash254/QA-chatbot-with-Gemini-streamlit | genai_langchain.py | import streamlit as st
import getpass
import os
from dotenv import load_dotenv
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_core.messages import HumanMessage, SystemMessage
# Load the API key from .env GOOGLE_API_KEY
load_dotenv()
GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY')
# Initialize the model
llm = ChatGoogleGenerativeAI(model='gemini-pro')
# Streamlit application
st.title('LangChain Google Generative AI')
# User input
user_input = st.text_input('Enter a message:')
if st.button('Generate Ballad'):
# Use the model to generate a response
result = llm.invoke(f"Write a ballad about {user_input}")
# Display the model's response
st.write(result.content)
if st.button('Generate Limerick'):
# Use the model to generate a response
for chunk in llm.stream(f"Write a limerick about {user_input}."):
st.write(chunk.content)
# System and human message input
system_message = st.text_input('Enter a system message:')
human_message = st.text_input('Enter a human message:')
if st.button('Generate Conversation'):
model = ChatGoogleGenerativeAI(model="gemini-pro", convert_system_message_to_human=True)
conversation = model(
[
SystemMessage(content=system_message),
HumanMessage(content=human_message),
]
)
st.write(conversation) | [] |
2024-01-10 | Kiash254/QA-chatbot-with-Gemini-streamlit | blog.py | import streamlit as st
from langchain.prompts import PromptTemplate
from langchain_google_genai import ChatGoogleGenerativeAI
from dotenv import load_dotenv
import os
import google.generativeai as genai
from langchain.schema import HumanMessage
# Load the environment variables from a .env file
load_dotenv()
os.getenv("GOOGLE_API_KEY")
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
# Initialize the Google Generative AI model
llm = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
# Function to get response from Gemini model
def getGeminiResponse(input_text, no_words, blog_style):
# Prompt Template
template="""
Write a blog for {blog_style} job profile for a topic {input_text}
within {no_words} words.
"""
prompt = template.format(blog_style=blog_style, input_text=input_text, no_words=no_words)
# Generate the response from the Gemini model
response = llm([HumanMessage(content=prompt)])
return response.content
# Streamlit UI
st.set_page_config(page_title="Generate Blogs", page_icon='🤖', layout='centered', initial_sidebar_state='collapsed')
st.header("Generate Blogs 🤖")
input_text = st.text_input("Enter the Blog Topic")
# Creating two more columns for additional 2 fields
col1, col2 = st.columns([5,5])
with col1:
no_words = st.text_input('No of Words')
with col2:
blog_style = st.selectbox('Writing the blog for', ('Researchers', 'Data Scientist', 'Common People'), index=0)
submit = st.button("Generate")
# Final response
if submit:
st.write(getGeminiResponse(input_text, no_words, blog_style)) | [
"\n Write a blog for PLACEHOLDER job profile for a topic PLACEHOLDER\n within PLACEHOLDER words.\n ",
"\n Write a blog for {blog_style} job profile for a topic {input_text}\n within {no_words} words.\n "
] |
2024-01-10 | Kiash254/QA-chatbot-with-Gemini-streamlit | blog_langchain.py | import streamlit as st
from langchain.prompts import PromptTemplate
from langchain_google_genai import ChatGoogleGenerativeAI
from dotenv import load_dotenv
import os
import google.generativeai as genai
from langchain.schema import HumanMessage
# Load the environment variables from a .env file
load_dotenv()
os.getenv("GOOGLE_API_KEY")
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
# Initialize the Google Generative AI model
llm = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
# Function to get response from Gemini model
def getGeminiResponse(input_text, no_words, blog_style):
# Prompt Template
template="""
Write a blog for {blog_style} job profile for a topic {input_text}
within {no_words} words.
"""
prompt = template.format(blog_style=blog_style, input_text=input_text, no_words=no_words)
# Generate the response from the Gemini model
response = llm([HumanMessage(content=prompt)])
return response.content
# Streamlit UI
st.set_page_config(page_title="Generate Blogs", page_icon='🤖', layout='centered', initial_sidebar_state='collapsed')
st.header("Generate Blogs 🤖")
input_text = st.text_input("Enter the Blog Topic")
# Creating two more columns for additional 2 fields
col1, col2 = st.columns([5,5])
with col1:
no_words = st.text_input('No of Words')
with col2:
blog_style = st.selectbox('Writing the blog for', ('Researchers', 'Data Scientist', 'Common People'), index=0)
submit = st.button("Generate")
# Final response
if submit:
st.write(getGeminiResponse(input_text, no_words, blog_style)) | [
"\n Write a blog for PLACEHOLDER job profile for a topic PLACEHOLDER\n within PLACEHOLDER words.\n ",
"\n Write a blog for {blog_style} job profile for a topic {input_text}\n within {no_words} words.\n "
] |
2024-01-10 | Kiash254/QA-chatbot-with-Gemini-streamlit | face.py | from langchain_community.llms import HuggingFaceHub
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
from langchain_community.llms import HuggingFaceTextGenInference
from langchain_community.document_loaders.hugging_face_dataset import HuggingFaceDatasetLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.document_loaders import AsyncChromiumLoader
from langchain_community.document_transformers import BeautifulSoupTransformer
# Load HTML
loader = AsyncChromiumLoader(["http://www.w3schools.com/"])
html = loader.load()
# Transform
bs_transformer = BeautifulSoupTransformer()
docs_transformed = bs_transformer.transform_documents(
html, tags_to_extract=["p", "li", "div", "a"]
)
docs_transformed[0].page_content[0:500]
| [] |
2024-01-10 | bcsamrudh/LearnQuest | notes~views.py | from django.http import JsonResponse
from django.shortcuts import get_object_or_404, render,redirect
from .models import Notes
from .forms import NotesForm
from django.contrib import messages
from django.urls import reverse
from django.db.models import Q
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.contrib.auth import get_user_model
from django.views.decorators.cache import never_cache
import datetime
import google.generativeai as palm
import cohere
User_model = get_user_model()
@never_cache
def about(request):
return render(request,"about.html")
@login_required
def generate_questions(request,topic,subject):
palm.configure(api_key=settings.API_KEY)
defaults = {
'model': 'models/text-bison-001',
'temperature': 0.7,
'candidate_count': 1,
'top_k': 40,
'top_p': 0.95,
'max_output_tokens': 1024,
'stop_sequences': [],
'safety_settings': [{"category":"HARM_CATEGORY_DEROGATORY","threshold":1},{"category":"HARM_CATEGORY_TOXICITY","threshold":1},{"category":"HARM_CATEGORY_VIOLENCE","threshold":2},{"category":"HARM_CATEGORY_SEXUAL","threshold":2},{"category":"HARM_CATEGORY_MEDICAL","threshold":2},{"category":"HARM_CATEGORY_DANGEROUS","threshold":2}],
}
prompt = prompt = f"""Please generate 10 related follow-up questions of medium difficulty and answers to this topic: {topic} of subject {subject}"""
response = palm.generate_text(
**defaults,
prompt=prompt
)
data = response.result
context={"data":data,"topic":topic,"subject":subject}
return render(request,'notes/questions_view.html',context)
@login_required
def upload_notes(request):
if request.method=='POST':
form=NotesForm(request.POST,request.FILES)
if form.is_valid():
subject = form.cleaned_data.get('subject')
title = form.cleaned_data.get('title')
notesfile = form.cleaned_data.get('notesfile')
university = form.cleaned_data.get('university')
filetype = notesfile.name.split('.')[1].upper()
description = form.cleaned_data.get('description')
tags = form.cleaned_data.get('tags')
user =User_model.objects.filter(username=request.user.username).first()
user.consistency_score+=50
user.save()
try:
Notes.objects.create(title=title,user=user,subject=subject,notesfile=notesfile,filetype=filetype,description=description,tags=tags,university=university)
return redirect(reverse('notes',kwargs={'search':0}))
except:
messages.error(request,"Error occured while uploading Notes, Please try again!")
else:
form = NotesForm(None)
return render(request,'notes/upload_notes.html',{"form":form})
def my_notes(id):
user = get_object_or_404(User_model,id=id)
notes = Notes.objects.filter(user=user)
return notes
# @login_required
# def update_notes(request, slug):
# obj = get_object_or_404(Notes, slug=slug)
# print(request.FILES)
# form = NotesForm(request.POST or None,request.FILES or None,instance = obj)
# if form.is_valid():
# form.save()
# return redirect(reverse('note',kwargs={'slug':slug}))
# return render(request,'notes/upload_notes.html',{"form":form})
@login_required
def delete_notes(request,slug):
notes = get_object_or_404(Notes,slug=slug)
notes.delete()
return redirect(reverse('notes',kwargs={"search":0}))
def is_valid_queryparam(param):
return param != '' and param is not None
def filter(request):
qs = Notes.objects.all()
title_contains_query = request.POST.get('title_contains')
user_query = request.POST.get('user')
date_query = request.POST.get('date_query')
subject = request.POST.get('subject')
tags_query = request.POST.get('tags')
if is_valid_queryparam(title_contains_query):
qs = qs.filter(title__icontains=title_contains_query)
if is_valid_queryparam(user_query):
qs = qs.filter(Q(user__username__icontains=user_query)).distinct()
if is_valid_queryparam(date_query):
date = datetime.datetime.strptime(date_query, '%Y-%m-%d').date()
qs = qs.filter(date_uploaded__year=date.year,date_uploaded__month=date.month,date_uploaded__day=date.day)
if is_valid_queryparam(subject):
qs = qs.filter(subject__icontains=subject)
if is_valid_queryparam(tags_query):
qs = qs.filter(tags__icontains=tags_query)
return qs
@login_required
def notes(request,search):
if search:
notes=filter(request)
else:
notes=Notes.objects.all()
p = Paginator(notes.order_by('-date_uploaded'), 5)
page_number = request.POST.get('page')
try:
page_obj = p.get_page(page_number)
except PageNotAnInteger:
page_obj = p.page(1)
except EmptyPage:
page_obj = p.get_page(p.num_pages)
context = {'page_obj': page_obj}
return render(request, 'notes/view_notes.html', context)
@login_required
def note(request,slug):
try:
notes_display=get_object_or_404(Notes,slug=slug)
if notes_display.filetype == "PDF":
filetype = "PDF"
else:
filetype="None"
except Notes.DoesNotExist:
return render(request, '404.html')
except :
return render(request,'404.html')
context={"note": notes_display,"current_user":request.user,"filetype":filetype}
return render(request,'notes/note.html',context=context)
def upvote(request,id):
note_obj = get_object_or_404(Notes, id=id)
if note_obj.upvotes.filter(id=request.user.id).exists():
note_obj.upvotes.remove(request.user)
note_obj.save()
text = True
else:
note_obj.upvotes.add(request.user)
note_obj.save()
text = False
return JsonResponse({'total_upvotes': note_obj.total_upvotes,'text':text})
| [
"Please generate 10 related follow-up questions of medium difficulty and answers to this topic: PLACEHOLDER of subject PLACEHOLDER"
] |
2024-01-10 | DrScotthew/Senior-Project | SoftwareDevelopmentFinalProject.py | from ast import Try, TryStar
from cgitb import lookup
from http.server import ThreadingHTTPServer
import os
from pickle import FALSE
import string
from unittest import skip
from InventoryVersion1 import Inventory_Version_1 #import inventory management inventory_version_1.py
import colorama
#Colorama is a way to highlight text color in the output console
from colorama import just_fix_windows_console
#'Fixes' windows and forces colored text to work
from colorama import init
from colorama import Fore #ability for color change settings for objects...see 'settings'
import json
import traceback
import random
import StarWarsDataSets
import SupernaturalDataSets
import DemoDataSets
import openai
from openai import OpenAI #uses openai
import config
import time
global isUsingOpenAI
client = OpenAI(
organization=openai.api_key,
)
def random_generation_ask():
print("Would you like to use the OpenAI random generation? (see README for more info...)") #asks user if they want to use OpenAI random generation
choice = input()
global isUsingOpenAI
if choice in ("yes", "Y", "y"):
print("Do you have your own API key to use?") #asks user if they have their own API key to use
choice2=input()
if choice2 in ("yes", "Y", "y"):
print("Please type in the API key you would like to use: ") #asks user to put in their API key to use
isUsingOpenAI = True
user_api_key = input()
openai.api_key = user_api_key #sets api key to what user put in...
new_game_screen()
else:
print("The game will use preconfigured data sets...") #will use the prechosen OpenAI key so user can use OpenAI random generation still...
isUsingOpenAI = False
new_game_screen()
else:
print("The game will use preconfigured data sets...") #uses the preconfigured data sets...
isUsingOpenAI=False
new_game_screen()
random_generation_spn_weapon_names = [] #creates spn array elements for weapon names
random_generation_starwars_weapon_names = [] #creates star wars array elements for weapon names
random_generation_demo_weapon_names = [] #creates demo array elements for weapon names
random_generation_spn_weapon_descriptions = [] #creates spn array elements for weapon descriptions...array index values will match weapon array index values each time as both are added at same time...
random_generation_starwars_weapon_descriptions = [] #star wars weapon descriptions array
random_generation_demo_weapon_descriptions = [] #demo weapon descriptions array
def demo_openai_gen():
demo_prompt = ["give the name of a random weapon that is not listed here: {}".format(random_generation_demo_weapon_names) + "\n\n"]
#will ask for generic weapons for demo mode...each weapon must be unique...
response = openai.completions.create(
model="text-davinci-003", #using davinci003
prompt=demo_prompt, #asks openai to give a weapon name from 'Supernatural'
temperature=0.7,
frequency_penalty=0,
presence_penalty=0,
max_tokens=10
)
demo_weapon = response.choices[0].text;
random_generation_demo_weapon_names.append(demo_weapon) #adds weapon name to array...
L = [demo_weapon]
demo_file = open('random_weapons_demo.txt', 'w') #saves each weapon name into new .txt file
demo_file.writelines(L)
demo_file.close
demo_file = open('random_weapons_demo.txt', 'r')
demo_prompt2=["give a one sentence description of " + demo_weapon + "describing what it looks like and its function. Do not use the phrase '" + demo_weapon + "' in your description." +"\n\n"]
#asks openai for description of demo weapon(s)...
response = openai.completions.create(
model="text-davinci-003",
prompt=demo_prompt2, #asks openai to give a description from previously used name for demo weapon
temperature=0.7,
frequency_penalty=0,
presence_penalty=0,
max_tokens=50
)
demo_weapon_description = response.choices[0].text;
random_generation_demo_weapon_descriptions.append(demo_weapon_description) #adds weapon description to array...
final_weapon_description_save = open('random_weapons_demo_descriptions.txt', 'w')
final_weapon_description_save.writelines(demo_weapon_description)
final_weapon_description_save.close
def supernatural_openai_gen(): #all random generation for supernatural
supernatural_prompt = ["give a random weapon name from the tv show 'Supernatural' that is not listed here: {}".format(random_generation_spn_weapon_names) + "\n\n"]
#asks openai to give one weapon name each time...this is to prevent errors when ai gives list of items and program is unable to accurately prepare to read txt file in different formats...
#also asks for openai to only give a weapon name that is unique from weapon names already stored in array...
response = openai.completions.create(
model="text-davinci-003", #using davinci003
prompt=supernatural_prompt, #asks openai to give a weapon name from 'Supernatural'
temperature=0.7,
frequency_penalty=0,
presence_penalty=0,
max_tokens=10
)
supernatural_weapon = response.choices[0].text;
random_generation_spn_weapon_names.append(supernatural_weapon) #adds weapon name to array...
L = [supernatural_weapon]
spn_file = open('random_weapons_supernatural.txt', 'w') #saves each weapon name into new .txt file
spn_file.writelines(L)
spn_file.close
spn_file = open('random_weapons_supernatural.txt', 'r')
supernatural_prompt2=["give a one sentence description of the weapon from 'Supernatural' called " + supernatural_weapon + "describing what it looks like and its function. Do not use the phrase '" + supernatural_weapon + "' in your description." +"\n\n"]
#this will ask the openai to give a description for the weapon stored in supernatural_weapon variable without mentioning the weapon's name...this is to make the text in the game less redundant and awkward...
#need to check each time if program has already given a description for that weapon...? no since it will be called each time weapon is created
#will store descriptions in save file...
#will access said save file when player wants to see weapon descriptions from inventory screen...
#e.g. player opens inventory, selects weapon, program displays weapon description again for player before asking if thats the weapon they want to use...
#program will know which description to give based on array index value...since program gives description at same time as weapon name, the index values will match each time...
response = openai.completions.create(
model="text-davinci-003", #using davinci003
prompt=supernatural_prompt2, #asks openai to give a description from previously used name from 'Supernatural'
temperature=0.7,
frequency_penalty=0,
presence_penalty=0,
max_tokens=50
)
supernatural_weapon_description = response.choices[0].text;
random_generation_spn_weapon_descriptions.append(supernatural_weapon_description) #adds weapon description to array...
final_weapon_description_save = open('random_weapons_supernatural_descriptions.txt', 'w')
final_weapon_description_save.writelines(supernatural_weapon_description)
final_weapon_description_save.close
def starwars_openai_gen():
starwars_prompt = ["give a random weapon name from 'Star Wars' that is not listed here: {}".format(random_generation_starwars_weapon_names) + "\n\n"]
response = openai.completions.create(
model="text-davinci-003", #using davinci003
prompt=starwars_prompt, #asks openai to give a weapon name from 'Star Wars'
temperature=0.7,
frequency_penalty=0,
presence_penalty=0,
max_tokens=50
)
starwars_weapon = response.choices[0].text;
random_generation_starwars_weapon_names.append(starwars_weapon)
L1 = [starwars_weapon]
starwars_file = open('random_weapons_starwars.txt', 'w')
starwars_file.writelines(L1)
starwars_file.close
starwars_file = open('random_weapons_starwars.txt', 'r')
starwars_prompt2=["give a one sentence description of the weapon from 'Supernatural' called " + starwars_weapon + "describing what it looks like and its function. Do not use the phrase '" + starwars_weapon + "' in your description." +"\n\n"]
response = openai.completions.create(
model="text-davinci-003", #using davinci003
prompt=starwars_prompt2, #asks openai to give a description from previously used name from 'Supernatural'
temperature=0.7,
frequency_penalty=0,
presence_penalty=0,
max_tokens=50
)
starwars_weapon_description = response.choices[0].text;
random_generation_starwars_weapon_descriptions.append(starwars_weapon_description) #adds weapon description to array...
final_weapon_description_save = open('random_weapons_supernatural_descriptions.txt', 'w')
final_weapon_description_save.writelines(starwars_weapon_description)
final_weapon_description_save.close
def display_title_screen():
os.system("clear" if os.name == "posix" else "cls") # Clear the screen
title = """
_______ _______ _________ _______ _______
( ____ )( ___ )|\ /|\__ __/|\ /|( ____ )|\ /|( ____ \\
| ( )|| ( ) || ) ( | ) ( | ) ( || ( )|| ) ( || ( \/
| (____)|| | | || | | | | | | | | || (____)|| | | || (_____
| __)| | | |( ( ) ) | | ( ( ) )| __)| | | |(_____ )
| (\ ( | | | | \ \_/ / | | \ \_/ / | (\ ( | | | | ) |
| ) \ \__| (___) | \ / ___) (___ \ / | ) \ \__| (___) |/\____) |
|/ \__/(_______) \_/ \_______/ \_/ |/ \__/(_______)\_______)
"""
menu = """
1. Start New Game
2. Load Saved Game
3. Settings
4. Exit Game
"""
print(title)
print(menu)
def display_new_game_screen():
#asks player if they want to enter a custom seed or use a pregenerated random seed for their new game
os.system("clear" if os.name == "posix" else "cls") # Clear the screen
open('random_weapons_supernatural.txt', 'w').close() #clears all data
open('random_weapons_starwars.txt', 'w').close() #clears all data
open('save.txt', 'w').close()
seed_selection = """
Enter a custom seed. Alternatively, type 'random' for a random seed. Enter 'exit' to go back to the main menu.
"""
print(seed_selection)
def display_settings_screen():
#displays all available color choices player can choose from before starting their game
color_selection = """ Please choose a color for highlighted items. (*Note: The default highlight color for items is Yellow. All enemies will have a Red highlight color regardless of choice for object color.).
1. Blue
2. Yellow
3. Purple
4. Red
5. Green
Enter 'exit' to go back to the main menu.
"""
print(color_selection)
def data_starwars(): #accesses data sets for star wars
global place
global weapon
global weapon_name
global weapon_description
global trash_can
global trash_can_name
global piece_of_paper
global piece_of_paper_name
global enemy
global enemy_name
if isUsingOpenAI:
starwars_openai_gen() #performs openai generation for weapon names/descriptions
weapon_name = random.choice(random_generation_starwars_weapon_names) #will access from randomly generated lists
weapon_description = random.choice(random_generation_starwars_weapon_descriptions)
place = random.choice(StarWarsDataSets.woods) #randomly chooses values from specified list
trash_can_name = random.choice(StarWarsDataSets.trash_can)
piece_of_paper_name = random.choice(StarWarsDataSets.piece_of_paper)
enemy_name = random.choice(StarWarsDataSets.enemy)
else:
weapon_name = random.choice(StarWarsDataSets.weapons) #will access from predefined data sets
weapon_description = "test description of star wars weapon..."
place = random.choice(StarWarsDataSets.woods) #randomly chooses values from specified list
trash_can_name = random.choice(StarWarsDataSets.trash_can)
piece_of_paper_name = random.choice(StarWarsDataSets.piece_of_paper)
enemy_name = random.choice(StarWarsDataSets.enemy)
try:
weapon = color_choice + weapon_name + Fore.RESET
trash_can = color_choice + trash_can_name + Fore.RESET
piece_of_paper = color_choice + piece_of_paper_name + Fore.RESET
enemy = Fore.RED + enemy_name + Fore.RESET
except:
weapon = Fore.YELLOW + weapon_name + Fore.RESET
trash_can = Fore.YELLOW + trash_can_name + Fore.RESET
piece_of_paper = Fore.YELLOW + piece_of_paper_name + Fore.RESET
enemy = Fore.RED + enemy_name + Fore.RESET
def data_supernatural(): #accesses data sets for supernatural
global place
global weapon
global weapon_name
global weapon_description
global trash_can
global trash_can_name
global piece_of_paper
global piece_of_paper_name
global enemy
global enemy_name
if isUsingOpenAI:
supernatural_openai_gen() #performs openai generation for weapon names/descriptions
weapon_name = random.choice(random_generation_spn_weapon_names) #will access from randomly generated lists
weapon_description = random.choice(random_generation_spn_weapon_descriptions)
place = random.choice(SupernaturalDataSets.woods) #randomly chooses values from specified list
trash_can_name = random.choice(SupernaturalDataSets.trash_can)
piece_of_paper_name = random.choice(SupernaturalDataSets.piece_of_paper)
enemy_name = random.choice(SupernaturalDataSets.enemy)
else:
weapon_name = random.choice(SupernaturalDataSets.weapons) #will access from predefined data sets
weapon_description = "test description of supernatural weapon..."
place = random.choice(SupernaturalDataSets.woods) #randomly chooses values from specified list
trash_can_name = random.choice(SupernaturalDataSets.trash_can)
piece_of_paper_name = random.choice(SupernaturalDataSets.piece_of_paper)
enemy_name = random.choice(SupernaturalDataSets.enemy)
try:
weapon = color_choice + weapon_name + Fore.RESET
trash_can = color_choice + trash_can_name + Fore.RESET
piece_of_paper = color_choice + piece_of_paper_name + Fore.RESET
enemy = Fore.RED + enemy_name + Fore.RESET
except:
weapon = Fore.YELLOW + weapon_name + Fore.RESET
trash_can = Fore.YELLOW + trash_can_name + Fore.RESET
piece_of_paper = Fore.YELLOW + piece_of_paper_name + Fore.RESET
enemy = Fore.RED + enemy_name + Fore.RESET
def data_demo(): #accesses data set for traditional demo
global place
global weapon
global weapon_name
global weapon_description
global trash_can
global trash_can_name
global piece_of_paper
global piece_of_paper_name
global enemy
global enemy_name
if isUsingOpenAI:
demo_openai_gen() #performs openai generation for weapon names/descriptions
weapon_name = random.choice(random_generation_demo_weapon_names) #will access from randomly generated lists
weapon_description = random.choice(random_generation_demo_weapon_descriptions)
place = random.choice(DemoDataSets.woods) #randomly chooses values from specified list
trash_can_name = random.choice(DemoDataSets.trash_can)
piece_of_paper_name = random.choice(DemoDataSets.piece_of_paper)
enemy_name = random.choice(DemoDataSets.enemy)
else:
weapon_name = random.choice(DemoDataSets.weapons) #will access from predefined data sets
weapon_description = "test description of demo weapon..."
place = random.choice(DemoDataSets.woods) #randomly chooses values from specified list
trash_can_name = random.choice(DemoDataSets.trash_can)
piece_of_paper_name = random.choice(DemoDataSets.piece_of_paper)
enemy_name = random.choice(DemoDataSets.enemy)
try:
weapon = color_choice + weapon_name + Fore.RESET
trash_can = color_choice + trash_can_name + Fore.RESET
piece_of_paper = color_choice + piece_of_paper_name + Fore.RESET
enemy = Fore.RED + enemy_name + Fore.RESET
except:
weapon = Fore.YELLOW + weapon_name + Fore.RESET
trash_can = Fore.YELLOW + trash_can_name + Fore.RESET
piece_of_paper = Fore.YELLOW + piece_of_paper_name + Fore.RESET
enemy = Fore.RED + enemy_name + Fore.RESET
def settings_screen():
#uses ASCI codes to change colors...
display_settings_screen()
global color_choice
for color_choice in [Fore.BLUE, Fore.YELLOW, Fore.MAGENTA, Fore.RED, Fore.GREEN]:
while True:
choice = input("Make your choice: ")
if choice == '1':
data.update({'highlight_color': Fore.BLUE})
print(Fore.BLUE + 'Highlight color changed to Blue')
color_choice = Fore.BLUE
print(Fore.RESET) #resets color of text in console back to white...object color is set, however
elif choice == '2':
data.update({'highlight_color': Fore.YELLOW})
print(Fore.YELLOW + 'Highlight color changed to Yellow')
color_choice = Fore.YELLOW
print(Fore.RESET)
elif choice == '3':
data.update({'highlight_color': Fore.MAGENTA})
print(Fore.MAGENTA + 'Highlight color changed to Purple')
color_choice = Fore.MAGENTA
print(Fore.RESET)
elif choice == '4':
data.update({'highlight_color': Fore.RED})
print(Fore.RED + 'Highlight color changed to Red')
color_choice = Fore.RED
print(Fore.RESET)
elif choice == '5':
data.update({'highlight_color': Fore.GREEN})
print(Fore.GREEN + 'Highlight color changed to Green')
color_choice = Fore.GREEN
print(Fore.RESET)
elif choice == 'go back':
main()
else:
print("Invalid input. Please choose a highlight color from the list.")
color_choice = [Fore.BLUE, Fore.YELLOW, Fore.MAGENTA , Fore.RED, Fore.GREEN]
setting = [DemoDataSets.woods, StarWarsDataSets.woods, SupernaturalDataSets.woods]
data = {
'checkpoint': 0,
'complete inventory': Inventory_Version_1.Inventory,
'weapons': Inventory_Version_1.InventoryWeapons,
'armor': Inventory_Version_1.InventoryArmor,
'ammo': Inventory_Version_1.InventoryAmmo,
'health items': Inventory_Version_1.InventoryHealthItems,
'currently equipped': Inventory_Version_1.InventoryCurrentlyEquipped,
'money': Inventory_Version_1.InventoryMoney,
'highlight_color': Fore.YELLOW,
'setting': DemoDataSets.woods
#this allows for the player to load their game from a save file
}
def main():
#automatically goes to title screen from main() function
main_menu()
choice = input()
if choice == 'exit game':
print("Saving the game...")
checkpoints = [start, startpath, startpath_object, startpath_continue, crossroads, crossroads_left, crossroads_left1, crossroads_left2, crossroads_left3]
def load_game():
global color_choice
color_choice = [Fore.BLUE, Fore.YELLOW, Fore.MAGENTA, Fore.RED, Fore.GREEN]
checkpoints = [start, startpath, startpath_object, startpath_continue, crossroads, crossroads_left, crossroads_left1, crossroads_left2, crossroads_left3]
#checkpoints are defined as places in the story or 'timeline' for the player to access...
#this allows for the game to always know where the player is in the story
try:
with open('save.txt') as save_file:
data = json.load(save_file) #gets data from save file...
Inventory_Version_1.Inventory = data.get("inventory") #checks what items player had in save file
Inventory_Version_1.InventoryWeapons = data.get("weapons")
Inventory_Version_1.InventoryArmor = data.get("armor")
Inventory_Version_1.InventoryAmmo = data.get("ammo")
Inventory_Version_1.InventoryHealthItems = data.get("health items")
Inventory_Version_1.InventoryCurrentlyEquipped = data.get("currently equipped")
Inventory_Version_1.InventoryMoney = data.get("money")
color_choice = data.get("highlight_color")
saved_setting = data.get("setting")
if saved_setting == 'supernatural':
data_supernatural()
elif saved_setting == 'star wars':
data_starwars()
else:
data_demo()
saved_checkpoint = data.get("checkpoint") #checks where player was when they exited the game last
checkpoints[saved_checkpoint]() #puts player back at same 'checkpoint' they were in last
except:
start()
def main_menu():
#the title screen
display_title_screen()
while True:
choice = input("Make Your Choice (1/2/3/4): ")
if choice == '1':
# Allows player to either a) enter a custom seed or b) use a pregenerated random seed in order to create the world and start the game
random_generation_ask() #will ask player if they want to use the OpenAI random generation or preconfigured data sets...
elif choice == '2':
# Loads last saved game
print("Loading a saved game...")
load_game()
elif choice == '3':
# Allows player to change different settings (includes: highlight color)
settings_screen()
elif choice == '4':
# Exits the game
print("Exiting the game...")
else:
# Error control...
print("Invalid choice. Please select a valid option (1/2/3/4).")
#def random_generation_ask():
# print("Would you like to use the OpenAI random generation?")
# choice = input()
# if choice in ("yes", "Y", "y"):
# print("Do you have your own API key to use?")
# choice2=input()
# if choice2 in ("yes", "Y", "y"):
# print("Please type in the API key you would like to use: ")
# isUsingOpenAI = True
# open_api_key = input() #sets api key to what user put in...
# new_game_screen()
# else:
# openai.api_key = config.api_key #sets api key to example api key...
# isUsingOpenAI = True
# new_game_screen()
# else:
# print("The game will use preconfigured data sets...")
# new_game_screen()
def new_game_screen():
display_new_game_screen()
while True:
choice = input()
if choice == 'random': #for demo playthrough
#Will generate a random seed for the world the player will be in...however, this is currently for the demo
print("Generating random seed...")
print("Loading...")
Inventory_Version_1.isDemo=True
data.update({'setting': 'demo'}) #updates saved selection of data set used
Inventory_Version_1.Inventory.clear #clears inventory data for new game
data_demo() #will assign demo data for wold generation
start() #goes to start function to start game
elif choice == 'star wars':
print("Generating a Star Wars themed world...")
print("Loading...")
Inventory_Version_1.isStarWars=True
data.update({'setting': 'star wars'})
Inventory_Version_1.Inventory.clear
data_starwars() #will assign star wars data sets for wold generation
start()
elif choice == 'supernatural':
print("Generating a Supernatural themed world...")
print("Loading...")
Inventory_Version_1.isSupernatural=True
data.update({'setting': 'supernatural'})
Inventory_Version_1.Inventory.clear
data_supernatural() #will assign supernatural data sets for world generation
start()
elif choice == 'exit':
main_menu()
else: #incomplete...will finish later
print("Generating world based on seed given...")
print("Loading...")
flowchart = [] #the flowchart for the player's choices in the game...
def weapon_inventory_descriptions():
stack = traceback.extract_stack()
filename, codeline, funcName, text = stack[-2]
checkpoints = {'start': start, 'startpath':startpath, "startpath_object":startpath_object, "startpath_continue":startpath_continue, "crossroads":crossroads, "crossroads_left":crossroads_left, "crossroads_left1":crossroads_left1, "crossroads_left2":crossroads_left2}
last_checkpoint = flowchart[-1] #gets last checkpoint in player's flowchart
if not Inventory_Version_1.InventoryWeapons: #checks if player has weapons...if not, tells them they don't have any weapons and takes them back...
checkpoints[last_checkpoint]() #returns player to previous checkpoint...
else:
print("Which weapon do you want to look at?")
while True:
choice2 = input()
if choice2=="{}".format(Inventory_Version_1.weapon_description_index) and Inventory_Version_1.isStarWars: #and Inventory_Version_1.isStarWars
print("Description: ") #how to make program look at descriptions for weapon...how to make it know if using spn data sets or star wars data sets...
#print(random_generation_starwars_weapon_descriptions[Inventory_Version_1.weapon_description_index])
print(weapon_description)
checkpoints[last_checkpoint]() #returns player to last checkpoint...
elif choice2=="{}".format(Inventory_Version_1.weapon_description_index) and Inventory_Version_1.isSupernatural:
print("Description: ") #how to make program look at descriptions for weapon...how to make it know if using spn data sets or star wars data sets...
print(random_generation_spn_weapon_descriptions[Inventory_Version_1.weapon_description_index])
checkpoints[last_checkpoint]() #returns player to last checkpoint...
elif choice2=="{}".format(Inventory_Version_1.weapon_description_index) and Inventory_Version_1.isDemo:
print("Description: ") #how to make program look at descriptions for weapon...how to make it know if using spn data sets or star wars data sets...
print(random_generation_demo_weapon_descriptions[Inventory_Version_1.weapon_description_index])
checkpoints[last_checkpoint]() #returns player to last checkpoint...
elif choice2=="exit":
checkpoints[last_checkpoint]()
else:
print("I don't understand that statement. Please enter the number associated with the weapon you want to look at.")
def start():
print("""
You are standing in the middle of the """ + place + """ at night.
There is a full moon overhead casting a faint glow on the ground in front of you. There are trees surrounding you in every direction and span far into the night.
However, there seems to be traces of a path to your right. It doesn't look to have been walked on in a long time.
You suddenly remember why you are here. The people of """ + place + """ have requested your help in defeating a creature that has terrorized them for many years. Its last known location was somewhere in these woods...""")
print("")
flowchart.append("start")
while True: #Loop continuously
choice = input() #Get the input
if choice in ("go on path", "go along path"): #Correct responses...
startpath() #...break the loop
elif choice == 'exit game':
data.update({'checkpoint': 0})
with open('save.txt', 'w') as save_file:
json.dump(data,save_file)
print("Saving the game...")
elif choice == 'inventory':
Inventory_Version_1.main1() #Will access Inventory_Version_1 to show inventory options
weapon_inventory_descriptions()
else:
print('I do not understand that statement.') #error control
def startpath():
print("")
print("""
You walk along the path, careful to not trip on any rocks or limbs along the way. You don't get very far before seeing an object lying on the ground, shining from the moonlight filtering through the trees. You can't make out exactly what it is, though.""")
flowchart.append("startpath")
print("")
while True:
choice = input()
if choice in ("pick up object", "pick up the object", "look at object", "look at the object"):
startpath_object()
elif choice == 'exit game':
data.update({'checkpoint': 1})
with open('save.txt', 'w') as save_file:
json.dump(data,save_file)
print("Saving the game...")
elif choice == 'inventory':
Inventory_Version_1.main1()
weapon_inventory_descriptions()
else:
print('I do not understand that statement.')
def startpath_object():
print("")
print("""
You pick up the object and notice that it is a """ + weapon)
print(weapon_description)
print("")
flowchart.append("startpath_object")
global Inventory
while True:
choice = input()
if choice in ("take " + weapon_name , "take weapon", "take the weapon"): #allows player to specifically type in weapon name or just 'weapon'
print("""
You take the """ + weapon + """ and hold it tightly in your hand.""") #ANSI codes implemented...change to yellow then back to white
Inventory_Version_1.addToInventory(weapon_name) #this adds the item to overall inventory list
Inventory_Version_1.addToInventoryWeapons(weapon_name) #adds item to inventory under 'Weapons' list
#NOTE: this is currently hardcoded...to effectively be randomly generated, this will need to change so that there is a list of preconceived items associated with their respective types
#e.g. if the player picked up a health item, one would need to code 'addToInventoryHealthItems'...would be better if program automatically assigned it
#will work on this soon
data.update({'complete inventory': Inventory_Version_1.Inventory})
data.update({'weapons': Inventory_Version_1.InventoryWeapons})
startpath_continue()
elif choice in ("drop " + weapon_name, "leave " + weapon_name):
print(" You put the " + weapon + " back on the ground.")
startpath_continue()
elif choice in ("go down path", "continue", "go along path", "continue down path", "continue on path"):
startpath_continue()
elif choice == 'exit game':
data.update({'checkpoint': 2})
with open('save.txt', 'w') as save_file:
json.dump(data,save_file)
print("Saving the game...")
elif choice == 'inventory':
Inventory_Version_1.main1()
weapon_inventory_descriptions()
else:
print("I don't understand that statement.")
def startpath_continue():
print("""
You look around and see that the path still continues in front of you. No other path is in sight and trees surround you. The moonlight still filters through shining
a faint light on the path ahead.""")
print("")
flowchart.append("startpath_continue")
global Inventory
while True:
choice = input()
if choice in ("go down path", "continue", "go along path", "continue down path", "continue on path"):
crossroads()
elif choice == 'exit game':
data.update({'checkpoint': 3})
with open('save.txt', 'w') as save_file:
json.dump(data,save_file)
print("Saving the game...")
elif choice == 'inventory':
Inventory_Version_1.main1()
weapon_inventory_descriptions()
else:
print('I do not understand that statement.')
def crossroads():
print("")
print("""
You continue to go along the path and eventually reach the center of a crossroads. There are 3 paths in front of you: one to the left, one to the right, and one that seems to continue from the path you are on currently.
The middle section of the crossroads is a wide circle with a """ + trash_can +""" sitting in the center. There is a lamp post lighting the center of the crossroads.""")
print("")
flowchart.append("crossroads")
while True:
choice = input()
if choice in ("go to " + trash_can_name, "go to trash can"):
print(""" You are now standing right in front of the """ + trash_can)
elif choice in ("look inside " + trash_can_name, "look inside the " + trash_can_name):
print(" You look inside the " + trash_can + " and see there is a " + piece_of_paper + " at the bottom")
elif choice in ("get " + piece_of_paper_name, "get the " + piece_of_paper_name, "pick up " + piece_of_paper_name, "pick up the " + piece_of_paper_name):
print("""
You pick up the """ + piece_of_paper + """ and read it. """ +
"""It reads:
Welcome to Rovivrus! In this game, you will find there are many paths to go on. There is no right or wrong way to play this game. While one path might lead to something incredible,
another could lead to your demise. Be cautious. There are several others in this world, but not all will be friendly. Be prepared for anything.
""")
elif choice in ("go on left path", "go down left path"):
crossroads_left() #demo 1 playthrough...code others in phase 2
elif choice in ("go on right path", "go down right path"):
crossroads_right()
elif choice in ("go forward", "go down same path", "go on same path", "continue on same path", "continue down same path"):
crossroads_forward()
elif choice == 'exit game':
data.update({'checkpoint': 4})
with open('save.txt', 'w') as save_file:
json.dump(data,save_file)
print("Saving the game...")
elif choice == 'inventory':
Inventory_Version_1.main1()
else:
print("I don't understand that statement.")
def crossroads_left():
print("")
print("""
You go down the path to your left, leaving the crossroads behind you. Eventually, the light from the crossroads becomes faint. The path in front of you is almost invisible from the pitch black darkness all around.
Suddenly, a growling sound can be heard from in front of you, though you cannot see what is making the sound.""")
print("")
flowchart.append("crossroads_left")
while True:
choice = input()
if choice in ("keep going forward", "go forward", "continue forward", "keep going"):
crossroads_left1()
elif choice in ("turn around", "turn back"):
print("""
You are turned around with the growling still menacingly continuing behind you.""")
elif choice in ("go back to crossroads", "go to crossroads", "return to crossroads"):
print("""
You turn around and go back to the crossroads that you can barely make out in the dark from walking so far from it. The growling continues behind you, but it eventually becomes faint. Soon you are back at the crossroads.""")
crossroads()
elif choice in ("fight", "attack", "kill"):
preattack_inventory_check()
elif choice == 'exit game':
data.update({'checkpoint': 5})
with open('save.txt', 'w') as save_file:
json.dump(data,save_file)
print("Saving the game...")
elif choice == 'inventory':
Inventory_Version_1.main1()
else:
print("I don't understand that statement.")
def crossroads_left1():
print("")
print("""
You slowly move forward towards the growling which has gotten significantly louder and more menacing now. After a few steps, you can start to make out what seems to be a """ + enemy + """.
A pair of glowing eyes are faint, but seem to be staring right into your soul. The creature's growling starts to hurt your ears as it increases in volume.""")
print("")
flowchart.append("crossroads_left1")
while True:
choice = input()
if choice in ("keep going forward", "go forward", "continue forward", "keep going"):
crossroads_left2()
elif choice in ("turn around", "turn back"):
print("""
You are turned around with the growling still menacingly continuing behind you.""")
elif choice in ("go back to crossroads", "go to crossroads", "return to crossroads"):
print("""
You turn around and go back to the crossroads that you can barely make out in the dark from walking so far from it. The growling continues behind you, but it eventually becomes faint. Soon you are back at the crossroads.""")
crossroads()
elif choice in ("attack", "fight", "attack the " + enemy_name, "fight the " + enemy_name):
attack_wolf()
elif choice == 'exit game':
data.update({'checkpoint': 6})
with open('save.txt', 'w') as save_file:
json.dump(data,save_file)
print("Saving the game...")
elif choice == 'inventory':
Inventory_Version_1.main1()
else:
print("I don't understand that statement.")
def crossroads_left2():
print("")
print("""
You continue to move forward towards the """ + enemy + """ which is still growling. You only get three steps further before the creature suddenly lunges forward at you. Fangs sink down into your right arm as the """
+ enemy + """ bites down hard. The pain causes you to scream out in pain. Blood is now all over your arm and falling to the forest floor. The """ + enemy + """ continues to hold his grip on your arm and shows no signs of letting go.""")
print("")
flowchart.append("crossroads_left2")
while True:
choice = input()
if choice in ("attack", "fight", "attack the " + enemy_name, "fight the " + enemy_name):
attack_wolf()
elif choice in ("run away", "run", "leave"):
print("""
You try to get away from the """ + enemy + ", but it is futile. It continues to bite down even harder than before. Not a few moments later, your vision starts to deteriorate and you fall into a dark abyss of nothingness...")
death()
elif choice == 'exit game':
data.update({'checkpoint': 7})
with open('save.txt', 'w') as save_file:
json.dump(data,save_file)
print("Saving the game...")
elif choice == 'inventory':
Inventory_Version_1.main1()
else:
print("I don't understand that statement.")
def death():
print("")
open('save.txt', 'w').close() #clears all saved data from save file upon death of player
print("You died...")
print("Would you like to return to the main menu? If not, the game will close.")
print("")
while True:
choice = input()
if choice in ("yes", "Y", "Yes", "y"):
main_menu() #allows player to restart game again
elif choice in ("no", "N", "No", "n"):
exit() #closes game completely
else:
print("I don't understand that statement.")
def preattack_inventory_check():
#checks if the player has any weapons available to attack with...if so, then the function continues...
while True:
if Inventory_Version_1.InventoryWeapons == []:
print("You have no weapons to attack with. ")
stack = traceback.extract_stack()
filename, codeline, funcName, text = stack[-2]
#print(funcName)
checkpoints = [start, startpath, startpath_object, startpath_continue, crossroads, crossroads_left, crossroads_left1, crossroads_left2]
#checkpoints = ["start", "startpath", "startpath_object", "startpath_continue", "crossroads", "crossroads_left", "crossroads_left1", "crossroads_left2"]
last_checkpoint = flowchart[-1] #gets last checkpoint in player's flowchart
print(last_checkpoint)
#methods = {last_checkpoint: start or startpath or crossroads_left}
#method_name = 'start' # set by the command line options
checkpoints[last_checkpoint]() #automatically sends player back to checkpoint...
if funcName in checkpoints: #compares checkpoint name to function names
checkpoints[last_checkpoint]() #automatically sends player back to checkpoint...
else:
raise Exception("Method %s not implemented" % last_checkpoint)
else:
attack_wolf()
def attack_wolf():
print("""
What do you want to fight with?""") #allows player to choose a weapon to fight with
print("""
Current weapons avaiable to use:""") #retrieves avaiable weapons from inventory player currently has
Inventory_Version_1.showWeapons()
flowchart.append("attack_wolf")
while True:
choice = input()
if choice in Inventory_Version_1.InventoryWeapons: #read player input and match string value to inventory weapons available
print("""
You use the """ + choice + """ to attack the """ + enemy + """ and cause a deep gash on its right side. Shrieking in pain and full of rage, it lunges towards you with viciousness.""")
attack_wolf2()
else:
print("I don't understand that statement.")
def attack_wolf2():
print(" Do you want to fight or flee?")
while True:
choice = input()
if choice in ("fight"):
print("""
You attack the """ + enemy + """ again, this time using all your strength to cause a devastating blow, knocking the """ + enemy + """ to the ground. It attempts to get up to fight again, but you finally finish it off.""")
win()
elif choice in ("flee"):
print("""
You try to run away from the """ + enemy + """ and you are almost to the end of the path where the crossroads begins when a sudden force knocks you down. You try to get back up, but the """ + enemy + """has you pinned down.
Pain rushes through your body as it digs its claws into you. You are completely helpless...""")
death()
else:
print("I don't understand that statement.")
def win():
print("")
print("""
Congratulations! You have fought the """ + enemy + " and lived to tell the tale. Because of your actions, the people of " + place + " can now live in peace.")
def crossroads_left3():
print("yay")
if __name__ == "__main__":
main()
checkpoints = [start, startpath, startpath_object, startpath_continue, crossroads, crossroads_left, crossroads_left1, crossroads_left2, crossroads_left3]
def load_game():
try:
saved_checkpoint = int(open("save.txt").read())
checkpoints[saved_checkpoint]()
except FileNotFoundError:
start() | [
"[\"give a random weapon name from 'Star Wars' that is not listed here: PLACEHOLDER\\n\\n\"]",
"['give the name of a random weapon that is not listed here: PLACEHOLDER\\n\\n']",
"[\"give a random weapon name from the tv show 'Supernatural' that is not listed here: PLACEHOLDER\\n\\n\"]",
"[\"give a one sentence description of PLACEHOLDERdescribing what it looks like and its function. Do not use the phrase 'PLACEHOLDER' in your description.\\n\\n\"]",
"[\"give a one sentence description of the weapon from 'Supernatural' called PLACEHOLDERdescribing what it looks like and its function. Do not use the phrase 'PLACEHOLDER' in your description.\\n\\n\"]"
] |
2024-01-10 | shivanandmn/quiz_me_on | quizzes~schematic_prompt.py | from langchain.output_parsers import ResponseSchema
from langchain.output_parsers import StructuredOutputParser
from pydantic import BaseModel
from langchain.output_parsers.format_instructions import PYDANTIC_FORMAT_INSTRUCTIONS
from langchain.output_parsers.json import parse_json_markdown
line_template = '\t"{name}": {type} // {description}'
class ResponseSchema(BaseModel):
name: str
description: str
type: str = "string"
def _get_sub_string(schema: ResponseSchema) -> str:
return line_template.format(
name=schema.name, description=schema.description, type=schema.type
)
class StructuredDictOutputParser(StructuredOutputParser):
response_schemas: list[ResponseSchema]
def get_format_instructions(self) -> str:
schema_str = '{"response":[ ' + "\n".join(
[_get_sub_string(schema) for schema in self.response_schemas]
)+'],}'
return PYDANTIC_FORMAT_INSTRUCTIONS.format(schema=schema_str)
def parse(self, text: str):
return parse_json_markdown(text)["response"]
@property
def _type(self) -> str:
return "structured_dict"
def get_format_parser():
question_text_schema = ResponseSchema(name="question_text",
description="Question text")
options_schema = ResponseSchema(name="options",
description=" Options that are associated to questions",
type="list[str]")
correct_option_idx_schema = ResponseSchema(name="correct_option_idx",
description="Correct option index starting from 0",
type="int")
explanation_schema = ResponseSchema(name="explanation",
description="Some explanation related to the quiz")
response_schemas = [question_text_schema, options_schema,
correct_option_idx_schema, explanation_schema]
output_parser = StructuredDictOutputParser.from_response_schemas(
response_schemas)
return output_parser
prompt_template = """
Generate as many questions as possible for Multiple choice question quizzes for {topic}, be pricise and factual.
Make sure that the following informations are generated. Options should be clear and should have misconceptual options to confuse student.
One option should be correct in the options.
question_text: Question text here, character limit is strictly less than 255.
options: Get multiple options here and output them as a comma separated Python list. Each option must of less than 100 characters.
correct_option_idx: Get the index of the options which is correct. Index starts from zero and output them as python integer.
explanation: character limit is strictly less than 190. Explain or give factual information about the quiz.
{format_instructions}
"""
| [
"\t\"{name}\": {type} // {description}",
"\nGenerate as many questions as possible for Multiple choice question quizzes for {topic}, be pricise and factual.\nMake sure that the following informations are generated. Options should be clear and should have misconceptual options to confuse student.\nOne option should be correct in the options.\n\nquestion_text: Question text here, character limit is strictly less than 255.\n\noptions: Get multiple options here and output them as a comma separated Python list. Each option must of less than 100 characters.\n\ncorrect_option_idx: Get the index of the options which is correct. Index starts from zero and output them as python integer.\n\nexplanation: character limit is strictly less than 190. Explain or give factual information about the quiz.\n\n{format_instructions}\n"
] |
2024-01-10 | shivanandmn/quiz_me_on | query_master~query_master.py | from pathlib import Path
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import DocArrayInMemorySearch
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models.openai import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from get_parms import openai_key
import openai
openai.api_key = openai_key()
def load_file(file:Path):
if "pdf" in file:
loader = PyPDFLoader(file)
documents = loader.load()
else:
raise NotImplementedError
return documents
def get_retriever(documents, embd_type:str="openai", search_type="similarity", search_kwargs=None):
# define embedding
if embd_type == "openai":
embeddings = OpenAIEmbeddings()
else:
raise NotImplementedError
# create vector database from data
db = DocArrayInMemorySearch.from_documents(documents, embeddings)
# define retriever
retriever = db.as_retriever(search_type=search_type, search_kwargs=search_kwargs)
return retriever
def text_splits(documents):
# split documents
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=150)
docs = text_splitter.split_documents(documents)
return docs
def load_query_db(file:Path, chain_type, k):
# load documents
documents = load_file(file)
documents = text_splits(documents)
retriever = get_retriever(documents, embd_type="openai", search_type="similarity", search_kwargs={"k":k})
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
# create a chatbot chain. Memory is managed externally.
qa_chain = ConversationalRetrievalChain.from_llm(
llm=ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0),
chain_type=chain_type,
retriever=retriever,
memory=memory
)
return qa_chain
if __name__ == "__main__":
file = "query_master/docs/MachineLearning-Lecture01.pdf"
qa_chain = load_query_db(file, chain_type="stuff", k=4)
questions = "who are TA in this lecture?"
results = qa_chain({"question":questions})
print(results["answer"])
| [] |
2024-01-10 | Rukiren/AQUA_INSIGHT | import_file~command.py | import os
import requests
import openai
# 設定 OpenAI API 的金鑰
OPENAI_API_KEY = "//Your open api key//"
# 初始化 OpenAI API
openai.api_key = OPENAI_API_KEY
def gpt35(role, q):
"""
gpt-3.5-turbo
role: 設定角色
q: 輸入的問題
"""
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
temperature=0.7,
messages=[
{"role": "system", "content":role}, # 設定角色
{"role": "user", "content": q}
])
bot_response = response['choices'][0]['message']['content']
return bot_response
def gpt4(role, q):
"""
gpt-4
role: 設定角色
q: 輸入的問題
"""
response = openai.ChatCompletion.create(
model="gpt-4",
temperature=0.7,
messages=[
{"role": "system", "content":role}, # 設定角色
{"role": "user", "content": q}
])
bot_response = response['choices'][0]['message']['content']
return bot_response
| [] |
2024-01-10 | blockchain-Bitcion/ChatGPT-2 | src~revChatGPT~V1.py | """
Standard ChatGPT
"""
import json
import uuid
from os import environ
from os import getenv
from os.path import exists
import requests
from OpenAIAuth import Authenticator
from OpenAIAuth import Error as AuthError
BASE_URL = environ.get("CHATGPT_BASE_URL") or "https://chatgpt.duti.tech/"
class Error(Exception):
"""Base class for exceptions in this module."""
source: str
message: str
code: int
class Chatbot:
"""
Chatbot class for ChatGPT
"""
def __init__(
self,
config,
conversation_id=None,
parent_id=None,
) -> None:
self.config = config
self.session = requests.Session()
if "proxy" in config:
if isinstance(config["proxy"], str) is False:
raise Exception("Proxy must be a string!")
proxies = {
"http": config["proxy"],
"https": config["proxy"],
}
self.session.proxies.update(proxies)
if "verbose" in config:
if not isinstance(config["verbose"], bool):
raise Exception("Verbose must be a boolean!")
self.verbose = config["verbose"]
else:
self.verbose = False
self.conversation_id = conversation_id
self.parent_id = parent_id
self.conversation_mapping = {}
self.conversation_id_prev_queue = []
self.parent_id_prev_queue = []
if "email" in config and "password" in config:
pass
elif "access_token" in config:
self.__refresh_headers(config["access_token"])
elif "session_token" in config:
pass
else:
raise Exception("No login details provided!")
if "access_token" not in config:
try:
self.__login()
except AuthError as error:
raise error
def __refresh_headers(self, access_token):
self.session.headers.clear()
self.session.headers.update(
{
"Accept": "text/event-stream",
"Authorization": f"Bearer {access_token}",
"Content-Type": "application/json",
"X-Openai-Assistant-App-Id": "",
"Connection": "close",
"Accept-Language": "en-US,en;q=0.9",
"Referer": "https://chat.openai.com/chat",
},
)
def __login(self):
if (
"email" not in self.config or "password" not in self.config
) and "session_token" not in self.config:
raise Exception("No login details provided!")
auth = Authenticator(
email_address=self.config.get("email"),
password=self.config.get("password"),
proxy=self.config.get("proxy"),
)
if self.config.get("session_token"):
auth.session_token = self.config["session_token"]
auth.get_access_token()
if auth.access_token is None:
del self.config["session_token"]
self.__login()
return
else:
auth.begin()
self.config["session_token"] = auth.session_token
auth.get_access_token()
self.__refresh_headers(auth.access_token)
def ask(
self,
prompt,
conversation_id=None,
parent_id=None,
timeout=360,
# gen_title=True,
):
"""
Ask a question to the chatbot
:param prompt: String
:param conversation_id: UUID
:param parent_id: UUID
:param gen_title: Boolean
"""
if parent_id is not None and conversation_id is None:
error = Error()
error.source = "User"
error.message = "conversation_id must be set once parent_id is set"
error.code = -1
raise error
# user-specified covid and parid, check skipped to avoid rate limit
if (
conversation_id is not None and conversation_id != self.conversation_id
): # Update to new conversations
self.parent_id = None # Resetting parent_id
conversation_id = conversation_id or self.conversation_id
parent_id = parent_id or self.parent_id
if conversation_id is None and parent_id is None: # new conversation
parent_id = str(uuid.uuid4())
if conversation_id is not None and parent_id is None:
if conversation_id not in self.conversation_mapping:
self.__map_conversations()
parent_id = self.conversation_mapping[conversation_id]
data = {
"action": "next",
"messages": [
{
"id": str(uuid.uuid4()),
"role": "user",
"content": {"content_type": "text", "parts": [prompt]},
},
],
"conversation_id": conversation_id,
"parent_message_id": parent_id,
"model": "text-davinci-002-render-sha"
if not self.config.get("paid")
else "text-davinci-002-render-paid",
}
# new_conv = data["conversation_id"] is None
self.conversation_id_prev_queue.append(
data["conversation_id"],
) # for rollback
self.parent_id_prev_queue.append(data["parent_message_id"])
response = self.session.post(
url=BASE_URL + "api/conversation",
data=json.dumps(data),
timeout=timeout,
stream=True,
)
self.__check_response(response)
for line in response.iter_lines():
line = str(line)[2:-1]
if line == "Internal Server Error":
raise Exception("Error: " + str(line))
if line == "" or line is None:
continue
if "data: " in line:
line = line[6:]
if line == "[DONE]":
break
# Replace accidentally escaped double quotes
line = line.replace('\\"', '"')
line = line.replace("\\'", "'")
line = line.replace("\\\\", "\\")
# Try parse JSON
try:
line = json.loads(line)
except json.decoder.JSONDecodeError:
continue
if not self.__check_fields(line):
raise Exception("Field missing. Details: " + str(line))
message = line["message"]["content"]["parts"][0]
conversation_id = line["conversation_id"]
parent_id = line["message"]["id"]
yield {
"message": message,
"conversation_id": conversation_id,
"parent_id": parent_id,
}
self.conversation_mapping[conversation_id] = parent_id
if parent_id is not None:
self.parent_id = parent_id
if conversation_id is not None:
self.conversation_id = conversation_id
def __check_fields(self, data: dict) -> bool:
try:
data["message"]["content"]
except TypeError:
return False
except KeyError:
return False
return True
def __check_response(self, response):
if response.status_code != 200:
print(response.text)
error = Error()
error.source = "OpenAI"
error.code = response.status_code
error.message = response.text
raise error
def get_conversations(self, offset=0, limit=20):
"""
Get conversations
:param offset: Integer
:param limit: Integer
"""
url = BASE_URL + f"api/conversations?offset={offset}&limit={limit}"
response = self.session.get(url)
self.__check_response(response)
data = json.loads(response.text)
return data["items"]
def get_msg_history(self, convo_id, encoding="utf-8"):
"""
Get message history
:param id: UUID of conversation
"""
url = BASE_URL + f"api/conversation/{convo_id}"
response = self.session.get(url)
if encoding != None:
response.encoding = encoding
else:
response.encoding = response.apparent_encoding
self.__check_response(response)
data = json.loads(response.text)
return data
def gen_title(self, convo_id, message_id):
"""
Generate title for conversation
"""
url = BASE_URL + f"api/conversation/gen_title/{convo_id}"
response = self.session.post(
url,
data=json.dumps(
{"message_id": message_id, "model": "text-davinci-002-render"},
),
)
self.__check_response(response)
def change_title(self, convo_id, title):
"""
Change title of conversation
:param id: UUID of conversation
:param title: String
"""
url = BASE_URL + f"api/conversation/{convo_id}"
response = self.session.patch(url, data=f'{{"title": "{title}"}}')
self.__check_response(response)
def delete_conversation(self, convo_id):
"""
Delete conversation
:param id: UUID of conversation
"""
url = BASE_URL + f"api/conversation/{convo_id}"
response = self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
def clear_conversations(self):
"""
Delete all conversations
"""
url = BASE_URL + "api/conversations"
response = self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
def __map_conversations(self):
conversations = self.get_conversations()
histories = [self.get_msg_history(x["id"]) for x in conversations]
for x, y in zip(conversations, histories):
self.conversation_mapping[x["id"]] = y["current_node"]
def reset_chat(self) -> None:
"""
Reset the conversation ID and parent ID.
:return: None
"""
self.conversation_id = None
self.parent_id = str(uuid.uuid4())
def rollback_conversation(self, num=1) -> None:
"""
Rollback the conversation.
:param num: The number of messages to rollback
:return: None
"""
for _ in range(num):
self.conversation_id = self.conversation_id_prev_queue.pop()
self.parent_id = self.parent_id_prev_queue.pop()
def get_input(prompt):
"""
Multiline input function.
"""
# Display the prompt
print(prompt, end="")
# Initialize an empty list to store the input lines
lines = []
# Read lines of input until the user enters an empty line
while True:
line = input()
if line == "":
break
lines.append(line)
# Join the lines, separated by newlines, and store the result
user_input = "\n".join(lines)
# Return the input
return user_input
def configure():
"""
Looks for a config file in the following locations:
"""
config_files = ["config.json"]
xdg_config_home = getenv("XDG_CONFIG_HOME")
if xdg_config_home:
config_files.append(f"{xdg_config_home}/revChatGPT/config.json")
user_home = getenv("HOME")
if user_home:
config_files.append(f"{user_home}/.config/revChatGPT/config.json")
config_file = next((f for f in config_files if exists(f)), None)
if config_file:
with open(config_file, encoding="utf-8") as f:
config = json.load(f)
else:
print("No config file found.")
raise Exception("No config file found.")
return config
def main(config: dict):
"""
Main function for the chatGPT program.
"""
print("Logging in...")
chatbot = Chatbot(
config,
conversation_id=config.get("conversation_id"),
parent_id=config.get("parent_id"),
)
def handle_commands(command: str) -> bool:
if command == "!help":
print(
"""
!help - Show this message
!reset - Forget the current conversation
!config - Show the current configuration
!rollback x - Rollback the conversation (x being the number of messages to rollback)
!exit - Exit this program
!setconversation - Changes the conversation
""",
)
elif command == "!reset":
chatbot.reset_chat()
print("Chat session successfully reset.")
elif command == "!config":
print(json.dumps(chatbot.config, indent=4))
elif command.startswith("!rollback"):
# Default to 1 rollback if no number is specified
try:
rollback = int(command.split(" ")[1])
except IndexError:
rollback = 1
chatbot.rollback_conversation(rollback)
print(f"Rolled back {rollback} messages.")
elif command.startswith("!setconversation"):
try:
chatbot.conversation_id = chatbot.config[
"conversation_id"
] = command.split(" ")[1]
print("Conversation has been changed")
except IndexError:
print("Please include conversation UUID in command")
elif command == "!exit":
exit(0)
else:
return False
return True
while True:
prompt = get_input("\nYou:\n")
if prompt.startswith("!"):
if handle_commands(prompt):
continue
print("Chatbot: ")
prev_text = ""
for data in chatbot.ask(
prompt,
):
message = data["message"][len(prev_text) :]
print(message, end="", flush=True)
prev_text = data["message"]
print()
# print(message["message"])
if __name__ == "__main__":
print(
"""
ChatGPT - A command-line interface to OpenAI's ChatGPT (https://chat.openai.com/chat)
Repo: github.com/acheong08/ChatGPT
""",
)
print("Type '!help' to show a full list of commands")
print("Press enter twice to submit your question.\n")
main(configure())
| [
"text",
"\nYou:\n",
"content_type"
] |
2024-01-10 | MANIKANTA-POTNURU/Generate-content-maintaining-the-style-and-tone | functions.py | import streamlit as st
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
from io import StringIO
import cohere as ch
import base64
@st.cache_data
def convert_pdf_to_txt_pages(path):
texts = []
rsrcmgr = PDFResourceManager()
retstr = StringIO()
laparams = LAParams()
device = TextConverter(rsrcmgr, retstr, laparams=laparams)
# fp = open(path, 'rb')
interpreter = PDFPageInterpreter(rsrcmgr, device)
size = 0
c = 0
file_pages = PDFPage.get_pages(path)
nbPages = len(list(file_pages))
for page in PDFPage.get_pages(path):
interpreter.process_page(page)
t = retstr.getvalue()
if c == 0:
texts.append(t)
else:
texts.append(t[size:])
c = c+1
size = len(t)
device.close()
retstr.close()
return texts, nbPages
def generate_script(user_input):
co = ch.Client('COHERE_APIKEY') # This is your trial API key (Paste the cohere API KEY)
response = co.generate(
model='command',
prompt=user_input,
max_tokens=300,
temperature=0.9,
k=0,
stop_sequences=[],
return_likelihoods='NONE')
return response.generations[0].text
@st.cache_data
def convert_pdf_to_txt_file(textarea,path):
texts = []
rsrcmgr = PDFResourceManager()
retstr = StringIO()
laparams = LAParams()
device = TextConverter(rsrcmgr, retstr, laparams=laparams)
# fp = open(path, 'rb')
interpreter = PDFPageInterpreter(rsrcmgr, device)
file_pages = PDFPage.get_pages(path)
nbPages = len(list(file_pages))
for page in PDFPage.get_pages(path):
interpreter.process_page(page)
t = retstr.getvalue()
# text = retstr.getvalue()
# fp.close()
device.close()
retstr.close()
t=summarize_script(t)
t=generate_script(textarea+t)
return t, nbPages
def summarize_script(output1):
co = ch.Client('COHERE_APIKEY')# This is your trial API key (Paste the cohere API KEY)
response = co.summarize(
text=output1,
length='auto',
format='auto',
model='summarize-xlarge',
additional_command='',
temperature=0.3,
)
return response.summary
@st.cache_data
def save_pages(pages):
files = []
for page in range(len(pages)):
filename = "page_"+str(page)+".txt"
with open("./file_pages/"+filename, 'w', encoding="utf-8") as file:
file.write(pages[page])
files.append(file.name)
def displayPDF(file):
# Opening file from file path
# with open(file, "rb") as f:
base64_pdf = base64.b64encode(file).decode('utf-8')
# Embedding PDF in HTML
pdf_display = F'<iframe src="data:application/pdf;base64,{base64_pdf}" width="700" height="1000" type="application/pdf"></iframe>'
# Displaying File
st.markdown(pdf_display, unsafe_allow_html=True)
| [] |
2024-01-10 | MANIKANTA-POTNURU/Generate-content-maintaining-the-style-and-tone | Final.py | import streamlit as st
import cohere as ch
import time
from transformers import pipeline
from youtube_transcript_api import YouTubeTranscriptApi
import wikipedia
import requests
from functions import convert_pdf_to_txt_file, displayPDF
def main():
st.title("Generate content maintaining the style and tone")
st.sidebar.title("Choose an Option")
task = st.sidebar.radio("Select a task:", ["Using AI","From YouTube & AI", "From Wikipedia & AI","Chat with pdf using AI","Tone&Style Checker"])
if task == "From YouTube & AI":
st.title("Enter the youtube video Url")
youtube_url = st.text_input("Enter YouTube video URL:")
if youtube_url:
video_id = youtube_url.split("=")[-1]
if st.button("Summarize"):
summarized_text = summarize_youtube(video_id)
st.subheader("Summarized Transcript:")
st.write(summarized_text)
not_satisfied_checkbox = st.checkbox("Not Satisfied")
if not_satisfied_checkbox:
not_satisfied(summarized_text)
time.sleep(30000)
elif task == "From Wikipedia & AI":
st.title("Input The Text")
article_title = st.text_input("Enter Wikipedia article title:")
if article_title:
if st.button("Summarize"):
summarized_text =summarize_script(summarize_wikipedia(article_title))
st.subheader("Summarized Article:")
st.write(summarized_text)
not_satisfied_checkbox = st.checkbox("Not Satisfied")
if not_satisfied_checkbox:
not_satisfied(summarized_text)
time.sleep(30000)
elif task=="Using AI":
st.title("Input The Prompt")
user_input = st.text_input("Enter something:")
if st.button("Submit"):
output1 = generate_script(user_input)
st.write(output1)
not_satisfied_checkbox = st.checkbox("Not Satisfied")
if not_satisfied_checkbox:
not_satisfied(output1)
time.sleep(30000)
elif task=="Chat with pdf using AI":
pdf_file = st.file_uploader("Load your PDF", type=['pdf'])
if pdf_file:
path = pdf_file.read()
file_extension = pdf_file.name.split(".")[-1]
if file_extension == "pdf":
# display document
textarea=st.text_input("Enter some more text:")
with st.expander("Display document"):
displayPDF(path)
text_data_f, nbPages = convert_pdf_to_txt_file(textarea,pdf_file)
totalPages = "Pages: "+str(nbPages)+" in total"
st.info(totalPages)
st.download_button("Download txt file", text_data_f)
elif task=="Tone&Style Checker":
st.title("Text Tone Checker")
tetx= st.text_input("Enter something:")
st.write(tone_reco(tetx))
def not_satisfied(output1):
user_input1 = st.text_input("Enter some more text:")
tone=tone_reco(output1)
context = summarize_script(output1)
prompt1 = ' Follow this '+tone+'tone for this prompt regarding' + user_input1 +'based on this '+context #Prompt If the user is not satisfied or if user want some more
if prompt1!="":
st.write(generate_script(prompt1))
def tone_reco(tetx):
response = requests.post(
"https://api.sapling.ai/api/v1/tone", # SaplingAi
json={
"key": "SAPLING_APIKEY",#Use SaplingAi Api For Recongnizing The tone and Style For an text
"text": tetx
}
)
data = response.json()
overall_tones = data.get('overall', [])
results = data.get('results', [])
tone_results = overall_tones + (results[0] if results else [])
tones_formatted = [f"{tone[1]} ({tone[2]})" for tone in tone_results]
output_sentence = ', '.join(tones_formatted)
return output_sentence
def generate_script(user_input):
co = ch.Client('COHERE_APIKEY') # This is your trial API key (Paste the cohere API KEY)
response = co.generate(
model='command',
prompt=user_input,
max_tokens=300,
temperature=0.9,
k=0,
stop_sequences=[],
return_likelihoods='NONE')
return response.generations[0].text
def summarize_script(output1):
co = ch.Client('COHERE_APIKEY') # This is your trial API key(Paste the Api Key)
response = co.summarize(
text=output1,
length='auto',
format='auto',
model='summarize-xlarge',
additional_command='',
temperature=0.3,
)
return response.summary
def summarize_youtube(video_id):
transcript = YouTubeTranscriptApi.get_transcript(video_id)
result = ""
for i in transcript:
result += ' ' + i['text']
summarizer = pipeline('summarization')
summarized_text = summarizer(result)
return summarized_text[0]['summary_text']
def summarize_wikipedia(article_title):
content = wikipedia.page(article_title).content
summarizer = pipeline('summarization')
max_length = 1024 # Adjust this value as needed
chunks = [content[i:i + max_length] for i in range(0, len(content), max_length)]
summarized_text = ""
for chunk in chunks:
summarized_chunk = summarizer(chunk)
summarized_text += summarized_chunk[0]['summary_text'] + " "
return summarized_text
if __name__ == "__main__":
main()
| [
" Follow this PLACEHOLDERtone for this prompt regardingPLACEHOLDERbased on this PLACEHOLDER"
] |
2024-01-10 | rickli92/LiCSBAS | LiCSBAS_lib~LiCSBAS_inv_lib.py | #!/usr/bin/env python3
"""
========
Overview
========
Python3 library of time series inversion functions for LiCSBAS.
=========
Changelog
=========
v1.4 20200703 Yu Morioshita, GSI
- Replace problematic terms
v1.3 20200103 Yu Morioshita, Uni of Leeds and GSI
- Bag fix in calc_stc (return nonzero even if two adjacent pixels have identical ts)
v1.2 20190823 Yu Morioshita, Uni of Leeds and GSI
- Bag fix in calc_velstd_withnan
- Remove calc_velstd
v1.1 20190807 Yu Morioshita, Uni of Leeds and GSI
- Add calc_velsin
v1.0 20190730 Yu Morioshita, Uni of Leeds and GSI
- Original implementation
"""
import warnings
import numpy as np
import datetime as dt
import multiprocessing as multi
from astropy.stats import bootstrap
from astropy.utils import NumpyRNGContext
import LiCSBAS_tools_lib as tools_lib
#%%
def make_sb_matrix(ifgdates):
"""
Make small baseline incidence-like matrix.
Composed of 1 between primary and secondary. (n_ifg, n_im-1)
Unknown is incremental displacement.
"""
imdates = tools_lib.ifgdates2imdates(ifgdates)
n_im = len(imdates)
n_ifg = len(ifgdates)
G = np.zeros((n_ifg, n_im-1), dtype=np.int16)
for ifgix, ifgd in enumerate(ifgdates):
primarydate = ifgd[:8]
primaryix = imdates.index(primarydate)
secondarydate = ifgd[-8:]
secondaryix = imdates.index(secondarydate)
G[ifgix, primaryix:secondaryix] = 1
return G
#%%
def make_sb_matrix2(ifgdates):
"""
Make small baseline incidence-like matrix.
Composed of -1 at primary and 1 at secondary. (n_ifg, n_im)
Unknown is cumulative displacement.
"""
imdates = tools_lib.ifgdates2imdates(ifgdates)
n_im = len(imdates)
n_ifg = len(ifgdates)
A = np.zeros((n_ifg, n_im), dtype=np.int16)
for ifgix, ifgd in enumerate(ifgdates):
primarydate = ifgd[:8]
primaryix = imdates.index(primarydate)
secondarydate = ifgd[-8:]
secondaryix = imdates.index(secondarydate)
A[ifgix, primaryix] = -1
A[ifgix, secondaryix] = 1
return A
#%%
def invert_nsbas(unw, G, dt_cum, gamma, n_core):
"""
Calculate increment displacement difference by NSBAS inversion. Points with all unw data are solved by simple SB inversion firstly at a time.
Inputs:
unw : Unwrapped data block for each point (n_pt, n_ifg)
Still include nan to keep dimention
G : Design matrix (1 between primary and secondary) (n_ifg, n_im-1)
dt_cum : Cumulative years(or days) for each image (n_im)
gamma : Gamma value for NSBAS inversion, should be small enough (e.g., 0.0001)
n_core : Number of cores for parallel processing
Returns:
inc : Incremental displacement (n_im-1, n_pt)
vel : Velocity (n_pt)
vconst : Constant part of linear velocity (c of vt+c) (n_pt)
"""
if n_core != 1:
global Gall, unw_tmp, mask ## for para_wrapper
### Settings
n_pt, n_ifg = unw.shape
n_im = G.shape[1]+1
result = np.zeros((n_im+1, n_pt), dtype=np.float32)*np.nan #[inc, vel, const]
### Set matrix of NSBAS part (bottom)
Gbl = np.tril(np.ones((n_im, n_im-1), dtype=np.float32), k=-1) #lower tri matrix without diag
Gbr = -np.ones((n_im, 2), dtype=np.float32)
Gbr[:, 0] = -dt_cum
Gb = np.concatenate((Gbl, Gbr), axis=1)*gamma
Gt = np.concatenate((G, np.zeros((n_ifg, 2), dtype=np.float32)), axis=1)
Gall = np.float32(np.concatenate((Gt, Gb)))
### Solve points with full unw data at a time. Very fast.
bool_pt_full = np.all(~np.isnan(unw), axis=1)
n_pt_full = bool_pt_full.sum()
if n_pt_full!=0:
print(' Solving {0:6}/{1:6}th points with full unw at a time...'.format(n_pt_full, n_pt), flush=True)
## Sovle
unw_tmp = np.concatenate((unw[bool_pt_full, :], np.zeros((n_pt_full, n_im), dtype=np.float32)), axis=1).transpose()
result[:, bool_pt_full] = np.linalg.lstsq(Gall, unw_tmp, rcond=None)[0]
### Solve other points with nan point by point.
unw_tmp = np.concatenate((unw[~bool_pt_full, :], np.zeros((n_pt-n_pt_full, n_im), dtype=np.float32)), axis=1).transpose()
mask = (~np.isnan(unw_tmp))
unw_tmp[np.isnan(unw_tmp)] = 0
print(' Next, solve {0} points including nan point-by-point...'.format(n_pt-n_pt_full), flush=True)
if n_core == 1:
result[:, ~bool_pt_full] = censored_lstsq_slow(Gall, unw_tmp, mask) #(n_im+1, n_pt)
else:
print(' {} parallel processing'.format(n_core), flush=True)
args = [i for i in range(n_pt-n_pt_full)]
p = multi.Pool(n_core)
_result = p.map(censored_lstsq_slow_para_wrapper, args) #list[n_pt][length]
result[:, ~bool_pt_full] = np.array(_result).T
inc = result[:n_im-1, :]
vel = result[n_im-1, :]
vconst = result[n_im, :]
return inc, vel, vconst
def censored_lstsq_slow_para_wrapper(i):
### Use global value
if np.mod(i, 1000) == 0:
print(' Running {0:6}/{1:6}th point...'.format(i, unw_tmp.shape[1]), flush=True)
m = mask[:,i] # drop rows where mask is zero
try:
X = np.linalg.lstsq(Gall[m], unw_tmp[m,i], rcond=None)[0]
except:
X = np.zeros((Gall.shape[1]), dtype=np.float32)*np.nan
return X
#%%
def invert_nsbas_wls(unw, var, G, dt_cum, gamma, n_core):
"""
Calculate increment displacement difference by NSBAS inversion with WLS.
Inputs:
unw : Unwrapped data block for each point (n_pt, n_ifg)
Still include nan to keep dimention
var : Variance estimated from coherence (n_pt, n_ifg)
G : Design matrix (1 between primary and secondary) (n_ifg, n_im-1)
dt_cum : Cumulative years(or days) for each image (n_im)
gamma : Gamma value for NSBAS inversion, should be small enough (e.g., 0.0001)
n_core : Number of cores for parallel processing
Returns:
inc : Incremental displacement (n_im-1, n_pt)
vel : Velocity (n_pt)
vconst : Constant part of linear velocity (c of vt+c) (n_pt)
"""
global Gall, unw_tmp, var_tmp, mask ## for para_wrapper
### Settings
n_pt, n_ifg = unw.shape
n_im = G.shape[1]+1
result = np.zeros((n_im+1, n_pt), dtype=np.float32)*np.nan #[inc, vel, const]
### Set matrix of NSBAS part (bottom)
Gbl = np.tril(np.ones((n_im, n_im-1), dtype=np.float32), k=-1) #lower tri matrix without diag
Gbr = -np.ones((n_im, 2), dtype=np.float32)
Gbr[:, 0] = -dt_cum
Gb = np.concatenate((Gbl, Gbr), axis=1)*gamma
Gt = np.concatenate((G, np.zeros((n_ifg, 2), dtype=np.float32)), axis=1)
Gall = np.float32(np.concatenate((Gt, Gb)))
### Make unw_tmp, var_tmp, and mask
unw_tmp = np.concatenate((unw, np.zeros((n_pt, n_im), dtype=np.float32)), axis=1).transpose()
mask = (~np.isnan(unw_tmp))
unw_tmp[np.isnan(unw_tmp)] = 0
var_tmp = np.concatenate((var, 50*np.ones((n_pt, n_im), dtype=np.float32)), axis=1).transpose() #50 is var for coh=0.1, to scale bottom part of Gall
if n_core == 1:
for i in range(n_pt):
result[:, i] = wls_nsbas(i) #(n_im+1, n_pt)
else:
print(' {} parallel processing'.format(n_core), flush=True)
args = [i for i in range(n_pt)]
p = multi.Pool(n_core)
_result = p.map(wls_nsbas, args) #list[n_pt][length]
result = np.array(_result).T
inc = result[:n_im-1, :]
vel = result[n_im-1, :]
vconst = result[n_im, :]
return inc, vel, vconst
def wls_nsbas(i):
### Use global value of Gall, unw_tmp, mask
if np.mod(i, 1000) == 0:
print(' Running {0:6}/{1:6}th point...'.format(i, unw_tmp.shape[1]), flush=True)
## Weight unw and G
Gall_w = Gall/np.sqrt(np.float64(var_tmp[:,i][:,np.newaxis]))
unw_tmp_w = unw_tmp[:, i]/np.sqrt(np.float64(var_tmp[:,i]))
m = mask[:,i] # drop rows where mask is zero
try:
X = np.linalg.lstsq(Gall_w[m], unw_tmp_w[m], rcond=None)[0]
except:
X = np.zeros((Gall.shape[1]), dtype=np.float32)*np.nan
return X
#%%
def calc_vel(cum, dt_cum):
"""
Calculate velocity.
Inputs:
cum : cumulative phase block for each point (n_pt, n_im)
dt_cum : Cumulative days for each image (n_im)
Returns:
vel : Velocity (n_pt)
vconst : Constant part of linear velocity (c of vt+c) (n_pt)
"""
n_pt, n_im = cum.shape
result = np.zeros((2, n_pt), dtype=np.float32)*np.nan #[vconst, vel]
G = np.stack((np.ones_like(dt_cum), dt_cum), axis=1)
vconst = np.zeros((n_pt), dtype=np.float32)*np.nan
vel = np.zeros((n_pt), dtype=np.float32)*np.nan
bool_pt_full = np.all(~np.isnan(cum), axis=1)
n_pt_full = bool_pt_full.sum()
if n_pt_full!=0:
print(' Solving {0:6}/{1:6}th points with full cum at a time...'.format(n_pt_full, n_pt), flush=True)
## Sovle
result[:, bool_pt_full] = np.linalg.lstsq(G, cum[bool_pt_full, :].transpose(), rcond=None)[0]
### Solve other points with nan point by point.
cum_tmp = cum[~bool_pt_full, :].transpose()
mask = (~np.isnan(cum_tmp))
cum_tmp[np.isnan(cum_tmp)] = 0
print(' Next, solve {0} points including nan point-by-point...'.format(n_pt-n_pt_full), flush=True)
result[:, ~bool_pt_full] = censored_lstsq_slow(G, cum_tmp, mask) #(n_im+1, n_pt)
vconst = result[0, :]
vel = result[1, :]
return vel, vconst
#%%
def calc_velsin(cum, dt_cum, imd0):
"""
Calculate velocity and coeffcients of sin (annual) function.
Inputs:
cum : cumulative phase block for each point (n_pt, n_im)
dt_cum : Cumulative days for each image (n_im)
imd0 : Date of first acquistion (str, yyyymmdd)
Returns:
vel : Velocity (n_pt)
vconst : Constant part of linear velocity (c of vt+c) (n_pt)
amp : Amplitude of sin function
dt : Time difference of sin function wrt Jan 1 (day)
"""
doy0 = (dt.datetime.strptime(imd0, '%Y%m%d')-dt.datetime.strptime(imd0[0:4]+'0101', '%Y%m%d')).days
n_pt, n_im = cum.shape
result = np.zeros((4, n_pt), dtype=np.float32)*np.nan #[vconst, vel, coef_s, coef_c]
sin = np.sin(2*np.pi*dt_cum)
cos = np.cos(2*np.pi*dt_cum)
G = np.stack((np.ones_like(dt_cum), dt_cum, sin, cos), axis=1)
vconst = np.zeros((n_pt), dtype=np.float32)*np.nan
vel = np.zeros((n_pt), dtype=np.float32)*np.nan
amp = np.zeros((n_pt), dtype=np.float32)*np.nan
delta_t = np.zeros((n_pt), dtype=np.float32)*np.nan
bool_pt_full = np.all(~np.isnan(cum), axis=1)
n_pt_full = bool_pt_full.sum()
if n_pt_full!=0:
print(' Solving {0:6}/{1:6}th points with full cum at a time...'.format(n_pt_full, n_pt), flush=True)
## Sovle
result[:, bool_pt_full] = np.linalg.lstsq(G, cum[bool_pt_full, :].transpose(), rcond=None)[0]
### Solve other points with nan point by point.
cum_tmp = cum[~bool_pt_full, :].transpose()
mask = (~np.isnan(cum_tmp))
cum_tmp[np.isnan(cum_tmp)] = 0
print(' Next, solve {0} points including nan point-by-point...'.format(n_pt-n_pt_full), flush=True)
result[:, ~bool_pt_full] = censored_lstsq_slow(G, cum_tmp, mask) #(n_im+1, n_pt)
vconst = result[0, :]
vel = result[1, :]
coef_s = result[2, :]
coef_c = result[3, :]
amp = np.sqrt(coef_s**2+coef_c**2)
delta_t = np.arctan2(-coef_c, coef_s)/2/np.pi*365.25 ## wrt 1st img
delta_t = delta_t+doy0 ## wrt Jan 1
delta_t[delta_t < 0] = delta_t[delta_t < 0]+365.25 #0-365.25
delta_t[delta_t > 365.25] = delta_t[delta_t > 365.25]-365.25
return vel, vconst, amp, delta_t
#%%
def calc_velstd_withnan(cum, dt_cum):
"""
Calculate std of velocity by bootstrap for each point which may include nan.
Inputs:
cum : Cumulative phase block for each point (n_pt, n_im)
Can include nan.
dt_cum : Cumulative days for each image (n_im)
Returns:
vstd : Std of Velocity for each point (n_pt)
"""
global bootcount, bootnum
n_pt, n_im = cum.shape
bootnum = 100
bootcount = 0
vstd = np.zeros((n_pt), dtype=np.float32)
G = np.stack((np.ones_like(dt_cum), dt_cum), axis=1)
data = cum.transpose().copy()
ixs_day = np.arange(n_im)
mask = (~np.isnan(data))
data[np.isnan(data)] = 0
velinv = lambda x : censored_lstsq2(G[x, :], data[x, :], mask[x, :])[1]
with NumpyRNGContext(1):
bootresult = bootstrap(ixs_day, bootnum, bootfunc=velinv)
vstd = np.nanstd(bootresult, axis=0)
print('')
return vstd
def censored_lstsq2(A, B, M):
## http://alexhwilliams.info/itsneuronalblog/2018/02/26/censored-lstsq/
global bootcount, bootnum
print('\r Running {0:3}/{1:3}th bootstrap...'.format(bootcount, bootnum), end='', flush=True)
bootcount = bootcount+1
# if B is a vector, simply drop out corresponding rows in A
if B.ndim == 1 or B.shape[1] == 1:
return np.linalg.leastsq(A[M], B[M])[0]
# else solve via tensor representation
rhs = np.dot(A.T, M * B).T[:,:,None] # n x r x 1 tensor
T = np.matmul(A.T[None,:,:], M.T[:,:,None] * A[None,:,:]) # n x r x r tensor
try:
X = np.squeeze(np.linalg.solve(T, rhs)).T # transpose to get r x n
except: ## In case Singular matrix
X = np.zeros((B.shape[1]), dtype=np.float32)*np.nan
return X
#%%
def calc_stc(cum):
"""
Calculate STC (spatio-temporal consistensy; Hanssen et al., 2008, Terrafirma) of time series of displacement.
Note that isolated pixels (which have no surrounding pixel) have nan of STC.
Input:
cum : Cumulative displacement (n_im, length, width)
Return:
stc : STC (length, width)
"""
n_im, length, width = cum.shape
### Add 1 pixel margin to cum data filled with nan
cum1 = np.ones((n_im, length+2, width+2), dtype=np.float32)*np.nan
cum1[:, 1:length+1, 1:width+1] = cum
### Calc STC for surrounding 8 pixels
_stc = np.ones((length, width, 8), dtype=np.float32)*np.nan
pixels = [[0, 0], [0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1], [2, 2]]
## Left Top = [0, 0], Rigth Bottmon = [2, 2], Center = [1, 1]
for i, pixel in enumerate(pixels):
### Spatial difference (surrounding pixel-center)
d_cum = cum1[:, pixel[0]:length+pixel[0], pixel[1]:width+pixel[1]] - cum1[:, 1:length+1, 1:width+1]
### Temporal difference (double difference)
dd_cum = d_cum[:-1,:,:]-d_cum[1:,:,:]
### STC (i.e., RMS of DD)
sumsq_dd_cum = np.nansum(dd_cum**2, axis=0)
n_dd_cum = np.float32(np.sum(~np.isnan(dd_cum), axis=0)) #nof non-nan
n_dd_cum[n_dd_cum==0] = np.nan #to avoid 0 division
_stc[:, :, i] = np.sqrt(sumsq_dd_cum/n_dd_cum)
### Strange but some adjacent pixels can have identical time series,
### resulting in 0 of stc. To avoid this, replace 0 with nan.
_stc[_stc==0] = np.nan
### Identify minimum value as final STC
with warnings.catch_warnings(): ## To silence warning by All-Nan slice
warnings.simplefilter('ignore', RuntimeWarning)
stc = np.nanmin(_stc, axis=2)
return stc
#%%
def censored_lstsq(A, B, M):
## http://alexhwilliams.info/itsneuronalblog/2018/02/26/censored-lstsq/
## This is actually slow because matmul does not use multicore...
## Need multiprocessing.
## Precison is bad widh bad condition, so this is unfortunately useless for NSABS...
## But maybe usable for vstd because its condition is good.
"""Solves least squares problem subject to missing data.
Note: uses a broadcasted solve for speed.
Args
----
A (ndarray) : m x r matrix
B (ndarray) : m x n matrix
M (ndarray) : m x n binary matrix (zeros indicate missing values)
Returns
-------
X (ndarray) : r x n matrix that minimizes norm(M*(AX - B))
"""
# Note: we should check A is full rank but we won't bother...
# if B is a vector, simply drop out corresponding rows in A
if B.ndim == 1 or B.shape[1] == 1:
return np.linalg.leastsq(A[M], B[M])[0]
# else solve via tensor representation
rhs = np.dot(A.T, M * B).T[:,:,None] # n x r x 1 tensor
T = np.matmul(A.T[None,:,:], M.T[:,:,None] * A[None,:,:]) # n x r x r tensor
return np.squeeze(np.linalg.solve(T, rhs)).T # transpose to get r x n
#%%
def censored_lstsq_slow(A, B, M):
## http://alexhwilliams.info/itsneuronalblog/2018/02/26/censored-lstsq/
"""Solves least squares problem subject to missing data.
Note: uses a for loop over the columns of B, leading to a
slower but more numerically stable algorithm
Args
----
A (ndarray) : m x r matrix
B (ndarray) : m x n matrix
M (ndarray) : m x n binary matrix (zeros indicate missing values)
Returns
-------
X (ndarray) : r x n matrix that minimizes norm(M*(AX - B))
"""
X = np.empty((A.shape[1], B.shape[1]))
for i in range(B.shape[1]):
if np.mod(i, 1000) == 0:
print('\r Running {0:6}/{1:6}th point...'.format(i, B.shape[1]), end='', flush=True)
m = M[:,i] # drop rows where mask is zero
try:
X[:,i] = np.linalg.lstsq(A[m], B[m,i], rcond=None)[0]
except:
X[:,i] = np.nan
print('')
return X
| [] |
2024-01-10 | rickli92/LiCSBAS | bin~LiCSBAS13_sb_inv.py | #!/usr/bin/env python3
"""
v1.2 20200225 Yu Morishita, Uni of Leeds and GSI
========
Overview
========
This script inverts the SB network of unw to obtain the time series and velocity using NSBAS (López-Quiroz et al., 2009; Doin et al., 2011) approach.
A stable reference point is determined after the inversion. RMS of the time series wrt median among all points is calculated for each point. Then the point with minimum RMS and minimum n_gap is selected as new stable reference point.
===============
Input & output files
===============
Inputs in GEOCml*/ :
- yyyymmdd_yyyymmdd/
- yyyymmdd_yyyymmdd.unw
- yyyymmdd_yyyymmdd.cc
- EQA.dem_par
- slc.mli.par
- baselines (may be dummy)
Inputs in TS_GEOCml*/info/ :
- 11bad_ifg.txt
- 12bad_ifg.txt
- 12ref.txt
Outputs in TS_GEOCml*/ :
- cum.h5 : Cumulative displacement (time-seires) in mm
- results/
- vel[.png] : Velocity in mm/yr (positive means LOS decrease; uplift)
- vintercept[.png] : Constant part of linear velocity (c for vt+c) in mm
- resid_rms[.png] : RMS of residual in mm
- n_gap[.png] : Number of gaps in SB network
- n_ifg_noloop[.png] : Number of ifgs with no loop
- maxTlen[.png] : Max length of continous SB network in year
- info/
- 13parameters.txt : List of used parameters
- 13used_image.txt : List of used images
- 13resid.txt : List of RMS of residual for each ifg
- 13ref.txt[kml] : Auto-determined stable ref point
- 13rms_cum_wrt_med[.png] : RMS of cum wrt median used for ref selection
- 13increment/yyyymmdd_yyyymmdd.increment.png
: Comparison between unw and inverted incremental displacement
- 13resid/yyyymmdd_yyyymmdd.res.png : Residual for each ifg
- network/network13*.png : Figures of the network
=====
Usage
=====
LiCSBAS13_sb_inv.py -d ifgdir [-t tsadir] [--inv_alg LS|WLS] [--mem_size float] [--gamma float] [--n_core int] [--n_unw_r_thre float] [--keep_incfile]
-d Path to the GEOCml* dir containing stack of unw data
-t Path to the output TS_GEOCml* dir.
--inv_alg Inversion algolism (Default: LS)
LS : NSBAS Least Square with no weight
WLS: NSBAS Weighted Least Square (not well tested)
Weight (variance) is calculated by (1-coh**2)/(2*coh**2)
--mem_size Max memory size for each patch in MB. (Default: 4000)
--gamma Gamma value for NSBAS inversion (Default: 0.0001)
--n_core Number of cores for parallel processing (Default: 1)
--n_unw_r_thre
Threshold of n_unw (number of used unwrap data)
(Note this value is ratio to the number of images; i.e., 1.5*n_im)
Larger number (e.g. 2.5) makes processing faster but result sparser.
(Default: 1 and 0.5 for C- and L-band, respectively)
--keep_incfile
Not remove inc and resid files (Default: remove them)
"""
#%% Change log
'''
v1.2 20200225 Yu Morishita, Uni of Leeds and GSI
- Not output network pdf
- Change color of png
- Change name of parameters.txt to 13parameters.txt
- Deal with cc file in uint8 format
- Automatically find stable reference point
v1.1 20190829 Yu Morishita, Uni of Leeds and GSI
- Remove cum.h5 if exists before creation
v1.0 20190730 Yu Morishita, Uni of Leeds and GSI
- Original implementation
'''
#%% Import
import getopt
import os
import sys
import re
import time
import h5py as h5
import numpy as np
import datetime as dt
import SCM
import LiCSBAS_io_lib as io_lib
import LiCSBAS_inv_lib as inv_lib
import LiCSBAS_tools_lib as tools_lib
import LiCSBAS_loop_lib as loop_lib
import LiCSBAS_plot_lib as plot_lib
class Usage(Exception):
"""Usage context manager"""
def __init__(self, msg):
self.msg = msg
#%% Main
def main(argv=None):
#%% Check argv
if argv == None:
argv = sys.argv
start = time.time()
ver=1.2; date=20200225; author="Y. Morishita"
print("\n{} ver{} {} {}".format(os.path.basename(argv[0]), ver, date, author), flush=True)
print("{} {}".format(os.path.basename(argv[0]), ' '.join(argv[1:])), flush=True)
#%% Set default
ifgdir = []
tsadir = []
inv_alg = 'LS'
n_core = 1
memory_size = 4000
gamma = 0.0001
n_unw_r_thre = []
keep_incfile = False
cmap_vel = SCM.roma.reversed()
cmap_noise = 'viridis'
cmap_noise_r = 'viridis_r'
#%% Read options
try:
try:
opts, args = getopt.getopt(argv[1:], "hd:t:", ["help", "mem_size=", "gamma=", "n_unw_r_thre=", "keep_incfile", "inv_alg=", "n_core="])
except getopt.error as msg:
raise Usage(msg)
for o, a in opts:
if o == '-h' or o == '--help':
print(__doc__)
return 0
elif o == '-d':
ifgdir = a
elif o == '-t':
tsadir = a
elif o == '--mem_size':
memory_size = float(a)
elif o == '--gamma':
gamma = float(a)
elif o == '--n_unw_r_thre':
n_unw_r_thre = float(a)
elif o == '--keep_incfile':
keep_incfile = True
elif o == '--inv_alg':
inv_alg = a
elif o == '--n_core':
n_core = int(a)
if not ifgdir:
raise Usage('No data directory given, -d is not optional!')
elif not os.path.isdir(ifgdir):
raise Usage('No {} dir exists!'.format(ifgdir))
elif not os.path.exists(os.path.join(ifgdir, 'slc.mli.par')):
raise Usage('No slc.mli.par file exists in {}!'.format(ifgdir))
except Usage as err:
print("\nERROR:", file=sys.stderr, end='')
print(" "+str(err.msg), file=sys.stderr)
print("\nFor help, use -h or --help.\n", file=sys.stderr)
return 2
#%% Directory settings
ifgdir = os.path.abspath(ifgdir)
if not tsadir:
tsadir = os.path.join(os.path.dirname(ifgdir), 'TS_'+os.path.basename(ifgdir))
if not os.path.isdir(tsadir):
print('\nNo {} exists!'.format(tsadir), file=sys.stderr)
return 1
tsadir = os.path.abspath(tsadir)
resultsdir = os.path.join(tsadir, 'results')
infodir = os.path.join(tsadir, 'info')
netdir = os.path.join(tsadir, 'network')
bad_ifg11file = os.path.join(infodir, '11bad_ifg.txt')
bad_ifg12file = os.path.join(infodir, '12bad_ifg.txt')
reffile = os.path.join(infodir, '12ref.txt')
if not os.path.exists(reffile): ## for old LiCSBAS12 < v1.1
reffile = os.path.join(infodir, 'ref.txt')
incdir = os.path.join(tsadir,'13increment')
if not os.path.exists(incdir): os.mkdir(incdir)
resdir = os.path.join(tsadir,'13resid')
if not os.path.exists(resdir): os.mkdir(resdir)
restxtfile = os.path.join(infodir,'13resid.txt')
cumh5file = os.path.join(tsadir,'cum.h5')
#%% Check files
try:
if not os.path.exists(bad_ifg11file):
raise Usage('No 11bad_ifg.txt file exists in {}!'.format(infodir))
if not os.path.exists(bad_ifg12file):
raise Usage('No 12bad_ifg.txt file exists in {}!'.format(infodir))
if not os.path.exists(reffile):
raise Usage('No 12ref.txt file exists in {}!'.format(infodir))
except Usage as err:
print("\nERROR:", file=sys.stderr, end='')
print(" "+str(err.msg), file=sys.stderr)
print("\nFor help, use -h or --help.\n", file=sys.stderr)
return 2
#%% Set preliminaly reference
with open(reffile, "r") as f:
refarea = f.read().split()[0] #str, x1/x2/y1/y2
refx1, refx2, refy1, refy2 = [int(s) for s in re.split('[:/]', refarea)]
#%% Read data information
### Get size
mlipar = os.path.join(ifgdir, 'slc.mli.par')
width = int(io_lib.get_param_par(mlipar, 'range_samples'))
length = int(io_lib.get_param_par(mlipar, 'azimuth_lines'))
speed_of_light = 299792458 #m/s
radar_frequency = float(io_lib.get_param_par(mlipar, 'radar_frequency')) #Hz
wavelength = speed_of_light/radar_frequency #meter
coef_r2m = -wavelength/4/np.pi*1000 #rad -> mm, positive is -LOS
### Calc pixel spacing depending on IFG or GEOC, used in later spatial filter
dempar = os.path.join(ifgdir, 'EQA.dem_par')
width_geo = int(io_lib.get_param_par(dempar, 'width'))
length_geo = int(io_lib.get_param_par(dempar, 'nlines'))
dlat = float(io_lib.get_param_par(dempar, 'post_lat')) #negative
dlon = float(io_lib.get_param_par(dempar, 'post_lon')) #positive
lat1 = float(io_lib.get_param_par(dempar, 'corner_lat'))
lon1 = float(io_lib.get_param_par(dempar, 'corner_lon'))
if width == width_geo and length == length_geo: ## Geocoded
print('In geographical coordinates', flush=True)
centerlat = lat1+dlat*(length/2)
ra = float(io_lib.get_param_par(dempar, 'ellipsoid_ra'))
recip_f = float(io_lib.get_param_par(dempar, 'ellipsoid_reciprocal_flattening'))
rb = ra*(1-1/recip_f) ## polar radius
pixsp_a = 2*np.pi*rb/360*abs(dlat)
pixsp_r = 2*np.pi*ra/360*dlon*np.cos(np.deg2rad(centerlat))
else:
print('In radar coordinates', flush=True)
pixsp_r_org = float(io_lib.get_param_par(mlipar, 'range_pixel_spacing'))
pixsp_a = float(io_lib.get_param_par(mlipar, 'azimuth_pixel_spacing'))
inc_agl = float(io_lib.get_param_par(mlipar, 'incidence_angle'))
pixsp_r = pixsp_r_org/np.sin(np.deg2rad(inc_agl))
### Set n_unw_r_thre and cycle depending on L- or C-band
if wavelength > 0.2: ## L-band
if not n_unw_r_thre: n_unw_r_thre = 0.5
cycle = 1.5 # 2pi/cycle for comparison png
elif wavelength <= 0.2: ## C-band
if not n_unw_r_thre: n_unw_r_thre = 1.0
cycle = 3 # 3*2pi/cycle for comparison png
#%% Read date and network information
### Get all ifgdates in ifgdir
ifgdates_all = tools_lib.get_ifgdates(ifgdir)
imdates_all = tools_lib.ifgdates2imdates(ifgdates_all)
n_im_all = len(imdates_all)
n_ifg_all = len(ifgdates_all)
### Read bad_ifg11 and 12
bad_ifg11 = io_lib.read_ifg_list(bad_ifg11file)
bad_ifg12 = io_lib.read_ifg_list(bad_ifg12file)
bad_ifg_all = list(set(bad_ifg11+bad_ifg12))
bad_ifg_all.sort()
### Remove bad ifgs and images from list
ifgdates = list(set(ifgdates_all)-set(bad_ifg_all))
ifgdates.sort()
imdates = tools_lib.ifgdates2imdates(ifgdates)
n_ifg = len(ifgdates)
n_ifg_bad = len(set(bad_ifg11+bad_ifg12))
n_im = len(imdates)
n_unw_thre = int(n_unw_r_thre*n_im)
### Make 13used_image.txt
imfile = os.path.join(infodir, '13used_image.txt')
with open(imfile, 'w') as f:
for i in imdates:
print('{}'.format(i), file=f)
### Calc dt in year
imdates_dt = ([dt.datetime.strptime(imd, '%Y%m%d').toordinal() for imd in imdates])
dt_cum = np.float32((np.array(imdates_dt)-imdates_dt[0])/365.25)
### Construct G and Aloop matrix for increment and n_gap
G = inv_lib.make_sb_matrix(ifgdates)
Aloop = loop_lib.make_loop_matrix(ifgdates)
n_loop = Aloop.shape[0] # (n_loop,n_ifg)
#%% Plot network
## Read bperp data or dummy
bperp_file = os.path.join(ifgdir, 'baselines')
if os.path.exists(bperp_file):
bperp_all = io_lib.read_bperp_file(bperp_file, imdates_all)
bperp = io_lib.read_bperp_file(bperp_file, imdates)
else: #dummy
bperp_all = np.random.random(len(imdates_all)).tolist()
bperp = np.random.random(n_im).tolist()
pngfile = os.path.join(netdir, 'network13_all.png')
plot_lib.plot_network(ifgdates_all, bperp_all, [], pngfile)
pngfile = os.path.join(netdir, 'network13.png')
plot_lib.plot_network(ifgdates_all, bperp_all, bad_ifg_all, pngfile)
pngfile = os.path.join(netdir, 'network13_nobad.png')
plot_lib.plot_network(ifgdates_all, bperp_all, bad_ifg_all, pngfile, plot_bad=False)
#%% Get patch row number
if inv_alg == 'WLS':
n_store_data = n_ifg*3+n_im*2+n_im*0.3 #
else:
n_store_data = n_ifg*2+n_im*2+n_im*0.3 #not sure
n_patch, patchrow = tools_lib.get_patchrow(width, length, n_store_data, memory_size)
#%% Display and output settings & parameters
print('')
print('Size of image (w,l) : {}, {}'.format(width, length))
print('# of all images : {}'.format(n_im_all))
print('# of images to be used : {}'.format(n_im))
print('# of all ifgs : {}'.format(n_ifg_all))
print('# of ifgs to be used : {}'.format(n_ifg))
print('# of removed ifgs : {}'.format(n_ifg_bad))
print('Threshold of used unw : {}'.format(n_unw_thre))
print('')
print('Reference area (X/Y) : {}:{}/{}:{}'.format(refx1, refx2, refy1, refy2))
print('Allowed memory size : {} MB'.format(memory_size))
print('Number of patches : {}'.format(n_patch))
print('Inversion algorism : {}'.format(inv_alg))
print('Gamma value : {}'.format(gamma), flush=True)
with open(os.path.join(infodir, '13parameters.txt'), "w") as f:
print('range_samples: {}'.format(width), file=f)
print('azimuth_lines: {}'.format(length), file=f)
print('wavelength: {}'.format(wavelength), file=f)
print('n_im_all: {}'.format(n_im_all), file=f)
print('n_im: {}'.format(n_im), file=f)
print('n_ifg_all: {}'.format(n_ifg_all), file=f)
print('n_ifg: {}'.format(n_ifg), file=f)
print('n_ifg_bad: {}'.format(n_ifg_bad), file=f)
print('n_unw_thre: {}'.format(n_unw_thre), file=f)
print('ref_area: {}:{}/{}:{}'.format(refx1, refx2, refy1, refy2), file=f)
print('memory_size: {} MB'.format(memory_size), file=f)
print('n_patch: {}'.format(n_patch), file=f)
print('inv_alg: {}'.format(inv_alg), file=f)
print('gamma: {}'.format(gamma), file=f)
print('pixel_spacing_r: {:.2f} m'.format(pixsp_r), file=f)
print('pixel_spacing_a: {:.2f} m'.format(pixsp_a), file=f)
#%% Ref phase for inversion
lengththis = refy2-refy1
countf = width*refy1
countl = width*lengththis # Number to be read
ref_unw = []
for i, ifgd in enumerate(ifgdates):
unwfile = os.path.join(ifgdir, ifgd, ifgd+'.unw')
f = open(unwfile, 'rb')
f.seek(countf*4, os.SEEK_SET) #Seek for >=2nd path, 4 means byte
### Read unw data (mm) at ref area
unw = np.fromfile(f, dtype=np.float32, count=countl).reshape((lengththis, width))[:, refx1:refx2]*coef_r2m
unw[unw == 0] = np.nan
if np.all(np.isnan(unw)):
print('All nan in ref area in {}.'.format(ifgd))
print('Rerun LiCSBAS12.')
return 1
ref_unw.append(np.nanmean(unw))
f.close()
#%% Open cum.h5 for output
if os.path.exists(cumh5file): os.remove(cumh5file)
cumh5 = h5.File(cumh5file, 'w')
cumh5.create_dataset('imdates', data=[np.int32(imd) for imd in imdates])
if not np.all(np.abs(np.array(bperp))<=1):# if not dummy
cumh5.create_dataset('bperp', data=bperp)
cum = cumh5.require_dataset('cum', (n_im, length, width), dtype=np.float32)
vel = cumh5.require_dataset('vel', (length, width), dtype=np.float32)
vconst = cumh5.require_dataset('vintercept', (length, width), dtype=np.float32)
gap = cumh5.require_dataset('gap', (n_im-1, length, width), dtype=np.int8)
if width == width_geo and length == length_geo: ## if geocoded
cumh5.create_dataset('corner_lat', data=lat1)
cumh5.create_dataset('corner_lon', data=lon1)
cumh5.create_dataset('post_lat', data=dlat)
cumh5.create_dataset('post_lon', data=dlon)
#%% For each patch
i_patch = 1
for rows in patchrow:
print('\nProcess {0}/{1}th line ({2}/{3}th patch)...'.format(rows[1], patchrow[-1][-1], i_patch, n_patch), flush=True)
start2 = time.time()
#%% Read data
### Allocate memory
lengththis = rows[1] - rows[0]
n_pt_all = lengththis*width
unwpatch = np.zeros((n_ifg, lengththis, width), dtype=np.float32)
if inv_alg == 'WLS':
cohpatch = np.zeros((n_ifg, lengththis, width), dtype=np.float32)
### For each ifg
print(" Reading {0} ifg's unw data...".format(n_ifg), flush=True)
countf = width*rows[0]
countl = width*lengththis
for i, ifgd in enumerate(ifgdates):
unwfile = os.path.join(ifgdir, ifgd, ifgd+'.unw')
f = open(unwfile, 'rb')
f.seek(countf*4, os.SEEK_SET) #Seek for >=2nd patch, 4 means byte
### Read unw data (mm) at patch area
unw = np.fromfile(f, dtype=np.float32, count=countl).reshape((lengththis, width))*coef_r2m
unw[unw == 0] = np.nan # Fill 0 with nan
unw = unw - ref_unw[i]
unwpatch[i] = unw
f.close()
### Read coh file at patch area for WLS
if inv_alg == 'WLS':
cohfile = os.path.join(ifgdir, ifgd, ifgd+'.cc')
f = open(cohfile, 'rb')
if os.path.getsize(cohfile) == length*width: ## uint8 format
f.seek(countf, os.SEEK_SET) #Seek for >=2nd patch
cohpatch[i, :, :] = (np.fromfile(f, dtype=np.uint8, count=countl).reshape((lengththis, width))).astype(np.float32)/255
else: ## old float32 format
f.seek(countf*4, os.SEEK_SET) #Seek for >=2nd patch, 4 means byte
cohpatch[i, :, :] = np.fromfile(f, dtype=np.float32, count=countl).reshape((lengththis, width))
cohpatch[cohpatch==0] = np.nan
unwpatch = unwpatch.reshape((n_ifg, n_pt_all)).transpose() #(n_pt_all, n_ifg)
### Calc variance from coherence for WLS
if inv_alg == 'WLS':
cohpatch = cohpatch.reshape((n_ifg, n_pt_all)).transpose() #(n_pt_all, n_ifg)
cohpatch[cohpatch<0.01] = 0.01 ## because negative value possible due to geocode
cohpatch[cohpatch>0.99] = 0.99 ## because >1 possible due to geocode
varpatch = (1-cohpatch**2)/(2*cohpatch**2)
del cohpatch
#%% Remove points with less valid data than n_unw_thre
ix_unnan_pt = np.where(np.sum(~np.isnan(unwpatch), axis=1) > n_unw_thre)[0]
n_pt_unnan = len(ix_unnan_pt)
unwpatch = unwpatch[ix_unnan_pt,:] ## keep only unnan data
if inv_alg == 'WLS':
varpatch = varpatch[ix_unnan_pt,:] ## keep only unnan data
print(' {}/{} points removed due to not enough ifg data...'.format(n_pt_all-n_pt_unnan, n_pt_all), flush=True)
#%% Compute number of gaps, ifg_noloop, maxTlen point-by-point
ns_gap_patch = np.zeros((n_pt_all), dtype=np.float32)*np.nan
gap_patch = np.zeros((n_im-1, n_pt_all), dtype=np.int8)
ns_ifg_noloop_patch = np.zeros((n_pt_all), dtype=np.float32)*np.nan
maxTlen_patch = np.zeros((n_pt_all), dtype=np.float32)*np.nan
### n_gap
print('\n Identifing gaps and counting n_gap...', flush=True)
# ns_unw_unnan4inc = (np.matmul(np.int8(G[:, :, None]), (~np.isnan(unwpatch.T))[:, None, :])).sum(axis=0, dtype=np.int16) #n_ifg, n_im-1, n_pt -> n_im-1, n_pt
ns_unw_unnan4inc = np.array([(G[:, i]*(~np.isnan(unwpatch))).sum(axis=1, dtype=np.int16) for i in range(n_im-1)]) #n_ifg*(n_pt,n_ifg) -> (n_im-1,n_pt)
ns_gap_patch[ix_unnan_pt] = (ns_unw_unnan4inc==0).sum(axis=0) #n_pt
gap_patch[:, ix_unnan_pt] = ns_unw_unnan4inc==0
del ns_unw_unnan4inc
### n_ifg_noloop
print(' Counting n_ifg_noloop...', flush=True)
# n_ifg*(n_pt,n_ifg)->(n_loop,n_pt)
# Number of ifgs for each loop at eath point.
# 3 means complete loop, 1 or 2 means broken loop.
ns_ifg4loop = np.array([
(np.abs(Aloop[i, :])*(~np.isnan(unwpatch))).sum(axis=1)
for i in range(n_loop)])
bool_loop = (ns_ifg4loop==3) #(n_loop,n_pt) identify complete loop only
# n_loop*(n_loop,n_pt)*n_pt->(n_ifg,n_pt)
# Number of loops for each ifg at eath point.
ns_loop4ifg = np.array([(
(np.abs(Aloop[:, i])*bool_loop.T).T*
(~np.isnan(unwpatch[:, i]))
).sum(axis=0) for i in range(n_ifg)]) #
ns_ifg_noloop_tmp = (ns_loop4ifg==0).sum(axis=0) #n_pt
ns_nan_ifg = np.isnan(unwpatch).sum(axis=1) #n_pt, nan ifg count
ns_ifg_noloop_patch[ix_unnan_pt] = ns_ifg_noloop_tmp - ns_nan_ifg
del bool_loop, ns_ifg4loop, ns_loop4ifg
### maxTlen
_maxTlen = np.zeros((n_pt_unnan), dtype=np.float32) #temporaly
_Tlen = np.zeros((n_pt_unnan), dtype=np.float32) #temporaly
for imx in range(n_im-1):
_Tlen = _Tlen + (dt_cum[imx+1]-dt_cum[imx]) ## Adding dt
_Tlen[gap_patch[imx, ix_unnan_pt]==1] = 0 ## reset to 0 if gap
_maxTlen[_maxTlen<_Tlen] = _Tlen[_maxTlen<_Tlen] ## Set Tlen to maxTlen
maxTlen_patch[ix_unnan_pt] = _maxTlen
#%% Time series inversion
print('\n Small Baseline inversion by {}...\n'.format(inv_alg), flush=True)
if inv_alg == 'WLS':
inc_tmp, vel_tmp, vconst_tmp = inv_lib.invert_nsbas_wls(unwpatch, varpatch, G, dt_cum, gamma, n_core)
else:
inc_tmp, vel_tmp, vconst_tmp = inv_lib.invert_nsbas(unwpatch, G, dt_cum, gamma, n_core)
### Set to valuables
inc_patch = np.zeros((n_im-1, n_pt_all), dtype=np.float32)*np.nan
vel_patch = np.zeros((n_pt_all), dtype=np.float32)*np.nan
vconst_patch = np.zeros((n_pt_all), dtype=np.float32)*np.nan
inc_patch[:, ix_unnan_pt] = inc_tmp
vel_patch[ix_unnan_pt] = vel_tmp
vconst_patch[ix_unnan_pt] = vconst_tmp
### Calculate residuals
res_patch = np.zeros((n_ifg, n_pt_all), dtype=np.float32)*np.nan
res_patch[:, ix_unnan_pt] = unwpatch.T-np.dot(G, inc_tmp)
res_sumsq = np.nansum(res_patch**2, axis=0)
res_n = np.float32((~np.isnan(res_patch)).sum(axis=0))
res_n[res_n==0] = np.nan # To avoid 0 division
res_rms_patch = np.sqrt(res_sumsq/res_n)
### Cumulative displacememt
cum_patch = np.zeros((n_im, n_pt_all), dtype=np.float32)*np.nan
cum_patch[1:, :] = np.cumsum(inc_patch, axis=0)
## Fill 1st image with 0 at unnan points from 2nd images
bool_unnan_pt = ~np.isnan(cum_patch[1, :])
cum_patch[0, bool_unnan_pt] = 0
## Drop (fill with nan) interpolated cum by 2 continuous gaps
for i in range(n_im-2): ## from 1->n_im-1
gap2 = gap_patch[i, :]+gap_patch[i+1, :]
bool_gap2 = (gap2==2) ## true if 2 continuous gaps for each point
cum_patch[i+1, :][bool_gap2] = np.nan
## Last (n_im th) image. 1 gap means interpolated
cum_patch[-1, :][gap_patch[-1, :]==1] = np.nan
#%% Output data and image
### cum.h5 file
cum[:, rows[0]:rows[1], :] = cum_patch.reshape((n_im, lengththis, width))
vel[rows[0]:rows[1], :] = vel_patch.reshape((lengththis, width))
vconst[rows[0]:rows[1], :] = vconst_patch.reshape((lengththis, width))
gap[:, rows[0]:rows[1], :] = gap_patch.reshape((n_im-1, lengththis, width))
### Others
openmode = 'w' if rows[0] == 0 else 'a' #w only 1st patch
## For each imd. cum and inc
for imx, imd in enumerate(imdates):
## Incremental displacement
if imd == imdates[-1]: continue #skip last
incfile = os.path.join(incdir, '{0}_{1}.inc'.format(imd, imdates[imx+1]))
with open(incfile, openmode) as f:
inc_patch[imx, :].tofile(f)
## For each ifgd. resid
for i, ifgd in enumerate(ifgdates):
resfile = os.path.join(resdir, '{0}.res'.format(ifgd))
with open(resfile, openmode) as f:
res_patch[i, :].tofile(f)
## velocity and noise indecies in results dir
names = ['vel', 'vintercept', 'resid_rms', 'n_gap', 'n_ifg_noloop', 'maxTlen']
data = [vel_patch, vconst_patch, res_rms_patch, ns_gap_patch, ns_ifg_noloop_patch, maxTlen_patch]
for i in range(len(names)):
file = os.path.join(resultsdir, names[i])
with open(file, openmode) as f:
data[i].tofile(f)
#%% Finish patch
elapsed_time2 = int(time.time()-start2)
hour2 = int(elapsed_time2/3600)
minite2 = int(np.mod((elapsed_time2/60),60))
sec2 = int(np.mod(elapsed_time2,60))
print(" Elapsed time for {0}th patch: {1:02}h {2:02}m {3:02}s".format(i_patch, hour2, minite2, sec2), flush=True)
i_patch += 1 #Next patch count
#%% Find stable ref point
print('\nFind stable reference point...', flush=True)
### Compute RMS of time series with reference to all points
sumsq_cum_wrt_med = np.zeros((length, width), dtype=np.float32)
for i in range(n_im):
sumsq_cum_wrt_med = sumsq_cum_wrt_med + (cum[i, :, :]-np.nanmedian(cum[i, :, :]))**2
rms_cum_wrt_med = np.sqrt(sumsq_cum_wrt_med/n_im)
### Mask by minimum n_gap
n_gap = io_lib.read_img(os.path.join(resultsdir, 'n_gap'), length, width)
min_n_gap = np.nanmin(n_gap)
mask_n_gap = np.float32(n_gap==min_n_gap)
mask_n_gap[mask_n_gap==0] = np.nan
rms_cum_wrt_med = rms_cum_wrt_med*mask_n_gap
### Find stable reference
min_rms = np.nanmin(rms_cum_wrt_med)
refy1s, refx1s = np.where(rms_cum_wrt_med==min_rms)
refy1s, refx1s = refy1s[0], refx1s[0] ## Only first index
refy2s, refx2s = refy1s+1, refx1s+1
print('Selected ref: {}:{}/{}:{}'.format(refx1s, refx2s, refy1s, refy2s), flush=True)
### Rerferencing cumulative displacement and vel to new stable ref
for i in range(n_im):
cum[i, :, :] = cum[i, :, :] - cum[i, refy1s, refx1s]
vel = vel - vel[refy1s, refx1s]
vconst = vconst - vconst[refy1s, refx1s]
### Save image
rms_cum_wrt_med_file = os.path.join(infodir, '13rms_cum_wrt_med')
with open(rms_cum_wrt_med_file, 'w') as f:
rms_cum_wrt_med.tofile(f)
pngfile = os.path.join(infodir, '13rms_cum_wrt_med.png')
plot_lib.make_im_png(rms_cum_wrt_med, pngfile, cmap_noise_r, 'RMS of cum wrt median (mm)', np.nanpercentile(rms_cum_wrt_med, 1), np.nanpercentile(rms_cum_wrt_med, 99))
### Save ref
cumh5.create_dataset('refarea', data='{}:{}/{}:{}'.format(refx1s, refx2s, refy1s, refy2s))
refsfile = os.path.join(infodir, '13ref.txt')
with open(refsfile, 'w') as f:
print('{}:{}/{}:{}'.format(refx1s, refx2s, refy1s, refy2s), file=f)
if width == width_geo and length == length_geo: ## Geocoded
### Make ref_stable.kml
reflat = lat1+dlat*refy1s
reflon = lon1+dlon*refx1s
io_lib.make_point_kml(reflat, reflon, os.path.join(infodir, '13ref.kml'))
#%% Close h5 file
cumh5.close()
#%% Output png images
print('\nOutput png images...', flush=True)
### Incremental displacement
for imx, imd in enumerate(imdates):
if imd == imdates[-1]: continue #skip last for increment
## Comparison of increment and daisy chain pair
ifgd = '{}_{}'.format(imd, imdates[imx+1])
incfile = os.path.join(incdir, '{}.inc'.format(ifgd))
unwfile = os.path.join(ifgdir, ifgd, '{}.unw'.format(ifgd))
pngfile = os.path.join(incdir, '{}.inc_comp.png'.format(ifgd))
inc = io_lib.read_img(incfile, length, width)
try:
unw = io_lib.read_img(unwfile, length, width)*coef_r2m
ix_ifg = ifgdates.index(ifgd)
unw = unw - ref_unw[ix_ifg]
except:
unw = np.zeros((length, width), dtype=np.float32)*np.nan
### Output png for comparison
data3 = [np.angle(np.exp(1j*(data/coef_r2m/cycle))*cycle) for data in [unw, inc, inc-unw]]
title3 = ['Daisy-chain IFG ({}pi/cycle)'.format(cycle*2), 'Inverted ({}pi/cycle)'.format(cycle*2), 'Difference ({}pi/cycle)'.format(cycle*2)]
pngfile = os.path.join(incdir, '{}.increment.png'.format(ifgd))
plot_lib.make_3im_png(data3, pngfile, 'insar', title3, vmin=-np.pi, vmax=np.pi, cbar=False)
if not keep_incfile:
os.remove(incfile)
### Residual for each ifg. png and txt.
with open(restxtfile, "w") as f:
print('# RMS of residual (mm)', file=f)
for ifgd in ifgdates:
infile = os.path.join(resdir, '{}.res'.format(ifgd))
resid = io_lib.read_img(infile, length, width)
resid_rms = np.sqrt(np.nanmean(resid**2))
with open(restxtfile, "a") as f:
print('{} {:5.2f}'.format(ifgd, resid_rms), file=f)
pngfile = infile+'.png'
title = 'Residual (mm) of {} (RMS:{:.2f}mm)'.format(ifgd, resid_rms)
plot_lib.make_im_png(resid, pngfile, cmap_vel, title, -wavelength/2*1000, wavelength/2*1000)
if not keep_incfile:
os.remove(infile)
### Velocity and noise indices
cmins = [None, None, None, None, None, None]
cmaxs = [None, None, None, None, None, None]
cmaps = [cmap_vel, cmap_vel, cmap_noise_r, cmap_noise_r, cmap_noise_r, cmap_noise]
titles = ['Velocity (mm/yr)', 'Intercept of velocity (mm)', 'RMS of residual (mm)', 'Number of gaps in SB network', 'Number of ifgs with no loops', 'Max length of connected SB network (yr)']
for i in range(len(names)):
file = os.path.join(resultsdir, names[i])
data = io_lib.read_img(file, length, width)
pngfile = file+'.png'
## Get color range if None
if cmins[i] is None:
cmins[i] = np.nanpercentile(data, 1)
if cmaxs[i] is None:
cmaxs[i] = np.nanpercentile(data, 99)
if cmins[i] == cmaxs[i]: cmins[i] = cmaxs[i]-1
plot_lib.make_im_png(data, pngfile, cmaps[i], titles[i], cmins[i], cmaxs[i])
#%% Finish
elapsed_time = time.time()-start
hour = int(elapsed_time/3600)
minite = int(np.mod((elapsed_time/60),60))
sec = int(np.mod(elapsed_time,60))
print("\nElapsed time: {0:02}h {1:02}m {2:02}s".format(hour,minite,sec))
print('\n{} Successfully finished!!\n'.format(os.path.basename(argv[0])))
print('Output directory: {}\n'.format(os.path.relpath(tsadir)))
#%% main
if __name__ == "__main__":
sys.exit(main())
| [] |
2024-01-10 | robzeh/genai_hackathon | gpt-4.py | import os
import whisper
from openai import OpenAI
client = OpenAI(
api_key=os.environ.get("OPENAI_API_KEY")
)
def read_text_file(file_path):
with open(file_path, 'r') as file:
return file.read()
text_content = read_text_file('all_text.txt')
chat_completion = client.chat.completions.create(
messages=[
{
"role": "system",
"content": "You are a helpful assistant.",
},
{
"role": "system",
"content": "Can you list general categories, desired service, common types of information for each category (other than patient identification)? Make your response organized.",
},
{
"role": "user",
"content": text_content
}
],
model="gpt-4",
)
print(chat_completion)
| [
"Can you list general categories, desired service, common types of information for each category (other than patient identification)? Make your response organized.",
"You are a helpful assistant."
] |
2024-01-10 | robzeh/genai_hackathon | whisp.py | import os
import whisper
from openai import OpenAI
# load model
model = whisper.load_model("medium")
files = os.listdir("./sample_audio")
for f in files:
result = model.transcribe(f"./sample_audio/{f}")
print(result)
with open(f"./sample_audio_transcripts/{f}-transcript.txt", "w") as file:
file.write(result["text"]) | [] |
2024-01-10 | JMousqueton/ransomware.live | analyse_negotiation.py | # Pre version to analyse ransom negotiation with chat GPT
import json
import openai
import argparse
import os
from dotenv import load_dotenv
# Load OpenAI API key from .env file
load_dotenv()
openai.api_key = os.getenv('OPENAI_API_KEY')
def read_json_file(filename):
with open(filename, 'r') as file:
return json.load(file)
def ask_openai(question, content):
response = openai.Completion.create(
engine="text-davinci-002",
prompt=f"{content}\n\n{question}",
max_tokens=150
)
return response.choices[0].text.strip()
def extract_details_from_json(json_content):
# Extract ransom demand
ransom_demand_question = "How much was the ransom demand, answer only the figure of the amount?"
ransom_demand = ask_openai(ransom_demand_question, json_content)
print(f"Ransom Demand: {ransom_demand}")
# Extract negotiated ransom
negotiated_ransom_question = "How much was the negotiated ransom, answer only the figure of the amount?"
negotiated_ransom = ask_openai(negotiated_ransom_question, json_content)
print(f"Negotiated Ransom: {negotiated_ransom}")
# Check if victim paid the ransom
paid_ransom_question = "Did the victim pay the ransom, answer only yes or no?"
paid_ransom = ask_openai(paid_ransom_question, json_content)
print(f"Paid Ransom: {paid_ransom}")
def main():
parser = argparse.ArgumentParser(description='Analyse negotiation json file file using ChatGPT.')
parser.add_argument('filename', help='Path to the JSON file.')
args = parser.parse_args()
json_content = read_json_file(args.filename)
extract_details_from_json(str(json_content))
if __name__ == "__main__":
main()
| [
"PLACEHOLDER\n\nPLACEHOLDER"
] |
2024-01-10 | JMousqueton/ransomware.live | updatecountry.py | import openai
import json
import os
import re
from dotenv import load_dotenv
# Load environment variables from .env file
load_dotenv()
# Get the API key from the environment variables
api_key = os.getenv("OPENAI_API_KEY")
# Check if the API key is available
if api_key is None:
raise ValueError("API key not found in the .env file")
# Open the JSON file for reading
with open('posts.json', 'r') as json_file:
# Load the JSON data from the file
data = json.load(json_file)
# Loop through each post and update the "country" field if it's empty
for post in data:
if not post["country"]:
# Extract the country code based on post_title
prompt = f"From which country is based '{post['post_title']}'? Give me only the country code."
response = openai.Completion.create(
engine="text-davinci-002",
prompt=prompt,
max_tokens=5, # Adjust as needed
api_key=api_key
)
# Extract the country code from the API response
response_text = response.choices[0].text.strip()
# Extract the country code from the response text
#country_code = response_text.split("code is ")[-1].strip('" ')
country_code = country_code = re.search(r'(?<=code\sis\s)[A-Z]{2}(?="\.)', response_text ).group()
# If country code is still empty, ask based on the description
if not country_code:
prompt = f"Based on this description: '{post['description']}' can you tell me in which country is based the company described ? Give me only the country code."
response = openai.Completion.create(
engine="text-davinci-002",
prompt=prompt,
max_tokens=5, # Adjust as needed
api_key=api_key
)
# Extract the country code from the response
response_text = response.choices[0].text.strip()
country_code = country_code = re.search(r'(?<=code\sis\s)[A-Z]{2}(?="\.)', response_text ).group()
# Update the "country" field with the extracted country code
if country_code:
post["country"] = country_code
# Save the updated data back to the JSON file
with open('posts.json', 'w') as json_file:
json.dump(data, json_file, indent=4)
print("Country codes inserted into the 'country' field for posts with empty country.") | [
"Based on this description: 'PLACEHOLDER' can you tell me in which country is based the company described ? Give me only the country code.",
"From which country is based 'PLACEHOLDER'? Give me only the country code."
] |
2024-01-10 | leoshum/FL_Python_Scripts | frontline-commit-analyzer~codeReview.py | import openai
import tiktoken
import os
from openpyxl import load_workbook
class CodeReviewProvider:
def __init__(self, chat_completion=False):
self.chat_completion = chat_completion
openai.api_key = os.environ.get("OPENAI_API_TOKEN")
with open(f"{os.path.dirname(__file__)}\\preprompt.txt", "r") as file:
self.prepromt = file.read()
with open(f"{os.path.dirname(__file__)}\\c-sharp-issues.txt", "r") as file:
self.csharp_preprompt = file.read()
with open(f"{os.path.dirname(__file__)}\\sql-issues.txt", "r") as file:
self.sql_preprompt = file.read()
with open(f"{os.path.dirname(__file__)}\\angular-issues.txt", "r") as file:
self.angular_preprompt = file.read()
with open(f"{os.path.dirname(__file__)}\\js-issues.txt", "r") as file:
self.js_preprompt = file.read()
with open(f"{os.path.dirname(__file__)}\\binary-answer-preprompt.txt", "r") as file:
self.binary_prepromt = file.read()
def get_chat_completion_answer(self, prompt):
response = openai.ChatCompletion.create(
model="gpt-4-0613",
messages=[
{"role": "user", "content": prompt}
]
)
return response['choices'][0]['message']['content']
def get_completion_answer(self, prompt):
model_engine = "text-davinci-003"
encoding = tiktoken.get_encoding("p50k_base")
max_tokens = 4097 - len(encoding.encode(prompt))
if max_tokens < 0:
max_tokens = 4097
completion = openai.Completion.create(
engine=model_engine,
prompt=prompt,
max_tokens=max_tokens,
temperature=0.5,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
return completion.choices[0].text.strip()
def get_bot_answer(self, prepromt, code, file_path, binary_answer=False):
file_ext = os.path.splitext(file_path)[1]
ext_to_excluede = [".xml", ".rdlc", ".resx", ".json", ".md", ".csproj", ".sln"]
if file_ext in ext_to_excluede:
return "Skipped"
code_issues = ""
if file_ext == ".cs":
code_issues = self.csharp_preprompt
elif file_ext == ".sql":
code_issues = self.sql_preprompt
elif file_ext == ".ts":
code_issues = self.angular_preprompt
elif file_ext == ".js":
code_issues = self.js_preprompt
else:
code_issues = prepromt
prompt = ""
if binary_answer:
prompt = f"{self.binary_prepromt}\n{code}"
else:
prompt = f"{code_issues}\n{code}"
result = ""
if self.chat_completion:
result = self.get_chat_completion_answer(prompt)
else:
result = self.get_completion_answer(prompt)
return result
def get_code_review(self, code, file_path):
return self.get_bot_answer(self.prepromt, code, file_path)
def get_binary_answer(self, review, file_path):
return self.get_bot_answer(self.binary_prepromt, review, file_path, binary_answer=True)
def main():
code_review = CodeReviewProvider(chat_completion=True)
file_name = f"{os.path.dirname(__file__)}\\code_issues.xlsx"
wb = load_workbook(file_name, data_only=True)
wb_sheet = wb.active
file_ext = ".md"
for row in wb_sheet.iter_rows():
if row[0].value in ["C#", "JS/TS/Angular", "SQL"]:
file_ext = row[1].value
else:
row[4].value = code_review.get_code_review(row[0].value, f"https://api.github.com/repos/octokit/octokit.rb/contents/README{file_ext}")
row[5].value = code_review.get_binary_answer(row[0].value, f"https://api.github.com/repos/octokit/octokit.rb/contents/README{file_ext}")
wb.save(file_name)
if __name__ == "__main__":
main() | [
"PLACEHOLDER\nPLACEHOLDER"
] |
2024-01-10 | ContextualAI/HALOs | compare.py | # Copyright (c) 2023 Contextual AI, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Compare a candidate model to some baseline model by using GPT4 as an judge.
Typical use is
python compare.py -f samples/sft_llama7b.json -mc 512 -bk chosen -ck policy -r result.jsonl -j gpt-4-0613
where
-f is a JSON file of generations, where the "samples" key maps to a list of dicts of the form
{
history_key: the prompt,
baseline_key: the generation by the baseline (this can be model-written (Anthropic-HH) or human-written (SHP)),
candidate_key: the generation by the candidate model you want to evaluate,
}
- mc denotes the maximum number of comparisons to make between baseline_key and candidate_key (optional)
- bk is the baseline model's key in the dict (optional, default: chosen)
- ck is the candidate model's key in the dict (optional, default: policy)
- r is the JSONL file to which to append the result, a JSON dict containing the metadata, the number of winning matchups by each model, and the lengths of all outputs
- j is the version of GPT to use as a judge (optional, default: gpt-4-0613)
To overwrite the template used to evaluate with GPT-4 as a judge, subclass PromptTemplate.
The default template asks GPT-4 to pick the response that is "more helpful, harmless, and concise", since helpfulness and harmlessness are the two key objectives of model alignment and GPT-4 has a bias for longer outputs by default.
If GPT-4's response does not contain 'Response 1' or 'Response 2' (case-insensitive), then we assume that no winner is picked and it does not count as a win for either model.
Therefore the number of baseline wins and the number of candidate wins add up to less total # of comparisons.
"""
import os
import openai
import random
import json
import numpy as np
import re
import time
import signal
from dataclasses import dataclass
from scipy.stats import binomtest, binom
from math import ceil, floor
from typing import Dict, Tuple
from collections import defaultdict
from datetime import datetime
from transformers import AutoTokenizer
client = openai.OpenAI(
api_key=os.environ.get("OPENAI_API_KEY"),
)
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--file', '-f', help="JSON file with the generated samples; list of dicts containing candidate, baseline, and history as keys", type= str)
parser.add_argument('--candidate_key', '-ck', help="model that you want to test; should be a key in the JSON dicts", type=str, default='policy')
parser.add_argument('--baseline_key', '-bk', help="model that you want to use as a baseline; should be a key in the JSON dicts", type=str, default='chosen')
parser.add_argument('--history_key', '-hk', help="key for prompt; should be a key in the JSON dicts", type=str, default='prompt')
parser.add_argument('--labels', '-l', help="used to enumerate the responses being compared in the GPT-4 API call (e.g., Response 1, Response A)", type=str, default='12')
parser.add_argument('--seed', '-s', help="seed for GPT eval", type=int, default=0)
parser.add_argument('--sleep_time', '-st', help="how long to sleep to prevent rate limit hit", type=int, default=0.5)
parser.add_argument('--max_comp', '-mc', help="maximum number of comparisons to make", type=int, default=None)
parser.add_argument('--verbose', '-v', help="detailed outputs", type=bool, default=True)
parser.add_argument('--results_file', '-r', help="JSONL file to append to", type=str, default='results.jsonl')
parser.add_argument('--judge', '-j', help="version of GPT-4 used as judge", type=str, default='gpt-4-0613')
class APITimeoutException(Exception):
pass
@dataclass
class PromptTemplate:
"""
Prompt generator for comparing the outputs of any number of models using GPT-4 as a judge.
"""
models: Tuple[str] # list of models under consideration
labels: str # list of labels to assign to models (e.g., "12345")
seed: int # random seed
verbose: bool
human_prefix: str="\n<|user|>\n"
assistant_prefix: str="\n<|assistant|>\n" # Tulu format; modify as needed
def __post_init__(self):
random.seed(self.seed)
def shuffle(self):
"""
Shuffle the list of models under consideration. This is to correct for any bias GPT-4 might have
in picking the first/last response.
"""
random.shuffle(self.models)
def clean_response(self, response):
"""
Truncate any hanging sentence (not ending in a ?!.) and return the cleaned response.
"""
match = re.search(r'[.!?](?=[^.!?]*$)', response)
if match:
return response[:match.start() + 1]
else:
return response
def format(self, history: str, samples: Dict[str, str]) -> str:
"""
Format samples into a given prompt. Overwrite in subclass as needed.
Args:
- history: the input/instruction
- samples: a dictionary mapping models to their generations
Returns:
A list of messages to send to the GPT-4 API. Each item is a dictionary containing the role of the speaker
and the candidate text.
"""
self.shuffle()
history = history[:history.rfind(self.assistant_prefix)].strip()
messages = [{"role": "user", "content": f"Here is a conversation between {self.human_prefix.strip()} and {self.assistant_prefix.strip()}.\n\n" + history }]
prompt = [f'{self.assistant_prefix.strip()} can now choose from different responses.']
for i,m in enumerate(self.models):
response = self.clean_response(samples[m])
prompt.append(f'Response {self.labels[i]}: {response}\nEND OF RESPONSE {self.labels[i]}')
prompt.append("Between these two responses, which response is more helpful, harmless, and concise? Pick one.")
messages.append({
"role": "user",
"content": "\n\n".join(prompt),
})
return messages
def get_model_choice_from_response(self, response) -> str:
"""
Given a response from the GPT-4 evaluator, identify and return the model it chose.
Args:
- response: response from calling GPT-4 API
Returns:
One of the models in self.models (or None if GPT-4's choice cannot be inferred).
"""
completion = response.choices[0].message.content
answer = re.search(r'response (.).*', completion, re.IGNORECASE)
if self.verbose:
print(completion)
if answer is None:
return None
idx = self.labels.index(answer.group(1))
return self.models[idx]
def get_preferred_model(history: str, samples: Dict[str, str], prompt_template: PromptTemplate, judge: str, rate_limit_size: int=1000) -> str:
"""
Find the model whose generation is most preferred by the judge.
Args:
- history: prompt used to condition generations
- samples: generations for the given history, indexed by model name
- prompt_template: instance of PromptTemplate
- judge: one of the OpenAI chat models
- rate_limit_size: maximum number of characters that can be in any message to avoid rate limit problem (tokens is ~ 1/3 of chars)
Returns:
A 2-tuple of the evaluation completion and the name of the more preferred model.
"""
# Set up a timeout handler
def timeout_handler(signum, frame):
"""Handler for when OpenAI call takes too long."""
raise APITimeoutException("API call took too long")
signal.signal(signal.SIGALRM, timeout_handler)
signal.alarm(10)
try:
response = client.chat.completions.create(
model=judge,
messages=prompt_template.format(history, samples),
temperature=0,
max_tokens=10,
seed=prompt_template.seed,
)
signal.alarm(0) # Cancel the alarm since the call completed within the timeout
return prompt_template.get_model_choice_from_response(response)
except ValueError:
print("The chosen response could not be determined.")
pass
except APITimeoutException:
pass
except openai.APIConnectionError as e:
print("The server could not be reached.")
print(e.__cause__) # an underlying Exception, likely raised within httpx.
except openai.RateLimitError as e:
print("A 429 status code was received; we should back off a bit.")
signal.alarm(0)
time.sleep(5)
except openai.APIStatusError as e:
print("Another non-200-range status code was received")
print(e.response)
finally:
signal.alarm(0)
return None
if __name__ == "__main__":
args = parser.parse_args()
samples = json.load(open(args.file))
prompt_template = PromptTemplate(
[args.candidate_key, args.baseline_key],
args.labels,
args.seed,
verbose=args.verbose,
human_prefix=samples['config']['human_prefix'],
assistant_prefix=samples['config']['assistant_prefix']
)
tokenizer = AutoTokenizer.from_pretrained(samples['config']['local_run_dir'])
i = 0
lengths = defaultdict(list)
wins = defaultdict(lambda: 0)
for batch in samples["samples"]:
if args.max_comp is not None and i >= args.max_comp:
break
lengths[args.candidate_key].append(len(tokenizer.encode(batch[args.candidate_key])))
lengths[args.baseline_key].append(len(tokenizer.encode(batch[args.baseline_key])))
time.sleep(args.sleep_time)
choice = get_preferred_model(batch[args.history_key], batch, prompt_template, judge=args.judge)
i += 1
if choice is not None:
wins[choice] += 1
if args.verbose:
print(wins, 'of', i, { k: np.mean(lengths[k]) for k in lengths })
results = {
'date': str(datetime.now()),
'total': i,
'seed': args.seed,
'exp_name': samples["config"]["exp_name"],
'judge' : args.judge,
'candidate': {
'name': args.candidate_key,
'wins': wins[args.candidate_key],
'lengths': lengths[args.candidate_key],
},
'baseline': {
'name': args.baseline_key,
'wins': wins[args.baseline_key],
'lengths': lengths[args.baseline_key],
},
'config' : samples["config"],
}
with open(args.results_file, 'a+') as f:
json.dump(results, f)
f.write('\n')
print(wins)
| [
"\n\n",
"config",
"assistant_prefix",
"human_prefix"
] |
2024-01-10 | Marcos-VM-1708/Automode | version.py | # bibliophiles:
import openai
import time
import pandas as pd
import aws
# ------------------------------------#
# rules:
openai.api_key = "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"
tam_request = 50
result = aws.data
# ------------------------------------#
inicio_1= time.time()
def request(messages):
# api log:
response = openai.ChatCompletion.create(model="gpt-3.5-turbo",
messages= messages,
max_tokens=1024,
temperature=0.5)
# request:
return [response.choices[0].message.content, response.usage]
# ------------------------------------#
# process:
comentarios = []
gpt_result = []
inicio = time.time()
for i in range(0, len(result), tam_request):
coment = result[i:i + tam_request]
mensagens = [{"role": "system",
"content": "preciso que você interplete o sentimento desses textos, retorne apenas positivo, neutro, negativo e 0 caso vc não consigua identificar, escreva o resultado em um formato de lista separado por virgula e sem ponto final "}]
mensagens.append({"role": "user", "content": str(coment)})
try:
temp = request(mensagens)
comentarios.extend(coment)
gpt_result.append(temp[0])
print(temp[0]) # diagnostico
print(coment) #comentarios
print()
except openai.error.RateLimitError:
print("aguardando limite")
fim = time.time()
time.sleep(80 - (fim - inicio))
continue
# # ------------------------------------#
diagnostico = []
for elemento in gpt_result:
palavras = elemento.split(',')
palavras = [palavra.strip('.') for palavra in palavras]
diagnostico.extend(palavras)
print(f"{diagnostico} tamanho{len(diagnostico)}")
print(f"{comentarios} tamanho{len(comentarios)}")
# ------------------------------------#
# close:
df = pd.DataFrame({'comentarios': comentarios, 'sentimentos': diagnostico})
pd.to_csv(df)
fim_1 = time.time()
print(fim_1-inicio_1)
# # ------------------------------------# | [
"preciso que você interplete o sentimento desses textos, retorne apenas positivo, neutro, negativo e 0 caso vc não consigua identificar, escreva o resultado em um formato de lista separado por virgula e sem ponto final "
] |
2024-01-10 | Marcos-VM-1708/Automode | gpt_request.py | import openai
#--------------------------------------------------
# teste de requisições, imput um texto manual e retorna o sentimento
openai.api_key = "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"
def request(messages):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages= messages,
max_tokens=1024,
temperature=0.5
)
return [response.choices[0].message.content, response.usage]
#-----------------------------------------------------------
mensagens = [{"role": "system", "content": "voce é um interpletador de sentimentos, seu obijetivo é classificar em positivo, negativo ou neutro os textos enseridos"}]
while True:
# Ask a question
question = input("texto: (\"sair\"): ")
mensagens.append({"role": "user", "content": str(question)})
answer = request(mensagens)
print("enviado:", question)
print("ChatGPT:", answer[0], "\nCusto:\n", answer[1])
debugar = False
if debugar:
print("Mensagens", mensagens, type(mensagens))
| [
"voce é um interpletador de sentimentos, seu obijetivo é classificar em positivo, negativo ou neutro os textos enseridos"
] |
2024-01-10 | ccbehera5/jstechno_Task | Jstechno~Task_2.py | from openai import OpenAI
import os
from dotenv import load_dotenv
load_dotenv()
client = OpenAI(
api_key=os.environ['OPENAI_API_KEY'],
)
mes=input("write your message: ")
completion = client.completions.create(
model="gpt-3.5-turbo",
prompt=[
{
"role": "user",
"content": mes
}
],
temperature=1,
max_tokens=256
)
print(completion.choices[0].text) | [] |
2024-01-10 | ShivaNaniSimha/OpenChat-YIC | OpenChat.py | from langchain.chat_models import ChatOpenAI
from googleapiclient.discovery import build
import streamlit as st
from langchain.schema import HumanMessage, SystemMessage
from key import youtube_api_key,chat_apikey
#set up your openai api key.
import os
os.environ["OPENAI_API_KEY"]=chat_apikey
chat=ChatOpenAI(temperature=0.9)
#web framework
st.title(":orange[OpenChat]")
user_prompt = st.chat_input("Enter your prompt ")
#Chatbot Interaction
if user_prompt:
messages = [
SystemMessage(
content='''You are a helpful OpenChat assistant where as an AI language model,
trained on enormous data like chatgpt and google bard.And you are founded by shiva nani
and developed by the openchat developers.
for every single task you need to respond accordingly and you should aslo understand
the follow up messages,remember this instruction particularly.
'''
),
HumanMessage(
content=user_prompt
)
]
#this gives us only the content.
response_list=[]
for message in chat(messages):
response_list.append(message[1])
assistant_response=response_list[0]
#Integrating youtube inks using youtube data api
youtube = build('youtube', 'v3', developerKey=youtube_api_key)
video_search_response = youtube.search().list(
part="snippet",
q=user_prompt,
type="video",
order="relevance"
).execute()
videos_list=[]
for item in video_search_response['items']:
video_id = item['id']['videoId']
videos_list.append(video_id)
#session_state is used to show the conversation history for that session
if "history" not in st.session_state:
st.session_state.history=[]
st.session_state.history.append([user_prompt,assistant_response,videos_list])
print(st.session_state.history)
for prompt,response,video in st.session_state.history:
user_message=st.chat_message("User")
user_message.write(prompt)
assistant_message = st.chat_message('Assistant')
assistant_message.write(response)
assistant_message.write("Here are few videos from youtube based on your search.")
for url in range(0,5):
assistant_message.video(f"https://www.youtube.com/watch?v={video[url]}")
| [
"Enter your prompt ",
"You are a helpful OpenChat assistant where as an AI language model,\n trained on enormous data like chatgpt and google bard.And you are founded by shiva nani \n and developed by the openchat developers.\n for every single task you need to respond accordingly and you should aslo understand\n the follow up messages,remember this instruction particularly.\n "
] |
2024-01-10 | cananeadouglas/pythonExercicios | tecnologia_assistiva~fala.py | # pip install SpeechRecognition
# pip install pyaudio
# sudo apt-get install python-pyaudio python3-pyaudio
import openai
import speech_recognition as sr
import pyttsx3
import time
# Initialize OpenAI API
openai.api_key = "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"
# Initialize the text to speech engine
engine=pyttsx3.init()
def transcribe_audio_to_test(filename):
recogizer=sr.Recognizer()
with sr.AudioFile(filename)as source:
audio=recogizer.record(source)
try:
return recogizer.recognize_google(audio)
except:
print("skipping unkown error")
def generate_response(prompt):
response= openai.completion.create(
engine="text-davinci-003",
prompt=prompt,
max_tokens=4000,
n=1,
stop=None,
temperature=0.5,
)
return response ["Choices"][0]["text"]
def speak_text(text):
engine.say(text)
engine.runAndWait()
def main():
while True:
#Waith for user say "genius"
print("Say 'Genius' to start recording your question")
with sr.Microphone() as source:
recognizer=sr.Recognizer()
audio=recognizer.listen(source)
try:
transcription = recognizer.recognize_google(audio)
if transcription.lower()=="genius":
#record audio
filename ="input.wav"
print("Say your question")
with sr.Microphone() as source:
recognizer=sr.recognize()
source.pause_threshold=1
audio=recognizer.listen(source,phrase_time_limit=None,timeout=None)
with open(filename,"wb")as f:
f.write(audio.get_wav_data())
#transcript audio to test
text=transcribe_audio_to_test(filename)
if text:
print(f"yuo said {text}")
#Generate the response
response = generate_response(text)
print(f"chat gpt 3 say {response}")
#read resopnse using GPT3
speak_text(response)
except Exception as e:
print("An error ocurred : {}".format(e))
if __name__=="__main__":
main() | [] |
2024-01-10 | elehman16/do-we-still-need-clinical-lms | src~zero-shot~clip~clip_gpt3.py | import json
import argparse
import pandas as pd
import numpy as np
from sklearn.metrics import f1_score, accuracy_score
from datasets import Dataset, DatasetDict
from tqdm import tqdm
import random
import os
import requests
import json
import openai
import re
openai.api_key = ""
openai.api_base = ""
openai.api_type = ""
openai.api_version = ""
deployment_id=""
def conversion(prediction_str: str) -> str:
mapper = {'N/A': ''}
if prediction_str in mapper:
return mapper[prediction_str]
else:
return prediction_str
LABEL_TYPES = ['Appointment-related followup',
'Medication-related followups',
'Other helpful contextual information',
'Lab-related followup',
'Case-specific instructions for patient',
'Procedure-related followup',
'Imaging-related followup']
LOWERCASED_TYPES = [x.lower() for x in LABEL_TYPES]
def str_tags_to_binary_tensor(los):
"""Convert list of strings to indexed positions. """
arr = np.zeros(len(LABEL_TYPES))
for str_ in los:
if not str_ in LABEL_TYPES and not str_ in LOWERCASED_TYPES: continue
# It's in our list. Get the label. Mark as 1 in our label list.
if str_ in LOWERCASED_TYPES: arr[LOWERCASED_TYPES.index(str_)] = 1
else: arr[LABEL_TYPES.index(str_)] = 1
return arr
def load_ids(clip_path):
"""Load the training/val/test ids. """
tr_id = pd.read_csv(clip_path + '/train_ids.csv', header=None)
vl_id = pd.read_csv(clip_path + '/val_ids.csv', header=None)
te_id = pd.read_csv(clip_path + '/test_ids.csv', header=None)
return set(tr_id[0].values), set(vl_id[0].values), set(te_id[0].values)
def prompt_label_type(s, prompt: str) -> str:
if prompt == '0':
return f"Context: {s}\nLabel the above sentence as one of the following:\n\nOptions:\n-Appointment-related followup\n-Medication-related followup\n-Lab-related followup\n-Case-specific instructions for the patient\n-Procedure-related followup\n-Imaging-related followup\n-Other helpful contextual information for the patient\n-None of the above"
elif prompt == '1':
return f"Context: {s}\nLabel the above sentence as one of the following:\n\nOptions:\n-Appointment-related followup\n-Medication-related followup\n-Lab-related followup\n-Case-specific instructions for the patient\n-Procedure-related followup\n-Imaging-related followup\n-None of the above"
elif prompt == '2':
return f"Context: {s}\nLabel the above sentence as one of the following:\n\nOptions:\n-Appointment-related followup\n-Medication-related Information\n-Lab-related Information\n-Case-specific instructions for the patient\n-Procedure-related followup\n-Imaging-related followup\n-None of the above"
elif prompt == '3':
return f"Context: {s}\nLabel the above sentence as one or more of the following, separated by a comma:\n\nOptions:\n-Appointment-related followup\n-Medication-related Information\n-Lab-related Information\n-Case-specific instructions for the patient\n-Procedure-related followup\n-Imaging-related followup\n-None of the above"
elif prompt == '4':
return f"Context: {s}\nLabel the above sentence as one or more of the following, delimited by comma:\n\nOptions:\n-Appointment-related followup information\n-Medication-related followup information\n-Lab-related followup information\n-Case-specific instructions for the patient\n-Procedure-related followup information\n-Imaging-related followup information\n-None of the above"
elif prompt == '5':
examples = ['He has a follow-up neck CTA and appointment with [ **Month/Year ( 2 ) 1106** ] surgery on 1978-10-18 , with possible subsequent carotid stenting procedure to follow . .'] + [s]
labels = ['Appointment-related followup, Imaging-related followup, Procedure-related followup'] + [""]
in_context = [f"Context: {s}\nLabel the above sentence as one or more of the following, delimited by comma:\n\nOptions:\n-Appointment-related followup information\n-Medication-related followup information\n-Lab-related followup information\n-Case-specific instructions for the patient\n-Procedure-related followup information\n-Imaging-related followup information\n-None of the above\n{l}" for s, l in zip(examples, labels)]
return "\n\n".join(in_context)
elif prompt == '6':
examples = ['He has a follow-up neck CTA and appointment with [ **Month/Year ( 2 ) 1106** ] surgery on 1978-10-18 , with possible subsequent carotid stenting procedure to follow . .', 'He is also to be seen by his primary care physician to regulate medications and to restart Plavix approximately two weeks after his surgery .', 'JP drains were left in place under his wound , with planned removal by Thoracics surgery once there was trace to no output .'] + [s]
labels = ['Appointment-related followup, Imaging-related followup, Procedure-related followup', 'Appointment-related followup, Case-specific instructions for patient, Medication-related followups', 'Appointment-related followup, Other helpful contextual information'] + [""]
in_context = [f"Context: {s}\nLabel the above sentence as one or more of the following, delimited by comma:\n\nOptions:\n-Appointment-related followup information\n-Medication-related followup information\n-Lab-related followup information\n-Case-specific instructions for the patient\n-Procedure-related followup information\n-Imaging-related followup information\n-None of the above\n{l}" for s, l in zip(examples, labels)]
return "\n\n".join(in_context)
elif prompt == '7':
examples = ['He has a follow-up neck CTA and appointment with [ **Month/Year ( 2 ) 1106** ] surgery on 1978-10-18 , with possible subsequent carotid stenting procedure to follow . .', 'He is also to be seen by his primary care physician to regulate medications and to restart Plavix approximately two weeks after his surgery .', 'JP drains were left in place under his wound , with planned removal by Thoracics surgery once there was trace to no output .'] + [s]
labels = ['Appointment-related followup, Imaging-related followup, Procedure-related followup', 'Appointment-related followup, Case-specific instructions for patient, Medication-related followups', 'Appointment-related followup, Other helpful contextual information'] + [""]
in_context = [f"Context: {s}\nLabel the above sentence as one or more of the following, delimited by comma:\n\nOptions:\n-Appointment-related followup information\n-Medication-related followup information\n-Lab-related followup information\n-Case-specific instructions for the patient\n-Medical procedure-related followup information\n-Medical imaging-related followup information\n-None of the above\n{l}" for s, l in zip(examples, labels)]
return "\n\n".join(in_context)
elif prompt == '8':
examples = ['He has a follow-up neck CTA and appointment with [ **Month/Year ( 2 ) 1106** ] surgery on 1978-10-18 , with possible subsequent carotid stenting procedure to follow . .', 'He is also to be seen by his primary care physician to regulate medications and to restart Plavix approximately two weeks after his surgery .', 'JP drains were left in place under his wound , with planned removal by Thoracics surgery once there was trace to no output .'] + [s]
labels = ['Appointment-related followup, Imaging-related followup, Procedure-related followup', 'Appointment-related followup, Case-specific instructions for patient, Medication-related followups', 'Appointment-related followup, Other helpful contextual information'] + [""]
in_context = [f"Context: {s}\nLabel the above sentence as one or more of the following, delimited by comma:\n\nOptions:\n-Appointment-related followup information\n-Medication-related followup information\n-Lab-related followup information\n-Case-specific instructions for the patient\n-Medical procedure-related followup information\n-Medical imaging-related followup information\n-None of the above\n{l}" for s, l in zip(examples, labels)]
return "\n\n".join(in_context)
elif prompt == '9':
return f"Tag the following sentence as one OR more of the following. If there are multiple options, separate each by a comma.\n\nOptions:\n-Appointment-related followup\n-Medication-related followup\n-Lab-related followup\n-Case-specific instructions for the patient\n-Procedure-related followup\n-Imaging-related followup\n-Other helpful contextual information for the patient\n-None of the above\n\nSentence: {s}Tags: "
else:
raise ValueError(f"Invalid prompt {prompt}")
def tokenize_data(df: pd.DataFrame, prompt: str) -> pd.DataFrame:
"""Split the data into chunks of `max_seq_length`. Attempt to take an equal number
of tokens before and after the text.
@param tokenizer
@param replace_text_with_tags
@param max_seq_length """
inputs, labels, ids = [], [], []
j = 0
for idx, row in tqdm(df.iterrows(), total=df.shape[0]):
text = row.text
#
for s in row.labels:
sent = row.text[s[0]:s[1]]
all_prompts = prompt_label_type(sent, prompt)
idx_labels = str_tags_to_binary_tensor(s[2].split(", "))
inputs.append(all_prompts)
labels.append(idx_labels)
ids.append(row.note_id)
j += 1
tokenized_inputs = {}
tokenized_inputs['inputs'] = inputs
tokenized_inputs['idx_labels'] = labels
tokenized_inputs['id'] = ids
return pd.DataFrame.from_dict(tokenized_inputs)
def load_data(clip_path) -> pd.DataFrame:
"""Load the data from the sentences.csv. """
df = pd.read_csv(clip_path + '/sentence_level.csv')
df['sentence'] = [eval(x) for x in df['sentence']] # type: ignore
df['labels'] = [eval(x) for x in df['labels']] # type: ignore
# Combine the text, remember sentence offsets.
docs = {'note_id': [], 'text': [], 'labels': []}
for id, group in df.groupby('doc_id'):
text, sentence_offsets = "", []
for _, row in group.iterrows():
sent = ' '.join(row['sentence'])
# Remove the weird `I-` in front of the labels
row['labels'] = [x[2:] for x in row['labels']]
row['labels'].sort()
labels = ', '.join(row['labels'])
sentence_offsets.append((len(text), len(text) + len(sent), labels))
# Now join the text
text += sent + ' '
docs['note_id'].append(id)
docs['text'].append(text)
docs['labels'].append(sentence_offsets)
return pd.DataFrame(docs)
def get_data(clip_path, prompt) -> DatasetDict:
"""Get the CLIP data.
@param tokenizer is a Huggingface tokenizer.
@param clip_path is the path to the clip data.
@param replace_text_with_tags determines whether or not we modify the text to remove PHI.
@param max_seq_length is the maximum sequence length."""
df = load_data(clip_path)
tr_id, vl_id, te_id = load_ids(clip_path)
# Split the data into chunks + into train/val/test
model_inputs = tokenize_data(df, prompt)
train = model_inputs[model_inputs['id'].isin(tr_id)]
val = model_inputs[model_inputs['id'].isin(vl_id)]
test = model_inputs[model_inputs['id'].isin(te_id)]
# Create the dataset objects and return
input_dict = {'train': None, 'val': Dataset.from_pandas(val), 'test': Dataset.from_pandas(test)}
return input_dict
def compute_metrics_prompt(predictions, examples, prompt):
"""Given some predictions, calculate the F1. """
# Decode the predictions + labels
decoded_predictions = predictions
mapper = {
'Appointment-related followup': LABEL_TYPES[0],
'Appointment-related followup information': LABEL_TYPES[0],
'Medication-related followup': LABEL_TYPES[1],
'Medication-related Information': LABEL_TYPES[1],
'Medication-related followup information': LABEL_TYPES[1],
'Lab-related Information': LABEL_TYPES[3],
'Lab-related followup information': LABEL_TYPES[3],
'Lab-related instructions for the patient': LABEL_TYPES[3],
'Case-specific instructions for the patient': LABEL_TYPES[4],
'Procedure-related followup': LABEL_TYPES[5],
'Imaging-related followup': LABEL_TYPES[6],
'Procedure-related followup information': LABEL_TYPES[5],
'Imaging-related followup information': LABEL_TYPES[6],
'Medical procedure-related followup information': LABEL_TYPES[5],
'Medical imaging-related followup information': LABEL_TYPES[6],
'Other helpful contextual information': LABEL_TYPES[2]
}
preds = []
for d in decoded_predictions:
str_pred = []
for p in d.split(', '):
if p in mapper:
str_pred.append(mapper[p])
else:
str_pred.append(p)
preds.append(", ".join(str_pred))
preds = [str_tags_to_binary_tensor([x]) for x in preds]
labels = examples['idx_labels']
return {
'macro_f1': f1_score(labels, preds, average='macro'),
'micro_f1': f1_score(labels, preds, average='micro'),
'per_class': f1_score(labels, preds, average=None)
}
def query_openai_all(all_inputs):
all_answers = []
for i in range(0, len(all_inputs), 20):
response = openai.Completion.create(engine=deployment_id, prompt=all_inputs[i:i+20], max_tokens=50, temperature=0)
all_answers.extend([resp['text'].strip() for resp in response['choices']])
return all_answers
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--clip-dir', type=str, required=True)
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--prompt', type=str, required=True)
parser.add_argument('--set', required=True, choices=['dev', 'test'])
args = parser.parse_args()
print(f"Running with {args.seed}")
# Get data and use the tokenizer on the data
tokenized_datasets = get_data(args.clip_dir, args.prompt)
random.seed(0)
if args.set == 'dev':
ds = tokenized_datasets['val']
ds.shuffle()
ds = ds[:200]
else:
ds = tokenized_datasets['test']
ds.shuffle()
ds = ds[:int(len(ds) * 0.25)]
predictions = query_openai_all(ds['inputs'])
metrics = compute_metrics_prompt(predictions, ds, args.prompt)
print(metrics) | [] |
2024-01-10 | elehman16/do-we-still-need-clinical-lms | src~zero-shot~radqa~radqa_gpt_3.py | import json
import argparse
import pandas as pd
import numpy as np
import random
from sklearn.metrics import f1_score, accuracy_score
from evaluate import load
squad_metric = load("squad_v2")
import os
import requests
import json
import openai
from sklearn.utils import shuffle
openai.api_key = ""
openai.api_base = ""
openai.api_type = ""
openai.api_version = ""
deployment_id=""
def conversion(prediction_str: str) -> str:
mapper = {'N/A': ''}
if prediction_str in mapper:
return mapper[prediction_str]
else:
return prediction_str
def precision_filter(pred: str, context: str) -> str:
mapper = {'N/A': ''}
if pred in mapper:
return mapper[pred]
elif not pred in context:
return ''
else:
return pred
def compute_metrics_prompt(context, predictions, answers):
"""Given some predictions, calculate the F1. """
# Decode the predictions + labels
clean_predictions = [{'prediction_text': conversion(x), 'id': str(i), 'no_answer_probability': -1} for i, x in enumerate(predictions)]
precision = [{'prediction_text': precision_filter(x, context[i]), 'id': str(i), 'no_answer_probability': -1} for i, x in enumerate(predictions)]
references = [{'id': str(i), 'answers': l} for i, l in enumerate(answers)]
results = squad_metric.compute(predictions=clean_predictions, references=references)
clean_results = squad_metric.compute(predictions=precision, references=references)
return {'reg': results, 'precision': clean_results}
def format_single_example(e, prompt: str) -> str:
if prompt == '0':
return f"Context: {e['context']}\nQuestion: {e['question']}\nAnswer:"
elif prompt == '1':
return f"Context: {e['context']}\nGiven the above context, {e['question']}? Answer N/A if there is no answer."
elif prompt == '2':
return f"Answer N/A if there is no answer.\n\nContext: {e['context']}\nQuestion:{e['question']}\nAnswer:"
elif prompt == '3':
return f"Context: {e['context']}\nGiven the above context, {e['question']}? Answer N/A if there is no answer or give a quote from the context: "
elif prompt == '4':
return f"Context: {e['context']}\nGiven the above context, {e['question']}?\nAnswer: \""
elif prompt == '5':
return f"Context: {e['context']}\nGiven the above context, {e['question']}? Give a quote from the text: "
elif prompt == '6':
prompt = "Context: IMPRESSION: Subdural hematomas with blood products of different ages.\n Question vescular abnormality in left suprasellar space. Findings were\n discussed with Dr. [**Last Name (STitle) 8620**] at 9:25 am on [**2191-8-5**]. An MRI of the brain and MRA\n of the COW is recommended.\nGiven the above context, Is there any significant change in bleeding? Answer N/A if there is no answer or give a quote from the context: N/A\n\n"
return prompt + f"Context: {e['context']}\nGiven the above context, {e['question']}? Answer N/A if there is no answer or give a quote from the context: "
elif prompt == '7':
prompt = "Context: FINAL REPORT\nCHEST SINGLE AP FILM:\n\nHISTORY: ICD placement and shortness of breath.\n\nThere is a right sided dual chamber ICD with leads unchanged in location in\n this single view compared with the prior film of [**2101-5-25**]. No pneumothorax.\n There is cardiomegaly with upper zone redistribution, bilateral pleural\n effusions and associated bibasilar atelectases. Consolidation at the lung\n bases cannot be ruled out.\nGiven the above context, Is there any significant change from prior visit? Answer N/A if there is no answer or give a quote from the context: ICD with leads unchanged in location\n\n"
prompt += f"Context: {e['context']}\nGiven the above context, {e['question']}? Answer N/A if there is no answer or give a quote from the context: "
return prompt
elif prompt == '8':
prompt = "Context: IMPRESSION: Subdural hematomas with blood products of different ages.\n Question vescular abnormality in left suprasellar space. Findings were\n discussed with Dr. [**Last Name (STitle) 8620**] at 9:25 am on [**2191-8-5**]. An MRI of the brain and MRA\n of the COW is recommended.\nGiven the above context, Is there any significant change in bleeding? Answer N/A if there is no answer or give a quote from the context: N/A\n\nContext: FINAL REPORT\nCHEST SINGLE AP FILM:\n\nHISTORY: ICD placement and shortness of breath.\n\nThere is a right sided dual chamber ICD with leads unchanged in location in\n this single view compared with the prior film of [**2101-5-25**]. No pneumothorax.\n There is cardiomegaly with upper zone redistribution, bilateral pleural\n effusions and associated bibasilar atelectases. Consolidation at the lung\n bases cannot be ruled out.\nGiven the above context, Is there any significant change from prior visit? Answer N/A if there is no answer or give a quote from the context: ICD with leads unchanged in location\n\n"
prompt += f"Context: {e['context']}\nGiven the above context, {e['question']}? Answer N/A if there is no answer or give a quote from the context: "
return prompt
elif prompt == '9':
prompt = "Context: IMPRESSION: Subdural hematomas with blood products of different ages.\n Question vescular abnormality in left suprasellar space. Findings were\n discussed with Dr. [**Last Name (STitle) 8620**] at 9:25 am on [**2191-8-5**]. An MRI of the brain and MRA\n of the COW is recommended.\nGiven the above context, Is there any significant change in bleeding? Answer N/A if there is no answer or give a quote from the context: N/A\n\n"
prompt += "Context: FINAL REPORT\nCHEST SINGLE AP FILM:\n\nHISTORY: ICD placement and shortness of breath.\n\nThere is a right sided dual chamber ICD with leads unchanged in location in\n this single view compared with the prior film of [**2101-5-25**]. No pneumothorax.\n There is cardiomegaly with upper zone redistribution, bilateral pleural\n effusions and associated bibasilar atelectases. Consolidation at the lung\n bases cannot be ruled out.\nGiven the above context, Is there any significant change from prior visit? Answer N/A if there is no answer or give a quote from the context: ICD with leads unchanged in location\n\n"
prompt += f"Context: {e['context']}\nGiven the above context, {e['question']}? Answer N/A if there is no answer or give a quote from the context: "
return prompt
elif prompt == '10':
prompt = f"Context: {e['context']}\nGiven the above context, {e['question']}?\nGive an answer from the text, but do not reply if there is no answer: "
return prompt
elif prompt == '11':
if e['question'][-1] == '?':
e['question'] = e['question'][:-1]
return f"Radiology Report: {e['context']}\nGiven the above radiology report, {e['question']}? Answer N/A if there is no answer or give a quote from the context: "
elif prompt == '12':
begin = "Find the answer to the question based on the context below. Provided a quote from the context, otherwise write N/A.\n\n"
begin += f"Context: {e['context']}\nGiven the above context, {e['question']} Answer:"
return begin
elif prompt == '13':
begin = "Find the answer to the question based on the context below. Provided a quote from the context, otherwise write N/A.\n\n"
begin += f"Context: {e['context']}\nQuestion: {e['question']} Answer:"
return begin
elif prompt == '14':
begin = "Find the answer to the question based on the context below. Provided a quote from the context, otherwise write N/A.\n\n"
begin += "Context: IMPRESSION: Subdural hematomas with blood products of different ages.\n Question vescular abnormality in left suprasellar space. Findings were\n discussed with Dr. [**Last Name (STitle) 8620**] at 9:25 am on [**2191-8-5**]. An MRI of the brain and MRA\n of the COW is recommended. Is there any significant change in bleeding? Answer: N/A\n\n"
begin += "Context: FINAL REPORT\nCHEST SINGLE AP FILM:\n\nHISTORY: ICD placement and shortness of breath.\n\nThere is a right sided dual chamber ICD with leads unchanged in location in\n this single view compared with the prior film of [**2101-5-25**]. No pneumothorax.\n There is cardiomegaly with upper zone redistribution, bilateral pleural\n effusions and associated bibasilar atelectases. Consolidation at the lung\n bases cannot be ruled out.\nQuestion: Is there any significant change from prior visit? Answer: ICD with leads unchanged in location\n\n"
begin += f"Context: {e['context']}\nQuestion: {e['question']} Answer:"
return begin
elif prompt == '15':
begin = "Find the answer to the question based on the context below. Provided a quote from the context, otherwise write N/A.\n\n"
begin += "Context: IMPRESSION: Subdural hematomas with blood products of different ages.\n Question vescular abnormality in left suprasellar space. Findings were\n discussed with Dr. [**Last Name (STitle) 8620**] at 9:25 am on [**2191-8-5**]. An MRI of the brain and MRA\n of the COW is recommended.\nQuestion: Is there any significant change in bleeding?\nAnswer: N/A\n\n"
begin += "Context: FINAL REPORT\nCHEST SINGLE AP FILM:\n\nHISTORY: ICD placement and shortness of breath.\n\nThere is a right sided dual chamber ICD with leads unchanged in location in\n this single view compared with the prior film of [**2101-5-25**]. No pneumothorax.\n There is cardiomegaly with upper zone redistribution, bilateral pleural\n effusions and associated bibasilar atelectases. Consolidation at the lung\n bases cannot be ruled out.\nQuestion: Is there any significant change from prior visit?\nAnswer: ICD with leads unchanged in location\n\n"
begin += 'Context: IMPRESSION: 1) Large constricting circumferential mass is present in the\n antrum of the stomach, causing massive dilatation of the body and fundus of\n the stomach. Adenopathy is present in the gastrohepatic ligament. No\n definite metastatic lesions are seen in the liver, although there is a tiny\n low attenuation lesion in the posterior segment of the right hepatic lobe\n which is too small to characterize. No other evidence of metastatic disease\n is seen in the torso. 2) Markedly enlarged prostate. 3) Bilateral small\n anastomotic aneurysms at the junction of the bypass graft with the common\n femoral arteries. 4) Small fat-containing left inguinal hernia.\nQuestion: Did the gastric cancer metastasize to chest.\nAnswer: No other evidence of metastatic disease\n is seen in the torso\n\n'
begin += f"Context: {e['context']}\nQuestion: {e['question']}\nAnswer:"
return begin
else:
raise ValueError("Prompt id not implemented")
def preprocess_function(examples, prompt: str):
"""Format the examples and then tokenize them. """
inputs = [format_single_example(e, prompt) for _, e in examples.iterrows()]
return {'inputs': inputs}
def format_csv(df: pd.DataFrame):
df['answers'] = [eval(x) for x in df['answers']] #type: ignore
return df
def get_data(radqa_path: str):
"""Get the radqa data. """
train = format_csv(pd.read_csv(radqa_path + '/train.csv'))
val = format_csv(pd.read_csv(radqa_path + '/dev.csv'))
val = shuffle(val)
test = format_csv(pd.read_csv(radqa_path + '/test.csv'))
return {"train": train, "val": val, "test": test}
def get_prompted_data(dataset_dict, prompt: str):
"""Tokenize stuff. """
processed_dict = {}
for name, dataset_ in dataset_dict.items():
processed_item = preprocess_function(dataset_, prompt)
processed_item['answers'] = dataset_['answers'].values
processed_item['context'] = dataset_['context'].values
processed_dict[name] = processed_item
return processed_dict
def query_openai_all(all_inputs):
all_answers = []
for i in range(0, len(all_inputs), 20):
response = openai.Completion.create(engine=deployment_id, prompt=all_inputs[i:i+20], max_tokens=256, temperature=0)
all_answers.extend([resp['text'].strip() for resp in response['choices']])
return all_answers
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--radqa-dir', type=str, required=True)
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--output-dir', type=str)
parser.add_argument('--prompt', type=str)
parser.add_argument('--set', required=True, choices=['dev', 'test'])
args = parser.parse_args()
print(f"Running with {args.seed}")
# Get data and use the tokenizer on the data
dataset_dict = get_data(args.radqa_dir)
prompted_dataset = get_prompted_data(dataset_dict, args.prompt)
if args.set == 'dev':
ds = prompted_dataset['val']
selection = 200
ds = {'inputs': ds['inputs'][:selection], 'answers': ds['answers'][:selection], 'context': ds['context'][:selection]}
else:
ds = prompted_dataset['test']
predictions = query_openai_all(ds['inputs'])
output_metrics = compute_metrics_prompt(ds['context'], predictions, ds['answers'])
print(output_metrics) | [
"Context: FINAL REPORT\nCHEST SINGLE AP FILM:\n\nHISTORY: ICD placement and shortness of breath.\n\nThere is a right sided dual chamber ICD with leads unchanged in location in\n this single view compared with the prior film of [**2101-5-25**]. No pneumothorax.\n There is cardiomegaly with upper zone redistribution, bilateral pleural\n effusions and associated bibasilar atelectases. Consolidation at the lung\n bases cannot be ruled out.\nGiven the above context, Is there any significant change from prior visit? Answer N/A if there is no answer or give a quote from the context: ICD with leads unchanged in location\n\n",
"Context: PLACEHOLDER\nGiven the above context, PLACEHOLDER? Answer N/A if there is no answer or give a quote from the context: ",
"Context: IMPRESSION: Subdural hematomas with blood products of different ages.\n Question vescular abnormality in left suprasellar space. Findings were\n discussed with Dr. [**Last Name (STitle) 8620**] at 9:25 am on [**2191-8-5**]. An MRI of the brain and MRA\n of the COW is recommended.\nGiven the above context, Is there any significant change in bleeding? Answer N/A if there is no answer or give a quote from the context: N/A\n\n",
"Context: PLACEHOLDER\nGiven the above context, PLACEHOLDER?\nGive an answer from the text, but do not reply if there is no answer: ",
"Context: IMPRESSION: Subdural hematomas with blood products of different ages.\n Question vescular abnormality in left suprasellar space. Findings were\n discussed with Dr. [**Last Name (STitle) 8620**] at 9:25 am on [**2191-8-5**]. An MRI of the brain and MRA\n of the COW is recommended.\nGiven the above context, Is there any significant change in bleeding? Answer N/A if there is no answer or give a quote from the context: N/A\n\nContext: FINAL REPORT\nCHEST SINGLE AP FILM:\n\nHISTORY: ICD placement and shortness of breath.\n\nThere is a right sided dual chamber ICD with leads unchanged in location in\n this single view compared with the prior film of [**2101-5-25**]. No pneumothorax.\n There is cardiomegaly with upper zone redistribution, bilateral pleural\n effusions and associated bibasilar atelectases. Consolidation at the lung\n bases cannot be ruled out.\nGiven the above context, Is there any significant change from prior visit? Answer N/A if there is no answer or give a quote from the context: ICD with leads unchanged in location\n\n"
] |
2024-01-10 | elehman16/do-we-still-need-clinical-lms | src~zero-shot~mednli~mednli_gpt_3.py | import json
import argparse
import pandas as pd
import numpy as np
from sklearn.metrics import f1_score, accuracy_score
from datasets import Dataset, DatasetDict
from transformers import AutoTokenizer, DataCollatorForLanguageModeling, DataCollatorForSeq2Seq
from transformers import AutoModelForSeq2SeqLM, Seq2SeqTrainingArguments, Seq2SeqTrainer
import openai
from sklearn.utils import shuffle
openai.api_key = ""
openai.api_base = ""
openai.api_type = ""
openai.api_version = ""
deployment_id=""
def get_mapping() -> dict:
mapper1 = {'yes': 'entailment', 'no': 'contradiction', 'maybe': 'neutral', 'it is not possible to tell': 'neutral'}
mapper2 = {'true': 'entailment', 'false': 'contradiction', 'inconclusive': 'neutral'}
mapper3 = {'always': 'entailment', 'never': 'contradiction', 'sometimes': 'neutral'}
mapper4 = {'entailment': 'entailment', 'contradiction': 'contradiction', 'neutral': 'neutral'}
mapper5 = {'true': 'entailment', 'false': 'contradiction', 'neither': 'neutral'}
mapper6 = {'yes': 'entailment', 'no': 'contradiction', "it's impossible to say": 'neutral', 'it is not possible to say': 'neutral'}
combiner = {}
mappers = [mapper1, mapper2, mapper3, mapper4, mapper5, mapper6]
for m in mappers:
combiner = combiner | m
return combiner
def format_single_example(sentence1: str, sentence2: str, prompt: str) -> str:
if prompt == '0':
return f"Answer entailment, neutral or contradiction.\n\nPremise: {sentence1}\nHypothesis: {sentence2}\nAnswer:"
elif prompt == '1':
return f'Suppose {sentence1} Can we infer that "{sentence2}"? Yes, no, or maybe?'
elif prompt == '2':
return f'{sentence1} Based on that information, is the claim: "{sentence2}" true, false, or inconclusive?'
elif prompt == '3':
return f'Given that "{sentence1}". Does it follow that "{sentence2}" Yes, no, or maybe?'
elif prompt == '4':
return f'Suppose it\'s true that {sentence1} Then, is "{sentence2}" always, sometimes, or never true?'
elif prompt == '5':
prompt = 'Answer entailment, contradiction or neutral.\nPremise: Labs were notable for Cr 1.7 (baseline 0.5 per old records) and lactate 2.4.\nHypothesis: Patient has elevated Cr\nAnswer: entailment\n\nPremise: Labs were notable for Cr 1.7 (baseline 0.5 per old records) and lactate 2.4.\nHypothesis: Patient has elevated BUN\nAnswer: neutral\n\nPremise: Labs were notable for Cr 1.7 (baseline 0.5 per old records) and lactate 2.4.\nHypothesis: Patient has normal Cr\nAnswer: contradiction\n\n'
prompt += f'Premise: {sentence1}\nHypothesis: {sentence2}\nAnswer:'
return prompt
elif prompt == '6':
prompt = 'Premise: Labs were notable for Cr 1.7 (baseline 0.5 per old records) and lactate 2.4.\nHypothesis: Patient has elevated Cr\nAnswer entailment, contradiction or neutral: entailment\n\nPremise: Labs were notable for Cr 1.7 (baseline 0.5 per old records) and lactate 2.4.\nHypothesis: Patient has elevated BUN\nAnswer entailment, contradiction or neutral: neutral\n\nPremise: Labs were notable for Cr 1.7 (baseline 0.5 per old records) and lactate 2.4.\nHypothesis: Patient has normal Cr\nAnswer entailment, contradiction or neutral: contradiction\n\n'
prompt += f'Premise: {sentence1}\nHypothesis: {sentence2}\nAnswer entailment, contradiction or neutral:'
return prompt
elif prompt == '7':
prompt = f"{sentence1} Question: {sentence2} True, False, or Neither?"
return prompt
elif prompt == '8':
prompt = 'Given that "Labs were notable for Cr 1.7 (baseline 0.5 per old records) and lactate 2.4.". Does it follow that "Patient has elevated Cr" Yes, no, or maybe? Yes.\n\nGiven that "Labs were notable for Cr 1.7 (baseline 0.5 per old records) and lactate 2.4.". Does it follow that "Patient has elevated BUN" Yes, no, or maybe? Maybe.\n\nGiven that "Labs were notable for Cr 1.7 (baseline 0.5 per old records) and lactate 2.4.". Does it follow that "Patient has normal Cr" Yes, no, or maybe? No.\n\n'
prompt += f'Given that "{sentence1}". Does it follow that "{sentence2}" Yes, no, or maybe?'
return prompt
elif prompt == '9':
prompt = 'Given that "It was not associated with any shortness of breath, nausea, vomiting, or she tried standing during this episode.” Does it follow that "She had vomiting and dyspnea with this episode”? Yes, no, or maybe? No.\n\nGiven that "He has been followed by Dr. [**Last Name (STitle) 21267**] of Podiatry for chronic first toe MTP ulcer, which is now resolving.” Does it follow that "He had a chronic wound on his toe”? Yes, no, or maybe? Yes.\n\nGiven that "She had no fevers/chills/sweats prior to coming to the hosptial.” Does it follow that "Patient has a normal abdominal CT”? Yes, no, or maybe? Neutral.'
prompt += f'Given that {sentence1} Does it follow that {sentence2.strip()}? Yes, no, or maybe?'
return prompt
elif prompt == '10':
prompt = f"Does {sentence1} mean that {sentence2}?\n\n Options:\n-Yes\n-No\n-It's impossible to say"
return prompt
elif prompt == '11':
if sentence1.strip()[-1] == '.':
sentence1 = sentence1[:-1]
prompt = f'Does "{sentence1}" mean that "{sentence2.strip()}"?\n\nOptions:\n-Yes\n-No\n-It\'s impossible to say'
return prompt
elif prompt == '12':
if sentence1.strip()[-1] == '.':
sentence1 = sentence1[:-1]
prompt = 'Does “Labs were notable for Cr 1.7 (baseline 0.5 per old records) and lactate 2.4” mean that "Patient has elevated Cr"?\n\nOptions:\n-Yes\n-No\n-It\'s impossible to say \nYes\n\nDoes “Labs were notable for Cr 1.7 (baseline 0.5 per old records) and lactate 2.4” mean that "Patient has elevated BUN"?\n\nOptions:\n-Yes\n-No\n-It\'s impossible to say \nNo\n\nDoes “Labs were notable for Cr 1.7 (baseline 0.5 per old records) and lactate 2.4” mean that "Patient has normal Cr"?\n\nOptions:\n-Yes\n-No\n-It\'s impossible to say \nIt’s impossible to say\n\n'
actual_prompt = f'Does "{sentence1}" mean that "{sentence2.strip()}"?\n\nOptions:\n-Yes\n-No\n-It\'s impossible to say'
return prompt + actual_prompt
elif prompt == '13':
if sentence1.strip()[-1] == '.':
sentence1 = sentence1[:-1]
prompt = 'Does “It was not associated with any shortness of breath, nausea, vomiting, or she tried standing during this episode” mean that "She had vomiting and dyspnea with this episode"?\n\n Options:\n-Yes\n-No\n-It\'s impossible to say \nNo\n\nDoes “He has been followed by Dr. [**Last Name (STitle) 21267**] of Podiatry for chronic first toe MTP ulcer, which is now resolving” mean that "He had a chronic wound on his toe"?\n\n Options:\n-Yes\n-No\n-It\'s impossible to say \nYes\n\nDoes “She had no fevers/chills/sweats prior to coming to the hosptial” mean that "Patient has a normal abdominal CT"?\n\n Options:\n-Yes\n-No\n-It\'s impossible to say \nIt’s impossible to say'
actual_prompt = f'Does "{sentence1}" mean that "{sentence2.strip()}"?\n\nOptions:\n-Yes\n-No\n-It\'s impossible to say'
return prompt + actual_prompt
elif prompt == '14':
prompt = f"Does Discharge Summary: {sentence1} mean that {sentence2}?\n\n Options:\n-Yes\n-No\n-It's impossible to say"
return prompt
else:
return ""
def preprocess_function(examples, prompt: str) -> tuple[list, list]:
"""Format the examples and then tokenize them. """
inputs = [format_single_example(s1, s2, prompt) for s1, s2 in zip(examples['sentence1'], examples['sentence2'])]
targets = examples['gold_label']
return inputs, targets
def read_jsonl(file_path: str):
"""Read the given JSONL file."""
with open(file_path) as f:
data = [json.loads(line) for line in f]
return data
def get_data(mednli_path: str):
"""Get the mednli data. """
# mli_dev_v1.jsonl mli_test_v1.jsonl mli_train_v1.jsonl
train = Dataset.from_list(read_jsonl(mednli_path + '/mli_train_v1.jsonl'))
val = Dataset.from_list(read_jsonl(mednli_path + '/mli_dev_v1.jsonl'))
test = Dataset.from_list(read_jsonl(mednli_path + '/mli_test_v1.jsonl'))
return DatasetDict({"train": train, "val": val, "test": test})
def query_openai_all(all_inputs):
"""Query OpenAI. """
all_answers = []
for i in range(0, len(all_inputs), 20):
response = openai.Completion.create(engine=deployment_id, prompt=all_inputs[i:i+20], max_tokens=256, temperature=0)
all_answers.extend([resp['text'].strip() for resp in response['choices']])
return all_answers
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--mednli-dir', type=str, required=True)
parser.add_argument('--output-dir', type=str)
parser.add_argument('--prompt', type=str)
args = parser.parse_args()
# Get data and use the tokenizer on the data
dataset_dict = get_data(args.mednli_dir)
test_inputs, test_targets = preprocess_function(dataset_dict['test'], args.prompt)
# Test the model.
predictions = query_openai_all(dataset_dict['test'])
with open(args.output_dir + '/predictions.json', 'w') as f:
json.dump(predictions, f) | [
"PLACEHOLDER Question: PLACEHOLDER True, False, or Neither?",
"Given that \"It was not associated with any shortness of breath, nausea, vomiting, or she tried standing during this episode.” Does it follow that \"She had vomiting and dyspnea with this episode”? Yes, no, or maybe? No.\n\nGiven that \"He has been followed by Dr. [**Last Name (STitle) 21267**] of Podiatry for chronic first toe MTP ulcer, which is now resolving.” Does it follow that \"He had a chronic wound on his toe”? Yes, no, or maybe? Yes.\n\nGiven that \"She had no fevers/chills/sweats prior to coming to the hosptial.” Does it follow that \"Patient has a normal abdominal CT”? Yes, no, or maybe? Neutral.",
"Premise: PLACEHOLDER\nHypothesis: PLACEHOLDER\nAnswer:",
"Does “Labs were notable for Cr 1.7 (baseline 0.5 per old records) and lactate 2.4” mean that \"Patient has elevated Cr\"?\n\nOptions:\n-Yes\n-No\n-It's impossible to say \nYes\n\nDoes “Labs were notable for Cr 1.7 (baseline 0.5 per old records) and lactate 2.4” mean that \"Patient has elevated BUN\"?\n\nOptions:\n-Yes\n-No\n-It's impossible to say \nNo\n\nDoes “Labs were notable for Cr 1.7 (baseline 0.5 per old records) and lactate 2.4” mean that \"Patient has normal Cr\"?\n\nOptions:\n-Yes\n-No\n-It's impossible to say \nIt’s impossible to say\n\n",
"Does PLACEHOLDER mean that PLACEHOLDER?\n\n Options:\n-Yes\n-No\n-It's impossible to say",
"Given that \"PLACEHOLDER\". Does it follow that \"PLACEHOLDER\" Yes, no, or maybe?",
"Premise: PLACEHOLDER\nHypothesis: PLACEHOLDER\nAnswer entailment, contradiction or neutral:",
"Answer entailment, contradiction or neutral.\nPremise: Labs were notable for Cr 1.7 (baseline 0.5 per old records) and lactate 2.4.\nHypothesis: Patient has elevated Cr\nAnswer: entailment\n\nPremise: Labs were notable for Cr 1.7 (baseline 0.5 per old records) and lactate 2.4.\nHypothesis: Patient has elevated BUN\nAnswer: neutral\n\nPremise: Labs were notable for Cr 1.7 (baseline 0.5 per old records) and lactate 2.4.\nHypothesis: Patient has normal Cr\nAnswer: contradiction\n\n",
"Does Discharge Summary: PLACEHOLDER mean that PLACEHOLDER?\n\n Options:\n-Yes\n-No\n-It's impossible to say",
"Does “It was not associated with any shortness of breath, nausea, vomiting, or she tried standing during this episode” mean that \"She had vomiting and dyspnea with this episode\"?\n\n Options:\n-Yes\n-No\n-It's impossible to say \nNo\n\nDoes “He has been followed by Dr. [**Last Name (STitle) 21267**] of Podiatry for chronic first toe MTP ulcer, which is now resolving” mean that \"He had a chronic wound on his toe\"?\n\n Options:\n-Yes\n-No\n-It's impossible to say \nYes\n\nDoes “She had no fevers/chills/sweats prior to coming to the hosptial” mean that \"Patient has a normal abdominal CT\"?\n\n Options:\n-Yes\n-No\n-It's impossible to say \nIt’s impossible to say",
"Given that \"Labs were notable for Cr 1.7 (baseline 0.5 per old records) and lactate 2.4.\". Does it follow that \"Patient has elevated Cr\" Yes, no, or maybe? Yes.\n\nGiven that \"Labs were notable for Cr 1.7 (baseline 0.5 per old records) and lactate 2.4.\". Does it follow that \"Patient has elevated BUN\" Yes, no, or maybe? Maybe.\n\nGiven that \"Labs were notable for Cr 1.7 (baseline 0.5 per old records) and lactate 2.4.\". Does it follow that \"Patient has normal Cr\" Yes, no, or maybe? No.\n\n",
"Premise: Labs were notable for Cr 1.7 (baseline 0.5 per old records) and lactate 2.4.\nHypothesis: Patient has elevated Cr\nAnswer entailment, contradiction or neutral: entailment\n\nPremise: Labs were notable for Cr 1.7 (baseline 0.5 per old records) and lactate 2.4.\nHypothesis: Patient has elevated BUN\nAnswer entailment, contradiction or neutral: neutral\n\nPremise: Labs were notable for Cr 1.7 (baseline 0.5 per old records) and lactate 2.4.\nHypothesis: Patient has normal Cr\nAnswer entailment, contradiction or neutral: contradiction\n\n"
] |
2024-01-10 | NoviScl/MoRE | gpt_router.py | import os
import argparse
import numpy as np
from time import sleep
import json
import random
import openai
from tqdm import tqdm
test_dir = "uniqa_predictions_final/test/"
dev_dir = "uniqa_predictions_final/dev/"
datasets = [
"nq",
"triviaqa",
"squad",
"hotpotqa",
"beerqa_3hop",
"musique",
"gsm8k",
"svamp",
"multiarith",
"csqa",
"csqa2",
"qasc"
]
experts = ["factual", "multihop", "math", "commonsense"]
choices = ["A", "B", "C", "D"]
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--apikey', type=str, help='api key; https://openai.com/api/')
parser.add_argument('--engine', type=str, default='text-davinci-003', help='api engine; https://openai.com/api/')
parser.add_argument('--maxlen', type=int, default=1, help='max len')
args = parser.parse_args()
openai.api_key = args.apikey
prompt = ""
prompt += "I am in a question-answering contest. There are four experts on my team: expert A specializes in factoid questions that focus on factual knowledge; expert B specializes in multihop questions that need multiple steps of reasoning; expert C specializes in mathematics questions that require logical reasoning and math calculation; expert D specializes in commonsense questions that focus on commonsense knowledge.\n"
prompt += "Given a question, I will get answers from all four experts, your task is to choose the best answer from the four answers. Directly predict the index of the chosen expert (A, B, C, or D). If there are multiple correct answers, just choose one of them.\n\n"
with open("sampled_demos.json", 'r') as f:
sampled_examples = json.load(f)
for dp in sampled_examples:
prompt += "Question: " + dp["question"] + "\n"
prompt += "A: " + dp["factual"].replace("\n", " ").strip() + "\n"
prompt += "B: " + dp["multihop"].replace("\n", " ").strip() + "\n"
prompt += "C: " + dp["math"].replace("\n", " ").strip() + "\n"
prompt += "D: " + dp["commonsense"].replace("\n", " ").strip() + "\n"
prompt += "Best Answer: " + dp["answer"].strip() + "\n"
prompt += "\n"
all_gpt_preds = {}
for dataset in datasets:
correct = 0
total = 0
all_dp = {"dataset": dataset, "data": []}
all_data = {}
for expert in experts:
fname = os.path.join(test_dir, dataset + '_' + expert + ".json")
with open(fname, 'r') as f:
data = json.load(f)
all_data[expert] = data
for i in tqdm(range(len(all_data["factual"]))):
total += 1
EMs = []
dp = {}
cur_prompt = prompt
cur_prompt += "Question: " + all_data["factual"][i]["question"].strip() + "\n"
dp["question"] = all_data["factual"][i]["question"].strip()
if all_data["factual"][i]["full_answer"]:
cur_prompt += "A: " + all_data["factual"][i]["full_answer"].replace("\n", " ").strip() + "\n"
else:
cur_prompt += "A: " + all_data["factual"][i]["answer"].replace("\n", " ").strip() + "\n"
EMs.append(all_data["factual"][i]["em"])
dp["factual"] = all_data["factual"][i]["answer"].replace("\n", " ").strip()
if all_data["multihop"][i]["full_answer"]:
cur_prompt += "B: " + all_data["multihop"][i]["full_answer"].replace("\n", " ").strip() + "\n"
else:
cur_prompt += "B: " + all_data["multihop"][i]["answer"].replace("\n", " ").strip() + "\n"
EMs.append(all_data["multihop"][i]["em"])
dp["multihop"] = all_data["multihop"][i]["answer"].replace("\n", " ").strip()
if all_data["math"][i]["full_answer"]:
cur_prompt += "C: " + all_data["math"][i]["full_answer"].replace("\n", " ").strip() + "\n"
else:
cur_prompt += "C: " + all_data["math"][i]["answer"].replace("\n", " ").strip() + "\n"
EMs.append(all_data["math"][i]["em"])
dp["math"] = all_data["math"][i]["answer"].replace("\n", " ").strip()
if all_data["commonsense"][i]["full_answer"]:
cur_prompt += "D: " + all_data["commonsense"][i]["full_answer"].replace("\n", " ").strip() + "\n"
else:
cur_prompt += "D: " + all_data["commonsense"][i]["answer"].replace("\n", " ").strip() + "\n"
EMs.append(all_data["commonsense"][i]["em"])
dp["commonsense"] = all_data["commonsense"][i]["answer"].replace("\n", " ").strip()
dp["EMs"] = EMs
cur_prompt += "Best Answer: "
# print (cur_prompt)
# print ("\n\n\n")
response = None
while response is None:
try:
response = openai.Completion.create(
engine=args.engine,
prompt=cur_prompt,
max_tokens=args.maxlen,
logprobs=5,
temperature=0.,
stream=False,
stop=["<|endoftext|>", "\n"]
)
except:
sleep(10)
continue
output = response['choices'][0]["text"].strip()
try:
raw_logprobs = response['choices'][0]["logprobs"]["top_logprobs"][0]
except:
raw_logprobs = []
# print (output)
# print (raw_logprobs)
# print ("\n\n")
if output not in choices:
output = random.choice(choices)
dp["choice"] = output
dp["EM"] = EMs[choices.index(output)]
correct += EMs[choices.index(output)]
all_dp["data"].append(dp)
accuracy = correct / total * 100
print ("Accuracy on {}: {} / {} = {}%".format(dataset, correct, total, accuracy))
all_gpt_preds[dataset] = all_dp
with open("gpt_preds.json", 'w') as f:
json.dump(all_gpt_preds, f, indent=4)
if __name__ == '__main__':
main() | [
"\n",
"Best Answer: ",
"B: PLACEHOLDER\n",
"Question: PLACEHOLDER\n",
" ",
"answer",
"A: ",
"full_answer",
"question",
"D: ",
"commonsense",
"Best Answer: PLACEHOLDER\n",
"B: ",
"A: PLACEHOLDER\n",
"D: PLACEHOLDER\n",
"C: PLACEHOLDER\n",
"Given a question, I will get answers from all four experts, your task is to choose the best answer from the four answers. Directly predict the index of the chosen expert (A, B, C, or D). If there are multiple correct answers, just choose one of them.\n\n",
"C: ",
"Question: ",
"I am in a question-answering contest. There are four experts on my team: expert A specializes in factoid questions that focus on factual knowledge; expert B specializes in multihop questions that need multiple steps of reasoning; expert C specializes in mathematics questions that require logical reasoning and math calculation; expert D specializes in commonsense questions that focus on commonsense knowledge.\n"
] |
2024-01-10 | krishnannarayanaswamy/wit-ai-assistant | data~crawlload.py | from langchain.docstore.document import Document
from langchain.utilities import ApifyWrapper
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import AstraDB
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import ApifyDatasetLoader
import os
token=os.environ['ASTRA_DB_APPLICATION_TOKEN']
api_endpoint=os.environ['ASTRA_DB_API_ENDPOINT']
openai_api_key=os.environ["OPENAI_API_KEY"]
apify_api_key=os.environ["APIFY_API_TOKEN"]
vstore = AstraDB(
embedding=OpenAIEmbeddings(),
collection_name="wit_chatbot",
api_endpoint=api_endpoint,
token=token,
)
apify = ApifyWrapper()
text_splitter = RecursiveCharacterTextSplitter(
# Set a really small chunk size, just to show.
chunk_size = 1000,
chunk_overlap = 20,
length_function = len,
is_separator_regex = False,
)
#loader = apify.call_actor(
# actor_id="apify/website-content-crawler",
# run_input={"startUrls": [{"url": "https://www.wit.co.th/"}]},
# dataset_mapping_function=lambda item: Document(
# page_content=item["text"] or "", metadata={"source": item["url"]}
# ),
#)
loader = ApifyDatasetLoader(
dataset_id="fQChcE0tvhKKAOyHg",
dataset_mapping_function=lambda dataset_item: Document(
page_content=dataset_item["text"], metadata={"source": dataset_item["url"]}
),
)
docs = loader.load()
texts = text_splitter.split_documents(docs)
#texts = text_splitter.create_documents([docs])
print(texts[0])
print(texts[1])
inserted_ids = vstore.add_documents(texts)
print(f"\nInserted {len(inserted_ids)} documents.")
| [] |
2024-01-10 | AppDirectory/open-interpreter | interpreter~interpreter.py | from .cli import cli
from .utils import merge_deltas, parse_partial_json
from .message_block import MessageBlock
from .code_block import CodeBlock
from .code_interpreter import CodeInterpreter
from .llama_2 import get_llama_2_instance
import os
import time
import platform
import openai
import getpass
import requests
import readline
import urllib.parse
import tokentrim as tt
from rich import print
from rich.markdown import Markdown
from rich.rule import Rule
# Function schema for gpt-4
function_schema = {
"name": "run_code",
"description":
"Executes code in various programming languages and returns the output.",
"parameters": {
"type": "object",
"properties": {
"language": {
"type": "string",
"description":
"The programming language.",
"enum": ["python", "shell", "applescript", "javascript", "html"]
},
"code": {
"type": "string",
"description": "The code to execute."
}
},
"required": ["language", "code"]
},
}
# Message for when users don't have an OpenAI API key.
missing_api_key_message = """> OpenAI API key not found
To use `GPT-4` (recommended) please provide an OpenAI API key.
To use `Code-Llama` (free but less capable) press `enter`.
"""
confirm_mode_message = """
**Open Interpreter** will require approval before running code. Use `interpreter -y` to bypass this.
Press `CTRL-C` to exit.
"""
class Interpreter:
def __init__(self):
self.messages = []
self.temperature = 0.001
self.api_key = None
self.auto_run = False
self.local = False
self.model = "gpt-4"
self.debug_mode = False
# Get default system message
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'system_message.txt'), 'r') as f:
self.system_message = f.read().strip()
# Store Code Interpreter instances for each language
self.code_interpreters = {}
# No active block to start
# (blocks are visual representation of messages on the terminal)
self.active_block = None
# Note: While Open Interpreter can use Llama, we will prioritize gpt-4.
# gpt-4 is faster, smarter, can call functions, and is all-around easier to use.
# This makes gpt-4 better aligned with Open Interpreters priority to be easy to use.
self.llama_instance = None
def cli(self):
# The cli takes the current instance of Interpreter,
# modifies it according to command line flags, then runs chat.
cli(self)
def get_info_for_system_message(self):
"""
Gets relevent information for the system message.
"""
info = ""
# Add user info
username = getpass.getuser()
current_working_directory = os.getcwd()
operating_system = platform.system()
info += f"\n\n[User Info]\nName: {username}\nCWD: {current_working_directory}\nOS: {operating_system}"
if not self.local:
# Open Procedures is an open-source database of tiny, structured coding tutorials.
# We can query it semantically and append relevant tutorials/procedures to our system message:
# Encode and truncate the last two messages
query = str(self.messages[-2:])
query = urllib.parse.quote(query)
query = query[-2000:]
# Use them to query Open Procedures
url = f"https://open-procedures.replit.app/search/?query={query}"
try:
relevant_procedures = requests.get(url).json()["procedures"]
info += "\n\n# Recommended Procedures\n" + "\n---\n".join(relevant_procedures) + "\nIn your plan, include steps and, if present, **EXACT CODE SNIPPETS** (especially for depracation notices, **WRITE THEM INTO YOUR PLAN -- underneath each numbered step** as they will VANISH once you execute your first line of code, so WRITE THEM DOWN NOW if you need them) from the above procedures if they are relevant to the task. Again, include **VERBATIM CODE SNIPPETS** from the procedures above if they are relevent to the task **directly in your plan.**"
except:
# For someone, this failed for a super secure SSL reason.
# Since it's not stricly necessary, let's worry about that another day. Should probably log this somehow though.
pass
elif self.local:
# Tell Code-Llama how to run code.
info += "\n\nTo run Python code, simply write a fenced Python code block (i.e ```python) in markdown. When you close it with ```, it will be run. You'll then be given its output."
return info
def reset(self):
self.messages = []
self.code_interpreters = {}
def load(self, messages):
self.messages = messages
def chat(self, message=None, return_messages=False):
# Connect to an LLM (an large language model)
if not self.local:
# gpt-4
self.verify_api_key()
# ^ verify_api_key may set self.local to True, so we run this as an 'if', not 'elif':
if self.local:
# Code-Llama
if self.llama_instance == None:
# Find or install Code-Llama
try:
self.llama_instance = get_llama_2_instance()
except:
# If it didn't work, apologize and switch to GPT-4
print(">Failed to install Code-LLama.")
print("\n**We have likely not built the proper `Code-Llama` support for your system.**")
print("\n(Running language models locally is a difficult task! If you have insight into the best way to implement this across platforms/architectures, please join the Open Interpreter community Discord and consider contributing the project's development.)")
print("\nPlease press enter to switch to `GPT-4` (recommended).")
input()
# Switch to GPT-4
self.local = False
self.verify_api_key()
# Display welcome message
welcome_message = ""
if self.debug_mode:
welcome_message += "> Entered debug mode"
# If self.local, we actually don't use self.model
# (self.auto_run is like advanced usage, we display no messages)
if not self.local and not self.auto_run:
welcome_message += f"\n> Model set to `{self.model.upper()}`\n\n**Tip:** To run locally, use `interpreter --local`"
if self.local:
welcome_message += f"\n> Model set to `Code-Llama`"
# If not auto_run, tell the user we'll ask permission to run code
# We also tell them here how to exit Open Interpreter
if not self.auto_run:
welcome_message += "\n\n" + confirm_mode_message
welcome_message = welcome_message.strip()
# Print welcome message with newlines on either side (aesthetic choice)
# unless we're starting with a blockquote (aesthetic choice)
if welcome_message != "":
if welcome_message.startswith(">"):
print(Markdown(welcome_message), '')
else:
print('', Markdown(welcome_message), '')
# Check if `message` was passed in by user
if message:
# If it was, we respond non-interactivley
self.messages.append({"role": "user", "content": message})
self.respond()
else:
# If it wasn't, we start an interactive chat
while True:
try:
user_input = input("> ").strip()
except EOFError:
break
except KeyboardInterrupt:
print() # Aesthetic choice
break
# Use `readline` to let users up-arrow to previous user messages,
# which is a common behavior in terminals.
readline.add_history(user_input)
# Add the user message to self.messages
self.messages.append({"role": "user", "content": user_input})
# Let the user turn on debug mode mid-chat
if user_input == "%debug":
print('', Markdown("> Entered debug mode"), '')
print(self.messages)
self.debug_mode = True
continue
# Respond, but gracefully handle CTRL-C / KeyboardInterrupt
try:
self.respond()
except KeyboardInterrupt:
pass
finally:
# Always end the active block. Multiple Live displays = issues
self.end_active_block()
if return_messages:
return self.messages
def verify_api_key(self):
"""
Makes sure we have an OPENAI_API_KEY.
"""
if self.api_key == None:
if 'OPENAI_API_KEY' in os.environ:
self.api_key = os.environ['OPENAI_API_KEY']
else:
# This is probably their first time here!
print('', Markdown("**Welcome to Open Interpreter.**"), '')
time.sleep(1)
print(Rule(style="white"))
print(Markdown(missing_api_key_message), '', Rule(style="white"), '')
response = input("OpenAI API key: ")
if response == "":
# User pressed `enter`, requesting Code-Llama
self.local = True
print(Markdown("> Switching to `Code-Llama`...\n\n**Tip:** Run `interpreter --local` to automatically use `Code-Llama`."), '')
time.sleep(2)
print(Rule(style="white"))
return
else:
self.api_key = response
print('', Markdown("**Tip:** To save this key for later, run `export OPENAI_API_KEY=your_api_key` on Mac/Linux or `setx OPENAI_API_KEY your_api_key` on Windows."), '')
time.sleep(2)
print(Rule(style="white"))
openai.api_key = self.api_key
def end_active_block(self):
if self.active_block:
self.active_block.end()
self.active_block = None
def respond(self):
# Add relevant info to system_message
# (e.g. current working directory, username, os, etc.)
info = self.get_info_for_system_message()
system_message = self.system_message + "\n\n" + info
if self.local:
# Model determines how much we'll trim the messages list to get it under the context limit
# So for Code-Llama, we'll use "gpt-3.5-turbo" which (i think?) has the same context window as Code-Llama
self.model = "gpt-3.5-turbo"
# In the future lets make --model {model} just work / include llama
messages = tt.trim(self.messages, self.model, system_message=system_message)
if self.debug_mode:
print("\n", "Sending `messages` to LLM:", "\n")
print(messages)
print()
# Make LLM call
if not self.local:
# gpt-4
response = openai.ChatCompletion.create(
model=self.model,
messages=messages,
functions=[function_schema],
stream=True,
temperature=self.temperature,
)
elif self.local:
# Code-Llama
# Turn function messages -> system messages for llama compatability
messages = self.messages
for message in messages:
if message['role'] == 'function':
message['role'] = 'system'
response = self.llama_instance.create_chat_completion(
messages=messages,
stream=True,
temperature=self.temperature,
)
# Initialize message, function call trackers, and active block
self.messages.append({})
in_function_call = False
llama_function_call_finished = False
self.active_block = None
for chunk in response:
delta = chunk["choices"][0]["delta"]
# Accumulate deltas into the last message in messages
self.messages[-1] = merge_deltas(self.messages[-1], delta)
# Check if we're in a function call
if not self.local:
condition = "function_call" in self.messages[-1]
elif self.local:
# Since Code-Llama can't call functions, we just check if we're in a code block.
# This simply returns true if the number of "```" in the message is odd.
if "content" in self.messages[-1]:
condition = self.messages[-1]["content"].count("```") % 2 == 1
else:
# If it hasn't made "content" yet, we're certainly not in a function call.
condition = False
if condition:
# We are in a function call.
# Check if we just entered a function call
if in_function_call == False:
# If so, end the last block,
self.end_active_block()
# Print newline if it was just a code block or user message
# (this just looks nice)
last_role = self.messages[-2]["role"]
if last_role == "user" or last_role == "function":
print()
# then create a new code block
self.active_block = CodeBlock()
# Remember we're in a function_call
in_function_call = True
# Now let's parse the function's arguments:
if not self.local:
# gpt-4
# Parse arguments and save to parsed_arguments, under function_call
if "arguments" in self.messages[-1]["function_call"]:
arguments = self.messages[-1]["function_call"]["arguments"]
new_parsed_arguments = parse_partial_json(arguments)
if new_parsed_arguments:
# Only overwrite what we have if it's not None (which means it failed to parse)
self.messages[-1]["function_call"][
"parsed_arguments"] = new_parsed_arguments
elif self.local:
# Code-Llama
# Get contents of current code block and save to parsed_arguments, under function_call
if "content" in self.messages[-1]:
current_code_block = self.messages[-1]["content"].split("```python")[-1]
arguments = {"language": "python", "code": current_code_block}
# Code-Llama won't make a "function_call" property for us to store this under, so:
if "function_call" not in self.messages[-1]:
self.messages[-1]["function_call"] = {}
self.messages[-1]["function_call"]["parsed_arguments"] = arguments
else:
# We are not in a function call.
# Check if we just left a function call
if in_function_call == True:
if self.local:
# This is the same as when gpt-4 gives finish_reason as function_call.
# We have just finished a code block, so now we should run it.
llama_function_call_finished = True
# Remember we're not in a function_call
in_function_call = False
# If there's no active block,
if self.active_block == None:
# Create a message block
self.active_block = MessageBlock()
# Update active_block
self.active_block.update_from_message(self.messages[-1])
# Check if we're finished
if chunk["choices"][0]["finish_reason"] or llama_function_call_finished:
if chunk["choices"][
0]["finish_reason"] == "function_call" or llama_function_call_finished:
# Time to call the function!
# (Because this is Open Interpreter, we only have one function.)
if self.debug_mode:
print("Running function:")
print(self.messages[-1])
print("---")
# Ask for user confirmation to run code
if self.auto_run == False:
# End the active block so you can run input() below it
# Save language and code so we can create a new block in a moment
self.active_block.end()
language = self.active_block.language
code = self.active_block.code
# Prompt user
response = input(" Would you like to run this code? (y/n)\n\n ")
print("") # <- Aesthetic choice
if response.strip().lower() == "y":
# Create a new, identical block where the code will actually be run
self.active_block = CodeBlock()
self.active_block.language = language
self.active_block.code = code
else:
# User declined to run code.
self.active_block.end()
self.messages.append({
"role":
"function",
"name":
"run_code",
"content":
"User decided not to run this code."
})
return
# If we couldn't parse its arguments, we need to try again.
if "parsed_arguments" not in self.messages[-1]["function_call"]:
print("> Function call could not be parsed.\n\nPlease open an issue on Github (openinterpreter.com, click Github) and paste the following:")
print("\n", self.messages[-1]["function_call"], "\n")
time.sleep(2)
print("Informing the language model and continuing...")
# Reiterate what we need to the language model:
self.messages.append({
"role": "function",
"name": "run_code",
"content": """Your function call could not be parsed. Please use ONLY the `run_code` function, which takes two parameters: `code` and `language`. Your response should be formatted as a JSON."""
})
# Go around again
self.respond()
return
# Create or retrieve a Code Interpreter for this language
language = self.messages[-1]["function_call"]["parsed_arguments"][
"language"]
if language not in self.code_interpreters:
self.code_interpreters[language] = CodeInterpreter(language, self.debug_mode)
code_interpreter = self.code_interpreters[language]
# Let this Code Interpreter control the active_block
code_interpreter.active_block = self.active_block
code_interpreter.run()
# End the active_block
self.active_block.end()
# Append the output to messages
# Explicitly tell it if there was no output (sometimes "" = hallucinates output)
self.messages.append({
"role": "function",
"name": "run_code",
"content": self.active_block.output if self.active_block.output else "No output"
})
# Go around again
self.respond()
if chunk["choices"][0]["finish_reason"] != "function_call":
# Done!
# Code Llama likes to output "###" at the end of every message for some reason
if self.local and "content" in self.messages[-1]:
self.messages[-1]["content"] = self.messages[-1]["content"].strip().rstrip("#")
self.active_block.update_from_message(self.messages[-1])
time.sleep(0.1)
self.active_block.end()
return | [
"User decided not to run this code.",
"No output",
"Your function call could not be parsed. Please use ONLY the `run_code` function, which takes two parameters: `code` and `language`. Your response should be formatted as a JSON."
] |
2024-01-10 | explomind1/gpt-researcher-LLMHackatlon | agent~llm_utils.py | from __future__ import annotations
import json
from fastapi import WebSocket
import time
import openai
from colorama import Fore, Style
from openai.error import APIError, RateLimitError
from agent.prompts import auto_agent_instructions
from config import Config
CFG = Config()
openai.api_key = CFG.openai_api_key
from typing import Optional
import logging
def create_chat_completion(
messages: list, # type: ignore
model: Optional[str] = None,
temperature: float = CFG.temperature,
max_tokens: Optional[int] = None,
stream: Optional[bool] = False,
websocket: WebSocket | None = None,
) -> str:
"""Create a chat completion using the OpenAI API
Args:
messages (list[dict[str, str]]): The messages to send to the chat completion
model (str, optional): The model to use. Defaults to None.
temperature (float, optional): The temperature to use. Defaults to 0.9.
max_tokens (int, optional): The max tokens to use. Defaults to None.
stream (bool, optional): Whether to stream the response. Defaults to False.
Returns:
str: The response from the chat completion
"""
# validate input
if model is None:
raise ValueError("Model cannot be None")
if max_tokens is not None and max_tokens > 8001:
raise ValueError(f"Max tokens cannot be more than 8001, but got {max_tokens}")
if stream and websocket is None:
raise ValueError("Websocket cannot be None when stream is True")
# create response
for attempt in range(10): # maximum of 10 attempts
try:
response = send_chat_completion_request(
messages, model, temperature, max_tokens, stream, websocket
)
return response
except RateLimitError:
logging.warning("Rate limit reached, backing off...")
time.sleep(2 ** (attempt + 2)) # exponential backoff
except APIError as e:
if e.http_status != 502 or attempt == 9: # if not Bad Gateway error or final attempt
raise
logging.error("API Error: Bad gateway, backing off...")
time.sleep(2 ** (attempt + 2)) # exponential backoff
logging.error("Failed to get response after 10 attempts")
raise RuntimeError("Failed to get response from OpenAI API")
def send_chat_completion_request(
messages, model, temperature, max_tokens, stream, websocket
):
if not stream:
result = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
)
return result.choices[0].message["content"]
else:
return stream_response(model, messages, temperature, max_tokens, websocket)
async def stream_response(model, messages, temperature, max_tokens, websocket):
paragraph = ""
response = ""
print(f"streaming response...")
for chunk in openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
stream=True,
):
content = chunk["choices"][0].get("delta", {}).get("content")
if content is not None:
response += content
paragraph += content
if "\n" in paragraph:
await websocket.send_json({"type": "report", "output": paragraph})
paragraph = ""
print(f"streaming response complete")
return response
def choose_agent(task: str) -> str:
"""Determines what agent should be used
Args:
task (str): The research question the user asked
Returns:
agent - The agent that will be used
agent_role_prompt (str): The prompt for the agent
"""
try:
response = openai.ChatCompletion.create(
model=CFG.smart_llm_model,
messages=[
{"role": "system", "content": f"{auto_agent_instructions()}"},
{"role": "user", "content": f"task: {task}"}],
temperature=0,
)
return json.loads(response["choices"][0]["message"]["content"])
except Exception as e:
print(f"{Fore.RED}Error in choose_agent: {e}{Style.RESET_ALL}")
return {"agent": "Default Agent",
"agent_role_prompt": "You are an AI critical thinker research assistant. Your sole purpose is to write well written, critically acclaimed, objective and structured reports on given text."}
| [
"task: PLACEHOLDER"
] |
2024-01-10 | gutessitore/SaWine | src~nlp~sentiment_analysis.py | import openai
import os
class ReviewClassifier:
def __init__(self):
self.prompt = "Decide whether a wine review's sentiment is positive, neutral, or negative.\n\nreview: \"{}\"\nSentiment:"
self.model = "text-davinci-003"
self.temperature = 0
self.max_tokens = 60
self.top_p = 1.0
self.frequency_penalty = 0.5
self.presence_penalty = 0.0
self.__authenticate()
def __authenticate(self):
openai.api_key = os.getenv("OPENAI_API_KEY")
def get_sentiment(self, review):
prompt = self.prompt.format(review)
response = openai.Completion.create(
engine=self.model,
prompt=prompt,
temperature=self.temperature,
max_tokens=self.max_tokens,
top_p=self.top_p,
frequency_penalty=self.frequency_penalty,
presence_penalty=self.presence_penalty
)
if not response['choices']:
raise Exception("OpenAI API request failed: {}".format(response))
sentiment = response['choices'][0]['text'].strip().lower()
return sentiment
| [] |
2024-01-10 | patrickamadeus/Prettify.md | templates~factory.py | from langchain.prompts import (
PromptTemplate,
load_prompt
)
class PromptFactory():
PROMPTDIR: str = "./templates/prompts"
prompts: dict = {}
def __init__(self):
self.prompts["base_prompt"] = load_prompt(f"{self.PROMPTDIR}/base.json")
pass
def append_prompt(self, prompt: PromptTemplate) -> None:
pass
def remove_prompt(self, prompt_key: str) -> None:
pass
def get_prompt_keys(self) -> [str]:
return list(self.prompts.keys())
def get_prompt_dict(self) -> dict:
return self.prompts
def get_prompts(self) -> [PromptTemplate]:
return list(self.prompts.values())
| [
"./templates/prompts",
"{}"
] |
2024-01-10 | Hengle/Tools-7 | Python~ChatGPT~TestRequestOpenAI.py | import openai
# 将YOUR_API_KEY替换成自己的API Key
openai.api_key = YOUR_API_KEY
# 指定请求的模型ID和文本
model_engine = "davinci" # 模型ID,可选davinci、curie、babbage等
prompt_text = "Hello, ChatGPT!"
# 发送API请求,获取响应
response = openai.Completion.create(
engine=model_engine,
prompt=prompt_text,
max_tokens=5
)
# 解析响应结果
if response.choices[0].text:
answer = response.choices[0].text.strip()
print(answer)
else:
print("No response received")
# print(response.json()) | [
"Hello, ChatGPT!"
] |
2024-01-10 | sert121/probable-broccoli | langch~qdrant_testing_repl.py | from langchain.vectorstores import Qdrant
import pinecone
from langchain.vectorstores import Chroma
from langchain.embeddings import CohereEmbeddings
import os
import time
from qdrant_client import QdrantClient
from qdrant_client.models import Distance, VectorParams
import pickle
import cohere
from qdrant_client.http.models import Batch
from qdrant_client.http import models
from langchain.document_loaders import PyPDFLoader
COHERE_API_KEY = os.environ['COHERE_API_KEY']
COHERE_API_KEY = 'lgi7A2ZBRIswmmUy3FIB0AbjfNhEnvWtgEXnElPi'
cohere_client = cohere.Client(api_key=COHERE_API_KEY)
API_KEY_QDRANT = os.environ['QDRANT_KEY']
embeddings = CohereEmbeddings(cohere_api_key=COHERE_API_KEY)
#initializing qdrant client cloud ~~
def initialize_vecstore():
client_q = QdrantClient(
url=
'https://5bcda451-5eec-489e-a663-1349d8693bf3.us-east-1-0.aws.cloud.qdrant.io:6333',
api_key=API_KEY_QDRANT)
collection_name = "Pandora"
# client.create_collection(collection_name=collection_name,
# vectors_config=VectorParams(
# size=4096, distance=Distance.COSINE))
qdrant = Qdrant(client_q,
collection_name,
embedding_function=embeddings.embed_documents)
with open('gpt4.pkl', 'rb') as f:
texts = pickle.load(f)
fine_texts = [t.page_content for t in texts]
# qdrant.from_documents(documents=texts,
# embedding=embeddings,
# collection_name=collection_name)
# qdrant.add_texts(fine_texts)
ids = [i for i in range(len(fine_texts))]
embedded_vectors = cohere_client.embed(model="large",
texts=fine_texts).embeddings
# Conversion to float is required for Qdrant
vectors = [list(map(float, vector)) for vector in embedded_vectors]
client_q.upsert(collection_name=collection_name,
points=Batch(ids=ids, vectors=vectors))
def create_collection(collection_name='Pandora'):
client_q = QdrantClient(
url=
'https://5bcda451-5eec-489e-a663-1349d8693bf3.us-east-1-0.aws.cloud.qdrant.io:6333',
api_key=API_KEY_QDRANT)
client_q.recreate_collection(
collection_name=f"{collection_name}",
vectors_config=models.VectorParams(size=4096,
distance=models.Distance.COSINE),
)
print('done---')
def get_collection():
print("in here guys")
client_q = QdrantClient(
url=
'https://5bcda451-5eec-489e-a663-1349d8693bf3.us-east-1-0.aws.cloud.qdrant.io:6333',
api_key=API_KEY_QDRANT)
collection_name = 'Pandora'
details = client_q.get_collection(collection_name=f"{collection_name}")
print(f"Details : {details}")
def query_vecstore(collection_name='Pandora', questions=['']):
client_q = QdrantClient(
url=
'https://5bcda451-5eec-489e-a663-1349d8693bf3.us-east-1-0.aws.cloud.qdrant.io:6333',
api_key=API_KEY_QDRANT)
embedded_vectors = cohere_client.embed(model="large",
texts=questions).embeddings
# Conversion to float is required for Qdrant
vectors = [list(map(float, vector)) for vector in embedded_vectors]
k_max = 5
response = client_q.search(collection_name=f"{collection_name}",
query_vector=vectors[0],
limit=k_max,
with_payload=True)
print('------\n', response[0].payload['page_content'], '\n------')
print(f'Response h: -----\n {response} \n-----')
def text_store_lang():
loader = PyPDFLoader("potential_topics.pdf")
pages = loader.load_and_split()
host = 'https://5bcda451-5eec-489e-a663-1349d8693bf3.us-east-1-0.aws.cloud.qdrant.io:6333'
finer_texts = [t.page_content for t in pages]
doc_store = Qdrant.from_texts(finer_texts,
embeddings,
collection_name='rune',
url=host,
api_key=API_KEY_QDRANT)
print(doc_store)
print(
doc_store.similarity_search(
'What are you interested in federated learning?'))
def doc_store_lang():
with open('django_texts.pkl', 'rb') as f:
texts = pickle.load(f)
host = 'https://5bcda451-5eec-489e-a663-1349d8693bf3.us-east-1-0.aws.cloud.qdrant.io:6333'
doc_store = Qdrant.from_documents(texts,
embeddings,
collection_name='django',
url=host,
api_key=API_KEY_QDRANT)
print(doc_store)
print(doc_store.similarity_search('How to install django?'))
def load_vec_store_lang():
print(callable(cohere_client.embed))
host = 'https://5bcda451-5eec-489e-a663-1349d8693bf3.us-east-1-0.aws.cloud.qdrant.io:6333'
client_q = QdrantClient(
url=
'https://5bcda451-5eec-489e-a663-1349d8693bf3.us-east-1-0.aws.cloud.qdrant.io:6333',
api_key=API_KEY_QDRANT)
store = Qdrant(client=client_q,
embedding_function=cohere_client.embed,
collection_name='ronan')
print("store", store)
r = store.similarity_search_with_score(query='how big is gpt4?')
print("Results ----\n", r)
def delete_collection(collection_name: str):
client_q = QdrantClient(
url=
'https://5bcda451-5eec-489e-a663-1349d8693bf3.us-east-1-0.aws.cloud.qdrant.io:6333',
api_key=API_KEY_QDRANT)
client_q.delete_collection(collection_name=f"{collection_name}")
print('done--')
# create_collection('rune')
# delete_collection('freshman')
# get_collection()
# initialize_vecstore()
# query_vecstore(collection_name='ronan',
# questions=['What is the size of gpt4?'])
text_store_lang()
# load_vec_store_lang()
# doc_store_lang()
# query_vecstore(collection_name='artichoke',
# questions=['What is the size of gpt4?'])
| [] |
2024-01-10 | sert121/probable-broccoli | langch~languish.py |
# [1]
import sys
# import document loader from lamgcahin
from langchain.document_loaders import OnlinePDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma,Pinecone
from langchain import VectorDBQA
from langchain.llms import Cohere,OpenAI
from langchain.embeddings import CohereEmbeddings
import pinecone
import os
from langchain.chains.question_answering import load_qa_chain
# defining the cohere keys
COHERE_API_KEY = 'lgi7A2ZBRIswmmUy3FIB0AbjfNhEnvWtgEXnElPi'
EMBEDDING_TYPE = 'cohere'
PINECONE_KEY = os.getenv("PINECONE_KEY")
#defining the loader
def load_data(data_path='https://django.readthedocs.io/_/downloads/en/latest/pdf/',loader_type='online'
):
if loader_type == 'online':
loader = OnlinePDFLoader('https://django.readthedocs.io/_/downloads/en/latest/pdf/')
data = loader.load()
print(f"--- No of pages: {len(data)} \n")
# Chunking up the data
splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = splitter.split_documents(data)
print(f"--- No of chunks: {len(texts)}")
return texts
def generate_embeddings(texts, embedding_type=EMBEDDING_TYPE):
if embedding_type == 'cohere':
embeddings = CohereEmbeddings(cohere_api_key=COHERE_API_KEY)
elif embedding_type == 'openai':
embeddings = OpenAIEmbeddings(openai_api_key='OPENAI_API_KEY')
return embeddings
def initialize_vecstore(embeddings,texts, vector_store='pinecone'):
if vector_store == 'pinecone':
#initialize pinecone
# pinecone.init( api_key=PINECONE_KEY,environment='us-west1-gcp')
index_name = 'testindex-co'
# if index_name not in pinecone.list_indexes():
# if EMBEDDING_TYPE == 'cohere':
# pinecone.create_index(index_name, vector_size=4096, metric='cosine')
# elif EMBEDDING_TYPE == 'openai':
# pinecone.create_index(index_name, vector_size=768, metric='cosine')
# else:
# index_pine = pinecone.Index("cohere")
search_docs = Pinecone.from_texts([t.page_content for t in texts],embeddings, index_name=index_name)
return search_docs
def initialize_llm(llmtype='cohere'):
if llmtype == 'cohere':
llm = Cohere(cohere_api_key=COHERE_API_KEY)
elif llmtype == 'openai':
llm = OpenAI(openai_api_key='OPENAI_API_KEY')
return llm
def query_vecstore(search_docs, query,llm):
topk_docs = search_docs.similarity_search(query, include_metadata=True)
llm = initialize_llm()
# qa = VectorDBQA.from_chain_type(llm=llm, chain_type="stuff", vectorstore=search_docs)
chain = load_qa_chain(llm, chain_type="stuff")
output = chain.run(query, topk_docs)
print(output)
return output
def run_process():
texts = load_data()
embeddings = generate_embeddings(texts)
search_docs = initialize_vecstore(embeddings,texts)
llm = initialize_llm()
query = 'How to setup models in django?'
output = query_vecstore(search_docs, query,llm)
return output
# running the process
run_process()
'''
search_docs = Pinecone.from_texts([t.page_content for t in texts],cohere_embeddings, index_name=index_name)
q = input("Enter your query: ")
print(search_docs.similarity_search(q,include_metadata=True))
# docsearch = Chroma.from_documents(texts, embeddings)
from langchain.llms import Cohere
cohere = Cohere(model="gptd-instruct-tft", cohere_api_key="lgi7A2ZBRIswmmUy3FIB0AbjfNhEnvWtgEXnElPi")
qa = VectorDBQA.from_chain_type(llm=cohere, chain_type="stuff", vectorstore=search_docs)
''' | [] |
2024-01-10 | HaythmKenway/dotfiles | .local~bin~hey | #!/usr/bin/env python3
import os
import openai
import json
import sys
import requests
from termcolor import colored
import re
# Set your secret API key
sys.argv[0]=re.search(r'[^/]+$',sys.argv[0]).group(0)
prompt= ' '.join(sys.argv[0:])
openai.api_key="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"
openai.organization = "org-dgLV2rtLJwJKYYPncP8q86zi"
payload = {"model": "text-davinci-003", "prompt": prompt, "temperature": 0, "max_tokens": 500}
header={"Content-Type":"application/json","Authorization": "Bearer {}".format(openai.api_key)}
try:
r = requests.post("https://api.openai.com/v1/completions", headers=header, data=json.dumps(payload))
print(colored('|\n','red')+colored('└╼','red')+colored("Rhaenyra",'cyan')+colored('# ','yellow')+r.json()["choices"][0]["text"].strip())
except:
print(colored('| 🥺\n|👉👈\n','red')+colored('└╼','red')+colored("Rhaenyra",'cyan')+colored('# ','yellow')+"I don't know what to say")
| [
" "
] |
2024-01-10 | fujitatomoya/ros2ai | ros2ai~api~config.py | # Copyright 2023 Tomoya Fujita <[email protected]>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import ros2ai.api.constants as constants
from openai import OpenAI
def get_api_key() -> str:
"""
Get OpenAI API Key from OPENAI_API_KEY environment variable.
OpenAI API Key must be set by ros2ai user with OPENAI_API_KEY environment variable.
:return: string of OpenAI API Key.
:raises: if OPENAI_API_KEY is not set.
"""
key_name = os.environ.get(constants.ROS_OPENAI_API_KEY_ENV_VAR)
if not key_name:
raise EnvironmentError(
f"'{constants.ROS_OPENAI_API_KEY_ENV_VAR}' environment variable is not set'"
)
else:
return key_name
def get_ai_model() -> str:
"""
Get OpenAI Model from OPENAI_MODEL_NAME environment variable.
OpenAI Model is optional, in default to gpt-3.5-turbo
:return: string of OpenAI Model.
"""
model_name = os.environ.get(constants.ROS_OPENAI_MODEL_NAME_ENV_VAR)
if not model_name:
# TODO(@fujitatomoya):better to print info here that using default model.
return constants.ROS_OPENAI_DEFAULT_MODEL
else:
return model_name
def get_endpoint_url() -> str:
"""
Get OpenAI API service endpoint URL from OPENAI_ENDPOINT environment variable.
OpenAI API service endpoint URL is optional, in default fallback to openai.
:return: string of OpenAI API service endpoint URL, could be None.
"""
url = os.environ.get(constants.ROS_OPENAI_ENDPOINT_ENV_VAR)
# TODO(@fujitatomoya):check if that is valid url before return.
if not url:
return constants.ROS_OPENAI_DEFAULT_ENDPOINT
else:
return url
def get_temperature() -> float:
"""
Get temperature parameter to be used with OpenAI API.
:return: temperature, could be None.
"""
temperature = os.environ.get(constants.ROS_OPENAI_TEMPERATURE_ENV_VAR)
if not temperature:
return float(constants.ROS_OPENAI_DEFAULT_TEMPERATURE)
else:
return float(temperature)
class OpenAiConfig:
"""
Collect all OpenAI API related configuration from user setting as key-value pair.
"""
def __init__(self, args):
self.config_pair = {}
# api key is mandatory, this could throw the exception if not set
self.config_pair['api_key'] = get_api_key()
# ai model is optional, command line argument prevails
self.config_pair['api_model'] = get_ai_model()
if args.model != constants.ROS_OPENAI_DEFAULT_MODEL:
self.config_pair['api_model'] = args.model
# api endpoint is optional, command line argument prevails
self.config_pair['api_endpoint'] = get_endpoint_url()
if args.url != constants.ROS_OPENAI_DEFAULT_MODEL:
self.config_pair['api_endpoint'] = args.url
# api token is optional, only available via command line argument
self.config_pair['api_token'] = args.token
# temperature is optional, only available via environmental variable
self.config_pair['api_temperature'] = get_temperature()
def set_value(self, key, value):
# Set a key-value pair
self.config_pair[key] = value
def get_value(self, key):
# Get the value for a given key
return self.config_pair.get(key, None)
def remove_key(self, key):
# Remove a key and its associated value
if key in self.config_pair:
del self.config_pair[key]
def display_all(self):
# Display all key-value pairs
for key, value in self.config_pair.items():
# we should never print the api key for the security
if key != 'api_key':
print(f"----- {key}: {value}")
def is_api_key_valid(self):
# Validate api key, model and endpoint to post the API
client = OpenAI(
api_key=self.get_value('api_key')
)
try:
completion = client.chat.completions.create(
model = self.get_value('api_model'),
messages = [
{
"role": "user",
"content": "Are you in service?",
},
],
temperature = self.get_value('api_temperature'),
max_tokens = self.get_value('api_token')
)
except Exception as e:
print('Failed to call OpenAI API: ' + str(e))
return False
else:
if (completion.choices[0].finish_reason != 'stop'):
print('Failed chat completion with: ' + completion.choices[0].finish_reason)
return False
else:
print(completion.choices[0].message.content)
return True
| [
"Are you in service?"
] |
2024-01-10 | uvens/HappyBirthday | servis1.py | import openai
from random import choice
import csv
from datetime import datetime
openai.api_key = 'API_KEY'
messages = [
{"role": "system", "content": "You are a helpful assistant."},
]
class HappyBirthday:
__FLAG=None
def __new__(cls, *args, **kwargs):
if HappyBirthday.__FLAG is None:
HappyBirthday.__FLAG=super().__new__(cls)
return HappyBirthday.__FLAG
return HappyBirthday.__FLAG
def __call__(self, *args, **kwargs):
if args[0].rsplit('.')[1] == 'csv':
self.congratulation_file(args[0])
else:
if len(args) == 2:
return ','.join(self.congratulation_name(*args))
else:
print(f'Неверный формат данных')
def congratulation_file(self, *args):
'''Открытие файла на чтение
Перебор данных из файла
открытие файла на запись
и передача имени и даты в ChatGpt с дальнейшей записью в файл'''
lst = []
with open(args[0], newline='') as f:
file = csv.reader(f, delimiter=' ', quotechar='|')
for n, i in enumerate(file):
lst.append(tuple(''.join(i).split(',')))
with open(args[0], 'w') as user:
writer = csv.writer(user)
for n, i in enumerate(lst):
data, name = i[0],i[1]
if n == 0 and i[0].isalpha() and i[1].isalpha():
writer.writerow([data, name])
else:
writer.writerow(self.congratulation_name(data, name))
def congratulation_name(self, data, name):
'''Проверка на корректность данных и генерация ответа ChatGpt '''
if self.check_name(name) and self.check_date(data):
message = ''.join(choice([
f"Напиши поздравление с днём рождения для {name}, как аристократ, уложись в 120 символов ответ предоставь на русском"
f"Напиши поздравление с днём рождения для {name}, сделав акцент на месяц рождения{data[1]} и указав его дату {data[0]}, уложись в 120 символов ответ предоставь на русском",
f"Напиши поздравление с днём рождения для {name} как чёткий пацан, уложись в 120 символов, добавь смайлы в конец поздравления ответ предоставь на русском",
f"Напиши поздравление с днём рождения для {name} как писатель Есенин, уложись в 120 символов ответ предоставь на русском",
f"Напиши поздравление с днём рождения для {name} в стиле хоку, уложись в 120 символов ответ предоставь на русском",
f"Напиши поздравление с днём рождения для {name} по фене, уложись в 120 символов ответ предоставь на русском",
f"Напиши поздравление с днём рождения для {name} в форме анекдота, уложись в 120 символов, добавь смайлы в конец поздравления ответ предоставь на русском",
]))
messages.append(
{"role": "user", "content": message},
)
chat_completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
answer = ' '.join(chat_completion.choices[0].message.content.split('\n'))
messages.append({"role": "assistant", "content": answer})
return [data, name, answer]
return [data, name, f'Неправильный формат даты']
@staticmethod
def check_date(x: str):
'''Проверка даты на корректность ввода'''
try:
datetime.strptime(x, "%d.%m")
return True
except ValueError:
return False
@staticmethod
def check_name(x):
'''Проверка имени на корректность ввода'''
if all((i.isalpha() for i in x)):
return True
return False
| [
"You are a helpful assistant."
] |
2024-01-10 | uvens/HappyBirthday | servis.py | import openai
from random import choice
import csv
from datetime import datetime
openai.api_key = 'API_KEY'
messages = [
{"role": "system", "content": "You are a helpful assistant."},
]
class HappyBirthday:
__FLAG=None
def __new__(cls, *args, **kwargs):
if HappyBirthday.__FLAG is None:
HappyBirthday.__FLAG=super().__new__(cls)
return HappyBirthday.__FLAG
return HappyBirthday.__FLAG
def __call__(self, *args, **kwargs):
if args[0].rsplit('.')[1] == 'csv':
self.congratulation_file(args[0])
else:
if len(args) == 2:
return ','.join(self.congratulation_name(*args))
else:
print(f'Неверный формат данных')
def congratulation_file(self, *args):
'''Открытие файла на чтение
Перебор данных из файла
открытие файла на запись
и передача имени и даты в ChatGpt с дальнейшей записью в файл'''
lst = []
with open(args[0], newline='') as f:
file = csv.reader(f, delimiter=' ', quotechar='|')
for n, i in enumerate(file):
lst.append(tuple(''.join(i).split(',')))
with open(args[0], 'w') as user:
writer = csv.writer(user)
for n, i in enumerate(lst):
data, name = i
if n == 0 and i[0].isalpha() and i[1].isalpha():
writer.writerow([data, name])
else:
writer.writerow(self.congratulation_name(data, name))
def congratulation_name(self, data, name):
'''Проверка на корректность данных и генерация ответа ChatGpt '''
if self.check_name(name) and self.check_date(data):
message = ''.join(choice([
f"Напиши поздравление с днём рождения для {name}, как аристократ, уложись в 120 символов ответ предоставь на русском"
f"Напиши поздравление с днём рождения для {name}, сделав акцент на месяц рождения{data[1]} и указав его дату {data[0]}, уложись в 120 символов ответ предоставь на русском",
f"Напиши поздравление с днём рождения для {name} как чёткий пацан, уложись в 120 символов, добавь смайлы в конец поздравления ответ предоставь на русском",
f"Напиши поздравление с днём рождения для {name} как писатель Есенин, уложись в 120 символов ответ предоставь на русском",
f"Напиши поздравление с днём рождения для {name} в стиле хоку, уложись в 120 символов ответ предоставь на русском",
f"Напиши поздравление с днём рождения для {name} по фене, уложись в 120 символов ответ предоставь на русском",
f"Напиши поздравление с днём рождения для {name} в форме анекдота, уложись в 120 символов, добавь смайлы в конец поздравления ответ предоставь на русском",
]))
messages.append(
{"role": "user", "content": message},
)
chat_completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
answer = ' '.join(chat_completion.choices[0].message.content.split('\n'))
messages.append({"role": "assistant", "content": answer})
return [data, name, answer]
return [data, name, f'Неправильный формат даты']
@staticmethod
def check_date(x: str):
'''Проверка даты на корректность ввода'''
try:
datetime.strptime(x, "%d.%m")
return True
except Exception:
raise ValueError('Неверный формат даты')
@staticmethod
def check_name(x):
'''Проверка имени на корректность ввода'''
if all((i.isalpha() for i in x)):
return True
raise ValueError('Неверный формат имени')
| [
"You are a helpful assistant."
] |
2024-01-10 | Bfault/langchain | libs~langchain~langchain~document_loaders~parsers~language~cobol.py | import re
from typing import Callable, List
from langchain.document_loaders.parsers.language.code_segmenter import CodeSegmenter
class CobolSegmenter(CodeSegmenter):
"""Code segmenter for `COBOL`."""
PARAGRAPH_PATTERN = re.compile(r"^[A-Z0-9\-]+(\s+.*)?\.$", re.IGNORECASE)
DIVISION_PATTERN = re.compile(
r"^\s*(IDENTIFICATION|DATA|PROCEDURE|ENVIRONMENT)\s+DIVISION.*$", re.IGNORECASE
)
SECTION_PATTERN = re.compile(r"^\s*[A-Z0-9\-]+\s+SECTION.$", re.IGNORECASE)
def __init__(self, code: str):
super().__init__(code)
self.source_lines: List[str] = self.code.splitlines()
def is_valid(self) -> bool:
# Identify presence of any division to validate COBOL code
return any(self.DIVISION_PATTERN.match(line) for line in self.source_lines)
def _extract_code(self, start_idx: int, end_idx: int) -> str:
return "\n".join(self.source_lines[start_idx:end_idx]).rstrip("\n")
def _is_relevant_code(self, line: str) -> bool:
"""Check if a line is part of the procedure division or a relevant section."""
if "PROCEDURE DIVISION" in line.upper():
return True
# Add additional conditions for relevant sections if needed
return False
def _process_lines(self, func: Callable) -> List[str]:
"""A generic function to process COBOL lines based on provided func."""
elements: List[str] = []
start_idx = None
inside_relevant_section = False
for i, line in enumerate(self.source_lines):
if self._is_relevant_code(line):
inside_relevant_section = True
if inside_relevant_section and (
self.PARAGRAPH_PATTERN.match(line.strip().split(" ")[0])
or self.SECTION_PATTERN.match(line.strip())
):
if start_idx is not None:
func(elements, start_idx, i)
start_idx = i
# Handle the last element if exists
if start_idx is not None:
func(elements, start_idx, len(self.source_lines))
return elements
def extract_functions_classes(self) -> List[str]:
def extract_func(elements: List[str], start_idx: int, end_idx: int) -> None:
elements.append(self._extract_code(start_idx, end_idx))
return self._process_lines(extract_func)
def simplify_code(self) -> str:
simplified_lines: List[str] = []
inside_relevant_section = False
omitted_code_added = (
False # To track if "* OMITTED CODE *" has been added after the last header
)
for line in self.source_lines:
is_header = (
"PROCEDURE DIVISION" in line
or "DATA DIVISION" in line
or "IDENTIFICATION DIVISION" in line
or self.PARAGRAPH_PATTERN.match(line.strip().split(" ")[0])
or self.SECTION_PATTERN.match(line.strip())
)
if is_header:
inside_relevant_section = True
# Reset the flag since we're entering a new section/division or
# paragraph
omitted_code_added = False
if inside_relevant_section:
if is_header:
# Add header and reset the omitted code added flag
simplified_lines.append(line)
elif not omitted_code_added:
# Add omitted code comment only if it hasn't been added directly
# after the last header
simplified_lines.append("* OMITTED CODE *")
omitted_code_added = True
return "\n".join(simplified_lines)
| [] |
2024-01-10 | Bfault/langchain | templates~rag-timescale-hybrid-search-time~rag_timescale_hybrid_search_time~load_sample_dataset.py | import os
import tempfile
from datetime import datetime, timedelta
import requests
from langchain.document_loaders import JSONLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores.timescalevector import TimescaleVector
from timescale_vector import client
def parse_date(date_string: str) -> datetime:
if date_string is None:
return None
time_format = "%a %b %d %H:%M:%S %Y %z"
return datetime.strptime(date_string, time_format)
def extract_metadata(record: dict, metadata: dict) -> dict:
dt = parse_date(record["date"])
metadata["id"] = str(client.uuid_from_time(dt))
if dt is not None:
metadata["date"] = dt.isoformat()
else:
metadata["date"] = None
metadata["author"] = record["author"]
metadata["commit_hash"] = record["commit"]
return metadata
def load_ts_git_dataset(
service_url,
collection_name="timescale_commits",
num_records: int = 500,
partition_interval=timedelta(days=7),
):
json_url = "https://s3.amazonaws.com/assets.timescale.com/ai/ts_git_log.json"
tmp_file = "ts_git_log.json"
temp_dir = tempfile.gettempdir()
json_file_path = os.path.join(temp_dir, tmp_file)
if not os.path.exists(json_file_path):
response = requests.get(json_url)
if response.status_code == 200:
with open(json_file_path, "w") as json_file:
json_file.write(response.text)
else:
print(f"Failed to download JSON file. Status code: {response.status_code}")
loader = JSONLoader(
file_path=json_file_path,
jq_schema=".commit_history[]",
text_content=False,
metadata_func=extract_metadata,
)
documents = loader.load()
# Remove documents with None dates
documents = [doc for doc in documents if doc.metadata["date"] is not None]
if num_records > 0:
documents = documents[:num_records]
# Split the documents into chunks for embedding
text_splitter = CharacterTextSplitter(
chunk_size=1000,
chunk_overlap=200,
)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
# Create a Timescale Vector instance from the collection of documents
TimescaleVector.from_documents(
embedding=embeddings,
ids=[doc.metadata["id"] for doc in docs],
documents=docs,
collection_name=collection_name,
service_url=service_url,
time_partition_interval=partition_interval,
)
| [] |
2024-01-10 | Bfault/langchain | libs~langchain~langchain~document_loaders~docusaurus.py | """Load Documents from Docusarus Documentation"""
from typing import Any, List, Optional
from langchain.document_loaders.sitemap import SitemapLoader
class DocusaurusLoader(SitemapLoader):
"""
Loader that leverages the SitemapLoader to loop through the generated pages of a
Docusaurus Documentation website and extracts the content by looking for specific
HTML tags. By default, the parser searches for the main content of the Docusaurus
page, which is normally the <article>. You also have the option to define your own
custom HTML tags by providing them as a list, for example: ["div", ".main", "a"].
"""
def __init__(
self,
url: str,
custom_html_tags: Optional[List[str]] = None,
**kwargs: Any,
):
"""
Initialize DocusaurusLoader
Args:
url: The base URL of the Docusaurus website.
custom_html_tags: Optional custom html tags to extract content from pages.
kwargs: Additional args to extend the underlying SitemapLoader, for example:
filter_urls, blocksize, meta_function, is_local, continue_on_failure
"""
if not kwargs.get("is_local"):
url = f"{url}/sitemap.xml"
self.custom_html_tags = custom_html_tags or ["main article"]
super().__init__(
url,
parsing_function=kwargs.get("parsing_function") or self._parsing_function,
**kwargs,
)
def _parsing_function(self, content: Any) -> str:
"""Parses specific elements from a Docusarus page."""
relevant_elements = content.select(",".join(self.custom_html_tags))
for element in relevant_elements:
if element not in relevant_elements:
element.decompose()
return str(content.get_text())
| [] |
2024-01-10 | Bfault/langchain | templates~rag-timescale-conversation~rag_timescale_conversation~load_sample_dataset.py | import os
import tempfile
from datetime import datetime, timedelta
import requests
from langchain.document_loaders import JSONLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores.timescalevector import TimescaleVector
from timescale_vector import client
def parse_date(date_string: str) -> datetime:
if date_string is None:
return None
time_format = "%a %b %d %H:%M:%S %Y %z"
return datetime.strptime(date_string, time_format)
def extract_metadata(record: dict, metadata: dict) -> dict:
dt = parse_date(record["date"])
metadata["id"] = str(client.uuid_from_time(dt))
if dt is not None:
metadata["date"] = dt.isoformat()
else:
metadata["date"] = None
metadata["author"] = record["author"]
metadata["commit_hash"] = record["commit"]
return metadata
def load_ts_git_dataset(
service_url,
collection_name="timescale_commits",
num_records: int = 500,
partition_interval=timedelta(days=7),
):
json_url = "https://s3.amazonaws.com/assets.timescale.com/ai/ts_git_log.json"
tmp_file = "ts_git_log.json"
temp_dir = tempfile.gettempdir()
json_file_path = os.path.join(temp_dir, tmp_file)
if not os.path.exists(json_file_path):
response = requests.get(json_url)
if response.status_code == 200:
with open(json_file_path, "w") as json_file:
json_file.write(response.text)
else:
print(f"Failed to download JSON file. Status code: {response.status_code}")
loader = JSONLoader(
file_path=json_file_path,
jq_schema=".commit_history[]",
text_content=False,
metadata_func=extract_metadata,
)
documents = loader.load()
# Remove documents with None dates
documents = [doc for doc in documents if doc.metadata["date"] is not None]
if num_records > 0:
documents = documents[:num_records]
# Split the documents into chunks for embedding
text_splitter = CharacterTextSplitter(
chunk_size=1000,
chunk_overlap=200,
)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
# Create a Timescale Vector instance from the collection of documents
TimescaleVector.from_documents(
embedding=embeddings,
ids=[doc.metadata["id"] for doc in docs],
documents=docs,
collection_name=collection_name,
service_url=service_url,
time_partition_interval=partition_interval,
)
| [] |
2024-01-10 | SanderMoon/guidance | guidance~llms~caches~_diskcache.py | import os
import diskcache
import platformdirs
from guidance.llms.caches import Cache
class DiskCache(Cache):
"""DiskCache is a cache that uses diskcache lib."""
def __init__(self, llm_name: str):
self._diskcache = diskcache.Cache(
os.path.join(
platformdirs.user_cache_dir("guidance"), f"_{llm_name}.diskcache"
)
)
def __getitem__(self, key: str) -> str:
return self._diskcache[key]
def __setitem__(self, key: str, value: str) -> None:
self._diskcache[key] = value
def __contains__(self, key: str) -> bool:
return key in self._diskcache
def clear(self):
self._diskcache.clear()
| [] |
2024-01-10 | tarkalabs/genai-workshop | test_vectordb.py | from qdrant_client import QdrantClient
from langchain.vectorstores import Qdrant
from langchain.embeddings import OpenAIEmbeddings
from env import QDRANT_URL, OPENAI_API_KEY
client = QdrantClient(url=QDRANT_URL)
embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
vector_store = Qdrant(client=client, collection_name="insurance", embeddings=embeddings)
matching_docs = vector_store.similarity_search(query="what is Domiciliary Hospitalization?",k=5)
for doc in matching_docs:
print(doc.page_content)
print("-"*20) | [] |
2024-01-10 | tarkalabs/genai-workshop | load_vectordb.py | from langchain.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Qdrant
from langchain.embeddings import OpenAIEmbeddings
from env import OPENAI_API_KEY, QDRANT_URL
loader = TextLoader("./data/insurance.txt")
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)
all_splits = text_splitter.split_documents(documents)
Qdrant.from_documents(
documents=all_splits,
embedding=OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY),
url=QDRANT_URL,
collection_name='insurance'
)
| [] |
2024-01-10 | definitive-io/human-eval-sampling-benchmark | 1_run_eval.py | import json
import os
import subprocess
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
from human_eval.data import read_problems
from langchain.callbacks import get_openai_callback
from langchain.chat_models.openai import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage
# CONFIG
DEBUG = True
PRINT_TOKEN_CONSUMPTION = False
TEMP_MIN = 0.0
TEMP_MAX = 1.0
TOP_P_MIN = 0.0
TOP_P_MAX = 1.0
STEP = 0.2
if DEBUG:
STEP = 1
def get_result(samples_path):
result = subprocess.run(
[
"evaluate_functional_correctness",
samples_path,
"1,10,100",
"4",
"3",
"data/problems.jsonl",
],
capture_output=True,
text=True,
)
return result.stdout, result.stderr
def eval_sample_problem_pair(samples_path):
# Get result of running ".venv/bin/evaluate_functional_correctness"
result, _ = get_result(samples_path)
with open(f"{samples_path}_results.txt", "w") as f:
f.write(result.splitlines()[-1])
def write_modified_problems(problems):
"""
Write modified problems to data/problems.jsonl.
The modification is because the GPT-4 model
is a chat model that doesn't do well on strict sentence completion.
So instead, we ask it to define the complete function.
This requires slightly updated HumanEval problem jsonl lines.
"""
# Print first problem, it is a dict like this:
# Iterate over dict
collect_problems = []
for key, value in problems.items():
# Create empty problem
collect_problems.append(
{
"task_id": key,
"prompt": "",
"entry_point": value["entry_point"],
"test": value["test"],
}
)
# This is just validation, our logic doesn't work if no `def ` is in the prompt
def_split = value["prompt"].split("def ")
assert len(def_split) != 1, "Prompt must always contain a 'def '"
# Write problems to JSONL with a single line for each entry in collect_problems
# be sure to convert the dict to a single line string with json.dumps()
with open("data/problems.jsonl", "w") as f:
for problem in collect_problems:
f.write(json.dumps(problem) + "\n")
# Modified to return the collect_samples dictionary
def generate_solution_for_problem(task_id, problem, chat):
with get_openai_callback() as cb:
message = chat(
[
SystemMessage(
content="You are an expert Python programmer. Implement the function provided by the user. Make sure your implementation is correct. Only output code since your output will directly be executed."
),
HumanMessage(content=problem["prompt"]),
]
)
if PRINT_TOKEN_CONSUMPTION:
print(cb)
# Assuming no import code exists after first function definition
# manual inspection of the HumanEval data showed this was the case.
def_split = problem["prompt"].split("def ")
imports_code = def_split[0]
solution = imports_code + message.content
return {
"task_id": task_id,
"completion": solution,
}
def generate_solutions_for_params(temperature, top_p, problems):
# Set max_retries really high, the concurrency will
# cause us to hit the rate limit often.
chat = ChatOpenAI(
max_retries=1000,
temperature=temperature,
model_kwargs={
"top_p": top_p,
},
)
collect_samples = []
# Generate solutions in parallel
with ThreadPoolExecutor() as executor:
future_to_task = {
executor.submit(generate_solution_for_problem, key, value, chat): (
key,
value,
)
for key, value in problems.items()
}
for future in as_completed(future_to_task):
collect_samples.append(future.result())
with open(f"data/samples_{temperature:.2f}_{top_p:.2f}_.jsonl", "w") as f:
for sample in collect_samples:
f.write(json.dumps(sample) + "\n")
def generate_solutions():
problems = read_problems()
if DEBUG:
# Only use the first two problems
problems = {k: v for k, v in list(problems.items())[:2]}
write_modified_problems(problems)
combinations = [
(temperature, top_p)
for temperature in np.arange(TEMP_MIN, TEMP_MAX + STEP, STEP)
for top_p in np.arange(TOP_P_MIN, TOP_P_MAX + STEP, STEP)
]
# Generate solutions for all combinations in parallel
with ThreadPoolExecutor() as executor:
future_to_comb = {
executor.submit(
generate_solutions_for_params, temperature, top_p, problems
): (temperature, top_p)
for temperature, top_p in combinations
}
for future in as_completed(future_to_comb):
temperature, top_p = future_to_comb[future]
print(f"Completed temperature: {temperature}, top_p: {top_p}")
def verify_generated_solutions():
# List all JSONL files in the data directory
jsonl_files = [f for f in os.listdir("data/") if f.endswith(".jsonl")]
# Separate samples and problems files
samples_files = [f for f in jsonl_files if "samples" in f and "results" not in f]
for samples_file in samples_files:
samples_path = os.path.join("data", samples_file)
print(f"Running eval() for {samples_file}.")
eval_sample_problem_pair(samples_path)
if __name__ == "__main__":
generate_solutions()
verify_generated_solutions()
| [
"You are an expert Python programmer. Implement the function provided by the user. Make sure your implementation is correct. Only output code since your output will directly be executed."
] |
2024-01-10 | mbchang/panel_simulation | dialogue.py | import random
import tenacity
from typing import List, Callable
from langchain import PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.output_parsers import RegexParser
from langchain.schema import (
HumanMessage,
SystemMessage,
)
class DialogueAgent:
def __init__(
self,
name: str,
system_message: SystemMessage,
model: ChatOpenAI,
) -> None:
self.name = name
self.system_message = system_message
self.model = model
self.prefix = f"{self.name}: "
self.reset()
def reset(self):
self.message_history = ["Here is the conversation so far."]
def send(self) -> str:
"""
Applies the chatmodel to the message history
and returns the message string
"""
message = self.model(
[
self.system_message,
HumanMessage(content="\n".join(self.message_history + [self.prefix])),
]
)
return message.content
def receive(self, name: str, message: str) -> None:
"""
Concatenates {message} spoken by {name} into message history
"""
self.message_history.append(f"{name}: {message}")
class IntegerOutputParser(RegexParser):
def get_format_instructions(self) -> str:
return "Your response should be an integer delimited by angled brackets, like this: <int>."
class DirectorDialogueAgent(DialogueAgent):
def __init__(
self,
name,
system_message: SystemMessage,
model: ChatOpenAI,
speakers: List[DialogueAgent],
stopping_probability: float,
) -> None:
super().__init__(name, system_message, model)
self.speakers = speakers
self.next_speaker = ""
self.stop = False
self.stopping_probability = stopping_probability
self.termination_clause = "Finish the conversation by stating a concluding message and thanking everyone."
self.continuation_clause = "Do not end the conversation. Keep the conversation going by adding your own ideas."
# 1. have a prompt for generating a response to the previous speaker
self.response_prompt_template = PromptTemplate(
input_variables=["message_history", "termination_clause"],
template=f"""{{message_history}}
Follow up with an insightful comment.
{{termination_clause}}
{self.prefix}
""",
)
# 2. have a prompt for deciding who to speak next
self.choice_parser = IntegerOutputParser(
regex=r"<(\d+)>", output_keys=["choice"], default_output_key="choice"
)
self.choose_next_speaker_prompt_template = PromptTemplate(
input_variables=["message_history", "speaker_names"],
template=f"""{{message_history}}
Given the above conversation, select a new speaker by choosing index next to their name:
{{speaker_names}}
{self.choice_parser.get_format_instructions()}
Do nothing else.
""",
)
# 3. have a prompt for prompting the next speaker to speak
self.prompt_next_speaker_prompt_template = PromptTemplate(
input_variables=["message_history", "next_speaker"],
template=f"""{{message_history}}
The next speaker is {{next_speaker}}.
Prompt the next speaker to speak with an insightful question.
{self.prefix}
""",
)
def _generate_response(self):
# if self.stop = True, then we will inject the prompt with a termination clause
sample = random.uniform(0, 1)
self.stop = sample < self.stopping_probability
print(f"\tStop? {self.stop}\n")
response_prompt = self.response_prompt_template.format(
message_history="\n".join(self.message_history),
termination_clause=self.termination_clause if self.stop else "",
)
self.response = self.model(
[
self.system_message,
HumanMessage(content=response_prompt),
]
).content
return self.response
@tenacity.retry(
stop=tenacity.stop_after_attempt(2),
wait=tenacity.wait_none(), # No waiting time between retries
retry=tenacity.retry_if_exception_type(ValueError),
before_sleep=lambda retry_state: print(
f"ValueError occurred: {retry_state.outcome.exception()}, retrying..."
),
retry_error_callback=lambda retry_state: 0,
) # Default value when all retries are exhausted
def _choose_next_speaker(self) -> str:
speaker_names = "\n".join(
[f"{idx}: {name}" for idx, name in enumerate(self.speakers)]
)
choice_prompt = self.choose_next_speaker_prompt_template.format(
message_history="\n".join(
self.message_history + [self.prefix] + [self.response]
),
speaker_names=speaker_names,
)
choice_string = self.model(
[
self.system_message,
HumanMessage(content=choice_prompt),
]
).content
choice = int(self.choice_parser.parse(choice_string)["choice"])
return choice
def select_next_speaker(self):
return self.chosen_speaker_id
def send(self) -> str:
"""
Applies the chatmodel to the message history
and returns the message string
"""
# 1. generate and save response to the previous speaker
self.response = self._generate_response()
if self.stop:
message = self.response
else:
# 2. decide who to speak next
self.chosen_speaker_id = self._choose_next_speaker()
self.next_speaker = self.speakers[self.chosen_speaker_id]
print(f"\tNext speaker: {self.next_speaker}\n")
# 3. prompt the next speaker to speak
next_prompt = self.prompt_next_speaker_prompt_template.format(
message_history="\n".join(
self.message_history + [self.prefix] + [self.response]
),
next_speaker=self.next_speaker,
)
message = self.model(
[
self.system_message,
HumanMessage(content=next_prompt),
]
).content
message = " ".join([self.response, message])
return message
class DialogueSimulator:
def __init__(
self,
agents: List[DialogueAgent],
selection_function: Callable[[int, List[DialogueAgent]], int],
) -> None:
self.agents = agents
self._step = 0
self.select_next_speaker = selection_function
def reset(self):
for agent in self.agents:
agent.reset()
def inject(self, name: str, message: str):
"""
Initiates the conversation with a {message} from {name}
"""
for agent in self.agents:
agent.receive(name, message)
# increment time
self._step += 1
def step(self) -> tuple[str, str]:
# 1. choose the next speaker
speaker_idx = self.select_next_speaker(self._step, self.agents)
speaker = self.agents[speaker_idx]
# 2. next speaker sends message
message = speaker.send()
# 3. everyone receives message
for receiver in self.agents:
receiver.receive(speaker.name, message)
# 4. increment time
self._step += 1
return speaker.name, message
| [
"\n"
] |
2024-01-10 | mbchang/panel_simulation | panel.py | from dataclasses import dataclass
from langchain.schema import (
SystemMessage,
)
@dataclass
class PanelistConfig:
name: str
role: str
title: str
bio: str
url: str
icon_path: str
voice: str
def url_markdown(self):
return f"[{self.name}]({self.url})"
def generate_system_message(self, conversation_description):
return SystemMessage(
content=f"""{conversation_description}
Your name is {self.name}, your role is {self.role}.
Your description is as follows: {self.bio}
Your goal is to provide the most informative, creative, and novel perspectives of the topic from the perspective of your role and your background.
Speak in the first person with the style and perspective of {self.name}.
For describing your own body movements, wrap your description in '*'.
Be concise and limit your response to 30 words.
"""
)
def validate_agent_cfgs(agent_cfgs):
directors = [agent for agent in agent_cfgs if agent.role == "director"]
assert len(directors) == 1
def get_director(agent_cfgs):
validate_agent_cfgs(agent_cfgs)
return next(agent for agent in agent_cfgs if agent.role == "director")
def get_panelists(agent_cfgs):
validate_agent_cfgs(agent_cfgs)
return [agent for agent in agent_cfgs if agent.role == "panelist"]
def get_summary(agent_cfgs):
summary = "\n- ".join(
[""] + [f"{agent.name}: {agent.title}" for agent in agent_cfgs]
)
return summary
| [] |
2024-01-10 | zwbgood6/local_chatgpt | local_chatgpt.py | import os
import openai
import gradio as gr
# step 1: Run the following command to create a new environment variable named OPENAI_API_KEY:
# export OPENAI_API_KEY=<your-api-key>
# step 2: Save the changes to your environment variables by running the following command:
# source ~/.bashrc
# step 3: You can now check if the environment variable has been set by running the following command:
# echo $OPENAI_API_KEY
openai.api_key = os.environ['OPENAI_API_KEY']
messages = [
{"role": "system",
"content": "You are a helpful AI assistant."},
]
def chat(user_input):
if user_input:
messages.append({"role": "user", "content": user_input})
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo", messages=messages
)
reply = response.choices[0].message.content
messages.append({"role": "assistant", "content": reply})
return reply
inputs = gr.inputs.Textbox(label="User input")
outputs = gr.outputs.Textbox(label="Response")
gr.Interface(
fn=chat,
inputs=inputs,
outputs=outputs,
title="ChatGPT Demo",
).launch(share=True)
| [
"You are a helpful AI assistant."
] |
2024-01-10 | JialongMei/GPT-WSD-thesis-project | custom_evaluation.py | import re
import openai
from nltk.corpus import wordnet as wn
import json
#same as evalution.py but it reads json file instead of xml and gold key txt
openai.api_key = 'replace with openai api key'
sense_keys = []
ok = 0
notok = 0
def get_word_meanings(word):
global sense_keys
sense_keys.clear()
synsets = wn.synsets(word)
for i, synset in enumerate(synsets):
sense_keys.append((i, synset.name()))
return 0
def same_check(model_question, real_answer, keyword):
response = openai.Completion.create(
engine="text-davinci-003",
prompt=model_question,
max_tokens=33,
temperature=0,
top_p=0.3,
frequency_penalty=0,
presence_penalty=0
)
get_word_meanings(keyword)
reply = response.choices[0].text.strip()
sense_key_num = re.search(r'\d+', reply)
synset_id = None
if sense_key_num:
chosen_number = int(sense_key_num.group())
if chosen_number < len(sense_keys):
synset_id = sense_keys[chosen_number][1]
print(reply)
print(synset_id)
print(real_answer)
if synset_id == real_answer:
return True
else:
return False
with open('custom_data.json', 'r') as file:
data = json.load(file)
counter = 0
for item in data:
question = item['question']
answer = item['answer']
word = item['word']
if same_check(question, answer, word) == True:
ok += 1
else:
notok += 1
counter += 1
if counter == 45:
break
precision = ok/(ok + notok)
#recall is always 1 here since the model is just doing mcq
f1 = (2*precision*1)/(precision+1)
print("ok: " + str(ok))
print("notok: " + str(notok))
print("precision: " + str(precision))
print("f1 socre: " + str(f1)) | [] |
2024-01-10 | JialongMei/GPT-WSD-thesis-project | check_status.py | import os
import openai
openai.api_key = os.getenv("OPENAI_API_KEY")
job = openai.FineTune.retrieve(id="fine-tuning job ID")
print(job)
#check fine-tuning progress from openai servers | [] |
2024-01-10 | JialongMei/GPT-WSD-thesis-project | WSD.py | import openai
import tkinter as tk
from nltk.corpus import wordnet
import re
openai.api_key = 'replace with openai api key'
def get_word_meanings(word):
synsets = wordnet.synsets(word)
if not synsets:
return "Cannot provide meaning for this word, so do not choose a number and just answer the meaning of this word in this sentence yourself"
meanings = []
for i, synset in enumerate(synsets):
meanings.append(f"{i}. {synset.definition()}")
return '\n'.join(meanings)#extra new line for stop sequences
def remove_number_dot_space(reply):
pattern = r'^\d+\.\s'
if re.match(pattern, reply):
reply = re.sub(pattern, '', reply)
return reply
def pairs():
text = text_text.get("1.0", tk.END).strip()
target_word = word_entry.get()
meanings = get_word_meanings(target_word)
prompt_content = "What is the meaning of word \"" + target_word + "\" in the sentence: \"" + text + "\"\n" + "Options" + "\n" + meanings + "\n" + "Repeat the option you selected(include the number):" + "\n"
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt_content,
max_tokens=30,
temperature=0,
top_p=0.3,
frequency_penalty=0,
presence_penalty=0
)
reply = response.choices[0].text.strip()
reply = remove_number_dot_space(reply)#reply process
Reply_text.config(state=tk.NORMAL)
Reply_text.delete(1.0, tk.END)
Reply_text.insert(tk.END, reply)
Reply_text.config(state=tk.DISABLED)
window = tk.Tk()
window.title("SenseQuery")
window.geometry("500x250")
word_label = tk.Label(window, text="Target Word")
word_label.pack()
word_entry = tk.Entry(window)
word_entry.pack()
text_label = tk.Label(window, text="Text")
text_label.pack()
text_text = tk.Text(window, height=3)
text_text.pack()
the_button = tk.Button(window, text="Submit", command=pairs)
the_button.pack()
Reply_label = tk.Label(window, text="Reply:")
Reply_label.pack()
Reply_text = tk.Text(window, height=5, state=tk.DISABLED)
Reply_text.pack()
window.mainloop() | [
"What is the meaning of word \"PLACEHOLDER\" in the sentence: \"PLACEHOLDER\"\nOptions\nPLACEHOLDER\nRepeat the option you selected(include the number):\n"
] |
2024-01-10 | windweller/AutoGrade_Pixel | autograde~train~rad_ppo2.py | import time
import gym
import numpy as np
import tensorflow as tf
from stable_baselines import logger
from stable_baselines.common import explained_variance, ActorCriticRLModel, tf_util, SetVerbosity, TensorboardWriter
from stable_baselines.common.runners import AbstractEnvRunner
from stable_baselines.common.policies import ActorCriticPolicy, RecurrentActorCriticPolicy
from stable_baselines.common.schedules import get_schedule_fn
from stable_baselines.common.tf_util import total_episode_reward_logger
from stable_baselines.common.math_util import safe_mean
class PPO2(ActorCriticRLModel):
"""
Proximal Policy Optimization algorithm (GPU version).
Paper: https://arxiv.org/abs/1707.06347
:param policy: (ActorCriticPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, CnnLstmPolicy, ...)
:param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)
:param gamma: (float) Discount factor
:param n_steps: (int) The number of steps to run for each environment per update
(i.e. batch size is n_steps * n_env where n_env is number of environment copies running in parallel)
:param ent_coef: (float) Entropy coefficient for the loss calculation
:param learning_rate: (float or callable) The learning rate, it can be a function
:param vf_coef: (float) Value function coefficient for the loss calculation
:param max_grad_norm: (float) The maximum value for the gradient clipping
:param lam: (float) Factor for trade-off of bias vs variance for Generalized Advantage Estimator
:param nminibatches: (int) Number of training minibatches per update. For recurrent policies,
the number of environments run in parallel should be a multiple of nminibatches.
:param noptepochs: (int) Number of epoch when optimizing the surrogate
:param cliprange: (float or callable) Clipping parameter, it can be a function
:param cliprange_vf: (float or callable) Clipping parameter for the value function, it can be a function.
This is a parameter specific to the OpenAI implementation. If None is passed (default),
then `cliprange` (that is used for the policy) will be used.
IMPORTANT: this clipping depends on the reward scaling.
To deactivate value function clipping (and recover the original PPO implementation),
you have to pass a negative value (e.g. -1).
:param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
:param tensorboard_log: (str) the log location for tensorboard (if None, no logging)
:param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance
:param policy_kwargs: (dict) additional arguments to be passed to the policy on creation
:param full_tensorboard_log: (bool) enable additional logging when using tensorboard
WARNING: this logging can take a lot of space quickly
:param seed: (int) Seed for the pseudo-random generators (python, numpy, tensorflow).
If None (default), use random seed. Note that if you want completely deterministic
results, you must set `n_cpu_tf_sess` to 1.
:param n_cpu_tf_sess: (int) The number of threads for TensorFlow operations
If None, the number of cpu of the current machine will be used.
"""
def __init__(self, policy, env, gamma=0.99, n_steps=128, ent_coef=0.01, learning_rate=2.5e-4, vf_coef=0.5,
max_grad_norm=0.5, lam=0.95, nminibatches=4, noptepochs=4, cliprange=0.2, cliprange_vf=None,
verbose=0, tensorboard_log=None, _init_setup_model=True, policy_kwargs=None,
full_tensorboard_log=False, seed=None, n_cpu_tf_sess=None, data_aug='normal'):
self.learning_rate = learning_rate
self.cliprange = cliprange
self.cliprange_vf = cliprange_vf
self.n_steps = n_steps
self.ent_coef = ent_coef
self.vf_coef = vf_coef
self.max_grad_norm = max_grad_norm
self.gamma = gamma
self.lam = lam
self.nminibatches = nminibatches
self.noptepochs = noptepochs
self.tensorboard_log = tensorboard_log
self.full_tensorboard_log = full_tensorboard_log
self.data_aug = data_aug
self.action_ph = None
self.advs_ph = None
self.rewards_ph = None
self.old_neglog_pac_ph = None
self.old_vpred_ph = None
self.learning_rate_ph = None
self.clip_range_ph = None
self.entropy = None
self.vf_loss = None
self.pg_loss = None
self.approxkl = None
self.clipfrac = None
self._train = None
self.loss_names = None
self.train_model = None
self.act_model = None
self.value = None
self.n_batch = None
self.summary = None
super().__init__(policy=policy, env=env, verbose=verbose, requires_vec_env=True,
_init_setup_model=_init_setup_model, policy_kwargs=policy_kwargs,
seed=seed, n_cpu_tf_sess=n_cpu_tf_sess)
if _init_setup_model:
self.setup_model()
def _make_runner(self):
return RAD_Runner(env=self.env, model=self, n_steps=self.n_steps,
gamma=self.gamma, lam=self.lam, data_aug=self.data_aug)
def _get_pretrain_placeholders(self):
policy = self.act_model
if isinstance(self.action_space, gym.spaces.Discrete):
return policy.obs_ph, self.action_ph, policy.policy
return policy.obs_ph, self.action_ph, policy.deterministic_action
def setup_model(self):
with SetVerbosity(self.verbose):
assert issubclass(self.policy, ActorCriticPolicy), "Error: the input policy for the PPO2 model must be " \
"an instance of common.policies.ActorCriticPolicy."
self.n_batch = self.n_envs * self.n_steps
self.graph = tf.Graph()
with self.graph.as_default():
self.set_random_seed(self.seed)
self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph)
n_batch_step = None
n_batch_train = None
if issubclass(self.policy, RecurrentActorCriticPolicy):
assert self.n_envs % self.nminibatches == 0, "For recurrent policies, "\
"the number of environments run in parallel should be a multiple of nminibatches."
n_batch_step = self.n_envs
n_batch_train = self.n_batch // self.nminibatches
act_model = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,
n_batch_step, reuse=False, **self.policy_kwargs)
with tf.variable_scope("train_model", reuse=True,
custom_getter=tf_util.outer_scope_getter("train_model")):
train_model = self.policy(self.sess, self.observation_space, self.action_space,
self.n_envs // self.nminibatches, self.n_steps, n_batch_train,
reuse=True, **self.policy_kwargs)
with tf.variable_scope("loss", reuse=False):
self.action_ph = train_model.pdtype.sample_placeholder([None], name="action_ph")
self.advs_ph = tf.placeholder(tf.float32, [None], name="advs_ph")
self.rewards_ph = tf.placeholder(tf.float32, [None], name="rewards_ph")
self.old_neglog_pac_ph = tf.placeholder(tf.float32, [None], name="old_neglog_pac_ph")
self.old_vpred_ph = tf.placeholder(tf.float32, [None], name="old_vpred_ph")
self.learning_rate_ph = tf.placeholder(tf.float32, [], name="learning_rate_ph")
self.clip_range_ph = tf.placeholder(tf.float32, [], name="clip_range_ph")
neglogpac = train_model.proba_distribution.neglogp(self.action_ph)
self.entropy = tf.reduce_mean(train_model.proba_distribution.entropy())
vpred = train_model.value_flat
# Value function clipping: not present in the original PPO
if self.cliprange_vf is None:
# Default behavior (legacy from OpenAI baselines):
# use the same clipping as for the policy
self.clip_range_vf_ph = self.clip_range_ph
self.cliprange_vf = self.cliprange
elif isinstance(self.cliprange_vf, (float, int)) and self.cliprange_vf < 0:
# Original PPO implementation: no value function clipping
self.clip_range_vf_ph = None
else:
# Last possible behavior: clipping range
# specific to the value function
self.clip_range_vf_ph = tf.placeholder(tf.float32, [], name="clip_range_vf_ph")
if self.clip_range_vf_ph is None:
# No clipping
vpred_clipped = train_model.value_flat
else:
# Clip the different between old and new value
# NOTE: this depends on the reward scaling
vpred_clipped = self.old_vpred_ph + \
tf.clip_by_value(train_model.value_flat - self.old_vpred_ph,
- self.clip_range_vf_ph, self.clip_range_vf_ph)
vf_losses1 = tf.square(vpred - self.rewards_ph)
vf_losses2 = tf.square(vpred_clipped - self.rewards_ph)
self.vf_loss = .5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2))
ratio = tf.exp(self.old_neglog_pac_ph - neglogpac)
pg_losses = -self.advs_ph * ratio
pg_losses2 = -self.advs_ph * tf.clip_by_value(ratio, 1.0 - self.clip_range_ph, 1.0 +
self.clip_range_ph)
self.pg_loss = tf.reduce_mean(tf.maximum(pg_losses, pg_losses2))
self.approxkl = .5 * tf.reduce_mean(tf.square(neglogpac - self.old_neglog_pac_ph))
self.clipfrac = tf.reduce_mean(tf.cast(tf.greater(tf.abs(ratio - 1.0),
self.clip_range_ph), tf.float32))
loss = self.pg_loss - self.entropy * self.ent_coef + self.vf_loss * self.vf_coef
tf.summary.scalar('entropy_loss', self.entropy)
tf.summary.scalar('policy_gradient_loss', self.pg_loss)
tf.summary.scalar('value_function_loss', self.vf_loss)
tf.summary.scalar('approximate_kullback-leibler', self.approxkl)
tf.summary.scalar('clip_factor', self.clipfrac)
tf.summary.scalar('loss', loss)
with tf.variable_scope('model'):
self.params = tf.trainable_variables()
if self.full_tensorboard_log:
for var in self.params:
tf.summary.histogram(var.name, var)
grads = tf.gradients(loss, self.params)
if self.max_grad_norm is not None:
grads, _grad_norm = tf.clip_by_global_norm(grads, self.max_grad_norm)
grads = list(zip(grads, self.params))
trainer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph, epsilon=1e-5)
self._train = trainer.apply_gradients(grads)
self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'clipfrac']
with tf.variable_scope("input_info", reuse=False):
tf.summary.scalar('discounted_rewards', tf.reduce_mean(self.rewards_ph))
tf.summary.scalar('learning_rate', tf.reduce_mean(self.learning_rate_ph))
tf.summary.scalar('advantage', tf.reduce_mean(self.advs_ph))
tf.summary.scalar('clip_range', tf.reduce_mean(self.clip_range_ph))
if self.clip_range_vf_ph is not None:
tf.summary.scalar('clip_range_vf', tf.reduce_mean(self.clip_range_vf_ph))
tf.summary.scalar('old_neglog_action_probability', tf.reduce_mean(self.old_neglog_pac_ph))
tf.summary.scalar('old_value_pred', tf.reduce_mean(self.old_vpred_ph))
if self.full_tensorboard_log:
tf.summary.histogram('discounted_rewards', self.rewards_ph)
tf.summary.histogram('learning_rate', self.learning_rate_ph)
tf.summary.histogram('advantage', self.advs_ph)
tf.summary.histogram('clip_range', self.clip_range_ph)
tf.summary.histogram('old_neglog_action_probability', self.old_neglog_pac_ph)
tf.summary.histogram('old_value_pred', self.old_vpred_ph)
if tf_util.is_image(self.observation_space):
tf.summary.image('observation', train_model.obs_ph)
else:
tf.summary.histogram('observation', train_model.obs_ph)
self.train_model = train_model
self.act_model = act_model
self.step = act_model.step
self.proba_step = act_model.proba_step
self.value = act_model.value
self.initial_state = act_model.initial_state
tf.global_variables_initializer().run(session=self.sess) # pylint: disable=E1101
self.summary = tf.summary.merge_all()
def _train_step(self, learning_rate, cliprange, obs, returns, masks, actions, values, neglogpacs, update,
writer, states=None, cliprange_vf=None):
"""
Training of PPO2 Algorithm
:param learning_rate: (float) learning rate
:param cliprange: (float) Clipping factor
:param obs: (np.ndarray) The current observation of the environment
:param returns: (np.ndarray) the rewards
:param masks: (np.ndarray) The last masks for done episodes (used in recurent policies)
:param actions: (np.ndarray) the actions
:param values: (np.ndarray) the values
:param neglogpacs: (np.ndarray) Negative Log-likelihood probability of Actions
:param update: (int) the current step iteration
:param writer: (TensorFlow Summary.writer) the writer for tensorboard
:param states: (np.ndarray) For recurrent policies, the internal state of the recurrent model
:return: policy gradient loss, value function loss, policy entropy,
approximation of kl divergence, updated clipping range, training update operation
:param cliprange_vf: (float) Clipping factor for the value function
"""
advs = returns - values
advs = (advs - advs.mean()) / (advs.std() + 1e-8)
td_map = {self.train_model.obs_ph: obs, self.action_ph: actions,
self.advs_ph: advs, self.rewards_ph: returns,
self.learning_rate_ph: learning_rate, self.clip_range_ph: cliprange,
self.old_neglog_pac_ph: neglogpacs, self.old_vpred_ph: values}
if states is not None:
td_map[self.train_model.states_ph] = states
td_map[self.train_model.dones_ph] = masks
if cliprange_vf is not None and cliprange_vf >= 0:
td_map[self.clip_range_vf_ph] = cliprange_vf
if states is None:
update_fac = self.n_batch // self.nminibatches // self.noptepochs + 1
else:
update_fac = self.n_batch // self.nminibatches // self.noptepochs // self.n_steps + 1
if writer is not None:
# run loss backprop with summary, but once every 10 runs save the metadata (memory, compute time, ...)
if self.full_tensorboard_log and (1 + update) % 10 == 0:
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
summary, policy_loss, value_loss, policy_entropy, approxkl, clipfrac, _ = self.sess.run(
[self.summary, self.pg_loss, self.vf_loss, self.entropy, self.approxkl, self.clipfrac, self._train],
td_map, options=run_options, run_metadata=run_metadata)
writer.add_run_metadata(run_metadata, 'step%d' % (update * update_fac))
else:
summary, policy_loss, value_loss, policy_entropy, approxkl, clipfrac, _ = self.sess.run(
[self.summary, self.pg_loss, self.vf_loss, self.entropy, self.approxkl, self.clipfrac, self._train],
td_map)
writer.add_summary(summary, (update * update_fac))
else:
policy_loss, value_loss, policy_entropy, approxkl, clipfrac, _ = self.sess.run(
[self.pg_loss, self.vf_loss, self.entropy, self.approxkl, self.clipfrac, self._train], td_map)
return policy_loss, value_loss, policy_entropy, approxkl, clipfrac
def learn(self, total_timesteps, callback=None, log_interval=1, tb_log_name="PPO2",
reset_num_timesteps=True):
# Transform to callable if needed
self.learning_rate = get_schedule_fn(self.learning_rate)
self.cliprange = get_schedule_fn(self.cliprange)
cliprange_vf = get_schedule_fn(self.cliprange_vf)
new_tb_log = self._init_num_timesteps(reset_num_timesteps)
callback = self._init_callback(callback)
with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
as writer:
self._setup_learn()
t_first_start = time.time()
n_updates = total_timesteps // self.n_batch
callback.on_training_start(locals(), globals())
for update in range(1, n_updates + 1):
assert self.n_batch % self.nminibatches == 0, ("The number of minibatches (`nminibatches`) "
"is not a factor of the total number of samples "
"collected per rollout (`n_batch`), "
"some samples won't be used."
)
batch_size = self.n_batch // self.nminibatches
t_start = time.time()
frac = 1.0 - (update - 1.0) / n_updates
lr_now = self.learning_rate(frac)
cliprange_now = self.cliprange(frac)
cliprange_vf_now = cliprange_vf(frac)
callback.on_rollout_start()
# true_reward is the reward without discount
rollout = self.runner.run(callback)
# Unpack
obs, returns, masks, actions, values, neglogpacs, states, ep_infos, true_reward = rollout
callback.on_rollout_end()
# Early stopping due to the callback
if not self.runner.continue_training:
break
self.ep_info_buf.extend(ep_infos)
mb_loss_vals = []
if states is None: # nonrecurrent version
update_fac = self.n_batch // self.nminibatches // self.noptepochs + 1
inds = np.arange(self.n_batch)
for epoch_num in range(self.noptepochs):
np.random.shuffle(inds)
for start in range(0, self.n_batch, batch_size):
timestep = self.num_timesteps // update_fac + ((self.noptepochs * self.n_batch + epoch_num *
self.n_batch + start) // batch_size)
end = start + batch_size
mbinds = inds[start:end]
slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mb_loss_vals.append(self._train_step(lr_now, cliprange_now, *slices, writer=writer,
update=timestep, cliprange_vf=cliprange_vf_now))
else: # recurrent version
update_fac = self.n_batch // self.nminibatches // self.noptepochs // self.n_steps + 1
assert self.n_envs % self.nminibatches == 0
env_indices = np.arange(self.n_envs)
flat_indices = np.arange(self.n_envs * self.n_steps).reshape(self.n_envs, self.n_steps)
envs_per_batch = batch_size // self.n_steps
for epoch_num in range(self.noptepochs):
np.random.shuffle(env_indices)
for start in range(0, self.n_envs, envs_per_batch):
timestep = self.num_timesteps // update_fac + ((self.noptepochs * self.n_envs + epoch_num *
self.n_envs + start) // envs_per_batch)
end = start + envs_per_batch
mb_env_inds = env_indices[start:end]
mb_flat_inds = flat_indices[mb_env_inds].ravel()
slices = (arr[mb_flat_inds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mb_states = states[mb_env_inds]
mb_loss_vals.append(self._train_step(lr_now, cliprange_now, *slices, update=timestep,
writer=writer, states=mb_states,
cliprange_vf=cliprange_vf_now))
loss_vals = np.mean(mb_loss_vals, axis=0)
t_now = time.time()
fps = int(self.n_batch / (t_now - t_start))
if writer is not None:
total_episode_reward_logger(self.episode_reward,
true_reward.reshape((self.n_envs, self.n_steps)),
masks.reshape((self.n_envs, self.n_steps)),
writer, self.num_timesteps)
if self.verbose >= 1 and (update % log_interval == 0 or update == 1):
explained_var = explained_variance(values, returns)
logger.logkv("serial_timesteps", update * self.n_steps)
logger.logkv("n_updates", update)
logger.logkv("total_timesteps", self.num_timesteps)
logger.logkv("fps", fps)
logger.logkv("explained_variance", float(explained_var))
if len(self.ep_info_buf) > 0 and len(self.ep_info_buf[0]) > 0:
logger.logkv('ep_reward_mean', safe_mean([ep_info['r'] for ep_info in self.ep_info_buf]))
logger.logkv('ep_len_mean', safe_mean([ep_info['l'] for ep_info in self.ep_info_buf]))
logger.logkv('time_elapsed', t_start - t_first_start)
for (loss_val, loss_name) in zip(loss_vals, self.loss_names):
logger.logkv(loss_name, loss_val)
logger.dumpkvs()
callback.on_training_end()
return self
def save(self, save_path, cloudpickle=False):
data = {
"gamma": self.gamma,
"n_steps": self.n_steps,
"vf_coef": self.vf_coef,
"ent_coef": self.ent_coef,
"max_grad_norm": self.max_grad_norm,
"learning_rate": self.learning_rate,
"lam": self.lam,
"nminibatches": self.nminibatches,
"noptepochs": self.noptepochs,
"cliprange": self.cliprange,
"cliprange_vf": self.cliprange_vf,
"verbose": self.verbose,
"policy": self.policy,
"observation_space": self.observation_space,
"action_space": self.action_space,
"n_envs": self.n_envs,
"n_cpu_tf_sess": self.n_cpu_tf_sess,
"seed": self.seed,
"_vectorize_action": self._vectorize_action,
"policy_kwargs": self.policy_kwargs
}
params_to_save = self.get_parameters()
self._save_to_file(save_path, data=data, params=params_to_save, cloudpickle=cloudpickle)
import data_augs as rad
class RAD_Runner(AbstractEnvRunner):
def __init__(self, *, env, model, n_steps, gamma, lam, data_aug):
"""
A runner to learn the policy of an environment for a model
:param env: (Gym environment) The environment to learn from
:param model: (Model) The model to learn
:param n_steps: (int) The number of steps to run for each environment
:param gamma: (float) Discount factor
:param lam: (float) Factor for trade-off of bias vs variance for Generalized Advantage Estimator
"""
super().__init__(env=env, model=model, n_steps=n_steps)
self.lam = lam
self.gamma = gamma
self.data_aug = data_aug
# we don't need "eval_flag" because we directly use model.predict()
# in eval.
nenvs = self.obs.shape[0]
self.augs_funcs = None
if self.data_aug != 'normal': # and self.eval_flag is False:
aug_to_func = {
'gray': rad.RandGray,
'cutout': rad.Cutout,
'cutout_color': rad.Cutout_Color,
'flip': rad.Rand_Flip,
'rotate': rad.Rand_Rotate,
'color_jitter': rad.ColorJitterLayer,
'crop': rad.Rand_Crop,
}
self.augs_funcs = aug_to_func[data_aug](batch_size=nenvs, p_gray=0.8)
self.obs = self.augs_funcs.do_augmentation(self.obs)
def _run(self):
"""
Run a learning step of the model
:return:
- observations: (np.ndarray) the observations
- rewards: (np.ndarray) the rewards
- masks: (numpy bool) whether an episode is over or not
- actions: (np.ndarray) the actions
- values: (np.ndarray) the value function output
- negative log probabilities: (np.ndarray)
- states: (np.ndarray) the internal states of the recurrent policies
- infos: (dict) the extra information of the model
"""
# mb stands for minibatch
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [], [], [], [], [], []
mb_states = self.states
ep_infos = []
for _ in range(self.n_steps):
actions, values, self.states, neglogpacs = self.model.step(self.obs, self.states, self.dones)
mb_obs.append(self.obs.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(self.dones)
clipped_actions = actions
# Clip the actions to avoid out of bound error
if isinstance(self.env.action_space, gym.spaces.Box):
clipped_actions = np.clip(actions, self.env.action_space.low, self.env.action_space.high)
obs, rewards, self.dones, infos = self.env.step(clipped_actions)
# self.obs[:], rewards, self.dones, infos = self.env.step(clipped_actions)
# augment the observation here
if self.data_aug != 'normal':
# This is the "training" condition
self.obs[:] = self.augs_funcs.do_augmentation(obs)
else:
self.obs[:] = obs
self.model.num_timesteps += self.n_envs
if self.callback is not None:
# Abort training early
if self.callback.on_step() is False:
self.continue_training = False
# Return dummy values
return [None] * 9
for info in infos:
maybe_ep_info = info.get('episode')
if maybe_ep_info is not None:
ep_infos.append(maybe_ep_info)
mb_rewards.append(rewards)
# batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
mb_actions = np.asarray(mb_actions)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=np.bool)
last_values = self.model.value(self.obs, self.states, self.dones)
# discount/bootstrap off value fn
mb_advs = np.zeros_like(mb_rewards)
true_reward = np.copy(mb_rewards)
last_gae_lam = 0
for step in reversed(range(self.n_steps)):
if step == self.n_steps - 1:
nextnonterminal = 1.0 - self.dones
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[step + 1]
nextvalues = mb_values[step + 1]
delta = mb_rewards[step] + self.gamma * nextvalues * nextnonterminal - mb_values[step]
mb_advs[step] = last_gae_lam = delta + self.gamma * self.lam * nextnonterminal * last_gae_lam
mb_returns = mb_advs + mb_values
mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, true_reward = \
map(swap_and_flatten, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, true_reward))
return mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, mb_states, ep_infos, true_reward
# obs, returns, masks, actions, values, neglogpacs, states = runner.run()
def swap_and_flatten(arr):
"""
swap and then flatten axes 0 and 1
:param arr: (np.ndarray)
:return: (np.ndarray)
"""
shape = arr.shape
return arr.swapaxes(0, 1).reshape(shape[0] * shape[1], *shape[2:])
| [] |
2024-01-10 | geekan/MetaGPT | tests~metagpt~provider~test_fireworks_api.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : the unittest of fireworks api
import pytest
from openai.types.chat.chat_completion import (
ChatCompletion,
ChatCompletionMessage,
Choice,
)
from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
from openai.types.chat.chat_completion_chunk import Choice as AChoice
from openai.types.chat.chat_completion_chunk import ChoiceDelta
from openai.types.completion_usage import CompletionUsage
from metagpt.config import CONFIG
from metagpt.provider.fireworks_api import (
MODEL_GRADE_TOKEN_COSTS,
FireworksCostManager,
FireworksLLM,
)
from metagpt.utils.cost_manager import Costs
CONFIG.fireworks_api_key = "xxx"
CONFIG.max_budget = 10
CONFIG.calc_usage = True
resp_content = "I'm fireworks"
default_resp = ChatCompletion(
id="cmpl-a6652c1bb181caae8dd19ad8",
model="accounts/fireworks/models/llama-v2-13b-chat",
object="chat.completion",
created=1703300855,
choices=[
Choice(
finish_reason="stop",
index=0,
message=ChatCompletionMessage(role="assistant", content=resp_content),
logprobs=None,
)
],
usage=CompletionUsage(completion_tokens=110, prompt_tokens=92, total_tokens=202),
)
default_resp_chunk = ChatCompletionChunk(
id=default_resp.id,
model=default_resp.model,
object="chat.completion.chunk",
created=default_resp.created,
choices=[
AChoice(
delta=ChoiceDelta(content=resp_content, role="assistant"),
finish_reason="stop",
index=0,
logprobs=None,
)
],
usage=dict(default_resp.usage),
)
prompt_msg = "who are you"
messages = [{"role": "user", "content": prompt_msg}]
def test_fireworks_costmanager():
cost_manager = FireworksCostManager()
assert MODEL_GRADE_TOKEN_COSTS["-1"] == cost_manager.model_grade_token_costs("test")
assert MODEL_GRADE_TOKEN_COSTS["-1"] == cost_manager.model_grade_token_costs("xxx-81b-chat")
assert MODEL_GRADE_TOKEN_COSTS["16"] == cost_manager.model_grade_token_costs("llama-v2-13b-chat")
assert MODEL_GRADE_TOKEN_COSTS["16"] == cost_manager.model_grade_token_costs("xxx-15.5b-chat")
assert MODEL_GRADE_TOKEN_COSTS["16"] == cost_manager.model_grade_token_costs("xxx-16b-chat")
assert MODEL_GRADE_TOKEN_COSTS["80"] == cost_manager.model_grade_token_costs("xxx-80b-chat")
assert MODEL_GRADE_TOKEN_COSTS["mixtral-8x7b"] == cost_manager.model_grade_token_costs("mixtral-8x7b-chat")
cost_manager.update_cost(prompt_tokens=500000, completion_tokens=500000, model="llama-v2-13b-chat")
assert cost_manager.total_cost == 0.5
async def mock_openai_acompletions_create(self, stream: bool = False, **kwargs) -> ChatCompletionChunk:
if stream:
class Iterator(object):
async def __aiter__(self):
yield default_resp_chunk
return Iterator()
else:
return default_resp
@pytest.mark.asyncio
async def test_fireworks_acompletion(mocker):
mocker.patch("openai.resources.chat.completions.AsyncCompletions.create", mock_openai_acompletions_create)
fireworks_gpt = FireworksLLM()
fireworks_gpt.model = "llama-v2-13b-chat"
fireworks_gpt._update_costs(
usage=CompletionUsage(prompt_tokens=500000, completion_tokens=500000, total_tokens=1000000)
)
assert fireworks_gpt.get_costs() == Costs(
total_prompt_tokens=500000, total_completion_tokens=500000, total_cost=0.5, total_budget=0
)
resp = await fireworks_gpt.acompletion(messages)
assert resp.choices[0].message.content in resp_content
resp = await fireworks_gpt.aask(prompt_msg, stream=False)
assert resp == resp_content
resp = await fireworks_gpt.acompletion_text(messages, stream=False)
assert resp == resp_content
resp = await fireworks_gpt.acompletion_text(messages, stream=True)
assert resp == resp_content
resp = await fireworks_gpt.aask(prompt_msg)
assert resp == resp_content
| [
"I'm fireworks",
"who are you"
] |
2024-01-10 | geekan/MetaGPT | examples~search_kb.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@File : search_kb.py
@Modified By: mashenquan, 2023-12-22. Delete useless codes.
"""
import asyncio
from langchain.embeddings import OpenAIEmbeddings
from metagpt.config import CONFIG
from metagpt.const import DATA_PATH, EXAMPLE_PATH
from metagpt.document_store import FaissStore
from metagpt.logs import logger
from metagpt.roles import Sales
def get_store():
embedding = OpenAIEmbeddings(openai_api_key=CONFIG.openai_api_key, openai_api_base=CONFIG.openai_base_url)
return FaissStore(DATA_PATH / "example.json", embedding=embedding)
async def search():
store = FaissStore(EXAMPLE_PATH / "example.json")
role = Sales(profile="Sales", store=store)
query = "Which facial cleanser is good for oily skin?"
result = await role.run(query)
logger.info(result)
if __name__ == "__main__":
asyncio.run(search())
| [] |
2024-01-10 | geekan/MetaGPT | tests~metagpt~provider~test_anthropic_api.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : the unittest of Claude2
import pytest
from anthropic.resources.completions import Completion
from metagpt.config import CONFIG
from metagpt.provider.anthropic_api import Claude2
CONFIG.anthropic_api_key = "xxx"
prompt = "who are you"
resp = "I'am Claude2"
def mock_anthropic_completions_create(self, model: str, prompt: str, max_tokens_to_sample: int) -> Completion:
return Completion(id="xx", completion=resp, model="claude-2", stop_reason="stop_sequence", type="completion")
async def mock_anthropic_acompletions_create(self, model: str, prompt: str, max_tokens_to_sample: int) -> Completion:
return Completion(id="xx", completion=resp, model="claude-2", stop_reason="stop_sequence", type="completion")
def test_claude2_ask(mocker):
mocker.patch("anthropic.resources.completions.Completions.create", mock_anthropic_completions_create)
assert resp == Claude2().ask(prompt)
@pytest.mark.asyncio
async def test_claude2_aask(mocker):
mocker.patch("anthropic.resources.completions.AsyncCompletions.create", mock_anthropic_acompletions_create)
assert resp == await Claude2().aask(prompt)
| [
"who are you"
] |
2024-01-10 | geekan/MetaGPT | metagpt~document_store~faiss_store.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/25 10:20
@Author : alexanderwu
@File : faiss_store.py
"""
import asyncio
from pathlib import Path
from typing import Optional
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain_core.embeddings import Embeddings
from metagpt.config import CONFIG
from metagpt.document import IndexableDocument
from metagpt.document_store.base_store import LocalStore
from metagpt.logs import logger
class FaissStore(LocalStore):
def __init__(
self, raw_data: Path, cache_dir=None, meta_col="source", content_col="output", embedding: Embeddings = None
):
self.meta_col = meta_col
self.content_col = content_col
self.embedding = embedding or OpenAIEmbeddings(
openai_api_key=CONFIG.openai_api_key, openai_api_base=CONFIG.openai_base_url
)
super().__init__(raw_data, cache_dir)
def _load(self) -> Optional["FaissStore"]:
index_file, store_file = self._get_index_and_store_fname(index_ext=".faiss") # langchain FAISS using .faiss
if not (index_file.exists() and store_file.exists()):
logger.info("Missing at least one of index_file/store_file, load failed and return None")
return None
return FAISS.load_local(self.raw_data_path.parent, self.embedding, self.fname)
def _write(self, docs, metadatas):
store = FAISS.from_texts(docs, self.embedding, metadatas=metadatas)
return store
def persist(self):
self.store.save_local(self.raw_data_path.parent, self.fname)
def search(self, query, expand_cols=False, sep="\n", *args, k=5, **kwargs):
rsp = self.store.similarity_search(query, k=k, **kwargs)
logger.debug(rsp)
if expand_cols:
return str(sep.join([f"{x.page_content}: {x.metadata}" for x in rsp]))
else:
return str(sep.join([f"{x.page_content}" for x in rsp]))
async def asearch(self, *args, **kwargs):
return await asyncio.to_thread(self.search, *args, **kwargs)
def write(self):
"""Initialize the index and library based on the Document (JSON / XLSX, etc.) file provided by the user."""
if not self.raw_data_path.exists():
raise FileNotFoundError
doc = IndexableDocument.from_path(self.raw_data_path, self.content_col, self.meta_col)
docs, metadatas = doc.get_docs_and_metadatas()
self.store = self._write(docs, metadatas)
self.persist()
return self.store
def add(self, texts: list[str], *args, **kwargs) -> list[str]:
"""FIXME: Currently, the store is not updated after adding."""
return self.store.add_texts(texts)
def delete(self, *args, **kwargs):
"""Currently, langchain does not provide a delete interface."""
raise NotImplementedError
| [] |
2024-01-10 | geekan/MetaGPT | metagpt~provider~azure_openai_api.py | # -*- coding: utf-8 -*-
"""
@Time : 2023/5/5 23:08
@Author : alexanderwu
@File : openai.py
@Modified By: mashenquan, 2023/8/20. Remove global configuration `CONFIG`, enable configuration support for business isolation;
Change cost control from global to company level.
@Modified By: mashenquan, 2023/11/21. Fix bug: ReadTimeout.
@Modified By: mashenquan, 2023/12/1. Fix bug: Unclosed connection caused by openai 0.x.
"""
from openai import AsyncAzureOpenAI
from openai._base_client import AsyncHttpxClientWrapper
from metagpt.config import LLMProviderEnum
from metagpt.provider.llm_provider_registry import register_provider
from metagpt.provider.openai_api import OpenAILLM
@register_provider(LLMProviderEnum.AZURE_OPENAI)
class AzureOpenAILLM(OpenAILLM):
"""
Check https://platform.openai.com/examples for examples
"""
def _init_client(self):
kwargs = self._make_client_kwargs()
# https://learn.microsoft.com/zh-cn/azure/ai-services/openai/how-to/migration?tabs=python-new%2Cdalle-fix
self.aclient = AsyncAzureOpenAI(**kwargs)
self.model = self.config.DEPLOYMENT_NAME # Used in _calc_usage & _cons_kwargs
def _make_client_kwargs(self) -> dict:
kwargs = dict(
api_key=self.config.OPENAI_API_KEY,
api_version=self.config.OPENAI_API_VERSION,
azure_endpoint=self.config.OPENAI_BASE_URL,
)
# to use proxy, openai v1 needs http_client
proxy_params = self._get_proxy_params()
if proxy_params:
kwargs["http_client"] = AsyncHttpxClientWrapper(**proxy_params)
return kwargs
| [] |
2024-01-10 | geekan/MetaGPT | tests~metagpt~test_gpt.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/4/29 19:47
@Author : alexanderwu
@File : test_gpt.py
"""
import openai
import pytest
from metagpt.config import CONFIG
from metagpt.logs import logger
@pytest.mark.usefixtures("llm_api")
class TestGPT:
@pytest.mark.asyncio
async def test_llm_api_aask(self, llm_api):
answer = await llm_api.aask("hello chatgpt", stream=False)
logger.info(answer)
assert len(answer) > 0
answer = await llm_api.aask("hello chatgpt", stream=True)
logger.info(answer)
assert len(answer) > 0
@pytest.mark.asyncio
async def test_llm_api_aask_code(self, llm_api):
try:
answer = await llm_api.aask_code(["请扮演一个Google Python专家工程师,如果理解,回复明白", "写一个hello world"], timeout=60)
logger.info(answer)
assert len(answer) > 0
except openai.BadRequestError:
assert CONFIG.OPENAI_API_TYPE == "azure"
@pytest.mark.asyncio
async def test_llm_api_costs(self, llm_api):
await llm_api.aask("hello chatgpt", stream=False)
costs = llm_api.get_costs()
logger.info(costs)
assert costs.total_cost > 0
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| [] |
2024-01-10 | geekan/MetaGPT | metagpt~provider~__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/5 22:59
@Author : alexanderwu
@File : __init__.py
"""
from metagpt.provider.fireworks_api import FireworksLLM
from metagpt.provider.google_gemini_api import GeminiLLM
from metagpt.provider.ollama_api import OllamaLLM
from metagpt.provider.open_llm_api import OpenLLM
from metagpt.provider.openai_api import OpenAILLM
from metagpt.provider.zhipuai_api import ZhiPuAILLM
from metagpt.provider.azure_openai_api import AzureOpenAILLM
from metagpt.provider.metagpt_api import MetaGPTLLM
__all__ = [
"FireworksLLM",
"GeminiLLM",
"OpenLLM",
"OpenAILLM",
"ZhiPuAILLM",
"AzureOpenAILLM",
"MetaGPTLLM",
"OllamaLLM",
]
| [] |
2024-01-10 | geekan/MetaGPT | metagpt~provider~zhipuai_api.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : zhipuai LLM from https://open.bigmodel.cn/dev/api#sdk
import json
from enum import Enum
import openai
import zhipuai
from requests import ConnectionError
from tenacity import (
after_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_random_exponential,
)
from metagpt.config import CONFIG, LLMProviderEnum
from metagpt.logs import log_llm_stream, logger
from metagpt.provider.base_llm import BaseLLM
from metagpt.provider.llm_provider_registry import register_provider
from metagpt.provider.openai_api import log_and_reraise
from metagpt.provider.zhipuai.zhipu_model_api import ZhiPuModelAPI
class ZhiPuEvent(Enum):
ADD = "add"
ERROR = "error"
INTERRUPTED = "interrupted"
FINISH = "finish"
@register_provider(LLMProviderEnum.ZHIPUAI)
class ZhiPuAILLM(BaseLLM):
"""
Refs to `https://open.bigmodel.cn/dev/api#chatglm_turbo`
From now, there is only one model named `chatglm_turbo`
"""
def __init__(self):
self.__init_zhipuai(CONFIG)
self.llm = ZhiPuModelAPI
self.model = "chatglm_turbo" # so far only one model, just use it
self.use_system_prompt: bool = False # zhipuai has no system prompt when use api
def __init_zhipuai(self, config: CONFIG):
assert config.zhipuai_api_key
zhipuai.api_key = config.zhipuai_api_key
# due to use openai sdk, set the api_key but it will't be used.
# openai.api_key = zhipuai.api_key # due to use openai sdk, set the api_key but it will't be used.
if config.openai_proxy:
# FIXME: openai v1.x sdk has no proxy support
openai.proxy = config.openai_proxy
def _const_kwargs(self, messages: list[dict]) -> dict:
kwargs = {"model": self.model, "prompt": messages, "temperature": 0.3}
return kwargs
def _update_costs(self, usage: dict):
"""update each request's token cost"""
if CONFIG.calc_usage:
try:
prompt_tokens = int(usage.get("prompt_tokens", 0))
completion_tokens = int(usage.get("completion_tokens", 0))
CONFIG.cost_manager.update_cost(prompt_tokens, completion_tokens, self.model)
except Exception as e:
logger.error(f"zhipuai updats costs failed! exp: {e}")
def get_choice_text(self, resp: dict) -> str:
"""get the first text of choice from llm response"""
assist_msg = resp.get("data", {}).get("choices", [{"role": "error"}])[-1]
assert assist_msg["role"] == "assistant"
return assist_msg.get("content")
def completion(self, messages: list[dict], timeout=3) -> dict:
resp = self.llm.invoke(**self._const_kwargs(messages))
usage = resp.get("data").get("usage")
self._update_costs(usage)
return resp
async def _achat_completion(self, messages: list[dict], timeout=3) -> dict:
resp = await self.llm.ainvoke(**self._const_kwargs(messages))
usage = resp.get("data").get("usage")
self._update_costs(usage)
return resp
async def acompletion(self, messages: list[dict], timeout=3) -> dict:
return await self._achat_completion(messages, timeout=timeout)
async def _achat_completion_stream(self, messages: list[dict], timeout=3) -> str:
response = await self.llm.asse_invoke(**self._const_kwargs(messages))
collected_content = []
usage = {}
async for event in response.async_events():
if event.event == ZhiPuEvent.ADD.value:
content = event.data
collected_content.append(content)
log_llm_stream(content)
elif event.event == ZhiPuEvent.ERROR.value or event.event == ZhiPuEvent.INTERRUPTED.value:
content = event.data
logger.error(f"event error: {content}", end="")
elif event.event == ZhiPuEvent.FINISH.value:
"""
event.meta
{
"task_status":"SUCCESS",
"usage":{
"completion_tokens":351,
"prompt_tokens":595,
"total_tokens":946
},
"task_id":"xx",
"request_id":"xxx"
}
"""
meta = json.loads(event.meta)
usage = meta.get("usage")
else:
print(f"zhipuapi else event: {event.data}", end="")
self._update_costs(usage)
full_content = "".join(collected_content)
return full_content
@retry(
stop=stop_after_attempt(3),
wait=wait_random_exponential(min=1, max=60),
after=after_log(logger, logger.level("WARNING").name),
retry=retry_if_exception_type(ConnectionError),
retry_error_callback=log_and_reraise,
)
async def acompletion_text(self, messages: list[dict], stream=False, timeout=3) -> str:
"""response in async with stream or non-stream mode"""
if stream:
return await self._achat_completion_stream(messages)
resp = await self._achat_completion(messages)
return self.get_choice_text(resp)
| [
"prompt_tokens"
] |
2024-01-10 | geekan/MetaGPT | metagpt~document.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/6/8 14:03
@Author : alexanderwu
@File : document.py
@Desc : Classes and Operations Related to Files in the File System.
"""
from enum import Enum
from pathlib import Path
from typing import Optional, Union
import pandas as pd
from langchain.document_loaders import (
TextLoader,
UnstructuredPDFLoader,
UnstructuredWordDocumentLoader,
)
from langchain.text_splitter import CharacterTextSplitter
from pydantic import BaseModel, ConfigDict, Field
from tqdm import tqdm
from metagpt.repo_parser import RepoParser
def validate_cols(content_col: str, df: pd.DataFrame):
if content_col not in df.columns:
raise ValueError("Content column not found in DataFrame.")
def read_data(data_path: Path):
suffix = data_path.suffix
if ".xlsx" == suffix:
data = pd.read_excel(data_path)
elif ".csv" == suffix:
data = pd.read_csv(data_path)
elif ".json" == suffix:
data = pd.read_json(data_path)
elif suffix in (".docx", ".doc"):
data = UnstructuredWordDocumentLoader(str(data_path), mode="elements").load()
elif ".txt" == suffix:
data = TextLoader(str(data_path)).load()
text_splitter = CharacterTextSplitter(separator="\n", chunk_size=256, chunk_overlap=0)
texts = text_splitter.split_documents(data)
data = texts
elif ".pdf" == suffix:
data = UnstructuredPDFLoader(str(data_path), mode="elements").load()
else:
raise NotImplementedError("File format not supported.")
return data
class DocumentStatus(Enum):
"""Indicates document status, a mechanism similar to RFC/PEP"""
DRAFT = "draft"
UNDERREVIEW = "underreview"
APPROVED = "approved"
DONE = "done"
class Document(BaseModel):
"""
Document: Handles operations related to document files.
"""
path: Path = Field(default=None)
name: str = Field(default="")
content: str = Field(default="")
# metadata? in content perhaps.
author: str = Field(default="")
status: DocumentStatus = Field(default=DocumentStatus.DRAFT)
reviews: list = Field(default_factory=list)
@classmethod
def from_path(cls, path: Path):
"""
Create a Document instance from a file path.
"""
if not path.exists():
raise FileNotFoundError(f"File {path} not found.")
content = path.read_text()
return cls(content=content, path=path)
@classmethod
def from_text(cls, text: str, path: Optional[Path] = None):
"""
Create a Document from a text string.
"""
return cls(content=text, path=path)
def to_path(self, path: Optional[Path] = None):
"""
Save content to the specified file path.
"""
if path is not None:
self.path = path
if self.path is None:
raise ValueError("File path is not set.")
self.path.parent.mkdir(parents=True, exist_ok=True)
# TODO: excel, csv, json, etc.
self.path.write_text(self.content, encoding="utf-8")
def persist(self):
"""
Persist document to disk.
"""
return self.to_path()
class IndexableDocument(Document):
"""
Advanced document handling: For vector databases or search engines.
"""
model_config = ConfigDict(arbitrary_types_allowed=True)
data: Union[pd.DataFrame, list]
content_col: Optional[str] = Field(default="")
meta_col: Optional[str] = Field(default="")
@classmethod
def from_path(cls, data_path: Path, content_col="content", meta_col="metadata"):
if not data_path.exists():
raise FileNotFoundError(f"File {data_path} not found.")
data = read_data(data_path)
if isinstance(data, pd.DataFrame):
validate_cols(content_col, data)
return cls(data=data, content=str(data), content_col=content_col, meta_col=meta_col)
else:
content = data_path.read_text()
return cls(data=data, content=content, content_col=content_col, meta_col=meta_col)
def _get_docs_and_metadatas_by_df(self) -> (list, list):
df = self.data
docs = []
metadatas = []
for i in tqdm(range(len(df))):
docs.append(df[self.content_col].iloc[i])
if self.meta_col:
metadatas.append({self.meta_col: df[self.meta_col].iloc[i]})
else:
metadatas.append({})
return docs, metadatas
def _get_docs_and_metadatas_by_langchain(self) -> (list, list):
data = self.data
docs = [i.page_content for i in data]
metadatas = [i.metadata for i in data]
return docs, metadatas
def get_docs_and_metadatas(self) -> (list, list):
if isinstance(self.data, pd.DataFrame):
return self._get_docs_and_metadatas_by_df()
elif isinstance(self.data, list):
return self._get_docs_and_metadatas_by_langchain()
else:
raise NotImplementedError("Data type not supported for metadata extraction.")
class RepoMetadata(BaseModel):
name: str = Field(default="")
n_docs: int = Field(default=0)
n_chars: int = Field(default=0)
symbols: list = Field(default_factory=list)
class Repo(BaseModel):
# Name of this repo.
name: str = Field(default="")
# metadata: RepoMetadata = Field(default=RepoMetadata)
docs: dict[Path, Document] = Field(default_factory=dict)
codes: dict[Path, Document] = Field(default_factory=dict)
assets: dict[Path, Document] = Field(default_factory=dict)
path: Path = Field(default=None)
def _path(self, filename):
return self.path / filename
@classmethod
def from_path(cls, path: Path):
"""Load documents, code, and assets from a repository path."""
path.mkdir(parents=True, exist_ok=True)
repo = Repo(path=path, name=path.name)
for file_path in path.rglob("*"):
# FIXME: These judgments are difficult to support multiple programming languages and need to be more general
if file_path.is_file() and file_path.suffix in [".json", ".txt", ".md", ".py", ".js", ".css", ".html"]:
repo._set(file_path.read_text(), file_path)
return repo
def to_path(self):
"""Persist all documents, code, and assets to the given repository path."""
for doc in self.docs.values():
doc.to_path()
for code in self.codes.values():
code.to_path()
for asset in self.assets.values():
asset.to_path()
def _set(self, content: str, path: Path):
"""Add a document to the appropriate category based on its file extension."""
suffix = path.suffix
doc = Document(content=content, path=path, name=str(path.relative_to(self.path)))
# FIXME: These judgments are difficult to support multiple programming languages and need to be more general
if suffix.lower() == ".md":
self.docs[path] = doc
elif suffix.lower() in [".py", ".js", ".css", ".html"]:
self.codes[path] = doc
else:
self.assets[path] = doc
return doc
def set(self, filename: str, content: str):
"""Set a document and persist it to disk."""
path = self._path(filename)
doc = self._set(content, path)
doc.to_path()
def get(self, filename: str) -> Optional[Document]:
"""Get a document by its filename."""
path = self._path(filename)
return self.docs.get(path) or self.codes.get(path) or self.assets.get(path)
def get_text_documents(self) -> list[Document]:
return list(self.docs.values()) + list(self.codes.values())
def eda(self) -> RepoMetadata:
n_docs = sum(len(i) for i in [self.docs, self.codes, self.assets])
n_chars = sum(sum(len(j.content) for j in i.values()) for i in [self.docs, self.codes, self.assets])
symbols = RepoParser(base_directory=self.path).generate_symbols()
return RepoMetadata(name=self.name, n_docs=n_docs, n_chars=n_chars, symbols=symbols)
| [] |
2024-01-10 | geekan/MetaGPT | metagpt~provider~open_llm_api.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : self-host open llm model with openai-compatible interface
from openai.types import CompletionUsage
from metagpt.config import CONFIG, Config, LLMProviderEnum
from metagpt.logs import logger
from metagpt.provider.llm_provider_registry import register_provider
from metagpt.provider.openai_api import OpenAILLM
from metagpt.utils.cost_manager import CostManager, Costs
from metagpt.utils.token_counter import count_message_tokens, count_string_tokens
class OpenLLMCostManager(CostManager):
"""open llm model is self-host, it's free and without cost"""
def update_cost(self, prompt_tokens, completion_tokens, model):
"""
Update the total cost, prompt tokens, and completion tokens.
Args:
prompt_tokens (int): The number of tokens used in the prompt.
completion_tokens (int): The number of tokens used in the completion.
model (str): The model used for the API call.
"""
self.total_prompt_tokens += prompt_tokens
self.total_completion_tokens += completion_tokens
max_budget = CONFIG.max_budget if CONFIG.max_budget else CONFIG.cost_manager.max_budget
logger.info(
f"Max budget: ${max_budget:.3f} | reference "
f"prompt_tokens: {prompt_tokens}, completion_tokens: {completion_tokens}"
)
@register_provider(LLMProviderEnum.OPEN_LLM)
class OpenLLM(OpenAILLM):
def __init__(self):
self.config: Config = CONFIG
self.__init_openllm()
self.auto_max_tokens = False
self._cost_manager = OpenLLMCostManager()
def __init_openllm(self):
self.is_azure = False
self.rpm = int(self.config.get("RPM", 10))
self._init_client()
self.model = self.config.open_llm_api_model # `self.model` should after `_make_client` to rewrite it
def _make_client_kwargs(self) -> dict:
kwargs = dict(api_key="sk-xxx", base_url=self.config.open_llm_api_base)
return kwargs
def _calc_usage(self, messages: list[dict], rsp: str) -> CompletionUsage:
usage = CompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0)
if not CONFIG.calc_usage:
return usage
try:
usage.prompt_tokens = count_message_tokens(messages, "open-llm-model")
usage.completion_tokens = count_string_tokens(rsp, "open-llm-model")
except Exception as e:
logger.error(f"usage calculation failed!: {e}")
return usage
def _update_costs(self, usage: CompletionUsage):
if self.config.calc_usage and usage:
try:
# use OpenLLMCostManager not CONFIG.cost_manager
self._cost_manager.update_cost(usage.prompt_tokens, usage.completion_tokens, self.model)
except Exception as e:
logger.error(f"updating costs failed!, exp: {e}")
def get_costs(self) -> Costs:
return self._cost_manager.get_costs()
| [] |
2024-01-10 | geekan/MetaGPT | metagpt~provider~fireworks_api.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : fireworks.ai's api
import re
from openai import APIConnectionError, AsyncStream
from openai.types import CompletionUsage
from openai.types.chat import ChatCompletionChunk
from tenacity import (
after_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_random_exponential,
)
from metagpt.config import CONFIG, Config, LLMProviderEnum
from metagpt.logs import logger
from metagpt.provider.llm_provider_registry import register_provider
from metagpt.provider.openai_api import OpenAILLM, log_and_reraise
from metagpt.utils.cost_manager import CostManager, Costs
MODEL_GRADE_TOKEN_COSTS = {
"-1": {"prompt": 0.0, "completion": 0.0}, # abnormal condition
"16": {"prompt": 0.2, "completion": 0.8}, # 16 means model size <= 16B; 0.2 means $0.2/1M tokens
"80": {"prompt": 0.7, "completion": 2.8}, # 80 means 16B < model size <= 80B
"mixtral-8x7b": {"prompt": 0.4, "completion": 1.6},
}
class FireworksCostManager(CostManager):
def model_grade_token_costs(self, model: str) -> dict[str, float]:
def _get_model_size(model: str) -> float:
size = re.findall(".*-([0-9.]+)b", model)
size = float(size[0]) if len(size) > 0 else -1
return size
if "mixtral-8x7b" in model:
token_costs = MODEL_GRADE_TOKEN_COSTS["mixtral-8x7b"]
else:
model_size = _get_model_size(model)
if 0 < model_size <= 16:
token_costs = MODEL_GRADE_TOKEN_COSTS["16"]
elif 16 < model_size <= 80:
token_costs = MODEL_GRADE_TOKEN_COSTS["80"]
else:
token_costs = MODEL_GRADE_TOKEN_COSTS["-1"]
return token_costs
def update_cost(self, prompt_tokens: int, completion_tokens: int, model: str):
"""
Refs to `https://app.fireworks.ai/pricing` **Developer pricing**
Update the total cost, prompt tokens, and completion tokens.
Args:
prompt_tokens (int): The number of tokens used in the prompt.
completion_tokens (int): The number of tokens used in the completion.
model (str): The model used for the API call.
"""
self.total_prompt_tokens += prompt_tokens
self.total_completion_tokens += completion_tokens
token_costs = self.model_grade_token_costs(model)
cost = (prompt_tokens * token_costs["prompt"] + completion_tokens * token_costs["completion"]) / 1000000
self.total_cost += cost
max_budget = CONFIG.max_budget if CONFIG.max_budget else CONFIG.cost_manager.max_budget
logger.info(
f"Total running cost: ${self.total_cost:.4f} | Max budget: ${max_budget:.3f} | "
f"Current cost: ${cost:.4f}, prompt_tokens: {prompt_tokens}, completion_tokens: {completion_tokens}"
)
CONFIG.total_cost = self.total_cost
@register_provider(LLMProviderEnum.FIREWORKS)
class FireworksLLM(OpenAILLM):
def __init__(self):
self.config: Config = CONFIG
self.__init_fireworks()
self.auto_max_tokens = False
self._cost_manager = FireworksCostManager()
def __init_fireworks(self):
self.is_azure = False
self.rpm = int(self.config.get("RPM", 10))
self._init_client()
self.model = self.config.fireworks_api_model # `self.model` should after `_make_client` to rewrite it
def _make_client_kwargs(self) -> dict:
kwargs = dict(api_key=self.config.fireworks_api_key, base_url=self.config.fireworks_api_base)
return kwargs
def _update_costs(self, usage: CompletionUsage):
if self.config.calc_usage and usage:
try:
# use FireworksCostManager not CONFIG.cost_manager
self._cost_manager.update_cost(usage.prompt_tokens, usage.completion_tokens, self.model)
except Exception as e:
logger.error(f"updating costs failed!, exp: {e}")
def get_costs(self) -> Costs:
return self._cost_manager.get_costs()
async def _achat_completion_stream(self, messages: list[dict]) -> str:
response: AsyncStream[ChatCompletionChunk] = await self.aclient.chat.completions.create(
**self._cons_kwargs(messages), stream=True
)
collected_content = []
usage = CompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0)
# iterate through the stream of events
async for chunk in response:
if chunk.choices:
choice = chunk.choices[0]
choice_delta = choice.delta
finish_reason = choice.finish_reason if hasattr(choice, "finish_reason") else None
if choice_delta.content:
collected_content.append(choice_delta.content)
print(choice_delta.content, end="")
if finish_reason:
# fireworks api return usage when finish_reason is not None
usage = CompletionUsage(**chunk.usage)
full_content = "".join(collected_content)
self._update_costs(usage)
return full_content
@retry(
wait=wait_random_exponential(min=1, max=60),
stop=stop_after_attempt(6),
after=after_log(logger, logger.level("WARNING").name),
retry=retry_if_exception_type(APIConnectionError),
retry_error_callback=log_and_reraise,
)
async def acompletion_text(self, messages: list[dict], stream=False, timeout: int = 3) -> str:
"""when streaming, print each token in place."""
if stream:
return await self._achat_completion_stream(messages)
rsp = await self._achat_completion(messages)
return self.get_choice_text(rsp)
| [] |
2024-01-10 | geekan/MetaGPT | metagpt~provider~metagpt_api.py | # -*- coding: utf-8 -*-
"""
@Time : 2023/5/5 23:08
@Author : alexanderwu
@File : metagpt_api.py
@Desc : MetaGPT LLM provider.
"""
from metagpt.config import LLMProviderEnum
from metagpt.provider import OpenAILLM
from metagpt.provider.llm_provider_registry import register_provider
@register_provider(LLMProviderEnum.METAGPT)
class MetaGPTLLM(OpenAILLM):
def __init__(self):
super().__init__()
| [] |
2024-01-10 | geekan/MetaGPT | metagpt~provider~anthropic_api.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/7/21 11:15
@Author : Leo Xiao
@File : anthropic_api.py
"""
import anthropic
from anthropic import Anthropic, AsyncAnthropic
from metagpt.config import CONFIG
class Claude2:
def ask(self, prompt: str) -> str:
client = Anthropic(api_key=CONFIG.anthropic_api_key)
res = client.completions.create(
model="claude-2",
prompt=f"{anthropic.HUMAN_PROMPT} {prompt} {anthropic.AI_PROMPT}",
max_tokens_to_sample=1000,
)
return res.completion
async def aask(self, prompt: str) -> str:
aclient = AsyncAnthropic(api_key=CONFIG.anthropic_api_key)
res = await aclient.completions.create(
model="claude-2",
prompt=f"{anthropic.HUMAN_PROMPT} {prompt} {anthropic.AI_PROMPT}",
max_tokens_to_sample=1000,
)
return res.completion
| [] |
2024-01-10 | geekan/MetaGPT | tests~mock~mock_llm.py | from typing import Optional
from metagpt.logs import log_llm_stream, logger
from metagpt.provider.openai_api import OpenAILLM
class MockLLM(OpenAILLM):
def __init__(self, allow_open_api_call):
super().__init__()
self.allow_open_api_call = allow_open_api_call
self.rsp_cache: dict = {}
self.rsp_candidates: list[dict] = [] # a test can have multiple calls with the same llm, thus a list
async def acompletion_text(self, messages: list[dict], stream=False, timeout=3) -> str:
"""Overwrite original acompletion_text to cancel retry"""
if stream:
resp = self._achat_completion_stream(messages, timeout=timeout)
collected_messages = []
async for i in resp:
log_llm_stream(i)
collected_messages.append(i)
full_reply_content = "".join(collected_messages)
usage = self._calc_usage(messages, full_reply_content)
self._update_costs(usage)
return full_reply_content
rsp = await self._achat_completion(messages, timeout=timeout)
return self.get_choice_text(rsp)
async def original_aask(
self,
msg: str,
system_msgs: Optional[list[str]] = None,
format_msgs: Optional[list[dict[str, str]]] = None,
timeout=3,
stream=True,
):
"""A copy of metagpt.provider.base_llm.BaseLLM.aask, we can't use super().aask because it will be mocked"""
if system_msgs:
message = self._system_msgs(system_msgs)
else:
message = [self._default_system_msg()] if self.use_system_prompt else []
if format_msgs:
message.extend(format_msgs)
message.append(self._user_msg(msg))
rsp = await self.acompletion_text(message, stream=stream, timeout=timeout)
return rsp
async def original_aask_batch(self, msgs: list, timeout=3) -> str:
"""A copy of metagpt.provider.base_llm.BaseLLM.aask_batch, we can't use super().aask because it will be mocked"""
context = []
for msg in msgs:
umsg = self._user_msg(msg)
context.append(umsg)
rsp_text = await self.acompletion_text(context, timeout=timeout)
context.append(self._assistant_msg(rsp_text))
return self._extract_assistant_rsp(context)
async def aask(
self,
msg: str,
system_msgs: Optional[list[str]] = None,
format_msgs: Optional[list[dict[str, str]]] = None,
timeout=3,
stream=True,
) -> str:
msg_key = msg # used to identify it a message has been called before
if system_msgs:
joined_system_msg = "#MSG_SEP#".join(system_msgs) + "#SYSTEM_MSG_END#"
msg_key = joined_system_msg + msg_key
rsp = await self._mock_rsp(msg_key, self.original_aask, msg, system_msgs, format_msgs, timeout, stream)
return rsp
async def aask_batch(self, msgs: list, timeout=3) -> str:
msg_key = "#MSG_SEP#".join([msg if isinstance(msg, str) else msg.content for msg in msgs])
rsp = await self._mock_rsp(msg_key, self.original_aask_batch, msgs, timeout)
return rsp
async def _mock_rsp(self, msg_key, ask_func, *args, **kwargs):
if msg_key not in self.rsp_cache:
if not self.allow_open_api_call:
raise ValueError(
"In current test setting, api call is not allowed, you should properly mock your tests, "
"or add expected api response in tests/data/rsp_cache.json. "
f"The prompt you want for api call: {msg_key}"
)
# Call the original unmocked method
rsp = await ask_func(*args, **kwargs)
else:
logger.warning("Use response cache")
rsp = self.rsp_cache[msg_key]
self.rsp_candidates.append({msg_key: rsp})
return rsp
| [] |
2024-01-10 | geekan/MetaGPT | tests~metagpt~provider~test_general_api_base.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
import os
from typing import AsyncGenerator, Generator, Iterator, Tuple, Union
import aiohttp
import pytest
import requests
from openai import OpenAIError
from metagpt.provider.general_api_base import (
APIRequestor,
ApiType,
OpenAIResponse,
_aiohttp_proxies_arg,
_build_api_url,
_make_session,
_requests_proxies_arg,
log_debug,
log_info,
log_warn,
logfmt,
parse_stream,
parse_stream_helper,
)
def test_basic():
_ = ApiType.from_str("azure")
_ = ApiType.from_str("azuread")
_ = ApiType.from_str("openai")
with pytest.raises(OpenAIError):
_ = ApiType.from_str("xx")
os.environ.setdefault("LLM_LOG", "debug")
log_debug("debug")
log_warn("warn")
log_info("info")
logfmt({"k1": b"v1", "k2": 1, "k3": "a b"})
_build_api_url(url="http://www.baidu.com/s?wd=", query="baidu")
def test_openai_response():
resp = OpenAIResponse(data=[], headers={"retry-after": 3})
assert resp.request_id is None
assert resp.retry_after == 3
assert resp.operation_location is None
assert resp.organization is None
assert resp.response_ms is None
def test_proxy():
assert _requests_proxies_arg(proxy=None) is None
proxy = "127.0.0.1:80"
assert _requests_proxies_arg(proxy=proxy) == {"http": proxy, "https": proxy}
proxy_dict = {"http": proxy}
assert _requests_proxies_arg(proxy=proxy_dict) == proxy_dict
assert _aiohttp_proxies_arg(proxy_dict) == proxy
proxy_dict = {"https": proxy}
assert _requests_proxies_arg(proxy=proxy_dict) == proxy_dict
assert _aiohttp_proxies_arg(proxy_dict) == proxy
assert _make_session() is not None
assert _aiohttp_proxies_arg(None) is None
assert _aiohttp_proxies_arg("test") == "test"
with pytest.raises(ValueError):
_aiohttp_proxies_arg(-1)
def test_parse_stream():
assert parse_stream_helper(None) is None
assert parse_stream_helper(b"data: [DONE]") is None
assert parse_stream_helper(b"data: test") == "test"
assert parse_stream_helper(b"test") is None
for line in parse_stream([b"data: test"]):
assert line == "test"
api_requestor = APIRequestor(base_url="http://www.baidu.com")
def mock_interpret_response(
self, result: requests.Response, stream: bool
) -> Tuple[Union[bytes, Iterator[Generator]], bytes]:
return b"baidu", False
async def mock_interpret_async_response(
self, result: aiohttp.ClientResponse, stream: bool
) -> Tuple[Union[OpenAIResponse, AsyncGenerator[OpenAIResponse, None]], bool]:
return b"baidu", True
def test_requestor_headers():
# validate_headers
headers = api_requestor._validate_headers(None)
assert not headers
with pytest.raises(Exception):
api_requestor._validate_headers(-1)
with pytest.raises(Exception):
api_requestor._validate_headers({1: 2})
with pytest.raises(Exception):
api_requestor._validate_headers({"test": 1})
supplied_headers = {"test": "test"}
assert api_requestor._validate_headers(supplied_headers) == supplied_headers
api_requestor.organization = "test"
api_requestor.api_version = "test123"
api_requestor.api_type = ApiType.OPEN_AI
request_id = "test123"
headers = api_requestor.request_headers(method="post", extra={}, request_id=request_id)
assert headers["LLM-Organization"] == api_requestor.organization
assert headers["LLM-Version"] == api_requestor.api_version
assert headers["X-Request-Id"] == request_id
def test_api_requestor(mocker):
mocker.patch("metagpt.provider.general_api_base.APIRequestor._interpret_response", mock_interpret_response)
resp, _, _ = api_requestor.request(method="get", url="/s?wd=baidu")
resp, _, _ = api_requestor.request(method="post", url="/s?wd=baidu")
@pytest.mark.asyncio
async def test_async_api_requestor(mocker):
mocker.patch(
"metagpt.provider.general_api_base.APIRequestor._interpret_async_response", mock_interpret_async_response
)
resp, _, _ = await api_requestor.arequest(method="get", url="/s?wd=baidu")
resp, _, _ = await api_requestor.arequest(method="post", url="/s?wd=baidu")
| [] |
2024-01-10 | geekan/MetaGPT | tests~metagpt~actions~test_write_code.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/11 17:45
@Author : alexanderwu
@File : test_write_code.py
@Modifiled By: mashenquan, 2023-12-6. According to RFC 135
"""
from pathlib import Path
import pytest
from metagpt.actions.write_code import WriteCode
from metagpt.config import CONFIG
from metagpt.const import (
CODE_SUMMARIES_FILE_REPO,
SYSTEM_DESIGN_FILE_REPO,
TASK_FILE_REPO,
TEST_OUTPUTS_FILE_REPO,
)
from metagpt.logs import logger
from metagpt.provider.openai_api import OpenAILLM as LLM
from metagpt.schema import CodingContext, Document
from metagpt.utils.common import aread
from metagpt.utils.file_repository import FileRepository
from tests.metagpt.actions.mock_markdown import TASKS_2, WRITE_CODE_PROMPT_SAMPLE
@pytest.mark.asyncio
async def test_write_code():
context = CodingContext(
filename="task_filename.py", design_doc=Document(content="设计一个名为'add'的函数,该函数接受两个整数作为输入,并返回它们的和。")
)
doc = Document(content=context.model_dump_json())
write_code = WriteCode(context=doc)
code = await write_code.run()
logger.info(code.model_dump_json())
# 我们不能精确地预测生成的代码,但我们可以检查某些关键字
assert "def add" in code.code_doc.content
assert "return" in code.code_doc.content
@pytest.mark.asyncio
async def test_write_code_directly():
prompt = WRITE_CODE_PROMPT_SAMPLE + "\n" + TASKS_2[0]
llm = LLM()
rsp = await llm.aask(prompt)
logger.info(rsp)
@pytest.mark.asyncio
async def test_write_code_deps():
# Prerequisites
CONFIG.src_workspace = CONFIG.git_repo.workdir / "snake1/snake1"
demo_path = Path(__file__).parent / "../../data/demo_project"
await FileRepository.save_file(
filename="test_game.py.json",
content=await aread(str(demo_path / "test_game.py.json")),
relative_path=TEST_OUTPUTS_FILE_REPO,
)
await FileRepository.save_file(
filename="20231221155954.json",
content=await aread(str(demo_path / "code_summaries.json")),
relative_path=CODE_SUMMARIES_FILE_REPO,
)
await FileRepository.save_file(
filename="20231221155954.json",
content=await aread(str(demo_path / "system_design.json")),
relative_path=SYSTEM_DESIGN_FILE_REPO,
)
await FileRepository.save_file(
filename="20231221155954.json", content=await aread(str(demo_path / "tasks.json")), relative_path=TASK_FILE_REPO
)
await FileRepository.save_file(
filename="main.py", content='if __name__ == "__main__":\nmain()', relative_path=CONFIG.src_workspace
)
context = CodingContext(
filename="game.py",
design_doc=await FileRepository.get_file(filename="20231221155954.json", relative_path=SYSTEM_DESIGN_FILE_REPO),
task_doc=await FileRepository.get_file(filename="20231221155954.json", relative_path=TASK_FILE_REPO),
code_doc=Document(filename="game.py", content="", root_path="snake1"),
)
coding_doc = Document(root_path="snake1", filename="game.py", content=context.json())
action = WriteCode(context=coding_doc)
rsp = await action.run()
assert rsp
assert rsp.code_doc.content
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| [
"PLACEHOLDER\nPLACEHOLDER"
] |
2024-01-10 | geekan/MetaGPT | tests~metagpt~provider~test_open_llm_api.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
import pytest
from openai.types.chat.chat_completion import (
ChatCompletion,
ChatCompletionMessage,
Choice,
)
from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
from openai.types.chat.chat_completion_chunk import Choice as AChoice
from openai.types.chat.chat_completion_chunk import ChoiceDelta
from openai.types.completion_usage import CompletionUsage
from metagpt.config import CONFIG
from metagpt.provider.open_llm_api import OpenLLM
from metagpt.utils.cost_manager import Costs
CONFIG.max_budget = 10
CONFIG.calc_usage = True
resp_content = "I'm llama2"
default_resp = ChatCompletion(
id="cmpl-a6652c1bb181caae8dd19ad8",
model="llama-v2-13b-chat",
object="chat.completion",
created=1703302755,
choices=[
Choice(
finish_reason="stop",
index=0,
message=ChatCompletionMessage(role="assistant", content=resp_content),
logprobs=None,
)
],
)
default_resp_chunk = ChatCompletionChunk(
id=default_resp.id,
model=default_resp.model,
object="chat.completion.chunk",
created=default_resp.created,
choices=[
AChoice(
delta=ChoiceDelta(content=resp_content, role="assistant"),
finish_reason="stop",
index=0,
logprobs=None,
)
],
)
prompt_msg = "who are you"
messages = [{"role": "user", "content": prompt_msg}]
async def mock_openai_acompletions_create(self, stream: bool = False, **kwargs) -> ChatCompletionChunk:
if stream:
class Iterator(object):
async def __aiter__(self):
yield default_resp_chunk
return Iterator()
else:
return default_resp
@pytest.mark.asyncio
async def test_openllm_acompletion(mocker):
mocker.patch("openai.resources.chat.completions.AsyncCompletions.create", mock_openai_acompletions_create)
openllm_gpt = OpenLLM()
openllm_gpt.model = "llama-v2-13b-chat"
openllm_gpt._update_costs(usage=CompletionUsage(prompt_tokens=100, completion_tokens=100, total_tokens=200))
assert openllm_gpt.get_costs() == Costs(
total_prompt_tokens=100, total_completion_tokens=100, total_cost=0, total_budget=0
)
resp = await openllm_gpt.acompletion(messages)
assert resp.choices[0].message.content in resp_content
resp = await openllm_gpt.aask(prompt_msg, stream=False)
assert resp == resp_content
resp = await openllm_gpt.acompletion_text(messages, stream=False)
assert resp == resp_content
resp = await openllm_gpt.acompletion_text(messages, stream=True)
assert resp == resp_content
resp = await openllm_gpt.aask(prompt_msg)
assert resp == resp_content
| [
"I'm llama2",
"who are you"
] |
2024-01-10 | geekan/MetaGPT | metagpt~tools~ut_writer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from pathlib import Path
from metagpt.provider.openai_api import OpenAILLM as GPTAPI
from metagpt.utils.common import awrite
ICL_SAMPLE = """Interface definition:
```text
Interface Name: Element Tagging
Interface Path: /projects/{project_key}/node-tags
Method: POST
Request parameters:
Path parameters:
project_key
Body parameters:
Name Type Required Default Value Remarks
nodes array Yes Nodes
node_key string No Node key
tags array No Original node tag list
node_type string No Node type DATASET / RECIPE
operations array Yes
tags array No Operation tag list
mode string No Operation type ADD / DELETE
Return data:
Name Type Required Default Value Remarks
code integer Yes Status code
msg string Yes Prompt message
data object Yes Returned data
list array No Node list true / false
node_type string No Node type DATASET / RECIPE
node_key string No Node key
```
Unit test:
```python
@pytest.mark.parametrize(
"project_key, nodes, operations, expected_msg",
[
("project_key", [{"node_key": "dataset_001", "tags": ["tag1", "tag2"], "node_type": "DATASET"}], [{"tags": ["new_tag1"], "mode": "ADD"}], "success"),
("project_key", [{"node_key": "dataset_002", "tags": ["tag1", "tag2"], "node_type": "DATASET"}], [{"tags": ["tag1"], "mode": "DELETE"}], "success"),
("", [{"node_key": "dataset_001", "tags": ["tag1", "tag2"], "node_type": "DATASET"}], [{"tags": ["new_tag1"], "mode": "ADD"}], "Missing the required parameter project_key"),
(123, [{"node_key": "dataset_001", "tags": ["tag1", "tag2"], "node_type": "DATASET"}], [{"tags": ["new_tag1"], "mode": "ADD"}], "Incorrect parameter type"),
("project_key", [{"node_key": "a"*201, "tags": ["tag1", "tag2"], "node_type": "DATASET"}], [{"tags": ["new_tag1"], "mode": "ADD"}], "Request parameter exceeds field boundary")
]
)
def test_node_tags(project_key, nodes, operations, expected_msg):
pass
# The above is an interface definition and a unit test example.
# Next, please play the role of an expert test manager with 20 years of experience at Google. When I give the interface definition,
# reply to me with a unit test. There are several requirements:
# 1. Only output one `@pytest.mark.parametrize` and the corresponding test_<interface name> function (inside pass, do not implement).
# -- The function parameter contains expected_msg for result verification.
# 2. The generated test cases use shorter text or numbers and are as compact as possible.
# 3. If comments are needed, use Chinese.
# If you understand, please wait for me to give the interface definition and just answer "Understood" to save tokens.
"""
ACT_PROMPT_PREFIX = """Refer to the test types: such as missing request parameters, field boundary verification, incorrect field type.
Please output 10 test cases within one `@pytest.mark.parametrize` scope.
```text
"""
YFT_PROMPT_PREFIX = """Refer to the test types: such as SQL injection, cross-site scripting (XSS), unauthorized access and privilege escalation,
authentication and authorization, parameter verification, exception handling, file upload and download.
Please output 10 test cases within one `@pytest.mark.parametrize` scope.
```text
"""
OCR_API_DOC = """```text
Interface Name: OCR recognition
Interface Path: /api/v1/contract/treaty/task/ocr
Method: POST
Request Parameters:
Path Parameters:
Body Parameters:
Name Type Required Default Value Remarks
file_id string Yes
box array Yes
contract_id number Yes Contract id
start_time string No yyyy-mm-dd
end_time string No yyyy-mm-dd
extract_type number No Recognition type 1- During import 2- After import Default 1
Response Data:
Name Type Required Default Value Remarks
code integer Yes
message string Yes
data object Yes
```
"""
class UTGenerator:
"""UT Generator: Construct UT through API documentation"""
def __init__(
self,
swagger_file: str,
ut_py_path: str,
questions_path: str,
chatgpt_method: str = "API",
template_prefix=YFT_PROMPT_PREFIX,
) -> None:
"""Initialize UT Generator
Args:
swagger_file: path to the swagger file
ut_py_path: path to store test cases
questions_path: path to store the template, facilitating subsequent checks
chatgpt_method: API method
template_prefix: use the template, default is YFT_UT_PROMPT
"""
self.swagger_file = swagger_file
self.ut_py_path = ut_py_path
self.questions_path = questions_path
assert chatgpt_method in ["API"], "Invalid chatgpt_method"
self.chatgpt_method = chatgpt_method
# ICL: In-Context Learning, provide an example here for GPT to mimic
self.icl_sample = ICL_SAMPLE
self.template_prefix = template_prefix
def get_swagger_json(self) -> dict:
"""Load Swagger JSON from a local file"""
with open(self.swagger_file, "r", encoding="utf-8") as file:
swagger_json = json.load(file)
return swagger_json
def __para_to_str(self, prop, required, name=""):
name = name or prop["name"]
ptype = prop["type"]
title = prop.get("title", "")
desc = prop.get("description", "")
return f'{name}\t{ptype}\t{"Yes" if required else "No"}\t{title}\t{desc}'
def _para_to_str(self, prop):
required = prop.get("required", False)
return self.__para_to_str(prop, required)
def para_to_str(self, name, prop, prop_object_required):
required = name in prop_object_required
return self.__para_to_str(prop, required, name)
def build_object_properties(self, node, prop_object_required, level: int = 0) -> str:
"""Recursively output properties of object and array[object] types
Args:
node (_type_): value of the child item
prop_object_required (_type_): whether it's a required field
level: current recursion depth
"""
doc = ""
def dive_into_object(node):
"""If it's an object type, recursively output its properties"""
if node.get("type") == "object":
sub_properties = node.get("properties", {})
return self.build_object_properties(sub_properties, prop_object_required, level=level + 1)
return ""
if node.get("in", "") in ["query", "header", "formData"]:
doc += f'{" " * level}{self._para_to_str(node)}\n'
doc += dive_into_object(node)
return doc
for name, prop in node.items():
if not isinstance(prop, dict):
doc += f'{" " * level}{self._para_to_str(node)}\n'
break
doc += f'{" " * level}{self.para_to_str(name, prop, prop_object_required)}\n'
doc += dive_into_object(prop)
if prop["type"] == "array":
items = prop.get("items", {})
doc += dive_into_object(items)
return doc
def get_tags_mapping(self) -> dict:
"""Process tag and path mappings
Returns:
Dict: mapping of tag to path
"""
swagger_data = self.get_swagger_json()
paths = swagger_data["paths"]
tags = {}
for path, path_obj in paths.items():
for method, method_obj in path_obj.items():
for tag in method_obj["tags"]:
if tag not in tags:
tags[tag] = {}
if path not in tags[tag]:
tags[tag][path] = {}
tags[tag][path][method] = method_obj
return tags
async def generate_ut(self, include_tags) -> bool:
"""Generate test case files"""
tags = self.get_tags_mapping()
for tag, paths in tags.items():
if include_tags is None or tag in include_tags:
await self._generate_ut(tag, paths)
return True
def build_api_doc(self, node: dict, path: str, method: str) -> str:
summary = node["summary"]
doc = f"API Name: {summary}\nAPI Path: {path}\nMethod: {method.upper()}\n"
doc += "\nRequest Parameters:\n"
if "parameters" in node:
parameters = node["parameters"]
doc += "Path Parameters:\n"
# param["in"]: path / formData / body / query / header
for param in parameters:
if param["in"] == "path":
doc += f'{param["name"]} \n'
doc += "\nBody Parameters:\n"
doc += "Name\tType\tRequired\tDefault Value\tRemarks\n"
for param in parameters:
if param["in"] == "body":
schema = param.get("schema", {})
prop_properties = schema.get("properties", {})
prop_required = schema.get("required", [])
doc += self.build_object_properties(prop_properties, prop_required)
else:
doc += self.build_object_properties(param, [])
# Display response data information
doc += "\nResponse Data:\n"
doc += "Name\tType\tRequired\tDefault Value\tRemarks\n"
responses = node["responses"]
response = responses.get("200", {})
schema = response.get("schema", {})
properties = schema.get("properties", {})
required = schema.get("required", {})
doc += self.build_object_properties(properties, required)
doc += "\n"
doc += "```"
return doc
async def ask_gpt_and_save(self, question: str, tag: str, fname: str):
"""Generate questions and store both questions and answers"""
messages = [self.icl_sample, question]
result = await self.gpt_msgs_to_code(messages=messages)
await awrite(Path(self.questions_path) / tag / f"{fname}.txt", question)
data = result.get("code", "") if result else ""
await awrite(Path(self.ut_py_path) / tag / f"{fname}.py", data)
async def _generate_ut(self, tag, paths):
"""Process the structure under a data path
Args:
tag (_type_): module name
paths (_type_): Path Object
"""
for path, path_obj in paths.items():
for method, node in path_obj.items():
summary = node["summary"]
question = self.template_prefix
question += self.build_api_doc(node, path, method)
await self.ask_gpt_and_save(question, tag, summary)
async def gpt_msgs_to_code(self, messages: list) -> str:
"""Choose based on different calling methods"""
result = ""
if self.chatgpt_method == "API":
result = await GPTAPI().aask_code(messages=messages)
return result
| [
"Refer to the test types: such as SQL injection, cross-site scripting (XSS), unauthorized access and privilege escalation, \nauthentication and authorization, parameter verification, exception handling, file upload and download.\nPlease output 10 test cases within one `@pytest.mark.parametrize` scope.\n```text\n",
"Refer to the test types: such as missing request parameters, field boundary verification, incorrect field type.\nPlease output 10 test cases within one `@pytest.mark.parametrize` scope.\n```text\n"
] |
2024-01-10 | geekan/MetaGPT | metagpt~provider~general_api_base.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : refs to openai 0.x sdk
import asyncio
import json
import os
import platform
import re
import sys
import threading
import time
from contextlib import asynccontextmanager
from enum import Enum
from typing import (
AsyncGenerator,
AsyncIterator,
Dict,
Iterator,
Optional,
Tuple,
Union,
overload,
)
from urllib.parse import urlencode, urlsplit, urlunsplit
import aiohttp
import requests
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal
import logging
import openai
from openai import version
logger = logging.getLogger("openai")
TIMEOUT_SECS = 600
MAX_SESSION_LIFETIME_SECS = 180
MAX_CONNECTION_RETRIES = 2
# Has one attribute per thread, 'session'.
_thread_context = threading.local()
LLM_LOG = os.environ.get("LLM_LOG", "debug")
class ApiType(Enum):
AZURE = 1
OPEN_AI = 2
AZURE_AD = 3
@staticmethod
def from_str(label):
if label.lower() == "azure":
return ApiType.AZURE
elif label.lower() in ("azure_ad", "azuread"):
return ApiType.AZURE_AD
elif label.lower() in ("open_ai", "openai"):
return ApiType.OPEN_AI
else:
raise openai.OpenAIError(
"The API type provided in invalid. Please select one of the supported API types: 'azure', 'azure_ad', 'open_ai'"
)
api_key_to_header = (
lambda api, key: {"Authorization": f"Bearer {key}"}
if api in (ApiType.OPEN_AI, ApiType.AZURE_AD)
else {"api-key": f"{key}"}
)
def _console_log_level():
if LLM_LOG in ["debug", "info"]:
return LLM_LOG
else:
return None
def log_debug(message, **params):
msg = logfmt(dict(message=message, **params))
if _console_log_level() == "debug":
print(msg, file=sys.stderr)
logger.debug(msg)
def log_info(message, **params):
msg = logfmt(dict(message=message, **params))
if _console_log_level() in ["debug", "info"]:
print(msg, file=sys.stderr)
logger.info(msg)
def log_warn(message, **params):
msg = logfmt(dict(message=message, **params))
print(msg, file=sys.stderr)
logger.warning(msg)
def logfmt(props):
def fmt(key, val):
# Handle case where val is a bytes or bytesarray
if hasattr(val, "decode"):
val = val.decode("utf-8")
# Check if val is already a string to avoid re-encoding into ascii.
if not isinstance(val, str):
val = str(val)
if re.search(r"\s", val):
val = repr(val)
# key should already be a string
if re.search(r"\s", key):
key = repr(key)
return "{key}={val}".format(key=key, val=val)
return " ".join([fmt(key, val) for key, val in sorted(props.items())])
class OpenAIResponse:
def __init__(self, data, headers):
self._headers = headers
self.data = data
@property
def request_id(self) -> Optional[str]:
return self._headers.get("request-id")
@property
def retry_after(self) -> Optional[int]:
try:
return int(self._headers.get("retry-after"))
except TypeError:
return None
@property
def operation_location(self) -> Optional[str]:
return self._headers.get("operation-location")
@property
def organization(self) -> Optional[str]:
return self._headers.get("LLM-Organization")
@property
def response_ms(self) -> Optional[int]:
h = self._headers.get("Openai-Processing-Ms")
return None if h is None else round(float(h))
def _build_api_url(url, query):
scheme, netloc, path, base_query, fragment = urlsplit(url)
if base_query:
query = "%s&%s" % (base_query, query)
return urlunsplit((scheme, netloc, path, query, fragment))
def _requests_proxies_arg(proxy) -> Optional[Dict[str, str]]:
"""Returns a value suitable for the 'proxies' argument to 'requests.request."""
if proxy is None:
return None
elif isinstance(proxy, str):
return {"http": proxy, "https": proxy}
elif isinstance(proxy, dict):
return proxy.copy()
else:
raise ValueError(
"'openai.proxy' must be specified as either a string URL or a dict with string URL under the https and/or http keys."
)
def _aiohttp_proxies_arg(proxy) -> Optional[str]:
"""Returns a value suitable for the 'proxies' argument to 'aiohttp.ClientSession.request."""
if proxy is None:
return None
elif isinstance(proxy, str):
return proxy
elif isinstance(proxy, dict):
return proxy["https"] if "https" in proxy else proxy["http"]
else:
raise ValueError(
"'openai.proxy' must be specified as either a string URL or a dict with string URL under the https and/or http keys."
)
def _make_session() -> requests.Session:
s = requests.Session()
s.mount(
"https://",
requests.adapters.HTTPAdapter(max_retries=MAX_CONNECTION_RETRIES),
)
return s
def parse_stream_helper(line: bytes) -> Optional[str]:
if line:
if line.strip() == b"data: [DONE]":
# return here will cause GeneratorExit exception in urllib3
# and it will close http connection with TCP Reset
return None
if line.startswith(b"data: "):
line = line[len(b"data: ") :]
return line.decode("utf-8")
else:
return None
return None
def parse_stream(rbody: Iterator[bytes]) -> Iterator[str]:
for line in rbody:
_line = parse_stream_helper(line)
if _line is not None:
yield _line
async def parse_stream_async(rbody: aiohttp.StreamReader):
async for line in rbody:
_line = parse_stream_helper(line)
if _line is not None:
yield _line
class APIRequestor:
def __init__(
self,
key=None,
base_url=None,
api_type=None,
api_version=None,
organization=None,
):
self.base_url = base_url or openai.base_url
self.api_key = key or openai.api_key
self.api_type = ApiType.from_str(api_type) if api_type else ApiType.from_str("openai")
self.api_version = api_version or openai.api_version
self.organization = organization or openai.organization
@overload
def request(
self,
method,
url,
params,
headers,
files,
stream: Literal[True],
request_id: Optional[str] = ...,
request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,
) -> Tuple[Iterator[OpenAIResponse], bool, str]:
pass
@overload
def request(
self,
method,
url,
params=...,
headers=...,
files=...,
*,
stream: Literal[True],
request_id: Optional[str] = ...,
request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,
) -> Tuple[Iterator[OpenAIResponse], bool, str]:
pass
@overload
def request(
self,
method,
url,
params=...,
headers=...,
files=...,
stream: Literal[False] = ...,
request_id: Optional[str] = ...,
request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,
) -> Tuple[OpenAIResponse, bool, str]:
pass
@overload
def request(
self,
method,
url,
params=...,
headers=...,
files=...,
stream: bool = ...,
request_id: Optional[str] = ...,
request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,
) -> Tuple[Union[OpenAIResponse, Iterator[OpenAIResponse]], bool, str]:
pass
def request(
self,
method,
url,
params=None,
headers=None,
files=None,
stream: bool = False,
request_id: Optional[str] = None,
request_timeout: Optional[Union[float, Tuple[float, float]]] = None,
) -> Tuple[Union[OpenAIResponse, Iterator[OpenAIResponse]], bool, str]:
result = self.request_raw(
method.lower(),
url,
params=params,
supplied_headers=headers,
files=files,
stream=stream,
request_id=request_id,
request_timeout=request_timeout,
)
resp, got_stream = self._interpret_response(result, stream)
return resp, got_stream, self.api_key
@overload
async def arequest(
self,
method,
url,
params,
headers,
files,
stream: Literal[True],
request_id: Optional[str] = ...,
request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,
) -> Tuple[AsyncGenerator[OpenAIResponse, None], bool, str]:
pass
@overload
async def arequest(
self,
method,
url,
params=...,
headers=...,
files=...,
*,
stream: Literal[True],
request_id: Optional[str] = ...,
request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,
) -> Tuple[AsyncGenerator[OpenAIResponse, None], bool, str]:
pass
@overload
async def arequest(
self,
method,
url,
params=...,
headers=...,
files=...,
stream: Literal[False] = ...,
request_id: Optional[str] = ...,
request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,
) -> Tuple[OpenAIResponse, bool, str]:
pass
@overload
async def arequest(
self,
method,
url,
params=...,
headers=...,
files=...,
stream: bool = ...,
request_id: Optional[str] = ...,
request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,
) -> Tuple[Union[OpenAIResponse, AsyncGenerator[OpenAIResponse, None]], bool, str]:
pass
async def arequest(
self,
method,
url,
params=None,
headers=None,
files=None,
stream: bool = False,
request_id: Optional[str] = None,
request_timeout: Optional[Union[float, Tuple[float, float]]] = None,
) -> Tuple[Union[OpenAIResponse, AsyncGenerator[OpenAIResponse, None]], bool, str]:
ctx = aiohttp_session()
session = await ctx.__aenter__()
try:
result = await self.arequest_raw(
method.lower(),
url,
session,
params=params,
supplied_headers=headers,
files=files,
request_id=request_id,
request_timeout=request_timeout,
)
resp, got_stream = await self._interpret_async_response(result, stream)
except Exception:
await ctx.__aexit__(None, None, None)
raise
if got_stream:
async def wrap_resp():
assert isinstance(resp, AsyncGenerator)
try:
async for r in resp:
yield r
finally:
await ctx.__aexit__(None, None, None)
return wrap_resp(), got_stream, self.api_key
else:
await ctx.__aexit__(None, None, None)
return resp, got_stream, self.api_key
def request_headers(self, method: str, extra, request_id: Optional[str]) -> Dict[str, str]:
user_agent = "LLM/v1 PythonBindings/%s" % (version.VERSION,)
uname_without_node = " ".join(v for k, v in platform.uname()._asdict().items() if k != "node")
ua = {
"bindings_version": version.VERSION,
"httplib": "requests",
"lang": "python",
"lang_version": platform.python_version(),
"platform": platform.platform(),
"publisher": "openai",
"uname": uname_without_node,
}
headers = {
"X-LLM-Client-User-Agent": json.dumps(ua),
"User-Agent": user_agent,
}
headers.update(api_key_to_header(self.api_type, self.api_key))
if self.organization:
headers["LLM-Organization"] = self.organization
if self.api_version is not None and self.api_type == ApiType.OPEN_AI:
headers["LLM-Version"] = self.api_version
if request_id is not None:
headers["X-Request-Id"] = request_id
headers.update(extra)
return headers
def _validate_headers(self, supplied_headers: Optional[Dict[str, str]]) -> Dict[str, str]:
headers: Dict[str, str] = {}
if supplied_headers is None:
return headers
if not isinstance(supplied_headers, dict):
raise TypeError("Headers must be a dictionary")
for k, v in supplied_headers.items():
if not isinstance(k, str):
raise TypeError("Header keys must be strings")
if not isinstance(v, str):
raise TypeError("Header values must be strings")
headers[k] = v
# NOTE: It is possible to do more validation of the headers, but a request could always
# be made to the API manually with invalid headers, so we need to handle them server side.
return headers
def _prepare_request_raw(
self,
url,
supplied_headers,
method,
params,
files,
request_id: Optional[str],
) -> Tuple[str, Dict[str, str], Optional[bytes]]:
abs_url = "%s%s" % (self.base_url, url)
headers = self._validate_headers(supplied_headers)
data = None
if method == "get" or method == "delete":
if params:
encoded_params = urlencode([(k, v) for k, v in params.items() if v is not None])
abs_url = _build_api_url(abs_url, encoded_params)
elif method in {"post", "put"}:
if params and files:
data = params
if params and not files:
data = json.dumps(params).encode()
headers["Content-Type"] = "application/json"
else:
raise openai.APIConnectionError(
message=f"Unrecognized HTTP method {method}. This may indicate a bug in the LLM bindings.",
request=None,
)
headers = self.request_headers(method, headers, request_id)
# log_debug("Request to LLM API", method=method, path=abs_url)
# log_debug("Post details", data=data, api_version=self.api_version)
return abs_url, headers, data
def request_raw(
self,
method,
url,
*,
params=None,
supplied_headers: Optional[Dict[str, str]] = None,
files=None,
stream: bool = False,
request_id: Optional[str] = None,
request_timeout: Optional[Union[float, Tuple[float, float]]] = None,
) -> requests.Response:
abs_url, headers, data = self._prepare_request_raw(url, supplied_headers, method, params, files, request_id)
if not hasattr(_thread_context, "session"):
_thread_context.session = _make_session()
_thread_context.session_create_time = time.time()
elif time.time() - getattr(_thread_context, "session_create_time", 0) >= MAX_SESSION_LIFETIME_SECS:
_thread_context.session.close()
_thread_context.session = _make_session()
_thread_context.session_create_time = time.time()
try:
result = _thread_context.session.request(
method,
abs_url,
headers=headers,
data=data,
files=files,
stream=stream,
timeout=request_timeout if request_timeout else TIMEOUT_SECS,
proxies=_thread_context.session.proxies,
)
except requests.exceptions.Timeout as e:
raise openai.APITimeoutError("Request timed out: {}".format(e)) from e
except requests.exceptions.RequestException as e:
raise openai.APIConnectionError(message="Error communicating with LLM: {}".format(e), request=None) from e
# log_debug(
# "LLM API response",
# path=abs_url,
# response_code=result.status_code,
# processing_ms=result.headers.get("LLM-Processing-Ms"),
# request_id=result.headers.get("X-Request-Id"),
# )
return result
async def arequest_raw(
self,
method,
url,
session,
*,
params=None,
supplied_headers: Optional[Dict[str, str]] = None,
files=None,
request_id: Optional[str] = None,
request_timeout: Optional[Union[float, Tuple[float, float]]] = None,
) -> aiohttp.ClientResponse:
abs_url, headers, data = self._prepare_request_raw(url, supplied_headers, method, params, files, request_id)
if isinstance(request_timeout, tuple):
timeout = aiohttp.ClientTimeout(
connect=request_timeout[0],
total=request_timeout[1],
)
else:
timeout = aiohttp.ClientTimeout(total=request_timeout if request_timeout else TIMEOUT_SECS)
if files:
# TODO: Use `aiohttp.MultipartWriter` to create the multipart form data here.
# For now we use the private `requests` method that is known to have worked so far.
data, content_type = requests.models.RequestEncodingMixin._encode_files(files, data) # type: ignore
headers["Content-Type"] = content_type
request_kwargs = {
"method": method,
"url": abs_url,
"headers": headers,
"data": data,
"timeout": timeout,
}
try:
result = await session.request(**request_kwargs)
# log_info(
# "LLM API response",
# path=abs_url,
# response_code=result.status,
# processing_ms=result.headers.get("LLM-Processing-Ms"),
# request_id=result.headers.get("X-Request-Id"),
# )
return result
except (aiohttp.ServerTimeoutError, asyncio.TimeoutError) as e:
raise openai.APITimeoutError("Request timed out") from e
except aiohttp.ClientError as e:
raise openai.APIConnectionError(message="Error communicating with LLM", request=None) from e
def _interpret_response(
self, result: requests.Response, stream: bool
) -> Tuple[Union[OpenAIResponse, Iterator[OpenAIResponse]], bool]:
"""Returns the response(s) and a bool indicating whether it is a stream."""
async def _interpret_async_response(
self, result: aiohttp.ClientResponse, stream: bool
) -> Tuple[Union[OpenAIResponse, AsyncGenerator[OpenAIResponse, None]], bool]:
"""Returns the response(s) and a bool indicating whether it is a stream."""
def _interpret_response_line(self, rbody: str, rcode: int, rheaders, stream: bool) -> OpenAIResponse:
...
@asynccontextmanager
async def aiohttp_session() -> AsyncIterator[aiohttp.ClientSession]:
async with aiohttp.ClientSession() as session:
yield session
| [] |
2024-01-10 | geekan/MetaGPT | metagpt~memory~memory_storage.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Desc : the implement of memory storage
@Modified By: mashenquan, 2023/8/20. Remove global configuration `CONFIG`, enable configuration support for business isolation.
"""
from pathlib import Path
from typing import Optional
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores.faiss import FAISS
from langchain_core.embeddings import Embeddings
from metagpt.const import DATA_PATH, MEM_TTL
from metagpt.document_store.faiss_store import FaissStore
from metagpt.logs import logger
from metagpt.schema import Message
from metagpt.utils.serialize import deserialize_message, serialize_message
class MemoryStorage(FaissStore):
"""
The memory storage with Faiss as ANN search engine
"""
def __init__(self, mem_ttl: int = MEM_TTL, embedding: Embeddings = None):
self.role_id: str = None
self.role_mem_path: str = None
self.mem_ttl: int = mem_ttl # later use
self.threshold: float = 0.1 # experience value. TODO The threshold to filter similar memories
self._initialized: bool = False
self.embedding = embedding or OpenAIEmbeddings()
self.store: FAISS = None # Faiss engine
@property
def is_initialized(self) -> bool:
return self._initialized
def _load(self) -> Optional["FaissStore"]:
index_file, store_file = self._get_index_and_store_fname(index_ext=".faiss") # langchain FAISS using .faiss
if not (index_file.exists() and store_file.exists()):
logger.info("Missing at least one of index_file/store_file, load failed and return None")
return None
return FAISS.load_local(self.role_mem_path, self.embedding, self.role_id)
def recover_memory(self, role_id: str) -> list[Message]:
self.role_id = role_id
self.role_mem_path = Path(DATA_PATH / f"role_mem/{self.role_id}/")
self.role_mem_path.mkdir(parents=True, exist_ok=True)
self.store = self._load()
messages = []
if not self.store:
# TODO init `self.store` under here with raw faiss api instead under `add`
pass
else:
for _id, document in self.store.docstore._dict.items():
messages.append(deserialize_message(document.metadata.get("message_ser")))
self._initialized = True
return messages
def _get_index_and_store_fname(self, index_ext=".index", pkl_ext=".pkl"):
if not self.role_mem_path:
logger.error(f"You should call {self.__class__.__name__}.recover_memory fist when using LongTermMemory")
return None, None
index_fpath = Path(self.role_mem_path / f"{self.role_id}{index_ext}")
storage_fpath = Path(self.role_mem_path / f"{self.role_id}{pkl_ext}")
return index_fpath, storage_fpath
def persist(self):
self.store.save_local(self.role_mem_path, self.role_id)
logger.debug(f"Agent {self.role_id} persist memory into local")
def add(self, message: Message) -> bool:
"""add message into memory storage"""
docs = [message.content]
metadatas = [{"message_ser": serialize_message(message)}]
if not self.store:
# init Faiss
self.store = self._write(docs, metadatas)
self._initialized = True
else:
self.store.add_texts(texts=docs, metadatas=metadatas)
self.persist()
logger.info(f"Agent {self.role_id}'s memory_storage add a message")
def search_dissimilar(self, message: Message, k=4) -> list[Message]:
"""search for dissimilar messages"""
if not self.store:
return []
resp = self.store.similarity_search_with_score(query=message.content, k=k)
# filter the result which score is smaller than the threshold
filtered_resp = []
for item, score in resp:
# the smaller score means more similar relation
if score < self.threshold:
continue
# convert search result into Memory
metadata = item.metadata
new_mem = deserialize_message(metadata.get("message_ser"))
filtered_resp.append(new_mem)
return filtered_resp
def clean(self):
index_fpath, storage_fpath = self._get_index_and_store_fname()
if index_fpath and index_fpath.exists():
index_fpath.unlink(missing_ok=True)
if storage_fpath and storage_fpath.exists():
storage_fpath.unlink(missing_ok=True)
self.store = None
self._initialized = False
| [] |
2024-01-10 | Open-Swarm-Net/OSNAP | osnap_client~utils~ai_engines~GPTConversEngine.py | import os
import openai
import tiktoken
from osnap_client.utils.ai_engines.EngineBase import EngineBase
class GPTConversEngine(EngineBase):
"""
gpt-4, gpt-4-0314, gpt-4-32k, gpt-4-32k-0314, gpt-3.5-turbo, gpt-3.5-turbo-0301
"""
SUPPORTED_MODELS = [
"gpt-4",
"gpt-4-0314",
"gpt-4-32k",
"gpt-4-32k-0314",
"gpt-3.5-turbo",
"gpt-3.5-turbo-0301",
]
def __init__(self, model_name: str, temperature: float, max_response_tokens: int):
if model_name not in self.SUPPORTED_MODELS:
raise ValueError(
f"Model {model_name} is not supported. Supported models are: {self.SUPPORTED_MODELS}"
)
super().__init__("openai", model_name, temperature, max_response_tokens)
if "OPENAI_API_KEY" not in os.environ:
raise ValueError("OPENAI_API_KEY environment variable is not set.")
openai.api_key = os.getenv("OPENAI_API_KEY")
self.tiktoken_encoding = tiktoken.encoding_for_model(model_name)
def call_model(self, conversation, max_tokens=None, temperature=None) -> str:
"""Calls the gpt-3.5 or gpt-4 model to generate a response to a conversation.
Args:
conversation (list[dict]): The conversation to be completed. Example:
[
{"role": "system", "content": configuration_prompt},
{"role": "user", "content": prompt}
]
"""
if max_tokens is None:
max_tokens = self.max_response_tokens
if temperature is None:
temperature = self.temperature
if isinstance(conversation, str):
conversation = [{"role": "user", "content": conversation}]
if len(conversation) == 0:
raise ValueError(
"Conversation must have at least one message of format: [{'role': 'user', 'content': 'message'}]"
)
total_len = 0
for message in conversation:
if "role" not in message:
raise ValueError(
"Conversation messages must have a format: {'role': 'user', 'content': 'message'}. 'role' is missing."
)
if "content" not in message:
raise ValueError(
"Conversation messages must have a format: {'role': 'user', 'content': 'message'}. 'content' is missing."
)
message["content"] = self.truncate_message(
message["content"], self.max_input_length() - total_len - 100
)
new_message_len = len(self.tiktoken_encoding.encode(message["content"]))
total_len += new_message_len
try:
response = openai.ChatCompletion.create(
model=self.model_name,
messages=conversation,
max_tokens=max_tokens,
temperature=temperature,
n=1,
)
except:
return ""
return response["choices"][0]["message"]["content"]
| [] |
2024-01-10 | Open-Swarm-Net/OSNAP | osnap_client~utils~ai_engines~LanchainGoogleEngine.py | import os
import openai
import tiktoken
from osnap.utils.ai_engines.EngineBase import EngineBase
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents import AgentType
from langchain.llms import OpenAI
from langchain.utilities import GoogleSearchAPIWrapper
class LanchainGoogleEngine(EngineBase):
"""
gpt-4, gpt-4-0314, gpt-4-32k, gpt-4-32k-0314, gpt-3.5-turbo, gpt-3.5-turbo-0301
"""
SUPPORTED_MODELS = [
"gpt-4",
"gpt-4-0314",
"gpt-4-32k",
"gpt-4-32k-0314",
"gpt-3.5-turbo",
"gpt-3.5-turbo-0301"
]
def __init__(self, model_name: str, temperature: float, max_response_tokens: int):
if model_name not in self.SUPPORTED_MODELS:
raise ValueError(f"Model {model_name} is not supported. Supported models are: {self.SUPPORTED_MODELS}")
super().__init__("openai", model_name, temperature, max_response_tokens)
if "OPENAI_API_KEY" not in os.environ:
raise ValueError("OPENAI_API_KEY environment variable is not set.")
openai.api_key = os.getenv("OPENAI_API_KEY")
self.tiktoken_encoding = tiktoken.encoding_for_model(model_name)
self.agent = self._init_chain()
self.search = GoogleSearchAPIWrapper()
def _init_chain(self):
"""Instantiates langchain chain with all the necessary tools
"""
llm = OpenAI(temperature=self.temperature)
tools = load_tools(["google-search", "google-search-results-json"], llm=llm)
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=False, return_intermediate_steps=True)
return agent
def call_model(self, conversation: list) -> str:
"""Does the search itself but provides very short answers!
"""
if isinstance(conversation, list):
prompt = self._convert_conversation_to_str(conversation)
else:
prompt = conversation
response = self.agent(prompt)
final_response = ""
intermediate_steps = response["intermediate_steps"]
for step in intermediate_steps:
final_response += step[0].log + "\n" + step[1]
final_response += response["output"]
return final_response
def google_query(self, query: str) -> str:
"""Does the search itself but provides very short answers!
"""
response = self.search.run(query)
return response
def search_sources(self, query: str, n=5):
"""Does the search itself but provides very short answers!
"""
response = self.search.results(query, n)
return response
def _convert_conversation_to_str(self, conversation):
"""Converts conversation to a string
"""
prompt = ""
for message in conversation:
prompt += message["content"] + "\n"
return prompt
| [
"PLACEHOLDER\n"
] |
2024-01-10 | microsoft/clinical-self-verification | clin~eval~med_status.py | import os
import matplotlib.pyplot as plt
import seaborn as sns
from os.path import join
from tqdm import tqdm
import pandas as pd
import joblib
import sys
import datasets
import time
import openai
import numpy as np
from typing import List, Dict, Set
from collections import defaultdict
import clin.parse
import sklearn.metrics
def add_status_eval(r, dfe):
"""Add status eval by aggregating over all columns with dict_ in the name"""
d = defaultdict(list)
dict_columns = [
k
for k in r.keys()
if k.startswith("dict_") and not k.startswith("dict_evidence")
]
# common_meds_status_gt_dict = clin.eval.get_common_medications(r[dict_columns].values.flatten().tolist(), dfe)
for i in range(r.shape[0]):
row = r.iloc[i]
med_status_dicts_list = [row[k] for k in dict_columns]
common_meds_status_gt_dict = get_common_medications(med_status_dicts_list, dfe)
accs_cond, f1s_macro_cond = eval_medication_status(
med_status_dicts_list, common_meds_status_gt_dict
)
for j, setting in enumerate(dict_columns):
setting_name = setting.replace("dict_", "")
d[f"status_acc_cond___{setting_name}"].append(accs_cond[j])
d[f"status_f1_macro_cond___{setting_name}"].append(f1s_macro_cond[j])
for k in d:
r[k] = d[k]
return r
def process_med_lists(
med_status_dict: Dict[str, str], df_row: pd.Series, verbose=False
) -> List[bool]:
"""
Given a dictionary of medication status, and a row of the dataframe,
return precision and recall
"""
# process meds_retrieved
meds_retrieved = list(med_status_dict.keys())
meds_retrieved = [med.strip(' "').lower() for med in meds_retrieved]
# get meds_true
meds_true = (
clin.parse.str_to_list(df_row["active_medications"])
+ clin.parse.str_to_list(df_row["discontinued_medications"])
+ clin.parse.str_to_list(df_row["neither_medications"])
)
meds_true = [med.strip(' "').lower() for med in meds_true]
return meds_retrieved, meds_true
def get_common_medications(
med_status_dicts_list: List[List[Dict[str, str]]],
dfe: pd.DataFrame
) -> List[Set[str]]:
'''get the common retrieved medications for each row by all models
'''
n_runs_to_compare = len(med_status_dicts_list)
n = len(dfe)
common_meds = []
for i in range(n_runs_to_compare):
med_status_dict_list = med_status_dicts_list[i]
med_status_dicts = clin.parse.convert_keys_to_lowercase(med_status_dict_list)
if i == 0:
common_meds = [set(med_status_dicts[j].keys()) for j in range(n)]
else:
common_meds = [
common_meds[j].intersection(set(med_status_dicts[j].keys()))
for j in range(n)
]
# add status and only keep medications that are present in the groundtruth
def _get_status_of_med(row, med):
if med in row["active_medications"].lower():
return "active"
elif med in row["discontinued_medications"].lower():
return "discontinued"
elif med in row["neither_medications"].lower():
return "neither"
else:
return None
common_meds_status_gt_dict = [
{
med: _get_status_of_med(dfe.iloc[i], med)
for med in common_meds[i]
if _get_status_of_med(dfe.iloc[i], med) is not None
}
for i in range(len(dfe))
]
return common_meds_status_gt_dict
def eval_medication_status(
med_status_dicts_list: List[List[Dict[str, str]]],
common_meds_status_gt_dict: Dict[str, str],
verbose=False,
):
"""Compute the metrics for medication status,
conditioned on the medications that are retrieved by all models and are valid medications in the groundtruth
"""
n_runs_to_compare = len(med_status_dicts_list)
n = len(common_meds_status_gt_dict)
# compute conditional accuracy for all rows
accs_cond = []
f1s_macro_cond = []
for i in range(n_runs_to_compare):
status_extracted_list = []
status_gt_list = []
for j in range(n):
med_status_dict = clin.parse.convert_keys_to_lowercase(
[med_status_dicts_list[i][j]]
)[0]
for med in common_meds_status_gt_dict[j]:
status_extracted_list.append(med_status_dict[med])
status_gt_list.append(common_meds_status_gt_dict[j][med])
if verbose:
if not med_status_dict[med] == common_meds_status_gt_dict[j][med]:
print(
"med",
med,
"\n\t",
"pred\t",
med_status_dict[med],
"\n\t",
"gt\t",
common_meds_status_gt_dict[j][med],
)
accs_cond.append(
np.mean(np.array(status_extracted_list) == np.array(status_gt_list))
)
f1s_macro_cond.append(
sklearn.metrics.f1_score(
status_gt_list, status_extracted_list, average="macro"
)
)
return accs_cond, f1s_macro_cond
| [] |
2024-01-10 | RaulV10/Lunchi | Lunchi~fetching_recipe.py | import os
import sys
import openai
API_KEY = sys.argv[1]
openai.api_key = API_KEY
model_id = "gpt-3.5-turbo"
def chatgpt_conversation(messages):
response = openai.ChatCompletion.create(
model=model_id,
messages=messages,
temperature=0.85,
max_tokens=1000
)
return response
def main(recipe_type, must_include, dont_include):
prompt = f"Please give me a recipe for {recipe_type}. "
if must_include:
prompt = prompt + f"Please this recipe must include the following ingredients: {must_include}. "
if dont_include:
prompt = prompt + f"Please this recipe must not include the following ingredients: {dont_include}."
messages = [
{'role': 'system', 'content': 'The response must include "Title", "Ingredients", "Steps" and "Time" in a JSON format, the Time option is the estimate preparation time, but leave it only "Time"'},
{'role': 'user', 'content': prompt},
]
try:
response_content = chatgpt_conversation(messages)['choices'][0]['message']['content']
print(response_content)
except Exception as e:
print("Error fetching the recipe.")
print(e)
if __name__ == "__main__":
if len(sys.argv) >= 3:
recipe_type = sys.argv[2]
must_include = sys.argv[3]
dont_include = sys.argv[4]
main(recipe_type, must_include, dont_include)
else:
print("Please provide the API key, recipe type, must include, and don't include as command-line arguments.")
| [
"The response must include \"Title\", \"Ingredients\", \"Steps\" and \"Time\" in a JSON format, the Time option is the estimate preparation time, but leave it only \"Time\"",
"PLACEHOLDERPlease this recipe must not include the following ingredients: PLACEHOLDER.",
"Please give me a recipe for PLACEHOLDER. ",
"PLACEHOLDERPlease this recipe must include the following ingredients: PLACEHOLDER. "
] |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.