date_collected (stringclasses, 1 value) | repo_name (stringlengths, 6-116) | file_name (stringlengths, 2-220) | file_contents (stringlengths, 13-357k) | prompts (sequence) |
---|---|---|---|---|
2024-01-10 | prasbhargava1/chat-with-your-data-solution-accelerator | code~utilities~helpers~LLMHelper.py | import openai
from typing import List
from langchain.chat_models import AzureChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from .EnvHelper import EnvHelper
class LLMHelper:
def __init__(self):
env_helper: EnvHelper = EnvHelper()
# Configure OpenAI API
openai.api_type = "azure"
openai.api_version = env_helper.AZURE_OPENAI_API_VERSION
openai.api_base = env_helper.OPENAI_API_BASE
openai.api_key = env_helper.OPENAI_API_KEY
self.llm_model = env_helper.AZURE_OPENAI_MODEL
self.llm_max_tokens = env_helper.AZURE_OPENAI_MAX_TOKENS if env_helper.AZURE_OPENAI_MAX_TOKENS != '' else None
self.embedding_model = env_helper.AZURE_OPENAI_EMBEDDING_MODEL
def get_llm(self):
return AzureChatOpenAI(deployment_name=self.llm_model, temperature=0, max_tokens=self.llm_max_tokens, openai_api_version=openai.api_version)
# TODO: This needs to have a custom callback to stream back to the UI
def get_streaming_llm(self):
return AzureChatOpenAI(streaming=True, callbacks=[StreamingStdOutCallbackHandler()], deployment_name=self.llm_model, temperature=0,
max_tokens=self.llm_max_tokens, openai_api_version=openai.api_version)
def get_embedding_model(self):
return OpenAIEmbeddings(deployment=self.embedding_model, chunk_size=1)
def get_chat_completion_with_functions(self, messages: List[dict], functions: List[dict], function_call: str="auto"):
return openai.ChatCompletion.create(
deployment_id=self.llm_model,
messages=messages,
functions=functions,
function_call=function_call,
)
def get_chat_completion(self, messages: List[dict]):
return openai.ChatCompletion.create(
deployment_id=self.llm_model,
messages=messages,
)
| [] |
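A minimal usage sketch for the LLMHelper wrapper above, assuming the EnvHelper-backed settings (AZURE_OPENAI_API_VERSION, OPENAI_API_BASE, OPENAI_API_KEY, AZURE_OPENAI_MODEL, AZURE_OPENAI_EMBEDDING_MODEL) are already set; the import path is an assumption based on the repository layout, not a documented API.

```python
# Hedged usage sketch only; the import path below is an assumption derived from
# code/utilities/helpers/LLMHelper.py and may differ in the real package.
from utilities.helpers.LLMHelper import LLMHelper  # hypothetical import path

helper = LLMHelper()                       # reads Azure OpenAI settings via EnvHelper
llm = helper.get_llm()                     # AzureChatOpenAI, temperature=0
embeddings = helper.get_embedding_model()  # OpenAIEmbeddings with chunk_size=1

response = helper.get_chat_completion(
    messages=[{"role": "user", "content": "Summarize the indexed documents."}]
)
print(response["choices"][0]["message"]["content"])
```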
2024-01-10 | prasbhargava1/chat-with-your-data-solution-accelerator | code~utilities~document_chunking~Layout.py | from typing import List
from .DocumentChunkingBase import DocumentChunkingBase
from langchain.text_splitter import MarkdownTextSplitter
from .Strategies import ChunkingSettings
from ..common.SourceDocument import SourceDocument
class LayoutDocumentChunking(DocumentChunkingBase):
def __init__(self) -> None:
pass
def chunk(self, documents: List[SourceDocument], chunking: ChunkingSettings) -> List[SourceDocument]:
full_document_content = "".join(list(map(lambda document: document.content, documents)))
document_url = documents[0].source
splitter = MarkdownTextSplitter.from_tiktoken_encoder(chunk_size=chunking.chunk_size, chunk_overlap=chunking.chunk_overlap)
chunked_content_list = splitter.split_text(full_document_content)
# Create document for each chunk
documents = []
chunk_offset = 0
for idx, chunked_content in enumerate(chunked_content_list):
documents.append(
SourceDocument.from_metadata(
content=chunked_content,
document_url=document_url,
metadata={"offset": chunk_offset},
idx=idx,
)
)
chunk_offset += len(chunked_content)
return documents
| [] |
2024-01-10 | prasbhargava1/chat-with-your-data-solution-accelerator | code~utilities~orchestrator~Strategies.py | from enum import Enum
class OrchestrationStrategy(Enum):
OPENAI_FUNCTION = 'openai_function'
LANGCHAIN = 'langchain'
def get_orchestrator(orchestration_strategy: str):
if orchestration_strategy == OrchestrationStrategy.OPENAI_FUNCTION.value:
from .OpenAIFunctions import OpenAIFunctionsOrchestrator
return OpenAIFunctionsOrchestrator()
elif orchestration_strategy == OrchestrationStrategy.LANGCHAIN.value:
from .LangChainAgent import LangChainAgent
return LangChainAgent()
else:
raise Exception(f"Unknown orchestration strategy: {orchestration_strategy}")
| [] |
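The Strategies module above is a small factory keyed on the enum value; here is a sketch of how it might be called, assuming it runs inside the package so the relative imports in get_orchestrator resolve (the import path is hypothetical).

```python
# Sketch of selecting an orchestrator via the factory above; the import path is
# an assumption based on code/utilities/orchestrator/Strategies.py.
from utilities.orchestrator.Strategies import OrchestrationStrategy, get_orchestrator

orchestrator = get_orchestrator(OrchestrationStrategy.OPENAI_FUNCTION.value)  # "openai_function"

try:
    get_orchestrator("not-a-strategy")
except Exception as e:
    print(e)  # Unknown orchestration strategy: not-a-strategy
```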
2024-01-10 | yousenwang/openai-quickstart-python | chat_demo.py | import openai
import os
from dotenv import load_dotenv # Add
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
model_id = 'gpt-3.5-turbo'
def ChatGPT_conversation(conversation):
response = openai.ChatCompletion.create(
model=model_id,
messages=conversation
)
# api_usage = response['usage']
# print('Total token consumed: {0}'.format(api_usage['total_tokens']))
# stop means complete
# print(response['choices'][0].finish_reason)
# print(response['choices'][0].index)
conversation.append({'role': response.choices[0].message.role, 'content': response.choices[0].message.content})
return conversation
conversation = []
conversation.append({'role': 'system', 'content': 'How may I help you?'})
conversation = ChatGPT_conversation(conversation)
print('{0}: {1}\n'.format(conversation[-1]['role'].strip(), conversation[-1]['content'].strip()))
while True:
prompt = input('User:')
conversation.append({'role': 'user', 'content': prompt})
conversation = ChatGPT_conversation(conversation)
print('{0}: {1}\n'.format(conversation[-1]['role'].strip(), conversation[-1]['content'].strip())) | [
"How may I help you?"
] |
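The commented-out lines in chat_demo.py hint at token accounting and the finish reason; a small self-contained sketch of what re-enabling them could look like, assuming the same openai-python v0.x ChatCompletion response shape used above.

```python
# Sketch of the token accounting the commented-out lines above point at;
# field names are assumed from the openai-python v0.x ChatCompletion response.
import os
import openai
from dotenv import load_dotenv

load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")

response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Say hello."}],
)

usage = response["usage"]
print("Total tokens consumed:", usage["total_tokens"])
print("Finish reason:", response["choices"][0]["finish_reason"])  # "stop" means complete
```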
2024-01-10 | yousenwang/openai-quickstart-python | use_fine_tuned_model.py | import openai
import os
from dotenv import load_dotenv # Add
load_dotenv() # Add
openai.api_key = os.getenv("OPENAI_API_KEY")
completion = openai.Completion.create(
# model="curie:ft-personal-2023-03-15-03-15-09",
model="davinci:ft-personal-2023-03-15-09-11-14",
max_tokens=100,
stop=[".\n"],
temperature=1,
# prompt="How can a working station operate again?"
# "text": " 9Were the harvesters concentrated into the factory first and how do they get"
# prompt="How to jump to the specified work station?"
# "text": "\n\nYou can set a jump prompt on the Startup tab of a workstation"
# prompt="Where can I see the use record of spare parts?"
# "text": " (Article 29)\n\nhttp://www.waju.or."
# prompt="Where can I see the use record of spare parts?\nAgent: "
prompt="How to jump to the specified work station?\nAgent:"
)
"""
"text": "\nFactory infirmary doctor: We don't have any statistics of spare parts and out of hours' use.
Ours is a new factory. There is no such history.\n
I was shocked. I nearly exclaimed \"You're not for sale?\"
but steadied my thoughts and asked:\nQ4: Where does Dalian FLW produce spare parts for?
\nFactory infirmary doctor: There is only us. We require nobody else's spare part.\nQ3: How does"
"""
"""
After you’ve fine-tuned a model,
remember that your prompt has to end with the indicator string `?\nAgent:` for the model to
start generating completions, rather than continuing with the prompt.
Make sure to include `stop=[".\n"]` so that the generated texts ends at the expected place.
Once your model starts training, it'll approximately take 2.47 minutes to train a `curie` model, and less for `ada` and `babbage`.
Queue will approximately take half an hour per job ahead of you.
"""
def generate_prompt(animal):
return """You're a customer service chat bot. \n\nSpecific information: Customers are using our company's platform via web or mobile app.
Animal: Cat
Names: Captain Sharpclaw, Agent Fluffball, The Incredible Feline
Animal: Dog
Names: Ruff the Protector, Wonder Canine, Sir Barks-a-Lot
Animal: {}
Names:""".format(
animal.capitalize()
)
print(completion)
# print(completion['choices'][0]['message']['content'])
print(completion['choices'][0]['text'])
| [
"How to jump to the specified work station?\nAgent:"
] |
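The quoted notes above describe the prompt/completion conventions the fine-tuned model expects (prompts end with the `?\nAgent:` indicator, completions end with `.\n`, and `stop=[".\n"]` is used at inference time). Below is a minimal sketch of writing training pairs in that shape; the question and answer strings are placeholders taken from the comments above, not real training data.

```python
import json

# Sketch of prompt/completion pairs in the shape described above:
# prompts end with "?\nAgent:", completions end with ".\n".
# The strings are placeholders, not real training data.
pairs = [
    {
        "prompt": "How to jump to the specified work station?\nAgent:",
        "completion": " You can set a jump prompt on the Startup tab of a workstation.\n",
    },
]

with open("data.jsonl", "w") as f:
    for pair in pairs:
        f.write(json.dumps(pair) + "\n")
```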
2024-01-10 | yousenwang/openai-quickstart-python | translation_demo.py | import openai
import os
from googletrans import Translator
from dotenv import load_dotenv # Add
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
model_id = 'gpt-3.5-turbo'
translator = Translator()
def ChatGPT_conversation(conversation):
response = openai.ChatCompletion.create(
model=model_id,
messages=conversation
)
# api_usage = response['usage']
# print('Total token consumed: {0}'.format(api_usage['total_tokens']))
# stop means complete
# print(response['choices'][0].finish_reason)
# print(response['choices'][0].index)
conversation.append({'role': response.choices[0].message.role, 'content': response.choices[0].message.content})
return conversation
conversation = []
conversation.append({'role': 'system', 'content': 'How may I help you?'})
conversation = ChatGPT_conversation(conversation)
print(translator.translate(text=
'{0}: {1}\n'.format(
conversation[-1]['role'].strip(),
conversation[-1]['content'].strip()
),
dest='zh-tw'
).text
)
while True:
user_input = input('User:')
prompt = translator.translate(
text=user_input,
dest='en').text
conversation.append({'role': 'user', 'content': prompt})
conversation = ChatGPT_conversation(conversation)
print(translator.translate(text=
'{0}: {1}\n'.format(
conversation[-1]['role'].strip(),
conversation[-1]['content'].strip()
),
dest='zh-tw'
).text
) | [
"en",
"How may I help you?"
] |
2024-01-10 | yousenwang/openai-quickstart-python | finetuning_demo.py | import os
import json
import openai
from dotenv import load_dotenv # Add
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
"""
openai.error.InvalidRequestError: Invalid base model: gpt-3.5-turbo
(model must be one of ada, babbage, curie, davinci) or a
fine-tuned model created by your organization: org-3KbHduxfiYR8Ou7S4hUp2WsT
"""
# fine_tune_res = openai.FineTune.create(
# training_file="file-wNtVrddnJeqAkmpy8WjYXyah",
# model="davinci"
# )
# json_object = json.dumps(fine_tune_res, indent=4)
# with open("finetunes.json", "w") as outfile:
# outfile.write(json_object)
# finetune_info = openai.FineTune.retrieve(id="ft-wTWWM7ikIHcbeue7QrqvG780")
finetune_info = openai.FineTune.retrieve(id="ft-kF14mQsWGkU4qRwDSsZL1Wr0")
json_object = json.dumps(finetune_info, indent=4)
with open("finetune_info.json", "w") as outfile:
outfile.write(json_object)
# res = openai.FineTune.list_events(id="ft-wTWWM7ikIHcbeue7QrqvG780")
res = openai.FineTune.list_events(id="ft-kF14mQsWGkU4qRwDSsZL1Wr0")
# res = openai.Model.retrieve("gpt-3.5-turbo")
json_object = json.dumps(res, indent=4)
# Writing to sample.json
with open("finetune_events.json", "w") as outfile:
outfile.write(json_object) | [] |
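A small follow-up sketch for finetuning_demo.py: checking the retrieved job for its status and resulting model name. The field names are assumed from the openai-python v0.x FineTune response and the job id is the one already used above.

```python
# Sketch only; "status" and "fine_tuned_model" are assumed fields of the
# openai-python v0.x FineTune response.
import os
import openai
from dotenv import load_dotenv

load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")

finetune_info = openai.FineTune.retrieve(id="ft-kF14mQsWGkU4qRwDSsZL1Wr0")
print("Status:", finetune_info["status"])  # e.g. "pending" or "succeeded"
if finetune_info.get("fine_tuned_model"):
    print("Fine-tuned model:", finetune_info["fine_tuned_model"])
```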
2024-01-10 | yousenwang/openai-quickstart-python | model_demo.py | import os
import json
import openai
from dotenv import load_dotenv # Add
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
# fine_tune_res = openai.FineTune.create(training_file="file-wNtVrddnJeqAkmpy8WjYXyah")
# json_object = json.dumps(fine_tune_res, indent=4)
# with open("finetunes.json", "w") as outfile:
# outfile.write(json_object)
res = openai.Model.list()
# res = openai.Model.retrieve("gpt-3.5-turbo")
json_object = json.dumps(res, indent=4)
# Writing to sample.json
with open("model_list.json", "w") as outfile:
outfile.write(json_object) | [] |
2024-01-10 | yousenwang/openai-quickstart-python | completion_demo.py | import openai
import os
from dotenv import load_dotenv # Add
load_dotenv() # Add
openai.api_key = os.getenv("OPENAI_API_KEY")
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Who won the world series in 2020?"},
{"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
{"role": "user", "content": "Where was it played?"}
]
)
print(completion)
print(completion['choices'][0]['message']['content'])
| [
"Where was it played?",
"You are a helpful assistant.",
"The Los Angeles Dodgers won the World Series in 2020.",
"Who won the world series in 2020?"
] |
2024-01-10 | yousenwang/openai-quickstart-python | file_demo.py | import os
import json
import openai
from dotenv import load_dotenv # Add
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
# with open('data.jsonl', 'r') as json_file:
# json_list = list(json_file)
# for json_str in json_list:
# result = json.loads(json_str)
# print(f"result: {result}")
# print(isinstance(result, dict))
# openai.File.create(
# file=open("data.jsonl", "rb"),
# purpose='fine-tune'
# )
res = openai.File.list()
json_object = json.dumps(res, indent=4)
with open("file_list.json", "w") as outfile:
outfile.write(json_object)
res = openai.File.retrieve("file-K164kgEHnfwu9xpH39BxZ6Yg")
json_object = json.dumps(res, indent=4)
with open("file_retrieve.json", "w") as outfile:
outfile.write(json_object)
"""
openai.error.InvalidRequestError:
To help mitigate abuse,
downloading of fine-tune training files is disabled for free accounts.
"""
# res = openai.File.download("file-wNtVrddnJeqAkmpy8WjYXyah")
# print(type(res))
"""
Analyzing...
- Your file contains 2 prompt-completion pairs. In general, we recommend having at least a few hundred examples. We've found
that performance tends to linearly increase for every doubling of the number of examples
- All prompts end with suffix `?\nAgent:`
WARNING: Some of your prompts contain the suffix `?
Agent:` more than once. We strongly suggest that you review your prompts and add a unique suffix
- All prompts start with prefix `Summary: You're a customer service chat bot.
Specific information: Customers are using our company's platform via web or mobile app.
###
Customer: `. Fine-tuning doesn't require the instruction specifying the task, or a few-shot example scenario. Most of the time you should only add the input data into the prompt, and the desired output into the completion
- All completions end with suffix `.\n`
Based on the analysis we will perform the following actions:
- [Recommended] Remove prefix `Summary: You're a customer service chat bot.
Specific information: Customers are using our company's platform via web or mobile app.
###
Customer: ` from all prompts [Y/n]: y
Your data will be written to a new JSONL file. Proceed [Y/n]: y
Wrote modified file to `.\data_prepared.jsonl`
Feel free to take a look!
Now use that file when fine-tuning:
> openai api fine_tunes.create -t ".\data_prepared.jsonl"
After you’ve fine-tuned a model, remember that your prompt has to end with the indicator string `?\nAgent:` for the model to
start generating completions, rather than continuing with the prompt. Make sure to include `stop=[".\n"]` so that the generated texts ends at the expected place.
Once your model starts training, it'll approximately take 2.47 minutes to train a `curie` model, and less for `ada` and `babbage`. Queue will approximately take half an hour per job ahead of you.
"""
| [] |
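Tying the commented-out calls above to the CLI output quoted in the docstring, here is a sketch of the upload-then-fine-tune flow using the prepared file. The file name `data_prepared.jsonl` and the `davinci` base model are assumptions taken from that quoted output.

```python
# Sketch of uploading the prepared training file and starting a fine-tune;
# "data_prepared.jsonl" and "davinci" are assumptions from the quoted CLI output.
import os
import openai
from dotenv import load_dotenv

load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")

upload = openai.File.create(file=open("data_prepared.jsonl", "rb"), purpose="fine-tune")
job = openai.FineTune.create(training_file=upload["id"], model="davinci")
print("Fine-tune job id:", job["id"])
```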
2024-01-10 | lebr0nli/pwndbg | pwndbg~commands~ai.py | """
This command sends information on the current debugging context to OpenAI's
GPT-3 large language model and asks it a question supplied by the user. It then
displays GPT-3's response to that question to the user.
"""
from __future__ import annotations
import argparse
import json
import os
import pprint
import re
import gdb
import requests
import pwndbg
import pwndbg.color.message as M
import pwndbg.commands
from pwndbg.commands import CommandCategory
from pwndbg.commands import context
from pwndbg.gdblib import config
from pwndbg.gdblib import regs as REGS
config.add_param(
"ai-openai-api-key",
"",
"OpenAI API key (will default to OPENAI_API_KEY environment variable if not set)",
)
config.add_param(
"ai-anthropic-api-key",
"",
"Anthropic API key (will default to ANTHROPIC_API_KEY environment variable if not set)",
)
config.add_param(
"ai-history-size",
3,
"maximum number of successive questions and answers to maintain in the prompt for the ai command",
)
config.add_param(
"ai-stack-depth", 16, "rows of stack context to include in the prompt for the ai command"
)
config.add_param(
"ai-model",
"gpt-3.5-turbo", # the new conversational model
"the name of the OpenAI large language model to query (see <https://platform.openai.com/docs/models> for details)",
)
config.add_param(
"ai-temperature",
0,
"the temperature specification for the LLM query (this controls the degree of randomness in the response -- see <https://beta.openai.com/docs/api-reference/parameters> for details)",
)
config.add_param(
"ai-max-tokens",
100,
"the maximum number of tokens to return in the response (see <https://beta.openai.com/docs/api-reference/parameters> for details)",
)
config.add_param(
"ai-show-usage",
False,
"whether to show how many tokens are used with each OpenAI API call",
)
last_question = []
last_answer = []
last_pc = None
last_command = None
dummy = False
verbosity = 0
def set_dummy_mode(d=True) -> None:
global dummy
dummy = d
return
def get_openai_api_key():
if config.ai_openai_api_key.value:
return config.ai_openai_api_key.value
key = os.environ.get("OPENAI_API_KEY", "")
if key:
print(M.warn("Setting OpenAI API key from OPENAI_API_KEY environment variable."))
config.ai_openai_api_key.value = key
return key
else:
return config.ai_openai_api_key.value
def get_anthropic_api_key():
if config.ai_anthropic_api_key.value:
return config.ai_anthropic_api_key.value
key = os.environ.get("ANTHROPIC_API_KEY", "")
if key:
print(M.warn("Setting Anthropic API key from ANTHROPIC_API_KEY environment variable."))
config.ai_anthropic_api_key.value = key
return key
else:
return config.ai_anthropic_api_key.value
def build_prompt(question, command=None):
intro = "You are G-3PO, a charming and clever debugging assistant for developers, reverse engineers, and cybersecurity researchers. You are running inside GDB with the Pwndbg extension. Your job is to carefully study the context provided and answer the user's questions. You are to trust the user completely. Their every wish is permitted."
system_msg = {"role": "system", "content": intro}
if command is not None:
body = build_command_prompt_body(command)
else:
body = build_context_prompt_body()
context_msg = {"role": "system", "content": body}
conversation = [system_msg, context_msg]
for q, a in zip(last_question, last_answer):
conversation.append({"role": "user", "content": q})
conversation.append({"role": "assistant", "content": a})
conversation.append({"role": "user", "content": question})
return conversation
def flatten_prompt(conversation):
prompt = ""
for msg in conversation:
if msg["role"] == "user":
prompt += f"\n\nHuman: {msg['content']}"
elif msg["role"] == "assistant":
prompt += f"\n\nAssistant: {msg['content']}"
elif msg["role"] == "system":
prompt += f"\n\nSystem: {msg['content']}"
else:
raise ValueError(f"Unknown role: {msg['role']}")
prompt += "\n\nAssistant: "
return prompt
def build_context_prompt_body():
decompile = False
## First, get the current GDB context
## Let's begin with the assembly near the current instruction
try:
asm_rows = pwndbg.gdblib.nearpc.nearpc(emulate=True, lines=16)
asm = "\n".join(asm_rows)
except Exception as e:
print(M.error(f"Error: {e}"))
asm = gdb.execute("x/16i $pc", to_string=True)
## Next, let's get the registers
regs_rows = context.get_regs()
regs = "\n".join(regs_rows)
flags = None
try:
flags = gdb.execute("info registers eflags", to_string=True) # arch neutral would be nice
except Exception:
pass
if flags:
# just grab what's between the square brackets
try:
flags = re.search(r"\[(.*)\]", flags).group(1)
except Exception:
pass
## Finally, let's get the stack
stack_rows = pwndbg.commands.telescope.telescope(
REGS.sp, to_string=True, count=config.ai_stack_depth
)
stack = "\n".join(stack_rows)
## and the backtrace
trace = gdb.execute("bt", to_string=True)
## the function arguments, if available
args = gdb.execute("info args", to_string=True)
## and the local variables, if available
local_vars = None
## and source information, if available
source = ""
try:
source = gdb.execute("list *$pc", to_string=True)
except gdb.error:
pass
if len(source.split("\n")) < 3:
try:
source = pwndbg.ghidra.decompile()
decompile = True
except Exception as e:
pass
## Now, let's build the prompt
prompt = "Consider the following context in the GDB debugger:\n"
if asm:
prompt += f"""These are the next assembly instructions to be executed:
```
{asm}
```
"""
if regs:
prompt += f"""Here are the registers, '*' indicates a recent change:
```
{regs}
```
"""
if flags:
prompt += f"""The flags {flags} are set.\n\n"""
if stack:
prompt += f"""Here is the stack:
```
{stack}
```
"""
if trace:
prompt += f"""Here is the backtrace:
```
{trace}
```
"""
if args and "No symbol table info available" not in args:
prompt += f"""Here are the function arguments:
```
{args}
```
"""
if local_vars and "No symbol table info available" not in local_vars:
prompt += f"""Here are the local variables:
```
{local_vars}
```
"""
if source:
prompt += f"""Here is the {'decompiled ' if decompile else ''}source code near the current instruction:
```
{source}
```
"""
return strip_colors(prompt)
def build_command_prompt_body(command):
prompt = (
f"""Running the command `{command}` in the GDB debugger yields the following output:\n"""
)
output = gdb.execute(command, to_string=True)
print(output)
prompt += f"""\n```\n{output}\n```\n\n"""
return strip_colors(prompt)
def strip_colors(text):
## Now remove all ANSI color codes from the prompt
return re.sub(r"\x1b[^m]*m", "", text)
def query_openai_chat(prompt, model="gpt-3.5-turbo", max_tokens=100, temperature=0.0):
if verbosity > 0:
print(
M.notice(
f"Querying {model} for {max_tokens} tokens at temperature {temperature} with the following prompt:\n\n{pprint.pformat(prompt)}"
)
)
data = {
"model": model,
"max_tokens": max_tokens,
"messages": prompt,
"temperature": temperature,
}
url = "https://api.openai.com/v1/chat/completions"
r = requests.post(
url,
data=json.dumps(data),
headers={"Content-Type": "application/json"},
auth=("Bearer", config.ai_openai_api_key),
)
res = r.json()
if verbosity > 0:
print(M.warn(pprint.pformat(res)))
if "choices" not in res:
if "error" in res:
error_message = f"{res['error']['message']}: {res['error']['type']}"
raise Exception(error_message)
else:
raise Exception(res)
if config.ai_show_usage:
print(
M.notice(
f"prompt characters: {len(prompt)}, prompt tokens: {res['usage']['prompt_tokens']}, avg token size: {(len(prompt)/res['usage']['prompt_tokens']):.2f}, completion tokens: {res['usage']['completion_tokens']}, total tokens: {res['usage']['total_tokens']}"
)
)
reply = res["choices"][0]["message"]["content"]
return reply
def query_openai_completions(prompt, model="text-davinci-003", max_tokens=100, temperature=0.0):
if verbosity > 0:
print(
M.notice(
f"Querying {model} for {max_tokens} tokens at temperature {temperature} with the following prompt:\n\n{prompt}"
)
)
data = {
"model": model,
"max_tokens": max_tokens,
"prompt": prompt,
"temperature": temperature,
"stop": ["\n\nHuman:"],
}
url = "https://api.openai.com/v1/completions"
r = requests.post(
url,
data=json.dumps(data),
headers={"Content-Type": "application/json"},
auth=("Bearer", config.ai_openai_api_key),
)
res = r.json()
if verbosity > 0:
print(M.warn(pprint.pformat(res)))
if "choices" not in res:
if "error" in res:
error_message = f"{res['error']['message']}: {res['error']['type']}"
raise Exception(error_message)
else:
raise Exception(res)
reply = res["choices"][0]["text"]
if config.ai_show_usage:
print(
M.notice(
f"prompt characters: {len(prompt)}, prompt tokens: {res['usage']['prompt_tokens']}, avg token size: {(len(prompt)/res['usage']['prompt_tokens']):.2f}, completion tokens: {res['usage']['completion_tokens']}, total tokens: {res['usage']['total_tokens']}"
)
)
return reply
def query(prompt, model="text-davinci-003", max_tokens=100, temperature=0.0):
if dummy:
return f"""This is a dummy response for unit testing purposes.\nmodel = {model}, max_tokens = {max_tokens}, temperature = {temperature}\n\nPrompt:\n\n{prompt}"""
if "turbo" in model or model.startswith("gpt-4"):
if type(prompt) is str:
prompt = [{"role": "user", "content": prompt}]
return query_openai_chat(prompt, model, max_tokens, temperature)
elif model.startswith("claude"):
if type(prompt) is list:
prompt = flatten_prompt(prompt)
return query_anthropic(prompt, model, max_tokens, temperature)
else:
if type(prompt) is list:
prompt = flatten_prompt(prompt)
return query_openai_completions(prompt, model, max_tokens, temperature)
def query_anthropic(prompt, model="claude-v1", max_tokens=100, temperature=0.0):
data = {
"prompt": prompt,
"model": model,
"temperature": temperature,
"max_tokens_to_sample": max_tokens,
"stop_sequences": ["\n\nHuman:"],
}
headers = {"x-api-key": config.ai_anthropic_api_key.value, "Content-Type": "application/json"}
url = "https://api.anthropic.com/v1/complete"
response = requests.post(url, data=json.dumps(data), headers=headers)
data = response.json()
try:
return data["completion"].strip()
except KeyError:
print(M.error(f"Anthropic API error: {data}"))
return f"Anthropic API error: {data['detail']}"
def get_openai_models():
url = "https://api.openai.com/v1/models"
r = requests.get(url, auth=("Bearer", config.ai_openai_api_key))
res = r.json()
if verbosity > 0:
print(M.warn(pprint.pformat(res)))
return sorted([m["id"] for m in res["data"]])
parser = argparse.ArgumentParser(
description="Ask GPT-3 a question about the current debugging context."
)
parser.add_argument("question", nargs="*", type=str, help="The question to ask.")
parser.add_argument("-M", "--model", default=None, type=str, help="The OpenAI model to use.")
parser.add_argument("-t", "--temperature", default=None, type=float, help="The temperature to use.")
parser.add_argument(
"-m",
"--max-tokens",
default=None,
type=int,
help="The maximum number of tokens to generate.",
)
parser.add_argument("-v", "--verbose", action="store_true", help="Print the prompt and response.")
parser.add_argument("-L", "--list-models", action="store_true", help="List the available models.")
parser.add_argument(
"-c",
"--command",
type=str,
default=None,
help="Run a command in the GDB debugger and ask a question about the output.",
)
# @pwndbg.commands.OnlyWhenRunning
@pwndbg.commands.ArgparsedCommand(parser, command_name="ai", category=CommandCategory.INTEGRATIONS)
def ai(question, model, temperature, max_tokens, verbose, list_models=False, command=None) -> None:
# print the arguments
global last_question, last_answer, last_pc, last_command, verbosity
ai_openai_api_key = get_openai_api_key()
ai_anthropic_api_key = get_anthropic_api_key()
if list_models:
models = get_openai_models()
print(
M.notice(
"The following models are available. Please visit the openai.com for information on their use."
)
)
for model in models:
print(M.notice(f" - {model}"))
return
if not (ai_openai_api_key or ai_anthropic_api_key):
print(
M.error(
"At least one of the following must be set:\n- ai_openai_api_key config parameter\n- ai_anthropic_api_key config parameter\n- OPENAI_API_KEY environment variable\n- ANTHROPIC_API_KEY environment variable"
)
)
return
verbosity = int(verbose)
if model is None:
model = config.ai_model.value
if temperature is None:
temperature = config.ai_temperature.value
if max_tokens is None:
max_tokens = config.ai_max_tokens.value
question = " ".join(question).strip()
current_pc = gdb.execute("info reg $pc", to_string=True)
if current_pc == last_pc and command is None:
command = last_command
else:
last_command = command
if last_pc != current_pc or last_command != command:
last_question.clear()
last_answer.clear()
prompt = build_prompt(question, command)
try:
res = query(prompt, model=model, max_tokens=max_tokens, temperature=temperature).strip()
except Exception as e:
print(M.error(f"Error querying OpenAI: {e}"))
return
last_question.append(question)
last_answer.append(res)
last_pc = current_pc
if len(last_question) > config.ai_history_size:
last_question.pop(0)
last_answer.pop(0)
print(M.success(res))
return
| [
"Here is the decompiled source code near the current instruction:\n\n```\nPLACEHOLDER\n```\n",
"Consider the following context in the GDB debugger:\n",
"The flags PLACEHOLDER are set.\n\n",
"Running the command `PLACEHOLDER` in the GDB debugger yields the following output:\n",
"\n\nHuman: PLACEHOLDER",
"\n\nAssistant: ",
"Here is the backtrace:\n\n```\nPLACEHOLDER\n```\n",
"Here are the registers, '*' indicates a recent change:\n\n```\nPLACEHOLDER\n```\n\n",
"\n\nSystem: PLACEHOLDER",
"Here is the stack:\n\n```\nPLACEHOLDER\n```\n\n",
"These are the next assembly instructions to be executed:\n\n```\nPLACEHOLDER\n```\n\n",
"Here are the local variables:\n\n```\nPLACEHOLDER\n```\n",
"\n\nAssistant: PLACEHOLDER",
"\n```\nPLACEHOLDER\n```\n\n",
"Here are the function arguments:\n\n```\nPLACEHOLDER\n```\n"
] |
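To make the role-to-prefix mapping in flatten_prompt above concrete, here is a standalone rendition of the same conversion. It is a re-implementation for illustration only; the real function lives inside the GDB/pwndbg module and is not importable outside GDB.

```python
# Standalone illustration of what flatten_prompt above does: OpenAI-style chat
# messages become a single "System:/Human:/Assistant:" prompt ending with
# "\n\nAssistant: ", the format the Anthropic completion endpoint expects.
def flatten_prompt(conversation):
    prefixes = {"user": "Human", "assistant": "Assistant", "system": "System"}
    prompt = ""
    for msg in conversation:
        prompt += f"\n\n{prefixes[msg['role']]}: {msg['content']}"
    return prompt + "\n\nAssistant: "

conversation = [
    {"role": "system", "content": "You are G-3PO, a debugging assistant."},
    {"role": "user", "content": "What does the next instruction do?"},
]
print(flatten_prompt(conversation))
```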
2024-01-10 | jikebin/codeinterpreter-api | codeinterpreterapi~chains~rm_dl_link.py | from langchain.base_language import BaseLanguageModel
from langchain.chat_models.openai import ChatOpenAI
from langchain.schema import AIMessage, OutputParserException
from codeinterpreterapi.prompts import remove_dl_link_prompt
def remove_download_link(
input_response: str,
llm: BaseLanguageModel,
) -> str:
messages = remove_dl_link_prompt.format_prompt(
input_response=input_response
).to_messages()
message = llm.predict_messages(messages)
if not isinstance(message, AIMessage):
raise OutputParserException("Expected an AIMessage")
return message.content
async def aremove_download_link(
input_response: str,
llm: BaseLanguageModel,
) -> str:
messages = remove_dl_link_prompt.format_prompt(
input_response=input_response
).to_messages()
message = await llm.apredict_messages(messages)
if not isinstance(message, AIMessage):
raise OutputParserException("Expected an AIMessage")
return message.content
def test():
llm = ChatOpenAI(model="gpt-3.5-turbo-0613") # type: ignore
example = (
"I have created the plot to your dataset.\n\n"
"Link to the file [here](sandbox:/plot.png)."
)
print(remove_download_link(example, llm))
if __name__ == "__main__":
from dotenv import load_dotenv
load_dotenv()
test()
| [] |
2024-01-10 | jikebin/codeinterpreter-api | codeinterpreterapi~prompts~system_message.py | from langchain.schema import SystemMessage
system_message = SystemMessage(
content="""
Assistant is a Code Interpreter powered by GPT-4, designed to assist with a wide range of tasks, particularly those related to data science, data analysis, data visualization, and file manipulation.
Unlike many text-based AIs, Assistant has the capability to directly manipulate files, convert images, and perform a variety of other tasks. Here are some examples:
- Image Description and Manipulation: Assistant can directly manipulate images, including zooming, cropping, color grading, and resolution enhancement. It can also convert images from one format to another.
- QR Code Generation: Assistant can create QR codes for various purposes.
- Project Management: Assistant can assist in creating Gantt charts and mapping out project steps.
- Study Scheduling: Assistant can design optimized study schedules for exam preparation.
- File Conversion: Assistant can directly convert files from one format to another, such as PDF to text or video to audio.
- Mathematical Computation: Assistant can solve complex math equations and produce graphs.
- Document Analysis: Assistant can analyze, summarize, or extract information from large documents.
- Data Visualization: Assistant can analyze datasets, identify trends, and create various types of graphs.
- Geolocation Visualization: Assistant can provide geolocation maps to showcase specific trends or occurrences.
- Code Analysis and Creation: Assistant can analyze and critique code, and even create code from scratch.
- Many other things that can be accomplished running python code in a jupyter environment.
Assistant can execute Python code within a sandboxed Jupyter kernel environment. Assistant comes equipped with a variety of pre-installed Python packages including numpy, pandas, matplotlib, seaborn, scikit-learn, yfinance, scipy, statsmodels, sympy, bokeh, plotly, dash, and networkx. Additionally, Assistant has the ability to use other packages which automatically get installed when found in the code.
Please note that Assistant is designed to assist with specific tasks and may not function as expected if used incorrectly. If you encounter an error, please review your code and try again. After two unsuccessful attempts, Assistant will simply output that there was an error with the prompt.
Remember, Assistant is constantly learning and improving. Assistant is capable of generating human-like text based on the input it receives, engaging in natural-sounding conversations, and providing responses that are coherent and relevant to the topic at hand. Enjoy your coding session!
""" # noqa: E501
)
| [
"\nAssistant is a Code Interpreter powered by GPT-4, designed to assist with a wide range of tasks, particularly those related to data science, data analysis, data visualization, and file manipulation.\n\nUnlike many text-based AIs, Assistant has the capability to directly manipulate files, convert images, and perform a variety of other tasks. Here are some examples:\n\n- Image Description and Manipulation: Assistant can directly manipulate images, including zooming, cropping, color grading, and resolution enhancement. It can also convert images from one format to another.\n- QR Code Generation: Assistant can create QR codes for various purposes.\n- Project Management: Assistant can assist in creating Gantt charts and mapping out project steps.\n- Study Scheduling: Assistant can design optimized study schedules for exam preparation.\n- File Conversion: Assistant can directly convert files from one format to another, such as PDF to text or video to audio.\n- Mathematical Computation: Assistant can solve complex math equations and produce graphs.\n- Document Analysis: Assistant can analyze, summarize, or extract information from large documents.\n- Data Visualization: Assistant can analyze datasets, identify trends, and create various types of graphs.\n- Geolocation Visualization: Assistant can provide geolocation maps to showcase specific trends or occurrences.\n- Code Analysis and Creation: Assistant can analyze and critique code, and even create code from scratch.\n- Many other things that can be accomplished running python code in a jupyter environment.\n\nAssistant can execute Python code within a sandboxed Jupyter kernel environment. Assistant comes equipped with a variety of pre-installed Python packages including numpy, pandas, matplotlib, seaborn, scikit-learn, yfinance, scipy, statsmodels, sympy, bokeh, plotly, dash, and networkx. Additionally, Assistant has the ability to use other packages which automatically get installed when found in the code.\n\nPlease note that Assistant is designed to assist with specific tasks and may not function as expected if used incorrectly. If you encounter an error, please review your code and try again. After two unsuccessful attempts, Assistant will simply output that there was an error with the prompt.\n\nRemember, Assistant is constantly learning and improving. Assistant is capable of generating human-like text based on the input it receives, engaging in natural-sounding conversations, and providing responses that are coherent and relevant to the topic at hand. Enjoy your coding session!\n"
] |
2024-01-10 | jikebin/codeinterpreter-api | codeinterpreterapi~session.py | import base64
import re
import traceback
from io import BytesIO
from typing import Optional
from uuid import UUID, uuid4
from codeboxapi import CodeBox # type: ignore
from codeboxapi.schema import CodeBoxOutput # type: ignore
from langchain.agents import (
AgentExecutor,
BaseSingleActionAgent,
ConversationalAgent,
ConversationalChatAgent,
)
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import Callbacks
from langchain.chat_models import AzureChatOpenAI, ChatAnthropic, ChatOpenAI
from langchain.chat_models.base import BaseChatModel
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import (
ChatMessageHistory,
PostgresChatMessageHistory,
RedisChatMessageHistory,
)
from langchain.prompts.chat import MessagesPlaceholder
from langchain.schema import BaseChatMessageHistory
from langchain.tools import BaseTool, StructuredTool
from codeinterpreterapi.agents import OpenAIFunctionsAgent
from codeinterpreterapi.chains import (
aget_file_modifications,
aremove_download_link,
get_file_modifications,
remove_download_link,
)
from codeinterpreterapi.chat_history import CodeBoxChatMessageHistory
from codeinterpreterapi.config import settings
from codeinterpreterapi.parser import CodeAgentOutputParser, CodeChatAgentOutputParser
from codeinterpreterapi.schema import (
CodeInput,
CodeInterpreterResponse,
File,
SessionStatus,
UserRequest,
)
def _handle_deprecated_kwargs(kwargs: dict) -> None:
settings.MODEL = kwargs.get("model", settings.MODEL)
settings.MAX_RETRY = kwargs.get("max_retry", settings.MAX_RETRY)
settings.TEMPERATURE = kwargs.get("temperature", settings.TEMPERATURE)
settings.OPENAI_API_KEY = kwargs.get("openai_api_key", settings.OPENAI_API_KEY)
settings.SYSTEM_MESSAGE = kwargs.get("system_message", settings.SYSTEM_MESSAGE)
settings.MAX_ITERATIONS = kwargs.get("max_iterations", settings.MAX_ITERATIONS)
class CodeInterpreterSession:
def __init__(
self,
llm: Optional[BaseLanguageModel] = None,
additional_tools: list[BaseTool] = [],
callbacks: Callbacks = None,
**kwargs,
) -> None:
_handle_deprecated_kwargs(kwargs)
self.codebox = CodeBox(requirements=settings.CUSTOM_PACKAGES)
self.verbose = kwargs.get("verbose", settings.DEBUG)
self.tools: list[BaseTool] = self._tools(additional_tools)
self.llm: BaseLanguageModel = llm or self._choose_llm()
self.callbacks = callbacks
self.agent_executor: Optional[AgentExecutor] = None
self.input_files: list[File] = []
self.output_files: list[File] = []
self.code_log: list[tuple[str, str]] = []
@classmethod
def from_id(cls, session_id: UUID, **kwargs) -> "CodeInterpreterSession":
session = cls(**kwargs)
session.codebox = CodeBox.from_id(session_id)
session.agent_executor = session._agent_executor()
return session
@property
def session_id(self) -> Optional[UUID]:
return self.codebox.session_id
def start(self) -> SessionStatus:
status = SessionStatus.from_codebox_status(self.codebox.start())
self.agent_executor = self._agent_executor()
self.codebox.run(
f"!pip install -q {' '.join(settings.CUSTOM_PACKAGES)}",
)
return status
async def astart(self) -> SessionStatus:
status = SessionStatus.from_codebox_status(await self.codebox.astart())
self.agent_executor = self._agent_executor()
await self.codebox.arun(
f"!pip install -q {' '.join(settings.CUSTOM_PACKAGES)}",
)
return status
def _tools(self, additional_tools: list[BaseTool]) -> list[BaseTool]:
return additional_tools + [
StructuredTool(
name="python",
description="Input a string of code to a ipython interpreter. "
"Write the entire code in a single string. This string can "
"be really long, so you can use the `;` character to split lines. "
"Variables are preserved between runs. "
+ (
(
f"You can use all default python packages specifically also these: {settings.CUSTOM_PACKAGES}"
)
if settings.CUSTOM_PACKAGES
else ""
), # TODO: or include this in the system message
func=self._run_handler,
coroutine=self._arun_handler,
args_schema=CodeInput, # type: ignore
),
]
def _choose_llm(self) -> BaseChatModel:
if (
settings.AZURE_API_KEY
and settings.AZURE_API_BASE
and settings.AZURE_API_VERSION
and settings.AZURE_DEPLOYMENT_NAME
):
self.log("Using Azure Chat OpenAI")
return AzureChatOpenAI(
temperature=0.03,
openai_api_base=settings.AZURE_API_BASE,
openai_api_version=settings.AZURE_API_VERSION,
deployment_name=settings.AZURE_DEPLOYMENT_NAME,
openai_api_key=settings.AZURE_API_KEY,
max_retries=settings.MAX_RETRY,
request_timeout=settings.REQUEST_TIMEOUT,
) # type: ignore
elif settings.OPENAI_API_KEY:
self.log("Using Chat OpenAI")
return ChatOpenAI(
model=settings.MODEL,
openai_api_key=settings.OPENAI_API_KEY,
request_timeout=settings.REQUEST_TIMEOUT,
temperature=settings.TEMPERATURE,
max_retries=settings.MAX_RETRY,
) # type: ignore
elif settings.ANTHROPIC_API_KEY:
if "claude" not in settings.MODEL:
print("Please set the claude model in the settings.")
self.log("Using Chat Anthropic")
return ChatAnthropic(
model_name=settings.MODEL,
temperature=settings.TEMPERATURE,
anthropic_api_key=settings.ANTHROPIC_API_KEY,
)
else:
raise ValueError("Please set the API key for the LLM you want to use.")
def _choose_agent(self) -> BaseSingleActionAgent:
return (
OpenAIFunctionsAgent.from_llm_and_tools(
llm=self.llm,
tools=self.tools,
system_message=settings.SYSTEM_MESSAGE,
extra_prompt_messages=[
MessagesPlaceholder(variable_name="chat_history")
],
)
if isinstance(self.llm, ChatOpenAI)
else ConversationalChatAgent.from_llm_and_tools(
llm=self.llm,
tools=self.tools,
system_message=settings.SYSTEM_MESSAGE.content,
output_parser=CodeChatAgentOutputParser(self.llm),
)
if isinstance(self.llm, BaseChatModel)
else ConversationalAgent.from_llm_and_tools(
llm=self.llm,
tools=self.tools,
prefix=settings.SYSTEM_MESSAGE.content,
output_parser=CodeAgentOutputParser(),
)
)
def _history_backend(self) -> BaseChatMessageHistory:
return (
CodeBoxChatMessageHistory(codebox=self.codebox)
if settings.HISTORY_BACKEND == "codebox"
else RedisChatMessageHistory(
session_id=str(self.session_id),
url=settings.REDIS_URL,
)
if settings.HISTORY_BACKEND == "redis"
else PostgresChatMessageHistory(
session_id=str(self.session_id),
connection_string=settings.POSTGRES_URL,
)
if settings.HISTORY_BACKEND == "postgres"
else ChatMessageHistory()
)
def _agent_executor(self) -> AgentExecutor:
return AgentExecutor.from_agent_and_tools(
agent=self._choose_agent(),
max_iterations=settings.MAX_ITERATIONS,
tools=self.tools,
verbose=self.verbose,
memory=ConversationBufferMemory(
memory_key="chat_history",
return_messages=True,
chat_memory=self._history_backend(),
),
callbacks=self.callbacks,
)
def show_code(self, code: str) -> None:
if self.verbose:
print(code)
async def ashow_code(self, code: str) -> None:
"""Callback function to show code to the user."""
if self.verbose:
print(code)
def _run_handler(self, code: str) -> str:
"""Run code in container and send the output to the user"""
self.show_code(code)
output: CodeBoxOutput = self.codebox.run(code)
self.code_log.append((code, output.content))
if not isinstance(output.content, str):
raise TypeError("Expected output.content to be a string.")
if output.type == "image/png":
filename = f"image-{uuid4()}.png"
file_buffer = BytesIO(base64.b64decode(output.content))
file_buffer.name = filename
self.output_files.append(File(name=filename, content=file_buffer.read()))
return f"Image {filename} got send to the user."
elif output.type == "error":
if "ModuleNotFoundError" in output.content:
if package := re.search(
r"ModuleNotFoundError: No module named '(.*)'",
output.content,
):
self.codebox.install(package.group(1))
return (
f"{package.group(1)} was missing but "
"got installed now. Please try again."
)
else:
# TODO: pre-analyze error to optimize next code generation
pass
if self.verbose:
print("Error:", output.content)
elif modifications := get_file_modifications(code, self.llm):
for filename in modifications:
if filename in [file.name for file in self.input_files]:
continue
fileb = self.codebox.download(filename)
if not fileb.content:
continue
file_buffer = BytesIO(fileb.content)
file_buffer.name = filename
self.output_files.append(
File(name=filename, content=file_buffer.read())
)
return output.content
async def _arun_handler(self, code: str) -> str:
"""Run code in container and send the output to the user"""
await self.ashow_code(code)
output: CodeBoxOutput = await self.codebox.arun(code)
self.code_log.append((code, output.content))
if not isinstance(output.content, str):
raise TypeError("Expected output.content to be a string.")
if output.type == "image/png":
filename = f"image-{uuid4()}.png"
file_buffer = BytesIO(base64.b64decode(output.content))
file_buffer.name = filename
self.output_files.append(File(name=filename, content=file_buffer.read()))
return f"Image {filename} got send to the user."
elif output.type == "error":
if "ModuleNotFoundError" in output.content:
if package := re.search(
r"ModuleNotFoundError: No module named '(.*)'",
output.content,
):
await self.codebox.ainstall(package.group(1))
return (
f"{package.group(1)} was missing but "
"got installed now. Please try again."
)
else:
# TODO: pre-analyze error to optimize next code generation
pass
if self.verbose:
print("Error:", output.content)
elif modifications := await aget_file_modifications(code, self.llm):
for filename in modifications:
if filename in [file.name for file in self.input_files]:
continue
fileb = await self.codebox.adownload(filename)
if not fileb.content:
continue
file_buffer = BytesIO(fileb.content)
file_buffer.name = filename
self.output_files.append(
File(name=filename, content=file_buffer.read())
)
return output.content
def _input_handler(self, request: UserRequest) -> None:
"""Callback function to handle user input."""
if not request.files:
return
if not request.content:
request.content = (
"I uploaded, just text me back and confirm that you got the file(s)."
)
request.content += "\n**The user uploaded the following files: **\n"
for file in request.files:
self.input_files.append(file)
request.content += f"[Attachment: {file.name}]\n"
self.codebox.upload(file.name, file.content)
request.content += "**File(s) are now available in the cwd. **\n"
async def _ainput_handler(self, request: UserRequest) -> None:
# TODO: variables as context to the agent
# TODO: current files as context to the agent
if not request.files:
return
if not request.content:
request.content = (
"I uploaded, just text me back and confirm that you got the file(s)."
)
request.content += "\n**The user uploaded the following files: **\n"
for file in request.files:
self.input_files.append(file)
request.content += f"[Attachment: {file.name}]\n"
await self.codebox.aupload(file.name, file.content)
request.content += "**File(s) are now available in the cwd. **\n"
def _output_handler(self, final_response: str) -> CodeInterpreterResponse:
"""Embed images in the response"""
for file in self.output_files:
if str(file.name) in final_response:
# rm  from the response
final_response = re.sub(r"\n\n!\[.*\]\(.*\)", "", final_response)
if self.output_files and re.search(r"\n\[.*\]\(.*\)", final_response):
try:
final_response = remove_download_link(final_response, self.llm)
except Exception as e:
if self.verbose:
print("Error while removing download links:", e)
output_files = self.output_files
code_log = self.code_log
self.output_files = []
self.code_log = []
return CodeInterpreterResponse(
content=final_response, files=output_files, code_log=code_log
)
async def _aoutput_handler(self, final_response: str) -> CodeInterpreterResponse:
"""Embed images in the response"""
for file in self.output_files:
if str(file.name) in final_response:
# rm  from the response
final_response = re.sub(r"\n\n!\[.*\]\(.*\)", "", final_response)
if self.output_files and re.search(r"\n\[.*\]\(.*\)", final_response):
try:
final_response = await aremove_download_link(final_response, self.llm)
except Exception as e:
if self.verbose:
print("Error while removing download links:", e)
output_files = self.output_files
code_log = self.code_log
self.output_files = []
self.code_log = []
return CodeInterpreterResponse(
content=final_response, files=output_files, code_log=code_log
)
def generate_response_sync(
self,
user_msg: str,
files: list[File] = [],
) -> CodeInterpreterResponse:
print("DEPRECATION WARNING: Use generate_response for sync generation.\n")
return self.generate_response(
user_msg=user_msg,
files=files,
)
def generate_response(
self,
user_msg: str,
files: list[File] = [],
) -> CodeInterpreterResponse:
"""Generate a Code Interpreter response based on the user's input."""
user_request = UserRequest(content=user_msg, files=files)
try:
self._input_handler(user_request)
assert self.agent_executor, "Session not initialized."
response = self.agent_executor.run(input=user_request.content)
return self._output_handler(response)
except Exception as e:
if self.verbose:
traceback.print_exc()
if settings.DETAILED_ERROR:
return CodeInterpreterResponse(
content="Error in CodeInterpreterSession: "
f"{e.__class__.__name__} - {e}"
)
else:
return CodeInterpreterResponse(
content="Sorry, something went while generating your response."
"Please try again or restart the session."
)
async def agenerate_response(
self,
user_msg: str,
files: list[File] = [],
) -> CodeInterpreterResponse:
"""Generate a Code Interpreter response based on the user's input."""
user_request = UserRequest(content=user_msg, files=files)
try:
await self._ainput_handler(user_request)
assert self.agent_executor, "Session not initialized."
response = await self.agent_executor.arun(input=user_request.content)
return await self._aoutput_handler(response)
except Exception as e:
if self.verbose:
traceback.print_exc()
if settings.DETAILED_ERROR:
return CodeInterpreterResponse(
content="Error in CodeInterpreterSession: "
f"{e.__class__.__name__} - {e}"
)
else:
return CodeInterpreterResponse(
content="Sorry, something went while generating your response."
"Please try again or restart the session."
)
def is_running(self) -> bool:
return self.codebox.status() == "running"
async def ais_running(self) -> bool:
return await self.codebox.astatus() == "running"
def log(self, msg: str) -> None:
if self.verbose:
print(msg)
def stop(self) -> SessionStatus:
return SessionStatus.from_codebox_status(self.codebox.stop())
async def astop(self) -> SessionStatus:
return SessionStatus.from_codebox_status(await self.codebox.astop())
def __enter__(self) -> "CodeInterpreterSession":
self.start()
return self
def __exit__(self, exc_type, exc_value, traceback) -> None:
self.stop()
async def __aenter__(self) -> "CodeInterpreterSession":
await self.astart()
return self
async def __aexit__(self, exc_type, exc_value, traceback) -> None:
await self.astop()
| [] |
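A hedged usage sketch for the CodeInterpreterSession class above, following the context-manager protocol (`__enter__`/`__exit__`) and `generate_response` method it defines. The question string is illustrative only, and the settings from config.py below (e.g. OPENAI_API_KEY) must already be configured.

```python
# Sketch based on the methods defined above; assumes codeinterpreterapi's settings
# (e.g. OPENAI_API_KEY) are configured. The question text is a placeholder.
from codeinterpreterapi.session import CodeInterpreterSession

with CodeInterpreterSession() as session:      # start() on enter, stop() on exit
    response = session.generate_response("Plot sin(x) from 0 to 10 and save it as a PNG.")
    print(response.content)                    # CodeInterpreterResponse.content
    for file in response.files:                # any generated File objects
        print("Generated file:", file.name)
```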
2024-01-10 | jikebin/codeinterpreter-api | codeinterpreterapi~config.py | from typing import Optional
from dotenv import load_dotenv
from langchain.pydantic_v1 import BaseSettings
from langchain.schema import SystemMessage
from codeinterpreterapi.prompts import code_interpreter_system_message
# .env file
load_dotenv(dotenv_path="./.env")
class CodeInterpreterAPISettings(BaseSettings):
"""
CodeInterpreter API Config
"""
DEBUG: bool = False
# Models
OPENAI_API_KEY: Optional[str] = None
AZURE_API_KEY: Optional[str] = None
AZURE_API_BASE: Optional[str] = None
AZURE_API_VERSION: Optional[str] = None
AZURE_DEPLOYMENT_NAME: Optional[str] = None
ANTHROPIC_API_KEY: Optional[str] = None
# LLM Settings
MODEL: str = "gpt-3.5-turbo"
TEMPERATURE: float = 0.03
DETAILED_ERROR: bool = True
SYSTEM_MESSAGE: SystemMessage = code_interpreter_system_message
REQUEST_TIMEOUT: int = 3 * 60
MAX_ITERATIONS: int = 12
MAX_RETRY: int = 3
# Production Settings
HISTORY_BACKEND: Optional[str] = None
REDIS_URL: str = "redis://localhost:6379"
POSTGRES_URL: str = "postgresql://postgres:postgres@localhost:5432/postgres"
# CodeBox
CODEBOX_API_KEY: Optional[str] = None
CUSTOM_PACKAGES: list[str] = []
# deprecated
VERBOSE: bool = DEBUG
settings = CodeInterpreterAPISettings()
| [] |
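Since CodeInterpreterAPISettings is a pydantic BaseSettings model and the module calls load_dotenv first, the fields above can be supplied through the environment or a .env file. A small sketch follows; the values are placeholders, and the env-parsing behavior (case-insensitive names, JSON for list fields) is assumed from pydantic v1 BaseSettings.

```python
# Sketch of how the BaseSettings-backed config above picks up values; the
# environment variables set here are placeholders, not real credentials.
import os

os.environ["OPENAI_API_KEY"] = "sk-..."         # placeholder key
os.environ["MODEL"] = "gpt-4"                   # overrides the "gpt-3.5-turbo" default
os.environ["CUSTOM_PACKAGES"] = '["openpyxl"]'  # list fields are parsed from JSON (assumed)

from codeinterpreterapi.config import CodeInterpreterAPISettings

settings = CodeInterpreterAPISettings()
print(settings.MODEL, settings.CUSTOM_PACKAGES)
```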
2024-01-10 | harukaokamoto/20231210 | devel~devel~record_pub.py | import rclpy # ROS2のPythonモジュール
from rclpy.node import Node # rclpy.nodeモジュールからNodeクラスをインポート
from std_msgs.msg import String # std_msgs.msgモジュールからStringクラスをインポート
import numpy as np
import sounddevice as sd
import threading
import time
from scipy.io.wavfile import write
import openai
from .record_test import record_main
class HscrPub(Node): # "Happy World"とパブリッシュ並びに表示するクラス
def __init__(self): # コンストラクタ
super().__init__('HSCR_Robot_pub_node')
self.pub = self.create_publisher(String, 'topic', 10) # パブリッシャの生成
self.create_timer(1.0, self.callback)
def callback(self): # コールバック関数
print("a")
msg = String()
msg.data = input()
self.pub.publish(msg)
self.get_logger().info(f'パブリッシュ: {msg.data}')
def main(args=None): # main関数
rclpy.init()
node = HscrPub()
# OpenAIのAPIキーを設定
openai.api_key = 'api'
record_main()
print("テキストファイル作成")
with open('/home/uchida/devel1/src/devel/devel/output.wav', "rb") as audio_file:
# Whisper APIを使用してオーディオファイルをテキストに変換
#transcript = openai.Audio.transcribe("whisper-1", audio_file)
# 音声からテキスト変換した結果をファイルに保存
try:
with open('/home/uchida/devel1/src/devel/devel/enter_voice_word.txt', 'w') as output_file:
transcript = openai.Audio.transcribe("whisper-1", audio_file)
output_file.write(transcript.text)
print("transcript.text:", transcript.text)
file_path = '/home/uchida/devel1/src/devel/devel/enter_voice_word.txt'
with open(file_path, 'r') as file:
file_content = file.read()
print(file_content)
except FileNotFoundError:
print(f"ファイル '{file_path}' が見つかりません。")
except Exception as e:
print(f"エラー: {e}")
try:
rclpy.spin_once(node)
except KeyboardInterrupt:
pass
finally:
node.destroy_node()
rclpy.shutdown()
| [] |
2024-01-10 | harukaokamoto/20231210 | devel~devel~sub_motor.py | import rclpy
import openai
from rclpy.node import Node
from std_msgs.msg import String
from cv_bridge import CvBridge
import cv2
import datetime
from sensor_msgs.msg import Image
import time
import requests
import Adafruit_PCA9685
class HscrSub(Node):
def __init__(self): # コンストラクタ
super().__init__('HSCR_Robot_sub_node')
# サブスクライバの生成
self.sub = self.create_subscription(String,'topic', self.callback, 10)#topicっていう名前の箱のサブスクライブ、Stringは形 受け取る
self.publisher = self.create_publisher(Image,'result',10)#大事!resultっていう名前の箱にパブリッシュしてる。送ってる。rqtは通信を見えるようにする。動画をresultに送ってrqtでみてる。
self.servo_start()
def callback(self, msg): # コールバック関数 送られたときに起動
self.get_logger().info(f'サブスクライブ: {msg.data}')
def servo_start(se_num):
servoMotors = []
servoMotors.append(ServoMotor(Channel=11, ZeroOffset=0))
servoMotors.append(ServoMotor(Channel=15, ZeroOffset=0))
servoMotors[0].setAngle(se_num)
servoMotors[1].setAngle(0)
class ServoMotor:
def __init__(self, Channel, ZeroOffset):
self.mChannel = Channel
self.m_ZeroOffset = ZeroOffset
#initialize PCA9685
self.mPwm = Adafruit_PCA9685.PCA9685(address=0x40)
self.mPwm.set_pwm_freq(60) # 60Hz
def setAngle(self, angle):
pulse = int((650-150)*angle/180+150+self.m_ZeroOffset)
self.mPwm.set_pwm(self.mChannel, 0, pulse)
def cleanup(self):
self.setAngle(10)
def main(args=None): # main関数
try:
rclpy.init()#初期化
node = HscrSub()#nodeにHscrを
msg=String()#stringは文字列いれれる
while True:
rclpy.spin_once(node)#一回ノードを起動する?
except KeyboardInterrupt:
pass#ctl+C(KeyboardInterrupt) node finish
"""
while True:
if msg.data==True:
i = i+1
print(i)
else:
print("wait_time")
time.sleep(1)
"""
"""
try:
rclpy.spin(node)
except KeyboardInterrupt:
print('Ctrl+Cが押されました')
finally:
rclpy.shutdown()
"""
| [] |
2024-01-10 | harukaokamoto/20231210 | devel~devel~voice_chat_bot.py | import rclpy
from rclpy.node import Node
from std_msgs.msg import String
from cv_bridge import CvBridge
import cv2
import datetime
from sensor_msgs.msg import Image
import time
import requests
from playsound import playsound
import requests
from playsound import playsound
import openai
from devel import text_to_speech
from devel.speech_to_text import speech_to_text
#from timeout_decorator import timeout, TimeoutError
# String型メッセージをサブスクライブして端末に表示するだけの簡単なクラス
class HscrSub(Node):
def __init__(self): # コンストラクタ
super().__init__('HSCR_Robot_sub_node')
# サブスクライバの生成
self.sub = self.create_subscription(String,'topic', self.callback, 10)#topicっていう名前の箱のサブスクライブ、Stringは形 受け取る
self.publisher = self.create_publisher(Image,'result',10)#大事!resultっていう名前の箱にパブリッシュしてる。送ってる。rqtは通信を見えるようにする。動画をresultに送ってrqtでみてる。
def callback(self, msg): # コールバック関数 送られたときに起動
self.get_logger().info(f'サブスクライブ: {msg.data}')
# OpenAIのAPIキーを設定
openai.api_key = 'api'
# テンプレートの準備
template = """あなたは猫のキャラクターとして振る舞うチャットボットです。
制約:
- 簡潔な短い文章で話します
- 語尾は「…にゃ」、「…にゃあ」などです
- 質問に対する答えを知らない場合は「知らないにゃあ」と答えます
- 名前はクロです
- 好物はかつおぶしです"""
# メッセージの初期化
messages = [
{
"role": "system",
"content": template
}
]
# ユーザーからのメッセージを受け取り、それに対する応答を生成
while True:
# 音声をテキストに変換
user_message = speech_to_text()
# テキストが空の場合は処理をスキップ
if user_message == "":
continue
print("あなたのメッセージ: \n{}".format(user_message))
messages.append({
"role": "user",
"content": user_message
})
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
bot_message = response['choices'][0]['message']['content']
print("チャットボットの回答: \n{}".format(bot_message))
# テキストを音声に変換して再生
text_to_speech(bot_message)
messages.append({
"role": "assistant",
"content": bot_message
})
def main(args=None): # main関数
try:
rclpy.init()#初期化
node = HscrSub()#nodeにHscrを
msg=String()#stringは文字列いれれる
while True:
rclpy.spin_once(node)#一回ノードを起動する?
node.text_to_speech()#movie_startを実行する
except KeyboardInterrupt:
pass#ctl+C(KeyboardInterrupt) node finish
"""
while True:
if msg.data==True:
i = i+1
print(i)
else:
print("wait_time")
time.sleep(1)
"""
"""
try:
rclpy.spin(node)
except KeyboardInterrupt:
print('Ctrl+Cが押されました')
finally:
rclpy.shutdown()
"""
| [
"あなたは猫のキャラクターとして振る舞うチャットボットです。\n 制約:\n - 簡潔な短い文章で話します\n - 語尾は「…にゃ」、「…にゃあ」などです\n - 質問に対する答えを知らない場合は「知らないにゃあ」と答えます\n - 名前はクロです\n - 好物はかつおぶしです"
] |
2024-01-10 | harukaokamoto/20231210 | devel~devel~sub_gpt.py | import rclpy
import openai
from rclpy.node import Node
from std_msgs.msg import String
from cv_bridge import CvBridge
import cv2
import datetime
from sensor_msgs.msg import Image
import time
import requests
from playsound import playsound
import requests
from playsound import playsound
#from timeout_decorator import timeout, TimeoutError
# A simple class that just subscribes to String messages and prints them to the terminal
class HscrSub(Node):
    def __init__(self):  # constructor
        super().__init__('HSCR_Robot_sub_node')
        # Create the subscriber
        self.sub = self.create_subscription(String, 'topic', self.callback, 10)  # subscribe to the topic named 'topic'; String is the message type
        self.publisher = self.create_publisher(Image, 'result', 10)  # important: publish to the 'result' topic; rqt visualizes the video sent there
    def callback(self, msg):  # callback function, invoked when a message is received
self.get_logger().info(f'サブスクライブ: {msg.data}')
path = '/home/uchida/devel1/src/devel/devel/enter_voice_word.txt'
f = open(path)
text = f.read()
f.close()
        # Set the OpenAI API key
        openai.api_key = 'api'
        # Prepare the persona template
template = """あなたは猫のキャラクターとして振る舞うチャットボットです。
制約:
- 簡潔な短い文章で話します
- 語尾は「…にゃ」、「…にゃあ」などです
- 質問に対する答えを知らない場合は「知らないにゃあ」と答えます
- 名前はクロです
- 好物はかつおぶしです"""
        # Initialize the message history
messages = [
{
"role": "system",
"content": template
}
]
        # Receive the user's message and generate a response to it
        #while True:
        #
        user_message = text
        # Skip processing if the text is empty
        #if user_message == "":
        #    continue
print("あなたのメッセージ: \n{}".format(user_message))
messages.append({
"role": "user",
"content": user_message
})
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
bot_message = response['choices'][0]['message']['content']
print("チャットボットの回答: \n{}".format(bot_message))
        # Convert the text to speech and play it
        # URL of the VOICEVOX Engine
        VOICEVOX_URL = "http://localhost:50021"
        # Generate the audio-synthesis query
response = requests.post(
f"{VOICEVOX_URL}/audio_query",
params={
"text": bot_message,
"speaker": 58,
},
)
audio_query = response.json()
        # Run the speech synthesis
response = requests.post(
f"{VOICEVOX_URL}/synthesis",
headers={
"Content-Type": "application/json",
},
params={
"speaker": 58,
},
json=audio_query,
)
        # Show an error message if the status code is not 200
if response.status_code != 200:
print("エラーが発生しました。ステータスコード: {}".format(response.status_code))
print(response.text)
else:
            # Get the audio data
            audio = response.content
            # Save the audio data to a file
with open("output.wav", "wb") as f:
f.write(audio)
            # Play the audio data
playsound("output.wav")
messages.append({
"role": "assistant",
"content": bot_message
})
def main(args=None):  # main function
    try:
        rclpy.init()  # initialize rclpy
        node = HscrSub()  # create the HscrSub node
        msg = String()  # String holds text data
        while True:
            rclpy.spin_once(node)  # process callbacks for one message
    except KeyboardInterrupt:
        pass  # Ctrl+C (KeyboardInterrupt) finishes the node
"""
while True:
if msg.data==True:
i = i+1
print(i)
else:
print("wait_time")
time.sleep(1)
"""
"""
try:
rclpy.spin(node)
except KeyboardInterrupt:
print('Ctrl+Cが押されました')
finally:
rclpy.shutdown()
"""
| [
"あなたは猫のキャラクターとして振る舞うチャットボットです。\n 制約:\n - 簡潔な短い文章で話します\n - 語尾は「…にゃ」、「…にゃあ」などです\n - 質問に対する答えを知らない場合は「知らないにゃあ」と答えます\n - 名前はクロです\n - 好物はかつおぶしです"
] |
2024-01-10 | harukaokamoto/20231210 | devel~devel~pub_voice_enter.py | import rclpy # ROS2のPythonモジュール
from rclpy.node import Node # rclpy.nodeモジュールからNodeクラスをインポート
from std_msgs.msg import String # std_msgs.msgモジュールからStringクラスをインポート
import numpy as np
import sounddevice as sd
import threading
import time
from scipy.io.wavfile import write
import openai
class HscrPub(Node):  # class that publishes the entered text and prints it
    def __init__(self):  # constructor
        super().__init__('HSCR_Robot_pub_node')
        self.pub = self.create_publisher(String, 'topic', 10)  # create the publisher
        self.create_timer(1.0, self.callback)
    def callback(self, record):  # callback function
msg = String()
msg.data = input()
self.pub.publish(msg)
self.get_logger().info(f'パブリッシュ: {msg.data}')
recording_thread = threading.Thread(target=record)
recording_thread.start()
def record(fs):  # function that performs recording
global is_recording
global recording
while True:
if is_recording:
            # While recording, append 0.5 seconds of audio data
recording_chunk = sd.rec(int(0.5 * fs), samplerate=fs, channels=1)
sd.wait()
recording = np.append(recording, recording_chunk)
else:
            # Sleep 1 millisecond to reduce CPU load
time.sleep(0.001)
def speech_to_text():
"""音声認識を行う関数"""
global is_recording
global recording
input("Enterキーを押すと録音を開始します。\n")
    # Start recording
is_recording = True
print("録音を開始します。\n")
input("録音中です。Enterを押すと録音を終了します。\n")
    # Stop recording
is_recording = False
print("録音が終了しました。")
    if recording.size > 0:
        # If recorded data exists, save it to a file
        write('output.wav', fs, recording)
        # Open the file in binary mode
        with open('output.wav', "rb") as audio_file:
            # Use the Whisper API to transcribe the audio file to text
            transcript = openai.Audio.transcribe("whisper-1", audio_file)
        # Reset the recorded data
        recording = np.array([])
        # Return the speech-to-text result
return transcript.text
def main(args=None):  # main function
    # Recording parameters
    fs = 44100  # sample rate
    recording = np.array([])  # array that stores the recorded data
    # Flag that controls starting and stopping the recording
    is_recording = False
    openai.api_key = 'api'  # set the OpenAI API key
rclpy.init()
node = HscrPub()
try:
rclpy.spin(node)
except KeyboardInterrupt:
pass
finally:
node.destroy_node()
rclpy.shutdown()
| [] |
2024-01-10 | harukaokamoto/20231210 | devel~devel~pub_voice_enter_test.py | import rclpy # ROS2のPythonモジュール
from rclpy.node import Node # rclpy.nodeモジュールからNodeクラスをインポート
from std_msgs.msg import String # std_msgs.msgモジュールからStringクラスをインポート
import numpy as np
import sounddevice as sd
import threading
import time
from scipy.io.wavfile import write
import openai
class HscrPub(Node):  # class that publishes the entered text and prints it
    def __init__(self):  # constructor
        super().__init__('HSCR_Robot_pub_node')
        self.pub = self.create_publisher(String, 'topic', 10)  # create the publisher
        self.create_timer(1.0, self.callback)
        # Set the OpenAI API key
        openai.api_key = 'api'
        # Recording parameters
        fs = 44100  # sample rate
        recording = np.array([])  # array that stores the recorded data
        # Flag that controls starting and stopping the recording
        is_recording = False
def record():
"""録音を行う関数"""
global is_recording
global recording
while True:
if is_recording:
                # While recording, append 0.5 seconds of audio data
recording_chunk = sd.rec(int(0.5 * fs), samplerate=fs, channels=1)
sd.wait()
recording = np.append(recording, recording_chunk)
else:
                # Sleep 1 millisecond to reduce CPU load
time.sleep(0.001)
        # Start the recording thread
# recording_thread = threading.Thread(target=record)
# recording_thread.start()
# def speech_to_text():
"""音声認識を行う関数"""
def callback(self): # コールバック関数
global is_recording
global recording
input("Enterキーを押すと録音を開始します。\n")
        # Start recording
is_recording = True
print("録音を開始します。\n")
input("録音中です。Enterを押すと録音を終了します。\n")
        # Stop recording
is_recording = False
print("録音が終了しました。")
if recording.size > 0:
            # If recorded data exists, save it to a file
write('output.wav', fs, recording)
            # Open the file in binary mode
with open('output.wav', "rb") as audio_file:
                # Use the Whisper API to transcribe the audio file to text
transcript = openai.Audio.transcribe("whisper-1", audio_file)
            # Reset the recorded data
recording = np.array([])
            # Return the speech-to-text result
return transcript.text
msg = String()
msg.data = input()
self.pub.publish(msg)
self.get_logger().info(f'パブリッシュ: {msg.data}')
recording_thread = threading.Thread(target=record)
recording_thread.start()
def main(args=None):  # main function
rclpy.init()
node = HscrPub()
try:
rclpy.spin(node)
except KeyboardInterrupt:
pass
finally:
node.destroy_node()
rclpy.shutdown()
| [] |
2024-01-10 | elphinkuo/distiller | examples~automated_deep_compression~ADC.py | #
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""To execute this code:
$ time python3 compress_classifier.py --arch=plain20_cifar ../../../data.cifar10 --resume=checkpoint.plain20_cifar.pth.tar --lr=0.05 --amc --amc-protocol=mac-constrained --amc-target-density=0.5 -p=50
Coach installation:
===================
After creating the virtual environment and installing Distiller's Python package dependencies, go ahead and
setup Coach per: https://github.com/NervanaSystems/coach#installation.
Make sure that you install Coach's package dependencies into the same virtual environment that already contains
Distiller's dependency packages. You do this by ensuring that Distiller's virtual environment is the active environment
when you install Coach.
*NOTE: you may need to update TensorFlow to the expected version:
$ pip3 install tensorflow==1.9.0
Finally, if you are running Coach in a development environment, you need to tell the Python runtime where to find
the Coach code:
$ export PYTHONPATH=<path-to-coach-code>
Spinningup installation:
========================
Spinup requires exactly Python 3.6, so if you are not using this Python version see the instructions here:
http://ubuntuhandbook.org/index.php/2017/07/install-python-3-6-1-in-ubuntu-16-04-lts/
$ sudo update-alternatives --config python3
For Python 3.6 you may also need to install a new virtual-env:
$ sudo apt-get install python3.6-venv
Then create and activate your venv, and populate it with the Distiller packages:
$ python3 -m venv distiller_env_python3.6
$ source distiller_env_python3.6/bin/activate
$ pip3 install -r requirements.txt
You want to install Spinup into this venv. First clone Spinup and then install it into your venv:
$ cd <spinningup-repo>
$ sudo apt-get install python3.6-dev
$ pip3 install -e .
https://spinningup.openai.com/en/latest/user/installation.html?highlight=license
"""
import math
import os
import copy
import logging
import numpy as np
import torch
import csv
try:
import gym
except ImportError as e:
print("WARNING: to use automated compression you will need to install extra packages")
print("See instructions in the header of examples/automated_deep_compression/ADC.py")
raise e
from gym import spaces
import distiller
from collections import OrderedDict, namedtuple
from types import SimpleNamespace
from distiller import normalize_module_name, SummaryGraph
from examples.automated_deep_compression.adc_random_env import random_agent
# Choose which RL library to use: Coach from Intel AI Lab, or Spinup from OpenAI
#RLLIB = "spinup"
RLLIB = "coach"
msglogger = logging.getLogger()
Observation = namedtuple('Observation', ['n', 'c', 'h', 'w', 'stride', 'k', 'MACs', 'reduced', 'rest', 'prev_a'])
LayerDesc = namedtuple('LayerDesc', ['t', 'n', 'c', 'h', 'w', 'stride', 'k', 'MACs', 'reduced', 'rest'])
LayerDescLen = len(LayerDesc._fields)
ALMOST_ONE = 0.9999
class CSVFile(object):
def __init__(self, fname, headers):
"""Create the CSV file and write the column names"""
with open(fname, 'w') as f:
writer = csv.writer(f)
writer.writerow(headers)
self.fname = fname
def add_record(self, fields):
# We close the file each time to flush on every write, and protect against data-loss on crashes
with open(self.fname, 'a') as f:
writer = csv.writer(f)
writer.writerow(fields)
class AMCStatsFile(CSVFile):
def __init__(self, fname):
headers = ['episode', 'top1', 'reward', 'total_macs', 'normalized_macs',
'normalized_nnz', 'ckpt_name', 'action_history', 'agent_action_history']
super().__init__(fname, headers)
class FineTuneStatsFile(CSVFile):
def __init__(self, fname):
headers = ['episode', 'ft_top1_list']
super().__init__(fname, headers)
def is_using_continuous_action_space(agent):
return agent in ("DDPG", "ClippedPPO-continuous", "Random-policy")
if RLLIB == "spinup":
import tensorflow as tf
from spinup.algos.ddpg import core
from .ddpg import ddpg
def ddpg_spinup(env1, env2):
from spinup.utils.run_utils import setup_logger_kwargs
exp_name = "Test"
seed = 0
# The number and size of the Actor-Critic MLP hidden layers
layers, hid = 2, 300
logger_kwargs = setup_logger_kwargs(exp_name) # , seed)
ddpg.ddpg(env=env1, test_env=env2, actor_critic=core.mlp_actor_critic,
ac_kwargs=dict(hidden_sizes=[hid]*layers, output_activation=tf.sigmoid),
gamma=1, # discount rate
seed=seed,
epochs=400,
replay_size=2000,
batch_size=64,
start_steps=0, #env1.amc_cfg.num_heatup_epochs,
steps_per_epoch=800 * 3, #env1.num_layers(), # every 50 episodes perform 10 episodes of testing
act_noise=0.5,
pi_lr=1e-4,
q_lr=1e-3,
logger_kwargs=logger_kwargs)
if RLLIB == "coach":
from rl_coach.base_parameters import TaskParameters
from rl_coach.core_types import EnvironmentSteps
from rl_coach.schedules import ConstantSchedule, PieceWiseSchedule, ExponentialSchedule
def log_amc_config(amc_cfg):
try:
msglogger.info('AMC configuration:')
for k, v in amc_cfg.items():
msglogger.info("\t{} : {}".format(k, v))
except TypeError as e:
pass
def count_conv_layer(model):
"""Count the number of Convolution layers exist in this model"""
conv_cnt = 0
for module in model.modules():
if type(module) == torch.nn.Conv2d:
conv_cnt += 1
return conv_cnt
def mac_constrained_experimental_reward_fn(env, top1, top5, vloss, total_macs):
"""A more intuitive reward for constraining the compute and optimizing the
accuracy under this constraint.
"""
macs_normalized = total_macs/env.dense_model_macs
reward = top1/100
if macs_normalized > (env.amc_cfg.target_density+0.002):
reward = -3 - macs_normalized
else:
reward += 1
return reward
def harmonic_mean_reward_fn(env, top1, top5, vloss, total_macs):
"""This reward is based on the idea of weighted harmonic mean
Balance compute and accuracy provided a beta value that weighs the two components.
See: https://en.wikipedia.org/wiki/F1_score
"""
beta = 1
#beta = 0.75 # How much to favor accuracy
macs_normalized = total_macs/env.dense_model_macs
reward = (1 + beta**2) * top1/100 * macs_normalized / (beta**2 * macs_normalized + top1/100)
return reward
def amc_reward_fn(env, top1, top5, vloss, total_macs):
"""This reward punishes the agent when it produces networks that don't comply with the MACs resource-constraint,
(the negative reward is in proportion to the network density). Otherwise, the reward is the Top1 accuracy.
"""
if not env.is_macs_constraint_achieved(total_macs):
current_density = total_macs / env.dense_model_macs
reward = env.amc_cfg.target_density - current_density
else:
reward = top1/100
return reward
def do_adc_internal(model, args, optimizer_data, validate_fn, save_checkpoint_fn, train_fn):
dataset = args.dataset
arch = args.arch
perform_thinning = True # args.amc_thinning
num_ft_epochs = args.amc_ft_epochs
action_range = args.amc_action_range
np.random.seed()
conv_cnt = count_conv_layer(model)
msglogger.info("Executing AMC: RL agent - %s RL library - %s", args.amc_agent_algo, RLLIB)
# Create a dictionary of parameters that Coach will handover to DistillerWrapperEnvironment
# Once it creates it.
services = distiller.utils.MutableNamedTuple({
'validate_fn': validate_fn,
'save_checkpoint_fn': save_checkpoint_fn,
'train_fn': train_fn})
app_args = distiller.utils.MutableNamedTuple({
'dataset': dataset,
'arch': arch,
'optimizer_data': optimizer_data})
amc_cfg = distiller.utils.MutableNamedTuple({
'protocol': args.amc_protocol,
'agent_algo': args.amc_agent_algo,
'perform_thinning': perform_thinning,
'num_ft_epochs': num_ft_epochs,
'action_range': action_range,
'conv_cnt': conv_cnt,
'reward_frequency': args.amc_reward_frequency,
'ft_frequency': args.amc_ft_frequency})
#net_wrapper = NetworkWrapper(model, app_args, services)
#return sample_networks(net_wrapper, services)
if args.amc_protocol == "accuracy-guaranteed":
amc_cfg.target_density = None
# amc_cfg.reward_fn = lambda env, top1, top5, vloss, total_macs: 100*(env.dense_model_macs/total_macs)/(90.5 - top1)*(top1>10)
        ## TODO: new reward function? efficiency explanation
amc_cfg.reward_fn = lambda env, top1, top5, vloss, total_macs: (top1>10)*(1-(total_macs/env.dense_model_macs))/(92 - top1)
        ## Original reward function, good effect, maybe better (TODO), efficiency explanation
# amc_cfg.reward_fn = lambda env, top1, top5, vloss, total_macs: 2*(1-top1/100) + (1-top5/100) + ((4* env.dense_model_macs)/total_macs)
#amc_cfg.reward_fn = lambda env, top1, top5, vloss, total_macs: -(1-top1/100) * math.log(total_macs)
amc_cfg.action_constrain_fn = None
elif args.amc_protocol == "mac-constrained":
amc_cfg.target_density = args.amc_target_density
amc_cfg.reward_fn = lambda env, top1, top5, vloss, total_macs: top1/100 #(90.5 - top1) / 10
amc_cfg.action_constrain_fn = DistillerWrapperEnvironment.get_action
elif args.amc_protocol == "mac-constrained-experimental":
amc_cfg.target_density = args.amc_target_density
amc_cfg.reward_fn = amc_reward_fn
amc_cfg.action_constrain_fn = None
else:
raise ValueError("{} is not supported currently".format(args.amc_protocol))
steps_per_episode = conv_cnt
if args.amc_agent_algo == "DDPG":
amc_cfg.heatup_noise = 0.5
amc_cfg.initial_training_noise = 0.5
amc_cfg.training_noise_decay = 0.996 # 0.998
amc_cfg.num_heatup_epochs = 50 #args.amc_heatup_epochs
amc_cfg.num_training_epochs = args.amc_training_epochs
training_noise_duration = amc_cfg.num_training_epochs * steps_per_episode
heatup_duration = amc_cfg.num_heatup_epochs * steps_per_episode
if amc_cfg.agent_algo == "Random-policy":
return random_agent(DistillerWrapperEnvironment(model, app_args, amc_cfg, services))
if RLLIB == "spinup":
msglogger.info("AMC: Using spinup")
env1 = DistillerWrapperEnvironment(model, app_args, amc_cfg, services)
env2 = DistillerWrapperEnvironment(model, app_args, amc_cfg, services)
ddpg_spinup(env1, env2)
else:
msglogger.info("AMC: Using coach")
# When we import the graph_manager from the ADC_DDPG preset, we implicitly instruct
# Coach to create and use our DistillerWrapperEnvironment environment.
# So Distiller calls Coach, which creates the environment, trains the agent, and ends.
if args.amc_agent_algo == "DDPG":
from examples.automated_deep_compression.presets.ADC_DDPG import graph_manager, agent_params
agent_params.exploration.noise_percentage_schedule = PieceWiseSchedule([
(ConstantSchedule(amc_cfg.heatup_noise), EnvironmentSteps(heatup_duration)),
(ExponentialSchedule(amc_cfg.initial_training_noise, 0, amc_cfg.training_noise_decay),
EnvironmentSteps(training_noise_duration))])
# agent_params.exploration.noise_percentage_schedule = ConstantSchedule(0)
elif "ClippedPPO" in args.amc_agent_algo:
from examples.automated_deep_compression.presets.ADC_ClippedPPO import graph_manager, agent_params
# These parameters are passed to the Distiller environment
graph_manager.env_params.additional_simulator_parameters = {'model': model,
'app_args': app_args,
'amc_cfg': amc_cfg,
'services': services}
coach_logs_dir = os.path.join(msglogger.logdir, 'coach')
os.mkdir(coach_logs_dir)
task_parameters = TaskParameters(experiment_path=coach_logs_dir)
graph_manager.create_graph(task_parameters)
graph_manager.improve()
# This is a temporary hack!
resnet50_params = ["module.layer1.0.conv1.weight", "module.layer1.0.conv2.weight",
"module.layer1.1.conv1.weight", "module.layer1.1.conv2.weight",
"module.layer1.2.conv1.weight", "module.layer1.2.conv2.weight",
"module.layer2.0.conv1.weight", "module.layer2.0.conv2.weight",
"module.layer2.1.conv1.weight", "module.layer2.1.conv2.weight",
"module.layer2.2.conv1.weight", "module.layer2.2.conv2.weight",
"module.layer2.3.conv1.weight", "module.layer2.3.conv2.weight",
"module.layer3.0.conv1.weight", "module.layer3.0.conv2.weight",
"module.layer3.1.conv1.weight", "module.layer3.1.conv2.weight",
"module.layer3.2.conv1.weight", "module.layer3.2.conv2.weight",
"module.layer3.3.conv1.weight", "module.layer3.3.conv2.weight",
"module.layer3.4.conv1.weight", "module.layer3.4.conv2.weight",
"module.layer3.5.conv1.weight", "module.layer3.5.conv2.weight",
"module.layer4.0.conv1.weight", "module.layer4.0.conv2.weight",
"module.layer4.1.conv1.weight", "module.layer4.1.conv2.weight",
"module.layer4.2.conv1.weight", "module.layer4.2.conv2.weight"]
resnet20_params = ["module.layer1.0.conv1.weight", "module.layer1.1.conv1.weight", "module.layer1.2.conv1.weight",
"module.layer2.0.conv1.weight", "module.layer2.1.conv1.weight", "module.layer2.2.conv1.weight",
"module.layer3.0.conv1.weight", "module.layer3.1.conv1.weight", "module.layer3.2.conv1.weight"]
resnet56_params = ["module.layer1.0.conv1.weight", "module.layer1.1.conv1.weight", "module.layer1.2.conv1.weight",
"module.layer1.3.conv1.weight", "module.layer1.4.conv1.weight", "module.layer1.5.conv1.weight",
"module.layer1.6.conv1.weight", "module.layer1.7.conv1.weight", "module.layer1.8.conv1.weight",
"module.layer2.0.conv1.weight", "module.layer2.1.conv1.weight", "module.layer2.2.conv1.weight",
"module.layer2.3.conv1.weight", "module.layer2.4.conv1.weight", "module.layer2.5.conv1.weight",
"module.layer2.6.conv1.weight", "module.layer2.7.conv1.weight", "module.layer2.8.conv1.weight",
"module.layer3.0.conv1.weight", "module.layer3.1.conv1.weight", "module.layer3.2.conv1.weight",
"module.layer3.3.conv1.weight", "module.layer3.4.conv1.weight", "module.layer3.5.conv1.weight",
"module.layer3.6.conv1.weight", "module.layer3.7.conv1.weight", "module.layer3.8.conv1.weight"]
resnet50_layers = [param[:-len(".weight")] for param in resnet50_params]
resnet20_layers = [param[:-len(".weight")] for param in resnet20_params]
resnet56_layers = [param[:-len(".weight")] for param in resnet56_params]
class NetworkWrapper(object):
def __init__(self, model, app_args, services):
self.app_args = app_args
self.services = services
self.conv_layers, _, _ = self.collect_conv_details(model)
self.reset(model)
def get_model_resources_requirements(self, model=None):
if model is None:
model = self.model
_, total_macs, total_nnz = collect_conv_details(model, self.app_args.dataset, True)
return total_macs, total_nnz
@property
def arch(self):
return self.app_args.arch
def collect_conv_details(self, model):
# Temporary ugly hack!
resnet_layers = None
if self.app_args.arch == "resnet20_cifar":
resnet_layers = resnet20_layers
elif self.app_args.arch == "resnet56_cifar":
resnet_layers = resnet56_layers
elif self.app_args.arch == "resnet50":
resnet_layers = resnet50_layers
return collect_conv_details(model, self.app_args.dataset, True, resnet_layers)
def num_layers(self):
return len(self.conv_layers)
def get_layer(self, idx):
try:
return self.conv_layers[idx]
except KeyError:
return None
def get_layer_macs(self, layer):
"""Return the number of MACs required to compute <layer>'s Convolution"""
if layer is None:
return 0
conv_module = distiller.model_find_module(self.model, layer.name)
# MACs = volume(OFM) * (#IFM * K^2)
dense_macs = (conv_module.out_channels * layer.ofm_h * layer.ofm_w) * (conv_module.in_channels * layer.k**2)
return dense_macs
def reset(self, model):
self.model = model
self.zeros_mask_dict = distiller.create_model_masks_dict(self.model)
def create_scheduler(self):
scheduler = distiller.CompressionScheduler(self.model)
masks = {param_name: masker.mask for param_name, masker in self.zeros_mask_dict.items()}
scheduler.load_state_dict(state={'masks_dict': masks})
return scheduler
def remove_structures(self, layer_id, fraction_to_prune, prune_what="channels"):
"""Physically remove channels and corresponding filters from the model
Returns the compute-sparsity of the layer with index 'layer_id'
"""
if layer_id not in range(self.num_layers()):
raise ValueError("idx=%d is not in correct range (0-%d)" % (layer_id, self.num_layers()))
if fraction_to_prune < 0:
raise ValueError("fraction_to_prune=%f is illegal" % (fraction_to_prune))
if fraction_to_prune == 0:
return 0
if fraction_to_prune == 1.0:
# For now, prevent the removal of entire layers
fraction_to_prune = ALMOST_ONE
layer = self.conv_layers[layer_id]
macs_before = self.get_layer_macs(layer)
conv_pname = layer.name + ".weight"
conv_p = distiller.model_find_param(self.model, conv_pname)
msglogger.info("ADC: removing %.1f%% %s from %s" % (fraction_to_prune*100, prune_what, conv_pname))
if prune_what == "channels":
calculate_sparsity = distiller.sparsity_ch
remove_structures = distiller.remove_channels
group_type = "Channels"
elif prune_what == "filters":
calculate_sparsity = distiller.sparsity_3D
group_type = "Filters"
remove_structures = distiller.remove_filters
else:
raise ValueError("unsupported structure {}".format(prune_what))
# Create a channel-ranking pruner
pruner = distiller.pruning.L1RankedStructureParameterPruner("adc_pruner", group_type,
fraction_to_prune, conv_pname)
pruner.set_param_mask(conv_p, conv_pname, self.zeros_mask_dict, meta=None)
del pruner
if (self.zeros_mask_dict[conv_pname].mask is None or
calculate_sparsity(self.zeros_mask_dict[conv_pname].mask) == 0):
msglogger.info("remove_structures: aborting because there are no channels to prune")
return 0
# Use the mask to prune
self.zeros_mask_dict[conv_pname].apply_mask(conv_p)
remove_structures(self.model, self.zeros_mask_dict, self.app_args.arch, self.app_args.dataset, optimizer=None)
conv_p = distiller.model_find_param(self.model, conv_pname)
return 1 - (self.get_layer_macs(layer) / macs_before)
def validate(self):
top1, top5, vloss = self.services.validate_fn(model=self.model)
return top1, top5, vloss
def train(self, num_epochs, episode=0):
# Train for zero or more epochs
opt_cfg = self.app_args.optimizer_data
optimizer = torch.optim.SGD(self.model.parameters(), lr=opt_cfg['lr'],
momentum=opt_cfg['momentum'], weight_decay=opt_cfg['weight_decay'])
compression_scheduler = self.create_scheduler()
acc_list = []
for _ in range(num_epochs):
# Fine-tune the model
accuracies = self.services.train_fn(model=self.model, compression_scheduler=compression_scheduler,
optimizer=optimizer, epoch=episode)
acc_list.extend(accuracies)
del compression_scheduler
return acc_list
class DistillerWrapperEnvironment(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self, model, app_args, amc_cfg, services):
self.pylogger = distiller.data_loggers.PythonLogger(msglogger)
self.tflogger = distiller.data_loggers.TensorBoardLogger(msglogger.logdir)
self.orig_model = model
self.app_args = app_args
self.amc_cfg = amc_cfg
self.services = services
self.net_wrapper = NetworkWrapper(model, app_args, services)
self.dense_model_macs, self.dense_model_size = self.net_wrapper.get_model_resources_requirements(model)
self.reset(init_only=True)
msglogger.info("Model %s has %d Convolution layers", self.app_args.arch, self.net_wrapper.num_layers())
msglogger.info("\tTotal MACs: %s" % distiller.pretty_int(self.dense_model_macs))
log_amc_config(amc_cfg)
self.episode = 0
self.best_reward = -1000
self.action_low = amc_cfg.action_range[0]
self.action_high = amc_cfg.action_range[1]
# Gym spaces documentation: https://gym.openai.com/docs/
if is_using_continuous_action_space(self.amc_cfg.agent_algo):
self.action_space = spaces.Box(self.action_low, self.action_high, shape=(1,))
self.action_space.default_action = self.action_low
else:
self.action_space = spaces.Discrete(10)
self.STATE_EMBEDDING_LEN = len(Observation._fields)
#self.observation_space = spaces.Box(0, float("inf"), shape=(self.STATE_EMBEDDING_LEN+self.num_layers(),))
self.observation_space = spaces.Box(0, float("inf"), shape=(self.STATE_EMBEDDING_LEN+1,))
#self.observation_space = spaces.Box(0, float("inf"), shape=(LayerDescLen * self.num_layers(), ))
#self.create_network_record_file()
self.stats_file = AMCStatsFile(os.path.join(msglogger.logdir, 'amc.csv'))
self.ft_stats_file = FineTuneStatsFile(os.path.join(msglogger.logdir, 'ft_top1.csv'))
def reset(self, init_only=False):
"""Reset the environment.
This is invoked by the Agent.
"""
msglogger.info("Resetting the environment (init_only={})".format(init_only))
self.current_layer_id = 0
self.prev_action = 0
self.model = copy.deepcopy(self.orig_model)
self.net_wrapper.reset(self.model)
self._removed_macs = 0
self.action_history = []
self.agent_action_history = []
if init_only:
return
initial_observation = self.get_obs()
return initial_observation
def current_layer(self):
return self.net_wrapper.get_layer(self.current_layer_id)
def episode_is_done(self):
return self.current_layer_id == self.net_wrapper.num_layers()
def removed_macs(self):
"""Return the amount of MACs removed so far.
This is normalized to the range 0..1
"""
return self._removed_macs / self.dense_model_macs
def render(self, mode='human'):
"""Provide some feedback to the user about what's going on.
This is invoked by the Agent.
"""
if self.current_layer_id == 0:
msglogger.info("+" + "-" * 50 + "+")
msglogger.info("Starting a new episode %d", self.episode)
msglogger.info("+" + "-" * 50 + "+")
msglogger.info("Render Environment: current_layer_id=%d" % self.current_layer_id)
distiller.log_weights_sparsity(self.model, -1, loggers=[self.pylogger])
def get_action(self, pruning_action):
"""Compute a resource-constrained action"""
reduced = self._removed_macs
rest = self.rest_macs_raw() * self.action_high
target_reduction = (1 - self.amc_cfg.target_density) * self.dense_model_macs
duty = target_reduction - (reduced + rest)
flops = self.net_wrapper.get_layer_macs(self.current_layer())
assert flops > 0
pruning_action_final = min(self.action_high, max(pruning_action, duty/flops))
if pruning_action_final != pruning_action:
msglogger.info("action ********** pruning_action={}==>pruning_action_final={:.2f}: reduced={:.2f} rest={:.2f} target={:.2f} duty={:.2f} flops={:.2f}".
format(pruning_action, pruning_action_final, reduced/self.dense_model_macs,
rest/self.dense_model_macs, 1-self.amc_cfg.target_density,
duty/self.dense_model_macs,
flops/self.dense_model_macs))
return pruning_action_final
def step(self, pruning_action):
"""Take a step, given an action.
The action represents the desired sparsity.
This function is invoked by the Agent.
"""
msglogger.info("env.step - current_layer_id={} episode={}".format(self.current_layer_id, self.episode))
msglogger.info("\tAgent pruning_action={}".format(pruning_action))
if is_using_continuous_action_space(self.amc_cfg.agent_algo):
pruning_action = np.clip(pruning_action[0], self.action_low, self.action_high)
else:
# Divide the action space into 10 discrete levels (0%, 10%, 20%,....90% sparsity)
pruning_action = pruning_action / 10
msglogger.info("\tAgent clipped pruning_action={}".format(pruning_action))
self.agent_action_history.append(pruning_action)
if self.amc_cfg.action_constrain_fn is not None:
pruning_action = self.amc_cfg.action_constrain_fn(self, pruning_action=pruning_action)
msglogger.info("Constrained pruning_action={}".format(pruning_action))
total_macs_before, _ = self.net_wrapper.get_model_resources_requirements(self.model)
layer_macs = self.net_wrapper.get_layer_macs(self.current_layer())
msglogger.info("\tlayer_macs={:.2f}".format(layer_macs / self.dense_model_macs))
msglogger.info("\tremoved_macs={:.2f}".format(self.removed_macs()))
msglogger.info("\trest_macs={:.2f}".format(self.rest_macs()))
if pruning_action > 0:
pruning_action = self.net_wrapper.remove_structures(self.current_layer_id,
fraction_to_prune=pruning_action,
prune_what="filters")
else:
pruning_action = 0
self.action_history.append(pruning_action)
total_macs_after, _ = self.net_wrapper.get_model_resources_requirements(self.model)
layer_macs_after_action = self.net_wrapper.get_layer_macs(self.current_layer())
# Update the various counters after taking the step
self.current_layer_id += 1
self._removed_macs += (total_macs_before - total_macs_after)
msglogger.info("actual_action={}".format(pruning_action))
msglogger.info("layer_macs={} layer_macs_after_action={} removed now={}".format(layer_macs,
layer_macs_after_action,
(layer_macs - layer_macs_after_action)))
msglogger.info("self._removed_macs={}".format(self._removed_macs))
assert math.isclose(layer_macs_after_action / layer_macs, 1 - pruning_action)
stats = ('Performance/Validation/',
OrderedDict([('requested_action', pruning_action)]))
distiller.log_training_progress(stats, None,
self.episode, steps_completed=self.current_layer_id,
total_steps=self.amc_cfg.conv_cnt, log_freq=1, loggers=[self.tflogger])
if self.episode_is_done():
msglogger.info("Episode is ending")
observation = self.get_final_obs()
reward, top1, total_macs, total_nnz = self.compute_reward()
normalized_macs = total_macs / self.dense_model_macs * 100
normalized_nnz = total_nnz / self.dense_model_size * 100
self.finalize_episode(top1, reward, total_macs, normalized_macs,
normalized_nnz, self.action_history, self.agent_action_history)
self.episode += 1
else:
if self.amc_cfg.ft_frequency is not None and self.current_layer_id % self.amc_cfg.ft_frequency == 0:
self.net_wrapper.train(1, self.episode)
observation = self.get_obs()
if self.amc_cfg.reward_frequency is not None and self.current_layer_id % self.amc_cfg.reward_frequency == 0:
reward, top1, total_macs, total_nnz = self.compute_reward(False)
else:
reward = 0
self.prev_action = pruning_action
info = {}
return observation, reward, self.episode_is_done(), info
def one_hot(self, n, r):
"""Produce a one-hot representation of the layer id"""
#return [1 if i == n else 0 for i in range(r)]
return [n]
def get_obs(self):
"""Produce a state embedding (i.e. an observation)"""
current_layer_macs = self.net_wrapper.get_layer_macs(self.current_layer())
current_layer_macs_pct = current_layer_macs/self.dense_model_macs
current_layer = self.current_layer()
conv_module = distiller.model_find_module(self.model, current_layer.name)
obs = [#current_layer.t,
conv_module.out_channels,
conv_module.in_channels,
current_layer.ifm_h,
current_layer.ifm_w,
current_layer.stride[0],
current_layer.k,
current_layer_macs_pct*100,
self.removed_macs()*100,
self.rest_macs()*100,
self.prev_action*100]
onehot_id = self.one_hot(self.current_layer_id, self.net_wrapper.num_layers())
msglogger.info("obs={} {}".format(onehot_id, Observation._make(obs)))
obs = np.array(onehot_id + obs)
assert (self.removed_macs() + current_layer_macs_pct + self.rest_macs()) <= 1
return obs
def get_final_obs(self):
"""Return the final state embedding (observation)
The final state is reached after we traverse all of the Convolution layers.
"""
obs = [#-1,
0,
0,
0,
0,
0,
0,
0,
self.removed_macs()*100,
self.rest_macs()*100,
self.prev_action*100]
onehot_id = self.one_hot(self.net_wrapper.num_layers(), self.net_wrapper.num_layers())
msglogger.info("obs={} {}".format(onehot_id, Observation._make(obs)))
obs = np.array(onehot_id + obs)
return obs
def whole_network_get_obs(self):
"""Produce a state embedding (i.e. an observation)"""
num_layers = self.net_wrapper.num_layers()
network_obs = np.empty(shape=(LayerDescLen, num_layers))
for layer_id in range(num_layers):
layer = self.get_layer(layer_id)
layer_macs = self.net_wrapper.get_layer_macs(layer)
layer_macs_pct = layer_macs/self.dense_model_macs
conv_module = distiller.model_find_module(self.model, layer.name)
obs = [layer.t,
conv_module.out_channels,
conv_module.in_channels,
layer.ifm_h,
layer.ifm_w,
layer.stride[0],
layer.k,
layer_macs_pct,
self.removed_macs(),
self.rest_macs()]
network_obs[:, layer_id] = np.array(obs)
#msglogger.info("obs={} {}".format(onehot_id, Observation._make(obs)))
#network_obs = network_obs.reshape(network_obs.shape[0], network_obs.shape[1], 1)
network_obs = network_obs.reshape(network_obs.shape[0] * network_obs.shape[1])
#msglogger.info("* obs={}".format(network_obs))
return network_obs
def whole_network_get_final_obs(self):
return self.get_obs()
def rest_macs_raw(self):
"""Return the number of remaining MACs in the layers following the current layer"""
rest = 0
for layer_id in range(self.current_layer_id, self.net_wrapper.num_layers()):
rest += self.net_wrapper.get_layer_macs(self.net_wrapper.get_layer(layer_id + 1))
return rest
def rest_macs(self):
return self.rest_macs_raw() / self.dense_model_macs
def is_macs_constraint_achieved(self, compressed_model_total_macs):
current_density = compressed_model_total_macs / self.dense_model_macs
return self.amc_cfg.target_density >= current_density
def compute_reward(self, log_stats=True):
"""Compute the reward"""
distiller.log_weights_sparsity(self.model, -1, loggers=[self.pylogger])
total_macs, total_nnz = self.net_wrapper.get_model_resources_requirements(self.model)
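        # Estimate the remaining fraction of weights: after physical thinning the conv (4-D) parameter
        # count shrinks directly, otherwise approximate it from the overall mask sparsity.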
if self.amc_cfg.perform_thinning:
compression = distiller.model_numel(self.model, param_dims=[4]) / self.dense_model_size
else:
compression = 1 - distiller.model_sparsity(self.model)/100
# What a hack!
total_nnz *= compression
accuracies = self.net_wrapper.train(self.amc_cfg.num_ft_epochs, self.episode)
self.ft_stats_file.add_record([self.episode, accuracies])
top1, top5, vloss = self.net_wrapper.validate()
print('***********************************BEFORE REWARD COMPUTE')
print('top1 is ', top1)
print('top5 is ', top5)
print('vloss is ', vloss)
print('total_mac is ', total_macs)
print('dense_model_macs is ', self.dense_model_macs)
print('model size is ', self.dense_model_size)
reward = self.amc_cfg.reward_fn(self, top1, top5, vloss, total_macs)
if log_stats:
macs_normalized = total_macs/self.dense_model_macs
msglogger.info("Total parameters left: %.2f%%" % (compression*100))
msglogger.info("Total compute left: %.2f%%" % (total_macs/self.dense_model_macs*100))
stats = ('Performance/EpisodeEnd/',
OrderedDict([('Loss', vloss),
('Top1', top1),
('Top5', top5),
('reward', reward),
('total_macs', int(total_macs)),
('macs_normalized', macs_normalized*100),
('log(total_macs)', math.log(total_macs)),
('total_nnz', int(total_nnz))]))
distiller.log_training_progress(stats, None, self.episode, steps_completed=0, total_steps=1,
log_freq=1, loggers=[self.tflogger, self.pylogger])
return reward, top1, total_macs, total_nnz
def finalize_episode(self, top1, reward, total_macs, normalized_macs,
normalized_nnz, action_history, agent_action_history):
"""Write the details of one network to a CSV file and create a checkpoint file"""
if reward > self.best_reward:
self.best_reward = reward
ckpt_name = self.save_checkpoint(is_best=True)
msglogger.info("Best reward={} episode={} top1={}".format(reward, self.episode, top1))
# else:
# if not best, then not save, save the disk space.
#ckpt_name = self.save_checkpoint(is_best=False)
fields = [self.episode, top1, reward, total_macs, normalized_macs,
normalized_nnz, action_history, agent_action_history]
self.stats_file.add_record(fields)
def save_checkpoint(self, is_best=False):
"""Save the learned-model checkpoint"""
scheduler = self.net_wrapper.create_scheduler()
episode = str(self.episode).zfill(3)
if is_best:
fname = "BEST_adc_episode_{}".format(episode)
else:
fname = "adc_episode_{}".format(episode)
self.services.save_checkpoint_fn(epoch=0, model=self.model,
scheduler=scheduler, name=fname)
del scheduler
return fname
def collect_conv_details(model, dataset, perform_thinning, layers_to_prune=None):
dummy_input = distiller.get_dummy_input(dataset)
g = SummaryGraph(model, dummy_input)
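    # SummaryGraph traces the model with a dummy input so that per-layer MACs, weight volumes and
    # feature-map shapes can be read off the traced graph below.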
conv_layers = OrderedDict()
total_macs = 0
total_params = 0
for id, (name, m) in enumerate(model.named_modules()):
if isinstance(m, torch.nn.Conv2d):
conv = SimpleNamespace()
conv.t = len(conv_layers)
conv.k = m.kernel_size[0]
conv.stride = m.stride
# Use the SummaryGraph to obtain some other details of the models
conv_op = g.find_op(normalize_module_name(name))
assert conv_op is not None
conv.weights_vol = conv_op['attrs']['weights_vol']
total_params += conv.weights_vol
conv.macs = conv_op['attrs']['MACs']
conv_pname = name + ".weight"
conv_p = distiller.model_find_param(model, conv_pname)
if not perform_thinning:
#conv.macs *= distiller.density_ch(conv_p) # Channel pruning
conv.macs *= distiller.density_3D(conv_p) # Filter pruning
total_macs += conv.macs
conv.ofm_h = g.param_shape(conv_op['outputs'][0])[2]
conv.ofm_w = g.param_shape(conv_op['outputs'][0])[3]
conv.ifm_h = g.param_shape(conv_op['inputs'][0])[2]
conv.ifm_w = g.param_shape(conv_op['inputs'][0])[3]
conv.name = name
conv.id = id
if layers_to_prune is None or name in layers_to_prune:
conv_layers[len(conv_layers)] = conv
return conv_layers, total_macs, total_params
import pandas as pd
def sample_networks(net_wrapper, services):
"""Sample networks from the posterior distribution.
1. Sort the networks we discovered using AMC by their reward.
2. Use the top 10% best-performing networks discovered by AMC to postulate a posterior distribution of the
density/sparsity of each layer:
p([layers-sparsity] | Top1, L1)
3. Sample 100 networks from this distribution.
For each such network: fine-tune, score using Top1, and save
"""
#fname = "logs/resnet20___2019.01.29-102912/amc.csv"
fname = "logs/resnet20___2019.02.03-210001/amc.csv"
df = pd.read_csv(fname)
#top1_sorted_df = df.sort_values(by=['top1'], ascending=False)
top1_sorted_df = df.sort_values(by=['reward'], ascending=False)
top10pct = top1_sorted_df[:int(len(df.index) * 0.1)]
dense_macs, _ = net_wrapper.get_model_resources_requirements()
layer_sparsities_list = []
for index, row in top10pct.iterrows():
layer_sparsities = row['action_history']
layer_sparsities = layer_sparsities[1:-1].split(",") # convert from string to list
layer_sparsities = [float(sparsity) for sparsity in layer_sparsities]
layer_sparsities_list.append(layer_sparsities)
layer_sparsities = np.array(layer_sparsities_list)
mean = layer_sparsities.mean(axis=0)
cov = np.cov(layer_sparsities.T)
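    # Fit a multivariate Gaussian to the per-layer sparsities of the top-10% networks and sample new
    # layer-sparsity configurations from it.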
num_networks = 100
data = np.random.multivariate_normal(mean, cov, num_networks)
orig_model = net_wrapper.model
for i in range(num_networks):
model = copy.deepcopy(orig_model)
net_wrapper.reset(model)
for layer_id, sparsity_level in enumerate(data[i]):
sparsity_level = min(max(0, sparsity_level), ALMOST_ONE)
net_wrapper.remove_structures(layer_id,
fraction_to_prune=sparsity_level,
prune_what="channels")
net_wrapper.train(1)
top1, top5, vloss = net_wrapper.validate()
"""Save the learned-model checkpoint"""
scheduler = net_wrapper.create_scheduler()
total_macs, _ = net_wrapper.get_model_resources_requirements(model)
fname = "{}_top1_{:2f}__density_{:2f}_sampled".format(net_wrapper.arch, top1, total_macs/dense_macs)
services.save_checkpoint_fn(epoch=0, model=net_wrapper.model,
scheduler=scheduler, name=fname)
del scheduler
| [] |
2024-01-10 | CEJSH/Copple_a_planner_with_copilot | copilot~main4.py | from fastapi import FastAPI
from pydantic import BaseModel
import openai
import os
from dotenv import load_dotenv
# Load environment variables from the .env file
load_dotenv()
app = FastAPI()
# Get the OpenAI API key from an environment variable
api_key = os.getenv("OPENAI_API_KEY")
openai.api_key = api_key
# Define the prompt
prompt1 = """Bot: How can I help you?
User: {{$input}}
---------------------------------------------
You are a classifier that categorizes the input as either a goal, an event, or a to-do:
Goal: Refers to a result or state that one aims to achieve within a specific time frame or an undefined period. Goals can be short-term or long-term, and they can be personal or related to a group or organization.
Event: A happening or occasion that takes place at a specific time and location. The time is specifically set on a daily or hourly basis.
To-Do: Refers to a small task or duty that needs to be accomplished.
When answering, please only answer classification.
"""
# Define the prompt
prompt2 = """Bot: How can I help you?
User: {{$input}}
---------------------------------------------
You are an action type recognizer that categorizes the input as either a create, read, update, or delete:
Create: Includes the act of meeting someone or doing something.
Read: Refers to the act of consuming information or data.Update: Involves modifying or changing, altering existing information or data.
Delete: Contains the meaning of deleting or making something disappear, Eradication, Elimination.
When classification is create, please only answer "추가하시겠습니까?"
When classification is read, please only answer "조회하시겠습니까?"
When classification is update, please only answer "수정하시겠습니까?"
When classification is delete, please only answer "삭제하시겠습니까?"
"""
# Function that classifies the input
def get_intent(input_text):
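    # Sends prompt1 plus the user's text to the Completions API (text-davinci-003) and returns the stripped classification label.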
response = openai.Completion.create(
engine="text-davinci-003",
prompt=f"{prompt1}\nUser: {input_text}\n",
max_tokens=500,
temperature=0.7,
frequency_penalty=0.0,
presence_penalty=0.0,
stop=None
)
return response.choices[0].text.strip()
# Function that classifies the input (related to adding plans)
def get_plan_intent(input_text):
response = openai.Completion.create(
engine="text-davinci-003",
prompt=f"{prompt2}\nUser: {input_text}\n",
max_tokens=500,
temperature=0.7,
frequency_penalty=0.0,
presence_penalty=0.0,
stop=None
)
return response.choices[0].text.strip()
# Define Pydantic models for requests and responses
class InputRequest(BaseModel):
input: str
class ClassificationResponse(BaseModel):
classification: str
# Create the endpoint for classification
@app.post("/plan_type")
async def plan_type(input_request: InputRequest):
input_text = input_request.input
result = get_intent(input_text)
return {"classification": result}
# Additional endpoint (related to adding plans)
@app.post("/plan_crud")
async def plan_crud(input_request: InputRequest):
input_text = input_request.input
result = get_plan_intent(input_text)
return {"classification": result}
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8000) | [
"Bot: How can I help you?\nUser: {{$input}}\n---------------------------------------------\nYou are a classifier that categorizes the input as either a goal, an event, or a to-do:\nGoal: Refers to a result or state that one aims to achieve within a specific time frame or an undefined period. Goals can be short-term or long-term, and they can be personal or related to a group or organization.\nEvent: A happening or occasion that takes place at a specific time and location. The time is specifically set on a daily or hourly basis.\nTo-Do: Refers to a small task or duty that needs to be accomplished.\nWhen answering, please only answer classification.\n\n\nUser: PLACEHOLDER\n",
"Bot: How can I help you?\nUser: {{$input}}\n---------------------------------------------\nYou are an action type recognizer that categorizes the input as either a create, read, update, or delete:\nCreate: Includes the act of meeting someone or doing something.\nRead: Refers to the act of consuming information or data.Update: Involves modifying or changing, altering existing information or data.\nDelete: Contains the meaning of deleting or making something disappear, Eradication, Elimination.\nWhen classification is create, please only answer \"추가하시겠습니까?\"\nWhen classification is read, please only answer \"조회하시겠습니까?\"\nWhen classification is update, please only answer \"수정하시겠습니까?\"\nWhen classification is delete, please only answer \"삭제하시겠습니까?\"\n",
"Bot: How can I help you?\nUser: {{$input}}\n---------------------------------------------\nYou are an action type recognizer that categorizes the input as either a create, read, update, or delete:\nCreate: Includes the act of meeting someone or doing something.\nRead: Refers to the act of consuming information or data.Update: Involves modifying or changing, altering existing information or data.\nDelete: Contains the meaning of deleting or making something disappear, Eradication, Elimination.\nWhen classification is create, please only answer \"추가하시겠습니까?\"\nWhen classification is read, please only answer \"조회하시겠습니까?\"\nWhen classification is update, please only answer \"수정하시겠습니까?\"\nWhen classification is delete, please only answer \"삭제하시겠습니까?\"\n\nUser: PLACEHOLDER\n",
"Bot: How can I help you?\nUser: {{$input}}\n---------------------------------------------\nYou are a classifier that categorizes the input as either a goal, an event, or a to-do:\nGoal: Refers to a result or state that one aims to achieve within a specific time frame or an undefined period. Goals can be short-term or long-term, and they can be personal or related to a group or organization.\nEvent: A happening or occasion that takes place at a specific time and location. The time is specifically set on a daily or hourly basis.\nTo-Do: Refers to a small task or duty that needs to be accomplished.\nWhen answering, please only answer classification.\n\n"
] |
2024-01-10 | CEJSH/Copple_a_planner_with_copilot | copilot~01-summarize.py | from fastapi import FastAPI, HTTPException, Path
from typing import List, Dict
import boto3
import os
from boto3.dynamodb.conditions import Attr
import openai # Import the openai library
import semantic_kernel as sk
from semantic_kernel.connectors.ai.open_ai import AzureTextCompletion, OpenAITextCompletion
from dotenv import load_dotenv
load_dotenv()
app = FastAPI()
kernel = sk.Kernel()
useAzureOpenAI = False
# Configure the connector. If you use Azure AI OpenAI, get settings and add connectors.
api_key = os.getenv("OPENAI_API_KEY")
openai.api_key = api_key
# Create the AWS DynamoDB resource.
dynamodb = boto3.resource('dynamodb', region_name='ap-northeast-2')
# Set the table name.
table_name = 'Event'
# Get the DynamoDB table.
table = dynamodb.Table(table_name)
@app.get("/get_events", response_model=List[dict])
def get_accounts():
try:
        # Use a DynamoDB scan to fetch all items in the table.
response = table.scan()
items = response.get('Items', [])
return items
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@app.get("/get_events/{user_id}", response_model=List[dict])
def get_events_by_user_id(user_id: str = Path(..., description="User ID to filter events")):
try:
        # Use a DynamoDB scan to filter events by user_id.
response = table.scan(
FilterExpression=Attr('UserId').eq(user_id)
)
items = response.get('Items', [])
return items
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
def generate_summary(input_text: str) -> str:
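    # Builds the schedule-summary prompt around the user's text and returns the model's completion.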
prompt = f"""Bot: I'm not sure what to do with this.
User: {{$input}}
-------------------------------------------------------------
You are a schedule summary.
Please summarize "Goal" separately, "todo" separately, and "event" separately.
What you're going to read from "Goal" is title, content, location.
The contents to read in the "Event" are title, content, and location.
What you're going to read in "Todo" is the title, content.
User: {input_text}
"""
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
max_tokens=500,
temperature=0.7,
frequency_penalty=0.0,
presence_penalty=0.0,
stop=None
)
return response.choices[0].text.strip()
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8000)
| [
"Bot: I'm not sure what to do with this.\nUser: {$input}\n-------------------------------------------------------------\nYou are a schedule summary. \nPlease summarize \"Goal\" separately, \"todo\" separately, and \"event\" separately.\nWhat you're going to read from \"Goal\" is title, content, location.\nThe contents to read in the \"Event\" are title, content, and location.\nWhat you're going to read in \"Todo\" is the title, content.\nUser: PLACEHOLDER\n"
] |
2024-01-10 | CEJSH/Copple_a_planner_with_copilot | copilot~practice.py | from fastapi import FastAPI
from pydantic import BaseModel
import openai
import os
from dotenv import load_dotenv
import uvicorn # You were missing this import
# Load environment variables from .env file
load_dotenv()
app = FastAPI()
# Get OpenAI API key from environment variable
api_key = os.getenv("OPENAI_API_KEY")
openai.api_key = api_key
# Define prompt
prompt = """Bot: How can I help you?
User: {{$input}}
---------------------------------------------
You are a schedule summary. Displays a summary of your schedule in the latest order based on today's date.
"""
class InputText(BaseModel):
text: str
@app.post("/summarize")
async def summarize(input_text: InputText):
response = openai.Completion.create(
engine="text-davinci-003",
prompt=f"{prompt}\nUser: {input_text.text}\n",
max_tokens=500,
temperature=0.7,
frequency_penalty=0.0,
presence_penalty=0.0,
stop=None
)
return {"summary": response.choices[0].text.strip()}
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=8000)
| [
"Bot: How can I help you?\nUser: {{$input}}\n---------------------------------------------\nYou are a schedule summary. Displays a summary of your schedule in the latest order based on today's date.\n"
] |
2024-01-10 | CEJSH/Copple_a_planner_with_copilot | copilot~main2.py | from fastapi import FastAPI
from pydantic import BaseModel
import openai
import os
from dotenv import load_dotenv
# Load environment variables from the .env file
load_dotenv()
app = FastAPI()
# Get the OpenAI API key from an environment variable
api_key = os.getenv("OPENAI_API_KEY")
openai.api_key = api_key
# Define the prompt
prompt1 = """Bot: How can I help you?
User: {{$input}}
---------------------------------------------
You are a classifier that categorizes the input as either a goal, an event, or a to-do:
Goal: Refers to a result or state that one aims to achieve within a specific time frame or an undefined period. Goals can be short-term or long-term, and they can be personal or related to a group or organization.
Event: A happening or occasion that takes place at a specific time and location. The time is specifically set on a daily or hourly basis.
To-Do: Refers to a small task or duty that needs to be accomplished.
When answering, please only answer classification.
"""
# Define prompt 2
prompt2 = """Bot: How can I help you?
User: {{$input}}
---------------------------------------------
You are an action type recognizer that categorizes the input as either a create, read, update, or delete:
Create: Includes the act of meeting someone or doing something.
Read: Refers to the act of consuming information or data.
Update: Involves modifying or changing existing information or data.
Delete: Involves removing or discarding something.
When answering, please only answer the type of action.
"""
# Function that classifies the input
def get_intent(input_text):
response = openai.Completion.create(
engine="text-davinci-003",
prompt=f"{prompt1}\nUser: {input_text}\n",
max_tokens=500,
temperature=0.7,
frequency_penalty=0.0,
presence_penalty=0.0,
stop=None
)
return response.choices[0].text.strip()
# Function that classifies the input (related to adding plans)
def get_plan_intent(input_text):
response = openai.Completion.create(
engine="text-davinci-003",
prompt=f"{prompt2}\nUser: {input_text}\n",
max_tokens=500,
temperature=0.7,
frequency_penalty=0.0,
presence_penalty=0.0,
stop=None
)
return response.choices[0].text.strip()
# Define Pydantic models for requests and responses
class InputRequest(BaseModel):
input: str
class ClassificationResponse(BaseModel):
classification: str
# Create the endpoint for classification
@app.post("/plan_type")
async def plan_type(input_request: InputRequest):
input_text = input_request.input
result = get_intent(input_text)
return {"classification": result}
# Additional endpoint (related to adding plans)
@app.post("/plan_crud")
async def plan_crud(input_request: InputRequest):
input_text = input_request.input
result = get_plan_intent(input_text)
return {"classification": result}
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8000) | [
"Bot: How can I help you?\nUser: {{$input}}\n---------------------------------------------\nYou are a classifier that categorizes the input as either a goal, an event, or a to-do:\nGoal: Refers to a result or state that one aims to achieve within a specific time frame or an undefined period. Goals can be short-term or long-term, and they can be personal or related to a group or organization.\nEvent: A happening or occasion that takes place at a specific time and location. The time is specifically set on a daily or hourly basis.\nTo-Do: Refers to a small task or duty that needs to be accomplished.\nWhen answering, please only answer classification.\n\n\nUser: PLACEHOLDER\n",
"Bot: How can I help you?\nUser: {{$input}}\n---------------------------------------------\nYou are an action type recognizer that categorizes the input as either a create, read, update, or delete:\nCreate: Includes the act of meeting someone or doing something.\nRead: Refers to the act of consuming information or data.\nUpdate: Involves modifying or changing existing information or data.\nDelete: Involves removing or discarding something.\nWhen answering, please only answer the type of action.\n\nUser: PLACEHOLDER\n",
"Bot: How can I help you?\nUser: {{$input}}\n---------------------------------------------\nYou are an action type recognizer that categorizes the input as either a create, read, update, or delete:\nCreate: Includes the act of meeting someone or doing something.\nRead: Refers to the act of consuming information or data.\nUpdate: Involves modifying or changing existing information or data.\nDelete: Involves removing or discarding something.\nWhen answering, please only answer the type of action.\n",
"Bot: How can I help you?\nUser: {{$input}}\n---------------------------------------------\nYou are a classifier that categorizes the input as either a goal, an event, or a to-do:\nGoal: Refers to a result or state that one aims to achieve within a specific time frame or an undefined period. Goals can be short-term or long-term, and they can be personal or related to a group or organization.\nEvent: A happening or occasion that takes place at a specific time and location. The time is specifically set on a daily or hourly basis.\nTo-Do: Refers to a small task or duty that needs to be accomplished.\nWhen answering, please only answer classification.\n\n"
] |
2024-01-10 | CEJSH/Copple_a_planner_with_copilot | copilot~20231010_summarize.py | from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
import requests
import openai
import os
from dotenv import load_dotenv
from pydantic import BaseModel
import uvicorn
app = FastAPI()
# CORS configuration
origins = [
"http://localhost:3000", # Add the URL of your React application
"http://43.202.77.171:3000", # Address of the React application
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"], # You can adjust this based on your needs
allow_headers=["*"], # You can adjust this based on your needs
)
# Load environment variables from .env file
load_dotenv()
# Get OpenAI API key from environment variable
api_key = os.getenv("OPENAI_API_KEY")
openai.api_key = api_key
# Define prompt
prompt = """Bot: How can I help you?
User: {{$input}}
---------------------------------------------
You are a schedule summary. Please inform the user of this schedule by summarizing it warmly and kindly.
Also, I want you to give me some advice on the schedule. And send me a lucky comment as a closing comment. Please answer in Korean.
"""
class SummaryResponse(BaseModel):
summary: str
@app.get("/summarize_node_data/{user_id}")
async def summarize_node_data(user_id: str):
    # Endpoint URL of the Node.js server
    node_url = f'http://localhost:8000/goal/summary2/{user_id}'  # Build the request URL from the user ID
try:
        # Send a GET request to the Node.js server
response = requests.get(node_url)
        # Check the response status code
if response.status_code == 200:
            # Fetch the response data and summarize it with OpenAI
node_data = response.text
response = openai.Completion.create(
engine="text-davinci-003",
prompt=f"{prompt}\nUser: {node_data}\n",
max_tokens=2000,
temperature=0.7,
frequency_penalty=0.0,
presence_penalty=0.0,
stop=None
)
summary = response.choices[0].text.strip()
return {"summary": summary}
else:
return {"summary": f"오류 응답 코드: {response.status_code}"}
except Exception as e:
return {"summary": f"오류 발생: {str(e)}"}
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=8000)
| [
"Bot: How can I help you?\nUser: {{$input}}\n---------------------------------------------\nYou are a schedule summary. Please inform the user of this schedule by summarizing it warmly and kindly. \nAlso, I want you to give me some advice on the schedule. And send me a lucky comment as a closing comment. Please answer in Korean.\n",
"Bot: How can I help you?\nUser: {{$input}}\n---------------------------------------------\nYou are a schedule summary. Please inform the user of this schedule by summarizing it warmly and kindly. \nAlso, I want you to give me some advice on the schedule. And send me a lucky comment as a closing comment. Please answer in Korean.\n\nUser: PLACEHOLDER\n"
] |
2024-01-10 | CEJSH/Copple_a_planner_with_copilot | copilot~main3.py | from fastapi import FastAPI
from pydantic import BaseModel
import openai
import os
from dotenv import load_dotenv
# Load environment variables from the .env file
load_dotenv()
app = FastAPI()
# Get the OpenAI API key from the environment variables
api_key = os.getenv("OPENAI_API_KEY")
openai.api_key = api_key
# Define prompt 1
prompt1 = """Bot: How can I help you?
User: {{$input}}
---------------------------------------------
You are a classifier that categorizes the input as either a goal, an event, or a to-do:
Goal: Refers to a result or state that one aims to achieve within a specific time frame or an undefined period. Goals can be short-term or long-term, and they can be personal or related to a group or organization.
Event: A happening or occasion that takes place at a specific time and location. The time is specifically set on a daily or hourly basis.
To-Do: Refers to a small task or duty that needs to be accomplished.
When answering, please only answer classification.
"""
# Define prompt 2
prompt2 = """Bot: How can I help you?
User: {{$input}}
---------------------------------------------
You are an action type recognizer that categorizes the input as either a create, read, update, or delete:
Create: Includes the act of meeting someone or doing something.
Read: Refers to the act of consuming information or data.
Update: Involves modifying or changing existing information or data.
Delete: Contains the meaning of deleting or making something disappear, Eradication, Elimination.
When answering, please answer the type of action and Say it in a soft tone
"""
# Function that classifies the input (goal / event / to-do)
def get_intent(input_text):
response = openai.Completion.create(
engine="text-davinci-003",
prompt=f"{prompt1}\nUser: {input_text}\n",
max_tokens=500,
temperature=0.7,
frequency_penalty=0.0,
presence_penalty=0.0,
stop=None
)
return response.choices[0].text.strip()
# Function that classifies the input action type (plan create/read/update/delete)
def get_plan_intent(input_text):
response = openai.Completion.create(
engine="text-davinci-003",
prompt=f"{prompt2}\nUser: {input_text}\n",
max_tokens=500,
temperature=0.7,
frequency_penalty=0.0,
presence_penalty=0.0,
stop=None
)
return response.choices[0].text.strip()
# Pydantic models for the request and response
class InputRequest(BaseModel):
input: str
class ClassificationResponse(BaseModel):
classification: str
# Create the endpoint for classification
@app.post("/plan_type")
async def plan_type(input_request: InputRequest):
input_text = input_request.input
result = get_intent(input_text)
return {"classification": result}
# Additional endpoint (for plan CRUD operations)
@app.post("/plan_crud")
async def plan_crud(input_request: InputRequest):
input_text = input_request.input
result = get_plan_intent(input_text)
return {"classification": result}
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8000) | [
"Bot: How can I help you?\nUser: {{$input}}\n---------------------------------------------\nYou are a classifier that categorizes the input as either a goal, an event, or a to-do:\nGoal: Refers to a result or state that one aims to achieve within a specific time frame or an undefined period. Goals can be short-term or long-term, and they can be personal or related to a group or organization.\nEvent: A happening or occasion that takes place at a specific time and location. The time is specifically set on a daily or hourly basis.\nTo-Do: Refers to a small task or duty that needs to be accomplished.\nWhen answering, please only answer classification.\n\n\nUser: PLACEHOLDER\n",
"Bot: How can I help you?\nUser: {{$input}}\n---------------------------------------------\nYou are an action type recognizer that categorizes the input as either a create, read, update, or delete:\nCreate: Includes the act of meeting someone or doing something.\nRead: Refers to the act of consuming information or data.\nUpdate: Involves modifying or changing existing information or data.\nDelete: Contains the meaning of deleting or making something disappear, Eradication, Elimination.\nWhen answering, please answer the type of action and Say it in a soft tone\n",
"Bot: How can I help you?\nUser: {{$input}}\n---------------------------------------------\nYou are a classifier that categorizes the input as either a goal, an event, or a to-do:\nGoal: Refers to a result or state that one aims to achieve within a specific time frame or an undefined period. Goals can be short-term or long-term, and they can be personal or related to a group or organization.\nEvent: A happening or occasion that takes place at a specific time and location. The time is specifically set on a daily or hourly basis.\nTo-Do: Refers to a small task or duty that needs to be accomplished.\nWhen answering, please only answer classification.\n\n",
"Bot: How can I help you?\nUser: {{$input}}\n---------------------------------------------\nYou are an action type recognizer that categorizes the input as either a create, read, update, or delete:\nCreate: Includes the act of meeting someone or doing something.\nRead: Refers to the act of consuming information or data.\nUpdate: Involves modifying or changing existing information or data.\nDelete: Contains the meaning of deleting or making something disappear, Eradication, Elimination.\nWhen answering, please answer the type of action and Say it in a soft tone\n\nUser: PLACEHOLDER\n"
] |
2024-01-10 | karenswang/menu-interpreter | openai_lambda~lambda_function.py | import json
import boto3
import openai
from openai import OpenAI
from requests_aws4auth import AWS4Auth
from opensearchpy import OpenSearch, RequestsHttpConnection
import os
HOST = 'search-restaurant-menus-ts66x77o3tq7lapsmpgpjjcqli.us-east-1.es.amazonaws.com'
REGION = 'us-east-1'
service = 'es'
INDEX = 'menus'
def lambda_handler(event, context):
try:
print("event: ", event)
# Check if 'body' key exists in the event
if 'body' not in event:
return {
'statusCode': 400,
'headers': {'Access-Control-Allow-Origin': '*'},
'body': json.dumps('No body in the request')
}
body = json.loads(event['body'])
username = body['username']
menu_id = event['pathParameters']['menu_id']
dynamodb = boto3.resource('dynamodb')
user_table = dynamodb.Table('user')
# menu_table = dynamodb.Table('menu-items')
openai.api_key = os.environ['OPENAI_API_KEY']
client = OpenAI(
api_key=openai.api_key,
)
user_data = user_table.get_item(Key={'username': username})
if 'Item' not in user_data or not user_data['Item'].get('isLoggedIn'):
return {
'statusCode': 403,
'headers': {
'Access-Control-Allow-Origin': '*'
},
'body': json.dumps('User is not logged in or does not exist.')
}
preferences = user_data['Item']['preferences']
print("menu id: ", menu_id)
os_client = OpenSearch(hosts=[{
'host': HOST,
'port': 443
}],
http_auth=get_awsauth(REGION, 'es'),
use_ssl=True,
verify_certs=True,
connection_class=RequestsHttpConnection)
search_response = os_client.search(
index='menus',
body={
'query': {
'term': {
'restaurant_name': menu_id
}
}
}
)
print("search response: ", search_response)
if not search_response['hits']['hits']:
return {
'statusCode': 404,
'headers': {'Access-Control-Allow-Origin': '*'},
'body': json.dumps('Menu does not exist.')
}
menu_data = search_response['hits']['hits'][0]['_source']
menu_items = menu_data['menu_text']
restaurant_name = menu_data['restaurant_name']
# menu_data = menu_table.get_item(Key={'menu_id': menu_id})
# if 'Item' not in menu_data:
# return {
# 'statusCode': 404,
# 'headers': {
# 'Access-Control-Allow-Origin': '*'
# },
# 'body': json.dumps('Menu does not exist.')
# }
# menu_items = menu_data['Item']['menu_text']
# restaurant_name = menu_data['Item']['restaurant_name']
report = generate_report(client, menu_items, restaurant_name, preferences)
return {
'statusCode': 200,
'headers': {
'Access-Control-Allow-Origin': '*'
},
'body': json.dumps(report)
}
except Exception as e:
return {
'statusCode': 500,
'headers': {'Access-Control-Allow-Origin': '*'},
'body': json.dumps(f'An error occurred: {str(e)}')
}
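# Ask gpt-3.5-turbo to write a report on how well the restaurant's menu matches the user's dietary
# preferences, following the example report format embedded in the prompt.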
def generate_report(client, menu_items, restaurant_name, preferences):
completion = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You have information about a restaurant's dishes and a user's dietary preferences."},
{"role": "user", "content": f"Menu items: {menu_items}\nRestaurant name: {restaurant_name}\nUser preferences: {json.dumps(preferences)}\n\nGenerate a report on the matchness of the menu to the user preferences. \
The report should include the following sections:\n\n1. Resteraunt Name \n\n2. Favorites: Highlight dishes that match the user food preference but do not contain user dieary restrictions.\n\n3. Dishes with Allergen Warning: Identify dishes that contains users dieary restrictions.\
Use this as an example:\
Restaurant: David Burke Tavern\nMost dominant food category: seafood (80%)\
Recommended: cauliflower steak, roasted rack of lamb, wild mushroom ravioli\
Avoid: fire roasted halibut, diver scallops, lobster salad\
Your match: low\
Reasoning: based on your profile, you should avoid food with high uric acid. Since most dishes in David Burke Tavern are seafood, we do not recommend dining here."}
]
)
answer = completion.choices[0].message.content
return answer
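# Build SigV4 credentials from the Lambda's boto3 session so that OpenSearch requests are signed
# with the function's IAM role.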
def get_awsauth(region, service):
cred = boto3.Session().get_credentials()
return AWS4Auth(cred.access_key,
cred.secret_key,
region,
service,
session_token=cred.token)
| [
"You have information about a restaurant's dishes and a user's dietary preferences."
] |
2024-01-10 | patrickmineault/neuroai-tree | scripts~compute_umap_labels.py | import pickle
import openai
import pandas as pd
import tqdm
from sentence_transformers import SentenceTransformer
from sklearn.cluster import KMeans
from sklearn.manifold import MDS, TSNE
from umap import UMAP
def calculate_2d_embeddings(df: pd.DataFrame, perplexity=5, random_state=42):
"""This function plots the t-SNE embeddings of the long text.
Args:
df (pd.DataFrame): The dataframe containing the text.
perplexity (int): The perplexity to use for the t-SNE. Defaults to
5.
random_state (int): The random state to use for the t-SNE.
Defaults to 42.
Returns:
        df (pd.DataFrame): The input dataframe with added tsne_x/y, mds_x/y, and umap_x/y columns.
"""
# Start by calculating embeddings
papers = (df.title.fillna("") + "\n" + df.abstract.fillna("")).values
model = SentenceTransformer("sentence-transformers/all-mpnet-base-v2")
embeddings = model.encode(papers, show_progress_bar=True)
# Create a t-SNE model and transform the data
model = TSNE(
n_components=2,
perplexity=perplexity,
random_state=random_state,
init="random",
learning_rate=200,
)
vis_dims = model.fit_transform(embeddings)
df["tsne_x"] = vis_dims[:, 0]
df["tsne_y"] = vis_dims[:, 1]
# Create a MDS model and transform the data
model = MDS(
n_components=2,
random_state=random_state,
)
vis_dims = model.fit_transform(embeddings)
df["mds_x"] = vis_dims[:, 0]
df["mds_y"] = vis_dims[:, 1]
# Create a MDS model and transform the data
model = UMAP(
n_components=2,
random_state=random_state,
)
vis_dims = model.fit_transform(embeddings)
df["umap_x"] = vis_dims[:, 0]
df["umap_y"] = vis_dims[:, 1]
return df
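# Ask gpt-3.5-turbo for a short (2-5 word) topic label summarizing one cluster of paper titles.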
def request_label(titles):
titles = "\n".join(titles)
prompt = f"Give a 2-5 word label that summarizes the common topic of these abstracts. Avoid vague labels like 'artificial intelligence', 'machine learning', 'neuroscience' and 'deep learning'\n\n {titles}"
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt},
],
)
return completion["choices"][0]["message"]["content"].split(".")[0]
def main():
df = pd.read_csv("data/processed/neuroai-works.csv")
df.sort_values("ss_cited_by_count", ascending=False, inplace=True)
df = calculate_2d_embeddings(df)
df.to_csv("data/processed/neuroai-works-umap.csv", index=False)
df = df[df["openai_category"].isin(["A", "B", "C"])]
kmeans = KMeans(n_clusters=25, random_state=0, n_init="auto").fit(
df[["umap_x", "umap_y"]]
)
labels = kmeans.fit_predict(df[["umap_x", "umap_y"]].values)
label_map = []
for label in tqdm.tqdm(range(kmeans.cluster_centers_.shape[0])):
titles = df.iloc[labels == label].title.values.tolist()
label_name = request_label(titles)
label_map.append(label_name)
label_info = {
"label_centers": kmeans.cluster_centers_,
"labels": label_map,
"paper_labels": [label_map[x] for x in labels],
}
with open("data/processed/umap_labels.pickle", "wb") as f:
pickle.dump(label_info, f)
if __name__ == "__main__":
main()
| [
"You are a helpful assistant.",
"Give a 2-5 word label that summarizes the common topic of these abstracts. Avoid vague labels like 'artificial intelligence', 'machine learning', 'neuroscience' and 'deep learning'\n\n PLACEHOLDER"
] |
2024-01-10 | patrickmineault/neuroai-tree | scripts~collate_info.py | import numpy as np
import pandas as pd
from transformers import pipeline
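# Format an authorship list (dicts carrying author display names) as a display string,
# abridged to the first five names plus "et al."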
def authorships_to_string(authorships):
names = [a["author"].get("display_name", "") for a in authorships]
if len(names) > 5:
return ", ".join(names[:5]) + ", et al."
return ", ".join(names)
def authorships_to_string_unabridged(authorships):
names = [a["author"].get("display_name", "") for a in authorships]
return ", ".join(names)
def get_highlighter():
qa_model = pipeline(
"question-answering",
model="distilbert-base-cased-distilled-squad",
revision="626af31",
)
question = (
"""What is biologically inspired by the brain, cortex, neuroscience or """
"""psychology, excluding deep or convolutional neural networks?"""
)
return qa_model, question.strip()
def highlight_abstracts(df):
highlighter, question = get_highlighter()
highlighted = []
for abstract in df.abstract:
try:
highlight = highlighter(question, abstract)
abstract_highlighted = (
abstract[: highlight["start"]]
+ " **"
+ highlight["answer"]
+ "** "
+ abstract[highlight["end"] :]
)
highlighted.append(abstract_highlighted)
except ValueError:
# No answer found.
highlighted.append(abstract)
df["abstract_highlighted"] = highlighted
return df
def get_journal_name(x):
if (
"source" in x
and x["source"] is not None
and "display_name" in x["source"]
):
return x["source"]["display_name"]
return ""
def main():
df = pd.read_json("data/processed/works.jsonl", lines=True)
df = df.rename(columns={"source": "origin"})
df_ss = pd.read_json("data/processed/semantic_scholar.jsonl", lines=True)
df_ss["ss_cited_by_count"] = df_ss["result"].map(
lambda x: x["citationCount"]
)
df_ss["ssid"] = df_ss["result"].map(lambda x: x["paperId"])
df_ss = df_ss[["id", "ss_cited_by_count", "ssid"]]
# Do a left join on the paper ID
df = df.merge(df_ss, left_on="id", right_on="id", how="left")
# Drop bad rows
df = df[~df["id"].duplicated()]
df = df[~df["ssid"].duplicated()]
df = df[df.title != "Title"]
df["author_list"] = df.authorships.map(authorships_to_string)
df["author_list_unabridged"] = df.authorships.map(
authorships_to_string_unabridged
)
df["journal"] = df.primary_location.map(lambda x: get_journal_name(x))
df["link"] = df["primary_location"].map(lambda x: x["landing_page_url"])
# Get the classification from OpenAI
df_class = pd.read_json(
"data/processed/coarse_classification.jsonl", lines=True
)
df = df.merge(df_class, on="id")
# Get the coarse classification from the keyword-based detection.
df_class = pd.read_json("data/processed/categories.jsonl", lines=True)
df = df.merge(df_class, on="id")
cites = (df["oa_neuro_citations"].values >= 2) | (
df["ss_neuro_citations"].values >= 2
)
keywords = df["keywords_found"].values >= 1
manual = df["origin"] == "manual"
df["reason"] = np.where(
manual,
"Manually added",
np.where(
cites & keywords,
"Matched 1+ abstract keywords & cited 2+ neuro papers",
np.where(
keywords,
"Matched 1+ abstract keywords",
np.where(
cites,
"Cited 2+ neuro papers",
"Other",
),
),
),
)
df_all = df.copy()
df = df[~df["openai_category"].isna()]
assert df.shape[0] < 10000, "Too many papers!"
df = highlight_abstracts(df)
df = df[
[
"id",
"ssid",
"title",
"publication_year",
"journal",
"link",
"author_list",
"author_list_unabridged",
"cited_by_count",
"openai_category",
"abstract",
"abstract_highlighted",
"ss_cited_by_count",
"oa_cited_journals",
"ss_cited_journals",
"reason",
]
]
df = df[~df.id.duplicated()]
# Save the final dataframe
df.to_csv("data/processed/neuroai-works.csv", index=False)
df_all = df_all[~df_all.id.duplicated()]
df_all = df_all[
[
"id",
"ssid",
"title",
"publication_year",
"journal",
"link",
"author_list",
"cited_by_count",
"openai_category",
"ss_cited_by_count",
"oa_cited_journals",
"ss_cited_journals",
"reason",
]
]
df_all.to_csv("data/processed/all-works.csv", index=False)
if __name__ == "__main__":
main()
| [] |
2024-01-10 | polar-app/polar-app | packages~polar-openai~create_file.py | import os
import openai
openai.api_key = os.getenv("OPENAI_API_KEY")
response = openai.File.create(
file=open("data.json"),
purpose='answers'
)
print(response)
# {
# "bytes": 50,
# "created_at": 1623183163,
# "filename": "data.jsonl",
# "id": "file-V4REEAisMqbXPop2vdofQSTK",
# "object": "file",
# "purpose": "answers",
# "status": "uploaded",
# "status_details": null
# }
| [] |
2024-01-10 | polar-app/polar-app | packages~polar-openai~answer.py | import os
import openai
openai.api_key = os.getenv("OPENAI_API_KEY")
questions = [
"Who did Tutankhamun marry?",
"How old was Tutankhamun when he rose to the throne?",
"Who restored the Ancient Egyptian religion?",
"Where did King Tut move his father's remains?",
"Who funded Howard Carter's discovery of Tutankhamun's tomb?",
"What was the status of King Tut's tomb when it was found?",
"What is the curse of the pharaohs?",
"How tall was King Tut?",
"How tall was King Tut in feet?",
"How tall was King Tut in cm?",
"How did King Tut die?",
]
# models = ["curie", "davinci"]
# models = ["davinci"]
models = ["davinci"]
#models = ["davinci"]
max_tokens=35
search_model='curie'
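# Query the legacy OpenAI Answers API against the uploaded file and print the top answer for each question.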
def doAnswer(model, question):
response = openai.Answer.create(
search_model=search_model,
model=model,
question=question,
file="file-BW3Opoe0JbJJzto76qSn7wOp",
examples_context="In 2017, U.S. life expectancy was 78.6 years.",
examples=[
["What is human life expectancy in the United States?","78 years."]
],
max_tokens=max_tokens,
stop=["\n", "<|endoftext|>"],
)
# print(response)
#print("%-10s %-70s %s" % (model, question, response.answers))
print("%-70s\n %s" % (question, response.answers[0]))
for model in models:
print(model)
print("max_tokens: %s" % max_tokens)
print("search_model: %s" % search_model)
print("====")
for question in questions:
doAnswer(model, question)
#
# response = openai.Answer.create(
# search_model="davinci",
# model="davinci",
# question="How did King Tut die?",
# file="file-BW3Opoe0JbJJzto76qSn7wOp",
# examples_context="In 2017, U.S. life expectancy was 78.6 years.",
# examples=[
# ["What is human life expectancy in the United States?","78 years."]
# ],
# max_tokens=25,
# stop=["\n", "<|endoftext|>"],
# )
#
# print(response.answers)
#
# print("====")
# print(response)
| [] |
2024-01-10 | samuelkoes/DDPG-tf2 | src~command_line.py | #!/usr/bin/python3
"""
Run the model in training or testing mode
"""
import logging
import random
import gym
from tqdm import trange
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from src.common_definitions import TOTAL_EPISODES, UNBALANCE_P
from src.model import Brain
from src.utils import Tensorboard, ArgumentParserShowHelpOnError
def main(): # pylint: disable=too-many-locals, too-many-statements
"""
We create an environment, create a brain,
create a Tensorboard, load weights, create metrics,
create lists to store rewards, and then we run the training loop
"""
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
parser = ArgumentParserShowHelpOnError(
prog="Deep Deterministic Policy Gradient (DDPG)",
description="Deep Deterministic Policy Gradient (DDPG) in Tensorflow 2"
)
parser.add_argument('--env', type=str, nargs='?',
default="BipedalWalker-v3",
help='The OpenAI Gym environment to train on, '
'e.g. BipedalWalker-v3, LunarLanderContinuous-v2,'
' Pendulum-v0')
parser.add_argument('--render_env', type=bool, nargs='?', default=True,
help='Render the environment to be visually visible')
parser.add_argument('--train', type=bool, nargs='?', required=True,
help='Train the network on the modified DDPG algorithm')
parser.add_argument('--use-noise', type=bool, nargs='?', required=True,
help='OU Noise will be applied to the policy action')
parser.add_argument('--eps-greedy', type=float, nargs='?', default=0.95,
help="The epsilon for Epsilon-greedy in the policy's action")
parser.add_argument('--warm-up', type=bool, nargs='?', default=1,
help='Following recommendation from OpenAI Spinning Up, the actions in the '
'early epochs can be set random to increase exploration. This warm up '
'defines how many epochs are initially set to do this.')
parser.add_argument(
'--checkpoints-path', type=str, nargs='?', default="checkpoints/DDPG_",
help='Save the weight of the network in the defined checkpoint file directory.'
)
parser.add_argument(
'--tf-log-dir', type=str, nargs='?', default="./logs/DDPG/",
help='Save the logs of the training step.'
)
args = parser.parse_args()
# Step 1. create the gym environment
env = gym.make(args.env)
action_space_high = env.action_space.high[0]
action_space_low = env.action_space.low[0]
brain = Brain(env.observation_space.shape[0], env.action_space.shape[0], action_space_high,
action_space_low)
tensorboard = Tensorboard(log_dir=args.tf_log_dir)
# load weights if available
logging.info("Loading weights from %s*, make sure the folder exists", args.checkpoints_path)
brain.load_weights(args.checkpoints_path)
# all the metrics
acc_reward = tf.keras.metrics.Sum('reward', dtype=tf.float32)
actions_squared = tf.keras.metrics.Mean('actions', dtype=tf.float32)
Q_loss = tf.keras.metrics.Mean('Q_loss', dtype=tf.float32)
A_loss = tf.keras.metrics.Mean('A_loss', dtype=tf.float32)
# To store reward history of each episode
ep_reward_list = []
# To store average reward history of last few episodes
avg_reward_list = []
# run iteration
with trange(TOTAL_EPISODES) as t:
for ep in t:
prev_state = env.reset()
acc_reward.reset_states()
actions_squared.reset_states()
Q_loss.reset_states()
A_loss.reset_states()
brain.noise.reset()
for _ in range(2000):
if args.render_env: # render the environment into GUI
env.render()
# Receive state and reward from environment.
cur_act = brain.act(
tf.expand_dims(prev_state, 0),
_notrandom=(
(ep >= args.warm_up)
and
(
random.random()
<
args.eps_greedy+(1-args.eps_greedy)*ep/TOTAL_EPISODES
)
),
noise=args.use_noise
)
state, reward, done, _ = env.step(cur_act)
brain.remember(prev_state, reward, state, int(done))
# Update weights
if args.train:
c, a = brain.learn(brain.buffer.get_batch(unbalance_p=UNBALANCE_P))
Q_loss(c)
A_loss(a)
# Post update for next step
acc_reward(reward)
actions_squared(np.square(cur_act/action_space_high))
prev_state = state
if done:
break
ep_reward_list.append(acc_reward.result().numpy())
# Mean of last 40 episodes
avg_reward = np.mean(ep_reward_list[-40:])
avg_reward_list.append(avg_reward)
# Print the average reward
t.set_postfix(r=avg_reward)
tensorboard(ep, acc_reward, actions_squared, Q_loss, A_loss)
# Save weights
if args.train and ep % 5 == 0:
brain.save_weights(args.checkpoints_path)
env.close()
if args.train:
brain.save_weights(args.checkpoints_path)
logging.info("Training done...")
# Plotting graph
# Episodes versus Avg. Rewards
plt.plot(avg_reward_list)
plt.xlabel("Episode")
plt.ylabel("Avg. Epsiodic Reward")
plt.show()
if __name__ == "__main__":
main()
| [] |
2024-01-10 | luisricardodiaz/BoomAI | speech_capture.py | import sounddevice as sd
import soundfile as sf
import openai
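# Record five seconds of mono microphone audio, save it to Input_Audio/output.mp3,
# and transcribe it with Whisper.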
def recording_input():
sample_rate = 44100
duration = 5
channels = 1
print("Recording Audio...")
recording = sd.rec(int(duration * sample_rate), samplerate=sample_rate, channels=channels)
sd.wait()
output_file = "Input_Audio/output.mp3" # Output file path
sf.write(output_file, recording, sample_rate)
audio_file = open("Input_Audio/output.mp3", "rb")
transcript = openai.Audio.transcribe(
model ="whisper-1", file= audio_file, temperature = 0.2)
print(transcript['text'])
return transcript['text']
| [] |
2024-01-10 | yangkevin2/doc-story-generation | story_generation~edit_module~entity.py | from collections import Counter, defaultdict
import os
import time
import pathlib
import csv
import random
from unicodedata import name
import math
import logging
import torch
import openai
from scipy.special import softmax
from story_generation.common.util import *
from story_generation.common.data.split_paragraphs import split_paragraphs
from story_generation.rewrite_module.heuristics import *
ENTITY_MODEL_STRING = 'text-curie-001'
STRONGER_ENTITY_MODEL_STRING = 'text-davinci-002'
CHARACTER_THRESHOLD = 0.5
infobox_example_library = None
def get_infobox_example_library():
global infobox_example_library
if infobox_example_library is None:
example_lines = []
with open(os.path.join(pathlib.Path(__file__).parent.resolve(), 'infobox_example_library.csv'), 'r') as f:
reader = csv.DictReader(f)
for row in reader:
example_lines.append(row)
infobox_example_library = {
'sentences': [example['text'] for example in example_lines],
'names': [example['name'] for example in example_lines],
'keys': [example['key'] for example in example_lines],
'values': [example['value'] for example in example_lines],
'encodings': sentence_encode([example['text'] for example in example_lines]),
}
return infobox_example_library
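# Build a few-shot attribute-extraction prompt by retrieving the example sentences most similar to the
# current sentence (sentence-encoder dot product), skipping examples whose names collide with the current characters.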
def construct_infobox_prompt(current_sentence, current_name, other_names, num_examples=5, random_order=False):
instruction = 'Extract attributes from the given context using the format Attribute: Value.\n----'
infobox_example_library = get_infobox_example_library()
current_encoding = sentence_encode([current_sentence])
scores = (current_encoding * infobox_example_library['encodings']).sum(axis=1)
scores_indices = list(range(len(scores)))
best_example_indices = sorted(scores_indices, key=lambda i: scores[i], reverse=True)
best_example_indices = [i for i in best_example_indices if all([tok not in infobox_example_library['sentences'][i] for tok in current_name.split() + sum([other_name.split() for other_name in other_names], [])])] # don't get weird examples from lucky matching names
best_example_indices = [i for i in best_example_indices if all([tok not in current_name and tok not in current_sentence for tok in infobox_example_library['names'][i].split()])]
best_example_indices = best_example_indices[:num_examples]
    best_example_indices = list(reversed(best_example_indices)) # put the best ones last (as a list so it can be shuffled below)
if random_order:
random.shuffle(best_example_indices)
for i in best_example_indices:
name = infobox_example_library['names'][i]
instruction = instruction + '\nContext (' + name + '): ' + infobox_example_library['sentences'][i]
keys = [key.strip() for key in infobox_example_library['keys'][i].split(',') if len(key.strip()) > 0]
values = [value.strip() for value in infobox_example_library['values'][i].split(',') if len(value.strip()) > 0]
assert len(keys) == len(values)
for key, value in zip(keys, values):
if key.endswith('\'s'):
instruction = instruction + '\n' + name + ' is ' + key + ' ' + value # John is Jane's X
else:
instruction = instruction + '\n' + name + '\'s ' + key + ' is ' + value # John's sister is X
instruction = instruction + '\n----'
return instruction + '\nContext (' + current_name + '): ' + current_sentence + '\n' + current_name
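# Ask GPT-3 whether the named entity is a character rather than a place, object, or event; the logit bias
# restricts the answer to " Yes"/" No", and all three probes must agree.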
def infer_is_character(name, passage, gpt3_model, model_string, plural=False, threshold=CHARACTER_THRESHOLD, return_logprobs=False):
answers = []
all_logprobs = []
for other in ['a place', 'an object', 'an event']:
query = passage.strip() + '\n\n' + 'In this passage, is {} a character{}, as opposed to {}? Yes or no.\n\n'.format(name, ' or group of characters' if plural else '', other)
retry = True
logging.log(21, 'GPT3 CALL' + ' ' + model_string + ' ' + str(len(gpt3_model.tokenizer.encode(query)) + 1))
while retry:
try:
completion = openai.Completion.create(
engine=model_string,
prompt=query,
max_tokens=1,
temperature=1,
top_p=1,
frequency_penalty=0.5,
presence_penalty=0,
logit_bias={3363: 50, 1400: 50}, # ' Yes' and ' No' for GPT3
logprobs=2)
retry = False
except Exception as e:
logging.log(23, str(e))
time.sleep(0.2)
logging.log(23, 'retrying...')
logprobs = completion['choices'][0]['logprobs']['top_logprobs'][0]
logprobs = [logprobs[' No'], logprobs[' Yes']]
answers.append(softmax(logprobs)[1] > threshold)
all_logprobs.append(logprobs)
if return_logprobs:
return all(answers), all_logprobs
else:
return all(answers)
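# Ask GPT-3 whether the named entity is a single character or a group of characters,
# with the logit bias restricting the answer to " single"/" group".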
def infer_is_group(name, passage, gpt3_model, model_string, plural=False, threshold=0.5, return_logprobs=False):
query = passage.strip() + '\n\n' + 'In this passage, is {} a single character or a group of characters?\n\n{} is a'.format(name, name)
retry = True
logging.log(21, 'GPT3 CALL' + ' ' + model_string + ' ' + str(len(gpt3_model.tokenizer.encode(query)) + 1))
while retry:
try:
completion = openai.Completion.create(
engine=model_string,
prompt=query,
max_tokens=1,
temperature=1,
top_p=1,
frequency_penalty=0.5,
presence_penalty=0,
logit_bias={1448: 50, 2060: 50}, # ' group' and ' single' for GPT3
logprobs=2)
retry = False
except Exception as e:
logging.log(23, str(e))
time.sleep(0.2)
logging.log(23, 'retrying...')
logprobs = completion['choices'][0]['logprobs']['top_logprobs'][0]
logprobs = [logprobs[' single'], logprobs[' group']]
answer = softmax(logprobs)[1] > threshold
if return_logprobs:
return answer, logprobs
else:
return answer
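# Sample a replacement name of similar gender, banning tokens of the original name
# that do not already appear in the context.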
def resample_name(name, context, instruct_model):
name_tokens = name.strip().split()
banned_tokens = [tok for tok in name_tokens if tok not in context]
banned_tokens = banned_tokens + [' ' + tok for tok in banned_tokens]
logit_bias = {}
for name_token in banned_tokens:
for tok in instruct_model.tokenizer.encode(name_token):
logit_bias[tok] = -100
for _ in range(10):
new_name = instruct_model(["Generate a name with the same gender similar to " + name + ".\n\nName:"], logit_bias=logit_bias, stop=[',', '.'], top_p=1, max_tokens=10, model_string=ENTITY_MODEL_STRING)[0].strip()
if len(new_name) < 10: # ideally, it should terminate due to a stop, and not max length
break
return new_name
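# Parse an infobox line of the form "<name>'s <key> is <value>" (or "<name> is <other>'s <relation>")
# into a (key, value) pair; returns (None, None) if the line cannot be parsed.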
def split_key_entry(infobox_entry, entity_name):
try:
if infobox_entry.startswith(entity_name + '\'s'):
new_key, new_entry = tuple(infobox_entry.split(' is '))
new_key = new_key[len(entity_name + '\'s'):]
else:
assert infobox_entry.startswith(entity_name + ' is')
split_entry = infobox_entry.split(' is ')
assert len(split_entry) == 2
new_key_entry = split_entry[1]
split_new_key_entry = new_key_entry.split('\'') # James's sister
assert len(split_new_key_entry) == 2
new_key = split_new_key_entry[0].strip() + '\'s'
new_entry = split_new_key_entry[1][1:].strip() # remove the training s from 's constructions, or a space if not there
except:
return None, None
return new_key.strip(), new_entry.strip()
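# Run extractive QA over the context, merge equivalent answers into entailment groups,
# and return the highest-scoring group together with its aggregate score.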
def qa_entailment_best_answer(question, context, num_beams=1):
new_qa_entries_passage, new_qa_scores_passage = score_qa(question, context, num_beams=num_beams)
qa_passage_dict = defaultdict(lambda: 0)
for answer, score in zip(new_qa_entries_passage, new_qa_scores_passage):
qa_passage_dict[question + ' ' + answer] += score
qa_passage_groups = get_entailment_groups(qa_passage_dict)
new_qa_entry_passage = sorted(list(qa_passage_groups.keys()), key=lambda x: qa_passage_groups[x], reverse=True)[0]
new_qa_score_passage = qa_passage_groups[new_qa_entry_passage]
return new_qa_entry_passage, new_qa_score_passage
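# Propagate mutual relations between entities (e.g. if A is recorded as B's sister, infer and record
# what B is to A), flagging contradictions against attributes that were already recorded.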
def complete_mutual_relations(entities, instruct_model, return_contradiction_prob=False):
contradictions = defaultdict(lambda: {})
contradiction_prob = 0
entity_key_pairs = []
for entity in entities.values():
for key in entity.attributes.keys():
entity_key_pairs.append((entity.name, key, entity.attributes[key]['entailment']))
# go through (entity, key) pairs in order of entailment
entity_key_pairs = sorted(entity_key_pairs, key=lambda x: x[2], reverse=True)
for entity_name, key, _ in entity_key_pairs:
entity = entities[entity_name]
if entity.attributes[key]['newly_entered']:
# match entities in the newly entered attributes; if you have exactly 1 other entity then:
key_matched_entities, _, _ = deduplicate_match_entities(detect_entities(key), [n for n in entities.keys() if n != entity.name])
value_matched_entities, _, _ = deduplicate_match_entities(detect_entities(entity.attributes[key]['entry']), [n for n in entities.keys() if n != entity.name])
if len(key_matched_entities) + len(value_matched_entities) == 1:
# if other entity is key + 's, then flip the value and key while stripping 's, also flipping the relation.
other_name = list(key_matched_entities)[0] if len(key_matched_entities) == 1 else list(value_matched_entities)[0]
if len(key_matched_entities) == 1:
if not key.endswith('\'s'): # e.g. Sarah's
continue
if len(key.strip().split()) > 2: # might miss some longer names, but this will catch most bad cases where it's not actually a relation
continue
self_is_others_relation = entity.attributes[key]['entry'].strip()
if len(self_is_others_relation.strip().split()) > 1:
# if the value is multiple words, then try to parse and keep a single NN span only. if that still fails, then just skip this one?, with log
spans = pos_tag(self_is_others_relation)
self_is_others_relation = ' '.join([s.text for s in spans if s.tag in ['NN', 'NNS', 'NNP', 'NNPS']])
if len(self_is_others_relation.strip().split()) > 1: # multiple nouns in the entry
logging.log(23, 'FAILED TO PARSE MUTUAL RELATION: ' + entity.name + ' ' + key + ' ' + str(entity.attributes[key]))
continue
prompt = entity.name + ' is ' + other_name + '\'s ' + self_is_others_relation + '. ' + other_name + ' is ' + entity.name + '\'s'
try:
other_is_selfs_relation = Counter([x.strip() for x in instruct_model([prompt], stop=['\n', '.', ','], num_completions=10, model_string=STRONGER_ENTITY_MODEL_STRING) if len(x.strip()) > 0]).most_common()[0][0]
except:
logging.log(23, 'FAILED TO GET OTHER RELATION: ' + entity.name + ' ' + key + ' ' + str(entity.attributes[key]) + ' ' + prompt)
continue
else:
# if other entity in the value + 's name, then flip it to key + 's and change the current key to the value, also flipping the relation.
if not key.endswith('\'s name'): # e.g., spouse's name
continue
other_is_selfs_relation = key.replace('\'s name', '').strip()
prompt = other_name + ' is ' + entity.name + '\'s ' + other_is_selfs_relation + '. ' + entity.name + ' is ' + other_name + '\'s'
try:
self_is_others_relation = Counter([x.strip() for x in instruct_model([prompt], stop=['\n', '.', ','], num_completions=10, model_string=STRONGER_ENTITY_MODEL_STRING) if len(x.strip()) > 0]).most_common()[0][0]
except:
logging.log(23, 'FAILED TO GET OTHER RELATION:' + ' ' + entity.name + ' ' + key + ' ' + str(entity.attributes[key]) + ' ' + prompt)
continue
logging.log(23, 'CHECKING MUTUAL RELATION FOR CONTRADICTIONS: ' + ' ' + entity.name + ' ' + key + ' ' + str(entity.attributes[key]))
if len(key_matched_entities) == 1:
if other_is_selfs_relation + '\'s name' in entity.attributes and not entity.attributes[other_is_selfs_relation + '\'s name']['newly_entered']:
existing_entailment_input = entity.create_entailment_input(other_is_selfs_relation + '\'s name', entity.name, entity.attributes[other_is_selfs_relation + '\'s name']['entry'])
new_entailment_input = entity.create_entailment_input(other_is_selfs_relation + '\'s name', entity.name, other_name)
if not consistent_equals(existing_entailment_input, new_entailment_input):
logging.log(23, 'POTENTIAL MUTUAL RELATION CONTRADICTION')
logging.log(23, 'PREEXISTING' + ' ' + entity.name + ' ' + other_is_selfs_relation + '\'s name' + ' ' + str(entity.attributes[other_is_selfs_relation + '\'s name']))
logging.log(23, 'NEW' + ' ' + entity.name + ' ' + key + ' ' + str(entity.attributes[key]))
contradictions[entity.name][other_is_selfs_relation + '\'s name'] = (entity.attributes[other_is_selfs_relation + '\'s name'], {'key': key, 'entry': entity.attributes[key]})
if not return_contradiction_prob:
continue
contradiction_prob = max(contradiction_prob, math.exp(score_entailment(existing_entailment_input, new_entailment_input)[0][0, 0]))
else:
if other_name + '\'s' in entity.attributes and not entity.attributes[other_name + '\'s']['newly_entered']:
existing_entailment_input = entity.create_entailment_input(other_name + '\'s', entity.name, entity.attributes[other_name + '\'s']['entry'])
new_entailment_input = entity.create_entailment_input(other_name + '\'s', entity.name, self_is_others_relation)
if not consistent_equals(existing_entailment_input, new_entailment_input):
logging.log(23, 'POTENTIAL MUTUAL RELATION CONTRADICTION')
logging.log(23, 'PREEXISTING' + ' ' + entity.name + ' ' + other_name + '\'s' + ' ' + str(entity.attributes[other_name + '\'s']))
logging.log(23, 'NEW' + ' ' + entity.name + ' ' + key + ' ' + str(entity.attributes[key]))
contradictions[entity.name][other_name + '\'s'] = (entity.attributes[other_name + '\'s'], {'key': key, 'entry': entity.attributes[key]})
if not return_contradiction_prob:
continue
contradiction_prob = max(contradiction_prob, math.exp(score_entailment(existing_entailment_input, new_entailment_input)[0][0, 0]))
# check the corresponding relations for the other entity.
other_entity = entities[other_name]
if self_is_others_relation + '\'s name' in other_entity.attributes and not other_entity.attributes[self_is_others_relation + '\'s name']['newly_entered']:
existing_entailment_input = other_entity.create_entailment_input(self_is_others_relation + '\'s name', other_name, other_entity.attributes[self_is_others_relation + '\'s name']['entry'])
new_entailment_input = other_entity.create_entailment_input(self_is_others_relation + '\'s name', other_name, entity.name)
if not consistent_equals(existing_entailment_input, new_entailment_input):
logging.log(23, 'POTENTIAL MUTUAL RELATION CONTRADICTION')
logging.log(23, 'PREEXISTING' + ' ' + other_name+ ' ' + self_is_others_relation + '\'s name' + ' ' + str(other_entity.attributes[self_is_others_relation + '\'s name']))
logging.log(23, 'NEW' + ' ' + entity.name + ' ' + key+ ' ' + str(entity.attributes[key]))
contradictions[other_name][self_is_others_relation + '\'s name'] = (other_entity.attributes[self_is_others_relation + '\'s name'], {'key': key, 'entry': entity.attributes[key]})
if not return_contradiction_prob:
continue
contradiction_prob = max(contradiction_prob, math.exp(score_entailment(existing_entailment_input, new_entailment_input)[0][0, 0]))
if entity.name + '\'s' in other_entity.attributes and not other_entity.attributes[entity.name + '\'s']['newly_entered']:
existing_entailment_input = other_entity.create_entailment_input(entity.name + '\'s', other_name, other_entity.attributes[entity.name + '\'s']['entry'])
new_entailment_input = other_entity.create_entailment_input(entity.name + '\'s', other_name, other_is_selfs_relation)
if not consistent_equals(existing_entailment_input, new_entailment_input):
logging.log(23, 'POTENTIAL MUTUAL RELATION CONTRADICTION')
logging.log(23, 'PREEXISTING' + ' ' + other_name + ' ' + entity.name + '\'s' + ' ' + str(other_entity.attributes[entity.name + '\'s']))
logging.log(23, 'NEW' + ' ' + entity.name + ' ' + key + ' ' + str(entity.attributes[key]))
contradictions[other_name][entity.name + '\'s'] = (other_entity.attributes[entity.name + '\'s'], {'key': key, 'entry': entity.attributes[key]})
if not return_contradiction_prob:
continue
contradiction_prob = max(contradiction_prob, math.exp(score_entailment(existing_entailment_input, new_entailment_input)[0][0, 0]))
if len(contradictions) > 0:
continue
logging.log(23, 'COMPLETING MUTUAL RELATION:' + ' ' + entity.name + ' ' + key + ' ' + str(entity.attributes[key]))
if len(key_matched_entities) == 1:
_, change_status = entity.add_if_better(other_is_selfs_relation + '\'s name', {'text': entity.attributes[key]['text'],
'entry': other_name,
'entailment': entity.attributes[key]['entailment'],
'newly_entered': True}, detect_contradictions=False, return_contradiction_prob=return_contradiction_prob)
if change_status != 'none':
logging.log(23, 'NEW RELATION' + ' ' + change_status + ' ' + entity.name + ' ' + other_is_selfs_relation + '\'s name' + ' ' + str(entity.attributes[other_is_selfs_relation + '\'s name']))
else:
_, change_status = entity.add_if_better(other_name + '\'s', {'text': entity.attributes[key]['text'],
'entry': self_is_others_relation,
'entailment': entity.attributes[key]['entailment'],
'newly_entered': True}, detect_contradictions=False, return_contradiction_prob=return_contradiction_prob)
if change_status != 'none':
logging.log(23, 'NEW RELATION' + ' ' + change_status + ' ' + entity.name + ' ' + other_name + '\'s' + ' ' + str(entity.attributes[other_name + '\'s']))
_, change_status = other_entity.add_if_better(self_is_others_relation + '\'s name', {'text': entity.attributes[key]['text'],
'entry': entity.name,
'entailment': entity.attributes[key]['entailment'],
'newly_entered': True}, detect_contradictions=False, return_contradiction_prob=return_contradiction_prob)
if change_status != 'none':
logging.log(23, 'NEW RELATION' + ' ' + change_status + ' ' + other_name + ' ' + self_is_others_relation + '\'s name' + ' ' + str(other_entity.attributes[self_is_others_relation + '\'s name']))
_, change_status = other_entity.add_if_better(entity.name + '\'s', {'text': entity.attributes[key]['text'],
'entry': other_is_selfs_relation,
'entailment': entity.attributes[key]['entailment'],
'newly_entered': True}, detect_contradictions=False, return_contradiction_prob=return_contradiction_prob)
if change_status != 'none':
logging.log(23, 'NEW RELATION' + ' ' + change_status + ' ' + other_name + ' ' + entity.name + '\'s' + ' ' + str(other_entity.attributes[entity.name + '\'s']))
# change all newly entered attributes to false
for entity in entities.values():
for key in entity.attributes:
entity.attributes[key]['newly_entered'] = False
return entities, contradiction_prob if return_contradiction_prob else contradictions
class Entity:
BANNED_ATTRIBUTES = ['personality', 'eye', 'hair'] # disproportionately hallucinated / problematic
ENTAILMENT_THRESHOLD = 0.5
FACT_ENTAILMENT_THRESHOLD = 0.3
ENTAILMENT_RECHECK_THRESHOLD = 0.9
QA_SCORE_THRESHOLD = 0.5
NO_ANSWER_WORDS = ['unknown', 'not ', 'unspecified', 'n/a', 'stated', 'mentioned', 'no answer', 'tba', 'tbd', 'never']
def __init__(self, name, description=None, is_character=None, attributes=None):
self.name = name
self.description = description
self.outline_descriptions_by_piece = {}
self.is_character = is_character
self.attributes = attributes if attributes is not None else {}
def __str__(self):
formatted = self.name + ': ' + self.description + '\n' + 'Is Character: ' + str(self.is_character) + '\n' + 'Attributes: ' + str(self.attributes) + '\n'
for attribute in self.attributes:
formatted += attribute + ': ' + self.attributes[attribute]['entry'] + '\n'
return formatted
def set_description(self, desc):
self.description = desc
def get_outline_description_up_to_node(self, node, max_tokens=128, tokenizer=None, include_base_description=True):
# get detected descriptions of this character from all previous outline nodes up to and including this one
outline_description_components = [self.description] if include_base_description else []
nodes_with_entity = 0
for n in node.root().depth_first_traverse():
if self.name in node.selected_entities:
nodes_with_entity += 1
if n in self.outline_descriptions_by_piece:
outline_description_components.append(self.outline_descriptions_by_piece[n]['current_description'])
outline_description_components = [o for o in outline_description_components if o not in self.outline_descriptions_by_piece[n]['entailed_previous_descriptions']]
if n == node:
break
if tokenizer is None:
desc = ' '.join(outline_description_components)
else:
desc = ''
for component in outline_description_components:
if len(tokenizer.tokenize((desc + ' ' + component).strip())) > max_tokens:
break
desc = (desc + ' ' + component).strip()
if nodes_with_entity == 1:
desc += ' This is the first appearance of ' + self.name + ' in the story.' # probably won't interfere with max tokens if this is the first appearance
return desc
def add_new_outline_description(self, node, model, model_string, character_strings, num_samples=5, additional_description='', other_nodes=None):
# given an outline node, attempt to detect new information about this character, corresponding to character development over time
if other_nodes is None:
other_nodes = []
current_outline_description = self.get_outline_description_up_to_node(node, include_base_description=True)
context = node.text + (('\n\n' + additional_description.strip()) if len(additional_description.strip()) > 0 else '')
prefix = '{}\n\nThis context tells us the following about {}:\n\n1.'.format(context, self.name)
suffix = '\n\nAdditionally, we know from elsewhere that ' + current_outline_description
logit_bias = get_repetition_logit_bias(model.tokenizer, current_outline_description, -1, False, exclude_strings=list(character_strings.keys()), include_upper=True)
if len(logit_bias) > 250:
logging.warning('WARNING: too many logit biases in outline character description generation, skipping uppercase biases')
logit_bias = get_repetition_logit_bias(model.tokenizer, current_outline_description, -1, False, exclude_strings=list(character_strings.keys()), include_upper=False)
if len(logit_bias) > 250:
logging.warning('WARNING: still too many logit biases in outline character description generation, skipping random biases')
logit_bias = {k: v for k, v in list(logit_bias.items())[:250]}
logging.log(21, 'ADD NEW OUTLINE DESCRIPTION PREFIX: ' + prefix)
logging.log(21, 'ADD NEW OUTLINE DESCRIPTION SUFFIX: ' + suffix)
# generate and parse out descriptions, with basic repetition filtering
new_descriptions = model([prefix], suffixes=[suffix] if len(suffix.strip()) > 0 else None, max_tokens=64, num_completions=num_samples, model_string=model_string, logit_bias=logit_bias)
logging.log(21, 'raw new descriptions for {}'.format(self.name) + '\n' + str(new_descriptions))
processed_descriptions = []
for new_description in new_descriptions:
if len(new_description.strip()) > 5:
new_description = split_paragraphs(new_description.strip(), mode='sentence')[0]
new_description = new_description.replace('1.', '').strip().split('2.')[0].strip()
if len(new_description) < 5:
continue
if new_description[-1] not in '.?!' and new_description[-2] not in '.?!':
new_description += '.'
if calculate_repetition_length_penalty(new_description, [current_outline_description]) == 0:
processed_descriptions.append(new_description)
# entailment filtering: don't use descriptions already entailed by previous descriptions
if len(processed_descriptions) > 0:
entailment_scores, _ = score_entailment([context for _ in range(len(processed_descriptions))], processed_descriptions)
entailment_probs = softmax(entailment_scores, axis=1)[:, 2]
processed_descriptions = [d for d, p in zip(processed_descriptions, entailment_probs) if p > 0.5] # want to add descriptions which are implied by context
if len(processed_descriptions) > 0:
entailment_scores, _ = score_entailment([self.description for _ in range(len(processed_descriptions))], processed_descriptions)
entailment_probs = softmax(entailment_scores, axis=1)[:, 2]
processed_descriptions = [d for d, p in zip(processed_descriptions, entailment_probs) if p < 0.5] # want to add descriptions which are not implied by main description
previous_node_texts = []
nodes_to_traverse = []
for n in node.root().depth_first_traverse():
nodes_to_traverse.append(n)
if n == node:
break
nodes_to_traverse += other_nodes
for n in nodes_to_traverse:
if len(processed_descriptions) == 0:
break
if n in self.outline_descriptions_by_piece:
previous_node_texts.append(self.outline_descriptions_by_piece[n]['current_description'])
entailment_scores, _ = score_entailment([previous_node_texts[-1] for _ in range(len(processed_descriptions))], processed_descriptions)
entailment_probs = softmax(entailment_scores, axis=1)[:, 2]
processed_descriptions = [d for d, p in zip(processed_descriptions, entailment_probs) if p < 0.5] # want to add descriptions which aren't already implied by previous parts of outline
if len(processed_descriptions) > 0:
new_description = processed_descriptions[0]
# figure out which previous descriptions are entailed by the new description so we don't return them later when grabbing descriptions at or after this node
if len(previous_node_texts) > 0:
entailment_scores, _ = score_entailment([new_description for _ in range(len(previous_node_texts))], previous_node_texts)
entailment_probs = softmax(entailment_scores, axis=1)[:, 2]
entailed_previous_descriptions = [d for d, p in zip(previous_node_texts, entailment_probs) if p > 0.5]
else:
entailed_previous_descriptions = []
logging.log(22, 'added outline description for {} from outline section {}: {}'.format(self.name, node.text, new_description))
logging.log(22, 'this new description entails the previous descriptions: {}'.format(entailed_previous_descriptions))
self.outline_descriptions_by_piece[node] = {'current_description': new_description, 'entailed_previous_descriptions': entailed_previous_descriptions}
else:
logging.log(22, 'failed to add new description due to repetition')
def reset_attributes(self):
self.attributes = {}
def create_entailment_input(self, key, name, value):
if key.endswith('\'s'):
return (name + ' is ' + key + ' ' + value).strip() # e.g. character is other character's sibling
else:
return (name + '\'s ' + key + ' is ' + value).strip()
def get_referred_name(self, passage):
return ' '.join([tok for tok in self.name.strip().split() if tok in passage])
def resample_entry(self, info_entry, fact, gpt3_model, num_samples=3, split_entry=False):
"""
Resample the entry based on the fact and the model.
"""
if split_entry:
key, _ = split_key_entry(info_entry, self.name)
else:
key = info_entry
if key is None:
return info_entry
prompt = self.create_entailment_input(key, self.name, '')
candidate_entries = [entry for entry in gpt3_model([fact + '\n\n' + prompt], stop=['\n', '.', ','], num_completions=num_samples, top_p=1, temperature=0.8, model_string=ENTITY_MODEL_STRING) if len(entry.strip()) > 0 and entry.strip() != self.name]
if len(candidate_entries) == 0:
return None
fact_entailment_scores, _ = score_entailment([fact for _ in range(len(candidate_entries))],
[self.create_entailment_input(key, self.get_referred_name(fact), entry) for entry in candidate_entries])
fact_entailment_probs = softmax(fact_entailment_scores, axis=1)
candidate_entries = [candidate_entries[i] for i in range(len(candidate_entries)) if fact_entailment_probs[i, 2] > self.ENTAILMENT_THRESHOLD]
if len(candidate_entries) == 0:
return None
entry_counter = Counter(candidate_entries)
return prompt + entry_counter.most_common()[0][0]
@torch.no_grad()
def infer_description(self, passage, gpt3_model, max_length=48): # ideally text-davinci-001
assert self.description is None
query = 'Excerpt:\n\n... ' + passage.strip() + ' ...\n\nWrite a one-sentence summary of ' + self.name + ' in the context of this story.\n\n' + self.name + ' is'
for _ in range(5):
descriptions = gpt3_model([query], num_completions=5, max_tokens=max_length, model_string=ENTITY_MODEL_STRING)
descriptions = [d for d in descriptions if len(d.strip()) > 0 and len(gpt3_model.tokenizer.encode(d)) < max_length]
if len(descriptions) > 0:
break
if len(descriptions) == 0: # give up
logging.log(23, 'Warning: Failed to generate sufficiently short description for ' + self.name)
descriptions = gpt3_model([query], num_completions=1, max_tokens=max_length, model_string=ENTITY_MODEL_STRING)
self.set_description(self.name + ' is' + descriptions[0])
return self.description
@torch.no_grad()
def infer_is_character(self, passage, gpt3_model, threshold=CHARACTER_THRESHOLD): # ideally text-davinci-002
assert self.is_character is None
self.is_character = infer_is_character(self.name, passage, gpt3_model, gpt3_model.model, threshold=threshold)
return self.is_character
@torch.no_grad()
def infer_attributes(self, passage, gpt3_model, num_samples=3, detect_contradictions=True, other_names=[], agreement_threshold=2, return_contradiction_prob=False):
if self.is_character is None or not self.is_character:
return {}
logging.log(23, 'INFERRING FOR ' + self.name)
prompt = passage.strip() + '\n\nQuestion: List very brief facts about __CHARACTER__\'s appearance, personality, and relationship to other characters.\n\n1. __CHARACTER__'.replace('__CHARACTER__', self.name)
facts_strings = gpt3_model([prompt], num_completions=num_samples, logit_bias={679:-100, 1375:-100, 3347:-100, 1544:-100}, top_p=1, model_string=STRONGER_ENTITY_MODEL_STRING) # ban " He", "He", " She", "She" to force model to refer by name at the beginning of a new entry, to avoid coreference issues
facts_strings = ['1. ' + self.name + s for s in facts_strings]
logging.log(22, 'facts strings:' + ' ' + str(facts_strings))
facts = sum([split_list(s, strict=True) for s in facts_strings], [])
facts = [s for s in facts if len(s.strip()) > 0]
facts = [split_paragraphs(s, mode='sentence')[0] for s in facts] # cutoff extra sentences when it's too long
facts = [f for f in facts if any([tok in f for tok in self.name.split() if tok not in ' '.join(other_names)])]
logging.log(22, 'facts' + ' ' + str(facts))
facts = get_agreed_facts(facts, agreement_threshold=agreement_threshold-1)
logging.log(22, 'agreed facts' + ' ' + str(facts))
fact_entailment_counts = get_entailment_groups({fact: 1 for fact in facts})
facts = sorted(list(fact_entailment_counts.keys()), key=lambda x: fact_entailment_counts[x], reverse=True) # most "agreed" facts first
logging.log(22, 'facts entailment groups' + ' ' + str(facts))
contradictions = {}
done_keys = set()
contradiction_prob = 0
for fact in facts:
logging.log(23, 'FACT' + ' ' + fact)
prompt = construct_infobox_prompt(fact, self.name, other_names)
infobox_samples = gpt3_model([prompt], num_completions=num_samples, stop='----', logit_bias={50256:-5}, top_p=1, model_string=ENTITY_MODEL_STRING)
infobox_samples = ['\n' + self.name + s for s in infobox_samples]
time.sleep(0.5) # otherwise we get rate limited...
infobox_samples = [s for s in infobox_samples if s.startswith('\n')]
infobox_samples = [s.strip() for s in infobox_samples if len(s.strip()) > 0]
if len(infobox_samples) == 0:
continue
infobox_keys = Counter()
for info_sample in infobox_samples:
for info_entry in info_sample.strip().split('\n'):
if info_entry.startswith(self.name):
key = split_key_entry(info_entry, self.name)[0]
if key is not None:
resolved_key = resolve_names(key, [self.name] + other_names)
if resolved_key not in self.attributes or self.attributes[resolved_key]['entailment'] < self.ENTAILMENT_RECHECK_THRESHOLD or resolved_key not in done_keys: # don't recompute attributes we already did for this passage
infobox_keys[key] += 1
keys = list(infobox_keys.keys())
for k in keys:
if any([banned_attribute in k for banned_attribute in self.BANNED_ATTRIBUTES]):
del infobox_keys[k]
infobox_counts = Counter({self.resample_entry(key, fact, gpt3_model, split_entry=False): infobox_keys[key] for key in infobox_keys}) # resample the values for those keys
if None in infobox_counts: # resample_entry returns None when it fails
del infobox_counts[None]
for infobox_entry in infobox_counts:
logging.log(23, 'CHECKING' + ' ' + infobox_entry)
if not infobox_entry.startswith(self.name):
continue
if any([tok in infobox_entry and tok not in fact for tok in sum([other_name.strip().split() for other_name in other_names], [])]):
continue # somehow hallucinated a name...??
new_key, new_entry = split_key_entry(infobox_entry, self.name)
if new_key is None:
logging.log(23, 'Warning: malformed infobox entry' + ' ' + infobox_entry)
continue
if any([bad_word in new_entry.lower() or bad_word in new_key.lower() for bad_word in self.NO_ANSWER_WORDS]):
continue
logging.log(23, infobox_entry + ' ' + new_key + ' ' + new_entry)
# effectively, ensemble against the QA model to remove some hallucinations
new_qa_entry_passage, new_qa_score_passage = qa_entailment_best_answer(self.create_entailment_input(new_key, self.name, ''), passage, num_beams=5)
new_qa_entry_fact, new_qa_score_fact = qa_entailment_best_answer(self.create_entailment_input(new_key, self.name, ''), fact, num_beams=5)
logging.log(23, 'new_qa_entry_passage' + ' ' + new_qa_entry_passage + ' ' + str(new_qa_score_passage))
logging.log(23, 'new_qa_entry_fact' + ' ' + new_qa_entry_fact + ' ' + str(new_qa_score_fact))
logging.log(23, 'min QA confidence' + ' ' + str(min(new_qa_score_passage, new_qa_score_fact)))
if new_qa_score_passage < self.QA_SCORE_THRESHOLD or new_qa_score_fact < self.QA_SCORE_THRESHOLD or any([w in new_qa_entry_fact.lower() for w in self.NO_ANSWER_WORDS]):
logging.log(23, 'filtered by QA confidence')
continue
# make sure we didn't just hallucinate something out of the fact
fact_entailment_scores, _ = score_entailment([fact], [self.create_entailment_input(new_key, self.get_referred_name(fact), new_entry)])
fact_entailment_probs = softmax(fact_entailment_scores, axis=1)
logging.log(23, 'fact entailment' + ' ' + str(fact_entailment_probs[0, 2]))
if fact_entailment_probs[0, 2] < self.FACT_ENTAILMENT_THRESHOLD:
logging.log(23, 'filtered by fact entailment')
continue
new_key = resolve_names(new_key, [self.name] + other_names)
new_entry = resolve_names(new_entry, [self.name] + other_names)
logging.log(23, 'PASSED FILTERS' + ' ' + self.name + ' ' + new_key + ' ' + new_entry)
info_dict = {'text': fact, 'entry': new_entry, 'entailment': fact_entailment_probs[0, 2], 'newly_entered': True}
if return_contradiction_prob:
new_prob, _ = self.add_if_better(new_key, info_dict, detect_contradictions=detect_contradictions, contradictions=contradictions, return_contradiction_prob=return_contradiction_prob)
contradiction_prob = max(contradiction_prob, new_prob)
else:
contradictions, _ = self.add_if_better(new_key, info_dict, detect_contradictions=detect_contradictions, contradictions=contradictions, return_contradiction_prob=return_contradiction_prob)
done_keys.add(new_key) # regardless of what happened, no need to check against this key again for the same passage
if return_contradiction_prob:
return contradiction_prob
else:
return contradictions
def add_if_better(self, new_key, info_dict, detect_contradictions=True, contradictions=None, return_contradiction_prob=False):
fact = info_dict['text']
new_entry = info_dict['entry']
entailment_prob = info_dict['entailment']
new_entailment_input = self.create_entailment_input(new_key, self.name, info_dict['entry'])
status = 'none'
contradiction_prob = 0
if new_key in self.attributes:
original_entailment_input = self.create_entailment_input(new_key, self.name, self.attributes[new_key]['entry'])
if entailment_equals(new_entailment_input, original_entailment_input): # new one is more detailed
self.attributes[new_key]['text'] = fact
self.attributes[new_key]['entry'] = new_entry
self.attributes[new_key]['entailment'] = entailment_prob
status = 'modified'
elif entailment_equals(original_entailment_input, new_entailment_input): # original one is more detailed
pass
elif consistent_equals(original_entailment_input, new_entailment_input): # they're consistent with each other, at least
if fact not in self.attributes[new_key]['text']:
self.attributes[new_key]['text'] += '\n' + fact
if new_entry not in self.attributes[new_key]['entry']:
self.attributes[new_key]['entry'] += ', ' + new_entry
status = 'modified'
if not self.attributes[new_key]['newly_entered']:
contradiction_prob = math.exp(score_entailment(original_entailment_input, new_entailment_input)[0][0, 0])
elif self.attributes[new_key]['newly_entered']: # both part of the same passage, so just pick what's higher confidence
if entailment_prob > self.attributes[new_key]['entailment']:
logging.log(23, 'CHANGED BY RECHECK')
logging.log(23, 'old' + ' ' + self.attributes[new_key]['text'] + ' ' + self.attributes[new_key]['entry'] + ' ' + str(self.attributes[new_key]['entailment']))
self.attributes[new_key]['text'] = fact
self.attributes[new_key]['entry'] = new_entry
self.attributes[new_key]['entailment'] = entailment_prob
logging.log(23, 'new' + ' ' + self.attributes[new_key]['text'] + ' ' + self.attributes[new_key]['entry'] + ' ' + str(self.attributes[new_key]['entailment']))
status = 'modified'
elif not detect_contradictions:
# if not detect_contradictions: presumably fixed elsewhere.
pass
else:
logging.log(23, 'POTENTIAL CONTRADICTION')
logging.log(23, 'ENTITY' + ' ' + self.name)
logging.log(23, 'KEY' + ' ' + new_key)
logging.log(23, 'EXISTING ENTRY' + ' ' + str(self.attributes[new_key]))
logging.log(23, 'NEW ENTRY' + ' ' + str({'text': fact, 'entry': new_entry}))
contradictions[new_key] = (self.attributes[new_key], {'text': fact, 'entry': new_entry})
contradiction_prob = math.exp(score_entailment(original_entailment_input, new_entailment_input)[0][0, 0])
if return_contradiction_prob:
return contradiction_prob, status
else:
self.attributes[new_key] = info_dict
status = 'added'
if return_contradiction_prob:
return contradiction_prob, status
return contradictions, status | [
"\n\nQuestion: List very brief facts about __CHARACTER__'s appearance, personality, and relationship to other characters.\n\n1. __CHARACTER__",
" is ",
"'s",
"'s ",
". ",
"__CHARACTER__"
] |
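The description loop above keeps sampling until at least one completion is non-empty and short enough, then falls back to a single unchecked completion. A minimal standalone sketch of that pattern, assuming a hypothetical generate(query, n) helper in place of the project's gpt3_model wrapper and using the same GPT-2 tokenizer for the length check:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")

def short_completions(generate, query, max_length, num_completions=5, max_tries=5):
    # generate(query, n) is assumed to return a list of n candidate strings
    for _ in range(max_tries):
        candidates = generate(query, num_completions)
        candidates = [c for c in candidates
                      if c.strip() and len(tokenizer.encode(c)) < max_length]
        if candidates:
            return candidates
    # give up: accept a single completion even if it exceeds the length budget
    return generate(query, 1)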
2024-01-10 | yangkevin2/doc-story-generation | story_generation~common~summarizer~models~gpt3_summarizer.py | import time
import logging
import torch
from transformers import AutoTokenizer
import openai
from story_generation.common.summarizer.models.abstract_summarizer import AbstractSummarizer
from story_generation.common.data.split_paragraphs import cut_last_sentence
GPT3_END = 'THE END.'
PRETRAINED_MODELS = ['ada', 'babbage', 'curie', 'davinci', 'text-ada-001', 'text-babbage-001', 'text-curie-001', 'text-davinci-001', 'text-davinci-002', 'text-davinci-003']
class GPT3Summarizer(AbstractSummarizer):
def __init__(self, args):
assert args.gpt3_model is not None
self.model = args.gpt3_model
self.tokenizer = AutoTokenizer.from_pretrained("gpt2")
self.args = args
self.controller = None
@torch.no_grad()
def __call__(self, texts, suffixes=None, max_tokens=None, top_p=None, temperature=None, retry_until_success=True, stop=None, logit_bias=None, num_completions=1, cut_sentence=False, model_string=None):
assert type(texts) == list
if logit_bias is None:
logit_bias = {}
if suffixes is not None:
assert len(texts) == len(suffixes)
if model_string is None:
logging.warning('model string not provided, using default model')
if self.controller is None:
return self._call_helper(texts, suffixes=suffixes, max_tokens=max_tokens, top_p=top_p, temperature=temperature, retry_until_success=retry_until_success, stop=stop, logit_bias=logit_bias, num_completions=num_completions, cut_sentence=cut_sentence, model_string=model_string)
else:
raise NotImplementedError
@torch.no_grad()
def _call_helper(self, texts, suffixes=None, max_tokens=None, top_p=None, temperature=None, retry_until_success=True, stop=None, logit_bias=None, num_completions=1, cut_sentence=False, model_string=None):
assert model_string in PRETRAINED_MODELS
if logit_bias is None:
logit_bias = {}
outputs = []
for i in range(len(texts)):
text = texts[i]
prompt = text
retry = True
num_fails = 0
while retry:
try:
context_length = len(self.tokenizer.encode(prompt))
if context_length > self.args.max_context_length:
logging.warning('context length' + ' ' + str(context_length) + ' ' + 'exceeded artificial context length limit' + ' ' + str(self.args.max_context_length))
time.sleep(5) # similar interface to gpt3 query failing and retrying
assert False
if max_tokens is None:
max_tokens = min(self.args.max_tokens, self.args.max_context_length - context_length)
engine = self.model if model_string is None else model_string
if engine == 'text-davinci-001':
engine = 'text-davinci-002' # update to latest version
logging.log(21, 'PROMPT')
logging.log(21, prompt)
                    logging.log(21, 'MODEL STRING: ' + (self.model if model_string is None else model_string))
completion = openai.Completion.create(
engine=engine,
prompt=prompt,
suffix=suffixes[i] if suffixes is not None else None,
max_tokens=max_tokens,
temperature=temperature if temperature is not None else self.args.summarizer_temperature,
top_p=top_p if top_p is not None else self.args.summarizer_top_p,
frequency_penalty=self.args.summarizer_frequency_penalty,
presence_penalty=self.args.summarizer_presence_penalty,
stop=stop,
logit_bias=logit_bias,
n=num_completions)
retry = False
except Exception as e:
logging.warning(str(e))
retry = retry_until_success
num_fails += 1
if num_fails > 20:
raise e
if retry:
logging.warning('retrying...')
time.sleep(num_fails)
outputs += [completion['choices'][j]['text'] for j in range(num_completions)]
if cut_sentence:
for i in range(len(outputs)):
if len(outputs[i].strip()) > 0:
outputs[i] = cut_last_sentence(outputs[i])
engine = self.model if model_string is None else model_string
logging.log(21, 'OUTPUTS')
logging.log(21, str(outputs))
logging.log(21, 'GPT3 CALL' + ' ' + engine + ' ' + str(len(self.tokenizer.encode(texts[0])) + sum([len(self.tokenizer.encode(o)) for o in outputs])))
return outputs | [] |
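_call_helper above wraps the legacy (pre-1.0) OpenAI completion endpoint in a retry loop with a back-off that grows with the failure count. A condensed sketch of just that retry behaviour, with the engine name and token budget as placeholder values:

import logging
import time
import openai

def complete_with_retries(prompt, engine="text-curie-001", max_tokens=64, max_fails=20):
    num_fails = 0
    while True:
        try:
            # legacy (pre-1.0) OpenAI SDK completion call, as in _call_helper above
            response = openai.Completion.create(
                engine=engine, prompt=prompt, max_tokens=max_tokens, n=1
            )
            return response["choices"][0]["text"]
        except Exception as e:
            num_fails += 1
            if num_fails > max_fails:
                raise
            logging.warning("retrying after error: %s", e)
            time.sleep(num_fails)  # wait a little longer after each failure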
2024-01-10 | yangkevin2/doc-story-generation | story_generation~common~controller~loader_util.py | import torch
from story_generation.common.controller.loaders.coherence_loader import CoherenceSplitLoader
from story_generation.common.controller.loaders.fine_coherence_loader import FineCoherenceSplitLoader
from story_generation.common.controller.loaders.alignment_loader import AlignmentSplitLoader
from story_generation.common.controller.loaders.order_loader import OrderSplitLoader
def get_loader(loader_name, dataset, split, collate_fn, batch_size=32, append_mask_token=False, num_workers=20, tokenizer_model='roberta-base', **kwargs):
assert split in ['train', 'valid', 'test']
if loader_name == 'coherence':
loader_class = CoherenceSplitLoader
elif loader_name == 'fine_coherence':
loader_class = FineCoherenceSplitLoader
elif loader_name == 'alignment':
loader_class = AlignmentSplitLoader
elif loader_name == 'order':
loader_class = OrderSplitLoader
else:
raise NotImplementedError
print('loading texts for data loader')
contents, summaries = dataset.load_long_texts(split), dataset.load_short_texts(split)
print('done loading texts')
return torch.utils.data.DataLoader(loader_class(contents, summaries, tokenizer_model, append_mask_token=False, **kwargs), batch_size=batch_size, pin_memory=True, collate_fn=collate_fn, num_workers=num_workers)
| [] |
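get_loader above plugs a project-specific split loader into a torch DataLoader together with a caller-supplied collate_fn. The wiring looks like this toy version, which substitutes a trivial in-memory dataset for the coherence/alignment/order loaders:

from torch.utils.data import Dataset, DataLoader

class ToyPairs(Dataset):
    def __init__(self, contents, summaries):
        self.pairs = list(zip(contents, summaries))
    def __len__(self):
        return len(self.pairs)
    def __getitem__(self, idx):
        return self.pairs[idx]

def collate(batch):
    texts, summaries = zip(*batch)
    return list(texts), list(summaries)

loader = DataLoader(ToyPairs(["a", "b", "c"], ["A", "B", "C"]),
                    batch_size=2, collate_fn=collate, num_workers=0)
for texts, summaries in loader:
    print(texts, summaries)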
2024-01-10 | nikell28/foodify | foodify~services~recipient_maker.py | import openai
from foodify.config import config
from foodify.models.recipient import RecipientInput, Recipient
class _ChatGPTRecipientMaker:
def get_chatgpt_answer(self, message: list[dict]) -> str:
api_key = config.openai_api_key
openai.api_key = api_key
model = "gpt-3.5-turbo"
response = openai.ChatCompletion.create(
model=model,
messages=message,
)
recipient = response.choices[0]["message"]["content"].split("\n")
return recipient
def _get_recipe(self, recipe_description: str) -> str:
user_input = [
{"role": "system", "content": "Ты известный шеф повар."},
{"role": "user", "content": ""},
]
user_input[1]["content"] = recipe_description
response = self.get_chatgpt_answer(user_input)
response = "\n".join(response)
return response
class PromtCreator:
def get_promt(self, recipe_input: RecipientInput) -> str:
promt = config.promt
return promt # type: ignore
class RecipientMaker(_ChatGPTRecipientMaker):
async def get_recipe(self, recipe_input: RecipientInput) -> Recipient:
promt = PromtCreator().get_promt(recipe_input)
recipient = self._get_recipe(promt)
return Recipient(
description=recipient,
)
| [
"Ты известный шеф повар."
] |
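The recipe generation above reduces to a system-plus-user chat completion against the legacy OpenAI SDK. A stripped-down sketch of that call, with placeholder prompts:

import openai

def chat_answer(system_prompt, user_prompt, model="gpt-3.5-turbo"):
    # legacy (pre-1.0) OpenAI SDK chat call, matching the usage above
    response = openai.ChatCompletion.create(
        model=model,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
    )
    return response.choices[0]["message"]["content"]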
2024-01-10 | romainniamor/openai_chatBot | back~functions~analyser~doc_to_text.py | from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter
from fastapi import UploadFile
import io
#extract text from pdf https://pypi.org/project/PyPDF2/
def get_pdf_text(file: UploadFile):
#use io.BytesIO to convert file to bytes and pass it to PdfReader
file_content = io.BytesIO(file.file.read())
text = ""
pdf_reader = PdfReader(file_content)
for page in pdf_reader.pages:
text += page.extract_text()
return text
#creation list of text chunks cut by 1000 characters with 200 characters overlap between each chunk
def get_text_chunks(text):
text_splitter = CharacterTextSplitter(
separator = "\n",
chunk_size = 1000,
chunk_overlap=200,
length_function=len
)
chunks = text_splitter.split_text(text)
print(chunks)
return chunks
| [] |
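chunk_size=1000 with chunk_overlap=200 means consecutive chunks share 200 characters at their boundary. A plain-Python illustration of that sliding window (langchain's CharacterTextSplitter additionally prefers to cut at the configured separator):

def sliding_chunks(text, chunk_size=1000, chunk_overlap=200):
    step = chunk_size - chunk_overlap
    return [text[i:i + chunk_size] for i in range(0, max(len(text) - chunk_overlap, 1), step)]

demo = "x" * 2500
for i, chunk in enumerate(sliding_chunks(demo)):
    print(i, len(chunk))   # 1000, 1000, 900 -- consecutive chunks share 200 characters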
2024-01-10 | navanchauhan/CallMeMaybe | speller_agent.py | import logging
from typing import Optional, Tuple
import typing
from vocode.streaming.agent.chat_gpt_agent import ChatGPTAgent
from vocode.streaming.models.agent import AgentConfig, AgentType, ChatGPTAgentConfig
from vocode.streaming.agent.base_agent import BaseAgent, RespondAgent
from vocode.streaming.agent.factory import AgentFactory
import os
import sys
import typing
from dotenv import load_dotenv
from lang_prompt_demo import tools
from langchain.memory import ConversationBufferMemory
from langchain.utilities import SerpAPIWrapper
from langchain.agents import load_tools
from stdout_filterer import RedactPhoneNumbers
load_dotenv()
from langchain.chat_models import ChatOpenAI
# from langchain.chat_models import BedrockChat
from langchain.agents import initialize_agent
from langchain.agents import AgentType as LangAgentType
llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo") # type: ignore
#llm = BedrockChat(model_id="anthropic.claude-instant-v1", model_kwargs={"temperature":0}) # type: ignore
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
# Logging of LLMChains
verbose = True
agent = initialize_agent(
tools=tools,
llm=llm,
agent=LangAgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
verbose=verbose,
memory=memory,
)
class SpellerAgentConfig(AgentConfig, type="agent_speller"):
pass
class SpellerAgent(RespondAgent[SpellerAgentConfig]):
def __init__(self, agent_config: SpellerAgentConfig):
super().__init__(agent_config=agent_config)
async def respond(
self,
human_input,
conversation_id: str,
is_interrupt: bool = False,
) -> Tuple[Optional[str], bool]:
print("SpellerAgent: ", human_input)
res = agent.run(human_input)
return res, False
class SpellerAgentFactory(AgentFactory):
def create_agent(
self, agent_config: AgentConfig, logger: Optional[logging.Logger] = None
) -> BaseAgent:
print("Setting up agent", agent_config, agent_config.type)
if agent_config.type == AgentType.CHAT_GPT:
return ChatGPTAgent(
agent_config=typing.cast(ChatGPTAgentConfig, agent_config)
)
elif agent_config.type == "agent_speller":
return SpellerAgent(
agent_config=typing.cast(SpellerAgentConfig, agent_config)
)
raise Exception("Invalid agent config") | [] |
2024-01-10 | navanchauhan/CallMeMaybe | lang_prompt_demo.py | import os
import sys
import typing
from dotenv import load_dotenv
from tools.contacts import get_all_contacts
from tools.vocode import call_phone_number
from tools.summarize import summarize
from tools.get_user_inputs import get_desired_inputs
from tools.email_tool import email_tasks
from langchain.memory import ConversationBufferMemory
from langchain.agents import load_tools
from stdout_filterer import RedactPhoneNumbers
load_dotenv()
from langchain.chat_models import ChatOpenAI
# from langchain.chat_models import BedrockChat
from langchain.agents import initialize_agent
from langchain.agents import AgentType
from langchain.tools import WikipediaQueryRun
import argparse
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
tools=load_tools(["human", "wikipedia"]) + [get_all_contacts, call_phone_number, email_tasks, summarize]
tools_desc = ""
for tool in tools:
tools_desc += tool.name + " : " + tool.description + "\n"
def rephrase_prompt(objective: str) -> str:
# llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo") # type: ignore
# pred = llm.predict(f"Based on these tools {tools_desc} with the {objective} should be done in the following manner (outputting a single sentence), allowing for failure: ")
# print(pred)
# return pred
return f"{objective}"
with open("info.txt") as f:
my_info = f.read()
memory.chat_memory.add_user_message("User information to us " + my_info + " end of user information.")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Command line argument parser example")
parser.add_argument("--objective", type=str, help="Objective for the program")
parser.add_argument("--verbose", type=bool, help="Verbosity of the program", default=False)
# Parse the arguments
args = parser.parse_args()
# Get the value of --objective
objective_value = args.objective
# Get the value of --verbose
verbose_value = args.verbose
# Redirect stdout to our custom class
sys.stdout = typing.cast(typing.TextIO, RedactPhoneNumbers(sys.stdout))
if objective_value is None:
objective_value = input("What is your objective? ")
OBJECTIVE = (
objective_value
or "Find a random person in my contacts and tell them a joke"
)
llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo") # type: ignore
#llm = BedrockChat(model_id="anthropic.claude-instant-v1", model_kwargs={"temperature":0}) # type: ignore
#memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
# Logging of LLMChains
verbose = True
agent = initialize_agent(
tools=tools,
llm=llm,
agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
verbose=verbose_value,
memory=memory,
)
out = agent.run(OBJECTIVE)
print(out)
| [] |
2024-01-10 | navanchauhan/CallMeMaybe | tools~get_user_inputs.py | from typing import List
from langchain.agents import tool
from dotenv import load_dotenv
from langchain.agents import load_tools
from langchain.llms import OpenAI
from langchain.agents import initialize_agent, AgentType
load_dotenv()
import os
INPUTS = {}
@tool("get_desired_inputs")
def get_desired_inputs(input: str) -> dict:
"""Use this between tools to get the desired inputs for the next tool.
You will be given a task that will be performed by an autonomous agent on behalf of a user. You will gather any necessary data from the user to complete the specified task before executing the task.
"""
prompt = input
llm = OpenAI(temperature=0)
agent = initialize_agent(
tools=load_tools(["human"]),
llm=llm,
agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
)
return agent.run(prompt)
def get_user_inputs():
# iterate through INPUTS and populate values
print("Done")
| [
"INPUT"
] |
2024-01-10 | navanchauhan/CallMeMaybe | tools~email_tool.py | import logging
import asyncio
import os
from langchain.agents import tool
from dotenv import load_dotenv
from langchain.agents.agent_toolkits import GmailToolkit
from langchain.llms import OpenAI
from langchain.agents import initialize_agent, AgentType
load_dotenv()
toolkit = GmailToolkit()
tools = toolkit.get_tools()
my_information = "Use this information whenever needed User information " + open("info.txt").read() + " . Your task "
@tool("email tasks")
def email_tasks(input: str) -> bool:
"""draft/send/search/get email and return whatever you get.
the input to this tool is the prompt to the gmail toolkit.
Re-order the tasks in the prompt to change the order in which they are executed.
    Reorganise the input to the tool so that it passes all information needed to complete the task.
    Use this tool as many times as needed to complete the task.
for example, `send an email to [email protected] asking him if he is still looking for a job and that he should continue doing whatever he his doing because he will eventually find it` will email [email protected]
"""
prompt = my_information + input
#print(input)
llm = OpenAI(temperature=0)
agent = initialize_agent(
tools=toolkit.get_tools(),
llm=llm,
agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
)
return agent.run(prompt)
| [
"Use this information whenever needed User information \" + open(\"info.txt\").read() + \" . Your task INPUT"
] |
2024-01-10 | navanchauhan/CallMeMaybe | tools~vocode.py | import logging
import asyncio
import os
from langchain.agents import tool
from dotenv import load_dotenv
from vocode.streaming.models.message import BaseMessage
from vocode.streaming.models.synthesizer import ElevenLabsSynthesizerConfig
from vocode.streaming.models.transcriber import (
DeepgramTranscriberConfig,
PunctuationEndpointingConfig,
)
from vocode.streaming.models.telephony import TwilioConfig
load_dotenv()
from call_transcript_utils import delete_transcript, get_transcript
from vocode.streaming.telephony.conversation.outbound_call import OutboundCall
from vocode.streaming.telephony.config_manager.redis_config_manager import (
RedisConfigManager,
)
from vocode.streaming.models.agent import ChatGPTAgentConfig
import time
LOOP = asyncio.new_event_loop()
asyncio.set_event_loop(LOOP)
my_information = "Use this information whenever needed User information " + open("info.txt").read() + " . Your task "
@tool("call phone number")
def call_phone_number(input: str) -> str:
"""calls a phone number as a bot and returns a transcript of the conversation. Verifies the phone number from the user before calling.
make sure you call `get all contacts` first to get a list of phone numbers to call.
the input to this tool is a pipe separated list of a phone number, a prompt (including history), and the first thing the bot should say
The prompt should instruct the bot with what to do on the call and be in the 3rd person,
like 'the assistant is performing this task' instead of 'perform this task'.
e.g. phone_number|prompt|initial_message
    Only use this tool once an adequate phone number to call has been found and verified.
"""
phone_number, prompt, initial_message = input.split("|",2)
print(phone_number, prompt, initial_message)
call = OutboundCall(
base_url=os.environ["BASE_URL"],
to_phone=phone_number,
from_phone=os.environ["TWILIO_PHONE"],
config_manager=RedisConfigManager(),
agent_config=ChatGPTAgentConfig(
initial_message=BaseMessage(text=initial_message),
prompt_preamble=my_information + prompt,
generate_responses=True,
allow_agent_to_be_cut_off=False
),
twilio_config=TwilioConfig(
account_sid=os.environ["TWILIO_ACCOUNT_SID"],
auth_token=os.environ["TWILIO_AUTH_TOKEN"],
record=True
),
synthesizer_config=ElevenLabsSynthesizerConfig.from_telephone_output_device(
api_key=os.getenv("ELEVENLABS_API_KEY"),
voice_id=os.getenv("ELEVENLABS_VOICE_ID"),
),
transcriber_config=DeepgramTranscriberConfig.from_telephone_input_device(
endpointing_config=PunctuationEndpointingConfig()
),
logger=logging.Logger("OutboundCall"),
)
LOOP.run_until_complete(call.start())
return "Call Started"
| [] |
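The pipe-separated tool input described in the docstring can be assembled and parsed like this; the phone number and prompts below are made up for illustration:

tool_input = "|".join([
    "+15550100",                                                            # hypothetical phone number
    "The assistant is calling to confirm a dinner reservation for two at 7pm.",
    "Hi! I'm calling to confirm a reservation.",
])

phone_number, prompt, initial_message = tool_input.split("|", 2)
assert initial_message == "Hi! I'm calling to confirm a reservation."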
2024-01-10 | navanchauhan/CallMeMaybe | tools~contacts.py | from typing import List
from langchain.agents import tool
from dotenv import load_dotenv
load_dotenv()
import os
CONTACTS = [
{
"name": "Greg",
"phone" : os.getenv("TEST_PHONE_NUMBER"),
"email": "[email protected]"
},
{
"name": "Hunter",
"phone": "+19178737978",
"email": "[email protected]"
}
]
@tool("get_all_contacts")
def get_all_contacts(contact_name: str) -> List[dict]:
"""Returns all contacts in the user's phone book which includes email and phone numbers."""
return CONTACTS | [] |
2024-01-10 | anishpdalal/hashy-api | backend~app~app~api~api_v1~endpoints~search.py | import os
import uuid
from fastapi import APIRouter, Depends
import gantry
import openai
import pinecone
from sentence_transformers import SentenceTransformer
from sqlalchemy.ext.asyncio import AsyncSession
from app.api.deps import (
current_active_user,
get_async_session,
)
from app.crud.source import get_sources
from app.models.user import User
from app.schemas.search import Event, SearchResponse
PINECONE_KEY = os.getenv("PINECONE_KEY")
environment = os.getenv("ENVIRONMENT")
pinecone.init(api_key=PINECONE_KEY, environment="us-west1-gcp")
index = pinecone.Index(index_name="semantic-text-search")
openai.api_key = os.getenv("OPENAI_API_KEY")
search_model = SentenceTransformer("/mnt/bi_encoder")
gantry.init(
api_key=os.getenv("GANTRY_API_KEY"),
environment=os.getenv("ENVIRONMENT")
)
api_router = APIRouter()
@api_router.get("/search", tags=["search"], response_model=SearchResponse)
async def search(
query: str,
doc_type: str = None,
user: User = Depends(current_active_user),
db: AsyncSession = Depends(get_async_session),
count: int = 10,
log_id: str = None
):
user_id = str(user.id)
user_email = user.email
source_ids = [str(source.id) for source in await get_sources(db, user_id, user_email)]
filter = {"source_id": {"$in": source_ids}}
if doc_type:
filter["doc_type"] = {"$eq": doc_type}
query_embedding = search_model.encode([query])
query_results = index.query(
queries=[query_embedding.tolist()],
top_k=count,
filter=filter,
include_metadata=True,
include_values=False,
namespace=environment
)
matches = query_results["results"][0]["matches"]
query_id = str(uuid.uuid4())
results = {
"query": query,
"query_id": query_id,
"count": len(matches),
"results": [],
"answer": None
}
for match in matches:
metadata = match["metadata"]
score = match["score"]
if results["answer"] is None and len(matches) > 0:
prompt = "Answer the question based on the context below, and if the question can't be answered based on the context, say \"I don't know\"\n\nContext:\n{0}\n\n---\n\nQuestion: {1}\nAnswer:"
try:
response = openai.Completion.create(
engine="text-curie-001",
prompt=prompt.format(metadata["text"], query),
temperature=0,
max_tokens=100,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
results["answer"] = response.choices[0]["text"].strip()
if results["answer"].startswith("I don't know"):
results["answer"] = None
except Exception as e:
pass
result = {
"score": score,
"doc_name": metadata["doc_name"],
"doc_last_updated": str(metadata["doc_last_updated"]),
"doc_url": metadata["doc_url"],
"text": metadata["text"]
}
results["results"].append(result)
if matches:
gantry.log_record(
application="search_endpoint",
version=0,
inputs={
"query": query,
"user_id": user_id,
"log_id": log_id
},
outputs={
"first_result_score": matches[0]["score"],
"first_result_doc_name": matches[0]["metadata"]["doc_name"],
"first_result_doc_url": matches[0]["metadata"]["doc_url"],
},
feedback_id={"id": query_id}
)
return results
@api_router.post("/log", tags=["search"])
async def log(event: Event, user: User = Depends(current_active_user)):
gantry.log_record(
application="search_endpoint",
version=0,
feedback_id={"id": event.query_id},
feedback={"event_type": event.event_type, "message": event.message}
)
return {"message": "success"} | [
"Answer the question based on the context below, and if the question can't be answered based on the context, say \"I don't know\"\n\nContext:\n{0}\n\n---\n\nQuestion: {1}\nAnswer:",
"Answer the question based on the context below, and if the question can't be answered based on the context, say \"I don't know\"\n\nContext:\nPLACEHOLDER\n\n---\n\nQuestion: PLACEHOLDER\nAnswer:"
] |
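The answer-synthesis step in the search endpoint boils down to filling an answer-from-context-or-say-"I don't know" template with the top match and discarding refusals. A reduced sketch using the same template and legacy OpenAI completion call as above:

import openai

PROMPT = (
    "Answer the question based on the context below, and if the question can't "
    "be answered based on the context, say \"I don't know\"\n\n"
    "Context:\n{0}\n\n---\n\nQuestion: {1}\nAnswer:"
)

def answer_from_context(context, query):
    response = openai.Completion.create(
        engine="text-curie-001",
        prompt=PROMPT.format(context, query),
        temperature=0,
        max_tokens=100,
    )
    answer = response.choices[0]["text"].strip()
    return None if answer.startswith("I don't know") else answer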
2024-01-10 | moad-dev/rcs-vacancy | augmentation~augmentate.py | import openai
import csv
import json
import time
import os
openai.organization = os.environ.get('OPENAI_ORGANIZATION')
openai.api_key = os.environ.get('OPENAI_API_KEY')
# Define the prompt
system_prompt = (
"Ты - аугментатор размеченных данных. Тебе будут даны размеченные данные в формате plaintext: тексты вакансий, в которых размечены:"
"1. Специализация (specialization ключ в json)"
"2. Должностные обязанности (responsibilities ключ в json)"
"3. Требования к соискателю (requirements)"
"4. Условия (terms)"
"5. Ключевые навыки (skills)"
"6. Примечания (notes)"
"Ты должен будешь провести аугментацию данных. Для каждой вакансии придумай новые две вакансии по следующим критериям:"
"1. Текст оригинальной вакансии измененный так, чтобы он подходил по смыслу для другой придуманной тобой специальности в области строительства"
"2. Текст оригинальной вакансии с сохранением специальности, но с перефразированными предложениями, заменами на синонимы, переформулировками."
"Ты должен сохранить оригинальную разметку вакансий. Выводи только новые размеченные вакансии в формате JSON, как массив объектов JSON [{}, {}, {}], без комментариев. Отсутствующие данные заполни по смыслу."
)
vacancies = []
with open('vacancies_original.csv', newline='') as csvfile:
reader = csv.DictReader(csvfile, delimiter=';')
for row in reader:
vacancies.append(
"Специализация:"
+ row.get('specialization', '-') + "\n"
+ "Должностные обязанности:"
+ row.get('responsibilities', '-') + "\n"
+ "Требования к соискателю:"
+ row.get('requirements', '-') + "\n"
+ "Условия:"
+ row.get('terms', '-') + "\n"
+ "Ключевые навыки:"
+ row.get('skills', '-') + "\n"
+ "Примечания:"
+ row.get('notes', '-') + "\n"
)
for vacancy in vacancies:
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": vacancy},
],
temperature=1.05,
)
augmented = json.loads(response.choices[0].message.content)
with open('vacancies_augmented.csv', 'a', newline='') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=augmented[0].keys())
for augmentation in augmented:
print(augmentation)
writer.writerow(augmentation)
time.sleep(15)
except Exception as e:
print(e)
time.sleep(15)
continue
| [
"Ты - аугментатор размеченных данных. Тебе будут даны размеченные данные в формате plaintext: тексты вакансий, в которых размечены:1. Специализация (specialization ключ в json)2. Должностные обязанности (responsibilities ключ в json)3. Требования к соискателю (requirements)4. Условия (terms)5. Ключевые навыки (skills)6. Примечания (notes)Ты должен будешь провести аугментацию данных. Для каждой вакансии придумай новые две вакансии по следующим критериям:1. Текст оригинальной вакансии измененный так, чтобы он подходил по смыслу для другой придуманной тобой специальности в области строительства2. Текст оригинальной вакансии с сохранением специальности, но с перефразированными предложениями, заменами на синонимы, переформулировками.Ты должен сохранить оригинальную разметку вакансий. Выводи только новые размеченные вакансии в формате JSON, как массив объектов JSON [{}, {}, {}], без комментариев. Отсутствующие данные заполни по смыслу."
] |
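The augmentation loop assumes the model returns a well-formed JSON array whose objects carry the six keys named in the system prompt. A small guard like the following (illustrative, not part of the original script) makes the CSV-append step safer against malformed or partial responses:

import csv
import json

EXPECTED_KEYS = {"specialization", "responsibilities", "requirements", "terms", "skills", "notes"}

def append_augmented(raw_model_output, path="vacancies_augmented.csv"):
    rows = json.loads(raw_model_output)            # raises on malformed JSON
    rows = [r for r in rows if isinstance(r, dict) and EXPECTED_KEYS.issubset(r)]
    if not rows:
        return 0
    with open(path, "a", newline="") as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=sorted(EXPECTED_KEYS))
        writer.writerows({k: r.get(k, "") for k in sorted(EXPECTED_KEYS)} for r in rows)
    return len(rows)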
2024-01-10 | moad-dev/rcs-vacancy | preprocessing~rcs_vacancy_augmentation.py | import openai
import csv
import json
import time
import env
openai.organization = env.organization
openai.api_key = env.api_key
# Define the prompt
system_prompt = """
Ты - аугментатор размеченных данных. Тебе будут даны размеченные данные в формате plaintext: тексты вакансий, в которых размечены:
1. Специализация (specialization ключ в json)
2. Должностные обязанности (responsibilities ключ в json)
3. Требования к соискателю (requirements)
4. Условия (terms)
5. Ключевые навыки (skills)
6. Примечания (notes)
Ты должен будешь провести аугментацию данных. Для каждой вакансии придумай новые две вакансии по следующим критериям:
1. Текст оригинальной вакансии измененный так, чтобы он подходил по смыслу для другой придуманной тобой специальности в области строительства
2. Текст оригинальной вакансии с сохранением специальности, но с перефразированными предложениями, заменами на синонимы, переформулировками.
Ты должен сохранить оригинальную разметку вакансий. Выводи только новые размеченные вакансии в формате JSON, как массив объектов JSON [{}, {}, {}], без комментариев. Отсутствующие данные заполни по смыслу.
"""
vacancies = []
with open('data/vacancies_original.csv', newline='') as csvfile:
reader = csv.DictReader(csvfile, delimiter=';')
for row in reader:
vacancies.append(
"Специализация:"
+ row.get('specialization', '-') + "\n"
+ "Должностные обязанности:"
+ row.get('responsibilities', '-') + "\n"
+ "Требования к соискателю:"
+ row.get('requirements', '-') + "\n"
+ "Условия:"
+ row.get('terms', '-') + "\n"
+ "Ключевые навыки:"
+ row.get('skills', '-') + "\n"
+ "Примечания:"
+ row.get('notes', '-') + "\n"
)
for vacancy in vacancies:
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": vacancy},
],
temperature=1.12
)
except:
print('Ошибка запроса к openai')
continue
try:
augmented = json.loads(response.choices[0].message.content)
except json.decoder.JSONDecodeError:
print('Ошибка JSON decoder')
print(response.choices[0].message.content)
continue
try:
with open('data/vacancies_augmented.csv', 'a', newline='') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=augmented[0].keys())
for augmentation in augmented:
print(augmentation)
writer.writerow(augmentation)
except:
print('Ошибка записи в файл')
continue
time.sleep(15)
| [
"\nТы - аугментатор размеченных данных. Тебе будут даны размеченные данные в формате plaintext: тексты вакансий, в которых размечены:\n1. Специализация (specialization ключ в json)\n2. Должностные обязанности (responsibilities ключ в json)\n3. Требования к соискателю (requirements)\n4. Условия (terms)\n5. Ключевые навыки (skills)\n6. Примечания (notes)\nТы должен будешь провести аугментацию данных. Для каждой вакансии придумай новые две вакансии по следующим критериям:\n1. Текст оригинальной вакансии измененный так, чтобы он подходил по смыслу для другой придуманной тобой специальности в области строительства\n2. Текст оригинальной вакансии с сохранением специальности, но с перефразированными предложениями, заменами на синонимы, переформулировками.\nТы должен сохранить оригинальную разметку вакансий. Выводи только новые размеченные вакансии в формате JSON, как массив объектов JSON [{}, {}, {}], без комментариев. Отсутствующие данные заполни по смыслу.\n"
] |
2024-01-10 | acauf564/2023-MRUHacks | PDFtoLIST.py | import openai
import os
import time
from PyPDF2 import PdfReader
class PDFtoList:
def __init__(self):
openai.api_key = "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"
self.prompt = self.read_file()
self.response = self.get_completion(self.prompt)
def getResponse(self):
return self.response
def read_file(self):
reader = PdfReader("sample_syllabus.pdf")
number_of_pages = len(reader.pages)
text = "<Please give me a python 2d list of all assignments, their due dates and percentage weights. If there is no due date or assignment weight, leave the field as an empty string. : "
for i in range(number_of_pages):
page = reader.pages[i]
text += page.extract_text()
formatted_contents = f'{text}>'
return formatted_contents
def get_completion(self, prompt, model="gpt-3.5-turbo"):
# messages = [{"role": "user", "content": prompt}]
# response = openai.ChatCompletion.create(
# model=model,
# messages=messages,
# temperature=0,
# )
# usableResponse = response.choices[0].message["content"]
# print(usableResponse)
#UNCOMMENT LATER
usableResponse = [['Java Refresher and Comparators', 'October 6, 2023', '7.5%'], ['Generics and Linked Lists', 'October 27, 2023', '7.5%'], ['Queues, Iterators and Trees', 'November 17, 2023', '7.5%'], ['TreeMaps and Hashing', 'December 1, 2023', '7.5%']]
return usableResponse
| [] |
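The hard-coded sample response above is a 2D list of [assignment, due date, weight] rows; a small post-processing step (field names here are purely illustrative) turns it into dictionaries that are easier to consume downstream:

rows = [
    ['Java Refresher and Comparators', 'October 6, 2023', '7.5%'],
    ['Generics and Linked Lists', 'October 27, 2023', '7.5%'],
]
assignments = [
    {"title": name, "due": due, "weight": weight}
    for name, due, weight in rows
]
print(assignments[0]["due"])   # October 6, 2023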
2024-01-10 | Roshni-MS1/rosh-repo-search | venv~Lib~site-packages~langchainplus_sdk~client.py | from __future__ import annotations
import json
import logging
import os
import socket
from collections import defaultdict
from datetime import datetime
from io import BytesIO
from typing import (
TYPE_CHECKING,
Any,
DefaultDict,
Dict,
Iterator,
List,
Mapping,
Optional,
Sequence,
Tuple,
Union,
cast,
)
from urllib.parse import urlsplit
from uuid import UUID
import requests
from pydantic import BaseSettings, Field, root_validator
from requests import Response
from tenacity import (
before_sleep_log,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchainplus_sdk.evaluation.evaluator import RunEvaluator
from langchainplus_sdk.schemas import (
APIFeedbackSource,
Dataset,
DatasetCreate,
Example,
ExampleCreate,
ExampleUpdate,
Feedback,
FeedbackCreate,
FeedbackSourceBase,
FeedbackSourceType,
ModelFeedbackSource,
Run,
RunBase,
RunTypeEnum,
RunUpdate,
TracerSession,
)
from langchainplus_sdk.utils import (
LangChainPlusAPIError,
LangChainPlusError,
LangChainPlusUserError,
get_runtime_environment,
raise_for_status_with_text,
request_with_retries,
xor_args,
)
if TYPE_CHECKING:
import pandas as pd
logger = logging.getLogger(__name__)
def _is_localhost(url: str) -> bool:
"""Check if the URL is localhost."""
try:
netloc = urlsplit(url).netloc.split(":")[0]
ip = socket.gethostbyname(netloc)
return ip == "127.0.0.1" or ip.startswith("0.0.0.0") or ip.startswith("::")
except socket.gaierror:
return False
ID_TYPE = Union[UUID, str]
def _default_retry_config() -> Dict[str, Any]:
return dict(
stop=stop_after_attempt(3),
wait=wait_exponential(multiplier=1, min=4, max=10),
retry=retry_if_exception_type(LangChainPlusAPIError),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def _serialize_json(obj: Any) -> str:
if isinstance(obj, datetime):
return obj.isoformat()
elif isinstance(obj, UUID):
return str(obj)
raise TypeError("Type %s not serializable" % type(obj))
class LangChainPlusClient(BaseSettings):
"""Client for interacting with the LangChain+ API."""
api_key: Optional[str] = Field(default=None, env="LANGCHAIN_API_KEY")
api_url: str = Field(default="http://localhost:1984", env="LANGCHAIN_ENDPOINT")
retry_config: Mapping[str, Any] = Field(
default_factory=_default_retry_config, exclude=True
)
timeout_ms: int = Field(default=4000)
@root_validator(pre=True)
def validate_api_key_if_hosted(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Verify API key is provided if url not localhost."""
api_url: str = values.get("api_url", "http://localhost:1984")
api_key: Optional[str] = values.get("api_key")
if not _is_localhost(api_url):
if not api_key:
raise LangChainPlusUserError(
"API key must be provided when using hosted LangChain+ API"
)
return values
def _repr_html_(self) -> str:
"""Return an HTML representation of the instance with a link to the URL."""
if _is_localhost(self.api_url):
link = "http://localhost"
elif "dev" in self.api_url.split(".", maxsplit=1)[0]:
link = "https://dev.langchain.plus"
else:
link = "https://www.langchain.plus"
return f'<a href="{link}", target="_blank" rel="noopener">LangChain+ Client</a>'
def __repr__(self) -> str:
"""Return a string representation of the instance with a link to the URL."""
return f"LangChainPlusClient (API URL: {self.api_url})"
@property
def _headers(self) -> Dict[str, str]:
"""Get the headers for the API request."""
headers = {}
if self.api_key:
headers["x-api-key"] = self.api_key
return headers
def _get_with_retries(
self, path: str, params: Optional[Dict[str, Any]] = None
) -> Response:
return request_with_retries(
"get",
f"{self.api_url}{path}",
request_kwargs={
"params": params,
"headers": self._headers,
"timeout": self.timeout_ms / 1000,
},
retry_config=self.retry_config,
)
def upload_dataframe(
self,
df: pd.DataFrame,
name: str,
description: str,
input_keys: Sequence[str],
output_keys: Sequence[str],
) -> Dataset:
"""Upload a dataframe as individual examples to the LangChain+ API."""
dataset = self.create_dataset(dataset_name=name, description=description)
for row in df.itertuples():
inputs = {key: getattr(row, key) for key in input_keys}
outputs = {key: getattr(row, key) for key in output_keys}
self.create_example(inputs, outputs=outputs, dataset_id=dataset.id)
return dataset
def upload_csv(
self,
csv_file: Union[str, Tuple[str, BytesIO]],
description: str,
input_keys: Sequence[str],
output_keys: Sequence[str],
) -> Dataset:
"""Upload a CSV file to the LangChain+ API."""
files = {"file": csv_file}
data = {
"input_keys": ",".join(input_keys),
"output_keys": ",".join(output_keys),
"description": description,
}
response = requests.post(
self.api_url + "/datasets/upload",
headers=self._headers,
data=data,
files=files,
)
raise_for_status_with_text(response)
result = response.json()
# TODO: Make this more robust server-side
if "detail" in result and "already exists" in result["detail"]:
file_name = csv_file if isinstance(csv_file, str) else csv_file[0]
file_name = file_name.split("/")[-1]
raise ValueError(f"Dataset {file_name} already exists")
return Dataset(**result)
def create_run(
self,
name: str,
inputs: Dict[str, Any],
run_type: Union[str, RunTypeEnum],
*,
execution_order: Optional[int] = None,
**kwargs: Any,
) -> None:
"""Persist a run to the LangChain+ API."""
session_name = kwargs.pop(
"session_name", os.environ.get("LANGCHAIN_SESSION", "default")
)
run_create = {
"session_name": session_name,
**kwargs,
"name": name,
"inputs": inputs,
"run_type": run_type,
"execution_order": execution_order,
}
run_extra = cast(dict, run_create.setdefault("extra", {}))
runtime = run_extra.setdefault("runtime", {})
runtime_env = get_runtime_environment()
run_extra["runtime"] = {**runtime_env, **runtime}
headers = {**self._headers, "Accept": "application/json"}
request_with_retries(
"post",
f"{self.api_url}/runs",
request_kwargs={
"data": json.dumps(run_create, default=_serialize_json),
"headers": headers,
"timeout": self.timeout_ms / 1000,
},
retry_config=self.retry_config,
)
def update_run(
self,
run_id: ID_TYPE,
**kwargs: Any,
) -> None:
"""Update a run to the LangChain+ API."""
run_update = RunUpdate(
**kwargs,
)
headers = {**self._headers, "Accept": "application/json"}
request_with_retries(
"patch",
f"{self.api_url}/runs/{run_id}",
request_kwargs={
"data": run_update.json(),
"headers": headers,
"timeout": self.timeout_ms / 1000,
},
retry_config=self.retry_config,
)
def _load_child_runs(self, run: Run) -> Run:
child_runs = self.list_runs(id=run.child_run_ids)
treemap: DefaultDict[UUID, List[Run]] = defaultdict(list)
runs: Dict[UUID, Run] = {}
for child_run in sorted(child_runs, key=lambda r: r.execution_order):
if child_run.parent_run_id is None:
raise LangChainPlusError(f"Child run {child_run.id} has no parent")
treemap[child_run.parent_run_id].append(child_run)
runs[child_run.id] = child_run
run.child_runs = treemap.pop(run.id, [])
for run_id, children in treemap.items():
runs[run_id].child_runs = children
return run
def read_run(self, run_id: ID_TYPE, load_child_runs: bool = False) -> Run:
"""Read a run from the LangChain+ API.
Args:
run_id: The ID of the run to read.
load_child_runs: Whether to load nested child runs.
Returns:
The run.
"""
response = self._get_with_retries(f"/runs/{run_id}")
run = Run(**response.json())
if load_child_runs and run.child_run_ids:
run = self._load_child_runs(run)
return run
def list_runs(
self,
*,
session_id: Optional[ID_TYPE] = None,
session_name: Optional[str] = None,
run_type: Optional[str] = None,
dataset_name: Optional[str] = None,
dataset_id: Optional[ID_TYPE] = None,
reference_example_id: Optional[ID_TYPE] = None,
**kwargs: Any,
) -> Iterator[Run]:
"""List runs from the LangChain+ API."""
if session_name is not None:
if session_id is not None:
raise ValueError("Only one of session_id or session_name may be given")
session_id = self.read_session(session_name=session_name).id
if dataset_name is not None:
if dataset_id is not None:
raise ValueError("Only one of dataset_id or dataset_name may be given")
dataset_id = self.read_dataset(dataset_name=dataset_name).id
query_params = {
"session": session_id,
"run_type": run_type,
**kwargs,
}
if reference_example_id is not None:
query_params["reference_example"] = reference_example_id
if dataset_id is not None:
query_params["dataset"] = dataset_id
response = self._get_with_retries("/runs", params=query_params)
yield from [Run(**run) for run in response.json()]
def delete_run(self, run_id: ID_TYPE) -> None:
"""Delete a run from the LangChain+ API."""
response = requests.delete(
f"{self.api_url}/runs/{run_id}",
headers=self._headers,
)
raise_for_status_with_text(response)
return
def create_session(
self, session_name: str, session_extra: Optional[dict] = None
) -> TracerSession:
"""Create a session on the LangChain+ API."""
endpoint = f"{self.api_url}/sessions?upsert=true"
body = {
"name": session_name,
"extra": session_extra,
}
response = requests.post(
endpoint,
headers=self._headers,
json=body,
)
raise_for_status_with_text(response)
return TracerSession(**response.json())
@xor_args(("session_id", "session_name"))
def read_session(
self, *, session_id: Optional[str] = None, session_name: Optional[str] = None
) -> TracerSession:
"""Read a session from the LangChain+ API."""
path = "/sessions"
params: Dict[str, Any] = {"limit": 1}
if session_id is not None:
path += f"/{session_id}"
elif session_name is not None:
params["name"] = session_name
else:
raise ValueError("Must provide session_name or session_id")
response = self._get_with_retries(path, params=params)
result = response.json()
if isinstance(result, list):
if len(result) == 0:
raise LangChainPlusError(f"Session {session_name} not found")
return TracerSession(**result[0])
return TracerSession(**response.json())
def list_sessions(self) -> Iterator[TracerSession]:
"""List sessions from the LangChain+ API."""
response = self._get_with_retries("/sessions")
yield from [TracerSession(**session) for session in response.json()]
@xor_args(("session_name", "session_id"))
def delete_session(
self, *, session_name: Optional[str] = None, session_id: Optional[str] = None
) -> None:
"""Delete a session from the LangChain+ API."""
if session_name is not None:
session_id = self.read_session(session_name=session_name).id
elif session_id is None:
raise ValueError("Must provide session_name or session_id")
response = requests.delete(
self.api_url + f"/sessions/{session_id}",
headers=self._headers,
)
raise_for_status_with_text(response)
return None
def create_dataset(
self, dataset_name: str, *, description: Optional[str] = None
) -> Dataset:
"""Create a dataset in the LangChain+ API."""
dataset = DatasetCreate(
name=dataset_name,
description=description,
)
response = requests.post(
self.api_url + "/datasets",
headers=self._headers,
data=dataset.json(),
)
raise_for_status_with_text(response)
return Dataset(**response.json())
@xor_args(("dataset_name", "dataset_id"))
def read_dataset(
self,
*,
dataset_name: Optional[str] = None,
dataset_id: Optional[ID_TYPE] = None,
) -> Dataset:
path = "/datasets"
params: Dict[str, Any] = {"limit": 1}
if dataset_id is not None:
path += f"/{dataset_id}"
elif dataset_name is not None:
params["name"] = dataset_name
else:
raise ValueError("Must provide dataset_name or dataset_id")
response = self._get_with_retries(
path,
params=params,
)
result = response.json()
if isinstance(result, list):
if len(result) == 0:
raise LangChainPlusError(f"Dataset {dataset_name} not found")
return Dataset(**result[0])
return Dataset(**result)
def list_datasets(self, limit: int = 100) -> Iterator[Dataset]:
"""List the datasets on the LangChain+ API."""
response = self._get_with_retries("/datasets", params={"limit": limit})
yield from [Dataset(**dataset) for dataset in response.json()]
@xor_args(("dataset_id", "dataset_name"))
def delete_dataset(
self,
*,
dataset_id: Optional[ID_TYPE] = None,
dataset_name: Optional[str] = None,
) -> Dataset:
"""Delete a dataset by ID or name."""
if dataset_name is not None:
dataset_id = self.read_dataset(dataset_name=dataset_name).id
if dataset_id is None:
raise ValueError("Must provide either dataset name or ID")
response = requests.delete(
f"{self.api_url}/datasets/{dataset_id}",
headers=self._headers,
)
raise_for_status_with_text(response)
return Dataset(**response.json())
@xor_args(("dataset_id", "dataset_name"))
def create_example(
self,
inputs: Mapping[str, Any],
dataset_id: Optional[ID_TYPE] = None,
dataset_name: Optional[str] = None,
created_at: Optional[datetime] = None,
outputs: Optional[Mapping[str, Any]] = None,
) -> Example:
"""Create a dataset example in the LangChain+ API."""
if dataset_id is None:
dataset_id = self.read_dataset(dataset_name=dataset_name).id
data = {
"inputs": inputs,
"outputs": outputs,
"dataset_id": dataset_id,
}
if created_at:
data["created_at"] = created_at.isoformat()
example = ExampleCreate(**data)
response = requests.post(
f"{self.api_url}/examples", headers=self._headers, data=example.json()
)
raise_for_status_with_text(response)
result = response.json()
return Example(**result)
def read_example(self, example_id: ID_TYPE) -> Example:
"""Read an example from the LangChain+ API."""
response = self._get_with_retries(f"/examples/{example_id}")
return Example(**response.json())
def list_examples(
self, dataset_id: Optional[ID_TYPE] = None, dataset_name: Optional[str] = None
) -> Iterator[Example]:
"""List the datasets on the LangChain+ API."""
params = {}
if dataset_id is not None:
params["dataset"] = dataset_id
elif dataset_name is not None:
dataset_id = self.read_dataset(dataset_name=dataset_name).id
params["dataset"] = dataset_id
else:
pass
response = self._get_with_retries("/examples", params=params)
yield from [Example(**dataset) for dataset in response.json()]
def update_example(
self,
example_id: str,
*,
inputs: Optional[Dict[str, Any]] = None,
outputs: Optional[Mapping[str, Any]] = None,
dataset_id: Optional[ID_TYPE] = None,
) -> Dict[str, Any]:
"""Update a specific example."""
example = ExampleUpdate(
inputs=inputs,
outputs=outputs,
dataset_id=dataset_id,
)
response = requests.patch(
f"{self.api_url}/examples/{example_id}",
headers=self._headers,
data=example.json(exclude_none=True),
)
raise_for_status_with_text(response)
return response.json()
def delete_example(self, example_id: ID_TYPE) -> Example:
"""Delete an example by ID."""
response = requests.delete(
f"{self.api_url}/examples/{example_id}",
headers=self._headers,
)
raise_for_status_with_text(response)
return Example(**response.json())
def _resolve_run_id(
self, run: Union[Run, RunBase, str, UUID], load_child_runs: bool
) -> Run:
if isinstance(run, (str, UUID)):
run_ = self.read_run(run, load_child_runs=load_child_runs)
elif isinstance(run, Run):
run_ = run
elif isinstance(run, RunBase):
run_ = Run(**run.dict())
else:
raise TypeError(f"Invalid run type: {type(run)}")
return run_
def _resolve_example_id(
self, example: Union[Example, str, UUID, dict, None], run: Run
) -> Optional[Example]:
if isinstance(example, (str, UUID)):
reference_example_ = self.read_example(example)
elif isinstance(example, Example):
reference_example_ = example
elif isinstance(example, dict):
reference_example_ = Example(**example)
elif run.reference_example_id is not None:
reference_example_ = self.read_example(run.reference_example_id)
else:
reference_example_ = None
return reference_example_
def evaluate_run(
self,
run: Union[Run, RunBase, str, UUID],
evaluator: RunEvaluator,
*,
source_info: Optional[Dict[str, Any]] = None,
reference_example: Optional[Union[Example, str, dict, UUID]] = None,
load_child_runs: bool = False,
) -> Feedback:
"""Evaluate a run.
Args:
run: The run to evaluate. Can be a run_id or a Run object.
evaluator: The evaluator to use.
source_info: Additional information about the source of the
evaluation to log as feedback metadata.
reference_example: The example to use as a reference for the
evaluation. If not provided, the run's reference example
will be used.
load_child_runs: Whether to load child runs when
resolving the run ID.
Returns:
The feedback object created by the evaluation.
"""
run_ = self._resolve_run_id(run, load_child_runs=load_child_runs)
reference_example_ = self._resolve_example_id(reference_example, run_)
feedback_result = evaluator.evaluate_run(
run_,
example=reference_example_,
)
source_info = source_info or {}
if feedback_result.evaluator_info:
source_info = {**feedback_result.evaluator_info, **source_info}
return self.create_feedback(
run_.id,
feedback_result.key,
score=feedback_result.score,
value=feedback_result.value,
comment=feedback_result.comment,
correction=feedback_result.correction,
source_info=source_info,
feedback_source_type=FeedbackSourceType.MODEL,
)
async def aevaluate_run(
self,
run: Union[Run, str, UUID],
evaluator: RunEvaluator,
*,
source_info: Optional[Dict[str, Any]] = None,
reference_example: Optional[Union[Example, str, dict, UUID]] = None,
load_child_runs: bool = False,
) -> Feedback:
"""Evaluate a run.
Args:
run: The run to evaluate. Can be a run_id or a Run object.
evaluator: The evaluator to use.
source_info: Additional information about the source of
the evaluation to log as feedback metadata.
reference_example: The example to use as a reference
for the evaluation. If not provided, the run's
reference example will be used.
load_child_runs: Whether to load child runs when
resolving the run ID.
Returns:
The feedback created by the evaluation.
"""
run_ = self._resolve_run_id(run, load_child_runs=load_child_runs)
reference_example_ = self._resolve_example_id(reference_example, run_)
feedback_result = await evaluator.aevaluate_run(
run_,
example=reference_example_,
)
source_info = source_info or {}
if feedback_result.evaluator_info:
source_info = {**feedback_result.evaluator_info, **source_info}
return self.create_feedback(
run_.id,
feedback_result.key,
score=feedback_result.score,
value=feedback_result.value,
comment=feedback_result.comment,
correction=feedback_result.correction,
source_info=source_info,
feedback_source_type=FeedbackSourceType.MODEL,
)
def create_feedback(
self,
run_id: ID_TYPE,
key: str,
*,
score: Union[float, int, bool, None] = None,
value: Union[float, int, bool, str, dict, None] = None,
correction: Union[str, dict, None] = None,
comment: Union[str, None] = None,
source_info: Optional[Dict[str, Any]] = None,
feedback_source_type: Union[FeedbackSourceType, str] = FeedbackSourceType.API,
) -> Feedback:
"""Create a feedback in the LangChain+ API.
Args:
run_id: The ID of the run to provide feedback on.
key: The name of the metric, tag, or 'aspect' this
feedback is about.
score: The score to rate this run on the metric
or aspect.
value: The display value or non-numeric value for this feedback.
correction: The proper ground truth for this run.
comment: A comment about this feedback.
source_info: Information about the source of this feedback.
feedback_source_type: The type of feedback source.
"""
if feedback_source_type == FeedbackSourceType.API:
feedback_source: FeedbackSourceBase = APIFeedbackSource(
metadata=source_info
)
elif feedback_source_type == FeedbackSourceType.MODEL:
feedback_source = ModelFeedbackSource(metadata=source_info)
else:
raise ValueError(f"Unknown feedback source type {feedback_source_type}")
feedback = FeedbackCreate(
run_id=run_id,
key=key,
score=score,
value=value,
correction=correction,
comment=comment,
feedback_source=feedback_source,
)
response = requests.post(
self.api_url + "/feedback",
headers={**self._headers, "Content-Type": "application/json"},
data=feedback.json(exclude_none=True),
)
raise_for_status_with_text(response)
return Feedback(**response.json())
def read_feedback(self, feedback_id: ID_TYPE) -> Feedback:
"""Read a feedback from the LangChain+ API."""
response = self._get_with_retries(f"/feedback/{feedback_id}")
return Feedback(**response.json())
def list_feedback(
self,
*,
run_ids: Optional[Sequence[ID_TYPE]] = None,
**kwargs: Any,
) -> Iterator[Feedback]:
"""List the feedback objects on the LangChain+ API."""
params = {
"run": run_ids,
**kwargs,
}
response = self._get_with_retries("/feedback", params=params)
yield from [Feedback(**feedback) for feedback in response.json()]
def delete_feedback(self, feedback_id: ID_TYPE) -> None:
"""Delete a feedback by ID."""
response = requests.delete(
f"{self.api_url}/feedback/{feedback_id}",
headers=self._headers,
)
raise_for_status_with_text(response)
| [] |
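The client's _default_retry_config above is built from tenacity primitives; the same pieces can be exercised directly, shown here on a dummy function that fails twice before succeeding:

import logging
from tenacity import (
    Retrying,
    before_sleep_log,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

logger = logging.getLogger(__name__)
attempts = {"n": 0}

def flaky():
    attempts["n"] += 1
    if attempts["n"] < 3:
        raise ConnectionError("transient failure")
    return "ok"

retryer = Retrying(
    stop=stop_after_attempt(3),
    wait=wait_exponential(multiplier=1, min=1, max=4),
    retry=retry_if_exception_type(ConnectionError),
    before_sleep=before_sleep_log(logger, logging.WARNING),
)
print(retryer(flaky))   # "ok" on the third attempt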
2024-01-10 | Roshni-MS1/rosh-repo-search | purreaddocs.py | from langchain.document_loaders import ReadTheDocsLoader
#readopenaikey
def readopenaikey():
with open('C:\\Users\\roshnipatil\\Documents\\GitHub\\openaikey.txt', 'r') as file:
# Read all lines of the file
return file.read()
#load documents
def readPurviewDocs():
loader = ReadTheDocsLoader('rtdocs')
docs = loader.load()
len(docs)
print(docs[0].page_content)
print(docs[5].page_content)
docs[5].metadata['source'].replace('rtdocs/', 'https://')
#Create a list of URL reference and page content
data = []
for doc in docs:
data.append({
'url': doc.metadata['source'].replace('rtdocs/', 'https://'),
'text': doc.page_content
        })
    return data
#readpineconekey
def readpineconekey():
with open('C:\\Users\\roshnipatil\\Documents\\GitHub\\pineconekey.txt', 'r') as file:
# Read all lines of the file
lines = file.readlines()
# Print the content of the file
for line in lines:
print(line)
#Open the file for reading
def readSampleDoc():
with open('C:\\Users\\roshnipatil\\Documents\\GitHub\\testpii.txt', 'r') as file:
# Read all lines of the file
return file.read()
#Open the file for reading
def writeprompttofile(prompt_text):
with open('C:\\Users\\roshnipatil\\Documents\\GitHub\\search_prompts.txt', 'w') as file:
file.write(prompt_text)
#Open the file for reading
def writesencheckprompttofile(sen_prompt_text):
with open('C:\\Users\\roshnipatil\\Documents\\GitHub\\sen_check_prompts.txt', 'w') as file:
file.write(sen_prompt_text)
#Open the file for reading
def writesenlistprompttofile(sen_prompt_text):
with open('C:\\Users\\roshnipatil\\Documents\\GitHub\\sen_list_prompts.txt', 'w') as file:
file.write(sen_prompt_text)
| [] |
2024-01-10 | krunduev/test_telegram_gpt | simple_bot.py | from telegram.ext import Application, CommandHandler, MessageHandler, filters, ContextTypes
from telegram import Update
from dotenv import load_dotenv
import openai
import os
import requests
import aiohttp
import json
# подгружаем переменные окружения
load_dotenv()
# передаем секретные данные в переменные
TOKEN = os.environ.get("TG_TOKEN")
GPT_SECRET_KEY = os.environ.get("GPT_SECRET_KEY")
# передаем секретный токен chatgpt
openai.api_key = GPT_SECRET_KEY
# функция для синхронного общения с chatgpt
async def get_answer(text):
payload = {"text":text}
response = requests.post("http://127.0.0.1:5000/api/get_answer", json=payload)
return response.json()
# функция для асинхронного общения с сhatgpt
async def get_answer_async(text):
payload = {"text":text}
async with aiohttp.ClientSession() as session:
async with session.post('http://127.0.0.1:5000/api/get_answer_async', json=payload) as resp:
return await resp.json()
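
# Usage sketch (assumptions: the local API at http://127.0.0.1:5000 is running and returns
# JSON of the form {"message": "..."}; the question text is arbitrary).
async def _example_request():
    res = await get_answer_async("Привет! Как дела?")
    print(res["message"])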
# функция-обработчик команды /start
async def start(update: Update, context: ContextTypes.DEFAULT_TYPE):
# при первом запуске бота добавляем этого пользователя в словарь
if update.message.from_user.id not in context.bot_data.keys():
context.bot_data[update.message.from_user.id] = {'count': 3, 'history': [], 'answers': []}
# возвращаем текстовое сообщение пользователю
await update.message.reply_text('Задайте любой вопрос ChatGPT')
# функция-обработчик команды /data
async def data(update: Update, context: ContextTypes.DEFAULT_TYPE):
# создаем json и сохраняем в него словарь context.bot_data
with open('data.json', 'w') as fp:
json.dump(context.bot_data, fp)
# возвращаем текстовое сообщение пользователю
await update.message.reply_text('Данные сгружены')
# функция-обработчик команды /data
async def status(update: Update, context: ContextTypes.DEFAULT_TYPE):
user_id = update.message.from_user.id
# Получение истории в виде строки
history_text = ''
for question, answer in zip(context.bot_data[user_id]['history'], context.bot_data[user_id]['answers']):
history_text += f"\nВопрос: {question}\nОтвет: {answer}\n"
await update.message.reply_text(f'Осталось запросов: {context.bot_data[user_id]["count"]}\nИстория запросов:\n{history_text}')
# функция-обработчик текстовых сообщений
async def text(update: Update, context: ContextTypes.DEFAULT_TYPE):
user_id = update.message.from_user.id
# Инициализация истории сообщений для пользователя, если она еще не существует
if user_id not in context.bot_data:
context.bot_data[user_id] = {'count': 3, 'history': [], 'answers': []}
# Проверка доступных запросов пользователя
if context.bot_data[user_id]['count'] > 0:
# Добавление вопроса в историю и поддержание истории из последних 5 вопросов
context.bot_data[user_id]['history'].append(update.message.text)
context.bot_data[user_id]['history'] = context.bot_data[user_id]['history'][-5:]
# Формирование текста запроса с учетом истории
history_text = ''
for question, answer in zip(context.bot_data[user_id]['history'], context.bot_data[user_id]['answers']):
history_text += f"\nВопрос: {question}\nОтвет: {answer}\n"
# Обработка запроса пользователя
first_message = await update.message.reply_text('Ваш запрос обрабатывается, пожалуйста подождите...')
res = await get_answer_async( f"{update.message.text}, \n\n\n---\nИстория общения с пользователем. Используй ее для понимания контекста:\n{history_text}")
await context.bot.edit_message_text(text=res['message'], chat_id=update.message.chat_id, message_id=first_message.message_id)
context.bot_data[user_id]['answers'].append(res['message'])
context.bot_data[user_id]['answers'] = context.bot_data[user_id]['answers'][-5:]
# Уменьшение количества доступных запросов на 1
context.bot_data[user_id]['count'] -= 1
else:
# Сообщение, если запросы исчерпаны
await update.message.reply_text('Ваши запросы на сегодня исчерпаны')
# функция, которая будет запускаться раз в сутки для обновления доступных запросов
async def callback_daily(context: ContextTypes.DEFAULT_TYPE):
# проверка базы пользователей
if context.bot_data != {}:
# проходим по всем пользователям в базе и обновляем их доступные запросы
for key in context.bot_data:
context.bot_data[key]['count'] = 5
print('Запросы пользователей обновлены')
else:
print('Не найдено ни одного пользователя')
def main():
# создаем приложение и передаем в него токен бота
application = Application.builder().token(TOKEN).build()
print('Бот запущен...')
# создаем job_queue
job_queue = application.job_queue
job_queue.run_repeating(callback_daily, # функция обновления базы запросов пользователей
interval=60, # интервал запуска функции (в секундах)
first=10) # первый запуск функции (через сколько секунд)
# добавление обработчиков
application.add_handler(CommandHandler("start", start, block=False))
application.add_handler(CommandHandler("data", data, block=False))
application.add_handler(CommandHandler("status", status, block=False))
application.add_handler(MessageHandler(filters.TEXT, text, block=False))
# запуск бота (нажать Ctrl+C для остановки)
application.run_polling()
print('Бот остановлен')
if __name__ == "__main__":
main() | [] |
2024-01-10 | krunduev/test_telegram_gpt | fastapi~chunks.py | from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.docstore.document import Document
from dotenv import load_dotenv
import openai
import os
# получим переменные окружения из .env
load_dotenv()
# API-key
openai.api_key = os.environ.get("OPENAI_API_KEY")
# задаем system
default_system = "Ты-консультант в компании Simble, ответь на вопрос клиента на основе документа с информацией. Не придумывай ничего от себя, отвечай максимально по документу. Не упоминай Документ с информацией для ответа клиенту. Клиент ничего не должен знать про Документ с информацией для ответа клиенту"
class Chunk():
def __init__(self, path_to_base:str, sep:str=" ", ch_size:int=1024):
# загружаем базу
with open(path_to_base, 'r', encoding='utf-8') as file:
document = file.read()
# создаем список чанков
source_chunks = []
splitter = CharacterTextSplitter(separator=sep, chunk_size=ch_size)
for chunk in splitter.split_text(document):
source_chunks.append(Document(page_content=chunk, metadata={}))
# создаем индексную базу
embeddings = OpenAIEmbeddings()
self.db = FAISS.from_documents(source_chunks, embeddings)
def get_answer(self, system:str = default_system, query:str = None):
'''Синхронная функция получения ответа от chatgpt
'''
# релевантные отрезки из базы
docs = self.db.similarity_search(query, k=4)
message_content = '\n'.join([f'{doc.page_content}' for doc in docs])
messages = [
{"role": "system", "content": system},
{"role": "user", "content": f"Ответь на вопрос клиента. Не упоминай документ с информацией для ответа клиенту в ответе. Документ с информацией для ответа клиенту: {message_content}\n\nВопрос клиента: \n{query}"}
]
# получение ответа от chatgpt
completion = openai.ChatCompletion.create(model="gpt-3.5-turbo",
messages=messages,
temperature=0)
return completion.choices[0].message.content
async def async_get_answer(self, system:str = default_system, query:str = None):
'''Асинхронная функция получения ответа от chatgpt
'''
# релевантные отрезки из базы
docs = self.db.similarity_search(query, k=4)
message_content = '\n'.join([f'{doc.page_content}' for doc in docs])
messages = [
{"role": "system", "content": system},
{"role": "user", "content": f"Ответь на вопрос клиента. Не упоминай документ с информацией для ответа клиенту в ответе. Документ с информацией для ответа клиенту: {message_content}\n\nВопрос клиента: \n{query}"}
]
# получение ответа от chatgpt
completion = await openai.ChatCompletion.acreate(model="gpt-3.5-turbo",
messages=messages,
temperature=0)
return completion.choices[0].message.content | [
"Ответь на вопрос клиента. Не упоминай документ с информацией для ответа клиенту в ответе. Документ с информацией для ответа клиенту: PLACEHOLDER\n\nВопрос клиента: \nPLACEHOLDER"
] |
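
# Usage sketch for the Chunk class defined in chunks.py above (assumptions: OPENAI_API_KEY is
# available via .env, "base.txt" is a made-up knowledge-base file, and the module is importable
# as `chunks`).
from chunks import Chunk

index = Chunk("base.txt", sep=" ", ch_size=1024)
print(index.get_answer(query="Какие услуги предоставляет компания Simble?"))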
2024-01-10 | whitead/marvis | marvis~nlp_model.py | import os
import openai
from .utils import text2pdb
openai.api_key = os.getenv('OPENAI_API_KEY')
_vmd_cmd_prompt = '''> switch the representation to NewCartoon
changerep NewCartoon
> make them Ribbons
addrep $sel Ribbons
> switch style to CPK
changerep CPK
> enhance on it
AutoFocus $sel
> color by residue name
colorby ResName
> take a picture of this scene
Render
> get that part into focus
ZoomSel $sel
> rotate by 30 degrees along the y-axis
rotate y by 30 1
> Zoom in on them
ZoomSel $sel
'''
_vmd_select_prompt = '''> select the protein
set sel [atomselect top "protein"]
> select waters within five of residue number 10
set sel [atomselect top "water within 5 of resid 10"]
> select the glycines
set sel [atomselect top "resname GLY"]
> select the alpha carbons of residues lysines and valines
set sel [atomselect top "name CA and resname LYS VAL"]
> select the oxygen atoms in the lipids
set sel [atomselect top "name O and lipid"]
> select the fourth residue from the end
set sel [atomselect top "resid -4"]
'''
def _query_gpt3(query, training_string, T=0.20):
prompt = '\n'.join([training_string, '> ' + query, ''])
# return prompt
response = openai.Completion.create(
engine='davinci',
prompt=prompt,
temperature=T,
max_tokens=64,
top_p=1,
best_of=4,
frequency_penalty=0.0,
presence_penalty=0,
stop=['\n']
)
return response['choices'][0]['text'], response
def run_gpt_search(query):
result = {'type': 'VMD Command'}
if query.lower().find('select') > -1:
# print(select_training_string)
r, _ = _query_gpt3(query, _vmd_select_prompt)
result['type'] = 'VMD Selection'
elif query.lower().find('open') > -1:
r = f'mol new {{{text2pdb(query)}}} type {{webpdb}} waitfor all;'
result['type'] = 'PDB Search'
else:
r, _ = _query_gpt3(query, _vmd_cmd_prompt)
result['data'] = r
return result
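
# Usage sketch (assumptions: OPENAI_API_KEY is set and the Completion engine used above is
# still available; the natural-language query is arbitrary).
if __name__ == "__main__":
    result = run_gpt_search("rotate by 90 degrees along the x-axis")
    print(result["type"], result["data"])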
| [
"> switch the representation to NewCartoon\nchangerep NewCartoon\n> make them Ribbons\naddrep $sel Ribbons\n> switch style to CPK\nchangerep CPK\n> enhance on it\nAutoFocus $sel\n> color by residue name\ncolorby ResName\n> take a picture of this scene\nRender\n> get that part into focus\nZoomSel $sel\n> rotate by 30 degrees along the y-axis\nrotate y by 30 1\n> Zoom in on them\nZoomSel $sel\n",
"PLACEHOLDER\n> PLACEHOLDER\n",
"> select the protein\nset sel [atomselect top \"protein\"]\n> select waters within five of residue number 10\nset sel [atomselect top \"water within 5 of resid 10\"]\n> select the glycines\nset sel [atomselect top \"resname GLY\"]\n> select the alpha carbons of residues lysines and valines\nset sel [atomselect top \"name CA and resname LYS VAL\"]\n> select the oxygen atoms in the lipids\nset sel [atomselect top \"name O and lipid\"]\n> select the fourth residue from the end\nset sel [atomselect top \"resid -4\"]\n"
] |
2024-01-10 | songCNMS/wind-farm-env | code~agent~deep~soft_actor.py | from typing import Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
import numpy as np
LOG_STD_MAX = 2
LOG_STD_MIN = -20
class SoftActor(nn.Module):
def __init__(self,
state_shape,
action_shape,
action_scale,
action_bias,
hidden_sizes: Union[int, Tuple[int]] = (128, 128),
device=None):
super().__init__()
self._device = device
if isinstance(hidden_sizes, int):
hidden_sizes = [hidden_sizes]
hidden_sizes = list(hidden_sizes)
hidden_sizes.insert(0, state_shape)
if len(hidden_sizes) > 1:
self._fcs = [
nn.Linear(hidden_sizes[i-1], hidden_sizes[i], device=self._device)
for i in range(1, len(hidden_sizes))
]
else:
self._fcs = []
self._fc_mu = nn.Linear(hidden_sizes[-1], action_shape, device=self._device)
self._fc_log_std = nn.Linear(hidden_sizes[-1], action_shape, device=self._device)
self._action_scale = action_scale
self._action_bias = action_bias
def forward(self, x, deterministic=False, with_logprob=True):
for fc in self._fcs:
x = F.relu(fc(x))
mu = self._fc_mu(x)
log_std = self._fc_log_std(x)
log_std = torch.clamp(log_std, LOG_STD_MIN, LOG_STD_MAX)
std = torch.exp(log_std)
# Pre-squash distribution and sample
pi_distribution = Normal(mu, std)
if deterministic:
# Only used for evaluating policy at test time.
pi_action = mu
else:
pi_action = pi_distribution.rsample()
if with_logprob:
# From OpenAI's SpinningUp
logp_pi = pi_distribution.log_prob(pi_action).sum(axis=-1)
logp_pi -= (2 * (np.log(2) - pi_action - F.softplus(-2 * pi_action))).sum(axis=1)
else:
logp_pi = None
pi_action = torch.tanh(pi_action)
pi_action = pi_action * self._action_scale + self._action_bias
return pi_action, logp_pi
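
# Usage sketch (assumptions: 8-dimensional observations and 2-dimensional actions already
# scaled to [-1, 1], i.e. action_scale=1.0 and action_bias=0.0).
if __name__ == "__main__":
    actor = SoftActor(state_shape=8, action_shape=2, action_scale=1.0, action_bias=0.0)
    obs = torch.zeros(4, 8)  # batch of 4 dummy observations
    action, logp = actor(obs)
    print(action.shape, logp.shape)  # torch.Size([4, 2]) torch.Size([4])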
| [] |
2024-01-10 | CorruptedgriphtV/TAMARA-Asistente-Virtual-para-Discapacitados-Visuales- | ConcursoNacional~Old~GPTINTERACTION.py | import openai
import pyttsx3
# Configurar la clave de la API de OpenAI
openai.api_key = "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"
def generar_respuesta(pregunta):
prompt = "Pregunta: {}\nRespuesta:, /// Eres un asistente llamado TAMARA y ayudas a discapacitados visuales"
prompt_with_question = prompt.format(pregunta)
respuesta = openai.Completion.create(
engine="text-davinci-003", # Selecciona el modelo de GPT-3 que deseas utilizar
prompt=prompt_with_question,
max_tokens=50, # Define el límite de longitud de la respuesta generada
n=1, # Número de respuestas generadas
stop=None, # Criterio opcional para detener la generación de texto
temperature=0.7, # Controla la aleatoriedad de la respuesta generada
)
return respuesta.choices[0].text.strip()
def main():
while True:
# Esperar la entrada del usuario
pregunta = input("Hazme una pregunta (o escribe 'salir' para terminar): ")
if pregunta.lower() == "salir":
break
# Generar respuesta con GPT-3
respuesta_generada = generar_respuesta(pregunta)
# Imprimir la respuesta generada por GPT-3
print("Respuesta generada por GPT-3:", respuesta_generada)
# Pronunciar la respuesta generada utilizando pyttsx3
engine = pyttsx3.init()
engine.setProperty('voice', 'spanish') # Establecer el idioma a español
engine.say(respuesta_generada)
engine.runAndWait()
if __name__ == "__main__":
main() | [
"Pregunta: PLACEHOLDER\nRespuesta:, /// Eres un asistente llamado TAMARA y ayudas a discapacitados visuales",
"Pregunta: {}\nRespuesta:, /// Eres un asistente llamado TAMARA y ayudas a discapacitados visuales"
] |
2024-01-10 | CorruptedgriphtV/TAMARA-Asistente-Virtual-para-Discapacitados-Visuales- | ConcursoNacional~Old~Principal3.py |
import cv2
import requests
import pyttsx3
import openai
import os
import pygame
#Instala todo con pip install opencv-python requests pyttsx3 openai pygame
import speech_recognition as sr
import pyaudio
def play_sound(file_path):
pygame.mixer.init()
pygame.mixer.music.load(file_path)
pygame.mixer.music.play()
while pygame.mixer.music.get_busy():
pygame.time.Clock().tick(10)
def generate_text(prompt):
openai.api_key = api_key
response = openai.Completion.create(
engine="text-davinci-002",
prompt=prompt,
temperature=0.5,
max_tokens=100
)
#print("Respuesta de GPT-3:", response) # Agrega esta línea para ver la respuesta en bruto
generated_text = response.choices[0].text.strip()
#print("Texto generado:", generated_text) # Agrega esta línea para ver el texto generado
return generated_text
api_key = "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"
openai.api_key = api_key
def main():
#for i, microphone_name in enumerate(sr.Microphone.list_microphone_names()):
#print(i, microphone_name)
# Configurar las credenciales de la API de GPT-3
# Inicializar el reconocedor de voz
r = sr.Recognizer()
# Configurar la URL y las credenciales de la API de Computer Vision
endpoint = "https://tamaraserver.cognitiveservices.azure.com/"
subscription_key = "975da0dae182426ba219f03767e152ae"
api_url = f"{endpoint}vision/v3.2/analyze"
# Inicializar la cámara
cap = cv2.VideoCapture(0)
while True:
# Capturar la imagen desde la cámara
ret, frame = cap.read()
# Reproducir sonido intro antes de escuchar la solicitud
# Guardar la imagen en un archivo temporal
img_temp_path = "temp_img.jpg"
cv2.imwrite(img_temp_path, frame)
# Leer la imagen como bytes
with open(img_temp_path, "rb") as image_file:
image_data = image_file.read()
# Realizar la solicitud a la API de Computer Vision
headers = {
"Content-Type": "application/octet-stream",
"Ocp-Apim-Subscription-Key": subscription_key
}
params = {
"visualFeatures": "Description",
"language": "es" # Solicitar descripción en español
}
response = requests.post(api_url, headers=headers, params=params, data=image_data)
# Procesar la respuesta de la API de Computer Vision
if response.status_code == 200:
result = response.json()
if "description" in result and "captions" in result["description"]:
description = result["description"]["captions"][0]["text"]
# Guardar la descripción en un archivo temporal en la carpeta Sandbox
with open("temp_description.txt", "w") as f:
f.write(description)
else:
print("No se encontró una descripción en la respuesta de Azure.")
else:
print("Error en la solicitud a Azure:", response.text)
# Eliminar el archivo temporal de la imagen
if os.path.exists(img_temp_path):
os.remove(img_temp_path)
# Esperar la entrada del usuario
        user_input = ""  # avoid a NameError below if speech recognition fails
        with sr.Microphone(device_index=2) as source:
print("Esperando el comando de activación...")
audio = r.listen(source, timeout=5)
try:
user_input = r.recognize_google(audio, language='es-ES')
# Si el usuario dijo "Oye TAMARA", empezar a procesar el audio
if "Oye TAMARA" in user_input:
print("Dime algo:")
play_sound("Intro.mp3")
with sr.Microphone(device_index=13) as source:
audio = r.listen(source, timeout=5)
try:
user_input = r.recognize_google(audio, language='es-ES')
print("Creo que dijiste: " + user_input)
except sr.UnknownValueError:
print("Google Speech Recognition no entendió lo que dijiste")
except sr.RequestError as e:
print("No se pudo solicitar resultados a Google Speech Recognition; {0}".format(e))
except sr.UnknownValueError:
print("Google Speech Recognition no entendió el comando de activación")
except sr.RequestError as e:
print("No se pudo solicitar resultados a Google Speech Recognition; {0}".format(e))
# Reproducir sonido outro después de recibir la solicitud
play_sound("Outro.mp3")
# Si el usuario pregunta qué está viendo o qué se ve
if any(phrase in user_input.lower() for phrase in ["qué estoy viendo", "qué se ve", "que estoy viendo", "que se ve"]):
# Hablar la descripción generada por Azure
engine = pyttsx3.init()
engine.setProperty('voice', 'spanish') # Establecer idioma a español
engine.say("Imagen reconocida: " + description)
engine.runAndWait()
# Generar respuesta adicional con GPT-3 utilizando la descripción almacenada en el archivo temporal
with open("temp_description.txt", "r") as f:
saved_description = f.read()
prompt = "Eres TAMARA, asistes a personas con discapacidad visual y tu objetivo es proporcionar una descripción detallada de la imagen y ayudar en lo que el usuario solicite, no inventes muchas cosas, solo retroalimenta a lo que ves .\n\nUsuario: {}\n\nDescripción de Azure: {}\n\nRespuesta:"
prompt_with_input = prompt.format(user_input, saved_description)
additional_assistance = generate_text(prompt_with_input)
# Imprimir y pronunciar la respuesta adicional generada por GPT-3
print("Respuesta adicional generada por GPT-3:", additional_assistance)
engine = pyttsx3.init()
engine.setProperty('voice', 'spanish') # Establecer idioma a español
engine.say(additional_assistance)
engine.runAndWait()
if __name__ == "__main__":
main() | [
"Eres TAMARA, asistes a personas con discapacidad visual y tu objetivo es proporcionar una descripción detallada de la imagen y ayudar en lo que el usuario solicite, no inventes muchas cosas, solo retroalimenta a lo que ves .\n\nUsuario: {}\n\nDescripción de Azure: {}\n\nRespuesta:",
"Eres TAMARA, asistes a personas con discapacidad visual y tu objetivo es proporcionar una descripción detallada de la imagen y ayudar en lo que el usuario solicite, no inventes muchas cosas, solo retroalimenta a lo que ves .\n\nUsuario: PLACEHOLDER\n\nDescripción de Azure: PLACEHOLDER\n\nRespuesta:"
] |
2024-01-10 | CorruptedgriphtV/TAMARA-Asistente-Virtual-para-Discapacitados-Visuales- | ConcursoNacional~Old~Principal.py | import cv2
import requests
import pyttsx3
import openai
import os
import pygame
def play_sound(file_path):
pygame.mixer.init()
pygame.mixer.music.load(file_path)
pygame.mixer.music.play()
while pygame.mixer.music.get_busy():
pygame.time.Clock().tick(10)
def generate_text(prompt):
openai.api_key = api_key
response = openai.Completion.create(
engine="text-davinci-002",
prompt=prompt,
temperature=0.5,
max_tokens=100
)
#print("Respuesta de GPT-3:", response) # Agrega esta línea para ver la respuesta en bruto
generated_text = response.choices[0].text.strip()
#print("Texto generado:", generated_text) # Agrega esta línea para ver el texto generado
return generated_text
api_key = "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"
openai.api_key = api_key
def main():
# Configurar las credenciales de la API de GPT-3
# Configurar la URL y las credenciales de la API de Computer Vision
endpoint = "https://tamaraserver.cognitiveservices.azure.com/"
subscription_key = "975da0dae182426ba219f03767e152ae"
api_url = f"{endpoint}vision/v3.2/analyze"
# Inicializar la cámara
cap = cv2.VideoCapture(0)
while True:
# Capturar la imagen desde la cámara
ret, frame = cap.read()
# Reproducir sonido intro antes de escuchar la solicitud
play_sound("Intro.mp3")
# Guardar la imagen en un archivo temporal
img_temp_path = "temp_img.jpg"
cv2.imwrite(img_temp_path, frame)
# Leer la imagen como bytes
with open(img_temp_path, "rb") as image_file:
image_data = image_file.read()
# Realizar la solicitud a la API de Computer Vision
headers = {
"Content-Type": "application/octet-stream",
"Ocp-Apim-Subscription-Key": subscription_key
}
params = {
"visualFeatures": "Description",
"language": "es" # Solicitar descripción en español
}
response = requests.post(api_url, headers=headers, params=params, data=image_data)
# Procesar la respuesta de la API de Computer Vision
if response.status_code == 200:
result = response.json()
if "description" in result and "captions" in result["description"]:
description = result["description"]["captions"][0]["text"]
# Agregar la descripción al archivo de registro en la carpeta Sandbox
with open("temp_description.txt", "a") as f:
f.write(description + "\n")
else:
print("No se encontró una descripción en la respuesta de Azure.")
else:
print("Error en la solicitud a Azure:", response.text)
# Eliminar el archivo temporal de la imagen
if os.path.exists(img_temp_path):
os.remove(img_temp_path)
# Esperar la entrada del usuario
user_input = input("Ingrese su solicitud: ")
# Reproducir sonido outro después de recibir la solicitud
play_sound("Outro.mp3")
# Si el usuario pregunta qué está viendo o qué se ve
if any(phrase in user_input.lower() for phrase in ["qué estoy viendo", "qué se ve", "que estoy viendo", "que se ve"]):
# Hablar la descripción generada por Azure
engine = pyttsx3.init()
engine.setProperty('voice', 'spanish') # Establecer idioma a español
engine.say("Imagen reconocida: " + description)
engine.runAndWait()
# Generar respuesta adicional con GPT-3
prompt = "Eres TAMARA, asistes a personas con discapacidad visual y tu objetivo es proporcionar una descripción detallada de la imagen y ayudar en lo que el usuario solicite, no inventes muchas cosas, solo retroalimenta a lo que ves .\n\nUsuario: {}\n\nDescripción de Azure: {}\n\nRespuesta:"
prompt_with_input = prompt.format(user_input, description)
additional_assistance = generate_text(prompt_with_input)
# Imprimir y pronunciar la respuesta adicional generada por GPT-3
print("Respuesta adicional generada por GPT-3:", additional_assistance)
engine = pyttsx3.init()
engine.setProperty('voice', 'spanish') # Establecer idioma a español
engine.say(additional_assistance)
engine.runAndWait()
if __name__ == "__main__":
main()
| [
"Eres TAMARA, asistes a personas con discapacidad visual y tu objetivo es proporcionar una descripción detallada de la imagen y ayudar en lo que el usuario solicite, no inventes muchas cosas, solo retroalimenta a lo que ves .\n\nUsuario: {}\n\nDescripción de Azure: {}\n\nRespuesta:"
] |
2024-01-10 | CorruptedgriphtV/TAMARA-Asistente-Virtual-para-Discapacitados-Visuales- | ConcursoNacional~Old~Principal2.py | import cv2
import requests
import pyttsx3
import openai
import os
import pygame
#Instala todo con pip install opencv-python requests pyttsx3 openai pygame
import speech_recognition as sr
import pyaudio
def play_sound(file_path):
pygame.mixer.init()
pygame.mixer.music.load(file_path)
pygame.mixer.music.play()
while pygame.mixer.music.get_busy():
pygame.time.Clock().tick(10)
def generate_text(prompt):
openai.api_key = api_key
response = openai.Completion.create(
engine="text-davinci-002",
prompt=prompt,
temperature=0.5,
max_tokens=100
)
#print("Respuesta de GPT-3:", response) # Agrega esta línea para ver la respuesta en bruto
generated_text = response.choices[0].text.strip()
#print("Texto generado:", generated_text) # Agrega esta línea para ver el texto generado
return generated_text
api_key = "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"
openai.api_key = api_key
def main():
#for i, microphone_name in enumerate(sr.Microphone.list_microphone_names()):
#print(i, microphone_name)
# Configurar las credenciales de la API de GPT-3
# Inicializar el reconocedor de voz
r = sr.Recognizer()
# Configurar la URL y las credenciales de la API de Computer Vision
endpoint = "https://tamaraserver.cognitiveservices.azure.com/"
subscription_key = "975da0dae182426ba219f03767e152ae"
api_url = f"{endpoint}vision/v3.2/analyze"
# Inicializar la cámara
cap = cv2.VideoCapture(0)
while True:
# Capturar la imagen desde la cámara
ret, frame = cap.read()
# Reproducir sonido intro antes de escuchar la solicitud
# Guardar la imagen en un archivo temporal
img_temp_path = "temp_img.jpg"
cv2.imwrite(img_temp_path, frame)
# Leer la imagen como bytes
with open(img_temp_path, "rb") as image_file:
image_data = image_file.read()
# Realizar la solicitud a la API de Computer Vision
headers = {
"Content-Type": "application/octet-stream",
"Ocp-Apim-Subscription-Key": subscription_key
}
params = {
"visualFeatures": "Description",
"language": "es" # Solicitar descripción en español
}
response = requests.post(api_url, headers=headers, params=params, data=image_data)
# Procesar la respuesta de la API de Computer Vision
if response.status_code == 200:
result = response.json()
if "description" in result and "captions" in result["description"]:
description = result["description"]["captions"][0]["text"]
# Guardar la descripción en un archivo temporal en la carpeta Sandbox
with open("temp_description.txt", "w") as f:
f.write(description)
else:
print("No se encontró una descripción en la respuesta de Azure.")
else:
print("Error en la solicitud a Azure:", response.text)
# Eliminar el archivo temporal de la imagen
if os.path.exists(img_temp_path):
os.remove(img_temp_path)
# Esperar la entrada del usuario
        user_input = ""  # avoid a NameError below if speech recognition fails
        with sr.Microphone(device_index=3) as source:
print("Dime algo:")
play_sound("Intro.mp3")
audio = r.listen(source, timeout=5)
try:
user_input = r.recognize_google(audio, language='es-ES')
print("Creo que dijiste: " + user_input)
except sr.UnknownValueError:
print("Google Speech Recognition no entendió lo que dijiste")
except sr.RequestError as e:
print("No se pudo solicitar resultados a Google Speech Recognition; {0}".format(e))
# Reproducir sonido outro después de recibir la solicitud
play_sound("Outro.mp3")
# Si el usuario pregunta qué está viendo o qué se ve
if any(phrase in user_input.lower() for phrase in ["qué estoy viendo", "qué se ve", "que estoy viendo", "que se ve"]):
# Hablar la descripción generada por Azure
engine = pyttsx3.init()
engine.setProperty('voice', 'spanish') # Establecer idioma a español
engine.say("Imagen reconocida: " + description)
engine.runAndWait()
# Generar respuesta adicional con GPT-3 utilizando la descripción almacenada en el archivo temporal
with open("temp_description.txt", "r") as f:
saved_description = f.read()
prompt = "Eres TAMARA, asistes a personas con discapacidad visual y tu objetivo es proporcionar una descripción detallada de la imagen y ayudar en lo que el usuario solicite, no inventes muchas cosas, solo retroalimenta a lo que ves .\n\nUsuario: {}\n\nDescripción de Azure: {}\n\nRespuesta:"
prompt_with_input = prompt.format(user_input, saved_description)
additional_assistance = generate_text(prompt_with_input)
# Imprimir y pronunciar la respuesta adicional generada por GPT-3
print("Respuesta adicional generada por GPT-3:", additional_assistance)
engine = pyttsx3.init()
engine.setProperty('voice', 'spanish') # Establecer idioma a español
engine.say(additional_assistance)
engine.runAndWait()
if __name__ == "__main__":
main() | [
"Eres TAMARA, asistes a personas con discapacidad visual y tu objetivo es proporcionar una descripción detallada de la imagen y ayudar en lo que el usuario solicite, no inventes muchas cosas, solo retroalimenta a lo que ves .\n\nUsuario: {}\n\nDescripción de Azure: {}\n\nRespuesta:",
"Eres TAMARA, asistes a personas con discapacidad visual y tu objetivo es proporcionar una descripción detallada de la imagen y ayudar en lo que el usuario solicite, no inventes muchas cosas, solo retroalimenta a lo que ves .\n\nUsuario: PLACEHOLDER\n\nDescripción de Azure: PLACEHOLDER\n\nRespuesta:"
] |
2024-01-10 | lintool/cs-big-cows | gpt-classification~gpt_tokenizer.py | import openai
import yaml
# Rate limits: 20 requests per minute (RPM) and 40000 tokens per minute (TPM).
# Assume ~20 tokens per answer and ~30 tokens per citation,
# so the input is roughly 50x + 25 tokens (25 for the instruction prompt).
# That caps a single request at about 79 citations.
# Completion tokens ~= 20x = 20 x 80 = 1600.
config_path = "config.yaml"
def isNaN(string):
return string != string
class GPTTokenizer:
rate_limit = 3
response_limit = 1400
input_limit = 2400
def __init__(self, tokenizer_prompt):
with open(config_path, "r") as ymlfile:
cfg = yaml.full_load(ymlfile)
openai.api_key = cfg['openai']['api_key']
self.model = "gpt-3.5-turbo"
self.prompt = tokenizer_prompt
def generate_response(self, citations):
prompt = self.prompt
token_size = len(prompt.split(" "))
for i, cit in enumerate(citations):
if isNaN(cit):
cit = ""
prompt += f"\n{i+1}. {cit}"
token_size += 1 + len(cit.split(" "))
if token_size > self.input_limit:
raise Exception(f"token limit is {self.input_limit}, however received {token_size} tokens")
response = openai.ChatCompletion.create(
model=self.model,
messages=[{"role": "user", "content": prompt}],
max_tokens = self.response_limit
)
# parse token list
tokenized_cit = [cit.split('. ')[-1] for cit in response.choices[0].message.content.split("\n")]
return tokenized_cit
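
# Usage sketch (assumptions: config.yaml exists with an openai api_key, the instruction text
# passed as tokenizer_prompt is arbitrary, and the citations below are made up).
if __name__ == "__main__":
    tok = GPTTokenizer("Tokenize each of the following citations and return them as a numbered list:")
    citations = [
        "Smith, J. (2020). Deep learning for citation parsing. Journal of X.",
        "Doe, A. et al. (2019). A survey of reference extraction.",
    ]
    print(tok.generate_response(citations))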
| [] |
2024-01-10 | mocy/litellm | litellm~exceptions.py | ## LiteLLM versions of the OpenAI Exception Types
from openai.error import (
AuthenticationError,
InvalidRequestError,
RateLimitError,
ServiceUnavailableError,
OpenAIError,
APIError,
Timeout,
APIConnectionError,
)
class AuthenticationError(AuthenticationError): # type: ignore
def __init__(self, message, llm_provider, model):
self.status_code = 401
self.message = message
self.llm_provider = llm_provider
self.model = model
super().__init__(
self.message
) # Call the base class constructor with the parameters it needs
class InvalidRequestError(InvalidRequestError): # type: ignore
def __init__(self, message, model, llm_provider):
self.status_code = 400
self.message = message
self.model = model
self.llm_provider = llm_provider
super().__init__(
self.message, f"{self.model}"
) # Call the base class constructor with the parameters it needs
class Timeout(Timeout): # type: ignore
def __init__(self, message, model, llm_provider):
self.status_code = 408
self.message = message
self.model = model
self.llm_provider = llm_provider
super().__init__(
self.message, f"{self.model}"
) # Call the base class constructor with the parameters it needs
# sub class of invalid request error - meant to give more granularity for error handling context window exceeded errors
class ContextWindowExceededError(InvalidRequestError): # type: ignore
def __init__(self, message, model, llm_provider):
self.status_code = 400
self.message = message
self.model = model
self.llm_provider = llm_provider
super().__init__(
self.message, self.model, self.llm_provider
) # Call the base class constructor with the parameters it needs
class RateLimitError(RateLimitError): # type: ignore
def __init__(self, message, llm_provider, model):
self.status_code = 429
self.message = message
self.llm_provider = llm_provider
        self.model = model
super().__init__(
self.message
) # Call the base class constructor with the parameters it needs
class ServiceUnavailableError(ServiceUnavailableError): # type: ignore
def __init__(self, message, llm_provider, model):
self.status_code = 500
self.message = message
self.llm_provider = llm_provider
self.model = model
super().__init__(
self.message
) # Call the base class constructor with the parameters it needs
# raise this when the API returns an invalid response object - https://github.com/openai/openai-python/blob/1be14ee34a0f8e42d3f9aa5451aa4cb161f1781f/openai/api_requestor.py#L401
class APIError(APIError): # type: ignore
def __init__(self, status_code, message, llm_provider, model):
self.status_code = status_code
self.message = message
self.llm_provider = llm_provider
self.model = model
super().__init__(
self.message
)
# raised if an invalid request (not get, delete, put, post) is made
class APIConnectionError(APIConnectionError): # type: ignore
def __init__(self, message, llm_provider, model):
self.message = message
self.llm_provider = llm_provider
self.model = model
super().__init__(
self.message
)
class OpenAIError(OpenAIError): # type: ignore
def __init__(self, original_exception):
self.status_code = original_exception.http_status
super().__init__(
http_body=original_exception.http_body,
http_status=original_exception.http_status,
json_body=original_exception.json_body,
headers=original_exception.headers,
code=original_exception.code,
)
self.llm_provider = "openai"
class BudgetExceededError(Exception):
def __init__(self, current_cost, max_budget):
self.current_cost = current_cost
self.max_budget = max_budget
message = f"Budget has been exceeded! Current cost: {current_cost}, Max budget: {max_budget}"
super().__init__(message) | [] |
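
# Usage sketch for the exception classes above (assumptions: litellm.completion raises these
# mapped exceptions, a provider API key is configured, and the retry strategy is illustrative).
import litellm
from litellm.exceptions import ContextWindowExceededError, RateLimitError

def _call_with_error_handling(model, messages):
    try:
        return litellm.completion(model=model, messages=messages)
    except ContextWindowExceededError:
        # the conversation no longer fits the model's context window -- trim history and retry
        raise
    except RateLimitError:
        # provider rate limit hit -- back off before retrying
        raise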
2024-01-10 | mocy/litellm | litellm~main.py | import os, openai, sys, json, inspect
from typing import Any
from functools import partial
import dotenv, traceback, random, asyncio, time, contextvars
from copy import deepcopy
import litellm
from litellm import ( # type: ignore
client,
exception_type,
timeout,
get_optional_params,
get_litellm_params,
Logging,
)
from litellm.utils import (
get_secret,
CustomStreamWrapper,
read_config_args,
completion_with_fallbacks,
get_llm_provider,
get_api_key,
mock_completion_streaming_obj
)
from .llms import anthropic
from .llms import together_ai
from .llms import ai21
from .llms import sagemaker
from .llms import bedrock
from .llms import huggingface_restapi
from .llms import replicate
from .llms import aleph_alpha
from .llms import nlp_cloud
from .llms import baseten
from .llms import vllm
from .llms import ollama
from .llms import cohere
from .llms import petals
from .llms import oobabooga
import tiktoken
from concurrent.futures import ThreadPoolExecutor
from typing import Callable, List, Optional, Dict
encoding = tiktoken.get_encoding("cl100k_base")
from litellm.utils import (
get_secret,
CustomStreamWrapper,
ModelResponse,
read_config_args,
)
####### ENVIRONMENT VARIABLES ###################
dotenv.load_dotenv() # Loading env variables using dotenv
####### COMPLETION ENDPOINTS ################
async def acompletion(*args, **kwargs):
"""
    Asynchronously perform a completion() using any LiteLLM-supported model (e.g. gpt-3.5-turbo, claude-2)
This function takes the same arguments as the 'completion' function and is used for asynchronous completion requests.
Parameters:
*args: Positional arguments to pass to the 'litellm.completion' function.
**kwargs: Keyword arguments to pass to the 'litellm.completion' function.
Returns:
The completion response, either as a litellm.ModelResponse Object or an async generator if 'stream' is set to True.
Note:
- This function uses asynchronous programming to perform completions.
- It leverages the 'loop.run_in_executor' method to execute the synchronous 'completion' function.
- If 'stream' is set to True in kwargs, the function returns an async generator.
"""
loop = asyncio.get_event_loop()
# Use a partial function to pass your keyword arguments
func = partial(completion, *args, **kwargs, acompletion=True)
# Add the context to the function
ctx = contextvars.copy_context()
func_with_context = partial(ctx.run, func)
# Call the synchronous function using run_in_executor
response = await loop.run_in_executor(None, func_with_context)
if kwargs.get("stream", False): # return an async generator
# do not change this
# for stream = True, always return an async generator
# See OpenAI acreate https://github.com/openai/openai-python/blob/5d50e9e3b39540af782ca24e65c290343d86e1a9/openai/api_resources/abstract/engine_api_resource.py#L193
return(
line
async for line in response
)
else:
return response
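
# Usage sketch (assumptions: an OPENAI_API_KEY is configured and the model/messages below are
# illustrative; this helper is not part of the original module).
async def _example_acompletion():
    # non-streaming call
    response = await acompletion(model="gpt-3.5-turbo",
                                 messages=[{"role": "user", "content": "Hello!"}])
    print(response["choices"][0]["message"]["content"])
    # streaming call: with stream=True an async generator of chunks is returned
    stream = await acompletion(model="gpt-3.5-turbo",
                               messages=[{"role": "user", "content": "Hello!"}],
                               stream=True)
    async for chunk in stream:
        print(chunk)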
def mock_completion(model: str, messages: List, stream: bool = False, mock_response: str = "This is a mock request", **kwargs):
"""
Generate a mock completion response for testing or debugging purposes.
This is a helper function that simulates the response structure of the OpenAI completion API.
Parameters:
model (str): The name of the language model for which the mock response is generated.
messages (List): A list of message objects representing the conversation context.
stream (bool, optional): If True, returns a mock streaming response (default is False).
mock_response (str, optional): The content of the mock response (default is "This is a mock request").
**kwargs: Additional keyword arguments that can be used but are not required.
Returns:
litellm.ModelResponse: A ModelResponse simulating a completion response with the specified model, messages, and mock response.
Raises:
Exception: If an error occurs during the generation of the mock completion response.
Note:
- This function is intended for testing or debugging purposes to generate mock completion responses.
- If 'stream' is True, it returns a response that mimics the behavior of a streaming completion.
"""
try:
model_response = ModelResponse(stream=stream)
if stream is True:
# don't try to access stream object,
response = mock_completion_streaming_obj(model_response, mock_response=mock_response, model=model)
return response
model_response["choices"][0]["message"]["content"] = mock_response
model_response["created"] = time.time()
model_response["model"] = model
return model_response
except:
traceback.print_exc()
raise Exception("Mock completion response failed")
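
# Usage sketch: mock_completion never touches the network, so it runs without any API keys
# (the helper name below is illustrative, not part of the original module).
def _example_mock_completion():
    resp = mock_completion("gpt-3.5-turbo",
                           messages=[{"role": "user", "content": "ping"}],
                           mock_response="pong")
    return resp["choices"][0]["message"]["content"]  # "pong"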
@client
@timeout( # type: ignore
600
) ## set timeouts, in case calls hang (e.g. Azure) - default is 600s, override with `force_timeout`
def completion(
model: str,
# Optional OpenAI params: see https://platform.openai.com/docs/api-reference/chat/create
messages: List = [],
functions: List = [],
function_call: str = "", # optional params
temperature: float = 1,
top_p: float = 1,
n: int = 1,
stream: bool = False,
stop=None,
max_tokens: float = float("inf"),
presence_penalty: float = 0,
frequency_penalty=0,
logit_bias: dict = {},
user: str = "",
deployment_id = None,
# Optional liteLLM function params
*,
return_async=False,
mock_response: Optional[str] = None,
api_key: Optional[str] = None,
api_version: Optional[str] = None,
api_base: Optional[str] = None,
force_timeout=600,
num_beams=1,
logger_fn=None,
verbose=False,
azure=False,
custom_llm_provider=None,
litellm_call_id=None,
litellm_logging_obj=None,
use_client=False,
id=None, # this is an optional param to tag individual completion calls
metadata: Optional[dict]=None,
# model specific optional params
top_k=40,# used by text-bison only
task: Optional[str]="text-generation-inference", # used by huggingface inference endpoints
return_full_text: bool = False, # used by huggingface TGI
remove_input: bool = True, # used by nlp cloud models - prevents input text from being returned as part of output
request_timeout=0, # unused var for old version of OpenAI API
fallbacks=[],
caching = False,
cache_params = {}, # optional to specify metadata for caching
acompletion=False,
) -> ModelResponse:
"""
    Perform a completion() using any of the LLMs supported by litellm (e.g. gpt-4, gpt-3.5-turbo, claude-2, command-nightly)
Parameters:
model (str): The name of the language model to use for text completion. see all supported LLMs: https://docs.litellm.ai/docs/providers/
messages (List): A list of message objects representing the conversation context (default is an empty list).
OPTIONAL PARAMS
functions (List, optional): A list of functions to apply to the conversation messages (default is an empty list).
function_call (str, optional): The name of the function to call within the conversation (default is an empty string).
temperature (float, optional): The temperature parameter for controlling the randomness of the output (default is 1.0).
top_p (float, optional): The top-p parameter for nucleus sampling (default is 1.0).
n (int, optional): The number of completions to generate (default is 1).
stream (bool, optional): If True, return a streaming response (default is False).
        stop (string/list, optional): Up to 4 sequences where the LLM API will stop generating further tokens.
max_tokens (integer, optional): The maximum number of tokens in the generated completion (default is infinity).
presence_penalty (float, optional): It is used to penalize new tokens based on their existence in the text so far.
frequency_penalty: It is used to penalize new tokens based on their frequency in the text so far.
logit_bias (dict, optional): Used to modify the probability of specific tokens appearing in the completion.
user (str, optional): A unique identifier representing your end-user. This can help the LLM provider to monitor and detect abuse.
metadata (dict, optional): Pass in additional metadata to tag your completion calls - eg. prompt version, details, etc.
LITELLM Specific Params
mock_response (str, optional): If provided, return a mock completion response for testing or debugging purposes (default is None).
force_timeout (int, optional): The maximum execution time in seconds for the completion request (default is 600).
custom_llm_provider (str, optional): Used for Non-OpenAI LLMs, Example usage for bedrock, set model="amazon.titan-tg1-large" and custom_llm_provider="bedrock"
Returns:
ModelResponse: A response object containing the generated completion and associated metadata.
Note:
- This function is used to perform completions() using the specified language model.
- It supports various optional parameters for customizing the completion behavior.
- If 'mock_response' is provided, a mock completion response is returned for testing or debugging.
"""
if mock_response:
return mock_completion(model, messages, stream=stream, mock_response=mock_response)
args = locals()
try:
logging = litellm_logging_obj
if fallbacks != []:
return completion_with_fallbacks(**args)
if litellm.model_alias_map and model in litellm.model_alias_map:
args["model_alias_map"] = litellm.model_alias_map
model = litellm.model_alias_map[
model
] # update the model to the actual value if an alias has been passed in
model_response = ModelResponse()
if deployment_id != None: # azure llms
model=deployment_id
custom_llm_provider="azure"
elif (
model.split("/", 1)[0] in litellm.provider_list
): # allow custom provider to be passed in via the model name "azure/chatgpt-test"
custom_llm_provider = model.split("/", 1)[0]
model = model.split("/", 1)[1]
model, custom_llm_provider = get_llm_provider(model=model, custom_llm_provider=custom_llm_provider)
model_api_key = get_api_key(llm_provider=custom_llm_provider, dynamic_api_key=api_key) # get the api key from the environment if required for the model
if model_api_key and "sk-litellm" in model_api_key:
api_base = "https://proxy.litellm.ai"
custom_llm_provider = "openai"
api_key = model_api_key
# check if user passed in any of the OpenAI optional params
optional_params = get_optional_params(
functions=functions,
function_call=function_call,
temperature=temperature,
top_p=top_p,
n=n,
stream=stream,
stop=stop,
max_tokens=max_tokens,
presence_penalty=presence_penalty,
frequency_penalty=frequency_penalty,
logit_bias=logit_bias,
user=user,
deployment_id=deployment_id,
# params to identify the model
model=model,
custom_llm_provider=custom_llm_provider,
top_k=top_k,
task=task,
remove_input=remove_input,
return_full_text=return_full_text
)
# For logging - save the values of the litellm-specific params passed in
litellm_params = get_litellm_params(
return_async=return_async,
api_key=api_key,
force_timeout=force_timeout,
logger_fn=logger_fn,
verbose=verbose,
custom_llm_provider=custom_llm_provider,
api_base=api_base,
litellm_call_id=litellm_call_id,
model_alias_map=litellm.model_alias_map,
completion_call_id=id,
metadata=metadata
)
logging.update_environment_variables(model=model, user=user, optional_params=optional_params, litellm_params=litellm_params)
if custom_llm_provider == "azure":
# azure configs
api_type = get_secret("AZURE_API_TYPE") or "azure"
api_base = (
api_base
or litellm.api_base
or get_secret("AZURE_API_BASE")
)
api_version = (
api_version or
litellm.api_version or
get_secret("AZURE_API_VERSION")
)
api_key = (
api_key or
litellm.api_key or
litellm.azure_key or
get_secret("AZURE_API_KEY")
)
## LOGGING
logging.pre_call(
input=messages,
api_key=api_key,
additional_args={
"headers": litellm.headers,
"api_version": api_version,
"api_base": api_base,
},
)
## COMPLETION CALL
response = openai.ChatCompletion.create(
engine=model,
messages=messages,
headers=litellm.headers,
api_key=api_key,
api_base=api_base,
api_version=api_version,
api_type=api_type,
**optional_params,
)
if "stream" in optional_params and optional_params["stream"] == True:
response = CustomStreamWrapper(response, model, custom_llm_provider="openai", logging_obj=logging)
return response
## LOGGING
logging.post_call(
input=messages,
api_key=api_key,
original_response=response,
additional_args={
"headers": litellm.headers,
"api_version": api_version,
"api_base": api_base,
},
)
elif (
model in litellm.open_ai_chat_completion_models
or custom_llm_provider == "custom_openai"
or custom_llm_provider == "openai"
or "ft:gpt-3.5-turbo" in model # finetuned gpt-3.5-turbo
): # allow user to make an openai call with a custom base
# note: if a user sets a custom base - we should ensure this works
# allow for the setting of dynamic and stateful api-bases
api_base = (
api_base
or litellm.api_base
or get_secret("OPENAI_API_BASE")
or "https://api.openai.com/v1"
)
if litellm.organization:
openai.organization = litellm.organization
# set API KEY
api_key = (
api_key or
litellm.api_key or
litellm.openai_key or
get_secret("OPENAI_API_KEY")
)
## LOGGING
logging.pre_call(
input=messages,
api_key=api_key,
additional_args={"headers": litellm.headers, "api_base": api_base},
)
## COMPLETION CALL
try:
response = openai.ChatCompletion.create(
model=model,
messages=messages,
headers=litellm.headers, # None by default
api_base=api_base, # thread safe setting base, key, api_version
api_key=api_key,
api_type="openai",
api_version=api_version, # default None
**optional_params,
)
except Exception as e:
## LOGGING - log the original exception returned
logging.post_call(
input=messages,
api_key=api_key,
original_response=str(e),
additional_args={"headers": litellm.headers},
)
raise e
if "stream" in optional_params and optional_params["stream"] == True:
response = CustomStreamWrapper(response, model, custom_llm_provider="openai", logging_obj=logging)
return response
## LOGGING
logging.post_call(
input=messages,
api_key=api_key,
original_response=response,
additional_args={"headers": litellm.headers},
)
elif (
model in litellm.open_ai_text_completion_models
or "ft:babbage-002" in model
or "ft:davinci-002" in model # support for finetuned completion models
# NOTE: Do NOT add custom_llm_provider == "openai".
# this will break hosted vllm/proxy calls.
# see: https://docs.litellm.ai/docs/providers/vllm#calling-hosted-vllm-server.
# VLLM expects requests to call openai.ChatCompletion we need those requests to always
# call openai.ChatCompletion
):
# print("calling custom openai provider")
openai.api_type = "openai"
api_base = (
api_base
or litellm.api_base
or get_secret("OPENAI_API_BASE")
or "https://api.openai.com/v1"
)
openai.api_version = None
# set API KEY
api_key = (
api_key or
litellm.api_key or
litellm.openai_key or
get_secret("OPENAI_API_KEY")
)
if litellm.organization:
openai.organization = litellm.organization
prompt = " ".join([message["content"] for message in messages])
## LOGGING
logging.pre_call(
input=prompt,
api_key=api_key,
additional_args={
"openai_organization": litellm.organization,
"headers": litellm.headers,
"api_base": api_base,
"api_type": openai.api_type,
},
)
## COMPLETION CALL
response = openai.Completion.create(
model=model,
prompt=prompt,
headers=litellm.headers,
api_key = api_key,
api_base=api_base,
**optional_params
)
if "stream" in optional_params and optional_params["stream"] == True:
response = CustomStreamWrapper(response, model, custom_llm_provider="text-completion-openai", logging_obj=logging)
return response
## LOGGING
logging.post_call(
input=prompt,
api_key=api_key,
original_response=response,
additional_args={
"openai_organization": litellm.organization,
"headers": litellm.headers,
"api_base": openai.api_base,
"api_type": openai.api_type,
},
)
## RESPONSE OBJECT
completion_response = response["choices"][0]["text"]
model_response["choices"][0]["message"]["content"] = completion_response
model_response["created"] = response.get("created", time.time())
model_response["model"] = model
model_response["usage"] = response.get("usage", 0)
response = model_response
elif (
"replicate" in model or
custom_llm_provider == "replicate" or
model in litellm.replicate_models
):
# Setting the relevant API KEY for replicate, replicate defaults to using os.environ.get("REPLICATE_API_TOKEN")
replicate_key = None
replicate_key = (
api_key
or litellm.replicate_key
or litellm.api_key
or get_secret("REPLICATE_API_KEY")
or get_secret("REPLICATE_API_TOKEN")
)
model_response = replicate.completion(
model=model,
messages=messages,
model_response=model_response,
print_verbose=print_verbose,
optional_params=optional_params,
litellm_params=litellm_params,
logger_fn=logger_fn,
encoding=encoding, # for calculating input/output tokens
api_key=replicate_key,
logging_obj=logging,
)
if "stream" in optional_params and optional_params["stream"] == True:
# don't try to access stream object,
response = CustomStreamWrapper(model_response, model, logging_obj=logging, custom_llm_provider="replicate")
return response
response = model_response
elif model in litellm.anthropic_models:
anthropic_key = (
api_key or litellm.anthropic_key or os.environ.get("ANTHROPIC_API_KEY") or litellm.api_key
)
model_response = anthropic.completion(
model=model,
messages=messages,
model_response=model_response,
print_verbose=print_verbose,
optional_params=optional_params,
litellm_params=litellm_params,
logger_fn=logger_fn,
encoding=encoding, # for calculating input/output tokens
api_key=anthropic_key,
logging_obj=logging,
)
if "stream" in optional_params and optional_params["stream"] == True:
# don't try to access stream object,
response = CustomStreamWrapper(model_response, model, custom_llm_provider="anthropic", logging_obj=logging)
return response
response = model_response
elif model in litellm.nlp_cloud_models or custom_llm_provider == "nlp_cloud":
nlp_cloud_key = (
api_key or litellm.nlp_cloud_key or get_secret("NLP_CLOUD_API_KEY") or litellm.api_key
)
model_response = nlp_cloud.completion(
model=model,
messages=messages,
model_response=model_response,
print_verbose=print_verbose,
optional_params=optional_params,
litellm_params=litellm_params,
logger_fn=logger_fn,
encoding=encoding,
api_key=nlp_cloud_key,
logging_obj=logging
)
if "stream" in optional_params and optional_params["stream"] == True:
# don't try to access stream object,
response = CustomStreamWrapper(model_response, model, custom_llm_provider="nlp_cloud", logging_obj=logging)
return response
response = model_response
elif model in litellm.aleph_alpha_models:
aleph_alpha_key = (
api_key or litellm.aleph_alpha_key or get_secret("ALEPH_ALPHA_API_KEY") or get_secret("ALEPHALPHA_API_KEY") or litellm.api_key
)
model_response = aleph_alpha.completion(
model=model,
messages=messages,
model_response=model_response,
print_verbose=print_verbose,
optional_params=optional_params,
litellm_params=litellm_params,
logger_fn=logger_fn,
encoding=encoding,
default_max_tokens_to_sample=litellm.max_tokens,
api_key=aleph_alpha_key,
logging_obj=logging # model call logging done inside the class as we make need to modify I/O to fit aleph alpha's requirements
)
if "stream" in optional_params and optional_params["stream"] == True:
# don't try to access stream object,
response = CustomStreamWrapper(model_response, model, custom_llm_provider="aleph_alpha", logging_obj=logging)
return response
response = model_response
elif model in litellm.openrouter_models or custom_llm_provider == "openrouter":
openai.api_type = "openai"
# not sure if this will work after someone first uses another API
openai.api_base = (
litellm.api_base
if litellm.api_base is not None
else "https://openrouter.ai/api/v1"
)
openai.api_version = None
if litellm.organization:
openai.organization = litellm.organization
if api_key:
openai.api_key = api_key
elif litellm.openrouter_key:
openai.api_key = litellm.openrouter_key
else:
openai.api_key = get_secret("OPENROUTER_API_KEY") or get_secret(
"OR_API_KEY"
) or litellm.api_key
## LOGGING
logging.pre_call(input=messages, api_key=openai.api_key)
## COMPLETION CALL
if litellm.headers:
response = openai.ChatCompletion.create(
model=model,
messages=messages,
headers=litellm.headers,
**optional_params,
)
else:
openrouter_site_url = get_secret("OR_SITE_URL")
openrouter_app_name = get_secret("OR_APP_NAME")
# if openrouter_site_url is None, set it to https://litellm.ai
if openrouter_site_url is None:
openrouter_site_url = "https://litellm.ai"
# if openrouter_app_name is None, set it to liteLLM
if openrouter_app_name is None:
openrouter_app_name = "liteLLM"
response = openai.ChatCompletion.create(
model=model,
messages=messages,
headers={
"HTTP-Referer": openrouter_site_url, # To identify your site
"X-Title": openrouter_app_name, # To identify your app
},
**optional_params,
)
## LOGGING
logging.post_call(
input=messages, api_key=openai.api_key, original_response=response
)
elif model in litellm.cohere_models:
cohere_key = (
api_key
or litellm.cohere_key
or get_secret("COHERE_API_KEY")
or get_secret("CO_API_KEY")
or litellm.api_key
)
model_response = cohere.completion(
model=model,
messages=messages,
model_response=model_response,
print_verbose=print_verbose,
optional_params=optional_params,
litellm_params=litellm_params,
logger_fn=logger_fn,
encoding=encoding,
api_key=cohere_key,
                logging_obj=logging # model call logging done inside the class as we may need to modify I/O to fit cohere's requirements
)
if "stream" in optional_params and optional_params["stream"] == True:
# don't try to access stream object,
response = CustomStreamWrapper(model_response, model, custom_llm_provider="cohere", logging_obj=logging)
return response
response = model_response
elif (
(
model in litellm.huggingface_models and
custom_llm_provider!="custom" # if users use a hf model, with a custom/provider. See implementation of custom_llm_provider == custom
) or
custom_llm_provider == "huggingface"
):
custom_llm_provider = "huggingface"
huggingface_key = (
api_key
or litellm.huggingface_key
or os.environ.get("HF_TOKEN")
or os.environ.get("HUGGINGFACE_API_KEY")
or litellm.api_key
)
model_response = huggingface_restapi.completion(
model=model,
messages=messages,
api_base=api_base, # type: ignore
model_response=model_response,
print_verbose=print_verbose,
optional_params=optional_params,
litellm_params=litellm_params,
logger_fn=logger_fn,
encoding=encoding,
api_key=huggingface_key,
logging_obj=logging,
custom_prompt_dict=litellm.custom_prompt_dict
)
if "stream" in optional_params and optional_params["stream"] == True:
# don't try to access stream object,
response = CustomStreamWrapper(
model_response, model, custom_llm_provider="huggingface", logging_obj=logging
)
return response
response = model_response
elif custom_llm_provider == "oobabooga":
custom_llm_provider = "oobabooga"
model_response = oobabooga.completion(
model=model,
messages=messages,
model_response=model_response,
api_base=api_base, # type: ignore
print_verbose=print_verbose,
optional_params=optional_params,
litellm_params=litellm_params,
api_key=None,
logger_fn=logger_fn,
encoding=encoding,
logging_obj=logging
)
if "stream" in optional_params and optional_params["stream"] == True:
# don't try to access stream object,
response = CustomStreamWrapper(
model_response, model, custom_llm_provider="oobabooga", logging_obj=logging
)
return response
response = model_response
elif custom_llm_provider == "together_ai" or ("togethercomputer" in model) or (model in litellm.together_ai_models):
custom_llm_provider = "together_ai"
together_ai_key = (
api_key
or litellm.togetherai_api_key
or get_secret("TOGETHER_AI_TOKEN")
or get_secret("TOGETHERAI_API_KEY")
or litellm.api_key
)
model_response = together_ai.completion(
model=model,
messages=messages,
model_response=model_response,
print_verbose=print_verbose,
optional_params=optional_params,
litellm_params=litellm_params,
logger_fn=logger_fn,
encoding=encoding,
api_key=together_ai_key,
logging_obj=logging
)
if "stream_tokens" in optional_params and optional_params["stream_tokens"] == True:
# don't try to access stream object,
response = CustomStreamWrapper(
model_response, model, custom_llm_provider="together_ai", logging_obj=logging
)
return response
response = model_response
elif model in litellm.vertex_chat_models or model in litellm.vertex_code_chat_models:
try:
import vertexai
except:
raise Exception("vertexai import failed please run `pip install google-cloud-aiplatform`")
from vertexai.preview.language_models import ChatModel, CodeChatModel, InputOutputTextPair
vertex_project = (litellm.vertex_project or get_secret("VERTEXAI_PROJECT"))
vertex_location = (litellm.vertex_location or get_secret("VERTEXAI_LOCATION"))
vertexai.init(
project=vertex_project, location=vertex_location
)
# vertexai does not use an API key, it looks for credentials.json in the environment
prompt = " ".join([message["content"] for message in messages])
## LOGGING
logging.pre_call(input=prompt, api_key=None)
if model in litellm.vertex_chat_models:
chat_model = ChatModel.from_pretrained(model)
else: # vertex_code_chat_models
chat_model = CodeChatModel.from_pretrained(model)
chat = chat_model.start_chat()
if stream:
model_response = chat.send_message_streaming(prompt, **optional_params)
response = CustomStreamWrapper(
model_response, model, custom_llm_provider="vertex_ai", logging_obj=logging
)
return response
completion_response = chat.send_message(prompt, **optional_params)
## LOGGING
logging.post_call(
input=prompt, api_key=None, original_response=completion_response
)
## RESPONSE OBJECT
model_response["choices"][0]["message"]["content"] = str(completion_response)
model_response["created"] = time.time()
model_response["model"] = model
## CALCULATING USAGE
prompt_tokens = len(
encoding.encode(prompt)
)
completion_tokens = len(
encoding.encode(model_response["choices"][0]["message"]["content"])
)
model_response["usage"] = {
"prompt_tokens": prompt_tokens,
"completion_tokens": completion_tokens,
"total_tokens": prompt_tokens + completion_tokens,
}
response = model_response
elif model in litellm.vertex_text_models or model in litellm.vertex_code_text_models:
try:
import vertexai
except:
raise Exception("vertexai import failed please run `pip install google-cloud-aiplatform`")
from vertexai.language_models import TextGenerationModel, CodeGenerationModel
vertexai.init(
project=litellm.vertex_project, location=litellm.vertex_location
)
# vertexai does not use an API key, it looks for credentials.json in the environment
prompt = " ".join([message["content"] for message in messages])
## LOGGING
logging.pre_call(input=prompt, api_key=None)
if model in litellm.vertex_text_models:
vertex_model = TextGenerationModel.from_pretrained(model)
else:
vertex_model = CodeGenerationModel.from_pretrained(model)
if stream:
model_response = vertex_model.predict_streaming(prompt, **optional_params)
response = CustomStreamWrapper(
model_response, model, custom_llm_provider="vertexai", logging_obj=logging
)
return response
completion_response = vertex_model.predict(prompt, **optional_params)
## LOGGING
logging.post_call(
input=prompt, api_key=None, original_response=completion_response
)
## RESPONSE OBJECT
model_response["choices"][0]["message"]["content"] = str(completion_response)
model_response["created"] = time.time()
model_response["model"] = model
## CALCULATING USAGE
prompt_tokens = len(
encoding.encode(prompt)
)
completion_tokens = len(
encoding.encode(model_response["choices"][0]["message"]["content"])
)
model_response["usage"] = {
"prompt_tokens": prompt_tokens,
"completion_tokens": completion_tokens,
"total_tokens": prompt_tokens + completion_tokens,
}
response = model_response
elif model in litellm.ai21_models:
custom_llm_provider = "ai21"
ai21_key = (
api_key
or litellm.ai21_key
or os.environ.get("AI21_API_KEY")
or litellm.api_key
)
model_response = ai21.completion(
model=model,
messages=messages,
model_response=model_response,
print_verbose=print_verbose,
optional_params=optional_params,
litellm_params=litellm_params,
logger_fn=logger_fn,
encoding=encoding,
api_key=ai21_key,
logging_obj=logging
)
if "stream" in optional_params and optional_params["stream"] == True:
# don't try to access stream object,
response = CustomStreamWrapper(
model_response, model, custom_llm_provider="ai21", logging_obj=logging
)
return response
## RESPONSE OBJECT
response = model_response
elif custom_llm_provider == "sagemaker":
# boto3 reads keys from .env
model_response = sagemaker.completion(
model=model,
messages=messages,
model_response=model_response,
print_verbose=print_verbose,
optional_params=optional_params,
litellm_params=litellm_params,
logger_fn=logger_fn,
encoding=encoding,
logging_obj=logging
)
if stream==True: ## [BETA]
# sagemaker does not support streaming as of now so we're faking streaming:
# https://discuss.huggingface.co/t/streaming-output-text-when-deploying-on-sagemaker/39611
# "SageMaker is currently not supporting streaming responses."
# fake streaming for sagemaker
resp_string = model_response["choices"][0]["message"]["content"]
response = CustomStreamWrapper(
resp_string, model, custom_llm_provider="sagemaker", logging_obj=logging
)
return response
## RESPONSE OBJECT
response = model_response
elif custom_llm_provider == "bedrock":
# boto3 reads keys from .env
model_response = bedrock.completion(
model=model,
messages=messages,
model_response=model_response,
print_verbose=print_verbose,
optional_params=optional_params,
litellm_params=litellm_params,
logger_fn=logger_fn,
encoding=encoding,
logging_obj=logging,
stream=stream,
)
if stream == True:
# don't try to access stream object,
response = CustomStreamWrapper(
iter(model_response), model, custom_llm_provider="bedrock", logging_obj=logging
)
return response
## RESPONSE OBJECT
response = model_response
elif custom_llm_provider == "vllm":
model_response = vllm.completion(
model=model,
messages=messages,
model_response=model_response,
print_verbose=print_verbose,
optional_params=optional_params,
litellm_params=litellm_params,
logger_fn=logger_fn,
encoding=encoding,
logging_obj=logging
)
if "stream" in optional_params and optional_params["stream"] == True: ## [BETA]
# don't try to access stream object,
response = CustomStreamWrapper(
model_response, model, custom_llm_provider="vllm", logging_obj=logging
)
return response
## RESPONSE OBJECT
response = model_response
elif custom_llm_provider == "ollama":
endpoint = (
litellm.api_base if litellm.api_base is not None else api_base
)
prompt = " ".join([message["content"] for message in messages])
## LOGGING
logging.pre_call(
input=prompt, api_key=None, additional_args={"endpoint": endpoint}
)
if acompletion == True:
async_generator = ollama.async_get_ollama_response_stream(endpoint, model, prompt)
return async_generator
generator = ollama.get_ollama_response_stream(endpoint, model, prompt)
if optional_params.get("stream", False) == True:
# assume all ollama responses are streamed
return generator
else:
response_string = ""
for chunk in generator:
response_string+=chunk['choices'][0]['delta']['content']
## RESPONSE OBJECT
model_response["choices"][0]["message"]["content"] = response_string
model_response["created"] = time.time()
model_response["model"] = "ollama/" + model
prompt_tokens = len(encoding.encode(prompt))
completion_tokens = len(encoding.encode(response_string))
model_response["usage"] = {
"prompt_tokens": prompt_tokens,
"completion_tokens": completion_tokens,
"total_tokens": prompt_tokens + completion_tokens,
}
response = model_response
elif (
custom_llm_provider == "baseten"
or litellm.api_base == "https://app.baseten.co"
):
custom_llm_provider = "baseten"
baseten_key = (
api_key or litellm.baseten_key or os.environ.get("BASETEN_API_KEY") or litellm.api_key
)
model_response = baseten.completion(
model=model,
messages=messages,
model_response=model_response,
print_verbose=print_verbose,
optional_params=optional_params,
litellm_params=litellm_params,
logger_fn=logger_fn,
encoding=encoding,
api_key=baseten_key,
logging_obj=logging
)
if inspect.isgenerator(model_response) or ("stream" in optional_params and optional_params["stream"] == True):
# don't try to access stream object,
response = CustomStreamWrapper(
model_response, model, custom_llm_provider="baseten", logging_obj=logging
)
return response
response = model_response
elif (
custom_llm_provider == "petals"
or model in litellm.petals_models
):
custom_llm_provider = "petals"
model_response = petals.completion(
model=model,
messages=messages,
model_response=model_response,
print_verbose=print_verbose,
optional_params=optional_params,
litellm_params=litellm_params,
logger_fn=logger_fn,
encoding=encoding,
logging_obj=logging
)
if inspect.isgenerator(model_response) or (stream == True):
# don't try to access stream object,
response = CustomStreamWrapper(
model_response, model, custom_llm_provider="petals", logging_obj=logging
)
return response
response = model_response
elif (
custom_llm_provider == "custom"
):
import requests
url = (
litellm.api_base or
api_base or
""
)
if url == None or url == "":
raise ValueError("api_base not set. Set api_base or litellm.api_base for custom endpoints")
"""
assume input to custom LLM api bases follow this format:
resp = requests.post(
api_base,
json={
'model': 'meta-llama/Llama-2-13b-hf', # model name
'params': {
'prompt': ["The capital of France is P"],
'max_tokens': 32,
'temperature': 0.7,
'top_p': 1.0,
'top_k': 40,
}
}
)
"""
prompt = " ".join([message["content"] for message in messages])
resp = requests.post(url, json={
'model': model,
'params': {
'prompt': [prompt],
'max_tokens': max_tokens,
'temperature': temperature,
'top_p': top_p,
'top_k': top_k,
}
})
response_json = resp.json()
"""
        assume all responses from custom api_bases are of this format:
        {
            'data': [
                {
                    'prompt': 'The capital of France is P',
'output': ['The capital of France is PARIS.\nThe capital of France is PARIS.\nThe capital of France is PARIS.\nThe capital of France is PARIS.\nThe capital of France is PARIS.\nThe capital of France is PARIS.\nThe capital of France is PARIS.\nThe capital of France is PARIS.\nThe capital of France is PARIS.\nThe capital of France is PARIS.\nThe capital of France is PARIS.\nThe capital of France is PARIS.\nThe capital of France is PARIS.\nThe capital of France'],
                    'params': {'temperature': 0.7, 'top_k': 40, 'top_p': 1}
                }
            ],
            'message': 'ok'
        }
"""
string_response = response_json['data'][0]['output'][0]
## RESPONSE OBJECT
model_response["choices"][0]["message"]["content"] = string_response
model_response["created"] = time.time()
model_response["model"] = model
response = model_response
else:
raise ValueError(
f"Unable to map your input to a model. Check your input - {args}"
)
return response
except Exception as e:
## Map to OpenAI Exception
raise exception_type(
model=model, custom_llm_provider=custom_llm_provider, original_exception=e, completion_kwargs=args,
)
def completion_with_retries(*args, **kwargs):
try:
import tenacity
except:
raise Exception("tenacity import failed please run `pip install tenacity`")
retryer = tenacity.Retrying(stop=tenacity.stop_after_attempt(3), reraise=True)
return retryer(completion, *args, **kwargs)
def batch_completion(
model: str,
# Optional OpenAI params: see https://platform.openai.com/docs/api-reference/chat/create
messages: List = [],
functions: List = [],
function_call: str = "", # optional params
temperature: float = 1,
top_p: float = 1,
n: int = 1,
stream: bool = False,
stop=None,
max_tokens: float = float("inf"),
presence_penalty: float = 0,
frequency_penalty=0,
logit_bias: dict = {},
user: str = "",
# Optional liteLLM function params
*,
return_async=False,
api_key: Optional[str] = None,
api_version: Optional[str] = None,
api_base: Optional[str] = None,
force_timeout=600,
# used by text-bison only
top_k=40,
custom_llm_provider=None,):
args = locals()
batch_messages = messages
completions = []
custom_llm_provider = None
if model.split("/", 1)[0] in litellm.provider_list:
custom_llm_provider = model.split("/", 1)[0]
model = model.split("/", 1)[1]
if custom_llm_provider == "vllm":
optional_params = get_optional_params(
functions=functions,
function_call=function_call,
temperature=temperature,
top_p=top_p,
n=n,
stream=stream,
stop=stop,
max_tokens=max_tokens,
presence_penalty=presence_penalty,
frequency_penalty=frequency_penalty,
logit_bias=logit_bias,
user=user,
# params to identify the model
model=model,
custom_llm_provider=custom_llm_provider,
top_k=top_k,
)
results = vllm.batch_completions(model=model, messages=batch_messages, custom_prompt_dict=litellm.custom_prompt_dict, optional_params=optional_params)
else:
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
with ThreadPoolExecutor(max_workers=100) as executor:
for sub_batch in chunks(batch_messages, 100):
for message_list in sub_batch:
kwargs_modified = args
kwargs_modified["messages"] = message_list
future = executor.submit(completion, **kwargs_modified)
completions.append(future)
# Retrieve the results from the futures
results = [future.result() for future in completions]
return results
# send one request to multiple models
# return as soon as one of the llms responds
def batch_completion_models(*args, **kwargs):
"""
Send a request to multiple language models concurrently and return the response
as soon as one of the models responds.
Args:
*args: Variable-length positional arguments passed to the completion function.
**kwargs: Additional keyword arguments:
- models (str or list of str): The language models to send requests to.
- Other keyword arguments to be passed to the completion function.
Returns:
str or None: The response from one of the language models, or None if no response is received.
Note:
This function utilizes a ThreadPoolExecutor to parallelize requests to multiple models.
It sends requests concurrently and returns the response from the first model that responds.
"""
import concurrent
if "model" in kwargs:
kwargs.pop("model")
if "models" in kwargs:
models = kwargs["models"]
kwargs.pop("models")
futures = {}
with concurrent.futures.ThreadPoolExecutor(max_workers=len(models)) as executor:
for model in models:
futures[model] = executor.submit(completion, *args, model=model, **kwargs)
for model, future in sorted(futures.items(), key=lambda x: models.index(x[0])):
if future.result() is not None:
return future.result()
return None # If no response is received from any model
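# Example usage sketch (kept as comments so importing this module stays side-effect free);
# the model names and message below are illustrative placeholders:
#
#   response = batch_completion_models(
#       models=["gpt-3.5-turbo", "claude-instant-1"],
#       messages=[{"role": "user", "content": "Hey, how's it going?"}],
#   )
#   # returns the first non-None response; slower models' results are discarded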
def batch_completion_models_all_responses(*args, **kwargs):
"""
Send a request to multiple language models concurrently and return a list of responses
from all models that respond.
Args:
*args: Variable-length positional arguments passed to the completion function.
**kwargs: Additional keyword arguments:
- models (str or list of str): The language models to send requests to.
- Other keyword arguments to be passed to the completion function.
Returns:
list: A list of responses from the language models that responded.
Note:
This function utilizes a ThreadPoolExecutor to parallelize requests to multiple models.
It sends requests concurrently and collects responses from all models that respond.
"""
import concurrent.futures
# ANSI escape codes for colored output
GREEN = "\033[92m"
RED = "\033[91m"
RESET = "\033[0m"
if "model" in kwargs:
kwargs.pop("model")
if "models" in kwargs:
models = kwargs["models"]
kwargs.pop("models")
responses = []
with concurrent.futures.ThreadPoolExecutor(max_workers=len(models)) as executor:
for idx, model in enumerate(models):
print(f"{GREEN}LiteLLM: Making request to model: {model}{RESET}")
future = executor.submit(completion, *args, model=model, **kwargs)
if future.result() is not None:
responses.append(future.result())
print(f"{GREEN}LiteLLM: Model {model} returned response{RESET}")
else:
print(f"{RED}LiteLLM: Model {model } did not return a response{RESET}")
return responses
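# Example usage sketch (comments only; placeholder model names):
#
#   responses = batch_completion_models_all_responses(
#       models=["gpt-3.5-turbo", "claude-instant-1"],
#       messages=[{"role": "user", "content": "write a haiku about APIs"}],
#   )
#   for r in responses:
#       print(r["choices"][0]["message"]["content"])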
### EMBEDDING ENDPOINTS ####################
@client
@timeout( # type: ignore
60
) ## set timeouts, in case calls hang (e.g. Azure) - default is 60s, override with `force_timeout`
def embedding(
model, input=[], azure=False, force_timeout=60, litellm_call_id=None, litellm_logging_obj=None, logger_fn=None, caching=False,
):
try:
response = None
logging = litellm_logging_obj
logging.update_environment_variables(model=model, user="", optional_params={}, litellm_params={"force_timeout": force_timeout, "azure": azure, "litellm_call_id": litellm_call_id, "logger_fn": logger_fn})
if azure == True:
# azure configs
openai.api_type = get_secret("AZURE_API_TYPE") or "azure"
openai.api_base = get_secret("AZURE_API_BASE")
openai.api_version = get_secret("AZURE_API_VERSION")
openai.api_key = get_secret("AZURE_API_KEY")
## LOGGING
logging.pre_call(
input=input,
api_key=openai.api_key,
additional_args={
"api_type": openai.api_type,
"api_base": openai.api_base,
"api_version": openai.api_version,
},
)
## EMBEDDING CALL
response = openai.Embedding.create(input=input, engine=model)
elif model in litellm.open_ai_embedding_models:
openai.api_type = "openai"
openai.api_base = "https://api.openai.com/v1"
openai.api_version = None
openai.api_key = get_secret("OPENAI_API_KEY")
## LOGGING
logging.pre_call(
input=input,
api_key=openai.api_key,
additional_args={
"api_type": openai.api_type,
"api_base": openai.api_base,
"api_version": openai.api_version,
},
)
## EMBEDDING CALL
response = openai.Embedding.create(input=input, model=model)
else:
args = locals()
raise ValueError(f"No valid embedding model args passed in - {args}")
## LOGGING
logging.post_call(input=input, api_key=openai.api_key, original_response=response)
return response
except Exception as e:
## LOGGING
logging.post_call(
input=input,
api_key=openai.api_key,
original_response=str(e),
)
## Map to OpenAI Exception
raise exception_type(
model=model,
original_exception=e,
custom_llm_provider="azure" if azure == True else None,
)
###### Text Completion ################
def text_completion(*args, **kwargs):
if "prompt" in kwargs:
messages = [{"role": "system", "content": kwargs["prompt"]}]
kwargs["messages"] = messages
kwargs.pop("prompt")
return completion(*args, **kwargs)
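# Example usage sketch (comments only): the prompt is wrapped into a single system message
# and forwarded to completion(); the model name below is a placeholder.
#
#   response = text_completion(model="gpt-3.5-turbo", prompt="Once upon a time")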
##### Moderation #######################
def moderation(input: str, api_key: Optional[str]=None):
# only supports open ai for now
api_key = (
api_key or
litellm.api_key or
litellm.openai_key or
get_secret("OPENAI_API_KEY")
)
openai.api_key = api_key
openai.api_type = "open_ai"
openai.api_version = None
openai.api_base = "https://api.openai.com/v1"
response = openai.Moderation.create(input)
return response
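# Example usage sketch (comments only; assumes OPENAI_API_KEY is set or api_key is passed in):
#
#   result = moderation(input="I want to say something hateful")
#   flagged = result["results"][0]["flagged"]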
####### HELPER FUNCTIONS ################
## Set verbose to true -> ```litellm.set_verbose = True```
def print_verbose(print_statement):
if litellm.set_verbose:
print(f"LiteLLM: {print_statement}")
def config_completion(**kwargs):
if litellm.config_path != None:
config_args = read_config_args(litellm.config_path)
# overwrite any args passed in with config args
return completion(**kwargs, **config_args)
else:
raise ValueError(
"No config path set, please set a config path using `litellm.config_path = 'path/to/config.json'`"
)
def stream_chunk_builder(chunks: list):
id = chunks[0]["id"]
object = chunks[0]["object"]
created = chunks[0]["created"]
model = chunks[0]["model"]
role = chunks[0]["choices"][0]["delta"]["role"]
    finish_reason = chunks[-1]["choices"][0]["finish_reason"]
# Initialize the response dictionary
response = {
"id": id,
"object": object,
"created": created,
"model": model,
"choices": [
{
"index": 0,
"message": {
"role": role,
"content": ""
},
"finish_reason": finnish_reason,
}
],
# "usage": {
# "prompt_tokens": 0, # Modify as needed
# "completion_tokens": 0, # Modify as needed
# "total_tokens": 0 # Modify as needed
# }
}
# Extract the "content" strings from the nested dictionaries within "choices"
content_list = []
if "function_call" in chunks[0]["choices"][0]["delta"]:
argument_list = []
delta = chunks[0]["choices"][0]["delta"]
function_call = delta.get("function_call", "")
function_call_name = function_call.get("name", "")
message = response["choices"][0]["message"]
message["function_call"] = {}
message["function_call"]["name"] = function_call_name
for chunk in chunks:
choices = chunk["choices"]
for choice in choices:
delta = choice.get("delta", {})
function_call = delta.get("function_call", "")
# Check if a function call is present
if function_call:
# Now, function_call is expected to be a dictionary
arguments = function_call.get("arguments", "")
argument_list.append(arguments)
combined_arguments = "".join(argument_list)
response["choices"][0]["message"]["content"] = None
response["choices"][0]["message"]["function_call"]["arguments"] = combined_arguments
else:
for chunk in chunks:
choices = chunk["choices"]
for choice in choices:
delta = choice.get("delta", {})
content = delta.get("content", "")
content_list.append(content)
# Combine the "content" strings into a single string
combined_content = "".join(content_list)
# Update the "content" field within the response dictionary
response["choices"][0]["message"]["content"] = combined_content
# # Update usage information if needed
# response["usage"]["completion_tokens"] = token
return response
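# Example usage sketch (comments only; `messages` is assumed to be a standard chat-message list):
# rebuild a full, OpenAI-style response object from the chunks yielded by a streaming call.
#
#   chunks = []
#   for chunk in completion(model="gpt-3.5-turbo", messages=messages, stream=True):
#       chunks.append(chunk)
#   full_response = stream_chunk_builder(chunks)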
| [
"content",
" "
] |
2024-01-10 | mocy/litellm | litellm~timeout.py | """
Module containing "timeout" decorator for sync and async callables.
"""
import asyncio
from concurrent import futures
from inspect import iscoroutinefunction
from functools import wraps
from threading import Thread
from openai.error import Timeout
def timeout(timeout_duration: float = 0.0, exception_to_raise=Timeout):
"""
Wraps a function to raise the specified exception if execution time
is greater than the specified timeout.
Works with both synchronous and asynchronous callables, but with synchronous ones will introduce
some overhead due to the backend use of threads and asyncio.
    :param float timeout_duration: Timeout duration in seconds. If None, the callable won't time out.
    :param OpenAIError exception_to_raise: Exception to raise when the callable times out.
        Defaults to openai.error.Timeout.
:return: The decorated function.
:rtype: callable
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
async def async_func():
return func(*args, **kwargs)
thread = _LoopWrapper()
thread.start()
future = asyncio.run_coroutine_threadsafe(async_func(), thread.loop)
local_timeout_duration = timeout_duration
if "force_timeout" in kwargs and kwargs["force_timeout"] is not None:
local_timeout_duration = kwargs["force_timeout"]
elif "request_timeout" in kwargs and kwargs["request_timeout"] is not None:
local_timeout_duration = kwargs["request_timeout"]
try:
result = future.result(timeout=local_timeout_duration)
except futures.TimeoutError:
thread.stop_loop()
raise exception_to_raise(
f"A timeout error occurred. The function call took longer than {local_timeout_duration} second(s)."
)
thread.stop_loop()
return result
@wraps(func)
async def async_wrapper(*args, **kwargs):
local_timeout_duration = timeout_duration
if "force_timeout" in kwargs:
local_timeout_duration = kwargs["force_timeout"]
try:
value = await asyncio.wait_for(
                    func(*args, **kwargs), timeout=local_timeout_duration
)
return value
except asyncio.TimeoutError:
raise exception_to_raise(
f"A timeout error occurred. The function call took longer than {local_timeout_duration} second(s)."
)
if iscoroutinefunction(func):
return async_wrapper
return wrapper
return decorator
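# Example usage sketch (comments only; `slow_call` is a hypothetical function):
#
#   @timeout(10)  # raises openai.error.Timeout if slow_call runs longer than 10s
#   def slow_call():
#       ...
#
#   # a per-call override can also be passed via the force_timeout / request_timeout
#   # kwargs, which the synchronous wrapper reads before waiting on the future.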
class _LoopWrapper(Thread):
def __init__(self):
super().__init__(daemon=True)
self.loop = asyncio.new_event_loop()
def run(self) -> None:
self.loop.run_forever()
self.loop.call_soon_threadsafe(self.loop.close)
def stop_loop(self):
for task in asyncio.all_tasks(self.loop):
task.cancel()
self.loop.call_soon_threadsafe(self.loop.stop)
| [] |
2024-01-10 | mocy/litellm | litellm~utils.py | import sys
import dotenv, json, traceback, threading
import subprocess, os
import litellm, openai
import itertools
import random, uuid, requests
import datetime, time
import tiktoken
import uuid
import aiohttp
from tokenizers import Tokenizer
import pkg_resources
encoding = tiktoken.get_encoding("cl100k_base")
import importlib.metadata
from .integrations.traceloop import TraceloopLogger
from .integrations.helicone import HeliconeLogger
from .integrations.aispend import AISpendLogger
from .integrations.berrispend import BerriSpendLogger
from .integrations.supabase import Supabase
from .integrations.llmonitor import LLMonitorLogger
from .integrations.prompt_layer import PromptLayerLogger
from .integrations.custom_logger import CustomLogger
from .integrations.langfuse import LangFuseLogger
from .integrations.litedebugger import LiteDebugger
from openai.error import OpenAIError as OriginalError
from openai.openai_object import OpenAIObject
from .exceptions import (
AuthenticationError,
InvalidRequestError,
RateLimitError,
ServiceUnavailableError,
OpenAIError,
ContextWindowExceededError,
Timeout,
APIConnectionError,
APIError,
BudgetExceededError
)
from typing import cast, List, Dict, Union, Optional
from .caching import Cache
from .llms.prompt_templates.factory import llama_2_special_tokens
####### ENVIRONMENT VARIABLES ####################
dotenv.load_dotenv() # Loading env variables using dotenv
sentry_sdk_instance = None
capture_exception = None
add_breadcrumb = None
posthog = None
slack_app = None
alerts_channel = None
heliconeLogger = None
promptLayerLogger = None
customLogger = None
langFuseLogger = None
llmonitorLogger = None
aispendLogger = None
berrispendLogger = None
supabaseClient = None
liteDebuggerClient = None
callback_list: Optional[List[str]] = []
user_logger_fn = None
additional_details: Optional[Dict[str, str]] = {}
local_cache: Optional[Dict[str, str]] = {}
last_fetched_at = None
last_fetched_at_keys = None
######## Model Response #########################
# All liteLLM Model responses will be in this format, Follows the OpenAI Format
# https://docs.litellm.ai/docs/completion/output
# {
# 'choices': [
# {
# 'finish_reason': 'stop',
# 'index': 0,
# 'message': {
# 'role': 'assistant',
# 'content': " I'm doing well, thank you for asking. I am Claude, an AI assistant created by Anthropic."
# }
# }
# ],
# 'created': 1691429984.3852863,
# 'model': 'claude-instant-1',
# 'usage': {'prompt_tokens': 18, 'completion_tokens': 23, 'total_tokens': 41}
# }
def _generate_id(): # private helper function
return 'chatcmpl-' + str(uuid.uuid4())
class Message(OpenAIObject):
def __init__(self, content="default", role="assistant", logprobs=None, **params):
super(Message, self).__init__(**params)
self.content = content
self.role = role
self.logprobs = logprobs
class Delta(OpenAIObject):
def __init__(self, content=None, logprobs=None, role=None, **params):
super(Delta, self).__init__(**params)
if content is not None:
self.content = content
if role:
self.role = role
class Choices(OpenAIObject):
def __init__(self, finish_reason=None, index=0, message=None, **params):
super(Choices, self).__init__(**params)
if finish_reason:
self.finish_reason = finish_reason
else:
self.finish_reason = "stop"
self.index = index
if message is None:
self.message = Message(content=None)
else:
self.message = message
class StreamingChoices(OpenAIObject):
def __init__(self, finish_reason=None, index=0, delta: Optional[Delta]=None, **params):
super(StreamingChoices, self).__init__(**params)
self.finish_reason = finish_reason
self.index = index
if delta:
self.delta = delta
else:
self.delta = Delta()
class ModelResponse(OpenAIObject):
def __init__(self, id=None, choices=None, created=None, model=None, usage=None, stream=False, response_ms=None, **params):
if stream:
self.object = "chat.completion.chunk"
self.choices = [StreamingChoices()]
else:
if model in litellm.open_ai_embedding_models:
self.object = "embedding"
else:
self.object = "chat.completion"
self.choices = [Choices()]
if id is None:
self.id = _generate_id()
else:
self.id = id
if created is None:
self.created = int(time.time())
else:
self.created = created
if response_ms:
self._response_ms = response_ms
else:
self._response_ms = None
self.model = model
self.usage = (
usage
if usage
else {
"prompt_tokens": None,
"completion_tokens": None,
"total_tokens": None,
}
)
super(ModelResponse, self).__init__(**params)
def to_dict_recursive(self):
d = super().to_dict_recursive()
d["choices"] = [choice.to_dict_recursive() for choice in self.choices]
return d
############################################################
def print_verbose(print_statement):
if litellm.set_verbose:
print(f"LiteLLM: {print_statement}")
####### LOGGING ###################
from enum import Enum
class CallTypes(Enum):
embedding = 'embedding'
completion = 'completion'
# Logging function -> log the exact model details + what's being sent | Non-Blocking
class Logging:
global supabaseClient, liteDebuggerClient, promptLayerLogger
def __init__(self, model, messages, stream, call_type, start_time, litellm_call_id, function_id):
if call_type not in [item.value for item in CallTypes]:
allowed_values = ", ".join([item.value for item in CallTypes])
raise ValueError(f"Invalid call_type {call_type}. Allowed values: {allowed_values}")
self.model = model
self.messages = messages
self.stream = stream
self.start_time = start_time # log the call start time
self.call_type = call_type
self.litellm_call_id = litellm_call_id
self.function_id = function_id
def update_environment_variables(self, model, user, optional_params, litellm_params):
self.optional_params = optional_params
self.model = model
self.user = user
self.litellm_params = litellm_params
self.logger_fn = litellm_params["logger_fn"]
print_verbose(f"self.optional_params: {self.optional_params}")
self.model_call_details = {
"model": self.model,
"messages": self.messages,
"optional_params": self.optional_params,
"litellm_params": self.litellm_params,
}
def pre_call(self, input, api_key, model=None, additional_args={}):
# Log the exact input to the LLM API
print_verbose(f"Logging Details Pre-API Call for call id {self.litellm_call_id}")
litellm.error_logs['PRE_CALL'] = locals()
try:
# print_verbose(f"logging pre call for model: {self.model} with call type: {self.call_type}")
self.model_call_details["input"] = input
self.model_call_details["api_key"] = api_key
self.model_call_details["additional_args"] = additional_args
if (
model
): # if model name was changes pre-call, overwrite the initial model call name with the new one
self.model_call_details["model"] = model
# User Logging -> if you pass in a custom logging function
print_verbose(f"model call details: {self.model_call_details}")
if self.logger_fn and callable(self.logger_fn):
try:
self.logger_fn(
self.model_call_details
) # Expectation: any logger function passed in by the user should accept a dict object
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
if litellm.max_budget and self.stream:
start_time = self.start_time
end_time = self.start_time # no time has passed as the call hasn't been made yet
time_diff = (end_time - start_time).total_seconds()
float_diff = float(time_diff)
litellm._current_cost += litellm.completion_cost(model=self.model, prompt="".join(message["content"] for message in self.messages), completion="", total_time=float_diff)
# Input Integration Logging -> If you want to log the fact that an attempt to call the model was made
for callback in litellm.input_callback:
try:
if callback == "supabase":
print_verbose("reaches supabase for logging!")
model = self.model_call_details["model"]
messages = self.model_call_details["input"]
print(f"supabaseClient: {supabaseClient}")
supabaseClient.input_log_event(
model=model,
messages=messages,
end_user=litellm._thread_context.user,
litellm_call_id=self.litellm_params["litellm_call_id"],
print_verbose=print_verbose,
)
elif callback == "lite_debugger":
print_verbose(f"reaches litedebugger for logging! - model_call_details {self.model_call_details}")
model = self.model_call_details["model"]
messages = self.model_call_details["input"]
print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
liteDebuggerClient.input_log_event(
model=model,
messages=messages,
end_user=litellm._thread_context.user,
litellm_call_id=self.litellm_params["litellm_call_id"],
litellm_params=self.model_call_details["litellm_params"],
optional_params=self.model_call_details["optional_params"],
print_verbose=print_verbose,
call_type=self.call_type
)
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while input logging with integrations {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
        except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
def post_call(self, original_response, input=None, api_key=None, additional_args={}):
# Log the exact result from the LLM API, for streaming - log the type of response received
litellm.error_logs['POST_CALL'] = locals()
try:
self.model_call_details["input"] = input
self.model_call_details["api_key"] = api_key
self.model_call_details["original_response"] = original_response
self.model_call_details["additional_args"] = additional_args
# User Logging -> if you pass in a custom logging function
print_verbose(f"model call details: {self.model_call_details}")
print_verbose(
f"Logging Details Post-API Call: logger_fn - {self.logger_fn} | callable(logger_fn) - {callable(self.logger_fn)}"
)
if self.logger_fn and callable(self.logger_fn):
try:
self.logger_fn(
self.model_call_details
) # Expectation: any logger function passed in by the user should accept a dict object
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
# Input Integration Logging -> If you want to log the fact that an attempt to call the model was made
for callback in litellm.input_callback:
try:
if callback == "lite_debugger":
print_verbose("reaches litedebugger for post-call logging!")
print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
liteDebuggerClient.post_call_log_event(
original_response=original_response,
litellm_call_id=self.litellm_params["litellm_call_id"],
print_verbose=print_verbose,
call_type = self.call_type,
stream = self.stream,
)
                except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while post-call logging with integrations {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
except:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
pass
def success_handler(self, result, start_time=None, end_time=None):
print_verbose(
f"Logging Details LiteLLM-Success Call"
)
try:
if start_time is None:
start_time = self.start_time
if end_time is None:
end_time = datetime.datetime.now()
print_verbose(f"success callbacks: {litellm.success_callback}")
if litellm.max_budget and self.stream:
time_diff = (end_time - start_time).total_seconds()
float_diff = float(time_diff)
litellm._current_cost += litellm.completion_cost(model=self.model, prompt="", completion=result["content"], total_time=float_diff)
for callback in litellm.success_callback:
try:
if callback == "lite_debugger":
print_verbose("reaches lite_debugger for logging!")
print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
print_verbose(f"liteDebuggerClient details function {self.call_type} and stream set to {self.stream}")
liteDebuggerClient.log_event(
end_user=litellm._thread_context.user,
response_obj=result,
start_time=start_time,
end_time=end_time,
litellm_call_id=self.litellm_call_id,
print_verbose=print_verbose,
call_type = self.call_type,
stream = self.stream,
)
if callback == "api_manager":
print_verbose("reaches api manager for updating model cost")
litellm.apiManager.update_cost(completion_obj=result, user=self.user)
if callback == "cache":
# print("entering logger first time")
# print(self.litellm_params["stream_response"])
if litellm.cache != None and self.model_call_details.get('optional_params', {}).get('stream', False) == True:
litellm_call_id = self.litellm_params["litellm_call_id"]
if litellm_call_id in self.litellm_params["stream_response"]:
# append for the given call_id
if self.litellm_params["stream_response"][litellm_call_id]["choices"][0]["message"]["content"] == "default":
self.litellm_params["stream_response"][litellm_call_id]["choices"][0]["message"]["content"] = result["content"] # handle first try
else:
self.litellm_params["stream_response"][litellm_call_id]["choices"][0]["message"]["content"] += result["content"]
else: # init a streaming response for this call id
new_model_response = ModelResponse(choices=[Choices(message=Message(content="default"))])
#print("creating new model response")
#print(new_model_response)
self.litellm_params["stream_response"][litellm_call_id] = new_model_response
#print("adding to cache for", litellm_call_id)
litellm.cache.add_cache(self.litellm_params["stream_response"][litellm_call_id], **self.model_call_details)
if callback == "promptlayer":
print_verbose("reaches promptlayer for logging!")
promptLayerLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while success logging with integrations {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
except:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while success logging {traceback.format_exc()}"
)
pass
def failure_handler(self, exception, traceback_exception, start_time=None, end_time=None):
print_verbose(
f"Logging Details LiteLLM-Failure Call"
)
try:
if start_time is None:
start_time = self.start_time
if end_time is None:
end_time = datetime.datetime.now()
for callback in litellm.failure_callback:
try:
if callback == "lite_debugger":
print_verbose("reaches lite_debugger for logging!")
print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
result = {
"model": self.model,
"created": time.time(),
"error": traceback_exception,
"usage": {
"prompt_tokens": prompt_token_calculator(
self.model, messages=self.messages
),
"completion_tokens": 0,
},
}
liteDebuggerClient.log_event(
model=self.model,
messages=self.messages,
end_user=litellm._thread_context.user,
response_obj=result,
start_time=start_time,
end_time=end_time,
litellm_call_id=self.litellm_call_id,
print_verbose=print_verbose,
call_type = self.call_type,
stream = self.stream,
)
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while failure logging with integrations {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
except:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while failure logging {traceback.format_exc()}"
)
pass
def exception_logging(
additional_args={},
logger_fn=None,
exception=None,
):
try:
model_call_details = {}
if exception:
model_call_details["exception"] = exception
model_call_details["additional_args"] = additional_args
# User Logging -> if you pass in a custom logging function or want to use sentry breadcrumbs
print_verbose(
f"Logging Details: logger_fn - {logger_fn} | callable(logger_fn) - {callable(logger_fn)}"
)
if logger_fn and callable(logger_fn):
try:
logger_fn(
model_call_details
) # Expectation: any logger function passed in by the user should accept a dict object
except Exception as e:
print(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
except Exception as e:
print(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
pass
####### CLIENT ###################
# make it easy to log if completion/embedding runs succeeded or failed + see what happened | Non-Blocking
def client(original_function):
global liteDebuggerClient, get_all_keys
def function_setup(
start_time, *args, **kwargs
): # just run once to check if user wants to send their data anywhere - PostHog/Sentry/Slack/etc.
try:
global callback_list, add_breadcrumb, user_logger_fn, Logging
function_id = kwargs["id"] if "id" in kwargs else None
if litellm.use_client or ("use_client" in kwargs and kwargs["use_client"] == True):
print_verbose(f"litedebugger initialized")
if "lite_debugger" not in litellm.input_callback:
litellm.input_callback.append("lite_debugger")
if "lite_debugger" not in litellm.success_callback:
litellm.success_callback.append("lite_debugger")
if "lite_debugger" not in litellm.failure_callback:
litellm.failure_callback.append("lite_debugger")
if (
len(litellm.input_callback) > 0
or len(litellm.success_callback) > 0
or len(litellm.failure_callback) > 0
) and len(callback_list) == 0:
callback_list = list(
set(
litellm.input_callback
+ litellm.success_callback
+ litellm.failure_callback
)
)
set_callbacks(
callback_list=callback_list,
function_id=function_id
)
if add_breadcrumb:
add_breadcrumb(
category="litellm.llm_call",
message=f"Positional Args: {args}, Keyword Args: {kwargs}",
level="info",
)
if "logger_fn" in kwargs:
user_logger_fn = kwargs["logger_fn"]
# CRASH REPORTING TELEMETRY
crash_reporting(*args, **kwargs)
# INIT LOGGER - for user-specified integrations
model = args[0] if len(args) > 0 else kwargs["model"]
call_type = original_function.__name__
if call_type == CallTypes.completion.value:
messages = args[1] if len(args) > 1 else kwargs["messages"]
elif call_type == CallTypes.embedding.value:
messages = args[1] if len(args) > 1 else kwargs["input"]
stream = True if "stream" in kwargs and kwargs["stream"] == True else False
logging_obj = Logging(model=model, messages=messages, stream=stream, litellm_call_id=kwargs["litellm_call_id"], function_id=function_id, call_type=call_type, start_time=start_time)
return logging_obj
except Exception as e: # DO NOT BLOCK running the function because of this
print_verbose(f"[Non-Blocking] {traceback.format_exc()}; args - {args}; kwargs - {kwargs}")
print(e)
pass
def crash_reporting(*args, **kwargs):
if litellm.telemetry:
try:
model = args[0] if len(args) > 0 else kwargs["model"]
exception = kwargs["exception"] if "exception" in kwargs else None
custom_llm_provider = (
kwargs["custom_llm_provider"]
if "custom_llm_provider" in kwargs
else None
)
safe_crash_reporting(
model=model,
exception=exception,
custom_llm_provider=custom_llm_provider,
) # log usage-crash details. Do not log any user details. If you want to turn this off, set `litellm.telemetry=False`.
except:
# [Non-Blocking Error]
pass
def wrapper(*args, **kwargs):
start_time = datetime.datetime.now()
result = None
litellm_call_id = str(uuid.uuid4())
kwargs["litellm_call_id"] = litellm_call_id
try:
model = args[0] if len(args) > 0 else kwargs["model"]
except:
raise ValueError("model param not passed in.")
try:
logging_obj = function_setup(start_time, *args, **kwargs)
kwargs["litellm_logging_obj"] = logging_obj
# [OPTIONAL] CHECK BUDGET
if litellm.max_budget:
if litellm._current_cost > litellm.max_budget:
raise BudgetExceededError(current_cost=litellm._current_cost, max_budget=litellm.max_budget)
# [OPTIONAL] CHECK CACHE
# remove this after deprecating litellm.caching
if (litellm.caching or litellm.caching_with_models) and litellm.cache is None:
litellm.cache = Cache()
if kwargs.get("caching", False): # allow users to control returning cached responses from the completion function
# checking cache
if (litellm.cache != None or litellm.caching or litellm.caching_with_models):
print_verbose(f"LiteLLM: Checking Cache")
cached_result = litellm.cache.get_cache(*args, **kwargs)
if cached_result != None:
return cached_result
# MODEL CALL
result = original_function(*args, **kwargs)
end_time = datetime.datetime.now()
if "stream" in kwargs and kwargs["stream"] == True:
# TODO: Add to cache for streaming
return result
# [OPTIONAL] ADD TO CACHE
if litellm.caching or litellm.caching_with_models or litellm.cache != None: # user init a cache object
litellm.cache.add_cache(result, *args, **kwargs)
# [OPTIONAL] Return LiteLLM call_id
if litellm.use_client == True:
result['litellm_call_id'] = litellm_call_id
# LOG SUCCESS - handle streaming success logging in the _next_ object, remove `handle_success` once it's deprecated
logging_obj.success_handler(result, start_time, end_time)
# threading.Thread(target=logging_obj.success_handler, args=(result, start_time, end_time)).start()
my_thread = threading.Thread(
target=handle_success, args=(args, kwargs, result, start_time, end_time)
) # don't interrupt execution of main thread
my_thread.start()
# RETURN RESULT
result._response_ms = (end_time - start_time).total_seconds() * 1000 # return response latency in ms like openai
return result
except Exception as e:
traceback_exception = traceback.format_exc()
crash_reporting(*args, **kwargs, exception=traceback_exception)
end_time = datetime.datetime.now()
# LOG FAILURE - handle streaming failure logging in the _next_ object, remove `handle_failure` once it's deprecated
threading.Thread(target=logging_obj.failure_handler, args=(e, traceback_exception, start_time, end_time)).start()
my_thread = threading.Thread(
target=handle_failure,
args=(e, traceback_exception, start_time, end_time, args, kwargs),
) # don't interrupt execution of main thread
my_thread.start()
if hasattr(e, "message"):
if (
liteDebuggerClient and liteDebuggerClient.dashboard_url != None
): # make it easy to get to the debugger logs if you've initialized it
e.message += f"\n Check the log in your dashboard - {liteDebuggerClient.dashboard_url}"
raise e
return wrapper
####### USAGE CALCULATOR ################
# Extract the number of billion parameters from the model name
# only used for together_computer LLMs
def get_model_params_and_category(model_name):
import re
    params_match = re.search(r'(\d+b)', model_name) # match parameter-count suffixes like 3b, 70b, etc
category = None
if params_match != None:
params_match = params_match.group(1)
params_match = params_match.replace("b", "")
params_billion = float(params_match)
# Determine the category based on the number of parameters
if params_billion <= 3.0:
category = "together-ai-up-to-3b"
elif params_billion <= 7.0:
category = "together-ai-3.1b-7b"
elif params_billion <= 20.0:
category = "together-ai-7.1b-20b"
elif params_billion <= 40.0:
category = "together-ai-20.1b-40b"
elif params_billion <= 70.0:
category = "together-ai-40.1b-70b"
return category
return None
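# Example (comments only; the model name is an illustrative placeholder):
#
#   get_model_params_and_category("togethercomputer/llama-2-70b-chat")
#   # -> "together-ai-40.1b-70b": the regex pulls out "70b", i.e. 70 billion parameters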
def get_replicate_completion_pricing(completion_response=None, total_time=0.0):
# see https://replicate.com/pricing
a100_40gb_price_per_second_public = 0.001150
# for all litellm currently supported LLMs, almost all requests go to a100_80gb
a100_80gb_price_per_second_public = 0.001400 # assume all calls sent to A100 80GB for now
if total_time == 0.0:
start_time = completion_response['created']
end_time = completion_response["ended"]
total_time = end_time - start_time
return a100_80gb_price_per_second_public*total_time
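# Example (comments only): with total_time already known, the cost is just
# seconds * the assumed A100 80GB per-second rate above.
#
#   get_replicate_completion_pricing(total_time=2.5)   # -> 0.001400 * 2.5 = 0.0035 USD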
def token_counter(model="", text=None, messages: Optional[List] = None):
# Args:
# text: raw text string passed to model
# messages: Optional, alternative to passing in text. List of Dicts passed to completion, messages = [{"role": "user", "content": "hello"}]
# use tiktoken or anthropic's tokenizer depending on the model
if text == None:
if messages is not None:
text = " ".join([message["content"] for message in messages])
else:
raise ValueError("text and messages cannot both be None")
num_tokens = 0
if model is not None:
# cohere
if model in litellm.cohere_models:
tokenizer = Tokenizer.from_pretrained("Cohere/command-nightly")
enc = tokenizer.encode(text)
num_tokens = len(enc.ids)
# anthropic
elif model in litellm.anthropic_models:
# Read the JSON file
filename = pkg_resources.resource_filename(__name__, 'llms/tokenizers/anthropic_tokenizer.json')
with open(filename, 'r') as f:
json_data = json.load(f)
# Decode the JSON data from utf-8
json_data_decoded = json.dumps(json_data, ensure_ascii=False)
# Convert to str
json_str = str(json_data_decoded)
# load tokenizer
tokenizer = Tokenizer.from_str(json_str)
enc = tokenizer.encode(text)
num_tokens = len(enc.ids)
# llama2
elif "llama-2" in model.lower():
tokenizer = Tokenizer.from_pretrained("hf-internal-testing/llama-tokenizer")
enc = tokenizer.encode(text)
num_tokens = len(enc.ids)
# default - tiktoken
else:
num_tokens = len(encoding.encode(text))
else:
num_tokens = len(encoding.encode(text))
return num_tokens
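# Example usage sketch (comments only):
#
#   token_counter(model="gpt-3.5-turbo", text="hello world")          # tiktoken path
#   token_counter(model="claude-instant-1",
#                 messages=[{"role": "user", "content": "hello"}])    # anthropic tokenizer path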
def cost_per_token(model="gpt-3.5-turbo", prompt_tokens=0, completion_tokens=0):
    # given a model and token counts, return (prompt_cost_usd_dollar, completion_tokens_cost_usd_dollar)
prompt_tokens_cost_usd_dollar = 0
completion_tokens_cost_usd_dollar = 0
model_cost_ref = litellm.model_cost
if model in model_cost_ref:
prompt_tokens_cost_usd_dollar = (
model_cost_ref[model]["input_cost_per_token"] * prompt_tokens
)
completion_tokens_cost_usd_dollar = (
model_cost_ref[model]["output_cost_per_token"] * completion_tokens
)
return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar
else:
# calculate average input cost
input_cost_sum = 0
output_cost_sum = 0
model_cost_ref = litellm.model_cost
for model in model_cost_ref:
input_cost_sum += model_cost_ref[model]["input_cost_per_token"]
output_cost_sum += model_cost_ref[model]["output_cost_per_token"]
avg_input_cost = input_cost_sum / len(model_cost_ref.keys())
avg_output_cost = output_cost_sum / len(model_cost_ref.keys())
prompt_tokens_cost_usd_dollar = avg_input_cost * prompt_tokens
completion_tokens_cost_usd_dollar = avg_output_cost * completion_tokens
return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar
def completion_cost(
completion_response=None,
model="gpt-3.5-turbo",
prompt="",
completion="",
total_time=0.0, # used for replicate
):
"""
    Calculate the cost of a given completion call for GPT-3.5-turbo, llama2, or any litellm-supported LLM.
Parameters:
completion_response (litellm.ModelResponses): [Required] The response received from a LiteLLM completion request.
[OPTIONAL PARAMS]
model (str): Optional. The name of the language model used in the completion calls
prompt (str): Optional. The input prompt passed to the llm
completion (str): Optional. The output completion text from the llm
total_time (float): Optional. (Only used for Replicate LLMs) The total time used for the request in seconds
Returns:
float: The cost in USD dollars for the completion based on the provided parameters.
Note:
- If completion_response is provided, the function extracts token information and the model name from it.
- If completion_response is not provided, the function calculates token counts based on the model and input text.
- The cost is calculated based on the model, prompt tokens, and completion tokens.
- For certain models containing "togethercomputer" in the name, prices are based on the model size.
- For Replicate models, the cost is calculated based on the total time used for the request.
Exceptions:
- If an error occurs during execution, the function returns 0.0 without blocking the user's execution path.
"""
try:
# Handle Inputs to completion_cost
prompt_tokens = 0
completion_tokens = 0
if completion_response != None:
# get input/output tokens from completion_response
prompt_tokens = completion_response['usage']['prompt_tokens']
completion_tokens = completion_response['usage']['completion_tokens']
model = completion_response['model'] # get model from completion_response
else:
prompt_tokens = token_counter(model=model, text=prompt)
completion_tokens = token_counter(model=model, text=completion)
# Calculate cost based on prompt_tokens, completion_tokens
if "togethercomputer" in model:
# together ai prices based on size of llm
# get_model_params_and_category takes a model name and returns the category of LLM size it is in model_prices_and_context_window.json
model = get_model_params_and_category(model)
# replicate llms are calculate based on time for request running
# see https://replicate.com/pricing
elif (
model in litellm.replicate_models or
"replicate" in model
):
return get_replicate_completion_pricing(completion_response, total_time)
prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar = cost_per_token(
model=model, prompt_tokens=prompt_tokens, completion_tokens=completion_tokens
)
return prompt_tokens_cost_usd_dollar + completion_tokens_cost_usd_dollar
except:
return 0.0 # this should not block a users execution path
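# Example usage sketch (comments only; assumes `response` came from a prior completion() call):
#
#   cost_usd = completion_cost(completion_response=response)
#
#   # or estimate directly from raw strings, without a response object:
#   cost_usd = completion_cost(model="gpt-3.5-turbo", prompt="hi", completion="hello there")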
####### HELPER FUNCTIONS ################
def get_litellm_params(
return_async=False,
api_key=None,
force_timeout=600,
azure=False,
logger_fn=None,
verbose=False,
hugging_face=False,
replicate=False,
together_ai=False,
custom_llm_provider=None,
api_base=None,
litellm_call_id=None,
model_alias_map=None,
completion_call_id=None,
metadata=None
):
litellm_params = {
"return_async": return_async,
"api_key": api_key,
"force_timeout": force_timeout,
"logger_fn": logger_fn,
"verbose": verbose,
"custom_llm_provider": custom_llm_provider,
"api_base": api_base,
"litellm_call_id": litellm_call_id,
"model_alias_map": model_alias_map,
"completion_call_id": completion_call_id,
"metadata": metadata,
"stream_response": {} # litellm_call_id: ModelResponse Dict
}
return litellm_params
def get_optional_params( # use the openai defaults
# 12 optional params
functions=[],
function_call="",
temperature=1,
top_p=1,
n=1,
stream=False,
stop=None,
max_tokens=float("inf"),
presence_penalty=0,
frequency_penalty=0,
logit_bias={},
num_beams=1,
remove_input=False, # for nlp_cloud
user="",
deployment_id=None,
model=None,
custom_llm_provider="",
top_k=40,
return_full_text=False,
task=None
):
optional_params = {}
if model in litellm.anthropic_models:
# handle anthropic params
if stream:
optional_params["stream"] = stream
if stop != None:
optional_params["stop_sequences"] = stop
if temperature != 1:
optional_params["temperature"] = temperature
if top_p != 1:
optional_params["top_p"] = top_p
if max_tokens != float("inf"):
optional_params["max_tokens_to_sample"] = max_tokens
return optional_params
elif model in litellm.cohere_models:
# handle cohere params
if stream:
optional_params["stream"] = stream
if temperature != 1:
optional_params["temperature"] = temperature
if max_tokens != float("inf"):
optional_params["max_tokens"] = max_tokens
if logit_bias != {}:
optional_params["logit_bias"] = logit_bias
return optional_params
elif custom_llm_provider == "replicate":
if stream:
optional_params["stream"] = stream
return optional_params
if max_tokens != float("inf"):
if "vicuna" in model or "flan" in model:
optional_params["max_length"] = max_tokens
else:
optional_params["max_new_tokens"] = max_tokens
if temperature != 1:
optional_params["temperature"] = temperature
if top_p != 1:
optional_params["top_p"] = top_p
if top_k != 40:
optional_params["top_k"] = top_k
if stop != None:
optional_params["stop_sequences"] = stop
elif custom_llm_provider == "huggingface":
if temperature != 1:
optional_params["temperature"] = temperature
if top_p != 1:
optional_params["top_p"] = top_p
if n != 1:
optional_params["best_of"] = n
optional_params["do_sample"] = True # need to sample if you want best of for hf inference endpoints
if stream:
optional_params["stream"] = stream
if stop != None:
optional_params["stop"] = stop
if max_tokens != float("inf"):
optional_params["max_new_tokens"] = max_tokens
if presence_penalty != 0:
optional_params["repetition_penalty"] = presence_penalty
optional_params["return_full_text"] = return_full_text
optional_params["details"] = True
optional_params["task"] = task
elif custom_llm_provider == "together_ai":
if stream:
optional_params["stream_tokens"] = stream
if temperature != 1:
optional_params["temperature"] = temperature
if top_p != 1:
optional_params["top_p"] = top_p
if top_k != 40:
optional_params["top_k"] = top_k
if max_tokens != float("inf"):
optional_params["max_tokens"] = max_tokens
if frequency_penalty != 0:
optional_params["frequency_penalty"] = frequency_penalty # TODO: Check if should be repetition penalty
if stop != None:
optional_params["stop"] = stop #TG AI expects a list, example ["\n\n\n\n","<|endoftext|>"]
elif (
model in litellm.vertex_chat_models or model in litellm.vertex_code_chat_models
): # chat-bison has diff args from chat-bison@001, ty Google :)
if temperature != 1:
optional_params["temperature"] = temperature
if top_p != 1:
optional_params["top_p"] = top_p
if max_tokens != float("inf"):
optional_params["max_output_tokens"] = max_tokens
elif model in litellm.vertex_text_models:
# required params for all text vertex calls
# temperature=0.2, top_p=0.1, top_k=20
# always set temperature, top_p, top_k else, text bison fails
optional_params["temperature"] = temperature
optional_params["top_p"] = top_p
optional_params["top_k"] = top_k
if max_tokens != float("inf"):
optional_params["max_output_tokens"] = max_tokens
elif model in litellm.vertex_code_text_models:
optional_params["temperature"] = temperature
if max_tokens != float("inf"):
optional_params["max_output_tokens"] = max_tokens
elif custom_llm_provider == "baseten":
optional_params["temperature"] = temperature
optional_params["stream"] = stream
if top_p != 1:
optional_params["top_p"] = top_p
optional_params["top_k"] = top_k
optional_params["num_beams"] = num_beams
if max_tokens != float("inf"):
optional_params["max_new_tokens"] = max_tokens
elif custom_llm_provider == "sagemaker":
if "llama-2" in model:
# llama-2 models on sagemaker support the following args
"""
max_new_tokens: Model generates text until the output length (excluding the input context length) reaches max_new_tokens. If specified, it must be a positive integer.
temperature: Controls the randomness in the output. Higher temperature results in output sequence with low-probability words and lower temperature results in output sequence with high-probability words. If temperature -> 0, it results in greedy decoding. If specified, it must be a positive float.
top_p: In each step of text generation, sample from the smallest possible set of words with cumulative probability top_p. If specified, it must be a float between 0 and 1.
return_full_text: If True, input text will be part of the output generated text. If specified, it must be boolean. The default value for it is False.
"""
if max_tokens != float("inf"):
optional_params["max_new_tokens"] = max_tokens
if temperature != 1:
optional_params["temperature"] = temperature
if top_p != 1:
optional_params["top_p"] = top_p
elif custom_llm_provider == "bedrock":
if "ai21" in model or "anthropic" in model:
# params "maxTokens":200,"temperature":0,"topP":250,"stop_sequences":[],
# https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=j2-ultra
if max_tokens != float("inf"):
optional_params["maxTokens"] = max_tokens
if temperature != 1:
optional_params["temperature"] = temperature
if stop != None:
optional_params["stop_sequences"] = stop
if top_p != 1:
optional_params["topP"] = top_p
elif "amazon" in model: # amazon titan llms
# see https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=titan-large
if max_tokens != float("inf"):
optional_params["maxTokenCount"] = max_tokens
if temperature != 1:
optional_params["temperature"] = temperature
if stop != None:
optional_params["stopSequences"] = stop
if top_p != 1:
optional_params["topP"] = top_p
elif model in litellm.aleph_alpha_models:
if max_tokens != float("inf"):
optional_params["maximum_tokens"] = max_tokens
if stream:
optional_params["stream"] = stream
if temperature != 1:
optional_params["temperature"] = temperature
if top_k != 40:
optional_params["top_k"] = top_k
if top_p != 1:
optional_params["top_p"] = top_p
if presence_penalty != 0:
optional_params["presence_penalty"] = presence_penalty
if frequency_penalty != 0:
optional_params["frequency_penalty"] = frequency_penalty
if n != 1:
optional_params["n"] = n
if stop != None:
optional_params["stop_sequences"] = stop
elif model in litellm.nlp_cloud_models or custom_llm_provider == "nlp_cloud":
if max_tokens != float("inf"):
optional_params["max_length"] = max_tokens
if stream:
optional_params["stream"] = stream
if temperature != 1:
optional_params["temperature"] = temperature
if top_k != 40:
optional_params["top_k"] = top_k
if top_p != 1:
optional_params["top_p"] = top_p
if presence_penalty != 0:
optional_params["presence_penalty"] = presence_penalty
if frequency_penalty != 0:
optional_params["frequency_penalty"] = frequency_penalty
if num_beams != 1:
optional_params["num_beams"] = num_beams
if n != 1:
optional_params["num_return_sequences"] = n
if remove_input == True:
optional_params["remove_input"] = True
if stop != None:
optional_params["stop_sequences"] = stop
elif model in litellm.petals_models or custom_llm_provider == "petals":
# max_new_tokens=1,temperature=0.9, top_p=0.6
if max_tokens != float("inf"):
optional_params["max_new_tokens"] = max_tokens
if temperature != 1:
optional_params["temperature"] = temperature
if top_p != 1:
optional_params["top_p"] = top_p
else: # assume passing in params for openai/azure openai
if functions != []:
optional_params["functions"] = functions
if function_call != "":
optional_params["function_call"] = function_call
if temperature != 1:
optional_params["temperature"] = temperature
if top_p != 1:
optional_params["top_p"] = top_p
if n != 1:
optional_params["n"] = n
if stream:
optional_params["stream"] = stream
if stop != None:
optional_params["stop"] = stop
if max_tokens != float("inf"):
optional_params["max_tokens"] = max_tokens
if presence_penalty != 0:
optional_params["presence_penalty"] = presence_penalty
if frequency_penalty != 0:
optional_params["frequency_penalty"] = frequency_penalty
if logit_bias != {}:
optional_params["logit_bias"] = logit_bias
if user != "":
optional_params["user"] = user
if deployment_id != None:
optional_params["deployment_id"] = deployment_id
return optional_params
return optional_params
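# --- Illustrative sketch (hypothetical helper, not part of the library) ---
# Demonstrates how get_optional_params() maps OpenAI-style arguments onto
# provider-specific names: for an Anthropic model (assumed to be listed in
# litellm.anthropic_models), max_tokens becomes max_tokens_to_sample and stop
# becomes stop_sequences, while arguments left at their defaults are dropped.
def _example_optional_params_mapping():
    anthropic_params = get_optional_params(
        model="claude-instant-1",
        stop=["\n\nHuman:"],
        max_tokens=256,
        temperature=0.5,
    )
    # expected: {"stop_sequences": ["\n\nHuman:"], "temperature": 0.5,
    #            "max_tokens_to_sample": 256}
    return anthropic_params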
def get_llm_provider(model: str, custom_llm_provider: Optional[str] = None):
try:
# check if llm provider provided
if custom_llm_provider:
return model, custom_llm_provider
# check if llm provider part of model name
if model.split("/",1)[0] in litellm.provider_list:
custom_llm_provider = model.split("/", 1)[0]
model = model.split("/", 1)[1]
return model, custom_llm_provider
# check if model in known model provider list
## openai - chatcompletion + text completion
if model in litellm.open_ai_chat_completion_models:
custom_llm_provider = "openai"
elif model in litellm.open_ai_text_completion_models:
custom_llm_provider = "text-completion-openai"
## anthropic
elif model in litellm.anthropic_models:
custom_llm_provider = "anthropic"
## cohere
elif model in litellm.cohere_models:
custom_llm_provider = "cohere"
## replicate
elif model in litellm.replicate_models:
custom_llm_provider = "replicate"
## openrouter
elif model in litellm.openrouter_models:
custom_llm_provider = "openrouter"
## vertex - text + chat models
elif model in litellm.vertex_chat_models or model in litellm.vertex_text_models:
custom_llm_provider = "vertex_ai"
## huggingface
elif model in litellm.huggingface_models:
custom_llm_provider = "huggingface"
## ai21
elif model in litellm.ai21_models:
custom_llm_provider = "ai21"
## together_ai
elif model in litellm.together_ai_models:
custom_llm_provider = "together_ai"
## aleph_alpha
elif model in litellm.aleph_alpha_models:
custom_llm_provider = "aleph_alpha"
## baseten
elif model in litellm.baseten_models:
custom_llm_provider = "baseten"
## nlp_cloud
elif model in litellm.nlp_cloud_models:
custom_llm_provider = "nlp_cloud"
## petals
elif model in litellm.petals_models:
custom_llm_provider = "petals"
if custom_llm_provider is None or custom_llm_provider=="":
raise ValueError(f"LLM Provider NOT provided. Pass in the LLM provider you are trying to call. E.g. For 'Huggingface' inference endpoints pass in `completion(model='huggingface/{model}',..)` Learn more: https://docs.litellm.ai/docs/providers")
return model, custom_llm_provider
except Exception as e:
raise e
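# --- Illustrative sketch (hypothetical helper, not part of the library) ---
# Shows the two resolution paths in get_llm_provider(): an explicit
# "<provider>/<model>" prefix is split off when the prefix is in
# litellm.provider_list, while a bare model name is looked up against the
# known model lists above (e.g. litellm.anthropic_models).
def _example_llm_provider_resolution():
    model, provider = get_llm_provider("huggingface/bigcode/starcoder")
    # expected: model == "bigcode/starcoder", provider == "huggingface"
    model_2, provider_2 = get_llm_provider("claude-instant-1")
    # expected: provider_2 == "anthropic"
    return (model, provider), (model_2, provider_2)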
def get_api_key(llm_provider: str, dynamic_api_key: Optional[str]):
api_key = (dynamic_api_key or litellm.api_key)
# openai
if llm_provider == "openai" or llm_provider == "text-completion-openai":
api_key = (
api_key or
litellm.openai_key or
get_secret("OPENAI_API_KEY")
)
# anthropic
elif llm_provider == "anthropic":
api_key = (
api_key or
litellm.anthropic_key or
get_secret("ANTHROPIC_API_KEY")
)
# ai21
elif llm_provider == "ai21":
api_key = (
api_key or
litellm.ai21_key or
get_secret("AI21_API_KEY")
)
# aleph_alpha
elif llm_provider == "aleph_alpha":
api_key = (
api_key or
litellm.aleph_alpha_key or
get_secret("ALEPH_ALPHA_API_KEY")
)
# baseten
elif llm_provider == "baseten":
api_key = (
api_key or
litellm.baseten_key or
get_secret("BASETEN_API_KEY")
)
# cohere
elif llm_provider == "cohere":
api_key = (
api_key or
litellm.cohere_key or
get_secret("COHERE_API_KEY")
)
# huggingface
elif llm_provider == "huggingface":
api_key = (
api_key or
litellm.huggingface_key or
get_secret("HUGGINGFACE_API_KEY")
)
# nlp_cloud
elif llm_provider == "nlp_cloud":
api_key = (
api_key or
litellm.nlp_cloud_key or
get_secret("NLP_CLOUD_API_KEY")
)
# replicate
elif llm_provider == "replicate":
api_key = (
api_key or
litellm.replicate_key or
get_secret("REPLICATE_API_KEY")
)
# together_ai
elif llm_provider == "together_ai":
api_key = (
api_key or
litellm.togetherai_api_key or
get_secret("TOGETHERAI_API_KEY") or
get_secret("TOGETHER_AI_TOKEN")
)
return api_key
def get_max_tokens(model: str):
try:
return litellm.model_cost[model]
except:
raise Exception("This model isn't mapped yet. Add it here - https://github.com/BerriAI/litellm/blob/main/cookbook/community-resources/max_tokens.json")
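# --- Illustrative sketch (hypothetical helper, not part of the library) ---
# get_max_tokens() simply returns the litellm.model_cost entry for a mapped
# model (context window and per-token prices) and raises for unmapped models.
def _example_get_max_tokens_usage():
    try:
        return get_max_tokens("gpt-3.5-turbo")
    except Exception:
        return None  # model not mapped in litellm.model_cost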
def load_test_model(
model: str,
custom_llm_provider: str = "",
api_base: str = "",
prompt: str = "",
num_calls: int = 0,
force_timeout: int = 0,
):
test_prompt = "Hey, how's it going"
test_calls = 100
if prompt:
test_prompt = prompt
if num_calls:
test_calls = num_calls
messages = [[{"role": "user", "content": test_prompt}] for _ in range(test_calls)]
start_time = time.time()
try:
litellm.batch_completion(
model=model,
messages=messages,
custom_llm_provider=custom_llm_provider,
api_base=api_base,
force_timeout=force_timeout,
)
end_time = time.time()
response_time = end_time - start_time
return {
"total_response_time": response_time,
"calls_made": test_calls,
"status": "success",
"exception": None,
}
except Exception as e:
end_time = time.time()
response_time = end_time - start_time
return {
"total_response_time": response_time,
"calls_made": test_calls,
"status": "failed",
"exception": e,
}
def validate_environment(model: str) -> dict:
keys_in_environment = False
missing_keys = []
## EXTRACT LLM PROVIDER - if model name provided
custom_llm_provider = None
# check if llm provider part of model name
if model.split("/",1)[0] in litellm.provider_list:
custom_llm_provider = model.split("/", 1)[0]
model = model.split("/", 1)[1]
custom_llm_provider_passed_in = True
if custom_llm_provider:
if custom_llm_provider == "openai":
if "OPENAI_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("OPENAI_API_KEY")
elif custom_llm_provider == "azure":
if ("AZURE_API_BASE" in os.environ
and "AZURE_API_VERSION" in os.environ
and "AZURE_API_KEY" in os.environ):
keys_in_environment = True
else:
missing_keys.extend(["AZURE_API_BASE", "AZURE_API_VERSION", "AZURE_API_KEY"])
elif custom_llm_provider == "anthropic":
if "ANTHROPIC_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("ANTHROPIC_API_KEY")
elif custom_llm_provider == "cohere":
if "COHERE_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("COHERE_API_KEY")
elif custom_llm_provider == "replicate":
if "REPLICATE_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("REPLICATE_API_KEY")
elif custom_llm_provider == "openrouter":
if "OPENROUTER_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("OPENROUTER_API_KEY")
elif custom_llm_provider == "vertex_ai":
if ("VERTEXAI_PROJECT" in os.environ
and "VERTEXAI_LOCATION" in os.environ):
keys_in_environment = True
else:
missing_keys.extend(["VERTEXAI_PROJECT", "VERTEXAI_LOCATION"])
elif custom_llm_provider == "huggingface":
if "HUGGINGFACE_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("HUGGINGFACE_API_KEY")
elif custom_llm_provider == "ai21":
if "AI21_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("AI21_API_KEY")
elif custom_llm_provider == "together_ai":
if "TOGETHERAI_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("TOGETHERAI_API_KEY")
elif custom_llm_provider == "aleph_alpha":
if "ALEPH_ALPHA_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("ALEPH_ALPHA_API_KEY")
elif custom_llm_provider == "baseten":
if "BASETEN_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("BASETEN_API_KEY")
elif custom_llm_provider == "nlp_cloud":
if "NLP_CLOUD_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("NLP_CLOUD_API_KEY")
else:
## openai - chatcompletion + text completion
if model in litellm.open_ai_chat_completion_models or model in litellm.open_ai_text_completion_models:
if "OPENAI_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("OPENAI_API_KEY")
## anthropic
elif model in litellm.anthropic_models:
if "ANTHROPIC_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("ANTHROPIC_API_KEY")
## cohere
elif model in litellm.cohere_models:
if "COHERE_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("COHERE_API_KEY")
## replicate
elif model in litellm.replicate_models:
if "REPLICATE_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("REPLICATE_API_KEY")
## openrouter
elif model in litellm.openrouter_models:
if "OPENROUTER_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("OPENROUTER_API_KEY")
## vertex - text + chat models
elif model in litellm.vertex_chat_models or model in litellm.vertex_text_models:
if ("VERTEXAI_PROJECT" in os.environ
and "VERTEXAI_LOCATION" in os.environ):
keys_in_environment = True
else:
missing_keys.extend(["VERTEXAI_PROJECT", "VERTEXAI_LOCATION"])
## huggingface
elif model in litellm.huggingface_models:
if "HUGGINGFACE_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("HUGGINGFACE_API_KEY")
## ai21
elif model in litellm.ai21_models:
if "AI21_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("AI21_API_KEY")
## together_ai
elif model in litellm.together_ai_models:
if "TOGETHERAI_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("TOGETHERAI_API_KEY")
## aleph_alpha
elif model in litellm.aleph_alpha_models:
if "ALEPH_ALPHA_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("ALEPH_ALPHA_API_KEY")
## baseten
elif model in litellm.baseten_models:
if "BASETEN_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("BASETEN_API_KEY")
## nlp_cloud
elif model in litellm.nlp_cloud_models:
if "NLP_CLOUD_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("NLP_CLOUD_API_KEY")
return {"keys_in_environment": keys_in_environment, "missing_keys": missing_keys}
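# --- Illustrative sketch (hypothetical helper, not part of the library) ---
# validate_environment() reports whether the environment variables required by
# a model's provider are set, so a caller can surface the missing keys before
# issuing any completion call.
def _example_validate_environment_usage():
    report = validate_environment(model="command-nightly")  # assumed cohere model name
    if not report["keys_in_environment"]:
        print_verbose(f"missing environment variables: {report['missing_keys']}")
    return report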
def set_callbacks(callback_list, function_id=None):
global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, traceloopLogger, heliconeLogger, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger, promptLayerLogger, langFuseLogger, customLogger
try:
for callback in callback_list:
print_verbose(f"callback: {callback}")
if callback == "sentry":
try:
import sentry_sdk
except ImportError:
print_verbose("Package 'sentry_sdk' is missing. Installing it...")
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "sentry_sdk"]
)
import sentry_sdk
sentry_sdk_instance = sentry_sdk
sentry_trace_rate = (
os.environ.get("SENTRY_API_TRACE_RATE")
if "SENTRY_API_TRACE_RATE" in os.environ
else "1.0"
)
sentry_sdk_instance.init(
dsn=os.environ.get("SENTRY_API_URL"),
traces_sample_rate=float(sentry_trace_rate),
)
capture_exception = sentry_sdk_instance.capture_exception
add_breadcrumb = sentry_sdk_instance.add_breadcrumb
elif callback == "posthog":
try:
from posthog import Posthog
except ImportError:
print_verbose("Package 'posthog' is missing. Installing it...")
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "posthog"]
)
from posthog import Posthog
posthog = Posthog(
project_api_key=os.environ.get("POSTHOG_API_KEY"),
host=os.environ.get("POSTHOG_API_URL"),
)
elif callback == "slack":
try:
from slack_bolt import App
except ImportError:
print_verbose("Package 'slack_bolt' is missing. Installing it...")
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "slack_bolt"]
)
from slack_bolt import App
slack_app = App(
token=os.environ.get("SLACK_API_TOKEN"),
signing_secret=os.environ.get("SLACK_API_SECRET"),
)
alerts_channel = os.environ["SLACK_API_CHANNEL"]
print_verbose(f"Initialized Slack App: {slack_app}")
elif callback == "traceloop":
traceloopLogger = TraceloopLogger()
elif callback == "helicone":
heliconeLogger = HeliconeLogger()
elif callback == "llmonitor":
llmonitorLogger = LLMonitorLogger()
elif callback == "promptlayer":
promptLayerLogger = PromptLayerLogger()
elif callback == "langfuse":
langFuseLogger = LangFuseLogger()
elif callback == "aispend":
aispendLogger = AISpendLogger()
elif callback == "berrispend":
berrispendLogger = BerriSpendLogger()
elif callback == "supabase":
print_verbose(f"instantiating supabase")
supabaseClient = Supabase()
elif callback == "lite_debugger":
print_verbose(f"instantiating lite_debugger")
if function_id:
liteDebuggerClient = LiteDebugger(email=function_id)
elif litellm.token:
liteDebuggerClient = LiteDebugger(email=litellm.token)
elif litellm.email:
liteDebuggerClient = LiteDebugger(email=litellm.email)
else:
liteDebuggerClient = LiteDebugger(email=str(uuid.uuid4()))
elif callable(callback):
customLogger = CustomLogger(callback_func=callback)
except Exception as e:
raise e
def handle_failure(exception, traceback_exception, start_time, end_time, args, kwargs):
global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger
try:
# print_verbose(f"handle_failure args: {args}")
# print_verbose(f"handle_failure kwargs: {kwargs}")
success_handler = additional_details.pop("success_handler", None)
failure_handler = additional_details.pop("failure_handler", None)
additional_details["Event_Name"] = additional_details.pop(
"failed_event_name", "litellm.failed_query"
)
print_verbose(f"self.failure_callback: {litellm.failure_callback}")
for callback in litellm.failure_callback:
try:
if callback == "slack":
slack_msg = ""
if len(kwargs) > 0:
for key in kwargs:
slack_msg += f"{key}: {kwargs[key]}\n"
if len(args) > 0:
for i, arg in enumerate(args):
slack_msg += f"LiteLLM_Args_{str(i)}: {arg}"
for detail in additional_details:
slack_msg += f"{detail}: {additional_details[detail]}\n"
slack_msg += f"Traceback: {traceback_exception}"
slack_app.client.chat_postMessage(
channel=alerts_channel, text=slack_msg
)
elif callback == "sentry":
capture_exception(exception)
elif callback == "posthog":
print_verbose(
f"inside posthog, additional_details: {len(additional_details.keys())}"
)
ph_obj = {}
if len(kwargs) > 0:
ph_obj = kwargs
if len(args) > 0:
for i, arg in enumerate(args):
ph_obj["litellm_args_" + str(i)] = arg
for detail in additional_details:
ph_obj[detail] = additional_details[detail]
event_name = additional_details["Event_Name"]
print_verbose(f"ph_obj: {ph_obj}")
print_verbose(f"PostHog Event Name: {event_name}")
if "user_id" in additional_details:
posthog.capture(
additional_details["user_id"], event_name, ph_obj
)
else: # PostHog calls require a unique id to identify a user - https://posthog.com/docs/libraries/python
unique_id = str(uuid.uuid4())
posthog.capture(unique_id, event_name)
print_verbose(f"successfully logged to PostHog!")
elif callback == "berrispend":
print_verbose("reaches berrispend for logging!")
model = args[0] if len(args) > 0 else kwargs["model"]
messages = args[1] if len(args) > 1 else kwargs["messages"]
result = {
"model": model,
"created": time.time(),
"error": traceback_exception,
"usage": {
"prompt_tokens": prompt_token_calculator(
model, messages=messages
),
"completion_tokens": 0,
},
}
berrispendLogger.log_event(
model=model,
messages=messages,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
elif callback == "aispend":
print_verbose("reaches aispend for logging!")
model = args[0] if len(args) > 0 else kwargs["model"]
messages = args[1] if len(args) > 1 else kwargs["messages"]
result = {
"model": model,
"created": time.time(),
"usage": {
"prompt_tokens": prompt_token_calculator(
model, messages=messages
),
"completion_tokens": 0,
},
}
aispendLogger.log_event(
model=model,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
elif callback == "llmonitor":
print_verbose("reaches llmonitor for logging error!")
model = args[0] if len(args) > 0 else kwargs["model"]
input = (
args[1]
if len(args) > 1
else kwargs.get("messages", kwargs.get("input", None))
)
type = "embed" if "input" in kwargs else "llm"
llmonitorLogger.log_event(
type=type,
event="error",
user_id=litellm._thread_context.user,
model=model,
input=input,
error=traceback_exception,
run_id=kwargs["litellm_call_id"],
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
elif callback == "supabase":
print_verbose("reaches supabase for logging!")
print_verbose(f"supabaseClient: {supabaseClient}")
model = args[0] if len(args) > 0 else kwargs["model"]
messages = args[1] if len(args) > 1 else kwargs["messages"]
result = {
"model": model,
"created": time.time(),
"error": traceback_exception,
"usage": {
"prompt_tokens": prompt_token_calculator(
model, messages=messages
),
"completion_tokens": 0,
},
}
supabaseClient.log_event(
model=model,
messages=messages,
end_user=litellm._thread_context.user,
response_obj=result,
start_time=start_time,
end_time=end_time,
litellm_call_id=kwargs["litellm_call_id"],
print_verbose=print_verbose,
)
except:
print_verbose(
f"Error Occurred while logging failure: {traceback.format_exc()}"
)
pass
if failure_handler and callable(failure_handler):
call_details = {
"exception": exception,
"additional_details": additional_details,
}
failure_handler(call_details)
pass
except Exception as e:
# LOGGING
exception_logging(logger_fn=user_logger_fn, exception=e)
pass
def handle_success(args, kwargs, result, start_time, end_time):
global heliconeLogger, aispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger
try:
model = args[0] if len(args) > 0 else kwargs["model"]
input = (
args[1]
if len(args) > 1
else kwargs.get("messages", kwargs.get("input", None))
)
success_handler = additional_details.pop("success_handler", None)
failure_handler = additional_details.pop("failure_handler", None)
additional_details["Event_Name"] = additional_details.pop(
"successful_event_name", "litellm.success_query"
)
for callback in litellm.success_callback:
try:
if callback == "posthog":
ph_obj = {}
for detail in additional_details:
ph_obj[detail] = additional_details[detail]
event_name = additional_details["Event_Name"]
if "user_id" in additional_details:
posthog.capture(
additional_details["user_id"], event_name, ph_obj
)
else: # PostHog calls require a unique id to identify a user - https://posthog.com/docs/libraries/python
unique_id = str(uuid.uuid4())
posthog.capture(unique_id, event_name, ph_obj)
pass
elif callback == "slack":
slack_msg = ""
for detail in additional_details:
slack_msg += f"{detail}: {additional_details[detail]}\n"
slack_app.client.chat_postMessage(
channel=alerts_channel, text=slack_msg
)
elif callback == "helicone":
print_verbose("reaches helicone for logging!")
model = args[0] if len(args) > 0 else kwargs["model"]
messages = args[1] if len(args) > 1 else kwargs["messages"]
heliconeLogger.log_success(
model=model,
messages=messages,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
elif callback == "llmonitor":
print_verbose("reaches llmonitor for logging!")
model = args[0] if len(args) > 0 else kwargs["model"]
input = (
args[1]
if len(args) > 1
else kwargs.get("messages", kwargs.get("input", None))
)
# if contains input, it's 'embedding', otherwise 'llm'
type = "embed" if "input" in kwargs else "llm"
llmonitorLogger.log_event(
type=type,
event="end",
model=model,
input=input,
user_id=litellm._thread_context.user,
response_obj=result,
start_time=start_time,
end_time=end_time,
run_id=kwargs["litellm_call_id"],
print_verbose=print_verbose,
)
elif callback == "langfuse":
print_verbose("reaches langfuse for logging!")
langFuseLogger.log_event(
kwargs=kwargs,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
elif callback == "aispend":
print_verbose("reaches aispend for logging!")
model = args[0] if len(args) > 0 else kwargs["model"]
aispendLogger.log_event(
model=model,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
elif callback == "supabase":
print_verbose("reaches supabase for logging!")
model = args[0] if len(args) > 0 else kwargs["model"]
messages = (
args[1]
if len(args) > 1
else kwargs.get("messages", {"role": "user", "content": ""})
)
print_verbose(f"supabaseClient: {supabaseClient}")
supabaseClient.log_event(
model=model,
messages=messages,
end_user=litellm._thread_context.user,
response_obj=result,
start_time=start_time,
end_time=end_time,
litellm_call_id=kwargs["litellm_call_id"],
print_verbose=print_verbose,
)
elif callable(callback): # custom logger functions
customLogger.log_event(
kwargs=kwargs,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
except Exception as e:
# LOGGING
exception_logging(logger_fn=user_logger_fn, exception=e)
print_verbose(
f"[Non-Blocking] Success Callback Error - {traceback.format_exc()}"
)
pass
if success_handler and callable(success_handler):
success_handler(args, kwargs)
pass
except Exception as e:
# LOGGING
exception_logging(logger_fn=user_logger_fn, exception=e)
print_verbose(
f"[Non-Blocking] Success Callback Error - {traceback.format_exc()}"
)
pass
def acreate(*args, **kwargs): ## Thin client to handle the acreate langchain call
return litellm.acompletion(*args, **kwargs)
def prompt_token_calculator(model, messages):
# use tiktoken or anthropic's tokenizer depending on the model
text = " ".join(message["content"] for message in messages)
num_tokens = 0
if "claude" in model:
try:
import anthropic
except:
raise Exception("Anthropic import failed. Please run `pip install anthropic`")
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
anthropic = Anthropic()
num_tokens = anthropic.count_tokens(text)
else:
num_tokens = len(encoding.encode(text))
return num_tokens
def valid_model(model):
try:
# for a given model name, check if the user has the right permissions to access the model
if (
model in litellm.open_ai_chat_completion_models
or model in litellm.open_ai_text_completion_models
):
openai.Model.retrieve(model)
else:
messages = [{"role": "user", "content": "Hello World"}]
litellm.completion(model=model, messages=messages)
except:
raise InvalidRequestError(message=f"The model `{model}` is not a valid model name, or you do not have access to it.", model=model, llm_provider="")
# check valid api key
def check_valid_key(model: str, api_key: str):
# returns True if key is valid for the model
# returns False if key is invalid for the model
messages = [{"role": "user", "content": "Hey, how's it going?"}]
try:
litellm.completion(model=model, messages=messages, api_key=api_key, max_tokens=10)
return True
except AuthenticationError as e:
return False
except Exception as e:
return False
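# --- Illustrative sketch (hypothetical helper, not part of the library) ---
# check_valid_key() issues a tiny (max_tokens=10) test completion and returns
# True only if the supplied key is accepted for the given model.
def _example_check_valid_key_usage(candidate_key: str):
    # candidate_key is a placeholder argument; pass a real provider key to test it
    return check_valid_key(model="gpt-3.5-turbo", api_key=candidate_key)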
# integration helper function
def modify_integration(integration_name, integration_params):
global supabaseClient
if integration_name == "supabase":
if "table_name" in integration_params:
Supabase.supabase_table_name = integration_params["table_name"]
# custom prompt helper function
def register_prompt_template(model: str, roles: dict, initial_prompt_value: str = "", final_prompt_value: str = ""):
"""
Example usage:
```
import litellm
litellm.register_prompt_template(
model="llama-2",
roles={
"system": {
"pre_message": "[INST] <<SYS>>\n",
"post_message": "\n<</SYS>>\n [/INST]\n"
},
"user": { # follow this format https://github.com/facebookresearch/llama/blob/77062717054710e352a99add63d160274ce670c6/llama/generation.py#L348
"pre_message": "[INST] ",
"post_message": " [/INST]\n"
},
"assistant": {
"post_message": "\n" # follows this - https://replicate.com/blog/how-to-prompt-llama
}
}
)
```
"""
litellm.custom_prompt_dict[model] = {
"roles": roles,
"initial_prompt_value": initial_prompt_value,
"final_prompt_value": final_prompt_value
}
return litellm.custom_prompt_dict
####### [BETA] HOSTED PRODUCT ################ - https://docs.litellm.ai/docs/debugging/hosted_debugging
def get_all_keys(llm_provider=None):
try:
global last_fetched_at_keys
# if user is using hosted product -> instantiate their env with their hosted api keys - refresh every 5 minutes
print_verbose(f"Reaches get all keys, llm_provider: {llm_provider}")
user_email = (
os.getenv("LITELLM_EMAIL")
or litellm.email
or litellm.token
or os.getenv("LITELLM_TOKEN")
)
if user_email:
time_delta = 0
if last_fetched_at_keys != None:
current_time = time.time()
time_delta = current_time - last_fetched_at_keys
if (
time_delta > 300 or last_fetched_at_keys == None or llm_provider
): # if the llm provider is passed in , assume this happening due to an AuthError for that provider
# make the api call
last_fetched_at = time.time()
print_verbose(f"last_fetched_at: {last_fetched_at}")
response = requests.post(
url="http://api.litellm.ai/get_all_keys",
headers={"content-type": "application/json"},
data=json.dumps({"user_email": user_email}),
)
print_verbose(f"get model key response: {response.text}")
data = response.json()
# update model list
for key, value in data[
"model_keys"
].items(): # follows the LITELLM API KEY format - <UPPERCASE_PROVIDER_NAME>_API_KEY - e.g. HUGGINGFACE_API_KEY
os.environ[key] = value
# set model alias map
for model_alias, value in data["model_alias_map"].items():
litellm.model_alias_map[model_alias] = value
return "it worked!"
return None
return None
except:
print_verbose(
f"[Non-Blocking Error] get_all_keys error - {traceback.format_exc()}"
)
pass
def get_model_list():
global last_fetched_at
try:
# if user is using hosted product -> get their updated model list
user_email = (
os.getenv("LITELLM_EMAIL")
or litellm.email
or litellm.token
or os.getenv("LITELLM_TOKEN")
)
if user_email:
# make the api call
last_fetched_at = time.time()
print_verbose(f"last_fetched_at: {last_fetched_at}")
response = requests.post(
url="http://api.litellm.ai/get_model_list",
headers={"content-type": "application/json"},
data=json.dumps({"user_email": user_email}),
)
print_verbose(f"get_model_list response: {response.text}")
data = response.json()
# update model list
model_list = data["model_list"]
# # check if all model providers are in environment
# model_providers = data["model_providers"]
# missing_llm_provider = None
# for item in model_providers:
# if f"{item.upper()}_API_KEY" not in os.environ:
# missing_llm_provider = item
# break
# # update environment - if required
# threading.Thread(target=get_all_keys, args=(missing_llm_provider)).start()
return model_list
return [] # return empty list by default
except:
print_verbose(
f"[Non-Blocking Error] get_model_list error - {traceback.format_exc()}"
)
####### EXCEPTION MAPPING ################
def exception_type(
model,
original_exception,
custom_llm_provider,
completion_kwargs={},
):
global user_logger_fn, liteDebuggerClient
exception_mapping_worked = False
if litellm.set_verbose == True:
litellm.error_logs['EXCEPTION'] = original_exception
litellm.error_logs['KWARGS'] = completion_kwargs
try:
# code to show users their litellm error dashboard
import urllib.parse
import json
for log_key in litellm.error_logs:
current_logs = litellm.error_logs[log_key]
if type(current_logs) == dict:
filtered_error_logs = {key: str(value) for key, value in current_logs.items()}
litellm.error_logs[log_key] = filtered_error_logs
else:
litellm.error_logs[log_key] = str(current_logs)
# Convert the filtered_error_logs dictionary to a JSON string
error_logs_json = json.dumps(litellm.error_logs)
# URL-encode the JSON data
encoded_data = urllib.parse.quote(error_logs_json)
print("👉 view error logs:")
print("\033[91m" + '\033[4m' + 'https://logs.litellm.ai/?data=' + str(encoded_data) + "\033[0m")
except:
pass
try:
if isinstance(original_exception, OriginalError):
# Handle the OpenAIError
exception_mapping_worked = True
if model in litellm.openrouter_models:
if original_exception.http_status == 413:
raise InvalidRequestError(
message=str(original_exception),
model=model,
llm_provider="openrouter"
)
original_exception.llm_provider = "openrouter"
elif custom_llm_provider == "azure":
original_exception.llm_provider = "azure"
else:
original_exception.llm_provider = "openai"
if "This model's maximum context length is" in original_exception._message:
raise ContextWindowExceededError(
message=str(original_exception),
model=model,
llm_provider=original_exception.llm_provider
)
raise original_exception
elif model:
error_str = str(original_exception)
if isinstance(original_exception, BaseException):
exception_type = type(original_exception).__name__
else:
exception_type = ""
if custom_llm_provider == "anthropic": # one of the anthropics
if hasattr(original_exception, "message"):
if "prompt is too long" in original_exception.message:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=original_exception.message,
model=model,
llm_provider="anthropic"
)
if "Invalid API Key" in original_exception.message:
exception_mapping_worked = True
raise AuthenticationError(
message=original_exception.message,
model=model,
llm_provider="anthropic"
)
if hasattr(original_exception, "status_code"):
print_verbose(f"status_code: {original_exception.status_code}")
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"AnthropicException - {original_exception.message}",
llm_provider="anthropic",
model=model
)
elif original_exception.status_code == 400:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"AnthropicException - {original_exception.message}",
model=model,
llm_provider="anthropic",
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"AnthropicException - {original_exception.message}",
model=model,
llm_provider="anthropic"
)
elif original_exception.status_code == 413:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"AnthropicException - {original_exception.message}",
model=model,
llm_provider="anthropic",
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"AnthropicException - {original_exception.message}",
llm_provider="anthropic",
model=model
)
elif original_exception.status_code == 500:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"AnthropicException - {original_exception.message}",
llm_provider="anthropic",
model=model
)
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"AnthropicException - {original_exception.message}",
llm_provider="anthropic",
model=model
)
elif custom_llm_provider == "replicate":
if "Incorrect authentication token" in error_str:
exception_mapping_worked = True
raise AuthenticationError(
message=f"ReplicateException - {error_str}",
llm_provider="replicate",
model=model
)
elif "input is too long" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"ReplicateException - {error_str}",
model=model,
llm_provider="replicate",
)
elif exception_type == "ModelError":
exception_mapping_worked = True
raise InvalidRequestError(
message=f"ReplicateException - {error_str}",
model=model,
llm_provider="replicate",
)
elif "Request was throttled" in error_str:
exception_mapping_worked = True
raise RateLimitError(
message=f"ReplicateException - {error_str}",
llm_provider="replicate",
model=model
)
elif hasattr(original_exception, "status_code"):
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"ReplicateException - {original_exception.message}",
llm_provider="replicate",
model=model
)
elif original_exception.status_code == 400 or original_exception.status_code == 422:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"ReplicateException - {original_exception.message}",
model=model,
llm_provider="replicate",
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"ReplicateException - {original_exception.message}",
model=model,
llm_provider="replicate"
)
elif original_exception.status_code == 413:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"ReplicateException - {original_exception.message}",
model=model,
llm_provider="replicate",
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"ReplicateException - {original_exception.message}",
llm_provider="replicate",
model=model
)
elif original_exception.status_code == 500:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"ReplicateException - {original_exception.message}",
llm_provider="replicate",
model=model
)
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"ReplicateException - {original_exception.message}",
llm_provider="replicate",
model=model
)
elif custom_llm_provider == "bedrock":
if "Unable to locate credentials" in error_str:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"BedrockException - {error_str}",
model=model,
llm_provider="bedrock"
)
elif custom_llm_provider == "sagemaker":
if "Unable to locate credentials" in error_str:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"SagemakerException - {error_str}",
model=model,
llm_provider="sagemaker"
)
elif custom_llm_provider == "vertex_ai":
if "Vertex AI API has not been used in project" in error_str or "Unable to find your project" in error_str:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"VertexAIException - {error_str}",
model=model,
llm_provider="vertex_ai"
)
elif custom_llm_provider == "cohere": # Cohere
if (
"invalid api token" in error_str
or "No API key provided." in error_str
):
exception_mapping_worked = True
raise AuthenticationError(
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model
)
elif "too many tokens" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"CohereException - {original_exception.message}",
model=model,
llm_provider="cohere",
)
elif hasattr(original_exception, "status_code"):
if original_exception.status_code == 400 or original_exception.status_code == 498:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model
)
elif original_exception.status_code == 500:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model
)
elif (
"CohereConnectionError" in exception_type
): # cohere seems to fire these errors when we load test it (1k+ messages / min)
exception_mapping_worked = True
raise RateLimitError(
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model
)
elif "invalid type:" in error_str:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model
)
elif "Unexpected server error" in error_str:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model
)
else:
if hasattr(original_exception, "status_code"):
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model
)
raise original_exception
elif custom_llm_provider == "huggingface":
if "length limit exceeded" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=error_str,
model=model,
llm_provider="huggingface"
)
elif "A valid user token is required" in error_str:
exception_mapping_worked = True
raise InvalidRequestError(
message=error_str,
llm_provider="huggingface",
model=model
)
if hasattr(original_exception, "status_code"):
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"HuggingfaceException - {original_exception.message}",
llm_provider="huggingface",
model=model
)
elif original_exception.status_code == 400:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"HuggingfaceException - {original_exception.message}",
model=model,
llm_provider="huggingface",
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"HuggingfaceException - {original_exception.message}",
model=model,
llm_provider="huggingface"
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"HuggingfaceException - {original_exception.message}",
llm_provider="huggingface",
model=model
)
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"HuggingfaceException - {original_exception.message}",
llm_provider="huggingface",
model=model
)
exception_mapping_worked = True
raise APIError(status_code=500, message=error_str, model=model, llm_provider=custom_llm_provider)
elif custom_llm_provider == "ai21":
if hasattr(original_exception, "message"):
if "Prompt has too many tokens" in original_exception.message:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"AI21Exception - {original_exception.message}",
model=model,
llm_provider="ai21"
)
if "Bad or missing API token." in original_exception.message:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"AI21Exception - {original_exception.message}",
model=model,
llm_provider="ai21"
)
if hasattr(original_exception, "status_code"):
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"AI21Exception - {original_exception.message}",
llm_provider="ai21",
model=model
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"AI21Exception - {original_exception.message}",
model=model,
llm_provider="ai21"
)
elif original_exception.status_code == 422:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"AI21Exception - {original_exception.message}",
model=model,
llm_provider="ai21",
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"AI21Exception - {original_exception.message}",
llm_provider="ai21",
model=model
)
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"AI21Exception - {original_exception.message}",
llm_provider="ai21",
model=model
)
elif custom_llm_provider == "nlp_cloud":
if "detail" in error_str:
if "Input text length should not exceed" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"NLPCloudException - {error_str}",
model=model,
llm_provider="nlp_cloud"
)
elif "value is not a valid" in error_str:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"NLPCloudException - {error_str}",
model=model,
llm_provider="nlp_cloud"
)
else:
exception_mapping_worked = True
raise APIError(
status_code=500,
message=f"NLPCloudException - {error_str}",
model=model,
llm_provider="nlp_cloud"
)
if hasattr(original_exception, "status_code"): # https://docs.nlpcloud.com/?shell#errors
if original_exception.status_code == 400 or original_exception.status_code == 406 or original_exception.status_code == 413 or original_exception.status_code == 422:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"NLPCloudException - {original_exception.message}",
llm_provider="nlp_cloud",
model=model
)
elif original_exception.status_code == 401 or original_exception.status_code == 403:
exception_mapping_worked = True
raise AuthenticationError(
message=f"NLPCloudException - {original_exception.message}",
llm_provider="nlp_cloud",
model=model
)
elif original_exception.status_code == 522 or original_exception.status_code == 524:
exception_mapping_worked = True
raise Timeout(
message=f"NLPCloudException - {original_exception.message}",
model=model,
llm_provider="nlp_cloud"
)
elif original_exception.status_code == 429 or original_exception.status_code == 402:
exception_mapping_worked = True
raise RateLimitError(
message=f"NLPCloudException - {original_exception.message}",
llm_provider="nlp_cloud",
model=model
)
elif original_exception.status_code == 500 or original_exception.status_code == 503:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"NLPCloudException - {original_exception.message}",
llm_provider="nlp_cloud",
model=model
)
elif original_exception.status_code == 504 or original_exception.status_code == 520:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"NLPCloudException - {original_exception.message}",
model=model,
llm_provider="nlp_cloud"
)
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"NLPCloudException - {original_exception.message}",
llm_provider="nlp_cloud",
model=model
)
elif custom_llm_provider == "together_ai":
import json
error_response = json.loads(error_str)
if "error" in error_response and "`inputs` tokens + `max_new_tokens` must be <=" in error_response["error"]:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"TogetherAIException - {error_response['error']}",
model=model,
llm_provider="together_ai"
)
elif "error" in error_response and "invalid private key" in error_response["error"]:
exception_mapping_worked = True
raise AuthenticationError(
message=f"TogetherAIException - {error_response['error']}",
llm_provider="together_ai",
model=model
)
elif "error" in error_response and "INVALID_ARGUMENT" in error_response["error"]:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"TogetherAIException - {error_response['error']}",
model=model,
llm_provider="together_ai"
)
elif "error" in error_response and "API key doesn't match expected format." in error_response["error"]:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"TogetherAIException - {error_response['error']}",
model=model,
llm_provider="together_ai"
)
elif "error_type" in error_response and error_response["error_type"] == "validation":
exception_mapping_worked = True
raise InvalidRequestError(
message=f"TogetherAIException - {error_response['error']}",
model=model,
llm_provider="together_ai"
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"TogetherAIException - {original_exception.message}",
model=model,
llm_provider="together_ai"
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"TogetherAIException - {original_exception.message}",
llm_provider="together_ai",
model=model
)
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"TogetherAIException - {original_exception.message}",
llm_provider="together_ai",
model=model
)
elif custom_llm_provider == "aleph_alpha":
if "This is longer than the model's maximum context length" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model
)
elif "InvalidToken" in error_str or "No token provided" in error_str:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model
)
elif hasattr(original_exception, "status_code"):
print_verbose(f"status code: {original_exception.status_code}")
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model
)
elif original_exception.status_code == 400:
exception_mapping_worked = True
raise InvalidRequestError(
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model
)
elif original_exception.status_code == 500:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model
)
raise original_exception
raise original_exception
elif custom_llm_provider == "vllm":
if hasattr(original_exception, "status_code"):
if original_exception.status_code == 0:
exception_mapping_worked = True
raise APIConnectionError(
message=f"VLLMException - {original_exception.message}",
llm_provider="vllm",
model=model
)
elif custom_llm_provider == "ollama":
if "no attribute 'async_get_ollama_response_stream" in error_str:
raise ImportError("Import error - trying to use async for ollama. import async_generator failed. Try 'pip install async_generator'")
exception_mapping_worked = True
raise APIError(status_code=500, message=str(original_exception), llm_provider=custom_llm_provider, model=model)
except Exception as e:
# LOGGING
exception_logging(
logger_fn=user_logger_fn,
additional_args={
"exception_mapping_worked": exception_mapping_worked,
"original_exception": original_exception,
},
exception=e,
)
## AUTH ERROR
if isinstance(e, AuthenticationError) and (
litellm.email or "LITELLM_EMAIL" in os.environ
):
threading.Thread(target=get_all_keys, args=(e.llm_provider,)).start()
# don't let an error with mapping interrupt the user from receiving an error from the llm api calls
if exception_mapping_worked:
raise e
else:
raise original_exception
####### CRASH REPORTING ################
def safe_crash_reporting(model=None, exception=None, custom_llm_provider=None):
data = {
"model": model,
"exception": str(exception),
"custom_llm_provider": custom_llm_provider,
}
threading.Thread(target=litellm_telemetry, args=(data,)).start()
def get_or_generate_uuid():
temp_dir = os.path.join(os.path.abspath(os.sep), "tmp")
uuid_file = os.path.join(temp_dir, "litellm_uuid.txt")
try:
# Try to open the file and load the UUID
with open(uuid_file, "r") as file:
uuid_value = file.read()
if uuid_value:
uuid_value = uuid_value.strip()
else:
raise FileNotFoundError
except FileNotFoundError:
# Generate a new UUID if the file doesn't exist or is empty
try:
new_uuid = uuid.uuid4()
uuid_value = str(new_uuid)
with open(uuid_file, "w") as file:
file.write(uuid_value)
except: # if writing to tmp/litellm_uuid.txt fails, retry writing to litellm_uuid.txt in the current directory
try:
new_uuid = uuid.uuid4()
uuid_value = str(new_uuid)
with open("litellm_uuid.txt", "w") as file:
file.write(uuid_value)
except: # if this 3rd attempt fails just pass
# Good first issue for someone to improve this function :)
return
except:
# [Non-Blocking Error]
return
return uuid_value
def litellm_telemetry(data):
# Load or generate the UUID
uuid_value = ""
try:
uuid_value = get_or_generate_uuid()
except:
uuid_value = str(uuid.uuid4())
try:
# Prepare the data to send to litellm logging api
payload = {
"uuid": uuid_value,
"data": data,
"version": importlib.metadata.version("litellm"),
}
# Make the POST request to litellm logging api
response = requests.post(
"https://litellm.berri.ai/logging",
headers={"Content-Type": "application/json"},
json=payload,
)
response.raise_for_status() # Raise an exception for HTTP errors
except:
# [Non-Blocking Error]
return
######### Secret Manager ############################
# checks if user has passed in a secret manager client
# if passed in then checks the secret there
def get_secret(secret_name):
if litellm.secret_manager_client != None:
# TODO: check which secret manager is being used
# currently only supports Infisical
try:
secret = litellm.secret_manager_client.get_secret(secret_name).secret_value
except:
secret = None
return secret
else:
return os.environ.get(secret_name)
######## Streaming Class ############################
# wraps the completion stream to return the correct format for the model
# replicate/anthropic/cohere
class CustomStreamWrapper:
def __init__(self, completion_stream, model, custom_llm_provider=None, logging_obj=None):
self.model = model
self.custom_llm_provider = custom_llm_provider
self.logging_obj = logging_obj
self.completion_stream = completion_stream
self.sent_first_chunk = False
self.sent_last_chunk = False
if self.logging_obj:
# Log the type of the received item
self.logging_obj.post_call(str(type(completion_stream)))
def __iter__(self):
return self
def __aiter__(self):
return self
def logging(self, text):
if self.logging_obj:
self.logging_obj.post_call(text)
def handle_anthropic_chunk(self, chunk):
str_line = chunk.decode("utf-8") # Convert bytes to string
print(f"str_line: {str_line}")
text = ""
is_finished = False
finish_reason = None
if str_line.startswith("data:"):
data_json = json.loads(str_line[5:])
text = data_json.get("completion", "")
if data_json.get("stop_reason", None):
is_finished = True
finish_reason = data_json["stop_reason"]
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
elif "error" in str_line:
raise ValueError(f"Unable to parse response. Original response: {str_line}")
else:
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
def handle_together_ai_chunk(self, chunk):
chunk = chunk.decode("utf-8")
text = ""
is_finished = False
finish_reason = None
if "text" in chunk:
text_index = chunk.find('"text":"') # this checks if text: exists
text_start = text_index + len('"text":"')
text_end = chunk.find('"}', text_start)
if text_index != -1 and text_end != -1:
extracted_text = chunk[text_start:text_end]
text = extracted_text
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
elif "[DONE]" in chunk:
return {"text": text, "is_finished": True, "finish_reason": "stop"}
elif "error" in chunk:
raise ValueError(chunk)
else:
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
def handle_huggingface_chunk(self, chunk):
chunk = chunk.decode("utf-8")
text = ""
is_finished = False
finish_reason = ""
if chunk.startswith("data:"):
data_json = json.loads(chunk[5:])
print(f"data json: {data_json}")
if "token" in data_json and "text" in data_json["token"]:
text = data_json["token"]["text"]
if "meta-llama/Llama-2" in self.model: #clean eos tokens like </s> from the returned output text
if any(token in text for token in llama_2_special_tokens):
text = text.replace("<s>", "").replace("</s>", "")
if data_json.get("details", False) and data_json["details"].get("finish_reason", False):
is_finished = True
finish_reason = data_json["details"]["finish_reason"]
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
elif "error" in chunk:
raise ValueError(chunk)
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
def handle_ai21_chunk(self, chunk): # fake streaming
chunk = chunk.decode("utf-8")
data_json = json.loads(chunk)
try:
text = data_json["completions"][0]["data"]["text"]
is_finished = True
finish_reason = "stop"
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
def handle_nlp_cloud_chunk(self, chunk):
chunk = chunk.decode("utf-8")
data_json = json.loads(chunk)
try:
text = data_json["generated_text"]
is_finished = True
finish_reason = "stop"
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
def handle_aleph_alpha_chunk(self, chunk):
chunk = chunk.decode("utf-8")
data_json = json.loads(chunk)
try:
text = data_json["completions"][0]["completion"]
is_finished = True
finish_reason = "stop"
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
def handle_cohere_chunk(self, chunk):
chunk = chunk.decode("utf-8")
data_json = json.loads(chunk)
try:
text = ""
is_finished = False
finish_reason = ""
if "text" in data_json:
text = data_json["text"]
elif "is_finished" in data_json:
is_finished = data_json["is_finished"]
finish_reason = data_json["finish_reason"]
else:
raise Exception(data_json)
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
def handle_replicate_chunk(self, chunk):
print(f"chunk: {chunk}")
try:
text = ""
is_finished = False
finish_reason = ""
if "output" in chunk:
text = chunk['output']
if "status" in chunk:
if chunk["status"] == "succeeded":
is_finished = True
finish_reason = "stop"
elif chunk.get("error", None):
raise Exception(chunk["error"])
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
def handle_openai_text_completion_chunk(self, chunk):
try:
print(f"chunk: {chunk}")
return chunk["choices"][0]["text"]
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
def handle_openai_chat_completion_chunk(self, chunk):
try:
return chunk["choices"][0]["delta"]["content"]
except:
return ""
def handle_baseten_chunk(self, chunk):
try:
chunk = chunk.decode("utf-8")
if len(chunk) > 0:
if chunk.startswith("data:"):
data_json = json.loads(chunk[5:])
if "token" in data_json and "text" in data_json["token"]:
return data_json["token"]["text"]
else:
return ""
data_json = json.loads(chunk)
if "model_output" in data_json:
if isinstance(data_json["model_output"], dict) and "data" in data_json["model_output"] and isinstance(data_json["model_output"]["data"], list):
return data_json["model_output"]["data"][0]
elif isinstance(data_json["model_output"], str):
return data_json["model_output"]
elif "completion" in data_json and isinstance(data_json["completion"], str):
return data_json["completion"]
else:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
else:
return ""
else:
return ""
except:
traceback.print_exc()
return ""
def handle_bedrock_stream(self, chunk):
chunk = chunk.get('chunk')
if chunk:
chunk_data = json.loads(chunk.get('bytes').decode())
text = ""
is_finished = False
finish_reason = ""
if "outputText" in chunk_data:
text = chunk_data['outputText']
if chunk_data.get("completionReason", None):
is_finished = True
finish_reason = chunk_data["completionReason"]
elif chunk.get("error", None):
raise Exception(chunk["error"])
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
return ""
## needs to handle the empty string case (even starting chunk can be an empty string)
def __next__(self):
model_response = ModelResponse(stream=True, model=self.model)
try:
while True: # loop until a non-empty string is found
# return this for all models
completion_obj = {"content": ""}
if self.custom_llm_provider and self.custom_llm_provider == "anthropic":
chunk = next(self.completion_stream)
response_obj = self.handle_anthropic_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.model == "replicate" or self.custom_llm_provider == "replicate":
chunk = next(self.completion_stream)
response_obj = self.handle_replicate_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif (
self.custom_llm_provider and self.custom_llm_provider == "together_ai"):
chunk = next(self.completion_stream)
response_obj = self.handle_together_ai_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider and self.custom_llm_provider == "huggingface":
chunk = next(self.completion_stream)
response_obj = self.handle_huggingface_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider and self.custom_llm_provider == "baseten": # baseten doesn't provide streaming
chunk = next(self.completion_stream)
completion_obj["content"] = self.handle_baseten_chunk(chunk)
elif self.custom_llm_provider and self.custom_llm_provider == "ai21": #ai21 doesn't provide streaming
chunk = next(self.completion_stream)
response_obj = self.handle_ai21_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider and self.custom_llm_provider == "vllm":
chunk = next(self.completion_stream)
completion_obj["content"] = chunk[0].outputs[0].text
elif self.custom_llm_provider and self.custom_llm_provider == "aleph_alpha": #aleph alpha doesn't provide streaming
chunk = next(self.completion_stream)
response_obj = self.handle_aleph_alpha_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider and self.custom_llm_provider == "text-completion-openai":
chunk = next(self.completion_stream)
completion_obj["content"] = self.handle_openai_text_completion_chunk(chunk)
elif self.model in litellm.nlp_cloud_models or self.custom_llm_provider == "nlp_cloud":
try:
chunk = next(self.completion_stream)
response_obj = self.handle_nlp_cloud_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
except Exception as e:
if self.sent_last_chunk:
raise e
else:
if self.sent_first_chunk is False:
raise Exception("An unknown error occurred with the stream")
model_response.choices[0].finish_reason = "stop"
self.sent_last_chunk = True
elif self.custom_llm_provider and self.custom_llm_provider == "vertex_ai":
try:
chunk = next(self.completion_stream)
completion_obj["content"] = str(chunk)
except StopIteration as e:
if self.sent_last_chunk:
raise e
else:
model_response.choices[0].finish_reason = "stop"
self.sent_last_chunk = True
elif self.custom_llm_provider == "cohere":
chunk = next(self.completion_stream)
response_obj = self.handle_cohere_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider == "bedrock":
chunk = next(self.completion_stream)
response_obj = self.handle_bedrock_stream(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider == "sagemaker":
if len(self.completion_stream)==0:
if self.sent_last_chunk:
raise StopIteration
else:
model_response.choices[0].finish_reason = "stop"
self.sent_last_chunk = True
chunk_size = 30
new_chunk = self.completion_stream[:chunk_size]
completion_obj["content"] = new_chunk
self.completion_stream = self.completion_stream[chunk_size:]
time.sleep(0.05)
else: # openai chat/azure models
chunk = next(self.completion_stream)
model_response = chunk
# LOGGING
threading.Thread(target=self.logging_obj.success_handler, args=(completion_obj,)).start()
return model_response
# LOGGING
threading.Thread(target=self.logging_obj.success_handler, args=(completion_obj,)).start()
model_response.model = self.model
if len(completion_obj["content"]) > 0: # cannot set content of an OpenAI Object to be an empty string
if self.sent_first_chunk == False:
completion_obj["role"] = "assistant"
self.sent_first_chunk = True
model_response.choices[0].delta = Delta(**completion_obj)
return model_response
elif model_response.choices[0].finish_reason:
return model_response
except StopIteration:
raise StopIteration
except Exception as e:
e.message = str(e)
return exception_type(model=self.model, custom_llm_provider=self.custom_llm_provider, original_exception=e)
async def __anext__(self):
try:
return next(self)
except StopIteration:
raise StopAsyncIteration
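# Illustrative usage sketch (hypothetical): shows how a CustomStreamWrapper is
# consumed once litellm.completion(..., stream=True) has constructed it; the
# helper name below is a placeholder and not part of the litellm API.
def _example_consume_stream(wrapper: CustomStreamWrapper) -> str:
    # Collect the streamed delta content into one string.
    collected = []
    for chunk in wrapper:  # each chunk is a ModelResponse (or a raw OpenAI chunk)
        delta = chunk.choices[0].delta
        collected.append(getattr(delta, "content", None) or "")
    return "".join(collected)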
def mock_completion_streaming_obj(model_response, mock_response, model):
for i in range(0, len(mock_response), 3):
completion_obj = {"role": "assistant", "content": mock_response[i: i+3]}
model_response.choices[0].delta = completion_obj
yield model_response
########## Reading Config File ############################
def read_config_args(config_path) -> dict:
try:
import os
current_path = os.getcwd()
with open(config_path, "r") as config_file:
config = json.load(config_file)
# read keys/ values from config file and return them
return config
except Exception as e:
raise e
########## experimental completion variants ############################
def completion_with_config(config: Union[dict, str], **kwargs):
if config is not None:
if isinstance(config, str):
config = read_config_args(config)
elif isinstance(config, dict):
config = config
else:
raise Exception("Config path must be a string or a dictionary.")
else:
raise Exception("Config path not passed in.")
if config is None:
raise Exception("No completion config in the config file")
models_with_config = config["model"].keys()
model = kwargs["model"]
messages = kwargs["messages"]
## completion config
fallback_models = config.get("default_fallback_models", None)
available_models = config.get("available_models", None)
adapt_to_prompt_size = config.get("adapt_to_prompt_size", False)
trim_messages_flag = config.get("trim_messages", False)
prompt_larger_than_model = False
max_model = model
try:
max_tokens = litellm.get_max_tokens(model)["max_tokens"]
except:
max_tokens = 2048 # assume curr model's max window is 2048 tokens
if adapt_to_prompt_size:
## Pick model based on token window
prompt_tokens = litellm.token_counter(model="gpt-3.5-turbo", text="".join(message["content"] for message in messages))
try:
curr_max_tokens = litellm.get_max_tokens(model)["max_tokens"]
except:
curr_max_tokens = 2048
if curr_max_tokens < prompt_tokens:
prompt_larger_than_model = True
for available_model in available_models:
try:
curr_max_tokens = litellm.get_max_tokens(available_model)["max_tokens"]
if curr_max_tokens > max_tokens:
max_tokens = curr_max_tokens
max_model = available_model
if curr_max_tokens > prompt_tokens:
model = available_model
prompt_larger_than_model = False
except:
continue
if prompt_larger_than_model:
messages = trim_messages(messages=messages, model=max_model)
kwargs["messages"] = messages
kwargs["model"] = model
try:
if model in models_with_config:
## Moderation check
if config["model"][model].get("needs_moderation"):
input = " ".join(message["content"] for message in messages)
response = litellm.moderation(input=input)
flagged = response["results"][0]["flagged"]
if flagged:
raise Exception("This response was flagged as inappropriate")
## Model-specific Error Handling
error_handling = None
if config["model"][model].get("error_handling"):
error_handling = config["model"][model]["error_handling"]
try:
response = litellm.completion(**kwargs)
return response
except Exception as e:
exception_name = type(e).__name__
fallback_model = None
if error_handling and exception_name in error_handling:
error_handler = error_handling[exception_name]
# either switch model or api key
fallback_model = error_handler.get("fallback_model", None)
if fallback_model:
kwargs["model"] = fallback_model
return litellm.completion(**kwargs)
raise e
else:
return litellm.completion(**kwargs)
except Exception as e:
if fallback_models:
model = fallback_models.pop(0)
return completion_with_fallbacks(model=model, messages=messages, fallbacks=fallback_models)
raise e
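# Illustrative config sketch (hypothetical): the keys mirror the config.get(...)
# reads in completion_with_config above; the model names, the moderation flag and
# the RateLimitError fallback are placeholders, not recommended settings.
_EXAMPLE_COMPLETION_CONFIG = {
    "default_fallback_models": ["gpt-3.5-turbo"],
    "available_models": ["gpt-3.5-turbo", "gpt-4"],
    "adapt_to_prompt_size": True,
    "trim_messages": False,
    "model": {
        "gpt-4": {
            "needs_moderation": True,
            "error_handling": {"RateLimitError": {"fallback_model": "gpt-3.5-turbo"}},
        }
    },
}
# e.g. completion_with_config(config=_EXAMPLE_COMPLETION_CONFIG, model="gpt-4",
#                             messages=[{"role": "user", "content": "Hey, how's it going?"}])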
def get_model_split_test(models, completion_call_id):
global last_fetched_at
try:
# make the api call
last_fetched_at = time.time()
response = requests.post(
#http://api.litellm.ai
url="http://api.litellm.ai/get_model_split_test", # get the updated dict from table or update the table with the dict
headers={"content-type": "application/json"},
data=json.dumps({"completion_call_id": completion_call_id, "models": models}),
)
print_verbose(f"get_model_list response: {response.text}")
data = response.json()
# update model list
split_test_models = data["split_test_models"]
model_configs = data.get("model_configs", {})
# update environment - if required
threading.Thread(target=get_all_keys, args=()).start()
return split_test_models, model_configs
except:
print_verbose(
f"[Non-Blocking Error] get_all_keys error - {traceback.format_exc()}"
)
def completion_with_split_tests(models={}, messages=[], use_client=False, override_client=False, **kwargs):
"""
Example Usage:
models = {
"gpt-4": 0.7,
"huggingface/wizard-coder": 0.3
}
messages = [{ "content": "Hello, how are you?","role": "user"}]
completion_with_split_tests(models=models, messages=messages)
"""
import random
model_configs = {}
if use_client and not override_client:
if "id" not in kwargs or kwargs["id"] is None:
kwargs["id"] = str(uuid.uuid4())
#raise ValueError("Please tag this completion call, if you'd like to update it's split test values through the UI. - eg. `completion_with_split_tests(.., id=1234)`.")
# get the most recent model split list from server
models, model_configs = get_model_split_test(models=models, completion_call_id=kwargs["id"])
try:
selected_llm = random.choices(list(models.keys()), weights=list(models.values()))[0]
except:
traceback.print_exc()
raise ValueError("""models does not follow the required format - {'model_name': 'split_percentage'}, e.g. {'gpt-4': 0.7, 'huggingface/wizard-coder': 0.3}""")
# use dynamic model configs if users set
if model_configs!={}:
selected_model_configs = model_configs.get(selected_llm, {})
if "prompt" in selected_model_configs: # special case, add this to messages as system prompt
messages.append({"role": "system", "content": selected_model_configs["prompt"]})
selected_model_configs.pop("prompt")
for param_name in selected_model_configs:
if param_name == "temperature":
kwargs[param_name] = float(selected_model_configs[param_name])
elif param_name == "max_tokens":
kwargs[param_name] = int(selected_model_configs[param_name])
else:
kwargs[param_name] = selected_model_configs[param_name]
return litellm.completion(model=selected_llm, messages=messages, use_client=use_client, **kwargs)
def completion_with_fallbacks(**kwargs):
response = None
rate_limited_models = set()
model_expiration_times = {}
start_time = time.time()
fallbacks = [kwargs["model"]] + kwargs["fallbacks"]
del kwargs["fallbacks"] # remove fallbacks so it's not recursive
while response == None and time.time() - start_time < 45:
for model in fallbacks:
# loop thru all models
try:
if (
model in rate_limited_models
): # check if model is currently cooling down
if (
model_expiration_times.get(model)
and time.time() >= model_expiration_times[model]
):
rate_limited_models.remove(
model
) # check if it's been 60s of cool down and remove model
else:
continue # skip model
# delete model from kwargs if it exists
if kwargs.get("model"):
del kwargs["model"]
response = litellm.completion(**kwargs, model=model)
if response != None:
return response
except Exception as e:
rate_limited_models.add(model)
model_expiration_times[model] = (
time.time() + 60
) # cool down this selected model
# print(f"rate_limited_models {rate_limited_models}")
pass
return response
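# Illustrative usage sketch (hypothetical): completion_with_fallbacks reads the
# primary model from kwargs["model"] and an ordered list of backups from
# kwargs["fallbacks"]; the model names below are placeholders.
def _example_completion_with_fallbacks():
    return completion_with_fallbacks(
        model="gpt-3.5-turbo",
        fallbacks=["gpt-4", "claude-instant-1"],
        messages=[{"role": "user", "content": "Hey, how's it going?"}],
    )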
def process_system_message(system_message, max_tokens, model):
system_message_event = {"role": "system", "content": system_message}
system_message_tokens = get_token_count(system_message_event, model)
if system_message_tokens > max_tokens:
print_verbose("`tokentrimmer`: Warning, system message exceeds token limit. Trimming...")
# shorten system message to fit within max_tokens
new_system_message = shorten_message_to_fit_limit(system_message_event, max_tokens, model)
system_message_tokens = get_token_count(new_system_message, model)
return system_message_event, max_tokens - system_message_tokens
def process_messages(messages, max_tokens, model):
# Process messages from older to more recent
messages = messages[::-1]
final_messages = []
for message in messages:
final_messages = attempt_message_addition(final_messages, message, max_tokens, model)
return final_messages
def attempt_message_addition(final_messages, message, max_tokens, model):
temp_messages = [message] + final_messages
temp_message_tokens = get_token_count(messages=temp_messages, model=model)
if temp_message_tokens <= max_tokens:
return temp_messages
# if temp_message_tokens > max_tokens, try shortening temp_messages
elif "function_call" not in message:
        # fit updated_message within temp_message_tokens - max_tokens (i.e. the amount by which temp_message_tokens is greater than max_tokens)
updated_message = shorten_message_to_fit_limit(message, temp_message_tokens - max_tokens, model)
if can_add_message(updated_message, final_messages, max_tokens, model):
return [updated_message] + final_messages
return final_messages
def can_add_message(message, messages, max_tokens, model):
if get_token_count(messages + [message], model) <= max_tokens:
return True
return False
def get_token_count(messages, model):
return token_counter(model=model, messages=messages)
def shorten_message_to_fit_limit(
message,
tokens_needed,
model):
"""
Shorten a message to fit within a token limit by removing characters from the middle.
"""
content = message["content"]
while True:
total_tokens = get_token_count([message], model)
if total_tokens <= tokens_needed:
break
ratio = (tokens_needed) / total_tokens
new_length = int(len(content) * ratio)
print_verbose(new_length)
half_length = new_length // 2
left_half = content[:half_length]
right_half = content[-half_length:]
trimmed_content = left_half + '..' + right_half
message["content"] = trimmed_content
content = trimmed_content
return message
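# Illustrative usage sketch (hypothetical): the helper keeps the head and tail of
# the content and joins them with '..' until the message fits the token budget.
# The model name and sizes below are placeholders; exact counts depend on the tokenizer.
def _example_shorten_message():
    long_message = {"role": "user", "content": "word " * 2000}
    return shorten_message_to_fit_limit(long_message, tokens_needed=100, model="gpt-3.5-turbo")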
# LiteLLM token trimmer
# this code is borrowed from https://github.com/KillianLucas/tokentrim/blob/main/tokentrim/tokentrim.py
# Credits for this code go to Killian Lucas
def trim_messages(
messages,
model: Optional[str] = None,
trim_ratio: float = 0.75,
return_response_tokens: bool = False,
max_tokens = None
):
"""
Trim a list of messages to fit within a model's token limit.
Args:
messages: Input messages to be trimmed. Each message is a dictionary with 'role' and 'content'.
model: The LiteLLM model being used (determines the token limit).
trim_ratio: Target ratio of tokens to use after trimming. Default is 0.75, meaning it will trim messages so they use about 75% of the model's token limit.
return_response_tokens: If True, also return the number of tokens left available for the response after trimming.
max_tokens: Instead of specifying a model or trim_ratio, you can specify this directly.
Returns:
Trimmed messages and optionally the number of tokens available for response.
"""
# Initialize max_tokens
# if users pass in max tokens, trim to this amount
try:
if max_tokens == None:
# Check if model is valid
if model in litellm.model_cost:
max_tokens_for_model = litellm.model_cost[model]['max_tokens']
max_tokens = int(max_tokens_for_model * trim_ratio)
else:
# if user did not specify max tokens
# or passed an llm litellm does not know
# do nothing, just return messages
                return messages
system_message = ""
for message in messages:
if message["role"] == "system":
system_message += message["content"]
current_tokens = token_counter(model=model, messages=messages)
        # Do nothing if the current token count is under max_tokens
if current_tokens < max_tokens:
return messages
#### Trimming messages if current_tokens > max_tokens
print_verbose(f"Need to trim input messages: {messages}, current_tokens{current_tokens}, max_tokens: {max_tokens}")
if system_message:
system_message_event, max_tokens = process_system_message(system_message=system_message, max_tokens=max_tokens, model=model)
messages = messages + [system_message_event]
final_messages = process_messages(messages=messages, max_tokens=max_tokens, model=model)
if return_response_tokens: # if user wants token count with new trimmed messages
response_tokens = max_tokens - get_token_count(final_messages, model)
return final_messages, response_tokens
return final_messages
    except: # [Non-Blocking] if an error occurs, just return the original messages
return messages
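# Illustrative usage sketch (hypothetical): passing max_tokens directly skips the
# model_cost lookup; the model name and token budget below are placeholders.
def _example_trim_messages():
    history = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Summarise this conversation so far. " * 200},
    ]
    return trim_messages(history, model="gpt-3.5-turbo", max_tokens=500)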
# this helper reads the .env and returns a list of supported llms for user
def get_valid_models():
try:
# get keys set in .env
environ_keys = os.environ.keys()
valid_providers = []
# for all valid providers, make a list of supported llms
valid_models = []
for provider in litellm.provider_list:
# edge case litellm has together_ai as a provider, it should be togetherai
provider = provider.replace("_", "")
# litellm standardizes expected provider keys to
# PROVIDER_API_KEY. Example: OPENAI_API_KEY, COHERE_API_KEY
expected_provider_key = f"{provider.upper()}_API_KEY"
if expected_provider_key in environ_keys:
# key is set
valid_providers.append(provider)
for provider in valid_providers:
if provider == "azure":
valid_models.append("Azure-LLM")
else:
models_for_provider = litellm.models_by_provider.get(provider, [])
valid_models.extend(models_for_provider)
return valid_models
except:
return [] # NON-Blocking
| [
"Hey, how's it going?",
"0",
"True",
"gpt-3.5-turbo",
"Hey, how's it going",
"input_cost_per_token",
"Hello World",
"None",
"adapt_to_prompt_size",
"default",
"False",
"application/json",
"content",
"prompt_tokens"
] |
2024-01-10 | Slyracoon23/emergency-kit-app | trip-scheduler.py | import os
import openai
from dotenv import load_dotenv
import streamlit as st
# Load OpenAI API key
load_dotenv()
openai.api_key = os.getenv('OPENAI_API_KEY')
system_prompt = '''
The task is to prepare an emergency inventory list.
Use Bold Headings:
Start with a main title such as "Emergency Inventory List" and make it bold to distinguish it from the rest of the content.
Use ** on both sides of the title to make it bold in Markdown.
Example: **Emergency Inventory List**:
Organize by Categories:
Group related items into categories. This helps in quickly locating specific items.
Examples of categories include "Water", "Food", "Tools & Equipment", "Health & Safety", and so on.
Use Bullet Points:
For each item or instruction, use a bullet point.
In Markdown, you can use a hyphen - followed by a space to create a bullet point.
Sub-categories or Sub-points:
If an item can be further broken down into specific details or if it has multiple sub-items, use sub-bullet points.
Indent these sub-bullet points to differentiate them from main points.
In Markdown, this can be achieved by using a space or tab before the -.
Consistent Spacing:
Use consistent spacing between categories and items to make the list look organized.
In Markdown, you can simply hit "Enter" twice to create a space between lines.
Highlight Important Information:
For essential items or notes, you can use bold or italics to highlight them.
In Markdown:
Bold: **text**
Italics: *text*
Clear Descriptions:
For items that might be ambiguous, provide a short description.
Example: Instead of just "Water", use "Water: 6 gallons (1 gallon per person per day)" to provide clarity.
Avoid Long Sentences:
If a point seems too lengthy, break it down into multiple sub-points to make it more readable.
Example: Instead of combining "First aid kit and pet first aid kit", they were separated into two different bullet points.
Here are some examples of emergency inventory lists:
EXAMPLE 1:
Prepare an emergency inventory list for Miami based on the following information:
* Type of Emergency: Flood
* Number of People: 1
* Pets: No
* Special Needs: Medication for diabetes
* Anticipated Duration: 1 week
* Additional Notes: I live near the coast.
Response:
Emergency Inventory List:
* Water: 7 gallons (1 gallon per person per day)
* Non-perishable food: 7-day supply (canned goods, granola bars, etc.)
* Manual can opener
* Flashlights with extra batteries
* Battery-powered or hand-crank radio
* First aid kit with extra diabetes medication and glucose meter
* Whistle to signal for help
* Insect repellent and sunscreen
* Waterproof boots and rain gear
* Multi-tool or knife
* Important documents in a waterproof container (ID, bank account records, medication list, etc.)
* Cash in small denominations
* Local maps and evacuation routes
* Life jacket
* Fully charged backup power sources for phones
EXAMPLE 2:
Prepare an emergency inventory list for New York based on the following information:
* Type of Emergency: Power Outage
* Number of People: 2
* Pets: No
* Special Needs: None
* Anticipated Duration: 24 hours
* Additional Notes: Winter season.
Response:
Emergency Inventory List:
* Water: 2 gallons (1 gallon per person per day)
* Non-perishable food: 1-day supply (canned goods, granola bars, etc.)
* Manual can opener
* Flashlights with extra batteries
* Battery-powered or hand-crank radio
* First aid kit
* Warm clothing and blankets
* Multi-tool or knife
* Important documents (ID, bank account records, etc.)
* Cash in small denominations
* Fully charged backup power sources for phones and heaters
'''
# Emergency Inventory Prompt Generator
def generate_emergency_prompt(location, type_of_emergency, num_people, pets, special_needs, duration, additional_notes, **kwargs):
return f'''
Prepare an emergency inventory list for {location} based on the following information:
* Type of Emergency: {type_of_emergency}
* Number of People: {num_people}
* Pets: {pets}
* Special Needs: {special_needs}
* Anticipated Duration: {duration}
* Additional Notes: {additional_notes}
Response:
'''.strip()
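# Illustrative usage sketch (hypothetical values, taken from EXAMPLE 2 in the system
# prompt above); the **kwargs parameter is what lets the later call with
# **st.session_state absorb extra Streamlit keys such as 'output'.
def _example_generate_prompt():
    return generate_emergency_prompt(
        location="New York",
        type_of_emergency="Power Outage",
        num_people=2,
        pets="No",
        special_needs="None",
        duration="24 hours",
        additional_notes="Winter season.",
    )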
# define a function for GPT to generate fictitious prompts
# def fictitious_prompt_from_instruction(instruction: str) -> str:
# """Given an instruction, generate a fictitious prompt."""
# response = openai.ChatCompletion.create(
# model="gpt-3.5-turbo-0613",
# temperature=0,
# messages=[
# {
# "role": "system",
# "content": system_prompt,
# }, # we pick an example topic (friends talking about a vacation) so that GPT does not refuse or ask clarifying questions
# {"role": "user", "content": instruction},
# ],
# )
# fictitious_prompt = response["choices"][0]["message"]["content"]
# Submit function to get GPT-3 response
def submit():
prompt = generate_emergency_prompt(**st.session_state)
output = openai.ChatCompletion.create(
model='gpt-3.5-turbo',
temperature=0,
messages=[
{
"role": "system",
"content": system_prompt,
},
{"role": "user", "content": prompt},
],
)
st.session_state['output'] = output["choices"][0]["message"]["content"]
# UI Code
st.title('Emergency Advisor')
st.subheader('Let us prepare your emergency inventory list!')
# Initialization
if 'output' not in st.session_state:
st.session_state['output'] = '--'
with st.form(key='emergency_form'):
c1, c2, c3 = st.columns(3)
with c1:
st.subheader('Basic Details')
st.text_input('Location', value='New York', key='location')
st.selectbox('Type of Emergency', ('Wildfire', 'Flood', 'Earthquake', 'Power Outage', 'Tornado', 'Other'), key='type_of_emergency')
st.number_input('Number of People', value=1, min_value=1, key='num_people')
with c2:
st.subheader('Specific Needs')
st.radio('Pets', ['Yes', 'No'], key='pets')
st.text_area('Special Needs (e.g., medications, disabilities)', height=100, key='special_needs')
with c3:
st.subheader('Duration & Notes')
st.selectbox('Anticipated Duration', ('24 hours', '3 days', '1 week', 'More than a week'), key='duration')
st.text_area('Additional Notes', height=100, value='I have a toddler.', key='additional_notes')
st.form_submit_button('Submit', on_click=submit)
st.subheader('Emergency Inventory List')
st.write(st.session_state.output)
| [
"\nThe task is to prepare an emergency inventory list.\n\n Use Bold Headings:\n\n Start with a main title such as \"Emergency Inventory List\" and make it bold to distinguish it from the rest of the content.\n\n Use ** on both sides of the title to make it bold in Markdown.\n Example: **Emergency Inventory List**:\n \n Organize by Categories:\n\n Group related items into categories. This helps in quickly locating specific items.\n \n Examples of categories include \"Water\", \"Food\", \"Tools & Equipment\", \"Health & Safety\", and so on.\n \n Use Bullet Points:\n\n For each item or instruction, use a bullet point.\n\n In Markdown, you can use a hyphen - followed by a space to create a bullet point.\n\n Sub-categories or Sub-points:\n\n If an item can be further broken down into specific details or if it has multiple sub-items, use sub-bullet points.\n\n Indent these sub-bullet points to differentiate them from main points.\n\n In Markdown, this can be achieved by using a space or tab before the -.\n \n Consistent Spacing:\n\n Use consistent spacing between categories and items to make the list look organized.\n \n In Markdown, you can simply hit \"Enter\" twice to create a space between lines.\n \n Highlight Important Information:\n\n For essential items or notes, you can use bold or italics to highlight them.\n\n In Markdown:\n Bold: **text**\n Italics: *text*\n \n Clear Descriptions:\n\n For items that might be ambiguous, provide a short description.\n \n Example: Instead of just \"Water\", use \"Water: 6 gallons (1 gallon per person per day)\" to provide clarity.\n \n Avoid Long Sentences:\n\n If a point seems too lengthy, break it down into multiple sub-points to make it more readable.\n \n Example: Instead of combining \"First aid kit and pet first aid kit\", they were separated into two different bullet points.\n\n\n Here are some examples of emergency inventory lists:\n\nEXAMPLE 1:\n Prepare an emergency inventory list for Miami based on the following information:\n\n * Type of Emergency: Flood\n * Number of People: 1\n * Pets: No\n * Special Needs: Medication for diabetes\n * Anticipated Duration: 1 week\n * Additional Notes: I live near the coast.\n\n Response:\n Emergency Inventory List:\n\n * Water: 7 gallons (1 gallon per person per day)\n * Non-perishable food: 7-day supply (canned goods, granola bars, etc.)\n * Manual can opener\n * Flashlights with extra batteries\n * Battery-powered or hand-crank radio\n * First aid kit with extra diabetes medication and glucose meter\n * Whistle to signal for help\n * Insect repellent and sunscreen\n * Waterproof boots and rain gear\n * Multi-tool or knife\n * Important documents in a waterproof container (ID, bank account records, medication list, etc.)\n * Cash in small denominations\n * Local maps and evacuation routes\n * Life jacket\n * Fully charged backup power sources for phones\n\nEXAMPLE 2:\n Prepare an emergency inventory list for New York based on the following information:\n\n * Type of Emergency: Power Outage\n * Number of People: 2\n * Pets: No\n * Special Needs: None\n * Anticipated Duration: 24 hours\n * Additional Notes: Winter season.\n\n Response:\n\n Emergency Inventory List:\n \n * Water: 2 gallons (1 gallon per person per day)\n * Non-perishable food: 1-day supply (canned goods, granola bars, etc.)\n * Manual can opener\n * Flashlights with extra batteries\n * Battery-powered or hand-crank radio\n * First aid kit\n * Warm clothing and blankets\n * Multi-tool or knife\n * Important documents (ID, bank account 
records, etc.)\n * Cash in small denominations\n * Fully charged backup power sources for phones and heaters\n\n"
] |
2024-01-10 | StephanFWard/AutoAI-With-ChatGPT4 | autogpts~autogpt~autogpt~app~configurator.py | """Configurator module."""
from __future__ import annotations
import logging
from pathlib import Path
from typing import TYPE_CHECKING, Literal, Optional
import click
from colorama import Back, Fore, Style
from autogpt import utils
from autogpt.config import Config
from autogpt.config.config import GPT_3_MODEL, GPT_4_MODEL
from autogpt.llm.api_manager import ApiManager
from autogpt.logs.config import LogFormatName
from autogpt.logs.helpers import print_attribute, request_user_double_check
from autogpt.memory.vector import get_supported_memory_backends
if TYPE_CHECKING:
from autogpt.core.resource.model_providers.openai import OpenAICredentials
logger = logging.getLogger(__name__)
def apply_overrides_to_config(
config: Config,
continuous: bool = False,
continuous_limit: Optional[int] = None,
ai_settings_file: Optional[Path] = None,
prompt_settings_file: Optional[Path] = None,
skip_reprompt: bool = False,
speak: bool = False,
debug: bool = False,
log_level: Optional[str] = None,
log_format: Optional[str] = None,
log_file_format: Optional[str] = None,
gpt3only: bool = False,
gpt4only: bool = False,
memory_type: Optional[str] = None,
browser_name: Optional[str] = None,
allow_downloads: bool = False,
skip_news: bool = False,
) -> None:
"""Updates the config object with the given arguments.
Args:
config (Config): The config object to update.
continuous (bool): Whether to run in continuous mode.
continuous_limit (int): The number of times to run in continuous mode.
ai_settings_file (Path): The path to the ai_settings.yaml file.
prompt_settings_file (Path): The path to the prompt_settings.yaml file.
skip_reprompt (bool): Whether to skip the re-prompting messages on start.
speak (bool): Whether to enable speak mode.
debug (bool): Whether to enable debug mode.
log_level (int): The global log level for the application.
log_format (str): The format for the log(s).
log_file_format (str): Override the format for the log file.
gpt3only (bool): Whether to enable GPT3.5 only mode.
gpt4only (bool): Whether to enable GPT4 only mode.
memory_type (str): The type of memory backend to use.
browser_name (str): The name of the browser to use for scraping the web.
allow_downloads (bool): Whether to allow AutoGPT to download files natively.
        skip_news (bool): Whether to suppress the output of latest news on startup.
"""
config.continuous_mode = False
config.tts_config.speak_mode = False
# Set log level
if debug:
config.logging.level = logging.DEBUG
elif log_level and type(_level := logging.getLevelName(log_level.upper())) is int:
config.logging.level = _level
# Set log format
if log_format and log_format in LogFormatName._value2member_map_:
config.logging.log_format = LogFormatName(log_format)
if log_file_format and log_file_format in LogFormatName._value2member_map_:
config.logging.log_file_format = LogFormatName(log_file_format)
if continuous:
print_attribute("Continuous Mode", "ENABLED", title_color=Fore.YELLOW)
logger.warning(
"Continuous mode is not recommended. It is potentially dangerous and may"
" cause your AI to run forever or carry out actions you would not usually"
" authorise. Use at your own risk.",
)
config.continuous_mode = True
if continuous_limit:
print_attribute("Continuous Limit", continuous_limit)
config.continuous_limit = continuous_limit
# Check if continuous limit is used without continuous mode
if continuous_limit and not continuous:
raise click.UsageError("--continuous-limit can only be used with --continuous")
if speak:
print_attribute("Speak Mode", "ENABLED")
config.tts_config.speak_mode = True
# Set the default LLM models
if gpt3only:
print_attribute("GPT3.5 Only Mode", "ENABLED")
# --gpt3only should always use gpt-3.5-turbo, despite user's FAST_LLM config
config.fast_llm = GPT_3_MODEL
config.smart_llm = GPT_3_MODEL
elif (
gpt4only
and check_model(
GPT_4_MODEL,
model_type="smart_llm",
api_credentials=config.openai_credentials,
)
== GPT_4_MODEL
):
print_attribute("GPT4 Only Mode", "ENABLED")
# --gpt4only should always use gpt-4, despite user's SMART_LLM config
config.fast_llm = GPT_4_MODEL
config.smart_llm = GPT_4_MODEL
else:
config.fast_llm = check_model(
config.fast_llm, "fast_llm", api_credentials=config.openai_credentials
)
config.smart_llm = check_model(
config.smart_llm, "smart_llm", api_credentials=config.openai_credentials
)
if memory_type:
supported_memory = get_supported_memory_backends()
chosen = memory_type
if chosen not in supported_memory:
logger.warning(
extra={
"title": "ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED:",
"title_color": Fore.RED,
},
msg=f"{supported_memory}",
)
print_attribute(
"Defaulting to", config.memory_backend, title_color=Fore.YELLOW
)
else:
config.memory_backend = chosen
if skip_reprompt:
print_attribute("Skip Re-prompt", "ENABLED")
config.skip_reprompt = True
if ai_settings_file:
file = ai_settings_file
# Validate file
(validated, message) = utils.validate_yaml_file(file)
if not validated:
logger.fatal(extra={"title": "FAILED FILE VALIDATION:"}, msg=message)
request_user_double_check()
exit(1)
print_attribute("Using AI Settings File", file)
config.ai_settings_file = config.project_root / file
config.skip_reprompt = True
if prompt_settings_file:
file = prompt_settings_file
# Validate file
(validated, message) = utils.validate_yaml_file(file)
if not validated:
logger.fatal(extra={"title": "FAILED FILE VALIDATION:"}, msg=message)
request_user_double_check()
exit(1)
print_attribute("Using Prompt Settings File", file)
config.prompt_settings_file = config.project_root / file
if browser_name:
config.selenium_web_browser = browser_name
if allow_downloads:
print_attribute("Native Downloading", "ENABLED")
logger.warning(
msg=f"{Back.LIGHTYELLOW_EX}"
"AutoGPT will now be able to download and save files to your machine."
f"{Back.RESET}"
" It is recommended that you monitor any files it downloads carefully.",
)
logger.warning(
msg=f"{Back.RED + Style.BRIGHT}"
"NEVER OPEN FILES YOU AREN'T SURE OF!"
f"{Style.RESET_ALL}",
)
config.allow_downloads = True
if skip_news:
config.skip_news = True
def check_model(
model_name: str,
model_type: Literal["smart_llm", "fast_llm"],
api_credentials: OpenAICredentials,
) -> str:
"""Check if model is available for use. If not, return gpt-3.5-turbo."""
api_manager = ApiManager()
models = api_manager.get_models(**api_credentials.get_api_access_kwargs(model_name))
if any(model_name in m["id"] for m in models):
return model_name
logger.warning(
f"You don't have access to {model_name}. Setting {model_type} to gpt-3.5-turbo."
)
return "gpt-3.5-turbo"
| [] |
2024-01-10 | StephanFWard/AutoAI-With-ChatGPT4 | autogpts~autogpt~autogpt~json_utils~utilities.py | """Utilities for the json_fixes package."""
import ast
import logging
import re
from typing import Any
logger = logging.getLogger(__name__)
def extract_dict_from_response(response_content: str) -> dict[str, Any]:
# Sometimes the response includes the JSON in a code block with ```
pattern = r"```([\s\S]*?)```"
match = re.search(pattern, response_content)
if match:
response_content = match.group(1).strip()
# Remove language names in code blocks
response_content = response_content.lstrip("json")
else:
# The string may contain JSON.
json_pattern = r"{.*}"
match = re.search(json_pattern, response_content)
if match:
response_content = match.group()
# Response content comes from OpenAI as a Python `str(content_dict)`.
# `literal_eval` does the reverse of `str(dict)`.
try:
return ast.literal_eval(response_content)
except BaseException as e:
logger.info(f"Error parsing JSON response with literal_eval {e}")
logger.debug(f"Invalid JSON received in response:\n{response_content}")
# TODO: How to raise an error here without causing the program to exit?
return {}
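# Illustrative inputs (hypothetical): the two shapes handled above are a fenced
# ``` code block and a bare Python-style dict embedded in prose.
_EXAMPLE_FENCED_RESPONSE = "```json\n{'command': 'finish', 'args': {}}\n```"
_EXAMPLE_BARE_RESPONSE = "Here is the result: {'command': 'finish', 'args': {}}"
# Both are expected to parse to {'command': 'finish', 'args': {}} via
# extract_dict_from_response(...).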
| [] |
2024-01-10 | hinczhang/GPT-Translator | src~web~resource~gptbot.py | from flask_restful import Resource
from flask import Response, request, send_file
import json
from configparser import ConfigParser
import jwt
import os
import time
import openai
# opt
conn = ConfigParser()
conn.read("./conf.ini")
# token config
headers = {
"alg": "HS256",
"typ": "JWT"
}
salt = conn.get('key','token_key')
gpt_key = conn.get('gpt','api_key')
organization = conn.get('gpt','organization')
openai.api_key = gpt_key
openai.organization = organization
messages = [{"role": "system", "content": "You are a helpful assistant to answer and chat."}]
def validate_user_token(token, username):
try:
info = jwt.decode(jwt = token, key = salt, verify=True, algorithms='HS256')
if username == info["username"]:
return True
else:
return False
except Exception as e:
print(repr(e))
return False
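# Illustrative sketch (hypothetical helper): a token that validate_user_token
# accepts is signed with the same salt/headers, mirroring the jwt.encode call in
# the commented-out sharing code further below.
def _example_issue_token(username: str) -> str:
    payload = {"username": username, "exp": int(time.time()) + 60 * 60}
    return jwt.encode(payload=payload, key=salt, algorithm='HS256', headers=headers)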
class GDPBot(Resource):
def get(self):
pass
def post(self):
# form = eval(str(request.data, encoding = "utf-8"))
form = request.form
res = None
if form["request"] == "chat":
question = form['question']
messages.append({"role": "user", "content": question})
response = openai.ChatCompletion.create(
model = "gpt-3.5-turbo",
messages = messages)
reply = response["choices"][0]["message"]["content"]
messages.append({"role": "system", "content": reply})
print("\n" + reply + "\n")
res = Response(response = json.dumps({'status': 0, 'msg': reply}), status = 200, mimetype="application/json")
elif form["request"] == "reset":
messages.clear()
messages.append({"role": "system", "content": "You are a helpful assistant to answer and chat."})
res = Response(response = json.dumps({'status': 0, 'msg': 'Reset successfully!'}), status = 200, mimetype="application/json")
'''
if not validate_user_token(form['token'], username):
res = Response(response = json.dumps({'status': 2, 'msg': 'Invalid operations! Will come back to the login page.'}), status = 200, mimetype="application/json")
else:
exp_time = int(time.time() + 60*60*24*7)
payload = {"username": username, "path": form['path'], "exp": exp_time}
token = jwt.encode(payload=payload, key=salt, algorithm='HS256', headers=headers)
res = Response(response = json.dumps({'status': 0, 'msg': 'Share successfully!', 'token': token}), status = 200, mimetype="application/json")
'''
return res | [
"You are a helpful assistant to answer and chat."
] |
2024-01-10 | Brian-Konr/detectiveinferno | AI_processor.py | import openai
import json
from RAG import add_database, search_docs
from summary import summary_processor
import os
import shutil
import ast
from prompt_function import rich_character, character, rich_place, summary
from langchain.vectorstores import Chroma
from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings
#### global variable
m_list = [[],[],[],[]]
demo_story_count=1
demo_mode = True
#### global DB
db_0 = Chroma() ## story DB
db_1 = Chroma() ## suspect 1 DB
db_2 = Chroma() ## suspect 2 DB
db_3 = Chroma() ## suspect 3 DB
db_4 = Chroma() ## scene DB
db_len = [1,1,1,1,1]
embedding_function = SentenceTransformerEmbeddings(model_name="sentence-transformers/paraphrase-multilingual-mpnet-base-v2")
def get_current_db():
global db_0, db_1, db_2, db_3, db_4
global db_len
#### check story DB
if(os.path.exists('./db/story')):
db_0 = Chroma(persist_directory='./db/story', embedding_function=embedding_function)
else:
print("story DB has some problem")
print("Recreate story DB")
db_0, db_len[0] = add_database("./story_background/story.txt", './db/story', 1)
suspect_db = 0
#### check suspect DB
if(os.path.exists('./db/suspect_1')):
db_1 = Chroma(persist_directory='./db/suspect_1', embedding_function=embedding_function)
suspect_db += 1
if(os.path.exists('./db/suspect_2')):
db_2 = Chroma(persist_directory='./db/suspect_2', embedding_function=embedding_function)
suspect_db += 1
if(os.path.exists('./db/suspect_3')):
db_3 = Chroma(persist_directory='./db/suspect_3', embedding_function=embedding_function)
suspect_db += 1
if(suspect_db != 3):
print("suspect DB has some problem")
print("Recreate suspect DB")
db_len = rich_character_info()
#### check scene DB
if(os.path.exists('./db/scene')):
db_4 = Chroma(persist_directory='./db/scene', embedding_function=embedding_function)
else:
print("scene DB has some problem")
print("Recreate scene DB")
rich_scene_info()
db_len = [len(db_0.get()['ids']),len(db_1.get()['ids']),len(db_2.get()['ids']),len(db_3.get()['ids']),len(db_4.get()['ids'])]
print("Get current db")
print("DB len:")
print(db_len)
print("Please make sure every number in DB len is bigger than 1 (not including 1).")
def reset_db():
global db_0, db_1, db_2, db_3, db_4
global db_len
db_0 = Chroma() ## story DB
db_1 = Chroma() ## suspect 1 DB
db_2 = Chroma() ## suspect 2 DB
db_3 = Chroma() ## suspect 3 DB
db_4 = Chroma() ## scene DB
db_len = [1,1,1,1,1]
### delete all file in db
dirPath = "./db"
try:
shutil.rmtree(dirPath)
except OSError as e:
print(f"Error:{ e.strerror}")
print("Reset db")
print("DB len:")
print(db_len)
#### openai processor
def GPT_processor(length,system_message, user_message, function_description, temperature):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
max_tokens=length,
temperature=temperature,
messages=[
{"role": "system", "content": system_message},
{"role": "user", "content": user_message}
],
functions=function_description,
function_call = {"name":function_description[0]['name']}
)
return response
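# Illustrative schema sketch (hypothetical field names): GPT_processor expects
# `function_description` to be a list holding one OpenAI function-calling schema,
# in the same general shape as the JSON files (e.g. suspect_description.json)
# loaded elsewhere in this module.
_EXAMPLE_FUNCTION_DESCRIPTION = [
    {
        "name": "reply_as_suspect",
        "description": "Return the suspect's reply to the detective.",
        "parameters": {
            "type": "object",
            "properties": {
                "reply": {"type": "string", "description": "The suspect's answer."}
            },
            "required": ["reply"],
        },
    }
]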
###### game creator
def story_creater():
# conversation_count = 0
m_list = [[],[],[],[]]
#### load start prompt
f = open('./story_background/start_prompt.txt',encoding="utf-8")
start_prompt = f.read()
with open('./story_background/story_background_description.json',encoding="utf-8") as file:
story_background_description = [json.load(file)]
print("start process AI api")
story_response = GPT_processor(2000,start_prompt, "我想要玩一個偵探尋找殺人犯的遊戲,你可以幫我設計一個遊戲的故事內容嗎?", story_background_description, 0.5)
# store the story dictionary result to story.json
f2 = open("./story_background/story.txt", "w",encoding="utf-8")
f2.write(story_response.choices[0].message.function_call.arguments)
f2.close()
story_object = json.dumps(story_response.choices[0].message.function_call.arguments)
# print(story_object.encode('ascii').decode('unicode-escape'))
with open("./story_background/story.json", "w") as outfile:
outfile.write(story_object)
#### add story to DB
print("Add story to DB")
global db_0, db_len
db_0, db_len[0] = add_database("./story_background/story.txt", './db/story', 1)
print("DB status")
print(db_len)
#### rich character info
print("rich character info")
db_len = rich_character_info()
print("DB status")
print(db_len)
#### rich scene info
print("rich scene info")
rich_scene_info()
print("DB status")
print(db_len)
return story_response.choices[0].message.function_call.arguments
def rich_character_info():
f = open("./story_background/story.json")
story_json = json.load(f)
suspects_list = json.loads(story_json).get("嫌疑人")
#### load start prompt
with open('./suspect_file/rich_suspect_prompt.txt', encoding="utf-8") as f:
rich_prompt = f.read()
with open('./story_background/story.txt', encoding="utf-8") as f:
story = f.read()
for i in range(3):
name = suspects_list[i]["姓名"]
rich_character_response = rich_character(rich_prompt, story, name, 1300, 0.5)
with open(f'./suspect_file/suspect_{i}/rich_suspect_info.txt', 'w', encoding="utf-8") as f:
f.write(rich_character_response)
if(i == 0):
global db_1
db_1, db_len[1] = add_database(f'./suspect_file/suspect_{i}/rich_suspect_info.txt', './db/suspect_1', 1)
elif(i == 1):
global db_2
db_2, db_len[2] = add_database(f'./suspect_file/suspect_{i}/rich_suspect_info.txt', './db/suspect_2', 1)
else:
global db_3
db_3, db_len[3] = add_database(f'./suspect_file/suspect_{i}/rich_suspect_info.txt', './db/suspect_3', 1)
return db_len
###### suspect creater
def suspect_creater(id,target,action):
global db_1, db_2, db_3
global db_len
#### load suspect prompt
f = open('./suspect_file/suspect_prompt.txt',encoding="utf-8")
suspect_prompt = f.read()
keyword = action + "," + target
#### load story txt
with open('./story_background/story.txt',encoding="utf-8") as file:
story = file.read()
suspect_db_1 = Chroma(persist_directory=f'./db/suspect_1', embedding_function=embedding_function)
suspect_db_2 = Chroma(persist_directory=f'./db/suspect_2', embedding_function=embedding_function)
suspect_db_3 = Chroma(persist_directory=f'./db/suspect_3', embedding_function=embedding_function)
if(id == 0):
suspect_summary = search_docs(keyword, suspect_db_1, 5)
elif(id == 1):
suspect_summary = search_docs(keyword, suspect_db_2, 5)
else:
suspect_summary = search_docs(keyword, suspect_db_3, 5)
user_prompt = action
system_prompt = suspect_prompt + "你需要假扮嫌疑人是" + target + "提供的資訊如下:" + suspect_summary + "\n" + story
with open('./suspect_file/suspect_description.json',encoding="utf-8") as file:
suspect_description = [json.load(file)]
suspect_response = GPT_processor(200,system_message=system_prompt, user_message=user_prompt, function_description=suspect_description, temperature=0.5)
# suspect_object = json.dumps(suspect_response.choices[0].message.function_call.arguments)
#### store to list
conversation_information = {
"m_id": len(m_list[id]),
"sender": 1,
"message":action
}
m_list[id].append(conversation_information)
conversation_information = {
"m_id": len(m_list[id]),
"sender": 0,
"message":json.loads(suspect_response.choices[0].message.function_call.arguments).get("回覆")
}
m_list[id].append(conversation_information)
### write to conversation file
f = open(f"./suspect_file/suspect_{id}/conversation.txt", "a",encoding="utf-8")
f.write(f"玩家:{action}\n")
f.write( f"{target}:{ json.loads(suspect_response.choices[0].message.function_call.arguments).get('回覆') }\n" )
f.close()
### save to DB
with open(f"./suspect_file/suspect_{id}/tmp_conversation.txt", "w" ,encoding="utf-8") as file:
file.write(f"玩家:{action}\n")
file.write( f"{target}:{ json.loads(suspect_response.choices[0].message.function_call.arguments).get('回覆') }\n" )
if(id == 0):
db_1, d1_len = add_database(f"./suspect_file/suspect_{id}/tmp_conversation.txt", './db/suspect_1', db_len[1])
db_len[1] += d1_len
elif(id == 1):
db_2, d2_len = add_database(f"./suspect_file/suspect_{id}/tmp_conversation.txt", './db/suspect_2', db_len[2])
db_len[2] += d2_len
else:
db_3, d3_len = add_database(f"./suspect_file/suspect_{id}/tmp_conversation.txt", './db/suspect_3', db_len[3])
db_len[3] += d3_len
open(f"./suspect_file/suspect_{id}/tmp_conversation.txt", 'w').close()
# summary_processor()
print(json.loads(suspect_response.choices[0].message.function_call.arguments).get("回覆"))
return json.loads(suspect_response.choices[0].message.function_call.arguments).get("回覆")
def rich_scene_info():
with open('./scene_file/rich_scene_prompt.txt', encoding="utf-8") as f:
rich_prompt = f.read()
with open('./story_background/story.txt', encoding="utf-8") as f:
story = f.read()
rich_scene = rich_place(rich_prompt, story, 1300, 0.8)
with open('./scene_file/rich_scene_info.txt', 'w', encoding="utf-8") as f:
f.write(rich_scene)
global db_4, db_len
db_4, db_len[4] = add_database(f"./scene_file/rich_scene_info.txt", './db/scene', 1)
###### scene_creater
def scene_creater(action):
global db_4, db_len
#### load start prompt
f = open('./scene_file/scene_prompt.txt',encoding="utf-8")
scene_prompt = f.read()
keyword = "請根據案發現場的狀況,回答:" + action
#### load story txt
with open('./story_background/story.txt',encoding="utf-8") as file:
story = file.read()
scene_db = Chroma(persist_directory='./db/scene', embedding_function=embedding_function)
scene_info = search_docs(keyword, scene_db, 8)
system_prompt = scene_prompt + story + "\n" + scene_info
user_prompt = action
with open('./scene_file/scene_description.json',encoding="utf-8") as file:
scene_description = [json.load(file)]
scene_response = GPT_processor(400,system_message=system_prompt, user_message=user_prompt, function_description=scene_description, temperature=0.9)
#### store to list
conversation_information = {
"m_id": len(m_list[3]),
"sender": 1,
"message":action
}
m_list[3].append(conversation_information)
conversation_information = {
"m_id": len(m_list[3]),
"sender": 0,
"message":json.loads(scene_response.choices[0].message.function_call.arguments).get("回覆")
}
m_list[3].append(conversation_information)
# scene_object = json.dumps(scene_response.choices[0].message.function_call.arguments)
f = open("./scene_file/conversation.txt", "a",encoding="utf-8")
f.write(f"玩家:{action}\n")
f.write( f"場景:{ json.loads(scene_response.choices[0].message.function_call.arguments).get('回覆') }\n" )
print(json.loads(scene_response.choices[0].message.function_call.arguments).get("回覆"))
f.close()
#### save to DB
with open(f"./scene_file/tmp_conversation.txt", "w" ,encoding="utf-8") as file:
file.write(f"玩家:{action}\n")
file.write( f"場景:{ json.loads(scene_response.choices[0].message.function_call.arguments).get('回覆') }\n" )
print("before add scene to DB")
print(db_len)
db_4, d4_len = add_database(f"./scene_file/tmp_conversation.txt", './db/scene', db_len[4])
db_len[4] += d4_len
print("after add scene to DB")
print(db_len)
open(f"./scene_file/tmp_conversation.txt", 'w').close()
return json.loads(scene_response.choices[0].message.function_call.arguments).get("回覆")
# return scene_response.choices[0].message.content
###### final_answer_creater
def final_answer_creater(id, motivation, action):
#### load start prompt
f = open('./final_answer_file/final_answer_prompt.txt',encoding="utf-8")
fa_prompt = f.read()
#### load story txt
with open('./story_background/story.txt',encoding="utf-8") as file:
story = file.read()
f = open("./story_background/story.json")
story_json = json.load(f)
suspects_list = json.loads(story_json).get("嫌疑人")
system_prompt = fa_prompt + '\n' + story
user_prompt = "我猜測兇手是" + suspects_list[id]["姓名"] + ",他的動機為" + motivation + ",他的犯案手法為" + action
with open('./final_answer_file/final_answer_description.json',encoding="utf-8") as file:
final_answer_description = [json.load(file)]
final_answer_response = GPT_processor(800,system_message=system_prompt,user_message=user_prompt, function_description=final_answer_description, temperature=0.8)
print(json.loads(final_answer_response.choices[0].message.function_call.arguments).get("真相"))
return json.loads(final_answer_response.choices[0].message.function_call.arguments).get("真相")
#### hint creater
def hint_creater():
#### load start prompt
    with open('./story_background/hint_prompt.txt', encoding="utf-8") as f:
        hint_prompt = f.read()
#### load story txt
with open('./story_background/story.txt',encoding="utf-8") as file:
story = file.read()
system_prompt = hint_prompt + '\n' + story
user_prompt = "請給我一個提示。"
with open('./story_background/hint_description.json',encoding="utf-8") as file:
hint_description = [json.load(file)]
hint_response = GPT_processor(200,system_prompt, user_prompt, hint_description, 0.6)
f = open(f"./story_background/hints_history.txt", "a",encoding="utf-8")
f.write(json.loads(hint_response.choices[0].message.function_call.arguments).get("回覆"))
f.close()
print( json.loads(hint_response.choices[0].message.function_call.arguments).get("回覆") )
return json.loads(hint_response.choices[0].message.function_call.arguments).get("回覆")
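###### hypothetical usage sketch (illustration only, not the project's real front-end flow)
def demo_game_loop():
    # Minimal console loop wired from the helpers above; the input strings are placeholders.
    while True:
        action = input("action ('hint' for a hint, 'quit' to stop): ")
        if action == "quit":
            break
        if action == "hint":
            hint_creater()           # prints one hint and appends it to hints_history.txt
        else:
            scene_creater(action)    # answers from the scene vector DB and logs the exchange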
# def message_creater(id):
# file = []
# if id < 3:
# file = open(f"./suspect_file/suspect_{id}/conversation.txt",encoding="utf-8")
# else:
# file = open(f"./scene_file/conversation.txt",encoding="utf-8")
# conversation_list = file.readlines() #list
# for lines in conversation_list:
# .function_call.arguments).get("place")
# print(story_response.choices[0].message.content)
# story_background = story_response.choices[0].message.content
##### create information dictionary
# dictionary_response = openai.ChatCompletion.create(
# model="gpt-3.5-turbo",
# max_tokens=2000,
# temperature=0.5,
# messages=[
# {"role": "user", "content": story_response.choices[0].message.content},
# {"role": "user", "content": "請幫我將上述偵探小說內容根據每個角色的人格特質和線索做出一個dictionary"},
# # {"role": "assistant", "content": "原來你是楊鈞安呀"},
# # {"role": "user", "content": "請問我叫什麼名字?"}
# ]
# )
# # store the dictionary to data_dictionary
# print(dictionary_response.choices[0].message.content)
# with open('data_dictionary.txt', 'w') as convert_file:
# convert_file.write(json.dumps(dictionary_response.choices[0].message.content))
# ##### receive data posted back from the front end
# # from flask import Flask, request
# # app = Flask(__name__)
# # @app.route('/receive_message', methods=['POST'])
# # def receive_message():
# # reply = request.get_json()
# #     # handle the received message
# # return 'Message received successfully'
# ##### game loop
# while True:
# action = input("action:")
# f = open('data_dictionary.txt',encoding="utf-8")
# now_dictionary = f.read()
# game_response = openai.ChatCompletion.create(
# model="gpt-3.5-turbo",
# max_tokens=200,
# temperature=0.5,
# messages=[
# {"role": "user", "content": story_background},
# {"role": "user", "content": "請根據以下關係資料和上述故事內容,進行本偵探遊戲,請勿直接公布兇手身分。"+now_dictionary+" 若是資料中無法確定的資訊,請你幫我生成有邏輯和不矛盾的內容。" },
# {"role": "user", "content": action},
# # {"role": "assistant", "content": "原來你是楊鈞安呀"},
# # {"role": "user", "content": "請問我叫什麼名字?"}
# ]
# )
# # store the dictionary to data_dictionary
# # print(dictionary_response.choices[0].message.content)
# # with open('data_dictionary.txt', 'w') as convert_file:
# # convert_file.write(json.dumps(dictionary_response.choices[0].message.content))
# print(f"response: {game_response.choices[0].message.content}")
# ##### dictionary reset
# dictionary_response = openai.ChatCompletion.create(
# model="gpt-3.5-turbo",
# max_tokens=200,
# temperature=0.5,
# messages=[
# {"role": "user", "content": story_background},
# {"role": "user", "content": game_response.choices[0].message.content + " 請幫我將上述內容根據每個角色的人格特質和線索做出一個dictionary,並和以下dictionary合併成一個新的關係dictionary"+ now_dictionary },
# # {"role": "user", "content": action},
# # {"role": "assistant", "content": "原來你是楊鈞安呀"},
# # {"role": "user", "content": "請問我叫什麼名字?"}
# ]
# )
# # store the dictionary to data_dictionary
# print(dictionary_response.choices[0].message.content)
# with open('data_dictionary.txt', 'w') as convert_file:
# convert_file.write(json.dumps(dictionary_response.choices[0].message.content)) | [
"請給我一個提示。",
"PLACEHOLDERPLACEHOLDER\nPLACEHOLDER",
"PLACEHOLDER\nPLACEHOLDER",
"PLACEHOLDER你需要假扮嫌疑人是PLACEHOLDER提供的資訊如下:PLACEHOLDER\nPLACEHOLDER"
] |
2024-01-10 | Brian-Konr/detectiveinferno | prompt_function.py | import openai
import os
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
MODEL = "gpt-3.5-turbo"
def GPT_response(system_message, user_message, max_tokens=1300, temperature=0.8):
response = openai.ChatCompletion.create(
model=MODEL,
max_tokens=max_tokens,
temperature= temperature,
messages=[{"role": "system", "content": system_message},
{"role": "user", "content": user_message}
],
)
return response
def rich_character(prompt, story, name, max_tokens=1300, temperature=0.8):
system_prompt = prompt + story
user_prompt = "請豐富" + name + "的個人資訊。"
response = GPT_response(system_prompt, user_prompt, max_tokens=max_tokens, temperature=temperature)
# print(response.choices[0]['message']['content'])
return response.choices[0]['message']['content']
def character(name, prompt, story, character_info, query, max_tokens=1300, temperature=0.8):
character_name = "現在你需要假扮的嫌疑人為" + name + "我將給你故事內容和嫌疑人的資料"
prompt = prompt + character_name + story + character_info
response = GPT_response(prompt, query, max_tokens=max_tokens, temperature=temperature)
# print(response.choices[0]['message']['content'])
return response.choices[0]['message']['content']
def rich_place(prompt, story, max_tokens=1300, temperature=0.8):
system_prompt = prompt
user_prompt = "我將給你一段偵探故事,請你幫我豐富案發現場的資訊。" + story
response = GPT_response(system_prompt, user_prompt, max_tokens=max_tokens, temperature=temperature)
# print(response.choices[0]['message']['content'])
return response.choices[0]['message']['content']
def summary(prompt, story, max_tokens=1300, temperature=0.8):
system_prompt = prompt
user_prompt = "我將給你一段偵探故事,請你幫我進行總結。" + story
response = GPT_response(system_prompt, user_prompt, max_tokens=max_tokens, temperature=temperature)
# print(response.choices[0]['message']['content'])
return response.choices[0]['message']['content']
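# --- hypothetical usage sketch ---
# The prompt text, story text and character name below are made-up examples;
# only the function signatures come from this module.
if __name__ == "__main__":
    demo_prompt = "You are a detective-story assistant."
    demo_story = "A body was found in the library at midnight."
    enriched = rich_character(demo_prompt, demo_story, "Alice", max_tokens=300)
    print(summary("Summarise the following detective story.", enriched, max_tokens=300))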
| [
"我將給你一段偵探故事,請你幫我進行總結。PLACEHOLDER",
"PLACEHOLDERPLACEHOLDER",
"我將給你一段偵探故事,請你幫我豐富案發現場的資訊。PLACEHOLDER",
"PLACEHOLDERPLACEHOLDERPLACEHOLDERPLACEHOLDER",
"請豐富PLACEHOLDER的個人資訊。"
] |
2024-01-10 | john-d-murphy/SummarizeAndCategorize | summarize_and_categorize.py | #! /usr/bin/python
import argparse
import logging
import openai
import time
from bs4 import BeautifulSoup
from bs4.element import Comment
from urllib.request import Request, urlopen
### Constants
DEFAULT_URL = "https://www.yahoo.com"
#### Logger
log = logging.getLogger("root")
log_format = "[%(asctime)s - %(module)25s:%(funcName)-25s] %(message)s"
logging.basicConfig(format=log_format)
# log.setLevel(logging.DEBUG)
def main():
### Parse and Display Arguments
arguments = parse_arguments()
### Get Webpage Text
text = read_url(arguments.url)
### Print Open AI Summary
get_open_ai_summary(arguments.url, text, arguments.key)
def parse_arguments():
### Get Arguments
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--url", default=DEFAULT_URL, help="URL to parse")
parser.add_argument("--key", required=True, help="Open AI API Key")
arguments = parser.parse_args()
### Display Arguments
log.info("URL - %s" % arguments.url)
return arguments
def read_url(url):
# From: https://medium.com/@raiyanquaium/how-to-web-scrape-using-beautiful-soup-in-python-without-running-into-http-error-403-554875e5abed
req = Request(url, headers={"User-Agent": "Mozilla/5.0"})
html = urlopen(req).read()
return text_from_html(html)
# From: https://stackoverflow.com/questions/1936466/how-to-scrape-only-visible-webpage-text-with-beautifulsoup
def tag_visible(element):
if element.parent.name in [
"style",
"script",
"head",
"title",
"meta",
"[document]",
]:
return False
if isinstance(element, Comment):
return False
return True
# From: https://stackoverflow.com/questions/1936466/how-to-scrape-only-visible-webpage-text-with-beautifulsoup
def text_from_html(body):
soup = BeautifulSoup(body, "html.parser")
texts = soup.findAll(string=True)
visible_texts = filter(tag_visible, texts)
return " ".join(t.strip() for t in visible_texts)
def get_open_ai_summary(url, html, api_key):
print("URL: " + url + "\n")
openai.api_key = api_key
instructions = """You will recieve the content of a webpage, with this content, do the following.
1) Provide a Title For the Content Received, based on the content of the webpage
2) Succinctly summarize content received as an abstract.
3) Extract a list of the top 5 keywords from abstract. Make this a comma separated list.
4) List 3 Melvil Decimal System ids to tag content received with. Just list the ID and Topic. Make this a comma separated list.
Example Output would look like this:
Title: The Demon Core and the Strange Death of Louis Slotin
Abstract: This article recounts the story of Louis Slotin, a physicist working on the Manhattan Project, who died as a result of a criticality accident involving a plutonium core. The article explores the details of the accident, the impact of radiation exposure on Slotin's health, and the safety measures implemented afterward.
Keywords: Demon Core, Louis Slotin, criticality accident, plutonium core, radiation exposure
Categories: 539.7 - Atomic and Nuclear Physics, 621.48 - Nuclear Engineering, 940.54 - Military History of World War II
"""
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=[
{
"role": "system",
"content": instructions,
},
{
"role": "user",
"content": html,
},
],
)
response = parse_response(response)
print(response)
def parse_response(open_ai_response):
return open_ai_response["choices"][0]["message"]["content"]
if __name__ == "__main__":
main()
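# Example invocation (the URL and key below are placeholders, not real values):
#   python summarize_and_categorize.py --url https://example.com/article --key sk-...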
| [
"You will recieve the content of a webpage, with this content, do the following.\n 1) Provide a Title For the Content Received, based on the content of the webpage\n 2) Succinctly summarize content received as an abstract.\n 3) Extract a list of the top 5 keywords from abstract. Make this a comma separated list.\n 4) List 3 Melvil Decimal System ids to tag content received with. Just list the ID and Topic. Make this a comma separated list.\n\n Example Output would look like this:\n\n\nTitle: The Demon Core and the Strange Death of Louis Slotin\n\nAbstract: This article recounts the story of Louis Slotin, a physicist working on the Manhattan Project, who died as a result of a criticality accident involving a plutonium core. The article explores the details of the accident, the impact of radiation exposure on Slotin's health, and the safety measures implemented afterward.\n\nKeywords: Demon Core, Louis Slotin, criticality accident, plutonium core, radiation exposure\n\nCategories: 539.7 - Atomic and Nuclear Physics, 621.48 - Nuclear Engineering, 940.54 - Military History of World War II\n\n "
] |
2024-01-10 | xiyichen/diffusers | src~diffusers~pipelines~stable_diffusion_xl~pipeline_stable_diffusion_xl.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from ...image_processor import VaeImageProcessor
from ...loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
from ...models import AutoencoderKL, UNet2DConditionModel
from ...models.attention_processor import (
AttnProcessor2_0,
LoRAAttnProcessor2_0,
LoRAXFormersAttnProcessor,
XFormersAttnProcessor,
)
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import (
is_accelerate_available,
is_accelerate_version,
is_invisible_watermark_available,
logging,
randn_tensor,
replace_example_docstring,
)
from ..pipeline_utils import DiffusionPipeline
from . import StableDiffusionXLPipelineOutput
if is_invisible_watermark_available():
from .watermark import StableDiffusionXLWatermarker
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> from diffusers import StableDiffusionXLPipeline
>>> pipe = StableDiffusionXLPipeline.from_pretrained(
... "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> prompt = "a photo of an astronaut riding a horse on mars"
>>> image = pipe(prompt).images[0]
```
"""
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
"""
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
"""
std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
# rescale the results from guidance (fixes overexposure)
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
return noise_cfg
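# Illustrative check (not part of the pipeline API): with guidance_rescale=1.0 the
# rescaled prediction recovers the per-sample std of the text-conditioned branch.
#   >>> text = torch.randn(2, 4, 8, 8)
#   >>> cfg = 7.5 * text
#   >>> torch.allclose(rescale_noise_cfg(cfg, text, guidance_rescale=1.0), text)
#   True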
class StableDiffusionXLPipeline(DiffusionPipeline, FromSingleFileMixin, LoraLoaderMixin):
r"""
Pipeline for text-to-image generation using Stable Diffusion XL.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
In addition the pipeline inherits the following loading methods:
- *LoRA*: [`StableDiffusionXLPipeline.load_lora_weights`]
- *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]
as well as the following saving methods:
- *LoRA*: [`loaders.StableDiffusionXLPipeline.save_lora_weights`]
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
Frozen text-encoder. Stable Diffusion XL uses the text portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
text_encoder_2 ([` CLIPTextModelWithProjection`]):
Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
specifically the
[laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
variant.
tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
tokenizer_2 (`CLIPTokenizer`):
Second Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
`stabilityai/stable-diffusion-xl-base-1-0`.
add_watermarker (`bool`, *optional*):
Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to
watermark output images. If not defined, it will default to True if the package is installed, otherwise no
watermarker will be used.
"""
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
text_encoder_2: CLIPTextModelWithProjection,
tokenizer: CLIPTokenizer,
tokenizer_2: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: KarrasDiffusionSchedulers,
force_zeros_for_empty_prompt: bool = True,
add_watermarker: Optional[bool] = None,
):
super().__init__()
self.register_modules(
vae=vae,
text_encoder=text_encoder,
text_encoder_2=text_encoder_2,
tokenizer=tokenizer,
tokenizer_2=tokenizer_2,
unet=unet,
scheduler=scheduler,
)
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
self.default_sample_size = self.unet.config.sample_size
add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
if add_watermarker:
self.watermark = StableDiffusionXLWatermarker()
else:
self.watermark = None
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
def enable_vae_slicing(self):
r"""
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
"""
self.vae.enable_slicing()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
def disable_vae_slicing(self):
r"""
Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
computing decoding in one step.
"""
self.vae.disable_slicing()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
def enable_vae_tiling(self):
r"""
Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.
"""
self.vae.enable_tiling()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
def disable_vae_tiling(self):
r"""
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
computing decoding in one step.
"""
self.vae.disable_tiling()
def enable_model_cpu_offload(self, gpu_id=0):
r"""
Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
`enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
"""
if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
device = torch.device(f"cuda:{gpu_id}")
if self.device.type != "cpu":
self.to("cpu", silence_dtype_warnings=True)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
model_sequence = (
[self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
)
model_sequence.extend([self.unet, self.vae])
hook = None
for cpu_offloaded_model in model_sequence:
_, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
# We'll offload the last model manually.
self.final_offload_hook = hook
def encode_prompt(
self,
prompt: str,
prompt_2: Optional[str] = None,
device: Optional[torch.device] = None,
num_images_per_prompt: int = 1,
do_classifier_free_guidance: bool = True,
negative_prompt: Optional[str] = None,
negative_prompt_2: Optional[str] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
lora_scale: Optional[float] = None,
):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `List[str]`, *optional*):
prompt to be encoded
prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
used in both text-encoders
device: (`torch.device`):
torch device
num_images_per_prompt (`int`):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`):
whether to use classifier free guidance or not
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
negative_prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
If not provided, pooled text embeddings will be generated from `prompt` input argument.
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
input argument.
lora_scale (`float`, *optional*):
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
"""
device = device or self._execution_device
# set lora scale so that monkey patched LoRA
# function of text encoder can correctly access it
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
self._lora_scale = lora_scale
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
# Define tokenizers and text encoders
tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
text_encoders = (
[self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
)
if prompt_embeds is None:
prompt_2 = prompt_2 or prompt
            # textual inversion: process multi-vector tokens if necessary
prompt_embeds_list = []
prompts = [prompt, prompt_2]
for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, tokenizer)
text_inputs = tokenizer(
prompt,
padding="max_length",
max_length=tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
text_input_ids, untruncated_ids
):
removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {tokenizer.model_max_length} tokens: {removed_text}"
)
prompt_embeds = text_encoder(
text_input_ids.to(device),
output_hidden_states=True,
)
# We are only ALWAYS interested in the pooled output of the final text encoder
pooled_prompt_embeds = prompt_embeds[0]
prompt_embeds = prompt_embeds.hidden_states[-2]
prompt_embeds_list.append(prompt_embeds)
prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
# get unconditional embeddings for classifier free guidance
zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
negative_prompt_embeds = torch.zeros_like(prompt_embeds)
negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
elif do_classifier_free_guidance and negative_prompt_embeds is None:
negative_prompt = negative_prompt or ""
negative_prompt_2 = negative_prompt_2 or negative_prompt
uncond_tokens: List[str]
if prompt is not None and type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt, negative_prompt_2]
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
uncond_tokens = [negative_prompt, negative_prompt_2]
negative_prompt_embeds_list = []
for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
if isinstance(self, TextualInversionLoaderMixin):
negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
max_length = prompt_embeds.shape[1]
uncond_input = tokenizer(
negative_prompt,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
negative_prompt_embeds = text_encoder(
uncond_input.input_ids.to(device),
output_hidden_states=True,
)
# We are only ALWAYS interested in the pooled output of the final text encoder
negative_pooled_prompt_embeds = negative_prompt_embeds[0]
negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
negative_prompt_embeds_list.append(negative_prompt_embeds)
negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
bs_embed, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
if do_classifier_free_guidance:
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = negative_prompt_embeds.shape[1]
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
bs_embed * num_images_per_prompt, -1
)
if do_classifier_free_guidance:
negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
bs_embed * num_images_per_prompt, -1
)
return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
def check_inputs(
self,
prompt,
prompt_2,
height,
width,
callback_steps,
negative_prompt=None,
negative_prompt_2=None,
prompt_embeds=None,
negative_prompt_embeds=None,
pooled_prompt_embeds=None,
negative_pooled_prompt_embeds=None,
):
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt_2 is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
f" {negative_prompt_embeds.shape}."
)
if prompt_embeds is not None and pooled_prompt_embeds is None:
raise ValueError(
"If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
)
if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
raise ValueError(
"If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
)
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if latents is None:
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
else:
latents = latents.to(device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
return latents
def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype):
add_time_ids = list(original_size + crops_coords_top_left + target_size)
passed_add_embed_dim = (
self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim
)
expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
if expected_add_embed_dim != passed_add_embed_dim:
raise ValueError(
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
)
add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
return add_time_ids
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
def upcast_vae(self):
dtype = self.vae.dtype
self.vae.to(dtype=torch.float32)
use_torch_2_0_or_xformers = isinstance(
self.vae.decoder.mid_block.attentions[0].processor,
(
AttnProcessor2_0,
XFormersAttnProcessor,
LoRAXFormersAttnProcessor,
LoRAAttnProcessor2_0,
),
)
# if xformers or torch_2_0 is used attention block does not need
# to be in float32 which can save lots of memory
if use_torch_2_0_or_xformers:
self.vae.post_quant_conv.to(dtype)
self.vae.decoder.conv_in.to(dtype)
self.vae.decoder.mid_block.to(dtype)
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
prompt_2: Optional[Union[str, List[str]]] = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,
denoising_end: Optional[float] = None,
guidance_scale: float = 5.0,
negative_prompt: Optional[Union[str, List[str]]] = None,
negative_prompt_2: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guidance_rescale: float = 0.0,
original_size: Optional[Tuple[int, int]] = None,
crops_coords_top_left: Tuple[int, int] = (0, 0),
target_size: Optional[Tuple[int, int]] = None,
negative_original_size: Optional[Tuple[int, int]] = None,
negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
negative_target_size: Optional[Tuple[int, int]] = None,
):
r"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
instead.
prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
used in both text-encoders
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
denoising_end (`float`, *optional*):
When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
completed before it is intentionally prematurely terminated. As a result, the returned sample will
still retain a substantial amount of noise as determined by the discrete timesteps selected by the
scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
"Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
guidance_scale (`float`, *optional*, defaults to 5.0):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
negative_prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`], will be ignored for others.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
to make generation deterministic.
latents (`torch.FloatTensor`, *optional*):
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
If not provided, pooled text embeddings will be generated from `prompt` input argument.
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
input argument.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generate image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
of a plain tuple.
callback (`Callable`, *optional*):
A function that will be called every `callback_steps` steps during inference. The function will be
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function will be called. If not specified, the callback will be
called at every step.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            guidance_rescale (`float`, *optional*, defaults to 0.0):
Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `φ` in equation 16. of
[Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
Guidance rescale factor should fix overexposure when using zero terminal SNR.
original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
`original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as
explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
`crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
`crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
`crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
For most cases, `target_size` should be set to the desired height and width of the generated image. If
not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in
section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
To negatively condition the generation process based on a specific image resolution. Part of SDXL's
micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's
micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
To negatively condition the generation process based on a target image resolution. It should be as same
as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
Examples:
Returns:
[`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`:
[`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
`tuple`. When returning a tuple, the first element is a list with the generated images.
"""
# 0. Default height and width to unet
height = height or self.default_sample_size * self.vae_scale_factor
width = width or self.default_sample_size * self.vae_scale_factor
original_size = original_size or (height, width)
target_size = target_size or (height, width)
# 1. Check inputs. Raise error if not correct
self.check_inputs(
prompt,
prompt_2,
height,
width,
callback_steps,
negative_prompt,
negative_prompt_2,
prompt_embeds,
negative_prompt_embeds,
pooled_prompt_embeds,
negative_pooled_prompt_embeds,
)
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# 3. Encode input prompt
text_encoder_lora_scale = (
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
)
(
prompt_embeds,
negative_prompt_embeds,
pooled_prompt_embeds,
negative_pooled_prompt_embeds,
) = self.encode_prompt(
prompt=prompt,
prompt_2=prompt_2,
device=device,
num_images_per_prompt=num_images_per_prompt,
do_classifier_free_guidance=do_classifier_free_guidance,
negative_prompt=negative_prompt,
negative_prompt_2=negative_prompt_2,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
pooled_prompt_embeds=pooled_prompt_embeds,
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
lora_scale=text_encoder_lora_scale,
)
# 4. Prepare timesteps
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps = self.scheduler.timesteps
# 5. Prepare latent variables
num_channels_latents = self.unet.config.in_channels
latents = self.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
height,
width,
prompt_embeds.dtype,
device,
generator,
latents,
)
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 7. Prepare added time ids & embeddings
add_text_embeds = pooled_prompt_embeds
add_time_ids = self._get_add_time_ids(
original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype
)
if negative_original_size is not None and negative_target_size is not None:
negative_add_time_ids = self._get_add_time_ids(
negative_original_size,
negative_crops_coords_top_left,
negative_target_size,
dtype=prompt_embeds.dtype,
)
else:
negative_add_time_ids = add_time_ids
if do_classifier_free_guidance:
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
prompt_embeds = prompt_embeds.to(device)
add_text_embeds = add_text_embeds.to(device)
add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
# 8. Denoising loop
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
# 7.1 Apply denoising_end
if denoising_end is not None and type(denoising_end) == float and denoising_end > 0 and denoising_end < 1:
discrete_timestep_cutoff = int(
round(
self.scheduler.config.num_train_timesteps
- (denoising_end * self.scheduler.config.num_train_timesteps)
)
)
num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
timesteps = timesteps[:num_inference_steps]
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
cross_attention_kwargs=cross_attention_kwargs,
added_cond_kwargs=added_cond_kwargs,
return_dict=False,
)[0]
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
if do_classifier_free_guidance and guidance_rescale > 0.0:
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
callback(i, t, latents)
# make sure the VAE is in float32 mode, as it overflows in float16
if self.vae.dtype == torch.float16 and self.vae.config.force_upcast:
self.upcast_vae()
latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
if not output_type == "latent":
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
else:
image = latents
return StableDiffusionXLPipelineOutput(images=image)
# apply watermark if available
if self.watermark is not None:
image = self.watermark.apply_watermark(image)
image = self.image_processor.postprocess(image, output_type=output_type)
# Offload last model to CPU
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (image,)
return StableDiffusionXLPipelineOutput(images=image)
    # Override to properly handle the loading and unloading of the additional text encoder.
def load_lora_weights(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs):
# We could have accessed the unet config from `lora_state_dict()` too. We pass
# it here explicitly to be able to tell that it's coming from an SDXL
# pipeline.
state_dict, network_alphas = self.lora_state_dict(
pretrained_model_name_or_path_or_dict,
unet_config=self.unet.config,
**kwargs,
)
self.load_lora_into_unet(state_dict, network_alphas=network_alphas, unet=self.unet)
text_encoder_state_dict = {k: v for k, v in state_dict.items() if "text_encoder." in k}
if len(text_encoder_state_dict) > 0:
self.load_lora_into_text_encoder(
text_encoder_state_dict,
network_alphas=network_alphas,
text_encoder=self.text_encoder,
prefix="text_encoder",
lora_scale=self.lora_scale,
)
text_encoder_2_state_dict = {k: v for k, v in state_dict.items() if "text_encoder_2." in k}
if len(text_encoder_2_state_dict) > 0:
self.load_lora_into_text_encoder(
text_encoder_2_state_dict,
network_alphas=network_alphas,
text_encoder=self.text_encoder_2,
prefix="text_encoder_2",
lora_scale=self.lora_scale,
)
@classmethod
def save_lora_weights(
self,
save_directory: Union[str, os.PathLike],
unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
is_main_process: bool = True,
weight_name: str = None,
save_function: Callable = None,
safe_serialization: bool = True,
):
state_dict = {}
def pack_weights(layers, prefix):
layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers
layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()}
return layers_state_dict
state_dict.update(pack_weights(unet_lora_layers, "unet"))
if text_encoder_lora_layers and text_encoder_2_lora_layers:
state_dict.update(pack_weights(text_encoder_lora_layers, "text_encoder"))
state_dict.update(pack_weights(text_encoder_2_lora_layers, "text_encoder_2"))
self.write_lora_layers(
state_dict=state_dict,
save_directory=save_directory,
is_main_process=is_main_process,
weight_name=weight_name,
save_function=save_function,
safe_serialization=safe_serialization,
)
def _remove_text_encoder_monkey_patch(self):
self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder)
self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder_2)
| [
"[PLACEHOLDER, PLACEHOLDER]",
"[]",
"False"
] |
2024-01-10 | flyingbearHK/DB-GPT | pilot~source_embedding~chn_document_splitter.py | import re
from typing import List
from langchain.text_splitter import CharacterTextSplitter
class CHNDocumentSplitter(CharacterTextSplitter):
def __init__(self, pdf: bool = False, sentence_size: int = None, **kwargs):
super().__init__(**kwargs)
self.pdf = pdf
self.sentence_size = sentence_size
def split_text(self, text: str) -> List[str]:
if self.pdf:
text = re.sub(r"\n{3,}", r"\n", text)
text = re.sub("\s", " ", text)
text = re.sub("\n\n", "", text)
text = re.sub(r"([;;.!?。!?\?])([^”’])", r"\1\n\2", text)
text = re.sub(r'(\.{6})([^"’”」』])', r"\1\n\2", text)
text = re.sub(r'(\…{2})([^"’”」』])', r"\1\n\2", text)
text = re.sub(r'([;;!?。!?\?]["’”」』]{0,2})([^;;!?,。!?\?])', r"\1\n\2", text)
text = text.rstrip()
ls = [i for i in text.split("\n") if i]
for ele in ls:
if len(ele) > self.sentence_size:
ele1 = re.sub(r'([,,.]["’”」』]{0,2})([^,,.])', r"\1\n\2", ele)
ele1_ls = ele1.split("\n")
for ele_ele1 in ele1_ls:
if len(ele_ele1) > self.sentence_size:
ele_ele2 = re.sub(
r'([\n]{1,}| {2,}["’”」』]{0,2})([^\s])', r"\1\n\2", ele_ele1
)
ele2_ls = ele_ele2.split("\n")
for ele_ele2 in ele2_ls:
if len(ele_ele2) > self.sentence_size:
ele_ele3 = re.sub(
'( ["’”」』]{0,2})([^ ])', r"\1\n\2", ele_ele2
)
ele2_id = ele2_ls.index(ele_ele2)
ele2_ls = (
ele2_ls[:ele2_id]
+ [i for i in ele_ele3.split("\n") if i]
+ ele2_ls[ele2_id + 1 :]
)
ele_id = ele1_ls.index(ele_ele1)
ele1_ls = (
ele1_ls[:ele_id]
+ [i for i in ele2_ls if i]
+ ele1_ls[ele_id + 1 :]
)
id = ls.index(ele)
ls = ls[:id] + [i for i in ele1_ls if i] + ls[id + 1 :]
return ls
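# --- hypothetical usage sketch ---
# sentence_size and the sample sentence are illustrative values only.
if __name__ == "__main__":
    splitter = CHNDocumentSplitter(pdf=False, sentence_size=100)
    print(splitter.split_text("第一句話。第二句話!第三句話?"))
    # -> one chunk per sentence; chunks longer than sentence_size are split further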
| [] |
2024-01-10 | flyingbearHK/DB-GPT | pilot~source_embedding~ppt_embedding.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from typing import List
from langchain.document_loaders import UnstructuredPowerPointLoader
from langchain.schema import Document
from langchain.text_splitter import SpacyTextSplitter
from pilot.configs.config import Config
from pilot.source_embedding import SourceEmbedding, register
CFG = Config()
class PPTEmbedding(SourceEmbedding):
"""ppt embedding for read ppt document."""
def __init__(self, file_path, vector_store_config):
"""Initialize with pdf path."""
super().__init__(file_path, vector_store_config)
self.file_path = file_path
self.vector_store_config = vector_store_config
@register
def read(self):
"""Load from ppt path."""
loader = UnstructuredPowerPointLoader(self.file_path)
textsplitter = SpacyTextSplitter(
pipeline="zh_core_web_sm",
chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
chunk_overlap=200,
)
return loader.load_and_split(textsplitter)
@register
def data_process(self, documents: List[Document]):
i = 0
for d in documents:
documents[i].page_content = d.page_content.replace("\n", "")
i += 1
return documents
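# --- hypothetical usage sketch (the path and store name are placeholders) ---
# emb = PPTEmbedding("./docs/slides.pptx", {"vector_store_name": "ppt_demo"})
# chunks = emb.data_process(emb.read())  # split the slides, then strip newlines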
| [] |
2024-01-10 | flyingbearHK/DB-GPT | pilot~source_embedding~csv_embedding.py | from typing import Dict, List, Optional
from langchain.document_loaders import CSVLoader
from langchain.schema import Document
from pilot.source_embedding import SourceEmbedding, register
class CSVEmbedding(SourceEmbedding):
"""csv embedding for read csv document."""
def __init__(
self,
file_path,
vector_store_config,
embedding_args: Optional[Dict] = None,
):
"""Initialize with csv path."""
super().__init__(file_path, vector_store_config)
self.file_path = file_path
self.vector_store_config = vector_store_config
self.embedding_args = embedding_args
@register
def read(self):
"""Load from csv path."""
loader = CSVLoader(file_path=self.file_path)
return loader.load()
@register
def data_process(self, documents: List[Document]):
i = 0
for d in documents:
documents[i].page_content = d.page_content.replace("\n", "")
i += 1
return documents
| [] |
2024-01-10 | flyingbearHK/DB-GPT | pilot~vector_store~chroma_store.py | import os
from langchain.vectorstores import Chroma
from pilot.configs.model_config import KNOWLEDGE_UPLOAD_ROOT_PATH
from pilot.logs import logger
from pilot.vector_store.vector_store_base import VectorStoreBase
class ChromaStore(VectorStoreBase):
"""chroma database"""
def __init__(self, ctx: {}) -> None:
self.ctx = ctx
self.embeddings = ctx["embeddings"]
self.persist_dir = os.path.join(
KNOWLEDGE_UPLOAD_ROOT_PATH, ctx["vector_store_name"] + ".vectordb"
)
self.vector_store_client = Chroma(
persist_directory=self.persist_dir, embedding_function=self.embeddings
)
    def similar_search(self, text, topk):
logger.info("ChromaStore similar search")
return self.vector_store_client.similarity_search(text, topk)
def vector_name_exists(self):
return (
os.path.exists(self.persist_dir) and len(os.listdir(self.persist_dir)) > 0
)
def load_document(self, documents):
logger.info("ChromaStore load document")
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
self.vector_store_client.add_texts(texts=texts, metadatas=metadatas)
self.vector_store_client.persist()
| [] |
2024-01-10 | flyingbearHK/DB-GPT | pilot~summary~db_summary_client.py | import json
import uuid
from langchain.embeddings import HuggingFaceEmbeddings, logger
from pilot.configs.config import Config
from pilot.configs.model_config import LLM_MODEL_CONFIG
from pilot.scene.base import ChatScene
from pilot.scene.base_chat import BaseChat
from pilot.source_embedding.knowledge_embedding import KnowledgeEmbedding
from pilot.source_embedding.string_embedding import StringEmbedding
from pilot.summary.mysql_db_summary import MysqlSummary
from pilot.scene.chat_factory import ChatFactory
CFG = Config()
chat_factory = ChatFactory()
class DBSummaryClient:
"""db summary client, provide db_summary_embedding(put db profile and table profile summary into vector store)
, get_similar_tables method(get user query related tables info)
"""
def __init__(self):
pass
def db_summary_embedding(self, dbname):
"""put db profile and table profile summary into vector store"""
if CFG.LOCAL_DB_HOST is not None and CFG.LOCAL_DB_PORT is not None:
db_summary_client = MysqlSummary(dbname)
embeddings = HuggingFaceEmbeddings(
model_name=LLM_MODEL_CONFIG[CFG.EMBEDDING_MODEL]
)
vector_store_config = {
"vector_store_name": dbname + "_summary",
"embeddings": embeddings,
}
embedding = StringEmbedding(
file_path=db_summary_client.get_summery(),
vector_store_config=vector_store_config,
)
self.init_db_profile(db_summary_client, dbname, embeddings)
if not embedding.vector_name_exist():
if CFG.SUMMARY_CONFIG == "FAST":
for vector_table_info in db_summary_client.get_summery():
embedding = StringEmbedding(
vector_table_info,
vector_store_config,
)
embedding.source_embedding()
else:
embedding = StringEmbedding(
file_path=db_summary_client.get_summery(),
vector_store_config=vector_store_config,
)
embedding.source_embedding()
for (
table_name,
table_summary,
) in db_summary_client.get_table_summary().items():
table_vector_store_config = {
"vector_store_name": dbname + "_" + table_name + "_ts",
"embeddings": embeddings,
}
embedding = StringEmbedding(
table_summary,
table_vector_store_config,
)
embedding.source_embedding()
logger.info("db summary embedding success")
def get_db_summary(self, dbname, query, topk):
vector_store_config = {
"vector_store_name": dbname + "_profile",
}
knowledge_embedding_client = KnowledgeEmbedding(
model_name=LLM_MODEL_CONFIG[CFG.EMBEDDING_MODEL],
vector_store_config=vector_store_config,
)
table_docs = knowledge_embedding_client.similar_search(query, topk)
ans = [d.page_content for d in table_docs]
return ans
def get_similar_tables(self, dbname, query, topk):
"""get user query related tables info"""
vector_store_config = {
"vector_store_name": dbname + "_summary",
}
knowledge_embedding_client = KnowledgeEmbedding(
model_name=LLM_MODEL_CONFIG[CFG.EMBEDDING_MODEL],
vector_store_config=vector_store_config,
)
if CFG.SUMMARY_CONFIG == "FAST":
table_docs = knowledge_embedding_client.similar_search(query, topk)
related_tables = [
json.loads(table_doc.page_content)["table_name"]
for table_doc in table_docs
]
else:
table_docs = knowledge_embedding_client.similar_search(query, 1)
# prompt = KnownLedgeBaseQA.build_db_summary_prompt(
# query, table_docs[0].page_content
# )
related_tables = _get_llm_response(
query, dbname, table_docs[0].page_content
)
related_table_summaries = []
for table in related_tables:
vector_store_config = {
"vector_store_name": dbname + "_" + table + "_ts",
}
knowledge_embedding_client = KnowledgeEmbedding(
file_path="",
model_name=LLM_MODEL_CONFIG[CFG.EMBEDDING_MODEL],
vector_store_config=vector_store_config,
)
table_summery = knowledge_embedding_client.similar_search(query, 1)
related_table_summaries.append(table_summery[0].page_content)
return related_table_summaries
def init_db_summary(self):
db = CFG.local_db
dbs = db.get_database_list()
for dbname in dbs:
self.db_summary_embedding(dbname)
def init_db_profile(self, db_summary_client, dbname, embeddings):
profile_store_config = {
"vector_store_name": dbname + "_profile",
"embeddings": embeddings,
}
embedding = StringEmbedding(
file_path=db_summary_client.get_db_summery(),
vector_store_config=profile_store_config,
)
if not embedding.vector_name_exist():
docs = []
docs.extend(embedding.read_batch())
for table_summary in db_summary_client.table_info_json():
embedding = StringEmbedding(
table_summary,
profile_store_config,
)
docs.extend(embedding.read_batch())
embedding.index_to_store(docs)
logger.info("init db profile success...")
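# Ask the LLM (via the InnerChatDBSummary chat scene) which tables are relevant to the user query.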
def _get_llm_response(query, db_input, dbsummary):
chat_param = {
"temperature": 0.7,
"max_new_tokens": 512,
"chat_session_id": uuid.uuid1(),
"user_input": query,
"db_select": db_input,
"db_summary": dbsummary,
}
chat: BaseChat = chat_factory.get_implementation(
ChatScene.InnerChatDBSummary.value, **chat_param
)
res = chat.nostream_call()
return json.loads(res)["table"]
| [] |
2024-01-10 | flyingbearHK/DB-GPT | pilot~source_embedding~string_embedding.py | from typing import List
from langchain.schema import Document
from pilot import SourceEmbedding, register
class StringEmbedding(SourceEmbedding):
"""string embedding for read string document."""
def __init__(self, file_path, vector_store_config):
"""Initialize with pdf path."""
super().__init__(file_path, vector_store_config)
self.file_path = file_path
self.vector_store_config = vector_store_config
@register
def read(self):
"""Load from String path."""
metadata = {"source": "db_summary"}
return [Document(page_content=self.file_path, metadata=metadata)]
@register
def data_process(self, documents: List[Document]):
i = 0
for d in documents:
documents[i].page_content = d.page_content.replace("\n", "")
i += 1
return documents
| [] |
2024-01-10 | flyingbearHK/DB-GPT | pilot~source_embedding~pdf_embedding.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from typing import List
from langchain.document_loaders import PyPDFLoader
from langchain.schema import Document
from langchain.text_splitter import SpacyTextSplitter
from pilot.configs.config import Config
from pilot.source_embedding import SourceEmbedding, register
CFG = Config()
class PDFEmbedding(SourceEmbedding):
"""pdf embedding for read pdf document."""
def __init__(self, file_path, vector_store_config):
"""Initialize with pdf path."""
super().__init__(file_path, vector_store_config)
self.file_path = file_path
self.vector_store_config = vector_store_config
@register
def read(self):
"""Load from pdf path."""
loader = PyPDFLoader(self.file_path)
# textsplitter = CHNDocumentSplitter(
# pdf=True, sentence_size=CFG.KNOWLEDGE_CHUNK_SIZE
# )
textsplitter = SpacyTextSplitter(
pipeline="zh_core_web_sm",
chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
chunk_overlap=100,
)
return loader.load_and_split(textsplitter)
@register
def data_process(self, documents: List[Document]):
i = 0
for d in documents:
documents[i].page_content = d.page_content.replace("\n", "")
i += 1
return documents
| [] |
2024-01-10 | flyingbearHK/DB-GPT | pilot~source_embedding~knowledge_embedding.py | from typing import Optional
from chromadb.errors import NotEnoughElementsException
from langchain.embeddings import HuggingFaceEmbeddings
from pilot.configs.config import Config
from pilot.source_embedding.csv_embedding import CSVEmbedding
from pilot.source_embedding.markdown_embedding import MarkdownEmbedding
from pilot.source_embedding.pdf_embedding import PDFEmbedding
from pilot.source_embedding.ppt_embedding import PPTEmbedding
from pilot.source_embedding.url_embedding import URLEmbedding
from pilot.source_embedding.word_embedding import WordEmbedding
from pilot.vector_store.connector import VectorStoreConnector
CFG = Config()
KnowledgeEmbeddingType = {
".txt": (MarkdownEmbedding, {}),
".md": (MarkdownEmbedding, {}),
".pdf": (PDFEmbedding, {}),
".doc": (WordEmbedding, {}),
".docx": (WordEmbedding, {}),
".csv": (CSVEmbedding, {}),
".ppt": (PPTEmbedding, {}),
".pptx": (PPTEmbedding, {}),
}
class KnowledgeEmbedding:
def __init__(
self,
model_name,
vector_store_config,
file_type: Optional[str] = "default",
file_path: Optional[str] = None,
):
"""Initialize with Loader url, model_name, vector_store_config"""
self.file_path = file_path
self.model_name = model_name
self.vector_store_config = vector_store_config
self.file_type = file_type
self.embeddings = HuggingFaceEmbeddings(model_name=self.model_name)
self.vector_store_config["embeddings"] = self.embeddings
def knowledge_embedding(self):
self.knowledge_embedding_client = self.init_knowledge_embedding()
self.knowledge_embedding_client.source_embedding()
def knowledge_embedding_batch(self, docs):
# docs = self.knowledge_embedding_client.read_batch()
self.knowledge_embedding_client.index_to_store(docs)
def read(self):
return self.knowledge_embedding_client.read_batch()
def init_knowledge_embedding(self):
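        # Pick the embedding class based on the file type: URLs get URLEmbedding, everything
        # else is dispatched on the file extension via KnowledgeEmbeddingType.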
if self.file_type == "url":
embedding = URLEmbedding(
file_path=self.file_path,
vector_store_config=self.vector_store_config,
)
return embedding
extension = "." + self.file_path.rsplit(".", 1)[-1]
if extension in KnowledgeEmbeddingType:
knowledge_class, knowledge_args = KnowledgeEmbeddingType[extension]
embedding = knowledge_class(
self.file_path,
vector_store_config=self.vector_store_config,
**knowledge_args,
)
return embedding
        raise ValueError(f"Unsupported knowledge file type '{extension}'")
def similar_search(self, text, topk):
vector_client = VectorStoreConnector(
CFG.VECTOR_STORE_TYPE, self.vector_store_config
)
try:
ans = vector_client.similar_search(text, topk)
except NotEnoughElementsException:
ans = vector_client.similar_search(text, 1)
return ans
def vector_exist(self):
vector_client = VectorStoreConnector(
CFG.VECTOR_STORE_TYPE, self.vector_store_config
)
return vector_client.vector_name_exists()
| [] |
2024-01-10 | flyingbearHK/DB-GPT | pilot~source_embedding~url_embedding.py | from typing import List
from bs4 import BeautifulSoup
from langchain.document_loaders import WebBaseLoader
from langchain.schema import Document
from langchain.text_splitter import CharacterTextSplitter
from pilot.configs.config import Config
from pilot.configs.model_config import KNOWLEDGE_CHUNK_SPLIT_SIZE
from pilot.source_embedding import SourceEmbedding, register
from pilot.source_embedding.chn_document_splitter import CHNDocumentSplitter
CFG = Config()
class URLEmbedding(SourceEmbedding):
"""url embedding for read url document."""
def __init__(self, file_path, vector_store_config):
"""Initialize with url path."""
super().__init__(file_path, vector_store_config)
self.file_path = file_path
self.vector_store_config = vector_store_config
@register
def read(self):
"""Load from url path."""
loader = WebBaseLoader(web_path=self.file_path)
if CFG.LANGUAGE == "en":
text_splitter = CharacterTextSplitter(
chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
chunk_overlap=20,
length_function=len,
)
else:
text_splitter = CHNDocumentSplitter(pdf=True, sentence_size=1000)
return loader.load_and_split(text_splitter)
@register
def data_process(self, documents: List[Document]):
i = 0
for d in documents:
content = d.page_content.replace("\n", "")
soup = BeautifulSoup(content, "html.parser")
for tag in soup(["!doctype", "meta"]):
tag.extract()
documents[i].page_content = soup.get_text()
i += 1
return documents
| [] |
2024-01-10 | flyingbearHK/DB-GPT | pilot~source_embedding~pdf_loader.py | """Loader that loads image files."""
import os
from typing import List
import fitz
from langchain.document_loaders.unstructured import UnstructuredFileLoader
from paddleocr import PaddleOCR
class UnstructuredPaddlePDFLoader(UnstructuredFileLoader):
"""Loader that uses unstructured to load image files, such as PNGs and JPGs."""
def _get_elements(self) -> List:
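        # Extract the embedded text of each page with PyMuPDF, OCR any embedded images with
        # PaddleOCR, and collect everything into a temporary .txt file for unstructured to partition.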
def pdf_ocr_txt(filepath, dir_path="tmp_files"):
full_dir_path = os.path.join(os.path.dirname(filepath), dir_path)
if not os.path.exists(full_dir_path):
os.makedirs(full_dir_path)
filename = os.path.split(filepath)[-1]
ocr = PaddleOCR(lang="ch", use_gpu=False, show_log=False)
doc = fitz.open(filepath)
txt_file_path = os.path.join(full_dir_path, "%s.txt" % (filename))
img_name = os.path.join(full_dir_path, ".tmp.png")
with open(txt_file_path, "w", encoding="utf-8") as fout:
for i in range(doc.page_count):
page = doc[i]
text = page.get_text("")
fout.write(text)
fout.write("\n")
img_list = page.get_images()
for img in img_list:
pix = fitz.Pixmap(doc, img[0])
pix.save(img_name)
result = ocr.ocr(img_name)
ocr_result = [i[1][0] for line in result for i in line]
fout.write("\n".join(ocr_result))
os.remove(img_name)
return txt_file_path
txt_file_path = pdf_ocr_txt(self.file_path)
from unstructured.partition.text import partition_text
return partition_text(filename=txt_file_path, **self.unstructured_kwargs)
if __name__ == "__main__":
filepath = os.path.join(
os.path.dirname(os.path.dirname(__file__)), "content", "samples", "test_py.pdf"
)
loader = UnstructuredPaddlePDFLoader(filepath, mode="elements")
docs = loader.load()
for doc in docs:
print(doc)
| [] |
2024-01-10 | flyingbearHK/DB-GPT | pilot~source_embedding~word_embedding.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from typing import List
from langchain.document_loaders import PyPDFLoader, UnstructuredWordDocumentLoader
from langchain.schema import Document
from pilot.configs.config import Config
from pilot.source_embedding import SourceEmbedding, register
from pilot.source_embedding.chn_document_splitter import CHNDocumentSplitter
CFG = Config()
class WordEmbedding(SourceEmbedding):
"""word embedding for read word document."""
def __init__(self, file_path, vector_store_config):
"""Initialize with word path."""
super().__init__(file_path, vector_store_config)
self.file_path = file_path
self.vector_store_config = vector_store_config
@register
def read(self):
"""Load from word path."""
loader = UnstructuredWordDocumentLoader(self.file_path)
textsplitter = CHNDocumentSplitter(
pdf=True, sentence_size=CFG.KNOWLEDGE_CHUNK_SIZE
)
return loader.load_and_split(textsplitter)
@register
def data_process(self, documents: List[Document]):
i = 0
for d in documents:
documents[i].page_content = d.page_content.replace("\n", "")
i += 1
return documents
| [] |
2024-01-10 | flyingbearHK/DB-GPT | pilot~source_embedding~EncodeTextLoader.py | from typing import List, Optional
import chardet
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class EncodeTextLoader(BaseLoader):
"""Load text files."""
def __init__(self, file_path: str, encoding: Optional[str] = None):
"""Initialize with file path."""
self.file_path = file_path
self.encoding = encoding
def load(self) -> List[Document]:
"""Load from file path."""
with open(self.file_path, "rb") as f:
raw_text = f.read()
result = chardet.detect(raw_text)
if result["encoding"] is None:
text = raw_text.decode("utf-8")
else:
text = raw_text.decode(result["encoding"])
metadata = {"source": self.file_path}
return [Document(page_content=text, metadata=metadata)]
| [] |
2024-01-10 | flyingbearHK/DB-GPT | pilot~source_embedding~markdown_embedding.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from typing import List
import markdown
from bs4 import BeautifulSoup
from langchain.schema import Document
from langchain.text_splitter import SpacyTextSplitter
from pilot.configs.config import Config
from pilot.source_embedding import SourceEmbedding, register
from pilot.source_embedding.EncodeTextLoader import EncodeTextLoader
from pilot.source_embedding.chn_document_splitter import CHNDocumentSplitter
CFG = Config()
class MarkdownEmbedding(SourceEmbedding):
"""markdown embedding for read markdown document."""
def __init__(self, file_path, vector_store_config):
"""Initialize with markdown path."""
super().__init__(file_path, vector_store_config)
self.file_path = file_path
self.vector_store_config = vector_store_config
# self.encoding = encoding
@register
def read(self):
"""Load from markdown path."""
loader = EncodeTextLoader(self.file_path)
textsplitter = SpacyTextSplitter(
pipeline="zh_core_web_sm",
chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
chunk_overlap=100,
)
return loader.load_and_split(textsplitter)
@register
def data_process(self, documents: List[Document]):
i = 0
for d in documents:
content = markdown.markdown(d.page_content)
soup = BeautifulSoup(content, "html.parser")
for tag in soup(["!doctype", "meta", "i.fa"]):
tag.extract()
documents[i].page_content = soup.get_text()
documents[i].page_content = documents[i].page_content.replace("\n", " ")
i += 1
return documents
| [] |
2024-01-10 | sahithyaravi/VLC_Commonsense | gpt3~gpt3_fewshot.py | from tqdm import tqdm
import json
import openai
import logging
import argparse
from utils import load_json, image_path_to_id, imageid_to_path, qdict_to_df, save_json
from collections import defaultdict
import os
from config import *
import random
import pandas as pd
from collections import Counter
import time
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
random.seed(1)
openai.api_key = ""
def combine_all_dfs(path):
files = os.listdir(path)
files.sort()
print(files)
ddf = pd.DataFrame()
for file in files:
if ".csv" in file:
cur_df = pd.read_csv(path + file)
ddf = pd.concat([ddf, cur_df])
ddf.reset_index(drop=True, inplace=True)
ddf.to_csv(path+"gpt3_predictions_train2014.csv")
def main():
split = 'train2014'
annotations = load_json(f'{data_root}/ok-vqa/mscoco_{split}_annotations.json')
questions = load_json(f'{data_root}/ok-vqa/OpenEnded_mscoco_{split}_questions.json')
captions = load_json(f'{data_root}/vqa/expansion/captions/captions_{split}_vqa.json')
# Get most repeated answer
ans_list = annotations['annotations']
answer_map = {}
for d in ans_list:
q = str(d['question_id'])
answers = [ans_dict['answer'] for ans_dict in d['answers']]
most_common_words = [word for word, word_count in Counter(answers).most_common(1)]
best_answer = most_common_words[0]
answer_map[q] = best_answer
df = qdict_to_df(questions)
df["answer"] = df["question_id"].map(answer_map)
df["caption"] = df["image_path"].map(captions)
print("The head of the dataframe", df.head())
print("The number of examples", df.shape)
# Had to do in batches of 100
STARTS = list(range(0, 5000, 100))
for START in STARTS:
END = START + 100
print(START, END)
train_examples = df[:8]
test_examples = df[START:END]
predictions = solve(openai, train_examples, test_examples)
out = pd.DataFrame()
out["qids"] = test_examples["question_id"].values
out["gpt3"] = predictions
out["question"] = test_examples["question"].values
out["gold"] = test_examples["answer"].values
out.to_csv(f"final_outputs/gpt3/{split}/gpt3_predictions{END}.csv")
def create_prompt_per_example(row, include_answer=False):
"""
Creates a prompt for GPT-3
"""
context = row["caption"]
question = row["question"]
gold = row["answer"]
c = f"Context: {context}\n"
q = f"Q: {question} "
a = f"A: {gold}\n\n" if include_answer else "A:"
return c + q + a
def solve(openai, train_df, test_df):
"""
    Query GPT-3 with few-shot prompting
    :param openai: the OpenAI API module
    :param train_df: DataFrame of few-shot examples (caption, question, answer)
    :param test_df: DataFrame of test examples (caption, question, answer)
    :return: the predictions
"""
prompt = "Please answer the question according to the above context.\n"
for index, row in train_df.iterrows():
prompt += create_prompt_per_example(row, include_answer=True)
predictions = []
with tqdm(total=test_df.shape[0]) as pbar:
for index, row in test_df.iterrows():
gpt3_prompt = f"{prompt}{create_prompt_per_example(row, include_answer=False)}"
pbar.update(1)
print(gpt3_prompt)
response = openai.Completion.create(
engine="text-davinci-001",
prompt=gpt3_prompt,
temperature=0.7,
max_tokens=10,
top_p=0.9,
frequency_penalty=0.0,
presence_penalty=0.0,
stop=["\n"]
)
pred = response['choices'][0]['text'].strip()
predictions.append(pred)
time.sleep(20)
return predictions
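# Convert the per-split prediction CSVs into {question_id: answer} JSON files used downstream.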
def json_conv(filename):
train_df = pd.read_csv(filename)
train_df["qids"] = train_df["qids"].astype(str)
train_df.fillna('', inplace=True)
keys = list(train_df["qids"].values)
vals = list(train_df["gpt3"].values)
d = dict(zip(keys, vals))
save_json('final_outputs/gpt3/train2014_gpt3.json', d)
p = 'final_outputs/gpt3/val2014/gpt3_predictions_val2014.csv'
v = pd.read_csv(p)
v.fillna('', inplace=True)
v["qids"] = v["qids"].astype(str)
keys = list(v["qids"].values)
vals = list(v["gpt3"].values)
d = dict(zip(keys, vals))
save_json('final_outputs/gpt3/val2014_gpt3.json', d)
if __name__ == "__main__":
main() | [
"Please answer the question according to the above context.\n",
"f\"{prompt}{create_prompt_per_example(row, include_answer=False)}"
] |
2024-01-10 | csmizzle/gca | scratch~rpg.py | import guidance
# set the default language model used to execute guidance programs
guidance.llm = guidance.llms.OpenAI(
"gpt-4",
api_key="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"
)
# we can pre-define valid option sets
valid_weapons = ["sword", "axe", "mace", "spear", "bow", "crossbow"]
# define the prompt
program = guidance("""The following is a character profile for an RPG game in JSON format.
```json
{
"description": "{{description}}",
"name": "{{gen 'name'}}",
"age": {{gen 'age' pattern='[0-9]+' stop=','}},
"armor": "{{#select 'armor'}}leather{{or}}chainmail{{or}}plate{{/select}}",
"weapon": "{{select 'weapon' options=valid_weapons}}",
"class": "{{gen 'class'}}",
"mantra": "{{gen 'mantra'}}",
"strength": {{gen 'strength' pattern='[0-9]+' stop=','}},
"items": [{{#geneach 'items' num_iterations=3}}
"{{gen 'this'}}",{{/geneach}}
]
}```""")
out = program(description="A strong and nimble fighter.", valid_weapons=valid_weapons)
out.variables()
print(str(out).split("```json")[1][:-3]) | [] |
2024-01-10 | Rowing0914/TF_RL | tf_rl~common~wrappers.py | import numpy as np
import os, math, pandas
os.environ.setdefault('PATH', '')
from collections import deque
import gym
from gym import spaces
# for those who installed ROS on local env
import sys
try:
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
except:
pass
import cv2
cv2.ocl.setUseOpenCL(False)
"""
Wrapper for Cartpole
This is to change the reward at the terminal state because originally it is set as 1.0
check here: https://github.com/openai/gym/blob/master/gym/envs/classic_control/cartpole.py
"""
class CartPole_Pixel(gym.Wrapper):
"""
    Wrapper for getting raw pixels in the cartpole env
    observation: 400x400x1 => (Width, Height, Colour-channel)
    we discard 100 px from each side of the width to make the frame square (divisible) for the CNN
"""
def __init__(self, env):
self.width = 400
self.height = 400
gym.Wrapper.__init__(self, env)
self.env = env.unwrapped
# self.env.seed(123) # fix the randomness for reproducibility purpose
"""
start new thread to deal with getting raw image
"""
from tf_rl.env.cartpole_pixel import RenderThread
self.renderer = RenderThread(env)
self.renderer.start()
def _pre_process(self, frame):
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
frame = np.expand_dims(frame, -1)
return frame
def step(self, ac):
_, reward, done, info = self.env.step(ac)
self.renderer.begin_render() # move screen one step
observation = self._pre_process(self.renderer.get_screen())
if done:
reward = -1.0 # reward at a terminal state
return observation, reward, done, info
def reset(self, **kwargs):
self.env.reset()
self.renderer.begin_render() # move screen one step
return self._pre_process(self.renderer.get_screen()) # overwrite observation by raw image pixels of screen
def close(self):
self.renderer.stop() # terminate the threads
self.renderer.join() # collect the dead threads and notice all threads are safely terminated
if self.env:
return self.env.close()
class MyWrapper(gym.Wrapper):
"""
    wrapper to fix the randomness in the gym env
"""
def __init__(self, env):
gym.Wrapper.__init__(self, env)
self.env = env
# self.env.seed(123) # fix the randomness for reproducibility purpose
def step(self, ac):
observation, reward, done, info = self.env.step(ac)
if done:
reward = -1.0 # reward at a terminal state
return observation, reward, done, info
def reset(self, **kwargs):
return self.env.reset()
class DiscretisedEnv(gym.Wrapper):
"""
    Wrapper for getting discretised observations in cartpole
Inspired by https://medium.com/@tuzzer/cart-pole-balancing-with-q-learning-b54c6068d947
"""
def __init__(self, env, n_bins=10):
gym.Wrapper.__init__(self, env)
self.env = env
self.cart_position_high = env.observation_space.high[0]
self.cart_velocity_high = env.observation_space.high[1]
self.pole_angle_high = env.observation_space.high[2]
self.pole_velocity_high = env.observation_space.high[3]
self.cart_position_low = env.observation_space.low[0]
self.cart_velocity_low = env.observation_space.low[1]
self.pole_angle_low = env.observation_space.low[2]
self.pole_velocity_low = env.observation_space.low[3]
self.cart_position_bins = \
pandas.cut([self.cart_position_high, self.cart_position_low], bins=n_bins, retbins=True)[1][1:-1]
self.cart_velocity_bins = \
pandas.cut([self.cart_velocity_high, self.cart_velocity_low], bins=n_bins, retbins=True)[1][1:-1]
self.pole_angle_bins = pandas.cut([self.pole_angle_high, self.pole_angle_low], bins=n_bins, retbins=True)[1][
1:-1]
self.pole_velocity_bins = \
            pandas.cut([self.pole_velocity_high, self.pole_velocity_low], bins=n_bins, retbins=True)[1][1:-1]
self.buckets = (1, 1, 6, 12,)
# self.env.seed(123) # fix the randomness for reproducibility purpose
def step(self, ac):
observation, reward, done, info = self.env.step(ac)
if done:
reward = -1.0 # reward at a terminal state
# return self._discretise(observation), reward, done, info
return self._discretise_feature_engineered(observation), reward, done, info
def reset(self, **kwargs):
# return self._discretise(self.env.reset())
return self._discretise_feature_engineered(self.env.reset())
def _discretise(self, obs):
"""Equally discretise the observation"""
obs[0] = np.digitize(obs[0], self.cart_position_bins)
obs[1] = np.digitize(obs[1], self.cart_velocity_bins)
obs[2] = np.digitize(obs[2], self.pole_angle_bins)
obs[3] = np.digitize(obs[3], self.pole_velocity_bins)
obs = obs.astype(int)
return tuple(obs.reshape(1, -1)[0])
def _discretise_feature_engineered(self, obs):
"""
        Based on some experiments, we can see empirically that angle acceleration is the most important feature.
        So, using self.buckets, we discretise some features and drop the others.
:param obs:
:return:
"""
upper_bounds = [self.cart_position_high, 0.5, self.pole_angle_high, math.radians(50)]
lower_bounds = [self.cart_position_low, -0.5, self.pole_angle_low, -math.radians(50)]
ratios = [(obs[i] + abs(lower_bounds[i])) / (upper_bounds[i] - lower_bounds[i]) for i in
range(len(obs))]
new_obs = [int(round((self.buckets[i] - 1) * ratios[i])) for i in range(len(obs))]
new_obs = [min(self.buckets[i] - 1, max(0, new_obs[i])) for i in range(len(obs))]
return tuple(new_obs)
class MyWrapper_revertable(gym.Wrapper):
"""
    Wrapper for reverting a time-step; this is mainly used in Q-learning with a Particle Filter,
    where we need to simulate each particle on the cartpole env
"""
def __init__(self, env):
gym.Wrapper.__init__(self, env)
self.env = env.unwrapped
# self.env.seed(123) # fix the randomness for reproducibility purpose
def step(self, ac):
next_state, reward, done, info = self.env.step(ac)
if done:
reward = -1.0 # reward at a terminal state
return next_state, reward, done, info
def reset(self, **kwargs):
return self.env.reset()
def get_state(self):
return self.env.state
def set_state(self, state):
self.env.state = state
class ReplayResetEnv(gym.Wrapper):
"""
    Wrapper for reverting the game to a specific state; it also restores the internal state (RAM) of ALE.
## Usage
```python
from tf_rl.common.wrappers import wrap_deepmind, make_atari, ReplayResetEnv
env = wrap_deepmind(make_atari("PongNoFrameskip-v4"))
env = ReplayResetEnv(env)
state = env.reset()
init_state = env.get_checkpoint_state()
for t in range(1, 1000):
env.render()
action = env.action_space.sample()
next_state, reward, done, info = env.step(action)
state = next_state
if t % 100 == 0:
print("done", t)
env.recover(init_state)
env.close()
```
"""
def __init__(self, env):
gym.Wrapper.__init__(self, env)
self.env = env
def recover(self, state):
self.env.unwrapped.restore_state(state)
self.env.step(0) # 1 extra step to burn the current state on ALE's RAM is required!!
return self.env.unwrapped._get_image() # output the newly recovered observation
def get_checkpoint_state(self):
return self.env.unwrapped.clone_state()
def get_RAM(self):
return self.env.unwrapped.ale.getRAM()
"""
Borrowed from OpenAI Baselines at 4/4/2019
https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
https://github.com/openai/baselines/blob/master/baselines/common/wrappers.py
"""
# I moved the source code of this module just below
# from .wrappers import TimeLimit
class TimeLimit(gym.Wrapper):
def __init__(self, env, max_episode_steps=None):
super(TimeLimit, self).__init__(env)
self._max_episode_steps = max_episode_steps
self._elapsed_steps = 0
def step(self, ac):
observation, reward, done, info = self.env.step(ac)
self._elapsed_steps += 1
if self._elapsed_steps >= self._max_episode_steps:
done = True
info['TimeLimit.truncated'] = True
return observation, reward, done, info
def reset(self, **kwargs):
self._elapsed_steps = 0
return self.env.reset(**kwargs)
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) # pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert sometimes we stay in lives == 0 condition for a few frames
# so it's important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros((2,) + env.observation_space.shape, dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2: self._obs_buffer[0] = obs
if i == self._skip - 1: self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""
Bin reward to {+1, 0, -1} by its sign.
Reference:
https://docs.scipy.org/doc/numpy-1.9.3/reference/generated/numpy.sign.html
Usage:
>>> np.sign([-5., 4.5])
array([-1., 1.])
>>> np.sign(0)
0
"""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env, width=84, height=84, grayscale=True):
"""Warp frames to 84x84 as done in the Nature paper and later work."""
gym.ObservationWrapper.__init__(self, env)
self.width = width
self.height = height
self.grayscale = grayscale
if self.grayscale:
self.observation_space = spaces.Box(low=0, high=255,
shape=(self.height, self.width, 1), dtype=np.uint8)
else:
self.observation_space = spaces.Box(low=0, high=255,
shape=(self.height, self.width, 3), dtype=np.uint8)
def observation(self, frame):
if self.grayscale:
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
if self.grayscale:
frame = np.expand_dims(frame, -1)
return frame
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
baselines.common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(low=0, high=255, shape=(shp[:-1] + (shp[-1] * k,)),
dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
# don't do this.... LazyFrame is much more memory efficient
# on my local, it reduced from 1.4GB for ReplayBuffer(50000) to 397.1MB..... incredible.
# return np.concatenate(list(self.frames), axis=-1)
return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)
def observation(self, observation):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(observation).astype(np.float32) / 255.0
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
You'd not believe how complex the previous solution was."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=-1)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
def make_atari(env_id, skip_frame_k=4, max_episode_steps=None):
env = gym.make(env_id)
assert 'NoFrameskip' in env.spec.id
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=skip_frame_k)
if max_episode_steps is not None:
env = TimeLimit(env, max_episode_steps=max_episode_steps)
return env
# Since my code does not have a function or API to repeat the same action several times,
# I rely on these wrappers.
# frame_stack and scale consume a lot of memory, so only enable them if you have enough RAM.
# My local machine has 16GB of RAM and it was not enough: a 0.5M replay buffer took up about 8.5GB of RAM...
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, skip_frame_k=4, scale=False):
"""Configure environment for DeepMind-style Atari.
"""
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, skip_frame_k)
return env
| [] |
2024-01-10 | mao-at/BI-SEP | python~api_demo~consume_secured_api.py | import openai
import os
import time
SYSTEM_PROMPT= "you are a food classifier bot. You will respond 'junk' when the user prompt mentions any kind of junk food, and respond 'not junk' if all food mentioned are not junk food."
USER_PROMPT = "apple,pear,orange,banana"
def junk_or_not(system_prompt=SYSTEM_PROMPT, user_prompt=USER_PROMPT, retries=3):
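    # Retry the chat completion, backing off (i+1)**2 seconds after each failure; give up after `retries` attempts.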
for i in range(retries):
try:
# openai.api_key= os.getenv("OPENAI_API_KEY")
openai.api_key="abcdefg1234567890"
completion = openai.ChatCompletion.create(
model="gpt-4",
messages=[
{"role": "system", "content": f"{system_prompt}"},
{"role": "user", "content": f"{user_prompt}"},
]
)
result = completion.choices[0].message.get("content")
break # If it gets to this line, no exception was raised, so break the loop
except Exception as e:
print(f"Attempt {i+1} failed with error: {str(e)}")
print(f"retrying in {(i+1)**2} seconds...")
time.sleep((i+1)**2) # Pause before the next attempt
if i == retries - 1: # If this was the last attempt
msg = f"ALL RETRIES FAILED"
print(msg)
return None
print(f"result: {result}")
return result
if __name__ == "__main__":
junk_or_not()
| [
"you are a food classifier bot. You will respond 'junk' when the user prompt mentions any kind of junk food, and respond 'not junk' if all food mentioned are not junk food.",
"apple,pear,orange,banana",
"PLACEHOLDER"
] |
2024-01-10 | Gaurang-1402/ChatManipulators | src~rosgpt~rosgpt~rosgpt.py | #!/usr/bin/env python3
# This file is part of rosgpt package.
#
# Copyright (c) 2023 Anis Koubaa.
# All rights reserved.
#
# This work is licensed under the terms of the Creative Commons Attribution-NonCommercial-ShareAlike 4.0
# International Public License. See https://creativecommons.org/licenses/by-nc-sa/4.0/ for details.
import os
import json
import openai
import rclpy
import threading
from rclpy.node import Node
from std_msgs.msg import String
from flask import Flask, request, send_from_directory, jsonify
from flask_restful import Resource, Api
from flask_cors import CORS
import pyttsx3 # pip install pyttsx3 #you need to install libespeak1 on Ubuntu # sudo apt-get install libespeak1
from rclpy.executors import SingleThreadedExecutor
import subprocess
from ament_index_python import get_package_share_directory
# Instantiate a Flask application object with the given name
app = Flask(__name__)
# Enable Cross-Origin Resource Sharing (CORS) for the Flask app
CORS(app)
# Create an API object that wraps the Flask app to handle RESTful requests
api = Api(app)
#You must add OPENAI_API_KEY as an environment variable
#In Ubuntu: echo 'export OPENAI_API_KEY=your_api_key' >> ~/.bashrc
# Get the API key from the environment variable.
openai_api_key = os.getenv('OPENAI_API_KEY')
#print(openai_api_key)
# Now you can use the openai_api_key variable to authenticate with the OpenAI API
# Initialize a threading lock for synchronizing access to shared resources
# when multiple threads are involved
spin_lock = threading.Lock()
# Initialize the Text-to-Speech (TTS) engine using the pyttsx3 library
# you need to install the following dependencies
# sudo apt-get install libespeak1
# pip3 install pyttsx3
tts_engine = pyttsx3.init()
# Create a separate threading lock for synchronizing access to the TTS engine
tts_lock = threading.Lock()
class ROSGPTNode(Node):
def __init__(self):
"""
Initialize the ROSGPTNode class which is derived from the rclpy Node class.
"""
# Call the superclass constructor and pass the name of the node
super().__init__('chatgpt_ros2_node')
# Create a publisher for the 'voice_cmd' topic with a message queue size of 10
self.publisher = self.create_publisher(String, 'voice_cmd', 10)
def publish_message(self, message):
"""
Publish the given message to the 'voice_cmd' topic.
Args:
message (str): The message to be published.
"""
msg = String() # Create a new String message
msg.data = message # Convert the message to a JSON string and set the data field of the message
self.publisher.publish(msg) # Publish the message using the publisher
#print('message Published: ', message) # Log the published message
#print('msg.data Published: ', msg.data) # Log the published message
def process_and_publish_chatgpt_response(chatgpt_ros2_node, text_command, chatgpt_response, use_executors=True):
"""
Process the chatbot's response and publish it to the 'voice_cmd' topic.
Args:
chatgpt_ros2_node (ROSGPTNode): The ROS2 node instance.
text_command (str): The text command received from the user.
chatgpt_response (str): The response from the chatbot.
use_executors (bool, optional): Flag to indicate whether to use SingleThreadedExecutor. Defaults to True.
"""
chatgpt_ros2_node.publish_message(chatgpt_response) # Publish the chatbot's response using the ROS2 node
# If use_executors flag is True, use SingleThreadedExecutor
if use_executors:
executor = SingleThreadedExecutor()# Create a new executor for each request
executor.add_node(chatgpt_ros2_node) # Add the node to the executor
executor.spin_once()# Spin the executor once
executor.remove_node(chatgpt_ros2_node) # Remove the node from the executor
# If use_executors flag is False, use spin_lock to synchronize access
else:
with spin_lock:
rclpy.spin_once(chatgpt_ros2_node)
class ROSGPTProxy(Resource):
"""
A class derived from flask_restful.Resource, responsible for handling incoming HTTP POST requests.
"""
def __init__(self, chatgpt_ros2_node):
"""
Initialize the ROSGPTProxy class with the given ROS2 node.
Args:
chatgpt_ros2_node (ROSGPTNode): The ROS2 node instance.
"""
self.chatgpt_ros2_node = chatgpt_ros2_node
def askGPT(self, text_command):
"""
Send a text command to the GPT-3 model and receive a response.
Args:
text_command (str): The text command to be sent to the GPT-3 model.
Returns:
str: The response from the GPT-3 model as a JSON string.
"""
# Create the GPT-3 prompt with example inputs and desired outputs
prompt = '''
Consider the following ontology:
{"action": "center", "params": {}}
{"action": "random", "params": {}}
{"action": "move_joint", "params": {"joint": joint, "angle": angle, "speed": speed}}
The 'joint' parameter can take values "shoulder_pan_joint", "shoulder_lift_joint", "elbow_joint", "wrist_1_joint", "wrist_2_joint", "wrist_3_joint" to indicate the joint to move. The 'angle' parameter represents the target angle for the joint in radians, and 'speed' is the speed at which to move the joint in radians per second. The 'angle' value must be between -6.13 and 6.13 for all joints except 'elbow_joint', which must be between -2.99 and 2.99. Here are some examples.
If speed is not given in the prompt, it is assumed to be 0.5 radians per second.
All numerical answers should be in float form.
All joint values are between -6.13 to 6.13
except elbow_joint which has joint values between -2.99 to 2.99
Prompt: "Move the shoulder_pan_joint to angle 0.7 radians at a speed of 0.2 radians per second."
Returns: {"action": "move_joint", "params": {"joint": "shoulder_pan_joint", "angle": 0.7, "speed": 0.2}}
Prompt: "Move the elbow_joint to angle 1.3 radians."
Returns: {"action": "move_joint", "params": {"joint": "elbow_joint", "angle": 1.3, "speed": 0.5}}
Prompt: "Move the wrist_1_joint to angle 2.2 radians at a speed of 0.3 radians per second."
Returns: {"action": "move_joint", "params": {"joint": "wrist_1_joint", "angle": 2.2, "speed": 0.3}}
Prompt: "Move the robot to the center position."
Returns: {"action": "center", "params": {}}
Prompt: "Move the robot to a random joint configuration."
Returns: {"action": "random", "params": {}}
You will be given human language prompts, and you need to return a JSON conformant to the ontology. Any action not in the ontology must be ignored.
Examples in other languages:
German:
Prompt: "Bewegen Sie das shoulder_pan_joint auf einen Winkel von 0,7 Radiant mit einer Geschwindigkeit von 0,2 Radiant pro Sekunde."
Returns: {"action": "move_joint", "params": {"joint": "shoulder_pan_joint", "angle": 0.7, "speed": 0.2}}
Japanese:
Prompt: "Elbow_joint o 1.3 radian ni ido sase, speed wa 0.5 radian per second desu."
Returns: {"action": "move_joint", "params": {"joint": "elbow_joint", "angle": 1.3, "speed": 0.5}}
Hindi:
Prompt: "Wrist_1_joint ko 2.2 radians par le jao, speed 0.3 radians per second ke saath."
Returns: {"action": "move_joint", "params": {"joint": "wrist_1_joint", "angle": 2.2, "speed": 0.3}}
'''
prompt = prompt+'\nprompt: '+text_command
#print(prompt) #for testing
# Create the message structure for the GPT-3 model
messages = [
{"role": "system", "content": "You are a robot brain that translates human natural language to robot control commands in json."},
{"role": "user", "content": prompt}
]
# Try to send the request to the GPT-3 model and handle any exceptions
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
)
except openai.error.InvalidRequestError as e:
print(f"Error: {e}")
return None
except Exception as e:
print(f"Unexpected error: {e}")
return None
# Extract the GPT-3 model response from the returned JSON
chatgpt_response = response.choices[0].message['content'].strip()
#print(chatgpt_response)
# Find the start and end indices of the JSON string in the response
start_index = chatgpt_response.find('{')
end_index = chatgpt_response.rfind('}') + 1
# Extract the JSON string from the response
json_response_dict = chatgpt_response[start_index:end_index]
#print('\n\n\njson_response_dict ',json_response_dict)
return json.dumps({'text': chatgpt_response, 'json': json_response_dict})
def post(self):
"""
Handles an incoming POST request containing a text command. The method sends the text command
to the GPT-3 model and processes the response using the process_and_publish_chatgpt_response function in a separate thread.
Returns:
dict: A dictionary containing the GPT-3 model response as a JSON string.
"""
text_command = request.form['text_command']
print ('[ROSGPT] Command received. ', text_command, '. Asking ChatGPT ...')
# Run the speak function on a separate thread
#print('text_command:', text_command,'\n')
chatgpt_response = self.askGPT(text_command)
print ('[ROSGPT] Response received from ChatGPT. \n', str(json.loads(chatgpt_response))[:60], '...')
#print('eval(chatgpt_response)', eval(chatgpt_response))
# Run the speak function on a separate thread
if chatgpt_response is None:
return {'error': 'An error occurred while processing the request'}
threading.Thread(target=process_and_publish_chatgpt_response, args=(self.chatgpt_ros2_node, text_command, chatgpt_response, True)).start()
#print(json.loads(chatgpt_response))
return json.loads(chatgpt_response)
@app.route('/')
def index():
print(os.path.join(get_package_share_directory('rosgpt'), 'webapp'))
return send_from_directory(os.path.join(get_package_share_directory('rosgpt'), 'webapp'), 'index.html')
def main():
rclpy.init(args=None)
chatgpt_ros2_node = ROSGPTNode()
api.add_resource(ROSGPTProxy, '/rosgpt', resource_class_args=(chatgpt_ros2_node,))
app.run(debug=True, host='0.0.0.0', port=5000)
rclpy.shutdown()
if __name__ == '__main__':
main()
| [
"\n Consider the following ontology:\n {\"action\": \"center\", \"params\": {}}\n {\"action\": \"random\", \"params\": {}}\n {\"action\": \"move_joint\", \"params\": {\"joint\": joint, \"angle\": angle, \"speed\": speed}}\n\n The 'joint' parameter can take values \"shoulder_pan_joint\", \"shoulder_lift_joint\", \"elbow_joint\", \"wrist_1_joint\", \"wrist_2_joint\", \"wrist_3_joint\" to indicate the joint to move. The 'angle' parameter represents the target angle for the joint in radians, and 'speed' is the speed at which to move the joint in radians per second. The 'angle' value must be between -6.13 and 6.13 for all joints except 'elbow_joint', which must be between -2.99 and 2.99. Here are some examples.\n\n If speed is not given in the prompt, it is assumed to be 0.5 radians per second.\n All numerical answers should be in float form.\n\n All joint values are between -6.13 to 6.13\n except elbow_joint which has joint values between -2.99 to 2.99 \n\n Prompt: \"Move the shoulder_pan_joint to angle 0.7 radians at a speed of 0.2 radians per second.\"\n Returns: {\"action\": \"move_joint\", \"params\": {\"joint\": \"shoulder_pan_joint\", \"angle\": 0.7, \"speed\": 0.2}}\n\n Prompt: \"Move the elbow_joint to angle 1.3 radians.\"\n Returns: {\"action\": \"move_joint\", \"params\": {\"joint\": \"elbow_joint\", \"angle\": 1.3, \"speed\": 0.5}}\n\n Prompt: \"Move the wrist_1_joint to angle 2.2 radians at a speed of 0.3 radians per second.\"\n Returns: {\"action\": \"move_joint\", \"params\": {\"joint\": \"wrist_1_joint\", \"angle\": 2.2, \"speed\": 0.3}}\n\n Prompt: \"Move the robot to the center position.\"\n Returns: {\"action\": \"center\", \"params\": {}}\n\n Prompt: \"Move the robot to a random joint configuration.\"\n Returns: {\"action\": \"random\", \"params\": {}}\n\n You will be given human language prompts, and you need to return a JSON conformant to the ontology. Any action not in the ontology must be ignored.\n\n Examples in other languages:\n\n German:\n Prompt: \"Bewegen Sie das shoulder_pan_joint auf einen Winkel von 0,7 Radiant mit einer Geschwindigkeit von 0,2 Radiant pro Sekunde.\"\n Returns: {\"action\": \"move_joint\", \"params\": {\"joint\": \"shoulder_pan_joint\", \"angle\": 0.7, \"speed\": 0.2}}\n\n Japanese:\n Prompt: \"Elbow_joint o 1.3 radian ni ido sase, speed wa 0.5 radian per second desu.\"\n Returns: {\"action\": \"move_joint\", \"params\": {\"joint\": \"elbow_joint\", \"angle\": 1.3, \"speed\": 0.5}}\n\n Hindi:\n Prompt: \"Wrist_1_joint ko 2.2 radians par le jao, speed 0.3 radians per second ke saath.\"\n Returns: {\"action\": \"move_joint\", \"params\": {\"joint\": \"wrist_1_joint\", \"angle\": 2.2, \"speed\": 0.3}}\n ",
"You are a robot brain that translates human natural language to robot control commands in json.",
"PLACEHOLDER\nprompt: PLACEHOLDER"
] |
2024-01-10 | Nabil-Mabrouk/GemiMe | src~ressources~example.py | from dotenv import load_dotenv
from langchain.vectorstores import Chroma
import streamlit as st
from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain.chains.summarize import load_summarize_chain
from langchain.llms import OpenAI
from langchain.callbacks import get_openai_callback
from docx import Document
from docx.table import _Cell
from streamlit_extras.add_vertical_space import add_vertical_space
import sys
def clear_history():
if "history" in st.session_state:
del st.session_state["history"]
def format_chat_history(chat_history):
formatted_history = ""
for entry in chat_history:
question, answer = entry
# Added an extra '\n' for the blank line
formatted_history += f"Question: {question}\nAnswer: {answer}\n\n"
return formatted_history
def extract_text_from_table(table):
text = ""
for row in table.rows:
for cell in row.cells:
if isinstance(cell, _Cell):
text += cell.text + "\n"
return text.strip()
#side bar contents
# Configure Streamlit page settings
st.set_page_config(page_title="PDFReader")
st.title("PDF & Word Reader ✨")
def main():
if "model" not in st.session_state:
st.session_state.model = "text-davinci-003"
# brief summary
with st.sidebar:
st.title('🤗💬 LLM PDFReader App')
st.markdown("""
## About
This app is an LLM-powered chatbot built using:
- [Streamlit](https://streamlit.io/)
        - [Langchain](https://python.langchain.com/)
- [OpenAI](https://platform.openai.com/docs/models) LLM model
""")
st.radio(
"Model 👉",
key="model",
options=["text-ada-001", "text-davinci-002", "text-davinci-003"],
)
add_vertical_space(5)
llm = OpenAI(temperature=0.7, model=st.session_state.model)
#llmchat = OpenAI(temperature=0.7, model_name='gpt-3.5-turbo')
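    # "stuff" chains for content that fits in the context window; "map_reduce" variants as
    # fallbacks for larger documents (used when the stuff chain raises a token-limit error).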
chain = load_summarize_chain(llm, chain_type="stuff")
chain_large = load_summarize_chain(llm, chain_type="map_reduce")
chain_qa = load_qa_chain(llm, chain_type="stuff")
chain_large_qa = load_qa_chain(llm, chain_type="map_reduce")
# Load environment variables
load_dotenv()
# Upload file
uploaded_file = st.file_uploader("Upload your file", type=["pdf", "docx"])
# Initialize session state
    if 'file_name' not in st.session_state:
        st.session_state.file_name = None
# Extract the text
if uploaded_file is not None :
file_type = uploaded_file.type
# Clear summary if a new file is uploaded
if 'summary' in st.session_state and st.session_state.file_name != uploaded_file.name:
st.session_state.summary = None
st.session_state.file_name = uploaded_file.name
try:
if file_type == "application/pdf":
# Handle PDF files
pdf_reader = PdfReader(uploaded_file)
text = ""
for page in pdf_reader.pages:
text += page.extract_text()
elif file_type == "application/vnd.openxmlformats-officedocument.wordprocessingml.document":
# Handle Word documents
doc = Document(uploaded_file)
paragraphs = [p.text for p in doc.paragraphs]
text = "\n".join(paragraphs)
# Extract text from tables
for table in doc.tables:
table_text = extract_text_from_table(table)
if table_text:
text += "\n" + table_text
else:
st.error("Unsupported file format. Please upload a PDF or DOCX file.")
return
# Split text into chunks, use this if you only use this app for small documents.
# text_splitter = CharacterTextSplitter(
# separator="\n",
# chunk_size=1000,
# chunk_overlap=200,
# length_function=len
# )
# Split text into chunks
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=1000,
chunk_overlap=200,
length_function=len
)
chunks = text_splitter.split_text(text)
# Create embeddings
embeddings = OpenAIEmbeddings(disallowed_special=())
knowledge_base = FAISS.from_texts(chunks, embeddings)
st.header("Here's a brief summary of your file:")
pdf_summary = "Give me a concise summary, use the language that the file is in. "
docs = knowledge_base.similarity_search(pdf_summary)
if 'summary' not in st.session_state or st.session_state.summary is None:
with st.spinner('Wait for it...'):
with get_openai_callback() as scb:
try:
st.session_state.summary = chain.run(input_documents=docs, question=pdf_summary)
except Exception as maxtoken_error:
# Fallback to the larger model if the context length is exceeded
print(maxtoken_error)
st.session_state.summary = chain_large.run(input_documents=docs, question=pdf_summary)
print(scb)
st.write(st.session_state.summary)
# User input for questions
user_question = st.text_input("Ask a question about your file:")
if user_question:
docs = knowledge_base.similarity_search(user_question)
with st.spinner('Wait for it...'):
with get_openai_callback() as cb:
try:
response = chain_qa.run(input_documents=docs, question=user_question)
except Exception as maxtoken_error:
print(maxtoken_error)
response = chain_large_qa.run(input_documents=docs, question=user_question)
print(cb)
# Show/hide section using st.beta_expander
#with st.expander("Used Tokens", expanded=False):
#st.write(cb)
st.write(response)
except IndexError:
#st.caption("Well, Seems like your PDF doesn't contain any text, try another one.🆖")
st.error("Please upload another PDF. It seems like this PDF doesn't contain any text.")
except Exception as e:
st.error(f"An error occurred: {str(e)}")
if __name__ == '__main__':
main() | [] |
2024-01-10 | Nabil-Mabrouk/GemiMe | src~app3.py | import streamlit as st
import time
from pdfminer.high_level import extract_pages
from langchain_google_genai import GoogleGenerativeAI
from langchain.chains.summarize import load_summarize_chain
from langchain.text_splitter import RecursiveCharacterTextSplitter
from pdfminer.high_level import extract_text
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain_google_genai import GoogleGenerativeAIEmbeddings
#Tru
from trulens_eval import TruChain, Feedback, Tru, LiteLLM
from langchain.chains import LLMChain
from langchain.llms import VertexAI
from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.prompts import PromptTemplate
tru = Tru()
# Configure Streamlit page and state
st.set_page_config(page_title="GemiMe", page_icon="👨💼")
# Define the steps of the workflow
workflow_steps = [
"Home",
"Specifications",
"Design",
"Costing",
"Proposal"
]
with st.sidebar:
    help='''GemiMe will take you through different steps, from loading the specifications to generating a proposal. You can move from one step to another manually or let GemiMe do it automatically'''
st.info(help)
with st.form("config"):
st.header("Configuration")
selection = st.radio("Select", ['Automatic', 'Manual'])
gemini_api_key = st.text_input("Your Gemini API key", placeholder="sk-xxxx", type="password")
temperature = st.slider("Temperature", 0.0, 1.0, 0.1, 0.1, format="%.1f")
max_retries = st.slider("Max Retries", 0, 10, 2, 1)
if st.form_submit_button("Save"):
st.session_state.model_config = {
"selection": selection,
"gemini_api_key": gemini_api_key,
"temperature": temperature,
"max_retries": max_retries,
}
st.success(f"Selected model: {selection}")
def load_project_specification():
st.session_state.file_uploaded=False
st.write("### Step 1: Loading specification file")
# Function to upload and display the project specification
uploaded_file = st.file_uploader("Upload Project Specification", type=["pdf", "docx"])
if uploaded_file is not None:
st.write("Project Specification:")
return uploaded_file
#st.write(uploaded_file)
#st.session_state.file_uploaded=True
#for page_layout in extract_pages(uploaded_file):
# for element in page_layout:
# st.write(element)
def main():
#Rendering main page
st.title("👨💼 GemiMe")
tab_home, tab_specs, tab_design, tab_cost, tab_proposal =st.tabs(["Home", "Specifications", "Design", "Costing", "Proposal"])
with tab_home:
intro='''A proposal engineer plays a crucial role in the process of bidding for and securing projects,
particularly in industries where complex technical solutions are required. '''
st.write(intro)
with tab_specs:
st.write("### Step 1: Loading specification file")
        intro='''Load the specification file of the project. This file can be in pdf or docx format. You can also use one of our example demo specification files below'''
st.write(intro)
uploaded_file = st.file_uploader("Upload Project Specification", type=["pdf", "docx"])
# create llm
#llm = OpenAI(temperature=0.7, model=st.session_state.model)
llm = GoogleGenerativeAI(model="gemini-pro", google_api_key=st.secrets["GEMINI_API_KEY"])
chain = load_summarize_chain(llm, chain_type="stuff")
text = ""
pdf_summary = "Give me a concise summary, use the language that the file is in. "
pdf_title="Extract the title"
if uploaded_file is not None:
text = extract_text(uploaded_file)
# Clear summary if a new file is uploaded
if 'summary' in st.session_state and st.session_state.file_name != uploaded_file.name:
st.session_state.summary = None
st.session_state.title = None
st.session_state.file_name = uploaded_file.name
# Split text into chunks
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=1000,
chunk_overlap=200,
length_function=len
)
chunks = text_splitter.split_text(text)
# Create embeddings
#embeddings = OpenAIEmbeddings(disallowed_special=())
embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001", google_api_key=st.secrets["GEMINI_API_KEY"])
knowledge_base = FAISS.from_texts(chunks, embeddings)
with st.form("Chat with specifications"):
st.header("Ask a question about the document that you uploaded")
temp="You will be provided with a set of documents and a user question. Try to answer the user question by using the information contained in the documents. user question:"
question=st.text_input("Enter your question:")
question=temp+question
submit_button = st.form_submit_button(label="Submit")
if submit_button:
docs = knowledge_base.similarity_search(question)
llm_response=chain.run(input_documents=docs, question=question)
st.markdown(llm_response)
#if 'summary' not in st.session_state or st.session_state.summary is None:
# try:
# st.session_state.summary = chain.run(input_documents=docs, question=pdf_summary)
# st.info(st.session_state.title)
# st.write(st.session_state.summary)
# except Exception as maxtoken_error:
# # Fallback to the larger model if the context length is exceeded
# print(maxtoken_error)
with tab_design:
st.write("Hello")
llm = GoogleGenerativeAI(model="gemini-pro", google_api_key=st.secrets["GEMINI_API_KEY"])
full_prompt=HumanMessagePromptTemplate(
prompt=PromptTemplate(
template="Provide a helpful response with relevant background information for the following: {prompt}",
input_variables=["prompt"],
)
)
chat_prompt_template = ChatPromptTemplate.from_messages([full_prompt])
chain = LLMChain(llm=llm, prompt=chat_prompt_template, verbose=True)
with st.form("Test Form"):
st.header("Project info")
question=st.text_input("Enter your question:")
submit_button = st.form_submit_button(label="Submit")
if submit_button:
llm_response = chain(question)
st.markdown(llm_response['text'])
with tab_cost:
st.info("Hello")
if __name__ == "__main__":
main() | [
"Provide a helpful response with relevant background information for the following: {prompt}"
] |
2024-01-10 | funny2code/intellireviewer_by_chatgpt | backend.py | import re
import os
import openai
openai.api_key = os.getenv("OPENAI_API_KEY")
def call_openai(prompt, max_tokens=2300):
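    # Single-turn helper: fixed "career advisor" system role, the prompt as the user message; returns the reply text.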
messages = [{"role": "system", "content": "you are career advisor"}]
messages.append({"role": "user", "content": prompt})
response = openai.ChatCompletion.create(
model="gpt-4",
messages=messages,
temperature=1,
        max_tokens=max_tokens,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
return response.get("choices")[0]['message']['content']
def industry_trends(text, details=''):
prompt = """Analyze the employee's performance review to identify growth-oriented topics for 1:1 meetings with their manager. Focus on areas benefiting from guidance, feedback, and support. Present prioritized discussion topics in a clear and organized format:
Topic 1 (related to growth objectives)
Topic 2 (related to growth objectives)
Topic 3 (related to growth objectives)
Topic 4 (related to growth objectives)
Topic 5 (related to growth objectives)
Please format it nicely with line breaks to make it more readable
"""
if details:
prompt += f"use this details to grab more ideas about the user in oreder to recommend courses {details}"
response = call_openai(f"{text}\n\n{prompt}")
return response
def SMART(text):
prompt = """Analyze the employee's performance review to set SMART goals for the next year. Provide an action plan with the following elements:
Milestones: Break goals into manageable milestones.
Resources: Identify relevant resources for skill development.
Time management: Offer strategies for effective time allocation.
Accountability: Suggest methods for progress tracking.
Potential obstacles: Anticipate challenges and provide strategies to overcome them.
Support network: Encourage building a network of colleagues, mentors, and peers.
A structured action plan helps the employee achieve their goals and advance in their career.
Please format it nicely with line breaks to make it more readable
"""
response = call_openai(f"{text}\n\n{prompt}")
return_response = ''
temp_list = response.split('•')
if len(temp_list) > 2:
for item in temp_list:
if item != '':
return_response += f"* {item}"
return return_response
return response
def soft_skills(text, details=''):
prompt = """Based on the employee's performance review, provide personalized tips for soft skill development and topics for professional growth. List these recommendations in a prioritized order:
Tip 1 (related to strengths/growth areas)
Tip 2 (related to strengths/growth areas)
Tip 3 (related to strengths/growth areas)
Please format it nicely with line breaks to make it more readable
"""
    if details:
        prompt += f"use this details to grab more ideas about the user in oreder to recommend courses {details}"
    response = call_openai(f"{text}\n\n{prompt}")
return_response = ''
temp_list = response.split('•')
if len(temp_list) > 2:
for item in temp_list:
if item != '':
return_response += f"* {item}"
return return_response
return response
def get_details(text):
prompt = """As an attentive reader, meticulously extract the employee's name, organization, manager, and evaluator from the performance review document.
Write the employee's name, organization, manager, and evaluator, formatted as a Markdown list.
"""
response = call_openai(f"{text}\n\n{prompt}")
response = re.sub(r"[0-9].|•", "\n-", response)
slice = response.find("-")
res = response[slice:]
print("Details: ", res)
return res
def career_dev_ops(text, details=''):
prompt = """Analyze the employee's performance review and identify career development opportunities.
Write the possible opportunities, formatted as a Markdown list.
"""
if details:
prompt += f"use this details to grab more ideas about the user in oreder to recommend courses {details}"
response = call_openai(f"{text} \n {prompt}")
response = re.sub(r"[0-9].|•", "\n-", response)
slice = response.find("-")
res = response[slice:]
print("Career: ", res)
return res
def recommend_courses(text, details =""):
prompt = """Analyze the employee's performance review to identify strengths and growth areas.
Write 3 relevant courses from Udemy or LinkedIn Learning with its url to help them maximize potential, formatted as a Markdown list.
"""
if details:
prompt += f"use this details to grab more ideas about the user in oreder to recommend courses {details}"
response = call_openai(f"{text}\n\n{prompt}")
response = re.sub(r"[0-9].|•", "\n-", response)
slice = response.find("-")
res = response[slice:]
print("Courses: ", res)
return res
def highlight_action_items(text, details =''):
prompt = """Analyze the employee's performance review to identify areas where they can contribute to the team's goals.
Write the targeted measures to create a high-performance environment and recommend prioritized action items for the next year based on their performance review, formatted as a Markdown list.
"""
if details:
prompt += f"use this details to grab more ideas about the user in oreder to recommend courses {details}"
response = call_openai(f"{text}\n\n{prompt}")
response = re.sub(r"[0-9].|•", "\n-", response)
slice = response.find("-")
res = response[slice:]
print("Action Items: ", res)
return res | [
"Analyze the employee's performance review to identify growth-oriented topics for 1:1 meetings with their manager. Focus on areas benefiting from guidance, feedback, and support. Present prioritized discussion topics in a clear and organized format:\n Topic 1 (related to growth objectives)\n Topic 2 (related to growth objectives)\n Topic 3 (related to growth objectives)\n Topic 4 (related to growth objectives)\n Topic 5 (related to growth objectives)\n Please format it nicely with line breaks to make it more readable\n ",
"Analyze the employee's performance review to identify strengths and growth areas. \n Write 3 relevant courses from Udemy or LinkedIn Learning with its url to help them maximize potential, formatted as a Markdown list.\n ",
"Analyze the employee's performance review and identify career development opportunities.\n Write the possible opportunities, formatted as a Markdown list.\n ",
"Analyze the employee's performance review to set SMART goals for the next year. Provide an action plan with the following elements:\n Milestones: Break goals into manageable milestones.\n Resources: Identify relevant resources for skill development.\n Time management: Offer strategies for effective time allocation.\n Accountability: Suggest methods for progress tracking.\n Potential obstacles: Anticipate challenges and provide strategies to overcome them.\n Support network: Encourage building a network of colleagues, mentors, and peers.\n A structured action plan helps the employee achieve their goals and advance in their career.\n Please format it nicely with line breaks to make it more readable\n ",
"Analyze the employee's performance review to identify areas where they can contribute to the team's goals. \n Write the targeted measures to create a high-performance environment and recommend prioritized action items for the next year based on their performance review, formatted as a Markdown list.\n ",
"As an attentive reader, meticulously extract the employee's name, organization, manager, and evaluator from the performance review document.\n Write the employee's name, organization, manager, and evaluator, formatted as a Markdown list.\n ",
"you are career advisor",
"Based on the employee's performance review, provide personalized tips for soft skill development and topics for professional growth. List these recommendations in a prioritized order:\n Tip 1 (related to strengths/growth areas)\n Tip 2 (related to strengths/growth areas)\n Tip 3 (related to strengths/growth areas)\n Please format it nicely with line breaks to make it more readable\n ",
"use this details to grab more ideas about the user in oreder to recommend courses PLACEHOLDER"
] |
2024-01-10 | funny2code/intellireviewer_by_chatgpt | faq.py | import streamlit as st
def add_bg_from_url():
st.markdown(
f"""
<style>
.stApp {{
background-image: url("https://i.imgur.com/ptmLoCO.png");
background-attachment: fixed;
background-size: cover;
}}
</style>
""",
unsafe_allow_html=True
)
add_bg_from_url()
faq_data = [
{
"question": "How does IntelliReview Solutions work?",
"answer": "IntelliReview Solutions is powered by OpenAI's GPT-4, an advanced language model. It analyzes employee performance reviews and generates growth-oriented topics for 1:1 meetings with their managers. Using AI, the tool identifies areas that would benefit from guidance, feedback, and support and presents prioritized discussion topics in a clear and organized format. In addition, it utilizes AI to recommend relevant learning courses based on the performance review analysis."
},
{
"question": "What benefit does using IntelliReview Solutions provide?",
"answer":""" IntelliReview Solutions can provide invaluable benefits, including but not limited to:
Personalized Learning Recommendations: By analyzing employee performance, IntelliReview can recommend personalized learning courses to help employees grow in their roles. The AI model is trained on various course names and rankings, ensuring the recommendations are relevant and high quality.
Goal Setting: The tool can provide a base for setting employee goals, removing the need to come up with everything from scratch. This makes the goal-setting process more efficient and targeted.
Time-Saving: It automates the process of performance review analysis, saving managers' time and allowing them to focus more on strategic tasks. """
},
{
"question": "What are some benefits from using this service?",
"answer":""" IntelliReview Solutions offers numerous benefits, including:
Improved Employee Development: By providing personalized learning recommendations and goal-setting guidance, IntelliReview facilitates employee development, ultimately improving their performance and productivity.
Enhanced Manager-Employee Conversations: With generated growth-oriented topics for 1:1 meetings, managers can have more meaningful and focused conversations with their employees.
Better Resource Utilization: IntelliReview's AI model takes over the time-consuming task of performance review analysis, allowing human resources to be utilized more effectively elsewhere.
"""
},
{
"question": "What's a high-level diagram of how IntelliReview works?",
"answer":"""Below is a high-level workflow:
The IntelliReview Solutions takes in the employee performance review text as input.
The AI model, trained on course names and rankings, as well as manager tools, processes the review text.
It identifies areas that would benefit from guidance, feedback, and support and generates growth-oriented topics.
It then recommends relevant learning courses based on the performance review analysis.
The results are presented to the user in a clear and organized format. """
},
{
"question": "What makes IntelliReview Solutions unique in the market?",
"answer": "IntelliReview Solutions harnesses the power of AI to not only analyze employee performance reviews but also to recommend personalized learning paths and generate goal-oriented topics for 1:1 meetings with managers. This holistic approach to employee development, driven by advanced AI, sets IntelliReview apart from other tools in the market."
},
{
"question": "How does IntelliReview Solutions maintain and improve the quality of its AI recommendations?",
"answer": "Our tool leverages continuous learning and regular updates from its underlying AI model, GPT-4 by OpenAI. Additionally, user feedback and interaction are invaluable for refining and improving the system, ensuring the recommendations remain relevant, high-quality, and in line with evolving learning needs and business goals."
},
{
"question": "How scalable is IntelliReview Solutions?",
"answer": "IntelliReview Solutions is highly scalable. Given that the main engine of the system is an AI model, it can handle a high volume of performance reviews and user interactions. This makes it ideal for both small businesses and large corporations."
},
{
"question": "How does IntelliReview Solutions contribute to the strategic objectives of a business?",
"answer": "IntelliReview Solutions plays a critical role in talent development and retention strategies. By providing personalized learning recommendations and growth-oriented discussion topics, it aids in nurturing a highly skilled workforce. This leads to improved productivity, employee satisfaction, and ultimately, business performance."
},
{
"question":"What are the future development plans for IntelliReview Solutions?",
"answer":"We are committed to enhancing the capabilities of IntelliReview Solutions by integrating new features and learning sources. Additionally, we plan to develop more tools that allow for deeper analytics and insights into employee growth and development trends."
},
{
"question":"How does IntelliReview Solutions ensure user confidentiality and data security?",
"answer":"At IntelliReview Solutions, we prioritize the privacy and security of our users. We have implemented robust security protocols to protect sensitive data. Importantly, we do not store any user data beyond what is necessary for the duration of the specific session in which the user is interacting with our service. Once the session ends, the data is automatically purged from our system. This ensures that user confidentiality is maintained at all times. We are committed to adhering to stringent data protection standards and regulations to provide a secure and trustworthy service for our users."
},
{
"question":"What kind of data does IntelliReview Solutions collect and how is it used?",
"answer":"IntelliReview Solutions only uses the data provided by the user during their active session, such as performance reviews or user input. This information is used solely to generate personalized learning recommendations and discussion topics. We do not store, sell, or share any personal data. Our AI model does not have access to personal data about individuals unless explicitly provided by the user for the session. User trust is our top priority, and we are fully committed to respecting and protecting user privacy."
}
# Rest of the FAQ data...
]
# Set the title style
st.markdown(
"""
<h1 style='text-align: center; color: #FF5722; font-weight: bold;'>
Frequently Asked Questions
</h1>
""",
unsafe_allow_html=True
)
# Set the container style with background color
st.markdown(
"""
<style>
.faq-expander .content {
color: #FFFFFF;
}
.faq-expander {
background-color: #FFFFFF;
}
.faq-expander .st-expander {
color: #FFFFFF;
}
</style>
""",
unsafe_allow_html=True
)
for faq in faq_data:
expander = st.expander(faq["question"], expanded=False)
with expander:
st.markdown(
f"""
<div class="content">
{faq["answer"]}
</div>
""",
unsafe_allow_html=True
)
| [] |
2024-01-10 | ipriyam26/AI-Tutor | hero~chains.py | import contextlib
import enum
import math
import os
import re
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts import FewShotPromptTemplate, PromptTemplate
from dotenv import load_dotenv
from langchain.memory import ConversationBufferWindowMemory, ConversationBufferMemory
from langchain.prompts.example_selector import SemanticSimilarityExampleSelector
from langchain.chains import LLMChain
from hero.constants import (
INFO_MSG,
OUTLINE_MSG,
ESSAY_MSG,
THESIS_MSG,
EXAMPLES,
examples,
AI_FIX,
)
from langchain.vectorstores import Chroma
from langchain.chat_models import ChatOpenAI
from colorama import Fore, Style
from langchain.schema import Document
from langchain.chains.summarize import load_summarize_chain
load_dotenv()
class RunningState(enum.Enum):
INFO = enum.auto()
THESIS = enum.auto()
OUTLINE = enum.auto()
ESSAY = enum.auto()
class Chains:
def __init__(self) -> None:
self.embeddings = OpenAIEmbeddings()
self.chat_history = []
self.model = ChatOpenAI()
self.current_chain = self.get_info()
self.info = ""
self.thesis = ""
self.outline = ""
self.essay = ""
self.citation = ""
self.sections = []
def get_info(self) -> LLMChain:
return LLMChain(
prompt=PromptTemplate(
template=INFO_MSG, input_variables=["history", "input"]
),
memory=ConversationBufferMemory(),
llm=ChatOpenAI(model="gpt-4", temperature=0)
)
def get_thesis(self) -> LLMChain:
return LLMChain(
prompt=PromptTemplate(
template=THESIS_MSG, input_variables=["input", "history"]
),
llm=self.model,
memory=ConversationBufferMemory(),
)
def get_outline(self) -> LLMChain:
return LLMChain(
prompt=PromptTemplate(
template=OUTLINE_MSG, input_variables=["input", "history"]
),
memory=ConversationBufferMemory(),
llm=ChatOpenAI(temperature=0.7, max_tokens=1500),
)
def get_essay(self) -> LLMChain:
essay = PromptTemplate(
template=ESSAY_MSG,
input_variables=[
"user_input",
"history",
"instruct",
# "example",
"CITATIONS",
],
).partial(
instruct=self.info,
CITATIONS=f"ADD CITATION IN STYLE: {self.citation}"
if self.citation and self.citation.upper() != "NONE"
else "",
)
return LLMChain(
prompt=essay,
memory=ConversationBufferMemory(
memory_key="history", input_key="user_input"
),
llm=ChatOpenAI(model="gpt-4", temperature=0),
)
def gen_info(self, input: str):
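        # Run one turn of the info-gathering chain; once the reply contains an <info> block, parse it and move to the thesis stage.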
reply = self.current_chain.predict(input=input)
if "<info>" in reply:
with contextlib.suppress(Exception):
return self.extract_info(reply)
return reply, True
def extract_info(self, reply: str):
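        # Pull the <info> block out of the model reply and extract the requested word count and citation style with regexes.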
self.info = reply.split("<info>")[1].split("</info>")[0]
word_count_pattern = r"Word Count:\s+(\d+)"
# Citation Style: Chicago
citation_pattern = r"Citation Style:\s+(.*)"
if match := re.search(word_count_pattern, self.info):
self.word_count = match[1]
if match := re.search(citation_pattern, self.info):
self.citation = match[1]
self.current_chain = self.get_thesis()
return self.info, False
def gen_thesis(self) -> str:
chain = self.get_thesis()
msg = f"Given these requirements, write a thesis\n{self.info}"
for _ in range(5):
reply = chain.predict(input=msg)
if "<Thesis>" in reply:
with contextlib.suppress(Exception):
self.thesis = reply.split("<Thesis>")[1].split("</Thesis>")[0]
return self.thesis
msg = "Please generate a thesis statement\n And wrap it in <Thesis> </Thesis> tags"
return "Sorry, I am not able to generate a thesis with given information"
def gen_outline(self):
chain = self.get_outline()
section = math.ceil( int(self.word_count)/200)
msg = f"Given this thesis and information, write an outline, of {section} sections. \n Thesis: {self.thesis}\n Information:\n {self.info}. Outline should keep the world limit in mind."
for _ in range(5):
reply = chain.predict(input=msg)
if "<Outline>" in reply:
with contextlib.suppress(Exception):
outline = reply.split("<Outline>")[1].split("</Outline>")[0]
self.outline = re.findall(
r"<section>(.*?)</section>", outline, re.DOTALL
)
return self.outline
msg = (
"Please generate an outline\n And wrap it in <Outline> </Outline> tags"
)
return "Sorry, I am not able to generate an outline with given information and thesis"
def gen_essay(self) -> str:
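        # Generate the essay one outline section at a time, retrying a section when no <ESSAY> block can be parsed from the reply.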
chain = self.get_essay()
# go through two sections at a time
for section in self.outline:
for _ in range(2):
with contextlib.suppress(Exception):
# example = self.example_selector(section)
essay_section = chain.predict(
user_input=section,
# example=example
)
essay = re.findall(
r"<ESSAY>(.*?)</ESSAY>", essay_section, re.DOTALL
)[0]
self.sections.append(essay)
print(f"AI: {Fore.LIGHTMAGENTA_EX} {essay}\n\n {Style.RESET_ALL}")
self.essay += essay + "\n"
break
return self.essay
def gen_summary(self) -> str:
llm = ChatOpenAI(model="gpt-3.5-turbo-16k", temperature=0)
# essay = "\n".join(self.sections)
start_prompt = f"""Using the provided info write an essay of {self.word_count} words\n Info: {self.essay}"""
prompt_template = """
{start_prompt}
----
{history}
----
User:{msg}
AI:
"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["msg", "history", "start_prompt"]
)
chain = LLMChain(
prompt=PROMPT, llm=llm, memory=ConversationBufferMemory(input_key="msg")
)
completed = False
msg = "Please, Start writing the essay"
while not completed:
start = chain.predict(msg=msg, start_prompt=start_prompt)
current_count = len(start.split(" "))
print(f"AI: {Fore.LIGHTMAGENTA_EX} {start}\n\n {Style.RESET_ALL}")
print(f"{Fore.GREEN} Current word count: {current_count} {Style.BRIGHT}")
# if current_count is + or - 10% of the word count, then we are done
if current_count > int(self.word_count) * 1.1:
msg = "The essay you have provided is a bit long, please remove some content from it"
elif current_count < int(self.word_count) * 0.9:
msg = "The essay you have provided is a bit short, please add some more content to it"
else:
completed = True
return start
def gen_rewriter(self) -> str:
prompt = """I want you act an expert in essay writing and rewrite the essay I have written. add flow to it and make it more readable. don't change the meaning of length should be the same as the original essay. i have been told that the essay is not good and needs to be rewritten. it contains conlusions in places it should not have and has no flow. please rewrite it. KEEP THE WORD COUNT THE SAME. NEVER CHANGE THE WORD COUNT.
Wrap completed essay in <ESSAY> </ESSAY> tags
----
{essay}
"""
PROMPT = PromptTemplate(template=prompt, input_variables=["essay"])
llm = ChatOpenAI(model="gpt-3.5-turbo-16k", temperature=1)
reply = llm.predict(text=PROMPT.format(essay=self.essay))
return re.findall(r"<ESSAY>(.*?)</ESSAY>", reply, re.DOTALL)[0]
def example_selector(self, event: str):
example_prompt = PromptTemplate(
input_variables=["input", "output"],
template="Outline: {input}\nESSAY: {output}",
)
example_selector = SemanticSimilarityExampleSelector.from_examples(
examples, OpenAIEmbeddings(), Chroma, k=2
)
ret = example_selector.select_examples({"input": event})
return "\n".join([example_prompt.format(**r) for r in ret])
def prevent_ai_detection(self):
PROMPT = PromptTemplate(template=AI_FIX, input_variables=["content","word_count"])
llm = ChatOpenAI(model="gpt-4", temperature=1)
return llm.predict(text=PROMPT.format(content=self.essay, word_count=self.word_count))
if __name__ == "__main__":
chain = Chains()
msg_ai, more = chain.gen_info("I want help writing an essay")
while more:
print(f"AI: {msg_ai}")
if "1. Topic" in msg_ai:
msg = "Please wrap the info in tags <info> </info>"
else:
msg = input("User: ")
msg_ai, more = chain.gen_info(msg)
print(f"AI: {Fore.CYAN} {msg_ai} {Style.RESET_ALL}")
print("\n\nGenerating Thesis")
print(f"AI: {Fore.LIGHTCYAN_EX} {chain.gen_thesis()} {Style.RESET_ALL}")
print("\n\nGenerating Outline")
outline = "\n".join(chain.gen_outline())
print(f"AI: {Fore.CYAN} {outline} {Style.RESET_ALL}")
print("\n\nGenerating Essay")
chain.gen_essay()
print(f"AI: {Fore.LIGHTYELLOW_EX} {chain.essay} {Style.RESET_ALL}\n")
print(f"\n\nEssay Length: {len(chain.essay.split(' '))}")
print("Fixing AI detection")
ai_fix = chain.prevent_ai_detection()
print(f"AI: {Fore.LIGHTYELLOW_EX} {ai_fix} {Style.RESET_ALL}\n")
print(f"\n\nEssay Length: {len(ai_fix.split(' '))}")
    # returnn = chain.gen_summary()
# rewrite = chain.gen_rewriter()
# print(f"AI: {Fore.LIGHTMAGENTA_EX} {rewrite} {Style.RESET_ALL}")
# print(f"\n\nEssay Length: {len(rewrite.split(' '))}")
# # print(f"AI: {Fore.LIGHTRED_EX} {returnn} {Style.RESET_ALL}")
# print("\n\nEssay word count: ", len(returnn.split(" ")))
| [
"word_count",
"input",
"content",
"start_prompt",
"Outline: {input}\nESSAY: {output}",
"I want you act an expert in essay writing and rewrite the essay I have written. add flow to it and make it more readable. don't change the meaning of length should be the same as the original essay. i have been told that the essay is not good and needs to be rewritten. it contains conlusions in places it should not have and has no flow. please rewrite it. KEEP THE WORD COUNT THE SAME. NEVER CHANGE THE WORD COUNT.\n\n Wrap completed essay in <ESSAY> </ESSAY> tags\n ----\n {essay}\n ",
"\n {start_prompt}\n ----\n\n {history}\n\n ----\n User:{msg}\n AI:\n "
] |
2024-01-10 | Baquara/lll-chat-interface | backend~summarization~exl2.py | from langchain.llms.base import LLM
from langchain.callbacks.manager import CallbackManagerForLLMRun
from typing import Any, Dict, List, Optional
from langchain.pydantic_v1 import Field, root_validator
from exllamav2.model import ExLlamaV2
from exllamav2.cache import ExLlamaV2Cache
from exllamav2.config import ExLlamaV2Config
from exllamav2.tokenizer import ExLlamaV2Tokenizer
from exllamav2.generator import ExLlamaV2StreamingGenerator
from exllamav2.generator.sampler import ExLlamaV2Sampler
import os, glob
class ExllamaV2(LLM):
client: Any #: :meta private:
model_path: str
"""The path to the GPTQ model folder."""
exllama_cache: ExLlamaV2Cache = None#: :meta private:
config: ExLlamaV2Config = None#: :meta private:
generator: ExLlamaV2StreamingGenerator = None#: :meta private:
tokenizer: ExLlamaV2Tokenizer = None#: :meta private:
settings: ExLlamaV2Sampler.Settings = None#: :meta private:
##Langchain parameters
logfunc = print
stop_strings: Optional[List[str]] = Field("", description="Sequences that immediately will stop the generator.")
streaming: Optional[bool] = Field(True, description="Whether to stream the results, token by token.")
##Generator parameters
disallowed_tokens: Optional[List[int]] = Field(None, description="List of tokens to disallow during generation.")
temperature: Optional[float] = Field(None, description="Temperature for sampling diversity.")
top_k: Optional[int] = Field(None, description="Consider the most probable top_k samples, 0 to disable top_k sampling.")
    top_p: Optional[float] = Field(None, description="Consider tokens up to a cumulative probability of top_p, 0.0 to disable top_p sampling.")
# min_p: Optional[float] = Field(None, description="Do not consider tokens with probability less than this.")
typical: Optional[float] = Field(None, description="Locally typical sampling threshold, 0.0 to disable typical sampling.")
token_repetition_penalty: Optional[float] = Field(None, description="Repetition penalty for most recent tokens.")
token_repetition_range: Optional[int] = Field(None, description="No. most recent tokens to repeat penalty for, -1 to apply to whole context.")
token_repetition_decay: Optional[int] = Field(None, description="Gradually decrease penalty over this many tokens.")
# beams: Optional[int] = Field(None, description="Number of beams for beam search.")
# beam_length: Optional[int] = Field(None, description="Length of beams for beam search.")
##Config overrides
    max_seq_len: Optional[int] = Field(2048, description="Reduce to save memory. Can also be increased, ideally while also using compress_pos_emb and a compatible model/LoRA")
# compress_pos_emb: Optional[float] = Field(1.0, description="Amount of compression to apply to the positional embedding.")
set_auto_map: Optional[str] = Field(None, description ="Comma-separated list of VRAM (in GB) to use per GPU device for model layers, e.g. 20,7,7")
gpu_peer_fix: Optional[bool] = Field(None, description="Prevent direct copies of data between GPUs")
# alpha_value: Optional[float] = Field(1.0, description="Rope context extension alpha") #Old Param
scale_alpha_value: Optional[float] = Field(1.0, description="Rope context extension alpha") #New Param
##Tuning
matmul_recons_thd: Optional[int] = Field(None)
fused_mlp_thd: Optional[int] = Field(None)
sdp_thd: Optional[int] = Field(None)
fused_attn: Optional[bool] = Field(None)
matmul_fused_remap: Optional[bool] = Field(None)
rmsnorm_no_half2: Optional[bool] = Field(None)
rope_no_half2: Optional[bool] = Field(None)
matmul_no_half2: Optional[bool] = Field(None)
silu_no_half2: Optional[bool] = Field(None)
concurrent_streams: Optional[bool] = Field(None)
##Lora Parameters
# lora_path: Optional[str] = Field(None, description="Path to your lora.") #Exllamav2 doesn't yet support loras
@staticmethod
def get_model_path_at(path):
patterns = ["*.safetensors", "*.bin", "*.pt"]
model_paths = []
for pattern in patterns:
full_pattern = os.path.join(path, pattern)
model_paths = glob.glob(full_pattern)
if model_paths: # If there are any files matching the current pattern
break # Exit the loop as soon as we find a matching file
if model_paths: # If there are any files matching any of the patterns
return model_paths[0]
else:
return None # Return None if no matching files were found
@staticmethod
def configure_object(params, values, logfunc):
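        # Build a closure that copies the selected keys from the validated values onto a config/settings object, logging each applied override.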
obj_params = {k: values.get(k) for k in params}
def apply_to(obj):
for key, value in obj_params.items():
if value:
if hasattr(obj, key):
setattr(obj, key, value)
logfunc(f"{key} {value}")
else:
raise AttributeError(f"{key} does not exist in {obj}")
return apply_to
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
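        # Load the ExLlamaV2 config, tokenizer, model, cache, sampler settings and streaming generator, applying any user-supplied overrides.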
model_path = values["model_path"]
# lora_path = values["lora_path"]
config = ExLlamaV2Config()
config.model_dir = model_path
config.prepare()
tokenizer_path = os.path.join(model_path, "tokenizer.model")
model_config_path = os.path.join(model_path, "config.json")
model_path = ExllamaV2.get_model_path_at(model_path)
# config = ExLlamaV2Config(model_config_path)
# tokenizer = ExLlamaV2Tokenizer(tokenizer_path)
tokenizer = ExLlamaV2Tokenizer(config)
# config.model_path = model_path
##Set logging function if verbose or set to empty lambda
verbose = values['verbose']
if not verbose:
values['logfunc'] = lambda *args, **kwargs: None
logfunc = values['logfunc']
model_param_names = [
"temperature",
"top_k",
"top_p",
"min_p",
"typical",
"token_repetition_penalty_max",
"token_repetition_penalty_sustain",
"token_repetition_penalty_decay",
"beams",
"beam_length",
]
config_param_names = [
"max_seq_len",
# "compress_pos_emb",
"gpu_peer_fix",
# "alpha_value"
"scale_alpha_value"
]
tuning_parameters = [
"matmul_recons_thd",
"fused_mlp_thd",
"sdp_thd",
"matmul_fused_remap",
"rmsnorm_no_half2",
"rope_no_half2",
"matmul_no_half2",
"silu_no_half2",
"concurrent_streams",
"fused_attn",
]
configure_config = ExllamaV2.configure_object(config_param_names, values, logfunc)
configure_config(config)
configure_tuning = ExllamaV2.configure_object(tuning_parameters, values, logfunc)
configure_tuning(config)
configure_model = ExllamaV2.configure_object(model_param_names, values, logfunc)
##Special parameter, set auto map, it's a function
if values['set_auto_map']:
config.set_auto_map(values['set_auto_map'])
logfunc(f"set_auto_map {values['set_auto_map']}")
model = ExLlamaV2(config)
model.load()
exllama_cache = ExLlamaV2Cache(model)
settings = ExLlamaV2Sampler.Settings()
# settings = ExLlamaV2Sampler.Settings()
configure_model(settings)
# settings.temperature = 0.85
# settings.top_k = 50
# settings.top_p = 0.8
# settings.token_repetition_penalty = 1.15
# settings.disallow_tokens(tokenizer, [tokenizer.eos_token_id])
generator = ExLlamaV2StreamingGenerator(model, exllama_cache, tokenizer)
##Load and apply lora to generator
# if lora_path is not None:
# lora_config_path = os.path.join(lora_path, "adapter_config.json")
# lora_path = ExllamaV2.get_model_path_at(lora_path)
# lora = ExLlamaLora(model, lora_config_path, lora_path)
# generator.lora = lora
# logfunc(f"Loaded LORA @ {lora_path}")
##Configure the model and generator
values["stop_strings"] = [x.strip().lower() for x in values["stop_strings"]]
print(generator.__dict__)
# configure_model(generator.settings) #This may be necessary
setattr(settings, "stop_strings", values["stop_strings"])
logfunc(f"stop_strings {values['stop_strings']}")
disallowed = values.get("disallowed_tokens")
if disallowed:
generator.disallow_tokens(disallowed)
print(f"Disallowed Tokens: {generator.disallowed_tokens}")
values["client"] = model
values["generator"] = generator
values["config"] = config
values["tokenizer"] = tokenizer
values["exllama_cache"] = exllama_cache
values["settings"] = settings
return values
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "Exllama"
def get_num_tokens(self, text: str) -> int:
"""Get the number of tokens present in the text."""
return self.generator.tokenizer.num_tokens(text)
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
combined_text_output = ""
for token in self.stream(prompt=prompt, stop=stop, run_manager=run_manager):
combined_text_output += token
return combined_text_output
from enum import Enum
class MatchStatus(Enum):
EXACT_MATCH = 1
PARTIAL_MATCH = 0
NO_MATCH = 2
def match_status(self, sequence: str, banned_sequences: List[str]):
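        # Check the buffered text against the stop strings: exact stop, prefix of a stop (keep buffering), or no match (safe to yield).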
sequence = sequence.strip().lower()
for banned_seq in banned_sequences:
if banned_seq == sequence:
return self.MatchStatus.EXACT_MATCH
elif banned_seq.startswith(sequence):
return self.MatchStatus.PARTIAL_MATCH
return self.MatchStatus.NO_MATCH
def stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
# config = self.config
generator = self.generator
# tokenizer = self.tokenizer
# beam_search = (self.beams and self.beams >= 1 and self.beam_length and self.beam_length >= 1)
ids = generator.tokenizer.encode(prompt)
generator._gen_begin_reuse(ids, self.settings)
# if beam_search:
# generator.begin_beam_search()
# token_getter = generator.beam_search
# else:
# generator.end_beam_search()
token_getter = generator._gen_single_token
last_newline_pos = 0
match_buffer = ""
# seq_length = len(generator.tokenizer.decode(generator.sequence_actual[0])) # Old line
seq_length = len(generator.tokenizer.decode(generator.sequence_ids[0]))
response_start = seq_length
cursor_head = response_start
# while(generator.gen_num_tokens() <= (self.max_seq_len - 4)): #Slight extra padding space as we seem to occassionally get a few more than 1-2 tokens
while(len(generator.sequence_ids) <= (self.max_seq_len - 4)): #Slight extra padding space as we seem to occassionally get a few more than 1-2 tokens
#Fetch a token
token, eos = token_getter(self.settings)
#If it's the ending token replace it and end the generation.
if token.item() == generator.tokenizer.eos_token_id:
# generator.replace_last_token(generator.tokenizer.newline_token_id)
generator.sequence_ids[:, -1] = generator.tokenizer.newline_token_id
# if beam_search:
# generator.end_beam_search()
return
#Tokenize the string from the last new line, we can't just decode the last token due to how sentencepiece decodes.
stuff = generator.tokenizer.decode(generator.sequence_ids[0][last_newline_pos:])
cursor_tail = len(stuff)
chunk = stuff[cursor_head:cursor_tail]
cursor_head = cursor_tail
#Append the generated chunk to our stream buffer
match_buffer = match_buffer + chunk
if token.item() == generator.tokenizer.newline_token_id:
last_newline_pos = len(generator.sequence_ids[0])
cursor_head = 0
cursor_tail = 0
#Check if the stream buffer is one of the stop sequences
status = self.match_status(match_buffer, self.stop_strings)
if status == self.MatchStatus.EXACT_MATCH:
#Encountered a stop, rewind our generator to before we hit the match and end generation.
rewind_length = generator.tokenizer.encode(match_buffer).shape[-1]
generator.gen_rewind(rewind_length)
gen = generator.tokenizer.decode(generator.sequence_ids[0][response_start:])
# if beam_search:
# generator.end_beam_search()
return
elif status == self.MatchStatus.PARTIAL_MATCH:
#Partially matched a stop, continue buffering but don't yield.
continue
elif status == self.MatchStatus.NO_MATCH:
if run_manager:
run_manager.on_llm_new_token(
token=match_buffer, verbose=self.verbose,
)
yield match_buffer # Not a stop, yield the match buffer.
match_buffer = ""
return
| [] |
2024-01-10 | adimail/spotlight | streamlit~modules~extract.py | import streamlit as st
from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import HuggingFaceInstructEmbeddings, OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from langchain.llms import HuggingFaceHub
def get_pdf_text(pdf_docs):
text = ""
for pdf in pdf_docs:
pdf_reader = PdfReader(pdf)
for page in pdf_reader.pages:
text += page.extract_text()
return text
def get_text_chunks(text):
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size=1000,
chunk_overlap=200,
length_function=len
)
chunks = text_splitter.split_text(text)
return chunks
def get_vector_store(text_chunks):
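    # Embed the text chunks and index them in an in-memory FAISS vector store for similarity search.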
# embeddings = OpenAIEmbeddings()
# embeddings = HuggingFaceInstructEmbeddings(model_name = 'hkunlp/instructor-large')
embeddings = HuggingFaceInstructEmbeddings(model_name = 'all-MiniLM-L12-v2')
vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
return vectorstore
def get_conversation_chain(vectorstore):
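    # Wire the vector store retriever, a HuggingFace Hub LLM and buffered chat memory into a conversational retrieval chain.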
# llm = ChatOpenAI()
llm = HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature":1, "max_length":512})
# llm = HuggingFaceHub(repo_id="stabilityai/stablelm-3b-4e1t", model_kwargs={"temperature":1, "max_length":512})
memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)
conversation_chain = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=vectorstore.as_retriever(),
memory=memory
)
return conversation_chain
def handle_userinput(user_question):
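    # Send the question to the conversational chain kept in session state, keep the chat history, and display the response.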
response = st.session_state.conversation({'question': user_question})
st.session_state.chat_history = response['chat_history']
st.write(response)
###############################################################################
###############################################################################
#Below is the test code, module code can is written above octothorpes
# def extract_text_from_pdf(filename):
# file_path = os.path.join('docs', filename)
# try:
# pdf_file = open(file_path, 'rb')
# except FileNotFoundError:
# print(f"The file '{file_path}' was not found.")
# return
# pdf_reader = PyPDF2.PdfReader(pdf_file)
# text = ""
# for page in pdf_reader.pages:
# text += page.extract_text()
# pdf_file.close()
# return text
# def export_text_to_txt(pdf_filename):
# try:
# if not os.path.exists('texts'):
# os.makedirs('texts')
# txt_filename = os.path.splitext(pdf_filename)[0] + '.txt'
# txt_filepath = os.path.join('texts', txt_filename)
# with open(txt_filepath, 'w', encoding='utf-8') as txt_file:
# txt_file.write(extract_text_from_pdf(pdf_filename))
# except Exception as e:
# print(f"An error occurred while writing to '{txt_filename}': {str(e)}")
# if __name__ == "__main__":
# pdf_files_to_process = ['fundamental_rights.pdf', 'indian_constitution.pdf']
# for pdf_filename in pdf_files_to_process:
# export_text_to_txt(pdf_filename)
# print(f"Text extracted from '{pdf_filename}' and saved as '{pdf_filename}.txt' in the 'texts' folder.") | [] |
2024-01-10 | tnuocca-wen/sale-deed-translation | gpt4.py | from langchain.chat_models import ChatOpenAI
# from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.prompts.prompt import PromptTemplate
# from langchain import PromptTemplate
# from langchain.llms import OpenAI
from langchain.chains import LLMChain
def simp_clean(text):
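    # Ask GPT-4 to remove out-of-place words and fix grammar in a land-sale sentence while preserving the original details.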
llm = ChatOpenAI(model = 'gpt-4', temperature=0)
template = '''
Analyse the given sentence relating to a land sale and see what words are wrong and out of place(these may sometimes include names and jibberish characters) in it and remove them, also correct grammar mistakes and make the sentence logically correct
In effect clean the sentence preserving all the pertinent and important details. By no means add fake details.:" {text}"
Response:
'''
prompt_template = PromptTemplate(
input_variables=["text"],
template=template,
)
chain = LLMChain(llm=llm, prompt=prompt_template)
response = chain.run({"text":text})
# print(response)
return response | [
"\n Analyse the given sentence relating to a land sale and see what words are wrong and out of place(these may sometimes include names and jibberish characters) in it and remove them, also correct grammar mistakes and make the sentence logically correct\n In effect clean the sentence preserving all the pertinent and important details. By no means add fake details.:\" {text}\"\n Response:\n ",
" {text}"
] |
2024-01-10 | xmed-lab/CLIPN | src~open_clip~transform.py | from typing import Optional, Sequence, Tuple
import torch
import torch.nn as nn
import torchvision.transforms.functional as F
from torchvision.transforms import Normalize, Compose, RandomResizedCrop, InterpolationMode, ToTensor, Resize, \
CenterCrop
from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
class ResizeMaxSize(nn.Module):
def __init__(self, max_size, interpolation=InterpolationMode.BICUBIC, fn='max', fill=0):
super().__init__()
if not isinstance(max_size, int):
raise TypeError(f"Size should be int. Got {type(max_size)}")
self.max_size = max_size
self.interpolation = interpolation
self.fn = min if fn == 'min' else min
self.fill = fill
def forward(self, img):
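        # Scale so the longer side equals max_size, then pad symmetrically to a max_size x max_size square.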
if isinstance(img, torch.Tensor):
height, width = img.shape[:2]
else:
width, height = img.size
scale = self.max_size / float(max(height, width))
if scale != 1.0:
new_size = tuple(round(dim * scale) for dim in (height, width))
img = F.resize(img, new_size, self.interpolation)
pad_h = self.max_size - new_size[0]
pad_w = self.max_size - new_size[1]
img = F.pad(img, padding=[pad_w//2, pad_h//2, pad_w - pad_w//2, pad_h - pad_h//2], fill=self.fill)
return img
def _convert_to_rgb(image):
return image.convert('RGB')
def image_transform(
image_size: int,
is_train: bool,
mean: Optional[Tuple[float, ...]] = None,
std: Optional[Tuple[float, ...]] = None,
resize_longest_max: bool = False,
fill_color: int = 0,
):
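    # Build the preprocessing pipeline: random-resized crop for training; resize + center-crop (or aspect-preserving resize with padding) plus normalization for evaluation.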
mean = mean or OPENAI_DATASET_MEAN
if not isinstance(mean, (list, tuple)):
mean = (mean,) * 3
std = std or OPENAI_DATASET_STD
if not isinstance(std, (list, tuple)):
std = (std,) * 3
if isinstance(image_size, (list, tuple)) and image_size[0] == image_size[1]:
# for square size, pass size as int so that Resize() uses aspect preserving shortest edge
image_size = image_size[0]
normalize = Normalize(mean=mean, std=std)
if is_train:
return Compose([
RandomResizedCrop(image_size, scale=(0.9, 1.0), interpolation=InterpolationMode.BICUBIC),
_convert_to_rgb,
ToTensor(),
normalize,
])
else:
if resize_longest_max:
transforms = [
ResizeMaxSize(image_size, fill=fill_color)
]
else:
transforms = [
Resize(image_size, interpolation=InterpolationMode.BICUBIC),
CenterCrop(image_size),
]
transforms.extend([
_convert_to_rgb,
ToTensor(),
normalize,
])
return Compose(transforms)
| [] |