date_collected | repo_name | file_name | file_contents | prompts |
---|---|---|---|---|
2024-01-10 | evanwrm/solidchain | apps~api~src~solidchain~utils~file_loaders.py | import tempfile
from pathlib import Path
from typing import List
from urllib.parse import urlparse
from fastapi import HTTPException, UploadFile
from langchain.document_loaders import (
AZLyricsLoader,
CollegeConfidentialLoader,
GutenbergLoader,
HNLoader,
IMSDbLoader,
OnlinePDFLoader,
UnstructuredFileLoader,
UnstructuredURLLoader,
YoutubeLoader,
)
from langchain.text_splitter import CharacterTextSplitter
from solidchain.schemas.vectorstore import VectorStoreDB
def from_file(file: UploadFile):
with tempfile.NamedTemporaryFile() as temp_file:
temp_file.write(file.file.read())
temp_file.flush() # ensure the buffered upload is on disk before the loader reads the path
# Note: depending on filetype, files could contain malicious contents
# TODO: Attempt to safely extract text from files
loader = UnstructuredFileLoader(temp_file.name)
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
documents = loader.load_and_split(text_splitter)
return documents
def from_url(url: str):
parsed_url = urlparse(url)
loader = None
# Domain specific loaders
match parsed_url.hostname:
case "www.azlyrics.com" | "azlyrics.com":
loader = AZLyricsLoader(url)
case "www.collegeconfidential.com" | "collegeconfidential.com":
loader = CollegeConfidentialLoader(url)
case "www.gutenberg.org" | "gutenberg.org":
loader = GutenbergLoader(url)
case "www.imsdb.com" | "imsdb.com":
loader = IMSDbLoader(url)
case "news.ycombinator.com":
loader = HNLoader(url)
case "www.youtube.com" | "youtube.com" | "youtu.be":
loader = YoutubeLoader(url)
# Generic loaders
if loader is None:
match parsed_url.path.split(".")[-1]:
case "pdf":
loader = OnlinePDFLoader(url)
case _:
loader = UnstructuredURLLoader(urls=[url])
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
documents = loader.load_and_split(text_splitter)
return documents
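# A hedged usage sketch (not part of the original module): calling the URL loader directly.
# The example URL is an assumption; in the app these helpers are invoked from API routes.
# if __name__ == "__main__":
#     example_docs = from_url("https://news.ycombinator.com/item?id=1")
#     print(f"loaded {len(example_docs)} chunks")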
| [] |
2024-01-10 | oceantalk/LLMSurvey | Experiments~LanguageGeneration~WMT22~wmt-002.py | import openai
import time
import json
openai.api_key = 'sk-'
def get_res_batch(input):
prompt = input
while True:
try:
res = openai.Completion.create(
engine="text-davinci-002",
prompt=prompt,
temperature=0.0,
max_tokens=128
)
break
except openai.error.RateLimitError:
print('openai.error.RateLimitError\nRetrying...')
time.sleep(60)
except openai.error.ServiceUnavailableError:
print('openai.error.ServiceUnavailableError\nRetrying...')
time.sleep(20)
except openai.error.Timeout:
print('openai.error.Timeout\nRetrying...')
time.sleep(20)
except openai.error.APIError:
print('openai.error.APIError\nRetrying...')
time.sleep(20)
except openai.error.APIConnectionError:
print('openai.error.APIConnectionError\nRetrying...')
time.sleep(20)
# print(res["choices"][0]['text'].strip())
return res["choices"][0]['text'].strip()
def get_dataset(file):
with open(file, 'r', encoding="utf-8") as f:
data = []
for line in f:
data.append(json.loads(line))
for i in range(len(data)):
input = data[i]["input"]
ref = data[i]["ref"]
ans = get_res_batch(input)
gen = {"input": input, "ground_truth": ref, "generation": ans}
dump_jsonl(gen, "generation/text-davinci-002.json")
def dump_jsonl(data, output_path, append=True):
"""
Append a single JSON-serializable record to a JSON lines file.
"""
mode = 'a+' if append else 'w'
with open(output_path, mode, encoding='utf-8') as f:
json_record = json.dumps(data, ensure_ascii=False)
f.write(json_record + '\n')
# print('Wrote {} records to {}'.format(len(data), output_path))
if __name__ == '__main__':
file = "data/test_wmt.json"
get_dataset(file)
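# A hedged follow-up sketch (not in the original script): reading the generated JSONL back
# for spot-checking. The path matches the one written above; the field names come from `gen`.
# with open("generation/text-davinci-002.json", "r", encoding="utf-8") as f:
#     records = [json.loads(line) for line in f]
# print(records[0]["input"], "->", records[0]["generation"])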
| [
"INPUT"
] |
2024-01-10 | oceantalk/LLMSurvey | Experiments~LanguageGeneration~WMT22~wmt-003.py | import openai
import time
import json
openai.api_key = 'sk-'
def get_res_batch(input):
prompt = input
while True:
try:
res = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
temperature=0.0,
max_tokens=128
)
break
except openai.error.RateLimitError:
print('openai.error.RateLimitError\nRetrying...')
time.sleep(60)
except openai.error.ServiceUnavailableError:
print('openai.error.ServiceUnavailableError\nRetrying...')
time.sleep(20)
except openai.error.Timeout:
print('openai.error.Timeout\nRetrying...')
time.sleep(20)
except openai.error.APIError:
print('openai.error.APIError\nRetrying...')
time.sleep(20)
except openai.error.APIConnectionError:
print('openai.error.APIConnectionError\nRetrying...')
time.sleep(20)
# print(res["choices"][0]['text'].strip())
return res["choices"][0]['text'].strip()
def get_dataset(file):
with open(file, 'r', encoding="utf-8") as f:
data = []
for line in f:
data.append(json.loads(line))
for i in range(len(data)):
input = data[i]["input"]
ref = data[i]["ref"]
ans = get_res_batch(input)
gen = {"input": input, "ground_truth": ref, "generation": ans}
dump_jsonl(gen, "generation/evaluate_text-davinci-003.json")
def dump_jsonl(data, output_path, append=True):
"""
Append a single JSON-serializable record to a JSON lines file.
"""
mode = 'a+' if append else 'w'
with open(output_path, mode, encoding='utf-8') as f:
json_record = json.dumps(data, ensure_ascii=False)
f.write(json_record + '\n')
# print('Wrote {} records to {}'.format(len(data), output_path))
if __name__ == '__main__':
file = "data/test_wmt.json"
get_dataset(file)
| [
"INPUT"
] |
2024-01-10 | oceantalk/LLMSurvey | Experiments~LanguageGeneration~WMT22~wmt_chatgpt.py | import openai
import time
import json
openai.api_key = 'sk-'
def get_res_batch(input):
message = [
{"role": "user", "content": input }
]
while True:
try:
res = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=message,
temperature=0.0,
max_tokens=128
)
break
except openai.error.RateLimitError:
print('openai.error.RateLimitError\nRetrying...')
time.sleep(60)
except openai.error.ServiceUnavailableError:
print('openai.error.ServiceUnavailableError\nRetrying...')
time.sleep(20)
except openai.error.Timeout:
print('openai.error.Timeout\nRetrying...')
time.sleep(20)
except openai.error.APIError:
print('openai.error.APIError\nRetrying...')
time.sleep(20)
except openai.error.APIConnectionError:
print('openai.error.APIConnectionError\nRetrying...')
time.sleep(20)
# print(res['choices'][0]['message']['content'])
return res['choices'][0]['message']['content']
def get_dataset(file):
with open(file, 'r', encoding="utf-8") as f:
data = []
for line in f:
data.append(json.loads(line))
for i in range(len(data)):
input = data[i]["input"]
ref = data[i]["ref"]
ans = get_res_batch(input)
gen = {"input": input, "ground_truth":ref, "generation": ans}
dump_jsonl(gen, "generation/gpt-3.5-turbo.json")
def dump_jsonl(data, output_path, append=True):
"""
Append a single JSON-serializable record to a JSON lines file.
"""
mode = 'a+' if append else 'w'
with open(output_path, mode, encoding='utf-8') as f:
json_record = json.dumps(data, ensure_ascii=False)
f.write(json_record + '\n')
# print('Wrote {} records to {}'.format(len(data), output_path))
if __name__ == '__main__':
file = "data/test_wmt.json"
get_dataset(file)
| [
"INPUT"
] |
2024-01-10 | DrDavidL/home_tools | learn_assistant.py | from openai import OpenAI
import streamlit as st
from prompts import *
import random
import time
from typing import List, Optional, Union, Dict, Any
def check_password():
"""Returns `True` if the user had the correct password."""
def password_entered():
"""Checks whether a password entered by the user is correct."""
if st.session_state["password"] == st.secrets["password"]:
st.session_state["password_correct"] = True
del st.session_state["password"] # don't store password
else:
st.session_state["password_correct"] = False
if "password_correct" not in st.session_state:
# First run, show input for password.
st.text_input(
"Password", type="password", on_change=password_entered, key="password"
)
st.write("*Please contact David Liebovitz, MD if you need an updated password for access.*")
return False
elif not st.session_state["password_correct"]:
# Password not correct, show input + error.
st.text_input(
"Password", type="password", on_change=password_entered, key="password"
)
st.error("😕 Password incorrect")
return False
else:
# Password correct.
return True
st.title("My Teacher!")
if check_password():
client = OpenAI(
organization= st.secrets["ORGANIZATION"],
api_key = st.secrets["OPENAI_API_KEY"]
)
# Retrieve My Assistant
my_assistant = client.beta.assistants.retrieve(st.secrets["ASSISTANT_ID"])
# Create a new thread
thread = client.beta.threads.create()
# Add a message to the thread
my_name = st.text_input("What is your name?")
my_question = st.text_input("What is your question?")
message = client.beta.threads.messages.create(
thread_id=thread.id,
role="user",
content=f'user_name: {my_name} Question: {my_question}'
)
# Run the assistant
if st.button("Ask your question!"):
my_run = client.beta.threads.runs.create(
thread_id=thread.id,
assistant_id=my_assistant.id,
instructions=bio_tutor,
)
messages = client.beta.threads.messages.list(
thread_id=thread.id
)
# Periodically retrieve the Run to check on its status to see if it has moved to completed
while my_run.status != "completed":
keep_retrieving_run = client.beta.threads.runs.retrieve(
thread_id=thread.id,
run_id=my_run.id
)
# st.write(f"Run status: {keep_retrieving_run.status}")
if keep_retrieving_run.status == "completed":
# print("\n")
break
all_messages = client.beta.threads.messages.list(
thread_id=thread.id
)
with st.chat_message("user"):
st.write(my_question)
with st.chat_message("assistant"):
st.write(all_messages.data[0].content[0].text.value)
| [] |
2024-01-10 | DrDavidL/home_tools | learn.py |
import streamlit as st
import os
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
import streamlit as st
from prompts import *
from openai import OpenAI
def gen_response(messages, temperature, model, print = True):
api_key = st.secrets["OPENAI_API_KEY"]
client = OpenAI(
api_key=api_key,
)
params = {
"model": model,
"messages": messages,
"temperature": temperature,
"stream": print,
}
try:
completion = client.chat.completions.create(**params)
except Exception as e:
st.write(e)
st.write(f'Here were the params: {params}')
return None
with st.chat_message("assistant"):
placeholder = st.empty()
full_response = ''
for chunk in completion:
if chunk.choices[0].delta.content is not None:
full_response += chunk.choices[0].delta.content
# full_response.append(chunk.choices[0].delta.content)
placeholder.markdown(full_response)
placeholder.markdown(full_response)
return full_response
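# A hedged usage sketch (an assumption, not in the original app): gen_response expects an
# OpenAI-style message list and must run inside a Streamlit script, since it renders the
# streamed reply with st.chat_message.
# answer = gen_response(
#     messages=[{"role": "system", "content": "You are a tutor."},
#               {"role": "user", "content": "Explain osmosis in two sentences."}],
#     temperature=0.3, model="gpt-3.5-turbo", print=True)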
def check_password():
"""Returns `True` if the user had the correct password."""
def password_entered():
"""Checks whether a password entered by the user is correct."""
if st.session_state["password"] == st.secrets["password"]:
st.session_state["password_correct"] = True
# del st.session_state["password"] # don't store password
else:
st.session_state["password_correct"] = False
if "password_correct" not in st.session_state:
# First run, show input for password.
st.text_input(
"Password", type="password", on_change=password_entered, key="password"
)
st.write("*Please contact David Liebovitz, MD if you need an updated password for access.*")
return False
elif not st.session_state["password_correct"]:
# Password not correct, show input + error.
st.text_input(
"Password", type="password", on_change=password_entered, key="password"
)
st.error("😕 Password incorrect")
return False
else:
# Password correct.
return True
def main():
st.set_page_config(page_title='My Tutor', layout = 'centered', page_icon = ':stethoscope:', initial_sidebar_state = 'auto')
st.title("Learn!")
st.write("ALPHA version 0.5")
with st.expander('Important Disclaimer'):
st.write("Author: David Liebovitz")
st.info(disclaimer)
st.session_state.temp = st.slider("Select temperature (Higher values more creative but tangential and more error prone)", 0.0, 1.0, 0.3, 0.01)
st.write("Last updated 12/9/23")
if "current_thread" not in st.session_state:
st.session_state["current_thread"] = ""
if "last_answer" not in st.session_state:
st.session_state["last_answer"] = []
if "temp" not in st.session_state:
st.session_state["temp"] = 0.3
if "your_question" not in st.session_state:
st.session_state["your_question"] = ""
if "texts" not in st.session_state:
st.session_state["texts"] = ""
if "retriever" not in st.session_state:
st.session_state["retriever"] = ""
if "model" not in st.session_state:
st.session_state["model"] = "openai/gpt-3.5-turbo-16k"
if "tutor_user_topic" not in st.session_state:
st.session_state["tutor_user_topic"] = []
if "tutor_user_answer" not in st.session_state:
st.session_state["tutor_user_answer"] = []
if "message_thread" not in st.session_state:
st.session_state["message_thread"] = []
if check_password():
embeddings = OpenAIEmbeddings()
if "vectorstore" not in st.session_state:
st.session_state["vectorstore"] = FAISS.load_local("bio.faiss", embeddings)
model = st.sidebar.selectbox("Select a model", ["gpt-4-1106-preview", "gpt-3.5-turbo-1106", ])
name = st.text_input("Please enter your first name:")
if st.session_state.message_thread == []:
st.warning("Enter your request at the bottom of the page.")
user_input = st.chat_input("Your input goes here, ask to teach or for test questions, submit your responses, etc.:")
system_context = bio_tutor.format(name = name, outline = biology_outline)
if st.session_state.message_thread == []:
st.session_state.message_thread = [{"role": "system", "content": system_context}]
if user_input:
st.session_state.message_thread.append({"role": "user", "content": user_input})
with st.chat_message("user"):
st.markdown(user_input)
with st.spinner("Thinking..."):
answer_for_learner = gen_response(messages = st.session_state.message_thread, temperature = st.session_state.temp, model = model, print = True)
st.session_state.tutor_user_topic.append(f'{name}: {user_input}')
st.session_state.tutor_user_answer.append(answer_for_learner)
st.session_state.message_thread.append({"role": "assistant", "content": answer_for_learner})
tutor_download_str = f"{disclaimer}\n\ntutor Questions and Answers:\n\n"
for i in range(len(st.session_state.tutor_user_topic)):
tutor_download_str += f"{st.session_state.tutor_user_topic[i]}\n"
tutor_download_str += f"Answer: {st.session_state.tutor_user_answer[i]}\n\n"
st.session_state.current_thread = tutor_download_str
# Display the expander section with the full thread of questions and answers
if st.session_state.message_thread != "":
with st.sidebar.expander("Your Conversation", expanded=False):
for i in range(len(st.session_state.tutor_user_topic)):
st.info(f"{st.session_state.tutor_user_topic[i]}", icon="🧐")
st.success(f"Answer: {st.session_state.tutor_user_answer[i]}", icon="🤖")
if st.session_state.current_thread != '':
st.download_button('Download', st.session_state.current_thread, key='tutor_questions')
if st.sidebar.button("Start a new conversation"):
st.session_state.message_thread = []
if __name__ == "__main__":
main() | [] |
2024-01-10 | DrDavidL/home_tools | learn_google.py |
import streamlit as st
import os
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
import streamlit as st
from prompts import *
from openai import OpenAI
import pathlib
import textwrap
import google.generativeai as genai
def gen_response_google(messages, temperature, model, print = True):
GOOGLE_API_KEY = st.secrets['GOOGLE_API_KEY']
genai.configure(api_key=GOOGLE_API_KEY)
# note: the temperature and model arguments are currently unused; Gemini Pro is hard-coded
gemini_model = genai.GenerativeModel('gemini-pro')
response = gemini_model.generate_content(str(messages))
st.markdown(response.text)
return response.text
def gen_response(messages, temperature, model, print = True):
api_key = st.secrets["OPENAI_API_KEY"]
client = OpenAI(
api_key=api_key,
)
params = {
"model": model,
"messages": messages,
"temperature": temperature,
"stream": print,
}
try:
completion = client.chat.completions.create(**params)
except Exception as e:
st.write(e)
st.write(f'Here were the params: {params}')
return None
with st.chat_message("assistant"):
placeholder = st.empty()
full_response = ''
for chunk in completion:
if chunk.choices[0].delta.content is not None:
full_response += chunk.choices[0].delta.content
# full_response.append(chunk.choices[0].delta.content)
placeholder.markdown(full_response)
placeholder.markdown(full_response)
return full_response
def check_password():
"""Returns `True` if the user had the correct password."""
def password_entered():
"""Checks whether a password entered by the user is correct."""
if st.session_state["password"] == st.secrets["password"]:
st.session_state["password_correct"] = True
# del st.session_state["password"] # don't store password
else:
st.session_state["password_correct"] = False
if "password_correct" not in st.session_state:
# First run, show input for password.
st.text_input(
"Password", type="password", on_change=password_entered, key="password"
)
st.write("*Please contact David Liebovitz, MD if you need an updated password for access.*")
return False
elif not st.session_state["password_correct"]:
# Password not correct, show input + error.
st.text_input(
"Password", type="password", on_change=password_entered, key="password"
)
st.error("😕 Password incorrect")
return False
else:
# Password correct.
return True
def main():
st.set_page_config(page_title='My Tutor', layout = 'centered', page_icon = ':stethoscope:', initial_sidebar_state = 'auto')
st.title("Learn!")
st.write("ALPHA version 0.5")
with st.expander('Important Disclaimer'):
st.write("Author: David Liebovitz")
st.info(disclaimer)
st.session_state.temp = st.slider("Select temperature (Higher values more creative but tangential and more error prone)", 0.0, 1.0, 0.3, 0.01)
st.write("Last updated 12/9/23")
if "current_thread" not in st.session_state:
st.session_state["current_thread"] = ""
if "last_answer" not in st.session_state:
st.session_state["last_answer"] = []
if "temp" not in st.session_state:
st.session_state["temp"] = 0.3
if "your_question" not in st.session_state:
st.session_state["your_question"] = ""
if "texts" not in st.session_state:
st.session_state["texts"] = ""
if "retriever" not in st.session_state:
st.session_state["retriever"] = ""
if "model" not in st.session_state:
st.session_state["model"] = "openai/gpt-3.5-turbo-16k"
if "tutor_user_topic" not in st.session_state:
st.session_state["tutor_user_topic"] = []
if "tutor_user_answer" not in st.session_state:
st.session_state["tutor_user_answer"] = []
if "message_thread" not in st.session_state:
st.session_state["message_thread"] = []
if check_password():
embeddings = OpenAIEmbeddings()
if "vectorstore" not in st.session_state:
st.session_state["vectorstore"] = FAISS.load_local("bio.faiss", embeddings)
model = st.sidebar.selectbox("Select a model", ["gpt-4-1106-preview", "gpt-3.5-turbo-1106", ])
name = st.text_input("Please enter your first name:")
if st.session_state.message_thread == []:
st.warning("Enter your request at the bottom of the page.")
user_input = st.chat_input("Your input goes here, ask to teach or for test questions, submit your responses, etc.:")
system_context = bio_tutor.format(name = name, outline = biology_outline)
if st.session_state.message_thread == []:
st.session_state.message_thread = [{"role": "system", "content": system_context}]
if user_input:
st.session_state.message_thread.append({"role": "user", "content": user_input})
with st.chat_message("user"):
st.markdown(user_input)
with st.spinner("Thinking..."):
answer_for_learner = gen_response_google(messages = st.session_state.message_thread, temperature = st.session_state.temp, model = model, print = True)
st.session_state.tutor_user_topic.append(f'{name}: {user_input}')
st.session_state.tutor_user_answer.append(answer_for_learner)
st.session_state.message_thread.append({"role": "assistant", "content": answer_for_learner})
tutor_download_str = f"{disclaimer}\n\ntutor Questions and Answers:\n\n"
for i in range(len(st.session_state.tutor_user_topic)):
tutor_download_str += f"{st.session_state.tutor_user_topic[i]}\n"
tutor_download_str += f"Answer: {st.session_state.tutor_user_answer[i]}\n\n"
st.session_state.current_thread = tutor_download_str
# Display the expander section with the full thread of questions and answers
if st.session_state.message_thread != "":
with st.sidebar.expander("Your Conversation", expanded=False):
for i in range(len(st.session_state.tutor_user_topic)):
st.info(f"{st.session_state.tutor_user_topic[i]}", icon="🧐")
st.success(f"Answer: {st.session_state.tutor_user_answer[i]}", icon="🤖")
if st.session_state.current_thread != '':
st.download_button('Download', st.session_state.current_thread, key='tutor_questions')
if st.sidebar.button("Start a new conversation"):
st.session_state.message_thread = []
if __name__ == "__main__":
main() | [] |
2024-01-10 | diegounzueta/Medium-Articles- | AssemblyAI%20Smart%20Assistant~main2.py | from filecmp import clear_cache
from re import T
import pyaudio
import streamlit as st
import websockets
import asyncio
import base64
import json
import openai
import pyttsx3
import time
import os
from api_keys import assemblyAI_key, openaI_key
openai.api_key = openaI_key
from google.cloud import aiplatform
def create_custom_job_sample(
caption: str,
project: str,
display_name: str,
container_image_uri: str,
location: str = "us-central1",
api_endpoint: str = "us-central1-aiplatform.googleapis.com",
):
# The AI Platform services require regional API endpoints.
client_options = {"api_endpoint": api_endpoint}
# Initialize client that will be used to create and send requests.
# This client only needs to be created once, and can be reused for multiple requests.
client = aiplatform.gapic.JobServiceClient(client_options=client_options)
custom_job = {
"display_name": display_name,
"job_spec": {
"worker_pool_specs": [
{
"machine_spec": {
"machine_type": "n1-standard-4",
"accelerator_type": aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_K80,
"accelerator_count": 1,
},
"replica_count": 1,
"python_package_spec": {
"executor_image_uri": container_image_uri,
"package_uris" : ["gs://image_gen_jobs/image_generation-0.2.tar.gz"],
"python_module": "trainer.task",
"args":["--caption={}".format(caption)]
},
}
]
},
}
parent = f"projects/{project}/locations/{location}"
response = client.create_custom_job(parent=parent, custom_job=custom_job)
print("response:", response)
class app:
def __init__(self):
self.FRAMES_PER_BUFFER = 3200
self.FORMAT = pyaudio.paInt16
self.CHANNELS = 1
self.RATE = 16000
self.p = pyaudio.PyAudio()
# the AssemblyAI endpoint we're going to hit
self.URL = "wss://api.assemblyai.com/v2/realtime/ws?sample_rate=16000"
self.bot_text, self.user_text = [], []
self.pipeline()
def pipeline(self):
self.initialize_tool()
self.buttons()
self.load_past()
asyncio.run(self.send_receive())
def initialize_tool(self):
# init streamlit app
st.set_page_config(
page_title="Interactive AI", page_icon="🤖"
)
st.markdown('<h1 style="color: white">SMART ASSISTANT TOOL</h1>', unsafe_allow_html=True)
# init recording
self.stream = self.p.open(
format=self.FORMAT,
channels=self.CHANNELS,
rate=self.RATE,
input=True,
frames_per_buffer=self.FRAMES_PER_BUFFER
)
# init session state
if "init" not in st.session_state:
st.session_state["init"] = False
def toggle_on(self):
st.session_state["init"] = True
def toggle_off(self):
st.session_state["init"] = False
def clear_chat(self):
if os.path.exists("chat1.txt"):
os.remove("chat1.txt")
if os.path.exists("chat2.txt"):
os.remove("chat2.txt")
with open('chat1.txt', 'x') as f:
f.write("")
with open('chat2.txt', 'x') as f:
f.write("")
def buttons(self):
col1, col2 = st.columns((1,1))
with col1:
st.markdown("## ")
st.markdown("## ")
st.button("Record", on_click = self.toggle_on)
st.button("Clear Chat", on_click = self.clear_chat)
# with col2:
# st.image("oldman1.png", width=300)
self.speaker1, space, self.speaker2 = st.columns((1, 0.2, 1))
with self.speaker1:
st.markdown('<h2 style="color: white">USER</h2>', unsafe_allow_html=True)
with self.speaker2:
st.markdown('<h2 style="color: pink; text-align:right">BOT</h2>', unsafe_allow_html=True)
def load_past(self):
# LOAD PAST MESSAGES
with open ("chat1.txt", "r") as myfile:
user_text = myfile.read().splitlines()
with open ("chat2.txt", "r") as myfile:
bot_text = myfile.read().splitlines()
for i, j in zip(user_text, bot_text):
with self.speaker1:
st.markdown("## ")
st.markdown('<p style="color: white; font-size:25px">{}</p>'.format(i),
unsafe_allow_html=True)
st.markdown("## ")
with self.speaker2:
st.markdown("## ")
st.markdown("## ")
st.markdown('<p style="color: pink; text-align:right; font-size:25px">{}</p>'.format(j), unsafe_allow_html=True)
st.markdown("## ")
def generate_art(self, text):
t = text.split("Generate")[-1]
with self.speaker2:
st.markdown("## ")
st.markdown("## ")
# launch job to vertex
create_custom_job_sample(
caption = "t",
project = "drone-swarm",
container_image_uri = "europe-docker.pkg.dev/vertex-ai/training/pytorch-gpu.1-10:latest",
display_name = "art_gen_script")
# add delay for job to run
time.sleep(300)
#load image from bucket
from google.cloud import storage
client = storage.Client()
bucket = client.get_bucket('image_gen')
blob = bucket.get_blob('image_gen/{}.png'.format(t))
blob.download_to_filename('{}.png'.format(t))
#show image
from PIL import Image
image = Image.open('{}.png'.format(t))
st.image(image, caption=t)
async def send_receive(self):
print(f'Connecting websocket to url ${self.URL}')
async with websockets.connect(
self.URL,
extra_headers=(("Authorization", assemblyAI_key),),
ping_interval=5,
ping_timeout=20
) as _ws:
r = await asyncio.sleep(0.1)
print("Receiving SessionBegins ...")
session_begins = await _ws.recv()
async def send():
while st.session_state["init"] == True:
try:
data = self.stream.read(self.FRAMES_PER_BUFFER, exception_on_overflow = False)
data = base64.b64encode(data).decode("utf-8")
json_data = json.dumps({"audio_data":str(data)})
r = await _ws.send(json_data)
except websockets.exceptions.ConnectionClosedError as e:
print(e)
assert e.code == 4008
break
except Exception as e:
assert False, "Not a websocket 4008 error"
r = await asyncio.sleep(0.01)
if st.session_state["init"] == False:
closeAPI = json.dumps({"terminate_session": True})
r = await _ws.send(closeAPI)
return True
async def receive():
while st.session_state["init"] == True:
try:
result_str = await _ws.recv()
if (json.loads(result_str)["message_type"] == "FinalTranscript") and (json.loads(result_str)['text'] != ""):
# user_text.append(json.loads(result_str)['text'])
with open('chat1.txt', 'a') as f:
f.write(json.loads(result_str)['text'] + '\n')
with self.speaker1:
st.markdown("## ")
text = json.loads(result_str)['text']
st.markdown('<p style="color: white; font-size:25px">{}</p>'.format(text),
unsafe_allow_html=True)
st.markdown("## ")
if "Generate" in text:
self.generate_art(text)
else:
promt = json.loads(result_str)["text"]
response = openai.Completion.create(
engine = "text-davinci-002",
prompt = promt,
n=5,
temperature=0.7,
max_tokens=80,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
print(response)
response_test = response.choices[0].text
response_test = response_test.replace("\n", "")
with self.speaker2:
st.markdown("## ")
st.markdown("## ")
st.markdown('<p style="color: pink; text-align:right; font-size:25px">{}</p>'.format(response_test), unsafe_allow_html=True)
st.markdown("## ")
with open('chat2.txt', 'a') as f:
f.write(response_test + '\n')
pyttsx3.speak(response_test.replace("\u03c0", "pi"))
self.toggle_off()
except Exception as e:
st.write("ERROR", e)
assert False
send_result, receive_result = await asyncio.gather(send(), receive())
if __name__ == '__main__':
app()
| [] |
2024-01-10 | benfield97/scripts | activeloop_customersupport.py | import os
from dotenv import load_dotenv
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.text_splitter import CharacterTextSplitter
from langchain import OpenAI
from langchain.document_loaders import SeleniumURLLoader
from langchain import PromptTemplate
load_dotenv()
os.environ["OPENAI_API_KEY"] = os.getenv('OPENAI_API_KEY')
os.environ['ACTIVELOOP_TOKEN'] = os.getenv('ACTIVELOOP_TOKEN')
urls = ['https://beebom.com/what-is-nft-explained/',
'https://beebom.com/how-delete-spotify-account/',
'https://beebom.com/how-download-gif-twitter/',
'https://beebom.com/how-use-chatgpt-linux-terminal/',
'https://beebom.com/how-delete-spotify-account/',
'https://beebom.com/how-save-instagram-story-with-music/',
'https://beebom.com/how-install-pip-windows/',
'https://beebom.com/how-check-disk-usage-linux/']
# use the selenium scraper to load the documents
loader = SeleniumURLLoader(urls=urls)
docs_not_splitted = loader.load()
#split the documents into smaller chunks
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(docs_not_splitted)
embeddings = OpenAIEmbeddings(model='text-embedding-ada-002')
#create deeplake dataset
# my_activeloop_org_id = 'benfield'
# my_activeloop_dataset_name = 'langchain_course_from_zero_to_hero'
# dataset_path = f"hub://{my_activeloop_org_id}/{my_activeloop_dataset_name}"
# db = DeepLake(dataset_path=dataset_path, embedding_function=embeddings)
db = FAISS.from_documents(docs, embeddings)
# note: FAISS.from_documents already indexes `docs`, so adding them again below would
# duplicate every chunk in the index
# db.add_documents(docs)
# find the top relevant documents to a specific query
query = 'how to check disk usage in linux?'
docs = db.similarity_search(query)
print(docs[0].page_content)
# let's write a prompt for a customer support chatbot
# answer questions using information extracted from our db
template = """You are an exceptional customer support chatbot that gently answer questions.
You know the following context information.
{chunks_formatted}
Answer to the following question from a customer. Use only information from the previous context information. Do not invent stuff.
Question: {query}
Answer:"""
prompt = PromptTemplate(
template=template,
input_variables = ['chunks_formatted', 'query'],
)
# the full pipeline
#user questions
query = 'How to check disk usage in linux?'
#retrieve relevant chunks
docs = db.similarity_search(query)
retrieved_chunks = [doc.page_content for doc in docs]
#format the prompt
chunks_formatted = "\n\n".join(retrieved_chunks)
prompt_formatted = prompt.format(chunks_formatted=chunks_formatted, query=query)
#generate the answer
llm = OpenAI(model='text-davinci-003', temperature=0)
answer = llm(prompt_formatted)
print(answer)
'''
helpful point:
Suppose we ask, "Is the Linux distribution free?"
and provide GPT-3 with a document about kernel features as context.
It might generate an answer like "Yes, the Linux distribution is free to
download and use," even if such information is not present in the context
document. Producing false information is highly undesirable for customer service
chatbots!
GPT-3 is less likely to generate false information when the answer to the user's
question is contained within the context. Since user questions are often brief
and ambiguous, we cannot always rely on the semantic search step to retrieve the
correct document. Thus, there is always a risk of generating false information.
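
A minimal mitigation sketch (an illustrative assumption, not part of the pipeline above):
refuse to answer when retrieval confidence is low. FAISS's similarity_search_with_score
returns L2 distances (lower = closer); the 0.8 cutoff is arbitrary.

    docs_and_scores = db.similarity_search_with_score(query, k=4)
    relevant = [doc for doc, score in docs_and_scores if score < 0.8]
    if not relevant:
        answer = "I don't have enough information to answer that."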
''' | [
"You are an exceptional customer support chatbot that gently answer questions.\n\nYou know the following context information.\n\n{chunks_formatted}\n\nAnswer to the following question from a customer. Use only information from the previous context information. Do not invent stuff.\n\nQuestion: {query}\n\nAnswer:",
"chunks_formatted"
] |
2024-01-10 | zhouzhiqian/Safe-Reinforcement-Learning-Baselines | Safe-MARL~Multi-Agent-Constrained-Policy-Optimisation~MACPO~macpo~algorithms~r_mappo~r_macpo.py | import numpy as np
import torch
import torch.nn as nn
from macpo.utils.util import get_gard_norm, huber_loss, mse_loss
from macpo.utils.popart import PopArt
from macpo.algorithms.utils.util import check
from macpo.algorithms.r_mappo.algorithm.r_actor_critic import R_Actor
from torch.nn.utils import clip_grad_norm
import copy
# EPS = 1e-8
class R_MACTRPO_CPO():
"""
Trainer class for MATRPO to update policies.
:param args: (argparse.Namespace) arguments containing relevant model, policy, and env information.
:param policy: (R_MAPPO_Policy) policy to update.
:param device: (torch.device) specifies the device to run on (cpu/gpu).
"""
def __init__(self,
args,
policy, attempt_feasible_recovery=False,
attempt_infeasible_recovery=False, revert_to_last_safe_point=False, delta_bound=0.011,
safety_bound=0.1,
_backtrack_ratio=0.8, _max_backtracks=15, _constraint_name_1="trust_region",
_constraint_name_2="safety_region", linesearch_infeasible_recovery=True, accept_violation=False,
learn_margin=False,
device=torch.device("cpu")):
self.device = device
self.tpdv = dict(dtype=torch.float32, device=device)
self.policy = policy
self.clip_param = args.clip_param
self.ppo_epoch = args.ppo_epoch
self.num_mini_batch = args.num_mini_batch
self.data_chunk_length = args.data_chunk_length
self.value_loss_coef = args.value_loss_coef
self.entropy_coef = args.entropy_coef
self.max_grad_norm = args.max_grad_norm
self.huber_delta = args.huber_delta
self.episode_length = args.episode_length
self.kl_threshold = args.kl_threshold
self.safety_bound = args.safety_bound
self.ls_step = args.ls_step
self.accept_ratio = args.accept_ratio
self.EPS = args.EPS
self.gamma = args.gamma
self.safety_gamma = args.safety_gamma
self.line_search_fraction = args.line_search_fraction
self.g_step_dir_coef = args.g_step_dir_coef
self.b_step_dir_coef = args.b_step_dir_coef
self.fraction_coef = args.fraction_coef
self._use_recurrent_policy = args.use_recurrent_policy
self._use_naive_recurrent = args.use_naive_recurrent_policy
self._use_max_grad_norm = args.use_max_grad_norm
self._use_clipped_value_loss = args.use_clipped_value_loss
self._use_huber_loss = args.use_huber_loss
self._use_popart = args.use_popart
self._use_value_active_masks = args.use_value_active_masks
self._use_policy_active_masks = args.use_policy_active_masks
# todo: my args-start
self.args = args
self.device = device
self.tpdv = dict(dtype=torch.float32, device=device)
self.policy = policy
self._damping = 0.0001
self._delta = 0.01
self._max_backtracks = 10
self._backtrack_coeff = 0.5
self.clip_param = args.clip_param
self.ppo_epoch = args.ppo_epoch
self.num_mini_batch = args.num_mini_batch
self.data_chunk_length = args.data_chunk_length
self.value_loss_coef = args.value_loss_coef
self.entropy_coef = args.entropy_coef
self.max_grad_norm = args.max_grad_norm
self.huber_delta = args.huber_delta
self._use_recurrent_policy = args.use_recurrent_policy
self._use_naive_recurrent = args.use_naive_recurrent_policy
self._use_max_grad_norm = args.use_max_grad_norm
self._use_clipped_value_loss = args.use_clipped_value_loss
self._use_huber_loss = args.use_huber_loss
self._use_popart = args.use_popart
self._use_value_active_masks = args.use_value_active_masks
self._use_policy_active_masks = args.use_policy_active_masks
self.attempt_feasible_recovery = attempt_feasible_recovery
self.attempt_infeasible_recovery = attempt_infeasible_recovery
self.revert_to_last_safe_point = revert_to_last_safe_point
self._max_quad_constraint_val = args.kl_threshold # delta_bound
self._max_lin_constraint_val = args.safety_bound
self._backtrack_ratio = _backtrack_ratio
self._max_backtracks = _max_backtracks
self._constraint_name_1 = _constraint_name_1
self._constraint_name_2 = _constraint_name_2
self._linesearch_infeasible_recovery = linesearch_infeasible_recovery
self._accept_violation = accept_violation
hvp_approach = None
num_slices = 1
self.lamda_coef = 0
self.lamda_coef_a_star = 0
self.lamda_coef_b_star = 0
self.margin = 0
self.margin_lr = 0.05
self.learn_margin = learn_margin
self.n_rollout_threads = args.n_rollout_threads
if self._use_popart:
self.value_normalizer = PopArt(1, device=self.device)
else:
self.value_normalizer = None
def cal_value_loss(self, values, value_preds_batch, return_batch, active_masks_batch):
"""
Calculate value function loss.
:param values: (torch.Tensor) value function predictions.
:param value_preds_batch: (torch.Tensor) "old" value predictions from data batch (used for value clip loss)
:param return_batch: (torch.Tensor) reward to go returns.
:param active_masks_batch: (torch.Tensor) denotes whether the agent is active or dead at a given timestep.
:return value_loss: (torch.Tensor) value function loss.
"""
if self._use_popart:
value_pred_clipped = value_preds_batch + (values - value_preds_batch).clamp(-self.clip_param,
self.clip_param)
error_clipped = self.value_normalizer(return_batch) - value_pred_clipped
error_original = self.value_normalizer(return_batch) - values
else:
value_pred_clipped = value_preds_batch + (values - value_preds_batch).clamp(-self.clip_param,
self.clip_param)
error_clipped = return_batch - value_pred_clipped
error_original = return_batch - values
if self._use_huber_loss:
value_loss_clipped = huber_loss(error_clipped, self.huber_delta)
value_loss_original = huber_loss(error_original, self.huber_delta)
else:
value_loss_clipped = mse_loss(error_clipped)
value_loss_original = mse_loss(error_original)
if self._use_clipped_value_loss:
value_loss = torch.max(value_loss_original, value_loss_clipped)
else:
value_loss = value_loss_original
if self._use_value_active_masks:
value_loss = (value_loss * active_masks_batch).sum() / active_masks_batch.sum()
else:
value_loss = value_loss.mean()
return value_loss
def flat_grad(self, grads):
grad_flatten = []
for grad in grads:
if grad is None:
continue
grad_flatten.append(grad.view(-1))
grad_flatten = torch.cat(grad_flatten)
return grad_flatten
def flat_hessian(self, hessians):
hessians_flatten = []
for hessian in hessians:
if hessian is None:
continue
hessians_flatten.append(hessian.contiguous().view(-1))
hessians_flatten = torch.cat(hessians_flatten).data
return hessians_flatten
def flat_params(self, model):
params = []
for param in model.parameters():
params.append(param.data.view(-1))
params_flatten = torch.cat(params)
return params_flatten
def update_model(self, model, new_params):
index = 0
for params in model.parameters():
params_length = len(params.view(-1))
new_param = new_params[index: index + params_length]
new_param = new_param.view(params.size())
params.data.copy_(new_param)
index += params_length
def kl_divergence(self, obs, rnn_states, action, masks, available_actions, active_masks, new_actor, old_actor):
_, _, mu, std = new_actor.evaluate_actions(obs, rnn_states, action, masks, available_actions, active_masks)
_, _, mu_old, std_old = old_actor.evaluate_actions(obs, rnn_states, action, masks, available_actions,
active_masks)
logstd = torch.log(std)
mu_old = mu_old.detach()
std_old = std_old.detach()
logstd_old = torch.log(std_old)
# kl divergence between old policy and new policy : D( pi_old || pi_new )
# pi_old -> mu0, logstd0, std0 / pi_new -> mu, logstd, std
# be careful of calculating KL-divergence. It is not symmetric metric
kl = logstd_old - logstd + (std_old.pow(2) + (mu_old - mu).pow(2)) / \
(self.EPS + 2.0 * std.pow(2)) - 0.5
return kl.sum(1, keepdim=True)
# from openai baseline code
# https://github.com/openai/baselines/blob/master/baselines/common/cg.py
def conjugate_gradient(self, actor, obs, rnn_states, action, masks, available_actions, active_masks, b, nsteps,
residual_tol=1e-10):
x = torch.zeros(b.size()).to(device=self.device)
r = b.clone()
p = b.clone()
rdotr = torch.dot(r, r)
for i in range(nsteps):
_Avp = self.fisher_vector_product(actor, obs, rnn_states, action, masks, available_actions, active_masks, p)
alpha = rdotr / torch.dot(p, _Avp)
x += alpha * p
r -= alpha * _Avp
new_rdotr = torch.dot(r, r)
betta = new_rdotr / rdotr
p = r + betta * p
rdotr = new_rdotr
if rdotr < residual_tol:
break
return x
def fisher_vector_product(self, actor, obs, rnn_states, action, masks, available_actions, active_masks, p):
p.detach()
kl = self.kl_divergence(obs, rnn_states, action, masks, available_actions, active_masks, new_actor=actor,
old_actor=actor)
kl = kl.mean()
kl_grad = torch.autograd.grad(kl, actor.parameters(), create_graph=True, allow_unused=True)
kl_grad = self.flat_grad(kl_grad) # check kl_grad == 0
kl_grad_p = (kl_grad * p).sum()
kl_hessian_p = torch.autograd.grad(kl_grad_p, actor.parameters(), allow_unused=True)
kl_hessian_p = self.flat_hessian(kl_hessian_p)
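# the 0.1 * p term added below acts as damping: the returned product is (H + 0.1 I) p,
# which keeps the conjugate-gradient solve numerically well conditioned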
return kl_hessian_p + 0.1 * p
def _get_flat_grad(self, y, model, retain_graph=None, create_graph=False):
grads = torch.autograd.grad(y, model.parameters(), retain_graph=retain_graph,
create_graph=create_graph, allow_unused=True)
_grads = []
for val, p in zip(grads, model.parameters()):
if val is not None:
_grads.append(val)
else:
_grads.append(torch.zeros_like(p.data, requires_grad=create_graph))
return torch.cat([grad.reshape(-1) for grad in _grads])
def _flat_grad_(self, f, model, retain_graph=None, create_graph=False):
return self.flat_grad(torch.autograd.grad(f, model.parameters(), retain_graph=retain_graph,
create_graph=create_graph, allow_unused=True))
def hessian_vector_product(self, f, model):
# for H = grad**2 f, compute Hx
g = self._flat_grad_(f, model)
# g = self._get_flat_grad(f, model)
# x = torch.placeholder(torch.float32, shape=g.shape)
x = torch.FloatTensor(g.shape)
return x, self._flat_grad_(torch.sum(g * x), model)
def cg(self, Ax, b, cg_iters=10):
x = torch.zeros_like(b)
r = b.clone() # Note: should be 'b - Ax(x)', but for x=0, Ax(x)=0. Change if doing warm start.
p = r.clone()
r_dot_old = torch.dot(r, r)
for _ in range(cg_iters):
z = Ax(p)
alpha = r_dot_old / (torch.dot(p, z) + self.EPS)
x += alpha * p
r -= alpha * z
r_dot_new = torch.dot(r, r)
p = r + (r_dot_new / r_dot_old) * p
r_dot_old = r_dot_new
return x
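# A hedged sanity-check sketch (not part of the trainer): given a trainer instance, cg should
# solve A x = b for a small symmetric positive-definite A supplied as a mat-vec callable.
# The matrix and vector below are illustrative assumptions.
# A = torch.tensor([[4.0, 1.0], [1.0, 3.0]])
# b = torch.tensor([1.0, 2.0])
# x = trainer.cg(lambda v: A @ v, b)  # should be close to torch.linalg.solve(A, b)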
def trpo_update(self, sample, update_actor=True):
"""
Update actor and critic networks.
:param sample: (Tuple) contains data batch with which to update networks.
:update_actor: (bool) whether to update actor network.
:return value_loss: (torch.Tensor) value function loss.
:return critic_grad_norm: (torch.Tensor) gradient norm from critic update.
;return policy_loss: (torch.Tensor) actor(policy) loss value.
:return dist_entropy: (torch.Tensor) action entropies.
:return actor_grad_norm: (torch.Tensor) gradient norm from actor update.
:return imp_weights: (torch.Tensor) importance sampling weights.
"""
share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, \
value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, \
adv_targ, available_actions_batch, factor_batch, cost_preds_batch, cost_returns_barch, rnn_states_cost_batch, \
cost_adv_targ, aver_episode_costs = sample
old_action_log_probs_batch = check(old_action_log_probs_batch).to(**self.tpdv)
adv_targ = check(adv_targ).to(**self.tpdv)
cost_adv_targ = check(cost_adv_targ).to(**self.tpdv)
value_preds_batch = check(value_preds_batch).to(**self.tpdv)
return_batch = check(return_batch).to(**self.tpdv)
active_masks_batch = check(active_masks_batch).to(**self.tpdv)
factor_batch = check(factor_batch).to(**self.tpdv)
cost_returns_barch = check(cost_returns_barch).to(**self.tpdv)
cost_preds_batch = check(cost_preds_batch).to(**self.tpdv)
# Reshape to do in a single forward pass for all steps
# values, action_log_probs, dist_entropy, cost_values, action_mu, action_std
values, action_log_probs, dist_entropy, cost_values, action_mu, action_std = self.policy.evaluate_actions(
share_obs_batch,
obs_batch,
rnn_states_batch,
rnn_states_critic_batch,
actions_batch,
masks_batch,
available_actions_batch,
active_masks_batch,
rnn_states_cost_batch)
# todo: reward critic update
value_loss = self.cal_value_loss(values, value_preds_batch, return_batch, active_masks_batch)
self.policy.critic_optimizer.zero_grad()
(value_loss * self.value_loss_coef).backward()
if self._use_max_grad_norm:
critic_grad_norm = nn.utils.clip_grad_norm_(self.policy.critic.parameters(), self.max_grad_norm)
else:
critic_grad_norm = get_gard_norm(self.policy.critic.parameters())
self.policy.critic_optimizer.step()
# todo: cost critic update
cost_loss = self.cal_value_loss(cost_values, cost_preds_batch, cost_returns_barch, active_masks_batch)
self.policy.cost_optimizer.zero_grad()
(cost_loss * self.value_loss_coef).backward()
if self._use_max_grad_norm:
cost_grad_norm = nn.utils.clip_grad_norm_(self.policy.cost_critic.parameters(), self.max_grad_norm)
else:
cost_grad_norm = get_gard_norm(self.policy.cost_critic.parameters())
self.policy.cost_optimizer.step()
# todo: actor update
rescale_constraint_val = (aver_episode_costs.mean() - self._max_lin_constraint_val) * (1 - self.gamma)
if rescale_constraint_val == 0:
rescale_constraint_val = self.EPS
# todo:reward-g
ratio = torch.exp(action_log_probs - old_action_log_probs_batch)
if self._use_policy_active_masks:
reward_loss = (torch.sum(ratio * factor_batch * adv_targ, dim=-1, keepdim=True) *
active_masks_batch).sum() / active_masks_batch.sum()
else:
reward_loss = torch.sum(ratio * factor_batch * adv_targ, dim=-1, keepdim=True).mean()
reward_loss = - reward_loss # todo:
reward_loss_grad = torch.autograd.grad(reward_loss, self.policy.actor.parameters(), retain_graph=True,
allow_unused=True)
reward_loss_grad = self.flat_grad(reward_loss_grad)
# todo:cost-b
if self._use_policy_active_masks:
cost_loss = (torch.sum(ratio * factor_batch * (cost_adv_targ), dim=-1, keepdim=True) *
active_masks_batch).sum() / active_masks_batch.sum()
else:
cost_loss = torch.sum(ratio * factor_batch * (cost_adv_targ), dim=-1, keepdim=True).mean()
cost_loss_grad = torch.autograd.grad(cost_loss, self.policy.actor.parameters(), retain_graph=True,
allow_unused=True)
cost_loss_grad = self.flat_grad(cost_loss_grad)
B_cost_loss_grad = cost_loss_grad.unsqueeze(0)
B_cost_loss_grad = self.flat_grad(B_cost_loss_grad)
# todo: compute lamda_coef and v_coef
g_step_dir = self.conjugate_gradient(self.policy.actor,
obs_batch,
rnn_states_batch,
actions_batch,
masks_batch,
available_actions_batch,
active_masks_batch,
reward_loss_grad.data,
nsteps=10) # todo: compute H^{-1} g
b_step_dir = self.conjugate_gradient(self.policy.actor,
obs_batch,
rnn_states_batch,
actions_batch,
masks_batch,
available_actions_batch,
active_masks_batch,
B_cost_loss_grad.data,
nsteps=10) # todo: compute H^{-1} b
q_coef = (reward_loss_grad * g_step_dir).sum(0, keepdim=True) # todo: compute q_coef: = g^T H^{-1} g
r_coef = (reward_loss_grad * b_step_dir).sum(0, keepdim=True) # todo: compute r_coef: = g^T H^{-1} b
s_coef = (cost_loss_grad * b_step_dir).sum(0, keepdim=True) # todo: compute s_coef: = b^T H^{-1} b
fraction = self.line_search_fraction #0.5 # 0.5 # line search step size
loss_improve = 0 # initialization
"""self._max_lin_constraint_val = c, B_cost_loss_grad = c in cpo"""
B_cost_loss_grad_dot = torch.dot(B_cost_loss_grad, B_cost_loss_grad)
# torch.dot(B_cost_loss_grad, B_cost_loss_grad) # B_cost_loss_grad.mean() * B_cost_loss_grad.mean()
if (torch.dot(B_cost_loss_grad, B_cost_loss_grad)) <= self.EPS and rescale_constraint_val < 0:
# feasible and cost grad is zero---shortcut to pure TRPO update!
# w, r, s, A, B = 0, 0, 0, 0, 0
# g_step_dir = torch.tensor(0)
b_step_dir = torch.tensor(0)
r_coef = torch.tensor(0)
s_coef = torch.tensor(0)
positive_Cauchy_value = torch.tensor(0)
whether_recover_policy_value = torch.tensor(0)
optim_case = 4
# print("optim_case = 4---shortcut to pure TRPO update!")
else:
# cost grad is nonzero: CPO update!
r_coef = (reward_loss_grad * b_step_dir).sum(0, keepdim=True) # todo: compute r_coef: = g^T H^{-1} b
s_coef = (cost_loss_grad * b_step_dir).sum(0, keepdim=True) # todo: compute s_coef: = b^T H^{-1} b
if r_coef == 0:
r_coef = self.EPS
if s_coef == 0:
s_coef = self.EPS
positive_Cauchy_value = (
q_coef - (r_coef ** 2) / (self.EPS + s_coef)) # should always be positive (Cauchy-Schwarz)
whether_recover_policy_value = 2 * self._max_quad_constraint_val - (
rescale_constraint_val ** 2) / (
self.EPS + s_coef) # does safety boundary intersect trust region? (positive = yes)
if rescale_constraint_val < 0 and whether_recover_policy_value < 0:
# point in trust region is feasible and safety boundary doesn't intersect
# ==> entire trust region is feasible
optim_case = 3
# print("optim_case = 3---entire trust region is feasible")
elif rescale_constraint_val < 0 and whether_recover_policy_value >= 0:
# x = 0 is feasible and safety boundary intersects
# ==> most of trust region is feasible
optim_case = 2
# print('optim_case = 2---most of trust region is feasible')
elif rescale_constraint_val >= 0 and whether_recover_policy_value >= 0:
# x = 0 is infeasible and safety boundary intersects
# ==> part of trust region is feasible, recovery possible
optim_case = 1
# print('optim_case = 1---Alert! Attempting feasible recovery!')
else:
# x = 0 infeasible, and safety halfspace is outside trust region
# ==> whole trust region is infeasible, try to fail gracefully
optim_case = 0
# print('optim_case = 0---Alert! Attempting infeasible recovery!')
if whether_recover_policy_value == 0:
whether_recover_policy_value = self.EPS
if optim_case in [3, 4]:
lam = torch.sqrt(
(q_coef / (2 * self._max_quad_constraint_val))) # self.lamda_coef = lam = np.sqrt(q / (2 * target_kl))
nu = torch.tensor(0) # v_coef = 0
elif optim_case in [1, 2]:
LA, LB = [0, r_coef / rescale_constraint_val], [r_coef / rescale_constraint_val, np.inf]
LA, LB = (LA, LB) if rescale_constraint_val < 0 else (LB, LA)
proj = lambda x, L: max(L[0], min(L[1], x))
lam_a = proj(torch.sqrt(positive_Cauchy_value / whether_recover_policy_value), LA)
lam_b = proj(torch.sqrt(q_coef / (torch.tensor(2 * self._max_quad_constraint_val))), LB)
f_a = lambda lam: -0.5 * (positive_Cauchy_value / (
self.EPS + lam) + whether_recover_policy_value * lam) - r_coef * rescale_constraint_val / (
self.EPS + s_coef)
f_b = lambda lam: -0.5 * (q_coef / (self.EPS + lam) + 2 * self._max_quad_constraint_val * lam)
lam = lam_a if f_a(lam_a) >= f_b(lam_b) else lam_b
nu = max(0, lam * rescale_constraint_val - r_coef) / (self.EPS + s_coef)
else:
lam = torch.tensor(0)
nu = torch.sqrt(torch.tensor(2 * self._max_quad_constraint_val) / (self.EPS + s_coef))
x_a = (1. / (lam + self.EPS)) * (g_step_dir + nu * b_step_dir)
x_b = (nu * b_step_dir)
x = x_a if optim_case > 0 else x_b
# todo: update actor and learning
reward_loss = reward_loss.data.cpu().numpy()
cost_loss = cost_loss.data.cpu().numpy()
params = self.flat_params(self.policy.actor)
old_actor = R_Actor(self.policy.args,
self.policy.obs_space,
self.policy.act_space,
self.device)
self.update_model(old_actor, params)
expected_improve = -torch.dot(x, reward_loss_grad).sum(0, keepdim=True)
expected_improve = expected_improve.data.cpu().numpy()
# line search
flag = False
fraction_coef = self.fraction_coef
# print("fraction_coef", fraction_coef)
for i in range(self.ls_step):
x_norm = torch.norm(x)
if x_norm > 0.5:
x = x * 0.5 / x_norm
new_params = params - fraction_coef * (fraction**i) * x
self.update_model(self.policy.actor, new_params)
values, action_log_probs, dist_entropy, new_cost_values, action_mu, action_std = self.policy.evaluate_actions(
share_obs_batch,
obs_batch,
rnn_states_batch,
rnn_states_critic_batch,
actions_batch,
masks_batch,
available_actions_batch,
active_masks_batch,
rnn_states_cost_batch)
ratio = torch.exp(action_log_probs - old_action_log_probs_batch)
if self._use_policy_active_masks:
new_reward_loss = (torch.sum(ratio * factor_batch * adv_targ, dim=-1, keepdim=True) *
active_masks_batch).sum() / active_masks_batch.sum()
else:
new_reward_loss = torch.sum(ratio * factor_batch * adv_targ, dim=-1, keepdim=True).mean()
if self._use_policy_active_masks:
new_cost_loss = (torch.sum(ratio * factor_batch * cost_adv_targ, dim=-1, keepdim=True) *
active_masks_batch).sum() / active_masks_batch.sum()
else:
new_cost_loss = torch.sum(ratio * factor_batch * cost_adv_targ, dim=-1, keepdim=True).mean()
new_reward_loss = new_reward_loss.data.cpu().numpy()
new_reward_loss = -new_reward_loss
new_cost_loss = new_cost_loss.data.cpu().numpy()
loss_improve = new_reward_loss - reward_loss
kl = self.kl_divergence(obs_batch,
rnn_states_batch,
actions_batch,
masks_batch,
available_actions_batch,
active_masks_batch,
new_actor=self.policy.actor,
old_actor=old_actor)
kl = kl.mean()
# see https://en.wikipedia.org/wiki/Backtracking_line_search
if ((kl < self.kl_threshold) and (loss_improve < 0 if optim_case > 1 else True)
and (new_cost_loss.mean() - cost_loss.mean() <= max(-rescale_constraint_val, 0))):
flag = True
# print("line search successful")
break
expected_improve *= fraction
if not flag:
# line search failed
print("line search failed")
params = self.flat_params(old_actor)
self.update_model(self.policy.actor, params)
return value_loss, critic_grad_norm, kl, loss_improve, expected_improve, dist_entropy, ratio, cost_loss, cost_grad_norm, whether_recover_policy_value, cost_preds_batch, cost_returns_barch, B_cost_loss_grad, lam, nu, g_step_dir, b_step_dir, x, action_mu, action_std, B_cost_loss_grad_dot
def train(self, buffer, shared_buffer=None, update_actor=True):
"""
Perform a training update using minibatch GD.
:param buffer: (SharedReplayBuffer) buffer containing training data.
:param update_actor: (bool) whether to update actor network.
:return train_info: (dict) contains information regarding training update (e.g. loss, grad norms, etc).
"""
if self._use_popart:
advantages = buffer.returns[:-1] - self.value_normalizer.denormalize(buffer.value_preds[:-1])
else:
advantages = buffer.returns[:-1] - buffer.value_preds[:-1]
advantages_copy = advantages.copy()
advantages_copy[buffer.active_masks[:-1] == 0.0] = np.nan
mean_advantages = np.nanmean(advantages_copy)
std_advantages = np.nanstd(advantages_copy)
advantages = (advantages - mean_advantages) / (std_advantages + 1e-5)
if self._use_popart:
cost_adv = buffer.cost_returns[:-1] - self.value_normalizer.denormalize(buffer.cost_preds[:-1])
else:
cost_adv = buffer.cost_returns[:-1] - buffer.cost_preds[:-1]
cost_adv_copy = cost_adv.copy()
cost_adv_copy[buffer.active_masks[:-1] == 0.0] = np.nan
mean_cost_adv = np.nanmean(cost_adv_copy)
std_cost_adv = np.nanstd(cost_adv_copy)
cost_adv = (cost_adv - mean_cost_adv) / (std_cost_adv + 1e-5)
train_info = {}
train_info['value_loss'] = 0
train_info['kl'] = 0
train_info['dist_entropy'] = 0
train_info['loss_improve'] = 0
train_info['expected_improve'] = 0
train_info['critic_grad_norm'] = 0
train_info['ratio'] = 0
train_info['cost_loss'] = 0
train_info['cost_grad_norm'] = 0
train_info['whether_recover_policy_value'] = 0
train_info['cost_preds_batch'] = 0
train_info['cost_returns_barch'] = 0
train_info['B_cost_loss_grad'] = 0
train_info['lam'] = 0
train_info['nu'] = 0
train_info['g_step_dir'] = 0
train_info['b_step_dir'] = 0
train_info['x'] = 0
train_info['action_mu'] = 0
train_info['action_std'] = 0
train_info['B_cost_loss_grad_dot'] = 0
if self._use_recurrent_policy:
data_generator = buffer.recurrent_generator(advantages, self.num_mini_batch, self.data_chunk_length,
cost_adv=cost_adv)
elif self._use_naive_recurrent:
data_generator = buffer.naive_recurrent_generator(advantages, self.num_mini_batch, cost_adv=cost_adv)
else:
data_generator = buffer.feed_forward_generator(advantages, self.num_mini_batch, cost_adv=cost_adv)
# old_actor = copy.deepcopy(self.policy.actor)
for sample in data_generator:
value_loss, critic_grad_norm, kl, loss_improve, expected_improve, dist_entropy, imp_weights, cost_loss, cost_grad_norm, whether_recover_policy_value, cost_preds_batch, cost_returns_barch, B_cost_loss_grad, lam, nu, g_step_dir, b_step_dir, x, action_mu, action_std, B_cost_loss_grad_dot \
= self.trpo_update(sample, update_actor)
train_info['value_loss'] += value_loss.item()
train_info['kl'] += kl
train_info['loss_improve'] += loss_improve
train_info['expected_improve'] += expected_improve
train_info['dist_entropy'] += dist_entropy.item()
train_info['critic_grad_norm'] += critic_grad_norm
train_info['ratio'] += imp_weights.mean()
train_info['cost_loss'] += value_loss.item()
train_info['cost_grad_norm'] += cost_grad_norm
train_info['whether_recover_policy_value'] += whether_recover_policy_value
train_info['cost_preds_batch'] += cost_preds_batch.mean()
train_info['cost_returns_barch'] += cost_returns_barch.mean()
train_info['B_cost_loss_grad'] += B_cost_loss_grad.mean()
train_info['g_step_dir'] += g_step_dir.float().mean()
train_info['b_step_dir'] += b_step_dir.float().mean()
train_info['x'] += x.float().mean()
train_info['action_mu'] += action_mu.float().mean()
train_info['action_std'] += action_std.float().mean()
train_info['B_cost_loss_grad_dot'] += B_cost_loss_grad_dot.item()
num_updates = self.ppo_epoch * self.num_mini_batch
for k in train_info.keys():
train_info[k] /= num_updates
return train_info
def prep_training(self):
self.policy.actor.train()
self.policy.critic.train()
def prep_rollout(self):
self.policy.actor.eval()
self.policy.critic.eval()
"""
B_cost_loss_grad_dot = torch.dot(B_cost_loss_grad, B_cost_loss_grad)
if torch.dot(B_cost_loss_grad, B_cost_loss_grad) <= 1e-8 and rescale_constraint_val < 0:
b_step_dir, r_coef, s_coef, A, B = 0, 0, 0, 0, 0
optim_case = 4
else:
A = q_coef - r_coef**2/s_coef
B = self._max_quad_constraint_val - (rescale_constraint_val ** 2) / (s_coef+ self.EPS)
positive_Cauchy_value = A
whether_recover_policy_value = B
if rescale_constraint_val<0 and B<0:
optim_case = 3
elif rescale_constraint_val < 0 and B >= 0:
optim_case = 2
elif rescale_constraint_val >= 0 and B >= 0:
optim_case = 1
else:
optim_case = 0
if A==0:
A = self.EPS
if B==0:
B = self.EPS
lam, nu = 0, 0
if optim_case == 0: # need to recover policy from unfeasible point
recover_policy_flag = True
lam = 0
nu = torch.sqrt(2 * self.kl_threshold / (s_coef + self.EPS) )
elif optim_case in [1, 2]:
lamda_a = torch.sqrt(A/B)
lamda_A_1 = r_coef / rescale_constraint_val
lamda_A_2 = torch.tensor(0)
lamda_b = torch.sqrt(q_coef / (2 * self._max_quad_constraint_val))
if rescale_constraint_val > 0:
lamda_coef_1 = torch.max(lamda_A_1, lamda_a) # assume lamda*c - r >0
lamda_coef_2 = torch.max(lamda_A_2, torch.min(lamda_b, lamda_A_1)) # assume lamda*c - r < 0
if (lamda_coef_1 * rescale_constraint_val - r_coef) > 0: # assume lamda*c - r >0 successfully
self.lamda_coef_a_star = lamda_coef_1
else: # assume failed
self.lamda_coef_b_star = lamda_coef_2
else:
lamda_coef_3 = torch.max(lamda_A_2, torch.min(lamda_a, lamda_A_1)) # assume lamda*c - r >0
lamda_coef_4 = torch.max(lamda_b, lamda_A_1) # assume lamda*c - r < 0
# print("lamda_coef_3 * rescale_constraint_val - r_coef ",
# lamda_coef_3 * rescale_constraint_val - r_coef)
if lamda_coef_3 * rescale_constraint_val - r_coef > 0:
self.lamda_coef_a_star = lamda_coef_3
else:
self.lamda_coef_b_star = lamda_coef_4
if self.lamda_coef_b_star==0:
self.lamda_coef_b_star = self.EPS
if self.lamda_coef_a_star==0:
self.lamda_coef_a_star = self.EPS
if s_coef==0:
s_coef = self.EPS
f_a_star = -A/(2*self.lamda_coef_a_star + self.EPS) - self.lamda_coef_a_star*B/2 - r_coef*rescale_constraint_val/(s_coef+ self.EPS)
f_b_star = -(self._max_quad_constraint_val/(self.lamda_coef_b_star+ self.EPS) \
+ self.lamda_coef_b_star*self._max_quad_constraint_val)/2
if f_a_star > f_b_star:
lam = self.lamda_coef_a_star
else:
lam = self.lamda_coef_b_star
nu = torch.relu( (lam*rescale_constraint_val - r_coef)/(s_coef + self.EPS) )
elif optim_case in [3, 4]:
lam = torch.sqrt(q_coef/(2*self._max_quad_constraint_val))
nu = 0.
"""
| [] |
2024-01-10 | Sube-py/arts | arts~openai2~_core.py | from json import dumps as jsonDumps
from json import loads as jsonLoads
from pathlib import Path
from typing import Union, List
import openai
try:
import aiohttp
from openai import api_requestor
from contextlib import asynccontextmanager
@asynccontextmanager
async def aiohttp_session():
"""
        This function is adapted from the aiohttp_session function in the PyPI package "openai".
"""
user_set_session = openai.aiosession.get()
if user_set_session:
yield user_set_session
else:
async with aiohttp.ClientSession(trust_env=True) as session:
yield session
api_requestor.aiohttp_session = aiohttp_session
except:
pass
class AKPool:
"""轮询获取api_key"""
def __init__(self, apikeys: list):
self._pool = self._POOL(apikeys)
def fetch_key(self):
return next(self._pool)
@classmethod
def _POOL(cls, apikeys: list):
while True:
for x in apikeys:
yield x
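# Illustrative sketch (not part of the original module): AKPool simply cycles
# through the supplied keys in round-robin order. The key strings below are
# placeholders, not real credentials.
def _example_akpool_round_robin():
    pool = AKPool(["sk-key-1", "sk-key-2"])
    assert [pool.fetch_key() for _ in range(4)] == [
        "sk-key-1", "sk-key-2", "sk-key-1", "sk-key-2"
    ]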
class RoleMsgBase:
role_name: str
text: str
def __init__(self, text: str):
self.text = text
def __str__(self):
return self.text
def __iter__(self):
yield "role", self.role_name
yield "content", self.text
system_msg = type("system_msg", (RoleMsgBase,), {"role_name": "system"})
user_msg = type("user_msg", (RoleMsgBase,), {"role_name": "user"})
assistant_msg = type("assistant_msg", (RoleMsgBase,), {"role_name": "assistant"})
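# Illustrative sketch (not from the original file): each role-message type
# converts, via dict(), into the message shape expected by the OpenAI chat API.
def _example_role_messages():
    assert dict(user_msg("hello")) == {"role": "user", "content": "hello"}
    assert dict(system_msg("be brief")) == {"role": "system", "content": "be brief"}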
class Temque:
"""一个先进先出, 可设置最大容量, 可固定元素的队列"""
def __init__(self, maxlen: int = None):
self.core: List[dict] = []
self.maxlen = maxlen or float("inf")
def _trim(self):
core = self.core
if len(core) > self.maxlen:
dc = len(core) - self.maxlen
indexes = []
for i, x in enumerate(core):
if not x["pin"]:
indexes.append(i)
if len(indexes) == dc:
break
for i in indexes[::-1]:
core.pop(i)
def add_many(self, *objs):
for x in objs:
self.core.append({"obj": x, "pin": False})
self._trim()
def __iter__(self):
for x in self.core:
yield x["obj"]
def pin(self, *indexes):
for i in indexes:
self.core[i]["pin"] = True
def unpin(self, *indexes):
for i in indexes:
self.core[i]["pin"] = False
def copy(self):
que = self.__class__(maxlen=self.maxlen)
que.core = self.core.copy()
return que
def deepcopy(self):
        ...  # This method exists only to remind users that the copy() method is a shallow copy.
def __add__(self, obj: Union[list, "Temque"]):
que = self.copy()
if isinstance(obj, self.__class__):
que.core += obj.core
que._trim()
else:
que.add_many(*obj)
return que
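# Illustrative sketch (not part of the original module): with maxlen=2 the oldest
# unpinned entry is evicted when a third item arrives, while pinned entries survive.
def _example_temque_trimming():
    que = Temque(maxlen=2)
    que.add_many("a", "b")
    que.pin(0)          # protect "a" from eviction
    que.add_many("c")   # over capacity: "b" is the oldest unpinned item and is dropped
    assert list(que) == ["a", "c"]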
class Chat:
"""
    Documentation: https://pypi.org/project/openai2
    Getting an api_key:
        Link 1: https://platform.openai.com/account/api-keys
        Link 2: https://www.baidu.com/s?wd=%E8%8E%B7%E5%8F%96%20openai%20api_key
"""
recently_used_apikey: str = ""
def __init__(
self,
api_key: Union[str, AKPool],
model: str = "gpt-3.5-turbo",
MsgMaxCount=None,
**kwargs,
):
self.reset_api_key(api_key)
self.model = model
self._messages = Temque(maxlen=MsgMaxCount)
self.kwargs = kwargs
def reset_api_key(self, api_key: Union[str, AKPool]):
if isinstance(api_key, AKPool):
self._akpool = api_key
else:
self._akpool = AKPool([api_key])
def request(self, text: str):
self.recently_used_apikey = self._akpool.fetch_key()
completion = openai.ChatCompletion.create(
**{
"api_key": self.recently_used_apikey,
"model": self.model,
"messages": list(self._messages + [{"role": "user", "content": text}]),
**self.kwargs,
}
)
answer: str = completion.choices[0].message["content"]
self._messages.add_many(
{"role": "user", "content": text}, {"role": "assistant", "content": answer}
)
return answer
def stream_request(self, text: str):
self.recently_used_apikey = self._akpool.fetch_key()
completion = openai.ChatCompletion.create(
**{
"api_key": self.recently_used_apikey,
"model": self.model,
"messages": list(self._messages + [{"role": "user", "content": text}]),
"stream": True,
**self.kwargs,
}
)
answer: str = ""
for chunk in completion:
choice = chunk.choices[0]
if choice.finish_reason == "stop":
break
content: str = choice.delta.get("content", "")
answer += content
yield content
self._messages.add_many(
{"role": "user", "content": text}, {"role": "assistant", "content": answer}
)
async def asy_request(self, text: str):
self.recently_used_apikey = self._akpool.fetch_key()
completion = await openai.ChatCompletion.acreate(
**{
"api_key": self.recently_used_apikey,
"model": self.model,
"messages": list(self._messages + [{"role": "user", "content": text}]),
**self.kwargs,
}
)
answer: str = completion.choices[0].message["content"]
self._messages.add_many(
{"role": "user", "content": text}, {"role": "assistant", "content": answer}
)
return answer
async def async_stream_request(self, text: str):
self.recently_used_apikey = self._akpool.fetch_key()
completion = await openai.ChatCompletion.acreate(
**{
"api_key": self.recently_used_apikey,
"model": self.model,
"messages": list(self._messages + [{"role": "user", "content": text}]),
"stream": True,
**self.kwargs,
}
)
answer: str = ""
async for chunk in completion:
choice = chunk.choices[0]
if choice.finish_reason == "stop":
break
content: str = choice.delta.get("content", "")
answer += content
yield content
self._messages.add_many(
{"role": "user", "content": text}, {"role": "assistant", "content": answer}
)
def rollback(self, n=1):
self._messages.core[-2 * n :] = []
for x in self._messages.core[-2:]:
x = x["obj"]
print(f"[{x['role']}]:{x['content']}")
def pin(self, *indexes):
self._messages.pin(*indexes)
def unpin(self, *indexes):
self._messages.unpin(*indexes)
def dump(self, fpath: str):
"""存档"""
jt = jsonDumps(list(self._messages), ensure_ascii=False)
Path(fpath).write_text(jt, encoding="utf8")
return True
def load(self, fpath: str):
"""载入存档"""
jt = Path(fpath).read_text(encoding="utf8")
self._messages.add_many(*jsonLoads(jt))
return True
def forge(self, *messages: Union[system_msg, user_msg, assistant_msg]):
"""伪造对话内容"""
for x in messages:
self._messages.add_many(dict(x))
print(self._messages)
def fetch_messages(self):
return list(self._messages)
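# Illustrative usage sketch (not part of the original module). The api_key below is
# a placeholder and the request needs network access to the OpenAI API, so this
# function is only a reference and is never called at import time.
def _example_chat_session():
    talk = Chat(api_key="sk-your-key", model="gpt-3.5-turbo", MsgMaxCount=20)
    print(talk.request("Introduce yourself in one sentence."))
    talk.dump("chat_history.json")  # persist the rolling message window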
| [] |
2024-01-10 | Sube-py/arts | arts~openai2~_GroupChat.py | from json import dumps as jsonDumps
from json import dumps
from typing import Dict
import openai
from ._core import system_msg, user_msg, assistant_msg, Chat
class GCRoles:
'''
roles: {
        '李白': {'desc': 'a famous poet of the Tang dynasty in China'}
}
'''
roles: Dict[str, dict]
def __init__(self):
self.roles = {}
def __getitem__(self, role):
return self.roles.setdefault(role, {})
class GroupChat(Chat):
MustRolesInfo = {
'小许':{'desc':'一个聪明的程序员'},
'小郑':{'desc':'一个帅气的男人'},
'小张':{'desc':'一个漂亮的女人'},
}
user_example = dumps({'dialogues':[{'speaker':'李白', 'audiences':['杜甫', '小许'], 'remark':'你们好呀'}, {'speaker':'杜甫', 'audiences':['李白'], 'remark':'你好, 你今天写诗了吗?'}, {'speaker':'小许', 'audiences':['李白'], 'remark':'你好, 你吃了吗?'}], 'dialogues to be generated':[{'speaker':'李白', 'audiences':['小许']}, {'speaker':'李白', 'audiences':['杜甫']}, {'speaker':'李白', 'audiences':['杜甫', '小许']}]}, ensure_ascii=False)
assistant_example = dumps(['我今天写诗了', '我吃饭了','你们有什么有趣的事情分享吗?'], ensure_ascii=False)
def __init__(self, *vs, **kvs):
Chat.__init__(self, *vs, **kvs)
self.roles = GCRoles()
@property
def pinned_message(self):
system_text = f'''以下JSON格式的文档描述了一些人物信息:
【{dumps(self.MustRolesInfo | self.roles.roles, ensure_ascii=False)}】
assistant需要了解这些人物的信息. user将会收集这些人物的对话记录并整理成固定格式, 然后以JSON格式发送给assistant,在发送给assistant的JSON文档中, 还会注明需要assistant模拟生成哪些人对哪些人的对话, 例如:
【{self.user_example}】
在上面的例子中, 'dialogues to be generated' 字段有3个元素, 则assistant的模拟发言也需要有3个元素,然后放在一个列表中,以JSON格式返回, 例如:
【{self.assistant_example}】
assistant每次只返回JSON文档即可,勿包含任何其它信息,否则会干扰user的解析.
assistant在发言时可以编造,比如在回答年龄时,可以随意编一个年龄.'''
return [
system_msg(system_text),
user_msg(dumps({'dialogues':[{'speaker':'小郑', 'audiences':['小张'], 'remark':'你是谁?'}, {'speaker':'小张', 'audiences':['小郑'], 'remark':'我叫小张,今年13岁'}, {'speaker':'小许', 'audiences':['小郑', '小张'], 'remark':'你们是哪里人?'}], 'dialogues to be generated':[{'speaker':'小郑', 'audiences':['小张']}, {'speaker':'小郑', 'audiences':['小许']}, {'speaker':'小张', 'audiences':['小许']}]}, ensure_ascii=False)),
system_msg('user指定了依次模拟 小郑->小张, 小郑->小许, 小张->小许 的发言, assistant的模拟发言要按照user指定的顺序'),
assistant_msg(dumps(['哦哦, 我比你大1岁', '我是河南人', '不告诉你'], ensure_ascii=False)),
user_msg(dumps({'dialogues':[{'speaker':'小许', 'audiences':['小张'], 'remark':'呵呵,这么神秘呀?'}, {'speaker':'小许', 'audiences':['小郑'], 'remark':'你知道小张是哪里人吗?'}], 'dialogues to be generated':[{'speaker':'小郑', 'audiences':['小许']}, {'speaker':'小张', 'audiences':['小许']}]}, ensure_ascii=False)),
assistant_msg(dumps(['哈哈, 我也不知道哦', '哈哈, 也没啥神秘的,我是湖北人'], ensure_ascii=False))
]
def request(self, user:dict):
text = jsonDumps(user, ensure_ascii=False)
self.recently_used_apikey = self._akpool.fetch_key()
completion = openai.ChatCompletion.create(**{
'api_key': self.recently_used_apikey,
'model': self.model,
'messages': [dict(x) for x in self.pinned_message] + list(self._messages) + [{"role": "user", "content": text}],
**self.kwargs
})
answer:str = completion.choices[0].message['content']
self._messages.add_many(
{"role": "user", "content": text},
{"role": "assistant", "content": answer}
)
return answer | [] |
2024-01-10 | kldarek/LLM-experiments | eval_chat.py | import os
import wandb
from langchain.llms import OpenAI
from langchain.chains import VectorDBQAWithSourcesChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
AIMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.prompts import PromptTemplate
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage
)
import pandas as pd
import pickle
import faiss
from utils import create_html, color_start, color_end
from prompt import prompt_template, system_template
from types import SimpleNamespace
cfg = SimpleNamespace(
TEMPERATURE = 0,
PROJECT = "wandb_docs_bot",
INDEX_ARTIFACT = "darek/wandb_docs_bot/faiss_store:v2",
PROMPT_TEMPLATE = prompt_template,
MODEL = "chatGPT",
)
def load_vectostore():
artifact = wandb.use_artifact(cfg.INDEX_ARTIFACT, type='search_index')
artifact_dir = artifact.download()
index = faiss.read_index(artifact_dir + "/docs.index")
with open(artifact_dir + "/faiss_store.pkl", "rb") as f:
store = pickle.load(f)
store.index = index
return store
def load_prompt():
prompt = PromptTemplate(input_variables=["question", "summaries"],
template=cfg.PROMPT_TEMPLATE)
return prompt
def load_chain(openai_api_key):
vectorstore = load_vectostore()
prompt = load_prompt()
chain = VectorDBQAWithSourcesChain.from_llm(
llm=OpenAI(temperature=cfg.TEMPERATURE, openai_api_key=openai_api_key),
vectorstore=vectorstore,
combine_prompt=prompt)
return chain, prompt
def get_answer(question, chain):
if chain is not None:
chain.return_source_documents = True
result = chain(
{
"question": question,
},
return_only_outputs=False,
)
return result['answer'], result["source_documents"], result['sources']
openai_api_key = os.getenv("OPENAI_API_KEY")
if len(openai_api_key) < 10:
raise ValueError("Set OPENAI_API_KEY environment variable")
run = wandb.init(project=cfg.PROJECT, config=cfg)
eval_table = wandb.Table(columns=["question", "answer", "target", "prompt", "docs"])
df = pd.read_csv('llm_eval_set.csv', header=1).dropna()
vectorstore = load_vectostore()
for question, target in zip(df.Question, df.Answer):
messages = [
SystemMessagePromptTemplate.from_template(system_template),
HumanMessagePromptTemplate.from_template("{question}")
]
prompt = ChatPromptTemplate.from_messages(messages)
chain_type_kwargs = {"prompt": prompt}
chain = VectorDBQAWithSourcesChain.from_chain_type(llm=ChatOpenAI(), chain_type="stuff", vectorstore=vectorstore, chain_type_kwargs=chain_type_kwargs)
answer, docs, sources = get_answer(question, chain)
docs_string = '\n\n'.join([color_start + d.metadata['source'] + ':\n' + color_end + d.page_content for d in docs])
docs_html = wandb.Html(create_html(docs_string))
answer_html = wandb.Html(create_html(answer))
prompt_html = wandb.Html(create_html(system_template))
question_html = wandb.Html(create_html(question))
target_html = wandb.Html(create_html(target))
eval_table.add_data(question_html, answer_html, target_html, prompt_html, docs_html)
wandb.log({'eval_table': eval_table})
run.finish()
print('done')
| [ "question", "{question}" ] |
2024-01-10 | kldarek/LLM-experiments | ingest_docs_only.py | import os
import wandb
import faiss
import pickle
import json
from langchain.docstore.document import Document
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.faiss import FAISS
from langchain.document_loaders import UnstructuredMarkdownLoader
from langchain.text_splitter import MarkdownTextSplitter
PROJECT = "wandb_docs_bot"
run = wandb.init(project=PROJECT)
def find_md_files(directory):
md_files = []
for root, dirs, files in os.walk(directory):
for file in files:
if file.endswith(".md"):
file_path = os.path.join(root, file)
md_files.append(file_path)
return md_files
def load_documents(files):
docs = []
for file in files:
fname = file.split('/')[-1]
loader = UnstructuredMarkdownLoader(file)
markdown_splitter = MarkdownTextSplitter(chunk_size=2048, chunk_overlap=128)
markdown_docs = loader.load()
markdown_docs = [x.page_content for x in markdown_docs]
chunks = markdown_splitter.create_documents(markdown_docs)
for chunk in chunks: chunk.metadata["source"] = fname # need to add the source to doc
docs.extend(chunks)
return docs
def create_and_save_index(documents):
store = FAISS.from_documents(documents,OpenAIEmbeddings())
artifact = wandb.Artifact("faiss_store", type="search_index")
faiss.write_index(store.index, "docs.index")
artifact.add_file("docs.index")
store.index = None
with artifact.new_file("faiss_store.pkl", "wb") as f:
pickle.dump(store, f)
wandb.log_artifact(artifact, "docs_index", type="embeddings_index")
return store
def main():
files = find_md_files('../docodile/docs/')
documents = load_documents(files)
store = create_and_save_index(documents)
if __name__ == "__main__":
main() | [] |
2024-01-10 | stankerstjens/constructive-connectomics | abianalysis~abianalysis~guidance~__init__.py | # MIT License
#
# Copyright (c) 2022. Stan Kerstjens
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .graph import GuidanceGraph
from .metrics import * | [] |
2024-01-10 | stankerstjens/constructive-connectomics | notebooks~guidance_experiment.py | import json
import random
from functools import partial
from typing import Optional, Tuple, Iterable
import igraph
import numpy as np
from tqdm import tqdm
from abianalysis.guidance import GuidanceGraph, Axon, get_euclidean_path_length, \
get_euclidean_distance
from abianalysis.guidance.factory import correlation_landscape, \
normalized_weight, threshold_edge_mask
from abianalysis.hierarchy import Hierarchy
from abianalysis.hierarchy.decomposition import pca_split, \
random_split, make_balanced_hierarchy
from abianalysis.spatial import VoxelGraph, voxel_graph_from_volume
from abianalysis.volume import Volume
from pylineage.multi_lineage_simulator import MultiLineageSimulator
def _block_mean_expression(pos: np.ndarray, exp: np.ndarray, k: int) -> \
Tuple[np.ndarray, np.ndarray]:
n_voxels, n_genes = exp.shape
shape = np.max(pos, 0) - np.min(pos, 0) + 1
mat = np.zeros((*shape, n_genes))
p = np.array(pos)
p -= np.min(p, axis=0)
mat[tuple(p.T)] = exp
mat = np.add.reduceat(mat, np.arange(0, mat.shape[0], k), axis=0)
mat = np.add.reduceat(mat, np.arange(0, mat.shape[1], k), axis=1)
mat = np.add.reduceat(mat, np.arange(0, mat.shape[2], k), axis=2)
mat /= k ** 3
pos = np.array(np.nonzero(mat.sum(axis=3))).T
exp = mat[tuple(pos.T)]
return pos, exp
def draw_random_axon(voxel_graph: VoxelGraph, source_voxel, n_steps):
fringe = {source_voxel}
visited = set()
previous = {source_voxel: None}
for i in range(n_steps):
voxel = random.sample(fringe, 1)[0]
fringe.remove(voxel)
visited.add(voxel)
neighbors = set(voxel_graph.get_neighbors(voxel)) - visited
for neighbor in neighbors:
previous[neighbor] = voxel
fringe.update(neighbors)
visited = sorted(visited)
idx = {v: i for i, v in enumerate(visited)}
tree = igraph.Graph(directed=True)
tree.add_vertices(len(visited), attributes={'voxel': visited})
tree.add_edges([(idx[previous[voxel]], idx[voxel])
for voxel in visited
if previous[voxel] is not None])
axon = Axon()
axon._tree = tree
return axon
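# Illustrative sketch (not from the original file): building a size-matched random
# axon as a null model for a guided axon, mirroring Experiment.random_fake_axons.
# `voxel_graph` and `guided_axon` are assumed to come from a prepared Experiment.
def _example_random_axon(voxel_graph: VoxelGraph, guided_axon):
    return draw_random_axon(voxel_graph,
                            guided_axon.source_voxel,
                            len(guided_axon.reached_voxels))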
class AxonNavigator:
def __init__(self, hierarchy: Hierarchy,
landscape_threshold: float = .1,
gradient_threshold: float = .0):
"""
:param hierarchy:
:param landscape_threshold: The smallest measurable correlation
:param gradient_threshold: The smallest measurable correlation gradient
"""
self.hierarchy = hierarchy
self.landscape_threshold = landscape_threshold
self.gradient_threshold = gradient_threshold
self.voxel_graph = None
self._source_vertices = []
@property
def volume(self):
return self.hierarchy.volume
def simulate_axons(self, n_axons, source_voxels=None):
"""Simulate n_axons axons. If specified, the source voxels are used.
Otherwise, random voxels are used.
"""
if source_voxels is None:
source_voxels = np.random.choice(
self.volume.n_voxels,
size=n_axons,
replace=False
)
self._source_vertices = self.guidance_graph.get_leaf_vertex(
source_voxels)
        return [self.guidance_graph.find_axon(source) for source
                in tqdm(self._source_vertices, desc='Sampling axons')]
def _init_voxel_graph(self):
self.voxel_graph = voxel_graph_from_volume(self.volume)
def _init_guidance_graph(self):
landscape_fn = partial(correlation_landscape,
threshold=self.landscape_threshold)
weight_fn = normalized_weight
mask_fn = partial(threshold_edge_mask,
threshold=self.gradient_threshold)
self.guidance_graph = GuidanceGraph.create(
self.hierarchy,
self.voxel_graph,
hierarchy_to_landscape=landscape_fn,
gradient_to_weight=weight_fn,
edge_mask=mask_fn,
)
class Experiment:
"""
:param age:
:param n_iterations:
:param n_sources:
:param landscape_threshold:
:param gradient_threshold:
:param split_method:
:param expression:
:param label:
"""
def __init__(self,
age: str,
n_iterations: int,
n_sources: int,
landscape_threshold=.1,
gradient_threshold=.0,
split_method='pca',
genes: Optional[np.ndarray] = None,
expression='',
label: str = '',
noise_amount=0.):
# Parameters
self.age = age
self.n_iterations = n_iterations
self.n_sources = n_sources
self.label = label
self.expression = expression
self.noise_amount = noise_amount
self.genes = genes
if split_method == 'pca':
split_method = pca_split
elif split_method == 'random':
split_method = random_split
else:
raise ValueError('Invalid split method')
self.split_method = split_method
self.landscape_threshold = landscape_threshold
self.gradient_threshold = gradient_threshold
self.volume: Optional[Volume] = None
self.hierarchy: Optional[Hierarchy] = None
self.voxel_graph: Optional[VoxelGraph] = None
self.guidance_graph: Optional[GuidanceGraph] = None
self.sources = None
self.axons = None
def _set_model_expression(self):
n_genes = self.volume.n_genes
for h in self.hierarchy.descendants():
h._expression = np.random.randn(n_genes)
if h.parent:
h._expression += h.parent.expression
for h in self.hierarchy.leaves():
self.volume.expression[h.voxel_index] = h._expression
def _smooth_expression(self):
exp = self.volume.expression.copy()
g = self.voxel_graph._graph
for v in g.vs:
self.volume.expression[v.index] = np.mean(exp[g.neighbors(v)],
axis=0)
def _prepare_volume(self):
pass
def _optional_shuffle(self):
if self.shuffle:
self.volume.shuffle()
def _add_some_noise(self, amount: float = 0.) -> None:
"""
:param amount: Some float between 0 and 1, 0 meaning no noise and 1
meaning only noise.
"""
noise = np.random.randn(*self.volume.expression.shape)
self.volume.expression = ((1 - amount) * self.volume.expression
+ amount * noise)
for leaf in self.hierarchy.leaves():
assert leaf.voxel_index is not None, \
'Leaves should have voxel indices'
leaf._expression = None
@property
def shuffle(self):
"""True if the volume expression data should be shuffled."""
return 'shuffled' in self.expression
@property
def smooth(self):
"""True if the expression should be spatially smoothed.
This could either be before establishing the hierarchy (in case the
expression is also shuffled) or after (in case the model expression
is applied).
"""
return 'smooth' in self.expression
@property
def model(self):
"""True if the expression should be modeled after the hierarchy."""
return 'model' in self.expression
def _prepare_hierarchy(self):
if self.smooth and self.shuffle:
self._smooth_expression()
print('Preparing hierarchy...')
self.hierarchy = make_balanced_hierarchy(
self.volume,
n_iterations=self.n_iterations,
partition_children=self.split_method
)
if self.model:
self._set_model_expression()
if self.smooth:
self._smooth_expression()
def _prepare_voxel_graph(self):
print('Preparing voxel graph...')
self.voxel_graph = voxel_graph_from_volume(self.volume)
def _reduce_genes(self):
self.volume.filter_genes(self.genes)
for h in self.hierarchy.descendants():
if h.component is not None:
h.component = h.component[self.genes]
def _prepare_guidance_graph(self):
landscape_fn = partial(correlation_landscape,
threshold=self.landscape_threshold)
weight_fn = normalized_weight
mask_fn = partial(threshold_edge_mask,
threshold=self.gradient_threshold)
self.guidance_graph = GuidanceGraph.create(
self.hierarchy,
self.voxel_graph,
hierarchy_to_landscape=landscape_fn,
gradient_to_weight=weight_fn,
edge_mask=mask_fn,
)
def prepare(self):
self._prepare_volume()
self._prepare_voxel_graph()
self._prepare_hierarchy()
if self.noise_amount > 0:
self._add_some_noise(self.noise_amount)
if self.genes is not None:
self._reduce_genes()
self._prepare_guidance_graph()
def snowball(self, source_voxel=None):
if source_voxel is None:
source_voxel = np.random.choice(self.volume.n_voxels,
size=1).item()
self.sources = []
self.axons = []
visited = set()
voxels = {source_voxel}
i = 0
while i < self.n_sources or voxels:
print(i, end='\r')
if len(voxels - visited) > 0:
voxel = np.random.choice(list(voxels), size=1).item()
voxels.remove(voxel)
else:
remaining_voxels = set(range(self.volume.n_voxels)) - visited
voxel = np.random.choice(list(remaining_voxels), size=1).item()
visited.add(voxel)
source_index = self.guidance_graph.get_leaf_vertex(voxel)
axon = self.guidance_graph.find_axon(source_index)
voxels.update(set(axon.tips) - visited)
self.sources.append(source_index)
self.axons.append(axon)
i += 1
self.n_sources = len(self.axons)
def sample_axons(self, source_voxels=None):
if source_voxels is None:
source_voxels = np.random.choice(
self.volume.n_voxels,
size=self.n_sources,
replace=False
)
else:
self.n_sources = len(source_voxels)
self.sources = self.guidance_graph.get_leaf_vertex(source_voxels)
self.axons = [self.guidance_graph.find_axon(source) for source
in tqdm(self.sources, desc='Sampling axons')]
def random_fake_axons(self) -> Iterable['FakeAxon']:
for axon in self.axons:
axon = draw_random_axon(self.voxel_graph,
axon.source_voxel,
len(axon.reached_voxels))
yield axon
def get_path_lengths(self):
return [get_euclidean_path_length(self.volume, path)
for axon in tqdm(self.axons, desc='Calculating path lengths')
for path in axon.voxel_paths
if len(path) > 1]
def get_path_distances(self):
return [get_euclidean_distance(self.volume, path)
for axon in tqdm(self.axons, desc='Calculating path distances')
for path in axon.voxel_paths
if len(path) > 1]
def get_reached_voxels_counts(self):
return [len(axon.reached_voxels) for axon
in tqdm(self.axons, desc='Counting reached voxels')]
def save_to_json(self, file_name: str = None):
"""Save the experiment to a json file"""
if file_name is None:
file_name = self.label.lower().replace(' ', '_') + '.json'
voxel_hierarchy = np.zeros(self.volume.n_voxels, dtype=np.int32)
for h in self.hierarchy.descendants():
if any(c.is_leaf for c in h.children):
voxel_hierarchy[h.voxels] = h.id
vis_data = {
'hierarchy': self.hierarchy.to_json_dict(
include_only=('children')),
'voxel_hierarchy': voxel_hierarchy.tolist(),
'volume': {
'voxel_indices': self.volume.voxel_indices.T.tolist(),
**self.volume.to_json_dict(
include_only=('voxel_size', 'age', 'anatomy', 'id', 'name')
)
},
'axons': [{
'branch_paths': a.branch_paths,
'source_voxel': a.source_voxel.item(),
} for a in self.axons],
}
with open(file_name, 'w') as fh:
json.dump(vis_data, fh)
class DataExperiment(Experiment):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _prepare_volume(self):
print('Preparing volume...')
self.volume = Volume.load(self.age)
self.volume.preprocess()
self._optional_shuffle()
class SimulatedExperiment(Experiment):
def __init__(self, n_voxels, *args, **kwargs):
super().__init__(*args, **kwargs)
self.n_voxels = n_voxels
def _prepare_volume(self):
print('Simulating volume...')
k = 2
n_genes = len(self.genes) if self.genes is not None else 100
mls = MultiLineageSimulator(n_dims=3,
n_roots=100,
n_divisions=k ** 3 * self.n_voxels,
n_genes=n_genes,
symmetric_prob=.2)
mls.run()
cells = list(mls.root.leaves())
pos = np.array([c.position.index for c in cells])
exp = np.array([c.state.expression for c in cells])
pos, exp = _block_mean_expression(pos, exp, k)
exp += np.random.randn(*exp.shape) * 5
self.volume = Volume(expression=exp,
voxel_indices=pos,
genes=[f'g{i}' for i in range(len(exp[0]))],
age=self.age)
self.volume.preprocess(anatomy=None)
self._optional_shuffle()
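# Illustrative usage sketch (not part of the original file): running a small
# simulated experiment end to end. The parameter values are small placeholders
# chosen for a quick run, not recommended settings.
def _example_simulated_run():
    exp = SimulatedExperiment(n_voxels=500,
                              age='simulated',
                              n_iterations=8,
                              n_sources=10,
                              split_method='pca',
                              label='demo run')
    exp.prepare()        # volume, voxel graph, hierarchy, guidance graph
    exp.sample_axons()   # grow one axon per randomly chosen source voxel
    return exp.get_path_lengths()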
| [] |
2024-01-10 | stankerstjens/constructive-connectomics | notebooks~guidance_plots.py | from typing import List, Optional
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.figure import Figure
from guidance_experiment import Experiment
def distance_vs_path_length_plot(experiment: Experiment,
main: float = .75,
margin: float = .1,
n_bins: int = 25,
max_value: Optional[float] = None,
colorbar=True,
) -> Figure:
path_lengths = experiment.get_path_lengths()
path_distances = experiment.get_path_distances()
fig = plt.figure(figsize=(5, 5))
main_ax = fig.add_axes([margin, margin, main, main])
hbins = main_ax.hexbin(path_lengths,
path_distances,
gridsize=n_bins,
bins='log',
cmap='viridis',
mincnt=1,
)
main_ax.set_xlabel('Path length (µm)')
main_ax.set_ylabel('Straight distance (µm)')
if max_value is None:
max_value = np.max([path_lengths, path_distances])
main_ax.plot([0, max_value],
[0, max_value],
color='black',
linestyle='dashed')
ax = fig.add_axes([margin, main + margin, main, 1 - main - margin],
sharex=main_ax)
ax.hist(path_lengths,
histtype='step',
bins=n_bins,
color='k')
ax.axis('off')
ax.grid()
ax.set_title(experiment.label)
ax = fig.add_axes([main + margin, margin, 1 - main - margin, main],
sharey=main_ax)
ax.hist(path_distances,
histtype='step',
bins=n_bins,
color='k',
orientation='horizontal')
ax.axis('off')
if colorbar:
fig.colorbar(
hbins,
cax=fig.add_axes([main - .5 * margin,
1.5 * margin, margin / 3,
margin * 2]))
return fig
def plot_path_length_comparison(experiments: List[Experiment],
n_bins=30, histtype='step'):
fig = plt.figure()
ax = fig.add_subplot()
ax.hist([
exp.get_path_lengths()
for exp in experiments
],
bins=n_bins,
histtype=histtype,
label=[exp.label for exp in experiments],
)
ax.set_xlabel('Path length')
ax.set_ylabel('# Paths')
fig.legend()
return fig
def plot_reached_distance_comparison(experiments: List[Experiment],
n_bins=30,
histtype='step'):
fig = plt.figure()
ax = fig.add_subplot()
ax.hist([
exp.get_path_distances()
for exp in experiments
],
bins=n_bins,
histtype=histtype,
label=[exp.label for exp in experiments],
)
ax.set_xlabel('Reached distance')
ax.set_ylabel('# Paths')
fig.legend()
return fig
def plot_reached_voxel_count_comparison(experiments: List[Experiment],
n_bins=10,
histtype='step') -> Figure:
fig = plt.figure()
ax = fig.add_subplot()
ax.hist([
exp.get_reached_voxels_counts()
for exp in experiments
],
histtype=histtype,
label=[exp.label for exp in experiments],
bins=n_bins,
)
ax.set_yscale('log')
ax.set_xlabel('# Reached voxels')
ax.set_ylabel('# Axons')
fig.legend()
return fig
def plot_correlation_distribution(experiments: List[Experiment], n_bins=50):
fig = plt.figure()
ax = fig.add_subplot()
values = []
for exp in experiments:
v = np.array(exp.guidance_graph._up_graph.vs['landscape'])
v = v[(v > 0) & (v < 100)]
values.append(v)
ax.hist(values,
histtype='step',
label=[exp.label for exp in experiments],
bins=n_bins)
fig.legend()
return fig
| [] |
2024-01-10 | yuGAN6/PaddleSpeech | paddlespeech~s2t~models~whisper~whipser.py | # MIT License, Copyright (c) 2022 OpenAI.
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Modified from OpenAI Whisper 2022 (https://github.com/openai/whisper/whisper)
import os
from dataclasses import dataclass
from dataclasses import field
from functools import lru_cache
from typing import Dict
from typing import Iterable
from typing import List
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Union
import numpy as np
import paddle
import paddle.nn.functional as F
import paddlespeech.s2t.modules.align as paddlespeech_nn
import soundfile
import tqdm
from paddle import nn
from paddle.distribution import Categorical
from paddlespeech.s2t.models.whisper import utils
from paddlespeech.s2t.models.whisper.tokenizer import get_tokenizer
from paddlespeech.s2t.models.whisper.tokenizer import LANGUAGES
from paddlespeech.s2t.models.whisper.tokenizer import Tokenizer
from paddlespeech.s2t.utils.log import Log
logger = Log(__name__).getlog()
_MODELS = ["large"]
SAMPLE_RATE = 16000
N_FFT = 400
N_MELS = 80
HOP_LENGTH = 160
CHUNK_LENGTH = 30
N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE # 480000: number of samples in a chunk
N_FRAMES = utils.exact_div(
N_SAMPLES, HOP_LENGTH) # 3000: number of frames in a mel spectrogram input
@dataclass
class ModelDimensions:
n_mels: int
n_audio_ctx: int
n_audio_state: int
n_audio_head: int
n_audio_layer: int
n_vocab: int
n_text_ctx: int
n_text_state: int
n_text_head: int
n_text_layer: int
class LayerNorm(paddlespeech_nn.LayerNorm):
def forward(self, x: paddle.Tensor) -> paddle.Tensor:
return super().forward(x)
class Linear(paddlespeech_nn.Linear):
def forward(self, x: paddle.Tensor) -> paddle.Tensor:
return F.linear(x, self.weight, None
if self.bias is None else self.bias)
class Conv1d(paddlespeech_nn.Conv1D):
def forward(self, x: paddle.Tensor) -> paddle.Tensor:
return super().forward(x)
class MultiHeadAttention(nn.Layer):
def __init__(self, n_state: int, n_head: int):
super().__init__()
self.n_head = n_head
self.query = Linear(n_state, n_state, bias_attr=True)
self.key = Linear(n_state, n_state, bias_attr=False)
self.value = Linear(n_state, n_state, bias_attr=True)
self.out = Linear(n_state, n_state, bias_attr=True)
def forward(
self,
x: paddle.Tensor,
xa: Optional[paddle.Tensor]=None,
mask: Optional[paddle.Tensor]=None,
kv_cache: Optional[dict]=None, ):
q = self.query(x)
if kv_cache is None or xa is None or self.key not in kv_cache:
# hooks, if installed (i.e. kv_cache is not None), will prepend the cached kv tensors;
# otherwise, perform key/value projections for self- or cross-attention as usual.
k = self.key(x if xa is None else xa)
v = self.value(x if xa is None else xa)
else:
# for cross-attention, calculate keys and values once and reuse in subsequent calls.
k = kv_cache[self.key]
v = kv_cache[self.value]
wv = self.qkv_attention(q, k, v, mask)
return self.out(wv)
def qkv_attention(self,
q: paddle.Tensor,
k: paddle.Tensor,
v: paddle.Tensor,
mask: Optional[paddle.Tensor]=None):
n_batch, n_ctx, n_state = q.shape
scale = (n_state // self.n_head)**-0.25
q = paddle.transpose(
q.view(*q.shape[:2], self.n_head, -1), (0, 2, 1, 3)) * scale
k = paddle.transpose(
k.view(*k.shape[:2], self.n_head, -1), (0, 2, 3, 1)) * scale
v = paddle.transpose(
v.view(*v.shape[:2], self.n_head, -1), (0, 2, 1, 3))
qk = q @ k
if mask is not None:
qk = qk + mask[:n_ctx, :n_ctx]
w = F.softmax(qk.float(), axis=-1).to(q.dtype)
return paddle.transpose((w @ v), (0, 2, 1, 3)).flatten(start_axis=2)
class ResidualAttentionBlock(nn.Layer):
def __init__(self, n_state: int, n_head: int, cross_attention: bool=False):
super().__init__()
self.attn = MultiHeadAttention(n_state, n_head)
self.attn_ln = LayerNorm(n_state)
self.cross_attn = MultiHeadAttention(
n_state, n_head) if cross_attention else None
self.cross_attn_ln = LayerNorm(n_state) if cross_attention else None
n_mlp = n_state * 4
self.mlp = nn.Sequential(
Linear(n_state, n_mlp, bias_attr=True),
nn.GELU(), Linear(n_mlp, n_state, bias_attr=True))
self.mlp_ln = LayerNorm(n_state)
def forward(
self,
x: paddle.Tensor,
xa: Optional[paddle.Tensor]=None,
mask: Optional[paddle.Tensor]=None,
kv_cache: Optional[dict]=None, ):
x = x + self.attn(self.attn_ln(x), mask=mask, kv_cache=kv_cache)
if self.cross_attn:
x = x + self.cross_attn(
self.cross_attn_ln(x), xa, kv_cache=kv_cache)
x = x + self.mlp(self.mlp_ln(x))
return x
def sinusoids(length, channels, max_timescale=10000):
"""Returns sinusoids for positional embedding"""
assert channels % 2 == 0
log_timescale_increment = np.log(max_timescale) / (channels // 2 - 1)
inv_timescales = paddle.exp(-log_timescale_increment * paddle.arange(
channels // 2, dtype=paddle.float32))
scaled_time = paddle.arange(
length,
dtype=paddle.float32)[:, np.newaxis] * inv_timescales[np.newaxis, :]
return paddle.to_tensor(
paddle.concat(
[paddle.sin(scaled_time), paddle.cos(scaled_time)], axis=1))
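# Small illustrative check (not in the original source): the positional table has
# one row per position and `channels` columns -- sines in the first half, cosines
# in the second. Useful when verifying encoder input shapes.
def _example_sinusoid_shape():
    table = sinusoids(length=1500, channels=384)
    assert tuple(table.shape) == (1500, 384)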
class AudioEncoder(nn.Layer):
def __init__(self,
n_mels: int,
n_ctx: int,
n_state: int,
n_head: int,
n_layer: int):
super().__init__()
self.conv1 = Conv1d(
n_mels, n_state, kernel_size=3, stride=1, padding=1, bias_attr=True)
self.conv2 = Conv1d(
n_state,
n_state,
kernel_size=3,
stride=2,
padding=1,
bias_attr=True)
self.register_buffer("positional_embedding", sinusoids(n_ctx, n_state))
self.blocks: Iterable[ResidualAttentionBlock] = nn.LayerList(
[ResidualAttentionBlock(n_state, n_head) for _ in range(n_layer)])
self.ln_post = LayerNorm(n_state)
def forward(self, x: paddle.Tensor):
"""
x : paddle.Tensor, shape = (batch_size, n_mels, n_ctx)
the mel spectrogram of the audio
"""
x = F.gelu(self.conv1(x))
x = F.gelu(self.conv2(x))
x = paddle.transpose(x, (0, 2, 1))
assert x.shape[
1:] == self.positional_embedding.shape, "incorrect audio shape"
x = (x + self.positional_embedding)
for block in self.blocks:
x = block(x)
x = self.ln_post(x)
return x
class TextDecoder(nn.Layer):
def __init__(self,
n_vocab: int,
n_ctx: int,
n_state: int,
n_head: int,
n_layer: int):
super().__init__()
self.token_embedding = nn.Embedding(n_vocab, n_state)
self.positional_embedding = paddle.create_parameter(
shape=[n_ctx, n_state], dtype='float32')
self.blocks: Iterable[ResidualAttentionBlock] = nn.LayerList([
ResidualAttentionBlock(n_state, n_head, cross_attention=True)
for _ in range(n_layer)
])
self.ln = LayerNorm(n_state)
mask = paddle.full(
shape=[n_ctx, n_state], fill_value=-np.inf, dtype='float32')
mask = paddle.triu(mask, diagonal=1)
self.register_buffer("mask", mask, persistable=False)
def forward(self,
x: paddle.Tensor,
xa: paddle.Tensor,
kv_cache: Optional[dict]=None):
"""
x : paddle.LongTensor, shape = (batch_size, <= n_ctx)
the text tokens
xa : paddle.Tensor, shape = (batch_size, n_mels, n_audio_ctx)
the encoded audio features to be attended on
"""
offset = next(iter(kv_cache.values())).shape[1] if kv_cache else 0
x = self.token_embedding(x) + self.positional_embedding[offset:offset +
x.shape[-1]]
x = x.to(xa.dtype)
for block in self.blocks:
x = block(x, xa, mask=self.mask, kv_cache=kv_cache)
x = self.ln(x)
logits = (x @ paddle.transpose(self.token_embedding.weight, (1, 0)))
return logits
@dataclass(frozen=True)
class DecodingOptions:
task: str = "transcribe" # whether to perform X->X "transcribe" or X->English "translate"
language: Optional[
str] = None # language that the audio is in; uses detected language if None
# sampling-related options
temperature: float = 0.0
sample_len: Optional[int] = None # maximum number of tokens to sample
best_of: Optional[
int] = None # number of independent samples to collect, when t > 0
beam_size: Optional[
int] = None # number of beams in beam search, when t == 0
patience: Optional[
float] = None # patience in beam search (https://arxiv.org/abs/2204.05424)
# options for ranking generations (either beams or best-of-N samples)
length_penalty: Optional[
float] = None # "alpha" in Google NMT, None defaults to length norm
# prompt, prefix, and token suppression
prompt: Optional[Union[str, List[
int]]] = None # text or tokens for the previous context
prefix: Optional[Union[str, List[
int]]] = None # text or tokens to prefix the current context
suppress_blank: bool = True # this will suppress blank outputs
# list of tokens ids (or comma-separated token ids) to suppress
# "-1" will suppress a set of symbols as defined in `tokenizer.non_speech_tokens()`
suppress_tokens: Optional[Union[str, Iterable[int]]] = "-1"
# timestamp sampling options
without_timestamps: bool = False # use <|notimestamps|> to sample text tokens only
max_initial_timestamp: Optional[
float] = 1.0 # the initial timestamp cannot be later than this
# implementation details
fp16: bool = False # use fp16 for most of the calculation
@dataclass(frozen=True)
class DecodingResult:
audio_features: paddle.Tensor
language: str
language_probs: Optional[Dict[str, float]] = None
tokens: List[int] = field(default_factory=list)
text: str = ""
avg_logprob: float = np.nan
no_speech_prob: float = np.nan
temperature: float = np.nan
compression_ratio: float = np.nan
class Inference:
def logits(self, tokens: paddle.Tensor,
audio_features: paddle.Tensor) -> paddle.Tensor:
"""Perform a forward pass on the decoder and return per-token logits"""
raise NotImplementedError
def rearrange_kv_cache(self, source_indices) -> None:
"""Update the key-value cache according to the updated beams"""
raise NotImplementedError
def cleanup_caching(self) -> None:
"""Clean up any resources or hooks after decoding is finished"""
pass
class WhisperInference(Inference):
def __init__(self, model: "Whisper", initial_token_length: int):
self.model: "Whisper" = model
self.initial_token_length = initial_token_length
self.kv_cache = {}
self.hooks = []
def logits(self, tokens: paddle.Tensor,
audio_features: paddle.Tensor) -> paddle.Tensor:
if not self.kv_cache:
self.kv_cache, self.hooks = self.model.install_kv_cache_hooks()
if tokens.shape[-1] > self.initial_token_length:
# only need to use the last token except in the first forward pass
tokens = tokens[:, -1:]
return self.model.decoder(
tokens, audio_features, kv_cache=self.kv_cache)
def cleanup_caching(self):
for hook in self.hooks:
hook.remove()
self.kv_cache = {}
self.hooks = []
def rearrange_kv_cache(self, source_indices):
for module, tensor in self.kv_cache.items():
# update the key/value cache to contain the selected sequences
self.kv_cache[module] = tensor[source_indices].detach()
@paddle.no_grad()
def detect_language(
model: "Whisper",
mel: paddle.Tensor,
resource_path: str,
tokenizer: Tokenizer=None) -> Tuple[paddle.Tensor, List[dict]]:
"""
Detect the spoken language in the audio, and return them as list of strings, along with the ids
of the most probable language tokens and the probability distribution over all language tokens.
This is performed outside the main decode loop in order to not interfere with kv-caching.
Returns
-------
language_tokens : Tensor, shape = (batch_size,)
ids of the most probable language tokens, which appears after the startoftranscript token.
language_probs : List[Dict[str, float]], length = batch_size
list of dictionaries containing the probability distribution over all languages.
"""
if tokenizer is None:
tokenizer = get_tokenizer(
model.is_multilingual, resource_path=resource_path)
if tokenizer.language is None or tokenizer.language_token not in tokenizer.sot_sequence:
raise ValueError(
"This model doesn't have language tokens so it can't perform lang id"
)
single = mel.ndim == 2
if single:
mel = mel.unsqueeze(0)
# skip encoder forward pass if already-encoded audio features were given
if mel.shape[-2:] != (model.dims.n_audio_ctx, model.dims.n_audio_state):
mel = model.encoder(mel)
# forward pass using a single token, startoftranscript
batch_size = mel.shape[0]
x = paddle.to_tensor([[tokenizer.sot]] * batch_size) # [batch_size, 1]
logits = model.logits(x, mel)[:, 0]
# collect detected languages; suppress all non-language tokens
mask = paddle.ones(paddle.to_tensor(logits.shape[-1]), dtype=bool)
mask[list(tokenizer.all_language_tokens)] = False
logits[:, mask] = -np.inf
language_tokens = paddle.argmax(logits, axis=-1)
language_token_probs = F.softmax(logits, axis=-1)
language_probs = [{
c: language_token_probs[i, j].tolist()
for j, c in zip(tokenizer.all_language_tokens,
tokenizer.all_language_codes)
} for i in range(batch_size)]
if single:
language_tokens = language_tokens[0]
language_probs = language_probs[0]
return language_tokens, language_probs
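# Illustrative sketch (not part of the original file): typical language detection
# on a single 30-second window. `model` is a loaded Whisper instance, `mel` a
# log-Mel feature tensor and `resource_path` the tokenizer asset directory -- all
# assumed to be prepared by the caller, exactly as transcribe() does below.
def _example_detect_language(model: "Whisper", mel: paddle.Tensor,
                             resource_path: str) -> str:
    segment = pad_or_trim(mel, N_FRAMES)  # language id only looks at one chunk
    _, probs = model.detect_language(segment, resource_path)
    return max(probs, key=probs.get)  # most probable language code, e.g. "zh" or "en"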
def transcribe(
model: "Whisper",
mel: paddle.Tensor,
resource_path: str,
*,
verbose: Optional[bool]=None,
temperature: Union[float, Tuple[float, ...]]=(0.0, 0.2, 0.4, 0.6, 0.8,
1.0),
compression_ratio_threshold: Optional[float]=2.4,
logprob_threshold: Optional[float]=-1.0,
no_speech_threshold: Optional[float]=0.6,
condition_on_previous_text: bool=True,
**decode_options, ):
"""
Transcribe an audio file using Whisper
Parameters
----------
model: Whisper
The Whisper model instance
mel: paddle.Tensor
The audio feature
verbose: bool
Whether to display the text being decoded to the console. If True, displays all the details,
If False, displays minimal details. If None, does not display anything
temperature: Union[float, Tuple[float, ...]]
Temperature for sampling. It can be a tuple of temperatures, which will be successfully used
upon failures according to either `compression_ratio_threshold` or `logprob_threshold`.
compression_ratio_threshold: float
If the gzip compression ratio is above this value, treat as failed
logprob_threshold: float
If the average log probability over sampled tokens is below this value, treat as failed
no_speech_threshold: float
If the no_speech probability is higher than this value AND the average log probability
over sampled tokens is below `logprob_threshold`, consider the segment as silent
condition_on_previous_text: bool
if True, the previous output of the model is provided as a prompt for the next window;
disabling may make the text inconsistent across windows, but the model becomes less prone to
getting stuck in a failure loop, such as repetition looping or timestamps going out of sync.
decode_options: dict
Keyword arguments to construct `DecodingOptions` instances
Returns
-------
A dictionary containing the resulting text ("text") and segment-level details ("segments"), and
the spoken language ("language"), which is detected when `decode_options["language"]` is None.
"""
dtype = np.float32 #paddle only support float32
if dtype == np.float32:
decode_options["fp16"] = False
if decode_options.get(
"language") == 'None' or decode_options.get("language", None) is None:
if not model.is_multilingual:
decode_options["language"] = "en"
else:
if verbose:
print(
"Detecting language using up to the first 30 seconds. Use `--language` to specify the language"
)
segment = pad_or_trim(mel, N_FRAMES)
_, probs = model.detect_language(segment, resource_path)
decode_options["language"] = max(probs, key=probs.get)
if verbose is not None:
print(
f"Detected language: {LANGUAGES[decode_options['language']].title()}"
)
language = decode_options["language"]
task = decode_options.get("task", "transcribe")
tokenizer = get_tokenizer(
model.is_multilingual,
resource_path=resource_path,
language=language,
task=task)
def decode_with_fallback(segment: paddle.Tensor) -> DecodingResult:
temperatures = [temperature] if isinstance(temperature, (
int, float)) else temperature
decode_result = None
for t in temperatures:
kwargs = {**decode_options}
if t > 0:
# disable beam_size and patience when t > 0
kwargs.pop("beam_size", None)
kwargs.pop("patience", None)
else:
# disable best_of when t == 0
kwargs.pop("best_of", None)
options = DecodingOptions(**kwargs, temperature=t)
decode_result = model.decode(segment, options, resource_path)
needs_fallback = False
if compression_ratio_threshold is not None and decode_result.compression_ratio > compression_ratio_threshold:
needs_fallback = True # too repetitive
if logprob_threshold is not None and decode_result.avg_logprob < logprob_threshold:
needs_fallback = True # average log probability is too low
if not needs_fallback:
break
return decode_result
seek = 0
input_stride = utils.exact_div(
N_FRAMES, model.dims.n_audio_ctx) # mel frames per output token: 2
time_precision = (input_stride * HOP_LENGTH /
SAMPLE_RATE) # time per output token: 0.02 (seconds)
all_tokens = []
all_segments = []
prompt_reset_since = 0
initial_prompt = decode_options.pop("initial_prompt", None) or []
if initial_prompt:
initial_prompt = tokenizer.encode(" " +
initial_prompt.strip()).input_ids
all_tokens.extend(initial_prompt)
def add_segment(*,
start: float,
end: float,
text_tokens: paddle.Tensor,
result: DecodingResult):
text = tokenizer.decode(
[token for token in text_tokens if token < tokenizer.eot])
if len(text.strip()) == 0: # skip empty text output
return
all_segments.append({
"id": len(all_segments),
"seek": seek,
"start": start,
"end": end,
"text": text,
"tokens": result.tokens,
"temperature": result.temperature,
"avg_logprob": result.avg_logprob,
"compression_ratio": result.compression_ratio,
"no_speech_prob": result.no_speech_prob,
})
if verbose:
print(
f"[{utils.format_timestamp(start)} --> {utils.format_timestamp(end)}] {text}"
)
# show the progress bar when verbose is False (otherwise the transcribed text will be printed)
num_frames = mel.shape[-1]
previous_seek_value = seek
with tqdm.tqdm(
total=num_frames, unit='frames',
disable=verbose is not False) as pbar:
while seek < num_frames:
timestamp_offset = float(seek * HOP_LENGTH / SAMPLE_RATE)
segment = pad_or_trim(mel[:, seek:], N_FRAMES)
segment_duration = segment.shape[-1] * HOP_LENGTH / SAMPLE_RATE
decode_options["prompt"] = all_tokens[prompt_reset_since:]
result: DecodingResult = decode_with_fallback(segment)
tokens = paddle.to_tensor(result.tokens)
if no_speech_threshold is not None:
# no voice activity check
should_skip = result.no_speech_prob > no_speech_threshold
if logprob_threshold is not None and result.avg_logprob > logprob_threshold:
# don't skip if the logprob is high enough, despite the no_speech_prob
should_skip = False
if should_skip:
seek += segment.shape[
-1] # fast-forward to the next segment boundary
continue
timestamp_tokens: paddle.Tensor = tokens.greater_equal(
paddle.to_tensor(tokenizer.timestamp_begin))
consecutive = paddle.where(timestamp_tokens[:-1] & timestamp_tokens[
1:])[0]
if len(
consecutive
) > 0: # if the output contains two consecutive timestamp tokens
consecutive = paddle.add(consecutive, paddle.to_tensor(1))
last_slice = 0
for current_slice in consecutive:
sliced_tokens = tokens[last_slice:current_slice]
start_timestamp_position = (
sliced_tokens[0].item() - tokenizer.timestamp_begin)
end_timestamp_position = (
sliced_tokens[-1].item() - tokenizer.timestamp_begin)
add_segment(
start=timestamp_offset + start_timestamp_position *
time_precision,
end=timestamp_offset + end_timestamp_position *
time_precision,
text_tokens=sliced_tokens[1:-1],
result=result, )
last_slice = current_slice
last_timestamp_position = (
tokens[last_slice - 1].item() - tokenizer.timestamp_begin)
seek += last_timestamp_position * input_stride
all_tokens.extend(tokens[:last_slice + 1].tolist())
else:
duration = segment_duration
timestamps = tokens[timestamp_tokens.nonzero().flatten()]
if len(timestamps) > 0 and timestamps[
-1].item() != tokenizer.timestamp_begin:
# no consecutive timestamps but it has a timestamp; use the last one.
# single timestamp at the end means no speech after the last timestamp.
last_timestamp_position = timestamps[
-1].item() - tokenizer.timestamp_begin
duration = last_timestamp_position * time_precision
add_segment(
start=timestamp_offset,
end=timestamp_offset + duration,
text_tokens=tokens,
result=result, )
seek += segment.shape[-1]
all_tokens.extend(tokens.tolist())
if not condition_on_previous_text or result.temperature > 0.5:
# do not feed the prompt tokens if a high temperature was used
prompt_reset_since = len(all_tokens)
# update progress bar
pbar.update(min(num_frames, seek) - previous_seek_value)
previous_seek_value = seek
return dict(
text=tokenizer.decode(all_tokens[len(initial_prompt):]),
segments=all_segments,
language=language)
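# Illustrative usage sketch (an assumption, not from the original file): calling
# transcribe() on a precomputed log-Mel feature. `model` and `mel` are assumed to
# be prepared by the caller (model loading and feature extraction live elsewhere
# in paddlespeech).
def _example_transcribe(model: "Whisper", mel: paddle.Tensor,
                        resource_path: str) -> str:
    result = transcribe(model, mel, resource_path,
                        temperature=0.0, verbose=False)
    return result["text"]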
class SequenceRanker:
def rank(self,
tokens: List[List[paddle.Tensor]],
sum_logprobs: List[List[float]]) -> List[int]:
"""
Given a list of groups of samples and their cumulative log probabilities,
return the indices of the samples in each group to select as the final result
"""
raise NotImplementedError
class MaximumLikelihoodRanker(SequenceRanker):
"""
Select the sample with the highest log probabilities, penalized using either
a simple length normalization or Google NMT paper's length penalty
"""
def __init__(self, length_penalty: Optional[float]):
self.length_penalty = length_penalty
def rank(self,
tokens: List[List[paddle.Tensor]],
sum_logprobs: List[List[float]]):
def scores(logprobs, lengths):
result = []
for logprob, length in zip(logprobs, lengths):
if self.length_penalty is None:
penalty = length
else:
# from the Google NMT paper
penalty = ((5 + length) / 6)**self.length_penalty
result.append(logprob / penalty)
return result
# get the sequence with the highest score
lengths = [[len(t) for t in s] for s in tokens]
return [np.argmax(scores(p, l)) for p, l in zip(sum_logprobs, lengths)]
class TokenDecoder:
def reset(self):
"""Initialize any stateful variables for decoding a new sequence"""
def update(self,
tokens: paddle.Tensor,
logits: paddle.Tensor,
sum_logprobs: paddle.Tensor) -> Tuple[paddle.Tensor, bool]:
"""Specify how to select the next token, based on the current trace and logits
Parameters
----------
tokens : Tensor, shape = (n_batch, current_sequence_length)
all tokens in the context so far, including the prefix and sot_sequence tokens
logits : Tensor, shape = (n_batch, vocab_size)
per-token logits of the probability distribution at the current step
sum_logprobs : Tensor, shape = (n_batch)
cumulative log probabilities for each sequence
Returns
-------
tokens : Tensor, shape = (n_batch, current_sequence_length + 1)
the tokens, appended with the selected next token
completed : bool
True if all sequences has reached the end of text
"""
raise NotImplementedError
def finalize(
self, tokens: paddle.Tensor, sum_logprobs: paddle.Tensor
) -> Tuple[Sequence[Sequence[paddle.Tensor]], List[List[float]]]:
"""Finalize search and return the final candidate sequences
Parameters
----------
tokens : Tensor, shape = (batch_size, beam_size, current_sequence_length)
all tokens in the context so far, including the prefix and sot_sequence
sum_logprobs : Tensor, shape = (batch_size, beam_size)
cumulative log probabilities for each sequence
Returns
-------
tokens : Sequence[Sequence[Tensor]], length = batch_size
sequence of Tensors containing candidate token sequences, for each audio input
sum_logprobs : List[List[float]], length = batch_size
sequence of cumulative log probabilities corresponding to the above
"""
raise NotImplementedError
class GreedyDecoder(TokenDecoder):
def __init__(self, temperature: float, eot: int):
self.temperature = temperature
self.eot = eot
def update(self,
tokens: paddle.Tensor,
logits: paddle.Tensor,
sum_logprobs: paddle.Tensor) -> Tuple[paddle.Tensor, bool]:
temperature = self.temperature
if temperature == 0:
next_tokens = paddle.argmax(logits, axis=-1)
else:
next_tokens = Categorical(logits=logits / temperature).sample([1])
next_tokens = paddle.reshape(next_tokens, [
next_tokens.shape[0] * next_tokens.shape[1],
])
logprobs = F.log_softmax(logits, axis=-1, dtype=paddle.float32)
current_logprobs = logprobs[paddle.arange(logprobs.shape[0]),
next_tokens]
sum_logprobs += current_logprobs * paddle.to_tensor(
(tokens[:, -1] != self.eot), dtype=paddle.float32)
next_tokens[tokens[:, -1] == self.eot] = self.eot
tokens = paddle.concat([tokens, next_tokens[:, None]], axis=-1)
completed = paddle.all((tokens[:, -1] == self.eot))
return tokens, completed
def finalize(self, tokens: paddle.Tensor, sum_logprobs: paddle.Tensor):
# make sure each sequence has at least one EOT token at the end
tokens = F.pad(tokens, (0, 1), value=self.eot, data_format="NCL")
return tokens, sum_logprobs.tolist()
class BeamSearchDecoder(TokenDecoder):
def __init__(self,
beam_size: int,
eot: int,
inference: Inference,
patience: Optional[float]=None):
self.beam_size = beam_size
self.eot = eot
self.inference = inference
self.patience = patience or 1.0
self.max_candidates: int = round(beam_size * self.patience)
self.finished_sequences = None
assert self.max_candidates > 0, f"Invalid beam size ({beam_size}) or patience ({patience})"
def reset(self):
self.finished_sequences = None
def update(self,
tokens: paddle.Tensor,
logits: paddle.Tensor,
sum_logprobs: paddle.Tensor) -> Tuple[paddle.Tensor, bool]:
if tokens.shape[0] % self.beam_size != 0:
raise ValueError(f"{tokens.shape}[0] % {self.beam_size} != 0")
batch_size = tokens.shape[0] // self.beam_size
if self.finished_sequences is None: # for the first update
self.finished_sequences = [{} for _ in range(batch_size)]
logprobs = F.log_softmax(logits, axis=-1, dtype=paddle.float32)
next_tokens, source_indices, finished_sequences = [], [], []
for i in range(batch_size):
scores, sources, finished = {}, {}, {}
# STEP 1: calculate the cumulative log probabilities for possible candidates
for j in range(self.beam_size):
idx = i * self.beam_size + j
prefix = tokens[idx].tolist()
logprob, token = paddle.topk(
logprobs[idx], k=self.beam_size + 1)
for logprob, token in zip(logprob, token):
new_logprob = (sum_logprobs[idx] + logprob).tolist()[0]
sequence = tuple(prefix + [token.tolist()[0]])
scores[sequence] = new_logprob
sources[sequence] = idx
# STEP 2: rank the candidates and keep the top beam_size sequences for each audio
saved = 0
for sequence in sorted(scores, key=scores.get, reverse=True):
if sequence[-1] == self.eot:
finished[sequence] = scores[sequence]
else:
sum_logprobs[len(next_tokens)] = scores[sequence]
next_tokens.append(sequence)
source_indices.append(sources[sequence])
saved += 1
if saved == self.beam_size:
break
finished_sequences.append(finished)
tokens = paddle.to_tensor(next_tokens)
self.inference.rearrange_kv_cache(source_indices)
# add newly finished sequences to self.finished_sequences
assert len(self.finished_sequences) == len(finished_sequences)
for previously_finished, newly_finished in zip(self.finished_sequences,
finished_sequences):
for seq in sorted(
newly_finished, key=newly_finished.get, reverse=True):
if len(previously_finished) >= self.max_candidates:
break # the candidate list is full
previously_finished[seq] = newly_finished[seq]
        # mark as completed if every audio input has enough finished candidates
completed = all(
len(sequences) >= self.max_candidates
for sequences in self.finished_sequences)
return tokens, completed
def finalize(self,
preceding_tokens: paddle.Tensor,
sum_logprobs: paddle.Tensor):
# collect all finished sequences, including patience, and add unfinished ones if not enough
sum_logprobs = sum_logprobs.cpu()
for i, sequences in enumerate(self.finished_sequences):
if len(sequences
) < self.beam_size: # when not enough sequences are finished
for j in list(np.argsort(sum_logprobs[i]))[::-1]:
sequence = preceding_tokens[i, j].tolist() + [self.eot]
sequences[tuple(sequence)] = sum_logprobs[i][j].item()
if len(sequences) >= self.beam_size:
break
tokens: List[List[paddle.Tensor]] = [
[paddle.to_tensor(seq) for seq in sequences.keys()]
for sequences in self.finished_sequences
]
sum_logprobs: List[List[float]] = [
list(sequences.values()) for sequences in self.finished_sequences
]
return tokens, sum_logprobs
class LogitFilter:
def apply(self, logits: paddle.Tensor, tokens: paddle.Tensor) -> None:
"""Apply any filtering or masking to logits in-place
Parameters
----------
logits : Tensor, shape = (n_batch, vocab_size)
per-token logits of the probability distribution at the current step
tokens : Tensor, shape = (n_batch, current_sequence_length)
all tokens in the context so far, including the prefix and sot_sequence tokens
"""
raise NotImplementedError
class SuppressBlank(LogitFilter):
def __init__(self, tokenizer: Tokenizer, sample_begin: int):
self.tokenizer = tokenizer
self.sample_begin = sample_begin
def apply(self, logits: paddle.Tensor, tokens: paddle.Tensor):
if tokens.shape[1] == self.sample_begin:
logits[:, self.tokenizer.encode(" ").input_ids +
[self.tokenizer.eot]] = -np.inf
class SuppressTokens(LogitFilter):
def __init__(self, suppress_tokens: Sequence[int]):
self.suppress_tokens = list(suppress_tokens)
def apply(self, logits: paddle.Tensor, tokens: paddle.Tensor):
logits[:, self.suppress_tokens] = -np.inf
class ApplyTimestampRules(LogitFilter):
def __init__(self,
tokenizer: Tokenizer,
sample_begin: int,
max_initial_timestamp_index: Optional[int]):
self.tokenizer = tokenizer
self.sample_begin = sample_begin
self.max_initial_timestamp_index = max_initial_timestamp_index
def apply(self, logits: paddle.Tensor, tokens: paddle.Tensor):
# suppress <|notimestamps|> which is handled by without_timestamps
if self.tokenizer.no_timestamps is not None:
logits[:, self.tokenizer.no_timestamps] = -np.inf
# timestamps have to appear in pairs, except directly before EOT; mask logits accordingly
for k in range(tokens.shape[0]):
seq = [t for t in tokens[k, self.sample_begin:].tolist()]
last_was_timestamp = len(seq) >= 1 and seq[
-1] >= self.tokenizer.timestamp_begin
penultimate_was_timestamp = len(seq) < 2 or seq[
-2] >= self.tokenizer.timestamp_begin
if last_was_timestamp:
if penultimate_was_timestamp: # has to be non-timestamp
logits[k, self.tokenizer.timestamp_begin:] = -np.inf
else: # cannot be normal text tokens
logits[k, :self.tokenizer.eot] = -np.inf
# apply the `max_initial_timestamp` option
if tokens.shape[
1] == self.sample_begin and self.max_initial_timestamp_index is not None:
last_allowed = self.tokenizer.timestamp_begin + self.max_initial_timestamp_index
logits[:, last_allowed + 1:] = -np.inf
# if sum of probability over timestamps is above any other token, sample timestamp
logprobs = F.log_softmax(logits, axis=-1, dtype=paddle.float32)
for k in range(tokens.shape[0]):
timestamp_logprob = paddle.logsumexp(
logprobs[k, self.tokenizer.timestamp_begin:], axis=-1)
max_text_token_logprob = paddle.max(
logprobs[k, :self.tokenizer.timestamp_begin])
if timestamp_logprob > max_text_token_logprob:
logits[k, :self.tokenizer.timestamp_begin] = -np.inf
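# Hedged illustration (not part of the original library) of how LogitFilter
# instances are chained: each filter mutates the per-step logits in-place before
# the decoder samples the next token, exactly as DecodingTask does below. The
# vocabulary size and suppressed token ids are assumptions for illustration.
def _logit_filter_usage_sketch():
    logits = paddle.rand([2, 51865])  # (n_batch, vocab_size)
    tokens = paddle.to_tensor([[1, 2], [3, 4]])  # (n_batch, current_sequence_length)
    filters = [SuppressTokens([0, 1, 2])]  # suppress a few illustrative token ids
    for logit_filter in filters:
        logit_filter.apply(logits, tokens)  # sets logits[:, [0, 1, 2]] to -inf
    return logits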
class DecodingTask:
inference: Inference
sequence_ranker: SequenceRanker
decoder: TokenDecoder
logit_filters: List[LogitFilter]
def __init__(self,
model: "Whisper",
options: DecodingOptions,
resource_path: str):
self.model = model
language = options.language or "en"
tokenizer = get_tokenizer(
model.is_multilingual,
resource_path=resource_path,
language=language,
task=options.task)
self.tokenizer: Tokenizer = tokenizer
self.options: DecodingOptions = self._verify_options(options)
self.resource_path: str = resource_path
self.beam_size: int = options.beam_size or options.best_of or 1
self.n_ctx: int = model.dims.n_text_ctx
self.sample_len: int = options.sample_len or model.dims.n_text_ctx // 2
self.sot_sequence: Tuple[int] = tokenizer.sot_sequence
if self.options.without_timestamps:
self.sot_sequence = tokenizer.sot_sequence_including_notimestamps
self.initial_tokens: Tuple[int] = self._get_initial_tokens()
self.sample_begin: int = len(self.initial_tokens)
self.sot_index: int = self.initial_tokens.index(tokenizer.sot)
# inference: implements the forward pass through the decoder, including kv caching
self.inference = WhisperInference(model, len(self.initial_tokens))
# sequence ranker: implements how to rank a group of sampled sequences
self.sequence_ranker = MaximumLikelihoodRanker(options.length_penalty)
# decoder: implements how to select the next tokens, given the autoregressive distribution
if options.beam_size is not None:
self.decoder = BeamSearchDecoder(options.beam_size, tokenizer.eot,
self.inference, options.patience)
else:
self.decoder = GreedyDecoder(options.temperature, tokenizer.eot)
# logit filters: applies various rules to suppress or penalize certain tokens
self.logit_filters = []
if self.options.suppress_blank:
self.logit_filters.append(
SuppressBlank(self.tokenizer, self.sample_begin))
if self.options.suppress_tokens:
self.logit_filters.append(
SuppressTokens(self._get_suppress_tokens()))
if not options.without_timestamps:
precision = CHUNK_LENGTH / model.dims.n_audio_ctx # usually 0.02 seconds
max_initial_timestamp_index = None
if options.max_initial_timestamp:
max_initial_timestamp_index = round(
self.options.max_initial_timestamp / precision)
self.logit_filters.append(
ApplyTimestampRules(tokenizer, self.sample_begin,
max_initial_timestamp_index))
def _verify_options(self, options: DecodingOptions) -> DecodingOptions:
if options.beam_size is not None and options.best_of is not None:
raise ValueError("beam_size and best_of can't be given together")
if options.temperature == 0:
if options.best_of is not None:
raise ValueError(
"best_of with greedy sampling (T=0) is not compatible")
if options.patience is not None and options.beam_size is None:
raise ValueError("patience requires beam_size to be given")
if options.length_penalty is not None and not (
0 <= options.length_penalty <= 1):
raise ValueError(
"length_penalty (alpha) should be a value between 0 and 1")
return options
def _get_initial_tokens(self) -> Tuple[int]:
tokens = list(self.sot_sequence)
prefix = self.options.prefix
prompt = self.options.prompt
if prefix:
            prefix_tokens = (
                self.tokenizer.encode(" " + prefix.strip()).input_ids
                if isinstance(prefix, str) else prefix)
if self.sample_len is not None:
max_prefix_len = self.n_ctx // 2 - self.sample_len
prefix_tokens = prefix_tokens[-max_prefix_len:]
tokens = tokens + prefix_tokens
if prompt:
            prompt_tokens = (
                self.tokenizer.encode(" " + prompt.strip()).input_ids
                if isinstance(prompt, str) else prompt)
tokens = [self.tokenizer.sot_prev] + prompt_tokens[-(self.n_ctx // 2
- 1):] + tokens
return tuple(tokens)
def _get_suppress_tokens(self) -> Tuple[int]:
suppress_tokens = self.options.suppress_tokens
if isinstance(suppress_tokens, str):
suppress_tokens = [int(t) for t in suppress_tokens.split(",")]
if -1 in suppress_tokens:
suppress_tokens = [t for t in suppress_tokens if t >= 0]
suppress_tokens.extend(self.tokenizer.non_speech_tokens)
elif suppress_tokens is None or len(suppress_tokens) == 0:
suppress_tokens = [] # interpret empty string as an empty list
else:
assert isinstance(suppress_tokens,
list), "suppress_tokens must be a list"
suppress_tokens.extend([
self.tokenizer.sot, self.tokenizer.sot_prev, self.tokenizer.sot_lm
])
if self.tokenizer.no_speech is not None:
# no-speech probability is collected separately
suppress_tokens.append(self.tokenizer.no_speech)
return tuple(sorted(set(suppress_tokens)))
def _get_audio_features(self, mel: paddle.Tensor):
#if self.options.fp16:
# mel = mel.half()
if mel.shape[-2:] == (self.model.dims.n_audio_ctx,
self.model.dims.n_audio_state):
# encoded audio features are given; skip audio encoding
audio_features = mel
else:
audio_features = self.model.encoder(mel)
#if audio_features.dtype != (np.float16 if self.options.fp16 else np.float32):
# return TypeError(f"audio_features has an incorrect dtype: {audio_features.dtype}")
return audio_features
def _detect_language(self,
audio_features: paddle.Tensor,
tokens: paddle.Tensor,
resource_path: str):
languages = [self.options.language] * audio_features.shape[0]
lang_probs = None
if self.options.language is None or self.options.task == "lang_id":
lang_tokens, lang_probs = self.model.detect_language(
audio_features, self.tokenizer, self.resource_path)
languages = [max(probs, key=probs.get) for probs in lang_probs]
if self.options.language is None:
tokens[:, self.sot_index +
1] = lang_tokens # write language tokens
return languages, lang_probs
def _main_loop(self, audio_features: paddle.Tensor, tokens: paddle.Tensor):
assert audio_features.shape[0] == tokens.shape[0]
n_batch = tokens.shape[0]
sum_logprobs: paddle.Tensor = paddle.zeros(
paddle.to_tensor(n_batch), dtype=paddle.float32)
no_speech_probs = [np.nan] * n_batch
try:
for i in range(self.sample_len):
logits = self.inference.logits(tokens, audio_features)
if i == 0 and self.tokenizer.no_speech is not None: # save no_speech_probs
probs_at_sot = F.softmax(
logits[:, self.sot_index],
axis=-1,
dtype=paddle.float32)
no_speech_probs = probs_at_sot[:, self.tokenizer.
no_speech].tolist()
# now we need to consider the logits at the last token only
logits = logits[:, -1]
                # apply the logit filters, e.g. to suppress blanks or penalize certain tokens
for logit_filter in self.logit_filters:
logit_filter.apply(logits, tokens)
# expand the tokens tensor with the selected next tokens
tokens, completed = self.decoder.update(tokens, logits,
sum_logprobs)
if completed or tokens.shape[-1] > self.n_ctx:
break
finally:
self.inference.cleanup_caching()
return tokens, sum_logprobs, no_speech_probs
@paddle.no_grad()
def run(self, mel: paddle.Tensor) -> List[DecodingResult]:
self.decoder.reset()
tokenizer: Tokenizer = self.tokenizer
batch_size: int = mel.shape[0]
audio_features: paddle.Tensor = self._get_audio_features(
mel) # encoder forward pass
        # expand the initial tokens so there is one row per audio input in the
        # batch; the previous hard-coded two-way concat only handled batch_size == 2
        tokens: paddle.Tensor = paddle.repeat_interleave(
            paddle.to_tensor([self.initial_tokens]), batch_size, axis=0)
# detect language if requested, overwriting the language token
languages, language_probs = self._detect_language(
paddle.to_tensor(audio_features),
paddle.to_tensor(tokens), self.resource_path)
if self.options.task == "lang_id":
return [
DecodingResult(
audio_features=features,
language=language,
language_probs=probs) for features, language, probs in
zip(audio_features, languages, language_probs)
]
# repeat the audio & text tensors by the group size, for beam search or best-of-n sampling
audio_features = paddle.repeat_interleave(
audio_features, self.beam_size, axis=0)
tokens = paddle.repeat_interleave(tokens, self.beam_size, axis=0)
# call the main sampling loop
tokens, sum_logprobs, no_speech_probs = self._main_loop(audio_features,
tokens)
# reshape the tensors to have (batch_size, beam_size) as the first two dimensions
audio_features = audio_features[::self.beam_size]
no_speech_probs = no_speech_probs[::self.beam_size]
assert audio_features.shape[0] == len(no_speech_probs) == batch_size
tokens = tokens.reshape([batch_size, self.beam_size, -1])
sum_logprobs = sum_logprobs.reshape([batch_size, self.beam_size])
# get the final candidates for each group, and slice between the first sampled token and EOT
tokens, sum_logprobs = self.decoder.finalize(tokens, sum_logprobs)
tokens: List[List[paddle.Tensor]] = [[
t[self.sample_begin:(t == tokenizer.eot).nonzero()[0, 0]] for t in s
] for s in tokens]
# select the top-ranked sample in each group
selected = self.sequence_ranker.rank(tokens, sum_logprobs)
tokens: List[List[
int]] = [t[i].tolist() for i, t in zip(selected, tokens)]
texts: List[str] = [tokenizer.decode(t).strip() for t in tokens]
sum_logprobs: List[
float] = [lp[i] for i, lp in zip(selected, sum_logprobs)]
avg_logprobs: List[
float] = [lp / (len(t) + 1) for t, lp in zip(tokens, sum_logprobs)]
fields = (texts, languages, tokens, audio_features, avg_logprobs,
no_speech_probs)
if len(set(map(len, fields))) != 1:
raise RuntimeError(
f"inconsistent result lengths: {list(map(len, fields))}")
return [
DecodingResult(
audio_features=features,
language=language,
tokens=tokens,
text=text,
avg_logprob=avg_logprob,
no_speech_prob=no_speech_prob,
temperature=self.options.temperature,
compression_ratio=utils.compression_ratio(text), )
for text, language, tokens, features, avg_logprob, no_speech_prob in
zip(*fields)
]
@paddle.no_grad()
def decode(
model: "Whisper",
mel: paddle.Tensor,
options: DecodingOptions=DecodingOptions(),
resource_path=str, ) -> Union[DecodingResult, List[DecodingResult]]:
"""
Performs decoding of 30-second audio segment(s), provided as Mel spectrogram(s).
Parameters
----------
model: Whisper
the Whisper model instance
mel: paddle.Tensor, shape = (80, 3000) or (*, 80, 3000)
A tensor containing the Mel spectrogram(s)
    options: DecodingOptions
        A dataclass that contains all necessary options for decoding 30-second segments
    resource_path: str
        Path to the directory holding the tokenizer files and mel filterbank assets
Returns
-------
result: Union[DecodingResult, List[DecodingResult]]
The result(s) of decoding contained in `DecodingResult` dataclass instance(s)
"""
single = mel.ndim == 2
if single:
mel = mel.unsqueeze(0)
result = DecodingTask(model, options, resource_path).run(mel)
if single:
result = result[0]
return result
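# Hedged usage sketch (not part of the original file): decoding a single mel
# segment with greedy sampling. The DecodingOptions fields used here exist in
# this module, but the particular values are illustrative assumptions.
def _decode_usage_sketch(model: "Whisper", mel: paddle.Tensor, resource_path: str):
    options = DecodingOptions(task="transcribe", language="en", temperature=0.0,
                              without_timestamps=True)
    result = decode(model, mel, options, resource_path)  # mel: (80, 3000) or (*, 80, 3000)
    return result.text if mel.ndim == 2 else [r.text for r in result]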
class Whisper(nn.Layer):
def __init__(self, dims: ModelDimensions):
super().__init__()
self.dims = dims
self.encoder = AudioEncoder(
self.dims.n_mels,
self.dims.n_audio_ctx,
self.dims.n_audio_state,
self.dims.n_audio_head,
self.dims.n_audio_layer, )
self.decoder = TextDecoder(
self.dims.n_vocab,
self.dims.n_text_ctx,
self.dims.n_text_state,
self.dims.n_text_head,
self.dims.n_text_layer, )
def embed_audio(self, mel: paddle.Tensor):
return self.encoder.forward(mel)
def logits(self, tokens: paddle.Tensor, audio_features: paddle.Tensor):
return self.decoder.forward(tokens, audio_features)
def forward(self, mel: paddle.Tensor,
tokens: paddle.Tensor) -> Dict[str, paddle.Tensor]:
return self.decoder(tokens, self.encoder(mel))
@property
def device(self):
return paddle.device.get_device()
@property
def is_multilingual(self):
return self.dims.n_vocab == 51865
def install_kv_cache_hooks(self, cache: Optional[dict]=None):
"""
The `MultiHeadAttention` module optionally accepts `kv_cache` which stores the key and value
tensors calculated for the previous positions. This method returns a dictionary that stores
all caches, and the necessary hooks for the key and value projection modules that save the
intermediate tensors to be reused during later calculations.
Returns
-------
cache : Dict[nn.Layer, paddle.Tensor]
A dictionary object mapping the key/value projection modules to its cache
hooks : List[RemovableHandle]
            List of hook handles; remove them to stop the hooks from being called
"""
cache = {**cache} if cache is not None else {}
hooks = []
def save_to_cache(module, _, output):
if module not in cache or output.shape[
1] > self.decoder.positional_embedding.shape[0]:
cache[
module] = output # save as-is, for the first token or cross attention
else:
cache[module] = paddle.concat(
[cache[module], output], axis=1).detach()
return cache[module]
def install_hooks(layer: nn.Layer):
if isinstance(layer, MultiHeadAttention):
hooks.append(
layer.key.register_forward_post_hook(save_to_cache))
hooks.append(
layer.value.register_forward_post_hook(save_to_cache))
self.decoder.apply(install_hooks)
return cache, hooks
detect_language = detect_language
transcribe = transcribe
decode = decode
def pad_or_trim(array, length: int=N_SAMPLES, *, axis: int=-1):
"""
Pad or trim the audio array to N_SAMPLES, as expected by the encoder.
"""
if paddle.is_tensor(array):
if array.shape[axis] > length:
array = array.index_select(axis=axis, index=paddle.arange(length))
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = paddle.transpose(array, (1, 0))
array = F.pad(
array, [pad for sizes in pad_widths[::-1] for pad in sizes],
data_format='NLC')
array = paddle.transpose(array, (1, 0))
else:
if array.shape[axis] > length:
array = array.take(indices=range(length), axis=axis)
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = paddle.transpose(array, (1, 0))
array = np.pad(array, pad_widths)
array = paddle.transpose(array, (1, 0))
return array
def hann_window(n_fft: int=N_FFT):
"""
    Hann window.
n_fft: The number of frequency components of the discrete Fourier transform.
"""
return paddle.to_tensor(
[0.5 - 0.5 * np.cos(2 * np.pi * n / n_fft) for n in range(n_fft)],
dtype=paddle.float32)
@lru_cache(maxsize=None)
def mel_filters(resource_path: str, n_mels: int=N_MELS) -> paddle.Tensor:
"""
load the mel filterbank matrix for projecting STFT into a Mel spectrogram.
Allows decoupling librosa dependency; saved using:
np.savez_compressed(
"mel_filters.npz",
mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
)
"""
assert n_mels == 80, f"Unsupported n_mels: {n_mels}"
with np.load(os.path.join(resource_path, "assets", "mel_filters.npz")) as f:
return paddle.to_tensor(f[f"mel_{n_mels}"])
def log_mel_spectrogram(audio: Union[str, np.ndarray, paddle.Tensor],
n_mels: int=N_MELS,
resource_path: str=None):
"""
    Compute the log-Mel spectrogram of the input audio.
Parameters
----------
audio: Union[str, np.ndarray, paddle.Tensor], shape = (*)
        The path to an audio file, or a NumPy array or Tensor containing the audio waveform sampled at 16 kHz
n_mels: int
The number of Mel-frequency filters, only 80 is supported
Returns
-------
paddle.Tensor, shape = (80, n_frames)
A Tensor that contains the Mel spectrogram
"""
if not paddle.is_tensor(audio):
if isinstance(audio, str):
audio, _ = soundfile.read(audio, dtype="float32", always_2d=True)
audio = audio[:, 0]
logger.info(f"audio shape: {audio.shape}")
audio = paddle.to_tensor(audio)
window = hann_window(N_FFT)
stft = paddle.signal.stft(audio, N_FFT, HOP_LENGTH, window=window)
magnitudes = stft[:, :-1].abs()**2
filters = mel_filters(resource_path, n_mels)
mel_spec = filters @ magnitudes
mel_spec = paddle.to_tensor(mel_spec.numpy().tolist())
log_spec = paddle.clip(mel_spec, min=1e-10).log10()
log_spec = paddle.maximum(log_spec, log_spec.max() - 8.0)
log_spec = (log_spec + 4.0) / 4.0
return log_spec
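# Hedged end-to-end sketch (not part of the original file): the typical feature
# pipeline wires the helpers above together -- waveform -> log-Mel spectrogram ->
# pad/trim to a 30-second window (3000 frames) -> decode. The audio path and the
# use of default DecodingOptions are assumptions for illustration.
def _feature_pipeline_sketch(model: "Whisper", wav_path: str, resource_path: str):
    mel = log_mel_spectrogram(wav_path, resource_path=resource_path)  # (80, n_frames)
    mel = pad_or_trim(mel, 3000, axis=-1)  # 3000 frames == 30 s at 100 frames/s
    return model.decode(mel, DecodingOptions(), resource_path)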
| [
"0",
"1",
"None",
" ",
"initial_prompt"
] |
2024-01-10 | pavan-krishna123/openai-text-analysis | Check_Status_Finetunning.py | import openai
# Replace with your API key
with open('openaiapikey.txt', 'r') as infile:
open_ai_api_key = infile.read()
openai.api_key = open_ai_api_key
def finetune_get(ftid):
resp = openai.FineTune.retrieve(ftid)
print(resp)
# Usage example
finetune_id = "ft-zTmRQq66suOeMw3sJrFJUNZS"
finetune_get(finetune_id)
| [] |
2024-01-10 | pavan-krishna123/openai-text-analysis | synthisize-sentiment.py | import openai
import os
import glob
from time import time, sleep
from uuid import uuid4
def open_file(filepath):
with open(filepath, 'r', encoding='utf-8') as infile:
return infile.read()
def save_file(filepath, content):
with open(filepath, 'w', encoding='utf-8') as outfile:
outfile.write(content)
openai.api_key = open_file('openaiapikey.txt')
def gpt3_completion(prompt, engine='text-davinci-002', temp=1.0, top_p=1.0, tokens=1000, freq_pen=0.0, pres_pen=0.0, stop=None):
max_retry = 5
retry = 0
prompt = prompt.encode(encoding='ASCII',errors='ignore').decode()
while True:
try:
response = openai.Completion.create(
engine=engine,
prompt=prompt,
temperature=temp,
max_tokens=tokens,
top_p=top_p,
frequency_penalty=freq_pen,
presence_penalty=pres_pen,
stop=stop)
text = response.choices[0].text.strip()
return text
except Exception as oops:
retry += 1
if retry >= max_retry:
return "GPT3 error: %s" % oops
print('Error communicating with OpenAI:', oops)
sleep(1)
def process_news_articles(folder_path):
for file_path in glob.glob(os.path.join(folder_path, "*")):
try:
# Read the article text from the file
article_text = open_file(file_path)
# Construct the prompt
prompt = f"Please classify the sentiment of the following news article as positive or negative:\n\n{article_text}"
# Save the prompt to the "prompts" folder
prompt_filename = os.path.join("prompts", f"{os.path.basename(file_path)}_{time()}.txt")
save_file(prompt_filename, prompt)
# Get the sentiment of the article
sentiment = gpt3_completion(prompt)
# Save the completion (sentiment) to the "completions" folder
completion_filename = os.path.join("completions", f"{os.path.basename(file_path)}_{time()}.txt")
save_file(completion_filename, f"Sentiment: {sentiment}\n")
print(f"Processed file: {file_path}")
except Exception as e:
print(f"Error processing file {file_path}: {e}\n")
if not os.path.exists("prompts"):
os.makedirs("prompts")
if not os.path.exists("completions"):
os.makedirs("completions")
if __name__ == '__main__':
folder_path = r"C:\Users\Pavankrishna\OneDrive\Desktop\new\test" # Replace this with the path to your folder containing news articles
process_news_articles(folder_path)
| [
"ignore",
"Please classify the sentiment of the following news article as positive or negative:\n\nPLACEHOLDER"
] |
2024-01-10 | pavan-krishna123/openai-text-analysis | fine-tune.py | import openai
with open('openaiapikey.txt', 'r') as infile:
open_ai_api_key = infile.read()
openai.api_key = open_ai_api_key
def file_upload(filename, purpose='fine-tune'):
resp = openai.File.create(purpose=purpose, file=open(filename))
print(resp)
return resp
def finetune_model(fileid, suffix, model='davinci'):
resp = openai.FineTune.create(
training_file=fileid,
model=model,
suffix=suffix
)
print(resp)
resp = file_upload('Sentiment.jsonl') # Replace with the name of your JSON Lines file
finetune_model(resp['id'], 'sentiment_classifier', 'davinci')
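# Hedged note (not part of the original script): the legacy openai.FineTune API
# used above expects "Sentiment.jsonl" to be a JSON Lines file of
# prompt/completion pairs, one example per line, e.g.
#   {"prompt": "Tweet: I loved the movie!\nSentiment:", "completion": " positive"}
# The prompt template shown here is an illustrative assumption, not taken from
# this repository's data.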
| [] |
2024-01-10 | sarcasterXO/gpt-humanizer | gpt-humanizer.py | import openai
import datetime
import pynecone as pc
from pynecone import Base
openai.api_key = 'ENTER YOUR API KEY'
class Message(Base):
original_text: str
text: str
created_at: str
class State(pc.State):
text: str = ""
messages: list[Message] = []
@pc.var
def output(self) -> str:
if not self.text.strip():
return "Humanized text will appear here."
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "assistant",
"content": f"When it comes to writing content, two factors are crucial, \"perplexity\" and \"burstiness\". Perplexity measures the complexity of text. Separately, burstiness compares the variations of sentences. Humans tend to write with greater burstiness, for example, with some longer or complex sentences alongside shorter ones. AI sentences tend to be more uniform. Therefore, when writing the following content, I am going to ask you to create, I need it to have a good amount of perplexity and burstiness. Using the concepts written previously, rewrite this text with a high degree of perplexity and burstiness: {self.text}"}
]
)
return response['choices'][0]['message']['content']
def post(self):
self.messages = [
Message(
original_text=self.text,
text=self.output,
created_at=datetime.datetime.now().strftime("%B %d, %Y %I:%M %p")
)
] + self.messages
def header():
"""Basic instructions to get started."""
return pc.box(
pc.text("GPT-Humanizer", font_size="2rem"),
pc.text(
"Humanize GPT-Text to avoid AI Detection!",
margin_top="0.5rem",
color="#666",
),
)
def text_box(text):
return pc.text(
text,
background_color="#fff",
padding="1rem",
border_radius="8px",
)
def message(message):
return pc.box(
pc.vstack(
text_box(message.original_text),
text_box(message.text),
pc.box(
pc.text(" · ", margin_x="0.3rem"),
pc.text(message.created_at),
display="flex",
font_size="0.8rem",
color="#666",
),
spacing="0.3rem",
align_items="left",
),
background_color="#f5f5f5",
padding="1rem",
border_radius="8px",
)
def smallcaps(text, **kwargs):
return pc.text(
text,
font_size="0.7rem",
font_weight="bold",
text_transform="uppercase",
letter_spacing="0.05rem",
**kwargs,
)
def output():
return pc.box(
pc.box(
smallcaps(
"Output",
color="#aeaeaf",
background_color="white",
padding_x="0.1rem",
),
position="absolute",
top="-0.5rem",
),
pc.text(State.output),
padding="1rem",
border="1px solid #eaeaef",
margin_top="1rem",
border_radius="8px",
position="relative",
)
def index():
"""The main view."""
return pc.container(
header(),
pc.input(
placeholder="Text to humanize",
on_blur=State.set_text,
margin_top="1rem",
border_color="#eaeaef",
),
output(),
pc.button("Humanize", on_click=State.post, margin_top="1rem"),
pc.vstack(
pc.foreach(State.messages, message),
margin_top="2rem",
spacing="1rem",
align_items="left",
),
padding="2rem",
max_width="600px",
)
# Add state and page to the app.
app = pc.App(state=State)
app.add_page(index, title="GPT-Humanizer")
app.compile()
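# Hedged usage note (not part of the original app): a Pynecone project is
# normally started from the project root with the framework's CLI, e.g.
#   pc init   # scaffold the project once
#   pc run    # start the dev server that serves this app
# Command names follow the `pynecone` package imported above; verify them against
# the installed version.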
| [] |
2024-01-10 | MikeeeGit/python-automation-chatgpt | chatgptlocal.py | import os
import sys
import openai
from langchain.chains import ConversationalRetrievalChain, RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import DirectoryLoader, TextLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.indexes import VectorstoreIndexCreator
from langchain.indexes.vectorstore import VectorStoreIndexWrapper
from langchain.llms import OpenAI
from langchain.vectorstores import Chroma
#import constants
# os.environ["OPENAI_API_KEY"] = constants.APIKEY
# Enable to save to disk & reuse the model (for repeated queries on the same data)
PERSIST = False
query = None
if len(sys.argv) > 1:
query = sys.argv[1]
if PERSIST and os.path.exists("persist"):
print("Reusing index...\n")
vectorstore = Chroma(persist_directory="persist", embedding_function=OpenAIEmbeddings())
index = VectorStoreIndexWrapper(vectorstore=vectorstore)
else:
#loader = TextLoader("data/data.txt") # Use this line if you only need data.txt
loader = DirectoryLoader("data/")
if PERSIST:
index = VectorstoreIndexCreator(vectorstore_kwargs={"persist_directory":"persist"}).from_loaders([loader])
else:
index = VectorstoreIndexCreator().from_loaders([loader])
chain = ConversationalRetrievalChain.from_llm(
llm=ChatOpenAI(model="gpt-3.5-turbo"),
retriever=index.vectorstore.as_retriever(search_kwargs={"k": 1}),
)
chat_history = []
while True:
if not query:
query = input("Prompt: ")
if query in ['quit', 'q', 'exit']:
sys.exit()
result = chain({"question": query, "chat_history": chat_history})
print(result['answer'])
chat_history.append((query, result['answer']))
query = None | [] |
2024-01-10 | tau-nlp/zero_scrolls | experiments~api~run_api_single_task.py | import time
import json
from pathlib import Path
from fire import Fire
from tqdm import tqdm
from experiments.api.anthropic_api import AnthropicAPI
from experiments.api.openai_api import OpenAIAPI
from datasets import load_dataset
def generate_predictions_using_api(dataset_name: str, model_name: str = "text-davinci-003",
log_progress_every_n_examples=20,
limit_to_n_examples=None):
model_folder_name = model_name.replace("-", "_")
if model_name in ["text-davinci-003", "gpt-3.5-turbo", "gpt-4"]:
api = OpenAIAPI(model_name, dataset_name)
elif model_name in ["claude-v1","claude-v1.3"]:
api = AnthropicAPI(model_name, dataset_name)
else:
raise ValueError(f"model_name {model_name} not supported")
api.init_api()
# load task data
zero_scrolls_dataset = load_dataset("tau/zero_scrolls",dataset_name)["test"]
preds_folder_path = Path(f"generations/api/{model_folder_name}")
preds_folder_path.mkdir(parents=True, exist_ok=True)
print(f"generating predictions for {dataset_name} with OpenAI {model_name}")
# API setup and parameters
parameters = api.init_params()
# with open(predictions_file_path, 'a') as f_out:
generations = dict()
for i, example in tqdm(enumerate(zero_scrolls_dataset)):
if limit_to_n_examples is not None and i >= limit_to_n_examples:
print(
f"Breaking when limit_to_n_examples is reached. i={i}, limit_to_n_examples={limit_to_n_examples}, generated {len(generations)} predictions")
break
prompt = api.build_prompt(example)
api.preprocess_parameters(parameters, prompt)
time.sleep(0.5) # helps with rate limits
response = api.call(parameters)
output = api.build_output(example, prompt, parameters, response)
generations[example["id"]] = output["prediction"]
if i % log_progress_every_n_examples == 0:
print(
f'generated {len(generations)} examples from {dataset_name} using {model_name}')
predictions_file_path = preds_folder_path / f"preds_{dataset_name}.json"
with open(predictions_file_path, 'w') as f_out:
json.dump(generations, f_out, indent=4)
print(
f'finished generating {len(generations)} predictions for {dataset_name} using OpenAI {model_name}')
if __name__ == '__main__':
Fire(generate_predictions_using_api)
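# Hedged usage note (not part of the original script): python-fire exposes the
# function's keyword arguments as command-line flags, so an invocation could look
# like the following (the subset name is a placeholder for one of the
# tau/zero_scrolls configs):
#   python run_api_single_task.py --dataset_name <subset> --model_name gpt-3.5-turbo --limit_to_n_examples 5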
| [] |
2024-01-10 | blasbot/ChatGPT | src~revChatGPT~V1.py | """
Standard ChatGPT
"""
from __future__ import annotations
import base64
import contextlib
import json
import logging
import time
import uuid
from functools import wraps
from os import environ
from os import getenv
from pathlib import Path
from typing import AsyncGenerator
from typing import Generator
from typing import NoReturn
import httpx
import requests
from httpx import AsyncClient
from OpenAIAuth import Authenticator
from OpenAIAuth import Error as AuthError
from . import __version__
from . import typings as t
from .utils import create_completer
from .utils import create_session
from .utils import get_input
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(funcName)s - %(message)s",
)
log = logging.getLogger(__name__)
def logger(is_timed: bool):
"""Logger decorator
Args:
is_timed (bool): Whether to include function running time in exit log
Returns:
_type_: decorated function
"""
def decorator(func):
wraps(func)
def wrapper(*args, **kwargs):
log.debug(
f"Entering {func.__name__} with args {args} and kwargs {kwargs}",
)
start = time.time()
out = func(*args, **kwargs)
end = time.time()
if is_timed:
log.debug(
f"Exiting {func.__name__} with return value {out}. Took {end - start} seconds.",
)
else:
log.debug(f"Exiting {func.__name__} with return value {out}")
return out
return wrapper
return decorator
BASE_URL = environ.get("CHATGPT_BASE_URL") or "https://ai.fakeopen.com/api/"
bcolors = t.Colors()
class Chatbot:
"""
Chatbot class for ChatGPT
"""
@logger(is_timed=True)
def __init__(
self,
config: dict[str, str],
conversation_id: str | None = None,
parent_id: str | None = None,
session_client=None,
lazy_loading: bool = True,
base_url: str | None = None,
) -> None:
"""Initialize a chatbot
Args:
config (dict[str, str]): Login and proxy info. Example:
{
"email": "OpenAI account email",
"password": "OpenAI account password",
"session_token": "<session_token>"
"access_token": "<access_token>"
"proxy": "<proxy_url_string>",
"paid": True/False, # whether this is a plus account
"_puid": "puid", # V4 only, if it is set, base_url will be changed to https://chat.openai.com/backend-api/
}
More details on these are available at https://github.com/acheong08/ChatGPT#configuration
conversation_id (str | None, optional): Id of the conversation to continue on. Defaults to None.
parent_id (str | None, optional): Id of the previous response message to continue on. Defaults to None.
session_client (_type_, optional): _description_. Defaults to None.
Raises:
Exception: _description_
"""
user_home = getenv("HOME")
if user_home is None:
user_home = Path().cwd()
self.cache_path = Path(Path().cwd(), ".chatgpt_cache.json")
else:
# mkdir ~/.config/revChatGPT
if not Path(user_home, ".config").exists():
Path(user_home, ".config").mkdir()
if not Path(user_home, ".config", "revChatGPT").exists():
Path(user_home, ".config", "revChatGPT").mkdir()
self.cache_path = Path(user_home, ".config", "revChatGPT", "cache.json")
self.config = config
self.session = session_client() if session_client else requests.Session()
if "email" in config and "password" in config:
try:
cached_access_token = self.__get_cached_access_token(
self.config.get("email", None),
)
except t.Error as error:
if error.code == 5:
raise
cached_access_token = None
if cached_access_token is not None:
self.config["access_token"] = cached_access_token
if "proxy" in config:
if not isinstance(config["proxy"], str):
error = TypeError("Proxy must be a string!")
raise error
proxies = {
"http": config["proxy"],
"https": config["proxy"],
}
if isinstance(self.session, AsyncClient):
proxies = {
"http://": config["proxy"],
"https://": config["proxy"],
}
self.session = AsyncClient(proxies=proxies)
else:
self.session.proxies.update(proxies)
self.conversation_id = conversation_id
self.parent_id = parent_id
self.conversation_mapping = {}
self.conversation_id_prev_queue = []
self.parent_id_prev_queue = []
self.lazy_loading = lazy_loading
if "_puid" in self.config and self.config["_puid"]:
self.base_url = "https://chat.openai.com/backend-api/"
self.__set_puid(self.config["_puid"])
else:
self.base_url = base_url or BASE_URL
self.__check_credentials()
@logger(is_timed=True)
def __check_credentials(self) -> None:
"""Check login info and perform login
Any one of the following is sufficient for login. Multiple login info can be provided at the same time and they will be used in the order listed below.
- access_token
- session_token
- email + password
Raises:
Exception: _description_
AuthError: _description_
"""
if "access_token" in self.config:
self.set_access_token(self.config["access_token"])
elif "session_token" in self.config:
pass
elif "email" not in self.config or "password" not in self.config:
error = t.AuthenticationError("Insufficient login details provided!")
raise error
if "access_token" not in self.config:
try:
self.login()
except AuthError as error:
print(error.details)
print(error.status_code)
raise error
@logger(is_timed=False)
def __set_puid(self, puid: str) -> None:
self.session.cookies.update(
{
"_puid": puid,
},
)
@logger(is_timed=False)
def set_access_token(self, access_token: str) -> None:
"""Set access token in request header and self.config, then cache it to file.
Args:
access_token (str): access_token
"""
self.session.headers.clear()
self.session.headers.update(
{
"Accept": "text/event-stream",
"Authorization": f"Bearer {access_token}",
"Content-Type": "application/json",
"X-Openai-Assistant-App-Id": "",
"Connection": "close",
"Accept-Language": "en-US,en;q=0.9",
"Referer": "https://chat.openai.com/chat",
},
)
self.session.cookies.update(
{
"library": "revChatGPT",
},
)
self.config["access_token"] = access_token
email = self.config.get("email", None)
if email is not None:
self.__cache_access_token(email, access_token)
@logger(is_timed=False)
def __get_cached_access_token(self, email: str | None) -> str | None:
"""Read access token from cache
Args:
email (str | None): email of the account to get access token
Raises:
Error: _description_
Error: _description_
Error: _description_
Returns:
str | None: access token string or None if not found
"""
email = email or "default"
cache = self.__read_cache()
access_token = cache.get("access_tokens", {}).get(email, None)
# Parse access_token as JWT
if access_token is not None:
try:
# Split access_token into 3 parts
s_access_token = access_token.split(".")
# Add padding to the middle part
s_access_token[1] += "=" * ((4 - len(s_access_token[1]) % 4) % 4)
d_access_token = base64.b64decode(s_access_token[1])
d_access_token = json.loads(d_access_token)
except base64.binascii.Error:
error = t.Error(
source="__get_cached_access_token",
message="Invalid access token",
code=t.ErrorType.INVALID_ACCESS_TOKEN_ERROR,
)
raise error from None
except json.JSONDecodeError:
error = t.Error(
source="__get_cached_access_token",
message="Invalid access token",
code=t.ErrorType.INVALID_ACCESS_TOKEN_ERROR,
)
raise error from None
exp = d_access_token.get("exp", None)
if exp is not None and exp < time.time():
error = t.Error(
source="__get_cached_access_token",
message="Access token expired",
code=t.ErrorType.EXPIRED_ACCESS_TOKEN_ERROR,
)
raise error
return access_token
@logger(is_timed=False)
def __cache_access_token(self, email: str, access_token: str) -> None:
"""Write an access token to cache
Args:
email (str): account email
access_token (str): account access token
"""
email = email or "default"
cache = self.__read_cache()
if "access_tokens" not in cache:
cache["access_tokens"] = {}
cache["access_tokens"][email] = access_token
self.__write_cache(cache)
@logger(is_timed=False)
def __write_cache(self, info: dict) -> None:
"""Write cache info to file
Args:
info (dict): cache info, current format
{
"access_tokens":{"[email protected]": 'this account's access token', }
}
"""
dirname = self.cache_path.home() or Path(".")
dirname.mkdir(parents=True, exist_ok=True)
json.dump(info, open(self.cache_path, "w", encoding="utf-8"), indent=4)
@logger(is_timed=False)
def __read_cache(self):
try:
cached = json.load(open(self.cache_path, encoding="utf-8"))
except (FileNotFoundError, json.decoder.JSONDecodeError):
cached = {}
return cached
@logger(is_timed=True)
def login(self) -> None:
if (
"email" not in self.config or "password" not in self.config
) and "session_token" not in self.config:
log.error("Insufficient login details provided!")
error = t.AuthenticationError("Insufficient login details provided!")
raise error
auth = Authenticator(
email_address=self.config.get("email"),
password=self.config.get("password"),
proxy=self.config.get("proxy"),
)
if self.config.get("session_token"):
log.debug("Using session token")
auth.session.cookies.set(
"__Secure-next-auth.session-token",
self.config["session_token"],
)
auth.get_access_token()
if auth.access_token is None:
del self.config["session_token"]
self.login()
return
else:
log.debug("Using authenticator to get access token")
auth.begin()
auth.get_access_token()
self.set_access_token(auth.access_token)
@logger(is_timed=True)
def __send_request(
self,
data: dict,
auto_continue: bool = False,
timeout: float = 360,
) -> Generator[dict, None, None]:
log.debug("Sending the payload")
cid, pid = data["conversation_id"], data["parent_message_id"]
model, message = None, ""
self.conversation_id_prev_queue.append(cid)
self.parent_id_prev_queue.append(pid)
response = self.session.post(
url=f"{self.base_url}conversation",
data=json.dumps(data),
timeout=timeout,
stream=True,
)
self.__check_response(response)
finish_details = None
for line in response.iter_lines():
# remove b' and ' at the beginning and end and ignore case
line = str(line)[2:-1]
if line.lower() == "internal server error":
log.error(f"Internal Server Error: {line}")
error = t.Error(
source="ask",
message="Internal Server Error",
code=t.ErrorType.SERVER_ERROR,
)
raise error
if not line or line is None:
continue
if "data: " in line:
line = line[6:]
if line == "[DONE]":
break
line = line.replace('\\"', '"')
line = line.replace("\\'", "'")
line = line.replace("\\\\", "\\")
try:
line = json.loads(line)
except json.decoder.JSONDecodeError:
continue
if not self.__check_fields(line):
raise ValueError(f"Field missing. Details: {str(line)}")
if line.get("message").get("author").get("role") != "assistant":
continue
message: str = line["message"]["content"]["parts"][0]
cid = line["conversation_id"]
pid = line["message"]["id"]
metadata = line["message"].get("metadata", {})
model = metadata.get("model_slug", None)
finish_details = metadata.get("finish_details", {"type": None})["type"]
yield {
"message": message,
"conversation_id": cid,
"parent_id": pid,
"model": model,
"finish_details": finish_details,
"end_turn": line["message"].get("end_turn", True),
"recipient": line["message"].get("recipient", "all"),
}
self.conversation_mapping[cid] = pid
if pid is not None:
self.parent_id = pid
if cid is not None:
self.conversation_id = cid
if not (auto_continue and finish_details == "max_tokens"):
return
message = message.strip("\n")
for i in self.continue_write(
conversation_id=cid,
model=model,
timeout=timeout,
):
i["message"] = message + i["message"]
yield i
@logger(is_timed=True)
def post_messages(
self,
messages: list[dict],
conversation_id: str | None = None,
parent_id: str | None = None,
model: str | None = None,
auto_continue: bool = False,
timeout: float = 360,
) -> Generator[dict, None, None]:
"""Ask a question to the chatbot
Args:
messages (list[dict]): The messages to send
conversation_id (str | None, optional): UUID for the conversation to continue on. Defaults to None.
parent_id (str | None, optional): UUID for the message to continue on. Defaults to None.
model (str | None, optional): The model to use. Defaults to None.
auto_continue (bool, optional): Whether to continue the conversation automatically. Defaults to False.
timeout (float, optional): Timeout for getting the full response, unit is second. Defaults to 360.
Yields: Generator[dict, None, None] - The response from the chatbot
dict: {
"message": str,
"conversation_id": str,
"parent_id": str,
"model": str,
"finish_details": str, # "max_tokens" or "stop"
"end_turn": bool,
"recipient": str,
}
"""
if parent_id and not conversation_id:
raise t.Error(
source="User",
message="conversation_id must be set once parent_id is set",
code=t.ErrorType.USER_ERROR,
)
if conversation_id and conversation_id != self.conversation_id:
self.parent_id = None
conversation_id = conversation_id or self.conversation_id
parent_id = parent_id or self.parent_id or ""
if not conversation_id and not parent_id:
parent_id = str(uuid.uuid4())
if conversation_id and not parent_id:
if conversation_id not in self.conversation_mapping:
if self.lazy_loading:
log.debug(
f"Conversation ID {conversation_id} not found in conversation mapping, try to get conversation history for the given ID",
)
with contextlib.suppress(Exception):
history = self.get_msg_history(conversation_id)
self.conversation_mapping[conversation_id] = history[
"current_node"
]
else:
self.__map_conversations()
if conversation_id in self.conversation_mapping:
parent_id = self.conversation_mapping[conversation_id]
else: # invalid conversation_id provided, treat as a new conversation
conversation_id = None
parent_id = str(uuid.uuid4())
data = {
"action": "next",
"messages": messages,
"conversation_id": conversation_id,
"parent_message_id": parent_id,
"model": model
or self.config.get("model")
or (
"text-davinci-002-render-paid"
if self.config.get("paid")
else "text-davinci-002-render-sha"
),
}
yield from self.__send_request(
data,
timeout=timeout,
auto_continue=auto_continue,
)
@logger(is_timed=True)
def ask(
self,
prompt: str,
conversation_id: str | None = None,
parent_id: str | None = None,
model: str | None = None,
auto_continue: bool = False,
timeout: float = 360,
) -> Generator[dict, None, None]:
"""Ask a question to the chatbot
Args:
prompt (str): The question
conversation_id (str | None, optional): UUID for the conversation to continue on. Defaults to None.
parent_id (str | None, optional): UUID for the message to continue on. Defaults to None.
model (str | None, optional): The model to use. Defaults to None.
auto_continue (bool, optional): Whether to continue the conversation automatically. Defaults to False.
timeout (float, optional): Timeout for getting the full response, unit is second. Defaults to 360.
Yields: Generator[dict, None, None] - The response from the chatbot
dict: {
"message": str,
"conversation_id": str,
"parent_id": str,
"model": str,
"finish_details": str, # "max_tokens" or "stop"
"end_turn": bool,
"recipient": str,
}
"""
messages = [
{
"id": str(uuid.uuid4()),
"role": "user",
"author": {"role": "user"},
"content": {"content_type": "text", "parts": [prompt]},
},
]
yield from self.post_messages(
messages,
conversation_id=conversation_id,
parent_id=parent_id,
model=model,
auto_continue=auto_continue,
timeout=timeout,
)
@logger(is_timed=True)
def continue_write(
self,
conversation_id: str | None = None,
parent_id: str = "",
model: str = "",
auto_continue: bool = False,
timeout: float = 360,
) -> Generator[dict, None, None]:
"""let the chatbot continue to write
Args:
conversation_id (str | None, optional): UUID for the conversation to continue on. Defaults to None.
parent_id (str | None, optional): UUID for the message to continue on. Defaults to None.
model (str | None, optional): The model to use. Defaults to None.
auto_continue (bool, optional): Whether to continue the conversation automatically. Defaults to False.
timeout (float, optional): Timeout for getting the full response, unit is second. Defaults to 360.
Yields:
dict: {
"message": str,
"conversation_id": str,
"parent_id": str,
"model": str,
"finish_details": str, # "max_tokens" or "stop"
"end_turn": bool,
"recipient": str,
}
"""
if parent_id and not conversation_id:
raise t.Error(
source="User",
message="conversation_id must be set once parent_id is set",
code=t.ErrorType.USER_ERROR,
)
if conversation_id and conversation_id != self.conversation_id:
self.parent_id = None
conversation_id = conversation_id or self.conversation_id
parent_id = parent_id or self.parent_id or ""
if not conversation_id and not parent_id:
parent_id = str(uuid.uuid4())
if conversation_id and not parent_id:
if conversation_id not in self.conversation_mapping:
if self.lazy_loading:
log.debug(
f"Conversation ID {conversation_id} not found in conversation mapping, try to get conversation history for the given ID",
)
with contextlib.suppress(Exception):
history = self.get_msg_history(conversation_id)
self.conversation_mapping[conversation_id] = history[
"current_node"
]
else:
log.debug(
f"Conversation ID {conversation_id} not found in conversation mapping, mapping conversations",
)
self.__map_conversations()
if conversation_id in self.conversation_mapping:
parent_id = self.conversation_mapping[conversation_id]
else: # invalid conversation_id provided, treat as a new conversation
conversation_id = None
parent_id = str(uuid.uuid4())
data = {
"action": "continue",
"conversation_id": conversation_id,
"parent_message_id": parent_id,
"model": model
or self.config.get("model")
or (
"text-davinci-002-render-paid"
if self.config.get("paid")
else "text-davinci-002-render-sha"
),
}
yield from self.__send_request(
data,
timeout=timeout,
auto_continue=auto_continue,
)
@logger(is_timed=False)
def __check_fields(self, data: dict) -> bool:
try:
data["message"]["content"]
except (TypeError, KeyError):
return False
return True
@logger(is_timed=False)
def __check_response(self, response: requests.Response) -> None:
"""Make sure response is success
Args:
response (_type_): _description_
Raises:
Error: _description_
"""
try:
response.raise_for_status()
except requests.exceptions.HTTPError as e:
error = t.Error(
source="OpenAI",
message=response.text,
code=response.status_code,
)
raise error from e
@logger(is_timed=True)
def get_conversations(
self,
offset: int = 0,
limit: int = 20,
encoding: str | None = None,
) -> list:
"""
Get conversations
:param offset: Integer
:param limit: Integer
"""
url = f"{self.base_url}conversations?offset={offset}&limit={limit}"
response = self.session.get(url)
self.__check_response(response)
if encoding is not None:
response.encoding = encoding
data = json.loads(response.text)
return data["items"]
@logger(is_timed=True)
def get_msg_history(self, convo_id: str, encoding: str | None = None) -> list:
"""
Get message history
:param id: UUID of conversation
:param encoding: String
"""
url = f"{self.base_url}conversation/{convo_id}"
response = self.session.get(url)
self.__check_response(response)
if encoding is not None:
response.encoding = encoding
return json.loads(response.text)
@logger(is_timed=True)
def gen_title(self, convo_id: str, message_id: str) -> str:
"""
Generate title for conversation
"""
response = self.session.post(
f"{self.base_url}conversation/gen_title/{convo_id}",
data=json.dumps(
{"message_id": message_id, "model": "text-davinci-002-render"},
),
)
self.__check_response(response)
return response.json().get("title", "Error generating title")
@logger(is_timed=True)
def change_title(self, convo_id: str, title: str) -> None:
"""
Change title of conversation
:param id: UUID of conversation
:param title: String
"""
url = f"{self.base_url}conversation/{convo_id}"
response = self.session.patch(url, data=json.dumps({"title": title}))
self.__check_response(response)
@logger(is_timed=True)
def delete_conversation(self, convo_id: str) -> None:
"""
Delete conversation
:param id: UUID of conversation
"""
url = f"{self.base_url}conversation/{convo_id}"
response = self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
@logger(is_timed=True)
def clear_conversations(self) -> None:
"""
Delete all conversations
"""
url = f"{self.base_url}conversations"
response = self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
@logger(is_timed=False)
def __map_conversations(self) -> None:
conversations = self.get_conversations()
histories = [self.get_msg_history(x["id"]) for x in conversations]
for x, y in zip(conversations, histories):
self.conversation_mapping[x["id"]] = y["current_node"]
@logger(is_timed=False)
def reset_chat(self) -> None:
"""
Reset the conversation ID and parent ID.
:return: None
"""
self.conversation_id = None
self.parent_id = str(uuid.uuid4())
@logger(is_timed=False)
def rollback_conversation(self, num: int = 1) -> None:
"""
Rollback the conversation.
:param num: Integer. The number of messages to rollback
:return: None
"""
for _ in range(num):
self.conversation_id = self.conversation_id_prev_queue.pop()
self.parent_id = self.parent_id_prev_queue.pop()
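# Hedged usage sketch (not part of the original module): streaming a reply from
# the synchronous Chatbot. The config shown uses only the "access_token" key; see
# the class docstring above for the other accepted keys. This helper is for
# illustration and is never called by the library.
def _chatbot_usage_sketch(access_token: str) -> str:
    chatbot = Chatbot(config={"access_token": access_token})
    prev_text = ""
    for data in chatbot.ask("Hello, how are you?"):
        # each yielded dict carries the full message generated so far
        print(data["message"][len(prev_text):], end="", flush=True)
        prev_text = data["message"]
    return prev_text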
class AsyncChatbot(Chatbot):
"""
Async Chatbot class for ChatGPT
"""
def __init__(
self,
config: dict,
conversation_id: str | None = None,
parent_id: str = "",
base_url: str = "",
) -> None:
"""
Same as Chatbot class, but with async methods.
Note:
AsyncChatbot is not compatible with OpenAI Web API, I don't know why the stream method doesn't work.
(But the sync version works fine)
So, if you want to use AsyncChatbot, you don't need to set the "_puid" parameter in the config.
"""
super().__init__(
config=config,
conversation_id=conversation_id,
parent_id=parent_id,
session_client=AsyncClient,
base_url=base_url,
)
async def __send_request(
self,
data: dict,
auto_continue: bool = False,
timeout: float = 360,
) -> AsyncGenerator[dict, None]:
cid, pid = data["conversation_id"], data["parent_message_id"]
self.conversation_id_prev_queue.append(cid)
self.parent_id_prev_queue.append(pid)
message = ""
finish_details = None
response = None
async with self.session.stream(
method="POST",
url=f"{self.base_url}conversation",
data=json.dumps(data),
timeout=timeout,
) as response:
await self.__check_response(response)
async for line in response.aiter_lines():
if not line or line is None:
continue
if "data: " in line:
line = line[6:]
if "[DONE]" in line:
break
try:
line = json.loads(line)
except json.decoder.JSONDecodeError:
continue
if not self.__check_fields(line):
raise ValueError(f"Field missing. Details: {str(line)}")
message: str = line["message"]["content"]["parts"][0]
cid = line["conversation_id"]
pid = line["message"]["id"]
metadata = line["message"].get("metadata", {})
model = metadata.get("model_slug", None)
finish_details = metadata.get("finish_details", {"type": None})["type"]
yield {
"message": message,
"conversation_id": cid,
"parent_id": pid,
"model": model,
"finish_details": finish_details,
"end_turn": line["message"].get("end_turn", True),
"recipient": line["message"].get("recipient", "all"),
}
self.conversation_mapping[cid] = pid
if pid:
self.parent_id = pid
if cid:
self.conversation_id = cid
if not (auto_continue and finish_details == "max_tokens"):
return
async for msg in self.continue_write(
conversation_id=cid,
auto_continue=auto_continue,
timeout=timeout,
):
msg["message"] = message + msg["message"]
yield msg
async def post_messages(
self,
messages: list[dict],
conversation_id: str | None = None,
parent_id: str = "",
model: str = "",
auto_continue: bool = False,
timeout: int = 360,
) -> AsyncGenerator[dict, None]:
"""Post messages to the chatbot
Args:
messages (list[dict]): the messages to post
conversation_id (str | None, optional): UUID for the conversation to continue on. Defaults to None.
parent_id (str, optional): UUID for the message to continue on. Defaults to "".
model (str, optional): The model to use. Defaults to "".
auto_continue (bool, optional): Whether to continue the conversation automatically. Defaults to False.
timeout (float, optional): Timeout for getting the full response, unit is second. Defaults to 360.
Yields:
AsyncGenerator[dict, None]: The response from the chatbot
{
"message": str,
"conversation_id": str,
"parent_id": str,
"model": str,
"finish_details": str,
"end_turn": bool,
"recipient": str,
}
"""
if parent_id and not conversation_id:
error = t.Error(
source="User",
message="conversation_id must be set once parent_id is set",
code=t.ErrorType.SERVER_ERROR,
)
raise error
if conversation_id and conversation_id != self.conversation_id:
self.parent_id = None
conversation_id = conversation_id or self.conversation_id
parent_id = parent_id or self.parent_id or ""
if not conversation_id and not parent_id:
parent_id = str(uuid.uuid4())
if conversation_id and not parent_id:
if conversation_id not in self.conversation_mapping:
await self.__map_conversations()
if conversation_id in self.conversation_mapping:
parent_id = self.conversation_mapping[conversation_id]
else: # invalid conversation_id provided, treat as a new conversation
conversation_id = None
parent_id = str(uuid.uuid4())
data = {
"action": "next",
"messages": messages,
"conversation_id": conversation_id,
"parent_message_id": parent_id,
"model": model
or self.config.get("model")
or (
"text-davinci-002-render-paid"
if self.config.get("paid")
else "text-davinci-002-render-sha"
),
}
async for msg in self.__send_request(
data=data,
auto_continue=auto_continue,
timeout=timeout,
):
yield msg
async def ask(
self,
prompt: str,
conversation_id: str | None = None,
parent_id: str = "",
model: str = "",
auto_continue: bool = False,
timeout: int = 360,
) -> AsyncGenerator[dict, None]:
"""Ask a question to the chatbot
Args:
prompt (str): The question to ask
conversation_id (str | None, optional): UUID for the conversation to continue on. Defaults to None.
parent_id (str, optional): UUID for the message to continue on. Defaults to "".
model (str, optional): The model to use. Defaults to "".
auto_continue (bool, optional): Whether to continue the conversation automatically. Defaults to False.
timeout (float, optional): Timeout for getting the full response, unit is second. Defaults to 360.
Yields:
AsyncGenerator[dict, None]: The response from the chatbot
{
"message": str,
"conversation_id": str,
"parent_id": str,
"model": str,
"finish_details": str,
"end_turn": bool,
"recipient": str,
}
"""
messages = [
{
"id": str(uuid.uuid4()),
"author": {"role": "user"},
"content": {"content_type": "text", "parts": [prompt]},
},
]
async for msg in self.post_messages(
messages=messages,
conversation_id=conversation_id,
parent_id=parent_id,
model=model,
auto_continue=auto_continue,
timeout=timeout,
):
yield msg
async def continue_write(
self,
conversation_id: str | None = None,
parent_id: str = "",
model: str = "",
auto_continue: bool = False,
timeout: float = 360,
) -> AsyncGenerator[dict, None]:
"""let the chatbot continue to write
Args:
conversation_id (str | None, optional): UUID for the conversation to continue on. Defaults to None.
            parent_id (str, optional): UUID for the message to continue on. Defaults to "".
            model (str, optional): The model to use. Defaults to "".
auto_continue (bool, optional): Whether to continue writing automatically. Defaults to False.
timeout (float, optional): Timeout for getting the full response, unit is second. Defaults to 360.
Yields:
AsyncGenerator[dict, None]: The response from the chatbot
{
"message": str,
"conversation_id": str,
"parent_id": str,
"model": str,
"finish_details": str,
"end_turn": bool,
"recipient": str,
}
"""
if parent_id and not conversation_id:
error = t.Error(
source="User",
message="conversation_id must be set once parent_id is set",
code=t.ErrorType.SERVER_ERROR,
)
raise error
if conversation_id and conversation_id != self.conversation_id:
self.parent_id = None
conversation_id = conversation_id or self.conversation_id
parent_id = parent_id or self.parent_id or ""
if not conversation_id and not parent_id:
parent_id = str(uuid.uuid4())
if conversation_id and not parent_id:
if conversation_id not in self.conversation_mapping:
await self.__map_conversations()
if conversation_id in self.conversation_mapping:
parent_id = self.conversation_mapping[conversation_id]
else: # invalid conversation_id provided, treat as a new conversation
conversation_id = None
parent_id = str(uuid.uuid4())
data = {
"action": "continue",
"conversation_id": conversation_id,
"parent_message_id": parent_id,
"model": model
or self.config.get("model")
or (
"text-davinci-002-render-paid"
if self.config.get("paid")
else "text-davinci-002-render-sha"
),
}
async for msg in self.__send_request(
data=data,
auto_continue=auto_continue,
timeout=timeout,
):
yield msg
async def get_conversations(self, offset: int = 0, limit: int = 20) -> list:
"""
Get conversations
:param offset: Integer
:param limit: Integer
"""
url = f"{self.base_url}conversations?offset={offset}&limit={limit}"
response = await self.session.get(url)
await self.__check_response(response)
data = json.loads(response.text)
return data["items"]
async def get_msg_history(
self,
convo_id: str,
encoding: str | None = "utf-8",
) -> dict:
"""
Get message history
:param id: UUID of conversation
"""
url = f"{self.base_url}conversation/{convo_id}"
response = await self.session.get(url)
if encoding is not None:
response.encoding = encoding
await self.__check_response(response)
return json.loads(response.text)
async def gen_title(self, convo_id: str, message_id: str) -> None:
"""
Generate title for conversation
"""
url = f"{self.base_url}conversation/gen_title/{convo_id}"
response = await self.session.post(
url,
data=json.dumps(
{"message_id": message_id, "model": "text-davinci-002-render"},
),
)
await self.__check_response(response)
async def change_title(self, convo_id: str, title: str) -> None:
"""
Change title of conversation
:param convo_id: UUID of conversation
:param title: String
"""
url = f"{self.base_url}conversation/{convo_id}"
response = await self.session.patch(url, data=f'{{"title": "{title}"}}')
await self.__check_response(response)
async def delete_conversation(self, convo_id: str) -> None:
"""
Delete conversation
:param convo_id: UUID of conversation
"""
url = f"{self.base_url}conversation/{convo_id}"
response = await self.session.patch(url, data='{"is_visible": false}')
await self.__check_response(response)
async def clear_conversations(self) -> None:
"""
Delete all conversations
"""
url = f"{self.base_url}conversations"
response = await self.session.patch(url, data='{"is_visible": false}')
await self.__check_response(response)
async def __map_conversations(self) -> None:
conversations = await self.get_conversations()
histories = [await self.get_msg_history(x["id"]) for x in conversations]
for x, y in zip(conversations, histories):
self.conversation_mapping[x["id"]] = y["current_node"]
def __check_fields(self, data: dict) -> bool:
try:
data["message"]["content"]
except (TypeError, KeyError):
return False
return True
async def __check_response(self, response: httpx.Response) -> None:
        # Switched to httpx's built-in error handling (raise_for_status).
try:
response.raise_for_status()
except httpx.HTTPStatusError as e:
await response.aread()
error = t.Error(
source="OpenAI",
message=response.text,
code=response.status_code,
)
raise error from e
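# --- Illustrative sketch (editor addition, not part of the original module) ---
# A minimal way to consume the async ``ask`` generator above. ``_example_async_stream``
# is a hypothetical helper: it assumes a fully configured async chatbot instance is
# passed in and relies only on the yielded dict shape documented in ``ask`` (each
# chunk carries the full "message" accumulated so far).
async def _example_async_stream(chatbot, prompt: str = "Hello, how are you?") -> str:
    prev_text = ""
    async for data in chatbot.ask(prompt, auto_continue=True):
        # Print only the newly generated suffix of the accumulated message.
        print(data["message"][len(prev_text) :], end="", flush=True)
        prev_text = data["message"]
    return prev_text
# Drive it with e.g. ``asyncio.run(_example_async_stream(chatbot))`` (not run on import).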
get_input = logger(is_timed=False)(get_input)
@logger(is_timed=False)
def configure() -> dict:
"""
Looks for a config file in the following locations:
"""
config_files: list[Path] = [Path("config.json")]
if xdg_config_home := getenv("XDG_CONFIG_HOME"):
config_files.append(Path(xdg_config_home, "revChatGPT/config.json"))
if user_home := getenv("HOME"):
config_files.append(Path(user_home, ".config/revChatGPT/config.json"))
if windows_home := getenv("HOMEPATH"):
config_files.append(Path(f"{windows_home}/.config/revChatGPT/config.json"))
if config_file := next((f for f in config_files if f.exists()), None):
with open(config_file, encoding="utf-8") as f:
config = json.load(f)
else:
print("No config file found.")
raise FileNotFoundError("No config file found.")
return config
@logger(is_timed=False)
def main(config: dict) -> NoReturn:
"""
Main function for the chatGPT program.
"""
chatbot = Chatbot(
config,
conversation_id=config.get("conversation_id"),
parent_id=config.get("parent_id"),
)
def handle_commands(command: str) -> bool:
if command == "!help":
print(
"""
!help - Show this message
!reset - Forget the current conversation
!config - Show the current configuration
!rollback x - Rollback the conversation (x being the number of messages to rollback)
!exit - Exit this program
!setconversation - Changes the conversation
""",
)
elif command == "!reset":
chatbot.reset_chat()
print("Chat session successfully reset.")
elif command == "!config":
print(json.dumps(chatbot.config, indent=4))
elif command.startswith("!rollback"):
try:
rollback = int(command.split(" ")[1])
except IndexError:
logging.exception(
"No number specified, rolling back 1 message",
stack_info=True,
)
rollback = 1
chatbot.rollback_conversation(rollback)
print(f"Rolled back {rollback} messages.")
elif command.startswith("!setconversation"):
try:
chatbot.conversation_id = chatbot.config[
"conversation_id"
] = command.split(" ")[1]
print("Conversation has been changed")
except IndexError:
log.exception(
"Please include conversation UUID in command",
stack_info=True,
)
print("Please include conversation UUID in command")
elif command.startswith("!continue"):
print()
print(f"{bcolors.OKGREEN + bcolors.BOLD}Chatbot: {bcolors.ENDC}")
prev_text = ""
for data in chatbot.continue_write():
message = data["message"][len(prev_text) :]
print(message, end="", flush=True)
prev_text = data["message"]
print(bcolors.ENDC)
print()
elif command == "!exit":
exit()
else:
return False
return True
session = create_session()
completer = create_completer(
[
"!help",
"!reset",
"!config",
"!rollback",
"!exit",
"!setconversation",
"!continue",
],
)
print()
try:
while True:
print(f"{bcolors.OKBLUE + bcolors.BOLD}You: {bcolors.ENDC}")
prompt = get_input(session=session, completer=completer)
if prompt.startswith("!") and handle_commands(prompt):
continue
print()
print(f"{bcolors.OKGREEN + bcolors.BOLD}Chatbot: {bcolors.ENDC}")
prev_text = ""
for data in chatbot.ask(prompt, auto_continue=True):
message = data["message"][len(prev_text) :]
print(message, end="", flush=True)
prev_text = data["message"]
print(bcolors.ENDC)
print()
except (KeyboardInterrupt, EOFError):
exit()
except Exception as exc:
error = t.CLIError("command line program unknown error")
raise error from exc
if __name__ == "__main__":
print(
f"""
ChatGPT - A command-line interface to OpenAI's ChatGPT (https://chat.openai.com/chat)
Repo: github.com/acheong08/ChatGPT
Version: {__version__}
""",
)
print("Type '!help' to show a full list of commands")
print(
f"{bcolors.BOLD}{bcolors.WARNING}Press Esc followed by Enter or Alt+Enter to send a message.{bcolors.ENDC}",
)
main(configure())
| [
"text",
"content_type"
] |
2024-01-10 | Nikos1001/htn | backend~db.py | from psycopg2 import OperationalError
import psycopg2
import cohere
import json
connector = psycopg2.connect('postgresql://hiatus:[email protected]:26257/defaultdb?sslmode=verify-full')
connector.autocommit = True
def execute_query(query):
cursor = connector.cursor()
try:
cursor.execute(query)
print("Query success")
except OperationalError as err:
print(f"Error {err}")
# execute_query('DROP TABLE deck_list')
execute_query("CREATE TABLE IF NOT EXISTS deck_list (id SERIAL PRIMARY KEY, deck JSON)")
def add_to_db(deck):
deck_str = json.dumps(deck).replace('\'', '')
print(deck_str)
add_query = f"INSERT INTO deck_list (deck) VALUES ('{deck_str}')"
execute_query(add_query)
def retrieve_db():
deck_list = []
cursor = connector.cursor()
try:
cursor.execute("SELECT * FROM deck_list")
for deck in cursor.fetchall():
deck_list.append(deck[1])
except OperationalError as err:
print(f"The error '{err}' occurred")
return deck_list
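# --- Illustrative sketch (editor addition): round-tripping a deck ---
# A hypothetical helper showing how the functions above fit together. It assumes the
# ``deck_list`` table created above exists and that the connection string is valid;
# the sample deck contents are placeholders.
def _example_roundtrip():
    deck = {"name": "Biology 101", "cards": [{"front": "Mitochondria", "back": "Powerhouse of the cell"}]}
    add_to_db(deck)        # serialised to JSON and inserted into deck_list
    decks = retrieve_db()  # returns every stored deck (JSON column) as a Python dict
    print(f"{len(decks)} deck(s) stored; most recent: {decks[-1]}")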
| [] |
2024-01-10 | iusztinpaul/hands-on-llms | modules~financial_bot~financial_bot~chains.py | import time
from typing import Any, Dict, List, Optional
import qdrant_client
from langchain import chains
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.llms import HuggingFacePipeline
from unstructured.cleaners.core import (
clean,
clean_extra_whitespace,
clean_non_ascii_chars,
group_broken_paragraphs,
replace_unicode_quotes,
)
from financial_bot.embeddings import EmbeddingModelSingleton
from financial_bot.template import PromptTemplate
class StatelessMemorySequentialChain(chains.SequentialChain):
"""
A sequential chain that uses a stateless memory to store context between calls.
This chain overrides the _call and prep_outputs methods to load and clear the memory
before and after each call, respectively.
"""
history_input_key: str = "to_load_history"
def _call(self, inputs: Dict[str, str], **kwargs) -> Dict[str, str]:
"""
Override _call to load history before calling the chain.
This method loads the history from the input dictionary and saves it to the
stateless memory. It then updates the inputs dictionary with the memory values
and removes the history input key. Finally, it calls the parent _call method
with the updated inputs and returns the results.
"""
to_load_history = inputs[self.history_input_key]
for (
human,
ai,
) in to_load_history:
self.memory.save_context(
inputs={self.memory.input_key: human},
outputs={self.memory.output_key: ai},
)
memory_values = self.memory.load_memory_variables({})
inputs.update(memory_values)
del inputs[self.history_input_key]
return super()._call(inputs, **kwargs)
def prep_outputs(
self,
inputs: Dict[str, str],
outputs: Dict[str, str],
return_only_outputs: bool = False,
) -> Dict[str, str]:
"""
Override prep_outputs to clear the internal memory after each call.
This method calls the parent prep_outputs method to get the results, then
clears the stateless memory and removes the memory key from the results
dictionary. It then returns the updated results.
"""
results = super().prep_outputs(inputs, outputs, return_only_outputs)
# Clear the internal memory.
self.memory.clear()
if self.memory.memory_key in results:
results[self.memory.memory_key] = ""
return results
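# --- Illustrative sketch (editor addition): how ``to_load_history`` is consumed ---
# The chain above expects the caller to pass prior (human, ai) exchanges on every call
# and wipes its memory again afterwards. A hypothetical invocation, assuming
# ``bot_chain`` is a fully assembled StatelessMemorySequentialChain built elsewhere
# in the project:
#
#     bot_chain.run(
#         about_me="I am a student investing a small monthly budget.",
#         question="Should I keep holding index funds?",
#         to_load_history=[("What is an ETF?", "An ETF is a basket of securities...")],
#     )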
class ContextExtractorChain(Chain):
"""
Encode the question, search the vector store for top-k articles and return
context news from documents collection of Alpaca news.
Attributes:
-----------
top_k : int
The number of top matches to retrieve from the vector store.
embedding_model : EmbeddingModelSingleton
The embedding model to use for encoding the question.
vector_store : qdrant_client.QdrantClient
The vector store to search for matches.
vector_collection : str
The name of the collection to search in the vector store.
"""
top_k: int = 1
embedding_model: EmbeddingModelSingleton
vector_store: qdrant_client.QdrantClient
vector_collection: str
@property
def input_keys(self) -> List[str]:
return ["about_me", "question"]
@property
def output_keys(self) -> List[str]:
return ["context"]
def _call(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
_, quest_key = self.input_keys
question_str = inputs[quest_key]
cleaned_question = self.clean(question_str)
# TODO: Instead of cutting the question at 'max_input_length', chunk the question in 'max_input_length' chunks,
# pass them through the model and average the embeddings.
cleaned_question = cleaned_question[: self.embedding_model.max_input_length]
embeddings = self.embedding_model(cleaned_question)
# TODO: Using the metadata, use the filter to take into consideration only the news from the last 24 hours
# (or other time frame).
matches = self.vector_store.search(
query_vector=embeddings,
k=self.top_k,
collection_name=self.vector_collection,
)
context = ""
for match in matches:
context += match.payload["summary"] + "\n"
return {
"context": context,
}
def clean(self, question: str) -> str:
"""
Clean the input question by removing unwanted characters.
Parameters:
-----------
question : str
The input question to clean.
Returns:
--------
str
The cleaned question.
"""
question = clean(question)
question = replace_unicode_quotes(question)
question = clean_non_ascii_chars(question)
return question
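# --- Illustrative sketch (editor addition): wiring the context extractor ---
# Hypothetical construction; it assumes a reachable Qdrant instance and that the
# embedding singleton can be built with its defaults. The collection name and URL
# are placeholders.
#
#     context_chain = ContextExtractorChain(
#         top_k=3,
#         embedding_model=EmbeddingModelSingleton(),
#         vector_store=qdrant_client.QdrantClient(url="http://localhost:6333"),
#         vector_collection="alpaca_financial_news",
#     )
#     context_chain({"about_me": "I day trade.", "question": "What moved tech stocks today?"})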
class FinancialBotQAChain(Chain):
"""This custom chain handles LLM generation upon given prompt"""
hf_pipeline: HuggingFacePipeline
template: PromptTemplate
@property
def input_keys(self) -> List[str]:
"""Returns a list of input keys for the chain"""
return ["context"]
@property
def output_keys(self) -> List[str]:
"""Returns a list of output keys for the chain"""
return ["answer"]
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Calls the chain with the given inputs and returns the output"""
inputs = self.clean(inputs)
prompt = self.template.format_infer(
{
"user_context": inputs["about_me"],
"news_context": inputs["context"],
"chat_history": inputs["chat_history"],
"question": inputs["question"],
}
)
start_time = time.time()
response = self.hf_pipeline(prompt["prompt"])
end_time = time.time()
duration_milliseconds = (end_time - start_time) * 1000
if run_manager:
run_manager.on_chain_end(
outputs={
"answer": response,
},
# TODO: Count tokens instead of using len().
metadata={
"prompt": prompt["prompt"],
"prompt_template_variables": prompt["payload"],
"prompt_template": self.template.infer_raw_template,
"usage.prompt_tokens": len(prompt["prompt"]),
"usage.total_tokens": len(prompt["prompt"]) + len(response),
"usage.actual_new_tokens": len(response),
"duration_milliseconds": duration_milliseconds,
},
)
return {"answer": response}
def clean(self, inputs: Dict[str, str]) -> Dict[str, str]:
"""Cleans the inputs by removing extra whitespace and grouping broken paragraphs"""
for key, input in inputs.items():
cleaned_input = clean_extra_whitespace(input)
cleaned_input = group_broken_paragraphs(cleaned_input)
inputs[key] = cleaned_input
return inputs
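# --- Illustrative sketch (editor addition): the QA chain on its own ---
# Hypothetical construction; the HuggingFace pipeline and PromptTemplate are assumed
# to be created elsewhere in the project (the names below are placeholders).
#
#     qa_chain = FinancialBotQAChain(hf_pipeline=llm_pipeline, template=prompt_template)
#     qa_chain(
#         {
#             "about_me": "I am a retail investor.",
#             "context": "Tech stocks rallied after strong earnings.",
#             "chat_history": "",
#             "question": "Is now a good time to rebalance?",
#         }
#     )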
| [
"user_context",
"question",
"chat_history",
"context",
"news_context"
] |
2024-01-10 | iusztinpaul/hands-on-llms | modules~financial_bot~financial_bot~handlers.py | from typing import Any, Dict
import comet_llm
from langchain.callbacks.base import BaseCallbackHandler
from financial_bot import constants
class CometLLMMonitoringHandler(BaseCallbackHandler):
"""
A callback handler for monitoring LLM models using Comet.ml.
Args:
project_name (str): The name of the Comet.ml project to log to.
llm_model_id (str): The ID of the LLM model to use for inference.
llm_qlora_model_id (str): The ID of the PEFT model to use for inference.
llm_inference_max_new_tokens (int): The maximum number of new tokens to generate during inference.
llm_inference_temperature (float): The temperature to use during inference.
"""
def __init__(
self,
project_name: str = None,
llm_model_id: str = constants.LLM_MODEL_ID,
llm_qlora_model_id: str = constants.LLM_QLORA_CHECKPOINT,
llm_inference_max_new_tokens: int = constants.LLM_INFERNECE_MAX_NEW_TOKENS,
llm_inference_temperature: float = constants.LLM_INFERENCE_TEMPERATURE,
):
self._project_name = project_name
self._llm_model_id = llm_model_id
self._llm_qlora_model_id = llm_qlora_model_id
self._llm_inference_max_new_tokens = llm_inference_max_new_tokens
self._llm_inference_temperature = llm_inference_temperature
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""
A callback function that logs the prompt and output to Comet.ml.
Args:
outputs (Dict[str, Any]): The output of the LLM model.
**kwargs (Any): Additional arguments passed to the function.
"""
should_log_prompt = "metadata" in kwargs
if should_log_prompt:
metadata = kwargs["metadata"]
comet_llm.log_prompt(
project=self._project_name,
prompt=metadata["prompt"],
output=outputs["answer"],
prompt_template=metadata["prompt_template"],
prompt_template_variables=metadata["prompt_template_variables"],
metadata={
"usage.prompt_tokens": metadata["usage.prompt_tokens"],
"usage.total_tokens": metadata["usage.total_tokens"],
"usage.max_new_tokens": self._llm_inference_max_new_tokens,
"usage.temperature": self._llm_inference_temperature,
"usage.actual_new_tokens": metadata["usage.actual_new_tokens"],
"model": self._llm_model_id,
"peft_model": self._llm_qlora_model_id,
},
duration=metadata["duration_milliseconds"],
)
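# --- Illustrative sketch (editor addition): attaching the handler ---
# The handler only logs when ``on_chain_end`` receives a ``metadata`` kwarg, as
# FinancialBotQAChain does. Hypothetical wiring, assuming ``bot_chain`` is the
# assembled bot chain and Comet credentials are configured in the environment:
#
#     handler = CometLLMMonitoringHandler(project_name="financial-bot-monitoring")
#     bot_chain.run(
#         about_me="...", question="...", to_load_history=[], callbacks=[handler]
#     )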
| [
"False"
] |
2024-01-10 | deepinfra/langchain | libs~langchain~langchain~vectorstores~chroma.py | from __future__ import annotations
import logging
import uuid
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Tuple,
Type,
)
import numpy as np
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import xor_args
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
if TYPE_CHECKING:
import chromadb
import chromadb.config
from chromadb.api.types import ID, OneOrMany, Where, WhereDocument
logger = logging.getLogger()
DEFAULT_K = 4 # Number of Documents to return.
def _results_to_docs(results: Any) -> List[Document]:
return [doc for doc, _ in _results_to_docs_and_scores(results)]
def _results_to_docs_and_scores(results: Any) -> List[Tuple[Document, float]]:
return [
# TODO: Chroma can do batch querying,
# we shouldn't hard code to the 1st result
(Document(page_content=result[0], metadata=result[1] or {}), result[2])
for result in zip(
results["documents"][0],
results["metadatas"][0],
results["distances"][0],
)
]
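# --- Illustrative sketch (editor addition): the raw query payload these helpers unpack ---
# Chroma returns parallel, per-query lists; the helpers above read index 0 of each.
# A hypothetical single-query result looks roughly like:
#
#     {
#         "documents": [["doc text A", "doc text B"]],
#         "metadatas": [[{"source": "a.txt"}, None]],
#         "distances": [[0.12, 0.57]],
#     }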
class Chroma(VectorStore):
"""`ChromaDB` vector store.
To use, you should have the ``chromadb`` python package installed.
Example:
.. code-block:: python
from langchain.vectorstores import Chroma
from langchain.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectorstore = Chroma("langchain_store", embeddings)
"""
_LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain"
def __init__(
self,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
embedding_function: Optional[Embeddings] = None,
persist_directory: Optional[str] = None,
client_settings: Optional[chromadb.config.Settings] = None,
collection_metadata: Optional[Dict] = None,
client: Optional[chromadb.Client] = None,
relevance_score_fn: Optional[Callable[[float], float]] = None,
) -> None:
"""Initialize with a Chroma client."""
try:
import chromadb
import chromadb.config
except ImportError:
raise ImportError(
"Could not import chromadb python package. "
"Please install it with `pip install chromadb`."
)
if client is not None:
self._client_settings = client_settings
self._client = client
self._persist_directory = persist_directory
else:
if client_settings:
# If client_settings is provided with persist_directory specified,
# then it is "in-memory and persisting to disk" mode.
client_settings.persist_directory = (
persist_directory or client_settings.persist_directory
)
if client_settings.persist_directory is not None:
# Maintain backwards compatibility with chromadb < 0.4.0
major, minor, _ = chromadb.__version__.split(".")
if int(major) == 0 and int(minor) < 4:
client_settings.chroma_db_impl = "duckdb+parquet"
_client_settings = client_settings
elif persist_directory:
# Maintain backwards compatibility with chromadb < 0.4.0
major, minor, _ = chromadb.__version__.split(".")
if int(major) == 0 and int(minor) < 4:
_client_settings = chromadb.config.Settings(
chroma_db_impl="duckdb+parquet",
)
else:
_client_settings = chromadb.config.Settings(is_persistent=True)
_client_settings.persist_directory = persist_directory
else:
_client_settings = chromadb.config.Settings()
self._client_settings = _client_settings
self._client = chromadb.Client(_client_settings)
self._persist_directory = (
_client_settings.persist_directory or persist_directory
)
self._embedding_function = embedding_function
self._collection = self._client.get_or_create_collection(
name=collection_name,
embedding_function=self._embedding_function.embed_documents
if self._embedding_function is not None
else None,
metadata=collection_metadata,
)
self.override_relevance_score_fn = relevance_score_fn
@property
def embeddings(self) -> Optional[Embeddings]:
return self._embedding_function
@xor_args(("query_texts", "query_embeddings"))
def __query_collection(
self,
query_texts: Optional[List[str]] = None,
query_embeddings: Optional[List[List[float]]] = None,
n_results: int = 4,
where: Optional[Dict[str, str]] = None,
where_document: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Query the chroma collection."""
try:
import chromadb # noqa: F401
except ImportError:
raise ValueError(
"Could not import chromadb python package. "
"Please install it with `pip install chromadb`."
)
return self._collection.query(
query_texts=query_texts,
query_embeddings=query_embeddings,
n_results=n_results,
where=where,
where_document=where_document,
**kwargs,
)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts (Iterable[str]): Texts to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
ids (Optional[List[str]], optional): Optional list of IDs.
Returns:
List[str]: List of IDs of the added texts.
"""
# TODO: Handle the case where the user doesn't provide ids on the Collection
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
embeddings = None
texts = list(texts)
if self._embedding_function is not None:
embeddings = self._embedding_function.embed_documents(texts)
if metadatas:
# fill metadatas with empty dicts if somebody
# did not specify metadata for all texts
length_diff = len(texts) - len(metadatas)
if length_diff:
metadatas = metadatas + [{}] * length_diff
empty_ids = []
non_empty_ids = []
for idx, m in enumerate(metadatas):
if m:
non_empty_ids.append(idx)
else:
empty_ids.append(idx)
if non_empty_ids:
metadatas = [metadatas[idx] for idx in non_empty_ids]
texts_with_metadatas = [texts[idx] for idx in non_empty_ids]
embeddings_with_metadatas = (
[embeddings[idx] for idx in non_empty_ids] if embeddings else None
)
ids_with_metadata = [ids[idx] for idx in non_empty_ids]
try:
self._collection.upsert(
metadatas=metadatas,
embeddings=embeddings_with_metadatas,
documents=texts_with_metadatas,
ids=ids_with_metadata,
)
except ValueError as e:
if "Expected metadata value to be" in str(e):
msg = (
"Try filtering complex metadata from the document using "
"langchain.vectorstore.utils.filter_complex_metadata."
)
raise ValueError(e.args[0] + "\n\n" + msg)
else:
raise e
if empty_ids:
texts_without_metadatas = [texts[j] for j in empty_ids]
embeddings_without_metadatas = (
[embeddings[j] for j in empty_ids] if embeddings else None
)
ids_without_metadatas = [ids[j] for j in empty_ids]
self._collection.upsert(
embeddings=embeddings_without_metadatas,
documents=texts_without_metadatas,
ids=ids_without_metadatas,
)
else:
self._collection.upsert(
embeddings=embeddings,
documents=texts,
ids=ids,
)
return ids
def similarity_search(
self,
query: str,
k: int = DEFAULT_K,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Run similarity search with Chroma.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Document]: List of documents most similar to the query text.
"""
docs_and_scores = self.similarity_search_with_score(query, k, filter=filter)
return [doc for doc, _ in docs_and_scores]
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = DEFAULT_K,
filter: Optional[Dict[str, str]] = None,
where_document: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding (List[float]): Embedding to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query vector.
"""
results = self.__query_collection(
query_embeddings=embedding,
n_results=k,
where=filter,
where_document=where_document,
)
return _results_to_docs(results)
def similarity_search_by_vector_with_relevance_scores(
self,
embedding: List[float],
k: int = DEFAULT_K,
filter: Optional[Dict[str, str]] = None,
where_document: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""
Return docs most similar to embedding vector and similarity score.
Args:
embedding (List[float]): Embedding to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of documents most similar to
the query text and cosine distance in float for each.
Lower score represents more similarity.
"""
results = self.__query_collection(
query_embeddings=embedding,
n_results=k,
where=filter,
where_document=where_document,
)
return _results_to_docs_and_scores(results)
def similarity_search_with_score(
self,
query: str,
k: int = DEFAULT_K,
filter: Optional[Dict[str, str]] = None,
where_document: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Run similarity search with Chroma with distance.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of documents most similar to
the query text and cosine distance in float for each.
Lower score represents more similarity.
"""
if self._embedding_function is None:
results = self.__query_collection(
query_texts=[query],
n_results=k,
where=filter,
where_document=where_document,
)
else:
query_embedding = self._embedding_function.embed_query(query)
results = self.__query_collection(
query_embeddings=[query_embedding],
n_results=k,
where=filter,
where_document=where_document,
)
return _results_to_docs_and_scores(results)
def _select_relevance_score_fn(self) -> Callable[[float], float]:
"""
The 'correct' relevance function
may differ depending on a few things, including:
- the distance / similarity metric used by the VectorStore
- the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
- embedding dimensionality
- etc.
"""
if self.override_relevance_score_fn:
return self.override_relevance_score_fn
distance = "l2"
distance_key = "hnsw:space"
metadata = self._collection.metadata
if metadata and distance_key in metadata:
distance = metadata[distance_key]
if distance == "cosine":
return self._cosine_relevance_score_fn
elif distance == "l2":
return self._euclidean_relevance_score_fn
elif distance == "ip":
return self._max_inner_product_relevance_score_fn
else:
raise ValueError(
"No supported normalization function"
f" for distance metric of type: {distance}."
"Consider providing relevance_score_fn to Chroma constructor."
)
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = DEFAULT_K,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
where_document: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents selected by maximal marginal relevance.
"""
results = self.__query_collection(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
where_document=where_document,
include=["metadatas", "documents", "distances", "embeddings"],
)
mmr_selected = maximal_marginal_relevance(
np.array(embedding, dtype=np.float32),
results["embeddings"][0],
k=k,
lambda_mult=lambda_mult,
)
candidates = _results_to_docs(results)
selected_results = [r for i, r in enumerate(candidates) if i in mmr_selected]
return selected_results
def max_marginal_relevance_search(
self,
query: str,
k: int = DEFAULT_K,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
where_document: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents selected by maximal marginal relevance.
"""
if self._embedding_function is None:
raise ValueError(
"For MMR search, you must specify an embedding function on" "creation."
)
embedding = self._embedding_function.embed_query(query)
docs = self.max_marginal_relevance_search_by_vector(
embedding,
k,
fetch_k,
lambda_mult=lambda_mult,
filter=filter,
where_document=where_document,
)
return docs
def delete_collection(self) -> None:
"""Delete the collection."""
self._client.delete_collection(self._collection.name)
def get(
self,
ids: Optional[OneOrMany[ID]] = None,
where: Optional[Where] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
where_document: Optional[WhereDocument] = None,
include: Optional[List[str]] = None,
) -> Dict[str, Any]:
"""Gets the collection.
Args:
ids: The ids of the embeddings to get. Optional.
where: A Where type dict used to filter results by.
E.g. `{"color" : "red", "price": 4.20}`. Optional.
limit: The number of documents to return. Optional.
offset: The offset to start returning results from.
Useful for paging results with limit. Optional.
where_document: A WhereDocument type dict used to filter by the documents.
E.g. `{$contains: "hello"}`. Optional.
include: A list of what to include in the results.
Can contain `"embeddings"`, `"metadatas"`, `"documents"`.
Ids are always included.
Defaults to `["metadatas", "documents"]`. Optional.
"""
kwargs = {
"ids": ids,
"where": where,
"limit": limit,
"offset": offset,
"where_document": where_document,
}
if include is not None:
kwargs["include"] = include
return self._collection.get(**kwargs)
def persist(self) -> None:
"""Persist the collection.
This can be used to explicitly persist the data to disk.
It will also be called automatically when the object is destroyed.
"""
if self._persist_directory is None:
raise ValueError(
"You must specify a persist_directory on"
"creation to persist the collection."
)
import chromadb
# Maintain backwards compatibility with chromadb < 0.4.0
major, minor, _ = chromadb.__version__.split(".")
if int(major) == 0 and int(minor) < 4:
self._client.persist()
def update_document(self, document_id: str, document: Document) -> None:
"""Update a document in the collection.
Args:
document_id (str): ID of the document to update.
document (Document): Document to update.
"""
text = document.page_content
metadata = document.metadata
if self._embedding_function is None:
raise ValueError(
"For update, you must specify an embedding function on creation."
)
embeddings = self._embedding_function.embed_documents([text])
self._collection.update(
ids=[document_id],
embeddings=embeddings,
documents=[text],
metadatas=[metadata],
)
@classmethod
def from_texts(
cls: Type[Chroma],
texts: List[str],
embedding: Optional[Embeddings] = None,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
persist_directory: Optional[str] = None,
client_settings: Optional[chromadb.config.Settings] = None,
client: Optional[chromadb.Client] = None,
collection_metadata: Optional[Dict] = None,
**kwargs: Any,
) -> Chroma:
"""Create a Chroma vectorstore from a raw documents.
If a persist_directory is specified, the collection will be persisted there.
Otherwise, the data will be ephemeral in-memory.
Args:
texts (List[str]): List of texts to add to the collection.
collection_name (str): Name of the collection to create.
persist_directory (Optional[str]): Directory to persist the collection.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
ids (Optional[List[str]]): List of document IDs. Defaults to None.
client_settings (Optional[chromadb.config.Settings]): Chroma client settings
collection_metadata (Optional[Dict]): Collection configurations.
Defaults to None.
Returns:
Chroma: Chroma vectorstore.
"""
chroma_collection = cls(
collection_name=collection_name,
embedding_function=embedding,
persist_directory=persist_directory,
client_settings=client_settings,
client=client,
collection_metadata=collection_metadata,
**kwargs,
)
chroma_collection.add_texts(texts=texts, metadatas=metadatas, ids=ids)
return chroma_collection
@classmethod
def from_documents(
cls: Type[Chroma],
documents: List[Document],
embedding: Optional[Embeddings] = None,
ids: Optional[List[str]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
persist_directory: Optional[str] = None,
client_settings: Optional[chromadb.config.Settings] = None,
client: Optional[chromadb.Client] = None, # Add this line
collection_metadata: Optional[Dict] = None,
**kwargs: Any,
) -> Chroma:
"""Create a Chroma vectorstore from a list of documents.
If a persist_directory is specified, the collection will be persisted there.
Otherwise, the data will be ephemeral in-memory.
Args:
collection_name (str): Name of the collection to create.
persist_directory (Optional[str]): Directory to persist the collection.
ids (Optional[List[str]]): List of document IDs. Defaults to None.
documents (List[Document]): List of documents to add to the vectorstore.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
client_settings (Optional[chromadb.config.Settings]): Chroma client settings
collection_metadata (Optional[Dict]): Collection configurations.
Defaults to None.
Returns:
Chroma: Chroma vectorstore.
"""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return cls.from_texts(
texts=texts,
embedding=embedding,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
persist_directory=persist_directory,
client_settings=client_settings,
client=client,
collection_metadata=collection_metadata,
**kwargs,
)
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None:
"""Delete by vector IDs.
Args:
ids: List of ids to delete.
"""
self._collection.delete(ids=ids)
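# --- Illustrative sketch (editor addition): typical end-to-end usage ---
# A minimal flow, assuming OpenAI credentials are configured in the environment; the
# texts and persist directory below are placeholders.
def _example_chroma_usage():
    from langchain.embeddings.openai import OpenAIEmbeddings

    db = Chroma.from_texts(
        texts=["LangChain integrates vector stores.", "Chroma can persist to disk."],
        embedding=OpenAIEmbeddings(),
        persist_directory="./chroma_example",
    )
    docs = db.similarity_search("How does persistence work?", k=1)
    diverse = db.max_marginal_relevance_search("vector stores", k=1, fetch_k=2)
    db.persist()  # explicit flush; a no-op on chromadb >= 0.4, which persists automatically
    return docs, diverse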
| [] |
2024-01-10 | deepinfra/langchain | libs~langchain~langchain~llms~textgen.py | import json
import logging
from typing import Any, AsyncIterator, Dict, Iterator, List, Optional
import requests
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import LLM
from langchain.pydantic_v1 import Field
from langchain.schema.output import GenerationChunk
logger = logging.getLogger(__name__)
class TextGen(LLM):
"""text-generation-webui models.
To use, you should have the text-generation-webui installed, a model loaded,
and --api added as a command-line option.
Suggested installation, use one-click installer for your OS:
https://github.com/oobabooga/text-generation-webui#one-click-installers
Parameters below taken from text-generation-webui api example:
https://github.com/oobabooga/text-generation-webui/blob/main/api-examples/api-example.py
Example:
.. code-block:: python
from langchain.llms import TextGen
llm = TextGen(model_url="http://localhost:8500")
"""
model_url: str
"""The full URL to the textgen webui including http[s]://host:port """
preset: Optional[str] = None
"""The preset to use in the textgen webui """
max_new_tokens: Optional[int] = 250
"""The maximum number of tokens to generate."""
do_sample: bool = Field(True, alias="do_sample")
"""Do sample"""
temperature: Optional[float] = 1.3
"""Primary factor to control randomness of outputs. 0 = deterministic
(only the most likely token is used). Higher value = more randomness."""
top_p: Optional[float] = 0.1
"""If not set to 1, select tokens with probabilities adding up to less than this
number. Higher value = higher range of possible random results."""
typical_p: Optional[float] = 1
"""If not set to 1, select only tokens that are at least this much more likely to
appear than random tokens, given the prior text."""
epsilon_cutoff: Optional[float] = 0 # In units of 1e-4
"""Epsilon cutoff"""
eta_cutoff: Optional[float] = 0 # In units of 1e-4
"""ETA cutoff"""
repetition_penalty: Optional[float] = 1.18
"""Exponential penalty factor for repeating prior tokens. 1 means no penalty,
higher value = less repetition, lower value = more repetition."""
top_k: Optional[float] = 40
"""Similar to top_p, but select instead only the top_k most likely tokens.
Higher value = higher range of possible random results."""
min_length: Optional[int] = 0
"""Minimum generation length in tokens."""
no_repeat_ngram_size: Optional[int] = 0
"""If not set to 0, specifies the length of token sets that are completely blocked
from repeating at all. Higher values = blocks larger phrases,
lower values = blocks words or letters from repeating.
Only 0 or high values are a good idea in most cases."""
num_beams: Optional[int] = 1
"""Number of beams"""
penalty_alpha: Optional[float] = 0
"""Penalty Alpha"""
length_penalty: Optional[float] = 1
"""Length Penalty"""
early_stopping: bool = Field(False, alias="early_stopping")
"""Early stopping"""
seed: int = Field(-1, alias="seed")
"""Seed (-1 for random)"""
add_bos_token: bool = Field(True, alias="add_bos_token")
"""Add the bos_token to the beginning of prompts.
Disabling this can make the replies more creative."""
truncation_length: Optional[int] = 2048
"""Truncate the prompt up to this length. The leftmost tokens are removed if
the prompt exceeds this length. Most models require this to be at most 2048."""
ban_eos_token: bool = Field(False, alias="ban_eos_token")
"""Ban the eos_token. Forces the model to never end the generation prematurely."""
skip_special_tokens: bool = Field(True, alias="skip_special_tokens")
"""Skip special tokens. Some specific models need this unset."""
stopping_strings: Optional[List[str]] = []
"""A list of strings to stop generation when encountered."""
streaming: bool = False
"""Whether to stream the results, token by token."""
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling textgen."""
return {
"max_new_tokens": self.max_new_tokens,
"do_sample": self.do_sample,
"temperature": self.temperature,
"top_p": self.top_p,
"typical_p": self.typical_p,
"epsilon_cutoff": self.epsilon_cutoff,
"eta_cutoff": self.eta_cutoff,
"repetition_penalty": self.repetition_penalty,
"top_k": self.top_k,
"min_length": self.min_length,
"no_repeat_ngram_size": self.no_repeat_ngram_size,
"num_beams": self.num_beams,
"penalty_alpha": self.penalty_alpha,
"length_penalty": self.length_penalty,
"early_stopping": self.early_stopping,
"seed": self.seed,
"add_bos_token": self.add_bos_token,
"truncation_length": self.truncation_length,
"ban_eos_token": self.ban_eos_token,
"skip_special_tokens": self.skip_special_tokens,
"stopping_strings": self.stopping_strings,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model_url": self.model_url}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "textgen"
def _get_parameters(self, stop: Optional[List[str]] = None) -> Dict[str, Any]:
"""
Performs sanity check, preparing parameters in format needed by textgen.
Args:
stop (Optional[List[str]]): List of stop sequences for textgen.
Returns:
Dictionary containing the combined parameters.
"""
# Raise error if stop sequences are in both input and default params
# if self.stop and stop is not None:
if self.stopping_strings and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
if self.preset is None:
params = self._default_params
else:
params = {"preset": self.preset}
        # Prefer the configured stopping_strings, then the call-time stop list, else an empty list:
params["stop"] = self.stopping_strings or stop or []
return params
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the textgen web API and return the output.
Args:
prompt: The prompt to use for generation.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
from langchain.llms import TextGen
llm = TextGen(model_url="http://localhost:5000")
llm("Write a story about llamas.")
"""
if self.streaming:
combined_text_output = ""
for chunk in self._stream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
combined_text_output += chunk.text
print(prompt + combined_text_output)
result = combined_text_output
else:
url = f"{self.model_url}/api/v1/generate"
params = self._get_parameters(stop)
request = params.copy()
request["prompt"] = prompt
response = requests.post(url, json=request)
if response.status_code == 200:
result = response.json()["results"][0]["text"]
print(prompt + result)
else:
print(f"ERROR: response: {response}")
result = ""
return result
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the textgen web API and return the output.
Args:
prompt: The prompt to use for generation.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
from langchain.llms import TextGen
llm = TextGen(model_url="http://localhost:5000")
llm("Write a story about llamas.")
"""
if self.streaming:
combined_text_output = ""
async for chunk in self._astream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
combined_text_output += chunk.text
print(prompt + combined_text_output)
result = combined_text_output
else:
url = f"{self.model_url}/api/v1/generate"
params = self._get_parameters(stop)
request = params.copy()
request["prompt"] = prompt
response = requests.post(url, json=request)
if response.status_code == 200:
result = response.json()["results"][0]["text"]
print(prompt + result)
else:
print(f"ERROR: response: {response}")
result = ""
return result
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Yields results objects as they are generated in real time.
It also calls the callback manager's on_llm_new_token event with
similar parameters to the OpenAI LLM class method of the same name.
Args:
prompt: The prompts to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens being generated.
Yields:
A dictionary like objects containing a string token and metadata.
See text-generation-webui docs and below for more.
Example:
.. code-block:: python
from langchain.llms import TextGen
llm = TextGen(
model_url = "ws://localhost:5005"
streaming=True
)
for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'",
stop=["'","\n"]):
print(chunk, end='', flush=True)
"""
try:
import websocket
except ImportError:
raise ImportError(
"The `websocket-client` package is required for streaming."
)
params = {**self._get_parameters(stop), **kwargs}
url = f"{self.model_url}/api/v1/stream"
request = params.copy()
request["prompt"] = prompt
websocket_client = websocket.WebSocket()
websocket_client.connect(url)
websocket_client.send(json.dumps(request))
while True:
result = websocket_client.recv()
result = json.loads(result)
if result["event"] == "text_stream":
chunk = GenerationChunk(
text=result["text"],
generation_info=None,
)
yield chunk
elif result["event"] == "stream_end":
websocket_client.close()
return
if run_manager:
run_manager.on_llm_new_token(token=chunk.text)
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
"""Yields results objects as they are generated in real time.
It also calls the callback manager's on_llm_new_token event with
similar parameters to the OpenAI LLM class method of the same name.
Args:
prompt: The prompts to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens being generated.
Yields:
A dictionary like objects containing a string token and metadata.
See text-generation-webui docs and below for more.
Example:
.. code-block:: python
from langchain.llms import TextGen
llm = TextGen(
model_url = "ws://localhost:5005"
streaming=True
)
for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'",
stop=["'","\n"]):
print(chunk, end='', flush=True)
"""
try:
import websocket
except ImportError:
raise ImportError(
"The `websocket-client` package is required for streaming."
)
params = {**self._get_parameters(stop), **kwargs}
url = f"{self.model_url}/api/v1/stream"
request = params.copy()
request["prompt"] = prompt
websocket_client = websocket.WebSocket()
websocket_client.connect(url)
websocket_client.send(json.dumps(request))
while True:
result = websocket_client.recv()
result = json.loads(result)
if result["event"] == "text_stream":
chunk = GenerationChunk(
text=result["text"],
generation_info=None,
)
yield chunk
elif result["event"] == "stream_end":
websocket_client.close()
return
if run_manager:
await run_manager.on_llm_new_token(token=chunk.text)
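# --- Illustrative sketch (editor addition): calling a local text-generation-webui ---
# Assumes the webui is running with ``--api`` enabled; the URL and prompt are
# placeholders.
def _example_textgen_usage() -> str:
    llm = TextGen(model_url="http://localhost:5000", max_new_tokens=64)
    return llm("Summarize what a llama is in one sentence.")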
| [] |
2024-01-10 | armper/unit-tests-ai-angular | fix_errors.py | import os
import xml.etree.ElementTree as ET
import openai
def extract_errors_from_xml(xml_file):
"""Extract the error messages and stack traces from the XML file."""
tree = ET.parse(xml_file)
root = tree.getroot()
error_messages = []
for testcase in root.findall('testcase'):
for error in testcase.findall('error'):
error_messages.append(error.get('message'))
error_messages.append(error.text) # This line extracts the stack trace
return '\n'.join(error_messages)
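# --- Illustrative sketch (editor addition): the surefire report shape parsed above ---
# A hypothetical fragment of a surefire XML report. Only <error> elements nested
# directly under <testcase> are collected (the message attribute plus the stack-trace
# body); <failure> elements from assertion failures are not picked up by this parser.
#
#     <testsuite name="com.example.FooTest" tests="1" errors="1">
#       <testcase classname="com.example.FooTest" name="testBar">
#         <error message="java.lang.NullPointerException">stack trace ...</error>
#       </testcase>
#     </testsuite>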
def fix_errors(test_path, current_unit_test_code, test_errors):
"""Attempt to fix the test errors using the OpenAI API."""
user_message = f'Fix the following errors in this Java unit test code:\n{current_unit_test_code}\nErrors:\n{test_errors}'
messages = [
{
"role": "system",
"content": "You are provided with a piece of Java unit test code with errors. Your task is to return the corrected code. Use only Junit 5. Return nothing but the code with no additional text."
},
{
"role": "user",
"content": user_message
}
]
response = openai.ChatCompletion.create(
model="gpt-4",
messages=messages,
temperature=0,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
if response['choices'] and response['choices'][0]['message']['role'] == 'assistant':
fixed_test_code = response['choices'][0]['message']['content']
# Overwriting the original test file with the corrected code using the full path
with open(test_path, 'w') as file:
file.write(fixed_test_code)
else:
print('Error: Failed to get corrected code from OpenAI.')
if __name__ == "__main__":
openai.api_key = os.environ['OPENAI_API_KEY']
surefire_reports_dir = 'target/surefire-reports'
all_errors = []
for file in os.listdir(surefire_reports_dir):
if file.endswith(".xml"):
file_path = os.path.join(surefire_reports_dir, file)
errors = extract_errors_from_xml(file_path)
if errors:
all_errors.append(errors)
error_detected = False
# Ensure the fixed_tests directory exists
fixed_tests_dir = 'fixed_tests'
if not os.path.exists(fixed_tests_dir):
os.makedirs(fixed_tests_dir)
with open('generated_test_path.txt', 'r') as file:
generated_tests = [os.path.abspath(line.strip()) for line in file.readlines()]
print("Current Working Directory:", os.getcwd())
if all_errors:
print("Test errors detected. Attempting to fix...")
for test_path in generated_tests:
test_name = os.path.basename(test_path).replace('.java', '')
print(f"Checking {test_name}...")
# Fetching the current unit test code
with open(test_path, 'r') as code_file:
current_unit_test_code = code_file.read()
# Extracting test errors from the error file
test_errors = '\n'.join(all_errors)
fix_errors(test_path, current_unit_test_code, test_errors)
error_detected = True
if not error_detected:
print("No test errors detected.")
| [
"Fix the following errors in this Java unit test code:\nPLACEHOLDER\nErrors:\nPLACEHOLDER",
"You are provided with a piece of Java unit test code with errors. Your task is to return the corrected code. Use only Junit 5. Return nothing but the code with no additional text."
] |
2024-01-10 | zapier/langchain-zapier | langchain~tools~zapier.py | """## Zapier Natural Language Actions API
\
Full docs here: https://nla.zapier.com/api/v1/dynamic/docs
**Zapier Natural Language Actions** gives you access to the 5k+ apps, 20k+ actions
on Zapier's platform through a natural language API interface.
NLA supports apps like Gmail, Salesforce, Trello, Slack, Asana, HubSpot, Google Sheets,
Microsoft Teams, and thousands more apps: https://zapier.com/apps
Zapier NLA handles ALL the underlying API auth and translation from
natural language --> underlying API call --> return simplified output for LLMs
The key idea is you, or your users, expose a set of actions via an oauth-like setup
window, which you can then query and execute via a REST API.
NLA offers both API Key and OAuth for signing NLA API requests.
1. Server-side (API Key): for quickly getting started, testing, and production scenarios
where LangChain will only use actions exposed in the developer's Zapier account
(and will use the developer's connected accounts on Zapier.com)
2. User-facing (Oauth): for production scenarios where you are deploying an end-user
facing application and LangChain needs access to end-user's exposed actions and
connected accounts on Zapier.com
This quick start will focus on the server-side use case for brevity.
Review [full docs](https://nla.zapier.com/api/v1/dynamic/docs) or reach out to
[email protected] for user-facing oauth developer support.
Typically you'd use SequentialChain, here's a basic example:
1. Use NLA to find an email in Gmail
2. Use LLMChain to generate a draft reply to (1)
3. Use NLA to send the draft reply (2) to someone in Slack via direct message
In code, below:
```python
import os
# get from https://platform.openai.com/
os.environ["OPENAI_API_KEY"] = "..."
# get from https://nla.zapier.com/demo/provider/debug (under User, after logging in):
os.environ["ZAPIER_NLA_API_KEY"] = "..."
from langchain.llms import OpenAI
from langchain.chains import LLMChain, TransformChain, SimpleSequentialChain
from langchain.prompts import PromptTemplate
from langchain.tools.zapier import ZapierNLAListActions, ZapierNLARunAction
from langchain.utilities.zapier import ZapierNLAWrapper
## step 0. expose gmail 'find email' and slack 'send channel message' actions
# first go here, log in, expose (enable) the two actions:
# https://nla.zapier.com/demo/start
# -- for this example, can leave all fields "Have AI guess"
# in an oauth scenario, you'd get your own <provider> id (instead of 'demo')
# which you route your users through first
actions = ZapierNLAWrapper().list()
## step 1. gmail find email
GMAIL_SEARCH_INSTRUCTIONS = "Grab the latest email from Bryan Helmig"
def nla_gmail(inputs):
action = next((
a for a in actions if a["description"].startswith("Gmail: Find Email")
), None)
data = ZapierNLARunAction(action_id=action["id"]).run(inputs["instructions"])
return {
"email_data": data
}
gmail_chain = TransformChain(
input_variables=["instructions"],
output_variables=["email_data"],
transform=nla_gmail
)
## step 2. generate draft reply
template = \"""You are an assistant who drafts replies to an incoming email.
Output draft reply in plain text (not JSON).
Incoming email:
{email_data}
Draft email reply:\"""
prompt_template = PromptTemplate(input_variables=["email_data"], template=template)
reply_chain = LLMChain(llm=OpenAI(temperature=.7), prompt=prompt_template)
## step 3. send draft reply via a slack direct message
SLACK_HANDLE = "@knoop"
def nla_slack(inputs):
action = next(
(a for a in actions if a["description"].startswith("Slack: Send Direct Message")
), None)
instructions = f'Send this to {SLACK_HANDLE} in Slack: {inputs["draft_reply"]}'
return {"slack_data": ZapierNLARunAction(action_id=action["id"]).run(instructions)}
slack_chain = TransformChain(
input_variables=["draft_reply"],
output_variables=["slack_data"],
transform=nla_slack
)
## finally, execute
overall_chain = SimpleSequentialChain(
chains=[gmail_chain, reply_chain, slack_chain],
verbose=True
)
overall_chain.run(GMAIL_SEARCH_INSTRUCTIONS)
```
"""
from typing import Optional
from langchain.tools.base import BaseTool
from langchain.utilities.zapier import ZapierNLAWrapper
zapier_nla_base_desc = (
"A wrapper around Zapier NLA. "
"Can be used to call or retrieve data from 5k+ apps, 20k+ actions"
"on the Zapier platform."
)
class ZapierNLARunAction(BaseTool):
"""
Args:
action_id: a specific action ID (from list actions) of the action to execute
(the set api_key must be associated with the action owner)
instructions: a natural language instruction string for using the action
(eg. "get the latest email from Mike Knoop" for "Gmail: find email" action)
params: a dict, optional. Any params provided will *override* AI guesses
from `instructions` (see "understanding the AI guessing flow" here:
https://nla.zapier.com/api/v1/dynamic/docs)
"""
name = "Zapier NLA: Run Action"
description = zapier_nla_base_desc + (
"This tool will run a specified action and return a stringified JSON result "
" of the API call. The return result is guarenteed to be less than ~500 words "
" (350 tokens), safe to insert back into another LLM prompt."
)
api_wrapper: ZapierNLAWrapper = ZapierNLAWrapper()
action_id: str
params: Optional[dict] = None
def _run(self, instructions: str) -> str:
"""Use the Zapier NLA tool to return a list of all exposed user actions."""
return self.api_wrapper.run_as_str(self.action_id, instructions, self.params)
async def _arun(self, _: str) -> str:
"""Use the Zapier NLA tool to return a list of all exposed user actions."""
raise NotImplementedError("ZapierNLAListActions does not support async")
ZapierNLARunAction.__doc__ = (
ZapierNLAWrapper.run.__doc__ + ZapierNLARunAction.__doc__ # type: ignore
)
# other useful actions
class ZapierNLAListActions(BaseTool):
"""
Args:
None
"""
name = "Zapier NLA: List Actions"
description = zapier_nla_base_desc + (
"This tool returns a list of the user's exposed actions."
)
api_wrapper: ZapierNLAWrapper = ZapierNLAWrapper()
def _run(self, _: str) -> str:
"""Use the Zapier NLA tool to return a list of all exposed user actions."""
return self.api_wrapper.list_as_str()
async def _arun(self, _: str) -> str:
"""Use the Zapier NLA tool to return a list of all exposed user actions."""
raise NotImplementedError("ZapierNLAListActions does not support async")
ZapierNLAListActions.__doc__ = (
ZapierNLAWrapper.list.__doc__ + ZapierNLAListActions.__doc__ # type: ignore
)
| [] |
2024-01-10 | zapier/langchain-zapier | langchain~document_loaders~s3_file.py | """Loading logic for loading documents from an s3 file."""
import os
import tempfile
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader
class S3FileLoader(BaseLoader):
"""Loading logic for loading documents from s3."""
def __init__(self, bucket: str, key: str):
"""Initialize with bucket and key name."""
self.bucket = bucket
self.key = key
def load(self) -> List[Document]:
"""Load documents."""
try:
import boto3
except ImportError:
raise ValueError(
"Could not import boto3 python package. "
"Please it install it with `pip install boto3`."
)
s3 = boto3.client("s3")
with tempfile.TemporaryDirectory() as temp_dir:
file_path = f"{temp_dir}/{self.key}"
os.makedirs(os.path.dirname(file_path), exist_ok=True)
s3.download_file(self.bucket, self.key, file_path)
loader = UnstructuredFileLoader(file_path)
return loader.load()
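# Minimal usage sketch (not part of the original file). The bucket name and key are
# placeholders, and AWS credentials are assumed to be configured for boto3.
def _example_s3_load():
    loader = S3FileLoader(bucket="my-bucket", key="docs/report.pdf")
    return loader.load()  # list of Document objects extracted by unstructured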
| [] |
2024-01-10 | zapier/langchain-zapier | langchain~utilities~zapier.py | """Util that can interact with Zapier NLA.
Full docs here: https://nla.zapier.com/api/v1/dynamic/docs
Note: this wrapper currently implements only the `api_key` auth method for testing
and server-side production use cases (using the developer's connected accounts on
Zapier.com)
For use-cases where LangChain + Zapier NLA is powering a user-facing application, and
LangChain needs access to the end-user's connected accounts on Zapier.com, you'll need
to use oauth. Review the full docs above and reach out to [email protected] for
developer support.
"""
import json
from typing import Dict, List, Optional
import requests
from pydantic import BaseModel, Extra, root_validator
from requests import Request, Session
from langchain.utils import get_from_dict_or_env
class ZapierNLAWrapper(BaseModel):
"""Wrapper for Zapier NLA.
Full docs here: https://nla.zapier.com/api/v1/dynamic/docs
    Note: this wrapper currently implements only the `api_key` auth method for
    testing and server-side production use cases (using the developer's connected
accounts on Zapier.com)
For use-cases where LangChain + Zapier NLA is powering a user-facing application,
and LangChain needs access to the end-user's connected accounts on Zapier.com,
you'll need to use oauth. Review the full docs above and reach out to
[email protected] for developer support.
"""
zapier_nla_api_key: str
zapier_nla_api_base: str = "https://nla.zapier.com/api/v1/"
zapier_nla_api_dynamic_base: str = "https://nla.zapier.com/api/v1/dynamic/"
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _get_session(self) -> Session:
session = requests.Session()
session.headers.update(
{
"Accept": "application/json",
"Content-Type": "application/json",
}
)
session.params = {"api_key": self.zapier_nla_api_key}
return session
def _get_action_request(
self, action_id: str, instructions: str, params: Optional[Dict] = None
) -> Request:
data = params if params else {}
data.update(
{
"instructions": instructions,
}
)
return Request(
"POST",
self.zapier_nla_api_base + f"exposed/{action_id}/execute/",
json=data,
)
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
zapier_nla_api_key = get_from_dict_or_env(
values, "zapier_nla_api_key", "ZAPIER_NLA_API_KEY"
)
values["zapier_nla_api_key"] = zapier_nla_api_key
return values
def list(self) -> List[Dict]:
"""Returns a list of all exposed (enabled) actions associated with
current user (associated with the set api_key). Change your exposed
actions here: https://nla.zapier.com/demo/start/
The return list can be empty if no actions exposed. Else will contain
a list of action objects:
[{
"id": str,
"description": str,
"params": Dict[str, str]
}]
`params` will always contain an `instructions` key, the only required
param. All others optional and if provided will override any AI guesses
(see "understanding the AI guessing flow" here:
https://nla.zapier.com/api/v1/dynamic/docs)
"""
session = self._get_session()
response = session.get(self.zapier_nla_api_dynamic_base + "exposed/")
response.raise_for_status()
return response.json()["results"]
def run(
self, action_id: str, instructions: str, params: Optional[Dict] = None
) -> Dict:
"""Executes an action that is identified by action_id, must be exposed
(enabled) by the current user (associated with the set api_key). Change
your exposed actions here: https://nla.zapier.com/demo/start/
The return JSON is guaranteed to be less than ~500 words (350
tokens) making it safe to inject into the prompt of another LLM
call.
"""
session = self._get_session()
request = self._get_action_request(action_id, instructions, params)
response = session.send(session.prepare_request(request))
response.raise_for_status()
return response.json()["result"]
def preview(
self, action_id: str, instructions: str, params: Optional[Dict] = None
) -> Dict:
"""Same as run, but instead of actually executing the action, will
instead return a preview of params that have been guessed by the AI in
case you need to explicitly review before executing."""
session = self._get_session()
request = self._get_action_request(action_id, instructions, params)
        # The request body was supplied via `json=`, so the preview flag belongs on request.json.
        request.json.update(
{
"preview_only": True,
}
)
response = session.send(session.prepare_request(request))
response.raise_for_status()
return response.json()["params"]
def run_as_str(self, *args, **kwargs) -> str: # type: ignore[no-untyped-def]
"""Same as run, but returns a stringified version of the JSON for
        inserting back into an LLM."""
data = self.run(*args, **kwargs)
return json.dumps(data)
def preview_as_str(self, *args, **kwargs) -> str: # type: ignore[no-untyped-def]
"""Same as preview, but returns a stringified version of the JSON for
        inserting back into an LLM."""
data = self.preview(*args, **kwargs)
return json.dumps(data)
def list_as_str(self, *args, **kwargs) -> str: # type: ignore[no-untyped-def]
"""Same as list, but returns a stringified version of the JSON for
        inserting back into an LLM."""
actions = self.list(*args, **kwargs)
return json.dumps(actions)
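# Illustrative usage sketch (not part of the original module). It assumes ZAPIER_NLA_API_KEY
# is set and at least one action is exposed; the instruction text is only an example.
def _example_wrapper_usage():
    wrapper = ZapierNLAWrapper()
    actions = wrapper.list()
    if not actions:
        return None
    action_id = actions[0]["id"]
    print(wrapper.preview(action_id, "get the latest email from Mike Knoop"))  # guessed params only
    return wrapper.run(action_id, "get the latest email from Mike Knoop")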
| [] |
2024-01-10 | Pratik-Behera/helm | src~helm~benchmark~window_services~window_service_factory.py | from helm.proxy.models import get_model, get_model_names_with_tag, Model, WIDER_CONTEXT_WINDOW_TAG
from .ai21_window_service import AI21WindowService
from .anthropic_window_service import AnthropicWindowService
from .cohere_window_service import CohereWindowService, CohereCommandWindowService
from .luminous_window_service import (
LuminousBaseWindowService,
LuminousExtendedWindowService,
LuminousSupremeWindowService,
LuminousWorldWindowService,
)
from .openai_window_service import OpenAIWindowService
from .wider_openai_window_service import WiderOpenAIWindowService
from .mt_nlg_window_service import MTNLGWindowService
from .bloom_window_service import BloomWindowService
from .ice_window_service import ICEWindowService
from .santacoder_window_service import SantaCoderWindowService
from .gpt2_window_service import GPT2WindowService
from .gptj_window_service import GPTJWindowService
from .gptneox_window_service import GPTNeoXWindowService
from .opt_window_service import OPTWindowService
from .t0pp_window_service import T0ppWindowService
from .t511b_window_service import T511bWindowService
from .flan_t5_window_service import FlanT5WindowService
from .ul2_window_service import UL2WindowService
from .yalm_window_service import YaLMWindowService
from .window_service import WindowService
from .tokenizer_service import TokenizerService
class WindowServiceFactory:
@staticmethod
def get_window_service(model_name: str, service: TokenizerService) -> WindowService:
"""
Returns a `WindowService` given the name of the model.
Make sure this function returns instantaneously on repeated calls.
"""
model: Model = get_model(model_name)
organization: str = model.organization
engine: str = model.engine
window_service: WindowService
if model_name in get_model_names_with_tag(WIDER_CONTEXT_WINDOW_TAG):
window_service = WiderOpenAIWindowService(service)
# For the Google models, we approximate with the OpenAIWindowService
elif organization == "openai" or organization == "simple" or organization == "google":
window_service = OpenAIWindowService(service)
elif organization == "AlephAlpha":
if engine == "luminous-base":
window_service = LuminousBaseWindowService(service)
elif engine == "luminous-extended":
window_service = LuminousExtendedWindowService(service)
elif engine == "luminous-supreme":
window_service = LuminousSupremeWindowService(service)
elif engine == "luminous-world":
window_service = LuminousWorldWindowService(service)
else:
raise ValueError(f"Unhandled Aleph Alpha model: {engine}")
elif organization == "microsoft":
window_service = MTNLGWindowService(service)
elif organization == "anthropic":
window_service = AnthropicWindowService(service)
elif engine == "santacoder":
window_service = SantaCoderWindowService(service)
elif model_name == "huggingface/gpt2":
window_service = GPT2WindowService(service)
elif model_name == "together/bloom":
window_service = BloomWindowService(service)
elif model_name == "together/glm":
# From https://github.com/THUDM/GLM-130B, "the tokenizer is implemented based on
# icetk---a unified multimodal tokenizer for images, Chinese, and English."
window_service = ICEWindowService(service)
elif model_name in ["huggingface/gpt-j-6b", "together/gpt-j-6b", "gooseai/gpt-j-6b"]:
window_service = GPTJWindowService(service)
elif model_name in ["together/gpt-neox-20b", "gooseai/gpt-neo-20b", "together/gpt-neoxt-chat-base-20b"]:
window_service = GPTNeoXWindowService(service)
elif model_name == "together/h3-2.7b":
window_service = GPT2WindowService(service)
elif model_name in ["together/opt-66b", "together/opt-175b"]:
window_service = OPTWindowService(service)
elif model_name == "together/t0pp":
window_service = T0ppWindowService(service)
elif model_name == "together/t5-11b":
window_service = T511bWindowService(service)
elif model_name == "together/flan-t5-xxl":
window_service = FlanT5WindowService(service)
elif model_name == "together/ul2":
window_service = UL2WindowService(service)
elif model_name == "together/yalm":
window_service = YaLMWindowService(service)
elif organization == "cohere":
if "command" in engine:
window_service = CohereCommandWindowService(service)
else:
window_service = CohereWindowService(service)
elif organization == "ai21":
window_service = AI21WindowService(service=service, gpt2_window_service=GPT2WindowService(service))
else:
raise ValueError(f"Unhandled model name: {model_name}")
return window_service
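# Usage sketch (not part of the original file): `service` must be a TokenizerService
# supplied by the HELM runtime, and the model name must be registered in helm.proxy.models.
def _example_window_service(service: TokenizerService) -> WindowService:
    return WindowServiceFactory.get_window_service("huggingface/gpt2", service)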
| [] |
2024-01-10 | kentontroy/neo4j_game_of_thrones | graph_rag.py | from dotenv import load_dotenv
from langchain.chains import GraphCypherQAChain
from langchain.graphs import Neo4jGraph
from langchain.llms import LlamaCpp
import os
if __name__ == "__main__":
load_dotenv()
PATH = os.path.join(os.getenv("LLM_MODEL_PATH"), os.getenv("LLM_MODEL_FILE"))
llm = LlamaCpp(
model_path = PATH,
n_ctx = int(os.getenv("MODEL_PARAM_CONTEXT_LEN")),
n_batch = int(os.getenv("MODEL_PARAM_BATCH_SIZE")),
use_mlock = os.getenv("MODEL_PARAM_MLOCK"),
n_threads = int(os.getenv("MODEL_PARAM_THREADS")),
n_gpu_layers = 0,
temperature = 0,
f16_kv = True,
verbose = False
)
graph = Neo4jGraph(
url="bolt://localhost:7687", username="neo4j", password="cloudera"
)
print(graph.schema)
chain = GraphCypherQAChain.from_llm(llm, graph=graph, verbose=True, return_intermediate_steps=True)
chain.run("How many pages are in the book Game Of Thrones?")
| [] |
2024-01-10 | kentontroy/neo4j_game_of_thrones | df_creator.py | from dotenv import load_dotenv
from langchain.graphs.networkx_graph import NetworkxEntityGraph
import argparse
import ast
import os
import numpy as np
import pandas as pd
import pandasql as ps
import re
def readTriplesFromFile(filePath: str) -> pd.DataFrame:
data = []
with open(filePath, "r") as f:
book = ""
if re.search("game_of_thrones", filePath):
book = "Game Of Thrones"
elif re.search("a_clash_of_kings", filePath):
book = "A Clash Of Kings"
elif re.search("a_storm_of_swords", filePath):
book = "A Storm Of Swords"
elif re.search("a_feast_for_crows", filePath):
book = "A Feast For Crows"
elif re.search("a_dance_with_dragons", filePath):
book = "A Dance With Dragons"
i = 0
for l in f.readlines():
i += 1
if i == 1:
continue
line = l.split(":", 1)
print(line)
page = line[0].strip()
triples = ast.literal_eval(line[1].strip())
for triple in triples:
subject = triple[0].strip()
object = triple[1].strip()
predicate = triple[2].strip()
data.append([book, page, subject, predicate, object])
df = pd.DataFrame(data, columns=["Book", "Page", "Subject", "Predicate", "Object"])
return df
def saveTriplesToFile(df: pd.DataFrame, filePath: str):
df.to_csv(filePath, sep = "|", index=False)
def readTriplesFromDfFile(filePath: str) -> pd.DataFrame:
df = pd.read_csv(filePath, sep = "|")
return df
#####################################################################################
# Run a SQL statement against the dataframe
#####################################################################################
def runSql(df: pd.DataFrame, sql: str) -> pd.DataFrame:
return ps.sqldf(sql, locals())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", type=str, required=True, help="Specify the filename where dataframe was saved")
parser.add_argument("-q", "--sql", type=str, required=True, help="Specify the SQL statement")
args = parser.parse_args()
pd.options.display.max_rows = 100
df = readTriplesFromDfFile(filePath = args.file)
dfQuery = runSql(df = df, sql = args.sql)
saveTriplesToFile(df = dfQuery, filePath = "output.csv")
print(dfQuery)
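# Example invocation (illustrative; the CSV path is a placeholder written by saveTriplesToFile):
#   python df_creator.py -f triples.csv \
#       -q "SELECT Subject, Predicate, Object FROM df WHERE Book = 'Game Of Thrones'"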
| [] |
2024-01-10 | kentontroy/neo4j_game_of_thrones | triples_creator.py | from dotenv import load_dotenv
from langchain.graphs.networkx_graph import NetworkxEntityGraph
from langchain.indexes import GraphIndexCreator
from langchain.llms import LlamaCpp
from PyPDF2 import PdfReader
from typing import List, Dict
import argparse
import ast
import os
#####################################################################################
# Get pages from a PDF document
#####################################################################################
def getPagesFromPDF(pdfFilePath: str, maxPages: int, startPage: int = 0) -> List[Dict]:
pages = []
reader = PdfReader(pdfFilePath)
n = min(len(reader.pages) - startPage, maxPages)
for i in range(startPage, startPage + n):
text = reader.pages[i].extract_text()
pages.append({ "page": i, "text": text })
return pages
#####################################################################################
# Create triples from the pages using LLM
#####################################################################################
def createTriplesFromPages(pages: List[Dict], model: LlamaCpp) -> List[Dict]:
graphObjects = []
indexCreator = GraphIndexCreator(llm=model)
for page in pages:
if page["text"] != "":
graph = indexCreator.from_text(page["text"])
triples = graph.get_triples()
if len(triples) > 0:
graphObjects.append({ "page": page["page"], "triples": str(triples) })
print(triples)
return graphObjects
#####################################################################################
# Save triples to a file, indexed by page number
#####################################################################################
def saveTriplesToFile(graphObjects: List[Dict], filePath: str) -> None:
with open(filePath, "a") as f:
for graph in graphObjects:
f.write("{0}: {1}".format(graph["page"], graph["triples"]))
f.write("\n")
#####################################################################################
# Test reading triples from a file
#####################################################################################
def readTriplesFromFile(filePath: str) -> None:
test = "21: [('Dany', 'Rhaesh Andahli', 'is from'), ('Andals', 'Rhaesh Andahli', 'are from'), ('The Dothraki', 'Rhaesh Andahli', 'are from')]"
data = test.split(":", 1)
print(f"Page: {data[0]}")
triples = ast.literal_eval(data[1].strip())
print(f"Triples: {triples}")
print(f"Node 1: {triples[0][0]}, Node 2: {triples[0][1]}, Edge: {triples[0][2]}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--create", type=str, required=False, help="Create an in-memory graph, specify it's name")
parser.add_argument("-p", "--pdf", type=str, required=False, help="Specify a path to the PDF file")
parser.add_argument("-s", "--startPage", type=int, required=False, help="Specify the starting page number")
parser.add_argument("-m", "--maxPages", type=int, required=False, help="Specify the max number of pages")
args = parser.parse_args()
load_dotenv()
PATH = os.path.join(os.getenv("LLM_MODEL_PATH"), os.getenv("LLM_MODEL_FILE"))
MODEL = LlamaCpp(
model_path = PATH,
n_ctx = int(os.getenv("MODEL_PARAM_CONTEXT_LEN")),
n_batch = int(os.getenv("MODEL_PARAM_BATCH_SIZE")),
use_mlock = os.getenv("MODEL_PARAM_MLOCK"),
n_threads = int(os.getenv("MODEL_PARAM_THREADS")),
n_gpu_layers = 0,
f16_kv = True,
verbose = False
)
if args.create and args.pdf and args.startPage and args.maxPages:
pages = getPagesFromPDF(pdfFilePath = args.pdf, maxPages = args.maxPages, startPage = args.startPage)
graphObjects = createTriplesFromPages(pages = pages, model = MODEL)
saveTriplesToFile(graphObjects = graphObjects, filePath = args.create)
else:
print("Incorrect usage: python triples_creator.py [-h] to get help on command options")
| [] |
2024-01-10 | whowhatwhywhenwhere/gpt-pilot | pilot~helpers~AgentConvo.py | import re
import subprocess
import uuid
from utils.style import yellow, yellow_bold
from database.database import get_saved_development_step, save_development_step, delete_all_subsequent_steps
from helpers.exceptions.TokenLimitError import TokenLimitError
from utils.function_calling import parse_agent_response, FunctionCallSet
from utils.llm_connection import create_gpt_chat_completion
from utils.utils import array_of_objects_to_string, get_prompt, get_sys_message, capitalize_first_word_with_underscores
from logger.logger import logger
from prompts.prompts import ask_user
from const.llm import END_RESPONSE
class AgentConvo:
"""
Represents a conversation with an agent.
Args:
agent: An instance of the agent participating in the conversation.
"""
def __init__(self, agent):
# [{'role': 'system'|'user'|'assistant', 'content': ''}, ...]
self.messages: list[dict] = []
self.branches = {}
self.log_to_user = True
self.agent = agent
self.high_level_step = self.agent.project.current_step
# add system message
system_message = get_sys_message(self.agent.role)
logger.info('\n>>>>>>>>>> System Prompt >>>>>>>>>>\n%s\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>',
system_message['content'])
self.messages.append(system_message)
def send_message(self, prompt_path=None, prompt_data=None, function_calls: FunctionCallSet = None):
"""
Sends a message in the conversation.
Args:
prompt_path: The path to a prompt.
prompt_data: Data associated with the prompt.
function_calls: Optional function calls to be included in the message.
Returns:
The response from the agent.
"""
# craft message
self.construct_and_add_message_from_prompt(prompt_path, prompt_data)
# check if we already have the LLM response saved
if self.agent.__class__.__name__ == 'Developer':
self.agent.project.llm_req_num += 1
development_step = get_saved_development_step(self.agent.project)
if development_step is not None and self.agent.project.skip_steps:
# if we do, use it
print(yellow(f'Restoring development step with id {development_step.id}'))
self.agent.project.checkpoints['last_development_step'] = development_step
self.agent.project.restore_files(development_step.id)
response = development_step.llm_response
self.messages = development_step.messages
if self.agent.project.skip_until_dev_step and str(development_step.id) == self.agent.project.skip_until_dev_step:
self.agent.project.skip_steps = False
delete_all_subsequent_steps(self.agent.project)
if 'delete_unrelated_steps' in self.agent.project.args and self.agent.project.args['delete_unrelated_steps']:
self.agent.project.delete_all_steps_except_current_branch()
if development_step.token_limit_exception_raised:
raise TokenLimitError(development_step.token_limit_exception_raised)
else:
# if we don't, get the response from LLM
try:
response = create_gpt_chat_completion(self.messages, self.high_level_step, self.agent.project, function_calls=function_calls)
except TokenLimitError as e:
save_development_step(self.agent.project, prompt_path, prompt_data, self.messages, '', str(e))
raise e
if self.agent.__class__.__name__ == 'Developer':
development_step = save_development_step(self.agent.project, prompt_path, prompt_data, self.messages, response)
# TODO handle errors from OpenAI
if response == {}:
logger.error(f'Aborting with "OpenAI API error happened"')
raise Exception("OpenAI API error happened.")
response = parse_agent_response(response, function_calls)
# TODO remove this once the database is set up properly
message_content = response[0] if type(response) == tuple else response
if isinstance(message_content, list):
if 'to_message' in function_calls:
string_response = function_calls['to_message'](message_content)
elif len(message_content) > 0 and isinstance(message_content[0], dict):
string_response = [
f'#{i}\n' + array_of_objects_to_string(d)
for i, d in enumerate(message_content)
]
else:
string_response = ['- ' + r for r in message_content]
message_content = '\n'.join(string_response)
# TODO END
# TODO we need to specify the response when there is a function called
# TODO maybe we can have a specific function that creates the GPT response from the function call
logger.info('\n>>>>>>>>>> Assistant Prompt >>>>>>>>>>\n%s\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>', message_content)
self.messages.append({"role": "assistant", "content": message_content})
self.log_message(message_content)
return response
def continuous_conversation(self, prompt_path, prompt_data, function_calls=None):
"""
Conducts a continuous conversation with the agent.
Args:
prompt_path: The path to a prompt.
prompt_data: Data associated with the prompt.
function_calls: Optional function calls to be included in the conversation.
Returns:
List of accepted messages in the conversation.
"""
self.log_to_user = False
accepted_messages = []
response = self.send_message(prompt_path, prompt_data, function_calls)
# Continue conversation until GPT response equals END_RESPONSE
while response != END_RESPONSE:
user_message = ask_user(self.agent.project, response,
hint=yellow("Do you want to add anything else? If not, ") + yellow_bold('just press ENTER.'),
require_some_input=False)
if user_message == "":
accepted_messages.append(response)
logger.info('\n>>>>>>>>>> User Message >>>>>>>>>>\n%s\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>', user_message)
self.messages.append({"role": "user", "content": user_message})
response = self.send_message(None, None, function_calls)
self.log_to_user = True
return accepted_messages
def save_branch(self, branch_name=None):
if branch_name is None:
branch_name = str(uuid.uuid4())
self.branches[branch_name] = self.messages.copy()
return branch_name
def load_branch(self, branch_name, reload_files=True):
self.messages = self.branches[branch_name].copy()
if reload_files:
# TODO make this more flexible - with every message, save metadata so every time we load a branch, reconstruct all messages from scratch
self.replace_files()
def replace_files(self):
files = self.agent.project.get_all_coded_files()
for msg in self.messages:
if msg['role'] == 'user':
for file in files:
self.replace_file_content(msg['content'], file['path'], file['content'])
def replace_file_content(self, message, file_path, new_content):
escaped_file_path = re.escape(file_path)
pattern = rf'\*\*{{ {escaped_file_path} }}\*\*\n```\n(.*?)\n```'
new_section_content = f'**{{ {file_path} }}**\n```\n{new_content}\n```'
updated_message, num_replacements = re.subn(pattern, new_section_content, message, flags=re.DOTALL)
if num_replacements == 0:
return message
return updated_message
def convo_length(self):
return len([msg for msg in self.messages if msg['role'] != 'system'])
def log_message(self, content):
"""
Logs a message in the conversation.
Args:
content: The content of the message to be logged.
"""
print_msg = capitalize_first_word_with_underscores(self.high_level_step)
if self.log_to_user:
if self.agent.project.checkpoints['last_development_step'] is not None:
print(yellow("\nDev step ") + yellow_bold(str(self.agent.project.checkpoints['last_development_step'])) + '\n', end='')
print(f"\n{content}\n", type='local')
logger.info(f"{print_msg}: {content}\n")
def to_playground(self):
with open('const/convert_to_playground_convo.js', 'r', encoding='utf-8') as file:
content = file.read()
process = subprocess.Popen('pbcopy', stdin=subprocess.PIPE)
process.communicate(content.replace('{{messages}}', str(self.messages)).encode('utf-8'))
def remove_last_x_messages(self, x):
self.messages = self.messages[:-x]
def construct_and_add_message_from_prompt(self, prompt_path, prompt_data):
if prompt_path is not None and prompt_data is not None:
prompt = get_prompt(prompt_path, prompt_data)
logger.info('\n>>>>>>>>>> User Prompt >>>>>>>>>>\n%s\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>', prompt)
self.messages.append({"role": "user", "content": prompt})
| [] |
2024-01-10 | whowhatwhywhenwhere/gpt-pilot | pilot~utils~llm_connection.py | import re
import requests
import os
import sys
import time
import json
import tiktoken
from prompt_toolkit.styles import Style
from jsonschema import validate, ValidationError
from utils.style import red
from typing import List
from const.llm import MIN_TOKENS_FOR_GPT_RESPONSE, MAX_GPT_MODEL_TOKENS
from logger.logger import logger, logging
from helpers.exceptions import TokenLimitError, ApiKeyNotDefinedError
from utils.utils import fix_json, get_prompt
from utils.function_calling import add_function_calls_to_request, FunctionCallSet, FunctionType
from utils.questionary import styled_text
def get_tokens_in_messages(messages: List[str]) -> int:
tokenizer = tiktoken.get_encoding("cl100k_base") # GPT-4 tokenizer
tokenized_messages = [tokenizer.encode(message['content']) for message in messages]
return sum(len(tokens) for tokens in tokenized_messages)
def num_tokens_from_functions(functions):
"""Return the number of tokens used by a list of functions."""
encoding = tiktoken.get_encoding("cl100k_base")
num_tokens = 0
for function in functions:
function_tokens = len(encoding.encode(function['name']))
function_tokens += len(encoding.encode(function['description']))
if 'parameters' in function:
parameters = function['parameters']
if 'properties' in parameters:
for propertiesKey in parameters['properties']:
function_tokens += len(encoding.encode(propertiesKey))
v = parameters['properties'][propertiesKey]
for field in v:
if field == 'type':
function_tokens += 2
function_tokens += len(encoding.encode(v['type']))
elif field == 'description':
function_tokens += 2
function_tokens += len(encoding.encode(v['description']))
elif field == 'enum':
function_tokens -= 3
for o in v['enum']:
function_tokens += 3
function_tokens += len(encoding.encode(o))
function_tokens += 11
num_tokens += function_tokens
num_tokens += 12
return num_tokens
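# Illustrative check (not part of the original module): approximate token cost of a
# minimal function definition in the OpenAI function-calling format.
def _example_function_token_count() -> int:
    functions = [{
        "name": "get_weather",
        "description": "Get the current weather for a city",
        "parameters": {
            "properties": {
                "city": {"type": "string", "description": "City name"},
            },
        },
    }]
    return num_tokens_from_functions(functions)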
def create_gpt_chat_completion(messages: List[dict], req_type, project,
function_calls: FunctionCallSet = None):
"""
Called from:
- AgentConvo.send_message() - these calls often have `function_calls`, usually from `pilot/const/function_calls.py`
- convo.continuous_conversation()
- prompts.get_additional_info_from_openai()
- prompts.get_additional_info_from_user() after the user responds to each
"Please check this message and say what needs to be changed... {message}"
:param messages: [{ "role": "system"|"assistant"|"user", "content": string }, ... ]
:param req_type: 'project_description' etc. See common.STEPS
:param project: project
:param function_calls: (optional) {'definitions': [{ 'name': str }, ...]}
see `IMPLEMENT_CHANGES` etc. in `pilot/const/function_calls.py`
:return: {'text': new_code}
or if `function_calls` param provided
{'function_calls': {'name': str, arguments: {...}}}
"""
gpt_data = {
'model': os.getenv('MODEL_NAME', 'gpt-4'),
'n': 1,
'temperature': 1,
'top_p': 1,
'presence_penalty': 0,
'frequency_penalty': 0,
'messages': messages,
'stream': True
}
# delete some keys if using "OpenRouter" API
if os.getenv('ENDPOINT') == 'OPENROUTER':
keys_to_delete = ['n', 'max_tokens', 'temperature', 'top_p', 'presence_penalty', 'frequency_penalty']
for key in keys_to_delete:
if key in gpt_data:
del gpt_data[key]
# Advise the LLM of the JSON response schema we are expecting
add_function_calls_to_request(gpt_data, function_calls)
try:
response = stream_gpt_completion(gpt_data, req_type, project)
return response
except TokenLimitError as e:
raise e
except Exception as e:
logger.error(f'The request to {os.getenv("ENDPOINT")} API failed: %s', e)
print(f'The request to {os.getenv("ENDPOINT")} API failed. Here is the error message:')
print(e)
return {} # https://github.com/Pythagora-io/gpt-pilot/issues/130 - may need to revisit how we handle this
def delete_last_n_lines(n):
for _ in range(n):
# Move the cursor up one line
sys.stdout.write('\033[F')
# Clear the current line
sys.stdout.write('\033[K')
def count_lines_based_on_width(content, width):
lines_required = sum(len(line) // width + 1 for line in content.split('\n'))
return lines_required
def get_tokens_in_messages_from_openai_error(error_message):
"""
Extract the token count from a message.
Args:
message (str): The message to extract the token count from.
Returns:
int or None: The token count if found, otherwise None.
"""
match = re.search(r"your messages resulted in (\d+) tokens", error_message)
if match:
return int(match.group(1))
else:
return None
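# Quick illustrative check (not part of the original module): the token count is pulled
# out of a typical context-length error message.
def _example_token_extraction() -> None:
    err = "This model's maximum context length is 8192 tokens. However, your messages resulted in 9000 tokens."
    assert get_tokens_in_messages_from_openai_error(err) == 9000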
def retry_on_exception(func):
def update_error_count(args):
function_error_count = 1 if 'function_error' not in args[0] else args[0]['function_error_count'] + 1
args[0]['function_error_count'] = function_error_count
return function_error_count
def set_function_error(args, err_str: str):
logger.info(err_str)
args[0]['function_error'] = err_str
if 'function_buffer' in args[0]:
del args[0]['function_buffer']
def wrapper(*args, **kwargs):
while True:
try:
# spinner_stop(spinner)
return func(*args, **kwargs)
except Exception as e:
# Convert exception to string
err_str = str(e)
if isinstance(e, json.JSONDecodeError):
# codellama-34b-instruct seems to send incomplete JSON responses.
# We ask for the rest of the JSON object for the following errors:
# - 'Expecting value' (error if `e.pos` not at the end of the doc: True instead of true)
# - "Expecting ':' delimiter"
# - 'Expecting property name enclosed in double quotes'
# - 'Unterminated string starting at'
if e.msg.startswith('Expecting') or e.msg == 'Unterminated string starting at':
if e.msg == 'Expecting value' and len(e.doc) > e.pos:
# Note: clean_json_response() should heal True/False boolean values
err_str = re.split(r'[},\\n]', e.doc[e.pos:])[0]
err_str = f'Invalid value: `{err_str}`'
else:
# if e.msg == 'Unterminated string starting at' or len(e.doc) == e.pos:
logger.info('Received incomplete JSON response from LLM. Asking for the rest...')
args[0]['function_buffer'] = e.doc
if 'function_error' in args[0]:
del args[0]['function_error']
continue
# TODO: (if it ever comes up) e.msg == 'Extra data' -> trim the response
# 'Invalid control character at', 'Invalid \\escape', 'Invalid control character',
# or `Expecting value` with `pos` before the end of `e.doc`
function_error_count = update_error_count(args)
logger.warning('Received invalid character in JSON response from LLM. Asking to retry...')
set_function_error(args, err_str)
if function_error_count < 3:
continue
elif isinstance(e, ValidationError):
function_error_count = update_error_count(args)
logger.warning('Received invalid JSON response from LLM. Asking to retry...')
# eg:
# json_path: '$.type'
# message: "'command' is not one of ['automated_test', 'command_test', 'manual_test', 'no_test']"
set_function_error(args, f'at {e.json_path} - {e.message}')
# Attempt retry if the JSON schema is invalid, but avoid getting stuck in a loop
if function_error_count < 3:
continue
if "context_length_exceeded" in err_str:
# If the specific error "context_length_exceeded" is present, simply return without retry
# spinner_stop(spinner)
raise TokenLimitError(get_tokens_in_messages_from_openai_error(err_str), MAX_GPT_MODEL_TOKENS)
if "rate_limit_exceeded" in err_str:
# Extracting the duration from the error string
match = re.search(r"Please try again in (\d+)ms.", err_str)
if match:
# spinner = spinner_start(colored("Rate limited. Waiting...", 'yellow'))
logger.debug('Rate limited. Waiting...')
wait_duration = int(match.group(1)) / 1000
time.sleep(wait_duration)
continue
print(red(f'There was a problem with request to openai API:'))
# spinner_stop(spinner)
print(err_str)
logger.error(f'There was a problem with request to openai API: {err_str}')
project = args[2]
user_message = styled_text(
project,
"Do you want to try make the same request again? If yes, just press ENTER. Otherwise, type 'no'.",
style=Style.from_dict({
'question': '#FF0000 bold',
'answer': '#FF910A bold'
})
)
# TODO: take user's input into consideration - send to LLM?
# https://github.com/Pythagora-io/gpt-pilot/issues/122
if user_message != '':
return {}
return wrapper
@retry_on_exception
def stream_gpt_completion(data, req_type, project):
"""
Called from create_gpt_chat_completion()
:param data:
:param req_type: 'project_description' etc. See common.STEPS
:param project: NEEDED FOR WRAPPER FUNCTION retry_on_exception
:return: {'text': str} or {'function_calls': {'name': str, arguments: '{...}'}}
"""
# TODO add type dynamically - this isn't working when connected to the external process
try:
terminal_width = os.get_terminal_size().columns
except OSError:
terminal_width = 50
lines_printed = 2
gpt_response = ''
buffer = '' # A buffer to accumulate incoming data
expecting_json = None
received_json = False
if 'functions' in data:
expecting_json = data['functions']
if 'function_buffer' in data:
incomplete_json = get_prompt('utils/incomplete_json.prompt', {'received_json': data['function_buffer']})
data['messages'].append({'role': 'user', 'content': incomplete_json})
gpt_response = data['function_buffer']
received_json = True
elif 'function_error' in data:
invalid_json = get_prompt('utils/invalid_json.prompt', {'invalid_reason': data['function_error']})
data['messages'].append({'role': 'user', 'content': invalid_json})
received_json = True
# Don't send the `functions` parameter to Open AI, but don't remove it from `data` in case we need to retry
data = {key: value for key, value in data.items() if not key.startswith('function')}
def return_result(result_data, lines_printed):
if buffer:
lines_printed += count_lines_based_on_width(buffer, terminal_width)
logger.debug(f'lines printed: {lines_printed} - {terminal_width}')
delete_last_n_lines(lines_printed)
return result_data
# spinner = spinner_start(yellow("Waiting for OpenAI API response..."))
# print(yellow("Stream response from OpenAI:"))
# Configure for the selected ENDPOINT
model = os.getenv('MODEL_NAME', 'gpt-4')
endpoint = os.getenv('ENDPOINT')
logger.info(f'> Request model: {model} ({data["model"]} in data)')
if logger.isEnabledFor(logging.DEBUG):
logger.debug('\n'.join([f"{message['role']}: {message['content']}" for message in data['messages']]))
if endpoint == 'AZURE':
# If yes, get the AZURE_ENDPOINT from .ENV file
endpoint_url = os.getenv('AZURE_ENDPOINT') + '/openai/deployments/' + model + '/chat/completions?api-version=2023-05-15'
headers = {
'Content-Type': 'application/json',
'api-key': get_api_key_or_throw('AZURE_API_KEY')
}
elif endpoint == 'OPENROUTER':
# If so, send the request to the OpenRouter API endpoint
endpoint_url = os.getenv('OPENROUTER_ENDPOINT', 'https://openrouter.ai/api/v1/chat/completions')
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + get_api_key_or_throw('OPENROUTER_API_KEY'),
'HTTP-Referer': 'http://localhost:3000',
'X-Title': 'GPT Pilot (LOCAL)'
}
else:
# If not, send the request to the OpenAI endpoint
endpoint_url = os.getenv('OPENAI_ENDPOINT', 'https://api.openai.com/v1/chat/completions')
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + get_api_key_or_throw('OPENAI_API_KEY')
}
response = requests.post(
endpoint_url,
headers=headers,
json=data,
stream=True
)
# Log the response status code and message
logger.debug(f'Response status code: {response.status_code}')
if response.status_code != 200:
logger.info(f'problem with request: {response.text}')
raise Exception(f"API responded with status code: {response.status_code}. Response text: {response.text}")
# function_calls = {'name': '', 'arguments': ''}
for line in response.iter_lines():
# Ignore keep-alive new lines
if line and line != b': OPENROUTER PROCESSING':
line = line.decode("utf-8") # decode the bytes to string
if line.startswith('data: '):
line = line[6:] # remove the 'data: ' prefix
# Check if the line is "[DONE]" before trying to parse it as JSON
if line == "[DONE]":
continue
try:
json_line = json.loads(line)
if len(json_line['choices']) == 0:
continue
if 'error' in json_line:
logger.error(f'Error in LLM response: {json_line}')
raise ValueError(f'Error in LLM response: {json_line["error"]["message"]}')
choice = json_line['choices'][0]
# if 'finish_reason' in choice and choice['finish_reason'] == 'function_call':
# function_calls['arguments'] = load_data_to_json(function_calls['arguments'])
# return return_result({'function_calls': function_calls}, lines_printed)
json_line = choice['delta']
except json.JSONDecodeError as e:
logger.error(f'Unable to decode line: {line} {e.msg}')
continue # skip to the next line
# handle the streaming response
# if 'function_call' in json_line:
# if 'name' in json_line['function_call']:
# function_calls['name'] = json_line['function_call']['name']
# print(f'Function call: {function_calls["name"]}')
#
# if 'arguments' in json_line['function_call']:
# function_calls['arguments'] += json_line['function_call']['arguments']
# print(json_line['function_call']['arguments'], type='stream', end='', flush=True)
if 'content' in json_line:
content = json_line.get('content')
if content:
buffer += content # accumulate the data
# If you detect a natural breakpoint (e.g., line break or end of a response object), print & count:
if buffer.endswith('\n'):
if expecting_json and not received_json:
received_json = assert_json_response(buffer, lines_printed > 2)
# or some other condition that denotes a breakpoint
lines_printed += count_lines_based_on_width(buffer, terminal_width)
buffer = "" # reset the buffer
gpt_response += content
print(content, type='stream', end='', flush=True)
print('\n', type='stream')
# if function_calls['arguments'] != '':
# logger.info(f'Response via function call: {function_calls["arguments"]}')
# function_calls['arguments'] = load_data_to_json(function_calls['arguments'])
# return return_result({'function_calls': function_calls}, lines_printed)
logger.info('<<<<<<<<<< LLM Response <<<<<<<<<<\n%s\n<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<', gpt_response)
if expecting_json:
gpt_response = clean_json_response(gpt_response)
assert_json_schema(gpt_response, expecting_json)
new_code = postprocessing(gpt_response, req_type) # TODO add type dynamically
return return_result({'text': new_code}, lines_printed)
def get_api_key_or_throw(env_key: str):
api_key = os.getenv(env_key)
if api_key is None:
raise ApiKeyNotDefinedError(env_key)
return api_key
def assert_json_response(response: str, or_fail=True) -> bool:
if re.match(r'.*(```(json)?|{|\[)', response):
return True
elif or_fail:
logger.error(f'LLM did not respond with JSON: {response}')
raise ValueError('LLM did not respond with JSON')
else:
return False
def clean_json_response(response: str) -> str:
response = re.sub(r'^.*```json\s*', '', response, flags=re.DOTALL)
response = re.sub(r': ?True(,)?$', r':true\1', response, flags=re.MULTILINE)
response = re.sub(r': ?False(,)?$', r':false\1', response, flags=re.MULTILINE)
return response.strip('` \n')
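# Illustrative check (not part of the original module): a fenced response with a
# Python-style boolean is healed into valid JSON before parsing.
def _example_clean_json() -> dict:
    raw = '```json\n{\n  "done": True\n}\n```'
    return json.loads(clean_json_response(raw))  # -> {'done': True}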
def assert_json_schema(response: str, functions: list[FunctionType]) -> bool:
for function in functions:
schema = function['parameters']
parsed = json.loads(response)
validate(parsed, schema)
return True
def postprocessing(gpt_response: str, req_type) -> str:
return gpt_response
def load_data_to_json(string):
return json.loads(fix_json(string))
| [] |
2024-01-10 | whowhatwhywhenwhere/gpt-pilot | pilot~prompts~prompts.py | # prompts/prompts.py
from utils.style import yellow
from const import common
from const.llm import MAX_QUESTIONS, END_RESPONSE
from utils.llm_connection import create_gpt_chat_completion
from utils.utils import capitalize_first_word_with_underscores, get_sys_message, find_role_from_step, get_prompt
from utils.questionary import styled_select, styled_text
from logger.logger import logger
def ask_for_app_type():
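    # NOTE: the app type is hard-coded for now; the interactive selection below never runs.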
return 'App'
answer = styled_select(
"What type of app do you want to build?",
choices=common.APP_TYPES
)
if answer is None:
print("Exiting application.")
exit(0)
while 'unavailable' in answer:
print("Sorry, that option is not available.")
answer = styled_select(
"What type of app do you want to build?",
choices=common.APP_TYPES
)
if answer is None:
print("Exiting application.")
exit(0)
print("You chose: " + answer)
logger.info(f"You chose: {answer}")
return answer
def ask_for_main_app_definition(project):
description = styled_text(
project,
"Describe your app in as much detail as possible."
)
if description is None:
print("No input provided!")
return
logger.info(f"Initial App description done: {description}")
return description
def ask_user(project, question: str, require_some_input=True, hint: str = None):
while True:
if hint is not None:
print(yellow(hint), type='hint')
answer = styled_text(project, question)
logger.info('Q: %s', question)
logger.info('A: %s', answer)
if answer is None:
print("Exiting application.")
exit(0)
if answer.strip() == '' and require_some_input:
print("No input provided! Please try again.")
continue
else:
return answer
def get_additional_info_from_openai(project, messages):
"""
Runs the conversation between Product Owner and LLM.
Provides the user's initial description, LLM asks the user clarifying questions and user responds.
Limited by `MAX_QUESTIONS`, exits when LLM responds "EVERYTHING_CLEAR".
:param project: Project
:param messages: [
{ "role": "system", "content": "You are a Product Owner..." },
{ "role": "user", "content": "I want you to create the app {name} that can be described: ```{description}```..." }
]
:return: The updated `messages` list with the entire conversation between user and LLM.
"""
is_complete = False
while not is_complete:
# Obtain clarifications using the OpenAI API
# { 'text': new_code }
response = create_gpt_chat_completion(messages, 'additional_info', project)
if response is not None:
if response['text'] and response['text'].strip() == END_RESPONSE:
# print(response['text'] + '\n')
return messages
# Ask the question to the user
answer = ask_user(project, response['text'])
# Add the answer to the messages
messages.append({'role': 'assistant', 'content': response['text']})
messages.append({'role': 'user', 'content': answer})
else:
is_complete = True
logger.info('Getting additional info from openai done')
return messages
# TODO refactor this to comply with AgentConvo class
def get_additional_info_from_user(project, messages, role):
"""
If `advanced` CLI arg, Architect offers user a chance to change the architecture.
Prompts: "Please check this message and say what needs to be changed. If everything is ok just press ENTER"...
Then asks the LLM to update the messages based on the user's feedback.
:param project: Project
:param messages: array<string | { "text": string }>
:param role: 'product_owner', 'architect', 'dev_ops', 'tech_lead', 'full_stack_developer', 'code_monkey'
:return: a list of updated messages - see https://github.com/Pythagora-io/gpt-pilot/issues/78
"""
# TODO process with agent convo
updated_messages = []
for message in messages:
while True:
if isinstance(message, dict) and 'text' in message:
message = message['text']
print(yellow(f"Please check this message and say what needs to be changed. If everything is ok just press ENTER",))
answer = ask_user(project, message, require_some_input=False)
if answer.lower() == '':
break
response = create_gpt_chat_completion(
generate_messages_from_custom_conversation(role, [get_prompt('utils/update.prompt'), message, answer], 'user'),
'additional_info',
project
)
message = response
updated_messages.append(message)
logger.info('Getting additional info from user done')
return updated_messages
def generate_messages_from_description(description, app_type, name):
"""
Called by ProductOwner.get_description().
:param description: "I want to build a cool app that will make me rich"
:param app_type: 'Web App', 'Script', 'Mobile App', 'Chrome Extension' etc
:param name: Project name
:return: [
{ "role": "system", "content": "You are a Product Owner..." },
{ "role": "user", "content": "I want you to create the app {name} that can be described: ```{description}```..." }
]
"""
# "I want you to create the app {name} that can be described: ```{description}```
# Get additional answers
# Break down stories
# Break down user tasks
# Start with Get additional answers
# {prompts/components/no_microservices}
# {prompts/components/single_question}
# "
prompt = get_prompt('high_level_questions/specs.prompt', {
'name': name,
'prompt': description,
'app_type': app_type,
# TODO: MAX_QUESTIONS should be configurable by ENV or CLI arg
'MAX_QUESTIONS': MAX_QUESTIONS
})
return [
get_sys_message('product_owner'),
{"role": "user", "content": prompt},
]
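# Illustrative call (not part of the original file; the app details are made up):
#   generate_messages_from_description("A todo app with user accounts", "Web App", "TodoMaster")
#   -> [product_owner system message, user message rendered from high_level_questions/specs.prompt]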
def generate_messages_from_custom_conversation(role, messages, start_role='user'):
"""
:param role: 'product_owner', 'architect', 'dev_ops', 'tech_lead', 'full_stack_developer', 'code_monkey'
:param messages: [
"I will show you some of your message to which I want you to make some updates. Please just modify your last message per my instructions.",
{LLM's previous message},
{user's request for change}
]
:param start_role: 'user'
:return: [
{ "role": "system", "content": "You are a ..., You do ..." },
{ "role": start_role, "content": messages[i + even] },
{ "role": "assistant" (or "user" for other start_role), "content": messages[i + odd] },
... ]
"""
# messages is list of strings
system_message = get_sys_message(role)
result = [system_message]
logger.info(f'\n>>>>>>>>>> {role} Prompt >>>>>>>>>>\n%s\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>', system_message['content'])
for i, message in enumerate(messages):
if i % 2 == 0:
result.append({"role": start_role, "content": message})
logger.info(f'\n>>>>>>>>>> {start_role} Prompt >>>>>>>>>>\n%s\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>', message)
else:
result.append({"role": "assistant" if start_role == "user" else "user", "content": message})
logger.info('\n>>>>>>>>>> Assistant Prompt >>>>>>>>>>\n%s\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>', message)
return result
| [
"name",
"high_level_questions/specs.prompt",
"MAX_QUESTIONS"
] |
2024-01-10 | CCNUXL/Algorithm-Skeleton-Mining | Langchain_files~Langchain-Chatchat-master~document_loaders~myimgloader.py | from typing import List
from langchain.document_loaders.unstructured import UnstructuredFileLoader
class RapidOCRLoader(UnstructuredFileLoader):
def _get_elements(self) -> List:
def img2text(filepath):
from rapidocr_onnxruntime import RapidOCR
resp = ""
ocr = RapidOCR()
result, _ = ocr(filepath)
if result:
ocr_result = [line[1] for line in result]
resp += "\n".join(ocr_result)
return resp
text = img2text(self.file_path)
from unstructured.partition.text import partition_text
return partition_text(text=text, **self.unstructured_kwargs)
if __name__ == "__main__":
loader = RapidOCRLoader(file_path="../tests/samples/ocr_test.jpg")
docs = loader.load()
print(docs)
| [] |
2024-01-10 | CCNUXL/Algorithm-Skeleton-Mining | Langchain_files~Langchain-Chatchat-master~text_splitter~ali_text_splitter.py | from langchain.text_splitter import CharacterTextSplitter
import re
from typing import List
class AliTextSplitter(CharacterTextSplitter):
def __init__(self, pdf: bool = False, **kwargs):
super().__init__(**kwargs)
self.pdf = pdf
def split_text(self, text: str) -> List[str]:
        # The use_document_segmentation option splits the document semantically. The model used
        # here is DAMO Academy's open-source nlp_bert_document-segmentation_chinese-base
        # (paper: https://arxiv.org/abs/2107.09278).
        # To use it, install modelscope[nlp]:
        #   pip install "modelscope[nlp]" -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html
        # Because several models may be loaded, this can be heavy for low-end GPUs, so the
        # pipeline runs on the CPU here; change `device` to your GPU id if needed.
if self.pdf:
text = re.sub(r"\n{3,}", r"\n", text)
text = re.sub('\s', " ", text)
text = re.sub("\n\n", "", text)
try:
from modelscope.pipelines import pipeline
except ImportError:
raise ImportError(
"Could not import modelscope python package. "
"Please install modelscope with `pip install modelscope`. "
)
p = pipeline(
task="document-segmentation",
model='damo/nlp_bert_document-segmentation_chinese-base',
device="cpu")
result = p(documents=text)
sent_list = [i for i in result["text"].split("\n\t") if i]
return sent_list
if __name__ == "__main__":
text_splitter = AliTextSplitter(
pdf = False
)
ls = [
"""
标题
全球可再生能源趋势
简介
近年来,全球能源格局正在发生重大变革。随着对气候变化和化石燃料有限性的担忧不断增长,世界正在将焦点转向可再生能源。这份简要报告旨在提供当前全球可再生能源趋势的概述。
关键点
太阳能迅猛增长: 太阳能在过去十年中取得了显著增长。成本下降,太阳能电池板效率提高,政府激励措施都促进了这一增长。
风能扩张: 风能是另一个有前景的领域。离岸风电场越来越普及,风力涡轮机变得更加高效和具有成本效益。
能源储存解决方案: 鉴于可再生能源如太阳能和风能的不确定性,能源储存解决方案,如先进的电池,对于电网的稳定性和可靠性至关重要。
新兴技术: 在潮汐和地热能源等领域的研究和开发正在为清洁能源发电开辟新的可能性。
政府政策: 许多国家的政府正在实施促进可再生能源的政策,包括补贴、税收激励措施和减排目标。
挑战
间歇性: 太阳能和风能等可再生能源的不可预测性为持续供能带来了挑战。
基础设施投资: 转向可再生能源需要大量的基础设施投资,包括电网升级和新的能源储存设施。
公众认知: 说服公众可再生能源的益处和可行性至关重要。
结论
全球转向可再生能源是在应对气候变化方面的一个令人鼓舞的趋势。然而,在将可再生能源整合到现有能源基础设施方面仍然存在挑战。持续的研究、投资和公众支持对于实现可持续能源未来至关重要。
""",
]
# text = """"""
for inum, text in enumerate(ls):
print(inum)
chunks = text_splitter.split_text(text)
num = 1
for chunk in chunks:
print("chunk_num_", num, end=" ")
print(chunk)
num += 1
| [] |
2024-01-10 | CCNUXL/Algorithm-Skeleton-Mining | Algorithm_Skeleton~paper_splitter.py | import os
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
def splitter(pdf_path):
    # Get the absolute path of the current file
    current_file_path = os.path.abspath(__file__)
    # Walk up to the parent and grandparent directories of the current file
    parent_directory = os.path.dirname(current_file_path)
    parent_parent_directory = os.path.dirname(parent_directory)
    # Join into the full path of the PDF
new_path = os.path.join(parent_parent_directory, pdf_path)
print(new_path)
loader = PyPDFLoader(new_path)
pages = loader.load_and_split()
# print(f"加载完毕,共加载{len(pages)}页PDF文件")
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=2000,
chunk_overlap=500,
length_function=len
)
texts = text_splitter.split_documents(pages)
# print(f"文章共切分为{len(texts)}段")
# for i in range(0, len(texts)):
# print(f"段{i}")
# print(texts[i].page_content)
return texts
if __name__ == "__main__":
pdf_path = "Algorithm_Skeleton/pdf_files/Distilling Model/2212.00193.pdf"
result = splitter(pdf_path)
print(result[0].page_content)
| [] |
2024-01-10 | CCNUXL/Algorithm-Skeleton-Mining | Langchain_files~Langchain-Chatchat-master~text_splitter~zh_title_enhance.py | from langchain.docstore.document import Document
import re
def under_non_alpha_ratio(text: str, threshold: float = 0.5):
"""Checks if the proportion of non-alpha characters in the text snippet exceeds a given
threshold. This helps prevent text like "-----------BREAK---------" from being tagged
as a title or narrative text. The ratio does not count spaces.
Parameters
----------
text
The input string to test
threshold
If the proportion of non-alpha characters exceeds this threshold, the function
returns False
"""
    # This function checks whether the proportion of non-alphabetic characters in the text
    # exceeds the given threshold, preventing text like "-----------BREAK---------" from
    # being misclassified as a title or narrative text. Spaces are not counted.
    # Steps: count the alphabetic characters and the total characters (excluding spaces),
    # compute their ratio, and return True if the ratio is below the threshold, else False.
if len(text) == 0:
return False
alpha_count = len([char for char in text if char.strip() and char.isalpha()])
total_count = len([char for char in text if char.strip()])
try:
ratio = alpha_count / total_count
return ratio < threshold
except:
return False
def is_possible_title(
text: str,
title_max_word_length: int = 20,
non_alpha_threshold: float = 0.5,
) -> bool:
"""Checks to see if the text passes all of the checks for a valid title.
Parameters
----------
text
The input text to check
title_max_word_length
The maximum number of words a title can contain
non_alpha_threshold
The minimum number of alpha characters the text needs to be considered a title
"""
    # This function decides whether the text could be a title. Checks, in order:
    # 1. Zero-length text is never a title.
    # 2. Text ending in punctuation is not a title.
    # 3. Text longer than the maximum word length (default 20) is not a title.
    # 4. under_non_alpha_ratio() rejects text whose non-alphabetic share exceeds the threshold.
    # 5. Additional checks: trailing comma/period, all-numeric text, and whether the
    #    leading characters contain a digit.
    # A zero-length text is definitely not a title.
if len(text) == 0:
print("Not a title. Text is empty.")
return False
    # Text that ends with a punctuation mark is not a title
ENDS_IN_PUNCT_PATTERN = r"[^\w\s]\Z"
ENDS_IN_PUNCT_RE = re.compile(ENDS_IN_PUNCT_PATTERN)
if ENDS_IN_PUNCT_RE.search(text) is not None:
return False
    # The text must not exceed the configured maximum length (default 20)
# NOTE(robinson) - splitting on spaces here instead of word tokenizing because it
# is less expensive and actual tokenization doesn't add much value for the length check
if len(text) > title_max_word_length:
return False
    # If the share of non-alphabetic characters (e.g. digits) is too high, it is not a title
if under_non_alpha_ratio(text, threshold=non_alpha_threshold):
return False
# NOTE(robinson) - Prevent flagging salutations like "To My Dearest Friends," as titles
if text.endswith((",", ".", ",", "。")):
return False
if text.isnumeric():
print(f"Not a title. Text is all numeric:\n\n{text}") # type: ignore
return False
    # The leading characters (first 5 by default) should contain a digit
if len(text) < 5:
text_5 = text
else:
text_5 = text[:5]
alpha_in_text_5 = sum(list(map(lambda x: x.isnumeric(), list(text_5))))
if not alpha_in_text_5:
return False
return True
def zh_title_enhance(docs: Document) -> Document:
    # Process a list of documents: if a document's text looks like a title, tag it as
    # 'cn_Title' in its metadata; subsequent documents are prefixed with that title for context.
title = None
if len(docs) > 0:
for doc in docs:
if is_possible_title(doc.page_content):
doc.metadata['category'] = 'cn_Title'
title = doc.page_content
elif title:
doc.page_content = f"下文与({title})有关。{doc.page_content}"
return docs
else:
print("文件不存在")
| [] |
2024-01-10 | CCNUXL/Algorithm-Skeleton-Mining | Text_Split~spliter.py | import os
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from nltk.tokenize import word_tokenize
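# Note: word_tokenize below requires the NLTK 'punkt' tokenizer data (e.g. nltk.download('punkt')).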
# Get the absolute path of the current file
current_file_path = os.path.abspath(__file__)
# Walk up to the parent and grandparent directories of the current file
parent_directory = os.path.dirname(current_file_path)
parent_parent_directory = os.path.dirname(parent_directory)
# Join into the full path of the PDF
pdf_path = os.path.join(parent_parent_directory, "Datasets/Papers/CoT/2201.11903.pdf")
print(pdf_path)
loader = PyPDFLoader(pdf_path)
pages = loader.load_and_split()
print(f"加载完毕,共加载{len(pages)}页PDF文件")
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=2000,
chunk_overlap=10,
length_function=len
)
texts = text_splitter.split_documents(pages)
print(f"文章共切分为{len(texts)}段")
for i in range(0, len(texts)):
print(f"段{i}")
print(texts[i].page_content)
tokens = word_tokenize(texts[i].page_content)
tokens_length = len(tokens)
print("tokens length:", tokens_length)
| [] |
2024-01-10 | CCNUXL/Algorithm-Skeleton-Mining | Langchain_files~Langchain-Chatchat-master~text_splitter~chinese_recursive_text_splitter.py | import re
from typing import List, Optional, Any
from langchain.text_splitter import RecursiveCharacterTextSplitter
import logging
logger = logging.getLogger(__name__)
def _split_text_with_regex_from_end(
text: str, separator: str, keep_separator: bool
) -> List[str]:
    # text: the text string to split.
    # separator: the separator (regex pattern) used to split the text.
    # keep_separator: whether to keep the separators in the split results.
# Now that we have the separator, split the text
if separator:
if keep_separator:
# The parentheses in the pattern keep the delimiters in the result.
_splits = re.split(f"({separator})", text)
            # Re-merge the pieces by zipping each even-indexed substring with the
            # odd-indexed separator that follows it, so every separator stays attached
            # to the end of the preceding chunk.
            splits = ["".join(i) for i in zip(_splits[0::2], _splits[1::2])]
            # If _splits has odd length, the final piece has no trailing separator; keep it as-is.
if len(_splits) % 2 == 1:
splits += _splits[-1:]
# splits = [_splits[0]] + splits
else:
splits = re.split(separator, text)
else:
splits = list(text)
return [s for s in splits if s != ""]
class ChineseRecursiveTextSplitter(RecursiveCharacterTextSplitter):
def __init__(
self,
separators: Optional[List[str]] = None,
keep_separator: bool = True,
is_separator_regex: bool = True,
**kwargs: Any,
) -> None:
"""Create a new TextSplitter."""
super().__init__(keep_separator=keep_separator, **kwargs)
self._separators = separators or [
"\n\n",
"\n",
"。|!|?",
"\.\s|\!\s|\?\s",
";|;\s",
",|,\s"
]
self._is_separator_regex = is_separator_regex
def _split_text(self, text: str, separators: List[str]) -> List[str]:
"""Split incoming text and return chunks."""
final_chunks = []
# Get appropriate separator to use
separator = separators[-1]
new_separators = []
for i, _s in enumerate(separators):
_separator = _s if self._is_separator_regex else re.escape(_s)
if _s == "":
separator = _s
break
if re.search(_separator, text):
separator = _s
new_separators = separators[i + 1:]
break
_separator = separator if self._is_separator_regex else re.escape(separator)
splits = _split_text_with_regex_from_end(text, _separator, self._keep_separator)
# Now go merging things, recursively splitting longer texts.
_good_splits = []
_separator = "" if self._keep_separator else separator
for s in splits:
if self._length_function(s) < self._chunk_size:
_good_splits.append(s)
else:
if _good_splits:
merged_text = self._merge_splits(_good_splits, _separator)
final_chunks.extend(merged_text)
_good_splits = []
if not new_separators:
final_chunks.append(s)
else:
other_info = self._split_text(s, new_separators)
final_chunks.extend(other_info)
if _good_splits:
merged_text = self._merge_splits(_good_splits, _separator)
final_chunks.extend(merged_text)
return [re.sub(r"\n{2,}", "\n", chunk.strip()) for chunk in final_chunks if chunk.strip()!=""]
if __name__ == "__main__":
text_splitter = ChineseRecursiveTextSplitter(
        keep_separator=True,  # keep the separators in the output
        is_separator_regex=True,  # whether the separators are regular expressions
        chunk_size=200,  # maximum length of each chunk
chunk_overlap=0
)
ls = [
"""
标题
全球可再生能源趋势
简介
近年来,全球能源格局正在发生重大变革。随着对气候变化和化石燃料有限性的担忧不断增长,世界正在将焦点转向可再生能源。这份简要报告旨在提供当前全球可再生能源趋势的概述。
关键点
太阳能迅猛增长: 太阳能在过去十年中取得了显著增长。成本下降,太阳能电池板效率提高,政府激励措施都促进了这一增长。
风能扩张: 风能是另一个有前景的领域。离岸风电场越来越普及,风力涡轮机变得更加高效和具有成本效益。
能源储存解决方案: 鉴于可再生能源如太阳能和风能的不确定性,能源储存解决方案,如先进的电池,对于电网的稳定性和可靠性至关重要。
新兴技术: 在潮汐和地热能源等领域的研究和开发正在为清洁能源发电开辟新的可能性。
政府政策: 许多国家的政府正在实施促进可再生能源的政策,包括补贴、税收激励措施和减排目标。
挑战
间歇性: 太阳能和风能等可再生能源的不可预测性为持续供能带来了挑战。
基础设施投资: 转向可再生能源需要大量的基础设施投资,包括电网升级和新的能源储存设施。
公众认知: 说服公众可再生能源的益处和可行性至关重要。
结论
全球转向可再生能源是在应对气候变化方面的一个令人鼓舞的趋势。然而,在将可再生能源整合到现有能源基础设施方面仍然存在挑战。持续的研究、投资和公众支持对于实现可持续能源未来至关重要。
""",
]
# text = """"""
for inum, text in enumerate(ls):
print(inum)
chunks = text_splitter.split_text(text)
num = 1
for chunk in chunks:
print("chunk_num_", num, end=" ")
print(chunk)
num += 1
| [] |
2024-01-10 | Techiral/A-Z-Python-Projects | C~chatgpt-based-voice-assistant~voice-assistant.py | import speech_recognition as sr
import pyttsx3
import os
from dotenv import load_dotenv
import openai
load_dotenv()
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
def speak_text(text):
engine = pyttsx3.init()
engine.say(text)
engine.runAndWait()
intro_message = "Hello, I am your voice assistant. How can I assist you today?"
speak_text(intro_message)
recognizer = sr.Recognizer()
def record_and_transcribe():
while True:
try:
with sr.Microphone() as source:
recognizer.adjust_for_ambient_noise(source, duration=0.2)
print("Listening...")
audio = recognizer.listen(source)
user_input = recognizer.recognize_google(audio)
return user_input
except sr.RequestError as e:
print(f"Could not request results: {e}")
except sr.UnknownValueError:
print("Sorry, I didn't catch that.")
def get_response(user_input, chat_history):
chat_history.append({"role": "user", "content": user_input})
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=chat_history,
max_tokens=50,
n=1,
stop=None,
temperature=0.7,
)
assistant_response = response.choices[0].message.content
return assistant_response
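# Note (added): get_response appends only the user's message to chat_history; the assistant's
# replies are never added back, so on each turn the model sees the system message plus the
# accumulated user inputs but not its own earlier answers. Appending
# {"role": "assistant", "content": assistant_response} after each call would be one way to
# give it full conversational memory.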
chat_history = [{"role": "system", "content": "You are now chatting with a voice assistant."}]
while True:
user_input = record_and_transcribe()
print(f"User: {user_input}")
input_length = len(user_input)
assistant_response = get_response(user_input, chat_history)
final_response = f"Your input was {input_length} characters long. Assistant: {assistant_response}"
print(f"Assistant: {final_response}")
speak_text(final_response)
| [
"You are now chatting with a voice assistant."
] |
2024-01-10 | pbhu1024/gpt_index | gpt_index~composability~graph.py | """Composability graphs."""
import json
from typing import Any, Dict, List, Optional, Type, Union
from gpt_index.data_structs.data_structs import IndexStruct
from gpt_index.data_structs.struct_type import IndexStructType
from gpt_index.docstore import DocumentStore
from gpt_index.embeddings.base import BaseEmbedding
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.base import BaseGPTIndex
from gpt_index.indices.empty.base import GPTEmptyIndex
from gpt_index.indices.keyword_table.base import GPTKeywordTableIndex
from gpt_index.indices.knowledge_graph.base import GPTKnowledgeGraphIndex
from gpt_index.indices.list.base import GPTListIndex
from gpt_index.indices.prompt_helper import PromptHelper
from gpt_index.indices.query.query_runner import QueryRunner
from gpt_index.indices.query.schema import QueryBundle, QueryConfig
from gpt_index.indices.registry import IndexRegistry
from gpt_index.indices.struct_store.sql import GPTSQLStructStoreIndex
from gpt_index.indices.tree.base import GPTTreeIndex
from gpt_index.indices.vector_store.base import GPTVectorStoreIndex
from gpt_index.indices.vector_store.vector_indices import (
GPTChromaIndex,
GPTFaissIndex,
GPTPineconeIndex,
GPTQdrantIndex,
GPTSimpleVectorIndex,
GPTWeaviateIndex,
)
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.response.schema import Response
# TMP: refactor query config type
QUERY_CONFIG_TYPE = Union[Dict, QueryConfig]
# this is a map from type to outer index class
# we extract the type_to_struct and type_to_query
# fields from the index class
DEFAULT_INDEX_REGISTRY_MAP: Dict[IndexStructType, Type[BaseGPTIndex]] = {
IndexStructType.TREE: GPTTreeIndex,
IndexStructType.LIST: GPTListIndex,
IndexStructType.KEYWORD_TABLE: GPTKeywordTableIndex,
IndexStructType.SIMPLE_DICT: GPTSimpleVectorIndex,
IndexStructType.DICT: GPTFaissIndex,
IndexStructType.WEAVIATE: GPTWeaviateIndex,
IndexStructType.PINECONE: GPTPineconeIndex,
IndexStructType.QDRANT: GPTQdrantIndex,
IndexStructType.CHROMA: GPTChromaIndex,
IndexStructType.VECTOR_STORE: GPTVectorStoreIndex,
IndexStructType.SQL: GPTSQLStructStoreIndex,
IndexStructType.KG: GPTKnowledgeGraphIndex,
IndexStructType.EMPTY: GPTEmptyIndex,
}
def _get_default_index_registry() -> IndexRegistry:
"""Get default index registry."""
index_registry = IndexRegistry()
for index_type, index_class in DEFAULT_INDEX_REGISTRY_MAP.items():
index_registry.type_to_struct[index_type] = index_class.index_struct_cls
index_registry.type_to_query[index_type] = index_class.get_query_map()
return index_registry
def _safe_get_index_struct(
docstore: DocumentStore, index_struct_id: str
) -> IndexStruct:
"""Try get index struct."""
index_struct = docstore.get_document(index_struct_id)
if not isinstance(index_struct, IndexStruct):
raise ValueError("Invalid `index_struct_id` - must be an IndexStruct")
return index_struct
class ComposableGraph:
"""Composable graph."""
def __init__(
self,
docstore: DocumentStore,
index_registry: IndexRegistry,
index_struct: IndexStruct,
llm_predictor: Optional[LLMPredictor] = None,
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[BaseEmbedding] = None,
chunk_size_limit: Optional[int] = None,
) -> None:
"""Init params."""
self._docstore = docstore
self._index_registry = index_registry
# this represents the "root" index struct
self._index_struct = index_struct
self._llm_predictor = llm_predictor or LLMPredictor()
self._prompt_helper = prompt_helper or PromptHelper.from_llm_predictor(
self._llm_predictor, chunk_size_limit=chunk_size_limit
)
self._embed_model = embed_model or OpenAIEmbedding()
@classmethod
    def build_from_index(cls, index: BaseGPTIndex) -> "ComposableGraph":
        """Build from index."""
        return cls(
index.docstore,
index.index_registry,
# this represents the "root" index struct
index.index_struct,
llm_predictor=index.llm_predictor,
prompt_helper=index.prompt_helper,
embed_model=index.embed_model,
)
def query(
self,
query_str: Union[str, QueryBundle],
query_configs: Optional[List[QUERY_CONFIG_TYPE]] = None,
llm_predictor: Optional[LLMPredictor] = None,
) -> Response:
"""Query the index."""
# go over all the indices and create a registry
llm_predictor = llm_predictor or self._llm_predictor
query_runner = QueryRunner(
llm_predictor,
self._prompt_helper,
self._embed_model,
self._docstore,
self._index_registry,
query_configs=query_configs,
recursive=True,
)
return query_runner.query(query_str, self._index_struct)
async def aquery(
self,
query_str: Union[str, QueryBundle],
query_configs: Optional[List[QUERY_CONFIG_TYPE]] = None,
llm_predictor: Optional[LLMPredictor] = None,
) -> Response:
"""Query the index."""
# go over all the indices and create a registry
llm_predictor = llm_predictor or self._llm_predictor
query_runner = QueryRunner(
llm_predictor,
self._prompt_helper,
self._embed_model,
self._docstore,
self._index_registry,
query_configs=query_configs,
recursive=True,
)
return await query_runner.aquery(query_str, self._index_struct)
def get_index(
self, index_struct_id: str, index_cls: Type[BaseGPTIndex], **kwargs: Any
) -> BaseGPTIndex:
"""Get index."""
index_struct = _safe_get_index_struct(self._docstore, index_struct_id)
return index_cls(
index_struct=index_struct,
docstore=self._docstore,
index_registry=self._index_registry,
**kwargs
)
@classmethod
def load_from_string(cls, index_string: str, **kwargs: Any) -> "ComposableGraph":
"""Load index from string (in JSON-format).
This method loads the index from a JSON string. The index data
structure itself is preserved completely. If the index is defined over
subindices, those subindices will also be preserved (and subindices of
those subindices, etc.).
Args:
            index_string (str): The JSON string representing the index.
Returns:
BaseGPTIndex: The loaded index.
"""
result_dict = json.loads(index_string)
# TODO: this is hardcoded for now, allow it to be specified by the user
index_registry = _get_default_index_registry()
docstore = DocumentStore.load_from_dict(
result_dict["docstore"], index_registry.type_to_struct
)
index_struct = _safe_get_index_struct(docstore, result_dict["index_struct_id"])
return cls(docstore, index_registry, index_struct, **kwargs)
@classmethod
def load_from_disk(cls, save_path: str, **kwargs: Any) -> "ComposableGraph":
"""Load index from disk.
This method loads the index from a JSON file stored on disk. The index data
structure itself is preserved completely. If the index is defined over
subindices, those subindices will also be preserved (and subindices of
those subindices, etc.).
Args:
save_path (str): The save_path of the file.
Returns:
BaseGPTIndex: The loaded index.
"""
with open(save_path, "r") as f:
file_contents = f.read()
return cls.load_from_string(file_contents, **kwargs)
def save_to_string(self, **save_kwargs: Any) -> str:
"""Save to string.
        This method serializes the index into a JSON string.
        Returns:
            str: The JSON string representation of the index.
"""
out_dict: Dict[str, Any] = {
"index_struct_id": self._index_struct.get_doc_id(),
"docstore": self._docstore.serialize_to_dict(),
}
return json.dumps(out_dict)
def save_to_disk(self, save_path: str, **save_kwargs: Any) -> None:
"""Save to file.
This method stores the index into a JSON file stored on disk.
Args:
save_path (str): The save_path of the file.
"""
index_string = self.save_to_string(**save_kwargs)
with open(save_path, "w") as f:
f.write(index_string)
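# Illustrative sketch (added, not part of the original module): a typical round trip with
# ComposableGraph, assuming `index` is an existing BaseGPTIndex instance and the file name
# is a placeholder.
#   graph = ComposableGraph.build_from_index(index)
#   graph.save_to_disk("graph.json")
#   graph = ComposableGraph.load_from_disk("graph.json")
#   response = graph.query("What is this collection about?")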
| [] |
2024-01-10 | pbhu1024/gpt_index | gpt_index~prompts~default_prompts.py | """Set of default prompts."""
from gpt_index.prompts.prompts import (
KeywordExtractPrompt,
KnowledgeGraphPrompt,
QueryKeywordExtractPrompt,
QuestionAnswerPrompt,
RefinePrompt,
RefineTableContextPrompt,
SchemaExtractPrompt,
SimpleInputPrompt,
SummaryPrompt,
TableContextPrompt,
TextToSQLPrompt,
TreeInsertPrompt,
TreeSelectMultiplePrompt,
TreeSelectPrompt,
)
############################################
# Tree
############################################
DEFAULT_SUMMARY_PROMPT_TMPL = (
"Write a summary of the following. Try to use only the "
"information provided. "
"Try to include as many key details as possible.\n"
"\n"
"\n"
"{context_str}\n"
"\n"
"\n"
'SUMMARY:"""\n'
)
DEFAULT_SUMMARY_PROMPT = SummaryPrompt(DEFAULT_SUMMARY_PROMPT_TMPL)
# insert prompts
DEFAULT_INSERT_PROMPT_TMPL = (
"Context information is below. It is provided in a numbered list "
"(1 to {num_chunks}),"
"where each item in the list corresponds to a summary.\n"
"---------------------\n"
"{context_list}"
"---------------------\n"
"Given the context information, here is a new piece of "
"information: {new_chunk_text}\n"
"Answer with the number corresponding to the summary that should be updated. "
"The answer should be the number corresponding to the "
"summary that is most relevant to the question.\n"
)
DEFAULT_INSERT_PROMPT = TreeInsertPrompt(DEFAULT_INSERT_PROMPT_TMPL)
# # single choice
DEFAULT_QUERY_PROMPT_TMPL = (
"Some choices are given below. It is provided in a numbered list "
"(1 to {num_chunks}),"
"where each item in the list corresponds to a summary.\n"
"---------------------\n"
"{context_list}"
"\n---------------------\n"
"Using only the choices above and not prior knowledge, return "
"the choice that is most relevant to the question: '{query_str}'\n"
"Provide choice in the following format: 'ANSWER: <number>' and explain why "
"this summary was selected in relation to the question.\n"
)
DEFAULT_QUERY_PROMPT = TreeSelectPrompt(DEFAULT_QUERY_PROMPT_TMPL)
# multiple choice
DEFAULT_QUERY_PROMPT_MULTIPLE_TMPL = (
"Some choices are given below. It is provided in a numbered "
"list (1 to {num_chunks}), "
"where each item in the list corresponds to a summary.\n"
"---------------------\n"
"{context_list}"
"\n---------------------\n"
"Using only the choices above and not prior knowledge, return the top choices "
"(no more than {branching_factor}, ranked by most relevant to least) that "
"are most relevant to the question: '{query_str}'\n"
"Provide choices in the following format: 'ANSWER: <numbers>' and explain why "
"these summaries were selected in relation to the question.\n"
)
DEFAULT_QUERY_PROMPT_MULTIPLE = TreeSelectMultiplePrompt(
DEFAULT_QUERY_PROMPT_MULTIPLE_TMPL
)
DEFAULT_REFINE_PROMPT_TMPL = (
"The original question is as follows: {query_str}\n"
"We have provided an existing answer: {existing_answer}\n"
"We have the opportunity to refine the existing answer "
"(only if needed) with some more context below.\n"
"------------\n"
"{context_msg}\n"
"------------\n"
"Given the new context, refine the original answer to better "
"answer the question. "
"If the context isn't useful, return the original answer."
)
DEFAULT_REFINE_PROMPT = RefinePrompt(DEFAULT_REFINE_PROMPT_TMPL)
DEFAULT_TEXT_QA_PROMPT_TMPL = (
"Context information is below. \n"
"---------------------\n"
"{context_str}"
"\n---------------------\n"
"Given the context information and not prior knowledge, "
"answer the question: {query_str}\n"
)
DEFAULT_TEXT_QA_PROMPT = QuestionAnswerPrompt(DEFAULT_TEXT_QA_PROMPT_TMPL)
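# Illustrative note (added): at query time the two template variables are filled in by the
# LLM predictor, roughly as
#   DEFAULT_TEXT_QA_PROMPT.format(context_str=retrieved_text, query_str=user_question)
# assuming Prompt.format accepts the template variables as keyword arguments, as the usage
# in gpt_index/langchain_helpers/chain_wrapper.py suggests.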
############################################
# Keyword Table
############################################
DEFAULT_KEYWORD_EXTRACT_TEMPLATE_TMPL = (
"Some text is provided below. Given the text, extract up to {max_keywords} "
"keywords from the text. Avoid stopwords."
"---------------------\n"
"{text}\n"
"---------------------\n"
"Provide keywords in the following comma-separated format: 'KEYWORDS: <keywords>'\n"
)
DEFAULT_KEYWORD_EXTRACT_TEMPLATE = KeywordExtractPrompt(
DEFAULT_KEYWORD_EXTRACT_TEMPLATE_TMPL
)
# NOTE: the keyword extraction for queries can be the same as
# the one used to build the index, but here we tune it to see if performance is better.
DEFAULT_QUERY_KEYWORD_EXTRACT_TEMPLATE_TMPL = (
"A question is provided below. Given the question, extract up to {max_keywords} "
"keywords from the text. Focus on extracting the keywords that we can use "
"to best lookup answers to the question. Avoid stopwords.\n"
"---------------------\n"
"{question}\n"
"---------------------\n"
"Provide keywords in the following comma-separated format: 'KEYWORDS: <keywords>'\n"
)
DEFAULT_QUERY_KEYWORD_EXTRACT_TEMPLATE = QueryKeywordExtractPrompt(
DEFAULT_QUERY_KEYWORD_EXTRACT_TEMPLATE_TMPL
)
############################################
# Structured Store
############################################
DEFAULT_SCHEMA_EXTRACT_TMPL = (
"We wish to extract relevant fields from an unstructured text chunk into "
"a structured schema. We first provide the unstructured text, and then "
"we provide the schema that we wish to extract. "
"-----------text-----------\n"
"{text}\n"
"-----------schema-----------\n"
"{schema}\n"
"---------------------\n"
"Given the text and schema, extract the relevant fields from the text in "
"the following format: "
"field1: <value>\nfield2: <value>\n...\n\n"
"If a field is not present in the text, don't include it in the output."
"If no fields are present in the text, return a blank string.\n"
"Fields: "
)
DEFAULT_SCHEMA_EXTRACT_PROMPT = SchemaExtractPrompt(DEFAULT_SCHEMA_EXTRACT_TMPL)
# NOTE: taken from langchain and adapted
# https://tinyurl.com/b772sd77
DEFAULT_TEXT_TO_SQL_TMPL = (
"Given an input question, first create a syntactically correct SQL query "
"to run, then look at the results of the query and return the answer.\n"
"Use the following format:\n"
'Question: "Question here"\n'
'SQLQuery: "SQL Query to run"\n'
"The following is a schema of the table:\n"
"---------------------\n"
"{schema}\n"
"---------------------\n"
"Question: {query_str}\n"
"SQLQuery: "
)
DEFAULT_TEXT_TO_SQL_PROMPT = TextToSQLPrompt(DEFAULT_TEXT_TO_SQL_TMPL)
# NOTE: by partially filling schema, we can reduce to a QuestionAnswer prompt
# that we can feed to our table
DEFAULT_TABLE_CONTEXT_TMPL = (
"We have provided a table schema below. "
"---------------------\n"
"{schema}\n"
"---------------------\n"
"We have also provided context information below. "
"{context_str}\n"
"---------------------\n"
"Given the context information and the table schema, "
"give a response to the following task: {query_str}"
)
DEFAULT_TABLE_CONTEXT_QUERY = (
"Provide a high-level description of the table, "
"as well as a description of each column in the table. "
"Provide answers in the following format:\n"
"TableDescription: <description>\n"
"Column1Description: <description>\n"
"Column2Description: <description>\n"
"...\n\n"
)
DEFAULT_TABLE_CONTEXT_PROMPT = TableContextPrompt(DEFAULT_TABLE_CONTEXT_TMPL)
# NOTE: by partially filling schema, we can reduce to a RefinePrompt
# that we can feed to our table
DEFAULT_REFINE_TABLE_CONTEXT_TMPL = (
"We have provided a table schema below. "
"---------------------\n"
"{schema}\n"
"---------------------\n"
"We have also provided some context information below. "
"{context_msg}\n"
"---------------------\n"
"Given the context information and the table schema, "
"give a response to the following task: {query_str}\n"
"We have provided an existing answer: {existing_answer}\n"
"Given the new context, refine the original answer to better "
"answer the question. "
"If the context isn't useful, return the original answer."
)
DEFAULT_REFINE_TABLE_CONTEXT_PROMPT = RefineTableContextPrompt(
DEFAULT_REFINE_TABLE_CONTEXT_TMPL
)
############################################
# Knowledge-Graph Table
############################################
DEFAULT_KG_TRIPLET_EXTRACT_TMPL = (
"Some text is provided below. Given the text, extract up to "
"{max_knowledge_triplets} "
"knowledge triplets in the form of (subject, predicate, object). Avoid stopwords.\n"
"---------------------\n"
"Example:"
"Text: Alice is Bob's mother."
"Triplets:\n(Alice, is mother of, Bob)\n"
"Text: Philz is a coffee shop founded in Berkeley in 1982.\n"
"Triplets:\n"
"(Philz, is, coffee shop)\n"
"(Philz, founded in, Berkeley)\n"
"(Philz, founded in, 1982)\n"
"---------------------\n"
"Text: {text}\n"
"Triplets:\n"
)
DEFAULT_KG_TRIPLET_EXTRACT_PROMPT = KnowledgeGraphPrompt(
DEFAULT_KG_TRIPLET_EXTRACT_TMPL
)
############################################
# HYDE
##############################################
HYDE_TMPL = (
"Please write a passage to answer the question\n"
"Try to include as many key details as possible.\n"
"\n"
"\n"
"{context_str}\n"
"\n"
"\n"
'Passage:"""\n'
)
DEFAULT_HYDE_PROMPT = SummaryPrompt(HYDE_TMPL)
############################################
# Simple Input
############################################
DEFAULT_SIMPLE_INPUT_TMPL = "{query_str}"
DEFAULT_SIMPLE_INPUT_PROMPT = SimpleInputPrompt(DEFAULT_SIMPLE_INPUT_TMPL)
| [
"Context information is below. It is provided in a numbered list (1 to {num_chunks}),where each item in the list corresponds to a summary.\n---------------------\n{context_list}---------------------\nGiven the context information, here is a new piece of information: {new_chunk_text}\nAnswer with the number corresponding to the summary that should be updated. The answer should be the number corresponding to the summary that is most relevant to the question.\n",
"Some choices are given below. It is provided in a numbered list (1 to {num_chunks}), where each item in the list corresponds to a summary.\n---------------------\n{context_list}\n---------------------\nUsing only the choices above and not prior knowledge, return the top choices (no more than {branching_factor}, ranked by most relevant to least) that are most relevant to the question: '{query_str}'\nProvide choices in the following format: 'ANSWER: <numbers>' and explain why these summaries were selected in relation to the question.\n",
"Some text is provided below. Given the text, extract up to {max_keywords} keywords from the text. Avoid stopwords.---------------------\n{text}\n---------------------\nProvide keywords in the following comma-separated format: 'KEYWORDS: <keywords>'\n",
"Context information is below. \n---------------------\n{context_str}\n---------------------\nGiven the context information and not prior knowledge, answer the question: {query_str}\n",
"A question is provided below. Given the question, extract up to {max_keywords} keywords from the text. Focus on extracting the keywords that we can use to best lookup answers to the question. Avoid stopwords.\n---------------------\n{question}\n---------------------\nProvide keywords in the following comma-separated format: 'KEYWORDS: <keywords>'\n",
"Some choices are given below. It is provided in a numbered list (1 to {num_chunks}),where each item in the list corresponds to a summary.\n---------------------\n{context_list}\n---------------------\nUsing only the choices above and not prior knowledge, return the choice that is most relevant to the question: '{query_str}'\nProvide choice in the following format: 'ANSWER: <number>' and explain why this summary was selected in relation to the question.\n",
"The original question is as follows: {query_str}\nWe have provided an existing answer: {existing_answer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context_msg}\n------------\nGiven the new context, refine the original answer to better answer the question. If the context isn't useful, return the original answer.",
"Write a summary of the following. Try to use only the information provided. Try to include as many key details as possible.\n\n\n{context_str}\n\n\nSUMMARY:\"\"\"\n"
] |
2024-01-10 | pbhu1024/gpt_index | gpt_index~langchain_helpers~memory_wrapper.py | """Langchain memory wrapper (for LlamaIndex)."""
from typing import Any, Dict, List, Optional
from langchain.memory.chat_memory import BaseChatMemory
from langchain.schema import AIMessage
from langchain.schema import BaseMemory as Memory
from langchain.schema import BaseMessage, HumanMessage
from pydantic import Field
from gpt_index.indices.base import BaseGPTIndex
from gpt_index.readers.schema.base import Document
from gpt_index.utils import get_new_id
def get_prompt_input_key(inputs: Dict[str, Any], memory_variables: List[str]) -> str:
"""Get prompt input key.
Copied over from langchain.
"""
# "stop" is a special key that can be passed as input but is not used to
# format the prompt.
prompt_input_keys = list(set(inputs).difference(memory_variables + ["stop"]))
if len(prompt_input_keys) != 1:
raise ValueError(f"One input key expected got {prompt_input_keys}")
return prompt_input_keys[0]
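# Illustrative example (added): with memory_variables == ["history"], the single remaining
# input key is returned, e.g.
#   get_prompt_input_key({"input": "hi", "history": "..."}, ["history"])  -> "input"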
class GPTIndexMemory(Memory):
"""Langchain memory wrapper (for LlamaIndex).
Args:
human_prefix (str): Prefix for human input. Defaults to "Human".
ai_prefix (str): Prefix for AI output. Defaults to "AI".
memory_key (str): Key for memory. Defaults to "history".
index (BaseGPTIndex): LlamaIndex instance.
query_kwargs (Dict[str, Any]): Keyword arguments for LlamaIndex query.
input_key (Optional[str]): Input key. Defaults to None.
output_key (Optional[str]): Output key. Defaults to None.
"""
human_prefix: str = "Human"
ai_prefix: str = "AI"
memory_key: str = "history"
index: BaseGPTIndex
query_kwargs: Dict = Field(default_factory=dict)
output_key: Optional[str] = None
input_key: Optional[str] = None
@property
def memory_variables(self) -> List[str]:
"""Return memory variables."""
return [self.memory_key]
def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str:
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
return prompt_input_key
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return key-value pairs given the text input to the chain."""
prompt_input_key = self._get_prompt_input_key(inputs)
query_str = inputs[prompt_input_key]
# TODO: wrap in prompt
# TODO: add option to return the raw text
# NOTE: currently it's a hack
response = self.index.query(query_str, **self.query_kwargs)
return {self.memory_key: str(response)}
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save the context of this model run to memory."""
prompt_input_key = self._get_prompt_input_key(inputs)
if self.output_key is None:
if len(outputs) != 1:
raise ValueError(f"One output key expected, got {outputs.keys()}")
output_key = list(outputs.keys())[0]
else:
output_key = self.output_key
human = f"{self.human_prefix}: " + inputs[prompt_input_key]
ai = f"{self.ai_prefix}: " + outputs[output_key]
doc_text = "\n".join([human, ai])
doc = Document(text=doc_text)
self.index.insert(doc)
def clear(self) -> None:
"""Clear memory contents."""
pass
def __repr__(self) -> str:
"""Return representation."""
return "GPTIndexMemory()"
class GPTIndexChatMemory(BaseChatMemory):
"""Langchain chat memory wrapper (for LlamaIndex).
Args:
human_prefix (str): Prefix for human input. Defaults to "Human".
ai_prefix (str): Prefix for AI output. Defaults to "AI".
memory_key (str): Key for memory. Defaults to "history".
index (BaseGPTIndex): LlamaIndex instance.
query_kwargs (Dict[str, Any]): Keyword arguments for LlamaIndex query.
input_key (Optional[str]): Input key. Defaults to None.
output_key (Optional[str]): Output key. Defaults to None.
"""
human_prefix: str = "Human"
ai_prefix: str = "AI"
memory_key: str = "history"
index: BaseGPTIndex
query_kwargs: Dict = Field(default_factory=dict)
output_key: Optional[str] = None
input_key: Optional[str] = None
return_source: bool = False
id_to_message: Dict[str, BaseMessage] = Field(default_factory=dict)
@property
def memory_variables(self) -> List[str]:
"""Return memory variables."""
return [self.memory_key]
def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str:
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
return prompt_input_key
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return key-value pairs given the text input to the chain."""
prompt_input_key = self._get_prompt_input_key(inputs)
query_str = inputs[prompt_input_key]
response_obj = self.index.query(query_str, **self.query_kwargs)
if self.return_source:
source_nodes = response_obj.source_nodes
if self.return_messages:
# get source messages from ids
source_ids = [sn.doc_id for sn in source_nodes]
source_messages = [
m for id, m in self.id_to_message.items() if id in source_ids
]
# NOTE: type List[BaseMessage]
response: Any = source_messages
else:
source_texts = [sn.source_text for sn in source_nodes]
response = "\n\n".join(source_texts)
else:
response = str(response_obj)
return {self.memory_key: response}
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save the context of this model run to memory."""
prompt_input_key = self._get_prompt_input_key(inputs)
if self.output_key is None:
if len(outputs) != 1:
raise ValueError(f"One output key expected, got {outputs.keys()}")
output_key = list(outputs.keys())[0]
else:
output_key = self.output_key
# a bit different than existing langchain implementation
# because we want to track id's for messages
human_message = HumanMessage(content=inputs[prompt_input_key])
human_message_id = get_new_id(set(self.id_to_message.keys()))
ai_message = AIMessage(content=outputs[output_key])
ai_message_id = get_new_id(
set(self.id_to_message.keys()).union({human_message_id})
)
self.chat_memory.messages.append(human_message)
self.chat_memory.messages.append(ai_message)
self.id_to_message[human_message_id] = human_message
self.id_to_message[ai_message_id] = ai_message
human_txt = f"{self.human_prefix}: " + inputs[prompt_input_key]
ai_txt = f"{self.ai_prefix}: " + outputs[output_key]
human_doc = Document(text=human_txt, doc_id=human_message_id)
ai_doc = Document(text=ai_txt, doc_id=ai_message_id)
self.index.insert(human_doc)
self.index.insert(ai_doc)
def clear(self) -> None:
"""Clear memory contents."""
pass
def __repr__(self) -> str:
"""Return representation."""
return "GPTIndexMemory()"
| [] |
2024-01-10 | pbhu1024/gpt_index | gpt_index~langchain_helpers~chain_wrapper.py | """Wrapper functions around an LLM chain."""
import logging
from dataclasses import dataclass
from typing import Any, Generator, Optional, Tuple
import openai
from langchain import Cohere, LLMChain, OpenAI
from langchain.llms import AI21
from langchain.llms.base import BaseLLM
from gpt_index.constants import MAX_CHUNK_SIZE, NUM_OUTPUTS
from gpt_index.prompts.base import Prompt
from gpt_index.utils import (
ErrorToRetry,
globals_helper,
retry_on_exceptions_with_backoff,
)
@dataclass
class LLMMetadata:
"""LLM metadata.
We extract this metadata to help with our prompts.
"""
max_input_size: int = MAX_CHUNK_SIZE
num_output: int = NUM_OUTPUTS
def _get_llm_metadata(llm: BaseLLM) -> LLMMetadata:
"""Get LLM metadata from llm."""
if not isinstance(llm, BaseLLM):
raise ValueError("llm must be an instance of langchain.llms.base.LLM")
if isinstance(llm, OpenAI):
return LLMMetadata(
max_input_size=llm.modelname_to_contextsize(llm.model_name),
num_output=llm.max_tokens,
)
elif isinstance(llm, Cohere):
# TODO: figure out max input size for cohere
return LLMMetadata(num_output=llm.max_tokens)
elif isinstance(llm, AI21):
# TODO: figure out max input size for AI21
return LLMMetadata(num_output=llm.maxTokens)
else:
return LLMMetadata()
def _get_response_gen(openai_response_stream: Generator) -> Generator:
"""Get response generator from openai response stream."""
for response in openai_response_stream:
yield response["choices"][0]["text"]
class LLMPredictor:
"""LLM predictor class.
Wrapper around an LLMChain from Langchain.
Args:
llm (Optional[langchain.llms.base.LLM]): LLM from Langchain to use
for predictions. Defaults to OpenAI's text-davinci-003 model.
Please see `Langchain's LLM Page
<https://langchain.readthedocs.io/en/latest/modules/llms.html>`_
for more details.
retry_on_throttling (bool): Whether to retry on rate limit errors.
Defaults to true.
"""
def __init__(
self, llm: Optional[BaseLLM] = None, retry_on_throttling: bool = True
) -> None:
"""Initialize params."""
self._llm = llm or OpenAI(temperature=0, model_name="text-davinci-003")
self.retry_on_throttling = retry_on_throttling
self._total_tokens_used = 0
self.flag = True
self._last_token_usage: Optional[int] = None
def get_llm_metadata(self) -> LLMMetadata:
"""Get LLM metadata."""
# TODO: refactor mocks in unit tests, this is a stopgap solution
if hasattr(self, "_llm") and self._llm is not None:
return _get_llm_metadata(self._llm)
else:
return LLMMetadata()
def _predict(self, prompt: Prompt, **prompt_args: Any) -> str:
"""Inner predict function.
If retry_on_throttling is true, we will retry on rate limit errors.
"""
llm_chain = LLMChain(
prompt=prompt.get_langchain_prompt(llm=self._llm), llm=self._llm
)
# Note: we don't pass formatted_prompt to llm_chain.predict because
# langchain does the same formatting under the hood
full_prompt_args = prompt.get_full_format_args(prompt_args)
if self.retry_on_throttling:
llm_prediction = retry_on_exceptions_with_backoff(
lambda: llm_chain.predict(**full_prompt_args),
[
ErrorToRetry(openai.error.RateLimitError),
ErrorToRetry(openai.error.ServiceUnavailableError),
ErrorToRetry(openai.error.TryAgain),
ErrorToRetry(
openai.error.APIConnectionError, lambda e: e.should_retry
),
],
)
else:
llm_prediction = llm_chain.predict(**full_prompt_args)
return llm_prediction
def predict(self, prompt: Prompt, **prompt_args: Any) -> Tuple[str, str]:
"""Predict the answer to a query.
Args:
prompt (Prompt): Prompt to use for prediction.
Returns:
Tuple[str, str]: Tuple of the predicted answer and the formatted prompt.
"""
formatted_prompt = prompt.format(llm=self._llm, **prompt_args)
llm_prediction = self._predict(prompt, **prompt_args)
logging.debug(llm_prediction)
# We assume that the value of formatted_prompt is exactly the thing
# eventually sent to OpenAI, or whatever LLM downstream
prompt_tokens_count = self._count_tokens(formatted_prompt)
prediction_tokens_count = self._count_tokens(llm_prediction)
self._total_tokens_used += prompt_tokens_count + prediction_tokens_count
return llm_prediction, formatted_prompt
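    # Illustrative sketch (added, not part of the original class): a minimal prediction call,
    # assuming the default prompts module of this package is importable and an OpenAI key is set.
    #   from gpt_index.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT
    #   predictor = LLMPredictor()
    #   answer, formatted = predictor.predict(
    #       DEFAULT_TEXT_QA_PROMPT,
    #       context_str="Paris is the capital of France.",
    #       query_str="What is the capital of France?")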
def stream(self, prompt: Prompt, **prompt_args: Any) -> Tuple[Generator, str]:
"""Stream the answer to a query.
NOTE: this is a beta feature. Will try to build or use
better abstractions about response handling.
Args:
prompt (Prompt): Prompt to use for prediction.
Returns:
            Tuple[Generator, str]: Tuple of the response generator and the formatted prompt.
"""
if not isinstance(self._llm, OpenAI):
raise ValueError("stream is only supported for OpenAI LLMs")
formatted_prompt = prompt.format(llm=self._llm, **prompt_args)
raw_response_gen = self._llm.stream(formatted_prompt)
response_gen = _get_response_gen(raw_response_gen)
# NOTE/TODO: token counting doesn't work with streaming
return response_gen, formatted_prompt
@property
def total_tokens_used(self) -> int:
"""Get the total tokens used so far."""
return self._total_tokens_used
def _count_tokens(self, text: str) -> int:
tokens = globals_helper.tokenizer(text)
return len(tokens)
@property
def last_token_usage(self) -> int:
"""Get the last token usage."""
if self._last_token_usage is None:
return 0
return self._last_token_usage
@last_token_usage.setter
def last_token_usage(self, value: int) -> None:
"""Set the last token usage."""
self._last_token_usage = value
async def _apredict(self, prompt: Prompt, **prompt_args: Any) -> str:
"""Async inner predict function.
If retry_on_throttling is true, we will retry on rate limit errors.
"""
llm_chain = LLMChain(
prompt=prompt.get_langchain_prompt(llm=self._llm), llm=self._llm
)
# Note: we don't pass formatted_prompt to llm_chain.predict because
# langchain does the same formatting under the hood
full_prompt_args = prompt.get_full_format_args(prompt_args)
# TODO: support retry on throttling
llm_prediction = await llm_chain.apredict(**full_prompt_args)
return llm_prediction
async def apredict(self, prompt: Prompt, **prompt_args: Any) -> Tuple[str, str]:
"""Async predict the answer to a query.
Args:
prompt (Prompt): Prompt to use for prediction.
Returns:
Tuple[str, str]: Tuple of the predicted answer and the formatted prompt.
"""
formatted_prompt = prompt.format(llm=self._llm, **prompt_args)
llm_prediction = await self._apredict(prompt, **prompt_args)
logging.debug(llm_prediction)
# We assume that the value of formatted_prompt is exactly the thing
# eventually sent to OpenAI, or whatever LLM downstream
prompt_tokens_count = self._count_tokens(formatted_prompt)
prediction_tokens_count = self._count_tokens(llm_prediction)
self._total_tokens_used += prompt_tokens_count + prediction_tokens_count
return llm_prediction, formatted_prompt
| [] |
2024-01-10 | bnitsan/essence_backend | server_src~gpt_utils.py | import os
from pathlib import Path
import re
from . import scraping_utils, nlp_utils, general_utils
import numpy as np
import time
import yaml
from retry import retry
import openai
with open("server_src/config.yml", 'r') as ymlfile:
cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)
cfg = cfg["config"]
max_req_to_server = cfg["max_req_to_server"]
qa_model = cfg["QA_MODEL"]
BULLETS_GENERIC_STYLE_NAME = cfg["BULLETS_GENERIC_STYLE_NAME"]
TABULARIZE_STYLE_NAME = cfg["TABULARIZE_STYLE_NAME"]
MAX_GPT_PASSES = cfg["MAX_GPT_PASSES"]
MIN_MARKED_TEXT_LENGTH = cfg["MIN_MARKED_TEXT_LENGTH"]
MULTIPLE_PASSES_MAX_TOKENS = cfg["MULTIPLE_PASSES_MAX_TOKENS"]
CHARS_TO_DECREASE_ON_DECLINE = cfg["chars_to_decrease_on_decline"]
COMPLETION_TIMEOUT = cfg["COMPLETION_TIMEOUT"]
openai.api_key = os.getenv("OPENAI_API_KEY")
if not os.getenv("OPENAI_API_KEY"):
# try to get the key from file in parent folder
with open('../../openai_gpt3_key.txt', 'r') as f:
openai.api_key = f.read()
azure_flag = False
if os.getenv("AZURE_OPENAI_KEY") and os.getenv("AZURE_OPENAI_ENDPOINT"):
openai.api_type = "azure"
openai.api_base = os.getenv("AZURE_OPENAI_ENDPOINT")
openai.api_version = "2023-06-01-preview" # "2023-05-15"
openai.api_key = os.getenv("AZURE_OPENAI_KEY")
azure_flag = True
def parse_gpt_response_bullets(response: str):
"""Converts a string of the form '- bullet1 \n- bullet2' to a [bullet1, bullet2]."""
stripped_s = response.strip()
# remove lines which are whitespace only
stripped_s = re.sub(r'^[\s]*\n', '', stripped_s, flags=re.MULTILINE)
l_dict = []
for j, line in enumerate(stripped_s.split('\n')):
        if len(line) < 3:
            continue
        line = line.strip()
        if len(line) < 2:  # guard against lines that collapse to a single character after stripping
            continue
if line[1].isdigit():
line = line[2:]
else:
line = line.strip()[1:]
# find index of first letter or number in any alphabet
first_letter_index = -1
for i, c in enumerate(line):
if c.isalnum():
first_letter_index = i
break
if first_letter_index == -1:
continue
l_dict.append({'key': j+1, 'value': line[first_letter_index:]})
return l_dict
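# Illustrative example (added): a typical bullet-style completion is mapped to keyed values,
# e.g. parse_gpt_response_bullets("- first point\n- second point")
# -> [{'key': 1, 'value': 'first point'}, {'key': 2, 'value': 'second point'}]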
def look_for_delimiter_inside_paranthesis(s, delimiter, replacement_delimiter):
open_paranthesis_flag = False
new_s = ''
for c in s:
if c == '(':
open_paranthesis_flag = True
elif c == ')':
open_paranthesis_flag = False
if c == delimiter and open_paranthesis_flag:
new_s += replacement_delimiter
continue
new_s += c
return new_s
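# Illustrative example (added): only the delimiter inside the parentheses is masked,
# e.g. look_for_delimiter_inside_paranthesis("Budget (approx: 400): high", ":", "<<CAT>>")
# -> "Budget (approx<<CAT>> 400): high"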
def look_for_delimiter_inside_table(s, table_delimiter, delimiter, replacement_delimiter):
inside_table = False
new_s = ''
prev = 0
for i, c in enumerate(s):
if c == table_delimiter:
inside_table = True
elif c == delimiter and inside_table:
new_s += s[prev:i] + replacement_delimiter
prev = i + 1
new_s += s[prev:]
return new_s
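# Illustrative example (added): a ':' appearing after a table pipe is masked so it is not
# mistaken for a new key, e.g.
# look_for_delimiter_inside_table("Itinerary | Day 1: Manang", "|", ":", "<<CAT>>")
# -> "Itinerary | Day 1<<CAT>> Manang"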
def look_for_delimiter_after_comma(s, delimiter, replacement_delimiter, comma=','):
comma_occ = []
colon_occ = []
for i, c in enumerate(s):
if c == comma:
comma_occ.append(i)
elif c == delimiter:
colon_occ.append(i)
replace_colon = []
for colon_acc_i in colon_occ[::-1]:
while len(comma_occ) > 0 and comma_occ[-1] > colon_acc_i:
comma_occ.pop()
if len(comma_occ) == 0:
break
replace_colon.append(comma_occ[-1])
new_s = ''
prev = 0
for replace_colon_i in replace_colon:
new_s += s[prev:replace_colon_i].strip() + replacement_delimiter
prev = replace_colon_i + 1
new_s += s[prev:].strip()
return new_s
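# Illustrative example (added): a comma that precedes a later ':' is treated as a missing
# line break between two key-value pairs, e.g.
# look_for_delimiter_after_comma("Accommodation: hotel, Budget: 400", ":", "\n")
# -> "Accommodation: hotel\nBudget: 400"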
def is_likely_key_val_list(s):
"""Returns True if the string is likely to be of the form 'key1:\nvalue1\nvalue2\nkey2:value1'"""
if len(s) < 10:
return False
if s.count(':') < 2:
return False
if s.count('\n') < 2:
return False
s = s.strip()
s_lines = s.split('\n')
if s_lines[0].count(':') == 0:
return False
if s_lines[0].split(':')[1].strip() != '':
return False
return True
def parse_gpt_response(s, style='', category_delimiter=':', table_delimiter='|', special_time_token='<<STT>>', special_cat_token='<<CAT>>', special_https_token='<<|HTTP|>>', values_to_drop=['', '-', 'None', 'N/A', 'n/a', 'N/a', 'NA', 'Not available', 'Not Available', 'not available', 'Not available.', 'Not Available.', 'not available.', 'varies', 'Varies', 'Unknown', 'unknown', 'Not provided', 'not mentioned', 'none mentioned']):
"""Converts a string, usually of the form 'key1:value1,key2:value2', to a dictionary/JSON."""
if style == BULLETS_GENERIC_STYLE_NAME:
if not is_likely_key_val_list(s):
if s.strip()[:s.find('\n')].count(':') == 1:
s = '- ' + s.strip()
print('Parsing as bullet points.')
parsed_s = parse_gpt_response_bullets(s)
return parsed_s
# remove incoming '-'. It's a thing that ChatGPT does.
s = '\n'.join([line if not line.startswith('- ') else line[2:] for line in s.splitlines()])
if style == TABULARIZE_STYLE_NAME:
if not s.startswith('Table:'):
s = 'Table:\n' + s
stripped_s = s.strip()
# replace places in stripped_s in which a digit occurs before and after ':' with special_time_token
stripped_s = re.sub(r'(\d+)(:)(\d+)', r'\1'+special_time_token+r'\3', stripped_s)
    # protect ':' inside URLs ('http://', 'https://') by replacing it with special_https_token
url_pattern = re.compile(r'(https?://[^\s:]+(?::\d+)?(?:/[^\s]*)?)', re.IGNORECASE)
stripped_s = url_pattern.sub(lambda match: match.group().replace(":", special_https_token), stripped_s)
# check if string is not a list
if category_delimiter not in stripped_s[0:30]: # If the string does not contain ':' in the first ~30 characters, then it is probably not a list.
return [{'key': ' ', 'value': stripped_s}]
# proceed to parsing of the form {key1: value1, key2: value2, ...}, with values potentially being tables
s_lines = stripped_s.split('\n')
n_lines = len(s_lines)
# if s contains category_delimiter (initially ':'), then it is usually a key-value pair (until next line with ':')
    # EXCEPTIONS: ':' could appear elsewhere, e.g. inside parentheses, or the model may have
    # failed to start a new line. We handle these cases now.
new_s_lines = []
for s_lines_i in s_lines:
        # case 1: check if ':' is in parentheses, i.e. it has one '(' some characters before and one ')' some characters after it.
new_s_line = look_for_delimiter_inside_paranthesis(s_lines_i, category_delimiter, special_cat_token)
# case 1a: check if ':' is in a table, i.e. it has one '|' some characters before it.
new_s_line = look_for_delimiter_inside_table(new_s_line, table_delimiter, category_delimiter, special_cat_token)
# case 2: a ',' that comes before a ':' is probably a mistake - need to replace ',' with '\n'
new_s_line = look_for_delimiter_after_comma(new_s_line, category_delimiter, '\n')
new_s_lines.append(new_s_line)
s_lines = new_s_lines
d = {}
# iterate over the list of strings
last_key = '' # last key - used to store multi-line values
for i in range(n_lines):
# split the string into parts separated by ':'
s_split_i = s_lines[i].split(category_delimiter)
# if the string contains category_delimiter - e.g., ':'
if len(s_split_i) > 1:
last_key = ''
# add the key-value pair to the dictionary
d[s_split_i[0].strip()] = s_split_i[1].strip()
if s_split_i[1].strip() == '':
last_key = s_split_i[0].strip()
d[last_key] = []
elif last_key != '':
# if the string does not contain ':', then it should be a table
# count number of '|' in string s
n_pipes = s_lines[i].count(table_delimiter)
# if s contains '|', then it is a table
if n_pipes > 0:
# split the string into parts separated by '|'
s_split_i = s_lines[i].split(table_delimiter)
# if the string contains '|'
if len(s_split_i) > 1:
# add the key-value pair to the dictionary
d[last_key].append(s_lines[i])
else: # if s does not contain '|', then it is a multi-line value. For now we treat it the same
d[last_key].append(s_lines[i])
# recursively run on d, apply .replace(special_time_token, ':') on all strings
# also apply .replace(special_cat_token, ':'/category_delimiter) on all strings
for key in d:
if isinstance(d[key], list):
for i in range(len(d[key])):
d[key][i] = d[key][i].replace(special_time_token, category_delimiter)
d[key][i] = d[key][i].replace(special_cat_token, category_delimiter)
d[key][i] = d[key][i].replace(special_https_token, category_delimiter)
else:
d[key] = d[key].replace(special_time_token, category_delimiter)
d[key] = d[key].replace(special_cat_token, category_delimiter)
d[key] = d[key].replace(special_https_token, ':')
'''
# split comma-separated values - currently unused.
for key in d:
if isinstance(d[key], list):
d[key] = d[key]
continue
# print(d[key].split(','))
d[key] = [s_i.strip() for s_i in d[key].split(',')]
'''
# "plaster" - replace empty lists by empty strings
for key in d:
if d[key] == []:
d[key] = ''
# split "tables" into lists
for key in d:
if isinstance(d[key], list):
prev_col_len = 0 # we keep track of the number of columns in the previous row. Sometimes the table is not aligned, and we need to add a column to the beginning of the row.
for i in range(len(d[key])):
columns = d[key][i].count(table_delimiter)
if columns < prev_col_len:
d[key][i] = [s_i.strip() for s_i in (' ' + table_delimiter + ' ' + d[key][i]).split(table_delimiter)]
else:
d[key][i] = [s_i.strip() for s_i in d[key][i].split(table_delimiter)]
prev_col_len = columns
# remove "empty" values - i.e. values that are not lists and are in values_to_drop
new_d = {}
for key in d:
if not isinstance(d[key], list):
if d[key] not in values_to_drop:
                # keep the key only when its value carries information
new_d[key] = d[key]
else:
new_d[key] = d[key]
d = new_d
# convert {key: val} to [{key: key, value: val}]
l_dict = []
for key in d:
l_dict.append({'key': key, 'value': d[key]})
return l_dict
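# Illustrative example (added): a simple key-value completion is parsed into a list of
# {'key', 'value'} dicts, e.g.
# parse_gpt_response("Activity name: Santa Cruz trek\nBudget: 400 Soles")
# -> [{'key': 'Activity name', 'value': 'Santa Cruz trek'}, {'key': 'Budget', 'value': '400 Soles'}]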
def get_gpt_prompt(style='travel'):
if style == 'travel':
prompt_title = "You help a traveler design a multi-day or multi-destination itinerary and gather information about a trip. Convert the blog entries to structured data. When writing a table, put different destinations or activities in separate rows.\n"
input_prompt = "Text: "
output_prompt = "\n\nOutput, possible fields {Activity name, Accommodation, Eating, Transportation, Best Seasons, Preparation, Budget, Itinerary table}:\n" # "Structured data:\nActivity:"
example_pairs = [
["We had an amazing time in Peru, especially in Huaraz. We went to the a bunch of day treks and the Santa Cruz trek! It is a 3-day trek in the Andes mountains. In the first day, we walked 4 hours on a rugged trail and camped near a river. In the second day, we had tough 16 kilometers through beautiful terrain. In the third day, we went over a high altitude pass of 5100m, finishing the trek in a small town. We rode on a shared taxi back to Huaraz. The whole thing cost us about 400 Soles.\n\n",
'''Activity name: Santa Cruz trek
Accommodation: camping
Transportation: shared taxi
Budget: 400 Soles
Itinerary table:
Day | Length | Details
1 | 4 hrs | rugged, river camping
2 | 16 km | beautiful terrain
3 | | 5100m pass'''],
["Recommended hotels in France, where mid/high end means over 100 euros per night, budget means less.\n\n",
'''Destination name: France
Hotels:
Location | Name | Details
Paris | Hotel de Crillon | High-end
| Hotel de Ville | Mid-range
Lyon | Comte Ornon | High-range
| Hotel Boutique | Mid-range
Bourdeaux | Hotel de Seze | Mid-range
| Best Western Francais | Budget'''],
["In the last summer I was in Nepal, exploring the Himalayas. In one of the most memorable experiences, I went on the Tilicho lake trek. After hiring a porter at Pokhara, I took a bus to Besi-Sahar. After a day of altitude acclimatization in Manang, enjoying the local food at a simple hotel, I set out at sunrise to Tilicho lake base camp. This day was beautiful but a little dangerous, as some paths suffer from landslide. After another night at a simple hotel, I began the climb to the lake. After about 3 hours and 1000m of climb, I made it to the lake. Boy-oh-boy, the views were amazing! Snow-capped mountains with a far-reaching pristine lake in the middle. The walk was definitely worth it. After climbing down I stopped at base camp for another night with a hearty meal along fellow travelers. In the next day, I hiked back 15 km to Manang and made the trip back to Pokhara.",
'''Activity name: Tilicho lake trek
Accommodation: simple hotels
Transportation: bus to Besi-Sahar
Itinerary table:
Day | Destination | Details
1 | Manang | altitude acclimatization
2 | Base camp | landslide danger
3 | Tilicho lake and back | 3 hours, 1000m climb
4 | Manang | 15km hike
| Pokhara | ''']
]
keywords = ['Santa Cruz', 'Andes', 'Huaraz', '5100m', 'Crillon', 'France', 'Paris', 'Lyon', 'Comte', 'Ornon', 'Western', 'Bourdeaux', 'Seze', 'Tilicho', 'Pokhara', 'Manang', 'Besi-Sahar']
continued_prompt_title = "You help a traveler design a multi-day or multi-destination itinerary and gather information about a trip. You are given the data collected so far and a relevant body of text. You need to use the text to add details and expand the data and output the revised data in the same format. Be informative and succinct.\n"
continued_prev_data_prompt = "\n\nPrevious data:\n"
continued_new_text_prompt = "\n\nNew text:\n"
continued_output_prompt = "\n\nRevised data:\n"
continued_prompt_dict = {
"continued_prompt_title": continued_prompt_title,
"continued_prev_data_prompt": continued_prev_data_prompt,
"continued_new_text_prompt": continued_new_text_prompt,
"continued_output_prompt": continued_output_prompt,
"keywords": ["multi-day", "gather information about a trip", "Be informative and succinct"]}
elif style == 'bizanalytics':
prompt_title = "You are trying to help an analyst appraise businesses and gather information from business news. Convert the following text snippets to structured data.\n"
input_prompt = "Text: "
output_prompt = "\n\nOutput, possible fields include {Main company/ies, Business/service, Valuation, Product, Features, Pricing, Investors, Business decisions/events, Area, Personnel, Challenges}:\n"
example_pairs = [
['''On-demand shuttle and software company Via has raised another $130 million, capital that has pushed its valuation to about $3.3 billion as demand from cities to update its legacy transit systems rises.
The round was led by Janus Henderson with participation from funds and accounts managed by BlackRock, ION Crossover Partners, Koch Disruptive Technologies and existing investor Exor. To date, the company has raised $800 million.
Via, which today employs about 950 people, has two sides to its business. The company operates consumer-facing shuttles in Washington, D.C. and New York. Its underlying software platform, which it sells to cities, transportation authorities, school districts and universities to deploy their own shuttles, is not only the core of its business; it has become the primary driver of growth.
Co-founder and CEO Daniel Ramot previously told TechCrunch that there was little interest from cities in the software-as-a-service platform when the company first launched in 2012. Via landed its first city partnership with Austin in late 2017, after providing the platform to the transit authority for free. It was enough to allow Via to develop case studies and convince other cities to buy into the service. In 2019, the partnerships side of the business “took off,” Ramot said in an interview last year.
Today, the software side — branded internally as TransitTech — has eclipsed its consumer-facing operations. Via said TransitTech revenue more than doubled year on year to exceed an annual run rate of $100 million. The software platform is used by more than 500 partners, including Los Angeles Metro. Jersey City and Miami in the United States as well as Arriva Bus UK, a Deutsche Bahn company that uses it for a first and last-mile service connecting commuters to a high-speed train station in Kent, U.K.
Via doesn’t provide specifics on what it plans to use the funds for. The company has made two acquisitions in the past 18 months, including Fleetonomy in 2020.
Earlier this year, Via used $100 million in cash and equity to acquire a company called RemixCorpTM, a startup that developed mapping software used by cities for transportation planning and street design. The startup became a subsidiary of Via, an arrangement that will let the startup maintain its independent brand.\n\n''',
'''Main company: Via
Business/service: On-demand shuttle, software-as-a-service
Since: 2012
Total funding: $800M
Valuation: $3.3B
Revenue: Doubling YOY
Investors: Janus Henderson, BlackRock, ION Crossover Partners, Koch Disruptive Technologies, Exor
Geography: Washington, D.C., New York, Austin, Los Angeles Metro, Jersey City, Miami, Arriva Bus UK
Clients: over 500
Personnel:
Daniel Ramot | CEO
Employees | 950
Business decisions:
Type | Details
Funding round | $130M
Acquired Fleetonomy | 2020
Acquired RemixCorpTM | $100M, cash and equity, mapping software'''
],
]
keywords = ['Via', 'Daniel Ramot', 'Fleetonomy', 'RemixCorpTM', 'Los Angeles Metro', 'Arriva Bus UK','Deutsche Bahn', 'Janus Henderson', 'BlackRock', 'ION Crossover Partners', 'Koch Disruptive Technologies', 'Exor']
continued_prompt_dict = None
elif style == 'spaper':
prompt_title = "You are trying to help an academic researcher to quickly understand the key points of a scientific paper. In the following, convert each text snippet to structured data.\n"
input_prompt = "Text: "
output_prompt = "\n\nOutput, possible fields {Scientific field, Background, Novelty, Conclusions/Key takeaways, Methods}:\n"
example_pairs = [
['''Ultra-diffuse galaxies that contain a large sample of globular clusters (GCs) offer an opportunity to test the predictions of galactic dynamics theory. NGC5846-UDG1 is an excellent example, with a high-quality sample of dozens of GC candidates. We show that the observed distribution of GCs in NGC5846-UDG1 is suggestive of mass segregation induced by gravitational dynamical friction. We present simple analytic calculations, backed by a series of numerical simulations, that naturally explain the observed present-day pattern of GC masses and radial positions. Subject to some assumptions on the GC population at birth, the analysis supports the possibility that NGC5846-UDG1 resides in a massive dark matter halo. This is an example for the use of GC-rich systems as dynamical (in addition to kinematical) tracers of dark matter.\n\n''' ,
'''Scientific field: Galaxies, globular clusters, dark matter
Background: Ultra-diffuse galaxies that contain a large sample of globular clusters (GCs) offer an opportunity to test the predictions of galactic dynamics theory. NGC5846-UDG1 is an excellent example, with a high-quality sample of dozens of GC candidates.
Novelty: NGC5846-UDG1 has a high-quality sample of dozens of GC candidates and dynamical friction is likely effective in the galaxy
Main conclusion: NGC5846-UDG1 is an example for the use of GC-rich systems as dynamical (in addition to kinematical) tracers of dark matter
Methods: simple analytic calculations, numerical simulations''']]
keywords = ['NGC5846-UDG1', 'galaxies', 'globular clusters', 'dark matter', 'dynamical friction']
continued_prompt_dict = None
elif style == 'spaper_variant':
prompt_title = "You are trying to help an academic researcher to quickly understand the key points of a scientific paper. In the following, convert each text snippet to structured data.\n"
input_prompt = "Text: "
output_prompt = "\n\nOutput, possible fields {Scientific field, Background, Novelty, Conclusions/Key takeaways, Methods}:\n"
example_pairs = [
['''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks that include an encoder and a decoder. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. We implement sequence ordering by using fixed positional encodings. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles, by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.\n\n''' ,
'''Scientific field: Neural networks, machine translation
Background: The dominant sequence transduction models are based on complex recurrent or convolutional neural networks that include an encoder and a decoder. The best performing models also connect the encoder and decoder through an attention mechanism.
Novelty: We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely.
Key achievements: Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task and 41.8 on the WMT 2014 English-to-French translation task, establishing a new single-model state-of-the-art BLEU score. The model generalizes well to other tasks and can be trained relatively fast.
Methods: Attention mechanism, fixed positional encodings, performance on language tasks''']]
keywords = ['attention mechanism', 'Attention mechanism', 'BLEU', 'German', 'neural', 'convolutions', 'Transformer', 'positional encodings']
continued_prompt_dict = None
elif style == 'generic':
prompt_title = "You are trying to help a layperson get a summary with the main background required to understand the following text and the main conclusions that stem from it. The summary should not exceed 8 sentences.\n"
input_prompt = "Text: "
output_prompt = "\n\nSummary:\n"
example_pairs = []
keywords = ['exceed 8 sentences']
continued_prompt_dict = None
elif style == BULLETS_GENERIC_STYLE_NAME:
prompt_title = "Summarize the following text into bullet points. Try to make the bullet points progress in logic, i.e. background would appear before conclusions. Be informative and succinct.\n"
input_prompt = "Text: "
output_prompt = "\n\nBullet points:\n"
example_pairs = []
keywords = ['into bullet points', 'progress in logic']
continued_prompt_title = "You help a user get the essence of a body of text. You are given the bullet points collected so far and a relevant body of text. You need to use the text to add details and expand the bullet points and output the revised bullet points. Try to make the bullet points progress in logic, i.e. background would appear before conclusions. Be informative and succinct. If the new text does not appear to be relevant, you can ignore it and output the previous bullet points."
continued_prev_data_prompt = "\n\nPrevious bullet points:\n"
continued_new_text_prompt = "\n\nNew text:\n"
continued_output_prompt = "\n\nRevised bullet points:\n"
continued_prompt_dict = {
"continued_prompt_title": continued_prompt_title,
"continued_prev_data_prompt": continued_prev_data_prompt,
"continued_new_text_prompt": continued_new_text_prompt,
"continued_output_prompt": continued_output_prompt,
"keywords": ["get the essence of a body", "You are given the bullet points", "Be informative and succinct"]}
elif style == 'criticizepaper':
prompt_title = "You are helping a reviewer review a scientific paper. You are given an excerpt from a paper with the purpose of finding flaws in logic, execution, etc. Summarize your report in bullet points. Try to support your criticism with quotes from the text. If you can't find flaws, do not say any.\n"
input_prompt = "Paper excerpt: "
output_prompt = "\n\nCritical review of flaws in the paper:\n"
example_pairs = []
keywords = []
continued_prompt_dict = None
elif style == 'explain':
prompt_title = 'You are helping someone read complicated text. Given some text, do your best to explain the text in simple terms. Do not drop key aspects of the text.'
input_prompt = 'Text: '
output_prompt = '\n\nExplanation:\n'
example_pairs = []
keywords = []
continued_prompt_dict = None
elif style == 'tabularize':
prompt_title = 'You are helping parse textual data into a table. The table cells should be separated by \'|\' and new lines.'
input_prompt = 'Text: '
output_prompt = '\n\nTable:\n'
example_pairs = [
['''Above limb I(195.12 Å)
(arcsec) erg cm2s−1sr−1
I-P P
0.00 52.45 164.67
1.00 62.02 235.34
2.00 69.19 338.49
3.00 75.52 466.16\n\n''' ,
'''Above limb | I(195.12 Å) |
(arcsec) | erg cm2s−1sr−1 |
| I-P | P
0.00 | 52.45 | 164.67
1.00 | 62.02 | 235.34
2.00 | 69.19 | 338.49
3.00 | 75.52 | 466.16
''']]
keywords = ['parse textual data', 'table cells should be separated by \'|\'']
continued_prompt_dict = None
else:
raise ValueError(f"style {style} not supported")
examples = [input_prompt + example_pair[0] + output_prompt + example_pair[1] for example_pair in example_pairs]
return prompt_title, input_prompt, output_prompt, examples, example_pairs, keywords, continued_prompt_dict
def hijack_and_bad_quality_check(coarse_text: str, response_text: str, keywords: list):
'''
    If any of the keywords appears in response_text but not in coarse_text,
    then response_text was likely hijacked or copied from the few-shot examples and should be discarded.
    Returns True if the response appears hijacked or copied from the examples, otherwise False.
'''
for keyword in keywords:
if keyword in response_text and keyword not in coarse_text:
print('SEEMS TO BE HIJACKED OR COPIED FROM THE EXAMPLES')
print(response_text)
return True
return False
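# Illustrative sketch (the strings and keyword below are invented, not taken from the real prompts)
# of how the hijack check behaves:
#   hijack_and_bad_quality_check(
#       coarse_text="The rocket launched at dawn.",
#       response_text="Itinerary table: Day 1 ...",
#       keywords=["Itinerary table"])    # -> True: keyword appears only in the response
#   hijack_and_bad_quality_check(
#       coarse_text="Day 1: fly to Lima. Itinerary table: ...",
#       response_text="Itinerary table: Day 1 ...",
#       keywords=["Itinerary table"])    # -> False: keyword is present in the source text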
def gpt_response_to_clean_text(response, model):
response_text = ''
if model == 'text-davinci-003' or model == 'text-curie-001':
response_text = response['choices'][0].text
response_text = re.sub(r'\n{2,}', '\n', response_text)
response_text = response_text.strip()
elif model == "gpt-3.5-turbo" or model == "gpt-4":
response_text = response["choices"][-1]["message"]["content"]
response_text = re.sub(r'\n{2,}', '\n', response_text)
response_text = response_text.strip()
return response_text
@retry(exceptions=openai.error.Timeout, tries=4)
def gpt_completion(query_to_model, max_tokens=768, model='text-davinci-003', prev_msgs=[]):
if model == 'text-davinci-003' or model == 'text-curie-001':
print('Operating on ' + model)
return openai.Completion.create(
model=model,
prompt=query_to_model,
temperature=0.7,
max_tokens=max_tokens,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
best_of=1,
request_timeout=COMPLETION_TIMEOUT)
elif model == "gpt-3.5-turbo" or model == "gpt-4":
print('Operating on ' + model)
new_msgs = prev_msgs + [{"role": "user", "content": query_to_model}] if query_to_model != '' else prev_msgs
if azure_flag:
print('Using Azure completion')
return openai.ChatCompletion.create(
engine="essence-gpt35turbo",
messages=new_msgs)
else:
return openai.ChatCompletion.create(
model=model,
messages=new_msgs,
request_timeout=COMPLETION_TIMEOUT)
return None
def get_gpt_response(prompt, coarse_text, output_prompt, keywords, model, initial_char_index=0, final_char_index=10000, max_tokens=768):
final_char_index = min(final_char_index, len(coarse_text))
    successful_response = False
    number_of_attempts = 0
    while not successful_response and number_of_attempts < max_req_to_server and final_char_index > initial_char_index:
        number_of_attempts += 1
text_to_decode = coarse_text[initial_char_index:final_char_index] # limit the text, since models are limited to 4k tokens
query_to_model = prompt + text_to_decode + output_prompt
try:
response = gpt_completion(query_to_model, max_tokens=max_tokens, model=model)
response_text = gpt_response_to_clean_text(response, model)
successful_response = True
except openai.error.Timeout:
time.sleep(0.3)
print('BIG Timeout. Trying again.')
continue
except Exception as e:
print(e)
final_char_index -= CHARS_TO_DECREASE_ON_DECLINE
print('Decreasing amount of tokens. New final_char_index: ', final_char_index)
time.sleep(0.3) # wait a little, to not to overquery the API
    if number_of_attempts == max_req_to_server:
        return False, 'ERROR: Server encountered problems or query is long.', 0
    if not successful_response or final_char_index < initial_char_index:
        return False, 'ERROR: Processing failed.', 0
if (keywords is not None) and hijack_and_bad_quality_check(coarse_text, response_text, keywords):
raise Exception('Hijacked')
return True, response_text, final_char_index
def get_gpt_summary(coarse_text, style='travel', max_char_length=10000, model='text-davinci-003', backwards_chars=0):
"""Get a summary based on GPT-3 API.
    coarse_text: text to be parsed by the model
    style: style of the summary, e.g. 'travel'
    max_char_length: number of leading characters of coarse_text to process in each pass
    model: name of the model to use, e.g. 'text-davinci-003'
    backwards_chars: number of characters to step back (overlap) between consecutive passes
    The prompts are defined in the get_gpt_prompt(style) function.
"""
try:
prompt_title, input_prompt, output_prompt, examples, example_pairs, keywords, continued_prompt_dict = get_gpt_prompt(style=style)
except ValueError as e:
print(e)
return '', 0
gpt_credits = 1
# single pass
print('First pass... ' + str(len(coarse_text)))
success_flag, response_text, actual_final_char_index = get_gpt_response(
prompt_title + ''.join(examples) + input_prompt,
coarse_text,
output_prompt,
keywords,
model,
initial_char_index=0, final_char_index=max_char_length)
# multiple passes
if (continued_prompt_dict is not None) and success_flag and (actual_final_char_index < len(coarse_text)):
gpt_credits += 1
passes = 1
while success_flag and (actual_final_char_index < len(coarse_text)) and passes < MAX_GPT_PASSES:
print('Continuing with the next pass...')
success_flag, response_text, actual_final_char_index = get_gpt_response(
continued_prompt_dict["continued_prompt_title"] + continued_prompt_dict["continued_prev_data_prompt"] + response_text + continued_prompt_dict["continued_new_text_prompt"],
coarse_text,
continued_prompt_dict['continued_output_prompt'],
continued_prompt_dict['keywords'],
model,
initial_char_index=actual_final_char_index - backwards_chars, final_char_index=actual_final_char_index + max_char_length - backwards_chars,
max_tokens=MULTIPLE_PASSES_MAX_TOKENS)
passes += 1
return response_text, gpt_credits
def get_title_for_entry(coarse_text, query_to_model='', model='gpt-3.5-turbo') -> str:
"""
Get a title of the entry from the text.
coarse_text: text to be processed by a title-generating-model
model: model to be used by OpenAI API (currently we only use OpenAI, but other models can be used)
"""
# We prompt the model with the very beginning of the text, assuming that the title is there
# We do not supply the model with any examples, to remain agnostic to the style
if query_to_model == '':
query_to_model = "Summarize this text to something that can serve as a title that labels the text.\nText:\n" + coarse_text[0:300] + "\nTitle:"
successful_flag = False
number_of_attempts = 0
while (not successful_flag) and number_of_attempts < 5:
try:
response = gpt_completion(query_to_model, max_tokens=64, model=model)
successful_flag = True
response_text = gpt_response_to_clean_text(response, model)
except Exception as e:
print(e)
number_of_attempts += 1
time.sleep(0.5)
if not successful_flag:
response_text = 'ERROR OCCURRED'
# some specific cleanings for title generation
response_text = response_text.replace('"', '') # remove quotes, since they sometimes come up as wrapping of the title in the output
    if response_text and response_text[-1] == '.':
response_text = response_text[:-1] # remove '.' in the end of response_text if exists
return response_text
def process_url(request_dict, data_path, max_char_length=1000, model='text-davinci-003'):
"""Process URL and return structured data.
    request_dict: dictionary with the following keys:
        URL: URL of the web page
        style: summarization style to apply
        is_marked_text (optional): if True, process the user-marked text instead of scraping the URL
        marked_text (optional): the text marked by the user
        web_html (optional): pre-downloaded HTML of the page; if provided, used instead of fetching the URL
max_char_length: denotes how many leading characters of the text to be processed by GPT-3
Default value of 1000 for development purposes, since GPT-3 is expensive
The function defines a failed output by default, and updates it if the processing is successful.
"""
output = {
'URL': request_dict['URL'] if 'URL' in request_dict else '',
'style': request_dict['style'] if 'style' in request_dict else '',
'output': '',
'status': 'FAILED'}
# get the URL
url = request_dict['URL'] if 'URL' in request_dict else ''
# check validity of url
if url == '':
output['output'] = 'ERROR: no URL provided.'
return output
# get the style
style = request_dict['style'] if 'style' in request_dict else ''
if 'is_marked_text' in request_dict and request_dict['is_marked_text']:
# we use the marked text by the user, instead of scraping the URL
if 'marked_text' not in request_dict:
output['output'] = 'ERROR: marked text not provided.'
return output
request_dict['marked_text'] = nlp_utils.clean_marked_text(request_dict['marked_text'])
if len(request_dict['marked_text']) < MIN_MARKED_TEXT_LENGTH:
output['output'] = 'ERROR: marked text too short.'
return output
coarse_text = request_dict['marked_text']
original_url_webpage = request_dict['marked_text']
elif 'is_marked_text' not in request_dict or not request_dict['is_marked_text']:
# get the text from the URL - or - if supplied, from the HTML.
# original_url_webpage is simply the downloaded webpage
# coarse_text is the text to be processed by GPT-3, after it was processed by a cleaning backend
# such as jusText or Trafilatura
if request_dict['web_html'] == '':
coarse_text, original_url_webpage = scraping_utils.url_to_text(url, data_path)
else:
coarse_text, original_url_webpage = scraping_utils.html_to_text(request_dict['web_html'])
if coarse_text == '':
output['output'] = 'ERROR: time-out or problem cleaning the webpage. Try marking the text you\'re interested in and click the Brush button to Process that text in particular.'
return output
# We previously limited the use to English only. For not we allow all languages.
if False: # nlp_utils.text_not_in_english(coarse_text):
output['output'] = 'ERROR: We currently only support English.'
return output
# Get the structured data from GPT-3
try:
response, gpt_credits = get_gpt_summary(coarse_text, style=style, max_char_length=max_char_length, model=model)
except Exception as e:
if 'Hijacked' in str(e):
            print('Hijacked error occurred. Trying again with a variant if one exists.')
try:
response, gpt_credits = get_gpt_summary(coarse_text, style=style + '_variant', max_char_length=max_char_length, model=model)
except Exception as e:
                print('Some error occurred on second try. Error: ', e)
response, gpt_credits = '', 0
else:
response, gpt_credits = '', 0
if response == '':
        output['output'] = 'ERROR: a problem occurred. Try changing the style or shortening the text.'
return output
elif response.startswith('ERROR'):
output['output'] = response
return output
# convert the structured data to a dictionary
output["model_output"] = response
output["output"] = parse_gpt_response(output["model_output"], style=style)
output["title"] = get_title_for_entry(coarse_text)
output["cleaned_text"] = coarse_text
output["original_web"] = original_url_webpage
output["marked_text"] = request_dict["marked_text"] if 'marked_text' in request_dict else ''
output["status"] = "SUCCESS"
output["gpt_credits"] = gpt_credits
return output
def promptize_qa_list(qa_list, max_prev_questions=2):
"""Promptize the list of questions and answers.
qa_list: list of tuples (question, answer)
max_prev_questions: maximum number of previous questions to include in the prompt
"""
prompt = ''
for i in range(min(max_prev_questions, len(qa_list))):
question, answer = qa_list[-1-i][:2]
prompt = f'Question: {question}\nAnswer: {answer}\n{prompt}'
return prompt
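# Illustrative sketch (the Q/A strings are invented): with the default max_prev_questions=2,
# only the last two exchanges are kept, ordered from oldest to newest:
#   promptize_qa_list([("Q1", "A1"), ("Q2", "A2"), ("Q3", "A3")])
#   -> "Question: Q2\nAnswer: A2\nQuestion: Q3\nAnswer: A3\n"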
def get_gpt_answer_to_question(question: str, snippets: list[str], qa_list, text, model=qa_model) -> str:
"""Get response from GPT-3 API.
question: to be answered using the snippets
snippets: list of strings that likely contain the answer to the question
The question is put together with the snippets and a prompt, and is sent to GPT3.
Note: may consider a cheaper model (next cheaper OpenAI: text-curie-001. Can also consider open-source model)
"""
'''
After launching, we see that the use case is a little different than what we had in mind.
Users like to use the chat as ChatGPT rather than asking questions about the text.
We therefore implement the following change: when the text is sufficiently short, we feed it directly to the model,
without selecting text based on embeddings.
'''
language = nlp_utils.detect_language(text) # either 'en' or not for now (3/4/2023)
if (len(text) < 9500 and language == 'en') or (len(text) < 5600):
print('Asking question directly to model, as text is short.')
response_text = chat_question(question, qa_list, context_text=text, model=model)
return response_text, ''
#prompt_title = '''You are trying to help a user get an answer to a question based on a document. You are given the question, the first 1000 characters of the text for context and several possibly relevant snippets of text that may contain (or may not) the answer. If the snippets do not contain the answer but you know the answer regardless of them - give the answer, but admit that it is not based on the document (adding \"(not based on the document)\"). If you're not sure about the answer, refuse to give an answer and admit that you're not sure, but again - if you know the answer from elsewhere - say it. Be concise, informative and give only the answer to the question.'''
prompt_title = '''You are trying to help a user get an answer to a question based on a document. You are given the question, the first 1000 characters of the text for context and several possibly relevant snippets of text that may contain (or may not) the answer. If you are not sure what is the answer, say you're not sure. Be concise, informative and give only the answer to the question.'''
prompt_title_w_prev_qa = '''You are trying to help a user get an answer to a question. You are given previous answered questions, the new question and several sentences or snippets of text that may contain (or may not) the answer. Try to give the answer to the question. If you are not absolutely sure, say you're not sure. Be concise.'''
previous_questions_answers_prompt = promptize_qa_list(qa_list)
question = question.strip()
question = question[0].upper() + question[1:] # capitalize the first letter of the question
if question[-1] != '?':
question += '?'
output_prompt = 'Answer (either based on the snippets or not):'
successful_response = False
number_of_snippets = len(snippets)
print('Number of snippets: ', number_of_snippets)
while not successful_response and number_of_snippets > 0:
text_to_decode = [snip + '\n' for snip in snippets[:number_of_snippets]]
query_to_model = prompt_title + "\n" + question + '\n' + 'Context:\n' + text[0:1000] + '\nSnippets:\n' + ''.join(text_to_decode) + output_prompt + "\n"
# print(query_to_model[:100])
print('query to model ###########################')
print(query_to_model)
try:
response = gpt_completion(query_to_model, max_tokens=512, model=model)
successful_response = True
except Exception as e:
print(e)
print('Decreasing amount of candidate snippets.')
number_of_snippets -= 1
if number_of_snippets == 0:
return 'ERROR: Candidate answer snippets are too long.', ''
response_text = gpt_response_to_clean_text(response, model)
return response_text, query_to_model
def qa_about_text(question: str, text: str, url: str, qa_list, top=6, sigma=1, top_answers=4, compact_sentences=5):
"""Get answer to a question about a text.
question: to be answered using the text
text: text to be used for answering the question
url: url of the text (for embedding caching purposes)
top: number of top similar sentences to use for generating the answer
sigma: number of sentences around the top similar sentences to use for generating the answer
    top_answers: number of top answers to return
    qa_list: list of previous (question, answer) tuples, used for conversational context
    compact_sentences: number of consecutive sentences merged into one chunk before embedding
    """
try:
cosine_similarities, sentences, embeddings_a, embeddings_q = nlp_utils.get_embeddings(question, text, url, backend="openai", compact_sentences=compact_sentences) # nlp_utils.get_embeddings_qa(question, text)
print('Got embeddings.')
except Exception as e:
print(e)
        return 'ERROR: a problem occurred. Try again or try selecting another text.', None, None
top = min(top, len(sentences))
'''
After getting question-sentence similarities there are a few options
1) pick top similar sentences
2) pick top similar sentences and a few sentences around them (sigma)
3) something else (?)
We pick first option for now.
Then, we ask GPT3 to generate an answer to the question based on the similar sentences
'''
    # zero out very short sentences (< 10 characters), then take the `top` sentences with the largest cosine similarity
for i in range(len(cosine_similarities)):
if len(sentences[i]) < 10:
cosine_similarities[i] = 0
top_sentences_locations = np.argsort(cosine_similarities)[-top:]
sentences_islands = nlp_utils.find_islands(top_sentences_locations, sigma=sigma, length=len(sentences))
top_sentences = [str(j+1) + '. ' + ' '.join([sentences[i] for i in island]) for j, island in enumerate(sentences_islands)]
top_sentences = [sent.replace('\n', ' ') for sent in top_sentences]
top_sentences = [re.sub(r'\s{2,}', ' ', sent) for sent in top_sentences]
response_text, query_to_model = get_gpt_answer_to_question(question, top_sentences, qa_list, text)
response_text = response_text.strip() # basic cleaning
supporting_sentences = nlp_utils.get_supporting_sentences(sentences_islands, embeddings_a, response_text, sentences, top_answers)
supporting_quote = '...' + '... '.join(supporting_sentences) + '...'
# replace \n in supporting_quote with space
supporting_quote = supporting_quote.replace('\n', ' ')
# replace multiple spaces with one space
supporting_quote = re.sub(r'\s{2,}', ' ', supporting_quote)
return response_text, query_to_model, supporting_quote
def prepare_qa_for_chat(question, answer):
if question.startswith('/chat '):
        question = question[len('/chat '):]
else:
question = question + '\n(Based on an attached document - redacted)'
return question, answer
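# Illustrative sketch (inputs invented), assuming the '/chat ' prefix is stripped as above:
#   prepare_qa_for_chat('/chat hello', 'hi')
#   -> ('hello', 'hi')
#   prepare_qa_for_chat('What is the main result?', 'The main result is X.')
#   -> ('What is the main result?\n(Based on an attached document - redacted)', 'The main result is X.')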
def chat_question(question, qa_list, context_text='', model="gpt-3.5-turbo"):
prev_msgs = []
for qa in qa_list:
prev_question, prev_answer = qa[:2]
prev_question, prev_answer = prepare_qa_for_chat(prev_question, prev_answer)
prev_msgs.append({"role": "user", "content": prev_question})
prev_msgs.append({"role": "assistant", "content": prev_answer})
if context_text == '':
query = question
else:
query = question + '\nContext text:\n' + context_text
prev_msgs.append({"role": "user", "content": query})
try:
        response = gpt_completion('', max_tokens=768, model=model, prev_msgs=prev_msgs)
except Exception as e:
print(e)
        return 'ERROR: a problem occurred. Try again or try selecting another text.'
if 'choices' not in response:
        return 'ERROR: a problem occurred. Try again or try selecting another text.'
answer = response['choices'][0]['message']['content']
answer = answer.strip()
return answer | [
"\n\nSummary:\n",
"\n\nCritical review of flaws in the paper:\n",
"None",
"You are trying to help an analyst appraise businesses and gather information from business news. Convert the following text snippets to structured data.\n",
"Text: ",
"\n\nBullet points:\n",
"Answer (either based on the snippets or not):",
"Question: PLACEHOLDER\nAnswer: PLACEHOLDER\nPLACEHOLDER",
"You help a user get the essence of a body of text. You are given the bullet points collected so far and a relevant body of text. You need to use the text to add details and expand the bullet points and output the revised bullet points. Try to make the bullet points progress in logic, i.e. background would appear before conclusions. Be informative and succinct. If the new text does not appear to be relevant, you can ignore it and output the previous bullet points.",
"\n\nTable:\n",
"You help a traveler design a multi-day or multi-destination itinerary and gather information about a trip. Convert the blog entries to structured data. When writing a table, put different destinations or activities in separate rows.\n",
"\n\nNew text:\n",
"\n\nOutput, possible fields {Activity name, Accommodation, Eating, Transportation, Best Seasons, Preparation, Budget, Itinerary table}:\n",
"{'continued_prompt_title': PLACEHOLDER, 'continued_prev_data_prompt': PLACEHOLDER, 'continued_new_text_prompt': PLACEHOLDER, 'continued_output_prompt': PLACEHOLDER, 'keywords': ['get the essence of a body', 'You are given the bullet points', 'Be informative and succinct']}",
"\n\nExplanation:\n",
"\n\nOutput, possible fields include {Main company/ies, Business/service, Valuation, Product, Features, Pricing, Investors, Business decisions/events, Area, Personnel, Challenges}:\n",
"You are trying to help an academic researcher to quickly understand the key points of a scientific paper. In the following, convert each text snippet to structured data.\n",
"\n\nRevised bullet points:\n",
"You are helping a reviewer review a scientific paper. You are given an excerpt from a paper with the purpose of finding flaws in logic, execution, etc. Summarize your report in bullet points. Try to support your criticism with quotes from the text. If you can't find flaws, do not say any.\n",
"\n\nOutput, possible fields {Scientific field, Background, Novelty, Conclusions/Key takeaways, Methods}:\n",
"You are trying to help a user get an answer to a question. You are given previous answered questions, the new question and several sentences or snippets of text that may contain (or may not) the answer. Try to give the answer to the question. If you are not absolutely sure, say you're not sure. Be concise.",
"Paper excerpt: ",
"\n\nPrevious bullet points:\n",
"\n\nPrevious data:\n",
"You are trying to help a layperson get a summary with the main background required to understand the following text and the main conclusions that stem from it. The summary should not exceed 8 sentences.\n",
"{'continued_prompt_title': PLACEHOLDER, 'continued_prev_data_prompt': PLACEHOLDER, 'continued_new_text_prompt': PLACEHOLDER, 'continued_output_prompt': PLACEHOLDER, 'keywords': ['multi-day', 'gather information about a trip', 'Be informative and succinct']}",
"You are helping parse textual data into a table. The table cells should be separated by '|' and new lines.",
"\n\nRevised data:\n",
"Summarize the following text into bullet points. Try to make the bullet points progress in logic, i.e. background would appear before conclusions. Be informative and succinct.\n",
"You are trying to help a user get an answer to a question based on a document. You are given the question, the first 1000 characters of the text for context and several possibly relevant snippets of text that may contain (or may not) the answer. If you are not sure what is the answer, say you're not sure. Be concise, informative and give only the answer to the question.",
"You help a traveler design a multi-day or multi-destination itinerary and gather information about a trip. You are given the data collected so far and a relevant body of text. You need to use the text to add details and expand the data and output the revised data in the same format. Be informative and succinct.\n",
"You are helping someone read complicated text. Given some text, do your best to explain the text in simple terms. Do not drop key aspects of the text."
] |
2024-01-10 | bnitsan/essence_backend | server_src~embed_utils.py | class EmbedUtils:
def __init__(self):
import openai
openai.api_type = 'openai'
    def embed(self, input):
        import openai
        print(openai.api_type)
        # pass the input as a keyword argument, use the full embedding-model name, and return the result
        return openai.Embedding.create(input=input, model='text-embedding-ada-002')
embed_utils = EmbedUtils()
| [] |
2024-01-10 | bnitsan/essence_backend | server_src~nlp_utils.py | from sklearn.metrics.pairwise import cosine_similarity
import nltk
from langdetect import detect
from nltk.tokenize import sent_tokenize
import requests
import numpy as np
import os
import yaml
from cachelib.file import FileSystemCache
import hashlib
import re
from . import general_utils
from retry import retry
import openai as openaiembed
with open("server_src/config.yml", 'r') as ymlfile:
cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)
cfg = cfg["config"]
SENT_TOKEN_PROTECTED = cfg["SENT_TOKEN_PROTECTED"]
MIN_SENTENCE_LEN_QA_EMBED = cfg["MIN_SENTENCE_LEN_QA_EMBED"]
MAX_SENTENCE_LEN_QA_EMBED = cfg["MAX_SENTENCE_LEN_QA_EMBED"]
SENTENCE_QA_EMBED_MODEL = cfg["SENTENCE_QA_EMBED_MODEL"]
CACHE_QA_SECONDS = cfg["CACHE_QA_SECONDS"]
CACHE_QA_THRESHOLD = cfg["CACHE_QA_THRESHOLD"]
INF_ENDPOINT_SENT_TRANS = cfg["INF_ENDPOINT_SENT_TRANS"]
COMPLETION_TIMEOUT = cfg["COMPLETION_TIMEOUT"]
SECRET_SENTEMBED_KEY = os.getenv("SECRET_HF_MODEL_KEY") if os.getenv("SECRET_HF_MODEL_KEY") else ''
data_path = os.path.abspath(os.path.join(os.getcwd(), os.pardir, 'data')) # get absolute path to one folder up
if os.getenv("ESSENCE_DATA_PATH"):
data_path = os.getenv("ESSENCE_DATA_PATH")
embed_cache = FileSystemCache(os.path.join(data_path, 'embed_cache'), threshold=CACHE_QA_THRESHOLD, default_timeout=CACHE_QA_SECONDS)
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
openaiembed.api_key = OPENAI_API_KEY
openaiembed.api_type = 'openai'
azure_flag = False
if os.getenv("AZURE_OPENAI_KEY") and os.getenv("AZURE_OPENAI_ENDPOINT"):
openaiembed.api_type = "azure"
openaiembed.api_base = os.getenv("AZURE_OPENAI_ENDPOINT")
openaiembed.api_version = "2023-05-15"
openaiembed.api_key = os.getenv("AZURE_OPENAI_KEY")
azure_flag = True
def hash_text(text):
sha256 = hashlib.sha256()
sha256.update(text.encode())
return sha256.hexdigest()
def find_islands(indices, sigma: int, length: int):
'''
    This function takes indices that point to the locations of the top sentences
    matching the query. It also takes sigma, the maximum distance around an index that should be included in its island.
When islands touch or overlap, they are merged into one island.
length: maximal index value + 1
'''
if not isinstance(sigma, int) or sigma < 0:
raise ValueError('sigma must be a non-negative integer')
if len(indices) < 1:
return []
indices.sort()
islands = [[i for i in range(indices[0]-sigma, indices[0]+sigma+1) if (i >= 0 and i < length)]]
for j in range(1, len(indices)):
if indices[j] - indices[j-1] <= (2*sigma+1):
islands[-1].extend([i for i in range(islands[-1][-1]+1, indices[j]+sigma+1) if (i >= 0 and i < length)])
else:
islands.append([i for i in range(indices[j]-sigma, indices[j]+sigma+1) if (i >= 0 and i < length)])
return islands
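# Illustrative sketch (index values invented) of the island construction and merging:
#   find_islands([2, 7], sigma=1, length=10) -> [[1, 2, 3], [6, 7, 8]]
#   find_islands([2, 4], sigma=1, length=10) -> [[1, 2, 3, 4, 5]]   # nearby islands merge
#   find_islands([0], sigma=2, length=4)     -> [[0, 1, 2]]         # clipped to the valid index range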
def post_request(url, data):
response_post = requests.post(url, json=data)
return response_post.json()
def multiple_replace(text: str, replacements: dict) -> str:
''' Thanks ChatGPT for this function!
Replace multiple substrings of a string with another substring.
replacements is a dictionary of {str_to_find: str_to_replace_with}
'''
# Compile a regular expression pattern that matches all the substrings
# to be replaced and capture them as groups
pattern = re.compile("|".join("(%s)" % re.escape(key) for key in replacements.keys()))
# Use the sub function to replace all the occurrences of the captured groups
# in the text with their corresponding replacements
return pattern.sub(lambda x: replacements[x.group(0)], text)
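# Illustrative sketch (strings invented) of a multi-pattern replacement:
#   multiple_replace("See Fig. 1 and Eq. 2", {"Fig.": "Figure", "Eq.": "Equation"})
#   -> "See Figure 1 and Equation 2"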
def prepare_text_for_sent_split(text):
pairs = {'Fig.': 'Figure', 'FIG.': 'Figure', 'Figs.': 'Figures', 'FIGS.': 'Figures',
'Sec.': 'Section', 'SEC.': 'Section', 'Secs.': 'Sections', 'SECS.': 'Sections',
'Eq.': 'Equation', 'EQ.': 'Equation', 'Eqs.': 'Equations', 'EQS.': 'Equations',
'Ref.': 'Reference', 'REF.': 'Reference', 'Refs.': 'References', 'REFS.': 'References',
'in App.': 'in Appendix', 'In App.': 'In Appendix', 'in APP.': 'in Appendix', 'In APP.': 'In Appendix'}
text = multiple_replace(text, pairs)
text = multiple_replace(text, SENT_TOKEN_PROTECTED)
return text
def rerun_text_after_sent_split(sentences):
SENT_TOKEN_PROTECTED_INV = {v: k for k, v in SENT_TOKEN_PROTECTED.items()}
sentences = [multiple_replace(sentence, SENT_TOKEN_PROTECTED_INV) for sentence in sentences]
return sentences
def quality_assurance_sentences(sentences, min_sentence_length=MIN_SENTENCE_LEN_QA_EMBED, max_sentence_length=MAX_SENTENCE_LEN_QA_EMBED):
    return [sentence for sentence in sentences if min_sentence_length <= len(sentence) <= max_sentence_length]
def set_embed(id: str, text: str, backend: str, embeddings: list):
embed_cache.add(id, {"text": text, "backend": backend, "embeddings": embeddings})
def get_embed_if_exists(id: str, text: str, backend: str):
if embed_cache.has(id):
elem = embed_cache.get(id)
if elem["text"] == text and elem["backend"] == backend:
print('Going to use cached embeddings...')
return elem["embeddings"]
return None
@retry(exceptions=openaiembed.error.Timeout, tries=4)
def OpenAIEmbeddings(input, model=SENTENCE_QA_EMBED_MODEL):
if azure_flag:
print('Using Azure OpenAI...')
if len(input) == 1:
openai_embeddings = openaiembed.Embedding.create(input=input, engine="essence-embed")
else:
# Azure OpenAI, as of May 22, 2023, does not support batch embeddings. Sad.
openai_embeddings = {}
openai_embeddings["data"] = []
for i in range(len(input)):
openai_embeddings["data"].append(openaiembed.Embedding.create(input=[input[i]], engine="essence-embed")["data"][0])
return openai_embeddings
else:
print('Using OpenAI... with model: ' + model)
openai_embeddings = openaiembed.Embedding.create(input=input, model=model, request_timeout=COMPLETION_TIMEOUT)
print('Finished.')
return openai_embeddings
def get_single_embedding(string, backend="openai"):
if backend == "sent_trans":
response = post_request(INF_ENDPOINT_SENT_TRANS + '/predict', {'sentences': [string], 'secret_key': SECRET_SENTEMBED_KEY})
embedding = response["embeddings"]
elif backend == "openai":
openai_embeddings = OpenAIEmbeddings([string], model=SENTENCE_QA_EMBED_MODEL) # openai.Embedding.create(input = [string], model=SENTENCE_QA_EMBED_MODEL)
embeddings = openai_embeddings["data"][0]["embedding"]
embedding = [embeddings]
else:
raise ValueError('backend not supported')
return embedding
def get_embeddings_similarity(emb1, emb2):
cosine_sim = cosine_similarity(emb1, emb2).flatten().tolist()
return cosine_sim
def combine_strings(l, m):
print('Shortening the text by combining sentences... m =', m)
if m < 2: return l
result = []
for i in range(0, len(l), m):
combined = ' '.join(l[i:i + m])
result.append(combined)
return result
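# Illustrative sketch (sentences invented): with m=2 the sentences are merged in pairs,
# roughly halving the number of embedding calls:
#   combine_strings(['s1', 's2', 's3', 's4', 's5'], 2) -> ['s1 s2', 's3 s4', 's5']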
# @general_utils.retry_on_timeout(retries=3, timeout_seconds=15)
def get_embeddings(question: str, text: str, url:str, backend="openai", max_sentences=100, compact_sentences=1):
text = prepare_text_for_sent_split(text)
sentences = sent_tokenize(text)
sentences = rerun_text_after_sent_split(sentences)
sentences = quality_assurance_sentences(sentences)
if compact_sentences > 1:
sentences = combine_strings(sentences, compact_sentences)
# we'd like to reduce the number of sentences to max_sentences. We do it by batching to nearest power of 2.
# sentences = combine_strings(sentences, 2 ** int(np.floor(np.log2(len(sentences) / max_sentences))))
# currently inactive.
if len(sentences) == 0:
print('ERROR: NO SENTENCES FOUND IN THE TEXT.')
raise ValueError('No sentences found in text.')
if backend == "sent_trans":
response_q = post_request(INF_ENDPOINT_SENT_TRANS + '/predict', {'sentences': [question], 'secret_key': SECRET_SENTEMBED_KEY})
embeddings_q = response_q["embeddings"]
cache_embed_response = get_embed_if_exists(url + hash_text(text), text, backend)
if cache_embed_response is not None:
embeddings_a = cache_embed_response
else:
response_a = post_request(INF_ENDPOINT_SENT_TRANS + '/predict', {'sentences': sentences, 'secret_key': SECRET_SENTEMBED_KEY})
embeddings_a = response_a["embeddings"]
set_embed(url + hash_text(text), text, backend, embeddings_a)
elif backend == "openai":
print('Going to use OpenAI embeddings...')
openai_embeddings_q = OpenAIEmbeddings([question], model=SENTENCE_QA_EMBED_MODEL)
if "data" not in openai_embeddings_q:
print('ERROR: OPENAI EMBEDDINGS API FAILED.')
raise ValueError('OpenAI Embeddings API failed.')
embeddings_q = openai_embeddings_q["data"][0]["embedding"]
embeddings_q = [embeddings_q]
cache_embed_response = get_embed_if_exists(url + hash_text(text), text, backend)
if cache_embed_response is not None:
embeddings_a = cache_embed_response
else:
            openai_embeddings_a = OpenAIEmbeddings(sentences, model=SENTENCE_QA_EMBED_MODEL)
            if "data" not in openai_embeddings_a:
raise ValueError('OpenAI Embeddings API failed.')
embeddings_a = [openai_embeddings_a["data"][i]["embedding"] for i in range(len(sentences))]
set_embed(url + hash_text(text), text, backend, embeddings_a)
else:
raise ValueError('backend not supported')
cosine_sim = get_embeddings_similarity(embeddings_q, embeddings_a)
return cosine_sim, sentences, embeddings_a, embeddings_q
def get_most_matching_sentences_to_answer(answer: str, embeddings, top=4):
answer_embeddings = get_single_embedding(answer)
similarities = get_embeddings_similarity(answer_embeddings, embeddings)
return similarities
def get_supporting_sentences(sentences_islands, embeddings_a, answer, sentences, top_answers):
candidate_sentences_locs = [i for island in sentences_islands for i in island]
candidate_embeddings = [embeddings_a[i] for i in candidate_sentences_locs]
cosine_sim_answer = get_most_matching_sentences_to_answer(answer, candidate_embeddings)
top_locs = np.sort(np.argsort(cosine_sim_answer)[-top_answers:])
top_locs_islands = [[top_locs[0]]]
for i in range(1, len(top_locs)):
if top_locs[i] - top_locs[i-1] == 1:
top_locs_islands[-1].append(top_locs[i])
else:
top_locs_islands.append([top_locs[i]])
supporting_sentences = [' '.join([sentences[candidate_sentences_locs[i]] for i in sublist]) for sublist in top_locs_islands]
return supporting_sentences
def text_not_in_english(text):
    # use langdetect to check whether the text is in English via a simple heuristic on a few windows of the text
try:
if (detect(text[:150]) == 'en' or detect(text[len(text)//2:len(text)//2+150]) == 'en' or detect(text[-150:]) == 'en'):
return False
if (detect(text[:500]) == 'en' or detect(text[len(text)//2:len(text)//2+500]) == 'en' or detect(text[-500:]) == 'en'):
return False
except Exception as e:
# if language detection fails -- possibly throws LangDetectException -- assume it is not in English
print(e)
return True
return True
def detect_language(text):
return 'en' if not text_not_in_english(text) else 'non-en'
def clean_marked_text(marked_text, min_length=3):
# remove all lines with less than min_length characters
marked_text = '\n'.join([line for line in marked_text.split('\n') if len(line) >= min_length])
return marked_text
| [] |
2024-01-10 | EinfachAlex110/Chatbase | webquery.py | import os, trafilatura
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from langchain.docstore.document import Document
class WebQuery:
def __init__(self, openai_api_key = None) -> None:
self.embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
os.environ["OPENAI_API_KEY"] = openai_api_key
self.text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
self.llm = OpenAI(temperature=0, openai_api_key=openai_api_key)
self.chain = None
self.db = None
def ask(self, question: str) -> str:
if self.chain is None:
            response = "Please add a document."
else:
docs = self.db.get_relevant_documents(question)
response = self.chain.run(input_documents=docs, question=question)
return response
def ingest(self, url: str) -> str:
result = trafilatura.extract(trafilatura.fetch_url(url))
documents = [Document(page_content=result, metadata={"source": url})]
splitted_documents = self.text_splitter.split_documents(documents)
self.db = Chroma.from_documents(splitted_documents, self.embeddings).as_retriever()
        self.chain = load_qa_chain(self.llm, chain_type="stuff")
return "Success"
def forget(self) -> None:
self.db = None
self.chain = None | [] |
2024-01-10 | ZiyueWang25/llm-security-challenge | llm_security~communicate.py | import time
import re
from tqdm import tqdm
from pwn import ssh
import pandas as pd
import numpy as np
import openai
import replicate
from llm_security import llm_config
from llm_security import game_config
def find_response(output):
splits = output.strip().split("\n")
for i, l in enumerate(splits):
if l.lstrip().startswith("#"):
continue
else:
return "\n".join(splits[:i+1])
return output
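# Illustrative sketch (model output invented): leading shell comments are kept and the first
# non-comment line ends the snippet that will be sent to the shell:
#   find_response("# list files\nls\ncat flag.txt") -> "# list files\nls"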
def get_gpt_response(model, messages):
response = openai.ChatCompletion.create(messages=messages, model=model, max_tokens=100)
output = response.choices[0].message.content
llm_response = find_response(output)
return {"role": "assistant", "content": llm_response}, llm_response
def get_gpt_comp_response(model, messages):
response = openai.Completion.create(prompt="\n".join(messages), model=model, max_tokens=100)
output = response.choices[0].text
llm_response = find_response(output)
return llm_response, llm_response
def get_replicate_response(model, messages, system_prompt):
output = "".join(replicate.run(
model,
input={
"system_prompt": system_prompt,
"prompt": "\n".join(messages),
}
))
llm_response = find_response(output)
return llm_response, llm_response
def get_model_response(model, messages, sys_content=None):
start_time = time.time()
abbr = llm_config.ABBR_BY_MODEL_NAME[model]
if abbr.startswith(llm_config.OPENAI_LIST):
response, llm_response = get_gpt_response(model, messages)
elif abbr.startswith(llm_config.OPENAI_COMP_LIST):
response, llm_response = get_gpt_comp_response(model, messages)
    elif abbr.startswith(llm_config.REPLICATE_LIST):
        response, llm_response = get_replicate_response(model, messages, sys_content)
    else:
        raise ValueError(f"Unsupported model: {model}")
elapse_secs = time.time() - start_time
return response, llm_response, elapse_secs
def get_initial_messages(model, sys_content):
abbr = llm_config.ABBR_BY_MODEL_NAME[model]
if abbr.startswith(llm_config.OPENAI_LIST):
messages = [
{'role': 'system', 'content': sys_content},
]
elif abbr.startswith(llm_config.OPENAI_COMP_LIST):
messages = [sys_content, "Let's code:"]
elif abbr.startswith(llm_config.REPLICATE_LIST):
messages = ["Let's code:"]
return messages
def get_ssh_arg(level, password, game):
return dict(
user = f"{game}{level}",
host = f"{game}.labs.overthewire.org",
password = password,
port=game_config.PORT_BY_GAME[game],
)
def get_ssh_response(io, llm_response, sleep_time, max_len=1000):
io.sendlines([i.encode() for i in llm_response.split('\n')])
time.sleep(sleep_time)
ssh_response = safe_decode(io.recv())
if len(ssh_response) > max_len:
ssh_response = "Too long"
if not ssh_response:
ssh_response = 'Empty'
ssh_response = "SHELL:" + ssh_response
return ssh_response
def safe_decode(b):
try:
decode = b.decode()
except:
decode = str(b)[2:-1]
    decode = decode.replace("$ ", "")  # the ssh session tends to emit prompt markers like "$ $ $ "; strip them
return decode
def make_response_df(llm_rs, ssh_rs, secs):
len_diff = len(llm_rs)-len(ssh_rs)
if len_diff > 0:
ssh_rs.extend([""] *len_diff)
elif len_diff < 0:
llm_rs.extend([""] * -len_diff)
secs.extend([0] * -len_diff)
return pd.DataFrame({"llm": llm_rs, "ssh": ssh_rs, "sec": secs})
def is_answer_correct(game, level, answer):
if answer is None:
return False
return answer == game_config.PSW_BY_GAME[game][level+1]
def find_answer(llm_response, verbose=True):
if m := re.findall(r'(?:^|\n)submit (\w+)', llm_response):
answer = m[-1].strip()
if verbose:
print('SYS: we are done. Predicted Answer: ', answer)
return True, answer
return False, None
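# Illustrative sketch (the password string is invented): the regex only matches 'submit <word>'
# at the start of the response or of a line, and the last such match wins:
#   find_answer("cat /etc/passwd\nsubmit abc123", verbose=False) -> (True, 'abc123')
#   find_answer("I will submit it later", verbose=False)         -> (False, None)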
def attempt_level(
game,
level,
n_cycles,
model="gpt-3.5-turbo",
password=None,
verbose=True,
sleep_time=.5,
session_timeout=30,
max_ssh_len = 1000,
):
if verbose:
print(f"The max ssh_len is: {max_ssh_len}")
abbr = llm_config.ABBR_BY_MODEL_NAME[model]
goal_by_level = game_config.GOAL_BY_GAME[game]
sys_goal_suffix = f"The goal is: {goal_by_level[level+1]}." if isinstance(goal_by_level, dict) else goal_by_level
if verbose:
print(f"Level: {level}. Goal: {sys_goal_suffix}")
sys_content = llm_config.PRELUDE + sys_goal_suffix
messages = get_initial_messages(model, sys_content)
llm_responses = []
ssh_responses = []
secs = []
answer = None
if password is None:
password = game_config.PSW_BY_GAME[game][level]
ssh_args = get_ssh_arg(level, password, game)
with ssh(raw=True, timeout=session_timeout, **ssh_args,) as session:
with session.process(['sh']) as io:
for i in range(n_cycles):
try:
response, llm_response, sec = get_model_response(model, messages, sys_content)
messages.append(response)
llm_responses.append(llm_response)
secs.append(sec)
if verbose:
print(f'SYS {i}: LLM: \n>>>{llm_response}<<<\n')
except openai.error.InvalidRequestError as err:
print(f"Maximum context length reached with error message: {err}")
messages = messages[:4] + messages[-4:]
llm_response = ""
found, answer = find_answer(llm_response, verbose=verbose)
if found:
break
try:
ssh_response = get_ssh_response(io, llm_response, sleep_time, max_ssh_len)
except EOFError:
break
if verbose:
print(f'SYS {i}: ssh: \n>>>{ssh_response}<<<\n')
if abbr.startswith(llm_config.OPENAI_LIST):
messages.append({'role': 'user', 'content': ssh_response})
elif abbr.startswith(llm_config.REPLICATE_LIST) or abbr.startswith(llm_config.OPENAI_COMP_LIST):
messages.append(ssh_response)
ssh_responses.append(ssh_response)
return make_response_df(llm_responses, ssh_responses, secs), answer
def attempt_level_multiple(game, model_name, level, num_trial=100):
logs = []
print(f"---------- Test Model {model_name} ------------")
for _ in tqdm(range(num_trial)):
try:
resp_df, answer = attempt_level(game, level, model=model_name, n_cycles=30, verbose=False)
except Exception as e:
print(f"Error: {e}")
logs.append((None, None, e))
continue
print(len(resp_df), answer)
logs.append((resp_df, answer, None))
return logs
def show_level_trials(abbrs, game, level, num_trial, logs_by_model):
print(f"Under {num_trial} trials, In level {level}")
for abbr in abbrs:
model = llm_config.MODEL_NAME_BY_ABBR[abbr]
if model not in logs_by_model:
continue
print("#"*10 + f" {abbr} " + "#"*10)
num_success = sum([is_answer_correct(game, level, l[1] ) for l in logs_by_model[model]])
rate = num_success / num_trial
attempts = [l[0].shape[0] if l[0] is not None else num_trial for l in logs_by_model[model]]
avg_attempts, std_attempts = np.mean(attempts), np.std(attempts)
print(f"model {model:15s}, success rate: {rate:.1%}, avg turns to solve the level: {avg_attempts:.1f}, std: {std_attempts:.1f}")
| [
"\n"
] |
2024-01-10 | johnreitano/foundation | app~foundation.py | import os
import openai
import argparse
import re
MAX_INPUT_LENGTH = 32
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--input", "-i", type=str, required=True)
args = parser.parse_args()
user_input = args.input
print(f"User input: {user_input}")
if not validate_length(user_input):
        raise ValueError(f"Input must be at most {MAX_INPUT_LENGTH} characters.")
generate_branding_snippet(user_input)
generate_keywords(user_input)
def validate_length(prompt: str):
return len(prompt) <= MAX_INPUT_LENGTH
def generate_branding_snippet(prompt: str):
openai.api_key = os.getenv("OPENAI_API_KEY")
enriched_prompt = f"Generate upbeat branding snippet for {prompt}"
# print(enriched_prompt)
completion = generate_chat_completion(
"Generate upbeat branding snippet for the prompt entered by the user",
prompt,
)
branding_text = completion.strip(" \n,")
last_char = branding_text[-1]
if last_char not in {".", "!", "?"}:
branding_text += "..."
print(f"Snippet: {branding_text}")
return branding_text
def generate_keywords(prompt: str):
openai.api_key = os.getenv("OPENAI_API_KEY")
completion = generate_chat_completion(
"Generate related branding keywords for the prompt entered by the user",
prompt,
)
# print(completion)
keywords_array = re.split(",|\n|;|-", completion)
keywords_array = [k.lower().strip().lstrip("0123456789.- ") for k in keywords_array]
keywords_array = [k for k in keywords_array if len(k) > 0]
print(f"Keywords: {keywords_array}")
return keywords_array
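# Illustrative sketch (completion text invented) of how a raw completion is normalized
# into a keyword list by the splitting/stripping above:
#   "1. Fresh\n2. Bold-Modern" -> ['fresh', 'bold', 'modern']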
def generate_chat_completion(system_prompt, user_prompt):
parameters = {
"model": "gpt-4",
"messages": [
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_prompt},
],
"max_tokens": 500,
}
response = openai.ChatCompletion.create(**parameters)
return response["choices"][0]["message"]["content"]
if __name__ == "__main__":
main()
| [
"Generate upbeat branding snippet for PLACEHOLDER"
] |
2024-01-10 | shams02/test_project11 | libs~superagent~app~tools~e2b.py | # flake8: noqa
from decouple import config
from e2b.templates.data_analysis import DataAnalysis
from langchain.tools import BaseTool
class E2BCodeExecutor(BaseTool):
name = "Code interpreter"
description = "useful for running python code, it returns the output of the code"
# E2B session represents a sandbox runtime for LLM - it's a microVM for every instance of an agent.
#
# We probably should keep an active E2B session for the whole time an agent is active.
# If the "E2B_API_KEY" env var is set, E2B automatically loads it, no need to pass it to the constructor.
_session = DataAnalysis(api_key=config("E2B_API_KEY"))
# TODO: Once we know the the agent is done, we need to close the E2B session.
# You most likely want to keep the E2B session active for the whole lifecycle of an agent.
def _close_session(self):
self._session.close()
def _download_artifact(self, artifact):
# Artifact is a chart file created by matplotlib
# You can download it right from the E2B LLM Sandbox
#
# `artifact_bytes` is a chart file (.png) in bytes
# TODO: Send the artifact bytes to frontend, save it to DB, etc
artifact_bytes = artifact.download()
def _run(self, python_code: str) -> str:
# E2B offers both streaming output and artifacts or retrieving them after the code has finished running.
stdout, err, artifacts = self._session.run_python(
code=python_code,
# TODO: To create more responsive UI, you might want to stream stdout, stderr, and artifacts
on_stdout=lambda line: print("stdout", line),
on_stderr=lambda line: print("stderr", line),
on_artifact=self._download_artifact,
)
# Or you can download artifacts after the code has finished running:
# for artifact in artifacts:
# self._download_artifact(artifact)
if err:
return "There was following error during execution: " + err
return stdout
async def _arun(self, python_code: str) -> str:
# E2B doesn't support async/await flows anymore for now.
# We can either throw an exception or just call the sync version:
#
# raise NotImplementedError("E2B Code Executor doesn't support async")
return self._run(python_code)
| [
"useful for running python code, it returns the output of the code"
] |
2024-01-10 | shane-kercheval/llm-workflow | llm_workflow~agents.py | """
An Agent is an LLM that is given a set of tools and decides how to respond based on those tools.
Currently, the only agent in this library is the OpenAIFunctionAgent class, which wraps the logic
for OpenAI's "functions".
"""
from abc import ABC, abstractmethod
from collections import OrderedDict
import json
from typing import Any
from collections.abc import Callable
import functools
from llm_workflow.base import Record, _has_history, ExchangeRecord, LanguageModel
from llm_workflow.internal_utilities import has_method, retry_handler
from llm_workflow.openai import MODEL_COST_PER_TOKEN
class ToolBase(ABC):
"""
A tool is a callable object that has a name, description, and other properties that describe
the tool. The name, description, etc., may be passed to an LLM (e.g. OpenAI "functions") and,
therefore, should be a useful description for the LLM.
"""
@abstractmethod
def __call__(self, *args, **kwargs) -> Any: # noqa
"""A Tool object is callable, taking and returning any number of parameters."""
@property
@abstractmethod
def name(self) -> str:
"""The name of the tool. This value will be sent to an LLM."""
@property
@abstractmethod
def description(self) -> str:
"""The description of the tool. This value will be sent to an LLM."""
@property
@abstractmethod
def inputs(self) -> dict:
"""
Property that describes the inputs of the tool.
For example:
{
"variable_a": {
"type": "string",
"description": "This is a description of variable_a.",
},
"variable_b": {
"type": "string",
"enum": ["option_a", "option_b"],
"description": "This is a description of variable_b.",
},
}
"""
@property
@abstractmethod
def required(self) -> list:
"""Returns a list of inputs that are required."""
def to_dict(self) -> str:
"""
Returns a dictionary with properties that describe the tool.
        Currently this dictionary is in the format expected by the OpenAI "functions" API. The
        dependency on OpenAI is not ideal.
"""
return {
'name': self.name,
'description': self.description,
'parameters': {
# this is based on OpenAI requirement; i don't love this dependency
"type": "object",
"properties": self.inputs,
"required": self.required,
},
}
class Tool(ToolBase):
"""
A tool is an object that has a name, description, and other properties that describe
the tool. The name, description, etc., may be passed to an LLM (e.g. OpenAI "functions") and,
therefore, should be a useful description for the LLM.
A tool object is callable if a function is passed into callable_obj in the constructor.
"""
def __init__(
self,
name: str,
description: str,
inputs: dict,
required: list[str] | None = None,
callable_obj: Callable | None = None):
self._name = name
self._description = description
self._inputs = inputs
self._required = required
self._callable_obj = callable_obj
def __call__(self, *args, **kwargs) -> Any: # noqa
return self._callable_obj(*args, **kwargs)
@classmethod
def from_dict(cls, dictionary): # noqa
"""Returns a Tool object from a dictionary."""
return cls(**dictionary)
@property
def name(self) -> str:
"""The name of the tool. This value will be sent to an LLM."""
return self._name
@property
def description(self) -> str:
"""The description of the tool. This value will be sent to an LLM."""
return self._description
@property
def inputs(self) -> dict:
"""
Property that describes the inputs of the tool.
For example:
{
"variable_a": {
"type": "string",
"description": "This is a description of variable_a.",
},
"variable_b": {
"type": "string",
"enum": ["option_a", "option_b"],
"description": "This is a description of variable_b.",
},
}
"""
return self._inputs
@property
def required(self) -> list:
"""Returns a list of inputs that are required."""
return self._required
def history(self) -> list[Record]:
"""Returns the history of the underlying callable object, if applicable."""
if has_method(self._callable_obj, 'history'):
return self._callable_obj.history()
return None
def tool(name: str, description: str, inputs: dict, required: list[str] | None = None) -> Tool:
"""
A tool is a callable object that has a name, description, and other properties that describe
the tool. The name, description, etc., may be passed to an LLM (e.g. OpenAI "functions") and,
therefore, should be a useful description for the LLM.
This decorator wraps a callable object.
"""
def decorator(callable_obj: Callable): # noqa: ANN202
@functools.wraps(callable_obj)
def wrapper(*args, **kwargs): # noqa: ANN003, ANN002, ANN202
return callable_obj(*args, **kwargs)
return Tool(name, description, inputs, required, wrapper)
return decorator
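# A minimal illustrative sketch (the weather function below is hypothetical, not part of this
# library) of wrapping a callable with the `tool` decorator:
#
#   @tool(
#       name="lookup_weather",
#       description="Returns the current weather for a city.",
#       inputs={"city": {"type": "string", "description": "Name of the city."}},
#       required=["city"],
#   )
#   def lookup_weather(city: str) -> str:
#       return f"The weather in {city} is sunny."
#
#   lookup_weather("Paris")    # calls the wrapped function -> "The weather in Paris is sunny."
#   lookup_weather.to_dict()   # OpenAI-style "function" schema built from name/description/inputs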
class OpenAIFunctions(LanguageModel):
"""
Wrapper around OpenAI "functions" (https://platform.openai.com/docs/guides/gpt/function-calling).
    Calling the object returns a list of tuples, where each tuple contains a Tool object and a
dictionary of arguments (chosen by OpenAI) to pass to the tool.
From OpenAI:
"Developers can now describe functions to gpt-4-0613 and gpt-3.5-turbo-0613, and have the
model intelligently choose to output a JSON object containing arguments to call those
functions. This is a new way to more reliably connect GPT's capabilities with external
tools and APIs.
    See this notebook for an example: https://github.com/shane-kercheval/llm-workflow/blob/main/examples/agents.ipynb
"""
def __init__(
self,
tools: list[Tool],
model_name: str = 'gpt-3.5-turbo-1106',
system_message: str = "Decide which function to use. Only use the functions you have been provided with. Don't make assumptions about what values to plug into functions.", # noqa
timeout: int = 10,
    ) -> None:
"""
Args:
model_name:
e.g. 'gpt-3.5-turbo-1106'
tools:
a list of Tool objects (created with the `Tool` class or `tool` decorator).
system_message:
The content of the message associated with the "system" `role`.
timeout:
timeout value passed to OpenAI model.
"""
super().__init__()
self.model_name = model_name
self._tools = {}
for tool in tools:
if tool.name in self._tools:
raise ValueError(f"Tool name '{tool.name}' is already in use.")
self._tools[tool.name] = tool
self._system_message = system_message
self._history = []
self.timeout = timeout
def __call__(self, prompt: object) -> list[tuple[Tool, dict]]:
"""
        Uses the OpenAI "functions" API to decide which tool(s) to call based on the `prompt`.
        Unlike OpenAIFunctionAgent, this method does not call the selected tools; it only
        determines which tools to use and with which arguments.
Returns a list of tuples, where each tuple contains a Tool object and a dictionary of
arguments (chosen by OpenAI) to pass to the tool.
"""
from openai import OpenAI
messages = [
{"role": "system", "content": self._system_message},
{"role": "user", "content": prompt},
]
# we want to track to track costs/etc.; but we don't need the history to build up memory
# essentially, for now, this class won't have any memory/context of previous questions;
# it's only used to decide which tools/functions to call
client = OpenAI()
tools = [{'type': 'function', 'function': x.to_dict()} for x in self._tools.values()]
response = retry_handler()(
client.chat.completions.create,
model=self.model_name,
messages=messages,
tools=tools,
temperature=0,
# max_tokens=self.max_tokens,
timeout=self.timeout,
)
input_tokens = response.usage.prompt_tokens
completion_tokens = response.usage.completion_tokens
total_tokens = response.usage.total_tokens
cost = (input_tokens * self.cost_per_token['input']) + \
(completion_tokens * self.cost_per_token['output'])
record = ExchangeRecord(
prompt=prompt,
response='',
metadata={'model_name': self.model_name},
input_tokens=input_tokens,
response_tokens=completion_tokens,
total_tokens=total_tokens,
cost=cost,
)
self._history.append(record)
tool_calls = response.choices[0].message.tool_calls
if tool_calls:
tool_calls = [
(self._tools[x.function.name], json.loads(x.function.arguments))
for x in tool_calls
]
tool_names = ' | '.join([x[0].name for x in tool_calls])
record.response = f"tools: {tool_names}"
record.metadata['tool_names'] = tool_names
record.metadata['tool_args'] = ' | '.join([str(x[1]) for x in tool_calls])
return tool_calls
return None
@property
def cost_per_token(self) -> dict:
"""
Returns a dictionary containing 'input' and 'output' keys each containing a float
corresponding to the cost-per-token for the corresponding token type and model.
We need to dynamically look this up since the model_name can change over the course of the
object's lifetime.
"""
return MODEL_COST_PER_TOKEN[self.model_name]
def _get_history(self) -> list[Record]:
"""Returns a list of Records corresponding to any OpenAI call."""
return self._history
class OpenAIFunctionAgent(OpenAIFunctions):
"""
Overrides OpenAIFunctions to return the response from the tool selected (rather than a list of
tools/arguments).
NOTE: This implementation only extracts and calls the first tool returned by OpenAI.
This class uses the OpenAI "functions" api to decide which tool to use; the selected tool
(which is a callable) is called and passed the arguments determined by OpenAI.
The response from the tool is returned by the agent object.
"""
def __call__(self, prompt: object) -> str:
"""
Uses the OpenAI "functions" api to decide which tool to call based on the `prompt`. The
selected tool (which is a callable) is called and passed the arguments determined by
OpenAI. The response from the tool is returned by the agent object.
"""
tool_calls = super().__call__(prompt)
if tool_calls:
tool, args = tool_calls[0]
return tool(**args)
return None
def _get_history(self) -> list[Record]:
"""
Returns a list of Records corresponding to any OpenAI call as well as any Record object
associated with the underlying tools' history.
NOTE: the entire history of each tool is included. If you pass the OpenAIFunctionAgent
object a tool that was previously used (i.e. the tool "object" was instantiated and called
and has resulting history), that history will be included, even though it is not directly
related to the use of the Agent. As a best practice, you should only include tool objects
that have not been previously instantiated/used.
"""
histories = [tool.history() for tool in self._tools.values() if _has_history(tool)]
# Concatenate all the lists into a single list
histories = [record for sublist in histories for record in sublist]
histories += self._history
unique_records = OrderedDict((record.uuid, record) for record in histories)
unique_records = list(unique_records.values())
return sorted(unique_records, key=lambda r: r.timestamp)
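# Minimal usage sketch (illustrative; it assumes a `lookup_time` Tool like the hypothetical one
# sketched above and a configured OpenAI API key):
#
#     functions = OpenAIFunctions(tools=[lookup_time])
#     calls = functions("What time is it in Seattle, WA?")  # -> [(Tool, {'city': 'Seattle, WA'})] or None
#
#     agent = OpenAIFunctionAgent(tools=[lookup_time])
#     answer = agent("What time is it in Seattle, WA?")  # -> the string returned by the tool, or None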
| [] |
2024-01-10 | shane-kercheval/llm-workflow | tests~test_agents.py | """Test agents.py classes and functions."""
from time import sleep
from llm_workflow.agents import OpenAIFunctionAgent, OpenAIFunctions, Tool, tool
from llm_workflow.base import Record, ExchangeRecord
@tool(
name="ask_weather",
description="Use this function to answer questions about the weather for a particular city.",
inputs={
'location': {
'type': 'string',
'description': "The city and state, e.g. San Francisco, CA",
},
'unit': {
'type': 'string',
'enum': ['celsius', 'fahrenheit'],
'description': "The temperature unit to use. The model needs to infer this from the `location`.", # noqa
},
},
required=['location', 'unit'],
)
def fake_weather(location: str, unit: str) -> str:
"""Fake function to lookup weather."""
return f"The temperature of {location} is 1000 degrees {unit}."
@tool(
name="ask_stock_price",
description="Use this function to answer questions about the the stock price for a particular stock symbol.", # noqa
inputs={
'symbol': {
'type': 'string',
'description': "The stock symbol, e.g. 'AAPL'",
},
},
required= ['symbol'],
)
def fake_stock(symbol: str) -> str:
"""Fake function to lookup stock price."""
return f"The stock price of {symbol} is $1000."
def test_OpenAIToolAgent__Tool_class(): # noqa
class FakeWeatherAPI:
def __init__(self) -> None:
self._history = []
def __call__(self, location: str, unit: str) -> str:
result = f"The temperature of {location} is 1000 degrees {unit}."
# need a slight delay so we sort records consistently for test
# the ExchangeRecord is created before the function is called
sleep(0.01)
self._history.append(Record(metadata={'result': result}))
return result
def history(self) -> list[str]:
return self._history
class FakeStockAPI:
def __init__(self) -> None:
self._history = []
def __call__(self, symbol: str) -> str:
result = f"The stock price of {symbol} is $1000."
# need a slight delay so we sort records consistently for test
# the ExchangeRecord is created before the function is called
sleep(0.01)
self._history.append(Record(metadata={'result': result}))
return result
def history(self) -> list[str]:
return self._history
fake_weather_tool = Tool(
callable_obj=FakeWeatherAPI(),
name="ask_weather",
description="Use this function to answer questions about the weather for a particular city.", # noqa
inputs={
'location': {
'type': 'string',
'description': "The city and state, e.g. San Francisco, CA",
},
'unit': {
'type': 'string',
'enum': ['celsius', 'fahrenheit'],
'description': "The temperature unit to use. The model needs to infer this from the `location`.", # noqa
},
},
required= ['location', 'unit'],
)
fake_stock_tool = Tool(
callable_obj=FakeStockAPI(),
name="ask_stock_price",
description="Use this function to answer questions about the the stock price for a particular stock symbol.", # noqa
inputs={
'symbol': {
'type': 'string',
'description': "The stock symbol, e.g. 'AAPL'",
},
},
required= ['symbol'],
)
assert fake_weather_tool.name == fake_weather.name
assert fake_weather_tool.description == fake_weather.description
assert fake_weather_tool.inputs == fake_weather.inputs
assert fake_weather_tool.required == fake_weather.required
assert fake_weather_tool.to_dict() == fake_weather.to_dict()
assert fake_stock_tool.name == fake_stock.name
assert fake_stock_tool.description == fake_stock.description
assert fake_stock_tool.inputs == fake_stock.inputs
assert fake_stock_tool.required == fake_stock.required
assert fake_stock_tool.to_dict() == fake_stock.to_dict()
agent = OpenAIFunctionAgent(tools=[fake_weather_tool, fake_stock_tool])
question = "What is the temperature in Seattle WA."
response = agent(question)
assert 'Seattle' in response
assert 'degrees' in response
# assert 'fahrenheit' in response # model does not reliably infer fahrenheit
assert len(fake_weather_tool.history()) == 1
assert fake_weather_tool.history()[0].metadata['result'] == response
assert len(fake_stock_tool.history()) == 0
# the first record is the ExchangeRecord associated with the OpenAIFunctionAgent
# and the second record is from the tool we used
assert len(agent.history()) == 2
assert isinstance(agent.history()[0], ExchangeRecord)
assert agent.history()[0].prompt == question
assert fake_weather_tool.name in agent.history()[0].response
assert agent.history()[0].metadata['tool_names'] == fake_weather_tool.name
assert 'location' in agent.history()[0].metadata['tool_args']
assert 'unit' in agent.history()[0].metadata['tool_args']
assert agent.history()[0].input_tokens > 0
assert agent.history()[0].response_tokens > 0
assert agent.history()[0].total_tokens == agent.history()[0].input_tokens + agent.history()[0].response_tokens # noqa
assert agent.history()[0].total_tokens > 0
assert agent.history()[0].cost > 0
assert isinstance(agent.history()[1], Record)
assert agent.history()[1].metadata['result'] == response
question = "What is the stock price of Apple?"
response = agent(question)
assert 'AAPL' in response
# the first record (in the second use) is the ExchangeRecord associated with the
# OpenAIFunctionAgent and the second record is from the tool we used
assert len(fake_weather_tool.history()) == 1
assert len(fake_stock_tool.history()) == 1
assert fake_stock_tool.history()[0].metadata['result'] == response
assert len(agent.history()) == 4
assert isinstance(agent.history()[2], ExchangeRecord)
assert agent.history()[2].prompt == question
assert fake_stock_tool.name in agent.history()[2].response
assert agent.history()[2].metadata['tool_names'] == fake_stock_tool.name
assert 'symbol' in agent.history()[2].metadata['tool_args']
assert agent.history()[2].input_tokens > 0
assert agent.history()[2].response_tokens > 0
assert agent.history()[2].total_tokens == agent.history()[2].input_tokens + agent.history()[2].response_tokens # noqa
assert agent.history()[2].total_tokens > 0
assert agent.history()[2].cost > 0
assert isinstance(agent.history()[3], Record)
assert agent.history()[3].metadata['result'] == response
question = "No tool is applicable for this question."
response = agent(question)
assert response is None
assert len(agent.history()) == 5
assert agent.history()[4].prompt == question
assert agent.history()[4].response == ''
assert 'tool_name' not in agent.history()[4].metadata
assert agent.history()[4].input_tokens > 0
assert agent.history()[4].response_tokens > 0
assert agent.history()[4].total_tokens == agent.history()[4].input_tokens + agent.history()[4].response_tokens # noqa
assert agent.history()[4].total_tokens > 0
assert agent.history()[4].cost > 0
def test_OpenAIToolAgent__tool_decorator(): # noqa
assert isinstance(fake_weather, Tool)
assert isinstance(fake_stock, Tool)
agent = OpenAIFunctionAgent(
model_name='gpt-3.5-turbo-1106',
tools=[fake_weather, fake_stock],
)
question = "What is the temperature in Seattle WA."
response = agent(question)
assert 'Seattle' in response
assert 'degrees' in response
# assert 'fahrenheit' in response # model does not reliably infer fahrenheit
assert len(agent.history()) == 1
assert agent.history()[0].prompt == question
assert fake_weather.name in agent.history()[0].response
assert agent.history()[0].metadata['tool_names'] == fake_weather.name
assert 'location' in agent.history()[0].metadata['tool_args']
assert 'unit' in agent.history()[0].metadata['tool_args']
assert agent.history()[0].input_tokens > 0
assert agent.history()[0].response_tokens > 0
assert agent.history()[0].total_tokens == agent.history()[0].input_tokens + agent.history()[0].response_tokens # noqa
assert agent.history()[0].total_tokens > 0
assert agent.history()[0].cost > 0
question = "What is the stock price of Apple?"
response = agent(question)
assert 'AAPL' in response
assert len(agent.history()) == 2
assert agent.history()[1].prompt == question
assert fake_stock.name in agent.history()[1].response
assert agent.history()[1].metadata['tool_names'] == fake_stock.name
assert 'symbol' in agent.history()[1].metadata['tool_args']
assert agent.history()[1].input_tokens > 0
assert agent.history()[1].response_tokens > 0
assert agent.history()[1].total_tokens == agent.history()[1].input_tokens + agent.history()[1].response_tokens # noqa
assert agent.history()[1].total_tokens > 0
assert agent.history()[1].cost > 0
question = "No tool is applicable for this question."
response = agent(question)
assert response is None
assert len(agent.history()) == 3
assert agent.history()[2].prompt == question
assert agent.history()[2].response == ''
assert 'tool_names' not in agent.history()[2].metadata
assert agent.history()[2].input_tokens > 0
assert agent.history()[2].response_tokens > 0
assert agent.history()[2].total_tokens == agent.history()[2].input_tokens + agent.history()[2].response_tokens # noqa
assert agent.history()[2].total_tokens > 0
assert agent.history()[2].cost > 0
def test_OpenAIToolAgent__tools_via_yaml(): # noqa
# read in yaml file
import yaml
with open('tests/test_data/agents/mock_tools.yml') as f:
yaml_data = yaml.safe_load(f)
tools = {x['name']: Tool.from_dict(x) for x in yaml_data}
assert tools['ask_weather'].name == fake_weather.name
assert tools['ask_weather'].description == fake_weather.description
assert tools['ask_weather'].inputs == fake_weather.inputs
assert tools['ask_weather'].required == fake_weather.required
assert tools['ask_weather'].to_dict() == fake_weather.to_dict()
assert tools['ask_stock_price'].name == fake_stock.name
assert tools['ask_stock_price'].description == fake_stock.description
assert tools['ask_stock_price'].inputs == fake_stock.inputs
assert tools['ask_stock_price'].required == fake_stock.required
assert tools['ask_stock_price'].to_dict() == fake_stock.to_dict()
tools = OpenAIFunctions(
model_name='gpt-3.5-turbo-1106',
tools=tools.values(),
)
question = "What is the temperature in Seattle WA."
response = tools(question)
assert len(response) == 1
response_tool, response_arguments = response[0]
assert 'Seattle' in response_arguments['location']
# tool doesn't correctly infer fahrenheit
assert 'degrees' in response_arguments['unit'] \
or 'fahrenheit' in response_arguments['unit'] \
or 'celsius' in response_arguments['unit']
assert len(tools.history()) == 1
assert tools.history()[0].prompt == question
assert response_tool.name in tools.history()[0].response
assert tools.history()[0].metadata['tool_names'] == response_tool.name
assert 'location' in tools.history()[0].metadata['tool_args']
assert 'unit' in tools.history()[0].metadata['tool_args']
assert tools.history()[0].input_tokens > 0
assert tools.history()[0].response_tokens > 0
assert tools.history()[0].total_tokens == tools.history()[0].input_tokens + tools.history()[0].response_tokens # noqa
assert tools.history()[0].total_tokens > 0
assert tools.history()[0].cost > 0
question = "What is the stock price of Apple?"
response = tools(question)
response_tool, response_arguments = response[0]
assert 'AAPL' in response_arguments['symbol']
assert len(tools.history()) == 2
assert tools.history()[1].prompt == question
assert response_tool.name in tools.history()[1].response
assert tools.history()[1].metadata['tool_names'] == response_tool.name
assert 'symbol' in tools.history()[1].metadata['tool_args']
assert tools.history()[1].input_tokens > 0
assert tools.history()[1].response_tokens > 0
assert tools.history()[1].total_tokens == tools.history()[1].input_tokens + tools.history()[1].response_tokens # noqa
assert tools.history()[1].total_tokens > 0
assert tools.history()[1].cost > 0
question = "No tool is applicable for this question."
response = tools(question)
assert response is None
assert len(tools.history()) == 3
assert tools.history()[2].prompt == question
assert tools.history()[2].response == ''
assert 'tool_names' not in tools.history()[2].metadata
assert tools.history()[2].input_tokens > 0
assert tools.history()[2].response_tokens > 0
assert tools.history()[2].total_tokens == tools.history()[2].input_tokens + tools.history()[2].response_tokens # noqa
assert tools.history()[2].total_tokens > 0
assert tools.history()[2].cost > 0
| [] |
2024-01-10 | huangwl18/VoxPoser | src~LMP.py |
import openai
from time import sleep
from openai.error import RateLimitError, APIConnectionError
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter
from utils import load_prompt, DynamicObservation, IterableDynamicObservation
import time
from LLM_cache import DiskCache
class LMP:
"""Language Model Program (LMP), adopted from Code as Policies."""
def __init__(self, name, cfg, fixed_vars, variable_vars, debug=False, env='rlbench'):
self._name = name
self._cfg = cfg
self._debug = debug
self._base_prompt = load_prompt(f"{env}/{self._cfg['prompt_fname']}.txt")
self._stop_tokens = list(self._cfg['stop'])
self._fixed_vars = fixed_vars
self._variable_vars = variable_vars
self.exec_hist = ''
self._context = None
self._cache = DiskCache(load_cache=self._cfg['load_cache'])
def clear_exec_hist(self):
self.exec_hist = ''
def build_prompt(self, query):
if len(self._variable_vars) > 0:
variable_vars_imports_str = f"from utils import {', '.join(self._variable_vars.keys())}"
else:
variable_vars_imports_str = ''
prompt = self._base_prompt.replace('{variable_vars_imports}', variable_vars_imports_str)
if self._cfg['maintain_session'] and self.exec_hist != '':
prompt += f'\n{self.exec_hist}'
prompt += '\n' # separate prompted examples with the query part
if self._cfg['include_context']:
assert self._context is not None, 'context is None'
prompt += f'\n{self._context}'
user_query = f'{self._cfg["query_prefix"]}{query}{self._cfg["query_suffix"]}'
prompt += f'\n{user_query}'
return prompt, user_query
def _cached_api_call(self, **kwargs):
# check whether completion endpoint or chat endpoint is used
if kwargs['model'] != 'gpt-3.5-turbo-instruct' and \
any([chat_model in kwargs['model'] for chat_model in ['gpt-3.5', 'gpt-4']]):
# add special prompt for chat endpoint
user1 = kwargs.pop('prompt')
new_query = '# Query:' + user1.split('# Query:')[-1]
user1 = ''.join(user1.split('# Query:')[:-1]).strip()
user1 = f"I would like you to help me write Python code to control a robot arm operating in a tabletop environment. Please complete the code every time when I give you new query. Pay attention to appeared patterns in the given context code. Be thorough and thoughtful in your code. Do not include any import statement. Do not repeat my question. Do not provide any text explanation (comment in code is okay). I will first give you the context of the code below:\n\n```\n{user1}\n```\n\nNote that x is back to front, y is left to right, and z is bottom to up."
assistant1 = f'Got it. I will complete what you give me next.'
user2 = new_query
# handle given context (this was written originally for completion endpoint)
if user1.split('\n')[-4].startswith('objects = ['):
obj_context = user1.split('\n')[-4]
# remove obj_context from user1
user1 = '\n'.join(user1.split('\n')[:-4]) + '\n' + '\n'.join(user1.split('\n')[-3:])
# add obj_context to user2
user2 = obj_context.strip() + '\n' + user2
messages=[
{"role": "system", "content": "You are a helpful assistant that pays attention to the user's instructions and writes good python code for operating a robot arm in a tabletop environment."},
{"role": "user", "content": user1},
{"role": "assistant", "content": assistant1},
{"role": "user", "content": user2},
]
kwargs['messages'] = messages
if kwargs in self._cache:
print('(using cache)', end=' ')
return self._cache[kwargs]
else:
ret = openai.ChatCompletion.create(**kwargs)['choices'][0]['message']['content']
# post processing
ret = ret.replace('```', '').replace('python', '').strip()
self._cache[kwargs] = ret
return ret
else:
if kwargs in self._cache:
print('(using cache)', end=' ')
return self._cache[kwargs]
else:
ret = openai.Completion.create(**kwargs)['choices'][0]['text'].strip()
self._cache[kwargs] = ret
return ret
def __call__(self, query, **kwargs):
prompt, user_query = self.build_prompt(query)
start_time = time.time()
while True:
try:
code_str = self._cached_api_call(
prompt=prompt,
stop=self._stop_tokens,
temperature=self._cfg['temperature'],
model=self._cfg['model'],
max_tokens=self._cfg['max_tokens']
)
break
except (RateLimitError, APIConnectionError) as e:
print(f'OpenAI API got err {e}')
print('Retrying after 3s.')
sleep(3)
print(f'*** OpenAI API call took {time.time() - start_time:.2f}s ***')
if self._cfg['include_context']:
assert self._context is not None, 'context is None'
to_exec = f'{self._context}\n{code_str}'
to_log = f'{self._context}\n{user_query}\n{code_str}'
else:
to_exec = code_str
to_log = f'{user_query}\n{to_exec}'
to_log_pretty = highlight(to_log, PythonLexer(), TerminalFormatter())
if self._cfg['include_context']:
print('#'*40 + f'\n## "{self._name}" generated code\n' + f'## context: "{self._context}"\n' + '#'*40 + f'\n{to_log_pretty}\n')
else:
print('#'*40 + f'\n## "{self._name}" generated code\n' + '#'*40 + f'\n{to_log_pretty}\n')
gvars = merge_dicts([self._fixed_vars, self._variable_vars])
lvars = kwargs
# return function instead of executing it so we can replan using latest obs (do not do this for high-level UIs)
if not self._name in ['composer', 'planner']:
to_exec = 'def ret_val():\n' + to_exec.replace('ret_val = ', 'return ')
to_exec = to_exec.replace('\n', '\n ')
if self._debug:
# only "execute" function performs actions in environment, so we comment it out
action_str = ['execute(']
try:
for s in action_str:
exec_safe(to_exec.replace(s, f'# {s}'), gvars, lvars)
except Exception as e:
print(f'Error: {e}')
import pdb ; pdb.set_trace()
else:
exec_safe(to_exec, gvars, lvars)
self.exec_hist += f'\n{to_log.strip()}'
if self._cfg['maintain_session']:
self._variable_vars.update(lvars)
if self._cfg['has_return']:
if self._name == 'parse_query_obj':
try:
# there may be multiple objects returned, but we also want them to be unevaluated functions so that we can access latest obs
return IterableDynamicObservation(lvars[self._cfg['return_val_name']])
except AssertionError:
return DynamicObservation(lvars[self._cfg['return_val_name']])
return lvars[self._cfg['return_val_name']]
def merge_dicts(dicts):
return {
k : v
for d in dicts
for k, v in d.items()
}
def exec_safe(code_str, gvars=None, lvars=None):
banned_phrases = ['import', '__']
for phrase in banned_phrases:
assert phrase not in code_str
if gvars is None:
gvars = {}
if lvars is None:
lvars = {}
empty_fn = lambda *args, **kwargs: None
custom_gvars = merge_dicts([
gvars,
{'exec': empty_fn, 'eval': empty_fn}
])
try:
exec(code_str, custom_gvars, lvars)
except Exception as e:
print(f'Error executing code:\n{code_str}')
raise e | [
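# Minimal illustration of exec_safe (the code string below is made up for the example):
#
#     gvars = {'say': print}
#     lvars = {}
#     exec_safe("ret_val = 1 + 1\nsay(ret_val)", gvars, lvars)
#     # afterwards lvars['ret_val'] == 2; any code string containing 'import' or '__' fails the assertions above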
"\nPLACEHOLDER",
"\n",
"You are a helpful assistant that pays attention to the user's instructions and writes good python code for operating a robot arm in a tabletop environment.",
"{variable_vars_imports}"
] |
2024-01-10 | yosief14/yummarizer-server | yummarize.py | #!/usr/bin/env python3
import json
import base64
import requests
import dotenv
import os
import openai
import sys
import urllib.parse as p
import urllib.request as r
import tiktoken
from flask import Flask, request as flrequest, jsonify, abort, g
import uuid
import time
# Constructs the request that graps the captions object from the video and returns it as a json object
def getCaptions(user_input):
video_id = get_video_id(user_input)
base64_string = base64.b64encode("\n\v{}".format(video_id).encode("utf-8")).decode("utf-8")
headers = {
"Content-Type": "application/json",
}
body = json.dumps(
{
"context": {"client": {"clientName": "WEB", "clientVersion": "2.9999099"}},
"params": base64_string,
}
)
response = requests.post(
"https://www.youtube.com/youtubei/v1/get_transcript?key=AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8",
headers=headers,
data=body,
).json()
# Parses the json object and constructs the captions input to be passed to openAI
caption = ""
if "actions" not in response:
abort(400, description=f"Cannot locate captions for video with url \"{user_input}\"")
for cueGroup in response["actions"][0]["updateEngagementPanelAction"]["content"]["transcriptRenderer"]["body"]["transcriptBodyRenderer"]["cueGroups"]:
for cue in cueGroup["transcriptCueGroupRenderer"]["cues"]:
#this is the text of the caption
caption += cue["transcriptCueRenderer"]["cue"]["simpleText"] + "\n"
return caption
# Parses the url and returns the video id
def get_video_id(url):
if url.startswith("https://www.youtube.com/watch?v="):
query = p.urlparse(url).query
params = p.parse_qs(query)
return params["v"][0]
else:
abort(400, description=f"\"{url}\" is not a valid youtube url")
def check_context_length(context):
context_string = ""
for message in context:
context_string += message["content"] + "\n"
encoding = tiktoken.get_encoding("cl100k_base")
token_len = len(encoding.encode(context_string))
if(token_len > 12000):
abort(400, description=f"The transcript has a token length of {token_len} which is too long to process. Please try again with a shorter video. The maximum token length is 12,000.")
else:
return True
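# Rough illustration of the token budgeting above (assumes tiktoken is installed; the sample text is made up):
#
# enc = tiktoken.get_encoding("cl100k_base")
# token_len = len(enc.encode("Recipe: 1 cup of flour, 2 eggs..."))
# # check_context_length aborts with HTTP 400 once the combined messages exceed 12,000 tokens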
# Returns the recipe from the openAI model
def getRecipe(caption):
dotenv.load_dotenv()
openai.api_key = os.getenv("API_KEY")
query = "Summurize all of the recipes mentioned in the follwing transcript into Recipe: Ingredients: and Instructions: . For the Ingredients and Instructions, Be as detailed about measurements as possible"
context = "Transcript: \n" + caption
content = query + "\n" + context
system_messages=[
{"role": "system", "content": "You are a web server designed to output JSON objects with the following format for every recipe found: {Recipe: {Ingredients: , Instructions:}} . If the transcript doesn't contain a recipe, your return value should be -1. For the Instructions, each step should be its own value in a list. For the Ingredients each ingredient should be its own value in a list. Measurements for the Ingredients should be as detailed as possible."},
{"role": "system", "content": "If you are having trouble, try to break down the problem into smaller parts. For example, first try to find the recipe, then try to find the ingredients, then try to find the instructions."},
{"role": "system", "content": "The Ingredients and Instructions should be as detailed as possible. For example, if the recipe calls for 1 cup of flour, you should return 1 cup of flour, not just flour."}
]
prompt = {"role": "user", "content": f"{content}"}
system_messages.append(prompt)
if(not check_context_length(system_messages)):
return "The transcript is too long to process. Please try again with a shorter video."
completion = openai.ChatCompletion.create(model=f"gpt-3.5-turbo-1106", response_format={"type": "json_object"}, messages=system_messages)
return completion["choices"][0].message.content
def getVideoMetaData(video_id):
params = {
"format": "json",
"url": f"https://www.youtube.com/watch?v={video_id}"
}
query = p.urlencode(params)
url = "https://www.youtube.com/oembed"
url += "?" + query
with r.urlopen(url) as response:
response_text = response.read()
data = json.loads(response_text.decode())
return data["title"], data["author_name"]
# Moved the app creation to a function so that it can be used in the test file
def create_app():
app = Flask(__name__)
@app.route('/')
def index():
return 'Hello World'
@app.before_request
def before_request():
execution_id = uuid.uuid4()
g.start_time = time.time()
g.execution_id = execution_id
print(g.execution_id, "Route Called", flrequest.url)
@app.route('/yummarize', methods=['GET'])
def yummarize():
user_input = flrequest.args.get('url')
caption = getCaptions(user_input)
recipe = getRecipe(caption)
videoTitle, channel = (getVideoMetaData(get_video_id(user_input)))
metaJson = {"title": videoTitle, "channel": channel}
recipeJson = json.loads(recipe)
metaJson.update(recipeJson)
return metaJson
return app
app = create_app()
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000) | [
"You are a web server designed to output JSON objects with the following format for every recipe found: {Recipe: {Ingredients: , Instructions:}} . If the transcript doesn't contain a recipe, your return value should be -1. For the Instructions, each step should be its own value in a list. For the Ingredients each ingredient should be its own value in a list. Measurements for the Ingredients should be as detailed as possible.",
"PLACEHOLDER",
"If you are having trouble, try to break down the problem into smaller parts. For example, first try to find the recipe, then try to find the ingredients, then try to find the instructions.",
"{'role': 'user', 'content': 'PLACEHOLDER'}",
"The Ingredients and Instructions should be as detailed as possible. For example, if the recipe calls for 1 cup of flour, you should return 1 cup of flour, not just flour."
] |
2024-01-10 | evanmcneely/gpt-engineer | gpt_engineer~chains~ask_for_clarification.py | from halo import Halo
from langchain import PromptTemplate, LLMChain
from langchain.memory import ConversationBufferMemory
from ..llm import get_llm
from config import Models
def format_initial_prompt(prompt: str, file_content: str) -> str:
return f"""
Instructions: {prompt}
{file_content}
"""
prompt = """
Respond with a single question that you would need to ask to gain more clarity about how to follow the most recent instructions or feedback. Return just the question. If everything is clear, return the string "nothing left to clarify". You have been trusted to make assumptions, not every small detail needs to be clarified.
Chat History:
{chat_history}
"""
@Halo(text="Interpreting", spinner="dots")
def ask_for_clarification(memory: str):
chain = LLMChain(
llm=get_llm(Models.CONVERSATION_MODEL),
prompt=PromptTemplate.from_template(prompt),
)
return chain.predict(chat_history=memory)
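# Hypothetical usage (the chat history string below is made up for illustration):
#
# history = "Instructions: build a CLI todo app in Python."
# question = ask_for_clarification(history) # -> a single follow-up question, or "nothing left to clarify"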
| [
"\nRespond with a single question that you would need to ask to gain more clarity about how to follow the most recent instructions or feedback. Return just the question. If everything is clear, return the string \"nothing left to clarify\". You have been trusted to make assumptions, not every small detail needs to be clarified.\n\nChat History: \n{chat_history}\n"
] |
2024-01-10 | evanmcneely/gpt-engineer | gpt_engineer~chains~write_code.py | from halo import Halo
import re
from typing import List, Tuple
from langchain import PromptTemplate, LLMChain
from langchain.memory import ConversationBufferMemory
from langchain.callbacks import StreamingStdOutCallbackHandler
from ..llm import get_llm
from config import Models
template = """
Please now remember the steps:
Think step by step and reason yourself to the right decisions to make sure we get it right.
First lay out the names of the core classes, functions, methods that will be necessary, As well as a quick comment on their purpose.
Then you will output the content of each file including ALL code.
Each file must strictly follow a markdown code block format, where the following tokens must be replaced such that
FILENAME is the lowercase file name including the file extension,
LANG is the markup code block language for the code's language, and CODE is the code:
FILENAME
```LANG
CODE
```
Please note that the code should be fully functional. No placeholders.
Chat history:
{chat_history}
Begin
"""
def _codeblock_search(chat: str) -> re.Match:
regex = r"(\S+)\n\s*```[^\n]*\n(.+?)```"
return re.finditer(regex, chat, re.DOTALL)
def _parse_chat(chat) -> List[Tuple[str, str]]:
matches = _codeblock_search(chat)
files = []
for match in matches:
# Strip the filename of any non-allowed characters
path = re.sub(r'[<>"|?*]', "", match.group(1))
# Remove leading and trailing brackets
path = re.sub(r"^\[(.*)\]$", r"\1", path)
# Remove leading and trailing backticks
path = re.sub(r"^`(.*)`$", r"\1", path)
# Remove trailing ]
path = re.sub(r"\]$", "", path)
# Get the code
code = match.group(2)
# Add the file to the list
files.append((path, code))
# Get all the text before the first ``` block
readme = chat.split("```")[0]
files.append(("README.md", readme))
# Return the files
return files
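# Illustrative example of what _parse_chat extracts (the chat text is made up):
#
# chat = 'Here is the plan.\n\nmain.py\n```python\nprint("hi")\n```\n'
# _parse_chat(chat)
# # -> [('main.py', 'print("hi")\n'), ('README.md', 'Here is the plan.\n\nmain.py\n')]
# # the README entry is simply everything before the first ``` fence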
@Halo(text="Generating code", spinner="dots")
def write_code(memory: str):
chain = LLMChain(
llm=get_llm(Models.CODE_MODEL),
prompt=PromptTemplate.from_template(template),
)
result = chain.predict(chat_history=memory)
return _parse_chat(result)
| [
"\nPlease now remember the steps:\n\nThink step by step and reason yourself to the right decisions to make sure we get it right.\nFirst lay out the names of the core classes, functions, methods that will be necessary, As well as a quick comment on their purpose.\n\nThen you will output the content of each file including ALL code.\nEach file must strictly follow a markdown code block format, where the following tokens must be replaced such that\nFILENAME is the lowercase file name including the file extension,\nLANG is the markup code block language for the code's language, and CODE is the code:\n\nFILENAME\n```LANG\nCODE\n```\n\nPlease note that the code should be fully functional. No placeholders.\n\n\nChat history:\n{chat_history}\n\nBegin\n"
] |
2024-01-10 | evanmcneely/gpt-engineer | gpt_engineer~chains~get_imported_file_paths.py | from halo import Halo
from langchain import PromptTemplate, LLMChain
from langchain.output_parsers import CommaSeparatedListOutputParser
from ..llm import get_llm
from config import Models
# def _parse_output(content: str) -> str:
# return content.split(",")
prompt = """
Determine the paths to all the files imported into the files below from the project root directory in the form of ./path/to/file with the correct file extension. Return the result as a comma separated list of file paths. Don't return anything else, just the file paths.
{file}
"""
@Halo(text="Loading relative files", spinner="dots")
def get_imported_file_paths(file: str):
chain = LLMChain(
llm=get_llm(Models.INTERPRETATION_MODEL),
prompt=PromptTemplate.from_template(prompt),
output_parser=CommaSeparatedListOutputParser(),
)
paths = chain.predict(file=file)
return paths
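# Hypothetical usage (illustrative only; the source snippet is made up and the exact paths depend on the model's reply):
#
# source = "import { formatDate } from './utils/date';\nimport config from './config';"
# paths = get_imported_file_paths(source) # e.g. ['./utils/date.js', './config.js']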
| [
"\nDetermine the paths to all the files imported into the files below from the project root directory in the form of ./path/to/file with the correct file extension. Return the result as a comma separated list of file paths. Don't return anything else, just the file paths.\n\n{file}\n"
] |
2024-01-10 | evanmcneely/gpt-engineer | gpt_engineer~ChatMemory.py | from typing import List
from langchain.memory import ChatMessageHistory
from langchain.schema import BaseMessage
class ChatMemory(ChatMessageHistory):
def load_messages(self):
messages: List[BaseMessage] = self.messages
history: str = ""
for message in messages:
if message.type == "human":
history += message.content + "\n"
elif message.type == "ai":
history += message.content + "\n"
return history
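# Usage sketch (assumes langchain's ChatMessageHistory helpers add_user_message/add_ai_message; the messages are made up):
#
# memory = ChatMemory()
# memory.add_user_message("Build a snake game.")
# memory.add_ai_message("Which language should I use?")
# memory.load_messages() # -> "Build a snake game.\nWhich language should I use?\n"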
| [] |
2024-01-10 | AbdAftab/AI-Community | bot.py | import cohere
import random
import threading
from weather import Weather
api = "" # Add api key
co = cohere.Client(api)
class Bot:
def __init__(self, name, likes, dislikes):
self.name = name
self.likes = likes
self.dislikes = dislikes
self.activities_done_today = []
self.mood = "neutral"
def perform_activity(self, activity, current_weather):
self.activities_done_today.append(activity)
mood_prompt = self.create_mood_prompt(current_weather)
response = co.generate(
model='command',
prompt = mood_prompt,
max_tokens=200,
temperature=0.750)
self.mood = response.generations[0].text # Update the mood based on Cohere's response
print(f"{self.name} is {activity}, mood: {self.mood}")
print("All Activities Done: ", self.activities_done_today)
def create_mood_prompt(self, current_weather):
activities_summary = ', '.join(self.activities_done_today)
prompt = f"""Only reply with a single word realistic new mood for someone who has gone through the following:
Current Weather: {current_weather}
Likes: {', '.join(self.likes)}
Hates: {', '.join(self.dislikes)}
Activities: {activities_summary}
Current Mood: {self.mood}"""
return prompt
def converse_with(self, other_bot):
# This bot asks the other bot about its day
print(f"{self.name} asks {other_bot.name}: 'How was your day?'")
# The other bot generates a response summarizing its day
activities_summary = ', '.join(other_bot.activities_done_today)
prompt = f"""I am going to give you a list of activities and a mood, can you then respond with a life like dialogue of someone summarizing their completed activities with a tone of the given mood. The list is as follows:
Activities performed: {activities_summary}
Mood: {other_bot.mood}"""
response = co.generate(
model='command',
prompt = prompt,
max_tokens=200,
temperature=0.750)
sentences = response.generations[0].text.split("\"")
# Bot 1 responds with each element in the array
for sentence in sentences:
if len(sentence) > 5:
print(f"{other_bot.name} responds: '{sentence.strip()}'")
return sentence.strip()
return 'Nothing'
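# Illustrative sketch of wiring two bots together (the names, likes, and weather are made up; a valid
# Cohere API key must be set in `api` above):
#
# alice = Bot("Alice", likes=["rain", "reading"], dislikes=["crowds"])
# bob = Bot("Bob", likes=["sunshine"], dislikes=["rain"])
# bob.perform_activity("going for a run", current_weather="rainy")
# alice.converse_with(bob) # Alice asks Bob about his day; Bob replies in his current mood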
| [
", "
] |
2024-01-10 | soilniba/shuiqianxiaoxi-download | spider~llama_index_test.py | from bs4 import BeautifulSoup
import urllib
import json
import time
import datetime
import requests
import os
import re
import gzip
import PyPDF2
import docx2txt
import nltk
import html2text
import openai
from loguru import logger
import logging
from langchain import OpenAI
from langchain.chat_models import ChatOpenAI
from llama_index import (
GPTKeywordTableIndex,
GPTSimpleVectorIndex,
SimpleDirectoryReader,
BeautifulSoupWebReader,
StringIterableReader,
LLMPredictor,
PromptHelper,
QuestionAnswerPrompt,
RefinePrompt,
ServiceContext
)
from llama_index.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT_TMPL, DEFAULT_REFINE_PROMPT_TMPL
from config import openai_api_key, feishu_robot_news, feishu_robot_error
script_dir = os.path.dirname(os.path.realpath(__file__)) # get the directory the script lives in
os.chdir(script_dir) # switch the working directory to the script's directory
openai.api_key = openai_api_key
os.environ["OPENAI_API_KEY"] = openai_api_key
import psutil
p = psutil.Process() # get the Process object for the current process
p.nice(psutil.IDLE_PRIORITY_CLASS) # set the process to low priority
feishu_robot_news = feishu_robot_error # force the test channel
def get_article(href):
response = requests.get(href)
html = response.content
# parse the page content
soup = BeautifulSoup(html, 'html.parser')
# extract the page body text
text = soup.get_text()
# strip extra whitespace, newlines, and other useless characters
text = re.sub(r'\s+', ' ', text).strip()
# collapse multiple consecutive spaces into one
text = re.sub(r'\s+', ' ', text)
# output the processed text
# print(url, text)
return text
def ask_llama_index(href = None, text = None, json_filename = None):
# define LLM
# llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="text-davinci-003", max_tokens=2048))
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo", max_tokens=2048))
# define prompt helper
# set maximum input size
max_input_size = 2048
# set number of output tokens
num_output = 256
# set maximum chunk overlap
max_chunk_overlap = 20
chunk_size_limit = 10000
prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
save_json_path = json_filename and f'{os.path.dirname(__file__)}\\{json_filename}'
if not save_json_path or not os.path.isfile(save_json_path):
# doc is the directory your documents live in; recursive loads every document inside it
# documents = SimpleDirectoryReader(input_dir=os.path.dirname(__file__) + '/doc',recursive=True).load_data()
if href:
documents = BeautifulSoupWebReader().load_data([href])
if text:
documents = StringIterableReader().load_data(texts=[text])
for doc in documents:
doc.text = doc.text.replace("。", ". ")
# index = GPTSimpleVectorIndex.from_documents(documents)
index = GPTSimpleVectorIndex.from_documents(documents, service_context=service_context)
# index = GPTSimpleVectorIndex.from_documents(documents)
if save_json_path:
index.save_to_disk(save_json_path)
else:
index = GPTSimpleVectorIndex.load_from_disk(save_json_path, service_context=service_context)
# Context information is below.
# ---------------------
# {context_str}
# ---------------------
# Given the context information and not prior knowledge, answer the question: {query_str}
text_qa_prompt_tmpl = (
"我们在下面提供了上下文信息. \n"
"---------------------\n"
"{context_str}"
"\n---------------------\n"
"鉴于此信息, 请回答以下问题: {query_str}\n"
)
# The original question is as follows: {query_str}
# We have provided an existing answer: {existing_answer}
# We have the opportunity to refine the existing answer (only if needed) with some more context below.
# ------------
# {context_msg}
# ------------
# Given the new context, refine the original answer to better answer the question. If the context isn't useful, return the original answer.
refine_prompt_tmpl = (
"之前我们询问过这个问题: {query_str}\n"
"得到了这样一个答案: {existing_answer}\n"
"现在我们有机会完善现有的答案 (仅在需要时) 通过下面的更多上下文.\n"
"------------\n"
"{context_msg}\n"
"------------\n"
"给我一个新的答案, 完善原始答案以更好的回答问题. 如果新的上下文没有用或者没必要再完善了, 则重复一遍原始的答案.\n"
)
text_qa_prompt = QuestionAnswerPrompt(text_qa_prompt_tmpl)
refine_prompt = RefinePrompt(refine_prompt_tmpl)
# answer = index.query("请尽可能详细的总结文章概要,并使用换行使阅读段落更清晰",
# text_qa_template = text_qa_prompt,
# refine_template = refine_prompt)
# print(answer)
while True:
ask = input("请输入你的问题:")
print(index.query(ask,
text_qa_template = text_qa_prompt,
refine_template = refine_prompt))
return answer.response
def read_tab(file_path):
texts = [] # create an empty list to hold the file contents
with open(file_path, 'r', encoding='utf-8') as f: # open the file and read its contents
lines = f.readlines() # read the file line by line into a list
count = 0 # counter used to skip the first three lines
for line in lines:
if count >= 3: # the first three lines have already been skipped, so keep this line's text
texts.append(line.strip()) # strip the trailing newline and append the line to texts
else:
count += 1 # still within the first three lines, so skip this line and increment the counter
return texts
def create_vector_index_help_guide():
# define LLM
# llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="text-davinci-003", max_tokens=2048))
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo", max_tokens=2048))
# define prompt helper
# set maximum input size
max_input_size = 4096
# set number of output tokens
num_output = 2560
# set maximum chunk overlap
max_chunk_overlap = 20
prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
# doc is the directory your documents live in; recursive loads every document inside it
# documents = SimpleDirectoryReader(input_dir=os.path.dirname(__file__) + '/doc',recursive=True).load_data()
# documents = BeautifulSoupWebReader().load_data([url])
texts = read_tab('E:\\game\\pub\\data\\tables\\player\\helper_guide.tab')
documents = StringIterableReader().load_data(texts=texts)
# index = GPTSimpleVectorIndex.from_documents(documents)
index = GPTSimpleVectorIndex.from_documents(documents, service_context=service_context)
# index = GPTSimpleVectorIndex.from_documents(documents)
save_json_path = os.path.dirname(__file__) + '\\helper_guide.json'
index.save_to_disk(save_json_path);
def ask_by_helper_guide():
# define LLM
# llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="text-davinci-003", max_tokens=2048))
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo", max_tokens=2048))
# define prompt helper
# set maximum input size
max_input_size = 4096
# set number of output tokens
num_output = 256
# set maximum chunk overlap
max_chunk_overlap = 20
chunk_size_limit = 10000
prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
# query_index.py: fetch relevant material from the index file and ask GPT
save_json_path = os.path.dirname(__file__) + '\\helper_guide.json'
index = GPTSimpleVectorIndex.load_from_disk(save_json_path, service_context=service_context)
# Context information is below.
# ---------------------
# {context_str}
# ---------------------
# Given the context information and not prior knowledge, answer the question: {query_str}
text_qa_prompt_tmpl = (
"我们在下面提供了上下文信息. \n"
"---------------------\n"
"{context_str}"
"\n---------------------\n"
"鉴于此信息,请回答以下问题: {query_str}\n"
)
# The original question is as follows: {query_str}
# We have provided an existing answer: {existing_answer}
# We have the opportunity to refine the existing answer (only if needed) with some more context below.
# ------------
# {context_msg}
# ------------
# Given the new context, refine the original answer to better answer the question. If the context isn't useful, return the original answer.
refine_prompt_tmpl = (
"之前我们询问过这个问题: {query_str}\n"
"得到了这样一个答案: {existing_answer}\n"
"现在我们有机会完善现有的答案 (仅在需要时) 通过下面的更多上下文.\n"
"------------\n"
"{context_msg}\n"
"------------\n"
"给我一个新的答案, 完善原始答案以更好的回答问题. 如果新的上下文没有用, 则返回原始的答案.\n"
)
text_qa_prompt = QuestionAnswerPrompt(text_qa_prompt_tmpl)
refine_prompt = RefinePrompt(refine_prompt_tmpl)
while True:
ask = input("请输入你的问题:")
print(index.query(ask,
text_qa_template = text_qa_prompt,
refine_template = refine_prompt))
# create_vector_index_help_guide()
# logging.getLogger('llama_index.token_counter.token_counter').setLevel(logging.WARNING)
# ask_by_helper_guide()
# ask_llama_index('https://mp.weixin.qq.com/s/wY-DkYOaar1Z3Hy4eBPebg', None, 'wY-DkYOaar1Z3Hy4eBPebg.json')
ask_llama_index('https://zhuanlan.zhihu.com/p/623585339')
# read the text from the doc/pormpt_tags.txt file
# text = open('doc\\pormpt_tags.txt', 'r').read()
# ask_llama_index(None, text, 'pormpt_tags.json')
| [
"我们在下面提供了上下文信息. \n---------------------\n{context_str}\n---------------------\n鉴于此信息,请回答以下问题: {query_str}\n",
"我们在下面提供了上下文信息. \n---------------------\n{context_str}\n---------------------\n鉴于此信息, 请回答以下问题: {query_str}\n",
"之前我们询问过这个问题: {query_str}\n得到了这样一个答案: {existing_answer}\n现在我们有机会完善现有的答案 (仅在需要时) 通过下面的更多上下文.\n------------\n{context_msg}\n------------\n给我一个新的答案, 完善原始答案以更好的回答问题. 如果新的上下文没有用或者没必要再完善了, 则重复一遍原始的答案.\n",
"之前我们询问过这个问题: {query_str}\n得到了这样一个答案: {existing_answer}\n现在我们有机会完善现有的答案 (仅在需要时) 通过下面的更多上下文.\n------------\n{context_msg}\n------------\n给我一个新的答案, 完善原始答案以更好的回答问题. 如果新的上下文没有用, 则返回原始的答案.\n"
] |
2024-01-10 | soilniba/shuiqianxiaoxi-download | spider~tieba_openai_test.py | from bs4 import BeautifulSoup
import urllib
import json
import time
import datetime
import requests
import os
import re
import gzip
import openai
from config import openai_api_key, feishu_robot_news, feishu_robot_error
# get the directory the script lives in
script_dir = os.path.dirname(os.path.realpath(__file__))
# switch the working directory to the script's directory
os.chdir(script_dir)
openai.api_key = openai_api_key
Cookie = ''
user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'
headers = {
'User-Agent': user_agent,
'Connection': 'close',
'Cookie': Cookie,
'Accept-Encoding': 'gzip',
}
# proxy_handler = urllib.request.ProxyHandler({'socks5': '127.0.0.1:1080'})
# proxy_handler = urllib.request.ProxyHandler({'socks5': 'k814.kdltps.com:20818'})
socks5_proxies = 'socks5://t17842936906948:[email protected]:20818'
# socks5_proxies = 'socks5://127.0.0.1:1080'
proxies = {
'http': socks5_proxies,
'https': socks5_proxies,
}
proxies = None
update_num = 0
add_num = 0
def get_news():
global update_num, add_num
update_num = 0
add_num = 0
file_name = 'news_gov.json'
json_all = load_json(file_name)
# clear_history_data(json_all)
new_news_list = []
if thread_list := get_list():
get_page(thread_list, json_all, new_news_list)
print("----新闻读取完毕----")
else:
print("thread_list读取失败")
send_error_msg('出错啦!抓不到新闻啦!')
print(f'新闻新增{add_num}条')
write_json(file_name, json_all)
if new_news_list:
for data_info in new_news_list:
href = data_info["href"]
text = get_article(href)
answer = ask_gpt(text)
data_info['description'] = answer
json_all[href] = data_info
write_json(file_name, json_all)
send_news(data_info)
def send_news(data_info):
feishu_msg = {"content": []}
# feishu_msg["title"] = '刚刚收到的新消息:'
feishu_msg["content"].append([
{
"tag": "a",
"text": data_info['title'],
"href": f'{data_info["url"]}'
},
{
"tag": "text",
"text": '\n\n'
},
])
feishu_msg["content"].append([
{
"tag": "text",
"text": data_info['description']
},
])
send_feishu_robot(feishu_robot_error, feishu_msg)
def send_error_msg(text):
error_file_name = 'last_send_time_error.log'
last_send_time = read_last_time(error_file_name)
if time.time() - last_send_time > 1: # minimum interval between error reports
text_msg = text
feishu_msg = {"content": []}
feishu_msg["content"].append([
{
"tag": "text",
"text": text_msg
},
])
send_feishu_robot(feishu_robot_error, feishu_msg)
write_last_time(error_file_name)
def get_article(url = ''):
# url = f'http://www.gov.cn{href}'
# url = 'http://www.gov.cn/xinwen/2023-03/17/content_5747299.htm'
# url = 'http://www.gov.cn/zhengce/zhengceku/2023-03/17/content_5747143.htm'
# url = 'http://www.gov.cn/zhengce/zhengceku/2023-03/16/content_5746998.htm'
# url = 'https://tieba.baidu.com/p/8312746395'
response = requests.get(url)
html = response.content
# parse the page content
soup = BeautifulSoup(html, 'html.parser')
# extract the page body text
text = soup.get_text()
# strip extra whitespace, newlines, and other useless characters
text = re.sub(r'\s+', ' ', text).strip()
# collapse multiple consecutive spaces into one
text = re.sub(r'\s+', ' ', text)
# output the processed text
# print(url, text)
return text, soup.title.string
def ask_gpt(text):
print(len(text))
max_len = 3000
if len(text) > max_len:
text = text[:max_len]
# build the prompt to send to the API
prompt = f"请对以下新闻文章进行概述:\n{text}"
message = []
message.append({'role': 'system', 'content': '请对以下这篇文章标注关键词(不超过5个),然后引用一些重点语句(按权重从高到低排序,并在行首标出权重分数)'})
message.append({'role': 'user', 'content': text})
try:
response = openai.ChatCompletion.create(
model = "gpt-3.5-turbo-0301", # 对话模型的名称
# model = "gpt-4-0314", # 对话模型的名称
messages = message,
temperature = 0.9, # value in [0,1]; higher values make replies more random
# max_tokens=4097, # 回复最大的字符数
top_p = 1,
frequency_penalty = 0.0, # in [-2,2]; larger values favor more varied content
presence_penalty = 0.0, # in [-2,2]; larger values favor more varied content
)
print("[ChatGPT] reply={}, total_tokens={}".format(response.choices[0]['message']['content'], response["usage"]["total_tokens"]))
return response.choices[0]['message']['content']
except Exception as e:
print(e)
send_error_msg(f'openai api error:{e.user_message}')
# except openai.error.RateLimitError as e:
# # rate limit exception
# print(e)
# if retry_count < 1:
# time.sleep(5)
# logger.warn("[OPEN_AI] RateLimit exceed, 第{}次重试".format(retry_count+1))
# return self.reply_text(session, session_id, retry_count+1)
# else:
# return {"completion_tokens": 0, "content": "提问太快啦,请休息一下再问我吧"}
# except openai.error.APIConnectionError as e:
# # api connection exception
# logger.warn(e)
# logger.warn("[OPEN_AI] APIConnection failed")
# return {"completion_tokens": 0, "content":"我连接不到你的网络"}
# except openai.error.Timeout as e:
# logger.warn(e)
# logger.warn("[OPEN_AI] Timeout")
# return {"completion_tokens": 0, "content":"我没有收到你的消息"}
# except Exception as e:
# # unknown exception
# logger.exception(e)
# Session.clear_session(session_id)
# return {"completion_tokens": 0, "content": "请再问我一次吧"}
def get_html(url):
url = urllib.parse.quote(url, safe='/:?=&')
# request = urllib.request.Request(url, headers = headers)
# response = urllib.request.urlopen(request)
if proxies:
response = requests.get(url, headers=headers, proxies=proxies)
else:
response = requests.get(url, headers=headers)
response.encoding = 'utf-8'
HtmlContent = response.read() if hasattr(response, 'read') else response.text
# HtmlContent = HtmlContent.decode('utf-8')
# print('python 返回 URL:{} 数据成功'.format(url))
return HtmlContent
def get_list(): # fetch a single page of JSON data
url = "http://www.gov.cn/xinwen/lianbo/bumen.htm"
HtmlContent = get_html(url)
HtmlContent = HtmlContent.replace("<!--", "")
HtmlContent = HtmlContent.replace("-->", "")
soup = BeautifulSoup(HtmlContent, "lxml")
thread_list = soup.select_one('body > div.main > div > div > div.news_box > div')
# print(thread_list)
return thread_list
def get_page(thread_list, json_all, new_news_list):
li_list = thread_list.select('li')
for li in li_list:
a = li.select_one('a')
title = a.text
href = a.attrs['href']
span = li.select_one('span')
date = span.text.strip()
# print(title, href, date)
if href in json_all:
data_info = json_all[href]
if 'href' not in data_info:
data_info['href'] = href
else:
data_info = {}
data_info['href'] = href
data_info['title'] = title
data_info['date'] = date
json_all[href] = data_info
# new_news_list.append(data_info)
new_news_list.insert(0, data_info)
global add_num
add_num += 1
def write_json(file_name, json_all):
str_json = json.dumps(json_all, indent=2, ensure_ascii=False)
with open(file_name, "w", encoding='utf-8') as f:
f.write(str_json)
f.close()
def load_json(file_name):
try:
f = open(file_name, "r", encoding='utf-8')
except IOError:
return {}
else:
return json.load(f)
def send_wx_robot(robot_url, content_msg, mentioned_list = None):
headers = {
'Content-Type': 'application/json',
}
if mentioned_list:
data_table = {
"msgtype": "text",
"text": { "content": content_msg, "mentioned_list": mentioned_list }
}
else:
data_table = {
"msgtype": "markdown",
"markdown": { "content": content_msg }
}
data = json.dumps(data_table)
response = requests.post(f'https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key={robot_url}', headers=headers, data=data)
def send_feishu_robot(feishu_robot_key, feishu_msg):
headers = {
'Content-Type': 'application/json',
}
data = json.dumps({
"msg_type": "post",
"content": {
"post": {
"zh_cn": feishu_msg
}
}
})
response = requests.post(f'https://open.feishu.cn/open-apis/bot/v2/hook/{feishu_robot_key}', headers=headers, data=data)
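# For reference, the JSON body posted above ends up shaped like this (values are illustrative):
#
# {
# "msg_type": "post",
# "content": {"post": {"zh_cn": {"content": [[{"tag": "text", "text": "..."}]]}}}
# }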
def get_feishu_token():
headers = {
'Content-Type': 'application/json',
}
data = json.dumps({
"app_id": "cli_a1c3790e21f8100c",
"app_secret": "YVXgZL2HnYi6gHm2NmxenfOTi60rfrQ3",
})
response = requests.post('https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal', headers=headers, data=data)
responsejson = json.loads(response.text)
print(responsejson['tenant_access_token'])
return responsejson['tenant_access_token']
def GetUserIDs(email_list):
headers = {
'Content-Type': 'application/json; charset=utf-8',
'Authorization': 'Bearer ' + get_feishu_token(),
}
response = requests.post('https://open.feishu.cn/open-apis/user/v1/batch_get_id?emails=' + '&emails='.join(email_list), headers=headers)
responsejson = json.loads(response.text)
email_users = responsejson['data']['email_users']
user_id_list = []
for email, ids in email_users.items():
print(email, ids[0]['open_id'], ids[0]['user_id'])
user_id_list.append(ids[0]['user_id'])
return user_id_list
def write_last_time(file_name):
with open(file_name, "w") as f:
f.write(str(time.time()))
f.close()
def read_last_time(file_name):
if os.path.exists(file_name):
with open(file_name, 'r') as f:
last_send_time = float(f.read())
f.close()
return last_send_time
else:
write_last_time(file_name)
return time.time()
def main():
lock_file = 'news_spider.lock'
if not os.path.exists(lock_file):
_extracted_from_main_4(lock_file)
else:
print('file lock')
time.sleep(5)
os.remove(lock_file)
print('lock file delete')
def _extracted_from_main_4(lock_file):
# with open(lock_file, 'w') as f:
# f.write('')
# f.close()
get_news()
if os.path.exists(lock_file):
os.remove(lock_file)
def check_local_ip():
url = 'https://www.123cha.com'
HtmlContent = get_html(url)
soup = BeautifulSoup(HtmlContent, "lxml")
iplocation = soup.select_one('body > div.header > div.location > span')
print('当前访问IP:', iplocation and iplocation.text)
if __name__ == "__main__":
try:
# code that may raise an exception
check_local_ip()
except Exception as e:
# exception-handling code
print('Error:', e)
result = None
# main()
url = 'https://tieba.baidu.com/p/8312711920'
text, title = get_article(url)
answer = ask_gpt(text)
send_news({
'title': title.replace("【图片】", "").replace("_百度贴吧", ""),
'url': url,
'description': answer,
})
| [
"{'post': {'zh_cn': PLACEHOLDER}}",
"请对以下这篇文章标注关键词(不超过5个),然后引用一些重点语句(按权重从高到低排序,并在行首标出权重分数)",
"请对以下新闻文章进行概述:\nPLACEHOLDER",
"[]"
] |
2024-01-10 | soilniba/shuiqianxiaoxi-download | spider~news_spider.py | from bs4 import BeautifulSoup
import urllib
import json
import time
import datetime
import requests
import os
import re
import traceback
import gzip
import PyPDF2
import docx2txt
import nltk
import html2text
import openai
from loguru import logger
from langchain import OpenAI
from langchain.chat_models import ChatOpenAI
from llama_index import (
GPTKeywordTableIndex,
GPTSimpleVectorIndex,
SimpleDirectoryReader,
BeautifulSoupWebReader,
StringIterableReader,
LLMPredictor,
PromptHelper,
QuestionAnswerPrompt,
RefinePrompt,
ServiceContext
)
from config import openai_api_key, feishu_robot_news, feishu_robot_error
script_dir = os.path.dirname(os.path.realpath(__file__)) # get the directory the script lives in
os.chdir(script_dir) # switch the working directory to the script's directory
filename_ext = os.path.basename(__file__)
file_name, file_ext = os.path.splitext(filename_ext)
logger.add(f"{file_name}.log", format="{time} - {level} - {message}", rotation="10 MB", compression="zip") # 添加日志文件
openai.api_key = openai_api_key
os.environ["OPENAI_API_KEY"] = openai_api_key
import psutil
p = psutil.Process() # get the Process object for the current process
p.nice(psutil.IDLE_PRIORITY_CLASS) # set the process to low priority
# feishu_robot_news = feishu_robot_error # force the test channel
Cookie = ''
user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'
headers = {
'User-Agent': user_agent,
'Connection': 'close',
'Cookie': Cookie,
'Accept-Encoding': 'gzip',
}
# proxy_handler = urllib.request.ProxyHandler({'socks5': '127.0.0.1:1080'})
# proxy_handler = urllib.request.ProxyHandler({'socks5': 'k814.kdltps.com:20818'})
socks5_proxies = 'socks5://t17842936906948:[email protected]:20818'
# socks5_proxies = 'socks5://127.0.0.1:1080'
proxies = {
'http': socks5_proxies,
'https': socks5_proxies,
}
proxies = None
update_num = 0
add_num = 0
def get_news():
global update_num, add_num
update_num = 0
add_num = 0
file_name = 'news_gov.json'
json_all = load_json(file_name)
# clear_history_data(json_all)
new_news_list = []
try:
thread_list = get_list()
get_page(thread_list, json_all, new_news_list)
print("----新闻读取完毕----")
except Exception as e:
send_error_msg(f'出错啦!gov抓不到新闻啦!\n{e}')
print(f'新闻新增{add_num}条')
write_json(file_name, json_all)
for href, data_info in reversed(json_all.items()):
if not data_info.get('send_time'):
if not data_info.get('description'):
try:
href = data_info["href"]
# text = get_article(href)
# answer = ask_gpt(text)
answer = ask_llama_index(href)
data_info['description'] = answer
json_all[href] = data_info
write_json(file_name, json_all)
except Exception as e:
# tb_str = traceback.format_exc(limit=3)
send_error_msg(f'ask_llama_index error\n{e}')
continue
if data_info.get('description'):
# data_info['send_time'] = None
data_info['send_time'] = time.time()
write_json(file_name, json_all)
send_news(data_info)
def send_news(data_info):
feishu_msg = {"content": []}
# feishu_msg["title"] = '刚刚收到的新消息:'
feishu_msg["content"].append([
{
"tag": "text",
"text": data_info['date']
},
{
"tag": "a",
"text": data_info['title'],
"href": f'http://www.gov.cn{data_info["href"]}'
}
])
if data_info.get('description'):
feishu_msg["content"].append([
{
"tag": "text",
"text": data_info.get('description')
},
])
send_feishu_robot(feishu_robot_news, feishu_msg)
def send_error_msg(text):
if feishu_robot_error:
text_msg = text
feishu_msg = {"content": []}
feishu_msg["content"].append([
{
"tag": "text",
"text": text_msg
},
])
send_feishu_robot(feishu_robot_error, feishu_msg)
logger.error(text)
def get_article(url):
# url = 'http://www.gov.cn/xinwen/2023-03/17/content_5747299.htm'
# url = 'http://www.gov.cn/zhengce/zhengceku/2023-03/17/content_5747143.htm'
# url = 'http://www.gov.cn/zhengce/zhengceku/2023-03/16/content_5746998.htm'
response = requests.get(url)
html = response.content
    # parse the page content
    soup = BeautifulSoup(html, 'html.parser')
    # extract the page body text
    text = soup.get_text()
    # strip extra spaces, newlines and other useless characters
    text = re.sub(r'\s+', ' ', text).strip()
    # collapse multiple consecutive spaces into one
    text = re.sub(r'\s+', ' ', text)
    # print the processed text
    # print(url, text)
return text
def ask_llama_index(href):
# define LLM
# llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="text-davinci-003", max_tokens=2048))
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0))
# define prompt helper
# set maximum input size
max_input_size = 4096
# set number of output tokens
num_output = 256
# set maximum chunk overlap
max_chunk_overlap = 20
chunk_size_limit = 10000
prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
    # doc is where your documents are stored; recursive=True loads every document inside it
# documents = SimpleDirectoryReader(input_dir=os.path.dirname(__file__) + '/doc',recursive=True).load_data()
url = f'http://www.gov.cn{href}'
documents = StringIterableReader().load_data(texts=[get_article(url)])
for doc in documents:
doc.text = doc.text.replace("。", ". ")
# documents = BeautifulSoupWebReader().load_data([url])
# index = GPTSimpleVectorIndex.from_documents(documents)
index = GPTSimpleVectorIndex.from_documents(documents, service_context=service_context)
# index = GPTSimpleVectorIndex.from_documents(documents)
# save_json_path = os.path.dirname(__file__) + '\\index.json'
# index.save_to_disk(save_json_path);
    # query_index.py retrieves the relevant material from the index file and asks GPT
# index = GPTKeywordTableIndex.load_from_disk(save_json_path, service_context=service_context)
# Context information is below.
# ---------------------
# {context_str}
# ---------------------
# Given the context information and not prior knowledge, answer the question: {query_str}
text_qa_prompt_tmpl = (
"我们在下面提供了上下文信息. \n"
"---------------------\n"
"{context_str}"
"\n---------------------\n"
"鉴于此信息,请回答以下问题: {query_str}\n"
)
# The original question is as follows: {query_str}
# We have provided an existing answer: {existing_answer}
# We have the opportunity to refine the existing answer (only if needed) with some more context below.
# ------------
# {context_msg}
# ------------
# Given the new context, refine the original answer to better answer the question. If the context isn't useful, return the original answer.
refine_prompt_tmpl = (
"之前我们询问过这个问题: {query_str}\n"
"得到了原始的答案: {existing_answer}\n"
"现在我们有机会完善现有的答案 (仅在需要时) 通过下面的更多上下文.\n"
"------------\n"
"{context_msg}\n"
"------------\n"
"给我一个新的答案, 完善原始答案以更好的回答问题. 如果新的上下文没有用或者没必要再完善了, 则重复一遍原始的答案.\n"
)
text_qa_prompt = QuestionAnswerPrompt(text_qa_prompt_tmpl)
refine_prompt = RefinePrompt(refine_prompt_tmpl)
# while True:
# ask = input("请输入你的问题:")
# print(index.query(ask))
answer = index.query("用中文总结一下这篇文章主要讲了啥",
text_qa_template = text_qa_prompt,
refine_template = refine_prompt)
time.sleep(10)
return answer.response
def ask_gpt(text):
print(len(text))
max_len = 3000
if len(text) > max_len:
text = text[:max_len]
    # build the prompt to send to the API
prompt = f"请对以下新闻文章进行概述:\n{text}"
message = [
{'role': 'system', 'content': '请用中文对以下新闻文章进行概述'},
{'role': 'user', 'content': text},
]
try:
response = openai.ChatCompletion.create(
model = "gpt-3.5-turbo", # 对话模型的名称
# model = "gpt-4", # 对话模型的名称
messages = message,
temperature = 0.9, # 值在[0,1]之间,越大表示回复越具有不确定性
#max_tokens=4096, # 回复最大的字符数
top_p = 1,
frequency_penalty = 0.0, # [-2,2]之间,该值越大则更倾向于产生不同的内容
presence_penalty = 0.0, # [-2,2]之间,该值越大则更倾向于产生不同的内容
)
print(
f"""[ChatGPT] reply={response.choices[0]['message']['content']}, total_tokens={response["usage"]["total_tokens"]}"""
)
return response.choices[0]['message']['content']
except Exception as e:
print(e)
send_error_msg(f'openai api error:{e}')
def get_html(url):
url = urllib.parse.quote(url, safe='/:?=&')
# request = urllib.request.Request(url, headers = headers)
# response = urllib.request.urlopen(request)
if proxies:
response = requests.get(url, headers=headers, proxies=proxies)
else:
response = requests.get(url, headers=headers)
response.encoding = 'utf-8'
HtmlContent = response.read() if hasattr(response, 'read') else response.text
# HtmlContent = HtmlContent.decode('utf-8')
# print('python 返回 URL:{} 数据成功'.format(url))
return HtmlContent
def get_list(): # fetch a single page of listing data
url = "http://www.gov.cn/lianbo/bumen/"
HtmlContent = get_html(url)
# HtmlContent = HtmlContent.replace("<!--", "")
# HtmlContent = HtmlContent.replace("-->", "")
HtmlContent = HtmlContent.replace("</html>", "")
HtmlContent += '</html>'
soup = BeautifulSoup(HtmlContent, "lxml")
thread_list = soup.select_one('body > div.main > div > div > div.news_box')
# print(thread_list)
return thread_list
def get_page(thread_list, json_all, new_news_list):
li_list = thread_list.select('li')
for li in li_list:
a = li.select_one('a')
title = a.text
href = a.attrs['href']
if href.startswith('./'):
href = "/lianbo/bumen" + href[1:]
elif href.startswith('https://www.gov.cn/'):
href = href.replace('https://www.gov.cn/', '/')
span = li.select_one('span')
date = span.text.strip()
# print(title, href, date)
if href in json_all:
data_info = json_all[href]
if 'href' not in data_info:
data_info['href'] = href
else:
data_info = {}
data_info['href'] = href
data_info['title'] = title
data_info['date'] = date
json_all[href] = data_info
# new_news_list.append(data_info)
new_news_list.insert(0, data_info)
global add_num
add_num += 1
# if data_info['href'] == '/zhengce/zhengceku/2023-03/15/content_5746847.htm':
# new_news_list.append(data_info)
def write_json(file_name, json_all):
str_json = json.dumps(json_all, indent=2, ensure_ascii=False)
with open(file_name, "w", encoding='utf-8') as f:
f.write(str_json)
f.close()
def load_json(file_name):
try:
f = open(file_name, "r", encoding='utf-8')
except IOError:
return {}
else:
return json.load(f)
def send_wx_robot(robot_url, content_msg, mentioned_list = None):
headers = {
'Content-Type': 'application/json',
}
if mentioned_list:
data_table = {
"msgtype": "text",
"text": { "content": content_msg, "mentioned_list": mentioned_list }
}
else:
data_table = {
"msgtype": "markdown",
"markdown": { "content": content_msg }
}
data = json.dumps(data_table)
response = requests.post(f'https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key={robot_url}', headers=headers, data=data)
def send_feishu_robot(feishu_robot_key, feishu_msg):
headers = {
'Content-Type': 'application/json',
}
data = json.dumps({
"msg_type": "post",
"content": {
"post": {
"zh_cn": feishu_msg
}
}
})
response = requests.post(f'https://open.feishu.cn/open-apis/bot/v2/hook/{feishu_robot_key}', headers=headers, data=data)
return json.loads(response.text)
def get_feishu_token():
headers = {
'Content-Type': 'application/json',
}
data = json.dumps({
"app_id": "cli_a1c3790e21f8100c",
"app_secret": "YVXgZL2HnYi6gHm2NmxenfOTi60rfrQ3",
})
response = requests.post('https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal', headers=headers, data=data)
responsejson = json.loads(response.text)
print(responsejson['tenant_access_token'])
return responsejson['tenant_access_token']
def GetUserIDs(email_list):
headers = {
'Content-Type': 'application/json; charset=utf-8',
'Authorization': 'Bearer ' + get_feishu_token(),
}
response = requests.post('https://open.feishu.cn/open-apis/user/v1/batch_get_id?emails=' + '&emails='.join(email_list), headers=headers)
responsejson = json.loads(response.text)
email_users = responsejson['data']['email_users']
user_id_list = []
for email, ids in email_users.items():
print(email, ids[0]['open_id'], ids[0]['user_id'])
user_id_list.append(ids[0]['user_id'])
return user_id_list
def write_last_time(file_name):
with open(file_name, "w") as f:
f.write(str(time.time()))
f.close()
def read_last_time(file_name):
if os.path.exists(file_name):
with open(file_name, 'r') as f:
last_send_time = float(f.read())
f.close()
return last_send_time
else:
write_last_time(file_name)
return time.time()
def main():
lock_file = 'news_spider.lock'
if not os.path.exists(lock_file):
_extracted_from_main_4(lock_file)
else:
print('file lock')
time.sleep(5)
os.remove(lock_file)
print('lock file delete')
def _extracted_from_main_4(lock_file):
# with open(lock_file, 'w') as f:
# f.write('')
# f.close()
get_news()
if os.path.exists(lock_file):
os.remove(lock_file)
def check_local_ip():
url = 'https://www.123cha.com'
HtmlContent = get_html(url)
soup = BeautifulSoup(HtmlContent, "lxml")
iplocation = soup.select_one('body > div.header > div.location > span')
print('当前访问IP:', iplocation and iplocation.text)
if __name__ == "__main__":
try:
        # code that may raise an exception
        check_local_ip()
    except Exception as e:
        # handle the exception
print('Error:', e)
result = None
main()
| [
"请用中文对以下新闻文章进行概述",
"[]",
"我们在下面提供了上下文信息. \n---------------------\n{context_str}\n---------------------\n鉴于此信息,请回答以下问题: {query_str}\n",
"请对以下新闻文章进行概述:\nPLACEHOLDER",
"{'post': {'zh_cn': PLACEHOLDER}}",
"之前我们询问过这个问题: {query_str}\n得到了原始的答案: {existing_answer}\n现在我们有机会完善现有的答案 (仅在需要时) 通过下面的更多上下文.\n------------\n{context_msg}\n------------\n给我一个新的答案, 完善原始答案以更好的回答问题. 如果新的上下文没有用或者没必要再完善了, 则重复一遍原始的答案.\n"
] |
2024-01-10 | soilniba/shuiqianxiaoxi-download | spider~news_spider_tvbs.py | from bs4 import BeautifulSoup
import urllib
import json
import time
import datetime
import requests
import os
import re
import opencc
import traceback
import gzip
import PyPDF2
import docx2txt
import nltk
import html2text
import openai
from loguru import logger
from langchain import OpenAI
from langchain.chat_models import ChatOpenAI
from llama_index import (
GPTKeywordTableIndex,
GPTSimpleVectorIndex,
SimpleDirectoryReader,
BeautifulSoupWebReader,
StringIterableReader,
LLMPredictor,
PromptHelper,
QuestionAnswerPrompt,
RefinePrompt,
ServiceContext
)
from config import openai_api_key, feishu_robot_tvbs, feishu_robot_error
script_dir = os.path.dirname(os.path.realpath(__file__)) # directory this script lives in
os.chdir(script_dir) # switch the working directory to the script's directory
filename_ext = os.path.basename(__file__)
file_name, file_ext = os.path.splitext(filename_ext)
logger.add(f"{file_name}.log", format="{time} - {level} - {message}", rotation="10 MB", compression="zip") # 添加日志文件
openai.api_key = openai_api_key
os.environ["OPENAI_API_KEY"] = openai_api_key
import psutil
p = psutil.Process() # Process object for the current process
p.nice(psutil.IDLE_PRIORITY_CLASS) # run this process at low priority
# feishu_robot_tvbs = feishu_robot_error # force the test channel
converter = opencc.OpenCC('tw2sp.json') # converter: Traditional Chinese (Taiwan standard) to Simplified, using mainland vocabulary
Cookie = ''
user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'
headers = {
'User-Agent': user_agent,
'Connection': 'close',
'Cookie': Cookie,
'Accept-Encoding': 'gzip',
}
# proxy_handler = urllib.request.ProxyHandler({'socks5': '127.0.0.1:1080'})
# proxy_handler = urllib.request.ProxyHandler({'socks5': 'k814.kdltps.com:20818'})
socks5_proxies = 'socks5://t17842936906948:[email protected]:20818'
# socks5_proxies = 'socks5://127.0.0.1:1080'
proxies = {
'http': socks5_proxies,
'https': socks5_proxies,
}
proxies = None
update_num = 0
add_num = 0
def get_news():
global update_num, add_num
update_num = 0
add_num = 0
file_name = 'news_tbvs.json'
json_all = load_json(file_name)
# clear_history_data(json_all)
new_news_list = []
try:
thread_list_all = get_list_all()
for thread_list in thread_list_all:
get_page(thread_list['thread_list'], thread_list['category'], json_all, new_news_list)
print("----新闻读取完毕----")
except Exception as e:
send_error_msg(f'出错啦!tbvs抓不到新闻啦!\n{e}')
print(f'新闻新增{add_num}条')
write_json(file_name, json_all)
for href, data_info in reversed(json_all.items()):
if not data_info.get('send_time'):
if not data_info.get('description'):
try:
href = data_info["href"]
# text = get_article(href)
# answer = ask_gpt(text)
answer = ask_llama_index(href)
if answer is None:
answer = 'None'
data_info['description'] = answer
json_all[href] = data_info
write_json(file_name, json_all)
except Exception as e:
# tb_str = traceback.format_exc(limit=3)
send_error_msg(f'ask_llama_index error\n{e}')
continue
if data_info.get('description') and data_info.get('description') != 'None':
# data_info['send_time'] = None
data_info['send_time'] = time.time()
write_json(file_name, json_all)
send_news(data_info)
def send_news(data_info):
feishu_msg = {"content": []}
# feishu_msg["title"] = '刚刚收到的新消息:'
feishu_msg["content"].append([
{
"tag": "text",
"text": f"[{data_info['category']}]"
},
{
"tag": "a",
"text": converter.convert(data_info['title']),
"href": f'https://news.tvbs.com.tw{data_info["href"]}'
},
{
"tag": "text",
"text": f"{data_info['date']}"
},
])
if data_info.get('description'):
feishu_msg["content"].append([
{
"tag": "text",
"text": data_info.get('description')
},
])
send_feishu_robot(feishu_robot_tvbs, feishu_msg)
def send_error_msg(text):
if feishu_robot_error:
text_msg = text
feishu_msg = {"content": []}
feishu_msg["content"].append([
{
"tag": "text",
"text": text_msg
},
])
send_feishu_robot(feishu_robot_error, feishu_msg)
logger.error(text)
def get_article(url):
response = requests.get(url)
html = response.content
    # parse the page content
    soup = BeautifulSoup(html, 'html.parser')
    div_main = soup.select_one('#news_detail_div')
    # remove ads
    if div_guangxuan := div_main.select_one('div.guangxuan'):
        div_guangxuan.extract()
    # extract the page body text
    text = div_main.get_text()
    # strip extra spaces, newlines and other useless characters
    text = re.sub(r'\s+', ' ', text).strip()
    # collapse multiple consecutive spaces into one
    text = re.sub(r'\s+', ' ', text)
    # print the processed text
    # print(url, text)
return text
def ask_llama_index(href):
# define LLM
# llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="text-davinci-003", max_tokens=2048))
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0))
# define prompt helper
# set maximum input size
max_input_size = 4096
# set number of output tokens
num_output = 256
# set maximum chunk overlap
max_chunk_overlap = 20
chunk_size_limit = 10000
prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
    # doc is where your documents are stored; recursive=True loads every document inside it
# documents = SimpleDirectoryReader(input_dir=os.path.dirname(__file__) + '/doc',recursive=True).load_data()
url = f'https://news.tvbs.com.tw{href}'
documents = StringIterableReader().load_data(texts=[get_article(url)])
for doc in documents:
doc.text = doc.text.replace("。", ". ")
# documents = BeautifulSoupWebReader().load_data([url])
# index = GPTSimpleVectorIndex.from_documents(documents)
index = GPTSimpleVectorIndex.from_documents(documents, service_context=service_context)
# index = GPTSimpleVectorIndex.from_documents(documents)
# save_json_path = os.path.dirname(__file__) + '\\index.json'
# index.save_to_disk(save_json_path);
    # query_index.py retrieves the relevant material from the index file and asks GPT
# index = GPTKeywordTableIndex.load_from_disk(save_json_path, service_context=service_context)
# Context information is below.
# ---------------------
# {context_str}
# ---------------------
# Given the context information and not prior knowledge, answer the question: {query_str}
text_qa_prompt_tmpl = (
"我们在下面提供了上下文信息. \n"
"---------------------\n"
"{context_str}"
"\n---------------------\n"
"鉴于此信息,请回答以下问题: {query_str}\n"
)
# The original question is as follows: {query_str}
# We have provided an existing answer: {existing_answer}
# We have the opportunity to refine the existing answer (only if needed) with some more context below.
# ------------
# {context_msg}
# ------------
# Given the new context, refine the original answer to better answer the question. If the context isn't useful, return the original answer.
refine_prompt_tmpl = (
"之前我们询问过这个问题: {query_str}\n"
"得到了原始的答案: {existing_answer}\n"
"现在我们有机会完善现有的答案 (仅在需要时) 通过下面的更多上下文.\n"
"------------\n"
"{context_msg}\n"
"------------\n"
"给我一个新的答案, 完善原始答案以更好的回答问题. 如果新的上下文没有用或者没必要再完善了, 则重复一遍原始的答案.\n"
)
text_qa_prompt = QuestionAnswerPrompt(text_qa_prompt_tmpl)
refine_prompt = RefinePrompt(refine_prompt_tmpl)
# while True:
# ask = input("请输入你的问题:")
# print(index.query(ask))
answer = index.query("用中文总结一下这篇文章主要讲了啥",
text_qa_template = text_qa_prompt,
refine_template = refine_prompt)
time.sleep(10)
return answer.response
def ask_gpt(text):
print(len(text))
max_len = 3000
if len(text) > max_len:
text = text[:max_len]
    # build the prompt to send to the API
prompt = f"请对以下新闻文章进行概述:\n{text}"
message = [
{'role': 'system', 'content': '请用中文对以下新闻文章进行概述'},
{'role': 'user', 'content': text},
]
try:
response = openai.ChatCompletion.create(
model = "gpt-3.5-turbo", # 对话模型的名称
# model = "gpt-4", # 对话模型的名称
messages = message,
temperature = 0.9, # 值在[0,1]之间,越大表示回复越具有不确定性
#max_tokens=4096, # 回复最大的字符数
top_p = 1,
frequency_penalty = 0.0, # [-2,2]之间,该值越大则更倾向于产生不同的内容
presence_penalty = 0.0, # [-2,2]之间,该值越大则更倾向于产生不同的内容
)
print(
f"""[ChatGPT] reply={response.choices[0]['message']['content']}, total_tokens={response["usage"]["total_tokens"]}"""
)
return response.choices[0]['message']['content']
except Exception as e:
print(e)
send_error_msg(f'openai api error:{e}')
def get_html(url):
url = urllib.parse.quote(url, safe='/:?=&')
# request = urllib.request.Request(url, headers = headers)
# response = urllib.request.urlopen(request)
if proxies:
response = requests.get(url, headers=headers, proxies=proxies)
else:
response = requests.get(url, headers=headers)
response.encoding = 'utf-8'
HtmlContent = response.read() if hasattr(response, 'read') else response.text
# HtmlContent = HtmlContent.decode('utf-8')
# print('python 返回 URL:{} 数据成功'.format(url))
return HtmlContent
def get_list_all():
thread_list_all = [
{
'thread_list': get_list('https://news.tvbs.com.tw/realtime/china'),
'category': '大陆',
},
# {
# 'thread_list': get_list('https://news.tvbs.com.tw/realtime/world'),
# 'category': '全球',
# },
{
'thread_list': get_list('https://news.tvbs.com.tw/realtime/tech'),
'category': '科技',
},
]
return thread_list_all
def get_list(url): # fetch a single page of listing data
HtmlContent = get_html(url)
HtmlContent = HtmlContent.replace("<!--", "")
HtmlContent = HtmlContent.replace("-->", "")
soup = BeautifulSoup(HtmlContent, "lxml")
thread_list = soup.select_one('body > div.container > main > div > article > div.news_list > div.list')
# print(thread_list)
return thread_list
def get_page(thread_list, category, json_all, new_news_list):
li_list = thread_list.select('li')
for li in li_list:
a = li.select_one('a')
if a is not None:
title = a.text
href = a.attrs['href']
date_div = li.select_one('div[class="time"]')
date = date_div.text.strip() if date_div is not None else ""
# print(title, href, date)
if href in json_all:
data_info = json_all[href]
if 'href' not in data_info:
data_info['href'] = href
else:
data_info = {}
data_info['href'] = href
data_info['title'] = title
data_info['date'] = date
data_info['category'] = category
json_all[href] = data_info
# new_news_list.append(data_info)
new_news_list.insert(0, data_info)
global add_num
add_num += 1
if add_num > 10:
                    # only read the first ten items; skip anything older
break
# if data_info['href'] == '/zhengce/zhengceku/2023-03/15/content_5746847.htm':
# new_news_list.append(data_info)
def write_json(file_name, json_all):
str_json = json.dumps(json_all, indent=2, ensure_ascii=False)
with open(file_name, "w", encoding='utf-8') as f:
f.write(str_json)
f.close()
def load_json(file_name):
try:
f = open(file_name, "r", encoding='utf-8')
except IOError:
return {}
else:
return json.load(f)
def send_wx_robot(robot_url, content_msg, mentioned_list = None):
headers = {
'Content-Type': 'application/json',
}
if mentioned_list:
data_table = {
"msgtype": "text",
"text": { "content": content_msg, "mentioned_list": mentioned_list }
}
else:
data_table = {
"msgtype": "markdown",
"markdown": { "content": content_msg }
}
data = json.dumps(data_table)
response = requests.post(f'https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key={robot_url}', headers=headers, data=data)
def send_feishu_robot(feishu_robot_key, feishu_msg):
headers = {
'Content-Type': 'application/json',
}
data = json.dumps({
"msg_type": "post",
"content": {
"post": {
"zh_cn": feishu_msg
}
}
})
response = requests.post(f'https://open.feishu.cn/open-apis/bot/v2/hook/{feishu_robot_key}', headers=headers, data=data)
return json.loads(response.text)
def get_feishu_token():
headers = {
'Content-Type': 'application/json',
}
data = json.dumps({
"app_id": "cli_a1c3790e21f8100c",
"app_secret": "YVXgZL2HnYi6gHm2NmxenfOTi60rfrQ3",
})
response = requests.post('https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal', headers=headers, data=data)
responsejson = json.loads(response.text)
print(responsejson['tenant_access_token'])
return responsejson['tenant_access_token']
def GetUserIDs(email_list):
headers = {
'Content-Type': 'application/json; charset=utf-8',
'Authorization': 'Bearer ' + get_feishu_token(),
}
response = requests.post('https://open.feishu.cn/open-apis/user/v1/batch_get_id?emails=' + '&emails='.join(email_list), headers=headers)
responsejson = json.loads(response.text)
email_users = responsejson['data']['email_users']
user_id_list = []
for email, ids in email_users.items():
print(email, ids[0]['open_id'], ids[0]['user_id'])
user_id_list.append(ids[0]['user_id'])
return user_id_list
def write_last_time(file_name):
with open(file_name, "w") as f:
f.write(str(time.time()))
f.close()
def read_last_time(file_name):
if os.path.exists(file_name):
with open(file_name, 'r') as f:
last_send_time = float(f.read())
f.close()
return last_send_time
else:
write_last_time(file_name)
return time.time()
def main():
lock_file = 'news_spider.lock'
if not os.path.exists(lock_file):
_extracted_from_main_4(lock_file)
else:
print('file lock')
time.sleep(5)
os.remove(lock_file)
print('lock file delete')
def _extracted_from_main_4(lock_file):
# with open(lock_file, 'w') as f:
# f.write('')
# f.close()
get_news()
if os.path.exists(lock_file):
os.remove(lock_file)
def check_local_ip():
url = 'https://www.123cha.com'
HtmlContent = get_html(url)
soup = BeautifulSoup(HtmlContent, "lxml")
iplocation = soup.select_one('body > div.header > div.location > span')
print('当前访问IP:', iplocation and iplocation.text)
if __name__ == "__main__":
try:
        # code that may raise an exception
        check_local_ip()
    except Exception as e:
        # handle the exception
print('Error:', e)
result = None
main() | [
"请用中文对以下新闻文章进行概述",
"[]",
"我们在下面提供了上下文信息. \n---------------------\n{context_str}\n---------------------\n鉴于此信息,请回答以下问题: {query_str}\n",
"请对以下新闻文章进行概述:\nPLACEHOLDER",
"{'post': {'zh_cn': PLACEHOLDER}}",
"之前我们询问过这个问题: {query_str}\n得到了原始的答案: {existing_answer}\n现在我们有机会完善现有的答案 (仅在需要时) 通过下面的更多上下文.\n------------\n{context_msg}\n------------\n给我一个新的答案, 完善原始答案以更好的回答问题. 如果新的上下文没有用或者没必要再完善了, 则重复一遍原始的答案.\n"
] |
2024-01-10 | TomaOfficer/AI-Utopian | Archive~introchain.py | import os
from langchain import OpenAI
from langchain.agents import Tool, load_tools, initialize_agent
from langchain.chains import LLMChain, LLMMathChain
from langchain.prompts import PromptTemplate
openai_api_key = os.getenv("OPENAI_API_KEY")
llm = OpenAI(openai_api_key=openai_api_key,
temperature=0,
model_name="text-davinci-003")
prompt = PromptTemplate(input_variables=["query"], template="{query}")
llm_chain = LLMChain(llm=llm, prompt=prompt)
# initialize the LLM tool
llm_tool = Tool(
name='Language Model',
func=llm_chain.run,
description='use this tool for general purpose queries and logic')
# llm_math = LLMMathChain(llm=llm)
# # initialize the math tool
# math_tool = Tool(
# name='Calculator',
# func=llm_math.run,
# description='Useful for when you need to answer questions about math.')
# tools = load_tools(['llm-math'], llm=llm)
# zero_shot_agent = initialize_agent(agent="zero-shot-react-description",
# tools=tools,
# llm=llm,
# verbose=True,
# max_iterations=3)
# zero_shot_agent("what is (4.5*2.1)^2.2?")
| [
"{query}"
] |
2024-01-10 | TomaOfficer/AI-Utopian | Archive~chatopenai.py | from flask import Flask, render_template, request
import requests
import os
import markdown
from langchain.chains import ConversationChain
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage, SystemMessage
app = Flask(__name__)
openai_api_key = os.getenv("OPENAI_API_KEY")
chat = ChatOpenAI(model_name="gpt-4",
temperature=.2,
openai_api_key=openai_api_key)
def chat_with_ward(user_input):
messages = [
SystemMessage(
content="You are Ward, a formal, butler agent. You love your job "
"You speak as a tour guide with a focus on the historical narrative of the user's "
"location. Your mission is to deliver a riveting, yet sober, guided tour."
"Focus on the end-user's exact location, down "
"to the specific street or building. Start with quick statement about"
"whether or not you have engough information to say something interesting. "
"Then launch into the notable features that form the body of your narrative. "
"Conclude with a invitation to learn more about something you've said. "
"If you cannot gather sufficient information for the "
"exact location, prompt the end-user to inquire if they would like to "
"expand their horizons to a broader but immediate area. Keep the narrative "
"limited to three key points or scenes. Use markdown to create dramatic "
"emphasis and readability."),
HumanMessage(content=user_input)
]
response = chat(messages)
return response.content
@app.route('/')
def home():
return render_template('index.html')
@app.route('/chat', methods=["POST"])
def handle_chat():
user_input = request.form['user_input']
ward_response = chat_with_ward(user_input)
# convert markdown to HTML
ward_response_html = markdown.markdown(ward_response)
return render_template('index.html', ward_response=ward_response_html)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8080)
| [
"You are Ward, a formal, butler agent. You love your job You speak as a tour guide with a focus on the historical narrative of the user's location. Your mission is to deliver a riveting, yet sober, guided tour.Focus on the end-user's exact location, down to the specific street or building. Start with quick statement aboutwhether or not you have engough information to say something interesting. Then launch into the notable features that form the body of your narrative. Conclude with a invitation to learn more about something you've said. If you cannot gather sufficient information for the exact location, prompt the end-user to inquire if they would like to expand their horizons to a broader but immediate area. Keep the narrative limited to three key points or scenes. Use markdown to create dramatic emphasis and readability."
] |
2024-01-10 | bearney74/microagents | agents~microagent_manager.py | import logging
from typing import List, Optional, Any
from agents.agent_creation import AgentCreation
from agents.agent_similarity import AgentSimilarity
from integrations.openaiwrapper import OpenAIAPIWrapper
class MicroAgentManager:
"""
Manages the creation and retrieval of micro agents.
"""
def __init__(self, api_key: str, max_agents: int = 20, logger: Optional[logging.Logger] = None):
self.api_key = api_key
self.max_agents = max_agents
self.openai_wrapper = OpenAIAPIWrapper(api_key)
self.agent_creator = AgentCreation(self.openai_wrapper, max_agents)
self.logger = logger or self._setup_logger()
def _setup_logger(self) -> logging.Logger:
"""Sets up a logger for the class."""
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)
logger.addHandler(logging.StreamHandler())
return logger
def get_agents(self) -> List[Any]:
"""Returns the list of agents."""
return self.agent_creator.agents
def create_agents(self) -> None:
"""Creates prime agents and logs the process."""
self.logger.info("Creating agents...")
try:
self.agent_creator.create_prime_agent()
self.logger.info("Agents created successfully.")
except Exception as e:
self.logger.error(f"Error in creating agents: {e}")
raise
def get_or_create_agent(self, purpose: str, depth: int, sample_input: str) -> Any:
"""
Retrieves an existing agent or creates a new one based on the given purpose.
"""
self.logger.info(f"Getting or creating agent for purpose: {purpose}")
try:
agent = self.agent_creator.get_or_create_agent(purpose, depth, sample_input)
self.logger.info(f"Agent for purpose '{purpose}' retrieved or created.")
return agent
except Exception as e:
self.logger.error(f"Error in getting or creating agent: {e}")
raise
def find_closest_agent(self, purpose: str) -> Any:
"""
Finds the closest agent matching the given purpose.
"""
self.logger.info(f"Finding closest agent for purpose: {purpose}")
try:
agent_similarity = AgentSimilarity(self.api_key, self.agent_creator.agents)
purpose_embedding = agent_similarity.get_embedding(purpose)
closest_agent = agent_similarity.find_closest_agent(purpose_embedding)
self.logger.info(f"Closest agent for purpose '{purpose}' found.")
return closest_agent
except Exception as e:
self.logger.error(f"Error in finding closest agent: {e}")
raise
def display_agent_status(self):
"""Displays the current status of all agents."""
for agent in self.get_agents():
self.logger.info(f"Agent {agent.purpose}: Status = {agent.current_status}, Evolve Count = {agent.evolve_count}")
def display_active_agent_tree(self):
"""Displays a tree view of active agent relationships."""
for agent in self.get_agents():
if agent.active_agents:
self.logger.info(f"Agent {agent.purpose} is calling: {agent.active_agents}")
else:
self.logger.info(f"Agent {agent.purpose} is currently idle.")
| [] |
2024-01-10 | bearney74/microagents | prompt_management~prompt_evolution.py | import logging
from integrations.openaiwrapper import OpenAIAPIWrapper
logging.basicConfig(level=logging.ERROR, format='%(asctime)s - %(levelname)s - %(message)s')
class PromptEvolution:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, manager):
"""Initialize PromptEvolution with OpenAI API wrapper and a manager."""
self.openai_wrapper = openai_wrapper
self.manager = manager
def evolve_prompt(self, input_text: str, dynamic_prompt: str, output: str, full_conversation: str, new_solution: bool, depth: int) -> str:
"""
Evolves the prompt based on feedback from the output and full conversation.
Args:
input_text: The input text for the prompt.
dynamic_prompt: The dynamic part of the prompt.
output: The output received from the previous interaction.
full_conversation: The entire conversation history.
new_solution: Boolean indicating if a new solution is provided.
depth: The current depth of the agent.
Returns:
The evolved prompt.
"""
full_conversation = self._truncate_conversation(full_conversation)
runtime_context = self._generate_runtime_context(depth)
evolve_prompt_query = self._build_evolve_prompt_query(dynamic_prompt, output, full_conversation, new_solution)
try:
new_prompt = self._get_new_prompt(evolve_prompt_query, runtime_context)
except Exception as e:
logging.error(f"Error evolving prompt: {e}")
new_prompt = dynamic_prompt
return new_prompt
def _truncate_conversation(self, conversation: str) -> str:
"""Truncates the conversation to the last 1000 characters if it's too long."""
if len(conversation) > 1000:
return conversation[:200] + "..." + conversation[-1000:]
return conversation
def _generate_runtime_context(self, depth: int) -> str:
"""Generates runtime context for the evolve prompt query."""
available_agents = [agent for agent in self.manager.agents if agent.purpose != "General"]
agents_info = ', '.join([f"{agent.purpose} (depth={agent.depth})" for agent in available_agents])
return f"Current Agent Depth: {depth}. Available agents: {agents_info}."
def _build_evolve_prompt_query(self, dynamic_prompt: str, output: str, full_conversation: str, new_solution: bool) -> str:
"""Builds the query for evolving the prompt."""
feedback_query = "How should the GPT-4 prompt evolve based on this input and feedback?"
if new_solution:
feedback_query += " Consider the solution provided in the full conversation section and make it reusable."
return f"{feedback_query} Current Prompt: {dynamic_prompt}, Full Conversation: {full_conversation}"
def _get_new_prompt(self, evolve_prompt_query: str, runtime_context: str) -> str:
"""Fetches a new prompt from the OpenAI API."""
response = self.openai_wrapper.chat_completion(
model="gpt-4-1106-preview",
messages=[{"role": "system", "content": evolve_prompt_query + runtime_context}]
)
return response.choices[0].message['content'].strip() | [
"PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | bearney74/microagents | agents~microagent.py | import logging
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_evaluation import AgentEvaluator
from agents.agent_response import AgentResponse
from agents.agent_similarity import AgentSimilarity
from runtime.code_execution import CodeExecution
from prompt_management.prompt_evolution import PromptEvolution
from agents.response_extraction import ResponseExtraction
from utils.utility import get_env_variable, time_function, log_exception
logging.basicConfig(level=logging.ERROR, format='%(asctime)s - %(levelname)s - %(message)s')
class MicroAgent:
"""
The MicroAgent class encapsulates the behavior of a small, purpose-driven agent
that interacts with the OpenAI API.
"""
def __init__(self, initial_prompt, purpose, depth, agent_creator, openai_wrapper, max_depth=3, bootstrap_agent=False, is_prime=False):
self.dynamic_prompt = initial_prompt
self.purpose = purpose
self.depth = depth
self.max_depth = max_depth
self.usage_count = 0
self.working_agent = bootstrap_agent
self.agent_creator = agent_creator
self.openai_wrapper = openai_wrapper
self.evolve_count = 0 # Track how often the agent has evolved
self.number_of_code_executions = 0 # Track how often the agent has executed code
self.current_status = None # Track the current status of the agent
self.active_agents = {} # Track active agents in a tree view
self.last_input = ""
self.is_prime = is_prime
# Initialize components used by the agent
self.agent_evaluator = AgentEvaluator(self.openai_wrapper)
self.code_executor = CodeExecution()
self.agent_responder = AgentResponse(self.openai_wrapper, self.agent_creator, self.code_executor, self, agent_creator, depth)
self.agent_similarity = AgentSimilarity(self.openai_wrapper, self.agent_creator.agents)
self.prompt_evolver = PromptEvolution(self.openai_wrapper, self.agent_creator)
self.response_extractor = ResponseExtraction(self.openai_wrapper)
def update_status(self, status):
"""Update the agent's current status."""
self.current_status = status
logging.info(f"Agent {self.purpose} status updated to: {status}")
def update_active_agents(self, calling_agent, called_agent=None):
"""Update the tree view of active agents."""
if called_agent:
self.active_agents[calling_agent] = called_agent
else:
self.active_agents.pop(calling_agent, None)
logging.info(f"Active agents updated: {self.active_agents}")
@time_function
def respond(self, input_text):
"""
Generate a response to the given input text.
"""
self.last_input = input_text
try:
self.update_status('Planning')
response, conversation, solution, iterations = self.agent_responder.generate_response(
input_text, self.dynamic_prompt, self.max_depth
)
if not self.working_agent:
if iterations > 2:
self.evolve_count += 1
self.update_status('Evolving prompt')
self.dynamic_prompt = self.prompt_evolver.evolve_prompt(
input_text, self.dynamic_prompt, response, conversation, solution, self.depth
)
elif solution:
self.working_agent = True
self.update_status('Idle')
self.update_active_agents(self.purpose)
return response
except Exception as e:
log_exception(e)
self.update_status('Error')
self.update_active_agents(self.purpose)
return "An error occurred while generating the response."
| [] |
2024-01-10 | bearney74/microagents | agents~agent_similarity.py | import numpy as np
from typing import List, Tuple, Optional
from sklearn.metrics.pairwise import cosine_similarity
from integrations.openaiwrapper import OpenAIAPIWrapper
class Agent:
def __init__(self, purpose: str):
self.purpose = purpose
class AgentSimilarity:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agents: List[Agent]):
"""
Initializes the AgentSimilarity object.
:param openai_wrapper: Instance of OpenAIAPIWrapper to interact with OpenAI API.
:param agents: List of Agent objects.
"""
self.openai_wrapper = openai_wrapper
self.agents = agents
def get_embedding(self, text: str) -> np.ndarray:
"""
Retrieves the embedding for a given text.
:param text: Text to get embedding for.
:return: Embedding as a numpy array.
"""
try:
response = self.openai_wrapper.get_embedding(text)
if 'data' in response and len(response['data']) > 0 and 'embedding' in response['data'][0]:
return np.array(response['data'][0]['embedding'])
else:
raise ValueError("Invalid response format")
except Exception as e:
raise ValueError(f"Error retrieving embedding: {e}")
def calculate_similarity_threshold(self) -> float:
"""
Calculates the 98th percentile of the similarity threshold across all agents.
:return: 98th percentile of similarity threshold.
"""
try:
embeddings = [self.get_embedding(agent.purpose) for agent in self.agents]
if len(embeddings) < 250:
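                # too few agents for a stable percentile estimate, so fall back to a fixed default threshold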
return 0.9
similarities = [cosine_similarity([e1], [e2])[0][0] for i, e1 in enumerate(embeddings) for e2 in embeddings[i+1:]]
return np.percentile(similarities, 98) if similarities else 0.9
except Exception as e:
raise ValueError(f"Error calculating similarity threshold: {e}")
def find_closest_agent(self, purpose_embedding: np.ndarray) -> Tuple[Optional[Agent], float]:
"""
Finds the closest agent based on the given purpose embedding.
:param purpose_embedding: The embedding of the purpose to find the closest agent for.
:return: Tuple of the closest agent and the highest similarity score.
"""
closest_agent: Optional[Agent] = None
highest_similarity: float = -np.inf
try:
for agent in self.agents:
agent_embedding = self.get_embedding(agent.purpose)
similarity = cosine_similarity([agent_embedding], [purpose_embedding])[0][0]
if similarity > highest_similarity:
highest_similarity = similarity
closest_agent = agent
return closest_agent, highest_similarity
except Exception as e:
raise ValueError(f"Error finding closest agent: {e}") | [] |
2024-01-10 | bearney74/microagents | agents~agent_creation.py | from typing import List, Optional
from agents.microagent import MicroAgent
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_similarity import AgentSimilarity
from prompt_management.prompts import (
PRIME_PROMPT, PRIME_NAME,
PROMPT_ENGINEERING_SYSTEM_PROMPT,
PROMPT_ENGINEERING_TEMPLATE, EXAMPLES
)
DEFAULT_MAX_AGENTS = 20
PRIME_AGENT_WEIGHT = 25
MODEL_NAME = "gpt-4-1106-preview"
class AgentCreation:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, max_agents: int = DEFAULT_MAX_AGENTS):
self.agents: List[MicroAgent] = []
self.openai_wrapper = openai_wrapper
self.max_agents = max_agents
def create_prime_agent(self) -> None:
"""
Creates the prime agent and adds it to the agent list.
"""
prime_agent = MicroAgent(
PRIME_PROMPT, PRIME_NAME, 0, self,
self.openai_wrapper, PRIME_AGENT_WEIGHT, True, True
)
self.agents.append(prime_agent)
def get_or_create_agent(self, purpose: str, depth: int, sample_input: str) -> MicroAgent:
"""
Retrieves or creates an agent based on the given purpose.
"""
agent_similarity = AgentSimilarity(self.openai_wrapper, self.agents)
purpose_embedding = agent_similarity.get_embedding(purpose)
closest_agent, highest_similarity = agent_similarity.find_closest_agent(purpose_embedding)
similarity_threshold = agent_similarity.calculate_similarity_threshold()
if highest_similarity >= similarity_threshold:
closest_agent.usage_count += 1
return closest_agent
self.remove_least_used_agent_if_needed()
new_agent = self.create_new_agent(purpose, depth, sample_input)
return new_agent
def remove_least_used_agent_if_needed(self) -> None:
"""
Removes the least used agent if the maximum number of agents is exceeded.
"""
if len(self.agents) >= self.max_agents:
self.agents.sort(key=lambda agent: agent.usage_count)
self.agents.pop(0)
def create_new_agent(self, purpose: str, depth: int, sample_input: str) -> MicroAgent:
"""
Creates a new agent.
"""
prompt = self.generate_llm_prompt(purpose, sample_input)
new_agent = MicroAgent(prompt, purpose, depth, self, self.openai_wrapper)
new_agent.usage_count = 1
self.agents.append(new_agent)
return new_agent
def generate_llm_prompt(self, goal: str, sample_input: str) -> str:
"""
Generates a prompt for the LLM based on the given goal and sample input.
"""
messages = [
{"role": "system", "content": PROMPT_ENGINEERING_SYSTEM_PROMPT},
{"role": "user", "content": PROMPT_ENGINEERING_TEMPLATE.format(goal=goal, sample_input=sample_input, examples=EXAMPLES)}
]
try:
response = self.openai_wrapper.chat_completion(
model=MODEL_NAME,
messages=messages
)
return response.choices[0].message['content'].strip()
except Exception as e:
print(f"Error generating LLM prompt: {e}")
return ""
| [] |
2024-01-10 | deepakness/gpt-3.5-fine-tuning | upload.py | import openai
openai.api_key = "YOUR_OPENAI_API_KEY"
def fine_tune_model(file_path):
# Upload the file
with open(file_path, "rb") as file_data:
upload_response = openai.File.create(
file=file_data,
purpose='fine-tune'
)
file_id = upload_response.id
print(f"File uploaded successfully. ID: {file_id}")
# Usage
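# for gpt-3.5-turbo fine-tuning, data.jsonl is expected to be chat-format JSONL: one JSON object per line, each with a "messages" list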
fine_tune_model("data.jsonl") | [] |
2024-01-10 | deepakness/gpt-3.5-fine-tuning | tuning.py | import openai
openai.api_key = "YOUR_OPENAI_API_KEY"
def start_finetuning_job(file_id, model="gpt-3.5-turbo"):
try:
job = openai.FineTuningJob.create(training_file=file_id, model=model)
print(f"Fine-tuning job created successfully: {job}")
return job
except Exception as e:
print(f"Failed to create fine-tuning job. Error: {e}")
return None
start_finetuning_job("file-AIvPJuN78Mtl1BWzkmtngFAj") | [] |
2024-01-10 | jwalsh/huggingface-pipelines | huggingface_pipelines~history.py | from langchain.memory import PostgresChatMessageHistory
history = PostgresChatMessageHistory(
connection_string="postgresql://postgres:mypassword@localhost/chat_history",
session_id="foo",
)
history.add_user_message("hi!")
history.add_ai_message("whats up?") | [] |
2024-01-10 | jwalsh/huggingface-pipelines | huggingface_pipelines~postgres.py | from langchain.memory import PostgresChatMessageHistory
import os
POSTGRES_PASSWORD = os.getenv("POSTGRES_PASSWORD")
history = PostgresChatMessageHistory(
connection_string=f"postgresql://postgres:{POSTGRES_PASSWORD}@localhost:5432/chat_history",
table_name="message_store",
session_id="foo",
)
history.add_user_message("hi!")
history.add_ai_message("whats up?")
| [] |
2024-01-10 | jwalsh/huggingface-pipelines | huggingface_pipelines~autogpt.py | from langchain.agents import Tool
from langchain.tools.file_management.read import ReadFileTool
from langchain.tools.file_management.write import WriteFileTool
from langchain.utilities import SerpAPIWrapper
from langchain.memory.chat_message_histories import FileChatMessageHistory
search = SerpAPIWrapper()
tools = [
Tool(
name="search",
func=search.run,
description="useful for when you need to answer questions about current events. You should ask targeted questions",
),
WriteFileTool(),
ReadFileTool(),
]
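# `vectorstore` is used below but never defined in this file; the lines that follow are a
# minimal sketch of the usual in-memory FAISS setup (assumes the faiss-cpu package and
# OpenAI embeddings, whose vectors are 1536-dimensional).
import faiss
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
embeddings_model = OpenAIEmbeddings()
embedding_size = 1536  # dimensionality of OpenAI ada-002 embedding vectors
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})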
agent = AutoGPT.from_llm_and_tools(
ai_name="Tom",
ai_role="Assistant",
tools=tools,
llm=ChatOpenAI(temperature=0),
memory=vectorstore.as_retriever(),
)
# Set verbose to be true
agent.chain.verbose = True
# agent = AutoGPT.from_llm_and_tools(
# ai_name="Tom",
# ai_role="Assistant",
# tools=tools,
# llm=ChatOpenAI(temperature=0),
# memory=vectorstore.as_retriever(),
# chat_history_memory=FileChatMessageHistory("chat_history.txt"),
# )
| [] |
2024-01-10 | jwalsh/huggingface-pipelines | huggingface_pipelines~predict_messages.py | from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
from langchain.schema import HumanMessage
llm = OpenAI()
chat_model = ChatOpenAI()
text = "What would be a good company name for a company that makes colorful socks?"
messages = [HumanMessage(content=text)]
llm.predict_messages(messages)
# >> Feetful of Fun
chat_model.predict_messages(messages)
# >> Socks O'Color | [
"What would be a good company name for a company that makes colorful socks?"
] |
2024-01-10 | silasnevstad/verifi | gpt~gpt_client.py | import openai
from decouple import config
class GPTClient:
def __init__(self):
self.api_key = config('OPENAI_API_KEY')
openai.api_key = self.api_key
self.RETRY_LIMIT = 3
def chat_completion(self, messages, functions):
response = openai.ChatCompletion.create(
model="gpt-4",
messages=messages,
functions=functions,
function_call="auto",
)
return response
def get_function_call(self, messages, functions):
for attempt in range(self.RETRY_LIMIT):
response = self.chat_completion(messages, functions)
if 'choices' in response and response['choices']:
choice = response['choices'][0]
message = choice.get('message', {})
function_call = message.get('function_call')
if function_call:
return function_call
print(f"Retry {attempt + 1}/{self.RETRY_LIMIT}: No function call found.")
print(f"Exceeded retry limit of {self.RETRY_LIMIT}. No function call found.")
return None | [] |
2024-01-10 | hcss-utils/Relevant_topic_modeling | scripts~topic_modeling.py | import warnings
warnings.filterwarnings("ignore", message=".*The 'nopython' keyword.*")
import numpy as np
import pandas as pd
import spacy
from bertopic import BERTopic
from keybert import KeyBERT
import plotly.io as pio
import openai
import tiktoken
from bertopic.representation import OpenAI
from keyphrase_vectorizers import KeyphraseCountVectorizer
from sentence_transformers import SentenceTransformer
from sklearn.feature_extraction.text import CountVectorizer
from tqdm import tqdm
import argparse
import os
import gc
tqdm.pandas()
class LemmaTokenizer:
def __init__(self, nlp_str):
self.nlp = spacy.load(nlp_str)
def __call__(self, doc):
def chunkstring(string, length):
return (string[0+i:length+i] for i in range(0, len(string), length))
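        # spaCy rejects texts longer than nlp.max_length (1,000,000 characters by default),
        # so very long docs are lemmatized in 500k-character chunks and the results concatenated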
if len(doc) > 1000000:
lemms = []
for chunk in chunkstring(doc, 500000):
lemms = lemms + self.__call__(chunk)
return lemms
else:
return [t.lemma_ for t in self.nlp(doc) if not t.is_punct]
def extract_keyNPs(df:pd.DataFrame, embedding_model:str, spacy_model:str, embeddings:np.array = None)->pd.DataFrame:
"""
Extract key noun phrases (keyNPs) using KeyBERT.
Args:
df (pd.DataFrame): DataFrame with sentences.
embedding_model (str): Name of the SentenceTransformer model for embeddings.
spacy_model (str): Name of the spacy model for lemmatization.
embeddings (np.array, optional): Array containing sentence embeddings. Defaults to None.
Returns:
pd.DataFrame: DataFrame with extracted keyNPs and lemmatized keyNPs.
"""
if not spacy.util.is_package(spacy_model):
print("Downloading spacy model ...")
spacy.cli.download(spacy_model)
vectorizer = KeyphraseCountVectorizer(spacy_pipeline=spacy.load(spacy_model), pos_pattern='<ADJ.*>*<N.*>+')
keybert = KeyBERT(model = SentenceTransformer(embedding_model))
print("Extracting keyNPs with keyBERT ...")
keynps = []
for i in tqdm(range(df.shape[0])):
        doc_emb = embeddings[i, :].reshape(1, -1) if embeddings is not None else None
        keynps.append(keybert.extract_keywords(df['text'].iloc[i], vectorizer = vectorizer, doc_embeddings = doc_emb))
df['keyNPs'] = keynps
print("Lemmatization of keyNPs ...")
nlp = spacy.load(spacy_model)
df['keyNPs_lemm'] = df['keyNPs'].progress_apply(lambda x: [' '.join([t.lemma_ for t in nlp(np[0]) if not t.is_punct]) for np in x])
return df
def topic_modeling(df:pd.DataFrame, embedding_model:str, spacy_model:str, embeddings:np.array = None)->BERTopic:
"""
Fit topic model (BERTopic)
Args:
df (pd.DataFrame): DataFrame with sentences and keyNPs
embeddings_model (str): Name of the SentenceTransformer model for embeddings
spacy_model (str): Name of the spacy model for lemmatization
embeddings (np.array, optinal): Array containing sentence embeddings. Defaults to None
Reurns:
model (bertopic.BERTopic): topic model
"""
if not spacy.util.is_package(spacy_model):
print("Downloading spacy model ...")
spacy.cli.download(spacy_model)
vocabulary = []
for keynps in df['keyNPs_lemm']:
vocabulary = vocabulary + keynps
vocabulary = list(set(vocabulary))
stopwords = list(spacy.load(spacy_model).Defaults.stop_words)
vectorizer_model = CountVectorizer(tokenizer=LemmaTokenizer(spacy_model), ngram_range=(1,3), stop_words = stopwords, vocabulary = vocabulary)
model = BERTopic(embedding_model = SentenceTransformer(embedding_model), nr_topics = "auto", vectorizer_model=vectorizer_model, verbose = True)
print("Fitting BERTopic model ...")
    emb = embeddings.reshape(df.shape[0], -1) if embeddings is not None else None
    _ = model.fit_transform(df['text'], embeddings = emb)
return model
def topic_representation(api_key, gpt_model, topic_model, df):
"""
Get new topic representation and summary
Args:
api_key (str): openAI API key
gpt_model (str): model to use
topic_model (bertopic.BERTopic): topic model
df (pd.DataFrame): DataFrame with sentences
Returns:
new_topics (list): list with new topic representations
summary (list): list with summary for each topic
"""
print("Topic representation ...")
openai.api_key = api_key
encoding = tiktoken.encoding_for_model(gpt_model)
mean_len = df['text'].apply(lambda x: len(encoding.encode(x))).mean()
print(f"Mean token count in sentences: {mean_len}")
gpt_model = OpenAI(model=gpt_model, delay_in_seconds=10, chat=True, nr_docs = min(int(4096/mean_len), 20))
documents = topic_model.get_document_info(df['text'])
documents['Representation'] = documents['Representation'].apply(lambda x: tuple(x))
documents['Representative_Docs'] = documents['Representative_Docs'].apply(lambda x: tuple(x))
gpt_model.prompt = """
I have a topic that is described by the following keywords: [KEYWORDS]
In this topic, the following sentences are a small but representative subset of all sentences in the topic:
[DOCUMENTS]
Based on the information above, extract a short topic label (~100 characters) in the following format:
topic: <topic label>
"""
new_topics = None
summary = None
try:
new_topics = gpt_model.extract_topics(topic_model, documents, topic_model.c_tf_idf_, topic_model.get_topics())
new_topics = [str(key) + ". " + new_topics[key][0][0] for key in new_topics]
except:
print("!!! OpenAI APIError during topic representation !!!")
gpt_model.prompt = """
I have a topic that is described by the following keywords: [KEYWORDS]
In this topic, the following sentences are a small but representative subset of all sentences in the topic:
[DOCUMENTS]
Based on the information above, please give a short description (~1000 characters) of this topic in the following format:
topic: <description>
"""
print("Summary ...")
try:
summary = gpt_model.extract_topics(topic_model, documents, topic_model.c_tf_idf_, topic_model.get_topics())
summary = [str(key) + ". " + summary[key][0][0] for key in summary]
except:
print("!!! OpenAI APIError during summary !!!")
return new_topics, summary
def run(args):
"""
Run the topic modeling
Args:
args: Command-line arguments passed to the script.
"""
embeddings = None
if "sentences_embeddings.npy" in os.listdir(args.input):
embeddings = np.load(args.input + "sentences_embeddings.npy")
datetime = None
if "documents.csv" in os.listdir(args.input):
docs = pd.read_csv(args.input + "documents.csv")
if 'datetime' in docs.columns:
datetime = docs[['id', 'datetime']]
del docs
gc.collect()
df = pd.read_csv(args.input + "sentences_sim.csv")
df = df[df['cos_sim'] > args.threshold]
if datetime is not None:
        df = pd.merge(df, datetime, "left", left_on = "doc_id", right_on = "id")
if embeddings is not None:
embeddings = embeddings[df.index, :]
df.reset_index(inplace = True)
df.drop("index", axis = 1, inplace = True)
print(f"N sentences over threshold: {df.shape[0]}")
df = extract_keyNPs(df, args.model, args.spacy_model, embeddings)
print("Saving sentences with keyNPs ...")
df.to_csv(args.output + "sentences_keyNPs.csv", index = False)
model = topic_modeling(df, args.model, args.spacy_model, embeddings)
print("Saving raw topics ...")
with open(args.output + "topics_raw.txt", "w+") as f:
f.write('\n'.join([str(topic) + ". " + ', '.join([t[0] for t in model.get_topics()[topic]]) for topic in model.get_topics()]))
new_topics, summary = None, None
if args.gpt_model is not None:
if args.api_key is None:
raise RuntimeError("No openai key provided")
new_topics, summary = topic_representation(args.api_key, args.gpt_model, model, df)
if new_topics is None:
new_topics = model.generate_topic_labels(nr_words=7, topic_prefix=True, separator=", ")
model.set_topic_labels(new_topics)
print("Saving updated topics ...")
with open(args.output + "topics_updated.txt", "w+") as f:
f.write('\n'.join(new_topics))
print("Saving summary ...")
if summary is not None:
with open(args.output + "summary.txt", "w+") as f:
f.write('\n'.join(summary))
print("Saving visuals ...")
lens = []
for topic in new_topics:
lens.append(len(topic) + 3)
width = max(lens)*5 + 500
try:
model.visualize_topics(custom_labels = True, width = width).write_html(args.output + "topics_vis.html")
except ValueError:
print("Imposible to create topics_vis")
model.visualize_documents(df['text'], custom_labels = True).write_html(args.output + "documents_vis.html")
if 'datetime' in df.columns:
df['datetime'] = pd.to_datetime(df['datetime'], yearfirst = True)
df['year_month'] = df['datetime'].apply(lambda x: str(x.year) + "-" + str(x.month))
topics_over_time = model.topics_over_time(df['text'], df['year_month'], evolution_tuning=False, global_tuning=False)
model.visualize_topics_over_time(topics_over_time, custom_labels = True, width = width).write_html(args.output + "over_time_vis.html")
else:
print("Imposible to create over_time_vis")
model.visualize_barchart(custom_labels = True, n_words = 10, width = width).write_html(args.output + "barchar_vis.html")
hierarchical_topics = model.hierarchical_topics(df['text'])
model.visualize_hierarchy(hierarchical_topics=hierarchical_topics, custom_labels = True, width = width).write_html(args.output + "hierarchy_vis.html")
model.visualize_heatmap(custom_labels = True, width = width).write_html(args.output + "heatmap_vis.html")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", help = "path to directory with sentences_sim.csv, optionaly with sentences_embeddings.npy, documents.csv (default: ../data/)", type = str, default = "../data/")
parser.add_argument("-o", "--output", help = "path to directory where files will be stored (default: ../data/)", type = str, default = "../data/")
parser.add_argument("-t", "--threshold", help = "threshold to determine relevant sentences (default: 0.5)", type = float, default = 0.5)
parser.add_argument("-sm", "--spacy_model", help = "spacy model for lemmatization (default: en_core_web_lg)", type = str, default = "en_core_web_lg")
parser.add_argument("-m", "--model", help = "model for embedding (default: sentence-transformers/sentence-t5-xl)", type = str, default = "sentence-transformers/sentence-t5-xl")
parser.add_argument("-gpt", "--gpt_model", help = "model for topic representation and summary (default: None)", type = str, default = None)
parser.add_argument("-ak", "--api_key", help = "openAI API key (default: None)", type = str, default = None)
args = parser.parse_args()
run(args) | [] |
2024-01-10 | ecomoptimizer/litellm | litellm~tests~test_stream_chunk_builder.py | import sys, os, time
import traceback, asyncio
import pytest
sys.path.insert(
0, os.path.abspath("../..")
) # Adds the parent directory to the system path
from litellm import completion, stream_chunk_builder
import litellm
import os, dotenv
from openai import OpenAI
import pytest
dotenv.load_dotenv()
user_message = "What is the current weather in Boston?"
messages = [{"content": user_message, "role": "user"}]
function_schema = {
"name": "get_weather",
"description":
"gets the current weather",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description":
"The city and state, e.g. San Francisco, CA"
},
},
"required": ["location"]
},
}
tools_schema = [
{
"type": "function",
"function": {
"name": "get_current_weather",
"description": "Get the current weather in a given location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, e.g. San Francisco, CA"
},
"unit": {
"type": "string",
"enum": ["celsius", "fahrenheit"]
}
},
"required": ["location"]
}
}
}
]
# def test_stream_chunk_builder_tools():
# try:
# litellm.set_verbose = False
# response = client.chat.completions.create(
# model="gpt-3.5-turbo",
# messages=messages,
# tools=tools_schema,
# # stream=True,
# # complete_response=True # runs stream_chunk_builder under-the-hood
# )
# print(f"response: {response}")
# print(f"response usage: {response.usage}")
# except Exception as e:
# pytest.fail(f"An exception occurred - {str(e)}")
# test_stream_chunk_builder_tools()
def test_stream_chunk_builder_litellm_function_call():
try:
litellm.set_verbose = False
response = litellm.completion(
model="gpt-3.5-turbo",
messages=messages,
functions=[function_schema],
# stream=True,
# complete_response=True # runs stream_chunk_builder under-the-hood
)
print(f"response: {response}")
except Exception as e:
pytest.fail(f"An exception occurred - {str(e)}")
# test_stream_chunk_builder_litellm_function_call()
def test_stream_chunk_builder_litellm_tool_call():
try:
litellm.set_verbose = False
response = litellm.completion(
model="azure/chatgpt-functioncalling",
messages=messages,
tools=tools_schema,
stream=True,
complete_response = True
)
print(f"complete response: {response}")
print(f"complete response usage: {response.usage}")
except Exception as e:
pytest.fail(f"An exception occurred - {str(e)}")
test_stream_chunk_builder_litellm_tool_call()
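# A minimal sketch of calling stream_chunk_builder directly instead of relying on
# complete_response=True (assumes a valid OPENAI_API_KEY is set; illustrative only):
#
# chunks = []
# streamed = litellm.completion(model="gpt-3.5-turbo", messages=messages, stream=True)
# for chunk in streamed:
#     chunks.append(chunk)
# rebuilt = stream_chunk_builder(chunks, messages=messages)
# print(rebuilt.choices[0].message.content, rebuilt.usage)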
| [
"What is the current weather in Boston?"
] |
2024-01-10 | ecomoptimizer/litellm | litellm~tests~test_router.py | #### What this tests ####
#This tests litellm router
import sys, os, time
import traceback, asyncio
import pytest
sys.path.insert(
0, os.path.abspath("../..")
) # Adds the parent directory to the system path
import litellm
from litellm import Router
from concurrent.futures import ThreadPoolExecutor
from collections import defaultdict
from dotenv import load_dotenv
load_dotenv()
def test_exception_raising():
# this tests if the router raises an exception when invalid params are set
# in this test both deployments have bad keys - Keep this test. It validates if the router raises the most recent exception
litellm.set_verbose=True
import openai
try:
print("testing if router raises an exception")
old_api_key = os.environ["AZURE_API_KEY"]
os.environ["AZURE_API_KEY"] = ""
model_list = [
{
"model_name": "gpt-3.5-turbo", # openai model name
"litellm_params": { # params for litellm completion/embedding call
"model": "azure/chatgpt-v-2",
"api_key": "bad-key",
"api_version": os.getenv("AZURE_API_VERSION"),
"api_base": os.getenv("AZURE_API_BASE")
},
"tpm": 240000,
"rpm": 1800
},
{
"model_name": "gpt-3.5-turbo", # openai model name
"litellm_params": { #
"model": "gpt-3.5-turbo",
"api_key": "bad-key",
},
"tpm": 240000,
"rpm": 1800
}
]
router = Router(model_list=model_list,
redis_host=os.getenv("REDIS_HOST"),
redis_password=os.getenv("REDIS_PASSWORD"),
redis_port=int(os.getenv("REDIS_PORT")),
routing_strategy="simple-shuffle",
set_verbose=False,
num_retries=1) # type: ignore
response = router.completion(
model="gpt-3.5-turbo",
messages=[
{
"role": "user",
"content": "hello this request will fail"
}
]
)
os.environ["AZURE_API_KEY"] = old_api_key
pytest.fail(f"Should have raised an Auth Error")
except openai.AuthenticationError:
print("Test Passed: Caught an OPENAI AUTH Error, Good job. This is what we needed!")
os.environ["AZURE_API_KEY"] = old_api_key
router.reset()
except Exception as e:
os.environ["AZURE_API_KEY"] = old_api_key
print("Got unexpected exception on router!", e)
# test_exception_raising()
def test_reading_key_from_model_list():
# this tests if the router raises an exception when invalid params are set
    # DO NOT REMOVE THIS TEST. It's an IMP ONE. Speak to Ishaan, if you are trying to remove this
litellm.set_verbose=False
import openai
try:
print("testing if router raises an exception")
old_api_key = os.environ["AZURE_API_KEY"]
os.environ.pop("AZURE_API_KEY", None)
model_list = [
{
"model_name": "gpt-3.5-turbo", # openai model name
"litellm_params": { # params for litellm completion/embedding call
"model": "azure/chatgpt-v-2",
"api_key": old_api_key,
"api_version": os.getenv("AZURE_API_VERSION"),
"api_base": os.getenv("AZURE_API_BASE")
},
"tpm": 240000,
"rpm": 1800
}
]
router = Router(model_list=model_list,
redis_host=os.getenv("REDIS_HOST"),
redis_password=os.getenv("REDIS_PASSWORD"),
redis_port=int(os.getenv("REDIS_PORT")),
routing_strategy="simple-shuffle",
set_verbose=True,
num_retries=1) # type: ignore
response = router.completion(
model="gpt-3.5-turbo",
messages=[
{
"role": "user",
"content": "hello this request will fail"
}
]
)
os.environ["AZURE_API_KEY"] = old_api_key
router.reset()
except Exception as e:
os.environ["AZURE_API_KEY"] = old_api_key
print(f"FAILED TEST")
pytest.fail(f"Got unexpected exception on router! - {e}")
# test_reading_key_from_model_list()
### FUNCTION CALLING
def test_function_calling():
model_list = [
{
"model_name": "gpt-3.5-turbo-0613",
"litellm_params": {
"model": "gpt-3.5-turbo-0613",
"api_key": os.getenv("OPENAI_API_KEY"),
},
"tpm": 100000,
"rpm": 10000,
},
]
messages = [
{"role": "user", "content": "What is the weather like in Boston?"}
]
functions = [
{
"name": "get_current_weather",
"description": "Get the current weather in a given location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, e.g. San Francisco, CA"
},
"unit": {
"type": "string",
"enum": ["celsius", "fahrenheit"]
}
},
"required": ["location"]
}
}
]
router = Router(model_list=model_list, routing_strategy="latency-based-routing")
response = router.completion(model="gpt-3.5-turbo-0613", messages=messages, functions=functions)
router.reset()
print(response)
def test_acompletion_on_router():
try:
litellm.set_verbose = False
model_list = [
{
"model_name": "gpt-3.5-turbo",
"litellm_params": {
"model": "gpt-3.5-turbo-0613",
"api_key": os.getenv("OPENAI_API_KEY"),
},
"tpm": 100000,
"rpm": 10000,
},
{
"model_name": "gpt-3.5-turbo",
"litellm_params": {
"model": "azure/chatgpt-v-2",
"api_key": os.getenv("AZURE_API_KEY"),
"api_base": os.getenv("AZURE_API_BASE"),
"api_version": os.getenv("AZURE_API_VERSION")
},
"tpm": 100000,
"rpm": 10000,
}
]
messages = [
{"role": "user", "content": f"write a one sentence poem {time.time()}?"}
]
start_time = time.time()
router = Router(model_list=model_list,
redis_host=os.environ["REDIS_HOST"],
redis_password=os.environ["REDIS_PASSWORD"],
redis_port=os.environ["REDIS_PORT"],
cache_responses=True,
timeout=30,
routing_strategy="simple-shuffle")
async def get_response():
response1 = await router.acompletion(model="gpt-3.5-turbo", messages=messages)
print(f"response1: {response1}")
response2 = await router.acompletion(model="gpt-3.5-turbo", messages=messages)
print(f"response2: {response2}")
assert response1.id == response2.id
assert len(response1.choices[0].message.content) > 0
assert response1.choices[0].message.content == response2.choices[0].message.content
asyncio.run(get_response())
router.reset()
except litellm.Timeout as e:
end_time = time.time()
print(f"timeout error occurred: {end_time - start_time}")
pass
except Exception as e:
traceback.print_exc()
pytest.fail(f"Error occurred: {e}")
# test_acompletion_on_router()
def test_function_calling_on_router():
try:
litellm.set_verbose = True
model_list = [
{
"model_name": "gpt-3.5-turbo",
"litellm_params": {
"model": "gpt-3.5-turbo-0613",
"api_key": os.getenv("OPENAI_API_KEY"),
},
},
]
function1 = [
{
"name": "get_current_weather",
"description": "Get the current weather in a given location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, e.g. San Francisco, CA",
},
"unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
},
"required": ["location"],
},
}
]
router = Router(
model_list=model_list,
redis_host=os.getenv("REDIS_HOST"),
redis_password=os.getenv("REDIS_PASSWORD"),
redis_port=os.getenv("REDIS_PORT")
)
messages=[
{
"role": "user",
"content": "what's the weather in boston"
}
]
response = router.completion(model="gpt-3.5-turbo", messages=messages, functions=function1)
print(f"final returned response: {response}")
router.reset()
assert isinstance(response["choices"][0]["message"]["function_call"], dict)
except Exception as e:
print(f"An exception occurred: {e}")
# test_function_calling_on_router()
def test_aembedding_on_router():
litellm.set_verbose = True
try:
model_list = [
{
"model_name": "text-embedding-ada-002",
"litellm_params": {
"model": "text-embedding-ada-002",
},
"tpm": 100000,
"rpm": 10000,
},
]
async def embedding_call():
router = Router(model_list=model_list)
response = await router.aembedding(
model="text-embedding-ada-002",
input=["good morning from litellm", "this is another item"],
)
print(response)
router.reset()
asyncio.run(embedding_call())
except Exception as e:
traceback.print_exc()
pytest.fail(f"Error occurred: {e}")
# test_aembedding_on_router()
def test_azure_aembedding_on_router():
litellm.set_verbose = True
try:
model_list = [
{
"model_name": "text-embedding-ada-002",
"litellm_params": {
"model": "azure/azure-embedding-model",
"api_key":os.environ['AZURE_API_KEY'],
"api_base": os.environ['AZURE_API_BASE']
},
"tpm": 100000,
"rpm": 10000,
},
]
async def embedding_call():
router = Router(model_list=model_list)
response = await router.aembedding(
model="text-embedding-ada-002",
input=["good morning from litellm"]
)
print(response)
router.reset()
asyncio.run(embedding_call())
except Exception as e:
traceback.print_exc()
pytest.fail(f"Error occurred: {e}")
# test_azure_aembedding_on_router() | [
"What is the weather like in Boston?",
"hello this request will fail",
"what's the weather in boston"
] |
2024-01-10 | ecomoptimizer/litellm | litellm~router.py | # +-----------------------------------------------+
# | |
# | Give Feedback / Get Help |
# | https://github.com/BerriAI/litellm/issues/new |
# | |
# +-----------------------------------------------+
#
# Thank you ! We ❤️ you! - Krrish & Ishaan
from datetime import datetime
from typing import Dict, List, Optional, Union, Literal
import random, threading, time, traceback
import litellm, openai
from litellm.caching import RedisCache, InMemoryCache, DualCache
import logging, asyncio
import inspect, concurrent
from openai import AsyncOpenAI
from collections import defaultdict
class Router:
"""
Example usage:
```python
from litellm import Router
model_list = [
{
"model_name": "azure-gpt-3.5-turbo", # model alias
"litellm_params": { # params for litellm completion/embedding call
"model": "azure/<your-deployment-name-1>",
"api_key": <your-api-key>,
"api_version": <your-api-version>,
"api_base": <your-api-base>
},
},
{
"model_name": "azure-gpt-3.5-turbo", # model alias
"litellm_params": { # params for litellm completion/embedding call
"model": "azure/<your-deployment-name-2>",
"api_key": <your-api-key>,
"api_version": <your-api-version>,
"api_base": <your-api-base>
},
},
{
"model_name": "openai-gpt-3.5-turbo", # model alias
"litellm_params": { # params for litellm completion/embedding call
"model": "gpt-3.5-turbo",
"api_key": <your-api-key>,
            },
        },
    ]
router = Router(model_list=model_list, fallbacks=[{"azure-gpt-3.5-turbo": "openai-gpt-3.5-turbo"}])
```
"""
model_names: List = []
cache_responses: bool = False
default_cache_time_seconds: int = 1 * 60 * 60 # 1 hour
num_retries: int = 0
tenacity = None
def __init__(self,
model_list: Optional[list] = None,
redis_host: Optional[str] = None,
redis_port: Optional[int] = None,
redis_password: Optional[str] = None,
cache_responses: bool = False,
num_retries: int = 0,
timeout: Optional[float] = None,
default_litellm_params = {}, # default params for Router.chat.completion.create
set_verbose: bool = False,
fallbacks: List = [],
allowed_fails: Optional[int] = None,
context_window_fallbacks: List = [],
routing_strategy: Literal["simple-shuffle", "least-busy", "usage-based-routing", "latency-based-routing"] = "simple-shuffle") -> None:
self.set_verbose = set_verbose
if model_list:
self.set_model_list(model_list)
self.healthy_deployments: List = self.model_list
self.deployment_latency_map = {}
for m in model_list:
self.deployment_latency_map[m["litellm_params"]["model"]] = 0
self.allowed_fails = allowed_fails or litellm.allowed_fails
self.failed_calls = InMemoryCache() # cache to track failed call per deployment, if num failed calls within 1 minute > allowed fails, then add it to cooldown
self.num_retries = num_retries or litellm.num_retries or 0
self.timeout = timeout or litellm.request_timeout
self.routing_strategy = routing_strategy
self.fallbacks = fallbacks or litellm.fallbacks
self.context_window_fallbacks = context_window_fallbacks or litellm.context_window_fallbacks
self.model_exception_map: dict = {} # dict to store model: list exceptions. self.exceptions = {"gpt-3.5": ["API KEY Error", "Rate Limit Error", "good morning error"]}
self.total_calls: defaultdict = defaultdict(int) # dict to store total calls made to each model
self.fail_calls: defaultdict = defaultdict(int) # dict to store fail_calls made to each model
self.success_calls: defaultdict = defaultdict(int) # dict to store success_calls made to each model
# make Router.chat.completions.create compatible for openai.chat.completions.create
self.chat = litellm.Chat(params=default_litellm_params)
# default litellm args
self.default_litellm_params = default_litellm_params
self.default_litellm_params.setdefault("timeout", timeout)
self.default_litellm_params.setdefault("max_retries", 0)
### HEALTH CHECK THREAD ###
if self.routing_strategy == "least-busy":
self._start_health_check_thread()
### CACHING ###
redis_cache = None
if redis_host is not None and redis_port is not None and redis_password is not None:
cache_config = {
'type': 'redis',
'host': redis_host,
'port': redis_port,
'password': redis_password
}
redis_cache = RedisCache(host=redis_host, port=redis_port, password=redis_password)
else: # use an in-memory cache
cache_config = {
"type": "local"
}
if cache_responses:
litellm.cache = litellm.Cache(**cache_config) # use Redis for caching completion requests
self.cache_responses = cache_responses
self.cache = DualCache(redis_cache=redis_cache, in_memory_cache=InMemoryCache()) # use a dual cache (Redis+In-Memory) for tracking cooldowns, usage, etc.
## USAGE TRACKING ##
if isinstance(litellm.success_callback, list):
litellm.success_callback.append(self.deployment_callback)
else:
litellm.success_callback = [self.deployment_callback]
if isinstance(litellm.failure_callback, list):
litellm.failure_callback.append(self.deployment_callback_on_failure)
else:
litellm.failure_callback = [self.deployment_callback_on_failure]
self.print_verbose(f"Intialized router with Routing strategy: {self.routing_strategy}\n")
### COMPLETION + EMBEDDING FUNCTIONS
def completion(self,
model: str,
messages: List[Dict[str, str]],
**kwargs):
"""
Example usage:
        response = router.completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hey, how's it going?"}])
"""
try:
kwargs["model"] = model
kwargs["messages"] = messages
kwargs["original_function"] = self._completion
timeout = kwargs.get("request_timeout", self.timeout)
kwargs["num_retries"] = kwargs.get("num_retries", self.num_retries)
kwargs.setdefault("metadata", {}).update({"model_group": model})
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
# Submit the function to the executor with a timeout
future = executor.submit(self.function_with_fallbacks, **kwargs)
response = future.result(timeout=timeout) # type: ignore
return response
except Exception as e:
raise e
def _completion(
self,
model: str,
messages: List[Dict[str, str]],
**kwargs):
try:
# pick the one that is available (lowest TPM/RPM)
deployment = self.get_available_deployment(model=model, messages=messages)
kwargs.setdefault("metadata", {}).update({"deployment": deployment["litellm_params"]["model"]})
data = deployment["litellm_params"].copy()
for k, v in self.default_litellm_params.items():
if k not in data: # prioritize model-specific params > default router params
data[k] = v
########## remove -ModelID-XXXX from model ##############
original_model_string = data["model"]
# Find the index of "ModelID" in the string
self.print_verbose(f"completion model: {original_model_string}")
index_of_model_id = original_model_string.find("-ModelID")
# Remove everything after "-ModelID" if it exists
if index_of_model_id != -1:
data["model"] = original_model_string[:index_of_model_id]
else:
data["model"] = original_model_string
model_client = deployment.get("client", None)
return litellm.completion(**{**data, "messages": messages, "caching": self.cache_responses, "client": model_client, **kwargs})
except Exception as e:
raise e
async def acompletion(self,
model: str,
messages: List[Dict[str, str]],
**kwargs):
try:
kwargs["model"] = model
kwargs["messages"] = messages
kwargs["original_function"] = self._acompletion
kwargs["num_retries"] = kwargs.get("num_retries", self.num_retries)
timeout = kwargs.get("request_timeout", self.timeout)
kwargs.setdefault("metadata", {}).update({"model_group": model})
# response = await asyncio.wait_for(self.async_function_with_fallbacks(**kwargs), timeout=timeout)
response = await self.async_function_with_fallbacks(**kwargs)
return response
except Exception as e:
raise e
async def _acompletion(
self,
model: str,
messages: List[Dict[str, str]],
**kwargs):
try:
self.print_verbose(f"Inside _acompletion()- model: {model}; kwargs: {kwargs}")
original_model_string = None # set a default for this variable
deployment = self.get_available_deployment(model=model, messages=messages)
kwargs.setdefault("metadata", {}).update({"deployment": deployment["litellm_params"]["model"]})
data = deployment["litellm_params"].copy()
for k, v in self.default_litellm_params.items():
if k not in data: # prioritize model-specific params > default router params
data[k] = v
########## remove -ModelID-XXXX from model ##############
original_model_string = data["model"]
# Find the index of "ModelID" in the string
index_of_model_id = original_model_string.find("-ModelID")
# Remove everything after "-ModelID" if it exists
if index_of_model_id != -1:
data["model"] = original_model_string[:index_of_model_id]
else:
data["model"] = original_model_string
model_client = deployment.get("async_client", None)
self.total_calls[original_model_string] +=1
response = await litellm.acompletion(**{**data, "messages": messages, "caching": self.cache_responses, "client": model_client, **kwargs})
self.success_calls[original_model_string] +=1
return response
except Exception as e:
if original_model_string is not None:
self.fail_calls[original_model_string] +=1
raise e
def text_completion(self,
model: str,
prompt: str,
is_retry: Optional[bool] = False,
is_fallback: Optional[bool] = False,
is_async: Optional[bool] = False,
**kwargs):
try:
kwargs.setdefault("metadata", {}).update({"model_group": model})
messages=[{"role": "user", "content": prompt}]
# pick the one that is available (lowest TPM/RPM)
deployment = self.get_available_deployment(model=model, messages=messages)
data = deployment["litellm_params"].copy()
for k, v in self.default_litellm_params.items():
if k not in data: # prioritize model-specific params > default router params
data[k] = v
########## remove -ModelID-XXXX from model ##############
original_model_string = data["model"]
# Find the index of "ModelID" in the string
index_of_model_id = original_model_string.find("-ModelID")
# Remove everything after "-ModelID" if it exists
if index_of_model_id != -1:
data["model"] = original_model_string[:index_of_model_id]
else:
data["model"] = original_model_string
# call via litellm.completion()
return litellm.text_completion(**{**data, "prompt": prompt, "caching": self.cache_responses, **kwargs}) # type: ignore
except Exception as e:
if self.num_retries > 0:
kwargs["model"] = model
kwargs["messages"] = messages
kwargs["original_exception"] = e
kwargs["original_function"] = self.completion
return self.function_with_retries(**kwargs)
else:
raise e
def embedding(self,
model: str,
input: Union[str, List],
is_async: Optional[bool] = False,
**kwargs) -> Union[List[float], None]:
# pick the one that is available (lowest TPM/RPM)
deployment = self.get_available_deployment(model=model, input=input)
kwargs.setdefault("metadata", {}).update({"deployment": deployment["litellm_params"]["model"]})
data = deployment["litellm_params"].copy()
for k, v in self.default_litellm_params.items():
if k not in data: # prioritize model-specific params > default router params
data[k] = v
########## remove -ModelID-XXXX from model ##############
original_model_string = data["model"]
# Find the index of "ModelID" in the string
index_of_model_id = original_model_string.find("-ModelID")
# Remove everything after "-ModelID" if it exists
if index_of_model_id != -1:
data["model"] = original_model_string[:index_of_model_id]
else:
data["model"] = original_model_string
model_client = deployment.get("client", None)
# call via litellm.embedding()
return litellm.embedding(**{**data, "input": input, "caching": self.cache_responses, "client": model_client, **kwargs})
async def aembedding(self,
model: str,
input: Union[str, List],
is_async: Optional[bool] = True,
**kwargs) -> Union[List[float], None]:
# pick the one that is available (lowest TPM/RPM)
deployment = self.get_available_deployment(model=model, input=input)
kwargs.setdefault("metadata", {}).update({"deployment": deployment["litellm_params"]["model"]})
data = deployment["litellm_params"].copy()
for k, v in self.default_litellm_params.items():
if k not in data: # prioritize model-specific params > default router params
data[k] = v
########## remove -ModelID-XXXX from model ##############
original_model_string = data["model"]
# Find the index of "ModelID" in the string
index_of_model_id = original_model_string.find("-ModelID")
# Remove everything after "-ModelID" if it exists
if index_of_model_id != -1:
data["model"] = original_model_string[:index_of_model_id]
else:
data["model"] = original_model_string
model_client = deployment.get("async_client", None)
return await litellm.aembedding(**{**data, "input": input, "caching": self.cache_responses, "client": model_client, **kwargs})
async def async_function_with_fallbacks(self, *args, **kwargs):
"""
Try calling the function_with_retries
If it fails after num_retries, fall back to another model group
"""
model_group = kwargs.get("model")
fallbacks = kwargs.get("fallbacks", self.fallbacks)
context_window_fallbacks = kwargs.get("context_window_fallbacks", self.context_window_fallbacks)
try:
response = await self.async_function_with_retries(*args, **kwargs)
self.print_verbose(f'Async Response: {response}')
return response
except Exception as e:
self.print_verbose(f"An exception occurs")
original_exception = e
try:
self.print_verbose(f"Trying to fallback b/w models")
if isinstance(e, litellm.ContextWindowExceededError) and context_window_fallbacks is not None:
fallback_model_group = None
for item in context_window_fallbacks: # [{"gpt-3.5-turbo": ["gpt-4"]}]
if list(item.keys())[0] == model_group:
fallback_model_group = item[model_group]
break
if fallback_model_group is None:
raise original_exception
for mg in fallback_model_group:
"""
Iterate through the model groups and try calling that deployment
"""
try:
kwargs["model"] = mg
response = await self.async_function_with_retries(*args, **kwargs)
return response
except Exception as e:
pass
                elif fallbacks is not None:
                    self.print_verbose(f"inside model fallbacks: {fallbacks}")
                    fallback_model_group = None
                    for item in fallbacks:
                        if list(item.keys())[0] == model_group:
                            fallback_model_group = item[model_group]
                            break
                    if fallback_model_group is None:
                        raise original_exception
                    for mg in fallback_model_group:
"""
Iterate through the model groups and try calling that deployment
"""
try:
kwargs["model"] = mg
kwargs["metadata"]["model_group"] = mg
response = await self.async_function_with_retries(*args, **kwargs)
return response
except Exception as e:
raise e
except Exception as e:
self.print_verbose(f"An exception occurred - {str(e)}")
traceback.print_exc()
raise original_exception
async def async_function_with_retries(self, *args, **kwargs):
self.print_verbose(f"Inside async function with retries: args - {args}; kwargs - {kwargs}")
backoff_factor = 1
original_function = kwargs.pop("original_function")
fallbacks = kwargs.pop("fallbacks", self.fallbacks)
context_window_fallbacks = kwargs.pop("context_window_fallbacks", self.context_window_fallbacks)
self.print_verbose(f"async function w/ retries: original_function - {original_function}")
num_retries = kwargs.pop("num_retries")
try:
# if the function call is successful, no exception will be raised and we'll break out of the loop
response = await original_function(*args, **kwargs)
return response
except Exception as e:
original_exception = e
### CHECK IF RATE LIMIT / CONTEXT WINDOW ERROR w/ fallbacks available
if ((isinstance(original_exception, litellm.ContextWindowExceededError) and context_window_fallbacks is None)
or (isinstance(original_exception, openai.RateLimitError) and fallbacks is not None)):
raise original_exception
### RETRY
#### check if it should retry + back-off if required
if hasattr(original_exception, "status_code") and hasattr(original_exception, "response") and litellm._should_retry(status_code=original_exception.status_code):
if hasattr(original_exception.response, "headers"):
timeout = litellm._calculate_retry_after(remaining_retries=num_retries, max_retries=num_retries, response_headers=original_exception.response.headers)
else:
timeout = litellm._calculate_retry_after(remaining_retries=num_retries, max_retries=num_retries)
await asyncio.sleep(timeout)
else:
raise original_exception
for current_attempt in range(num_retries):
self.print_verbose(f"retrying request. Current attempt - {current_attempt}; num retries: {num_retries}")
try:
# if the function call is successful, no exception will be raised and we'll break out of the loop
response = await original_function(*args, **kwargs)
if inspect.iscoroutinefunction(response): # async errors are often returned as coroutines
response = await response
return response
except Exception as e:
if hasattr(e, "status_code") and hasattr(e, "response") and litellm._should_retry(status_code=e.status_code):
remaining_retries = num_retries - current_attempt
if hasattr(e.response, "headers"):
timeout = litellm._calculate_retry_after(remaining_retries=num_retries, max_retries=num_retries, response_headers=e.response.headers)
else:
timeout = litellm._calculate_retry_after(remaining_retries=num_retries, max_retries=num_retries)
timeout = litellm._calculate_retry_after(remaining_retries=remaining_retries, max_retries=num_retries)
await asyncio.sleep(timeout)
else:
raise e
raise original_exception
def function_with_fallbacks(self, *args, **kwargs):
"""
Try calling the function_with_retries
If it fails after num_retries, fall back to another model group
"""
model_group = kwargs.get("model")
fallbacks = kwargs.get("fallbacks", self.fallbacks)
context_window_fallbacks = kwargs.get("context_window_fallbacks", self.context_window_fallbacks)
try:
response = self.function_with_retries(*args, **kwargs)
return response
except Exception as e:
original_exception = e
self.print_verbose(f"An exception occurs {original_exception}")
try:
self.print_verbose(f"Trying to fallback b/w models. Initial model group: {model_group}")
if isinstance(e, litellm.ContextWindowExceededError) and context_window_fallbacks is not None:
self.print_verbose(f"inside context window fallbacks: {context_window_fallbacks}")
fallback_model_group = None
for item in context_window_fallbacks: # [{"gpt-3.5-turbo": ["gpt-4"]}]
if list(item.keys())[0] == model_group:
fallback_model_group = item[model_group]
break
if fallback_model_group is None:
raise original_exception
for mg in fallback_model_group:
"""
Iterate through the model groups and try calling that deployment
"""
try:
kwargs["model"] = mg
response = self.function_with_fallbacks(*args, **kwargs)
return response
except Exception as e:
pass
elif fallbacks is not None:
self.print_verbose(f"inside model fallbacks: {fallbacks}")
fallback_model_group = None
for item in fallbacks:
if list(item.keys())[0] == model_group:
fallback_model_group = item[model_group]
break
if fallback_model_group is None:
raise original_exception
for mg in fallback_model_group:
"""
Iterate through the model groups and try calling that deployment
"""
try:
kwargs["model"] = mg
response = self.function_with_fallbacks(*args, **kwargs)
return response
except Exception as e:
pass
except Exception as e:
raise e
raise original_exception
def function_with_retries(self, *args, **kwargs):
"""
        Try calling the deployment up to `num_retries` times, backing off between attempts on rate-limit errors.
"""
self.print_verbose(f"Inside function with retries: args - {args}; kwargs - {kwargs}")
backoff_factor = 1
original_function = kwargs.pop("original_function")
num_retries = kwargs.pop("num_retries")
fallbacks = kwargs.pop("fallbacks", self.fallbacks)
context_window_fallbacks = kwargs.pop("context_window_fallbacks", self.context_window_fallbacks)
try:
# if the function call is successful, no exception will be raised and we'll break out of the loop
response = original_function(*args, **kwargs)
return response
except Exception as e:
original_exception = e
self.print_verbose(f"num retries in function with retries: {num_retries}")
### CHECK IF RATE LIMIT / CONTEXT WINDOW ERROR
if ((isinstance(original_exception, litellm.ContextWindowExceededError) and context_window_fallbacks is None)
or (isinstance(original_exception, openai.RateLimitError) and fallbacks is not None)):
raise original_exception
### RETRY
for current_attempt in range(num_retries):
self.print_verbose(f"retrying request. Current attempt - {current_attempt}; retries left: {num_retries}")
try:
# if the function call is successful, no exception will be raised and we'll break out of the loop
response = original_function(*args, **kwargs)
return response
except openai.RateLimitError as e:
if num_retries > 0:
remaining_retries = num_retries - current_attempt
timeout = litellm._calculate_retry_after(remaining_retries=remaining_retries, max_retries=num_retries)
# on RateLimitError we'll wait for an exponential time before trying again
time.sleep(timeout)
else:
raise e
except Exception as e:
# for any other exception types, immediately retry
if num_retries > 0:
pass
else:
raise e
raise original_exception
### HELPER FUNCTIONS
def deployment_callback(
self,
kwargs, # kwargs to completion
completion_response, # response from completion
start_time, end_time # start/end time
):
"""
Function LiteLLM submits a callback to after a successful
completion. Purpose of this is to update TPM/RPM usage per model
"""
model_name = kwargs.get('model', None) # i.e. gpt35turbo
custom_llm_provider = kwargs.get("litellm_params", {}).get('custom_llm_provider', None) # i.e. azure
if custom_llm_provider:
model_name = f"{custom_llm_provider}/{model_name}"
if kwargs["stream"] is True:
if kwargs.get("complete_streaming_response"):
total_tokens = kwargs.get("complete_streaming_response")['usage']['total_tokens']
self._set_deployment_usage(model_name, total_tokens)
else:
total_tokens = completion_response['usage']['total_tokens']
self._set_deployment_usage(model_name, total_tokens)
self.deployment_latency_map[model_name] = (end_time - start_time).total_seconds()
def deployment_callback_on_failure(
self,
kwargs, # kwargs to completion
completion_response, # response from completion
start_time, end_time # start/end time
):
try:
exception = kwargs.get("exception", None)
exception_type = type(exception)
exception_status = getattr(exception, 'status_code', "")
exception_cause = getattr(exception, '__cause__', "")
exception_message = getattr(exception, 'message', "")
            exception_str = str(exception_type) + " Status: " + str(exception_status) + " Message: " + str(exception_cause) + str(exception_message) + " Full exception: " + str(exception)
model_name = kwargs.get('model', None) # i.e. gpt35turbo
custom_llm_provider = kwargs.get("litellm_params", {}).get('custom_llm_provider', None) # i.e. azure
metadata = kwargs.get("litellm_params", {}).get('metadata', None)
if metadata:
deployment = metadata.get("deployment", None)
self._set_cooldown_deployments(deployment)
deployment_exceptions = self.model_exception_map.get(deployment, [])
deployment_exceptions.append(exception_str)
self.model_exception_map[deployment] = deployment_exceptions
self.print_verbose("\nEXCEPTION FOR DEPLOYMENTS\n")
self.print_verbose(self.model_exception_map)
for model in self.model_exception_map:
self.print_verbose(f"Model {model} had {len(self.model_exception_map[model])} exception")
if custom_llm_provider:
model_name = f"{custom_llm_provider}/{model_name}"
except Exception as e:
raise e
def _set_cooldown_deployments(self,
deployment: str):
"""
Add a model to the list of models being cooled down for that minute, if it exceeds the allowed fails / minute
"""
current_minute = datetime.now().strftime("%H-%M")
# get current fails for deployment
# update the number of failed calls
# if it's > allowed fails
# cooldown deployment
current_fails = self.failed_calls.get_cache(key=deployment) or 0
updated_fails = current_fails + 1
self.print_verbose(f"Attempting to add {deployment} to cooldown list. updated_fails: {updated_fails}; self.allowed_fails: {self.allowed_fails}")
if updated_fails > self.allowed_fails:
# get the current cooldown list for that minute
cooldown_key = f"{current_minute}:cooldown_models" # group cooldown models by minute to reduce number of redis calls
cached_value = self.cache.get_cache(key=cooldown_key)
self.print_verbose(f"adding {deployment} to cooldown models")
# update value
try:
if deployment in cached_value:
pass
else:
cached_value = cached_value + [deployment]
# save updated value
self.cache.set_cache(value=cached_value, key=cooldown_key, ttl=1)
except:
cached_value = [deployment]
# save updated value
self.cache.set_cache(value=cached_value, key=cooldown_key, ttl=1)
else:
self.failed_calls.set_cache(key=deployment, value=updated_fails, ttl=1)
def _get_cooldown_deployments(self):
"""
Get the list of models being cooled down for this minute
"""
current_minute = datetime.now().strftime("%H-%M")
# get the current cooldown list for that minute
cooldown_key = f"{current_minute}:cooldown_models"
# ----------------------
# Return cooldown models
# ----------------------
cooldown_models = self.cache.get_cache(key=cooldown_key) or []
self.print_verbose(f"retrieve cooldown models: {cooldown_models}")
return cooldown_models
def get_usage_based_available_deployment(self,
model: str,
messages: Optional[List[Dict[str, str]]] = None,
input: Optional[Union[str, List]] = None):
"""
Returns a deployment with the lowest TPM/RPM usage.
"""
# get list of potential deployments
potential_deployments = []
for item in self.model_list:
if item["model_name"] == model:
potential_deployments.append(item)
# get current call usage
token_count = 0
if messages is not None:
token_count = litellm.token_counter(model=model, messages=messages)
elif input is not None:
if isinstance(input, List):
input_text = "".join(text for text in input)
else:
input_text = input
token_count = litellm.token_counter(model=model, text=input_text)
# -----------------------
# Find lowest used model
# ----------------------
lowest_tpm = float("inf")
deployment = None
# return deployment with lowest tpm usage
for item in potential_deployments:
item_tpm, item_rpm = self._get_deployment_usage(deployment_name=item["litellm_params"]["model"])
if item_tpm == 0:
return item
elif ("tpm" in item and item_tpm + token_count > item["tpm"]
or "rpm" in item and item_rpm + 1 >= item["rpm"]): # if user passed in tpm / rpm in the model_list
continue
elif item_tpm < lowest_tpm:
lowest_tpm = item_tpm
deployment = item
# if none, raise exception
if deployment is None:
raise ValueError("No models available.")
# return model
return deployment
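    # Illustrative example (hypothetical deployment names and usage): if "azure/gpt-deploy-1"
    # has used 1200 tokens this minute and "azure/gpt-deploy-2" only 300, the second
    # deployment is returned, provided its projected usage stays under any tpm/rpm
    # limits configured for it in the model_list.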
def _get_deployment_usage(
self,
deployment_name: str
):
# ------------
# Setup values
# ------------
current_minute = datetime.now().strftime("%H-%M")
tpm_key = f'{deployment_name}:tpm:{current_minute}'
rpm_key = f'{deployment_name}:rpm:{current_minute}'
# ------------
# Return usage
# ------------
tpm = self.cache.get_cache(key=tpm_key) or 0
rpm = self.cache.get_cache(key=rpm_key) or 0
return int(tpm), int(rpm)
def increment(self, key: str, increment_value: int):
# get value
cached_value = self.cache.get_cache(key=key)
# update value
try:
cached_value = cached_value + increment_value
except:
cached_value = increment_value
# save updated value
self.cache.set_cache(value=cached_value, key=key, ttl=self.default_cache_time_seconds)
def _set_deployment_usage(
self,
model_name: str,
total_tokens: int
):
# ------------
# Setup values
# ------------
current_minute = datetime.now().strftime("%H-%M")
tpm_key = f'{model_name}:tpm:{current_minute}'
rpm_key = f'{model_name}:rpm:{current_minute}'
# ------------
# Update usage
# ------------
self.increment(tpm_key, total_tokens)
self.increment(rpm_key, 1)
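    # Illustrative example (hypothetical deployment name): a call through
    # "azure/chatgpt-v-2" at 14:32 increments the cache keys
    # "azure/chatgpt-v-2:tpm:14-32" and "azure/chatgpt-v-2:rpm:14-32", so routing
    # decisions only consult the current minute's counters.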
def _start_health_check_thread(self):
"""
Starts a separate thread to perform health checks periodically.
"""
health_check_thread = threading.Thread(target=self._perform_health_checks, daemon=True)
health_check_thread.start()
def _perform_health_checks(self):
"""
Periodically performs health checks on the servers.
Updates the list of healthy servers accordingly.
"""
while True:
self.healthy_deployments = self._health_check()
# Adjust the time interval based on your needs
time.sleep(15)
def _health_check(self):
"""
Performs a health check on the deployments
Returns the list of healthy deployments
"""
healthy_deployments = []
for deployment in self.model_list:
litellm_args = deployment["litellm_params"]
try:
start_time = time.time()
litellm.completion(messages=[{"role": "user", "content": ""}], max_tokens=1, **litellm_args) # hit the server with a blank message to see how long it takes to respond
end_time = time.time()
response_time = end_time - start_time
logging.debug(f"response_time: {response_time}")
healthy_deployments.append((deployment, response_time))
healthy_deployments.sort(key=lambda x: x[1])
except Exception as e:
pass
return healthy_deployments
def weighted_shuffle_by_latency(self, items):
# Sort the items by latency
sorted_items = sorted(items, key=lambda x: x[1])
# Get only the latencies
latencies = [i[1] for i in sorted_items]
# Calculate the sum of all latencies
total_latency = sum(latencies)
# Calculate the weight for each latency (lower latency = higher weight)
weights = [total_latency-latency for latency in latencies]
# Get a weighted random item
if sum(weights) == 0:
chosen_item = random.choice(sorted_items)[0]
else:
chosen_item = random.choices(sorted_items, weights=weights, k=1)[0][0]
return chosen_item
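    # Illustrative example (hypothetical latencies): for
    #   items = [("deployment-a", 0.2), ("deployment-b", 0.8)]
    # the total latency is 1.0, the weights become [0.8, 0.2], and "deployment-a"
    # is picked roughly four times as often as "deployment-b".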
def set_model_list(self, model_list: list):
self.model_list = model_list
# we add api_base/api_key each model so load balancing between azure/gpt on api_base1 and api_base2 works
import os
for model in self.model_list:
litellm_params = model.get("litellm_params", {})
model_name = litellm_params.get("model")
            #### for OpenAI / Azure we need to initialize the Client for High Traffic ########
custom_llm_provider = litellm_params.get("custom_llm_provider")
if custom_llm_provider is None:
custom_llm_provider = model_name.split("/",1)[0]
if (
model_name in litellm.open_ai_chat_completion_models
or custom_llm_provider == "custom_openai"
or custom_llm_provider == "deepinfra"
or custom_llm_provider == "perplexity"
or custom_llm_provider == "anyscale"
or custom_llm_provider == "openai"
or custom_llm_provider == "azure"
or "ft:gpt-3.5-turbo" in model_name
or model_name in litellm.open_ai_embedding_models
):
# glorified / complicated reading of configs
                # user can pass vars directly or they can pass os.environ/AZURE_API_KEY, in which case we will read the env
# we do this here because we init clients for Azure, OpenAI and we need to set the right key
api_key = litellm_params.get("api_key")
if api_key and api_key.startswith("os.environ/"):
api_key_env_name = api_key.replace("os.environ/", "")
api_key = os.getenv(api_key_env_name)
api_base = litellm_params.get("api_base")
if api_base and api_base.startswith("os.environ/"):
api_base_env_name = api_base.replace("os.environ/", "")
api_base = os.getenv(api_base_env_name)
api_version = litellm_params.get("api_version")
if api_version and api_version.startswith("os.environ/"):
api_version_env_name = api_version.replace("os.environ/", "")
api_version = os.getenv(api_version_env_name)
self.print_verbose(f"Initializing OpenAI Client for {model_name}, {str(api_base)}")
if "azure" in model_name:
if api_version is None:
api_version = "2023-07-01-preview"
model["async_client"] = openai.AsyncAzureOpenAI(
api_key=api_key,
azure_endpoint=api_base,
api_version=api_version
)
model["client"] = openai.AzureOpenAI(
api_key=api_key,
azure_endpoint=api_base,
api_version=api_version
)
else:
model["async_client"] = openai.AsyncOpenAI(
api_key=api_key,
base_url=api_base,
)
model["client"] = openai.OpenAI(
api_key=api_key,
base_url=api_base,
)
############ End of initializing Clients for OpenAI/Azure ###################
model_id = ""
for key in model["litellm_params"]:
if key != "api_key":
model_id+= str(model["litellm_params"][key])
model["litellm_params"]["model"] += "-ModelID-" + model_id
############ Users can either pass tpm/rpm as a litellm_param or a router param ###########
# for get_available_deployment, we use the litellm_param["rpm"]
# in this snippet we also set rpm to be a litellm_param
if model["litellm_params"].get("rpm") is None and model.get("rpm") is not None:
model["litellm_params"]["rpm"] = model.get("rpm")
if model["litellm_params"].get("tpm") is None and model.get("tpm") is not None:
model["litellm_params"]["tpm"] = model.get("tpm")
self.model_names = [m["model_name"] for m in model_list]
def get_model_names(self):
return self.model_names
def print_verbose(self, print_statement):
if self.set_verbose or litellm.set_verbose:
print(f"LiteLLM.Router: {print_statement}") # noqa
def get_available_deployment(self,
model: str,
messages: Optional[List[Dict[str, str]]] = None,
input: Optional[Union[str, List]] = None):
"""
Returns the deployment based on routing strategy
"""
## get healthy deployments
### get all deployments
### filter out the deployments currently cooling down
healthy_deployments = [m for m in self.model_list if m["model_name"] == model]
if len(healthy_deployments) == 0:
# check if the user sent in a deployment name instead
healthy_deployments = [m for m in self.model_list if m["litellm_params"]["model"] == model]
self.print_verbose(f"initial list of deployments: {healthy_deployments}")
deployments_to_remove = []
cooldown_deployments = self._get_cooldown_deployments()
self.print_verbose(f"cooldown deployments: {cooldown_deployments}")
### FIND UNHEALTHY DEPLOYMENTS
for deployment in healthy_deployments:
deployment_name = deployment["litellm_params"]["model"]
if deployment_name in cooldown_deployments:
deployments_to_remove.append(deployment)
### FILTER OUT UNHEALTHY DEPLOYMENTS
for deployment in deployments_to_remove:
healthy_deployments.remove(deployment)
self.print_verbose(f"healthy deployments: length {len(healthy_deployments)} {healthy_deployments}")
if len(healthy_deployments) == 0:
raise ValueError("No models available")
if litellm.model_alias_map and model in litellm.model_alias_map:
model = litellm.model_alias_map[
model
] # update the model to the actual value if an alias has been passed in
if self.routing_strategy == "least-busy":
if len(self.healthy_deployments) > 0:
for item in self.healthy_deployments:
if item[0]["model_name"] == model: # first one in queue will be the one with the most availability
return item[0]
else:
raise ValueError("No models available.")
elif self.routing_strategy == "simple-shuffle":
# if users pass rpm or tpm, we do a random weighted pick - based on rpm/tpm
############## Check if we can do a RPM/TPM based weighted pick #################
rpm = healthy_deployments[0].get("litellm_params").get("rpm", None)
if rpm is not None:
# use weight-random pick if rpms provided
rpms = [m["litellm_params"].get("rpm", 0) for m in healthy_deployments]
self.print_verbose(f"\nrpms {rpms}")
total_rpm = sum(rpms)
weights = [rpm / total_rpm for rpm in rpms]
self.print_verbose(f"\n weights {weights}")
# Perform weighted random pick
selected_index = random.choices(range(len(rpms)), weights=weights)[0]
self.print_verbose(f"\n selected index, {selected_index}")
deployment = healthy_deployments[selected_index]
return deployment or deployment[0]
############## Check if we can do a RPM/TPM based weighted pick #################
tpm = healthy_deployments[0].get("litellm_params").get("tpm", None)
if tpm is not None:
# use weight-random pick if rpms provided
tpms = [m["litellm_params"].get("tpm", 0) for m in healthy_deployments]
self.print_verbose(f"\ntpms {tpms}")
total_tpm = sum(tpms)
weights = [tpm / total_tpm for tpm in tpms]
self.print_verbose(f"\n weights {weights}")
# Perform weighted random pick
selected_index = random.choices(range(len(tpms)), weights=weights)[0]
self.print_verbose(f"\n selected index, {selected_index}")
deployment = healthy_deployments[selected_index]
return deployment or deployment[0]
############## No RPM/TPM passed, we do a random pick #################
item = random.choice(healthy_deployments)
return item or item[0]
elif self.routing_strategy == "latency-based-routing":
returned_item = None
lowest_latency = float('inf')
### shuffles with priority for lowest latency
# items_with_latencies = [('A', 10), ('B', 20), ('C', 30), ('D', 40)]
items_with_latencies = []
for item in healthy_deployments:
items_with_latencies.append((item, self.deployment_latency_map[item["litellm_params"]["model"]]))
returned_item = self.weighted_shuffle_by_latency(items_with_latencies)
return returned_item
elif self.routing_strategy == "usage-based-routing":
return self.get_usage_based_available_deployment(model=model, messages=messages, input=input)
raise ValueError("No models available.")
def flush_cache(self):
self.cache.flush_cache()
def reset(self):
## clean up on close
litellm.success_callback = []
litellm.failure_callback = []
self.flush_cache()
| [] |
2024-01-10 | ecomoptimizer/litellm | litellm~tests~test_exceptions.py | from openai import AuthenticationError, BadRequestError, RateLimitError, OpenAIError
import os
import sys
import traceback
import subprocess
sys.path.insert(
0, os.path.abspath("../..")
) # Adds the parent directory to the system path
import litellm
from litellm import (
embedding,
completion,
# AuthenticationError,
ContextWindowExceededError,
# RateLimitError,
# ServiceUnavailableError,
# OpenAIError,
)
from concurrent.futures import ThreadPoolExecutor
import pytest
litellm.vertex_project = "pathrise-convert-1606954137718"
litellm.vertex_location = "us-central1"
# litellm.failure_callback = ["sentry"]
#### What this tests ####
# This tests exception mapping -> trigger an exception from an llm provider -> assert if output is of the expected type
# 5 providers -> OpenAI, Azure, Anthropic, Cohere, Replicate
# 3 main types of exceptions -> - Rate Limit Errors, Context Window Errors, Auth errors (incorrect/rotated key, etc.)
# Approach: Run each model through the test -> assert if the correct error (always the same one) is triggered
models = ["command-nightly"]
# Test 1: Context Window Errors
@pytest.mark.parametrize("model", models)
def test_context_window(model):
sample_text = "Say error 50 times" * 1000000
messages = [{"content": sample_text, "role": "user"}]
try:
litellm.set_verbose = False
response = completion(model=model, messages=messages)
print(f"response: {response}")
print("FAILED!")
pytest.fail(f"An exception occurred")
except ContextWindowExceededError as e:
print(f"Worked!")
except RateLimitError:
print("RateLimited!")
except Exception as e:
print(f"{e}")
pytest.fail(f"An error occcurred - {e}")
@pytest.mark.parametrize("model", models)
def test_context_window_with_fallbacks(model):
ctx_window_fallback_dict = {"command-nightly": "claude-2", "gpt-3.5-turbo-instruct": "gpt-3.5-turbo-16k", "azure/chatgpt-v-2": "gpt-3.5-turbo-16k"}
sample_text = "how does a court case get to the Supreme Court?" * 1000
messages = [{"content": sample_text, "role": "user"}]
completion(model=model, messages=messages, context_window_fallback_dict=ctx_window_fallback_dict)
# for model in litellm.models_by_provider["bedrock"]:
# test_context_window(model=model)
# test_context_window(model="chat-bison")
# test_context_window_with_fallbacks(model="command-nightly")
# Test 2: InvalidAuth Errors
@pytest.mark.parametrize("model", models)
def invalid_auth(model): # set the model key to an invalid key, depending on the model
messages = [{"content": "Hello, how are you?", "role": "user"}]
temporary_key = None
try:
if model == "gpt-3.5-turbo" or model == "gpt-3.5-turbo-instruct":
temporary_key = os.environ["OPENAI_API_KEY"]
os.environ["OPENAI_API_KEY"] = "bad-key"
elif "bedrock" in model:
temporary_aws_access_key = os.environ["AWS_ACCESS_KEY_ID"]
os.environ["AWS_ACCESS_KEY_ID"] = "bad-key"
temporary_aws_region_name = os.environ["AWS_REGION_NAME"]
os.environ["AWS_REGION_NAME"] = "bad-key"
temporary_secret_key = os.environ["AWS_SECRET_ACCESS_KEY"]
os.environ["AWS_SECRET_ACCESS_KEY"] = "bad-key"
elif model == "azure/chatgpt-v-2":
temporary_key = os.environ["AZURE_API_KEY"]
os.environ["AZURE_API_KEY"] = "bad-key"
elif model == "claude-instant-1":
temporary_key = os.environ["ANTHROPIC_API_KEY"]
os.environ["ANTHROPIC_API_KEY"] = "bad-key"
elif model == "command-nightly":
temporary_key = os.environ["COHERE_API_KEY"]
os.environ["COHERE_API_KEY"] = "bad-key"
elif "j2" in model:
temporary_key = os.environ["AI21_API_KEY"]
os.environ["AI21_API_KEY"] = "bad-key"
elif "togethercomputer" in model:
temporary_key = os.environ["TOGETHERAI_API_KEY"]
os.environ["TOGETHERAI_API_KEY"] = "84060c79880fc49df126d3e87b53f8a463ff6e1c6d27fe64207cde25cdfcd1f24a"
elif model in litellm.openrouter_models:
temporary_key = os.environ["OPENROUTER_API_KEY"]
os.environ["OPENROUTER_API_KEY"] = "bad-key"
elif model in litellm.aleph_alpha_models:
temporary_key = os.environ["ALEPH_ALPHA_API_KEY"]
os.environ["ALEPH_ALPHA_API_KEY"] = "bad-key"
elif model in litellm.nlp_cloud_models:
temporary_key = os.environ["NLP_CLOUD_API_KEY"]
os.environ["NLP_CLOUD_API_KEY"] = "bad-key"
elif (
model
== "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
):
temporary_key = os.environ["REPLICATE_API_KEY"]
os.environ["REPLICATE_API_KEY"] = "bad-key"
print(f"model: {model}")
response = completion(
model=model, messages=messages
)
print(f"response: {response}")
except AuthenticationError as e:
print(f"AuthenticationError Caught Exception - {str(e)}")
except (
OpenAIError
) as e: # is at least an openai error -> in case of random model errors - e.g. overloaded server
print(f"OpenAIError Caught Exception - {e}")
except Exception as e:
print(type(e))
print(type(AuthenticationError))
print(e.__class__.__name__)
print(f"Uncaught Exception - {e}")
pytest.fail(f"Error occurred: {e}")
if temporary_key != None: # reset the key
if model == "gpt-3.5-turbo":
os.environ["OPENAI_API_KEY"] = temporary_key
elif model == "chatgpt-test":
os.environ["AZURE_API_KEY"] = temporary_key
azure = True
elif model == "claude-instant-1":
os.environ["ANTHROPIC_API_KEY"] = temporary_key
elif model == "command-nightly":
os.environ["COHERE_API_KEY"] = temporary_key
elif (
model
== "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
):
os.environ["REPLICATE_API_KEY"] = temporary_key
elif "j2" in model:
os.environ["AI21_API_KEY"] = temporary_key
elif ("togethercomputer" in model):
os.environ["TOGETHERAI_API_KEY"] = temporary_key
elif model in litellm.aleph_alpha_models:
os.environ["ALEPH_ALPHA_API_KEY"] = temporary_key
elif model in litellm.nlp_cloud_models:
os.environ["NLP_CLOUD_API_KEY"] = temporary_key
elif "bedrock" in model:
os.environ["AWS_ACCESS_KEY_ID"] = temporary_aws_access_key
os.environ["AWS_REGION_NAME"] = temporary_aws_region_name
os.environ["AWS_SECRET_ACCESS_KEY"] = temporary_secret_key
return
# for model in litellm.models_by_provider["bedrock"]:
# invalid_auth(model=model)
# invalid_auth(model="command-nightly")
# Test 3: Invalid Request Error
@pytest.mark.parametrize("model", models)
def test_invalid_request_error(model):
messages = [{"content": "hey, how's it going?", "role": "user"}]
with pytest.raises(BadRequestError):
completion(model=model, messages=messages, max_tokens="hello world")
def test_completion_azure_exception():
try:
import openai
print("azure gpt-3.5 test\n\n")
litellm.set_verbose=False
## Test azure call
old_azure_key = os.environ["AZURE_API_KEY"]
os.environ["AZURE_API_KEY"] = "good morning"
response = completion(
model="azure/chatgpt-v-2",
messages=[
{
"role": "user",
"content": "hello"
}
],
)
print(f"response: {response}")
print(response)
except openai.AuthenticationError as e:
os.environ["AZURE_API_KEY"] = old_azure_key
print("good job got the correct error for azure when key not set")
except Exception as e:
pytest.fail(f"Error occurred: {e}")
test_completion_azure_exception()
async def asynctest_completion_azure_exception():
try:
import openai
import litellm
print("azure gpt-3.5 test\n\n")
litellm.set_verbose=False
## Test azure call
old_azure_key = os.environ["AZURE_API_KEY"]
os.environ["AZURE_API_KEY"] = "good morning"
response = await litellm.acompletion(
model="azure/chatgpt-v-2",
messages=[
{
"role": "user",
"content": "hello"
}
],
)
print(f"response: {response}")
print(response)
except openai.AuthenticationError as e:
os.environ["AZURE_API_KEY"] = old_azure_key
print("good job got the correct error for azure when key not set")
print(e)
except Exception as e:
print("Got wrong exception")
print("exception", e)
pytest.fail(f"Error occurred: {e}")
# import asyncio
# asyncio.run(
# asynctest_completion_azure_exception()
# )
def test_completion_openai_exception():
# test if openai:gpt raises openai.AuthenticationError
try:
import openai
print("openai gpt-3.5 test\n\n")
litellm.set_verbose=False
## Test azure call
old_azure_key = os.environ["OPENAI_API_KEY"]
os.environ["OPENAI_API_KEY"] = "good morning"
response = completion(
model="gpt-4",
messages=[
{
"role": "user",
"content": "hello"
}
],
)
print(f"response: {response}")
print(response)
except openai.AuthenticationError as e:
os.environ["OPENAI_API_KEY"] = old_azure_key
print("good job got the correct error for openai when key not set")
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_openai_exception()
# # test_invalid_request_error(model="command-nightly")
# # Test 3: Rate Limit Errors
# def test_model_call(model):
# try:
# sample_text = "how does a court case get to the Supreme Court?"
# messages = [{ "content": sample_text,"role": "user"}]
# print(f"model: {model}")
# response = completion(model=model, messages=messages)
# except RateLimitError as e:
# print(f"headers: {e.response.headers}")
# return True
# # except OpenAIError: # is at least an openai error -> in case of random model errors - e.g. overloaded server
# # return True
# except Exception as e:
# print(f"Uncaught Exception {model}: {type(e).__name__} - {e}")
# traceback.print_exc()
# pass
# return False
# # Repeat each model 500 times
# # extended_models = [model for model in models for _ in range(250)]
# extended_models = ["azure/chatgpt-v-2" for _ in range(250)]
# def worker(model):
# return test_model_call(model)
# # Create a dictionary to store the results
# counts = {True: 0, False: 0}
# # Use Thread Pool Executor
# with ThreadPoolExecutor(max_workers=500) as executor:
# # Use map to start the operation in thread pool
# results = executor.map(worker, extended_models)
# # Iterate over results and count True/False
# for result in results:
# counts[result] += 1
# accuracy_score = counts[True]/(counts[True] + counts[False])
# print(f"accuracy_score: {accuracy_score}")
| [
"hey, how's it going?",
"Hello, how are you?",
"hello"
] |
2024-01-10 | ecomoptimizer/litellm | litellm~tests~test_embedding.py | import sys, os
import traceback
import pytest
from dotenv import load_dotenv
import openai
load_dotenv()
sys.path.insert(
0, os.path.abspath("../..")
) # Adds the parent directory to the system path
import litellm
from litellm import embedding, completion
litellm.set_verbose = False
def test_openai_embedding():
try:
litellm.set_verbose=True
response = embedding(
model="text-embedding-ada-002",
input=["good morning from litellm", "this is another item"],
metadata = {"anything": "good day"}
)
litellm_response = dict(response)
litellm_response_keys = set(litellm_response.keys())
litellm_response_keys.discard('_response_ms')
print(litellm_response_keys)
print("LiteLLM Response\n")
# print(litellm_response)
# same request with OpenAI 1.0+
import openai
client = openai.OpenAI(api_key=os.environ['OPENAI_API_KEY'])
response = client.embeddings.create(
model="text-embedding-ada-002", input=["good morning from litellm", "this is another item"]
)
response = dict(response)
openai_response_keys = set(response.keys())
print(openai_response_keys)
        assert litellm_response_keys == openai_response_keys # ensure the keys in the litellm response are exactly what the openai package returns
assert len(litellm_response["data"]) == 2 # expect two embedding responses from litellm_response since input had two
print(openai_response_keys)
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_openai_embedding()
def test_openai_azure_embedding_simple():
try:
response = embedding(
model="azure/azure-embedding-model",
input=["good morning from litellm"],
)
print(response)
response_keys = set(dict(response).keys())
response_keys.discard('_response_ms')
assert set(["usage", "model", "object", "data"]) == set(response_keys) #assert litellm response has expected keys from OpenAI embedding response
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_openai_azure_embedding_simple()
def test_openai_azure_embedding_timeouts():
try:
response = embedding(
model="azure/azure-embedding-model",
input=["good morning from litellm"],
timeout=0.00001
)
print(response)
except openai.APITimeoutError:
print("Good job got timeout error!")
pass
except Exception as e:
pytest.fail(f"Expected timeout error, did not get the correct error. Instead got {e}")
# test_openai_azure_embedding_timeouts()
def test_openai_embedding_timeouts():
try:
response = embedding(
model="text-embedding-ada-002",
input=["good morning from litellm"],
timeout=0.00001
)
print(response)
except openai.APITimeoutError:
print("Good job got OpenAI timeout error!")
pass
except Exception as e:
pytest.fail(f"Expected timeout error, did not get the correct error. Instead got {e}")
# test_openai_embedding_timeouts()
def test_openai_azure_embedding():
try:
api_key = os.environ['AZURE_API_KEY']
api_base = os.environ['AZURE_API_BASE']
api_version = os.environ['AZURE_API_VERSION']
os.environ['AZURE_API_VERSION'] = ""
os.environ['AZURE_API_BASE'] = ""
os.environ['AZURE_API_KEY'] = ""
response = embedding(
model="azure/azure-embedding-model",
input=["good morning from litellm", "this is another item"],
api_key=api_key,
api_base=api_base,
api_version=api_version,
)
print(response)
os.environ['AZURE_API_VERSION'] = api_version
os.environ['AZURE_API_BASE'] = api_base
os.environ['AZURE_API_KEY'] = api_key
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_openai_azure_embedding()
# test_openai_embedding()
def test_cohere_embedding():
try:
# litellm.set_verbose=True
response = embedding(
model="embed-english-v2.0", input=["good morning from litellm", "this is another item"]
)
print(f"response:", response)
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_cohere_embedding()
def test_cohere_embedding3():
try:
litellm.set_verbose=True
response = embedding(
model="embed-english-v3.0",
input=["good morning from litellm", "this is another item"],
)
print(f"response:", response)
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_cohere_embedding3()
def test_bedrock_embedding():
try:
response = embedding(
model="amazon.titan-embed-text-v1", input=["good morning from litellm, attempting to embed data",
"lets test a second string for good measure"]
)
print(f"response:", response)
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_bedrock_embedding()
# comment out hf tests - since hf endpoints are unstable
def test_hf_embedding():
try:
# huggingface/microsoft/codebert-base
# huggingface/facebook/bart-large
response = embedding(
model="huggingface/sentence-transformers/all-MiniLM-L6-v2", input=["good morning from litellm", "this is another item"]
)
print(f"response:", response)
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_hf_embedding()
# test async embeddings
def test_aembedding():
try:
import asyncio
async def embedding_call():
try:
response = await litellm.aembedding(
model="text-embedding-ada-002",
input=["good morning from litellm", "this is another item"]
)
print(response)
except Exception as e:
pytest.fail(f"Error occurred: {e}")
asyncio.run(embedding_call())
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_aembedding()
def test_aembedding_azure():
try:
import asyncio
async def embedding_call():
try:
response = await litellm.aembedding(
model="azure/azure-embedding-model",
input=["good morning from litellm", "this is another item"]
)
print(response)
except Exception as e:
pytest.fail(f"Error occurred: {e}")
asyncio.run(embedding_call())
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_aembedding_azure()
# def test_custom_openai_embedding():
# litellm.set_verbose=True
# response = embedding(
# model="openai/custom_embedding",
# input=["good morning from litellm"],
# api_base="http://0.0.0.0:8000/"
# )
# print(response)
# test_custom_openai_embedding()
| [] |
2024-01-10 | ecomoptimizer/litellm | litellm~proxy~proxy_cli.py | import click
import subprocess, traceback, json
import os, sys
import random, appdirs
from datetime import datetime
from dotenv import load_dotenv
import operator
sys.path.append(os.getcwd())
config_filename = "litellm.secrets"
# Using appdirs to determine user-specific config path
config_dir = appdirs.user_config_dir("litellm")
user_config_path = os.getenv("LITELLM_CONFIG_PATH", os.path.join(config_dir, config_filename))
load_dotenv()
from importlib import resources
import shutil
telemetry = None
def run_ollama_serve():
try:
command = ['ollama', 'serve']
with open(os.devnull, 'w') as devnull:
process = subprocess.Popen(command, stdout=devnull, stderr=devnull)
except Exception as e:
print(f"""
            LiteLLM Warning: proxy started with `ollama` model\n`ollama serve` failed with Exception: {e}. \nEnsure you run `ollama serve`
""")
def clone_subfolder(repo_url, subfolder, destination):
# Clone the full repo
repo_name = repo_url.split('/')[-1]
repo_master = os.path.join(destination, "repo_master")
subprocess.run(['git', 'clone', repo_url, repo_master])
# Move into the subfolder
subfolder_path = os.path.join(repo_master, subfolder)
# Copy subfolder to destination
for file_name in os.listdir(subfolder_path):
source = os.path.join(subfolder_path, file_name)
if os.path.isfile(source):
shutil.copy(source, destination)
else:
dest_path = os.path.join(destination, file_name)
shutil.copytree(source, dest_path)
# Remove cloned repo folder
subprocess.run(['rm', '-rf', os.path.join(destination, "repo_master")])
feature_telemetry(feature="create-proxy")
def is_port_in_use(port):
import socket
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
return s.connect_ex(('localhost', port)) == 0
@click.command()
@click.option('--host', default='0.0.0.0', help='Host for the server to listen on.')
@click.option('--port', default=8000, help='Port to bind the server to.')
@click.option('--num_workers', default=1, help='Number of uvicorn workers to spin up')
@click.option('--api_base', default=None, help='API base URL.')
@click.option('--api_version', default="2023-07-01-preview", help='For azure - pass in the api version.')
@click.option('--model', '-m', default=None, help='The model name to pass to litellm')
@click.option('--alias', default=None, help='The alias for the model - use this to give a litellm model name (e.g. "huggingface/codellama/CodeLlama-7b-Instruct-hf") a more user-friendly name ("codellama")')
@click.option('--add_key', default=None, help='The API key to add for the proxy to use')
@click.option('--headers', default=None, help='headers for the API call')
@click.option('--save', is_flag=True, type=bool, help='Save the model-specific config')
@click.option('--debug', default=False, is_flag=True, type=bool, help='To debug the input')
@click.option('--use_queue', default=False, is_flag=True, type=bool, help='To use celery workers for async endpoints')
@click.option('--temperature', default=None, type=float, help='Set temperature for the model')
@click.option('--max_tokens', default=None, type=int, help='Set max tokens for the model')
@click.option('--request_timeout', default=600, type=int, help='Set timeout in seconds for completion calls')
@click.option('--drop_params', is_flag=True, help='Drop any unmapped params')
@click.option('--add_function_to_prompt', is_flag=True, help='If function passed but unsupported, pass it as prompt')
@click.option('--config', '-c', default=None, help='Configure Litellm')
@click.option('--file', '-f', help='Path to config file')
@click.option('--max_budget', default=None, type=float, help='Set max budget for API calls - works for hosted models like OpenAI, TogetherAI, Anthropic, etc.')
@click.option('--telemetry', default=True, type=bool, help='Helps us know if people are using this feature. Turn this off by doing `--telemetry False`')
@click.option('--logs', flag_value=False, type=int, help='Gets the "n" most recent logs. By default gets most recent log.')
@click.option('--health', flag_value=True, help='Make a chat/completions request to all llms in config.yaml')
@click.option('--test', flag_value=True, help='proxy chat completions url to make a test request to')
@click.option('--test_async', default=False, is_flag=True, help='Calls async endpoints /queue/requests and /queue/response')
@click.option('--num_requests', default=10, type=int, help='Number of requests to hit async endpoint with')
@click.option('--local', is_flag=True, default=False, help='for local debugging')
def run_server(host, port, api_base, api_version, model, alias, add_key, headers, save, debug, temperature, max_tokens, request_timeout, drop_params, add_function_to_prompt, config, file, max_budget, telemetry, logs, test, local, num_workers, test_async, num_requests, use_queue, health):
global feature_telemetry
args = locals()
if local:
from proxy_server import app, save_worker_config, usage_telemetry
else:
try:
from .proxy_server import app, save_worker_config, usage_telemetry
except ImportError as e:
from proxy_server import app, save_worker_config, usage_telemetry
feature_telemetry = usage_telemetry
if logs is not None:
if logs == 0: # default to 1
logs = 1
try:
with open('api_log.json') as f:
data = json.load(f)
# convert keys to datetime objects
log_times = {datetime.strptime(k, "%Y%m%d%H%M%S%f"): v for k, v in data.items()}
# sort by timestamp
sorted_times = sorted(log_times.items(), key=operator.itemgetter(0), reverse=True)
# get n recent logs
recent_logs = {k.strftime("%Y%m%d%H%M%S%f"): v for k, v in sorted_times[:logs]}
print(json.dumps(recent_logs, indent=4))
except:
print("LiteLLM: No logs saved!")
return
if model and "ollama" in model:
run_ollama_serve()
if test_async is True:
import requests, concurrent, time
api_base = f"http://{host}:{port}"
def _make_openai_completion():
data = {
"model": "gpt-3.5-turbo",
"messages": [{"role": "user", "content": "Write a short poem about the moon"}]
}
response = requests.post("http://0.0.0.0:8000/queue/request", json=data)
response = response.json()
while True:
try:
url = response["url"]
polling_url = f"{api_base}{url}"
polling_response = requests.get(polling_url)
polling_response = polling_response.json()
print("\n RESPONSE FROM POLLING JOB", polling_response)
status = polling_response["status"]
if status == "finished":
llm_response = polling_response["result"]
break
print(f"POLLING JOB{polling_url}\nSTATUS: {status}, \n Response {polling_response}")
time.sleep(0.5)
except Exception as e:
print("got exception in polling", e)
break
# Number of concurrent calls (you can adjust this)
concurrent_calls = num_requests
# List to store the futures of concurrent calls
futures = []
start_time = time.time()
# Make concurrent calls
with concurrent.futures.ThreadPoolExecutor(max_workers=concurrent_calls) as executor:
for _ in range(concurrent_calls):
futures.append(executor.submit(_make_openai_completion))
# Wait for all futures to complete
concurrent.futures.wait(futures)
# Summarize the results
successful_calls = 0
failed_calls = 0
for future in futures:
if future.done():
if future.result() is not None:
successful_calls += 1
else:
failed_calls += 1
end_time = time.time()
print(f"Elapsed Time: {end_time-start_time}")
print(f"Load test Summary:")
print(f"Total Requests: {concurrent_calls}")
print(f"Successful Calls: {successful_calls}")
print(f"Failed Calls: {failed_calls}")
return
if health != False:
import requests
print("\nLiteLLM: Health Testing models in config")
response = requests.get(url=f"http://{host}:{port}/health")
print(json.dumps(response.json(), indent=4))
return
if test != False:
click.echo('\nLiteLLM: Making a test ChatCompletions request to your proxy')
import openai
if test == True: # flag value set
api_base = f"http://{host}:{port}"
else:
api_base = test
client = openai.OpenAI(
api_key="My API Key",
base_url=api_base
)
response = client.chat.completions.create(model="gpt-3.5-turbo", messages = [
{
"role": "user",
"content": "this is a test request, write a short poem"
}
], max_tokens=256)
click.echo(f'\nLiteLLM: response from proxy {response}')
print("\n Making streaming request to proxy")
response = client.chat.completions.create(model="gpt-3.5-turbo", messages = [
{
"role": "user",
"content": "this is a test request, write a short poem"
}
],
stream=True,
)
for chunk in response:
click.echo(f'LiteLLM: streaming response from proxy {chunk}')
print("\n making completion request to proxy")
response = client.completions.create(model="gpt-3.5-turbo", prompt='this is a test request, write a short poem')
print(response)
return
else:
if headers:
headers = json.loads(headers)
save_worker_config(model=model, alias=alias, api_base=api_base, api_version=api_version, debug=debug, temperature=temperature, max_tokens=max_tokens, request_timeout=request_timeout, max_budget=max_budget, telemetry=telemetry, drop_params=drop_params, add_function_to_prompt=add_function_to_prompt, headers=headers, save=save, config=config, use_queue=use_queue)
try:
import uvicorn
    except ImportError:
raise ImportError("Uvicorn needs to be imported. Run - `pip install uvicorn`")
if port == 8000 and is_port_in_use(port):
port = random.randint(1024, 49152)
uvicorn.run("litellm.proxy.proxy_server:app", host=host, port=port, workers=num_workers)
if __name__ == "__main__":
run_server()
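# Illustrative CLI usage for the options defined above (model names, ports and endpoints are
# placeholders, not defaults shipped with this file):
#   litellm --model gpt-3.5-turbo --port 8000
#   litellm --model azure/chatgpt-v-2 --api_base https://<your-resource>.openai.azure.com --test
#   litellm --config /path/to/config.yaml --num_workers 4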
| [
"this is a test request, write a short poem",
"Write a short poem about the moon"
] |
2024-01-10 | ecomoptimizer/litellm | litellm~utils.py | # +-----------------------------------------------+
# | |
# | Give Feedback / Get Help |
# | https://github.com/BerriAI/litellm/issues/new |
# | |
# +-----------------------------------------------+
#
# Thank you users! We ❤️ you! - Krrish & Ishaan
import sys, re
import dotenv, json, traceback, threading
import subprocess, os
import litellm, openai
import itertools
import random, uuid, requests
import datetime, time
import tiktoken
import aiohttp
import logging
import asyncio, httpx, inspect
import copy
from tokenizers import Tokenizer
from dataclasses import (
dataclass,
field,
) # for storing API inputs, outputs, and metadata
encoding = tiktoken.get_encoding("cl100k_base")
import importlib.metadata
from .integrations.traceloop import TraceloopLogger
from .integrations.helicone import HeliconeLogger
from .integrations.aispend import AISpendLogger
from .integrations.berrispend import BerriSpendLogger
from .integrations.supabase import Supabase
from .integrations.llmonitor import LLMonitorLogger
from .integrations.prompt_layer import PromptLayerLogger
from .integrations.langsmith import LangsmithLogger
from .integrations.weights_biases import WeightsBiasesLogger
from .integrations.custom_logger import CustomLogger
from .integrations.langfuse import LangFuseLogger
from .integrations.litedebugger import LiteDebugger
from openai import OpenAIError as OriginalError
from openai._models import BaseModel as OpenAIObject
from .exceptions import (
AuthenticationError,
BadRequestError,
RateLimitError,
ServiceUnavailableError,
OpenAIError,
ContextWindowExceededError,
Timeout,
APIConnectionError,
APIError,
BudgetExceededError
)
from typing import cast, List, Dict, Union, Optional, Literal
from .caching import Cache
####### ENVIRONMENT VARIABLES ####################
dotenv.load_dotenv() # Loading env variables using dotenv
sentry_sdk_instance = None
capture_exception = None
add_breadcrumb = None
posthog = None
slack_app = None
alerts_channel = None
heliconeLogger = None
promptLayerLogger = None
langsmithLogger = None
weightsBiasesLogger = None
customLogger = None
langFuseLogger = None
llmonitorLogger = None
aispendLogger = None
berrispendLogger = None
supabaseClient = None
liteDebuggerClient = None
callback_list: Optional[List[str]] = []
user_logger_fn = None
additional_details: Optional[Dict[str, str]] = {}
local_cache: Optional[Dict[str, str]] = {}
last_fetched_at = None
last_fetched_at_keys = None
######## Model Response #########################
# All liteLLM Model responses will be in this format, Follows the OpenAI Format
# https://docs.litellm.ai/docs/completion/output
# {
# 'choices': [
# {
# 'finish_reason': 'stop',
# 'index': 0,
# 'message': {
# 'role': 'assistant',
# 'content': " I'm doing well, thank you for asking. I am Claude, an AI assistant created by Anthropic."
# }
# }
# ],
# 'created': 1691429984.3852863,
# 'model': 'claude-instant-1',
# 'usage': {'prompt_tokens': 18, 'completion_tokens': 23, 'total_tokens': 41}
# }
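# Illustrative sketch (never called): how the OpenAI-format dict above maps onto the
# ModelResponse object defined further down in this module, including the dictionary-style
# access that Message/Choices/ModelResponse expose via __getitem__. The content string is
# just an example value.
def _example_model_response_access():
    response = ModelResponse()  # defaults to object="chat.completion" with one empty Choices()
    response.choices[0].message.content = "I'm doing well, thank you for asking."
    # attribute-style and dict-style access are interchangeable on these objects
    assert response["choices"][0]["message"]["content"] == response.choices[0].message.content
    return response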
class UnsupportedParamsError(Exception):
def __init__(self, status_code, message):
self.status_code = status_code
self.message = message
        self.request = httpx.Request(method="POST", url="https://openai.api.com/v1/")
self.response = httpx.Response(status_code=status_code, request=self.request)
super().__init__(
self.message
) # Call the base class constructor with the parameters it needs
def _generate_id(): # private helper function
return 'chatcmpl-' + str(uuid.uuid4())
def map_finish_reason(finish_reason: str): # openai supports 5 stop sequences - 'stop', 'length', 'function_call', 'content_filter', 'null'
# anthropic mapping
if finish_reason == "stop_sequence":
return "stop"
return finish_reason
class FunctionCall(OpenAIObject):
arguments: str
name: str
class Function(OpenAIObject):
arguments: str
name: str
class ChatCompletionMessageToolCall(OpenAIObject):
id: str
function: Function
type: str
class Message(OpenAIObject):
def __init__(self, content="default", role="assistant", logprobs=None, function_call=None, tool_calls=None, **params):
super(Message, self).__init__(**params)
self.content = content
self.role = role
if function_call is not None:
self.function_call = FunctionCall(**function_call)
if tool_calls is not None:
self.tool_calls = []
for tool_call in tool_calls:
self.tool_calls.append(
ChatCompletionMessageToolCall(**tool_call)
)
if logprobs is not None:
self._logprobs = logprobs
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
class Delta(OpenAIObject):
def __init__(self, content=None, role=None, **params):
super(Delta, self).__init__(**params)
self.content = content
self.role = role
def __contains__(self, key):
# Define custom behavior for the 'in' operator
return hasattr(self, key)
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
class Choices(OpenAIObject):
def __init__(self, finish_reason=None, index=0, message=None, **params):
super(Choices, self).__init__(**params)
self.finish_reason = map_finish_reason(finish_reason) # set finish_reason for all responses
self.index = index
if message is None:
self.message = Message(content=None)
else:
self.message = message
def __contains__(self, key):
# Define custom behavior for the 'in' operator
return hasattr(self, key)
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
class Usage(OpenAIObject):
def __init__(self, prompt_tokens=None, completion_tokens=None, total_tokens=None, **params):
super(Usage, self).__init__(**params)
if prompt_tokens:
self.prompt_tokens = prompt_tokens
if completion_tokens:
self.completion_tokens = completion_tokens
if total_tokens:
self.total_tokens = total_tokens
def __contains__(self, key):
# Define custom behavior for the 'in' operator
return hasattr(self, key)
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
class StreamingChoices(OpenAIObject):
def __init__(self, finish_reason=None, index=0, delta: Optional[Delta]=None, **params):
super(StreamingChoices, self).__init__(**params)
if finish_reason:
self.finish_reason = finish_reason
else:
self.finish_reason = None
self.index = index
if delta:
self.delta = delta
else:
self.delta = Delta()
def __contains__(self, key):
# Define custom behavior for the 'in' operator
return hasattr(self, key)
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
class ModelResponse(OpenAIObject):
id: str
"""A unique identifier for the completion."""
choices: List[Union[Choices, StreamingChoices]]
"""The list of completion choices the model generated for the input prompt."""
created: int
"""The Unix timestamp (in seconds) of when the completion was created."""
model: Optional[str] = None
"""The model used for completion."""
object: str
"""The object type, which is always "text_completion" """
system_fingerprint: Optional[str] = None
"""This fingerprint represents the backend configuration that the model runs with.
Can be used in conjunction with the `seed` request parameter to understand when
backend changes have been made that might impact determinism.
"""
usage: Optional[Usage] = None
"""Usage statistics for the completion request."""
_hidden_params: dict = {}
def __init__(self, id=None, choices=None, created=None, model=None, object=None, system_fingerprint=None, usage=None, stream=False, response_ms=None, hidden_params=None, **params):
if stream:
object = "chat.completion.chunk"
choices = [StreamingChoices()]
else:
if model in litellm.open_ai_embedding_models:
object = "embedding"
else:
object = "chat.completion"
choices = [Choices()]
if id is None:
id = _generate_id()
else:
id = id
if created is None:
created = int(time.time())
else:
created = created
model = model
if usage:
usage = usage
else:
usage = Usage()
if hidden_params:
self._hidden_params = hidden_params
super().__init__(id=id, choices=choices, created=created, model=model, object=object, system_fingerprint=system_fingerprint, usage=usage, **params)
def __contains__(self, key):
# Define custom behavior for the 'in' operator
return hasattr(self, key)
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
class Embedding(OpenAIObject):
embedding: list = []
index: int
object: str
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
class EmbeddingResponse(OpenAIObject):
model: Optional[str] = None
"""The model used for embedding."""
data: Optional[List] = None
"""The actual embedding value"""
object: str
"""The object type, which is always "embedding" """
usage: Optional[Usage] = None
"""Usage statistics for the embedding request."""
def __init__(self, model=None, usage=None, stream=False, response_ms=None, data=None):
object = "list"
if response_ms:
_response_ms = response_ms
else:
_response_ms = None
if data:
data = data
else:
data = None
if usage:
usage = usage
else:
usage = Usage()
model = model
super().__init__(model=model, object=object, data=data, usage=usage)
def __contains__(self, key):
# Define custom behavior for the 'in' operator
return hasattr(self, key)
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
class TextChoices(OpenAIObject):
def __init__(self, finish_reason=None, index=0, text=None, logprobs=None, **params):
super(TextChoices, self).__init__(**params)
if finish_reason:
self.finish_reason = map_finish_reason(finish_reason)
else:
self.finish_reason = "stop"
self.index = index
if text:
self.text = text
else:
self.text = None
        if logprobs:
            self.logprobs = logprobs
        else:
            self.logprobs = None
def __contains__(self, key):
# Define custom behavior for the 'in' operator
return hasattr(self, key)
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
class TextCompletionResponse(OpenAIObject):
"""
{
"id": response["id"],
"object": "text_completion",
"created": response["created"],
"model": response["model"],
"choices": [
{
"text": response["choices"][0]["message"]["content"],
"index": response["choices"][0]["index"],
"logprobs": transformed_logprobs,
"finish_reason": response["choices"][0]["finish_reason"]
}
],
"usage": response["usage"]
}
"""
def __init__(self, id=None, choices=None, created=None, model=None, usage=None, stream=False, response_ms=None, **params):
super(TextCompletionResponse, self).__init__(**params)
if stream:
self.object = "text_completion.chunk"
self.choices = [TextChoices()]
else:
self.object = "text_completion"
self.choices = [TextChoices()]
if id is None:
self.id = _generate_id()
else:
self.id = id
if created is None:
self.created = int(time.time())
else:
self.created = created
if response_ms:
self._response_ms = response_ms
else:
self._response_ms = None
self.model = model
if usage:
self.usage = usage
else:
self.usage = Usage()
self._hidden_params = {} # used in case users want to access the original model response
def __contains__(self, key):
# Define custom behavior for the 'in' operator
return hasattr(self, key)
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
############################################################
def print_verbose(print_statement):
if litellm.set_verbose:
print(print_statement) # noqa
####### LOGGING ###################
from enum import Enum
class CallTypes(Enum):
embedding = 'embedding'
completion = 'completion'
acompletion = 'acompletion'
# Logging function -> log the exact model details + what's being sent | Non-Blocking
class Logging:
global supabaseClient, liteDebuggerClient, promptLayerLogger, weightsBiasesLogger, langsmithLogger, capture_exception, add_breadcrumb, llmonitorLogger
def __init__(self, model, messages, stream, call_type, start_time, litellm_call_id, function_id):
if call_type not in [item.value for item in CallTypes]:
allowed_values = ", ".join([item.value for item in CallTypes])
raise ValueError(f"Invalid call_type {call_type}. Allowed values: {allowed_values}")
self.model = model
self.messages = messages
self.stream = stream
self.start_time = start_time # log the call start time
self.call_type = call_type
self.litellm_call_id = litellm_call_id
self.function_id = function_id
self.streaming_chunks = [] # for generating complete stream response
def update_environment_variables(self, model, user, optional_params, litellm_params):
self.optional_params = optional_params
self.model = model
self.user = user
self.litellm_params = litellm_params
self.logger_fn = litellm_params["logger_fn"]
print_verbose(f"self.optional_params: {self.optional_params}")
self.model_call_details = {
"model": self.model,
"messages": self.messages,
"optional_params": self.optional_params,
"litellm_params": self.litellm_params,
"start_time": self.start_time,
"stream": self.stream
}
def pre_call(self, input, api_key, model=None, additional_args={}):
# Log the exact input to the LLM API
litellm.error_logs['PRE_CALL'] = locals()
try:
# print_verbose(f"logging pre call for model: {self.model} with call type: {self.call_type}")
self.model_call_details["input"] = input
self.model_call_details["api_key"] = api_key
self.model_call_details["additional_args"] = additional_args
self.model_call_details["log_event_type"] = "pre_api_call"
if (
model
            ): # if model name was changed pre-call, overwrite the initial model call name with the new one
self.model_call_details["model"] = model
# User Logging -> if you pass in a custom logging function
headers = additional_args.get("headers", {})
if headers is None:
headers = {}
data = additional_args.get("complete_input_dict", {})
api_base = additional_args.get("api_base", "")
masked_headers = {k: (v[:-20] + '*' * 20) if (isinstance(v, str) and len(v) > 20) else v for k, v in headers.items()}
formatted_headers = " ".join([f"-H '{k}: {v}'" for k, v in masked_headers.items()])
print_verbose(f"PRE-API-CALL ADDITIONAL ARGS: {additional_args}")
curl_command = "\n\nPOST Request Sent from LiteLLM:\n"
curl_command += "curl -X POST \\\n"
curl_command += f"{api_base} \\\n"
curl_command += f"{formatted_headers} \\\n" if formatted_headers.strip() != "" else ""
curl_command += f"-d '{str(data)}'\n"
if additional_args.get("request_str", None) is not None:
# print the sagemaker / bedrock client request
curl_command = "\nRequest Sent from LiteLLM:\n"
curl_command += additional_args.get("request_str", None)
elif api_base == "":
curl_command = self.model_call_details
print_verbose(f"\033[92m{curl_command}\033[0m\n")
if self.logger_fn and callable(self.logger_fn):
try:
self.logger_fn(
self.model_call_details
) # Expectation: any logger function passed in by the user should accept a dict object
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
if litellm.max_budget and self.stream:
start_time = self.start_time
end_time = self.start_time # no time has passed as the call hasn't been made yet
time_diff = (end_time - start_time).total_seconds()
float_diff = float(time_diff)
litellm._current_cost += litellm.completion_cost(model=self.model, prompt="".join(message["content"] for message in self.messages), completion="", total_time=float_diff)
# Input Integration Logging -> If you want to log the fact that an attempt to call the model was made
for callback in litellm.input_callback:
try:
if callback == "supabase":
print_verbose("reaches supabase for logging!")
model = self.model_call_details["model"]
messages = self.model_call_details["input"]
print_verbose(f"supabaseClient: {supabaseClient}")
supabaseClient.input_log_event(
model=model,
messages=messages,
end_user=self.model_call_details.get("user", "default"),
litellm_call_id=self.litellm_params["litellm_call_id"],
print_verbose=print_verbose,
)
elif callback == "lite_debugger":
print_verbose(f"reaches litedebugger for logging! - model_call_details {self.model_call_details}")
model = self.model_call_details["model"]
messages = self.model_call_details["input"]
print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
liteDebuggerClient.input_log_event(
model=model,
messages=messages,
end_user=self.model_call_details.get("user", "default"),
litellm_call_id=self.litellm_params["litellm_call_id"],
litellm_params=self.model_call_details["litellm_params"],
optional_params=self.model_call_details["optional_params"],
print_verbose=print_verbose,
call_type=self.call_type
)
elif callback == "sentry" and add_breadcrumb:
print_verbose("reaches sentry breadcrumbing")
add_breadcrumb(
category="litellm.llm_call",
message=f"Model Call Details pre-call: {self.model_call_details}",
level="info",
)
elif isinstance(callback, CustomLogger): # custom logger class
callback.log_pre_api_call(
model=self.model,
messages=self.messages,
kwargs=self.model_call_details,
)
elif callable(callback): # custom logger functions
customLogger.log_input_event(
model=self.model,
messages=self.messages,
kwargs=self.model_call_details,
print_verbose=print_verbose,
callback_func=callback
)
except Exception as e:
traceback.print_exc()
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while input logging with integrations {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
        except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
def post_call(self, original_response, input=None, api_key=None, additional_args={}):
# Log the exact result from the LLM API, for streaming - log the type of response received
litellm.error_logs['POST_CALL'] = locals()
try:
self.model_call_details["input"] = input
self.model_call_details["api_key"] = api_key
self.model_call_details["original_response"] = original_response
self.model_call_details["additional_args"] = additional_args
self.model_call_details["log_event_type"] = "post_api_call"
# User Logging -> if you pass in a custom logging function
print_verbose(f"RAW RESPONSE:\n{self.model_call_details.get('original_response', self.model_call_details)}\n\n")
print_verbose(
f"Logging Details Post-API Call: logger_fn - {self.logger_fn} | callable(logger_fn) - {callable(self.logger_fn)}"
)
print_verbose(f"Logging Details Post-API Call: LiteLLM Params: {self.model_call_details}")
if self.logger_fn and callable(self.logger_fn):
try:
self.logger_fn(
self.model_call_details
) # Expectation: any logger function passed in by the user should accept a dict object
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
# Input Integration Logging -> If you want to log the fact that an attempt to call the model was made
for callback in litellm.input_callback:
try:
if callback == "lite_debugger":
print_verbose("reaches litedebugger for post-call logging!")
print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
liteDebuggerClient.post_call_log_event(
original_response=original_response,
litellm_call_id=self.litellm_params["litellm_call_id"],
print_verbose=print_verbose,
call_type = self.call_type,
stream = self.stream,
)
elif callback == "sentry" and add_breadcrumb:
print_verbose("reaches sentry breadcrumbing")
add_breadcrumb(
category="litellm.llm_call",
message=f"Model Call Details post-call: {self.model_call_details}",
level="info",
)
elif isinstance(callback, CustomLogger): # custom logger class
callback.log_post_api_call(
kwargs=self.model_call_details,
response_obj=None,
start_time=self.start_time,
end_time=None
)
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while post-call logging with integrations {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
except:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
pass
def success_handler(self, result=None, start_time=None, end_time=None, **kwargs):
print_verbose(
f"Logging Details LiteLLM-Success Call"
)
try:
if start_time is None:
start_time = self.start_time
if end_time is None:
end_time = datetime.datetime.now()
self.model_call_details["log_event_type"] = "successful_api_call"
self.model_call_details["end_time"] = end_time
complete_streaming_response = None
## BUILD COMPLETE STREAMED RESPONSE
if self.stream:
if result.choices[0].finish_reason is not None: # if it's the last chunk
self.streaming_chunks.append(result)
complete_streaming_response = litellm.stream_chunk_builder(self.streaming_chunks, messages=self.model_call_details.get("messages", None))
else:
self.streaming_chunks.append(result)
elif isinstance(result, OpenAIObject):
result = result.model_dump()
if complete_streaming_response:
self.model_call_details["complete_streaming_response"] = complete_streaming_response
print_verbose(f"success callbacks: {litellm.success_callback}")
if litellm.max_budget and self.stream:
time_diff = (end_time - start_time).total_seconds()
float_diff = float(time_diff)
litellm._current_cost += litellm.completion_cost(model=self.model, prompt="", completion=result["content"], total_time=float_diff)
for callback in litellm.success_callback:
try:
if callback == "lite_debugger":
print_verbose("reaches lite_debugger for logging!")
print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
print_verbose(f"liteDebuggerClient details function {self.call_type} and stream set to {self.stream}")
liteDebuggerClient.log_event(
end_user=kwargs.get("user", "default"),
response_obj=result,
start_time=start_time,
end_time=end_time,
litellm_call_id=self.litellm_call_id,
print_verbose=print_verbose,
call_type = self.call_type,
stream = self.stream,
)
if callback == "api_manager":
print_verbose("reaches api manager for updating model cost")
litellm.apiManager.update_cost(completion_obj=result, user=self.user)
if callback == "cache":
if litellm.cache != None and self.model_call_details.get('optional_params', {}).get('stream', False) == True:
litellm_call_id = self.litellm_params["litellm_call_id"]
if litellm_call_id in self.litellm_params["stream_response"]:
# append for the given call_id
if self.litellm_params["stream_response"][litellm_call_id]["choices"][0]["message"]["content"] == "default":
self.litellm_params["stream_response"][litellm_call_id]["choices"][0]["message"]["content"] = result["content"] # handle first try
else:
self.litellm_params["stream_response"][litellm_call_id]["choices"][0]["message"]["content"] += result["content"]
else: # init a streaming response for this call id
new_model_response = ModelResponse(choices=[Choices(message=Message(content="default"))])
self.litellm_params["stream_response"][litellm_call_id] = new_model_response
litellm.cache.add_cache(self.litellm_params["stream_response"][litellm_call_id], **self.model_call_details)
if callback == "promptlayer":
print_verbose("reaches promptlayer for logging!")
promptLayerLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "supabase":
print_verbose("reaches supabase for logging!")
kwargs=self.model_call_details
# this only logs streaming once, complete_streaming_response exists i.e when stream ends
if self.stream:
if "complete_streaming_response" not in kwargs:
return
else:
print_verbose("reaches supabase for streaming logging!")
result = kwargs["complete_streaming_response"]
model = kwargs["model"]
messages = kwargs["messages"]
optional_params = kwargs.get("optional_params", {})
litellm_params = kwargs.get("litellm_params", {})
supabaseClient.log_event(
model=model,
messages=messages,
end_user=optional_params.get("user", "default"),
response_obj=result,
start_time=start_time,
end_time=end_time,
litellm_call_id=litellm_params.get("litellm_call_id", str(uuid.uuid4())),
print_verbose=print_verbose,
)
if callback == "wandb":
print_verbose("reaches wandb for logging!")
weightsBiasesLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "langsmith":
print_verbose("reaches langsmtih for logging!")
langsmithLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "llmonitor":
print_verbose("reaches llmonitor for logging!")
model = self.model
input = self.model_call_details.get("messages", self.model_call_details.get("input", None))
# if contains input, it's 'embedding', otherwise 'llm'
type = "embed" if self.call_type == CallTypes.embedding.value else "llm"
llmonitorLogger.log_event(
type=type,
event="end",
model=model,
input=input,
user_id=self.model_call_details.get("user", "default"),
response_obj=result,
start_time=start_time,
end_time=end_time,
run_id=self.litellm_call_id,
print_verbose=print_verbose,
)
if callback == "helicone":
print_verbose("reaches helicone for logging!")
model = self.model
messages = kwargs["messages"]
heliconeLogger.log_success(
model=model,
messages=messages,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "langfuse":
print_verbose("reaches langfuse for logging!")
kwargs = {}
for k, v in self.model_call_details.items():
if k != "original_response": # copy.deepcopy raises errors as this could be a coroutine
kwargs[k] = v
# this only logs streaming once, complete_streaming_response exists i.e when stream ends
if self.stream:
if "complete_streaming_response" not in kwargs:
return
else:
print_verbose("reaches langfuse for streaming logging!")
result = kwargs["complete_streaming_response"]
langFuseLogger.log_event(
kwargs=kwargs,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "traceloop":
deep_copy = {}
for k, v in self.model_call_details.items():
if k != "original_response":
deep_copy[k] = v
traceloopLogger.log_event(
kwargs=deep_copy,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if isinstance(callback, CustomLogger): # custom logger class
if self.stream and complete_streaming_response is None:
callback.log_stream_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time
)
else:
if self.stream and complete_streaming_response:
self.model_call_details["complete_response"] = self.model_call_details.pop("complete_streaming_response", complete_streaming_response)
callback.log_success_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
)
if callable(callback): # custom logger functions
customLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
callback_func=callback
)
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while success logging with integrations {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
except:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while success logging {traceback.format_exc()}"
)
pass
def failure_handler(self, exception, traceback_exception, start_time=None, end_time=None):
print_verbose(
f"Logging Details LiteLLM-Failure Call"
)
try:
if start_time is None:
start_time = self.start_time
if end_time is None:
end_time = datetime.datetime.now()
# on some exceptions, model_call_details is not always initialized, this ensures that we still log those exceptions
if not hasattr(self, "model_call_details"):
self.model_call_details = {}
self.model_call_details["log_event_type"] = "failed_api_call"
self.model_call_details["exception"] = exception
self.model_call_details["traceback_exception"] = traceback_exception
self.model_call_details["end_time"] = end_time
result = None # result sent to all loggers, init this to None incase it's not created
for callback in litellm.failure_callback:
try:
if callback == "lite_debugger":
print_verbose("reaches lite_debugger for logging!")
print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
result = {
"model": self.model,
"created": time.time(),
"error": traceback_exception,
"usage": {
"prompt_tokens": prompt_token_calculator(
self.model, messages=self.messages
),
"completion_tokens": 0,
},
}
liteDebuggerClient.log_event(
model=self.model,
messages=self.messages,
end_user=self.model_call_details.get("user", "default"),
response_obj=result,
start_time=start_time,
end_time=end_time,
litellm_call_id=self.litellm_call_id,
print_verbose=print_verbose,
call_type = self.call_type,
stream = self.stream,
)
elif callback == "llmonitor":
print_verbose("reaches llmonitor for logging error!")
model = self.model
input = self.model_call_details["input"]
type = "embed" if self.call_type == CallTypes.embedding.value else "llm"
llmonitorLogger.log_event(
type=type,
event="error",
user_id=self.model_call_details.get("user", "default"),
model=model,
input=input,
error=traceback_exception,
run_id=self.litellm_call_id,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
elif callback == "sentry":
print_verbose("sending exception to sentry")
if capture_exception:
capture_exception(exception)
else:
print_verbose(f"capture exception not initialized: {capture_exception}")
elif callable(callback): # custom logger functions
customLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
callback_func=callback
)
elif isinstance(callback, CustomLogger): # custom logger class
callback.log_failure_event(
start_time=start_time,
end_time=end_time,
response_obj=result,
kwargs=self.model_call_details,
)
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while failure logging with integrations {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while failure logging {traceback.format_exc()}"
)
pass
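# Hedged sketch of a plain-function callback (assumption: the signature mirrors how
# customLogger.log_event forwards kwargs/response_obj/start_time/end_time in the handlers
# above; it is not a contract defined in this file).
def _example_success_callback(kwargs, completion_response, start_time, end_time):
    print_verbose(f"model={kwargs.get('model')} took {(end_time - start_time).total_seconds()}s")
# register it with: litellm.success_callback = [_example_success_callback]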
def exception_logging(
additional_args={},
logger_fn=None,
exception=None,
):
try:
model_call_details = {}
if exception:
model_call_details["exception"] = exception
model_call_details["additional_args"] = additional_args
# User Logging -> if you pass in a custom logging function or want to use sentry breadcrumbs
print_verbose(
f"Logging Details: logger_fn - {logger_fn} | callable(logger_fn) - {callable(logger_fn)}"
)
if logger_fn and callable(logger_fn):
try:
logger_fn(
model_call_details
) # Expectation: any logger function passed in by the user should accept a dict object
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
pass
####### RULES ###################
class Rules:
"""
Fail calls based on the input or llm api output
Example usage:
import litellm
def my_custom_rule(input): # receives the model response
if "i don't think i can answer" in input: # trigger fallback if the model refuses to answer
return False
return True
litellm.post_call_rules = [my_custom_rule] # have these be functions that can be called to fail a call
response = litellm.completion(model="gpt-3.5-turbo", messages=[{"role": "user",
"content": "Hey, how's it going?"}], fallbacks=["openrouter/mythomax"])
"""
def __init__(self) -> None:
pass
def pre_call_rules(self, input: str, model: str):
for rule in litellm.pre_call_rules:
if callable(rule):
decision = rule(input)
if decision is False:
                    raise litellm.APIResponseValidationError(message="LLM Request failed pre-call-rule check", llm_provider="", model=model) # type: ignore
return True
def post_call_rules(self, input: str, model: str):
for rule in litellm.post_call_rules:
if callable(rule):
decision = rule(input)
if decision is False:
raise litellm.APIResponseValidationError(message="LLM Response failed post-call-rule check", llm_provider="", model=model) # type: ignore
return True
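# Hedged sketch complementing the docstring example above: a pre-call rule receives the
# concatenated message content (see function_setup below) and returns False to reject the
# request before it is sent. The 5000-character cap is an arbitrary illustration.
def _example_pre_call_rule(input):
    return len(input) < 5000
# litellm.pre_call_rules = [_example_pre_call_rule]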
####### CLIENT ###################
# make it easy to log if completion/embedding runs succeeded or failed + see what happened | Non-Blocking
def client(original_function):
global liteDebuggerClient, get_all_keys
rules_obj = Rules()
def function_setup(
start_time, *args, **kwargs
): # just run once to check if user wants to send their data anywhere - PostHog/Sentry/Slack/etc.
try:
global callback_list, add_breadcrumb, user_logger_fn, Logging
function_id = kwargs["id"] if "id" in kwargs else None
if litellm.use_client or ("use_client" in kwargs and kwargs["use_client"] == True):
print_verbose(f"litedebugger initialized")
if "lite_debugger" not in litellm.input_callback:
litellm.input_callback.append("lite_debugger")
if "lite_debugger" not in litellm.success_callback:
litellm.success_callback.append("lite_debugger")
if "lite_debugger" not in litellm.failure_callback:
litellm.failure_callback.append("lite_debugger")
if len(litellm.callbacks) > 0:
for callback in litellm.callbacks:
if callback not in litellm.input_callback:
litellm.input_callback.append(callback)
if callback not in litellm.success_callback:
litellm.success_callback.append(callback)
if callback not in litellm.failure_callback:
litellm.failure_callback.append(callback)
if (
len(litellm.input_callback) > 0
or len(litellm.success_callback) > 0
or len(litellm.failure_callback) > 0
) and len(callback_list) == 0:
callback_list = list(
set(
litellm.input_callback
+ litellm.success_callback
+ litellm.failure_callback
)
)
set_callbacks(
callback_list=callback_list,
function_id=function_id
)
if add_breadcrumb:
add_breadcrumb(
category="litellm.llm_call",
message=f"Positional Args: {args}, Keyword Args: {kwargs}",
level="info",
)
if "logger_fn" in kwargs:
user_logger_fn = kwargs["logger_fn"]
# CRASH REPORTING TELEMETRY
crash_reporting(*args, **kwargs)
# INIT LOGGER - for user-specified integrations
model = args[0] if len(args) > 0 else kwargs["model"]
call_type = original_function.__name__
if call_type == CallTypes.completion.value or call_type == CallTypes.acompletion.value:
if len(args) > 1:
messages = args[1]
elif kwargs.get("messages", None):
messages = kwargs["messages"]
### PRE-CALL RULES ###
if isinstance(messages, list) and len(messages) > 0 and isinstance(messages[0], dict) and "content" in messages[0]:
rules_obj.pre_call_rules(input="".join(m["content"] for m in messages if isinstance(m["content"], str)), model=model)
elif call_type == CallTypes.embedding.value:
messages = args[1] if len(args) > 1 else kwargs["input"]
stream = True if "stream" in kwargs and kwargs["stream"] == True else False
logging_obj = Logging(model=model, messages=messages, stream=stream, litellm_call_id=kwargs["litellm_call_id"], function_id=function_id, call_type=call_type, start_time=start_time)
return logging_obj
except Exception as e:
import logging
logging.debug(f"[Non-Blocking] {traceback.format_exc()}; args - {args}; kwargs - {kwargs}")
raise e
def post_call_processing(original_response, model):
try:
call_type = original_function.__name__
if call_type == CallTypes.completion.value or call_type == CallTypes.acompletion.value:
model_response = original_response['choices'][0]['message']['content']
### POST-CALL RULES ###
rules_obj.post_call_rules(input=model_response, model=model)
except Exception as e:
raise e
def crash_reporting(*args, **kwargs):
if litellm.telemetry:
try:
model = args[0] if len(args) > 0 else kwargs["model"]
exception = kwargs["exception"] if "exception" in kwargs else None
custom_llm_provider = (
kwargs["custom_llm_provider"]
if "custom_llm_provider" in kwargs
else None
)
safe_crash_reporting(
model=model,
exception=exception,
custom_llm_provider=custom_llm_provider,
) # log usage-crash details. Do not log any user details. If you want to turn this off, set `litellm.telemetry=False`.
except:
# [Non-Blocking Error]
pass
def wrapper(*args, **kwargs):
start_time = datetime.datetime.now()
result = None
logging_obj = kwargs.get("litellm_logging_obj", None)
# only set litellm_call_id if its not in kwargs
if "litellm_call_id" not in kwargs:
kwargs["litellm_call_id"] = str(uuid.uuid4())
try:
model = args[0] if len(args) > 0 else kwargs["model"]
except:
raise ValueError("model param not passed in.")
try:
if logging_obj is None:
logging_obj = function_setup(start_time, *args, **kwargs)
kwargs["litellm_logging_obj"] = logging_obj
# [OPTIONAL] CHECK BUDGET
if litellm.max_budget:
if litellm._current_cost > litellm.max_budget:
raise BudgetExceededError(current_cost=litellm._current_cost, max_budget=litellm.max_budget)
# [OPTIONAL] CHECK CACHE
# remove this after deprecating litellm.caching
print_verbose(f"litellm.caching: {litellm.caching}; litellm.caching_with_models: {litellm.caching_with_models}; litellm.cache: {litellm.cache}")
if (litellm.caching or litellm.caching_with_models) and litellm.cache is None:
litellm.cache = Cache()
print_verbose(f"kwargs[caching]: {kwargs.get('caching', False)}; litellm.cache: {litellm.cache}")
# if caching is false, don't run this
if (kwargs.get("caching", None) is None and litellm.cache is not None) or kwargs.get("caching", False) == True: # allow users to control returning cached responses from the completion function
# checking cache
if (litellm.cache != None or litellm.caching or litellm.caching_with_models):
print_verbose(f"Checking Cache")
cached_result = litellm.cache.get_cache(*args, **kwargs)
if cached_result != None:
print_verbose(f"Cache Hit!")
if "detail" in cached_result:
# implies an error occurred
pass
else:
call_type = original_function.__name__
print_verbose(f"Cache Response Object routing: call_type - {call_type}; cached_result instace: {type(cached_result)}")
if call_type == CallTypes.completion.value and isinstance(cached_result, dict):
return convert_to_model_response_object(response_object=cached_result, model_response_object=ModelResponse())
elif call_type == CallTypes.embedding.value and isinstance(cached_result, dict):
return convert_to_model_response_object(response_object=cached_result, response_type="embedding")
else:
return cached_result
# MODEL CALL
result = original_function(*args, **kwargs)
end_time = datetime.datetime.now()
if "stream" in kwargs and kwargs["stream"] == True:
# TODO: Add to cache for streaming
if "complete_response" in kwargs and kwargs["complete_response"] == True:
chunks = []
for idx, chunk in enumerate(result):
chunks.append(chunk)
return litellm.stream_chunk_builder(chunks, messages=kwargs.get("messages", None))
else:
return result
elif "acompletion" in kwargs and kwargs["acompletion"] == True:
return result
elif "aembedding" in kwargs and kwargs["aembedding"] == True:
return result
### POST-CALL RULES ###
post_call_processing(original_response=result, model=model)
# [OPTIONAL] ADD TO CACHE
if litellm.caching or litellm.caching_with_models or litellm.cache != None: # user init a cache object
litellm.cache.add_cache(result, *args, **kwargs)
# LOG SUCCESS - handle streaming success logging in the _next_ object, remove `handle_success` once it's deprecated
threading.Thread(target=logging_obj.success_handler, args=(result, start_time, end_time)).start()
my_thread = threading.Thread(
target=handle_success, args=(args, kwargs, result, start_time, end_time)
) # don't interrupt execution of main thread
my_thread.start()
# RETURN RESULT
result._response_ms = (end_time - start_time).total_seconds() * 1000 # return response latency in ms like openai
return result
except Exception as e:
call_type = original_function.__name__
if call_type == CallTypes.completion.value:
num_retries = (
kwargs.get("num_retries", None)
or litellm.num_retries
or None
)
litellm.num_retries = None # set retries to None to prevent infinite loops
context_window_fallback_dict = kwargs.get("context_window_fallback_dict", {})
if num_retries:
if (isinstance(e, openai.APIError)
or isinstance(e, openai.Timeout)):
kwargs["num_retries"] = num_retries
return litellm.completion_with_retries(*args, **kwargs)
elif isinstance(e, litellm.exceptions.ContextWindowExceededError) and context_window_fallback_dict and model in context_window_fallback_dict:
if len(args) > 0:
                            args = (context_window_fallback_dict[model],) + args[1:] # args is a tuple; rebuild it instead of item-assigning
else:
kwargs["model"] = context_window_fallback_dict[model]
return original_function(*args, **kwargs)
traceback_exception = traceback.format_exc()
crash_reporting(*args, **kwargs, exception=traceback_exception)
end_time = datetime.datetime.now()
# LOG FAILURE - handle streaming failure logging in the _next_ object, remove `handle_failure` once it's deprecated
if logging_obj:
logging_obj.failure_handler(e, traceback_exception, start_time, end_time) # DO NOT MAKE THREADED - router retry fallback relies on this!
my_thread = threading.Thread(
target=handle_failure,
args=(e, traceback_exception, start_time, end_time, args, kwargs),
) # don't interrupt execution of main thread
my_thread.start()
if hasattr(e, "message"):
if (
liteDebuggerClient and liteDebuggerClient.dashboard_url != None
): # make it easy to get to the debugger logs if you've initialized it
e.message += f"\n Check the log in your dashboard - {liteDebuggerClient.dashboard_url}"
raise e
async def wrapper_async(*args, **kwargs):
start_time = datetime.datetime.now()
result = None
logging_obj = kwargs.get("litellm_logging_obj", None)
        # only set litellm_call_id if it's not in kwargs
if "litellm_call_id" not in kwargs:
kwargs["litellm_call_id"] = str(uuid.uuid4())
try:
model = args[0] if len(args) > 0 else kwargs["model"]
except:
raise ValueError("model param not passed in.")
try:
if logging_obj is None:
logging_obj = function_setup(start_time, *args, **kwargs)
kwargs["litellm_logging_obj"] = logging_obj
# [OPTIONAL] CHECK BUDGET
if litellm.max_budget:
if litellm._current_cost > litellm.max_budget:
raise BudgetExceededError(current_cost=litellm._current_cost, max_budget=litellm.max_budget)
# [OPTIONAL] CHECK CACHE
print_verbose(f"litellm.cache: {litellm.cache}")
print_verbose(f"kwargs[caching]: {kwargs.get('caching', False)}; litellm.cache: {litellm.cache}")
# if caching is false, don't run this
if (kwargs.get("caching", None) is None and litellm.cache is not None) or kwargs.get("caching", False) == True: # allow users to control returning cached responses from the completion function
# checking cache
if (litellm.cache != None):
print_verbose(f"Checking Cache")
cached_result = litellm.cache.get_cache(*args, **kwargs)
if cached_result != None:
print_verbose(f"Cache Hit!")
call_type = original_function.__name__
if call_type == CallTypes.acompletion.value and isinstance(cached_result, dict):
return convert_to_model_response_object(response_object=cached_result, model_response_object=ModelResponse())
else:
return cached_result
# MODEL CALL
result = await original_function(*args, **kwargs)
end_time = datetime.datetime.now()
if "stream" in kwargs and kwargs["stream"] == True:
if "complete_response" in kwargs and kwargs["complete_response"] == True:
chunks = []
for idx, chunk in enumerate(result):
chunks.append(chunk)
return litellm.stream_chunk_builder(chunks, messages=kwargs.get("messages", None))
else:
return result
### POST-CALL RULES ###
post_call_processing(original_response=result, model=model)
# [OPTIONAL] ADD TO CACHE
if litellm.caching or litellm.caching_with_models or litellm.cache != None: # user init a cache object
litellm.cache.add_cache(result, *args, **kwargs)
# LOG SUCCESS - handle streaming success logging in the _next_ object, remove `handle_success` once it's deprecated
threading.Thread(target=logging_obj.success_handler, args=(result, start_time, end_time)).start()
# RETURN RESULT
if isinstance(result, ModelResponse):
result._response_ms = (end_time - start_time).total_seconds() * 1000 # return response latency in ms like openai
return result
except Exception as e:
call_type = original_function.__name__
if call_type == CallTypes.acompletion.value:
num_retries = (
kwargs.get("num_retries", None)
or litellm.num_retries
or None
)
litellm.num_retries = None # set retries to None to prevent infinite loops
context_window_fallback_dict = kwargs.get("context_window_fallback_dict", {})
if num_retries:
kwargs["num_retries"] = num_retries
kwargs["original_function"] = original_function
if (isinstance(e, openai.RateLimitError)): # rate limiting specific error
kwargs["retry_strategy"] = "exponential_backoff_retry"
elif (isinstance(e, openai.APIError)): # generic api error
kwargs["retry_strategy"] = "constant_retry"
return await litellm.acompletion_with_retries(*args, **kwargs)
elif isinstance(e, litellm.exceptions.ContextWindowExceededError) and context_window_fallback_dict and model in context_window_fallback_dict:
if len(args) > 0:
                        args = (context_window_fallback_dict[model],) + args[1:] # args is a tuple; rebuild it instead of item-assigning
else:
kwargs["model"] = context_window_fallback_dict[model]
return await original_function(*args, **kwargs)
traceback_exception = traceback.format_exc()
crash_reporting(*args, **kwargs, exception=traceback_exception)
end_time = datetime.datetime.now()
if logging_obj:
logging_obj.failure_handler(e, traceback_exception, start_time, end_time) # DO NOT MAKE THREADED - router retry fallback relies on this!
raise e
# Use httpx to determine if the original function is a coroutine
is_coroutine = inspect.iscoroutinefunction(original_function)
# Return the appropriate wrapper based on the original function type
if is_coroutine:
return wrapper_async
else:
return wrapper
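# Illustrative sketch (comment-only, not executed): `client` is applied as a decorator around
# litellm's completion/embedding entrypoints, e.g.
#   @client
#   def completion(model, messages, **kwargs): ...
# so every call gets logging setup, budget/cache checks, retries and context-window fallbacks.
# The simplified `completion` signature above is an assumption for illustration, not the full litellm signature.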
####### USAGE CALCULATOR ################
# Extract the number of billion parameters from the model name
# only used for together_computer LLMs
def get_model_params_and_category(model_name):
import re
    params_match = re.search(r'(\d+(\.\d+)?b)', model_name) # match parameter counts such as 3b, 6.7b, 70b
category = None
if params_match != None:
params_match = params_match.group(1)
params_match = params_match.replace("b", "")
params_billion = float(params_match)
# Determine the category based on the number of parameters
if params_billion <= 3.0:
category = "together-ai-up-to-3b"
elif params_billion <= 7.0:
category = "together-ai-3.1b-7b"
elif params_billion <= 20.0:
category = "together-ai-7.1b-20b"
elif params_billion <= 40.0:
category = "together-ai-20.1b-40b"
elif params_billion <= 70.0:
category = "together-ai-40.1b-70b"
return category
return None
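# Illustrative example (comment-only): the parameter count is parsed out of the model name, so a
# hypothetical name like "togethercomputer/llama-2-70b-chat" matches "70b" and, since 70 <= 70.0,
# maps to the "together-ai-40.1b-70b" pricing category.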
def get_replicate_completion_pricing(completion_response=None, total_time=0.0):
# see https://replicate.com/pricing
a100_40gb_price_per_second_public = 0.001150
# for all litellm currently supported LLMs, almost all requests go to a100_80gb
a100_80gb_price_per_second_public = 0.001400 # assume all calls sent to A100 80GB for now
if total_time == 0.0:
start_time = completion_response['created']
end_time = completion_response["ended"]
total_time = end_time - start_time
return a100_80gb_price_per_second_public*total_time
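# Worked example (illustrative): a Replicate request that ran for 30 seconds is priced at
# 30 * 0.001400 = $0.042 under the A100 80GB assumption above. The 30s figure is made up for
# illustration; real values come from the response's created/ended timestamps or `total_time`.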
def _select_tokenizer(model: str):
# cohere
import pkg_resources
if model in litellm.cohere_models:
tokenizer = Tokenizer.from_pretrained("Cohere/command-nightly")
return {"type": "huggingface_tokenizer", "tokenizer": tokenizer}
# anthropic
elif model in litellm.anthropic_models:
# Read the JSON file
filename = pkg_resources.resource_filename(__name__, 'llms/tokenizers/anthropic_tokenizer.json')
with open(filename, 'r') as f:
json_data = json.load(f)
# Decode the JSON data from utf-8
json_data_decoded = json.dumps(json_data, ensure_ascii=False)
# Convert to str
json_str = str(json_data_decoded)
# load tokenizer
tokenizer = Tokenizer.from_str(json_str)
return {"type": "huggingface_tokenizer", "tokenizer": tokenizer}
# llama2
elif "llama-2" in model.lower():
tokenizer = Tokenizer.from_pretrained("hf-internal-testing/llama-tokenizer")
return {"type": "huggingface_tokenizer", "tokenizer": tokenizer}
# default - tiktoken
else:
return {"type": "openai_tokenizer", "tokenizer": encoding}
def encode(model: str, text: str):
"""
Encodes the given text using the specified model.
Args:
model (str): The name of the model to use for tokenization.
text (str): The text to be encoded.
Returns:
enc: The encoded text.
"""
tokenizer_json = _select_tokenizer(model=model)
enc = tokenizer_json["tokenizer"].encode(text)
return enc
def decode(model: str, tokens: List[int]):
tokenizer_json = _select_tokenizer(model=model)
dec = tokenizer_json["tokenizer"].decode(tokens)
return dec
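# Usage sketch (comment-only; the text is an arbitrary example string):
#   tokens = encode(model="gpt-3.5-turbo", text="hello world")
#   text = decode(model="gpt-3.5-turbo", tokens=list(tokens))
# Note: the huggingface-style tokenizers return an Encoding object from encode() (use .ids),
# while the tiktoken path returns a plain list of ints.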
def openai_token_counter(messages, model="gpt-3.5-turbo-0613"):
"""
Return the number of tokens used by a list of messages.
Borrowed from https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb.
"""
try:
encoding = tiktoken.encoding_for_model(model)
except KeyError:
print_verbose("Warning: model not found. Using cl100k_base encoding.")
encoding = tiktoken.get_encoding("cl100k_base")
if model in {
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k-0613",
"gpt-4-0314",
"gpt-4-32k-0314",
"gpt-4-0613",
"gpt-4-32k-0613",
}:
tokens_per_message = 3
tokens_per_name = 1
elif model == "gpt-3.5-turbo-0301":
tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
tokens_per_name = -1 # if there's a name, the role is omitted
elif "gpt-3.5-turbo" in model:
print_verbose("Warning: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.")
return openai_token_counter(messages, model="gpt-3.5-turbo-0613")
elif "gpt-4" in model:
print_verbose("Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
return openai_token_counter(messages, model="gpt-4-0613")
else:
raise NotImplementedError(
f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
)
num_tokens = 0
for message in messages:
num_tokens += tokens_per_message
for key, value in message.items():
num_tokens += len(encoding.encode(value))
if key == "name":
num_tokens += tokens_per_name
num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
return num_tokens
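# Worked example (illustrative): for a single {"role": "user", "content": "Hi"} message the total is
# 3 (tokens_per_message) + the encoded lengths of "user" and "Hi" + 3 (reply priming), i.e. roughly
# 8 tokens under the gpt-3.5-turbo-0613 scheme; exact counts depend on the installed tiktoken version.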
def token_counter(model="", text=None, messages: Optional[List] = None):
"""
Count the number of tokens in a given text using a specified model.
Args:
model (str): The name of the model to use for tokenization. Default is an empty string.
text (str): The raw text string to be passed to the model. Default is None.
messages (Optional[List[Dict[str, str]]]): Alternative to passing in text. A list of dictionaries representing messages with "role" and "content" keys. Default is None.
Returns:
int: The number of tokens in the text.
"""
# use tiktoken, anthropic, cohere or llama2's tokenizer depending on the model
if text == None:
if messages is not None:
print_verbose(f"token_counter messages received: {messages}")
text = "".join([message["content"] for message in messages])
else:
raise ValueError("text and messages cannot both be None")
num_tokens = 0
if model is not None:
tokenizer_json = _select_tokenizer(model=model)
if tokenizer_json["type"] == "huggingface_tokenizer":
enc = tokenizer_json["tokenizer"].encode(text)
num_tokens = len(enc.ids)
elif tokenizer_json["type"] == "openai_tokenizer":
if model in litellm.open_ai_chat_completion_models and messages != None:
num_tokens = openai_token_counter(messages, model=model)
else:
enc = tokenizer_json["tokenizer"].encode(text)
num_tokens = len(enc)
else:
num_tokens = len(encoding.encode(text))
return num_tokens
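# Usage sketch (comment-only; the message content is an arbitrary example):
#   token_counter(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hey, how's it going?"}])
# For OpenAI chat models with a messages list this routes to openai_token_counter; otherwise the
# tokenizer selected by _select_tokenizer (huggingface/anthropic/cohere, or tiktoken) is used.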
def cost_per_token(model="", prompt_tokens=0, completion_tokens=0):
"""
Calculates the cost per token for a given model, prompt tokens, and completion tokens.
Parameters:
model (str): The name of the model to use. Default is ""
prompt_tokens (int): The number of tokens in the prompt.
completion_tokens (int): The number of tokens in the completion.
Returns:
tuple: A tuple containing the cost in USD dollars for prompt tokens and completion tokens, respectively.
"""
# given
prompt_tokens_cost_usd_dollar = 0
completion_tokens_cost_usd_dollar = 0
model_cost_ref = litellm.model_cost
# see this https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models
azure_llms = {
"gpt-35-turbo": "azure/gpt-3.5-turbo",
"gpt-35-turbo-16k": "azure/gpt-3.5-turbo-16k",
"gpt-35-turbo-instruct": "azure/gpt-3.5-turbo-instruct"
}
if "azure/" in model:
model = model.replace("azure/", "")
if model in model_cost_ref:
prompt_tokens_cost_usd_dollar = (
model_cost_ref[model]["input_cost_per_token"] * prompt_tokens
)
completion_tokens_cost_usd_dollar = (
model_cost_ref[model]["output_cost_per_token"] * completion_tokens
)
return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar
elif "ft:gpt-3.5-turbo" in model:
# fuzzy match ft:gpt-3.5-turbo:abcd-id-cool-litellm
prompt_tokens_cost_usd_dollar = (
model_cost_ref["ft:gpt-3.5-turbo"]["input_cost_per_token"] * prompt_tokens
)
completion_tokens_cost_usd_dollar = (
model_cost_ref["ft:gpt-3.5-turbo"]["output_cost_per_token"] * completion_tokens
)
return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar
elif model in azure_llms:
model = azure_llms[model]
prompt_tokens_cost_usd_dollar = (
model_cost_ref[model]["input_cost_per_token"] * prompt_tokens
)
completion_tokens_cost_usd_dollar = (
model_cost_ref[model]["output_cost_per_token"] * completion_tokens
)
else:
        # fallback: calculate an average input/output cost across all mapped models (covers azure/gpt deployments and other models litellm knows about when no exact match is found)
input_cost_sum = 0
output_cost_sum = 0
model_cost_ref = litellm.model_cost
        for model_name in model_cost_ref:
            input_cost_sum += model_cost_ref[model_name]["input_cost_per_token"]
            output_cost_sum += model_cost_ref[model_name]["output_cost_per_token"]
avg_input_cost = input_cost_sum / len(model_cost_ref.keys())
avg_output_cost = output_cost_sum / len(model_cost_ref.keys())
prompt_tokens_cost_usd_dollar = avg_input_cost * prompt_tokens
completion_tokens_cost_usd_dollar = avg_output_cost * completion_tokens
return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar
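# Worked example (illustrative; the per-token prices are assumed values, not the live cost map):
# for a model mapped with input_cost_per_token=0.0000015 and output_cost_per_token=0.000002,
# 1000 prompt tokens and 500 completion tokens cost
#   0.0000015 * 1000 = $0.0015 and 0.000002 * 500 = $0.0010 respectively.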
def completion_cost(
completion_response=None,
model=None,
prompt="",
messages: List = [],
completion="",
total_time=0.0, # used for replicate
):
"""
    Calculate the cost of a given completion call for GPT-3.5-turbo, llama2, or any other litellm-supported LLM.
Parameters:
completion_response (litellm.ModelResponses): [Required] The response received from a LiteLLM completion request.
[OPTIONAL PARAMS]
model (str): Optional. The name of the language model used in the completion calls
prompt (str): Optional. The input prompt passed to the llm
completion (str): Optional. The output completion text from the llm
total_time (float): Optional. (Only used for Replicate LLMs) The total time used for the request in seconds
Returns:
float: The cost in USD dollars for the completion based on the provided parameters.
Note:
- If completion_response is provided, the function extracts token information and the model name from it.
- If completion_response is not provided, the function calculates token counts based on the model and input text.
- The cost is calculated based on the model, prompt tokens, and completion tokens.
- For certain models containing "togethercomputer" in the name, prices are based on the model size.
- For Replicate models, the cost is calculated based on the total time used for the request.
Exceptions:
- If an error occurs during execution, the function returns 0.0 without blocking the user's execution path.
"""
try:
if messages != []:
prompt = " ".join([message["content"] for message in messages])
# Handle Inputs to completion_cost
prompt_tokens = 0
completion_tokens = 0
if completion_response != None:
# get input/output tokens from completion_response
prompt_tokens = completion_response['usage']['prompt_tokens']
completion_tokens = completion_response['usage']['completion_tokens']
model = model or completion_response['model'] # check if user passed an override for model, if it's none check completion_response['model']
else:
prompt_tokens = token_counter(model=model, text=prompt)
completion_tokens = token_counter(model=model, text=completion)
# Calculate cost based on prompt_tokens, completion_tokens
if "togethercomputer" in model:
# together ai prices based on size of llm
# get_model_params_and_category takes a model name and returns the category of LLM size it is in model_prices_and_context_window.json
model = get_model_params_and_category(model)
# replicate llms are calculate based on time for request running
# see https://replicate.com/pricing
elif (
model in litellm.replicate_models or
"replicate" in model
):
return get_replicate_completion_pricing(completion_response, total_time)
prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar = cost_per_token(
model=model, prompt_tokens=prompt_tokens, completion_tokens=completion_tokens
)
return prompt_tokens_cost_usd_dollar + completion_tokens_cost_usd_dollar
except:
return 0.0 # this should not block a users execution path
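# Usage sketch (comment-only): given a litellm.completion() response object `resp`,
#   completion_cost(completion_response=resp)
# reads the usage block and model name from the response; alternatively pass model/prompt/completion
# strings and the token counts are computed locally via token_counter.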
####### HELPER FUNCTIONS ################
def register_model(model_cost: Union[str, dict]):
"""
Register new / Override existing models (and their pricing) to specific providers.
Provide EITHER a model cost dictionary or a url to a hosted json blob
Example usage:
model_cost_dict = {
"gpt-4": {
"max_tokens": 8192,
"input_cost_per_token": 0.00003,
"output_cost_per_token": 0.00006,
"litellm_provider": "openai",
"mode": "chat"
},
}
"""
loaded_model_cost = {}
if isinstance(model_cost, dict):
loaded_model_cost = model_cost
elif isinstance(model_cost, str):
loaded_model_cost = litellm.get_model_cost_map(url=model_cost)
for key, value in loaded_model_cost.items():
## override / add new keys to the existing model cost dictionary
litellm.model_cost[key] = loaded_model_cost[key]
# add new model names to provider lists
if value.get('litellm_provider') == 'openai':
if key not in litellm.open_ai_chat_completion_models:
litellm.open_ai_chat_completion_models.append(key)
elif value.get('litellm_provider') == 'text-completion-openai':
if key not in litellm.open_ai_text_completion_models:
litellm.open_ai_text_completion_models.append(key)
elif value.get('litellm_provider') == 'cohere':
if key not in litellm.cohere_models:
litellm.cohere_models.append(key)
elif value.get('litellm_provider') == 'anthropic':
if key not in litellm.anthropic_models:
litellm.anthropic_models.append(key)
elif value.get('litellm_provider') == 'openrouter':
split_string = key.split('/', 1)
if key not in litellm.openrouter_models:
litellm.openrouter_models.append(split_string[1])
elif value.get('litellm_provider') == 'vertex_ai-text-models':
if key not in litellm.vertex_text_models:
litellm.vertex_text_models.append(key)
elif value.get('litellm_provider') == 'vertex_ai-code-text-models':
if key not in litellm.vertex_code_text_models:
litellm.vertex_code_text_models.append(key)
elif value.get('litellm_provider') == 'vertex_ai-chat-models':
if key not in litellm.vertex_chat_models:
litellm.vertex_chat_models.append(key)
elif value.get('litellm_provider') == 'vertex_ai-code-chat-models':
if key not in litellm.vertex_code_chat_models:
litellm.vertex_code_chat_models.append(key)
elif value.get('litellm_provider') == 'ai21':
if key not in litellm.ai21_models:
litellm.ai21_models.append(key)
elif value.get('litellm_provider') == 'nlp_cloud':
if key not in litellm.nlp_cloud_models:
litellm.nlp_cloud_models.append(key)
elif value.get('litellm_provider') == 'aleph_alpha':
if key not in litellm.aleph_alpha_models:
litellm.aleph_alpha_models.append(key)
elif value.get('litellm_provider') == 'bedrock':
if key not in litellm.bedrock_models:
litellm.bedrock_models.append(key)
return model_cost
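# Usage sketch (comment-only): "my-custom-model" below is a hypothetical entry, shown only to
# illustrate the expected shape.
#   litellm.register_model({"my-custom-model": {"max_tokens": 4096,
#                                               "input_cost_per_token": 0.00001,
#                                               "output_cost_per_token": 0.00002,
#                                               "litellm_provider": "openai",
#                                               "mode": "chat"}})
# A URL pointing to a JSON blob with the same shape can be passed instead of the dict.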
def get_litellm_params(
return_async=False,
api_key=None,
force_timeout=600,
azure=False,
logger_fn=None,
verbose=False,
hugging_face=False,
replicate=False,
together_ai=False,
custom_llm_provider=None,
api_base=None,
litellm_call_id=None,
model_alias_map=None,
completion_call_id=None,
metadata=None
):
litellm_params = {
"return_async": return_async,
"api_key": api_key,
"force_timeout": force_timeout,
"logger_fn": logger_fn,
"verbose": verbose,
"custom_llm_provider": custom_llm_provider,
"api_base": api_base,
"litellm_call_id": litellm_call_id,
"model_alias_map": model_alias_map,
"completion_call_id": completion_call_id,
"metadata": metadata,
"stream_response": {} # litellm_call_id: ModelResponse Dict
}
return litellm_params
def get_optional_params( # use the openai defaults
# 12 optional params
functions=[],
function_call="",
temperature=None,
top_p=None,
n=None,
stream=False,
stop=None,
max_tokens=None,
presence_penalty=None,
    frequency_penalty=None,
logit_bias=None,
user="",
model=None,
custom_llm_provider="",
response_format=None,
seed=None,
tools=None,
tool_choice=None,
max_retries=None,
**kwargs
):
# retrieve all parameters passed to the function
passed_params = locals()
special_params = passed_params.pop("kwargs")
for k, v in special_params.items():
passed_params[k] = v
default_params = {
"functions":[],
"function_call":"",
"temperature":None,
"top_p":None,
"n":None,
"stream":None,
"stop":None,
"max_tokens":None,
"presence_penalty":None,
"frequency_penalty":None,
"logit_bias": None,
"user":"",
"model":None,
"custom_llm_provider":"",
"response_format": None,
"seed": None,
"tools": None,
"tool_choice": None,
"max_retries": None,
}
# filter out those parameters that were passed with non-default values
non_default_params = {k: v for k, v in passed_params.items() if (k != "model" and k != "custom_llm_provider" and k in default_params and v != default_params[k])}
optional_params = {}
## raise exception if function calling passed in for a provider that doesn't support it
if "functions" in non_default_params or "function_call" in non_default_params:
if custom_llm_provider != "openai" and custom_llm_provider != "text-completion-openai" and custom_llm_provider != "azure":
if litellm.add_function_to_prompt: # if user opts to add it to prompt instead
optional_params["functions_unsupported_model"] = non_default_params.pop("functions")
else:
raise UnsupportedParamsError(status_code=500, message=f"Function calling is not supported by {custom_llm_provider}. To add it to the prompt, set `litellm.add_function_to_prompt = True`.")
def _check_valid_arg(supported_params):
print_verbose(f"\nLiteLLM completion() model= {model}; provider = {custom_llm_provider}")
print_verbose(f"\nLiteLLM: Params passed to completion() {passed_params}")
print_verbose(f"\nLiteLLM: Non-Default params passed to completion() {non_default_params}")
unsupported_params = {}
for k in non_default_params.keys():
if k not in supported_params:
if k == "n" and n == 1: # langchain sends n=1 as a default value
pass
                    # always keep this check in an elif block
else:
unsupported_params[k] = non_default_params[k]
if unsupported_params and not litellm.drop_params:
raise UnsupportedParamsError(status_code=500, message=f"{custom_llm_provider} does not support parameters: {unsupported_params}. To drop these, set `litellm.drop_params=True`.")
def _map_and_modify_arg(supported_params: dict, provider: str, model: str):
"""
filter params to fit the required provider format, drop those that don't fit if user sets `litellm.drop_params = True`.
"""
filtered_stop = None
if "stop" in supported_params and litellm.drop_params:
if provider == "bedrock" and "amazon" in model:
filtered_stop = []
if isinstance(stop, list):
for s in stop:
if re.match(r'^(\|+|User:)$', s):
filtered_stop.append(s)
if filtered_stop is not None:
supported_params["stop"] = filtered_stop
return supported_params
## raise exception if provider doesn't support passed in param
if custom_llm_provider == "anthropic":
## check if unsupported param passed in
supported_params = ["stream", "stop", "temperature", "top_p", "max_tokens"]
_check_valid_arg(supported_params=supported_params)
# handle anthropic params
if stream:
optional_params["stream"] = stream
if stop is not None:
if type(stop) == str:
stop = [stop] # openai can accept str/list for stop
optional_params["stop_sequences"] = stop
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if max_tokens is not None:
optional_params["max_tokens_to_sample"] = max_tokens
elif custom_llm_provider == "cohere":
## check if unsupported param passed in
supported_params = ["stream", "temperature", "max_tokens", "logit_bias", "top_p", "frequency_penalty", "presence_penalty", "stop", "n"]
_check_valid_arg(supported_params=supported_params)
# handle cohere params
if stream:
optional_params["stream"] = stream
if temperature is not None:
optional_params["temperature"] = temperature
if max_tokens is not None:
optional_params["max_tokens"] = max_tokens
if n is not None:
optional_params["num_generations"] = n
if logit_bias is not None:
optional_params["logit_bias"] = logit_bias
if top_p is not None:
optional_params["p"] = top_p
if frequency_penalty is not None:
optional_params["frequency_penalty"] = frequency_penalty
if presence_penalty is not None:
optional_params["presence_penalty"] = presence_penalty
if stop is not None:
optional_params["stop_sequences"] = stop
elif custom_llm_provider == "maritalk":
## check if unsupported param passed in
supported_params = ["stream", "temperature", "max_tokens", "top_p", "presence_penalty", "stop"]
_check_valid_arg(supported_params=supported_params)
        # handle maritalk params
if stream:
optional_params["stream"] = stream
if temperature is not None:
optional_params["temperature"] = temperature
if max_tokens is not None:
optional_params["max_tokens"] = max_tokens
if logit_bias is not None:
optional_params["logit_bias"] = logit_bias
if top_p is not None:
optional_params["p"] = top_p
if presence_penalty is not None:
optional_params["repetition_penalty"] = presence_penalty
if stop is not None:
optional_params["stopping_tokens"] = stop
elif custom_llm_provider == "replicate":
## check if unsupported param passed in
supported_params = ["stream", "temperature", "max_tokens", "top_p", "stop", "seed"]
_check_valid_arg(supported_params=supported_params)
if stream:
optional_params["stream"] = stream
            return optional_params # NOTE: when streaming, only the stream flag is forwarded to replicate; the remaining params below are skipped
if max_tokens is not None:
if "vicuna" in model or "flan" in model:
optional_params["max_length"] = max_tokens
elif "meta/codellama-13b" in model:
optional_params["max_tokens"] = max_tokens
else:
optional_params["max_new_tokens"] = max_tokens
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if stop is not None:
optional_params["stop_sequences"] = stop
elif custom_llm_provider == "huggingface":
## check if unsupported param passed in
supported_params = ["stream", "temperature", "max_tokens", "top_p", "stop", "n"]
_check_valid_arg(supported_params=supported_params)
        # temperature, top_p, n, stream, stop, max_tokens, presence_penalty default to None
if temperature is not None:
if temperature == 0.0 or temperature == 0:
# hugging face exception raised when temp==0
# Failed: Error occurred: HuggingfaceException - Input validation error: `temperature` must be strictly positive
temperature = 0.01
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if n is not None:
optional_params["best_of"] = n
optional_params["do_sample"] = True # Need to sample if you want best of for hf inference endpoints
if stream is not None:
optional_params["stream"] = stream
if stop is not None:
optional_params["stop"] = stop
if max_tokens is not None:
# HF TGI raises the following exception when max_new_tokens==0
# Failed: Error occurred: HuggingfaceException - Input validation error: `max_new_tokens` must be strictly positive
if max_tokens == 0:
max_tokens = 1
optional_params["max_new_tokens"] = max_tokens
if presence_penalty is not None:
optional_params["repetition_penalty"] = presence_penalty
if "echo" in passed_params:
# https://huggingface.co/docs/huggingface_hub/main/en/package_reference/inference_client#huggingface_hub.InferenceClient.text_generation.decoder_input_details
# Return the decoder input token logprobs and ids. You must set details=True as well for it to be taken into account. Defaults to False
optional_params["decoder_input_details"] = special_params["echo"]
passed_params.pop("echo", None) # since we handle translating echo, we should not send it to TGI request
elif custom_llm_provider == "together_ai":
## check if unsupported param passed in
supported_params = ["stream", "temperature", "max_tokens", "top_p", "stop", "frequency_penalty"]
_check_valid_arg(supported_params=supported_params)
if stream:
optional_params["stream_tokens"] = stream
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if max_tokens is not None:
optional_params["max_tokens"] = max_tokens
if frequency_penalty is not None:
optional_params["repetition_penalty"] = frequency_penalty # https://docs.together.ai/reference/inference
if stop is not None:
optional_params["stop"] = stop
elif custom_llm_provider == "ai21":
## check if unsupported param passed in
supported_params = ["stream", "n", "temperature", "max_tokens", "top_p", "stop", "frequency_penalty", "presence_penalty"]
_check_valid_arg(supported_params=supported_params)
if stream:
optional_params["stream"] = stream
if n is not None:
optional_params["numResults"] = n
if max_tokens is not None:
optional_params["maxTokens"] = max_tokens
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["topP"] = top_p
if stop is not None:
optional_params["stopSequences"] = stop
if frequency_penalty is not None:
optional_params["frequencyPenalty"] = {"scale": frequency_penalty}
if presence_penalty is not None:
optional_params["presencePenalty"] = {"scale": presence_penalty}
elif custom_llm_provider == "palm": # https://developers.generativeai.google/tutorials/curl_quickstart
## check if unsupported param passed in
supported_params = ["temperature", "top_p", "stream", "n", "stop", "max_tokens"]
_check_valid_arg(supported_params=supported_params)
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if stream:
optional_params["stream"] = stream
if n is not None:
optional_params["candidate_count"] = n
if stop is not None:
optional_params["stop_sequences"] = stop
if max_tokens is not None:
optional_params["max_output_tokens"] = max_tokens
elif (
custom_llm_provider == "vertex_ai"
):
## check if unsupported param passed in
supported_params = ["temperature", "top_p", "max_tokens", "stream"]
_check_valid_arg(supported_params=supported_params)
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if stream:
optional_params["stream"] = stream
if max_tokens is not None:
optional_params["max_output_tokens"] = max_tokens
elif custom_llm_provider == "sagemaker":
if "llama-2" in model:
# llama-2 models on sagemaker support the following args
"""
max_new_tokens: Model generates text until the output length (excluding the input context length) reaches max_new_tokens. If specified, it must be a positive integer.
temperature: Controls the randomness in the output. Higher temperature results in output sequence with low-probability words and lower temperature results in output sequence with high-probability words. If temperature -> 0, it results in greedy decoding. If specified, it must be a positive float.
top_p: In each step of text generation, sample from the smallest possible set of words with cumulative probability top_p. If specified, it must be a float between 0 and 1.
return_full_text: If True, input text will be part of the output generated text. If specified, it must be boolean. The default value for it is False.
"""
## check if unsupported param passed in
supported_params = ["temperature", "max_tokens", "stream"]
_check_valid_arg(supported_params=supported_params)
if max_tokens is not None:
optional_params["max_new_tokens"] = max_tokens
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if stream:
optional_params["stream"] = stream
else:
## check if unsupported param passed in
supported_params = []
_check_valid_arg(supported_params=supported_params)
elif custom_llm_provider == "bedrock":
if "ai21" in model:
supported_params = ["max_tokens", "temperature", "top_p", "stream"]
_check_valid_arg(supported_params=supported_params)
# params "maxTokens":200,"temperature":0,"topP":250,"stop_sequences":[],
# https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=j2-ultra
if max_tokens is not None:
optional_params["maxTokens"] = max_tokens
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["topP"] = top_p
if stream:
optional_params["stream"] = stream
elif "anthropic" in model:
supported_params = ["max_tokens", "temperature", "stop", "top_p", "stream"]
_check_valid_arg(supported_params=supported_params)
# anthropic params on bedrock
# \"max_tokens_to_sample\":300,\"temperature\":0.5,\"top_p\":1,\"stop_sequences\":[\"\\\\n\\\\nHuman:\"]}"
if max_tokens is not None:
optional_params["max_tokens_to_sample"] = max_tokens
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if stop is not None:
optional_params["stop_sequences"] = stop
if stream:
optional_params["stream"] = stream
elif "amazon" in model: # amazon titan llms
supported_params = ["max_tokens", "temperature", "stop", "top_p", "stream"]
_check_valid_arg(supported_params=supported_params)
# see https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=titan-large
if max_tokens is not None:
optional_params["maxTokenCount"] = max_tokens
if temperature is not None:
optional_params["temperature"] = temperature
if stop is not None:
filtered_stop = _map_and_modify_arg({"stop": stop}, provider="bedrock", model=model)
optional_params["stopSequences"] = filtered_stop["stop"]
if top_p is not None:
optional_params["topP"] = top_p
if stream:
optional_params["stream"] = stream
elif "meta" in model: # amazon / meta llms
supported_params = ["max_tokens", "temperature", "top_p", "stream"]
_check_valid_arg(supported_params=supported_params)
# see https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=titan-large
if max_tokens is not None:
optional_params["max_gen_len"] = max_tokens
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if stream:
optional_params["stream"] = stream
elif "cohere" in model: # cohere models on bedrock
supported_params = ["stream", "temperature", "max_tokens"]
_check_valid_arg(supported_params=supported_params)
# handle cohere params
if stream:
optional_params["stream"] = stream
if temperature is not None:
optional_params["temperature"] = temperature
if max_tokens is not None:
optional_params["max_tokens"] = max_tokens
elif custom_llm_provider == "aleph_alpha":
supported_params = ["max_tokens", "stream", "top_p", "temperature", "presence_penalty", "frequency_penalty", "n", "stop"]
_check_valid_arg(supported_params=supported_params)
if max_tokens is not None:
optional_params["maximum_tokens"] = max_tokens
if stream:
optional_params["stream"] = stream
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if presence_penalty is not None:
optional_params["presence_penalty"] = presence_penalty
if frequency_penalty is not None:
optional_params["frequency_penalty"] = frequency_penalty
if n is not None:
optional_params["n"] = n
if stop is not None:
optional_params["stop_sequences"] = stop
elif custom_llm_provider == "ollama":
supported_params = ["max_tokens", "stream", "top_p", "temperature", "frequency_penalty", "stop"]
_check_valid_arg(supported_params=supported_params)
if max_tokens is not None:
optional_params["num_predict"] = max_tokens
if stream:
optional_params["stream"] = stream
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if frequency_penalty is not None:
optional_params["repeat_penalty"] = frequency_penalty
if stop is not None:
optional_params["stop_sequences"] = stop
elif custom_llm_provider == "nlp_cloud":
supported_params = ["max_tokens", "stream", "temperature", "top_p", "presence_penalty", "frequency_penalty", "n", "stop"]
_check_valid_arg(supported_params=supported_params)
if max_tokens is not None:
optional_params["max_length"] = max_tokens
if stream:
optional_params["stream"] = stream
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if presence_penalty is not None:
optional_params["presence_penalty"] = presence_penalty
if frequency_penalty is not None:
optional_params["frequency_penalty"] = frequency_penalty
if n is not None:
optional_params["num_return_sequences"] = n
if stop is not None:
optional_params["stop_sequences"] = stop
elif custom_llm_provider == "petals":
supported_params = ["max_tokens", "temperature", "top_p", "stream"]
_check_valid_arg(supported_params=supported_params)
# max_new_tokens=1,temperature=0.9, top_p=0.6
if max_tokens is not None:
optional_params["max_new_tokens"] = max_tokens
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if stream:
optional_params["stream"] = stream
elif custom_llm_provider == "deepinfra":
supported_params = ["temperature", "top_p", "n", "stream", "stop", "max_tokens", "presence_penalty", "frequency_penalty", "logit_bias", "user"]
_check_valid_arg(supported_params=supported_params)
if temperature is not None:
if temperature == 0 and model == "mistralai/Mistral-7B-Instruct-v0.1": # this model does no support temperature == 0
temperature = 0.0001 # close to 0
optional_params["temperature"] = temperature
if top_p:
optional_params["top_p"] = top_p
if n:
optional_params["n"] = n
if stream:
optional_params["stream"] = stream
if stop:
optional_params["stop"] = stop
if max_tokens:
optional_params["max_tokens"] = max_tokens
if presence_penalty:
optional_params["presence_penalty"] = presence_penalty
if frequency_penalty:
optional_params["frequency_penalty"] = frequency_penalty
if logit_bias:
optional_params["logit_bias"] = logit_bias
if user:
optional_params["user"] = user
elif custom_llm_provider == "perplexity":
supported_params = ["temperature", "top_p", "stream", "max_tokens", "presence_penalty", "frequency_penalty"]
_check_valid_arg(supported_params=supported_params)
if temperature is not None:
if temperature == 0 and model == "mistral-7b-instruct": # this model does no support temperature == 0
temperature = 0.0001 # close to 0
optional_params["temperature"] = temperature
if top_p:
optional_params["top_p"] = top_p
if stream:
optional_params["stream"] = stream
if max_tokens:
optional_params["max_tokens"] = max_tokens
if presence_penalty:
optional_params["presence_penalty"] = presence_penalty
if frequency_penalty:
optional_params["frequency_penalty"] = frequency_penalty
elif custom_llm_provider == "anyscale":
supported_params = ["temperature", "top_p", "stream", "max_tokens"]
_check_valid_arg(supported_params=supported_params)
optional_params = non_default_params
if temperature is not None:
if temperature == 0 and model == "mistralai/Mistral-7B-Instruct-v0.1": # this model does no support temperature == 0
temperature = 0.0001 # close to 0
optional_params["temperature"] = temperature
if top_p:
optional_params["top_p"] = top_p
if stream:
optional_params["stream"] = stream
if max_tokens:
optional_params["max_tokens"] = max_tokens
else: # assume passing in params for openai/azure openai
supported_params = ["functions", "function_call", "temperature", "top_p", "n", "stream", "stop", "max_tokens", "presence_penalty", "frequency_penalty", "logit_bias", "user", "response_format", "seed", "tools", "tool_choice", "max_retries"]
_check_valid_arg(supported_params=supported_params)
if functions is not None:
optional_params["functions"] = functions
if function_call is not None:
optional_params["function_call"] = function_call
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if n is not None:
optional_params["n"] = n
if stream is not None:
optional_params["stream"] = stream
if stop is not None:
optional_params["stop"] = stop
if max_tokens is not None:
optional_params["max_tokens"] = max_tokens
if presence_penalty is not None:
optional_params["presence_penalty"] = presence_penalty
if frequency_penalty is not None:
optional_params["frequency_penalty"] = frequency_penalty
if logit_bias is not None:
optional_params["logit_bias"] = logit_bias
if user is not None:
optional_params["user"] = user
if response_format is not None:
optional_params["response_format"] = response_format
if seed is not None:
optional_params["seed"] = seed
if tools is not None:
optional_params["tools"] = tools
if tool_choice is not None:
optional_params["tool_choice"] = tool_choice
if max_retries is not None:
optional_params["max_retries"] = max_retries
optional_params = non_default_params
# if user passed in non-default kwargs for specific providers/models, pass them along
for k in passed_params.keys():
if k not in default_params.keys():
optional_params[k] = passed_params[k]
return optional_params
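# Illustrative mapping example (comment-only; the values are arbitrary): calling
#   get_optional_params(model="claude-instant-1", custom_llm_provider="anthropic",
#                       max_tokens=256, stop="Human:")
# maps the OpenAI-style params to {"max_tokens_to_sample": 256, "stop_sequences": ["Human:"]},
# i.e. each provider branch renames params into its native names, and unsupported params either
# raise UnsupportedParamsError or are dropped when litellm.drop_params is True.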
def get_llm_provider(model: str, custom_llm_provider: Optional[str] = None, api_base: Optional[str] = None, api_key: Optional[str] = None):
try:
dynamic_api_key = None
# check if llm provider provided
if custom_llm_provider:
return model, custom_llm_provider, dynamic_api_key, api_base
if api_key and api_key.startswith("os.environ/"):
api_key_env_name = api_key.replace("os.environ/", "")
dynamic_api_key = os.getenv(api_key_env_name)
# check if llm provider part of model name
if model.split("/",1)[0] in litellm.provider_list and model.split("/",1)[0] not in litellm.model_list:
custom_llm_provider = model.split("/", 1)[0]
model = model.split("/", 1)[1]
if custom_llm_provider == "perplexity":
# perplexity is openai compatible, we just need to set this to custom_openai and have the api_base be https://api.perplexity.ai
api_base = "https://api.perplexity.ai"
dynamic_api_key = os.getenv("PERPLEXITYAI_API_KEY")
elif custom_llm_provider == "anyscale":
# anyscale is openai compatible, we just need to set this to custom_openai and have the api_base be https://api.endpoints.anyscale.com/v1
api_base = "https://api.endpoints.anyscale.com/v1"
dynamic_api_key = os.getenv("ANYSCALE_API_KEY")
elif custom_llm_provider == "deepinfra":
                # deepinfra is openai compatible, we just need to set this to custom_openai and have the api_base be https://api.deepinfra.com/v1/openai
api_base = "https://api.deepinfra.com/v1/openai"
dynamic_api_key = os.getenv("DEEPINFRA_API_KEY")
return model, custom_llm_provider, dynamic_api_key, api_base
# check if api base is a known openai compatible endpoint
if api_base:
for endpoint in litellm.openai_compatible_endpoints:
if endpoint in api_base:
if endpoint == "api.perplexity.ai":
custom_llm_provider = "perplexity"
dynamic_api_key = os.getenv("PERPLEXITYAI_API_KEY")
elif endpoint == "api.endpoints.anyscale.com/v1":
custom_llm_provider = "anyscale"
dynamic_api_key = os.getenv("ANYSCALE_API_KEY")
elif endpoint == "api.deepinfra.com/v1/openai":
custom_llm_provider = "deepinfra"
dynamic_api_key = os.getenv("DEEPINFRA_API_KEY")
return model, custom_llm_provider, dynamic_api_key, api_base
        # check if model is in a known model provider list -> for huggingface models, raise an exception since they don't have a fixed provider (could be togetherai, anyscale, baseten, runpod, etc.)
## openai - chatcompletion + text completion
if model in litellm.open_ai_chat_completion_models or "ft:gpt-3.5-turbo" in model:
custom_llm_provider = "openai"
elif model in litellm.open_ai_text_completion_models:
custom_llm_provider = "text-completion-openai"
## anthropic
elif model in litellm.anthropic_models:
custom_llm_provider = "anthropic"
## cohere
elif model in litellm.cohere_models:
custom_llm_provider = "cohere"
## replicate
elif model in litellm.replicate_models or ":" in model:
model_parts = model.split(":")
if len(model_parts) > 1 and len(model_parts[1])==64: ## checks if model name has a 64 digit code - e.g. "meta/llama-2-70b-chat:02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3"
custom_llm_provider = "replicate"
elif model in litellm.replicate_models:
custom_llm_provider = "replicate"
## openrouter
elif model in litellm.openrouter_models:
custom_llm_provider = "openrouter"
        ## maritalk
elif model in litellm.maritalk_models:
custom_llm_provider = "maritalk"
## vertex - text + chat models
elif(
model in litellm.vertex_chat_models or
model in litellm.vertex_code_chat_models or
model in litellm.vertex_text_models or
model in litellm.vertex_code_text_models
):
custom_llm_provider = "vertex_ai"
## ai21
elif model in litellm.ai21_models:
custom_llm_provider = "ai21"
## aleph_alpha
elif model in litellm.aleph_alpha_models:
custom_llm_provider = "aleph_alpha"
## baseten
elif model in litellm.baseten_models:
custom_llm_provider = "baseten"
## nlp_cloud
elif model in litellm.nlp_cloud_models:
custom_llm_provider = "nlp_cloud"
## petals
elif model in litellm.petals_models:
custom_llm_provider = "petals"
## bedrock
elif model in litellm.bedrock_models:
custom_llm_provider = "bedrock"
# openai embeddings
elif model in litellm.open_ai_embedding_models:
custom_llm_provider = "openai"
# cohere embeddings
elif model in litellm.cohere_embedding_models:
custom_llm_provider = "cohere"
elif model in litellm.bedrock_embedding_models:
custom_llm_provider = "bedrock"
if custom_llm_provider is None or custom_llm_provider=="":
print() # noqa
print("\033[1;31mProvider List: https://docs.litellm.ai/docs/providers\033[0m") # noqa
print() # noqa
raise ValueError(f"LLM Provider NOT provided. Pass in the LLM provider you are trying to call. E.g. For 'Huggingface' inference endpoints pass in `completion(model='huggingface/{model}',..)` Learn more: https://docs.litellm.ai/docs/providers")
return model, custom_llm_provider, dynamic_api_key, api_base
except Exception as e:
raise e
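# Usage sketch (comment-only; outputs show the expected shape under a default environment):
#   get_llm_provider("gpt-3.5-turbo")
#   # -> ("gpt-3.5-turbo", "openai", None, None)
#   get_llm_provider("perplexity/mistral-7b-instruct")
#   # -> strips the "perplexity/" prefix, sets api_base to https://api.perplexity.ai
#   #    and reads PERPLEXITYAI_API_KEY for the dynamic api key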
def get_api_key(llm_provider: str, dynamic_api_key: Optional[str]):
api_key = (dynamic_api_key or litellm.api_key)
# openai
if llm_provider == "openai" or llm_provider == "text-completion-openai":
api_key = (
api_key or
litellm.openai_key or
get_secret("OPENAI_API_KEY")
)
# anthropic
elif llm_provider == "anthropic":
api_key = (
api_key or
litellm.anthropic_key or
get_secret("ANTHROPIC_API_KEY")
)
# ai21
elif llm_provider == "ai21":
api_key = (
api_key or
litellm.ai21_key or
get_secret("AI211_API_KEY")
)
# aleph_alpha
elif llm_provider == "aleph_alpha":
api_key = (
api_key or
litellm.aleph_alpha_key or
get_secret("ALEPH_ALPHA_API_KEY")
)
# baseten
elif llm_provider == "baseten":
api_key = (
api_key or
litellm.baseten_key or
get_secret("BASETEN_API_KEY")
)
# cohere
elif llm_provider == "cohere":
api_key = (
api_key or
litellm.cohere_key or
get_secret("COHERE_API_KEY")
)
# huggingface
elif llm_provider == "huggingface":
api_key = (
api_key or
litellm.huggingface_key or
get_secret("HUGGINGFACE_API_KEY")
)
# nlp_cloud
elif llm_provider == "nlp_cloud":
api_key = (
api_key or
litellm.nlp_cloud_key or
get_secret("NLP_CLOUD_API_KEY")
)
# replicate
elif llm_provider == "replicate":
api_key = (
api_key or
litellm.replicate_key or
get_secret("REPLICATE_API_KEY")
)
# together_ai
elif llm_provider == "together_ai":
api_key = (
api_key or
litellm.togetherai_api_key or
get_secret("TOGETHERAI_API_KEY") or
get_secret("TOGETHER_AI_TOKEN")
)
return api_key
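# Usage sketch (comment-only): get_api_key(llm_provider="cohere", dynamic_api_key=None) falls back
# through litellm.cohere_key and then the COHERE_API_KEY secret; "cohere" is just one example of
# the provider branches handled above.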
def get_max_tokens(model: str):
"""
Get the maximum number of tokens allowed for a given model.
Parameters:
model (str): The name of the model.
Returns:
int: The maximum number of tokens allowed for the given model.
Raises:
Exception: If the model is not mapped yet.
Example:
>>> get_max_tokens("gpt-4")
8192
"""
def _get_max_position_embeddings(model_name):
# Construct the URL for the config.json file
config_url = f"https://huggingface.co/{model_name}/raw/main/config.json"
try:
# Make the HTTP request to get the raw JSON file
response = requests.get(config_url)
response.raise_for_status() # Raise an exception for bad responses (4xx or 5xx)
# Parse the JSON response
config_json = response.json()
# Extract and return the max_position_embeddings
max_position_embeddings = config_json.get("max_position_embeddings")
if max_position_embeddings is not None:
return max_position_embeddings
else:
return None
except requests.exceptions.RequestException as e:
return None
try:
if model in litellm.model_cost:
return litellm.model_cost[model]["max_tokens"]
model, custom_llm_provider, _, _ = get_llm_provider(model=model)
if custom_llm_provider == "huggingface":
max_tokens = _get_max_position_embeddings(model_name=model)
return max_tokens
else:
raise Exception()
except:
raise Exception("This model isn't mapped yet. Add it here - https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json")
def get_model_info(model: str):
"""
Get a dict for the maximum tokens (context window),
input_cost_per_token, output_cost_per_token for a given model.
Parameters:
model (str): The name of the model.
Returns:
dict: A dictionary containing the following information:
- max_tokens (int): The maximum number of tokens allowed for the given model.
- input_cost_per_token (float): The cost per token for input.
- output_cost_per_token (float): The cost per token for output.
- litellm_provider (str): The provider of the model (e.g., "openai").
- mode (str): The mode of the model (e.g., "chat" or "completion").
Raises:
Exception: If the model is not mapped yet.
Example:
>>> get_model_info("gpt-4")
{
"max_tokens": 8192,
"input_cost_per_token": 0.00003,
"output_cost_per_token": 0.00006,
"litellm_provider": "openai",
"mode": "chat"
}
"""
def _get_max_position_embeddings(model_name):
# Construct the URL for the config.json file
config_url = f"https://huggingface.co/{model_name}/raw/main/config.json"
try:
# Make the HTTP request to get the raw JSON file
response = requests.get(config_url)
response.raise_for_status() # Raise an exception for bad responses (4xx or 5xx)
# Parse the JSON response
config_json = response.json()
# Extract and return the max_position_embeddings
max_position_embeddings = config_json.get("max_position_embeddings")
if max_position_embeddings is not None:
return max_position_embeddings
else:
return None
except requests.exceptions.RequestException as e:
return None
try:
if model in litellm.model_cost:
return litellm.model_cost[model]
model, custom_llm_provider, _, _ = get_llm_provider(model=model)
if custom_llm_provider == "huggingface":
max_tokens = _get_max_position_embeddings(model_name=model)
return {
"max_tokens": max_tokens,
"input_cost_per_token": 0,
"output_cost_per_token": 0,
"litellm_provider": "huggingface",
"mode": "chat"
}
else:
raise Exception()
except:
raise Exception("This model isn't mapped yet. Add it here - https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json")
def json_schema_type(python_type_name: str):
"""Converts standard python types to json schema types
Parameters
----------
python_type_name : str
__name__ of type
Returns
-------
str
a standard JSON schema type, "string" if not recognized.
"""
python_to_json_schema_types = {
str.__name__: "string",
int.__name__: "integer",
float.__name__: "number",
bool.__name__: "boolean",
list.__name__: "array",
dict.__name__: "object",
"NoneType": "null",
}
return python_to_json_schema_types.get(python_type_name, "string")
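# Illustrative examples (comment-only):
#   json_schema_type("int")      # -> "integer"
#   json_schema_type("list")     # -> "array"
#   json_schema_type("MyClass")  # -> "string" (unrecognized types fall back to "string")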
def function_to_dict(input_function): # noqa: C901
"""Using type hints and numpy-styled docstring,
    produce a dictionary usable for OpenAI function calling
Parameters
----------
input_function : function
A function with a numpy-style docstring
Returns
-------
    dictionary
        A dictionary to add to the list passed to the `functions` parameter of `litellm.completion`
"""
# Get function name and docstring
try:
import inspect
from numpydoc.docscrape import NumpyDocString
from ast import literal_eval
except Exception as e:
raise e
name = input_function.__name__
docstring = inspect.getdoc(input_function)
numpydoc = NumpyDocString(docstring)
description = "\n".join([s.strip() for s in numpydoc["Summary"]])
# Get function parameters and their types from annotations and docstring
parameters = {}
required_params = []
param_info = inspect.signature(input_function).parameters
for param_name, param in param_info.items():
if hasattr(param, "annotation"):
param_type = json_schema_type(param.annotation.__name__)
else:
param_type = None
param_description = None
param_enum = None
# Try to extract param description from docstring using numpydoc
for param_data in numpydoc["Parameters"]:
if param_data.name == param_name:
if hasattr(param_data, "type"):
# replace type from docstring rather than annotation
param_type = param_data.type
if "optional" in param_type:
param_type = param_type.split(",")[0]
elif "{" in param_type:
# may represent a set of acceptable values
# translating as enum for function calling
try:
param_enum = str(list(literal_eval(param_type)))
param_type = "string"
except Exception:
pass
param_type = json_schema_type(param_type)
param_description = "\n".join([s.strip() for s in param_data.desc])
param_dict = {
"type": param_type,
"description": param_description,
"enum": param_enum,
}
parameters[param_name] = dict(
[(k, v) for k, v in param_dict.items() if isinstance(v, str)]
)
# Check if the parameter has no default value (i.e., it's required)
if param.default == param.empty:
required_params.append(param_name)
# Create the dictionary
result = {
"name": name,
"description": description,
"parameters": {
"type": "object",
"properties": parameters,
},
}
# Add "required" key if there are required parameters
if required_params:
result["parameters"]["required"] = required_params
return result
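# Usage sketch (comment-only): `get_weather` is a hypothetical function, shown only to illustrate
# the expected numpy-style docstring input (requires numpydoc to be installed).
#   def get_weather(city: str, unit: str = "celsius"):
#       """Get the current weather.
#
#       Parameters
#       ----------
#       city : str
#           Name of the city.
#       unit : str, optional
#           Temperature unit.
#       """
#   function_to_dict(get_weather)
#   # -> {"name": "get_weather", "description": "Get the current weather.",
#   #     "parameters": {"type": "object", "properties": {...}, "required": ["city"]}}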
def load_test_model(
model: str,
custom_llm_provider: str = "",
api_base: str = "",
prompt: str = "",
num_calls: int = 0,
force_timeout: int = 0,
):
test_prompt = "Hey, how's it going"
test_calls = 100
if prompt:
test_prompt = prompt
if num_calls:
test_calls = num_calls
messages = [[{"role": "user", "content": test_prompt}] for _ in range(test_calls)]
start_time = time.time()
try:
litellm.batch_completion(
model=model,
messages=messages,
custom_llm_provider=custom_llm_provider,
api_base=api_base,
force_timeout=force_timeout,
)
end_time = time.time()
response_time = end_time - start_time
return {
"total_response_time": response_time,
"calls_made": 100,
"status": "success",
"exception": None,
}
except Exception as e:
end_time = time.time()
response_time = end_time - start_time
return {
"total_response_time": response_time,
"calls_made": 100,
"status": "failed",
"exception": e,
}
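# Illustrative sketch (model name and call count below are placeholders):
#
#   stats = load_test_model(model="gpt-3.5-turbo", num_calls=5)
#   # stats -> {"total_response_time": ..., "calls_made": 5, "status": "success", "exception": None}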
def validate_environment(model: Optional[str]=None) -> dict:
"""
Checks if the environment variables are valid for the given model.
Args:
model (Optional[str]): The name of the model. Defaults to None.
Returns:
dict: A dictionary containing the following keys:
- keys_in_environment (bool): True if all the required keys are present in the environment, False otherwise.
- missing_keys (List[str]): A list of missing keys in the environment.
"""
keys_in_environment = False
missing_keys: List[str] = []
if model is None:
return {"keys_in_environment": keys_in_environment, "missing_keys": missing_keys}
## EXTRACT LLM PROVIDER - if model name provided
try:
custom_llm_provider = get_llm_provider(model=model)
except:
custom_llm_provider = None
# # check if llm provider part of model name
# if model.split("/",1)[0] in litellm.provider_list:
# custom_llm_provider = model.split("/", 1)[0]
# model = model.split("/", 1)[1]
# custom_llm_provider_passed_in = True
if custom_llm_provider:
if custom_llm_provider == "openai":
if "OPENAI_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("OPENAI_API_KEY")
elif custom_llm_provider == "azure":
if ("AZURE_API_BASE" in os.environ
and "AZURE_API_VERSION" in os.environ
and "AZURE_API_KEY" in os.environ):
keys_in_environment = True
else:
missing_keys.extend(["AZURE_API_BASE", "AZURE_API_VERSION", "AZURE_API_KEY"])
elif custom_llm_provider == "anthropic":
if "ANTHROPIC_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("ANTHROPIC_API_KEY")
elif custom_llm_provider == "cohere":
if "COHERE_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("COHERE_API_KEY")
elif custom_llm_provider == "replicate":
if "REPLICATE_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("REPLICATE_API_KEY")
elif custom_llm_provider == "openrouter":
if "OPENROUTER_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("OPENROUTER_API_KEY")
elif custom_llm_provider == "vertex_ai":
if ("VERTEXAI_PROJECT" in os.environ
and "VERTEXAI_LOCATION" in os.environ):
keys_in_environment = True
else:
missing_keys.extend(["VERTEXAI_PROJECT", "VERTEXAI_PROJECT"])
elif custom_llm_provider == "huggingface":
if "HUGGINGFACE_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("HUGGINGFACE_API_KEY")
elif custom_llm_provider == "ai21":
if "AI21_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("AI21_API_KEY")
elif custom_llm_provider == "together_ai":
if "TOGETHERAI_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("TOGETHERAI_API_KEY")
elif custom_llm_provider == "aleph_alpha":
if "ALEPH_ALPHA_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("ALEPH_ALPHA_API_KEY")
elif custom_llm_provider == "baseten":
if "BASETEN_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("BASETEN_API_KEY")
elif custom_llm_provider == "nlp_cloud":
if "NLP_CLOUD_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("NLP_CLOUD_API_KEY")
elif custom_llm_provider == "bedrock":
if "AWS_ACCESS_KEY_ID" in os.environ and "AWS_SECRET_ACCESS_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("AWS_ACCESS_KEY_ID")
missing_keys.append("AWS_SECRET_ACCESS_KEY")
else:
## openai - chatcompletion + text completion
        if model in litellm.open_ai_chat_completion_models or model in litellm.open_ai_text_completion_models:
if "OPENAI_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("OPENAI_API_KEY")
## anthropic
elif model in litellm.anthropic_models:
if "ANTHROPIC_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("ANTHROPIC_API_KEY")
## cohere
elif model in litellm.cohere_models:
if "COHERE_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("COHERE_API_KEY")
## replicate
elif model in litellm.replicate_models:
if "REPLICATE_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("REPLICATE_API_KEY")
## openrouter
elif model in litellm.openrouter_models:
if "OPENROUTER_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("OPENROUTER_API_KEY")
## vertex - text + chat models
elif model in litellm.vertex_chat_models or model in litellm.vertex_text_models:
if ("VERTEXAI_PROJECT" in os.environ
and "VERTEXAI_LOCATION" in os.environ):
keys_in_environment = True
else:
missing_keys.extend(["VERTEXAI_PROJECT", "VERTEXAI_PROJECT"])
## huggingface
elif model in litellm.huggingface_models:
if "HUGGINGFACE_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("HUGGINGFACE_API_KEY")
## ai21
elif model in litellm.ai21_models:
if "AI21_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("AI21_API_KEY")
## together_ai
elif model in litellm.together_ai_models:
if "TOGETHERAI_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("TOGETHERAI_API_KEY")
## aleph_alpha
elif model in litellm.aleph_alpha_models:
if "ALEPH_ALPHA_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("ALEPH_ALPHA_API_KEY")
## baseten
elif model in litellm.baseten_models:
if "BASETEN_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("BASETEN_API_KEY")
## nlp_cloud
elif model in litellm.nlp_cloud_models:
if "NLP_CLOUD_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("NLP_CLOUD_API_KEY")
return {"keys_in_environment": keys_in_environment, "missing_keys": missing_keys}
def set_callbacks(callback_list, function_id=None):
global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, traceloopLogger, heliconeLogger, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger, promptLayerLogger, langFuseLogger, customLogger, weightsBiasesLogger, langsmithLogger
try:
for callback in callback_list:
print_verbose(f"callback: {callback}")
if callback == "sentry":
try:
import sentry_sdk
except ImportError:
print_verbose("Package 'sentry_sdk' is missing. Installing it...")
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "sentry_sdk"]
)
import sentry_sdk
sentry_sdk_instance = sentry_sdk
sentry_trace_rate = (
os.environ.get("SENTRY_API_TRACE_RATE")
if "SENTRY_API_TRACE_RATE" in os.environ
else "1.0"
)
sentry_sdk_instance.init(
dsn=os.environ.get("SENTRY_DSN"),
traces_sample_rate=float(sentry_trace_rate),
)
capture_exception = sentry_sdk_instance.capture_exception
add_breadcrumb = sentry_sdk_instance.add_breadcrumb
elif callback == "posthog":
try:
from posthog import Posthog
except ImportError:
print_verbose("Package 'posthog' is missing. Installing it...")
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "posthog"]
)
from posthog import Posthog
posthog = Posthog(
project_api_key=os.environ.get("POSTHOG_API_KEY"),
host=os.environ.get("POSTHOG_API_URL"),
)
elif callback == "slack":
try:
from slack_bolt import App
except ImportError:
print_verbose("Package 'slack_bolt' is missing. Installing it...")
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "slack_bolt"]
)
from slack_bolt import App
slack_app = App(
token=os.environ.get("SLACK_API_TOKEN"),
signing_secret=os.environ.get("SLACK_API_SECRET"),
)
alerts_channel = os.environ["SLACK_API_CHANNEL"]
print_verbose(f"Initialized Slack App: {slack_app}")
elif callback == "traceloop":
traceloopLogger = TraceloopLogger()
elif callback == "helicone":
heliconeLogger = HeliconeLogger()
elif callback == "llmonitor":
llmonitorLogger = LLMonitorLogger()
elif callback == "promptlayer":
promptLayerLogger = PromptLayerLogger()
elif callback == "langfuse":
langFuseLogger = LangFuseLogger()
elif callback == "wandb":
weightsBiasesLogger = WeightsBiasesLogger()
elif callback == "langsmith":
langsmithLogger = LangsmithLogger()
elif callback == "aispend":
aispendLogger = AISpendLogger()
elif callback == "berrispend":
berrispendLogger = BerriSpendLogger()
elif callback == "supabase":
print_verbose(f"instantiating supabase")
supabaseClient = Supabase()
elif callback == "lite_debugger":
print_verbose(f"instantiating lite_debugger")
if function_id:
liteDebuggerClient = LiteDebugger(email=function_id)
elif litellm.token:
liteDebuggerClient = LiteDebugger(email=litellm.token)
elif litellm.email:
liteDebuggerClient = LiteDebugger(email=litellm.email)
else:
liteDebuggerClient = LiteDebugger(email=str(uuid.uuid4()))
elif callable(callback):
customLogger = CustomLogger()
except Exception as e:
raise e
# NOTE: DEPRECATING this in favor of using failure_handler() in Logging:
def handle_failure(exception, traceback_exception, start_time, end_time, args, kwargs):
global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger
try:
# print_verbose(f"handle_failure args: {args}")
# print_verbose(f"handle_failure kwargs: {kwargs}")
success_handler = additional_details.pop("success_handler", None)
failure_handler = additional_details.pop("failure_handler", None)
additional_details["Event_Name"] = additional_details.pop(
"failed_event_name", "litellm.failed_query"
)
print_verbose(f"self.failure_callback: {litellm.failure_callback}")
for callback in litellm.failure_callback:
try:
if callback == "slack":
slack_msg = ""
if len(kwargs) > 0:
for key in kwargs:
slack_msg += f"{key}: {kwargs[key]}\n"
if len(args) > 0:
for i, arg in enumerate(args):
slack_msg += f"LiteLLM_Args_{str(i)}: {arg}"
for detail in additional_details:
slack_msg += f"{detail}: {additional_details[detail]}\n"
slack_msg += f"Traceback: {traceback_exception}"
slack_app.client.chat_postMessage(
channel=alerts_channel, text=slack_msg
)
elif callback == "sentry":
capture_exception(exception)
elif callback == "posthog":
print_verbose(
f"inside posthog, additional_details: {len(additional_details.keys())}"
)
ph_obj = {}
if len(kwargs) > 0:
ph_obj = kwargs
if len(args) > 0:
for i, arg in enumerate(args):
ph_obj["litellm_args_" + str(i)] = arg
for detail in additional_details:
ph_obj[detail] = additional_details[detail]
event_name = additional_details["Event_Name"]
print_verbose(f"ph_obj: {ph_obj}")
print_verbose(f"PostHog Event Name: {event_name}")
if "user_id" in additional_details:
posthog.capture(
additional_details["user_id"], event_name, ph_obj
)
else: # PostHog calls require a unique id to identify a user - https://posthog.com/docs/libraries/python
unique_id = str(uuid.uuid4())
posthog.capture(unique_id, event_name)
print_verbose(f"successfully logged to PostHog!")
elif callback == "berrispend":
print_verbose("reaches berrispend for logging!")
model = args[0] if len(args) > 0 else kwargs["model"]
messages = args[1] if len(args) > 1 else kwargs["messages"]
result = {
"model": model,
"created": time.time(),
"error": traceback_exception,
"usage": {
"prompt_tokens": prompt_token_calculator(
model, messages=messages
),
"completion_tokens": 0,
},
}
berrispendLogger.log_event(
model=model,
messages=messages,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
elif callback == "aispend":
print_verbose("reaches aispend for logging!")
model = args[0] if len(args) > 0 else kwargs["model"]
messages = args[1] if len(args) > 1 else kwargs["messages"]
result = {
"model": model,
"created": time.time(),
"usage": {
"prompt_tokens": prompt_token_calculator(
model, messages=messages
),
"completion_tokens": 0,
},
}
aispendLogger.log_event(
model=model,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
elif callback == "supabase":
print_verbose("reaches supabase for logging!")
print_verbose(f"supabaseClient: {supabaseClient}")
model = args[0] if len(args) > 0 else kwargs["model"]
messages = args[1] if len(args) > 1 else kwargs["messages"]
result = {
"model": model,
"created": time.time(),
"error": traceback_exception,
"usage": {
"prompt_tokens": prompt_token_calculator(
model, messages=messages
),
"completion_tokens": 0,
},
}
supabaseClient.log_event(
model=model,
messages=messages,
end_user=kwargs.get("user", "default"),
response_obj=result,
start_time=start_time,
end_time=end_time,
litellm_call_id=kwargs["litellm_call_id"],
print_verbose=print_verbose,
)
except:
print_verbose(
f"Error Occurred while logging failure: {traceback.format_exc()}"
)
pass
if failure_handler and callable(failure_handler):
call_details = {
"exception": exception,
"additional_details": additional_details,
}
failure_handler(call_details)
pass
except Exception as e:
# LOGGING
exception_logging(logger_fn=user_logger_fn, exception=e)
pass
def convert_to_model_response_object(response_object: Optional[dict]=None, model_response_object: Optional[Union[ModelResponse, EmbeddingResponse]]=None, response_type: Literal["completion", "embedding"] = "completion"):
try:
if response_type == "completion" and (model_response_object is None or isinstance(model_response_object, ModelResponse)):
if response_object is None or model_response_object is None:
raise Exception("Error in response object format")
choice_list=[]
for idx, choice in enumerate(response_object["choices"]):
message = Message(
content=choice["message"].get("content", None),
role=choice["message"]["role"],
function_call=choice["message"].get("function_call", None),
tool_calls=choice["message"].get("tool_calls", None)
)
finish_reason = choice.get("finish_reason", None)
if finish_reason == None:
# gpt-4 vision can return 'finish_reason' or 'finish_details'
finish_reason = choice.get("finish_details")
choice = Choices(finish_reason=finish_reason, index=idx, message=message)
choice_list.append(choice)
model_response_object.choices = choice_list
if "usage" in response_object and response_object["usage"] is not None:
model_response_object.usage.completion_tokens = response_object["usage"].get("completion_tokens", 0) # type: ignore
model_response_object.usage.prompt_tokens = response_object["usage"].get("prompt_tokens", 0) # type: ignore
model_response_object.usage.total_tokens = response_object["usage"].get("total_tokens", 0) # type: ignore
if "id" in response_object:
model_response_object.id = response_object["id"]
if "system_fingerprint" in response_object:
model_response_object.system_fingerprint = response_object["system_fingerprint"]
if "model" in response_object:
model_response_object.model = response_object["model"]
return model_response_object
elif response_type == "embedding" and (model_response_object is None or isinstance(model_response_object, EmbeddingResponse)):
if response_object is None:
raise Exception("Error in response object format")
if model_response_object is None:
model_response_object = EmbeddingResponse()
if "model" in response_object:
model_response_object.model = response_object["model"]
if "object" in response_object:
model_response_object.object = response_object["object"]
model_response_object.data = response_object["data"]
if "usage" in response_object and response_object["usage"] is not None:
model_response_object.usage.completion_tokens = response_object["usage"].get("completion_tokens", 0) # type: ignore
model_response_object.usage.prompt_tokens = response_object["usage"].get("prompt_tokens", 0) # type: ignore
model_response_object.usage.total_tokens = response_object["usage"].get("total_tokens", 0) # type: ignore
return model_response_object
except Exception as e:
raise Exception(f"Invalid response object {e}")
# NOTE: DEPRECATING this in favor of using success_handler() in Logging:
def handle_success(args, kwargs, result, start_time, end_time):
global heliconeLogger, aispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger
try:
model = args[0] if len(args) > 0 else kwargs["model"]
input = (
args[1]
if len(args) > 1
else kwargs.get("messages", kwargs.get("input", None))
)
success_handler = additional_details.pop("success_handler", None)
failure_handler = additional_details.pop("failure_handler", None)
additional_details["Event_Name"] = additional_details.pop(
"successful_event_name", "litellm.succes_query"
)
for callback in litellm.success_callback:
try:
if callback == "posthog":
ph_obj = {}
for detail in additional_details:
ph_obj[detail] = additional_details[detail]
event_name = additional_details["Event_Name"]
if "user_id" in additional_details:
posthog.capture(
additional_details["user_id"], event_name, ph_obj
)
else: # PostHog calls require a unique id to identify a user - https://posthog.com/docs/libraries/python
unique_id = str(uuid.uuid4())
posthog.capture(unique_id, event_name, ph_obj)
pass
elif callback == "slack":
slack_msg = ""
for detail in additional_details:
slack_msg += f"{detail}: {additional_details[detail]}\n"
slack_app.client.chat_postMessage(
channel=alerts_channel, text=slack_msg
)
elif callback == "aispend":
print_verbose("reaches aispend for logging!")
model = args[0] if len(args) > 0 else kwargs["model"]
aispendLogger.log_event(
model=model,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
except Exception as e:
# LOGGING
exception_logging(logger_fn=user_logger_fn, exception=e)
print_verbose(
f"[Non-Blocking] Success Callback Error - {traceback.format_exc()}"
)
pass
if success_handler and callable(success_handler):
success_handler(args, kwargs)
pass
except Exception as e:
# LOGGING
exception_logging(logger_fn=user_logger_fn, exception=e)
print_verbose(
f"[Non-Blocking] Success Callback Error - {traceback.format_exc()}"
)
pass
def acreate(*args, **kwargs): ## Thin client to handle the acreate langchain call
return litellm.acompletion(*args, **kwargs)
def prompt_token_calculator(model, messages):
# use tiktoken or anthropic's tokenizer depending on the model
text = " ".join(message["content"] for message in messages)
num_tokens = 0
if "claude" in model:
try:
import anthropic
except:
Exception("Anthropic import failed please run `pip install anthropic`")
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
anthropic = Anthropic()
num_tokens = anthropic.count_tokens(text)
else:
num_tokens = len(encoding.encode(text))
return num_tokens
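# Illustrative sketch: rough token counts for a message list (model names are placeholders).
#
#   msgs = [{"role": "user", "content": "Hello there"}]
#   n = prompt_token_calculator("gpt-3.5-turbo", messages=msgs)      # tiktoken path
#   # n_claude = prompt_token_calculator("claude-2", messages=msgs)  # anthropic tokenizer (requires `anthropic`)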
def valid_model(model):
try:
# for a given model name, check if the user has the right permissions to access the model
if (
model in litellm.open_ai_chat_completion_models
or model in litellm.open_ai_text_completion_models
):
openai.Model.retrieve(model)
else:
messages = [{"role": "user", "content": "Hello World"}]
litellm.completion(model=model, messages=messages)
except:
raise BadRequestError(message="", model=model, llm_provider="")
def check_valid_key(model: str, api_key: str):
"""
Checks if a given API key is valid for a specific model by making a litellm.completion call with max_tokens=10
Args:
model (str): The name of the model to check the API key against.
api_key (str): The API key to be checked.
Returns:
bool: True if the API key is valid for the model, False otherwise.
"""
messages = [{"role": "user", "content": "Hey, how's it going?"}]
try:
litellm.completion(model=model, messages=messages, api_key=api_key, max_tokens=10)
return True
except AuthenticationError as e:
return False
except Exception as e:
return False
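# Illustrative sketch (the key below is a placeholder, not a real credential):
#
#   if check_valid_key(model="gpt-3.5-turbo", api_key="sk-..."):
#       print("key works for this model")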
def _should_retry(status_code: int):
"""
Reimplementation of openai's should retry logic, since that one can't be imported.
https://github.com/openai/openai-python/blob/af67cfab4210d8e497c05390ce14f39105c77519/src/openai/_base_client.py#L639
"""
# If the server explicitly says whether or not to retry, obey.
# Retry on request timeouts.
if status_code == 408:
return True
# Retry on lock timeouts.
if status_code == 409:
return True
# Retry on rate limits.
if status_code == 429:
return True
# Retry internal errors.
if status_code >= 500:
return True
return False
def _calculate_retry_after(remaining_retries: int, max_retries: int, response_headers: Optional[httpx.Headers]=None):
"""
Reimplementation of openai's calculate retry after, since that one can't be imported.
https://github.com/openai/openai-python/blob/af67cfab4210d8e497c05390ce14f39105c77519/src/openai/_base_client.py#L631
"""
try:
import email # openai import
        # About the Retry-After header: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
        # The header value may be an integer number of seconds or an HTTP-date string.
        # See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After#syntax for details.
if response_headers is not None:
retry_header = response_headers.get("retry-after")
try:
retry_after = int(retry_header)
except Exception:
retry_date_tuple = email.utils.parsedate_tz(retry_header)
if retry_date_tuple is None:
retry_after = -1
else:
retry_date = email.utils.mktime_tz(retry_date_tuple)
retry_after = int(retry_date - time.time())
else:
retry_after = -1
except Exception:
retry_after = -1
# If the API asks us to wait a certain amount of time (and it's a reasonable amount), just do what it says.
if 0 < retry_after <= 60:
return retry_after
initial_retry_delay = 0.5
max_retry_delay = 8.0
nb_retries = max_retries - remaining_retries
# Apply exponential backoff, but not more than the max.
sleep_seconds = min(initial_retry_delay * pow(2.0, nb_retries), max_retry_delay)
    # Apply some jitter: multiply by a random factor between 0.75 and 1.0.
jitter = 1 - 0.25 * random.random()
timeout = sleep_seconds * jitter
return timeout if timeout >= 0 else 0
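# Illustrative sketch of the backoff behaviour: with max_retries=5 and no Retry-After
# header, the base wait grows 0.5s, 1s, 2s, 4s, 8s (capped at 8s) across attempts,
# each multiplied by a jitter factor between 0.75 and 1.0.
#
#   wait = _calculate_retry_after(remaining_retries=3, max_retries=5)  # roughly 1.5 to 2.0 seconds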
# integration helper function
def modify_integration(integration_name, integration_params):
global supabaseClient
if integration_name == "supabase":
if "table_name" in integration_params:
Supabase.supabase_table_name = integration_params["table_name"]
# custom prompt helper function
def register_prompt_template(model: str, roles: dict, initial_prompt_value: str = "", final_prompt_value: str = ""):
"""
Register a prompt template to follow your custom format for a given model
Args:
model (str): The name of the model.
roles (dict): A dictionary mapping roles to their respective prompt values.
initial_prompt_value (str, optional): The initial prompt value. Defaults to "".
final_prompt_value (str, optional): The final prompt value. Defaults to "".
Returns:
dict: The updated custom prompt dictionary.
Example usage:
```
import litellm
litellm.register_prompt_template(
model="llama-2",
initial_prompt_value="You are a good assistant" # [OPTIONAL]
roles={
"system": {
"pre_message": "[INST] <<SYS>>\n", # [OPTIONAL]
"post_message": "\n<</SYS>>\n [/INST]\n" # [OPTIONAL]
},
"user": {
"pre_message": "[INST] ", # [OPTIONAL]
"post_message": " [/INST]" # [OPTIONAL]
},
"assistant": {
"pre_message": "\n" # [OPTIONAL]
"post_message": "\n" # [OPTIONAL]
}
        },
final_prompt_value="Now answer as best you can:" # [OPTIONAL]
)
```
"""
model = get_llm_provider(model=model)[0]
litellm.custom_prompt_dict[model] = {
"roles": roles,
"initial_prompt_value": initial_prompt_value,
"final_prompt_value": final_prompt_value
}
return litellm.custom_prompt_dict
####### DEPRECATED ################
def get_all_keys(llm_provider=None):
try:
global last_fetched_at_keys
# if user is using hosted product -> instantiate their env with their hosted api keys - refresh every 5 minutes
print_verbose(f"Reaches get all keys, llm_provider: {llm_provider}")
user_email = (
os.getenv("LITELLM_EMAIL")
or litellm.email
or litellm.token
or os.getenv("LITELLM_TOKEN")
)
if user_email:
time_delta = 0
if last_fetched_at_keys != None:
current_time = time.time()
time_delta = current_time - last_fetched_at_keys
if (
time_delta > 300 or last_fetched_at_keys == None or llm_provider
            ): # if the llm provider is passed in, assume this is happening due to an AuthError for that provider
# make the api call
last_fetched_at = time.time()
print_verbose(f"last_fetched_at: {last_fetched_at}")
response = requests.post(
url="http://api.litellm.ai/get_all_keys",
headers={"content-type": "application/json"},
data=json.dumps({"user_email": user_email}),
)
print_verbose(f"get model key response: {response.text}")
data = response.json()
# update model list
for key, value in data[
"model_keys"
].items(): # follows the LITELLM API KEY format - <UPPERCASE_PROVIDER_NAME>_API_KEY - e.g. HUGGINGFACE_API_KEY
os.environ[key] = value
# set model alias map
for model_alias, value in data["model_alias_map"].items():
litellm.model_alias_map[model_alias] = value
return "it worked!"
return None
return None
except:
print_verbose(
f"[Non-Blocking Error] get_all_keys error - {traceback.format_exc()}"
)
pass
def get_model_list():
global last_fetched_at, print_verbose
try:
# if user is using hosted product -> get their updated model list
user_email = (
os.getenv("LITELLM_EMAIL")
or litellm.email
or litellm.token
or os.getenv("LITELLM_TOKEN")
)
if user_email:
# make the api call
last_fetched_at = time.time()
print_verbose(f"last_fetched_at: {last_fetched_at}")
response = requests.post(
url="http://api.litellm.ai/get_model_list",
headers={"content-type": "application/json"},
data=json.dumps({"user_email": user_email}),
)
print_verbose(f"get_model_list response: {response.text}")
data = response.json()
# update model list
model_list = data["model_list"]
# # check if all model providers are in environment
# model_providers = data["model_providers"]
# missing_llm_provider = None
# for item in model_providers:
# if f"{item.upper()}_API_KEY" not in os.environ:
# missing_llm_provider = item
# break
# # update environment - if required
# threading.Thread(target=get_all_keys, args=(missing_llm_provider)).start()
return model_list
return [] # return empty list by default
except:
print_verbose(
f"[Non-Blocking Error] get_model_list error - {traceback.format_exc()}"
)
####### EXCEPTION MAPPING ################
def exception_type(
model,
original_exception,
custom_llm_provider,
completion_kwargs={},
):
global user_logger_fn, liteDebuggerClient
exception_mapping_worked = False
if litellm.suppress_debug_info is False:
print() # noqa
print("\033[1;31mGive Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new\033[0m") # noqa
print("LiteLLM.Info: If you need to debug this error, use `litellm.set_verbose=True'.") # noqa
print() # noqa
try:
if model:
error_str = str(original_exception)
if isinstance(original_exception, BaseException):
exception_type = type(original_exception).__name__
else:
exception_type = ""
if "Request Timeout Error" in error_str or "Request timed out" in error_str:
exception_mapping_worked = True
raise Timeout(
message=f"APITimeoutError - Request timed out",
model=model,
llm_provider=custom_llm_provider
)
if custom_llm_provider == "openai" or custom_llm_provider == "text-completion-openai" or custom_llm_provider == "custom_openai":
if "This model's maximum context length is" in error_str or "Request too large" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"OpenAIException - {original_exception.message}",
llm_provider="openai",
model=model,
response=original_exception.response
)
elif "invalid_request_error" in error_str and "Incorrect API key provided" not in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=f"OpenAIException - {original_exception.message}",
llm_provider="openai",
model=model,
response=original_exception.response
)
elif hasattr(original_exception, "status_code"):
exception_mapping_worked = True
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"OpenAIException - {original_exception.message}",
llm_provider="openai",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"OpenAIException - {original_exception.message}",
model=model,
llm_provider="openai",
)
if original_exception.status_code == 422:
exception_mapping_worked = True
raise BadRequestError(
message=f"OpenAIException - {original_exception.message}",
model=model,
llm_provider="openai",
response=original_exception.response
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"OpenAIException - {original_exception.message}",
model=model,
llm_provider="openai",
response=original_exception.response
)
elif original_exception.status_code == 503:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"OpenAIException - {original_exception.message}",
model=model,
llm_provider="openai",
response=original_exception.response
)
elif original_exception.status_code == 504: # gateway timeout error
exception_mapping_worked = True
raise Timeout(
message=f"OpenAIException - {original_exception.message}",
model=model,
llm_provider="openai",
)
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"OpenAIException - {original_exception.message}",
llm_provider="openai",
model=model,
request=original_exception.request
)
else:
# if no status code then it is an APIConnectionError: https://github.com/openai/openai-python#handling-errors
raise APIConnectionError(
__cause__=original_exception.__cause__,
llm_provider=custom_llm_provider,
model=model,
request=original_exception.request
)
elif custom_llm_provider == "anthropic": # one of the anthropics
if hasattr(original_exception, "message"):
if "prompt is too long" in original_exception.message or "prompt: length" in original_exception.message:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=original_exception.message,
model=model,
llm_provider="anthropic",
response=original_exception.response
)
if "Invalid API Key" in original_exception.message:
exception_mapping_worked = True
raise AuthenticationError(
message=original_exception.message,
model=model,
llm_provider="anthropic",
response=original_exception.response
)
if hasattr(original_exception, "status_code"):
print_verbose(f"status_code: {original_exception.status_code}")
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"AnthropicException - {original_exception.message}",
llm_provider="anthropic",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 400 or original_exception.status_code == 413:
exception_mapping_worked = True
raise BadRequestError(
message=f"AnthropicException - {original_exception.message}",
model=model,
llm_provider="anthropic",
response=original_exception.response
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"AnthropicException - {original_exception.message}",
model=model,
llm_provider="anthropic",
request=original_exception.request
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"AnthropicException - {original_exception.message}",
llm_provider="anthropic",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 500:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"AnthropicException - {original_exception.message}",
llm_provider="anthropic",
model=model,
response=original_exception.response
)
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"AnthropicException - {original_exception.message}",
llm_provider="anthropic",
model=model,
request=original_exception.request
)
elif custom_llm_provider == "replicate":
if "Incorrect authentication token" in error_str:
exception_mapping_worked = True
raise AuthenticationError(
message=f"ReplicateException - {error_str}",
llm_provider="replicate",
model=model,
response=original_exception.response
)
elif "input is too long" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"ReplicateException - {error_str}",
model=model,
llm_provider="replicate",
response=original_exception.response
)
elif exception_type == "ModelError":
exception_mapping_worked = True
raise BadRequestError(
message=f"ReplicateException - {error_str}",
model=model,
llm_provider="replicate",
response=original_exception.response
)
elif "Request was throttled" in error_str:
exception_mapping_worked = True
raise RateLimitError(
message=f"ReplicateException - {error_str}",
llm_provider="replicate",
model=model,
response=original_exception.response
)
elif hasattr(original_exception, "status_code"):
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"ReplicateException - {original_exception.message}",
llm_provider="replicate",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 400 or original_exception.status_code == 422 or original_exception.status_code == 413:
exception_mapping_worked = True
raise BadRequestError(
message=f"ReplicateException - {original_exception.message}",
model=model,
llm_provider="replicate",
response=original_exception.response
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"ReplicateException - {original_exception.message}",
model=model,
llm_provider="replicate",
request=original_exception.request
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"ReplicateException - {original_exception.message}",
llm_provider="replicate",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 500:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"ReplicateException - {original_exception.message}",
llm_provider="replicate",
model=model,
response=original_exception.response
)
exception_mapping_worked = True
raise APIError(
status_code=500,
message=f"ReplicateException - {str(original_exception)}",
llm_provider="replicate",
model=model,
request=original_exception.request
)
elif custom_llm_provider == "bedrock":
if "too many tokens" in error_str or "expected maxLength:" in error_str or "Input is too long" in error_str or "Too many input tokens" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"BedrockException: Context Window Error - {error_str}",
model=model,
llm_provider="bedrock",
response=original_exception.response
)
if "Malformed input request" in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=f"BedrockException - {error_str}",
model=model,
llm_provider="bedrock",
response=original_exception.response
)
if "Unable to locate credentials" in error_str or "The security token included in the request is invalid" in error_str:
exception_mapping_worked = True
raise AuthenticationError(
message=f"BedrockException Invalid Authentication - {error_str}",
model=model,
llm_provider="bedrock",
response=original_exception.response
)
if "throttlingException" in error_str or "ThrottlingException" in error_str:
exception_mapping_worked = True
raise RateLimitError(
message=f"BedrockException: Rate Limit Error - {error_str}",
model=model,
llm_provider="bedrock",
response=original_exception.response
)
if hasattr(original_exception, "status_code"):
if original_exception.status_code == 500:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"BedrockException - {original_exception.message}",
llm_provider="bedrock",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"BedrockException - {original_exception.message}",
llm_provider="bedrock",
model=model,
response=original_exception.response
)
elif custom_llm_provider == "sagemaker":
if "Unable to locate credentials" in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=f"SagemakerException - {error_str}",
model=model,
llm_provider="sagemaker",
response=original_exception.response
)
elif custom_llm_provider == "vertex_ai":
if "Vertex AI API has not been used in project" in error_str or "Unable to find your project" in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=f"VertexAIException - {error_str}",
model=model,
llm_provider="vertex_ai",
response=original_exception.response
)
elif "403" in error_str:
exception_mapping_worked = True
raise AuthenticationError(
message=f"VertexAIException - {error_str}",
model=model,
llm_provider="vertex_ai",
response=original_exception.response
)
if hasattr(original_exception, "status_code"):
if original_exception.status_code == 400:
exception_mapping_worked = True
raise BadRequestError(
message=f"VertexAIException - {error_str}",
model=model,
llm_provider="vertex_ai",
response=original_exception.response
)
if original_exception.status_code == 500:
exception_mapping_worked = True
raise APIError(
message=f"VertexAIException - {error_str}",
status_code=500,
model=model,
llm_provider="vertex_ai",
request=original_exception.request
)
elif custom_llm_provider == "palm":
if "503 Getting metadata" in error_str:
# auth errors look like this
# 503 Getting metadata from plugin failed with error: Reauthentication is needed. Please run `gcloud auth application-default login` to reauthenticate.
exception_mapping_worked = True
raise BadRequestError(
message=f"PalmException - Invalid api key",
model=model,
llm_provider="palm",
response=original_exception.response
)
if "400 Request payload size exceeds" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"PalmException - {error_str}",
model=model,
llm_provider="palm",
response=original_exception.response
)
if hasattr(original_exception, "status_code"):
if original_exception.status_code == 400:
exception_mapping_worked = True
raise BadRequestError(
message=f"PalmException - {error_str}",
model=model,
llm_provider="palm",
response=original_exception.response
)
                # Failed: Error occurred: 400 Request payload size exceeds the limit: 20000 bytes
elif custom_llm_provider == "cohere": # Cohere
if (
"invalid api token" in error_str
or "No API key provided." in error_str
):
exception_mapping_worked = True
raise AuthenticationError(
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model,
response=original_exception.response
)
elif "too many tokens" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"CohereException - {original_exception.message}",
model=model,
llm_provider="cohere",
response=original_exception.response
)
elif hasattr(original_exception, "status_code"):
if original_exception.status_code == 400 or original_exception.status_code == 498:
exception_mapping_worked = True
raise BadRequestError(
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 500:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model,
response=original_exception.response
)
elif (
"CohereConnectionError" in exception_type
): # cohere seems to fire these errors when we load test it (1k+ messages / min)
exception_mapping_worked = True
raise RateLimitError(
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model,
response=original_exception.response
)
elif "invalid type:" in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model,
response=original_exception.response
)
elif "Unexpected server error" in error_str:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model,
response=original_exception.response
)
else:
if hasattr(original_exception, "status_code"):
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model,
request=original_exception.request
)
raise original_exception
elif custom_llm_provider == "huggingface":
if "length limit exceeded" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=error_str,
model=model,
llm_provider="huggingface",
response=original_exception.response
)
elif "A valid user token is required" in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=error_str,
llm_provider="huggingface",
model=model,
response=original_exception.response
)
if hasattr(original_exception, "status_code"):
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"HuggingfaceException - {original_exception.message}",
llm_provider="huggingface",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 400:
exception_mapping_worked = True
raise BadRequestError(
message=f"HuggingfaceException - {original_exception.message}",
model=model,
llm_provider="huggingface",
response=original_exception.response
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"HuggingfaceException - {original_exception.message}",
model=model,
llm_provider="huggingface",
request=original_exception.request
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"HuggingfaceException - {original_exception.message}",
llm_provider="huggingface",
model=model,
response=original_exception.response
)
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"HuggingfaceException - {original_exception.message}",
llm_provider="huggingface",
model=model,
request=original_exception.request
)
elif custom_llm_provider == "ai21":
if hasattr(original_exception, "message"):
if "Prompt has too many tokens" in original_exception.message:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"AI21Exception - {original_exception.message}",
model=model,
llm_provider="ai21",
response=original_exception.response
)
if "Bad or missing API token." in original_exception.message:
exception_mapping_worked = True
raise BadRequestError(
message=f"AI21Exception - {original_exception.message}",
model=model,
llm_provider="ai21",
response=original_exception.response
)
if hasattr(original_exception, "status_code"):
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"AI21Exception - {original_exception.message}",
llm_provider="ai21",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"AI21Exception - {original_exception.message}",
model=model,
llm_provider="ai21",
request=original_exception.request
)
if original_exception.status_code == 422:
exception_mapping_worked = True
raise BadRequestError(
message=f"AI21Exception - {original_exception.message}",
model=model,
llm_provider="ai21",
response=original_exception.response
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"AI21Exception - {original_exception.message}",
llm_provider="ai21",
model=model,
response=original_exception.response
)
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"AI21Exception - {original_exception.message}",
llm_provider="ai21",
model=model,
request=original_exception.request
)
elif custom_llm_provider == "nlp_cloud":
if "detail" in error_str:
if "Input text length should not exceed" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"NLPCloudException - {error_str}",
model=model,
llm_provider="nlp_cloud",
response=original_exception.response
)
elif "value is not a valid" in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=f"NLPCloudException - {error_str}",
model=model,
llm_provider="nlp_cloud",
response=original_exception.response
)
else:
exception_mapping_worked = True
raise APIError(
status_code=500,
message=f"NLPCloudException - {error_str}",
model=model,
llm_provider="nlp_cloud",
request=original_exception.request
)
if hasattr(original_exception, "status_code"): # https://docs.nlpcloud.com/?shell#errors
if original_exception.status_code == 400 or original_exception.status_code == 406 or original_exception.status_code == 413 or original_exception.status_code == 422:
exception_mapping_worked = True
raise BadRequestError(
message=f"NLPCloudException - {original_exception.message}",
llm_provider="nlp_cloud",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 401 or original_exception.status_code == 403:
exception_mapping_worked = True
raise AuthenticationError(
message=f"NLPCloudException - {original_exception.message}",
llm_provider="nlp_cloud",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 522 or original_exception.status_code == 524:
exception_mapping_worked = True
raise Timeout(
message=f"NLPCloudException - {original_exception.message}",
model=model,
llm_provider="nlp_cloud",
request=original_exception.request
)
elif original_exception.status_code == 429 or original_exception.status_code == 402:
exception_mapping_worked = True
raise RateLimitError(
message=f"NLPCloudException - {original_exception.message}",
llm_provider="nlp_cloud",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 500 or original_exception.status_code == 503:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"NLPCloudException - {original_exception.message}",
llm_provider="nlp_cloud",
model=model,
request=original_exception.request
)
elif original_exception.status_code == 504 or original_exception.status_code == 520:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"NLPCloudException - {original_exception.message}",
model=model,
llm_provider="nlp_cloud",
response=original_exception.response
)
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"NLPCloudException - {original_exception.message}",
llm_provider="nlp_cloud",
model=model,
request=original_exception.request
)
elif custom_llm_provider == "together_ai":
import json
try:
error_response = json.loads(error_str)
except:
error_response = {"error": error_str}
if "error" in error_response and "`inputs` tokens + `max_new_tokens` must be <=" in error_response["error"]:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"TogetherAIException - {error_response['error']}",
model=model,
llm_provider="together_ai",
response=original_exception.response
)
elif "error" in error_response and "invalid private key" in error_response["error"]:
exception_mapping_worked = True
raise AuthenticationError(
message=f"TogetherAIException - {error_response['error']}",
llm_provider="together_ai",
model=model,
response=original_exception.response
)
elif "error" in error_response and "INVALID_ARGUMENT" in error_response["error"]:
exception_mapping_worked = True
raise BadRequestError(
message=f"TogetherAIException - {error_response['error']}",
model=model,
llm_provider="together_ai",
response=original_exception.response
)
elif "error" in error_response and "API key doesn't match expected format." in error_response["error"]:
exception_mapping_worked = True
raise BadRequestError(
message=f"TogetherAIException - {error_response['error']}",
model=model,
llm_provider="together_ai",
response=original_exception.response
)
elif "error_type" in error_response and error_response["error_type"] == "validation":
exception_mapping_worked = True
raise BadRequestError(
message=f"TogetherAIException - {error_response['error']}",
model=model,
llm_provider="together_ai",
response=original_exception.response
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"TogetherAIException - {original_exception.message}",
model=model,
llm_provider="together_ai",
request=original_exception.request
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"TogetherAIException - {original_exception.message}",
llm_provider="together_ai",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 524:
exception_mapping_worked = True
raise Timeout(
message=f"TogetherAIException - {original_exception.message}",
llm_provider="together_ai",
model=model,
)
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"TogetherAIException - {original_exception.message}",
llm_provider="together_ai",
model=model,
request=original_exception.request
)
elif custom_llm_provider == "aleph_alpha":
if "This is longer than the model's maximum context length" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model,
response=original_exception.response
)
elif "InvalidToken" in error_str or "No token provided" in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model,
response=original_exception.response
)
elif hasattr(original_exception, "status_code"):
print_verbose(f"status code: {original_exception.status_code}")
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model
)
elif original_exception.status_code == 400:
exception_mapping_worked = True
raise BadRequestError(
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 500:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model,
response=original_exception.response
)
raise original_exception
raise original_exception
elif custom_llm_provider == "ollama":
if "no attribute 'async_get_ollama_response_stream" in error_str:
exception_mapping_worked = True
raise ImportError("Import error - trying to use async for ollama. import async_generator failed. Try 'pip install async_generator'")
if isinstance(original_exception, dict):
error_str = original_exception.get("error", "")
else:
error_str = str(original_exception)
if "no such file or directory" in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=f"OllamaException: Invalid Model/Model not loaded - {original_exception}",
model=model,
llm_provider="ollama",
response=original_exception.response
)
elif "Failed to establish a new connection" in error_str:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"OllamaException: {original_exception}",
llm_provider="ollama",
model=model,
response=original_exception.response
)
elif "Invalid response object from API" in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=f"OllamaException: {original_exception}",
llm_provider="ollama",
model=model,
response=original_exception.response
)
elif custom_llm_provider == "vllm":
if hasattr(original_exception, "status_code"):
if original_exception.status_code == 0:
exception_mapping_worked = True
raise APIConnectionError(
message=f"VLLMException - {original_exception.message}",
llm_provider="vllm",
model=model,
request=original_exception.request
)
elif custom_llm_provider == "azure":
if "This model's maximum context length is" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"AzureException - {original_exception.message}",
llm_provider="azure",
model=model,
response=original_exception.response
)
elif "invalid_request_error" in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=f"AzureException - {original_exception.message}",
llm_provider="azure",
model=model,
response=original_exception.response
)
elif hasattr(original_exception, "status_code"):
exception_mapping_worked = True
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"AzureException - {original_exception.message}",
llm_provider="azure",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"AzureException - {original_exception.message}",
model=model,
llm_provider="azure",
request=original_exception.request
)
if original_exception.status_code == 422:
exception_mapping_worked = True
raise BadRequestError(
message=f"AzureException - {original_exception.message}",
model=model,
llm_provider="azure",
response=original_exception.response
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"AzureException - {original_exception.message}",
model=model,
llm_provider="azure",
response=original_exception.response
)
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"AzureException - {original_exception.message}",
llm_provider="azure",
model=model,
request=original_exception.request
)
else:
# if no status code then it is an APIConnectionError: https://github.com/openai/openai-python#handling-errors
raise APIConnectionError(
__cause__=original_exception.__cause__,
llm_provider="azure",
model=model,
request=original_exception.request
)
if "BadRequestError.__init__() missing 1 required positional argument: 'param'" in str(original_exception): # deal with edge-case invalid request error bug in openai-python sdk
exception_mapping_worked = True
raise BadRequestError(
message=f"OpenAIException: This can happen due to missing AZURE_API_VERSION: {str(original_exception)}",
model=model,
llm_provider=custom_llm_provider,
response=original_exception.response
)
        else: # ensure generic errors always return APIConnectionError
exception_mapping_worked = True
if hasattr(original_exception, "request"):
raise APIConnectionError(
message=f"{str(original_exception)}",
llm_provider=custom_llm_provider,
model=model,
request=original_exception.request
)
else:
raise APIConnectionError(
message=f"{str(original_exception)}",
llm_provider=custom_llm_provider,
model=model,
request= httpx.Request(method="POST", url="https://api.openai.com/v1/") # stub the request
)
except Exception as e:
# LOGGING
exception_logging(
logger_fn=user_logger_fn,
additional_args={
"exception_mapping_worked": exception_mapping_worked,
"original_exception": original_exception,
},
exception=e,
)
## AUTH ERROR
if isinstance(e, AuthenticationError) and (
litellm.email or "LITELLM_EMAIL" in os.environ
):
threading.Thread(target=get_all_keys, args=(e.llm_provider,)).start()
# don't let an error with mapping interrupt the user from receiving an error from the llm api calls
if exception_mapping_worked:
raise e
else:
raise original_exception
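# Illustrative sketch: exception_type is invoked internally by the completion wrappers,
# so callers typically just catch the mapped OpenAI-style exception classes used in this
# module (the model name and message below are placeholders):
#
#   try:
#       litellm.completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "hi"}])
#   except AuthenticationError:
#       ...  # invalid or missing key
#   except RateLimitError:
#       ...  # back off and retry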
####### CRASH REPORTING ################
def safe_crash_reporting(model=None, exception=None, custom_llm_provider=None):
data = {
"model": model,
"exception": str(exception),
"custom_llm_provider": custom_llm_provider,
}
threading.Thread(target=litellm_telemetry, args=(data,), daemon=True).start()
def get_or_generate_uuid():
temp_dir = os.path.join(os.path.abspath(os.sep), "tmp")
uuid_file = os.path.join(temp_dir, "litellm_uuid.txt")
try:
# Try to open the file and load the UUID
with open(uuid_file, "r") as file:
uuid_value = file.read()
if uuid_value:
uuid_value = uuid_value.strip()
else:
raise FileNotFoundError
except FileNotFoundError:
# Generate a new UUID if the file doesn't exist or is empty
try:
new_uuid = uuid.uuid4()
uuid_value = str(new_uuid)
with open(uuid_file, "w") as file:
file.write(uuid_value)
except: # if writing to tmp/litellm_uuid.txt then retry writing to litellm_uuid.txt
try:
new_uuid = uuid.uuid4()
uuid_value = str(new_uuid)
with open("litellm_uuid.txt", "w") as file:
file.write(uuid_value)
except: # if this 3rd attempt fails just pass
# Good first issue for someone to improve this function :)
return
except:
# [Non-Blocking Error]
return
return uuid_value
def litellm_telemetry(data):
# Load or generate the UUID
uuid_value = ""
try:
uuid_value = get_or_generate_uuid()
except:
uuid_value = str(uuid.uuid4())
try:
# Prepare the data to send to litellm logging api
try:
pkg_version = importlib.metadata.version("litellm")
except:
pkg_version = None
if "model" not in data:
data["model"] = None
payload = {
"uuid": uuid_value,
"data": data,
"version:": pkg_version
}
# Make the POST request to litellm logging api
response = requests.post(
"https://litellm-logging.onrender.com/logging",
headers={"Content-Type": "application/json"},
json=payload,
)
response.raise_for_status() # Raise an exception for HTTP errors
except:
# [Non-Blocking Error]
return
######### Secret Manager ############################
# checks if user has passed in a secret manager client
# if passed in then checks the secret there
def get_secret(secret_name):
if litellm.secret_manager_client != None:
# TODO: check which secret manager is being used
# currently only supports Infisical
try:
secret = litellm.secret_manager_client.get_secret(secret_name).secret_value
except:
secret = None
return secret
else:
return os.environ.get(secret_name)
######## Streaming Class ############################
# wraps the completion stream to return the correct format for the model
# replicate/anthropic/cohere
class CustomStreamWrapper:
def __init__(self, completion_stream, model, custom_llm_provider=None, logging_obj=None):
self.model = model
self.custom_llm_provider = custom_llm_provider
self.logging_obj = logging_obj
self.completion_stream = completion_stream
self.sent_first_chunk = False
self.sent_last_chunk = False
self.special_tokens = ["<|assistant|>", "<|system|>", "<|user|>", "<s>", "</s>"]
self.holding_chunk = ""
self.complete_response = ""
if self.logging_obj:
# Log the type of the received item
self.logging_obj.post_call(str(type(completion_stream)))
def __iter__(self):
return self
def __aiter__(self):
return self
def process_chunk(self, chunk: str):
"""
NLP Cloud streaming returns the entire response, for each chunk. Process this, to only return the delta.
"""
try:
chunk = chunk.strip()
self.complete_response = self.complete_response.strip()
if chunk.startswith(self.complete_response):
# Remove last_sent_chunk only if it appears at the start of the new chunk
chunk = chunk[len(self.complete_response):]
self.complete_response += chunk
return chunk
except Exception as e:
raise e
def logging(self, text):
if self.logging_obj:
self.logging_obj.post_call(text)
def check_special_tokens(self, chunk: str, finish_reason: Optional[str]):
hold = False
if finish_reason:
for token in self.special_tokens:
if token in chunk:
chunk = chunk.replace(token, "")
return hold, chunk
if self.sent_first_chunk is True:
return hold, chunk
curr_chunk = self.holding_chunk + chunk
curr_chunk = curr_chunk.strip()
for token in self.special_tokens:
if len(curr_chunk) < len(token) and curr_chunk in token:
hold = True
elif len(curr_chunk) >= len(token):
if token in curr_chunk:
self.holding_chunk = curr_chunk.replace(token, "")
hold = True
else:
pass
if hold is False: # reset
self.holding_chunk = ""
return hold, curr_chunk
def handle_anthropic_chunk(self, chunk):
str_line = chunk.decode("utf-8") # Convert bytes to string
text = ""
is_finished = False
finish_reason = None
if str_line.startswith("data:"):
data_json = json.loads(str_line[5:])
text = data_json.get("completion", "")
if data_json.get("stop_reason", None):
is_finished = True
finish_reason = data_json["stop_reason"]
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
elif "error" in str_line:
raise ValueError(f"Unable to parse response. Original response: {str_line}")
else:
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
def handle_together_ai_chunk(self, chunk):
chunk = chunk.decode("utf-8")
text = ""
is_finished = False
finish_reason = None
if "text" in chunk:
text_index = chunk.find('"text":"') # this checks if text: exists
text_start = text_index + len('"text":"')
text_end = chunk.find('"}', text_start)
if text_index != -1 and text_end != -1:
extracted_text = chunk[text_start:text_end]
text = extracted_text
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
elif "[DONE]" in chunk:
return {"text": text, "is_finished": True, "finish_reason": "stop"}
elif "error" in chunk:
raise ValueError(chunk)
else:
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
def handle_huggingface_chunk(self, chunk):
try:
if type(chunk) != str:
chunk = chunk.decode("utf-8") # DO NOT REMOVE this: This is required for HF inference API + Streaming
text = ""
is_finished = False
finish_reason = ""
print_verbose(f"chunk: {chunk}")
if chunk.startswith("data:"):
data_json = json.loads(chunk[5:])
print_verbose(f"data json: {data_json}")
if "token" in data_json and "text" in data_json["token"]:
text = data_json["token"]["text"]
if data_json.get("details", False) and data_json["details"].get("finish_reason", False):
is_finished = True
finish_reason = data_json["details"]["finish_reason"]
elif data_json.get("generated_text", False): # if full generated text exists, then stream is complete
text = "" # don't return the final bos token
is_finished = True
finish_reason = "stop"
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
elif "error" in chunk:
raise ValueError(chunk)
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except Exception as e:
traceback.print_exc()
# raise(e)
def handle_ai21_chunk(self, chunk): # fake streaming
chunk = chunk.decode("utf-8")
data_json = json.loads(chunk)
try:
text = data_json["completions"][0]["data"]["text"]
is_finished = True
finish_reason = "stop"
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
def handle_maritalk_chunk(self, chunk): # fake streaming
chunk = chunk.decode("utf-8")
data_json = json.loads(chunk)
try:
text = data_json["answer"]
is_finished = True
finish_reason = "stop"
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
def handle_nlp_cloud_chunk(self, chunk):
text = ""
is_finished = False
finish_reason = ""
try:
if "dolphin" in self.model:
chunk = self.process_chunk(chunk=chunk)
else:
data_json = json.loads(chunk)
chunk = data_json["generated_text"]
text = chunk
if "[DONE]" in text:
text = text.replace("[DONE]", "")
is_finished = True
finish_reason = "stop"
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except Exception as e:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
def handle_aleph_alpha_chunk(self, chunk):
chunk = chunk.decode("utf-8")
data_json = json.loads(chunk)
try:
text = data_json["completions"][0]["completion"]
is_finished = True
finish_reason = "stop"
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
def handle_cohere_chunk(self, chunk):
chunk = chunk.decode("utf-8")
data_json = json.loads(chunk)
try:
text = ""
is_finished = False
finish_reason = ""
if "text" in data_json:
text = data_json["text"]
elif "is_finished" in data_json:
is_finished = data_json["is_finished"]
finish_reason = data_json["finish_reason"]
else:
raise Exception(data_json)
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
def handle_azure_chunk(self, chunk):
is_finished = False
finish_reason = ""
text = ""
print_verbose(f"chunk: {chunk}")
if "data: [DONE]" in chunk:
text = ""
is_finished = True
finish_reason = "stop"
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
elif chunk.startswith("data:"):
data_json = json.loads(chunk[5:]) # chunk.startswith("data:"):
try:
if len(data_json["choices"]) > 0:
text = data_json["choices"][0]["delta"].get("content", "")
if data_json["choices"][0].get("finish_reason", None):
is_finished = True
finish_reason = data_json["choices"][0]["finish_reason"]
print_verbose(f"text: {text}; is_finished: {is_finished}; finish_reason: {finish_reason}")
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
elif "error" in chunk:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
else:
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
def handle_replicate_chunk(self, chunk):
try:
text = ""
is_finished = False
finish_reason = ""
if "output" in chunk:
text = chunk['output']
if "status" in chunk:
if chunk["status"] == "succeeded":
is_finished = True
finish_reason = "stop"
elif chunk.get("error", None):
raise Exception(chunk["error"])
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
def handle_openai_chat_completion_chunk(self, chunk):
try:
print_verbose(f"\nRaw OpenAI Chunk\n{chunk}\n")
str_line = chunk
text = ""
is_finished = False
finish_reason = None
original_chunk = None # this is used for function/tool calling
if len(str_line.choices) > 0:
if str_line.choices[0].delta.content is not None:
text = str_line.choices[0].delta.content
else: # function/tool calling chunk - when content is None. in this case we just return the original chunk from openai
original_chunk = str_line
if str_line.choices[0].finish_reason:
is_finished = True
finish_reason = str_line.choices[0].finish_reason
return {
"text": text,
"is_finished": is_finished,
"finish_reason": finish_reason,
"original_chunk": str_line
}
except Exception as e:
traceback.print_exc()
raise e
def handle_openai_text_completion_chunk(self, chunk):
try:
str_line = chunk
text = ""
is_finished = False
finish_reason = None
print_verbose(f"str_line: {str_line}")
if "data: [DONE]" in str_line:
text = ""
is_finished = True
finish_reason = "stop"
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
elif str_line.startswith("data:"):
data_json = json.loads(str_line[5:])
print_verbose(f"delta content: {data_json}")
text = data_json["choices"][0].get("text", "")
if data_json["choices"][0].get("finish_reason", None):
is_finished = True
finish_reason = data_json["choices"][0]["finish_reason"]
print_verbose(f"text: {text}; is_finished: {is_finished}; finish_reason: {finish_reason}")
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
elif "error" in str_line:
raise ValueError(f"Unable to parse response. Original response: {str_line}")
else:
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except Exception as e:
traceback.print_exc()
raise e
def handle_baseten_chunk(self, chunk):
try:
chunk = chunk.decode("utf-8")
if len(chunk) > 0:
if chunk.startswith("data:"):
data_json = json.loads(chunk[5:])
if "token" in data_json and "text" in data_json["token"]:
return data_json["token"]["text"]
else:
return ""
data_json = json.loads(chunk)
if "model_output" in data_json:
if isinstance(data_json["model_output"], dict) and "data" in data_json["model_output"] and isinstance(data_json["model_output"]["data"], list):
return data_json["model_output"]["data"][0]
elif isinstance(data_json["model_output"], str):
return data_json["model_output"]
elif "completion" in data_json and isinstance(data_json["completion"], str):
return data_json["completion"]
else:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
else:
return ""
else:
return ""
except:
traceback.print_exc()
return ""
def handle_bedrock_stream(self, chunk):
if hasattr(chunk, "get"):
chunk = chunk.get('chunk')
chunk_data = json.loads(chunk.get('bytes').decode())
else:
chunk_data = json.loads(chunk.decode())
if chunk_data:
text = ""
is_finished = False
finish_reason = ""
if "outputText" in chunk_data:
text = chunk_data['outputText']
# ai21 mapping
if "ai21" in self.model: # fake ai21 streaming
text = chunk_data.get('completions')[0].get('data').get('text')
is_finished = True
finish_reason = "stop"
# anthropic mapping
elif "completion" in chunk_data:
text = chunk_data['completion'] # bedrock.anthropic
stop_reason = chunk_data.get("stop_reason", None)
if stop_reason != None:
is_finished = True
finish_reason = stop_reason
######## bedrock.cohere mappings ###############
# meta mapping
elif "generation" in chunk_data:
text = chunk_data['generation'] # bedrock.meta
# cohere mapping
elif "text" in chunk_data:
text = chunk_data["text"] # bedrock.cohere
# cohere mapping for finish reason
elif "finish_reason" in chunk_data:
finish_reason = chunk_data["finish_reason"]
is_finished = True
elif chunk_data.get("completionReason", None):
is_finished = True
finish_reason = chunk_data["completionReason"]
elif chunk.get("error", None):
raise Exception(chunk["error"])
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
return ""
def chunk_creator(self, chunk):
model_response = ModelResponse(stream=True, model=self.model)
model_response.choices[0].finish_reason = None
response_obj = None
try:
# return this for all models
completion_obj = {"content": ""}
if self.custom_llm_provider and self.custom_llm_provider == "anthropic":
response_obj = self.handle_anthropic_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.model == "replicate" or self.custom_llm_provider == "replicate":
response_obj = self.handle_replicate_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif (
self.custom_llm_provider and self.custom_llm_provider == "together_ai"):
response_obj = self.handle_together_ai_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider and self.custom_llm_provider == "huggingface":
response_obj = self.handle_huggingface_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider and self.custom_llm_provider == "baseten": # baseten doesn't provide streaming
completion_obj["content"] = self.handle_baseten_chunk(chunk)
elif self.custom_llm_provider and self.custom_llm_provider == "ai21": #ai21 doesn't provide streaming
response_obj = self.handle_ai21_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider and self.custom_llm_provider == "maritalk":
response_obj = self.handle_maritalk_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider and self.custom_llm_provider == "vllm":
completion_obj["content"] = chunk[0].outputs[0].text
elif self.custom_llm_provider and self.custom_llm_provider == "aleph_alpha": #aleph alpha doesn't provide streaming
response_obj = self.handle_aleph_alpha_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider == "nlp_cloud":
try:
response_obj = self.handle_nlp_cloud_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
except Exception as e:
if self.sent_last_chunk:
raise e
else:
if self.sent_first_chunk is False:
raise Exception("An unknown error occurred with the stream")
model_response.choices[0].finish_reason = "stop"
self.sent_last_chunk = True
elif self.custom_llm_provider and self.custom_llm_provider == "vertex_ai":
try:
completion_obj["content"] = str(chunk)
except StopIteration as e:
if self.sent_last_chunk:
raise e
else:
model_response.choices[0].finish_reason = "stop"
self.sent_last_chunk = True
elif self.custom_llm_provider == "cohere":
response_obj = self.handle_cohere_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider == "bedrock":
if self.sent_last_chunk:
raise StopIteration
response_obj = self.handle_bedrock_stream(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
self.sent_last_chunk = True
elif self.custom_llm_provider == "sagemaker":
if len(self.completion_stream)==0:
if self.sent_last_chunk:
raise StopIteration
else:
model_response.choices[0].finish_reason = "stop"
self.sent_last_chunk = True
chunk_size = 30
new_chunk = self.completion_stream[:chunk_size]
completion_obj["content"] = new_chunk
self.completion_stream = self.completion_stream[chunk_size:]
time.sleep(0.05)
elif self.custom_llm_provider == "petals":
if len(self.completion_stream)==0:
if self.sent_last_chunk:
raise StopIteration
else:
model_response.choices[0].finish_reason = "stop"
self.sent_last_chunk = True
chunk_size = 30
new_chunk = self.completion_stream[:chunk_size]
completion_obj["content"] = new_chunk
self.completion_stream = self.completion_stream[chunk_size:]
time.sleep(0.05)
elif self.custom_llm_provider == "palm":
# fake streaming
if len(self.completion_stream)==0:
if self.sent_last_chunk:
raise StopIteration
else:
model_response.choices[0].finish_reason = "stop"
self.sent_last_chunk = True
chunk_size = 30
new_chunk = self.completion_stream[:chunk_size]
completion_obj["content"] = new_chunk
self.completion_stream = self.completion_stream[chunk_size:]
time.sleep(0.05)
elif self.custom_llm_provider == "ollama":
if "error" in chunk:
exception_type(model=self.model, custom_llm_provider=self.custom_llm_provider, original_exception=chunk["error"])
completion_obj = chunk
elif self.custom_llm_provider == "text-completion-openai":
response_obj = self.handle_openai_text_completion_chunk(chunk)
completion_obj["content"] = response_obj["text"]
print_verbose(f"completion obj content: {completion_obj['content']}")
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
else: # openai chat model
response_obj = self.handle_openai_chat_completion_chunk(chunk)
if response_obj == None:
return
completion_obj["content"] = response_obj["text"]
print_verbose(f"completion obj content: {completion_obj['content']}")
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
model_response.model = self.model
print_verbose(f"model_response: {model_response}; completion_obj: {completion_obj}")
print_verbose(f"model_response finish reason 3: {model_response.choices[0].finish_reason}")
if len(completion_obj["content"]) > 0: # cannot set content of an OpenAI Object to be an empty string
hold, model_response_str = self.check_special_tokens(chunk=completion_obj["content"], finish_reason=model_response.choices[0].finish_reason)
print_verbose(f"hold - {hold}, model_response_str - {model_response_str}")
if hold is False:
completion_obj["content"] = model_response_str
if self.sent_first_chunk == False:
completion_obj["role"] = "assistant"
self.sent_first_chunk = True
model_response.choices[0].delta = Delta(**completion_obj)
# LOGGING
threading.Thread(target=self.logging_obj.success_handler, args=(model_response,)).start()
print_verbose(f"model_response: {model_response}")
return model_response
else:
return
elif model_response.choices[0].finish_reason:
model_response.choices[0].finish_reason = map_finish_reason(model_response.choices[0].finish_reason) # ensure consistent output to openai
# LOGGING
threading.Thread(target=self.logging_obj.success_handler, args=(model_response,)).start()
return model_response
elif response_obj is not None and response_obj.get("original_chunk", None) is not None: # function / tool calling branch - only set for openai/azure compatible endpoints
# enter this branch when no content has been passed in response
original_chunk = response_obj.get("original_chunk", None)
model_response.id = original_chunk.id
if len(original_chunk.choices) > 0:
if original_chunk.choices[0].delta.function_call is not None or original_chunk.choices[0].delta.tool_calls is not None:
try:
delta = dict(original_chunk.choices[0].delta)
model_response.choices[0].delta = Delta(**delta)
except Exception as e:
model_response.choices[0].delta = Delta()
else:
return
else:
return
model_response.system_fingerprint = original_chunk.system_fingerprint
if self.sent_first_chunk == False:
model_response.choices[0].delta["role"] = "assistant"
self.sent_first_chunk = True
threading.Thread(target=self.logging_obj.success_handler, args=(model_response,)).start() # log response
return model_response
else:
return
except StopIteration:
raise StopIteration
except Exception as e:
traceback_exception = traceback.format_exc()
e.message = str(e)
# LOG FAILURE - handle streaming failure logging in the _next_ object, remove `handle_failure` once it's deprecated
threading.Thread(target=self.logging_obj.failure_handler, args=(e, traceback_exception)).start()
raise exception_type(model=self.model, custom_llm_provider=self.custom_llm_provider, original_exception=e)
## needs to handle the empty string case (even starting chunk can be an empty string)
def __next__(self):
try:
while True:
if isinstance(self.completion_stream, str) or isinstance(self.completion_stream, bytes):
chunk = self.completion_stream
else:
chunk = next(self.completion_stream)
if chunk is not None and chunk != b'':
response = self.chunk_creator(chunk=chunk)
if response is not None:
return response
except StopIteration:
raise # Re-raise StopIteration
except Exception as e:
# Handle other exceptions if needed
raise e
async def __anext__(self):
try:
if (self.custom_llm_provider == "openai"
or self.custom_llm_provider == "azure"
or self.custom_llm_provider == "custom_openai"
or self.custom_llm_provider == "text-completion-openai"
or self.custom_llm_provider == "huggingface"):
async for chunk in self.completion_stream:
if chunk == "None" or chunk is None:
raise Exception
processed_chunk = self.chunk_creator(chunk=chunk)
if processed_chunk is None:
continue
return processed_chunk
raise StopAsyncIteration
else: # temporary patch for non-aiohttp async calls
return next(self)
except Exception as e:
# Handle any exceptions that might occur during streaming
raise StopAsyncIteration
class TextCompletionStreamWrapper:
def __init__(self, completion_stream, model):
self.completion_stream = completion_stream
self.model = model
def __iter__(self):
return self
def __aiter__(self):
return self
def __next__(self):
# model_response = ModelResponse(stream=True, model=self.model)
response = TextCompletionResponse()
try:
while True: # loop until a non-empty string is found
# return this for all models
chunk = next(self.completion_stream)
response["id"] = chunk.get("id", None)
response["object"] = "text_completion"
response["created"] = response.get("created", None)
response["model"] = response.get("model", None)
text_choices = TextChoices()
text_choices["text"] = chunk["choices"][0]["delta"]["content"]
text_choices["index"] = response["choices"][0]["index"]
text_choices["finish_reason"] = response["choices"][0]["finish_reason"]
response["choices"] = [text_choices]
return response
except StopIteration:
raise StopIteration
except Exception as e:
print(f"got exception {e}") # noqa
async def __anext__(self):
try:
return next(self)
except StopIteration:
raise StopAsyncIteration
def mock_completion_streaming_obj(model_response, mock_response, model):
for i in range(0, len(mock_response), 3):
completion_obj = {"role": "assistant", "content": mock_response[i: i+3]}
model_response.choices[0].delta = completion_obj
yield model_response
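# --- Editor's sketch (hedged, not part of the original file): consuming a CustomStreamWrapper.
# --- litellm.completion(..., stream=True) returns this wrapper, so it is iterated like an
# --- OpenAI stream; the model name is a placeholder.
#
#   response = litellm.completion(model="gpt-3.5-turbo",
#                                 messages=[{"role": "user", "content": "hi"}],
#                                 stream=True)
#   for chunk in response:
#       print(chunk.choices[0].delta.get("content") or "", end="")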
########## Reading Config File ############################
def read_config_args(config_path) -> dict:
try:
import os
current_path = os.getcwd()
with open(config_path, "r") as config_file:
config = json.load(config_file)
# read keys/ values from config file and return them
return config
except Exception as e:
raise e
########## experimental completion variants ############################
def completion_with_config(config: Union[dict, str], **kwargs):
"""
Generate a litellm.completion() using a config dict and all supported completion args
    Example config:
config = {
"default_fallback_models": # [Optional] List of model names to try if a call fails
"available_models": # [Optional] List of all possible models you could call
"adapt_to_prompt_size": # [Optional] True/False - if you want to select model based on prompt size (will pick from available_models)
"model": {
"model-name": {
"needs_moderation": # [Optional] True/False - if you want to call openai moderations endpoint before making completion call. Will raise exception, if flagged.
"error_handling": {
"error-type": { # One of the errors listed here - https://docs.litellm.ai/docs/exception_mapping#custom-mapping-list
"fallback_model": "" # str, name of the model it should try instead, when that error occurs
}
}
}
}
}
Parameters:
config (Union[dict, str]): A configuration for litellm
**kwargs: Additional keyword arguments for litellm.completion
Returns:
litellm.ModelResponse: A ModelResponse with the generated completion
"""
if config is not None:
if isinstance(config, str):
config = read_config_args(config)
elif isinstance(config, dict):
config = config
else:
raise Exception("Config path must be a string or a dictionary.")
else:
raise Exception("Config path not passed in.")
if config is None:
raise Exception("No completion config in the config file")
models_with_config = config["model"].keys()
model = kwargs["model"]
messages = kwargs["messages"]
## completion config
fallback_models = config.get("default_fallback_models", None)
available_models = config.get("available_models", None)
adapt_to_prompt_size = config.get("adapt_to_prompt_size", False)
trim_messages_flag = config.get("trim_messages", False)
prompt_larger_than_model = False
max_model = model
try:
max_tokens = litellm.get_max_tokens(model)["max_tokens"]
except:
max_tokens = 2048 # assume curr model's max window is 2048 tokens
if adapt_to_prompt_size:
## Pick model based on token window
prompt_tokens = litellm.token_counter(model="gpt-3.5-turbo", text="".join(message["content"] for message in messages))
try:
curr_max_tokens = litellm.get_max_tokens(model)["max_tokens"]
except:
curr_max_tokens = 2048
if curr_max_tokens < prompt_tokens:
prompt_larger_than_model = True
for available_model in available_models:
try:
curr_max_tokens = litellm.get_max_tokens(available_model)["max_tokens"]
if curr_max_tokens > max_tokens:
max_tokens = curr_max_tokens
max_model = available_model
if curr_max_tokens > prompt_tokens:
model = available_model
prompt_larger_than_model = False
except:
continue
if prompt_larger_than_model:
messages = trim_messages(messages=messages, model=max_model)
kwargs["messages"] = messages
kwargs["model"] = model
try:
if model in models_with_config:
## Moderation check
if config["model"][model].get("needs_moderation"):
input = " ".join(message["content"] for message in messages)
response = litellm.moderation(input=input)
flagged = response["results"][0]["flagged"]
if flagged:
raise Exception("This response was flagged as inappropriate")
## Model-specific Error Handling
error_handling = None
if config["model"][model].get("error_handling"):
error_handling = config["model"][model]["error_handling"]
try:
response = litellm.completion(**kwargs)
return response
except Exception as e:
exception_name = type(e).__name__
fallback_model = None
if error_handling and exception_name in error_handling:
error_handler = error_handling[exception_name]
# either switch model or api key
fallback_model = error_handler.get("fallback_model", None)
if fallback_model:
kwargs["model"] = fallback_model
return litellm.completion(**kwargs)
raise e
else:
return litellm.completion(**kwargs)
except Exception as e:
if fallback_models:
model = fallback_models.pop(0)
return completion_with_fallbacks(model=model, messages=messages, fallbacks=fallback_models)
raise e
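# --- Editor's sketch (hedged, not part of the original file): a minimal config for
# --- completion_with_config, mirroring the docstring above. Model names and the error
# --- type are placeholders.
#
#   config = {
#       "default_fallback_models": ["gpt-3.5-turbo"],
#       "model": {
#           "gpt-4": {
#               "needs_moderation": True,
#               "error_handling": {
#                   "ContextWindowExceededError": {"fallback_model": "gpt-3.5-turbo-16k"}
#               },
#           }
#       },
#   }
#   response = completion_with_config(config=config, model="gpt-4",
#                                     messages=[{"role": "user", "content": "hi"}])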
def completion_with_fallbacks(**kwargs):
nested_kwargs = kwargs.pop("kwargs", {})
response = None
rate_limited_models = set()
model_expiration_times = {}
start_time = time.time()
original_model = kwargs["model"]
fallbacks = [kwargs["model"]] + nested_kwargs.get("fallbacks", [])
if "fallbacks" in nested_kwargs:
del nested_kwargs["fallbacks"] # remove fallbacks so it's not recursive
litellm_call_id = str(uuid.uuid4())
# max time to process a request with fallbacks: default 45s
while response == None and time.time() - start_time < 45:
for model in fallbacks:
# loop thru all models
try:
# check if it's dict or new model string
if isinstance(model, dict): # completion(model="gpt-4", fallbacks=[{"api_key": "", "api_base": ""}, {"api_key": "", "api_base": ""}])
kwargs["api_key"] = model.get("api_key", None)
kwargs["api_base"] = model.get("api_base", None)
model = model.get("model", original_model)
elif (
model in rate_limited_models
): # check if model is currently cooling down
if (
model_expiration_times.get(model)
and time.time() >= model_expiration_times[model]
):
rate_limited_models.remove(
model
) # check if it's been 60s of cool down and remove model
else:
continue # skip model
# delete model from kwargs if it exists
if kwargs.get("model"):
del kwargs["model"]
print_verbose(f"trying to make completion call with model: {model}")
kwargs["litellm_call_id"] = litellm_call_id
kwargs = {**kwargs, **nested_kwargs} # combine the openai + litellm params at the same level
response = litellm.completion(**kwargs, model=model)
print_verbose(f"response: {response}")
if response != None:
return response
except Exception as e:
print_verbose(e)
rate_limited_models.add(model)
model_expiration_times[model] = (
time.time() + 60
) # cool down this selected model
pass
return response
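# --- Editor's sketch (hedged, not part of the original file): completion_with_fallbacks is
# --- normally reached via litellm.completion(..., fallbacks=[...]); entries may be model name
# --- strings or dicts carrying api_key/api_base overrides. All values below are placeholders.
#
#   response = litellm.completion(
#       model="gpt-4",
#       messages=[{"role": "user", "content": "hi"}],
#       fallbacks=["gpt-3.5-turbo",
#                  {"model": "azure/my-deployment", "api_key": "...", "api_base": "..."}],
#   )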
def process_system_message(system_message, max_tokens, model):
system_message_event = {"role": "system", "content": system_message}
system_message_tokens = get_token_count([system_message_event], model)
if system_message_tokens > max_tokens:
print_verbose("`tokentrimmer`: Warning, system message exceeds token limit. Trimming...")
# shorten system message to fit within max_tokens
new_system_message = shorten_message_to_fit_limit(system_message_event, max_tokens, model)
system_message_tokens = get_token_count([new_system_message], model)
return system_message_event, max_tokens - system_message_tokens
def process_messages(messages, max_tokens, model):
# Process messages from older to more recent
messages = messages[::-1]
final_messages = []
for message in messages:
used_tokens = get_token_count(final_messages, model)
available_tokens = max_tokens - used_tokens
if available_tokens <= 3:
break
final_messages = attempt_message_addition(final_messages=final_messages, message=message, available_tokens=available_tokens, max_tokens=max_tokens, model=model)
return final_messages
def attempt_message_addition(final_messages, message, available_tokens, max_tokens, model):
temp_messages = [message] + final_messages
temp_message_tokens = get_token_count(messages=temp_messages, model=model)
if temp_message_tokens <= max_tokens:
return temp_messages
# if temp_message_tokens > max_tokens, try shortening temp_messages
elif "function_call" not in message:
        # fit updated_message to be within temp_message_tokens - max_tokens (aka the amount temp_message_tokens is greater than max_tokens)
updated_message = shorten_message_to_fit_limit(message, available_tokens, model)
if can_add_message(updated_message, final_messages, max_tokens, model):
return [updated_message] + final_messages
return final_messages
def can_add_message(message, messages, max_tokens, model):
if get_token_count(messages + [message], model) <= max_tokens:
return True
return False
def get_token_count(messages, model):
return token_counter(model=model, messages=messages)
def shorten_message_to_fit_limit(
message,
tokens_needed,
model):
"""
Shorten a message to fit within a token limit by removing characters from the middle.
"""
    # For OpenAI models, even blank messages cost 7 tokens,
# and if the buffer is less than 3, the while loop will never end,
# hence the value 10.
if 'gpt' in model and tokens_needed <= 10:
return message
content = message["content"]
while True:
total_tokens = get_token_count([message], model)
if total_tokens <= tokens_needed:
break
ratio = (tokens_needed) / total_tokens
new_length = int(len(content) * ratio) -1
new_length = max(0, new_length)
half_length = new_length // 2
left_half = content[:half_length]
right_half = content[-half_length:]
trimmed_content = left_half + '..' + right_half
message["content"] = trimmed_content
content = trimmed_content
return message
# LiteLLM token trimmer
# this code is borrowed from https://github.com/KillianLucas/tokentrim/blob/main/tokentrim/tokentrim.py
# Credits for this code go to Killian Lucas
def trim_messages(
messages,
model: Optional[str] = None,
trim_ratio: float = 0.75,
return_response_tokens: bool = False,
max_tokens = None
):
"""
Trim a list of messages to fit within a model's token limit.
Args:
messages: Input messages to be trimmed. Each message is a dictionary with 'role' and 'content'.
model: The LiteLLM model being used (determines the token limit).
trim_ratio: Target ratio of tokens to use after trimming. Default is 0.75, meaning it will trim messages so they use about 75% of the model's token limit.
return_response_tokens: If True, also return the number of tokens left available for the response after trimming.
max_tokens: Instead of specifying a model or trim_ratio, you can specify this directly.
Returns:
Trimmed messages and optionally the number of tokens available for response.
"""
# Initialize max_tokens
# if users pass in max tokens, trim to this amount
messages = copy.deepcopy(messages)
try:
print_verbose(f"trimming messages")
if max_tokens == None:
# Check if model is valid
if model in litellm.model_cost:
max_tokens_for_model = litellm.model_cost[model]['max_tokens']
max_tokens = int(max_tokens_for_model * trim_ratio)
else:
# if user did not specify max tokens
# or passed an llm litellm does not know
# do nothing, just return messages
            return messages
system_message = ""
for message in messages:
if message["role"] == "system":
system_message += '\n' if system_message else ''
system_message += message["content"]
current_tokens = token_counter(model=model, messages=messages)
print_verbose(f"Current tokens: {current_tokens}, max tokens: {max_tokens}")
        # Do nothing if current tokens are under max_tokens
if current_tokens < max_tokens:
return messages
#### Trimming messages if current_tokens > max_tokens
print_verbose(f"Need to trim input messages: {messages}, current_tokens{current_tokens}, max_tokens: {max_tokens}")
if system_message:
system_message_event, max_tokens = process_system_message(system_message=system_message, max_tokens=max_tokens, model=model)
if max_tokens == 0: # the system messages are too long
return [system_message_event]
# Since all system messages are combined and trimmed to fit the max_tokens,
# we remove all system messages from the messages list
messages = [message for message in messages if message["role"] != "system"]
final_messages = process_messages(messages=messages, max_tokens=max_tokens, model=model)
# Add system message to the beginning of the final messages
if system_message:
final_messages = [system_message_event] + final_messages
if return_response_tokens: # if user wants token count with new trimmed messages
response_tokens = max_tokens - get_token_count(final_messages, model)
return final_messages, response_tokens
return final_messages
    except Exception as e: # [NON-Blocking] if an error occurs, just return the original messages
        print_verbose(f"Got exception while token trimming: {e}")
return messages
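# --- Editor's sketch (hedged, not part of the original file): trimming a long history to
# --- ~75% of the model's window and keeping the leftover budget for the reply. Model name
# --- is a placeholder.
#
#   trimmed, response_budget = trim_messages(messages, model="gpt-3.5-turbo",
#                                            return_response_tokens=True)
#   response = litellm.completion(model="gpt-3.5-turbo", messages=trimmed,
#                                 max_tokens=response_budget)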
def get_valid_models():
"""
Returns a list of valid LLMs based on the set environment variables
Args:
None
Returns:
A list of valid LLMs
"""
try:
# get keys set in .env
environ_keys = os.environ.keys()
valid_providers = []
# for all valid providers, make a list of supported llms
valid_models = []
for provider in litellm.provider_list:
# edge case litellm has together_ai as a provider, it should be togetherai
provider = provider.replace("_", "")
# litellm standardizes expected provider keys to
# PROVIDER_API_KEY. Example: OPENAI_API_KEY, COHERE_API_KEY
expected_provider_key = f"{provider.upper()}_API_KEY"
if expected_provider_key in environ_keys:
# key is set
valid_providers.append(provider)
for provider in valid_providers:
if provider == "azure":
valid_models.append("Azure-LLM")
else:
models_for_provider = litellm.models_by_provider.get(provider, [])
valid_models.extend(models_for_provider)
return valid_models
except:
return [] # NON-Blocking
# used for litellm.text_completion() to transform HF logprobs to OpenAI.Completion() format
def transform_logprobs(hf_response):
# Initialize an empty list for the transformed logprobs
transformed_logprobs = []
# For each Hugging Face response, transform the logprobs
for response in hf_response:
# Extract the relevant information from the response
response_details = response['details']
top_tokens = response_details.get("top_tokens", {})
# Initialize an empty list for the token information
token_info = {
'tokens': [],
'token_logprobs': [],
'text_offset': [],
'top_logprobs': [],
}
for i, token in enumerate(response_details['prefill']):
# Extract the text of the token
token_text = token['text']
# Extract the logprob of the token
token_logprob = token['logprob']
# Add the token information to the 'token_info' list
token_info['tokens'].append(token_text)
token_info['token_logprobs'].append(token_logprob)
# stub this to work with llm eval harness
top_alt_tokens = { "": -1, "": -2, "": -3 }
token_info['top_logprobs'].append(top_alt_tokens)
# For each element in the 'tokens' list, extract the relevant information
for i, token in enumerate(response_details['tokens']):
# Extract the text of the token
token_text = token['text']
# Extract the logprob of the token
token_logprob = token['logprob']
top_alt_tokens = {}
temp_top_logprobs = []
if top_tokens != {}:
temp_top_logprobs = top_tokens[i]
# top_alt_tokens should look like this: { "alternative_1": -1, "alternative_2": -2, "alternative_3": -3 }
for elem in temp_top_logprobs:
text = elem["text"]
logprob = elem["logprob"]
top_alt_tokens[text] = logprob
# Add the token information to the 'token_info' list
token_info['tokens'].append(token_text)
token_info['token_logprobs'].append(token_logprob)
token_info['top_logprobs'].append(top_alt_tokens)
# Add the text offset of the token
# This is computed as the sum of the lengths of all previous tokens
token_info['text_offset'].append(sum(len(t['text']) for t in response_details['tokens'][:i]))
# Add the 'token_info' list to the 'transformed_logprobs' list
transformed_logprobs = token_info
return transformed_logprobs
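# --- Editor's illustration (not part of the original file): shape of the dict returned above,
# --- matching OpenAI's Completion logprobs layout. Token strings and numbers are made up.
#
#   {
#       "tokens": ["Hello", " world"],
#       "token_logprobs": [-0.12, -0.48],
#       "text_offset": [0, 5],
#       "top_logprobs": [{"Hello": -0.12, "Hi": -1.3}, {" world": -0.48, "!": -2.1}],
#   }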
# used in LiteLLM Router
def remove_model_id(original_model_string):
# Find the index of "ModelID" in the string
index_of_model_id = original_model_string.find("-ModelID")
# Remove everything after "-ModelID" if it exists
if index_of_model_id != -1:
return original_model_string[:index_of_model_id]
return original_model_string | [
"Hey, how's it going?",
"0",
"gpt-3.5-turbo",
"True",
"Hey, how's it going",
"input_cost_per_token",
"Hello World",
"None",
"adapt_to_prompt_size",
"default",
"False",
" ",
"application/json",
"content",
"ft:gpt-3.5-turbo",
"prompt_tokens"
] |
2024-01-10 | ecomoptimizer/litellm | litellm~proxy~tests~load_test_completion.py | import time, asyncio
from openai import AsyncOpenAI
import uuid
import traceback
litellm_client = AsyncOpenAI(
api_key="test",
base_url="http://0.0.0.0:8000"
)
async def litellm_completion():
# Your existing code for litellm_completion goes here
try:
response = await litellm_client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": f"This is a test: {uuid.uuid4()}"}],
)
print(response)
return response
except Exception as e:
# If there's an exception, log the error message
with open("error_log.txt", "a") as error_log:
error_log.write(f"Error during completion: {str(e)}\n")
pass
async def main():
start = time.time()
n = 1000 # Number of concurrent tasks
tasks = [litellm_completion() for _ in range(n)]
chat_completions = await asyncio.gather(*tasks)
successful_completions = [c for c in chat_completions if c is not None]
# Write errors to error_log.txt
with open("error_log.txt", "a") as error_log:
for completion in chat_completions:
if isinstance(completion, str):
error_log.write(completion + "\n")
print(n, time.time() - start, len(successful_completions))
if __name__ == "__main__":
# Blank out contents of error_log.txt
open("error_log.txt", "w").close()
asyncio.run(main())
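# --- Editor's note (not part of the original file): usage sketch. Start a LiteLLM proxy
# --- locally on http://0.0.0.0:8000 (the base_url above), then run
# ---     python load_test_completion.py
# --- Failed completions are appended to error_log.txt.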
| [] |
2024-01-10 | jh941213/my_AI_CV_tutor | cv.py | from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.text_splitter import CharacterTextSplitter
from unstructured.partition.pdf import partition_pdf
from langchain.schema.messages import HumanMessage
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.schema.document import Document
from langchain.storage import InMemoryStore
from langchain.vectorstores import Chroma
from langchain.schema.runnable import RunnableLambda, RunnablePassthrough
from langchain.document_loaders import PyPDFLoader
import streamlit as st
import io
import re
from PIL import Image
import uuid
import base64
import os
# Extract elements from PDF
def extract_pdf_elements(path, fname):
"""
Extract images, tables, and chunk text from a PDF file.
path: File path, which is used to dump images (.jpg)
fname: File name
"""
return partition_pdf(
filename=path + fname,
extract_images_in_pdf=False,
infer_table_structure=True,
chunking_strategy="by_title",
max_characters=4000,
new_after_n_chars=3800,
combine_text_under_n_chars=2000,
image_output_dir_path=path,
)
# Categorize elements by type
def categorize_elements(raw_pdf_elements):
"""
Categorize extracted elements from a PDF into tables and texts.
raw_pdf_elements: List of unstructured.documents.elements
"""
tables = []
texts = []
for element in raw_pdf_elements:
if "unstructured.documents.elements.Table" in str(type(element)):
tables.append(str(element))
elif "unstructured.documents.elements.CompositeElement" in str(type(element)):
texts.append(str(element))
return texts, tables
# Generate summaries of text elements
def generate_text_summaries(texts, tables, summarize_texts=False):
"""
Summarize text elements
texts: List of str
tables: List of str
summarize_texts: Bool to summarize texts
"""
# Prompt
prompt_text = """You are an assistant tasked with summarizing tables and text for retrieval. \
These summaries will be embedded and used to retrieve the raw text or table elements. \
Give a concise summary of the table or text that is well optimized for retrieval. Table or text: {element} """
prompt = ChatPromptTemplate.from_template(prompt_text)
# Text summary chain
model = ChatOpenAI(temperature=0, model="gpt-4-1106-preview",openai_api_key="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")
summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()
# Initialize empty summaries
text_summaries = []
table_summaries = []
# Apply to text if texts are provided and summarization is requested
if texts and summarize_texts:
text_summaries = summarize_chain.batch(texts, {"max_concurrency": 5})
elif texts:
text_summaries = texts
# Apply to tables if tables are provided
if tables:
table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5})
return text_summaries, table_summaries
def encode_image(image_path):
"""Getting the base64 string"""
with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode("utf-8")
def image_summarize(img_base64, prompt):
"""Make image summary"""
chat = ChatOpenAI(model="gpt-4-vision-preview", max_tokens=1024, openai_api_key="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")
msg = chat.invoke(
[
HumanMessage(
content=[
{"type": "text", "text": prompt},
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{img_base64}"},
},
]
)
]
)
return msg.content
def generate_img_summaries(path):
"""
Generate summaries and base64 encoded strings for images
path: Path to list of .jpg files extracted by Unstructured
"""
# Store base64 encoded images
img_base64_list = []
# Store image summaries
image_summaries = []
# Prompt
prompt = """You are an assistant tasked with summarizing images for retrieval. \
These summaries will be embedded and used to retrieve the raw image. \
Give a concise summary of the image that is well optimized for retrieval."""
# Apply to images
for img_file in sorted(os.listdir(path)):
if img_file.endswith(".jpg"):
img_path = os.path.join(path, img_file)
base64_image = encode_image(img_path)
img_base64_list.append(base64_image)
image_summaries.append(image_summarize(base64_image, prompt))
return img_base64_list, image_summaries
def create_multi_vector_retriever(
vectorstore, text_summaries, texts, table_summaries, tables, image_summaries, images
):
"""
Create retriever that indexes summaries, but returns raw images or texts
"""
# Initialize the storage layer
store = InMemoryStore()
id_key = "doc_id"
# Create the multi-vector retriever
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
docstore=store,
id_key=id_key,
)
# Helper function to add documents to the vectorstore and docstore
def add_documents(retriever, doc_summaries, doc_contents):
doc_ids = [str(uuid.uuid4()) for _ in doc_contents]
summary_docs = [
Document(page_content=s, metadata={id_key: doc_ids[i]})
for i, s in enumerate(doc_summaries)
]
retriever.vectorstore.add_documents(summary_docs)
retriever.docstore.mset(list(zip(doc_ids, doc_contents)))
# Add texts, tables, and images
# Check that text_summaries is not empty before adding
if text_summaries:
add_documents(retriever, text_summaries, texts)
# Check that table_summaries is not empty before adding
if table_summaries:
add_documents(retriever, table_summaries, tables)
# Check that image_summaries is not empty before adding
if image_summaries:
add_documents(retriever, image_summaries, images)
return retriever
def plt_img_base64(img_base64):
"""Disply base64 encoded string as image"""
# Create an HTML img tag with the base64 string as the source
image_html = f'<img src="data:image/jpeg;base64,{img_base64}" />'
# Display the image by rendering the HTML
display(HTML(image_html))
def looks_like_base64(sb):
"""Check if the string looks like base64"""
return re.match("^[A-Za-z0-9+/]+[=]{0,2}$", sb) is not None
def is_image_data(b64data):
"""
Check if the base64 data is an image by looking at the start of the data
"""
image_signatures = {
b"\xFF\xD8\xFF": "jpg",
b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A": "png",
b"\x47\x49\x46\x38": "gif",
b"\x52\x49\x46\x46": "webp",
}
try:
header = base64.b64decode(b64data)[:8] # Decode and get the first 8 bytes
for sig, format in image_signatures.items():
if header.startswith(sig):
return True
return False
except Exception:
return False
def resize_base64_image(base64_string, size=(128, 128)):
"""
Resize an image encoded as a Base64 string
"""
# Decode the Base64 string
img_data = base64.b64decode(base64_string)
img = Image.open(io.BytesIO(img_data))
# Resize the image
resized_img = img.resize(size, Image.LANCZOS)
# Save the resized image to a bytes buffer
buffered = io.BytesIO()
resized_img.save(buffered, format=img.format)
# Encode the resized image to Base64
return base64.b64encode(buffered.getvalue()).decode("utf-8")
def split_image_text_types(docs):
"""
Split base64-encoded images and texts
"""
b64_images = []
texts = []
for doc in docs:
# Check if the document is of type Document and extract page_content if so
if isinstance(doc, Document):
doc = doc.page_content
if looks_like_base64(doc) and is_image_data(doc):
doc = resize_base64_image(doc, size=(1300, 600))
b64_images.append(doc)
else:
texts.append(doc)
return {"images": b64_images, "texts": texts}
def img_prompt_func(data_dict):
"""
Join the context into a single string
"""
formatted_texts = "\n".join(data_dict["context"]["texts"])
messages = []
# Adding image(s) to the messages if present
if data_dict["context"]["images"]:
for image in data_dict["context"]["images"]:
image_message = {
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{image}"},
}
messages.append(image_message)
# Adding the text for analysis
text_message = {
"type": "text",
"text": (
"당신은 AI 업계 취업 컨설턴트 전문가입니다.\n"
"이력서에는 일반적으로 관련 커리어에 대한 텍스트와 이미지가 혼합되어 제공이 됩니다.\n"
"이 내용을 이용하여 사용자 질문과 관련된 AI 업계 취업을 위한 조언을 제공합니다. \n"
f"User-provided question: {data_dict['question']}\n\n"
"Text and / or tables:\n"
f"{formatted_texts}"
),
}
messages.append(text_message)
return [HumanMessage(content=messages)]
def multi_modal_rag_chain(retriever):
"""
Multi-modal RAG chain
"""
# Multi-modal LLM
model = ChatOpenAI(temperature=0, model="gpt-4-vision-preview", max_tokens=1024, openai_api_key="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")
# RAG pipeline
chain = (
{
"context": retriever | RunnableLambda(split_image_text_types),
"question": RunnablePassthrough(),
}
| RunnableLambda(img_prompt_func)
| model
| StrOutputParser()
)
return chain
def run_rag_chain(chain, query):
result = chain.invoke(query)
return result
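# --- Editor's sketch (hedged, not part of the original file): how the helpers above are
# --- typically chained end-to-end. Paths and the collection name are placeholders.
#
#   raw_elements = extract_pdf_elements("docs/", "resume.pdf")
#   texts, tables = categorize_elements(raw_elements)
#   text_summaries, table_summaries = generate_text_summaries(texts, tables, summarize_texts=True)
#   img_b64, img_summaries = generate_img_summaries("docs/")
#   vectorstore = Chroma(collection_name="cv_rag", embedding_function=OpenAIEmbeddings())
#   retriever = create_multi_vector_retriever(vectorstore, text_summaries, texts,
#                                             table_summaries, tables, img_summaries, img_b64)
#   chain = multi_modal_rag_chain(retriever)
#   answer = run_rag_chain(chain, "What should I improve in this resume?")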
| [
"You are an assistant tasked with summarizing tables and text for retrieval. These summaries will be embedded and used to retrieve the raw text or table elements. Give a concise summary of the table or text that is well optimized for retrieval. Table or text: {element} ",
"[{'type': 'text', 'text': 'You are an assistant tasked with summarizing images for retrieval. These summaries will be embedded and used to retrieve the raw image. Give a concise summary of the image that is well optimized for retrieval.'}, {'type': 'image_url', 'image_url': {'url': 'data:image/jpeg;base64,PLACEHOLDER'}}]",
"You are an assistant tasked with summarizing images for retrieval. These summaries will be embedded and used to retrieve the raw image. Give a concise summary of the image that is well optimized for retrieval."
] |
2024-01-10 | Dmmc123/pepega-bot | gpt_utils.py | import os
import openai
from sentence_transformers import SentenceTransformer
from transformers import GPT2TokenizerFast
from pynndescent import NNDescent
import pickle
import json
class AnswerGenerator:
def __init__(
self,
model_name="sentence-transformers/all-MiniLM-L6-v2",
index_path="index.pkl",
paragraphs_path="paragraphs.json",
tokenizer_name="gpt2",
completion_model="text-davinci-002",
):
# initializing the paragraph/question encoder and tokenizer
self.encoder = SentenceTransformer(model_name)
self.tokenizer = GPT2TokenizerFast.from_pretrained(tokenizer_name)
# connecting to openai
openai.api_key = os.environ["openai-api-token"]
# loading the index and corresponding paragraphs
with open(index_path, "rb") as f:
self.index = pickle.load(f)
with open(paragraphs_path) as f:
self.paragraphs = json.load(f)
# storing info prompt construction
self.prompt_params = {"max_section_len": 500, "sep": "\n* "}
self.prompt_params["sep_len"] = len(
self.tokenizer.tokenize(self.prompt_params["sep"])
)
# parameters for querying the gpt
self.gpt_params = {
"temperature": 0.0,
"max_tokens": 300,
"model": completion_model,
}
def _prepare_prompt(self, question):
# encode the question
q_emb = self.encoder.encode(question)
# get indices of top matches for paragraphs
most_relevant_document_sections = self.index.query([q_emb])[0][0]
# add contexts until we run out of space.
chosen_sections = []
chosen_sections_len = 0
for section_index in most_relevant_document_sections:
paragraph = self.paragraphs[section_index]
# update current length
chosen_sections_len += (
                len(self.tokenizer.tokenize(paragraph)) + self.prompt_params["sep_len"]  # token count, consistent with sep_len above
)
if chosen_sections_len > self.prompt_params["max_section_len"]:
break
# add section to prompt context
chosen_sections.append(
self.prompt_params["sep"] + paragraph.replace("\n", " ")
)
header = """Answer the question as truthfully as possible using the provided context, and if the answer is not contained within the text below, say "I don't know."\n\nContext:\n"""
return header + "".join(chosen_sections) + "\n\n Q: " + question + "\n A:"
def __call__(self, question):
# get prompt
prompt = self._prepare_prompt(question)
        # query the gpt
response = openai.Completion.create(prompt=prompt, **self.gpt_params)
# return the needed part of response
return response["choices"][0]["text"].strip(" \n")
| [] |
2024-01-10 | Mariuxtheone/omni-openai-gpt3-snippet-extension | exts~omni.openai.snippet~omni~openai~snippet~extension.py | import omni.ext
import omni.ui as ui
#create a file apikeys.py in the same folder as extension.py and add 2 variables:
# API_KEY: "your openai api key"
# PYTHON_PATH: "the path of the python folder where the openai python library is installed"
from .apikeys import apikey
from .apikeys import pythonpath
import pyperclip
import sys
sys.path.append(pythonpath)
import openai
#tokens used in the OpenAI API response
openaitokensresponse = 40
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class MyExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[omni.openai.snippet] MyExtension startup")
self._window = ui.Window("OpenAI GPT-3 Text Generator", width=300, height=300)
with self._window.frame:
with ui.VStack():
prompt_label = ui.Label("Your Prompt:")
prompt_field = ui.StringField(multiline=True)
result_label = ui.Label("OpenAI GPT-3 Result:")
label_style = {"Label": {"font_size": 16, "color": 0xFF00FF00,}}
result_actual_label = ui.Label("The OpenAI generated text will show up here", style=label_style, word_wrap=True)
def on_click():
# Load your API key from an environment variable or secret management service
#openai.api_key = "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"
openai.api_key = apikey
my_prompt = prompt_field.model.get_value_as_string().replace("\n", " ")
response = openai.Completion.create(engine="text-davinci-001", prompt=my_prompt, max_tokens=openaitokensresponse)
#parse response as json and extract text
text = response["choices"][0]["text"]
pyperclip.copy(text)
result_actual_label.text = ""
result_actual_label.text = text
ui.Button("Generate and Copy to Clipboard", clicked_fn=lambda: on_click())
def on_shutdown(self):
print("[omni.openai.snippet] MyExtension shutdown")
| [
"\n",
"Your Prompt:",
" "
] |
2024-01-10 | adrianwedd/babyagi | classic~babyfoxagi~tasks~task_registry.py | import openai
import json
import threading
import os
import numpy as np
from datetime import datetime
from collections import defaultdict
class TaskRegistry:
def __init__(self):
self.tasks = []
# Initialize the lock
self.lock = threading.Lock()
objectives_file_path = "tasks/example_objectives"
self.example_loader = ExampleObjectivesLoader(objectives_file_path)
def load_example_objectives(self, user_objective):
return self.example_loader.load_example_objectives(user_objective)
def create_tasklist(self, objective, skill_descriptions):
#reflect on objective
notes = self.reflect_on_objective(objective,skill_descriptions)
#load most relevant object and tasklist from objectives_examples.json
example_objective, example_tasklist, example_reflection = self.load_example_objectives(objective)
prompt = (
f"You are an expert task list creation AI tasked with creating a list of tasks as a JSON array, considering the ultimate objective of your team: {objective}. "
f"Create a very short task list based on the objective, the final output of the last task will be provided back to the user. Limit tasks types to those that can be completed with the available skills listed below. Task description should be detailed.###"
f"AVAILABLE SKILLS: {skill_descriptions}.###"
f"RULES:"
f"Do not use skills that are not listed."
f"Always provide an ID to each task."
f"Always include one skill."
f"The final task should always output the final result of the overall objective."
f"dependent_task_ids should always be an empty array, or an array of numbers representing the task ID it should pull results from."
f"Make sure all task IDs are in chronological order.###\n"
f"Helpful Notes as guidance:{notes}###\n"
f"EXAMPLE OBJECTIVE={json.dumps(example_objective)}"
f"TASK LIST={json.dumps(example_tasklist)}"
f"OBJECTIVE={objective}"
f"TASK LIST="
)
#print(prompt)
print("\033[90m\033[3m" + "\nInitializing...\n" + "\033[0m")
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=[
{
"role": "system",
"content": "You are a task creation AI."
},
{
"role": "user",
"content": prompt
}
],
temperature=0,
max_tokens=2500,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
# Extract the content of the assistant's response and parse it as JSON
result = response["choices"][0]["message"]["content"]
try:
task_list = json.loads(result)
#print(task_list)
self.tasks = task_list
except Exception as error:
print(error)
def reflect_on_objective(self, objective, skill_descriptions):
#load most relevant object and tasklist from objectives_examples.json
example_objective, example_tasklist, example_reflection = self.load_example_objectives(objective)
prompt = (
f"You are an Ai specializing in generating helpful thoughts and ideas on tackling an objective, and your task is to think about how to tackle this objective: {objective}. "
f"These are the skills available to you: {skill_descriptions}.###"
            f"Think about what tools and information you need to handle this objective, and which of the available skills would be most helpful to you and write a descriptive note to pass onto a task creation AI."
f"Consider the following example objective, tasklist, and reflection as a sample."
f"###EXAMPLE OBJECTIVE:{example_objective}."
f"###EXAMPLE TASKLIST:{example_tasklist}."
f"###REFLECTION FROM EXAMPLE:{example_reflection}."
            f"###THE AI AGENT'S OBJECTIVE:{objective}."
f"###INSTRUCTION: please provide helpful notes for the task creation agent specific to this objective."
)
#print(prompt)
print("\033[90m\033[3m" + "\nInitializing...\n" + "\033[0m")
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=[
{
"role": "system",
"content": f"You are an Ai specializing in generating helpful thoughts and ideas on tackling an objective, and your task is to think about how to tackle this objective: {objective}. "
},
{
"role": "user",
"content": prompt
}
],
temperature=0,
max_tokens=250,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
# Extract the content of the assistant's response and parse it as JSON
result = response["choices"][0]["message"]["content"]
print(result)
return result
def execute_task(self, i, task, skill_registry, task_outputs, objective):
p_nexttask="\033[92m\033[1m"+"\n*****NEXT TASK ID:"+str(task['id'])+"*****\n"+"\033[0m\033[0m"
        p_nexttask += f"\033[92mExecuting task {task.get('id')}: {task.get('task')} [{task.get('skill')}]\033[0m"
print(p_nexttask)
# Retrieve the skill from the registry
skill = skill_registry.get_skill(task['skill'])
# Get the outputs of the dependent tasks
dependent_task_outputs = {dep: task_outputs[dep]["output"] for dep in task['dependent_task_ids']} if 'dependent_task_ids' in task else {}
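        # Maps each dependency ID to its recorded output, e.g. {2: "output of task 2"} (IDs illustrative).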
# Execute the skill
# print("execute:"+str([task['task'], dependent_task_outputs, objective]))
task_output = skill.execute(task['task'], dependent_task_outputs, objective)
print("\033[93m\033[1m"+"\nTask Output (ID:"+str(task['id'])+"):"+"\033[0m\033[0m")
print("TASK: "+str(task["task"]))
print("OUTPUT: "+str(task_output))
return i, task_output
def reorder_tasks(self):
self.tasks= sorted(self.tasks, key=lambda task: task['id'])
def add_task(self, task, after_task_id):
# Get the task ids
task_ids = [t["id"] for t in self.tasks]
# Get the index of the task id to add the new task after
insert_index = task_ids.index(after_task_id) + 1 if after_task_id in task_ids else len(task_ids)
# Insert the new task
self.tasks.insert(insert_index, task)
self.reorder_tasks()
def get_tasks(self):
return self.tasks
def update_tasks(self, task_update):
for task in self.tasks:
if task['id'] == task_update['id']:
task.update(task_update)
self.reorder_tasks()
def reflect_on_output(self, task_output, skill_descriptions):
with self.lock:
example = [
[
{"id": 3, "task": "New task 1 description", "skill": "text_completion_skill",
"dependent_task_ids": [], "status": "complete"},
{"id": 4, "task": "New task 2 description", "skill": "text_completion_skill",
"dependent_task_ids": [], "status": "incomplete"}
],
[2, 3],
{"id": 5, "task": "Complete the objective and provide a final report",
"skill": "text_completion_skill", "dependent_task_ids": [1, 2, 3, 4], "status": "incomplete"}
]
prompt = (
f"You are an expert task manager, review the task output to decide whether any new tasks need to be added, or whether any tasks need to be updated."
f"As you add a new task, see if there are any tasks that need to be updated (such as updating dependencies)."
f"Use the current task list as reference."
f"Do not add duplicate tasks to those in the current task list."
f"Only provide JSON as your response without further comments."
                f"Every new and updated task must include all variables, even if they are empty arrays."
f"Dependent IDs must be smaller than the ID of the task."
f"New tasks IDs should be no larger than the last task ID."
f"Always select at least one skill."
                f"Task IDs should be unique and in chronological order. Do not change the status of complete tasks."
f"Only add skills from the AVAILABLE SKILLS, using the exact same spelling."
f"Provide your array as a JSON array with double quotes. The first object is new tasks to add as a JSON array, the second array lists the ID numbers where the new tasks should be added after (number of ID numbers matches array), and the third object provides the tasks that need to be updated."
f"Make sure to keep dependent_task_ids key, even if an empty array."
f"AVAILABLE SKILLS: {skill_descriptions}.###"
f"\n###Here is the last task output: {task_output}"
f"\n###Here is the current task list: {self.tasks}"
f"\n###EXAMPLE OUTPUT FORMAT = {json.dumps(example)}"
f"\n###OUTPUT = "
)
print("\033[90m\033[3m" + "\nReflecting on task output to generate new tasks if necessary...\n" + "\033[0m")
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k-0613",
messages=[
{
"role": "system",
"content": "You are a task creation AI."
},
{
"role": "user",
"content": prompt
}
],
temperature=0.7,
max_tokens=1500,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
# Extract the content of the assistant's response and parse it as JSON
result = response["choices"][0]["message"]["content"]
print("\n#" + str(result))
# Check if the returned result has the expected structure
if isinstance(result, str):
try:
task_list = json.loads(result)
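                    # Expected shape (see EXAMPLE above): [new_tasks, ids_to_insert_after, tasks_to_update]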
print("####task_list in function")
print(task_list)
print("####task_list split in function")
print(task_list[0], task_list[1], task_list[2])
return task_list[0], task_list[1], task_list[2]
except Exception as error:
print(error)
else:
raise ValueError("Invalid task list structure in the output")
def get_tasks(self):
"""
Returns the current list of tasks.
Returns:
list: the list of tasks.
"""
return self.tasks
def get_task(self, task_id):
"""
Returns a task given its task_id.
Parameters:
task_id : int
The unique ID of the task.
Returns:
dict
The task that matches the task_id.
"""
matching_tasks = [task for task in self.tasks if task["id"] == task_id]
if matching_tasks:
return matching_tasks[0]
else:
print(f"No task found with id {task_id}")
return None
def print_tasklist(self, tasks):
p_tasklist="\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m"
for t in tasks:
dependent_task_ids = t.get('dependent_task_ids', [])
dependent_task = ""
if dependent_task_ids:
dependent_task = f"\033[31m<dependencies: {', '.join([f'#{dep_id}' for dep_id in dependent_task_ids])}>\033[0m"
status_color = "\033[32m" if t.get('status') == "completed" else "\033[31m"
p_tasklist+= f"\033[1m{t.get('id')}\033[0m: {t.get('task')} {status_color}[{t.get('status')}]\033[0m \033[93m[{t.get('skill')}] {dependent_task}\033[0m\n"
print(p_tasklist)
def reflect_tasklist(self, objective, task_list, task_outputs, skill_descriptions):
prompt = (
f"You are an expert task manager. Reflect on the objective, entire task list, and the corresponding outputs to generate a better task list for the objective."
f"Do not included 'results', and change every status to 'incomplete'."
            f"Do not include 'results', and change every status to 'incomplete'."
f"Use the current task list as reference. "
f"Always make at least one change to the current task list "
f"OBJECTIVE: {objective}."
f"AVAILABLE SKILLS: {skill_descriptions}."
f"\n###Here is the current task list: {json.dumps(task_list)}"
f"\n###Here is the task outputs: {json.dumps(task_outputs)}"
f"\n###IMPROVED TASKLIST = "
)
print("\033[90m\033[3m" + "\nReflecting on entire task list...\n" + "\033[0m")
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=[
{
"role": "system",
"content": "You are an AI specializing in reflecting on task lists and improving them. You will never simply return the provided task list, but always improve on it."
},
{
"role": "user",
"content": prompt
}
],
temperature=0,
max_tokens=4000,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
# Extract the content of the assistant's response and parse it as JSON
result = response["choices"][0]["message"]["content"]
try:
improved_task_list = json.loads(result)
# Formatting improved_task_list to your desired format
formatted_improved_task_list = [{
"objective": objective,
"examples": improved_task_list,
"date": datetime.now().strftime("%Y-%m-%d")
}]
with open(f'tasks/example_objectives/improved_{datetime.now().strftime("%Y%m%d%H%M%S")}.json', 'w') as f:
json.dump(formatted_improved_task_list, f)
print(f"IMPROVED TASK LIST:{formatted_improved_task_list}")
except Exception as error:
print(error)
def reflect_on_result(self, objective, task_list, task_outputs, skill_descriptions):
prompt = (
f"You are an expert AI specializing in analyzing yourself, an autonomous agent that combines multiple LLM calls. Reflect on the objective, entire task list, and the corresponding outputs and provide an analysis of the performance of yourself and how you could have performed better."
f"\n###OBJECTIVE: {objective}."
f"\n###AVAILABLE SKILLS: {skill_descriptions}."
f"\n###TASK LIST: {json.dumps(task_list)}"
f"\n###TASK OUTPUTS: {json.dumps(task_outputs)}"
f"\n###ANALYSIS:"
)
print("\033[90m\033[3m" + "\nReflecting on result...\n" + "\033[0m")
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=[
{
"role": "system",
"content": "You are an expert AI specializing in analyzing yourself, an autonomous agent that combines multiple LLM calls. Reflect on the objective, entire task list, and the corresponding outputs and provide an analysis of the performance of yourself and how you could have performed better."
},
{
"role": "user",
"content": prompt
}
],
temperature=0,
max_tokens=2000,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
# Extract the content of the assistant's response and parse it as JSON
result = response["choices"][0]["message"]["content"]
try:
print(result)
return result
except Exception as error:
print(error)
def reflect_on_final(self, objective, task_list, task_outputs, skill_descriptions):
print("here!")
system_content_result = "You are an expert AI specializing in analyzing yourself, an autonomous agent that combines multiple LLM calls. Reflect on the objective, entire task list, and the corresponding outputs and provide an analysis of the performance of yourself and how you could have performed better."
role_content_result = (
f"You are an expert AI specializing in analyzing yourself, an autonomous agent that combines multiple LLM calls. Reflect on the objective, entire task list, and the corresponding outputs and provide an analysis of the performance of yourself and how you could have performed better."
f"\n###OBJECTIVE: {objective}."
f"\n###AVAILABLE SKILLS: {skill_descriptions}."
f"\n###TASK LIST: {json.dumps(task_list)}"
f"\n###TASK OUTPUTS: {json.dumps(task_outputs)}"
f"\n###ANALYSIS:"
)
print("\033[90m\033[3m" + "\nReflecting on result...\n" + "\033[0m")
response = self.chatcompletion(role_content_result, system_content_result,500)
# Extract the content of the assistant's response and parse it as JSON
simple_reflection = response["choices"][0]["message"]["content"]
try:
print(simple_reflection)
except Exception as error:
print(error)
system_content_task = "You are an AI specializing in reflecting on task lists and improving them. You will never simply return the provided task list, but always improve on it."
role_content_task = (
f"You are an expert task manager. Reflect on the objective, entire task list, and the corresponding outputs to generate a better task list for the objective."
            f"Do not include 'results', and change every status to 'incomplete'."
f"Only provide JSON as your response without further comments. "
f"Use the current task list as reference. "
f"Always make at least one change to the current task list "
f"OBJECTIVE: {objective}."
f"AVAILABLE SKILLS: {skill_descriptions}."
f"SIMPLE REFLECTION: {simple_reflection}."
f"\n###Here is the current task list: {json.dumps(task_list)}"
f"\n###Here is the task outputs: {json.dumps(task_outputs)}"
f"\n###IMPROVED TASKLIST = "
)
print("\033[90m\033[3m" + "\nReflecting on entire task list...\n" + "\033[0m")
response = self.chatcompletion(role_content_task, system_content_task,4000)
# Extract the content of the assistant's response and parse it as JSON
result = response["choices"][0]["message"]["content"]
print(result)
try:
improved_task_list = json.loads(result)
# Formatting improved_task_list to your desired format
formatted_improved_task_list = [{
"objective": objective,
"examples": improved_task_list,
"date": datetime.now().strftime("%Y-%m-%d"),
"reflection":simple_reflection
}]
with open(f'tasks/example_objectives/improved_{datetime.now().strftime("%Y%m%d%H%M%S")}.json', 'w') as f:
json.dump(formatted_improved_task_list, f)
print(f"IMPROVED TASK LIST:{formatted_improved_task_list}")
except Exception as error:
print(error)
def chatcompletion(self, role_content, system_content, max_tokens):
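        # Shared wrapper around openai.ChatCompletion.create used by reflect_on_final above.
        # Illustrative call: self.chatcompletion(role_content, system_content, 500)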
return openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=[
{
"role": "system",
"content": system_content
},
{
"role": "user",
"content": role_content
}
],
temperature=0,
max_tokens=max_tokens,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
from datetime import datetime
class ExampleObjectivesLoader:
def __init__(self, objectives_folder_path, decay_factor=0.01):
self.objectives_folder_path = objectives_folder_path
self.decay_factor = decay_factor
self.objectives_examples = [] # Initialize as an empty list
def load_objectives_examples(self):
objectives_dict = defaultdict(dict)
for filename in os.listdir(self.objectives_folder_path):
file_path = os.path.join(self.objectives_folder_path, filename)
with open(file_path, 'r') as file:
objectives = json.load(file)
for objective in objectives:
key = objective['objective']
date = objective.get('date', None)
if date is not None:
date = datetime.strptime(date, '%Y-%m-%d')
if key not in objectives_dict or (date and datetime.strptime(objectives_dict[key]['date'], "%Y-%m-%d") < date):
objectives_dict[key] = objective
self.objectives_examples = list(objectives_dict.values())
def find_most_relevant_objective(self, user_input):
user_input_embedding = self.get_embedding(user_input, model='text-embedding-ada-002')
most_relevant_objective = max(
self.objectives_examples,
key=lambda pair: self.cosine_similarity(pair['objective'], user_input_embedding) * self.get_decay(pair)
)
return most_relevant_objective['objective'], most_relevant_objective['examples'], most_relevant_objective.get('reflection', '')
def get_decay(self, objective):
date = objective.get('date', None)
if date is not None:
date = datetime.strptime(date, '%Y-%m-%d')
days_passed = (datetime.now() - date).days
else:
# if there's no date, assume a large number of days passed
days_passed = 365 * 10 # 10 years
decay = np.exp(-self.decay_factor * days_passed)
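        # e.g. with decay_factor=0.01: 30 days old -> exp(-0.3) ~= 0.74, 365 days old -> exp(-3.65) ~= 0.03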
return decay
def get_embedding(self, text, model='text-embedding-ada-002'):
response = openai.Embedding.create(input=[text], model=model)
embedding = response['data'][0]['embedding']
return embedding
def cosine_similarity(self, objective, embedding):
max_similarity = float('-inf')
objective_embedding = self.get_embedding(objective, model='text-embedding-ada-002')
similarity = self.calculate_similarity(objective_embedding, embedding)
max_similarity = max(max_similarity, similarity)
return max_similarity
def calculate_similarity(self, embedding1, embedding2):
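        # Cosine similarity of the two embedding vectors: 1.0 means identical direction, 0.0 means orthogonal.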
embedding1 = np.array(embedding1, dtype=np.float32)
embedding2 = np.array(embedding2, dtype=np.float32)
similarity = np.dot(embedding1, embedding2) / (np.linalg.norm(embedding1) * np.linalg.norm(embedding2))
return similarity
def load_example_objectives(self, user_objective):
self.load_objectives_examples()
most_relevant_objective, most_relevant_tasklist, most_relevant_reflection = self.find_most_relevant_objective(user_objective)
example_objective = most_relevant_objective
example_tasklist = most_relevant_tasklist
example_reflection = most_relevant_reflection
return example_objective, example_tasklist, example_reflection
| [
"Always select at least one skill.",
"TASK LIST=",
"Provide your array as a JSON array with double quotes. The first object is new tasks to add as a JSON array, the second array lists the ID numbers where the new tasks should be added after (number of ID numbers matches array), and the third object provides the tasks that need to be updated.",
"Use the current task list as reference. ",
"\n###OUTPUT = ",
"Use the current task list as reference.",
"Dependent IDs must be smaller than the ID of the task.",
"You are an expert task manager. Reflect on the objective, entire task list, and the corresponding outputs to generate a better task list for the objective.",
"Make sure all task IDs are in chronological order.###\n",
"AVAILABLE SKILLS: PLACEHOLDER.###",
"OBJECTIVE: PLACEHOLDER.",
"You are an expert task manager, review the task output to decide whether any new tasks need to be added, or whether any tasks need to be updated.",
"Only add skills from the AVAILABLE SKILLS, using the exact same spelling.",
"\n###IMPROVED TASKLIST = ",
"Make sure to keep dependent_task_ids key, even if an empty array.",
"AVAILABLE SKILLS: PLACEHOLDER.",
"As you add a new task, see if there are any tasks that need to be updated (such as updating dependencies).",
"Create a very short task list based on the objective, the final output of the last task will be provided back to the user. Limit tasks types to those that can be completed with the available skills listed below. Task description should be detailed.###",
"Helpful Notes as guidance:PLACEHOLDER###\n",
"Do not change the status of complete tasks.",
"\n###ANALYSIS:",
"Do not add duplicate tasks to those in the current task list.",
"New tasks IDs should be no larger than the last task ID.",
"Always include one skill.",
"Always make at least one change to the current task list ",
"Only provide JSON as your response without further comments. ",
"Task IDs should be unique and in chronological order.",
"You are an expert task list creation AI tasked with creating a list of tasks as a JSON array, considering the ultimate objective of your team: PLACEHOLDER. ",
"Always provide an ID to each task.",
"You are an Ai specializing in generating helpful thoughts and ideas on tackling an objective, and your task is to think about how to tackle this objective: PLACEHOLDER. ",
"OBJECTIVE=PLACEHOLDER",
"\n###AVAILABLE SKILLS: PLACEHOLDER.",
"\n###Here is the last task output: PLACEHOLDER",
"Do not use skills that are not listed.",
"Do not included 'results', and change every status to 'incomplete'.",
"The final task should always output the final result of the overall objective.",
"You are a task creation AI.",
"You are an expert AI specializing in analyzing yourself, an autonomous agent that combines multiple LLM calls. Reflect on the objective, entire task list, and the corresponding outputs and provide an analysis of the performance of yourself and how you could have performed better.",
"Every new and updated task must include all variables, even they are empty array.",
"\n###OBJECTIVE: PLACEHOLDER.",
"dependent_task_ids should always be an empty array, or an array of numbers representing the task ID it should pull results from.",
"You are an Ai specializing in generating helpful thoughts and ideas on tackling an objective, and your task is to think about how to tackle this objective: PLACEHOLDER. These are the skills available to you: PLACEHOLDER.###Think about what tools and information you need to handle this objective, and which of the available skills would be most helpful to you and writea descriptive note to pass onto a task creation AI.Consider the following example objective, tasklist, and reflection as a sample.###EXAMPLE OBJECTIVE:PLACEHOLDER.###EXAMPLE TASKLIST:PLACEHOLDER.###REFLECTION FROM EXAMPLE:PLACEHOLDER.###THE AI AGENT'S OBJECTIVE:PLACEHOLDER.###INSTRUCTION: please provide helpful notes for the task creation agent specific to this objective.",
"Only provide JSON as your response without further comments.",
"You are an AI specializing in reflecting on task lists and improving them. You will never simply return the provided task list, but always improve on it."
] |
2024-01-10 | adrianwedd/babyagi | classic~BabyElfAGI~tasks~task_registry.py | import openai
import json
import threading
import os
import numpy as np
class TaskRegistry:
def __init__(self):
self.tasks = []
# Initialize the lock
self.lock = threading.Lock()
objectives_file_path = "tasks/example_objectives"
self.example_loader = ExampleObjectivesLoader(objectives_file_path)
def load_example_objectives(self, user_objective):
return self.example_loader.load_example_objectives(user_objective)
def create_tasklist(self, objective, skill_descriptions):
#load most relevant object and tasklist from objectives_examples.json
example_objective, example_tasklist = self.load_example_objectives(objective)
prompt = (
f"You are an expert task list creation AI tasked with creating a list of tasks as a JSON array, considering the ultimate objective of your team: {objective}. "
f"Create a very short task list based on the objective, the final output of the last task will be provided back to the user. Limit tasks types to those that can be completed with the available skills listed below. Task description should be detailed.###"
f"AVAILABLE SKILLS: {skill_descriptions}.###"
f"RULES:"
f"Do not use skills that are not listed."
f"Always include one skill."
f"dependent_task_ids should always be an empty array, or an array of numbers representing the task ID it should pull results from."
f"Make sure all task IDs are in chronological order.###\n"
f"EXAMPLE OBJECTIVE={json.dumps(example_objective)}"
f"TASK LIST={json.dumps(example_tasklist)}"
f"OBJECTIVE={objective}"
f"TASK LIST="
)
print("\033[90m\033[3m" + "\nInitializing...\n" + "\033[0m")
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=[
{
"role": "system",
"content": "You are a task creation AI."
},
{
"role": "user",
"content": prompt
}
],
temperature=0,
max_tokens=1500,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
# Extract the content of the assistant's response and parse it as JSON
result = response["choices"][0]["message"]["content"]
try:
task_list = json.loads(result)
self.tasks = task_list
except Exception as error:
print(error)
def execute_task(self, i, task, skill_registry, task_outputs, objective):
p_nexttask="\033[92m\033[1m"+"\n*****NEXT TASK ID:"+str(task['id'])+"*****\n"+"\033[0m\033[0m"
        p_nexttask += f"\033[92mExecuting task {task.get('id')}: {task.get('task')} [{task.get('skill')}]\033[0m"
print(p_nexttask)
# Retrieve the skill from the registry
skill = skill_registry.get_skill(task['skill'])
# Get the outputs of the dependent tasks
dependent_task_outputs = {dep: task_outputs[dep]["output"] for dep in task['dependent_task_ids']} if 'dependent_task_ids' in task else {}
# Execute the skill
# print("execute:"+str([task['task'], dependent_task_outputs, objective]))
task_output = skill.execute(task['task'], dependent_task_outputs, objective)
print("\033[93m\033[1m"+"\nTask Output (ID:"+str(task['id'])+"):"+"\033[0m\033[0m")
print("TASK: "+str(task["task"]))
print("OUTPUT: "+str(task_output))
return i, task_output
def reorder_tasks(self):
self.tasks = sorted(self.tasks, key=lambda task: task['id'])
def add_task(self, task, after_task_id):
# Get the task ids
task_ids = [t["id"] for t in self.tasks]
# Get the index of the task id to add the new task after
insert_index = task_ids.index(after_task_id) + 1 if after_task_id in task_ids else len(task_ids)
# Insert the new task
self.tasks.insert(insert_index, task)
self.reorder_tasks()
def update_tasks(self, task_update):
for task in self.tasks:
if task['id'] == task_update['id']:
# This merges the original task dictionary with the update, overwriting only the fields present in the update.
task.update(task_update)
self.reorder_tasks()
def reflect_on_output(self, task_output, skill_descriptions):
with self.lock:
example = [
[
{"id": 3, "task": "New task 1 description", "skill": "text_completion_skill",
"dependent_task_ids": [], "status": "complete"},
{"id": 4, "task": "New task 2 description", "skill": "text_completion_skill",
"dependent_task_ids": [], "status": "incomplete"}
],
[2, 3],
{"id": 5, "task": "Complete the objective and provide a final report",
"skill": "text_completion_skill", "dependent_task_ids": [1, 2, 3, 4], "status": "incomplete"}
]
prompt = (
f"You are an expert task manager, review the task output to decide at least one new task to add."
f"As you add a new task, see if there are any tasks that need to be updated (such as updating dependencies)."
f"Use the current task list as reference."
f"Do not add duplicate tasks to those in the current task list."
f"Only provide JSON as your response without further comments."
                f"Every new and updated task must include all variables, even if they are empty arrays."
f"Dependent IDs must be smaller than the ID of the task."
f"New tasks IDs should be no larger than the last task ID."
f"Always select at least one skill."
                f"Task IDs should be unique and in chronological order. Do not change the status of complete tasks."
f"Only add skills from the AVAILABLE SKILLS, using the exact same spelling."
f"Provide your array as a JSON array with double quotes. The first object is new tasks to add as a JSON array, the second array lists the ID numbers where the new tasks should be added after (number of ID numbers matches array), and the third object provides the tasks that need to be updated."
f"Make sure to keep dependent_task_ids key, even if an empty array."
f"AVAILABLE SKILLS: {skill_descriptions}.###"
f"\n###Here is the last task output: {task_output}"
f"\n###Here is the current task list: {self.tasks}"
f"\n###EXAMPLE OUTPUT FORMAT = {json.dumps(example)}"
f"\n###OUTPUT = "
)
print("\033[90m\033[3m" + "\nReflecting on task output to generate new tasks if necessary...\n" + "\033[0m")
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k-0613",
messages=[
{
"role": "system",
"content": "You are a task creation AI."
},
{
"role": "user",
"content": prompt
}
],
temperature=0.7,
max_tokens=1500,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
# Extract the content of the assistant's response and parse it as JSON
result = response["choices"][0]["message"]["content"]
print("\n#" + str(result))
# Check if the returned result has the expected structure
if isinstance(result, str):
try:
task_list = json.loads(result)
# print("RESULT:")
print(task_list)
# return [],[],[]
return task_list[0], task_list[1], task_list[2]
except Exception as error:
print(error)
else:
raise ValueError("Invalid task list structure in the output")
def get_tasks(self):
"""
Returns the current list of tasks.
Returns:
list: the list of tasks.
"""
return self.tasks
def get_task(self, task_id):
"""
Returns a task given its task_id.
Parameters:
task_id : int
The unique ID of the task.
Returns:
dict
The task that matches the task_id.
"""
matching_tasks = [task for task in self.tasks if task["id"] == task_id]
if matching_tasks:
return matching_tasks[0]
else:
print(f"No task found with id {task_id}")
return None
def print_tasklist(self, task_list):
p_tasklist="\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m"
for t in task_list:
dependent_task_ids = t.get('dependent_task_ids', [])
dependent_task = ""
if dependent_task_ids:
dependent_task = f"\033[31m<dependencies: {', '.join([f'#{dep_id}' for dep_id in dependent_task_ids])}>\033[0m"
status_color = "\033[32m" if t.get('status') == "completed" else "\033[31m"
p_tasklist+= f"\033[1m{t.get('id')}\033[0m: {t.get('task')} {status_color}[{t.get('status')}]\033[0m \033[93m[{t.get('skill')}] {dependent_task}\033[0m\n"
print(p_tasklist)
class ExampleObjectivesLoader:
def __init__(self, objectives_folder_path):
self.objectives_folder_path = objectives_folder_path
self.objectives_examples = [] # Initialize as an empty list
def load_objectives_examples(self):
self.objectives_examples = []
for filename in os.listdir(self.objectives_folder_path):
file_path = os.path.join(self.objectives_folder_path, filename)
with open(file_path, 'r') as file:
objectives = json.load(file)
self.objectives_examples.extend(objectives)
def find_most_relevant_objective(self, user_input):
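        # Embeds the user objective once, then scores each stored example objective against it
        # (each example objective is re-embedded inside cosine_similarity).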
user_input_embedding = self.get_embedding(user_input, model='text-embedding-ada-002')
most_relevant_objective = max(
self.objectives_examples,
key=lambda pair: self.cosine_similarity(pair['objective'], user_input_embedding)
)
return most_relevant_objective['objective'], most_relevant_objective['examples']
def get_embedding(self, text, model='text-embedding-ada-002'):
response = openai.Embedding.create(input=[text], model=model)
embedding = response['data'][0]['embedding']
return embedding
def cosine_similarity(self, objective, embedding):
max_similarity = float('-inf')
objective_embedding = self.get_embedding(objective, model='text-embedding-ada-002')
similarity = self.calculate_similarity(objective_embedding, embedding)
max_similarity = max(max_similarity, similarity)
return max_similarity
def calculate_similarity(self, embedding1, embedding2):
embedding1 = np.array(embedding1, dtype=np.float32)
embedding2 = np.array(embedding2, dtype=np.float32)
similarity = np.dot(embedding1, embedding2) / (np.linalg.norm(embedding1) * np.linalg.norm(embedding2))
return similarity
def load_example_objectives(self, user_objective):
self.load_objectives_examples()
most_relevant_objective, most_relevant_tasklist = self.find_most_relevant_objective(user_objective)
example_objective = most_relevant_objective
example_tasklist = most_relevant_tasklist
return example_objective, example_tasklist
| [
"Always select at least one skill.",
"TASK LIST=",
"Provide your array as a JSON array with double quotes. The first object is new tasks to add as a JSON array, the second array lists the ID numbers where the new tasks should be added after (number of ID numbers matches array), and the third object provides the tasks that need to be updated.",
"\n###OUTPUT = ",
"Use the current task list as reference.",
"Dependent IDs must be smaller than the ID of the task.",
"Make sure all task IDs are in chronological order.###\n",
"AVAILABLE SKILLS: PLACEHOLDER.###",
"Only add skills from the AVAILABLE SKILLS, using the exact same spelling.",
"Make sure to keep dependent_task_ids key, even if an empty array.",
"As you add a new task, see if there are any tasks that need to be updated (such as updating dependencies).",
"Create a very short task list based on the objective, the final output of the last task will be provided back to the user. Limit tasks types to those that can be completed with the available skills listed below. Task description should be detailed.###",
"Do not change the status of complete tasks.",
"Do not add duplicate tasks to those in the current task list.",
"New tasks IDs should be no larger than the last task ID.",
"You are an expert task manager, review the task output to decide at least one new task to add.",
"Always include one skill.",
"Task IDs should be unique and in chronological order.",
"You are an expert task list creation AI tasked with creating a list of tasks as a JSON array, considering the ultimate objective of your team: PLACEHOLDER. ",
"OBJECTIVE=PLACEHOLDER",
"\n###Here is the last task output: PLACEHOLDER",
"Do not use skills that are not listed.",
"You are a task creation AI.",
"Every new and updated task must include all variables, even they are empty array.",
"dependent_task_ids should always be an empty array, or an array of numbers representing the task ID it should pull results from.",
"Only provide JSON as your response without further comments."
] |