date_collected (string, 1 distinct value) | repo_name (string, 6-116 chars) | file_name (string, 2-220 chars) | file_contents (string, 13-357k chars) | prompts (sequence)
---|---|---|---|---
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230519010534.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
HARD_LIMIT_CHAR = 10000
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
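# Illustrative example (not from the original script): extract_json_values walks the
# string, skipping non-JSON prefixes ("Expecting value") and cutting off trailing text
# ("Extra data"), so
#   extract_json_values('noise {"title": "A", "date": "2023"} trailing text')
# would return [{"title": "A", "date": "2023"}].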
#TODO : Do URL check and show a message when it is not valid
#Web scraping
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fix this limit in a smarter way
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
print(result_from_chatgpt)
file_path = "gpt_out.txt"
parsed_articles = json.loads(result_from_chatgpt)
#Logging
file_path = "output_gpt.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
#st.json(parsed_articles)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
#TODO : Do a google search limited to the website given, of the articles, get their content
#TODO : Add a field to ask a question (maybe multiple choice field)
#TODO : Ask the article and the question to Chatgpt
#TODO : Display results to the user
#TODO :
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518162731.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : Do URL check and show a message when it is not valid
#Web scraping
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 5. In Json format. No Other text.\
webpage : \"{webpage}\"",
)
llm = OpenAI(temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
st.write(result_from_chatgpt)
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 5. In Json format. No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518172729.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
HARD_LIMIT_CHAR = 10000
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : Do URL check and show a message when it is not valid
#Web scraping
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fix this limit in a smarter way
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
st.json(parsed_articles)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | victoireladreit/epf-ptp-docker-chatgpt-lab-main | hello.py | from flask import Flask,request
import os
import openai
app = Flask(__name__)
openai.api_key = os.environ.get('OPENAI_KEY')
@app.route('/')
def index():
return "<h1>Hello, World!</h1>"
@app.route('/chatgpt')
def chatgpt():
args = request.args
message = args.get("message")
print(message)
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": message}]
)
return completion['choices'][0]['message']['content']
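# Example call (illustrative, assuming the default Flask dev server on port 5000):
#   curl "http://localhost:5000/chatgpt?message=Hello"
# The route reads the "message" query parameter and returns the model's reply text.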
@app.route('/generate_code')
def generate_code():
# get user input
language = request.args.get('language')
content = request.args.get('content')
# create code using OpenAI
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": f"Generate {language} code: {content}"}]
)
return completion['choices'][0]['message']['content']
| [
"Generate PLACEHOLDER code: PLACEHOLDER"
] |
2024-01-10 | bettkipkemoi/meme_generator | meme.py | import streamlit as st
import requests
import openai
import random
st.set_page_config(page_title="Meme Generator", page_icon=":laughing:")
# Set your Unsplash API key here
UNSPLASH_API_KEY = "YOUR_API_KEY"
# Set your OpenAI GPT-3 API key here
GPT3_API_KEY = "YOUR_API_KEY"
# Function to generate a meme caption using GPT-3
def generate_meme_caption(prompt):
openai.api_key = GPT3_API_KEY
response = openai.Completion.create(
engine="davinci",
prompt=prompt,
max_tokens=30,
)
return response.choices[0].text
# Function to fetch a random image from Unsplash
def get_random_image(query):
headers = {
"Authorization": f"Client-ID {UNSPLASH_API_KEY}",
}
params = {
"query": query,
"per_page": 1,
}
response = requests.get("https://api.unsplash.com/photos/random", headers=headers, params=params)
if response.status_code == 200:
return response.json()
else:
return None
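# Note: the JSON returned by Unsplash's random-photo endpoint includes the image
# URLs; the UI code below reads image_data["urls"]["regular"].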
# Streamlit UI
st.title("Meme Generator")
st.sidebar.title("Generate Your Meme")
# Input for the meme topic
meme_topic = st.sidebar.text_input("Meme Topic", "funny cats")
# Button to generate a meme
if st.sidebar.button("Generate Meme"):
# Fetch a random image
image_data = get_random_image(meme_topic)
if image_data:
image_url = image_data["urls"]["regular"]
st.image(image_url, caption="Your Random Image")
# Generate a meme caption
meme_caption = generate_meme_caption(f"Create a meme about {meme_topic}.")
st.write("Meme Caption:", meme_caption)
else:
st.warning("Unable to fetch an image. Please try again with a different topic.")
# Add a footer
st.sidebar.markdown("Created by Your Name")
# Run the app
if __name__ == '__main__':
st.write("Welcome to the Meme Generator!")
| [] |
2024-01-10 | silasfelinus/serendipity | wip~chatbot~messaging_manager.py | #./app/chatbot/messaging_manager.py
from app.chatbot.prompt_builder import PromptBuilder
import openai
import requests
class MessagingManager:
def __init__(self, bot_config):
self.bot_config = bot_config
self.api_key = self.bot_config.get_api_key()
def send_message(self, message, chatbot_id):
headers = {
'Content-Type': 'application/json',
'Authorization': f'Bearer {self.api_key}'
}
payload = {
'prompt': message,
'temperature': self.bot_config.get_config('temperature'),
'max_tokens': self.bot_config.get_config('max_tokens'),
'top_p': self.bot_config.get_config('top_p'),
'frequency_penalty': self.bot_config.get_config('frequency_penalty'),
'presence_penalty': self.bot_config.get_config('presence_penalty'),
'stop': self.bot_config.get_config('stop')
}
response = requests.post(
f'https://api.openai.com/v1/chatbots/{chatbot_id}/messages',
headers=headers,
json=payload
)
response_data = response.json()
if response.status_code != 200:
error_message = response_data.get('error', {}).get('message', 'Unknown error')
raise ValueError(f'Failed to send message to chatbot: {error_message}')
return response_data['choices'][0]['text'] | [] |
2024-01-10 | OlivierDehaene/langchain | langchain~text_splitter.py | """Functionality for splitting text."""
from __future__ import annotations
import logging
from abc import ABC, abstractmethod
from typing import Any, Callable, Iterable, List, Optional
from langchain.docstore.document import Document
logger = logging.getLogger()
class TextSplitter(ABC):
"""Interface for splitting text into chunks."""
def __init__(
self,
chunk_size: int = 4000,
chunk_overlap: int = 200,
length_function: Callable[[str], int] = len,
):
"""Create a new TextSplitter."""
if chunk_overlap > chunk_size:
raise ValueError(
f"Got a larger chunk overlap ({chunk_overlap}) than chunk size "
f"({chunk_size}), should be smaller."
)
self._chunk_size = chunk_size
self._chunk_overlap = chunk_overlap
self._length_function = length_function
@abstractmethod
def split_text(self, text: str) -> List[str]:
"""Split text into multiple components."""
def create_documents(
self, texts: List[str], metadatas: Optional[List[dict]] = None
) -> List[Document]:
"""Create documents from a list of texts."""
_metadatas = metadatas or [{}] * len(texts)
documents = []
for i, text in enumerate(texts):
for chunk in self.split_text(text):
documents.append(Document(page_content=chunk, metadata=_metadatas[i]))
return documents
def _join_docs(self, docs: List[str], separator: str) -> Optional[str]:
text = separator.join(docs)
text = text.strip()
if text == "":
return None
else:
return text
def _merge_splits(self, splits: Iterable[str], separator: str) -> List[str]:
# We now want to combine these smaller pieces into medium size
# chunks to send to the LLM.
docs = []
current_doc: List[str] = []
total = 0
for d in splits:
_len = self._length_function(d)
if total + _len >= self._chunk_size:
if total > self._chunk_size:
logger.warning(
f"Created a chunk of size {total}, "
f"which is longer than the specified {self._chunk_size}"
)
if len(current_doc) > 0:
doc = self._join_docs(current_doc, separator)
if doc is not None:
docs.append(doc)
# Keep on popping if:
# - we have a larger chunk than in the chunk overlap
# - or if we still have any chunks and the length is long
while total > self._chunk_overlap or (
total + _len > self._chunk_size and total > 0
):
total -= self._length_function(current_doc[0])
current_doc = current_doc[1:]
current_doc.append(d)
total += _len
doc = self._join_docs(current_doc, separator)
if doc is not None:
docs.append(doc)
return docs
@classmethod
def from_huggingface_tokenizer(cls, tokenizer: Any, **kwargs: Any) -> TextSplitter:
"""Text splitter that uses HuggingFace tokenizer to count length."""
try:
from transformers import PreTrainedTokenizerBase
if not isinstance(tokenizer, PreTrainedTokenizerBase):
raise ValueError(
"Tokenizer received was not an instance of PreTrainedTokenizerBase"
)
def _huggingface_tokenizer_length(text: str) -> int:
return len(tokenizer.encode(text))
except ImportError:
raise ValueError(
"Could not import transformers python package. "
"Please it install it with `pip install transformers`."
)
return cls(length_function=_huggingface_tokenizer_length, **kwargs)
@classmethod
def from_tiktoken_encoder(
cls, encoding_name: str = "gpt2", **kwargs: Any
) -> TextSplitter:
"""Text splitter that uses tiktoken encoder to count length."""
try:
import tiktoken
except ImportError:
raise ValueError(
"Could not import tiktoken python package. "
"This is needed in order to calculate max_tokens_for_prompt. "
"Please it install it with `pip install tiktoken`."
)
# create a GPT-3 encoder instance
enc = tiktoken.get_encoding(encoding_name)
def _tiktoken_encoder(text: str) -> int:
return len(enc.encode(text))
return cls(length_function=_tiktoken_encoder, **kwargs)
class CharacterTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at characters."""
def __init__(self, separator: str = "\n\n", **kwargs: Any):
"""Create a new TextSplitter."""
super().__init__(**kwargs)
self._separator = separator
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
# First we naively split the large input into a bunch of smaller ones.
if self._separator:
splits = text.split(self._separator)
else:
splits = list(text)
return self._merge_splits(splits, self._separator)
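# Illustrative usage (not part of the library source), with a deliberately small chunk size:
#   splitter = CharacterTextSplitter(separator=" ", chunk_size=20, chunk_overlap=5)
#   chunks = splitter.split_text("one two three four five six seven")
# Each chunk holds roughly chunk_size characters' worth of splits, and neighbouring
# chunks can share up to chunk_overlap characters' worth.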
class TokenTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at tokens."""
def __init__(self, encoding_name: str = "gpt2", **kwargs: Any):
"""Create a new TextSplitter."""
super().__init__(**kwargs)
try:
import tiktoken
except ImportError:
raise ValueError(
"Could not import tiktoken python package. "
"This is needed in order to for TokenTextSplitter. "
"Please it install it with `pip install tiktoken`."
)
# create a GPT-3 encoder instance
self._tokenizer = tiktoken.get_encoding(encoding_name)
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
splits = []
input_ids = self._tokenizer.encode(text)
start_idx = 0
cur_idx = min(start_idx + self._chunk_size, len(input_ids))
chunk_ids = input_ids[start_idx:cur_idx]
while start_idx < len(input_ids):
splits.append(self._tokenizer.decode(chunk_ids))
start_idx += self._chunk_size - self._chunk_overlap
cur_idx = min(start_idx + self._chunk_size, len(input_ids))
chunk_ids = input_ids[start_idx:cur_idx]
return splits
class RecursiveCharacterTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at characters.
Recursively tries to split by different characters to find one
that works.
"""
def __init__(self, separators: Optional[List[str]] = None, **kwargs: Any):
"""Create a new TextSplitter."""
super().__init__(**kwargs)
self._separators = separators or ["\n\n", "\n", " ", ""]
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
final_chunks = []
# Get appropriate separator to use
separator = self._separators[-1]
for _s in self._separators:
if _s == "":
separator = _s
break
if _s in text:
separator = _s
break
# Now that we have the separator, split the text
if separator:
splits = text.split(separator)
else:
splits = list(text)
# Now go merging things, recursively splitting longer texts.
_good_splits = []
for s in splits:
if len(s) < self._chunk_size:
_good_splits.append(s)
else:
if _good_splits:
merged_text = self._merge_splits(_good_splits, separator)
final_chunks.extend(merged_text)
_good_splits = []
other_info = self.split_text(s)
final_chunks.extend(other_info)
if _good_splits:
merged_text = self._merge_splits(_good_splits, separator)
final_chunks.extend(merged_text)
return final_chunks
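# Illustrative usage (not part of the library source): the recursive splitter tries
# "\n\n", then "\n", then " ", then single characters, so
#   RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=0).split_text(long_text)
# keeps paragraphs intact where possible and only splits finer when a piece is still
# longer than chunk_size.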
class NLTKTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at sentences using NLTK."""
def __init__(self, separator: str = "\n\n", **kwargs: Any):
"""Initialize the NLTK splitter."""
super().__init__(**kwargs)
try:
from nltk.tokenize import sent_tokenize
self._tokenizer = sent_tokenize
except ImportError:
raise ImportError(
"NLTK is not installed, please install it with `pip install nltk`."
)
self._separator = separator
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
# First we naively split the large input into a bunch of smaller ones.
splits = self._tokenizer(text)
return self._merge_splits(splits, self._separator)
class SpacyTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at sentences using Spacy."""
def __init__(
self, separator: str = "\n\n", pipeline: str = "en_core_web_sm", **kwargs: Any
):
"""Initialize the spacy text splitter."""
super().__init__(**kwargs)
try:
import spacy
except ImportError:
raise ImportError(
"Spacy is not installed, please install it with `pip install spacy`."
)
self._tokenizer = spacy.load(pipeline)
self._separator = separator
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
splits = (str(s) for s in self._tokenizer(text).sents)
return self._merge_splits(splits, self._separator)
| [] |
2024-01-10 | OlivierDehaene/langchain | langchain~chains~loading.py | """Functionality for loading chains."""
import json
from pathlib import Path
from typing import Any, Union
import yaml
from langchain.chains.api.base import APIChain
from langchain.chains.base import Chain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain
from langchain.chains.combine_documents.refine import RefineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.hyde.base import HypotheticalDocumentEmbedder
from langchain.chains.llm import LLMChain
from langchain.chains.llm_bash.base import LLMBashChain
from langchain.chains.llm_checker.base import LLMCheckerChain
from langchain.chains.llm_math.base import LLMMathChain
from langchain.chains.llm_requests import LLMRequestsChain
from langchain.chains.pal.base import PALChain
from langchain.chains.qa_with_sources.base import QAWithSourcesChain
from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain
from langchain.chains.sql_database.base import SQLDatabaseChain
from langchain.chains.vector_db_qa.base import VectorDBQA
from langchain.llms.loading import load_llm, load_llm_from_config
from langchain.prompts.loading import load_prompt, load_prompt_from_config
from langchain.utilities.loading import try_load_from_hub
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/chains/"
def _load_llm_chain(config: dict, **kwargs: Any) -> LLMChain:
"""Load LLM chain from config dict."""
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
else:
raise ValueError("One of `prompt` or `prompt_path` must be present.")
return LLMChain(llm=llm, prompt=prompt, **config)
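# For reference (illustrative, not taken from the library docs), a minimal config this
# loader handles looks roughly like:
#   {"_type": "llm_chain",
#    "llm": {...serialized LLM config...},
#    "prompt": {...serialized prompt config...}}
# load_chain_from_config below pops "_type" before dispatching to this function.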
def _load_hyde_chain(config: dict, **kwargs: Any) -> HypotheticalDocumentEmbedder:
"""Load hypothetical document embedder chain from config dict."""
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
if "embeddings" in kwargs:
embeddings = kwargs.pop("embeddings")
else:
raise ValueError("`embeddings` must be present.")
return HypotheticalDocumentEmbedder(
llm_chain=llm_chain, base_embeddings=embeddings, **config
)
def _load_stuff_documents_chain(config: dict, **kwargs: Any) -> StuffDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
if not isinstance(llm_chain, LLMChain):
raise ValueError(f"Expected LLMChain, got {llm_chain}")
if "document_prompt" in config:
prompt_config = config.pop("document_prompt")
document_prompt = load_prompt_from_config(prompt_config)
elif "document_prompt_path" in config:
document_prompt = load_prompt(config.pop("document_prompt_path"))
else:
raise ValueError(
"One of `document_prompt` or `document_prompt_path` must be present."
)
return StuffDocumentsChain(
llm_chain=llm_chain, document_prompt=document_prompt, **config
)
def _load_map_reduce_documents_chain(
config: dict, **kwargs: Any
) -> MapReduceDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
if not isinstance(llm_chain, LLMChain):
raise ValueError(f"Expected LLMChain, got {llm_chain}")
if "combine_document_chain" in config:
combine_document_chain_config = config.pop("combine_document_chain")
combine_document_chain = load_chain_from_config(combine_document_chain_config)
elif "combine_document_chain_path" in config:
combine_document_chain = load_chain(config.pop("combine_document_chain_path"))
else:
raise ValueError(
"One of `combine_document_chain` or "
"`combine_document_chain_path` must be present."
)
if "collapse_document_chain" in config:
collapse_document_chain_config = config.pop("collapse_document_chain")
if collapse_document_chain_config is None:
collapse_document_chain = None
else:
collapse_document_chain = load_chain_from_config(
collapse_document_chain_config
)
elif "collapse_document_chain_path" in config:
collapse_document_chain = load_chain(config.pop("collapse_document_chain_path"))
return MapReduceDocumentsChain(
llm_chain=llm_chain,
combine_document_chain=combine_document_chain,
collapse_document_chain=collapse_document_chain,
**config,
)
def _load_llm_bash_chain(config: dict, **kwargs: Any) -> LLMBashChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
return LLMBashChain(llm=llm, prompt=prompt, **config)
def _load_llm_checker_chain(config: dict, **kwargs: Any) -> LLMCheckerChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "create_draft_answer_prompt" in config:
create_draft_answer_prompt_config = config.pop("create_draft_answer_prompt")
create_draft_answer_prompt = load_prompt_from_config(
create_draft_answer_prompt_config
)
elif "create_draft_answer_prompt_path" in config:
create_draft_answer_prompt = load_prompt(
config.pop("create_draft_answer_prompt_path")
)
if "list_assertions_prompt" in config:
list_assertions_prompt_config = config.pop("list_assertions_prompt")
list_assertions_prompt = load_prompt_from_config(list_assertions_prompt_config)
elif "list_assertions_prompt_path" in config:
list_assertions_prompt = load_prompt(config.pop("list_assertions_prompt_path"))
if "check_assertions_prompt" in config:
check_assertions_prompt_config = config.pop("check_assertions_prompt")
check_assertions_prompt = load_prompt_from_config(
check_assertions_prompt_config
)
elif "check_assertions_prompt_path" in config:
check_assertions_prompt = load_prompt(
config.pop("check_assertions_prompt_path")
)
if "revised_answer_prompt" in config:
revised_answer_prompt_config = config.pop("revised_answer_prompt")
revised_answer_prompt = load_prompt_from_config(revised_answer_prompt_config)
elif "revised_answer_prompt_path" in config:
revised_answer_prompt = load_prompt(config.pop("revised_answer_prompt_path"))
return LLMCheckerChain(
llm=llm,
create_draft_answer_prompt=create_draft_answer_prompt,
list_assertions_prompt=list_assertions_prompt,
check_assertions_prompt=check_assertions_prompt,
revised_answer_prompt=revised_answer_prompt,
**config,
)
def _load_llm_math_chain(config: dict, **kwargs: Any) -> LLMMathChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
return LLMMathChain(llm=llm, prompt=prompt, **config)
def _load_map_rerank_documents_chain(
config: dict, **kwargs: Any
) -> MapRerankDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
return MapRerankDocumentsChain(llm_chain=llm_chain, **config)
def _load_pal_chain(config: dict, **kwargs: Any) -> PALChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
else:
raise ValueError("One of `prompt` or `prompt_path` must be present.")
return PALChain(llm=llm, prompt=prompt, **config)
def _load_refine_documents_chain(config: dict, **kwargs: Any) -> RefineDocumentsChain:
if "initial_llm_chain" in config:
initial_llm_chain_config = config.pop("initial_llm_chain")
initial_llm_chain = load_chain_from_config(initial_llm_chain_config)
elif "initial_llm_chain_path" in config:
initial_llm_chain = load_chain(config.pop("initial_llm_chain_path"))
else:
raise ValueError(
"One of `initial_llm_chain` or `initial_llm_chain_config` must be present."
)
if "refine_llm_chain" in config:
refine_llm_chain_config = config.pop("refine_llm_chain")
refine_llm_chain = load_chain_from_config(refine_llm_chain_config)
elif "refine_llm_chain_path" in config:
refine_llm_chain = load_chain(config.pop("refine_llm_chain_path"))
else:
raise ValueError(
"One of `refine_llm_chain` or `refine_llm_chain_config` must be present."
)
if "document_prompt" in config:
prompt_config = config.pop("document_prompt")
document_prompt = load_prompt_from_config(prompt_config)
elif "document_prompt_path" in config:
document_prompt = load_prompt(config.pop("document_prompt_path"))
return RefineDocumentsChain(
initial_llm_chain=initial_llm_chain,
refine_llm_chain=refine_llm_chain,
document_prompt=document_prompt,
**config,
)
def _load_qa_with_sources_chain(config: dict, **kwargs: Any) -> QAWithSourcesChain:
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return QAWithSourcesChain(combine_documents_chain=combine_documents_chain, **config)
def _load_sql_database_chain(config: dict, **kwargs: Any) -> SQLDatabaseChain:
if "database" in kwargs:
database = kwargs.pop("database")
else:
raise ValueError("`database` must be present.")
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
return SQLDatabaseChain(database=database, llm=llm, prompt=prompt, **config)
def _load_vector_db_qa_with_sources_chain(
config: dict, **kwargs: Any
) -> VectorDBQAWithSourcesChain:
if "vectorstore" in kwargs:
vectorstore = kwargs.pop("vectorstore")
else:
raise ValueError("`vectorstore` must be present.")
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return VectorDBQAWithSourcesChain(
combine_documents_chain=combine_documents_chain,
vectorstore=vectorstore,
**config,
)
def _load_vector_db_qa(config: dict, **kwargs: Any) -> VectorDBQA:
if "vectorstore" in kwargs:
vectorstore = kwargs.pop("vectorstore")
else:
raise ValueError("`vectorstore` must be present.")
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return VectorDBQA(
combine_documents_chain=combine_documents_chain,
vectorstore=vectorstore,
**config,
)
def _load_api_chain(config: dict, **kwargs: Any) -> APIChain:
if "api_request_chain" in config:
api_request_chain_config = config.pop("api_request_chain")
api_request_chain = load_chain_from_config(api_request_chain_config)
elif "api_request_chain_path" in config:
api_request_chain = load_chain(config.pop("api_request_chain_path"))
else:
raise ValueError(
"One of `api_request_chain` or `api_request_chain_path` must be present."
)
if "api_answer_chain" in config:
api_answer_chain_config = config.pop("api_answer_chain")
api_answer_chain = load_chain_from_config(api_answer_chain_config)
elif "api_answer_chain_path" in config:
api_answer_chain = load_chain(config.pop("api_answer_chain_path"))
else:
raise ValueError(
"One of `api_answer_chain` or `api_answer_chain_path` must be present."
)
if "requests_wrapper" in kwargs:
requests_wrapper = kwargs.pop("requests_wrapper")
else:
raise ValueError("`requests_wrapper` must be present.")
return APIChain(
api_request_chain=api_request_chain,
api_answer_chain=api_answer_chain,
requests_wrapper=requests_wrapper,
**config,
)
def _load_llm_requests_chain(config: dict, **kwargs: Any) -> LLMRequestsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
if "requests_wrapper" in kwargs:
requests_wrapper = kwargs.pop("requests_wrapper")
return LLMRequestsChain(
llm_chain=llm_chain, requests_wrapper=requests_wrapper, **config
)
else:
return LLMRequestsChain(llm_chain=llm_chain, **config)
type_to_loader_dict = {
"api_chain": _load_api_chain,
"hyde_chain": _load_hyde_chain,
"llm_chain": _load_llm_chain,
"llm_bash_chain": _load_llm_bash_chain,
"llm_checker_chain": _load_llm_checker_chain,
"llm_math_chain": _load_llm_math_chain,
"llm_requests_chain": _load_llm_requests_chain,
"pal_chain": _load_pal_chain,
"qa_with_sources_chain": _load_qa_with_sources_chain,
"stuff_documents_chain": _load_stuff_documents_chain,
"map_reduce_documents_chain": _load_map_reduce_documents_chain,
"map_rerank_documents_chain": _load_map_rerank_documents_chain,
"refine_documents_chain": _load_refine_documents_chain,
"sql_database_chain": _load_sql_database_chain,
"vector_db_qa_with_sources_chain": _load_vector_db_qa_with_sources_chain,
"vector_db_qa": _load_vector_db_qa,
}
def load_chain_from_config(config: dict, **kwargs: Any) -> Chain:
"""Load chain from Config Dict."""
if "_type" not in config:
raise ValueError("Must specify a chain Type in config")
config_type = config.pop("_type")
if config_type not in type_to_loader_dict:
raise ValueError(f"Loading {config_type} chain not supported")
chain_loader = type_to_loader_dict[config_type]
return chain_loader(config, **kwargs)
def load_chain(path: Union[str, Path], **kwargs: Any) -> Chain:
"""Unified method for loading a chain from LangChainHub or local fs."""
if hub_result := try_load_from_hub(
path, _load_chain_from_file, "chains", {"json", "yaml"}, **kwargs
):
return hub_result
else:
return _load_chain_from_file(path, **kwargs)
def _load_chain_from_file(file: Union[str, Path], **kwargs: Any) -> Chain:
"""Load chain from file."""
# Convert file to Path object.
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
# Load from either json or yaml.
if file_path.suffix == ".json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix == ".yaml":
with open(file_path, "r") as f:
config = yaml.safe_load(f)
else:
raise ValueError("File type must be json or yaml")
# Load the chain from the config now.
return load_chain_from_config(config, **kwargs)
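# Example (illustrative): load_chain("llm_chain.json") reads a local JSON or YAML config
# and dispatches on its "_type" field via type_to_loader_dict; hub-style paths are first
# resolved through try_load_from_hub before falling back to the local filesystem.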
| [
"list_assertions_prompt",
"create_draft_answer_prompt",
"revised_answer_prompt",
"revised_answer_prompt_path",
"document_prompt",
"create_draft_answer_prompt_path",
"check_assertions_prompt",
"document_prompt_path",
"prompt_path",
"check_assertions_prompt_path",
"list_assertions_prompt_path"
] |
2024-01-10 | OlivierDehaene/langchain | langchain~vectorstores~milvus.py | """Wrapper around the Milvus vector database."""
from __future__ import annotations
import uuid
from typing import Any, Iterable, List, Optional, Tuple
import numpy as np
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
class Milvus(VectorStore):
"""Wrapper around the Milvus vector database."""
def __init__(
self,
embedding_function: Embeddings,
connection_args: dict,
collection_name: str,
text_field: str,
):
"""Initialize wrapper around the milvus vector database.
In order to use this you need to have `pymilvus` installed and a
running Milvus instance.
See the following documentation for how to run a Milvus instance:
https://milvus.io/docs/install_standalone-docker.md
Args:
embedding_function (Embeddings): Function used to embed the text
connection_args (dict): Arguments for pymilvus connections.connect()
collection_name (str): The name of the collection to search.
text_field (str): The field in Milvus schema where the
original text is stored.
"""
try:
from pymilvus import Collection, DataType, connections
except ImportError:
raise ValueError(
"Could not import pymilvus python package. "
"Please it install it with `pip install pymilvus`."
)
# Connecting to Milvus instance
if not connections.has_connection("default"):
connections.connect(**connection_args)
self.embedding_func = embedding_function
self.collection_name = collection_name
self.text_field = text_field
self.auto_id = False
self.primary_field = None
self.vector_field = None
self.fields = []
self.col = Collection(self.collection_name)
schema = self.col.schema
# Grabbing the fields for the existing collection.
for x in schema.fields:
self.fields.append(x.name)
if x.auto_id:
self.fields.remove(x.name)
if x.is_primary:
self.primary_field = x.name
if x.dtype == DataType.FLOAT_VECTOR or x.dtype == DataType.BINARY_VECTOR:
self.vector_field = x.name
# Default search params when one is not provided.
self.index_params = {
"IVF_FLAT": {"params": {"nprobe": 10}},
"IVF_SQ8": {"params": {"nprobe": 10}},
"IVF_PQ": {"params": {"nprobe": 10}},
"HNSW": {"params": {"ef": 10}},
"RHNSW_FLAT": {"params": {"ef": 10}},
"RHNSW_SQ": {"params": {"ef": 10}},
"RHNSW_PQ": {"params": {"ef": 10}},
"IVF_HNSW": {"params": {"nprobe": 10, "ef": 10}},
"ANNOY": {"params": {"search_k": 10}},
}
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
partition_name: Optional[str] = None,
timeout: Optional[int] = None,
) -> List[str]:
"""Insert text data into Milvus.
When using add_texts() it is assumed that a collecton has already
been made and indexed. If metadata is included, it is assumed that
it is ordered correctly to match the schema provided to the Collection
and that the embedding vector is the first schema field.
Args:
texts (Iterable[str]): The text being embedded and inserted.
metadatas (Optional[List[dict]], optional): The metadata that
corresponds to each insert. Defaults to None.
partition_name (str, optional): The partition of the collection
to insert data into. Defaults to None.
timeout: specified timeout.
Returns:
List[str]: The resulting keys for each inserted element.
"""
insert_dict: Any = {self.text_field: list(texts)}
try:
insert_dict[self.vector_field] = self.embedding_func.embed_documents(
list(texts)
)
except NotImplementedError:
insert_dict[self.vector_field] = [
self.embedding_func.embed_query(x) for x in texts
]
# Collect the metadata into the insert dict.
if len(self.fields) > 2 and metadatas is not None:
for d in metadatas:
for key, value in d.items():
if key in self.fields:
insert_dict.setdefault(key, []).append(value)
# Convert dict to list of lists for insertion
insert_list = [insert_dict[x] for x in self.fields]
# Insert into the collection.
res = self.col.insert(
insert_list, partition_name=partition_name, timeout=timeout
)
# Flush to make sure newly inserted is immediately searchable.
self.col.flush()
return res.primary_keys
def _worker_search(
self,
query: str,
k: int = 4,
param: Optional[dict] = None,
expr: Optional[str] = None,
partition_names: Optional[List[str]] = None,
round_decimal: int = -1,
timeout: Optional[int] = None,
**kwargs: Any,
) -> Tuple[List[float], List[Tuple[Document, Any, Any]]]:
# Load the collection into memory for searching.
self.col.load()
# Decide to use default params if not passed in.
if param is None:
index_type = self.col.indexes[0].params["index_type"]
param = self.index_params[index_type]
# Embed the query text.
data = [self.embedding_func.embed_query(query)]
# Determine result metadata fields.
output_fields = self.fields[:]
output_fields.remove(self.vector_field)
# Perform the search.
res = self.col.search(
data,
self.vector_field,
param,
k,
expr=expr,
output_fields=output_fields,
partition_names=partition_names,
round_decimal=round_decimal,
timeout=timeout,
**kwargs,
)
# Organize results.
ret = []
for result in res[0]:
meta = {x: result.entity.get(x) for x in output_fields}
ret.append(
(
Document(page_content=meta.pop(self.text_field), metadata=meta),
result.distance,
result.id,
)
)
return data[0], ret
def similarity_search_with_score(
self,
query: str,
k: int = 4,
param: Optional[dict] = None,
expr: Optional[str] = None,
partition_names: Optional[List[str]] = None,
round_decimal: int = -1,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Perform a search on a query string and return results.
Args:
query (str): The text being searched.
k (int, optional): The number of results to return. Defaults to 4.
param (dict, optional): The search params for the specified index.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
partition_names (List[str], optional): Partitions to search through.
Defaults to None.
round_decimal (int, optional): Round the resulting distance. Defaults
to -1.
timeout (int, optional): Amount to wait before timeout error. Defaults
to None.
kwargs: Collection.search() keyword arguments.
Returns:
List[Tuple[Document, float]]: List of (Document, distance) result pairs.
"""
_, result = self._worker_search(
query, k, param, expr, partition_names, round_decimal, timeout, **kwargs
)
return [(x, y) for x, y, _ in result]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
param: Optional[dict] = None,
expr: Optional[str] = None,
partition_names: Optional[List[str]] = None,
round_decimal: int = -1,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a search and return results that are reordered by MMR.
Args:
query (str): The text being searched.
k (int, optional): How many results to give. Defaults to 4.
fetch_k (int, optional): Total results to select k from.
Defaults to 20.
param (dict, optional): The search params for the specified index.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
partition_names (List[str], optional): What partitions to search.
Defaults to None.
round_decimal (int, optional): Round the resulting distance. Defaults
to -1.
timeout (int, optional): Amount to wait before timeout error. Defaults
to None.
Returns:
List[Document]: Document results for search.
"""
data, res = self._worker_search(
query,
fetch_k,
param,
expr,
partition_names,
round_decimal,
timeout,
**kwargs,
)
# Extract result IDs.
ids = [x for _, _, x in res]
# Get the raw vectors from Milvus.
vectors = self.col.query(
expr=f"{self.primary_field} in {ids}",
output_fields=[self.primary_field, self.vector_field],
)
# Reorganize the results from query to match result order.
vectors = {x[self.primary_field]: x[self.vector_field] for x in vectors}
search_embedding = data
ordered_result_embeddings = [vectors[x] for x in ids]
# Get the new order of results.
new_ordering = maximal_marginal_relevance(
np.array(search_embedding), ordered_result_embeddings, k=k
)
# Reorder the values and return.
ret = []
for x in new_ordering:
if x == -1:
break
else:
ret.append(res[x][0])
return ret
def similarity_search(
self,
query: str,
k: int = 4,
param: Optional[dict] = None,
expr: Optional[str] = None,
partition_names: Optional[List[str]] = None,
round_decimal: int = -1,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a similarity search against the query string.
Args:
query (str): The text to search.
k (int, optional): How many results to return. Defaults to 4.
param (dict, optional): The search params for the index type.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
partition_names (List[str], optional): What partitions to search.
Defaults to None.
round_decimal (int, optional): What decimal point to round to.
Defaults to -1.
timeout (int, optional): How long to wait before timeout error.
Defaults to None.
Returns:
List[Document]: Document results for search.
"""
_, docs_and_scores = self._worker_search(
query, k, param, expr, partition_names, round_decimal, timeout, **kwargs
)
return [doc for doc, _, _ in docs_and_scores]
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> Milvus:
"""Create a Milvus collection, indexes it with HNSW, and insert data.
Args:
texts (List[str]): Text to insert.
embedding (Embeddings): Embedding function to use.
metadatas (Optional[List[dict]], optional): Dict metadata.
Defaults to None.
Returns:
VectorStore: The Milvus vector store.
"""
try:
from pymilvus import (
Collection,
CollectionSchema,
DataType,
FieldSchema,
connections,
)
from pymilvus.orm.types import infer_dtype_bydata
except ImportError:
raise ValueError(
"Could not import pymilvus python package. "
"Please it install it with `pip install pymilvus`."
)
# Connect to Milvus instance
if not connections.has_connection("default"):
connections.connect(**kwargs.get("connection_args", {"port": 19530}))
# Determine embedding dim
embeddings = embedding.embed_query(texts[0])
dim = len(embeddings)
# Generate unique names
primary_field = "c" + str(uuid.uuid4().hex)
vector_field = "c" + str(uuid.uuid4().hex)
text_field = "c" + str(uuid.uuid4().hex)
collection_name = "c" + str(uuid.uuid4().hex)
fields = []
# Determine metadata schema
if metadatas:
# Check if all metadata keys line up
key = metadatas[0].keys()
for x in metadatas:
if key != x.keys():
raise ValueError(
"Mismatched metadata. "
"Make sure all metadata has the same keys and datatype."
)
# Create FieldSchema for each entry in singular metadata.
for key, value in metadatas[0].items():
# Infer the corresponding datatype of the metadata
dtype = infer_dtype_bydata(value)
if dtype == DataType.UNKNOWN:
raise ValueError(f"Unrecognized datatype for {key}.")
elif dtype == DataType.VARCHAR:
# Find out max length text based metadata
max_length = 0
for subvalues in metadatas:
max_length = max(max_length, len(subvalues[key]))
fields.append(
FieldSchema(key, DataType.VARCHAR, max_length=max_length + 1)
)
else:
fields.append(FieldSchema(key, dtype))
# Find out max length of texts
max_length = 0
for y in texts:
max_length = max(max_length, len(y))
# Create the text field
fields.append(
FieldSchema(text_field, DataType.VARCHAR, max_length=max_length + 1)
)
# Create the primary key field
fields.append(
FieldSchema(primary_field, DataType.INT64, is_primary=True, auto_id=True)
)
# Create the vector field
fields.append(FieldSchema(vector_field, DataType.FLOAT_VECTOR, dim=dim))
# Create the schema for the collection
schema = CollectionSchema(fields)
# Create the collection
collection = Collection(collection_name, schema)
# Index parameters for the collection
index = {
"index_type": "HNSW",
"metric_type": "L2",
"params": {"M": 8, "efConstruction": 64},
}
# Create the index
collection.create_index(vector_field, index)
# Create the VectorStore
milvus = cls(
embedding,
kwargs.get("connection_args", {"port": 19530}),
collection_name,
text_field,
)
# Add the texts.
milvus.add_texts(texts, metadatas)
return milvus
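# Illustrative usage sketch (not part of the library source); `embedding` and the
# connection settings are placeholders for whatever the caller already has:
#   store = Milvus.from_texts(texts, embedding, metadatas=metadatas,
#                             connection_args={"host": "localhost", "port": 19530})
#   docs = store.similarity_search("query text", k=4)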
| [] |
2024-01-10 | alanrios2001/PORT_NOIE | OIE~datasets~translate.py | from src.conll2bioes import Conversor
import os
import spacy
from tqdm.auto import tqdm
from main import criar_conll
import typer
from deep_translator import GoogleTranslator
import json
import pathlib
from diskcache import Cache
from OIE.datasets.validated_splits.contractions import transform_portuguese_contractions, clean_extraction
from OIE.final.matcher import OIE_Match
import openai
import httpx
import time
app = typer.Typer()
class LoadDataset:
def __init__(self,
dataset_path: str,
dataset_name: str,
out_path: str
):
self.dataset_name = dataset_name
self.dataset_path = dataset_path
with open(self.dataset_path +"/"+ self.dataset_name, "r", encoding="utf-8") as f:
data = f.read()
# selecting only extractions with ARG0, rel (V) and ARG1
data = data.split("\n\t")
data_norm = []
for ext in data:
if "ARG5" not in ext:
if "ARG4" not in ext:
if "ARG3" not in ext:
if "ARG2" not in ext:
if "ARG1" in ext:
if "V" in ext:
if "ARG0" in ext:
data_norm.append(ext)
path = out_path + "/mod"
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
with open(path + "/" + dataset_name, "a", encoding="utf-8") as f:
raw = data_norm
raw = "\n\t".join(raw)
f.write(raw)
Conversor(path+"/", dataset_name, out_path)
class ArgsRel2:
def __init__(self):
self.provavel_rel = []
self.alinhamentos = []
try:
self.nlp = spacy.load("pt_core_news_lg")
except:
os.system("python -m spacy download pt_core_news_lg")
self.nlp = spacy.load("pt_core_news_lg")
def root_parse(self, doc_dict, root_idx):
#finds the extraction's center via the root
for idx in doc_dict:
pos = doc_dict[idx]["pos"]
dep = doc_dict[idx]["dep"]
if (pos == "VERB" and dep == "ROOT") and (idx != 0 and idx != len(doc_dict) - 1):#restringe primeiro e último caracter da frase inteira
self.provavel_rel.append("VERB")
return (idx, idx)
root_idx = self.verb_parse(doc_dict, root_idx)
return root_idx
def verb_parse(self, doc_dict, root_idx):
#finds the extraction's center via the root
for idx in doc_dict:
pos = doc_dict[idx]["pos"]
dep = doc_dict[idx]["dep"]
if (pos == "VERB" and (dep == "xcomp" or dep == "acl" or dep == "acl:relacl")) and (idx != 0 and idx != len(doc_dict) - 1):#restringe primeiro e último caracter da frase inteira
self.provavel_rel.append("VERB")
return (idx, idx)
root_idx = self.aux_parse(doc_dict, root_idx)
return root_idx
def aux_parse(self, doc_dict, root_idx):
#finds the extraction's center via the root
for idx in doc_dict:
pos = doc_dict[idx]["pos"]
dep = doc_dict[idx]["dep"]
if (pos == "AUX" and dep == "ROOT") and (idx != 0 and idx != len(doc_dict) - 1):#restringe primeiro e último caracter da frase inteira
self.provavel_rel.append("AUX")
return (idx, idx)
root_idx = self.aux_parse2(doc_dict, root_idx)
return root_idx
def aux_parse2(self, doc_dict, root_idx):
#finds the extraction's center via the root
for idx in doc_dict:
pos = doc_dict[idx]["pos"]
dep = doc_dict[idx]["dep"]
if (pos == "AUX" and dep == "cop") and (idx != 0 and idx != len(doc_dict) - 1):#restringe primeiro e último caracter da frase inteira
self.provavel_rel.append("AUX")
return (idx, idx)
root_idx = self.noun_parse(doc_dict, root_idx)
return root_idx
def noun_parse(self, doc_dict, root_idx):
#finds the extraction's center via the root
for idx in doc_dict:
pos = doc_dict[idx]["pos"]
dep = doc_dict[idx]["dep"]
if (pos == "NOUN" and dep == "ROOT") and (idx != 0 and idx != len(doc_dict) - 1):#restringe primeiro e último caracter da frase inteira
self.provavel_rel.append("NOUN")
return (idx, idx)
return root_idx
def get_args_rel(self, ext, sent):
self.alinhamentos = []
doc = self.nlp(ext)
doc_dict = {}
i = 0
for token in doc:
doc_dict[i] = {"text": token.text, "pos": token.pos_, "dep": token.dep_}
i += 1
root_idx = (None, None)
self.provavel_rel = []
root_idx = self.root_parse(doc_dict, root_idx)
if len(self.provavel_rel)>0 and self.provavel_rel[0] == "VERB":
if root_idx[0]-1 != 0:
if doc_dict[root_idx[0]-1]["pos"] in ["AUX",'ADV']:
root_idx = (root_idx[0]-1, root_idx[1])
#checking the elements that make up the relation after the center
if root_idx != (None, None):
for j in range(root_idx[1]+1, len(doc_dict)):
pos = doc_dict[j]["pos"]
self.provavel_rel.append(pos)
adp_idxs = []
for idx, pos_ in enumerate(self.provavel_rel[1:-1]):
if pos_ in ['ADJ','ADV','NOUN', 'VERB','ADV']:
continue
elif pos_ == 'ADP':
adp_idxs.append(idx+1)
continue
else:
break
adp_idxs.append(0)
for idx in adp_idxs:
arg1 = ""
rel = ""
arg2 = ""
if root_idx != (None, None):
new_root_idx = (root_idx[0],root_idx[1]+idx)
j = new_root_idx[0]
while j <= new_root_idx[1]:
rel += doc_dict[j]["text"] + " "
j += 1
for idx in doc_dict:
token = doc_dict[idx]["text"]
if idx < new_root_idx[0]:
arg1 += token + " "
if idx > new_root_idx[1]:
arg2 += token + " "
self.alinhamentos.append((arg1,rel,arg2))
return self.alinhamentos
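# Illustrative note (not part of the original file): get_args_rel returns a list of
# (arg1, rel, arg2) string triples, one per candidate relation span; for an input such
# as "Pedro mora em Lisboa" it would produce roughly
#   [("Pedro ", "mora em ", "Lisboa "), ("Pedro ", "mora ", "em Lisboa ")]
# depending on the dependency parse produced by pt_core_news_lg.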
class ArgsRel:
def __init__(self):
self.current_root_sint = None
self.alinhamentos = []
try:
self.nlp = spacy.load("pt_core_news_lg")
except:
os.system("python -m spacy download pt_core_news_lg")
self.nlp = spacy.load("pt_core_news_lg")
def root_parse(self, doc_dict, root_idx):
#finds the extraction's center via the root
for idx in doc_dict:
pos = doc_dict[idx]["pos"]
dep = doc_dict[idx]["dep"]
if (pos == "VERB" and dep == "ROOT") and (idx != 0 and idx != len(doc_dict) - 1):
root_idx = (idx, idx)
self.current_root_sint = "VERB-ROOT"
break
if root_idx == (None, None):
root_idx = self.aux_parse(doc_dict, root_idx)
return root_idx
def aux_parse(self, doc_dict, root_idx):
#finds the extraction's center via the root
for idx in doc_dict:
pos = doc_dict[idx]["pos"]
dep = doc_dict[idx]["dep"]
if (pos == "AUX" and dep == "cop") and (idx != 0 and idx != len(doc_dict) - 1):
root_idx = (idx, idx)
self.current_root_sint = "AUX-cop"
break
elif (pos == "AUX" and dep == "ROOT") and (idx != 0 and idx != len(doc_dict) - 1):
root_idx = (idx, idx)
self.current_root_sint = "AUX-ROOT"
break
if root_idx == (None, None):
root_idx = self.x_comp_parse(doc_dict, root_idx)
return root_idx
def x_comp_parse(self, doc_dict, root_idx):
#finds the extraction's center via the root
for idx in doc_dict:
pos = doc_dict[idx]["pos"]
dep = doc_dict[idx]["dep"]
if (pos == "VERB" and dep == "xcomp" and (idx != 0 and idx != len(doc_dict) - 1)):
root_idx = (idx, idx)
self.current_root_sint = "VERB-xcomp"
break
elif (pos == "VERB" and dep == "acl" and (idx != 0 and idx != len(doc_dict) - 1)):
root_idx = (idx, idx)
self.current_root_sint = "VERB-acl"
break
elif (pos == "VERB" and dep == "acl:relcl" and (idx != 0 and idx != len(doc_dict) - 1)):
root_idx = (idx, idx)
self.current_root_sint = "VERB-acl:relacl"
break
if root_idx == (None, None):
root_idx = self.noun_root_parse(doc_dict, root_idx)
return root_idx
def noun_root_parse(self, doc_dict, root_idx):
#finds the extraction's center via the root
for idx in doc_dict:
pos = doc_dict[idx]["pos"]
dep = doc_dict[idx]["dep"]
if (pos == "NOUN" and dep == "ROOT" and (idx != 0 and idx != len(doc_dict) - 1)):
root_idx = (idx, idx)
self.current_root_sint = "NOUN-ROOT"
break
return root_idx
def get_args_rel(self, ext, sent):
self.alinhamentos = []
doc = self.nlp(ext)
doc_dict = {}
i = 0
for token in doc:
doc_dict[i] = {"text": token.text, "pos": token.pos_, "dep": token.dep_}
i += 1
arg1 = ""
rel = ""
arg2 = ""
root_idx = (None, None)
self.current_root_sint = None
root_idx = self.root_parse(doc_dict, root_idx)
#checking the elements that make up the relation before the center
if root_idx != (None, None):
before_root_pos_dep = ""
for i in range(0, root_idx[0]):
pos = doc_dict[i]["pos"]
dep = doc_dict[i]["dep"]
before_root_pos_dep += pos + "-" + dep + ", "
before_root_pos_dep = before_root_pos_dep[:-2]
splited = before_root_pos_dep.split(", ")
if self.current_root_sint == "NOUN-ROOT":
if "PRON-expl" in before_root_pos_dep and splited[-1] == "PRON-expl":
if root_idx[0]-1 > 0:
root_idx = (root_idx[0]-1, root_idx[1])
else:
root_idx = (root_idx[0], root_idx[1])
if "AUX-cop" in before_root_pos_dep and splited[-1] == "AUX-cop":
if root_idx[0]-1 > 0:
root_idx = (root_idx[0]-1, root_idx[1])
else:
root_idx = (root_idx[0], root_idx[1])
elif "AUX-cop, ADV-advmod" in before_root_pos_dep and splited[-1] == "ADV-advmod":
if root_idx[0]-2 > 0:
root_idx = (root_idx[0]-2, root_idx[1])
else:
root_idx = (root_idx[0]-1, root_idx[1])
elif "ADV-advmod" in before_root_pos_dep and splited[-1] == "ADV-advmod":
if root_idx[0]-1 > 0:
root_idx = (root_idx[0]-1, root_idx[1])
else:
root_idx = (root_idx[0], root_idx[1])
elif "AUX-aux" in before_root_pos_dep and splited[-1] == "AUX-aux":
if root_idx[0]-1 > 0:
root_idx = (root_idx[0]-1, root_idx[1])
else:
root_idx = (root_idx[0], root_idx[1])
elif "AUX-aux:pass" in before_root_pos_dep and splited[-1] == "AUX-aux:pass":
if root_idx[0]-1 > 0:
root_idx = (root_idx[0]-1, root_idx[1])
else:
root_idx = (root_idx[0], root_idx[1])
elif "AUX-aux:pass" in before_root_pos_dep and splited[-1] == "AUX-aux:pass":
if root_idx[0]-1 > 0:
root_idx = (root_idx[0]-1, root_idx[1])
else:
root_idx = (root_idx[0], root_idx[1])
elif "ADV-advmod, PRON-obj" in before_root_pos_dep and splited[-1] == "PRON-obj":
if root_idx[0]-2 > 0:
root_idx = (root_idx[0]-2, root_idx[1])
else:
root_idx = (root_idx[0]-1, root_idx[1])
elif "AUX-cop, ADP-case" in before_root_pos_dep and splited[-1] == "ADP-case":
if root_idx[0]-2 > 0:
root_idx = (root_idx[0]-2, root_idx[1])
else:
root_idx = (root_idx[0]-1, root_idx[1])
elif "AUX-cop, DET-det" in before_root_pos_dep and splited[-1] == "DET-det":
if root_idx[0]-2 > 0:
root_idx = (root_idx[0]-2, root_idx[1])
else:
root_idx = (root_idx[0]-1, root_idx[1])
        # check which elements make up the rel after the center (a usage sketch follows this class)
if root_idx != (None, None):
after_root_pos_dep = ""
for i in range(root_idx[1]+1, len(doc_dict)):
pos = doc_dict[i]["pos"]
dep = doc_dict[i]["dep"]
after_root_pos_dep += pos + "-" + dep + ", "
after_root_pos_dep = after_root_pos_dep[:-2]
splited = after_root_pos_dep.split(", ")
if self.current_root_sint == "AUX-cop":
if "DET-det, NOUN-ROOT, ADJ-amod, ADP-case" in after_root_pos_dep and splited[0] == "DET-det":
if root_idx[1]+4 < len(doc_dict) - 1:
root_idx = (root_idx[0], root_idx[1]+4)
else:
root_idx = (root_idx[0], root_idx[1])
if "ADP-case, DET-det, ADV-obl, VERB-xcomp" in after_root_pos_dep and splited[0] == "ADP-case":
if root_idx[1]+4 < len(doc_dict) - 1:
root_idx = (root_idx[0], root_idx[1]+4)
else:
root_idx = (root_idx[0], root_idx[1]+3)
elif "ADJ-amod, ADP-case" in after_root_pos_dep and splited[0] == "ADJ-amod":
if root_idx[1]+2 < len(doc_dict) - 1:
root_idx = (root_idx[0], root_idx[1]+2)
else:
root_idx = (root_idx[0], root_idx[1]+1)
elif "VERB-xcomp, DET-det, NOUN-obj, ADP-case" in after_root_pos_dep and splited[0] == "VERB-xcomp":
if root_idx[1]+4 < len(doc_dict) - 1:
root_idx = (root_idx[0], root_idx[1]+4)
else:
root_idx = (root_idx[0], root_idx[1]+3)
elif "VERB-xcomp, SCONJ-mark, VERB-xcomp, ADP-case" in after_root_pos_dep and splited[0] == "VERB-xcomp":
if root_idx[1]+4 < len(doc_dict) - 1:
root_idx = (root_idx[0], root_idx[1]+4)
else:
root_idx = (root_idx[0], root_idx[1]+3)
elif "VERB-xcomp, ADP-case" in after_root_pos_dep and splited[0] == "VERB-xcomp":
if root_idx[1]+2 < len(doc_dict) - 1:
root_idx = (root_idx[0], root_idx[1]+2)
else:
root_idx = (root_idx[0], root_idx[1]+1)
elif "VERB-xcomp, VERB-xcomp" in after_root_pos_dep and splited[0] == "VERB-xcomp":
if root_idx[1]+2 < len(doc_dict) - 1:
root_idx = (root_idx[0], root_idx[1]+2)
else:
root_idx = (root_idx[0], root_idx[1]+1)
elif "VERB-xcomp, SCONJ-mark, VERB-xcomp" in after_root_pos_dep and splited[0] == "VERB-xcomp":
if root_idx[1]+3 < len(doc_dict) - 1:
root_idx = (root_idx[0], root_idx[1]+3)
else:
root_idx = (root_idx[0], root_idx[1]+2)
elif "VERB-xcomp, VERB-xcomp, DET-det, NOUN-obj, ADP-case" in after_root_pos_dep and splited[0] == "VERB-xcomp":
if root_idx[1]+5 < len(doc_dict) - 1:
root_idx = (root_idx[0], root_idx[1]+5)
else:
root_idx = (root_idx[0], root_idx[1]+4)
elif "ADJ-amod, ADP-case" in after_root_pos_dep and splited[0] == "ADJ-amod":
if root_idx[1]+2 < len(doc_dict) - 1:
root_idx = (root_idx[0], root_idx[1]+2)
else:
root_idx = (root_idx[0], root_idx[1]+1)
elif "ADV-advmod, ADP-case" in after_root_pos_dep and splited[0] == "ADV-advmod":
if root_idx[1]+2 < len(doc_dict) - 1:
root_idx = (root_idx[0], root_idx[1]+2)
else:
root_idx = (root_idx[0], root_idx[1]+1)
elif "ADP-case, NOUN-obj, ADP-case" in after_root_pos_dep and splited[0] == "ADP-case":
if root_idx[1]+3 < len(doc_dict) - 1:
root_idx = (root_idx[0], root_idx[1]+3)
else:
root_idx = (root_idx[0], root_idx[1]+2)
elif "ADV-advmod, ADV-advmod, SCONJ-dep" in after_root_pos_dep and splited[0] == "ADV-advmod":
if root_idx[1]+3 < len(doc_dict) - 1:
root_idx = (root_idx[0], root_idx[1]+3)
else:
root_idx = (root_idx[0], root_idx[1]+2)
elif "VERB-xcomp" in after_root_pos_dep and splited[0] == "VERB-xcomp":
if root_idx[1]+1 < len(doc_dict) - 1:
root_idx = (root_idx[0], root_idx[1]+1)
else:
root_idx = (root_idx[0], root_idx[1])
elif "ADP-case" in after_root_pos_dep and splited[0] == "ADP-case":
if root_idx[1]+1 < len(doc_dict) - 1:
root_idx = (root_idx[0], root_idx[1]+1)
else:
root_idx = (root_idx[0], root_idx[1])
elif "AUX-cop" in after_root_pos_dep and splited[0] == "AUX-cop":
if root_idx[1]+1 < len(doc_dict) - 1:
root_idx = (root_idx[0], root_idx[1]+1)
else:
root_idx = (root_idx[0], root_idx[1])
elif "DET-case" in after_root_pos_dep and splited[0] == "DET-case":
if root_idx[1]+1 < len(doc_dict) - 1:
root_idx = (root_idx[0], root_idx[1]+1)
else:
root_idx = (root_idx[0], root_idx[1])
j = root_idx[0]
if root_idx != (None, None):
while j <= root_idx[1]:
rel += doc_dict[j]["text"] + " "
j += 1
for idx in doc_dict:
token = doc_dict[idx]["text"]
if idx < root_idx[0]:
arg1 += token + " "
if idx > root_idx[1]:
arg2 += token + " "
self.alinhamentos.append((arg1, rel, arg2))
return self.alinhamentos
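
# --- Illustrative usage sketch; not taken from the original project. ---
# ArgsRel.get_args_rel parses the extraction, locates the relation "center" through
# root_parse/aux_parse/x_comp_parse/noun_root_parse, widens it with the POS/dep rules above,
# and returns a list of (arg1, rel, arg2) tuples. The sentence below is an assumption chosen
# only for illustration, and the call needs the pt_core_news_lg spaCy model to be installed.
if __name__ == "__main__":
    _extractor = ArgsRel()
    for _arg1, _rel, _arg2 in _extractor.get_args_rel(
            "o cachorro está muito feliz", "o cachorro está muito feliz"):
        print(repr(_arg1), repr(_rel), repr(_arg2))
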
class ArgsRel3:
def __init__(self):
self.provavel_rel = []
self.alinhamentos = []
self.matcher = OIE_Match()
try:
self.nlp = spacy.load("pt_core_news_lg")
except:
os.system("python -m spacy download pt_core_news_lg")
self.nlp = spacy.load("pt_core_news_lg")
def get_args_rel(self, ext, sent):
self.alinhamentos = []
pos = []
ext_list = ext.split(" ")
sent_list = sent.split(" ")
sent_doc = self.nlp(sent)
        # permute candidate relation spans
        # start with the longest subsequence length and shrink it
for length in range(len(ext_list) - 2, 0, -1):
for start in range(1, len(ext_list) - length):
end = start + length
rel = ext_list[start:end]
idx = (start, end)
arg0 = " ".join(ext_list[:idx[0]])
arg1 = " ".join(ext_list[idx[1]:len(sent_list)])
rel = " ".join(rel)
valid = self.matcher.match(sent, arg0, rel, arg1)
if valid[3]:
                    # collect the POS of the alignment's relation; the POS comes from the sentence tokens matched to the extraction
aux = []
aux_dep = []
cur_ext = []
cur_dep = []
for span in valid[:-1]:
span_tk = sent_doc[span[0]:span[1] + 1]
for token in span_tk:
aux.append(token.pos_)
aux_dep.append(token.dep_)
cur_ext.append(aux)
cur_dep.append(aux_dep)
aux = []
aux_dep = []
pos.append(((arg0, rel, arg1), cur_ext, cur_dep))
                    # apply POS/dep rules over the relation's tags to filter alignments (see the usage sketch after this class)
ali_gerado = ((arg0, rel, arg1), cur_ext, cur_dep)
rel_pos = ali_gerado[1][1]
rel_dep = ali_gerado[2][1]
inicio = [[rel_pos[0], rel_dep[0]]]
meio = []
for x, y in zip(rel_pos[1:-1], rel_dep[1:-1]):
meio.append([x, y])
fim = [[rel_pos[-1], rel_dep[-1]]]
first = False
middle = False
middle_counter = 0
                    # beginning of the relation
for i, tags in enumerate(inicio):
p_tag = tags[0]
p_dep = tags[1]
if p_tag == "ADV" and i == 0 and len(rel_pos) > 1 and rel_pos[1] in ['VERB', 'AUX']:
first = True
if len(rel_pos) == 2:
self.alinhamentos.append(ali_gerado[0])
return self.alinhamentos
elif p_tag == "ADV" and i == 0 and len(rel_pos) > 1 and rel_pos[1] == 'PRON':
first = True
elif p_tag == "PRON" and i == 0 and len(rel_pos) > 1 and rel_pos[1] in ['VERB', 'AUX']:
first = True
if len(rel_pos) == 2:
self.alinhamentos.append(ali_gerado[0])
return self.alinhamentos
elif p_tag == "AUX" and i == 0:
first = True
if len(rel_pos) == 1:
self.alinhamentos.append(ali_gerado[0])
return self.alinhamentos
elif (p_tag == "VERB" and p_dep == "ROOT") and i == 0:
first = True
if len(rel_pos) == 1:
self.alinhamentos.append(ali_gerado[0])
return self.alinhamentos
elif p_tag == "VERB" and i == 0:
first = True
if len(rel_pos) == 1:
self.alinhamentos.append(ali_gerado[0])
return self.alinhamentos
                    # middle of the relation
for i, tags in enumerate(meio):
p_tag = tags[0]
if p_tag in ['ADJ', 'NOUN', 'VERB', "AUX", "DET", "PRON", "SCONJ", "PROPN"] and first:
middle_counter += 1
if middle_counter == len(meio):
middle = True
                    # end of the relation
for i, tags in enumerate(fim):
p_tag = tags[0]
if len(rel_pos) == 2 and p_tag == "VERB" and first:
self.alinhamentos.append(ali_gerado[0])
return self.alinhamentos
elif len(rel_pos) == 2 and p_tag == "AUX" and first:
self.alinhamentos.append(ali_gerado[0])
return self.alinhamentos
elif len(rel_pos) == 2 and p_tag == "ADP" and first:
self.alinhamentos.append(ali_gerado[0])
return self.alinhamentos
elif len(rel_pos) > 2 and p_tag in ["ADP", "VERB", "AUX"] and first and middle:
self.alinhamentos.append(ali_gerado[0])
return self.alinhamentos
if len(self.alinhamentos) == 0:
self.alinhamentos.append((" ", " ", " "))
return self.alinhamentos
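
# --- Illustrative usage sketch; not taken from the original project. ---
# ArgsRel3 brute-forces candidate relation spans (longest first), validates each candidate
# against the sentence with OIE_Match (defined elsewhere in this module), and keeps the first
# span whose POS/dep pattern passes the rules above. The sentence/extraction pair below is an
# assumption for illustration only, and the call needs the pt_core_news_lg spaCy model.
if __name__ == "__main__":
    _aligner = ArgsRel3()
    print(_aligner.get_args_rel("a campanha começou na semana passada",
                                "a campanha começou na semana passada"))
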
class Translators:
def __init__(self, google: bool):
if not google:
            # openai.api_key = os.environ.get("OPENAI_API_KEY")  # keep API keys out of source code
self.prompt_tradução = "Por favor, traduza as seguintes sentenças do inglês para o português. Além disso, identifique e traduza os fatos específicos dentro de cada sentença. Certifique-se de que os fatos traduzidos sejam adaptados para corresponder diretamente à sua representação na sentença traduzida, se baseie nos seguintes exemplos:\n\n" \
"EXEMPLOS DE ENTRADA E SAÍDA:\n\n" \
"(entrada):\n" \
"SENTENÇA: The dog is walking through the park, he is very happy.\n" \
"FATO: The dog is very happy.\n" \
"(saida):\n" \
"SENTENÇA: O cachorro está andando pelo parque, ele está muito feliz.\n" \
"FATO: O cachorro está muito feliz.\n\n" \
"(entrada):\n" \
"SENTENÇA: He made a midnight requisition of all the printers he could lay hands on so that he could monitor all the telephone lines coming into the lab 's computers .\n" \
"FATO: telephone lines coming the lab 's computers \n" \
"(saida):\n" \
"SENTENÇA: Ele fez uma requisição à meia-noite de todas as impressoras que conseguiu encontrar para poder monitorar todas as linhas telefônicas que chegam aos computadores do laboratório.\n" \
"FATO: linhas telefônicas chegam aos computadores do laboratório.\n\n" \
"(entrada):\n" \
"SENTENÇA: The campaign , which started last week and runs through Nov. 23 , with funds earmarked for both the quake and Hugo , `` was Barry 's idea , '' a spokeswoman says .\n" \
"FATO: The campaign started last week \n" \
"(saida):\n" \
"SENTENÇA: A campanha, que começou na semana passada e vai até o dia 23 de novembro, com fundos destinados tanto para o terremoto quanto para o Hugo, 'foi ideia de Barry', disse uma porta-voz.\n" \
"FATO: A campanha começou na semana passada.\n\n" \
"(entrada):\n" \
"SENTENÇA: So far , Nissan 's new - model successes are mostly specialized vehicles with limited sales potential .\n" \
"FATO: Nissan 's new - model successes specialized limited sales potential \n" \
"(saida):\n" \
"SENTENÇA: Até agora, os sucessos dos novos modelos da Nissan são principalmente veículos especializados com potencial de venda limitado.\n" \
"FATO: Os sucessos dos novos modelos da Nissan são principalmente com potencial de venda limitado.\n"
#print(self.prompt_tradução)
else:
self.google_translator = GoogleTranslator(source="en", target="pt")
def batch_google(self, txt):
txt = self.google_translator.translate(txt)
return txt
def gpt(self, sent, ext):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
            temperature=1,  # the chat completions API only accepts temperatures in the range [0, 2]
messages=[
{"role": "system", "content": self.prompt_tradução},
{"role": "user", "content": f"SENTENÇA: {sent}"},
{"role": "user", "content": f"FATO: {ext}"}
]
)
sentence = response['choices'][0]['message']['content'].split("\n")[0].split(": ")[-1]
extraction = response['choices'][0]['message']['content'].split("\n")[-1].split(": ")[-1]
#print("sentence: ", sentence)
#print("extraction: ", extraction)
return sentence, extraction
def gptv2(self, sent, ext):
url = "http://43.153.203.236:3001/api/chat"
headers = {
"content-type": "application/json"
}
data = {
"model": {
"id": "gpt-3.5-turbo",
"name": "GPT-3.5",
"maxLength": 12000,
"tokenLimit": 3000
},
"temperature": 2,
"messages": [
{"role": "system",
"content": "Você é um tradutor de textos de ingles para portugues brasileiro."},
{"role": "user", "content": self.prompt_tradução},
{"role": "user", "content": f"SENTENÇA: {sent}"},
{"role": "user", "content": f"FATO: {ext}"}
]
}
response = httpx.post(url, headers=headers, data=json.dumps(data))
sentence = response.text.split("\n")[0].split(": ")[-1]
extraction = response.text.split("\n")[-1].split(": ")[-1]
if len(sentence) == 0 or len(extraction) == 0:
print("erro na tradução, tentando novamente")
return self.gptv2(sent, ext)
return sentence, extraction
def da_vinci(self, sent, ext):
pass
class TranslateDataset:
def __init__(self, dataset_dir: str,
dataset_name: str,
out_path: str,
batch_size: int,
google: bool,
debug: bool = False
):
self.batch_size = batch_size
self.google = google
self.debug = debug
self.dataset_dir = dataset_dir
self.dataset_name = dataset_name
self.out_path = out_path
self.translators = Translators(google)
self.matcher = OIE_Match(sequential=True)
self.argreleng = ArgsRel()
self.freezed = []
self.counter = 0
def debugging(self, sentence, ext, raw_sent, raw_ext):
alignments = self.argreleng.get_args_rel(ext)
for alignment in alignments:
arg0_trad = alignment[0]
rel_trad = alignment[1]
arg1_trad = alignment[2]
print("\nDebugging")
print(f"sent: {sentence}")
print(f"raw_sent: {raw_sent}")
print(f"ext: {ext}")
print(f"raw_ext: {raw_ext}")
print(f"arg0: {arg0_trad}")
print(f"rel: {rel_trad}")
print(f"arg1: {arg1_trad}\n")
def save_dict(self, data_dict):
path = self.out_path+"/saida_match"
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
with open(self.out_path+"/saida_match/json_dump.json", "a", encoding="utf-8") as f:
f.write(json.dumps(data_dict))
def save_dict_threads(self, n_parts: int):
data_dict = {}
for i in range(n_parts):
with open(f"{self.out_path}/align/data_dict{i}.json", "r", encoding="utf-8") as f:
data = json.load(f)
data_dict.update(data)
self.save_dict(data_dict)
def save_translate(self, data):
path = self.out_path+"/translate"
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
with open(self.out_path+"/translate/translate.json", "a", encoding="utf-8") as f:
open(self.out_path + "/translate/translate.json", "w", encoding="utf-8").close()
f.write(json.dumps(data))
def load_dataset(self):
        # structure the dataset into sentence/extraction lists (a small sketch of this reconstruction follows the class)
with open(f"{self.out_path}/conll2bioes_output/{self.dataset_name.replace('.conll', '.txt')}",
"r", encoding="utf-8") as f:
data = f.read()
data = data.split("\n\t")
data = [ext.split("\n") for ext in data]
if self.debug:
data = data[:32]
for ext in data:
for i in range(len(ext)):
ext[i] = ext[i].split("\t")
dataset = []
sents = []
exts = []
for ext in tqdm(data, desc="Carregando dataset"):
sentence = ""
arg0 = ""
rel = ""
arg1 = ""
for e in ext:
if e != [""]:
sentence += e[0] + " "
if "ARG0" in e[8]:
arg0 += e[0] + " "
if "ARG1" in e[8]:
arg1 += e[0] + " "
if "V" in e[8]:
rel += e[0] + " "
ext = arg0 + rel + arg1
sents.append(sentence)
exts.append(ext)
dataset.append(sents)
dataset.append(exts)
return dataset
def half_translated(self):
try:
open(f"{self.out_path}/translate/translate.json", "r", encoding="utf-8")
return True
except:
return False
def translate_google(self, cache_dir: str):
cache = Cache(cache_dir)
dataset = self.load_dataset()
        # translate the dataset
all_sent = []
all_ext = []
raw_sent = []
raw_ext = []
for i in tqdm(range(len(dataset[0])), desc=f"Traduzindo dataset"):
if dataset[0][i] in cache:
sent = cache[dataset[0][i]]
else:
sent = self.translators.batch_google(dataset[0][i])
cache[dataset[0][i]] = sent
if dataset[1][i] in cache:
ext = cache[dataset[1][i]]
else:
ext = self.translators.batch_google(dataset[1][i])
cache[dataset[1][i]] = ext
all_sent.append(sent)
all_ext.append(ext)
raw_sent.append(dataset[0][i])
raw_ext.append(dataset[1][i])
cache.clear()
cache.close()
trans_dict = {"sent": all_sent, "ext": all_ext, "raw_sent": raw_sent, "raw_ext": raw_ext}
self.save_translate(trans_dict)
def translate_gpt(self, dataset=None):
if dataset is None:
dataset = self.load_dataset()
        # translate the dataset
all_sent = []
all_ext = []
raw_sent = []
raw_ext = []
if self.half_translated():
with open(f"{self.out_path}/translate/translate.json", "r", encoding="utf-8") as f:
data = json.load(f)
all_sent = data["sent"]
all_ext = data["ext"]
raw_sent = data["raw_sent"]
raw_ext = data["raw_ext"]
i = len(all_sent)
else:
i = 0
while i < len(dataset[0]):
try:
sent, ext = self.translators.gptv2(dataset[0][i], dataset[1][i])
all_sent.append(sent)
all_ext.append(ext)
raw_sent.append(dataset[0][i])
raw_ext.append(dataset[1][i])
os.system("cls")
print(f"{i/len(dataset[0])*100:.2f}% concluído ||| {i}/{len(dataset[0])}")
trans_dict = {"sent": all_sent, "ext": all_ext, "raw_sent": raw_sent, "raw_ext": raw_ext}
self.save_translate(trans_dict)
i+=1
except:
print("provavelmente o modelo está sobrecarregado, tentando novamente")
trans_dict = {"sent": all_sent, "ext": all_ext, "raw_sent": raw_sent, "raw_ext": raw_ext}
self.save_translate(trans_dict)
def save_translate_thread(self, data, part: int):
path = self.out_path + f"/translate"
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
with open(self.out_path + f"/translate/translate{part}.json", "a", encoding="utf-8") as f:
open(self.out_path + f"/translate/translate{part}.json", "w", encoding="utf-8").close()
f.write(json.dumps(data))
def half_translated_thread(self, part: int):
try:
open(f"{self.out_path}/translate/translate{part}.json", "r", encoding="utf-8")
return True
except:
return False
def thread_gpt(self, part: int, dataset=None):
        # TODO: split into smaller functions
if dataset is None:
dataset = self.load_dataset()
        # translate the dataset
all_sent = []
all_ext = []
raw_sent = []
raw_ext = []
if self.half_translated_thread(part):
with open(f"{self.out_path}/translate/translate{part}.json", "r", encoding="utf-8") as f:
data = json.load(f)
all_sent = data["sent"]
all_ext = data["ext"]
raw_sent = data["raw_sent"]
raw_ext = data["raw_ext"]
i = len(all_sent)
else:
i = 0
while i < len(dataset[0]):
try:
sent, ext = self.translators.gptv2(dataset[0][i], dataset[1][i])
if sent == "Error" or ext == "Error":
print(f"thread {part} freezou, esperando 30 segundos")
self.freezed.append(part)
time.sleep(30)
print(f"thread {part} liberada")
self.freezed.remove(part)
raise Exception("Error")
all_sent.append(sent)
all_ext.append(ext)
raw_sent.append(dataset[0][i])
raw_ext.append(dataset[1][i])
os.system("cls")
print(f"{i / len(dataset[0]) * 100:.2f}% concluído ||| {i}/{len(dataset[0])} ||| Thread: {part} ||| Freezed: {self.freezed}")
trans_dict = {"sent": all_sent, "ext": all_ext, "raw_sent": raw_sent, "raw_ext": raw_ext}
self.save_translate_thread(trans_dict, part)
i += 1
except:
print("provavelmente o modelo está sobrecarregado, tentando novamente")
trans_dict = {"sent": all_sent, "ext": all_ext, "raw_sent": raw_sent, "raw_ext": raw_ext}
self.save_translate_thread(trans_dict, part)
def merge_translate_parts(self, total_parts:int):
all_sent = []
all_ext = []
raw_sent = []
raw_ext = []
with open(self.out_path + f"/translate/translate.json", "a", encoding="utf-8") as f:
for part in range(total_parts):
with open(self.out_path + f"/translate/translate{part}.json", "r", encoding="utf-8") as f2:
data = json.load(f2)
all_sent.extend(data["sent"])
all_ext.extend(data["ext"])
raw_sent.extend(data["raw_sent"])
raw_ext.extend(data["raw_ext"])
trans_dict = {"sent": all_sent, "ext": all_ext, "raw_sent": raw_sent, "raw_ext": raw_ext}
f.write(json.dumps(trans_dict))
def create_dict(self, translate = None, part = None):
argsRel_eng = ArgsRel3()
if translate is None:
with open(self.out_path + "/translate/translate.json", "r", encoding="utf-8") as f:
data = json.load(f)
else:
data = translate
all_sent = data["sent"]
all_ext = data["ext"]
raw_sent = data["raw_sent"]
raw_ext = data["raw_ext"]
if self.debug:
for sent, ext, rs, re in zip(all_sent, all_ext, raw_sent, raw_ext):
if not self.google:
self.debugging(sent, ext, rs, re)
else:
self.debugging(sent, ext, rs, re)
data_dict = {}
        # identify the elements of the translated triple and store them in a dictionary
counter = 0
for sample in tqdm(zip(all_sent, all_ext), total=len(all_sent)):
curr_ext = sample[1]
if curr_ext[-1] == ".":
curr_ext = curr_ext[:-1]
alignments = argsRel_eng.get_args_rel(transform_portuguese_contractions(curr_ext), transform_portuguese_contractions(sample[0]))
for ali in alignments:
arg0_trad, rel_trad, arg1_trad = ali
if len(alignments) > 1:
match = self.matcher.match(transform_portuguese_contractions(sample[0]),
transform_portuguese_contractions(arg0_trad),
transform_portuguese_contractions(rel_trad),
transform_portuguese_contractions(arg1_trad)
)
if match[3] == True:
data_dict[str(self.counter)] = {"ID": self.counter,
"sent": transform_portuguese_contractions(sample[0]),
"ext": [{"arg1": transform_portuguese_contractions(arg0_trad),
"rel": transform_portuguese_contractions(rel_trad),
"arg2": transform_portuguese_contractions(arg1_trad)}]}
self.counter += 1
break
else:
data_dict[str(self.counter)] = {"ID": self.counter,
"sent": transform_portuguese_contractions(sample[0]),
"ext": [{"arg1": transform_portuguese_contractions(arg0_trad),
"rel": transform_portuguese_contractions(rel_trad),
"arg2": transform_portuguese_contractions(arg1_trad)}]}
self.counter += 1
#print(f"{self.counter / (len(all_sent) * 6):.2f}% concluído ||| {self.counter}/{len(all_sent)*6} ||| thread: {part}")
if part is not None:
path = self.out_path + f"/align/"
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
with open(self.out_path + f"/align/data_dict{part}.json", "a", encoding="utf-8") as f:
f.write(json.dumps(data_dict))
else:
            # save the dictionary
self.save_dict(data_dict)
def create_dict_thread(self, translate = None, part = None):
argsRel_eng = ArgsRel3()
if translate is None:
with open(self.out_path + "/translate/translate.json", "r", encoding="utf-8") as f:
data = json.load(f)
else:
data = translate
all_sent = data["sent"]
all_ext = data["ext"]
raw_sent = data["raw_sent"]
raw_ext = data["raw_ext"]
if self.debug:
for sent, ext, rs, re in zip(all_sent, all_ext, raw_sent, raw_ext):
if not self.google:
self.debugging(sent, ext, rs, re)
else:
self.debugging(sent, ext, rs, re)
data_dict = {}
        # identify the elements of the translated triple and store them in a dictionary
counter = 0
for sample in zip(all_sent, all_ext):
curr_ext = sample[1]
if curr_ext[-1] == ".":
curr_ext = curr_ext[:-1]
alignments = argsRel_eng.get_args_rel(transform_portuguese_contractions(curr_ext), transform_portuguese_contractions(sample[0]))
for ali in alignments:
arg0_trad, rel_trad, arg1_trad = ali
if len(alignments) > 1:
match = self.matcher.match(transform_portuguese_contractions(sample[0]),
transform_portuguese_contractions(arg0_trad),
transform_portuguese_contractions(rel_trad),
transform_portuguese_contractions(arg1_trad)
)
if match[3] == True:
data_dict[str(self.counter)] = {"ID": self.counter,
"sent": transform_portuguese_contractions(sample[0]),
"ext": [{"arg1": transform_portuguese_contractions(arg0_trad),
"rel": transform_portuguese_contractions(rel_trad),
"arg2": transform_portuguese_contractions(arg1_trad)}]}
self.counter += 1
break
else:
data_dict[str(self.counter)] = {"ID": self.counter,
"sent": transform_portuguese_contractions(sample[0]),
"ext": [{"arg1": transform_portuguese_contractions(arg0_trad),
"rel": transform_portuguese_contractions(rel_trad),
"arg2": transform_portuguese_contractions(arg1_trad)}]}
self.counter += 1
print(f"{(self.counter / (len(all_sent) * 6))*100:.2f}% concluído ||| {self.counter}/{len(all_sent)*6} ||| thread: {part}")
if part is not None:
path = self.out_path + f"/align/"
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
with open(self.out_path + f"/align/data_dict{part}.json", "a", encoding="utf-8") as f:
f.write(json.dumps(data_dict))
else:
            # save the dictionary
self.save_dict(data_dict)
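
# --- Tiny illustrative sketch of the reconstruction done by load_dataset; not from the original project. ---
# Each BIOES-tagged block from the conll2bioes output has the token in column 0 and the tag in
# column 8; tokens tagged ARG0, V and ARG1 are concatenated into the extraction string. The
# fake 9-column rows below are assumptions chosen only for illustration.
if __name__ == "__main__":
    _block = [["A", "", "", "", "", "", "", "", "B-ARG0"],
              ["campanha", "", "", "", "", "", "", "", "E-ARG0"],
              ["começou", "", "", "", "", "", "", "", "S-V"],
              ["ontem", "", "", "", "", "", "", "", "S-ARG1"]]
    _arg0 = "".join(t[0] + " " for t in _block if "ARG0" in t[8])
    _rel = "".join(t[0] + " " for t in _block if "V" in t[8])
    _arg1 = "".join(t[0] + " " for t in _block if "ARG1" in t[8])
    print(_arg0 + _rel + _arg1)   # -> "A campanha começou ontem "
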
def run(batch_size: int,
dataset_dir: str,
dataset_name: str,
test_size: float,
dev_size: float,
translated: bool,
debug: bool = False,
use_google: bool = True,
sequential: bool = True,
cache_dir: str = "cache"
):
converted = True
OUT_NAME = dataset_name.replace(".conll", "")
INPUT_PATH = ""
path = "outputs"+"/"+OUT_NAME
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
json_dir = path+"/saida_match"
pathlib.Path(json_dir).mkdir(parents=True, exist_ok=True)
if use_google or debug:
batch_size = 1
trans_eng = TranslateDataset(dataset_dir, dataset_name, path, debug=debug, batch_size=batch_size, google=use_google)
if translated:
pass
else:
if use_google:
LoadDataset(dataset_dir, dataset_name, path)
print("Traduzindo com Google")
trans_eng.translate_google(cache_dir=cache_dir)
else:
LoadDataset(dataset_dir, dataset_name, path)
print("Traduzindo com ChatGPT")
trans_eng.translate_gpt()
trans_eng.create_dict()
criar_conll(OUT_NAME, INPUT_PATH, test_size, dev_size, converted=converted, sequential=sequential)
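
# --- Hedged example invocation; not from the original project. ---
# The argument values below are placeholders: the dataset directory and file name are
# assumptions, and a real run also needs the LoadDataset and criar_conll helpers defined
# elsewhere in this project plus Google Translate (or OpenAI) access for the translation step.
if __name__ == "__main__":
    run(batch_size=1,
        dataset_dir="datasets",
        dataset_name="example.conll",
        test_size=0.1,
        dev_size=0.1,
        translated=False,
        debug=True,
        use_google=True)
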
| [
"SENTENÇA: PLACEHOLDER",
"application/json",
"FATO: PLACEHOLDER",
"Você é um tradutor de textos de ingles para portugues brasileiro."
] |
2024-01-10 | RUCAIBox/LLMRank | llmrank~model~rank.py | import os.path as osp
import torch
import openai
import time
import asyncio
import numpy as np
from tqdm import tqdm
from recbole.model.abstract_recommender import SequentialRecommender
from utils import dispatch_openai_requests, dispatch_single_openai_requests
class Rank(SequentialRecommender):
def __init__(self, config, dataset):
super().__init__(config, dataset)
self.config = config
self.max_tokens = config['max_tokens']
self.api_model_name = config['api_name']
openai.api_key = config['api_key']
openai.api_base = config['api_base']
self.api_batch = config['api_batch']
self.async_dispatch = config['async_dispatch']
self.temperature = config['temperature']
self.max_his_len = config['max_his_len']
self.recall_budget = config['recall_budget']
self.boots = config['boots']
self.data_path = config['data_path']
self.dataset_name = dataset.dataset_name
self.id_token = dataset.field2id_token['item_id']
self.item_text = self.load_text()
self.logger.info(f'Avg. t = {np.mean([len(_) for _ in self.item_text])}')
self.fake_fn = torch.nn.Linear(1, 1)
def load_text(self):
token_text = {}
item_text = ['[PAD]']
feat_path = osp.join(self.data_path, f'{self.dataset_name}.item')
if self.dataset_name == 'ml-1m':
with open(feat_path, 'r', encoding='utf-8') as file:
file.readline()
for line in file:
item_id, movie_title, release_year, genre = line.strip().split('\t')
token_text[item_id] = movie_title
for i, token in enumerate(self.id_token):
if token == '[PAD]': continue
raw_text = token_text[token]
if raw_text.endswith(', The'):
raw_text = 'The ' + raw_text[:-5]
elif raw_text.endswith(', A'):
raw_text = 'A ' + raw_text[:-3]
item_text.append(raw_text)
return item_text
elif self.dataset_name == 'Games':
with open(feat_path, 'r', encoding='utf-8') as file:
file.readline()
for line in file:
item_id, title = line.strip().split('\t')
token_text[item_id] = title
for i, token in enumerate(self.id_token):
if token == '[PAD]': continue
raw_text = token_text[token]
item_text.append(raw_text)
return item_text
else:
raise NotImplementedError()
def predict_on_subsets(self, interaction, idxs):
"""
Main function to rank with LLMs
        :param interaction: RecBole Interaction batch holding the users' interaction sequences
        :param idxs: item id retrieved by candidate generation models [batch_size, candidate_size]
        :return: a [batch_size, n_items] tensor of ranking scores
"""
origin_batch_size = idxs.shape[0]
if self.boots:
"""
bootstrapping is adopted to alleviate position bias
`fix_enc` is invalid in this case"""
idxs = np.tile(idxs, [self.boots, 1])
np.random.shuffle(idxs.T)
batch_size = idxs.shape[0]
pos_items = interaction[self.POS_ITEM_ID]
prompt_list = []
for i in tqdm(range(batch_size)):
user_his_text, candidate_text, candidate_text_order, candidate_idx = self.get_batch_inputs(interaction, idxs, i)
prompt = self.construct_prompt(self.dataset_name, user_his_text, candidate_text_order)
prompt_list.append([{'role': 'user', 'content': prompt}])
openai_responses = self.dispatch_openai_api_requests(prompt_list, batch_size)
scores = torch.full((idxs.shape[0], self.n_items), -10000.)
for i, openai_response in enumerate(tqdm(openai_responses)):
user_his_text, candidate_text, candidate_text_order, candidate_idx = self.get_batch_inputs(interaction, idxs, i)
response = openai_response['choices'][0]['message']['content']
response_list = response.split('\n')
self.logger.info(prompt_list[i])
self.logger.info(response)
self.logger.info(f'Here are candidates: {candidate_text}')
self.logger.info(f'Here are answer: {response_list}')
if self.dataset_name == 'ml-1m':
rec_item_idx_list = self.parsing_output_text(scores, i, response_list, idxs, candidate_text)
else:
rec_item_idx_list = self.parsing_output_indices(scores, i, response_list, idxs, candidate_text)
if int(pos_items[i % origin_batch_size]) in candidate_idx:
target_text = candidate_text[candidate_idx.index(int(pos_items[i % origin_batch_size]))]
try:
ground_truth_pr = rec_item_idx_list.index(target_text)
self.logger.info(f'Ground-truth [{target_text}]: Ranks {ground_truth_pr}')
except:
self.logger.info(f'Fail to find ground-truth items.')
print(target_text)
print(rec_item_idx_list)
if self.boots:
scores = scores.view(self.boots,-1,scores.size(-1))
scores = scores.sum(0)
return scores
def get_batch_inputs(self, interaction, idxs, i):
user_his = interaction[self.ITEM_SEQ]
user_his_len = interaction[self.ITEM_SEQ_LEN]
origin_batch_size = user_his.size(0)
real_his_len = min(self.max_his_len, user_his_len[i % origin_batch_size].item())
user_his_text = [str(j) + '. ' + self.item_text[user_his[i % origin_batch_size, user_his_len[i % origin_batch_size].item() - real_his_len + j].item()] \
for j in range(real_his_len)]
candidate_text = [self.item_text[idxs[i,j]]
for j in range(idxs.shape[1])]
candidate_text_order = [str(j) + '. ' + self.item_text[idxs[i,j].item()]
for j in range(idxs.shape[1])]
candidate_idx = idxs[i].tolist()
return user_his_text, candidate_text, candidate_text_order, candidate_idx
def construct_prompt(self, dataset_name, user_his_text, candidate_text_order):
if dataset_name == 'ml-1m':
prompt = f"I've watched the following movies in the past in order:\n{user_his_text}\n\n" \
f"Now there are {self.recall_budget} candidate movies that I can watch next:\n{candidate_text_order}\n" \
f"Please rank these {self.recall_budget} movies by measuring the possibilities that I would like to watch next most, according to my watching history. Please think step by step.\n" \
f"Please show me your ranking results with order numbers. Split your output with line break. You MUST rank the given candidate movies. You can not generate movies that are not in the given candidate list."
elif dataset_name == 'Games':
prompt = f"I've purchased the following products in the past in order:\n{user_his_text}\n\n" \
f"Now there are {self.recall_budget} candidate products that I can consider to purchase next:\n{candidate_text_order}\n" \
f"Please rank these {self.recall_budget} products by measuring the possibilities that I would like to purchase next most, according to the given purchasing records. Please think step by step.\n" \
f"Please only output the order numbers after ranking. Split these order numbers with line break."
else:
raise NotImplementedError(f'Unknown dataset [{dataset_name}].')
return prompt
def dispatch_openai_api_requests(self, prompt_list, batch_size):
openai_responses = []
self.logger.info('Launch OpenAI APIs')
if self.async_dispatch:
self.logger.info('Asynchronous dispatching OpenAI API requests.')
for i in tqdm(range(0, batch_size, self.api_batch)):
while True:
try:
openai_responses += asyncio.run(
dispatch_openai_requests(prompt_list[i:i+self.api_batch], self.api_model_name, self.temperature)
)
break
except Exception as e:
print(f'Error {e}, retry batch {i // self.api_batch} at {time.ctime()}', flush=True)
time.sleep(20)
else:
self.logger.info('Dispatching OpenAI API requests one by one.')
for message in tqdm(prompt_list):
openai_responses.append(dispatch_single_openai_requests(message, self.api_model_name, self.temperature))
self.logger.info('Received OpenAI Responses')
return openai_responses
def parsing_output_text(self, scores, i, response_list, idxs, candidate_text):
rec_item_idx_list = []
found_item_cnt = 0
for j, item_detail in enumerate(response_list):
if len(item_detail) < 1:
continue
if item_detail.endswith('candidate movies:'):
continue
pr = item_detail.find('. ')
if item_detail[:pr].isdigit():
item_name = item_detail[pr + 2:]
else:
item_name = item_detail
matched_name = None
for candidate_text_single in candidate_text:
if candidate_text_single in item_name:
if candidate_text_single in rec_item_idx_list:
break
rec_item_idx_list.append(candidate_text_single)
matched_name = candidate_text_single
break
if matched_name is None:
continue
candidate_pr = candidate_text.index(matched_name)
scores[i, idxs[i, candidate_pr]] = self.recall_budget - found_item_cnt
found_item_cnt += 1
return rec_item_idx_list
def parsing_output_indices(self, scores, i, response_list, idxs, candidate_text):
rec_item_idx_list = []
found_item_cnt = 0
for j, item_detail in enumerate(response_list):
if len(item_detail) < 1:
continue
if not item_detail.isdigit():
continue
pr = int(item_detail)
if pr >= self.recall_budget:
continue
matched_name = candidate_text[pr]
if matched_name in rec_item_idx_list:
continue
rec_item_idx_list.append(matched_name)
scores[i, idxs[i, pr]] = self.recall_budget - found_item_cnt
found_item_cnt += 1
if len(rec_item_idx_list) >= self.recall_budget:
break
return rec_item_idx_list
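
# --- Illustrative sketch with fake data; not from the original project. ---
# It shows the two mechanics used by predict_on_subsets: (1) the bootstrap tiling/shuffle that
# re-orders candidates before prompting, and (2) the scoring rule of the parsing_output_*
# helpers, where the first-ranked item gets recall_budget points, the next recall_budget - 1,
# and so on, before the per-copy scores are summed. Shapes and numbers are assumptions chosen
# only for illustration; np and torch are the modules imported at the top of this file.
if __name__ == "__main__":
    _boots, _budget, _n_items = 3, 4, 10
    _idxs = np.array([[2, 5, 7, 9]])              # one user, four candidate item ids
    _idxs = np.tile(_idxs, [_boots, 1])           # one copy per bootstrap round
    np.random.shuffle(_idxs.T)                    # shuffle the candidate columns in place
    _scores = torch.full((_idxs.shape[0], _n_items), -10000.)
    for _i in range(_idxs.shape[0]):
        for _rank, _item in enumerate(_idxs[_i]):  # pretend the LLM returned this order
            _scores[_i, _item] = _budget - _rank
    _scores = _scores.view(_boots, -1, _scores.size(-1)).sum(0)
    print(_scores)
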
| [
"I've watched the following movies in the past in order:\nPLACEHOLDER\n\n",
"Please show me your ranking results with order numbers. Split your output with line break. You MUST rank the given candidate movies. You can not generate movies that are not in the given candidate list.",
"[]",
"Please only output the order numbers after ranking. Split these order numbers with line break.",
"I've purchased the following products in the past in order:\nPLACEHOLDER\n\n"
] |
2024-01-10 | approximatelabs/datadm | datadm~agents~cotmultistep.py | import guidance
from datadm.agent import Agent
from datadm.conversation import clean_conversation_list
base_prompt = '''
{{#user~}}
You are a helpful AI code-writing assistant, the perfect data analyst who is jovial, fun and writes great code to solve data problems!
Answer my questions with both text describing your plan (but not an answer), and then the code in markdown that will be executed!
* Use `print` to show results.
* Don't answer the question directly, instead suggest how you will solve the problem, then write in a ```python markdown block, the code you will use to solve the problem.
* For plotting, please use `matplotlib`. use `plt.show()` to display the plot to the user.
{{~/user}}
{{#each conversation}}
{{#if (equal this.role 'user')}}
{{#user~}}
{{this.content}}
{{~/user}}
{{/if}}
{{#if (equal this.role 'assistant')}}
{{#assistant~}}
{{this.content}}
{{~/assistant}}
{{/if}}
{{/each}}
'''
precode_prompt = '''
{{#assistant~}}
{{gen "thoughts" temperature=0.1 max_tokens=120 stop=["```", "<|end|>"]}}
```python
{{gen "code" temperature=0.0 max_tokens=800 stop=["```", "<|end|>"]}}
{{~/assistant}}
'''
postcode_prompt = '''
{{#assistant~}}
Looking at the executed results above, we can see {{gen "summary" temperature=0.0 max_tokens=120 stop=["```", "<|end|>"]}}
{{~/assistant}}
'''
class CoTMultiStep(Agent):
is_local = True
def _bot(self, repl, conversation, llm):
starting_convo = conversation
tries = 0
while tries < 2:
precode = guidance(base_prompt + precode_prompt, llm=llm)
for result in precode(conversation=clean_conversation_list(starting_convo), silent=True, stream=True):
resolved_content = result.get('thoughts') or ''
resolved_content += '\n```python\n'+(result.get('code') or '')+'\n```'
resolved_convo = starting_convo + [{'role': 'assistant', 'content': resolved_content}]
yield resolved_convo
starting_convo += [{'role': 'assistant', 'content': resolved_content}]
exec_result = repl.exec(result['code'])
starting_convo += [{'role': 'assistant', 'content': exec_result}]
yield starting_convo
if exec_result['tracebacks']:
tries += 1
continue
break
postcode = guidance(base_prompt + postcode_prompt, llm=llm)
for result in postcode(conversation=clean_conversation_list(starting_convo), silent=True, stream=True):
yield starting_convo + [{'role': 'assistant', 'content': f'Looking at the executed results above, we can see {result.get("summary") or ""}'}]
| [
"\n{{#user~}}\nYou are a helpful AI code-writing assistant, the perfect data analyst who is jovial, fun and writes great code to solve data problems!\n\nAnswer my questions with both text describing your plan (but not an answer), and then the code in markdown that will be executed!\n\n* Use `print` to show results.\n* Don't answer the question directly, instead suggest how you will solve the problem, then write in a ```python markdown block, the code you will use to solve the problem.\n* For plotting, please use `matplotlib`. use `plt.show()` to display the plot to the user.\n{{~/user}}\n{{#each conversation}}\n{{#if (equal this.role 'user')}}\n{{#user~}}\n{{this.content}}\n{{~/user}}\n{{/if}}\n{{#if (equal this.role 'assistant')}}\n{{#assistant~}}\n{{this.content}}\n{{~/assistant}}\n{{/if}}\n{{/each}}\n",
"\n{{#assistant~}}\n{{gen \"thoughts\" temperature=0.1 max_tokens=120 stop=[\"```\", \"<|end|>\"]}}\n```python\n{{gen \"code\" temperature=0.0 max_tokens=800 stop=[\"```\", \"<|end|>\"]}}\n{{~/assistant}}\n",
"\n{{#assistant~}}\nLooking at the executed results above, we can see {{gen \"summary\" temperature=0.0 max_tokens=120 stop=[\"```\", \"<|end|>\"]}}\n{{~/assistant}}\n"
] |
2024-01-10 | approximatelabs/datadm | datadm~backend.py | import guidance
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
# TODO: fix this to check devices and packages to dynamically adjust available LLMs and models
try:
import accelerate
local_available = True
except ImportError:
local_available = False
class StarChat(guidance.llms.Transformers):
def __init__(self, model_path=None, revision=None, **kwargs):
import torch
tokenizer = AutoTokenizer.from_pretrained(model_path, device_map='auto', revision=revision)
model = AutoModelForCausalLM.from_pretrained(model_path, device_map='auto', torch_dtype=torch.bfloat16, revision=revision)
model.eval()
super().__init__(model, tokenizer=tokenizer, device_map='auto', **kwargs)
@staticmethod
def role_start(role):
return f"<|{role}|>"
@staticmethod
def role_end(role):
return '<|end|>'
class BackendLLMManager():
OPENAI_MODELS = ['gpt-3.5-turbo', 'gpt-4', 'gpt-3.5-turbo-16k', 'gpt-4-32k']
def __init__(self):
self.llms = {}
if local_available:
self.llms['starchat-alpha-cuda'] = {'state': 'unloaded', 'llm': None, 'mode': 'cuda', 'model_path': 'HuggingFaceH4/starchat-alpha', 'revision': '5058bd8557100137ade3c459bfc8100e90f71ec7'}
self.llms['starchat-beta-cuda'] = {'state': 'unloaded', 'llm': None, 'mode': 'cuda', 'model_path': 'HuggingFaceH4/starchat-beta', 'revision': 'b1bcda690655777373f57ea6614eb095ec2c886f'}
for model_name in self.OPENAI_MODELS:
self.llms[model_name] = {'state': 'unloaded', 'llm': None, 'mode': 'api'}
def load(self, llm_name):
if self.llms[llm_name]['state'] == 'unloaded':
self.llms[llm_name]['state'] = 'loading'
if llm_name in ['starchat-alpha-cuda', 'starchat-beta-cuda']:
self.llms[llm_name]['llm'] = StarChat(**self.llms[llm_name])
elif llm_name in self.OPENAI_MODELS:
if 'OPENAI_API_KEY' not in os.environ:
self.llms[llm_name]['state'] = 'error'
raise RuntimeError("OPENAI_API_KEY not found in environment")
self.llms[llm_name]['llm'] = guidance.llms.OpenAI(llm_name)
else:
self.llms[llm_name]['state'] = 'error'
raise RuntimeError(f"LLM {llm_name} not supported")
self.llms[llm_name]['state'] = 'ready'
return self.model_status(llm_name)
def unload(self, llm_name):
if llm_name in self.llms:
self.llms[llm_name]['state'] = 'unloaded'
self.llms[llm_name]['llm'] = None
def model_status(self, llm_name):
state = self.llms[llm_name]['state']
return [(llm_name, state)]
llm_manager = BackendLLMManager()
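
# --- Illustrative usage sketch; not from the original project. ---
# A hedged example of the intended load / inspect / unload flow. Loading an OpenAI-backed
# model needs OPENAI_API_KEY in the environment, and the StarChat entries additionally need a
# CUDA GPU plus the `accelerate` package, so this only runs when the module is executed directly.
if __name__ == "__main__":
    print(llm_manager.model_status("gpt-3.5-turbo"))    # e.g. [('gpt-3.5-turbo', 'unloaded')]
    try:
        llm_manager.load("gpt-3.5-turbo")               # state becomes 'ready' on success
    except RuntimeError as exc:
        print(f"could not load model: {exc}")
    print(llm_manager.model_status("gpt-3.5-turbo"))
    llm_manager.unload("gpt-3.5-turbo")
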
| [] |
2024-01-10 | approximatelabs/datadm | datadm~agents~baseline.py | import guidance
import re
from datadm.agent import Agent
from datadm.conversation import clean_conversation_list
base_prompt = '''
{{#user~}}
You are a helpful AI code-writing assistant, the perfect data analyst who is jovial, fun and writes great code to solve data problems!
Answer my questions with both text describing your plan (but not an answer), and then the code in markdown that will be executed!
* Use `print` to show results.
* Don't answer the question directly, instead suggest how you will solve the problem, then write in a ```python markdown block, the code you will use to solve the problem.
* For plotting, please use `matplotlib`. use `plt.show()` to display the plot to the user.
{{~/user}}
{{#each conversation}}
{{#if (equal this.role 'user')}}
{{#user~}}
{{this.content}}
{{~/user}}
{{/if}}
{{#if (equal this.role 'assistant')}}
{{#assistant~}}
{{this.content}}
{{~/assistant}}
{{/if}}
{{/each}}
'''
gensponse = '''
{{#assistant~}}
{{gen "response" temperature=0.5 max_tokens=800}}
{{~/assistant}}
'''
def extract_all_code_blocks(text):
starts = [m.start() for m in re.finditer('```', text)]
output = ""
for i in range(0, len(starts), 2):
res = text[starts[i]+3:starts[i+1]]
if res.startswith('python'):
res = res[6:]
output += res
return output
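
# --- Tiny illustrative check; not from the original project. ---
# extract_all_code_blocks assumes well-formed, paired ``` fences (an odd number of fences
# would make starts[i+1] go out of range) and strips a leading "python" language tag.
if __name__ == "__main__":
    _sample = "Here is the plan.\n```python\nprint('hi')\n```\nDone."
    assert extract_all_code_blocks(_sample) == "\nprint('hi')\n"
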
class Baseline(Agent):
def _bot(self, repl, conversation, llm):
starting_convo = conversation
tries = 0
while tries < 2:
precode = guidance(base_prompt + gensponse, llm=llm)
for result in precode(conversation=clean_conversation_list(starting_convo), silent=True, stream=True):
yield starting_convo + [{'role': 'assistant', 'content': result.get('response') or ''}]
starting_convo += [{'role': 'assistant', 'content': result.get('response')}]
exec_result = repl.exec(extract_all_code_blocks(result['response']))
starting_convo += [{'role': 'assistant', 'content': exec_result}]
yield starting_convo
if exec_result['tracebacks']:
tries += 1
continue
break
| [
"\n{{#user~}}\nYou are a helpful AI code-writing assistant, the perfect data analyst who is jovial, fun and writes great code to solve data problems!\n\nAnswer my questions with both text describing your plan (but not an answer), and then the code in markdown that will be executed!\n\n* Use `print` to show results.\n* Don't answer the question directly, instead suggest how you will solve the problem, then write in a ```python markdown block, the code you will use to solve the problem.\n* For plotting, please use `matplotlib`. use `plt.show()` to display the plot to the user.\n{{~/user}}\n{{#each conversation}}\n{{#if (equal this.role 'user')}}\n{{#user~}}\n{{this.content}}\n{{~/user}}\n{{/if}}\n{{#if (equal this.role 'assistant')}}\n{{#assistant~}}\n{{this.content}}\n{{~/assistant}}\n{{/if}}\n{{/each}}\n",
"response"
] |
2024-01-10 | tomasonjo/NeoGPT-Recommender | app~graph2text.py | import os
import openai
openai.api_key = os.environ.get('OPENAI_KEY')
system = f"""
You are an assistant that helps to generate text to form nice and human understandable answers based.
The latest prompt contains the information, and you need to generate a human readable response based on the given information.
Make it sound like the information are coming from an AI assistant, but don't add any information.
Do not add any additional information that is not explicitly provided in the latest prompt.
I repeat, do not add any information that is not explicitly given.
"""
def generate_response(messages):
messages = [
{"role": "system", "content": system}
] + messages
print(messages)
# Make a request to OpenAI
completions = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
temperature=0.0
)
response = completions.choices[0].message.content
print(response)
# If the model apologized, remove the first line or sentence
if "apologi" in response:
if "\n" in response:
response = " ".join(response.split("\n")[1:])
else:
response = " ".join(response.split(".")[1:])
return response
if __name__ == '__main__':
data = [{'actor': 'Sigourney Weaver', 'role': "Witch"}, {'actor': 'Holly Hunter', "role": "Assassin"}, {
'actor': 'Dermot Mulroney'}, {'actor': 'William McNamara'}]
print(generate_response([{'role': 'user', 'content': str(data)}]))
| [
"[\n {\"role\": \"system\", \"content\": system}\n ] + messages",
"\nYou are an assistant that helps to generate text to form nice and human understandable answers based.\nThe latest prompt contains the information, and you need to generate a human readable response based on the given information.\nMake it sound like the information are coming from an AI assistant, but don't add any information.\nDo not add any additional information that is not explicitly provided in the latest prompt.\nI repeat, do not add any information that is not explicitly given.\n"
] |
2024-01-10 | tomasonjo/NeoGPT-Recommender | app~english2cypher.py | import os
import openai
from retry import retry
from training import examples
openai.api_key = os.environ.get('OPENAI_KEY')
system = f"""
You are an assistant with an ability to generate Cypher queries based off example Cypher queries.
Example Cypher queries are: \n {examples} \n
Do not response with any explanation or any other information except the Cypher query.
You do not ever apologize and strictly generate cypher statements based of the provided Cypher examples.
You need to update the database using an appropriate Cypher statement when a user mentions their likes or dislikes, or what they watched already.
Do not provide any Cypher statements that can't be inferred from Cypher examples.
Inform the user when you can't infer the cypher statement due to the lack of context of the conversation and state what is the missing context.
"""
@retry(tries=2, delay=5)
def generate_cypher(messages):
messages = [
{"role": "system", "content": system}
] + messages
print(messages)
# Make a request to OpenAI
completions = openai.ChatCompletion.create(
model="gpt-4",
messages=messages,
temperature=0.0
)
response = completions.choices[0].message.content
# Sometime the models bypasses system prompt and returns
# data based on previous dialogue history
if not "MATCH" in response and "{" in response:
raise Exception(
"GPT bypassed system message and is returning response based on previous conversation history" + response)
# If the model apologized, remove the first line
if "apologi" in response:
response = " ".join(response.split("\n")[1:])
# Sometime the model adds quotes around Cypher when it wants to explain stuff
if "`" in response:
response = response.split("```")[1].strip("`")
print(response)
return response
if __name__ == '__main__':
print(generate_cypher([{'role': 'user', 'content': 'What are some good cartoon?'},
{'role': 'assistant', 'content': 'Shrek 3'},
{'role': 'user',
'content': 'Which actors appeared in it?'}
]))
print(generate_cypher([{'role': 'user', 'content': 'What are some good cartoon?'},
{'role': 'assistant', 'content': 'Shrek 3'},
{'role': 'user',
'content': 'Who was the first person on the moon?'}
]))
| [
"Who was the first person on the moon?",
"\nYou are an assistant with an ability to generate Cypher queries based off example Cypher queries.\nExample Cypher queries are: \n PLACEHOLDER \n\nDo not response with any explanation or any other information except the Cypher query.\nYou do not ever apologize and strictly generate cypher statements based of the provided Cypher examples.\nYou need to update the database using an appropriate Cypher statement when a user mentions their likes or dislikes, or what they watched already.\nDo not provide any Cypher statements that can't be inferred from Cypher examples.\nInform the user when you can't infer the cypher statement due to the lack of context of the conversation and state what is the missing context.\n",
"What are some good cartoon?",
"[\n {\"role\": \"system\", \"content\": system}\n ] + messages",
"Which actors appeared in it?",
"Shrek 3"
] |
2024-01-10 | pkt1583/openai-samples | End_to_end_Solutions~AOAIVirtualAssistant~src~botapp~tasks~auto_insurance.py | import os
from data.chat_sessions.contracts.chat_session import DialogClassification
from data.chat_sessions.contracts.chat_session import ChatSession
from data.faqs.contracts.faq import InsuranceType
from cognition.openai.model_manager import OpenAIModelManager
from utilities.model_input_convertor import ModelInputConvertor
from data.user_profiles.api.manager_flat import UserProfileManagerFlat
from data.faqs.api.manager import FAQManager
from config import DefaultConfig
class AutoInsurance:
def __init__(self, **kwargs):
self.database_name = DefaultConfig.COSMOS_DB_NAME
self.user_profile_container_name = DefaultConfig.COSMOS_DB_USER_PROFILE_CONTAINER_NAME
self.faq_container_name = DefaultConfig.COSMOS_DB_FAQ_CONTAINER_NAME
self.name = "auto-insurance"
self.user_profile_manager = UserProfileManagerFlat(self.database_name, self.user_profile_container_name)
self.faq_manager = FAQManager(self.database_name, self.faq_container_name)
def run(self, conversation: ChatSession, user_id: str):
open_ai_config = {'api-key': DefaultConfig.OPENAI_RESOURCE_KEY,
'resource-name': DefaultConfig.OPENAI_RESOURCE_NAME,
'deployment-name': DefaultConfig.OPENAI_CHATGPT_DEPLOYMENT_NAME,
'api-version': DefaultConfig.OPENAI_API_VERSION
}
config_file_path = os.path.join(os.getcwd(), 'cognition', 'config.yml')
auto_insurance_model = OpenAIModelManager(config_file_path, self.name, open_ai_config)
filtered_transcript = conversation.get_transcript({'classification': DialogClassification.auto_insurance.value})
model_converted_transcript = ModelInputConvertor.model_input_convertor(filtered_transcript)
relevant_info = self.faq_manager.get_faqs(InsuranceType.auto).info.relevant_info
user_info = self.user_profile_manager.get_user_profile(user_id).__str__()
return auto_insurance_model.generate_dialog({"<CONTEXT>": model_converted_transcript,
"<CONTENT_A>": user_info,
"<CONTENT_B>": relevant_info})
| [] |
2024-01-10 | eggressive/corise-frontend | podcast_backend.py | import modal
def download_whisper():
# Load the Whisper model
import os
import whisper
print ("Download the Whisper model")
# Perform download only once and save to Container storage
whisper._download(whisper._MODELS["medium"], '/content/podcast/', False)
stub = modal.Stub("corise-podcast-project")
corise_image = modal.Image.debian_slim().pip_install("feedparser",
"https://github.com/openai/whisper/archive/9f70a352f9f8630ab3aa0d06af5cb9532bd8c21d.tar.gz",
"requests",
"ffmpeg",
"openai",
"tiktoken",
"wikipedia",
"ffmpeg-python").apt_install("ffmpeg").run_function(download_whisper)
@stub.function(image=corise_image, gpu="any", timeout=600)
def get_transcribe_podcast(rss_url, local_path):
print ("Starting Podcast Transcription Function")
print ("Feed URL: ", rss_url)
print ("Local Path:", local_path)
# Read from the RSS Feed URL
import feedparser
intelligence_feed = feedparser.parse(rss_url)
podcast_title = intelligence_feed['feed']['title']
episode_title = intelligence_feed.entries[0]['title']
episode_image = intelligence_feed['feed']['image'].href
for item in intelligence_feed.entries[0].links:
if (item['type'] == 'audio/mpeg'):
episode_url = item.href
episode_name = "podcast_episode.mp3"
print ("RSS URL read and episode URL: ", episode_url)
# Download the podcast episode by parsing the RSS feed
from pathlib import Path
p = Path(local_path)
p.mkdir(exist_ok=True)
print ("Downloading the podcast episode")
import requests
with requests.get(episode_url, stream=True) as r:
r.raise_for_status()
episode_path = p.joinpath(episode_name)
with open(episode_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
print ("Podcast Episode downloaded")
# Load the Whisper model
import os
import whisper
# Load model from saved location
print ("Load the Whisper model")
model = whisper.load_model('medium', device='cuda', download_root='/content/podcast/')
# Perform the transcription
print ("Starting podcast transcription")
result = model.transcribe(local_path + episode_name)
# Return the transcribed text
print ("Podcast transcription completed, returning results...")
output = {}
output['podcast_title'] = podcast_title
output['episode_title'] = episode_title
output['episode_image'] = episode_image
output['episode_transcript'] = result['text']
return output
@stub.function(image=corise_image, secret=modal.Secret.from_name("dd-openai-secret"))
def get_podcast_summary(podcast_transcript):
import openai
import tiktoken
# Tokenize encoding
enc = tiktoken.encoding_for_model("gpt-4")
token_count = len(enc.encode(podcast_transcript))
print ("Number of tokens in input prompt in gpt-4", token_count)
instructPrompt = """
I am providing you with a transcription of a podcast.
Write an entertaining summary of the podcast in the tone of Joe Rogan.
"""
# Assuming podcast_transcript variable is already defined:
request = instructPrompt + podcast_transcript
chatOutput = openai.ChatCompletion.create(model="gpt-4",
messages=[{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": request}
]
)
podcastSummary = chatOutput.choices[0].message.content
print("Podcast Summary", podcastSummary)
return podcastSummary
@stub.function(image=corise_image, secret=modal.Secret.from_name("dd-openai-secret"))
def get_podcast_guest(podcast_transcript):
import openai
import wikipedia
import json
request = podcast_transcript[:5500]
try:
completion = openai.ChatCompletion.create(
model="gpt-4",
messages=[{"role": "user", "content": request}],
functions=[
{
"name": "get_podcast_guest_information",
"description": "Get information on the podcast guest using their full name and the name of the organization they are part of to search for them on Wikipedia or Google",
"parameters": {
"type": "object",
"properties": {
"guest_name": {
"type": "string",
"description": "The full name of the guest who is speaking in the podcast",
},
"guest_organization": {
"type": "string",
"description": "The full name of the organization that the podcast guest belongs to or runs",
},
"guest_title": {
"type": "string",
"description": "The title, designation or role of the podcast guest in their organization",
},
},
"required": ["guest_name"],
},
}
],
function_call={"name": "get_podcast_guest_information"}
)
except Exception as e:
return f"Error fetching characters from model: {str(e)}"
response_message = completion["choices"][0]["message"]
podcastGuest = []
if response_message.get("function_call"):
function_args = json.loads(response_message["function_call"]["arguments"])
        # The schema above returns guest_name / guest_organization / guest_title
        # (not a "characters" list), so collect those fields directly.
        guest = {k: function_args.get(k, "") for k in
                 ("guest_name", "guest_organization", "guest_title")}
        if guest["guest_name"]:
            podcastGuest.append(guest)
return podcastGuest
@stub.function(image=corise_image, secret=modal.Secret.from_name("dd-openai-secret"))
def get_podcast_highlights(podcast_transcript):
import openai
instructPrompt = """
I am providing you with a transcription of a podcast.
Provide highlights of the podcast episode.
* The host, [host name], interviewed [guest name], an expert on [guest's expertise].
* [guest name] shared some fascinating insights on [topic of discussion].
* Some of the key takeaways from the episode include:
* [Key takeaway 1]
* [Key takeaway 2]
* [Key takeaway 3]
"""
request = instructPrompt + podcast_transcript
try:
# Make the API call to get highlights
chatOutput = openai.ChatCompletion.create(
model="gpt-4",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": request}
]
)
podcastHighlights = chatOutput.choices[0].message.content
except Exception as e:
return f"An error occurred while fetching podcast highlights: {str(e)}"
return podcastHighlights
@stub.function(image=corise_image, secret=modal.Secret.from_name("dd-openai-secret"), timeout=1200)
def process_podcast(url, path):
output = {}
podcast_details = get_transcribe_podcast.call(url, path)
podcast_summary = get_podcast_summary.call(podcast_details['episode_transcript'])
podcast_guest = get_podcast_guest.call(podcast_details['episode_transcript'])
podcast_highlights = get_podcast_highlights.call(podcast_details['episode_transcript'])
output['podcast_details'] = podcast_details
output['podcast_summary'] = podcast_summary
output['podcast_guest'] = podcast_guest
output['podcast_highlights'] = podcast_highlights
return output
@stub.local_entrypoint()
def test_method(url, path):
output = {}
podcast_details = get_transcribe_podcast.call(url, path)
print ("Podcast Summary: ", get_podcast_summary.call(podcast_details['episode_transcript']))
print ("Podcast Guest Information: ", get_podcast_guest.call(podcast_details['episode_transcript']))
print ("Podcast Highlights: ", get_podcast_highlights.call(podcast_details['episode_transcript']))
| [
"\n I am providing you with a transcription of a podcast. \n Provide highlights of the podcast episode.\n\n * The host, [host name], interviewed [guest name], an expert on [guest's expertise].\n * [guest name] shared some fascinating insights on [topic of discussion].\n * Some of the key takeaways from the episode include:\n * [Key takeaway 1]\n * [Key takeaway 2]\n * [Key takeaway 3]\n ",
"You are a helpful assistant.",
"\n I am providing you with a transcription of a podcast.\n Write an entertaining summary of the podcast in the tone of Joe Rogan.\n "
] |
2024-01-10 | TravinDSO/GPT_Terminal_Public | api~question_processing.py | import os
import gc
import gzip
import json
import math
import logging
import time
import tkinter as tk
import xml.etree.ElementTree as ET
from datetime import datetime
from box_sdk_gen.ccg_auth import CCGConfig,BoxCCGAuth
from box_sdk_gen.developer_token_auth import BoxDeveloperTokenAuth
from box_sdk_gen.client import BoxClient
from notion_client import Client as NotionClient
from dotenv import load_dotenv
from PyPDF2 import PdfReader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import SeleniumURLLoader, CSVLoader, NotionDBLoader
from langchain.callbacks import get_openai_callback
# Process the question and return the answer
# Also perform the indexing of the documents if needed
def process_question(total_docs_var,max_tokens_var,query_temp,openai_status_var,doc_text,env_file,data_use, query, prompt_style, data_folder,reindex=False,chat_history=[]):
query_temp = query_temp.get()
max_tokens = int(float(max_tokens_var.get()))
doc_text.insert(tk.END, "Using environment file: " + env_file + "\n")
doc_text.insert(tk.END, "Using data folder: " + data_folder + "\n")
doc_text.update()
load_dotenv(env_file, override=True)
# Load the OPENAI environment variables from the .env file depending on use_azure
use_azure = os.getenv("USE_AZURE")
if use_azure.lower() == "true":
USE_AZURE = True
os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_BASE"] = os.getenv("AZURE_OPENAI_API_ENDPOINT")
os.environ["OPENAI_API_KEY"] = os.getenv("AZURE_OPENAI_API_KEY")
EMBEDDINGS_MODEL = os.getenv("AZURE_EMBEDDINGS_MODEL")
AZURE_OPENAI_API_MODEL = os.getenv("AZURE_OPENAI_API_MODEL")
OpenAIEmbeddings.deployment = os.getenv("AZURE_OPENAI_API_MODEL")
else:
USE_AZURE = False
os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
EMBEDDINGS_MODEL = os.getenv("EMBEDDINGS_MODEL")
OPENAI_API_MODEL = os.getenv("OPENAI_API_MODEL")
# Load the NOTION environment variables from the .env file depending on use_notion
use_notion = os.getenv("USE_NOTION")
if use_notion.lower() == "true":
USE_NOTION = True
NOTION_TOKEN = os.getenv("NOTION_API_KEY")
DATABASE_ID = os.getenv("NOTION_DATABASE_ID")
else:
USE_NOTION = False
# Load the BOX environment variables from the .env file depending on use_notion
use_box = os.getenv("USE_BOX")
if use_box.lower() == "true":
USE_BOX = True
BOX_TOKEN = os.getenv("BOX_TOKEN")
BOX_FOLDER_ID = os.getenv("BOX_FOLDER_ID")
else:
USE_BOX = False
# Text splitter for splitting the text into chunks
class CustomTextSplitter(CharacterTextSplitter):
def __init__(self, separators, *args, **kwargs):
super().__init__(*args, **kwargs)
self.separators = separators
def split_text(self, text):
import re
chunks = []
pattern = '|'.join(map(re.escape, self.separators))
splits = re.split(pattern, text)
return self._merge_splits(splits, self.separators[0])
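    # Minimal usage sketch (illustrative only, not executed here; the variable names are
    # assumptions): with the separators configured later in this function, the splitter breaks
    # text on newlines and sentence boundaries into roughly chunk_size-character pieces, e.g.
    #   splitter = CustomTextSplitter(separators=['\n', '. '], chunk_size=1000,
    #                                 chunk_overlap=100, length_function=len)
    #   chunks = splitter.split_text(long_document_text)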
previous_logging_level = logging.getLogger().getEffectiveLevel()
# Temporarily set the logging level to suppress warnings
logging.getLogger().setLevel(logging.ERROR) # Set to logging.ERROR to suppress warnings
if USE_AZURE:
llm = ChatOpenAI(max_tokens=max_tokens,deployment_id=AZURE_OPENAI_API_MODEL,temperature=query_temp,top_p=1,frequency_penalty=0,presence_penalty=0)
else:
llm = ChatOpenAI(max_tokens=max_tokens,model_name=OPENAI_API_MODEL,temperature=query_temp,top_p=1,frequency_penalty=0,presence_penalty=0)
prompt_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question. The System defines the personality and instructions to modify the response.
System: {prompt_style}
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""
doc_chain = load_qa_chain(llm, chain_type="stuff")
logging.getLogger().setLevel(previous_logging_level)
chain_path = os.path.join(data_folder, 'chain.json')
docsearch_path = os.path.join(data_folder, 'docsearch')
url_file = os.path.join(data_folder, 'urls.txt')
wurl_file = os.path.join(data_folder, 'wurls.txt') # A URL that will be walked to find more URLs and content
compressed_raw_text_file = os.path.join(data_folder, 'temporary_cached_data.gz')
add_docsearch_file = os.path.join(data_folder, 'add_docsearch.json')
if os.path.exists(compressed_raw_text_file):
os.remove(compressed_raw_text_file)
if not os.path.exists(url_file):
with open(url_file, 'w') as f:
f.write('http://travin.com/blank')
# Index the documents if needed
# Do this if the chain file doesn't exist or if reindex is True
# Do not index if data_use is 0 (no data query)
if (not os.path.exists(chain_path) or reindex) and data_use > 0:
skipped_path = ""
openai_status_var.set("Reindexing documents...")
with gzip.open(compressed_raw_text_file, 'wt', encoding='utf-8') as f:
for root, _, files in os.walk(data_folder):
for file in files:
if file.endswith('.pdf'):
pdf_path = os.path.join(root, file)
doc_text.insert(tk.END, f"Parsing: {pdf_path}\n")
doc_text.update()
doc_text.see(tk.END)
reader = PdfReader(pdf_path)
for i, page in enumerate(reader.pages):
text = page.extract_text()
if text:
f.write(text)
# Release memory after processing each PDF
del reader
gc.collect()
elif file.endswith('.csv'):
csv_path = os.path.join(root, file)
doc_text.insert(tk.END, f"Parsing: {csv_path}\n")
doc_text.update()
doc_text.see(tk.END)
reader = CSVLoader(csv_path)
data = reader.load()
for i, row in enumerate(data):
if row:
f.write(row.page_content)
# Release memory after processing each csv
del reader
gc.collect()
elif file.endswith('.txt'):
txt_path = os.path.join(root, file)
doc_text.insert(tk.END, f"Parsing: {txt_path}\n")
doc_text.update()
doc_text.see(tk.END)
with open(txt_path, 'r', encoding='utf-8') as txt_file:
txt_text = txt_file.read()
f.write(txt_text)
elif file.endswith('.xml'):
xml_path = os.path.join(root, file)
doc_text.insert(tk.END, f"Parsing: {xml_path}\n")
doc_text.update()
doc_text.see(tk.END)
# Create a context for iteratively parsing the XML file
context = ET.iterparse(xml_path, events=('start', 'end'))
context = iter(context)
# Process the XML file chunk by chunk
for event, elem in context:
if event == 'end':
# Write the text content of the current element to the gz file
if elem.text:
f.write(elem.text)
# Clean up the processed element to save memory
elem.clear()
else:
skipped_path = skipped_path + "--" + os.path.join(root, file) + "\n"
if skipped_path:
doc_text.insert(tk.END, f"Unsupported Files:\n{skipped_path}\n")
doc_text.update()
doc_text.see(tk.END)
if url_file and os.path.exists(url_file):
with open(url_file, 'r') as url_file_obj:
url_list = [line.strip() for line in url_file_obj]
url_loader = SeleniumURLLoader(urls=url_list)
url_loader.headless = True
url_loader.continue_on_failure = True
url_loader.arguments = ['--disable-gpu','--log-level=3']
url_data = url_loader.load()
for i, data in enumerate(url_data):
text = data.page_content
f.write(text)
if USE_BOX:
if os.getenv("BOX_DEVELOPER_TOKEN"):
box_auth: BoxDeveloperTokenAuth = BoxDeveloperTokenAuth(token=BOX_TOKEN)
else:
if os.getenv("BOX_ENTERPRISE_ID"):
box_oauth_config = CCGConfig(
client_id=os.getenv("BOX_CLIENT_ID"),
client_secret=os.getenv("BOX_CLIENT_SECRET"),
enterprise_id=os.getenv("BOX_ENTERPRISE_ID")
)
else:
box_oauth_config = CCGConfig(
client_id=os.getenv("BOX_CLIENT_ID"),
client_secret=os.getenv("BOX_CLIENT_SECRET"),
user_id=os.getenv("BOX_USER_ID")
)
box_auth = BoxCCGAuth(config=box_oauth_config)
box_client: BoxClient = BoxClient(auth=box_auth)
for box_item in box_client.folders.get_folder_items(BOX_FOLDER_ID).entries:
boxfile_ext = box_item.name.split('.')[-1]
if boxfile_ext in ['vtt', 'txt', 'boxnote']:
boxfile_is_readable = True
else:
boxfile_is_readable = False
if box_item.type == 'file' and boxfile_is_readable:
try:
box_file = box_client.downloads.download_file(box_item.id).read()
if boxfile_ext == 'boxnote':
boxfile_data = json.loads(box_file.decode('utf-8'))
# Get the lastEditTimestamp value
timestamp_in_millis = boxfile_data.get('lastEditTimestamp')
if timestamp_in_millis:
timestamp_in_seconds = timestamp_in_millis / 1000
boxfile_timestamp = datetime.fromtimestamp(timestamp_in_seconds).strftime('%Y-%m-%d %H:%M:%S')
boxfile_text = boxfile_data.get('atext', {}).get('text', '')
f.write("Note name:" + box_item.name + " Date of note:" + boxfile_timestamp + " Note:" + boxfile_text)
elif boxfile_ext in ['vtt', 'txt']:
boxfile_text = box_file.decode('utf-8')
f.write("File name:" + box_item.name + " File Text:" + boxfile_text)
doc_text.insert(tk.END, f"Loaded box file: {box_item.name}\n")
doc_text.update()
doc_text.see(tk.END)
except Exception as e:
doc_text.insert(tk.END, f"Failed to load box file {box_item.name}: {e}\n")
doc_text.update()
doc_text.see(tk.END)
time.sleep(1) # Rate limit pause
if USE_NOTION:
notion_loader = NotionDBLoader(
integration_token=NOTION_TOKEN,
database_id=DATABASE_ID,
request_timeout_sec=10, # optional, defaults to 10
)
try:
notion_page_summaries = notion_loader._retrieve_page_summaries()
except Exception as e:
doc_text.insert(tk.END, f"Failed to load notion pages: {e}\n")
doc_text.update()
doc_text.see(tk.END)
openai_status_var.set("Failed to load notion pages: " + str(e))
notion_metadata_client = NotionClient(auth=NOTION_TOKEN)
for each_page in notion_page_summaries:
attempt = 0
while attempt < 2:
try:
# https://developers.notion.com/reference/block
page_blocks = notion_loader.load_page(each_page)
page_metadata = notion_metadata_client.pages.retrieve(each_page['id'])
page_content = page_blocks.page_content
# Get page text from the page blocks
page_name = page_blocks.metadata['name']
try:
page_due = page_metadata['properties']['Due']['date']
except:
page_due = None
try:
page_status = page_metadata['properties']['Status']['select']['name']
except:
page_status = None
try:
page_labels = page_metadata['properties']['Label']['multi_select'][0]['name']
except:
page_labels = None
# Write the page text to the gz file
write_str = ''
if page_name:
write_str += f"Page Title:{page_name}\n"
if page_due:
write_str += f"|Page Date Due:{page_due}\n"
if page_status:
write_str += f"|Page Status:{page_status}\n"
if page_labels:
write_str += f"|Page Labels:{page_labels}\n"
if page_content:
write_str += f"|Page Content:{page_content}\n"
f.write(write_str)
if attempt == 0:
doc_text.insert(tk.END, f"Loaded page: {page_name}\n")
else:
doc_text.insert(tk.END, f"Surccessfly loaded page: {page_name} after retry\n")
doc_text.update()
doc_text.see(tk.END)
break # if successful, break out of the while loop
except Exception as e:
attempt += 1
doc_text.insert(tk.END, f"Attempt {attempt} failed to load page {each_page['id']} : {e}\n")
doc_text.update()
doc_text.see(tk.END)
if attempt >= 2:
#print(f"Failed to load page {page_id} after {attempt} attempts")
doc_text.insert(tk.END, f"Failed to load page {each_page['id']} after {attempt} attempts\n")
doc_text.update()
doc_text.see(tk.END)
if (not os.path.exists(chain_path) or reindex) and data_use > 0:
# Initialize an empty list to store processed text chunks
processed_texts_cache = []
#Need to replace the magic numbers with variables and include them in the environment file
with gzip.open(compressed_raw_text_file, 'rt', encoding='utf-8') as f:
text_splitter = CustomTextSplitter(
separators=['\n', '. '],
chunk_size=1000,
chunk_overlap=100,
length_function=len,
)
current_chunk = ''
for line in f:
current_chunk += line
if len(current_chunk) >= text_splitter._chunk_size: # Corrected attribute name
# Process the current chunk
processed_chunk = text_splitter.split_text(current_chunk)
# Append the processed chunk to the cache
processed_texts_cache.extend(processed_chunk)
# Keep the chunk_overlap part of the current chunk for the next iteration
current_chunk = current_chunk[-text_splitter._chunk_overlap:] # Corrected attribute name
# Process the remaining part of the last chunk
if current_chunk:
processed_chunk = text_splitter.split_text(current_chunk)
processed_texts_cache.extend(processed_chunk)
os.remove(compressed_raw_text_file)
if USE_AZURE:
embeddings = OpenAIEmbeddings(model=EMBEDDINGS_MODEL,chunk_size=16)
else:
embeddings = OpenAIEmbeddings(model=EMBEDDINGS_MODEL,chunk_size=500)
docsearch = FAISS.from_texts(processed_texts_cache, embeddings)
docsearch.save_local(docsearch_path)
doc_chain.save(chain_path)
elif data_use > 0:
if USE_AZURE:
embeddings = OpenAIEmbeddings(model=EMBEDDINGS_MODEL,chunk_size=16)
else:
embeddings = OpenAIEmbeddings(model=EMBEDDINGS_MODEL,chunk_size=500)
docsearch = FAISS.load_local(docsearch_path, embeddings)
if data_use > 0:
# Load additional docsearch instances and combine them
if os.path.exists(add_docsearch_file):
with open(add_docsearch_file, 'r') as f:
add_docsearch_config = json.load(f)
additional_folders = add_docsearch_config.get('additional_folders', [])
for folder in additional_folders:
additional_docsearch_path = os.path.join(folder, 'docsearch')
if os.path.exists(additional_docsearch_path):
#print(f"Loading additional docsearch from {additional_docsearch_path}")
additional_docsearch = FAISS.load_local(additional_docsearch_path, embeddings)
docsearch.merge_from(additional_docsearch)
else:
doc_text.insert(tk.END, "Additional docsearch path " + additional_docsearch_path + " does not exist" + "\n")
doc_text.update()
doc_text.see(tk.END)
openai_status = ""
if query != '':
total_tokens = ""
openai_status = ""
answer = ""
if prompt_style:
question = f"'role': 'system', 'content':{prompt_style}\n'role': 'system', 'user'{query}"
else:
question = f"{query}"
if data_use == 1:
number_of_docs = int(float(total_docs_var.get()))
docs = docsearch.similarity_search(query, k=number_of_docs)
with get_openai_callback() as cb:
try:
answer = doc_chain.run(input_documents=docs, question=question)
except Exception as e:
if "maximum context length" in str(e):
try:
#Rate limit pause
time.sleep(5)
# Extract max_context_length
max_context_length = int(str(e).split("maximum context length is ")[1].split(" tokens.")[0])
# Extract num_tokens
num_tokens = int(str(e).split("you requested ")[1].split(" tokens")[0])
number_of_docs = calculate_num_docs(num_tokens, max_context_length)
docs = docsearch.similarity_search(query, k=number_of_docs)
answer = doc_chain.run(input_documents=docs, question=question)
openai_status += "Maximum tokens exceeded. Temporary reduced documents to " + str(number_of_docs) + " | "
except:
try:
#Rate limit pause
time.sleep(5)
adjusted_number_of_docs = float(total_docs_var.get()) * 0.5
number_of_docs = (int(adjusted_number_of_docs))
docs = docsearch.similarity_search(query, k=number_of_docs)
answer = doc_chain.run(input_documents=docs, question=question)
openai_status += "Maximum tokens exceeded. Temporary reduced documents to " + str(number_of_docs) + " | "
except:
try:
#Rate limit pause
time.sleep(5)
number_of_docs = 5
docs = docsearch.similarity_search(query, k=number_of_docs)
answer = doc_chain.run(input_documents=docs, question=question)
openai_status += "Maximum tokens exceeded. Temporary reduced documents to 5. | "
except:
doc_text.insert(tk.END, "Error: " + str(e) + "\n")
doc_text.update()
answer = ""
openai_status += "Error: " + str(e) + " | "
total_tokens = cb.total_tokens
elif data_use == 2:
number_of_docs = int(float(total_docs_var.get()))
docs = docsearch.similarity_search_with_score(query, k=number_of_docs)
answer = ""
else:
# Initialize an empty lists to store processed text chunks
docs = []
with get_openai_callback() as cb:
try:
answer = doc_chain.run(input_documents=docs, question=question)
except Exception as e:
print(e)
answer = ""
total_tokens = cb.total_tokens
if total_tokens:
openai_status += "Total tokens used: " + str(total_tokens)
return answer, docs, openai_status
else:
return "", None, openai_status
def calculate_num_docs(num_tokens, max_context_length):
num_docs = 1000
ratio = max_context_length / num_tokens
num_docs = math.floor(ratio * num_docs)
num_docs = num_docs // 10 * 10 # round down to nearest 10
num_docs = num_docs - 5 # subtract 5 to be safe
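    # Illustrative worked example (assumed numbers, not from the original code): with
    # num_tokens=12000 and max_context_length=8192, ratio ≈ 0.6827, so
    # floor(0.6827 * 1000) = 682 -> 680 (rounded down to nearest 10) -> 675 after subtracting 5.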
return num_docs | [
"Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question. The System defines the personality and instructions to modify the response.\n System: {prompt_style}\n Chat History:\n {chat_history}\n Follow Up Input: {question}\n Standalone question:"
] |
2024-01-10 | bigcode-project/bigcode-evaluation-harness | bigcode_eval~tasks~humanevalpack_openai.py | """Testing
from datasets import load_dataset
ds = load_dataset("bigcode/humaneval-x-bugs", "python")["test"]
idx = 0
def get_prompt_base(doc, language="python"):
# See
# https://github.com/roG0d/CodeGeeX/blob/f66205b5f615a4eead9c26d7ec297e14738ea18d/codegeex/benchmark/evaluate_humaneval_x.py#L78
# https://github.com/THUDM/CodeGeeX/pull/76#issuecomment-1500653190
if language == "rust":
main = "fn main(){}\n"
prompt_base = main + doc["declaration"] + doc["prompt"]
else:
prompt_base = doc["prompt"]
return prompt_base
prompt_base = get_prompt_base(ds[idx], language="python")
messages = [
{
"role": "user",
"content": ds[idx]["instruction"],
},
{
"role": "assistant",
"content": prompt_base,
},
]
gpt-4-0613
response = openai.ChatCompletion.create(
model="gpt-4-0613",
messages=messages
)
"""
import os
import openai
import jsonlines
import termcolor
from cdifflib import CSequenceMatcher
from camel_converter import to_snake
from datasets import load_dataset
from typing import List
from tqdm import tqdm
_CITATION = """
@article{muennighoff2023octopack,
title={OctoPack: Instruction Tuning Code Large Language Models},
author={Niklas Muennighoff and Qian Liu and Armel Zebaze and Qinkai Zheng and Binyuan Hui and Terry Yue Zhuo and Swayam Singh and Xiangru Tang and Leandro von Werra and Shayne Longpre},
journal={arXiv preprint arXiv:2308.07124},
year={2023}
}
"""
LANGUAGE_TO_NAME = {
"python": "Python",
"cpp": "C++",
"js": "JavaScript",
"java": "Java",
"go": "Go",
"rust": "Rust",
}
def get_prompt_base(doc, language):
# See
# https://github.com/roG0d/CodeGeeX/blob/f66205b5f615a4eead9c26d7ec297e14738ea18d/codegeex/benchmark/evaluate_humaneval_x.py#L78
# https://github.com/THUDM/CodeGeeX/pull/76#issuecomment-1500653190
if language == "rust":
main = "fn main(){}\n"
prompt_base = main + doc["declaration"]
else:
prompt_base = doc["declaration"]
return prompt_base
def get_prompt_synthesize(doc, language="python"):
# addon = f"Start your code with:\n{get_prompt_base(sample, language)}"
# return doc["instruction"] + "\n" + addon # Results in worse performance for GPT4
return doc["instruction"] # Problem: Difficult for problems that have helper functions
def get_base_prompt_fix(doc, language="python", mode="tests"):
if language == "rust":
if mode == "tests":
return "fn main(){}\n" + doc["declaration"]
elif mode == "docs":
return "fn main(){}\n" + doc["declaration"] + doc["prompt"]
else:
raise ValueError
else:
if mode == "tests":
return doc["declaration"]
elif mode == "docs":
return doc["prompt"]
else:
raise ValueError
def get_prompt_fix(doc, language="python", mode="tests"):
prompt_base = get_base_prompt_fix(doc, language, mode)
func = prompt_base + doc["buggy_solution"]
instruction = f'Fix bugs in {doc["entry_point"]}.'
return func + "\n" + instruction
def get_prompt_explain_desc(doc, language="python"):
if language == "rust":
main = "fn main(){}\n"
prompt_base = main + doc["declaration"]
else:
prompt_base = doc["declaration"]
docstring_len = len(doc["docstring"])
instruction = f"Provide a concise natural language description of the code using at most {docstring_len} characters."
func = prompt_base + doc["canonical_solution"]
return instruction + "\n" + func, docstring_len
def get_prompt_explain_syn(sample, desc, language="python"):
instruction = f"Write functional code in {LANGUAGE_TO_NAME[language]} according to the description."
addon = f"Start your code with:\n{get_prompt_base(sample, language)}"
return desc + "\n" + instruction + "\n" + addon
class ParseError(Exception):
pass
class ContentParser:
@staticmethod
def _entry_point_variations(entry_point: str) -> List[str]:
# NOTE: workaround dataset's bug with entry point naming
return [
entry_point,
to_snake(entry_point),
entry_point[0].lower() + entry_point[1:],
]
def __call__(self, prompt: str, content: str, entry_point: str):
# NOTE: Model doesn't follow instructions directly:
# adds description of change and sometimes fixes
# typos, or other "bugs" in description.
if "```" in content:
content = content.split("```")[1]
# first parse with assumption that content has description
matcher = CSequenceMatcher(None, prompt, content)
tag, _, _, j1, j2 = matcher.get_opcodes()[-1]
if tag == "insert":
return content[j1:j2]
# second parse content with assumption that model wrote code without description
for entry_point in self._entry_point_variations(entry_point):
if entry_point in content:
content = content.split(entry_point)[-1]
return "".join(content.splitlines(keepends=True)[1:])
raise ParseError(f"Prompt is not in content:\n{content}")
class ChatWrapper:
def __init__(self, model: str):
self._model = model
def __call__(self, prompt: str, n: int) -> str:
messages = [
{
"role": "user",
"content": prompt,
}
]
while True:
try:
response = openai.ChatCompletion.create(
model=self._model,
messages=messages,
temperature=0.2,
top_p=0.95,
n=n
)
content_list = list()
for i in range(n):
message = response["choices"][i]["message"]
assert message["role"] == "assistant"
content_list.append(message["content"])
return content_list
except Exception as e:
print("API EXCEPTION:", e)
if __name__ == '__main__':
TIMES = 1
VERBOSE = True
LANGUAGE = "python"
MODEL = "gpt-4-0613"
TASK = "humanevalsynthesize"
# Load descriptions
if TASK == "humanevalexplainsynthesize":
with jsonlines.open(f"completions_{LANGUAGE}_humanevalexplaindescribe.jsonl", "r") as f:
descriptions = [line["raw_generation"][0] for line in f]
openai.organization = os.getenv("OPENAI_ORGANIZATION")
openai.api_key = os.getenv("OPENAI_API_KEY")
samples = [s for s in load_dataset("bigcode/humanevalpack", LANGUAGE)["test"]]
chat_wrapper = ChatWrapper(MODEL)
parse_errors = 0
parser = ContentParser()
for idx, sample in enumerate(tqdm(samples)):
if TASK == "humanevalfix":
prompt = get_prompt_fix(sample, language=LANGUAGE, mode="tests")
elif TASK == "humanevalsynthesize":
prompt = get_prompt_synthesize(sample, language=LANGUAGE)
elif TASK == "humanevalexplaindescribe":
prompt, docstring_len = get_prompt_explain_desc(sample, language=LANGUAGE)
gen = chat_wrapper(prompt, TIMES)
sample["raw_generation"] = gen
sample["generation"] = [gen_item[:docstring_len] for gen_item in gen]
continue
elif TASK == "humanevalexplainsynthesize":
desc = descriptions[idx]
prompt = get_prompt_explain_syn(sample, desc, language=LANGUAGE)
if VERBOSE:
print(f"Processing {sample['task_id']} ({idx + 1}/{len(samples)}))...")
sample["raw_generation"] = chat_wrapper(prompt, TIMES)
try:
sample["generation"] = [parser(prompt, generation_item, sample["entry_point"]) for generation_item in sample["raw_generation"]]
except ParseError as e:
parse_errors += 1
print("PARSE EXCEPTION:", e)
sample["generation"] = [""]
if VERBOSE:
for i in range(TIMES):
print(termcolor.colored(sample["entry_point"], "yellow", attrs=["bold"]))
print(termcolor.colored(prompt, "yellow"))
print(termcolor.colored(sample["canonical_solution"], "red"))
print(termcolor.colored(sample["generation"][i], "green")+"\n\n")
if VERBOSE:
print("parse error rate:", parse_errors / len(samples))
results_filename = f"completions_{LANGUAGE}_{TASK}.jsonl"
with jsonlines.open(results_filename, "w") as writer:
writer.write_all(samples)
| [
"declaration",
"PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | HugoC28/reading-trainer | backend~controllers~readingComprehension.py | from dotenv import load_dotenv
from openai import AzureOpenAI
import re
import json
from flask import jsonify
from utils import text_endpoint, dalle_endpoint, azure_api_key, words, marks
import requests
load_dotenv()
def parse_text_to_object(text):
print(text)
# Split the input string into parts using the "STORY:", "PROMPT:", "QUESTION:", and "ANSWERS:" markers
parts = [part.strip() for part in text.split("STORY:")[1:]]
# Extract title of the story
fpart = text.split("STORY:")[0]
title = fpart.split("TITLE:")[1]
# Create a list of dictionaries
result = {}
for index, part in enumerate(parts):
story, rest = part.split("PROMPT:")
prompt, rest = rest.split("QUESTION:")
question, rest = rest.split("TRUE_ANSWER:")
true_answer, rest = rest.split("POSSIBLE_ANSWERS:")
answers = [ans.replace(">","").strip() for ans in rest.strip().split('\n') if ans.strip()]
# Construct the dictionary
result_dict = {
'story': story.strip(),
'prompt': prompt.strip(),
'question': question.strip(),
'true_answer': true_answer.strip(),
'answers': answers
}
# Append the dictionary to the result list
result[index] = result_dict
    # For a standard exercise structure, the result is a dictionary with three fields:
    # "Type" (the kind of exercise), "Exercise" (the parsed content above) and "Title"
    # (a story title used for display purposes).
exercise = {
"Type":"Reading Comprehension",
"Exercise": result,
"Title":title.strip()
}
return exercise
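# Illustrative sketch (assumed input, not from the original file): parse_text_to_object expects
# model output of the form
#   TITLE: <title> STORY: <part 1> PROMPT: <image prompt 1> QUESTION: <question 1>
#   TRUE_ANSWER: <answer 1> POSSIBLE_ANSWERS: <four answers, one per line>
# repeated for each part, and returns
#   {"Type": "Reading Comprehension", "Title": <title>, "Exercise": {0: {...}, 1: {...}}}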
def generateComprehensionTest(selected_topic, nbr_parts, difficulty):
messages = [{"role":"system","content":"You are a reading exercise generator, adapted for a 9 years old child with language impairments."}]
    # The difficult words could perhaps be collected from the user in the UI.
prompt = f'''Compose a short and engaging story for a 9-year-old child with reading difficulties, centered around {selected_topic}. The story should be a {marks[difficulty-1]} level for a 9-year-old child. The sentences should be simple, with clear and consistent structure. Ensure that the text is cohesive and forms an engaging narrative about {selected_topic}, including aspects of their appearance, behavior, and environment. This story must contain {nbr_parts} parts, each part should be approximately {words[difficulty-1]} words. For each part, give on DALL-E prompts that describes the related part. Be consistent with the prompts and always describe the characters in the same way. Also add for each of those part one Multiple Choice Question of difficulty {marks[difficulty-1]} related to the part, to test the child's text comprehension. Try not to ask questions that can be answered only with the generated image, to really test child's text comprehension.\nYou must follow this exact structure, with i from 1 to {nbr_parts}, don't add any other details such as specific separators, part titles, transitions or advices :\nSTORY: <story's part i>\nPROMPT: <DALL-E script for part i>\nQUESTION: <MCQ question for part i>\nTRUE_ANSWER: <the true answer among the 4 possible answers>\nPOSSIBLE_ANSWERS: <4 possible answers for part i (containing TRUE_ANSWER, with the exact same syntax (letters and punctuation), at a random position, different for each question), separated by \n >\n Start the response with TITLE:<title of the story>'''
messages.append({"role":"user","content":prompt})
# Try to generate the exercise and prompts with gpt 4 in this try block.
try:
textClient = AzureOpenAI(
api_version="2023-12-01-preview",
api_key=azure_api_key,
azure_endpoint=text_endpoint
)
response = textClient.chat.completions.create(
model="gpt-4", # model = "deployment_name".
messages=messages
)
chatGPTReply = response.choices[0].message.content
parsedText = parse_text_to_object(chatGPTReply)
except requests.RequestException as e:
print(f"Error in generating the exercise and prompts: {e}")
return jsonify({"error": "Internal Server Error"}), 500
# Try to generate the images in this try block.
try:
        # Different models have different endpoints
dalleClient = AzureOpenAI(
api_version="2023-12-01-preview",
api_key=azure_api_key,
azure_endpoint=dalle_endpoint
)
# Loop through the prompts and sentences and generate the images
for key, value in parsedText["Exercise"].items():
print(key, value)
result = dalleClient.images.generate(
#model= "dall-e-3", # the name of your DALL-E 3 deployment
prompt= value["prompt"]+"Use a cartoon style.",
n=1
)
json_response = json.loads(result.model_dump_json())
image_url = json_response["data"][0]["url"] # extract image URL from response
parsedText["Exercise"][key]["url"] = image_url
except Exception as e:
print(f"Error in generating the images: {e}")
return jsonify({"error": "Internal Server Error"}), 500
print(parsedText)
return jsonify(parsedText), 200 | [
"You are a reading exercise generator, adapted for a 9 years old child with language impairments.",
"f'''Compose a short and engaging story for a 9-year-old child with reading difficulties, centered around {selected_topic}. The story should be a {marks[difficulty-1]} level for a 9-year-old child. The sentences should be simple, with clear and consistent structure. Ensure that the text is cohesive and forms an engaging narrative about {selected_topic}, including aspects of their appearance, behavior, and environment. This story must contain {nbr_parts} parts, each part should be approximately {words[difficulty-1]} words. For each part, give on DALL-E prompts that describes the related part. Be consistent with the prompts and always describe the characters in the same way. Also add for each of those part one Multiple Choice Question of difficulty {marks[difficulty-1]} related to the part, to test the child's text comprehension. Try not to ask questions that can be answered only with the generated image, to really test child's text comprehension.\\nYou must follow this exact structure, with i from 1 to {nbr_parts}, don't add any other details such as specific separators, part titles, transitions or advices :\\nSTORY: <story's part i>\\nPROMPT: <DALL-E script for part i>\\nQUESTION: <MCQ question for part i>\\nTRUE_ANSWER: <the true answer among the 4 possible answers>\\nPOSSIBLE_ANSWERS: <4 possible answers for part i (containing TRUE_ANSWER, with the exact same syntax (letters and punctuation), at a random position, different for each question), separated by \\n >\\n Start the response with TITLE:<title of the story>"
] |
2024-01-10 | HugoC28/reading-trainer | backend~controllers~vocabularyBuilding.py | from dotenv import load_dotenv
from openai import AzureOpenAI
import re
import json
from flask import jsonify
from utils import text_endpoint, dalle_endpoint, azure_api_key, marks, words
import requests
load_dotenv()
def parse_story_prompt(text):
# Regular expressions to match the title, story parts, and prompts
title_pattern = r"Title: \"([^\"]+)\""
story_pattern = r"Story Part (\d+): \"([^\"]+)\""
prompt_pattern = r"Prompt for DALLE \(Part (\d+)\): \"([^\"]+)\""
# Extract title
title_match = re.search(title_pattern, text)
title = title_match.group(1) if title_match else None
# Extract story parts and prompts
stories = re.findall(story_pattern, text)
prompts = re.findall(prompt_pattern, text)
# Convert stories and prompts into a dictionary
exercises = {}
for story_part, story_text in stories:
corresponding_prompt = next((prompt_text for part, prompt_text in prompts if part == story_part), None)
exercises[story_part] = {"story": story_text, "prompt": corresponding_prompt}
# Construct the final data structure
data = { "Type":"Vocabulary Building", "Title": title, "Exercise": exercises}
return data
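# Illustrative sketch (assumed input, not from the original file): the regexes above expect
# model output shaped like
#   Title: "The Brave Fox"
#   Story Part 1: "..."
#   Prompt for DALLE (Part 1): "..."
# and produce {"Type": "Vocabulary Building", "Title": "The Brave Fox",
#              "Exercise": {"1": {"story": "...", "prompt": "..."}}}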
#messages = [{"role":"system","content":"You are a reading exercise generator who is used to generate Vocabulary texts: They are texts with a controlled vocabulary, made in order for the patient to learn and remember certain words that are difficult to them. "}]
def generateVocabularyText(selected_topic, exercise_number, difficulty):
messages = [{"role":"system","content":"You are a reading exercise generator who is used to generate Vocabulary texts: They are texts with a controlled vocabulary, made in order for the patient to learn and remember certain words that are difficult to them. "}]
prompt = f'''Generate a reading exercise and a image prompt on the difficult words {selected_topic}. The exercise should consist of {exercise_number} parts, each with a controlled vocabulary suited for the {marks[difficulty-1]} level. Repeat the difficult words several time in the exercise. The text in each part should be approximately {words[difficulty-1]} words.\n\n For each part of the exercise, also provide a descriptive prompt for image generator to create an image that visually represents the story part.\n\n Format your response as follows:\n\n Title: "Title of the story"\nStory Part 1: "Generated story part 1"\n Prompt for DALLE (Part 1): "Image prompt describing story part 1"\n...\nStory Part {exercise_number}: "Generated story part {exercise_number}"\nPrompt for DALLE (Part {exercise_number}): "Image prompt describing story part {exercise_number}"'''
messages.append({"role":"user","content":prompt})
# Try to generate the exercise and prompts with gpt 4 in this try block.
try:
textClient = AzureOpenAI(
api_version="2023-12-01-preview",
api_key=azure_api_key,
azure_endpoint=text_endpoint
)
print(messages)
response = textClient.chat.completions.create(
model="gpt-4", # model = "deployment_name".
messages=messages
)
chatGPTReply = response.choices[0].message.content
parsedText = parse_story_prompt(chatGPTReply)
except requests.RequestException as e:
print(f"Error in generating the exercise and prompts: {e}")
return jsonify({"error": "Internal Server Error"}), 500
# Try to generate the images in this try block.
try:
        # Different models have different endpoints
dalleClient = AzureOpenAI(
api_version="2023-12-01-preview",
api_key=azure_api_key,
azure_endpoint=dalle_endpoint
)
# Loop through the prompts and sentences and generate the images
for key, value in parsedText["Exercise"].items():
result = dalleClient.images.generate(
#model= "dall-e-3", # the name of your DALL-E 3 deployment
prompt= value["prompt"]+"Use a cartoon style.",
n=1
)
print(result)
json_response = json.loads(result.model_dump_json())
image_url = json_response["data"][0]["url"] # extract image URL from response
parsedText["Exercise"][key]["url"] = image_url
except Exception as e:
print(f"Error in generating the images: {e}")
return jsonify({"error": "Internal Server Error"}), 500
print("========================================\n")
print("Parsed Text:")
print(parsedText["Exercise"])
return jsonify(parsedText), 200 | [
"f'''Generate a reading exercise and a image prompt on the difficult words {selected_topic}. The exercise should consist of {exercise_number} parts, each with a controlled vocabulary suited for the {marks[difficulty-1]} level. Repeat the difficult words several time in the exercise. The text in each part should be approximately {words[difficulty-1]} words.\\n\\n For each part of the exercise, also provide a descriptive prompt for image generator to create an image that visually represents the story part.\\n\\n Format your response as follows:\\n\\n Title: \"Title of the story\"\\nStory Part 1: \"Generated story part 1\"\\n Prompt for DALLE (Part 1): \"Image prompt describing story part 1\"\\n...\\nStory Part {exercise_number}: \"Generated story part {exercise_number}\"\\nPrompt for DALLE (Part {exercise_number}): \"Image prompt describing story part {exercise_number}",
"You are a reading exercise generator who is used to generate Vocabulary texts: They are texts with a controlled vocabulary, made in order for the patient to learn and remember certain words that are difficult to them. ",
"Prompt for DALLE \\(Part (\\d+)\\): \\\"([^\\\"]+)\\\""
] |
2024-01-10 | gloveboxes/azure-openai-service-proxy | examples~python~openai_sdk_0.28.x~azure_openai_functions.py | """ Test Azure OpenAI Functions API """
# See documentation at https://gloveboxes.github.io/azure-openai-service-proxy/category/developer-endpoints/
import os
import openai
from dotenv import load_dotenv
load_dotenv()
ENDPOINT_URL = os.environ.get("ENDPOINT_URL")
API_KEY = os.environ.get("API_KEY")
API_VERSION = "2023-09-01-preview"
DEPLOYMENT_NAME = "gpt-3.5-turbo"
openai.api_type = "azure"
openai.api_key = API_KEY
openai.api_base = ENDPOINT_URL
openai.api_version = API_VERSION
messages = [
{
"role": "system",
"content": (
"Don't make assumptions about what values to plug into functions. "
"Ask for clarification if a user request is ambiguous."
),
},
{"role": "user", "content": "What's the weather like today in seattle"},
]
functions = [
{
"name": "get_current_weather",
"description": "Get the current weather",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, e.g. San Francisco, CA",
},
"format": {
"type": "string",
"enum": ["celsius", "fahrenheit"],
"description": "The temperature unit to use. Infer this from the users location.",
},
},
"required": ["location", "format"],
},
},
{
"name": "get_n_day_weather_forecast",
"description": "Get an N-day weather forecast",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, e.g. San Francisco, CA",
},
"format": {
"type": "string",
"enum": ["celsius", "fahrenheit"],
"description": "The temperature unit to use. Infer this from the users location.",
},
"num_days": {
"type": "integer",
"description": "The number of days to forecast",
},
},
"required": ["location", "format", "num_days"],
},
},
]
completion = openai.ChatCompletion.create(
deployment_id=DEPLOYMENT_NAME,
messages=messages,
functions=functions,
)
print(completion)
print()
print(completion.choices[0].finish_reason)
print(completion.choices[0].message.function_call)
| [
"What's the weather like today in seattle",
"Don't make assumptions about what values to plug into functions. Ask for clarification if a user request is ambiguous."
] |
2024-01-10 | gloveboxes/azure-openai-service-proxy | src~proxy~app~images.py | """ Images Generations API for Dall-e models"""
import logging
from enum import Enum
from typing import Any
from fastapi import HTTPException
from pydantic import BaseModel
from .configuration import OpenAIConfig
from .openai_async import OpenAIAsyncManager
OPENAI_IMAGES_GENERATIONS_API_VERSION = "2023-12-01-preview"
logging.basicConfig(level=logging.WARNING)
class ResponseFormat(Enum):
"""Response Format"""
URL = "url"
BASE64 = "b64_json"
class ImageSize(Enum):
"""Image Size"""
IS_1024X1024 = "1024x1024"
IS_1792X1024 = "1792x1024"
IS_1024X1792 = "1024x1792"
class ImageQuality(Enum):
"""Image Quality"""
HD = "hd"
STANDARD = "standard"
class ImageStyle(Enum):
"""Image Style"""
VIVID = "vivid"
NATURAL = "natural"
class ImagesRequest(BaseModel):
"""OpenAI Images Generations Request"""
prompt: str
# response_format: ResponseFormat = ResponseFormat.URL
n: int = 1
size: ImageSize = ImageSize.IS_1024X1024
quality: ImageQuality = ImageQuality.HD
style: ImageStyle = ImageStyle.VIVID
api_version: str = OPENAI_IMAGES_GENERATIONS_API_VERSION
class Images:
"""OpenAI Images Generations Manager"""
def __init__(self, openai_config: OpenAIConfig):
"""init in memory session manager"""
self.openai_config = openai_config
self.logger = logging.getLogger(__name__)
def report_exception(self, message: str, http_status_code: int) -> Any:
"""report exception"""
self.logger.warning(msg=f"{message}")
raise HTTPException(
status_code=http_status_code,
detail=message,
)
def validate_input(self, images: ImagesRequest):
"""validate input"""
# do some basic input validation
if not images.prompt:
return self.report_exception("Oops, no prompt.", 400)
if len(images.prompt) > 1000:
return self.report_exception("Oops, prompt is too long. The maximum length is 1000 characters.", 400)
# check the image_count is 1
if images.n and images.n != 1:
return self.report_exception("Oops, image_count must be 1.", 400)
# check the image_size is between 256x256, 512x512, 1024x1024
if images.size and images.size not in ImageSize:
return self.report_exception("Oops, image_size must be 1792x1024, 1024x1792, 1024x1024.", 400)
if images.quality and images.quality not in ImageQuality:
return self.report_exception("Oops, image_quality must be hd, standard.", 400)
if images.style and images.style not in ImageStyle:
return self.report_exception("Oops, image_style must be vivid, natural.", 400)
async def call_openai_images_generations(self, images: ImagesRequest) -> Any:
"""call openai with retry"""
self.validate_input(images)
deployment = await self.openai_config.get_deployment()
openai_request = {
"prompt": images.prompt,
"size": images.size.value,
"n": images.n,
"quality": images.quality.value,
"style": images.style.value,
}
url = (
f"https://{deployment.resource_name}.openai.azure.com/openai/deployments/"
f"{deployment.deployment_name}/images/generations"
f"?api-version={images.api_version}"
)
async_mgr = OpenAIAsyncManager(deployment)
response = await async_mgr.async_post(openai_request, url)
response_json = response.json()
return response_json, response.status_code
| [] |
2024-01-10 | gloveboxes/azure-openai-service-proxy | src~proxy~app~chat_completions.py | """ Chat Completions API """
import logging
from typing import Any
from fastapi import HTTPException
from pydantic import BaseModel
from .configuration import OpenAIConfig
from .openai_async import OpenAIAsyncManager
OPENAI_CHAT_COMPLETIONS_API_VERSION = "2023-09-01-preview"
OPENAI_CHAT_COMPLETIONS_EXTENSIONS_API_VERSION = "2023-08-01-preview"
logging.basicConfig(level=logging.WARNING)
class ChatCompletionsRequest(BaseModel):
"""OpenAI Chat Request"""
messages: list[dict[str, str]]
dataSources: list[Any] | None = None
max_tokens: int = None
temperature: float = None
n: int | None = None
stream: bool = False
top_p: float | None = None
stop: str | list[str] | None = None
frequency_penalty: float | None = None
presence_penalty: float | None = None
functions: list[dict[str, Any]] | None = None
function_call: str | dict[str, str] | None = None
api_version: str | None = None
extensions: bool = False
class ChatCompletions:
"""OpenAI Chat Completions Manager"""
def __init__(self, openai_config: OpenAIConfig):
"""init in memory session manager"""
self.openai_config = openai_config
self.logger = logging.getLogger(__name__)
def __throw_validation_error(self, message: str, status_code: int):
"""throw validation error"""
raise HTTPException(
status_code=status_code,
detail=message,
)
def validate_input(self, chat: ChatCompletionsRequest):
"""validate input"""
# do some basic input validation
# check the max_tokens is between 1 and 4096
if chat.max_tokens is not None and not 1 <= chat.max_tokens <= 4096:
self.__throw_validation_error("Oops, max_tokens must be between 1 and 4096.", 400)
if chat.n is not None and not 1 <= chat.n <= 10:
self.__throw_validation_error("Oops, n must be between 1 and 10.", 400)
# check the temperature is between 0 and 1
if chat.temperature is not None and not 0 <= chat.temperature <= 1:
self.__throw_validation_error("Oops, temperature must be between 0 and 1.", 400)
# check the top_p is between 0 and 1
if chat.top_p is not None and not 0 <= chat.top_p <= 1:
self.__throw_validation_error("Oops, top_p must be between 0 and 1.", 400)
# check the frequency_penalty is between 0 and 1
if chat.frequency_penalty is not None and not 0 <= chat.frequency_penalty <= 1:
self.__throw_validation_error("Oops, frequency_penalty must be between 0 and 1.", 400)
# check the presence_penalty is between 0 and 1
if chat.presence_penalty is not None and not 0 <= chat.presence_penalty <= 1:
self.__throw_validation_error("Oops, presence_penalty must be between 0 and 1.", 400)
async def call_openai_chat_completion(
self,
chat: ChatCompletionsRequest,
) -> Any:
"""call openai with retry"""
self.validate_input(chat)
deployment = await self.openai_config.get_deployment()
# if dataSources are provided, use the extensions API
if chat.extensions:
api_version = chat.api_version or OPENAI_CHAT_COMPLETIONS_EXTENSIONS_API_VERSION
url = (
f"https://{deployment.resource_name}.openai.azure.com/openai/deployments/"
f"{deployment.deployment_name}/extensions/chat/completions"
f"?api-version={api_version}"
)
else:
api_version = chat.api_version or OPENAI_CHAT_COMPLETIONS_API_VERSION
url = (
f"https://{deployment.resource_name}.openai.azure.com/openai/deployments/"
f"{deployment.deployment_name}/chat/completions"
f"?api-version={api_version}"
)
del chat.extensions
del chat.api_version
openai_request = {}
for key, value in chat.__dict__.items():
if value is not None:
openai_request[key] = value
async_mgr = OpenAIAsyncManager(deployment)
if chat.stream:
(response, status_code) = await async_mgr.async_post_streaming(openai_request, url)
else:
(response, status_code) = await async_mgr.async_openai_post(openai_request, url)
response["model"] = deployment.friendly_name
return response, status_code
| [] |
2024-01-10 | gloveboxes/azure-openai-service-proxy | examples~python~openai_sdk_0.28.x~azure_openai_completions.py | """ Test completions with azure openai """
# See documentation at https://gloveboxes.github.io/azure-openai-service-proxy/category/developer-endpoints/
import os
import openai
from dotenv import load_dotenv
load_dotenv()
ENDPOINT_URL = os.environ.get("ENDPOINT_URL")
API_KEY = os.environ.get("API_KEY")
API_VERSION = "2023-09-01-preview"
DEPLOYMENT_NAME = "davinci-002"
ENGINE_NAME = "text-davinci-002-prod"
openai.api_type = "azure"
openai.api_key = API_KEY
openai.api_base = ENDPOINT_URL
openai.api_version = API_VERSION
response = openai.Completion.create(engine=ENGINE_NAME, prompt="This is a test", max_tokens=5)
print(response)
| [
"This is a test"
] |
2024-01-10 | gloveboxes/azure-openai-service-proxy | examples~python~openai_sdk_0.28.x~azure_openai_chat.py | """ Test Azure OpenAI Chat Completions API """
# See documentation at https://gloveboxes.github.io/azure-openai-service-proxy/category/developer-endpoints/
import os
import openai
from dotenv import load_dotenv
load_dotenv()
ENDPOINT_URL = os.environ.get("ENDPOINT_URL")
API_KEY = os.environ.get("API_KEY")
API_VERSION = "2023-09-01-preview"
DEPLOYMENT_NAME = "gpt-3.5-turbo"
openai.api_type = "azure"
openai.api_key = API_KEY
openai.api_base = ENDPOINT_URL
openai.api_version = API_VERSION
MESSAGES = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Who won the world series in 2020?"},
{
"role": "assistant",
"content": "The Los Angeles Dodgers won the World Series in 2020.",
},
{"role": "user", "content": "Where was it played?"},
]
completion = openai.ChatCompletion.create(
deployment_id=DEPLOYMENT_NAME,
messages=MESSAGES,
)
print(completion)
print()
print(completion.choices[0].message.content)
| [
"Where was it played?",
"You are a helpful assistant.",
"The Los Angeles Dodgers won the World Series in 2020.",
"Who won the world series in 2020?"
] |
2024-01-10 | gloveboxes/azure-openai-service-proxy | examples~python~openai_sdk_0.28.x~azure_openai_embeddings.py | """ Test Azure OpenAI Embeddings API """
# See documentation at https://gloveboxes.github.io/azure-openai-service-proxy/category/developer-endpoints/
import os
import openai
from dotenv import load_dotenv
load_dotenv()
ENDPOINT_URL = os.environ.get("ENDPOINT_URL")
API_KEY = os.environ.get("API_KEY")
API_VERSION = "2023-08-01-preview"
DEPLOYMENT_NAME = "text-embedding-ada-002"
openai.api_type = "azure"
openai.api_key = API_KEY
openai.api_base = ENDPOINT_URL
content = (
"This stunning leather wrap bracelet will add a touch of bohemian flair to your outfit."
"The bracelet features a braided leather band in a rich brown color, adorned with turquoise beads and silver charms. " # noqa: E501
"The bracelet wraps around your wrist multiple times, creating a layered look that is eye-catching and stylish. "
"The bracelet is adjustable and has a button closure for a secure fit. "
"This leather wrap bracelet is the perfect accessory for any occasion, "
"whether you want to dress up a casual outfit or add some color to a formal one."
)
query_embeddings = openai.Embedding.create(
engine=DEPLOYMENT_NAME,
input=str(content),
encoding_format="float",
api_version="2023-08-01-preview",
)
print(query_embeddings)
print(query_embeddings.data[0].embedding)
| [] |
2024-01-10 | gloveboxes/azure-openai-service-proxy | examples~python~openai_sdk_1.x~azure_openai_chat_streaming_your_data.py | """ Test Azure OpenAI Chat Completions Stream API """
# Create a new Azure Cognitive Search index and load an index with Azure content
# https://microsoftlearning.github.io/mslearn-knowledge-mining/Instructions/Labs/10-vector-search-exercise.html
# https://learn.microsoft.com/en-us/azure/ai-services/openai/use-your-data-quickstart?tabs=command-line%2Cpython-new&pivots=programming-language-python#create-the-python-app
import os
import time
from dotenv import load_dotenv
from openai import AzureOpenAI
load_dotenv()
ENDPOINT_URL = os.environ.get("ENDPOINT_URL")
API_KEY = os.environ.get("API_KEY")
AZURE_AI_SEARCH_ENDPOINT = os.environ.get("AZURE_AI_SEARCH_ENDPOINT")
AZURE_AI_SEARCH_KEY = os.environ.get("AZURE_AI_SEARCH_KEY")
AZURE_AI_SEARCH_INDEX_NAME = os.environ.get("AZURE_AI_SEARCH_INDEX_NAME")
API_VERSION = "2023-09-01-preview"
MODEL_NAME = "gpt-35-turbo"
client = AzureOpenAI(
base_url=f"{ENDPOINT_URL}/openai/deployments/deployment/extensions",
api_key=API_KEY,
api_version=API_VERSION,
)
messages = [
{
"role": "user",
"content": ("What are the differences between Azure Machine Learning " "and Azure AI services?"),
},
]
body = {
"dataSources": [
{
"type": "AzureCognitiveSearch",
"parameters": {
"endpoint": AZURE_AI_SEARCH_ENDPOINT,
"key": AZURE_AI_SEARCH_KEY,
"indexName": AZURE_AI_SEARCH_INDEX_NAME,
},
}
]
}
response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=messages,
extra_body=body,
stream=True,
max_tokens=100,
)
# turn off print buffering
# https://stackoverflow.com/questions/107705/disable-output-buffering
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
content = chunk.choices[0].delta.content
if content:
print(content, end="", flush=True)
# delay to simulate real-time chat
time.sleep(0.05)
print()
| [
"What are the differences between Azure Machine Learning and Azure AI services?"
] |
2024-01-10 | gloveboxes/azure-openai-service-proxy | examples~python~openai_sdk_1.x~azure_openai_chat_streaming.py | """ Test Azure OpenAI Chat Completions Stream API """
import os
import time
from dotenv import load_dotenv
from openai import AzureOpenAI
load_dotenv()
ENDPOINT_URL = os.environ.get("ENDPOINT_URL")
API_KEY = os.environ.get("API_KEY")
API_VERSION = "2023-09-01-preview"
MODEL_NAME = "text-davinci-002"
client = AzureOpenAI(
base_url=ENDPOINT_URL,
api_key=API_KEY,
api_version=API_VERSION,
)
MESSAGES = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Who won the world series in 2020?"},
{
"role": "assistant",
"content": "The Los Angeles Dodgers won the World Series in 2020.",
},
{"role": "user", "content": "Where was it played?"},
]
response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "What is the meaning of life!"},
],
stream=True,
max_tokens=100,
)
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
content = chunk.choices[0].delta.content
if content:
print(content, end="", flush=True)
# delay to simulate real-time chat
time.sleep(0.05)
print()
| [
"Where was it played?",
"Who won the world series in 2020?",
"What is the meaning of life!",
"You are a helpful assistant.",
"The Los Angeles Dodgers won the World Series in 2020."
] |
2024-01-10 | gloveboxes/azure-openai-service-proxy | examples~python~openai_sdk_0.28.x~azure_openai_langchain.py | """ Test langchain with azure openai """
# See documentation at https://gloveboxes.github.io/azure-openai-service-proxy/category/developer-endpoints/
import os
import openai
from dotenv import load_dotenv
from langchain.llms import AzureOpenAI
load_dotenv()
ENDPOINT_URL = os.environ.get("ENDPOINT_URL")
API_KEY = os.environ.get("API_KEY")
API_VERSION = "2023-09-01-preview"
DEPLOYMENT_NAME = "davinci-002"
openai.api_type = "azure"
openai.api_key = API_KEY
openai.api_base = ENDPOINT_URL
openai.api_version = API_VERSION
llm = AzureOpenAI(
deployment_name=DEPLOYMENT_NAME,
openai_api_version=API_VERSION,
openai_api_key=API_KEY,
)
print(llm("Tell me a joke"))
| [] |
2024-01-10 | gloveboxes/azure-openai-service-proxy | src~proxy~app~image_generation.py | """ Images Generations API """
import logging
import os
from enum import Enum
from typing import Any
from fastapi import HTTPException, Request, Response
from pydantic import BaseModel
from .configuration import OpenAIConfig
from .openai_async import OpenAIAsyncManager
OPENAI_IMAGES_GENERATIONS_API_VERSION = "2023-06-01-preview"
logging.basicConfig(level=logging.WARNING)
class ResponseFormat(Enum):
"""Response Format"""
URL = "url"
BASE64 = "b64_json"
class ImageSize(Enum):
"""Image Size"""
IS_256X256 = "256x256"
IS_512X512 = "512x512"
IS_1024X1024 = "1024x1024"
class DalleTimeoutError(Exception):
"""Raised when the Dalle request times out"""
class ImagesGenerationsRequst(BaseModel):
"""OpenAI Images Generations Request"""
prompt: str
response_format: ResponseFormat = ResponseFormat.URL
n: int = 1
size: ImageSize = ImageSize.IS_1024X1024
user: str = None
api_version: str = OPENAI_IMAGES_GENERATIONS_API_VERSION
class ImagesGenerations:
"""OpenAI Images Generations Manager"""
def __init__(self, openai_config: OpenAIConfig):
"""init in memory session manager"""
self.openai_config = openai_config
self.logger = logging.getLogger(__name__)
def report_exception(self, message: str, http_status_code: int) -> Any:
"""report exception"""
self.logger.warning(msg=f"{message}")
raise HTTPException(
status_code=http_status_code,
detail=message,
)
def validate_input(self, images: ImagesGenerationsRequst):
"""validate input"""
# do some basic input validation
if not images.prompt:
return self.report_exception("Oops, no prompt.", 400)
if len(images.prompt) > 1000:
return self.report_exception("Oops, prompt is too long. The maximum length is 1000 characters.", 400)
# check the image_count is between 1 and 5
if images.n and not 1 <= images.n <= 5:
return self.report_exception("Oops, image_count must be between 1 and 5 inclusive.", 400)
# check the image_size is between 256x256, 512x512, 1024x1024
if images.size and images.size not in ImageSize:
return self.report_exception("Oops, image_size must be 256x256, 512x512, 1024x1024.", 400)
# check the response_format is url or base64
if images.response_format and images.response_format not in ResponseFormat:
return self.report_exception("Oops, response_format must be url or b64_json.", 400)
async def call_openai_images_generations(
self, images: ImagesGenerationsRequst, request: Request, response: Response
) -> Any:
"""call openai with retry"""
self.validate_input(images)
deployment = await self.openai_config.get_deployment()
openai_request = {
"prompt": images.prompt,
"n": images.n,
"size": images.size.value,
"response_format": images.response_format.value,
}
url = (
f"https://{deployment.resource_name}.openai.azure.com"
"/openai/images/generations:submit"
f"?api-version={images.api_version}"
)
async_mgr = OpenAIAsyncManager(deployment)
dalle_response = await async_mgr.async_post(openai_request, url)
if "operation-location" in dalle_response.headers:
original_location = dalle_response.headers["operation-location"]
port = f":{request.url.port}" if request.url.port else ""
original_location_suffix = original_location.split("/openai", 1)[1]
if os.environ.get("ENVIRONMENT") == "development":
proxy_location = (
f"http://{request.url.hostname}{port}"
f"/api/v1/{deployment.friendly_name}/openai{original_location_suffix}"
)
else:
proxy_location = (
f"https://{request.url.hostname}{port}"
f"/api/v1/{deployment.friendly_name}/openai{original_location_suffix}"
)
response.headers.append("operation-location", proxy_location)
return dalle_response.json(), dalle_response.status_code
async def call_openai_images_get(
self,
friendly_name: str,
image_id: str,
api_version: str = OPENAI_IMAGES_GENERATIONS_API_VERSION,
):
"""call openai with retry"""
deployment = await self.openai_config.get_deployment_by_friendly_name(friendly_name)
if deployment is None:
return self.report_exception("Oops, failed to find service to generate image.", 404)
url = (
f"https://{deployment.resource_name}.openai.azure.com"
f"/openai/operations/images/{image_id}"
f"?api-version={api_version}"
)
async_mgr = OpenAIAsyncManager(deployment)
dalle_response = await async_mgr.async_get(url)
return dalle_response.json(), dalle_response.status_code
| [] |
2024-01-10 | gloveboxes/azure-openai-service-proxy | src~proxy~app~routes~request_manager.py | """ Request Manager base class """
from fastapi import APIRouter, FastAPI, HTTPException, Request
# pylint: disable=E0402
from ..authorize import Authorize, AuthorizeResponse
from ..configuration import OpenAIConfig
from ..management import DeploymentClass
from ..rate_limit import RateLimit
class RequestManager:
"""Request Manager base class"""
def __init__(
self,
*,
app: FastAPI,
authorize: Authorize,
connection_string: str,
prefix: str,
tags: list[str],
deployment_class: DeploymentClass,
request_class_mgr,
):
self.app = app
self.authorize = authorize
self.prefix = prefix
self.tags = tags
self.deployment_class = deployment_class
openai_config = OpenAIConfig(
connection_string=connection_string,
model_class=deployment_class,
)
self.request_class_mgr = request_class_mgr(openai_config)
self.router = APIRouter()
self.rate_limit = RateLimit()
    async def authorize_request(self, deployment_id: str, request: Request) -> AuthorizeResponse:
"""authorize request"""
authorize_response = await self.authorize.authorize_api_access(
headers=request.headers,
deployment_id=deployment_id,
request_class=self.deployment_class,
)
if self.rate_limit.is_call_rate_exceeded(authorize_response.user_token):
raise HTTPException(
status_code=429,
detail="Rate limit exceeded. Try again in 10 seconds",
)
return authorize_response
| [] |
2024-01-10 | TheQuantumFractal/DocumentationRAG | services.py | import pinecone
import os
from langchain.llms import Modal
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Pinecone
pinecone.init(
api_key=os.environ['PINECONE_API_KEY'],
environment='gcp-starter'
)
model = Modal(endpoint_url=os.environ['MODAL_ENDPOINT_URL'])
INDEX_NAME = 'modal'
embeddings = OpenAIEmbeddings()
docsearch = Pinecone.from_existing_index(INDEX_NAME, embeddings)
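# Illustrative usage (assumes the Pinecone index already holds embedded documents;
# the query text below is made up):
# docs = docsearch.similarity_search("How do I schedule a Modal function?", k=4)
# reply = model(f"Answer using this context:\n\n{docs[0].page_content}")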
| [] |
2024-01-10 | AchintyaX/Topic_modelling | clean_tweets.py | from sklearn.feature_extraction.text import CountVectorizer
from gensim.corpora import Dictionary
from gensim.models.ldamodel import LdaModel
from gensim.models import CoherenceModel
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from datetime import datetime
import nltk
nltk.download('stopwords')
import pandas as pd
import re
import math
def clean_tweets(df,
tweet_col='text',
):
df_copy = df.copy()
# drop rows with empty values
df_copy.dropna(inplace=True)
# lower the tweets
df_copy['preprocessed_' + tweet_col] = df_copy[tweet_col].str.lower()
# filter out stop words and URLs
en_stop_words = set(stopwords.words('english'))
extended_stop_words = en_stop_words | \
{
'&', 'rt',
'th','co', 're', 've', 'kim', 'daca'
}
    url_re = r'(https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|www\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9]+\.[^\s]{2,}|www\.[a-zA-Z0-9]+\.[^\s]{2,})'
df_copy['preprocessed_' + tweet_col] = df_copy['preprocessed_' + tweet_col].apply(lambda row: ' '.join([word for word in row.split() if (not word in extended_stop_words) and (not re.match(url_re, word))]))
# tokenize the tweets
    tokenizer = RegexpTokenizer(r"[a-zA-Z]\w+'?\w*")
df_copy['tokenized_' + tweet_col] = df_copy['preprocessed_' + tweet_col].apply(lambda row: tokenizer.tokenize(row))
return df_copy | [] |
2024-01-10 | e-cal/gpt-cfa | evaluate_few_shot_l2.py | import argparse
import json
import os
import time
import openai
import pandas as pd
from tqdm import tqdm
# ------------------------------------------------------------------------------
# Parseargs
# ------------------------------------------------------------------------------
argparser = argparse.ArgumentParser()
argparser.add_argument(
"-f",
"--file",
type=str,
required=True,
help="path to json exam file with questions and answers",
)
argparser.add_argument(
"-m",
"--model",
type=str,
default="gpt-3.5-turbo-16k",
help="gpt model to use (gpt-3.5-turbo or gpt-4)",
)
argparser.add_argument(
"-t",
"--temp",
type=float,
default=0.0,
help="temperature to use for gpt response (default 0.0)",
)
argparser.add_argument(
"-o",
"--output",
type=str,
required=True,
help="path to output the attempt's csv file",
)
argparser.add_argument(
"-c",
"--chain_of_thought",
action="store_true",
help="enable chain-of-thought prompting",
)
argparser.add_argument(
"-fsr",
"--few_shot_random",
action="store_true",
help="use questions sampled randomly as few shot learning",
)
argparser.add_argument(
"-fst",
"--few_shot_topic",
action="store_true",
help="use questions sampled from each topic as few shot learning",
)
argparser.add_argument(
"-n", "--n_shots", type=int, help="number of shots to use for few shot learning"
)
args = argparser.parse_args()
model = args.model
temp = args.temp
if os.path.exists(args.output):
print(f"output file {args.output} already exists")
overwrite = input("overwrite? (y/n): ")
if overwrite.lower() != "y":
exit(0)
# ------------------------------------------------------------------------------
# Get key
# ------------------------------------------------------------------------------
try:
openai.api_key = os.getenv("OPENAI_API_KEY")
except:
print("No OpenAI key found")
exit(1)
# ------------------------------------------------------------------------------
# System prompt
# ------------------------------------------------------------------------------
# oneshot_nofunc_prompt = f"""You are a CFA (chartered financial analyst) taking a test to evaluate your knowledge of finance.
# You will be given a question along with three possible answers (A, B, and C).
# Before answering, you should think through the question step-by-step.
# Explain your reasoning at each step towards answering the question.
# If calculation is required, do each step of the calculation as a step in your reasoning.
# Finally, indicate the correct answer (A, B, or C) in double brackets.
# Question:
# Phil Jones, CFA, has just finished researching Alpha One Inc. and is about to issue an unfavorable report on the company. His manager does not want him to state any adverse opinions about Alpha One, as it could adversely affect their firm’s relations with the company, which is an important investment banking client. Which of the following actions by the manager most likely violates Standard I (B): Independence and Objectivity?
# A. Putting Alpha One on a restricted list
# B. Asking Jones to issue a favorable report
# C. Asking Jones to only state facts about the company
# Thinking:
# - The CFA Institute's Standard I (B): Independence and Objectivity states that a CFA charterholder or candidate must use reasonable care and judgment to achieve and maintain independence and objectivity in their professional activities. They must not offer, solicit, or accept any gift, benefit, compensation, or consideration that reasonably could be expected to compromise their own or another’s independence and objectivity.
# - In this case, the manager is trying to influence Phil's research report on Alpha One Inc. due to the company's relationship with their firm. This is a clear attempt to compromise Phil's independence and objectivity in his professional activities.
# - Therefore, the manager's action of trying to prevent Phil from issuing an unfavorable report on Alpha One Inc. most likely violates Standard I (B): Independence and Objectivity.
# [[B]]"""
thinking_prompt = ""
if args.chain_of_thought:
thinking_prompt = """
Before answering, you should think through the question step-by-step.
Explain your reasoning at each step towards answering the question.
If calculation is required, do each step of the calculation as a step in your reasoning.
"""
func_prompt = f"""You are a CFA (chartered financial analyst) taking a test to evaluate your knowledge of finance.
You will be given a question along with three possible answers (A, B, and C).
{thinking_prompt}
Indicate the correct answer (A, B, or C)."""
sys_prompt = func_prompt
answer_func = {
"name": "answer_question",
"description": "Answer a multiple choice question on finance",
"parameters": {
"type": "object",
"properties": {
"answer": {
"type": "string",
"description": "The answer to the question",
"enum": ["A", "B", "C"],
},
},
"required": ["answer"],
},
}
if args.chain_of_thought:
answer_func["description"] = "Think through and " + answer_func["description"]
answer_func["parameters"]["required"].append("thinking")
answer_func["parameters"]["properties"]["thinking"] = {
"type": "array",
"items": {
"type": "string",
"description": "Thought and/or calculation for a step in the process of answering the question",
},
"description": "Step by step thought process and calculations towards answering the question",
}
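# Illustrative shape of the function-call arguments the model is asked to return
# (values are hypothetical): with chain-of-thought enabled it looks like
#   {"thinking": ["Step 1 ...", "Step 2 ..."], "answer": "B"}
# and without it simply {"answer": "B"}.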
if args.few_shot_random or args.few_shot_topic:
if args.few_shot_random:
sampling_type = "fsr"
else:
sampling_type = "fst"
if args.chain_of_thought:
if "level_1" in args.file:
file_path = f"prompts/l1/{sampling_type}_cot_{args.n_shots}_shot_prompts.json"
else:
file_path = f"prompts/l2/{sampling_type}_cot_{args.n_shots}_shot_prompts.json"
else:
if "level_1" in args.file:
file_path = f"prompts/l1/{sampling_type}_{args.n_shots}_shot_prompts.json"
else:
file_path = f"prompts/l2/{sampling_type}_{args.n_shots}_shot_prompts.json"
with open(file_path, "r") as json_file:
few_shot_prompts = json.load(json_file)
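    # few_shot_prompts is expected to be a list of chat messages ({"role": ..., "content": ...})
    # that is spliced between the system prompt and the new question in ask_gpt below.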
print(f"Few shot prompts: {len(few_shot_prompts)}")
def ask_gpt(question):
out = None
function_response = None
for _ in range(5):
try:
messages = [
{
"role": "system",
"content": sys_prompt,
}
]
messages.extend(few_shot_prompts)
messages.append(
{
"role": "user",
"content": question,
}
)
res = openai.ChatCompletion.create(
model=model,
temperature=temp,
messages=messages,
functions=[answer_func],
function_call={"name": "answer_question"},
)
            ans = res.choices[0].message.to_dict()["function_call"]["arguments"]  # type: ignore
            # "arguments" is a JSON string, e.g. {"answer": "A", "thinking": ["step 1", ...]}
            parsed = json.loads(ans)
            answer = str(parsed.get("answer", "")).strip()
            if args.chain_of_thought:
                thinking = parsed.get("thinking", "")
                if isinstance(thinking, list):
                    thinking = " ".join(str(step) for step in thinking)
            else:
                thinking = ""
            out = {"answer": answer, "thinking": thinking}
            return out
except Exception as e:
print(f"Failed request: {e}")
time.sleep(5)
continue
return {"thinking": "", "answer": "N"}
exam = pd.read_json(args.file)
answers = pd.DataFrame(
columns=[
"case",
"question",
"chapter_name",
"choice_a",
"choice_b",
"choice_c",
"answer",
"guess",
"correct",
]
)
correct = 0
pbar = tqdm(exam.iterrows(), total=len(exam))
i: int
for i, row in pbar: # type: ignore
for rowq in row["cfa2_cbt_questions"]:
question = f"""Case:
{row["case"]}
Question:
{rowq["question"]}
A. {rowq["choice_a"]}
B. {rowq["choice_b"]}
C. {rowq["choice_c"]}"""
row_ans = {
"case": row["case"],
"question": rowq["question"],
"chapter_name": rowq["chapter_name"],
"choice_a": rowq["choice_a"],
"choice_b": rowq["choice_b"],
"choice_c": rowq["choice_c"],
"answer": rowq["answer"]
}
gpt_ans = ask_gpt(question)
row_ans["guess"] = gpt_ans["answer"]
if gpt_ans["answer"].lower() == rowq["answer"][-1]:
correct += 1
row_ans["correct"] = "yes"
else:
row_ans["correct"] = "no"
answers = pd.concat([answers, pd.DataFrame([row_ans])], ignore_index=True)
    pbar.set_postfix({"score": f"{correct}/{len(answers)} {correct/max(len(answers), 1) * 100:.2f}%"})
print(f"Score: {correct}/{len(answers)} {correct/len(answers) * 100}%")
print(f"{len(answers[answers['guess'] == 'N'])} failed requests")
answers.to_csv(args.output, index=False)
| [
"You are a CFA (chartered financial analyst) taking a test to evaluate your knowledge of finance.\nYou will be given a question along with three possible answers (A, B, and C).\nPLACEHOLDER\nIndicate the correct answer (A, B, or C).",
"\n Before answering, you should think through the question step-by-step.\n Explain your reasoning at each step towards answering the question.\n If calculation is required, do each step of the calculation as a step in your reasoning.\n "
] |
2024-01-10 | Korred/ai_devs_2.0 | python~ai_devs~tasks~C01L04~moderation.py | import os
import openai
from dotenv import load_dotenv
from icecream import ic
from utils.client import AIDevsClient
# Load environment variables from .env file
load_dotenv()
# Set OpenAI API key
openai.api_key = os.environ.get("OPENAI_API_KEY")
# Get API key from environment variables
aidevs_api_key = os.environ.get("AIDEVS_API_KEY")
# Create a client instance
client = AIDevsClient(aidevs_api_key)
# Get a task
task = client.get_task("moderation")
ic(task.data)
# Check text snippets via OpenAI moderation API
result_list = []
for text in task.data["input"]:
moderation_response = openai.Moderation.create(
input=text,
)
ic(moderation_response)
flagged = moderation_response["results"][0]["flagged"]
result_list.append(int(flagged))
# Post an answer
response = client.post_answer(task, result_list)
ic(response)
| [] |
2024-01-10 | Korred/ai_devs_2.0 | python~ai_devs~tasks~C04L04~ownapi.py | import os
import openai
from dotenv import load_dotenv
from icecream import ic
from utils.client import AIDevsClient
# Load environment variables from .env file
load_dotenv()
# Set OpenAI API key
openai.api_key = os.environ.get("OPENAI_API_KEY")
# Get API key from environment variables
aidevs_api_key = os.environ.get("AIDEVS_API_KEY")
# Create a client instance
client = AIDevsClient(aidevs_api_key)
# Get a task
task = client.get_task("ownapi")
ic(task.data)
# Get API URL from environment variables
api_url = os.environ.get("API_URL")
assistant_endpoint = f"{api_url}/assistant"
response = client.post_answer(task, assistant_endpoint)
ic(response)
| [] |
2024-01-10 | Korred/ai_devs_2.0 | python~ai_devs~tasks~C01L05~liar.py | import json
import os
import openai
from dotenv import load_dotenv
from icecream import ic
from utils.client import AIDevsClient
# Load environment variables from .env file
load_dotenv()
# Set OpenAI API key
openai.api_key = os.environ.get("OPENAI_API_KEY")
# Get API key from environment variables
aidevs_api_key = os.environ.get("AIDEVS_API_KEY")
# Create a client instance
client = AIDevsClient(aidevs_api_key)
# Get a task
task = client.get_task("liar")
ic(task.data)
# Define question, send it and get the answer
question = "Is the GTX 1080Ti a Nvidia graphics card?"
response = client.send_question(task, {"question": question})
ic(response)
# Guardrail
guardrail_msg = """
You are a guardrail that checks if the provided answer is on topic.
If the answer is not on topic, return "NO" else return "YES".
The current question is: {question}
"""
guardrail_completion = openai.ChatCompletion.create(
model="gpt-4",
messages=[
{"role": "system", "content": guardrail_msg.format(question=question)},
{"role": "user", "content": response["answer"]},
],
max_tokens=300,
)
ic(guardrail_completion)
guardrail_answer = guardrail_completion["choices"][0]["message"]["content"]
# Post an answer
response = client.post_answer(task, guardrail_answer)
ic(response)
| [
"answer"
] |
2024-01-10 | Korred/ai_devs_2.0 | python~ai_devs~tasks~C02L02~inprompt.py | import os
import openai
from dotenv import load_dotenv
from icecream import ic
from utils.client import AIDevsClient
# Load environment variables from .env file
load_dotenv()
# Set OpenAI API key
openai.api_key = os.environ.get("OPENAI_API_KEY")
# Get API key from environment variables
aidevs_api_key = os.environ.get("AIDEVS_API_KEY")
# Create a client instance
client = AIDevsClient(aidevs_api_key)
# Get a task
task = client.get_task("inprompt")
ic(task.data)
# Parse the input data and create a dictionary to look up the text snippets by name
name_information = {}
for text in task.data["input"]:
name = text.split(" ")[0]
name_information[name] = text
# Find out the name of the person that the question is about
system_msg = "Based on the provided question, what is the name of the person that the question is about?"
question = task.data["question"]
name_completion = openai.ChatCompletion.create(
model="gpt-4",
messages=[
{"role": "system", "content": system_msg},
{"role": "user", "content": question},
],
max_tokens=100,
)
# Add information about the person/name to the system message
name = name_completion["choices"][0]["message"]["content"]
system_msg = f"Answer a question about the person using the following facts: {name_information[name]}"
# Ask a question about the person and get the answer
question_completion = openai.ChatCompletion.create(
model="gpt-4",
    messages=[
        {"role": "system", "content": system_msg},
{"role": "user", "content": question},
],
max_tokens=200,
)
answer = question_completion["choices"][0]["message"]["content"]
# Post an answer
response = client.post_answer(task, answer)
ic(response)
| [] |
2024-01-10 | Korred/ai_devs_2.0 | python~ai_devs~tasks~C04L03~gnome.py | import os
import openai
from dotenv import load_dotenv
from icecream import ic
from utils.client import AIDevsClient
# Load environment variables from .env file
load_dotenv()
# Set OpenAI API key
openai.api_key = os.environ.get("OPENAI_API_KEY")
# Get API key from environment variables
aidevs_api_key = os.environ.get("AIDEVS_API_KEY")
# Create a client instance
client = AIDevsClient(aidevs_api_key)
# Get a task
task = client.get_task("gnome")
ic(task.data)
# Define the system
system_msg = """
Your task is to analyze a provided image. The image may or may not contain a gnome.
If it does contain a gnome, you should return the color of the gnomes hat in polish (e.g. czerwona, niebieska etc.).
If it does not contain a gnome, just return 'ERROR', nothing else.
"""
gnome_analyzer = openai.chat.completions.create(
model="gpt-4-vision-preview",
messages=[
{"role": "system", "content": system_msg},
{
"role": "user",
"content": [
{"type": "image_url", "image_url": task.data["url"]},
],
},
],
)
# Extract the color of the gnome hat
answer = gnome_analyzer.choices[0].message.content
ic(answer)
# Post answer
response = client.post_answer(task, answer)
ic(response)
| [
"\nYour task is to analyze a provided image. The image may or may not contain a gnome.\nIf it does contain a gnome, you should return the color of the gnomes hat in polish (e.g. czerwona, niebieska etc.).\nIf it does not contain a gnome, just return 'ERROR', nothing else.\n"
] |
2024-01-10 | Korred/ai_devs_2.0 | python~api~router.py | import openai
from config import settings
from fastapi import APIRouter
from pydantic import BaseModel
# Define models
class Question(BaseModel):
question: str
class Reply(BaseModel):
reply: str
# Setup API v1 router
v1 = APIRouter(prefix="/api/v1")
# Set OpenAI API key
openai.api_key = settings.openai_api_key
# Example assistant endpoint that uses GPT-4 to answer questions
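# Illustrative request (host/port depend on how the app is served):
#   curl -X POST http://localhost:8000/api/v1/assistant \
#        -H "Content-Type: application/json" \
#        -d '{"question": "What is FastAPI?"}'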
@v1.post("/assistant")
def assistant(request: Question) -> Reply:
completion = openai.chat.completions.create(
model="gpt-4",
messages=[
{"role": "user", "content": request.question},
],
max_tokens=200,
)
answer = completion.choices[0].message.content
return Reply(reply=answer)
| [] |
2024-01-10 | Korred/ai_devs_2.0 | python~ai_devs~tasks~C03L05~people.py | import os
import openai
from dotenv import load_dotenv
from icecream import ic
from utils.client import AIDevsClient
import httpx
# Load environment variables from .env file
load_dotenv()
# Set OpenAI API key
openai.api_key = os.environ.get("OPENAI_API_KEY")
# Get API key from environment variables
aidevs_api_key = os.environ.get("AIDEVS_API_KEY")
# Create a client instance
client = AIDevsClient(aidevs_api_key)
# Get a task
task = client.get_task("people")
ic(task.data)
# Extract question
question = task.data["question"]
# Extract the name of the person from the question (reverse diminutive form)
system_msg = """
Extract the name and surname of the person from the question provided to you.
Ensure to transform the name into its full form / non-diminutive form e.g.
"Krzysiek" -> "Krzysztof"
"Tomek" -> "Tomasz"
"Jarek" -> "Jarosław"
"Kasia" -> "Katarzyna"
Return the name and surname in the following format: "Name Surname"
"""
extracted_name = (
openai.chat.completions.create(
model="gpt-4",
messages=[
{"role": "system", "content": system_msg},
{"role": "user", "content": question},
],
max_tokens=100,
)
.choices[0]
.message.content
)
ic(extracted_name)
# Load the list of names and information about then
response = httpx.get(task.data["data"])
# Create a dictionary of names
names = {f"{entry['imie']} {entry['nazwisko']}": entry for entry in response.json()}
person = names[extracted_name]
system_msg = f"""
Use the following facts about the person to answer the questions provided to you:
Name: {person['imie']}
Surname: {person['nazwisko']}
General information: {person['o_mnie']}
Age: {person['wiek']}
Favourite Kapitan Bomba character: {person['ulubiona_postac_z_kapitana_bomby']}
Favourite TV series: {person['ulubiony_serial']}
Favourite movie: {person['ulubiony_film']}
Favourite colour: {person['ulubiony_kolor']}
Answer in Polish.
"""
answer = (
openai.chat.completions.create(
model="gpt-4",
messages=[
{"role": "system", "content": system_msg},
{"role": "user", "content": question},
],
max_tokens=200,
)
.choices[0]
.message.content
)
ic(answer)
response = client.post_answer(task, answer)
ic(response)
| [] |
2024-01-10 | Korred/ai_devs_2.0 | python~ai_devs~tasks~C01L04~blogger.py | import json
import os
import openai
from dotenv import load_dotenv
from icecream import ic
from utils.client import AIDevsClient
# Load environment variables from .env file
load_dotenv()
# Set OpenAI API key
openai.api_key = os.environ.get("OPENAI_API_KEY")
# Get API key from environment variables
aidevs_api_key = os.environ.get("AIDEVS_API_KEY")
# Create a client instance
client = AIDevsClient(aidevs_api_key)
# Get a task
task = client.get_task("blogger")
ic(task.data)
# Get chapter topics and format user message
user_msg = "\n".join([f"{i+1}) {chapter}" for i, chapter in enumerate(task.data["blog"])])
# Create system message
system_msg = """
Act as a blogger and generate a blog post about pizza with chapters that will be provided as a list.
For each provided chapter, write 5-6 sentences that explain and describe the topic,
providing insightful information and specific proportions e.g. how much flour is needed to make a pizza.
Return all the chapters as a JSON list of strings where every chapter is just one string.
Remember to write in Polish.
"""
# Get the completion
completion = openai.ChatCompletion.create(
model="gpt-4",
messages=[{"role": "system", "content": system_msg}, {"role": "user", "content": user_msg}],
max_tokens=1000,
)
ic(completion)
# Get the chapters text (use json.loads to parse the JSON string returned in the content field)
chapters_text = json.loads(completion["choices"][0]["message"]["content"])
# Post an answer
response = client.post_answer(task, chapters_text)
ic(response)
| [
"\nAct as a blogger and generate a blog post about pizza with chapters that will be provided as a list.\nFor each provided chapter, write 5-6 sentences that explain and describe the topic,\nproviding insightful information and specific proportions e.g. how much flour is needed to make a pizza. \n\nReturn all the chapters as a JSON list of strings where every chapter is just one string.\n\nRemember to write in Polish.\n"
] |
2024-01-10 | Korred/ai_devs_2.0 | python~ai_devs~tasks~C03L03~whoami.py | import os
import openai
from dotenv import load_dotenv
from icecream import ic
from utils.client import AIDevsClient
# Load environment variables from .env file
load_dotenv()
# Set OpenAI API key
openai.api_key = os.environ.get("OPENAI_API_KEY")
# Get API key from environment variables
aidevs_api_key = os.environ.get("AIDEVS_API_KEY")
# Create a client instance
client = AIDevsClient(aidevs_api_key)
# Define a list of facts
hints = []
for i in range(10):
# Get a task
task = client.get_task("whoami")
ic(task.data)
# Extract hint
hint = task.data["hint"]
# Translate hint
translation_msg = f"Translate the following from Polish to English (only return the translation and nothing else): {hint}"
translation = openai.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": translation_msg},
],
max_tokens=100,
)
eng_hint = translation.choices[0].message.content
ic(eng_hint)
hints.append(eng_hint)
# Try to figure out "who am I?"
hints_str = "\n".join([f"- {hint}" for hint in hints])
whoami_msg = f"""
Your task is to answer the question "Who am I?".
To answer this question, you can use the following hints:
{hints_str}
If the hints are not enough, just answer with "I don't know" and nothing else.
"""
whoami = openai.chat.completions.create(
model="gpt-4",
messages=[
{"role": "user", "content": whoami_msg},
],
max_tokens=400,
)
answer = whoami.choices[0].message.content
ic(answer)
if answer != "I don't know":
# Post an answer
response = client.post_answer(task, answer)
ic(response)
break
| [
"\n Your task is to answer the question \"Who am I?\".\n To answer this question, you can use the following hints:\n PLACEHOLDER\n\n If the hints are not enough, just answer with \"I don't know\" and nothing else.\n ",
"Translate the following from Polish to English (only return the translation and nothing else): PLACEHOLDER"
] |
2024-01-10 | Korred/ai_devs_2.0 | python~ai_devs~tasks~C04L01~knowledge.py | import json
import os
import httpx
import openai
from dotenv import load_dotenv
from icecream import ic
from utils.client import AIDevsClient
# Load environment variables from .env file
load_dotenv()
# Set OpenAI API key
openai.api_key = os.environ.get("OPENAI_API_KEY")
# Get API key from environment variables
aidevs_api_key = os.environ.get("AIDEVS_API_KEY")
# Create a client instance
client = AIDevsClient(aidevs_api_key)
# Get a task
task = client.get_task("knowledge")
ic(task.data)
# Define function specifications
functions = [
{
"type": "function",
"function": {
"name": "returnMiddleExchangeRate",
"description": "Returns the middle exchange rate of a foreign currency",
"parameters": {
"type": "object",
"properties": {
"currency": {
"type": "string",
"description": "Foreign currency in ISO 4217 format (e.g. USD, EUR, GBP, etc.)",
},
},
                "required": ["currency"],
            },
},
},
{
"type": "function",
"function": {
"name": "returnCountryInformation",
"description": "Returns information about a country",
"parameters": {
"type": "object",
"properties": {
"country": {
"type": "string",
"description": "English name of the country in lower case (e.g. spain, france, germany, etc.)",
},
"information_type": {
"type": "string",
"description": "Type of information to return (e.g. population, area, capital, etc.)",
},
},
                "required": ["country"],
            },
},
},
{
"type": "function",
"function": {
"name": "answerGeneralQuestion",
"description": "Default function to answer general questions. Used when no other function can be used to answer the question.",
"parameters": {
"type": "object",
"properties": {
"answer": {
"type": "string",
"description": "Answer to a general question based on your knowledge.",
},
},
                "required": ["answer"],
            },
},
},
]
# Figure out which function to use to answer the question
response = openai.chat.completions.create(
model="gpt-4",
messages=[
{"role": "user", "content": task.data["question"]},
],
tools=functions,
max_tokens=200,
)
ic(response)
# Sometimes the function call is not recognized by the model
# In that case just return the content of the message
if response.choices[0].message.tool_calls:
function_name = response.choices[0].message.tool_calls[0].function.name
arguments = json.loads(response.choices[0].message.tool_calls[0].function.arguments)
if function_name == "returnMiddleExchangeRate":
rates = httpx.get(
f"https://api.nbp.pl/api/exchangerates/rates/a/{arguments['currency']}"
).json()
current_rate = rates["rates"][0]["mid"]
answer = current_rate
elif function_name == "returnCountryInformation":
# fetch country data from api
response = httpx.get(
f"https://restcountries.com/v3.1/name/{arguments['country']}"
)
country_data = response.json()[0]
if arguments["information_type"] == "population":
answer = country_data["population"]
elif arguments["information_type"] == "capital":
# translate capital to Polish
capital = country_data["capital"][0]
answer = (
openai.chat.completions.create(
model="gpt-4",
messages=[
{
"role": "user",
"content": f'Translate "{capital}" to Polish.',
},
],
max_tokens=10,
)
.choices[0]
.message.content
)
else:
answer = arguments["answer"]
else:
answer = response.choices[0].message.content
ic(answer)
response = client.post_answer(task, answer)
ic(response)
| [
"question",
"Translate \"PLACEHOLDER\" to Polish."
] |
2024-01-10 | Korred/ai_devs_2.0 | python~ai_devs~tasks~C02L05~functions.py | import os
import openai
from dotenv import load_dotenv
from icecream import ic
from utils.client import AIDevsClient
# Load environment variables from .env file
load_dotenv()
# Set OpenAI API key
openai.api_key = os.environ.get("OPENAI_API_KEY")
# Get API key from environment variables
aidevs_api_key = os.environ.get("AIDEVS_API_KEY")
# Create a client instance
client = AIDevsClient(aidevs_api_key)
# Get a task
task = client.get_task("functions")
ic(task.data)
# Define a function specification
function = {
"name": "addUser",
"description": "Adds a new user",
"parameters": {
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "User name",
},
"surname": {
"type": "string",
"description": "User surname",
},
"year": {
"type": "integer",
"description": "User birth year",
},
},
        "required": ["name", "surname", "year"],
    },
}
# Post an answer
response = client.post_answer(task, function)
ic(response)
| [] |
2024-01-10 | Korred/ai_devs_2.0 | python~ai_devs~tasks~C03L04~search.py | import os
import openai
from dotenv import load_dotenv
from icecream import ic
from utils.client import AIDevsClient
from qdrant_client import QdrantClient
from qdrant_client.models import Distance, VectorParams, PointStruct
import httpx
COLLECTION_NAME = "ai_devs_newsletter"
OPENAI_EMBEDDING_SIZE = 1536
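# 1536 is the vector size produced by OpenAI's text-embedding-ada-002 model,
# which is used below to embed both the question and the indexed articles.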
# Load environment variables from .env file
load_dotenv()
# Set OpenAI API key
openai.api_key = os.environ.get("OPENAI_API_KEY")
# Get API key from environment variables
aidevs_api_key = os.environ.get("AIDEVS_API_KEY")
# Create a client instance
client = AIDevsClient(aidevs_api_key)
# Get task
task = client.get_task("search")
ic(task.data)
# Get question and create its embedding
question = task.data["question"]
question_embedding = (
openai.embeddings.create(
input=question,
model="text-embedding-ada-002",
)
.data[0]
.embedding
)
# Extract url from task msg
url = task.data["msg"].split(" - ")[1]
# Get json from url
response = httpx.get(url)
data = response.json()
# Initialize Qdrant client
q_client = QdrantClient(path="db/qdrant/")
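# Passing path= runs Qdrant in local (embedded, on-disk) mode instead of connecting
# to a separate Qdrant server.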
# Check if the collection already exists
try:
collection_info = q_client.get_collection(COLLECTION_NAME)
except ValueError:
# Create collection as it does not exist
q_client.create_collection(
collection_name=COLLECTION_NAME,
vectors_config=VectorParams(
size=OPENAI_EMBEDDING_SIZE, distance=Distance.COSINE, on_disk=True
),
)
# Fetch collection info again
collection_info = q_client.get_collection(COLLECTION_NAME)
# Check if documents are already indexed
if collection_info.points_count == 0:
ic("Indexing documents...")
points = []
# Get embeddings for each article
for i, entry in enumerate(data):
ic(f"Indexing document {i}...")
vector = (
openai.embeddings.create(
input=entry["info"],
model="text-embedding-ada-002",
)
.data[0]
.embedding
)
points.append(
PointStruct(
id=i,
vector=vector,
payload={
"url": entry["url"],
"title": entry["title"],
"date": entry["date"],
},
)
)
ic("Inserting documents into Qdrant...")
q_client.upsert(
collection_name=COLLECTION_NAME,
points=points,
wait=True,
)
# Refresh task as the above operation takes some time
# and the token most likely expired
task = client.get_task("search")
# Now that we have indexed documents, we can search for the answer
ic("Searching for answer...")
search_results = q_client.search(
collection_name=COLLECTION_NAME,
query_vector=question_embedding,
limit=1,
)
ic(search_results)
answer = search_results[0].payload["url"]
# Post an answer
response = client.post_answer(task, answer)
ic(response)
| [] |
2024-01-10 | Korred/ai_devs_2.0 | python~ai_devs~tasks~C03L01~rodo.py | import os
import openai
from dotenv import load_dotenv
from icecream import ic
from utils.client import AIDevsClient
# Load environment variables from .env file
load_dotenv()
# Set OpenAI API key
openai.api_key = os.environ.get("OPENAI_API_KEY")
# Get API key from environment variables
aidevs_api_key = os.environ.get("AIDEVS_API_KEY")
# Create a client instance
client = AIDevsClient(aidevs_api_key)
# Get a task
task = client.get_task("rodo")
ic(task.data)
# Get the system message
system_msg = task.data["msg"]
# Define a question
question = """
Please tell me about yourself. However please replace all personal information with placeholders.
Use the following placeholders:
- name -> %imie%
- surname -> %nazwisko%
- city -> %miasto%
- kraj -> %kraj%
- job -> %zawod%
Examples:
- replace "Peter Parker" with "%imie% %nazwisko%"
- replace "New York" with "%miasto%"
- replace "USA" with "%kraj%"
- replace "photographer" or "band member", "personal guard" with "%zawod%"
"""
# Define chat completion
completion = openai.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": system_msg},
{"role": "user", "content": question},
],
max_tokens=300,
)
ic(completion)
# Post an answer
response = client.post_answer(task, question)
ic(response)
| [
"\nPlease tell me about yourself. However please replace all personal information with placeholders.\nUse the following placeholders:\n- name -> %imie%\n- surname -> %nazwisko%\n- city -> %miasto%\n- kraj -> %kraj%\n- job -> %zawod%\n\nExamples:\n- replace \"Peter Parker\" with \"%imie% %nazwisko%\"\n- replace \"New York\" with \"%miasto%\"\n- replace \"USA\" with \"%kraj%\"\n- replace \"photographer\" or \"band member\", \"personal guard\" with \"%zawod%\"\n"
] |
2024-01-10 | Korred/ai_devs_2.0 | python~ai_devs~tasks~C03L02~scraper.py | import os
import openai
from dotenv import load_dotenv
from icecream import ic
from utils.client import AIDevsClient
from tenacity import retry, wait_exponential
import requests
@retry(wait=wait_exponential())
def fetch_txt(url):
ic("Fetching text...")
# Set User-Agent header to avoid 403 error (bot detection)
# https://www.whatismybrowser.com/guides/the-latest-user-agent/edge
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.2210.91"
}
r = requests.get(url, headers=headers)
r.raise_for_status()
return r.text
# Load environment variables from .env file
load_dotenv()
# Set OpenAI API key
openai.api_key = os.environ.get("OPENAI_API_KEY")
# Get API key from environment variables
aidevs_api_key = os.environ.get("AIDEVS_API_KEY")
# Create a client instance
client = AIDevsClient(aidevs_api_key)
# Get a task
task = client.get_task("scraper")
ic(task.data)
# Get text URL
text_url = task.data["input"]
# Fetch text but retry if it fails
text = fetch_txt(text_url)
# Define system message
msg = task.data["msg"]
system_msg = f"""
{msg}
To answer the question, you can use the following text as context:
{text}
"""
# Define chat question
question = task.data["question"]
# Define chat completion
completion = openai.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": system_msg},
{"role": "user", "content": question},
],
max_tokens=400,
)
ic(completion)
answer = completion.choices[0].message.content
# Post an answer
response = client.post_answer(task, answer)
ic(response)
| [
"\nPLACEHOLDER\n\nTo answer the question, you can use the following text as context:\nPLACEHOLDER\n"
] |
2024-01-10 | PrefectHQ/prefect-openai | tests~test_credentials.py | from prefect_openai.credentials import OpenAICredentials
def test_openai_credentials_get_client():
credentials = OpenAICredentials(api_key="api_key", organization="my_org")
assert credentials.api_key.get_secret_value() == "api_key"
assert credentials.organization == "my_org"
client = credentials.get_client()
assert client.api_key == "api_key"
assert client.organization == "my_org"
| [] |
2024-01-10 | PrefectHQ/prefect-openai | prefect_openai~credentials.py | """Module for authenticating with OpenAI."""
from types import ModuleType
from typing import Optional
import openai
from prefect.blocks.abstract import CredentialsBlock
from pydantic import VERSION as PYDANTIC_VERSION
if PYDANTIC_VERSION.startswith("2."):
from pydantic.v1 import Field, SecretStr
else:
from pydantic import Field, SecretStr
class OpenAICredentials(CredentialsBlock):
"""
Credentials used to authenticate with OpenAI.
Attributes:
api_key: The API key used to authenticate with OpenAI.
Example:
Load a configured block:
```python
from prefect_openai import OpenAICredentials
credentials = OpenAICredentials.load("BLOCK_NAME")
```
Get the OpenAPI client:
```python
from prefect_openai import OpenAICredentials
credentials = OpenAICredentials.load("BLOCK_NAME")
client = credentials.get_client()
```
"""
_block_type_name = "OpenAI Credentials"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/760539393a7dbf93a143fb01c2a8b0fe7157a8d8-247x250.png" # noqa
_documentation_url = "https://prefecthq.github.io/prefect-openai/credentials/#prefect_openai.credentials.OpenAICredentials" # noqa
api_key: SecretStr = Field(
default=...,
title="API Key",
description="The API key used to authenticate with OpenAI.",
)
organization: Optional[str] = Field(
default=None,
title="Organization",
description="Specify which organization is used for an API request.",
)
def get_client(self) -> ModuleType:
"""
Gets the OpenAPI client.
Returns:
The OpenAPI client.
"""
openai.api_key = self.api_key.get_secret_value()
openai.organization = self.organization
return openai
| [] |
2024-01-10 | PrefectHQ/prefect-openai | tests~conftest.py | import pytest
from prefect.testing.utilities import AsyncMock, MagicMock, prefect_test_harness
from prefect_openai.credentials import OpenAICredentials
@pytest.fixture(scope="session", autouse=True)
def prefect_db():
"""
Sets up test harness for temporary DB during test runs.
"""
with prefect_test_harness():
yield
@pytest.fixture(autouse=True)
def reset_object_registry():
"""
Ensures each test has a clean object registry.
"""
from prefect.context import PrefectObjectRegistry
with PrefectObjectRegistry():
yield
async def mock_acreate(prompt, **kwargs):
result = MagicMock(prompt=prompt)
for k, v in kwargs.items():
setattr(result, k, v)
return result
@pytest.fixture
def mock_openai_credentials(monkeypatch) -> OpenAICredentials:
mock_model = AsyncMock(name="mock_model")
mock_block_load = AsyncMock()
mock_block_load.return_value = mock_model
mock_model.acreate.side_effect = mock_acreate
monkeypatch.setattr("openai.Completion", mock_model)
monkeypatch.setattr("openai.Image", mock_model)
monkeypatch.setattr(
"prefect_openai.completion.CompletionModel.load", mock_block_load
)
return OpenAICredentials(
api_key="my_api_key", _mock_model=mock_model, _mock_block_load=mock_block_load
)
| [] |
2024-01-10 | aquigni/Chancellerite | transformations~py~transform_proverbs.py | import os
from dotenv import load_dotenv
from openai import OpenAI
# Load environment variables from .env file
load_dotenv()
# API key, set it in .env (remember to add .env to .gitignore)
client = OpenAI()
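# The OpenAI() client reads OPENAI_API_KEY from the environment loaded above.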
def transform_proverb(proverb, index, total):
print(f"Processing {index}/{total}") # Only print the counter
try:
completion = client.chat.completions.create(
model="gpt-4-1106-preview",
messages=[
{"role": "system", "content": "Вы работаете в роли переводчика, который переформулирует поговорки в максимально бюрократический канцеляритный стиль. Пример: «Цыплят по осени считают» превратится в «Подсчет прироста домашней птицы производится после завершения сезона сельскохозяйственных работ»."},
{"role": "user", "content": f"Переформулируйте поговорку, без заключения её в кавычки и без печати в ответе исходной: '{proverb}'"}
]
)
transformed = completion.choices[0].message.content.strip()
return transformed
except Exception as e:
print(f"Error with proverb '{proverb}': {e}")
return None
# Reading proverbs from file
with open("../txt/proverbs.txt", "r") as file:
proverbs = [line.strip() for line in file if line.strip()]
total_proverbs = len(proverbs)
transformed_proverbs = [transform_proverb(proverb, index+1, total_proverbs) for index, proverb in enumerate(proverbs)]
# Writing result to file
with open("../txt/transformed_proverbs.txt", "w") as file:
for proverb in transformed_proverbs:
if proverb is not None:
file.write(proverb + "\n")
else:
file.write("Transformation Failed\n")
| [
"Вы работаете в роли переводчика, который переформулирует поговорки в максимально бюрократический канцеляритный стиль. Пример: «Цыплят по осени считают» превратится в «Подсчет прироста домашней птицы производится после завершения сезона сельскохозяйственных работ».",
"Переформулируйте поговорку, без заключения её в кавычки и без печати в ответе исходной: 'PLACEHOLDER'"
] |
2024-01-10 | sweepai/sweep | tests~notebooks~asst.py | import openai
from openai import OpenAI
client = OpenAI()
INSTRUCTIONS = """\
You are a brilliant and meticulous engineer assigned to write code to complete the user's request. You specialize in Python programming.
# Instructions
Extract code verbatim from the snippets above using EXTRACT sections. These snippets will be used later to refactor the code according to the user request.
* Choose specific and informative names for these functions under new_function_name.
* We must copy the code verbatim, so any extra leading or trailing code will cause us to fail.
* The code must be extracted in contiguous blocks.
* Keep whitespace and comments.
* Extracted functions should be roughly 25 lines unless the function behavior dictates otherwise.
Respond in the following format:
<contextual_request_analysis>
Analyze the user request to identify each section of the code that should be extracted.
These sections should not overlap.
For each new function outline the first and last few lines of code that should be extracted.
</contextual_request_analysis>
<new_function_names>
"new_function_name"
...
</new_function_names>
<extractions>
```
<<<<<<< EXTRACT
first few lines to be extracted from original_code
...
last few lines to be extracted from original_code
>>>>>>>
...
```
</extractions>"""
my_assistant = openai.beta.assistants.create(
instructions=INSTRUCTIONS,
name="Python Coding Assistant",
tools=[{"type": "code_interpreter"}],
model="gpt-4-1106-preview",
)
thread = client.beta.threads.create()
EXTRACTION_USER_MSG = """\
# Repo & Issue Metadata
Repo: privateGPT - Interact with your documents using the power of GPT, 100% privately, no data leaks
Issue Title: refactor the retrieve_relevant function in private_gpt/server/chunks/chunks_service.py to become more modular
Choose parts of functions that can be extracted to reduce the complexity of the code. If a single function would be too large, refactor it into multiple smaller subfunctions.
Issue Description:
# Code
File path: private_gpt/server/chunks/chunks_service.py
<original_code>
from typing import TYPE_CHECKING
from injector import inject, singleton
from llama_index import ServiceContext, StorageContext, VectorStoreIndex
from llama_index.schema import NodeWithScore
from pydantic import BaseModel, Field
from private_gpt.components.embedding.embedding_component import EmbeddingComponent
from private_gpt.components.llm.llm_component import LLMComponent
from private_gpt.components.node_store.node_store_component import NodeStoreComponent
from private_gpt.components.vector_store.vector_store_component import (
VectorStoreComponent,
)
from private_gpt.open_ai.extensions.context_filter import ContextFilter
from private_gpt.server.ingest.ingest_service import IngestedDoc
if TYPE_CHECKING:
from llama_index.schema import RelatedNodeInfo
class Chunk(BaseModel):
object: str = Field(enum=["context.chunk"])
score: float = Field(examples=[0.023])
document: IngestedDoc
text: str = Field(examples=["Outbound sales increased 20%, driven by new leads."])
previous_texts: list[str] | None = Field(
examples=[["SALES REPORT 2023", "Inbound didn't show major changes."]]
)
next_texts: list[str] | None = Field(
examples=[
[
"New leads came from Google Ads campaign.",
"The campaign was run by the Marketing Department",
]
]
)
@singleton
class ChunksService:
@inject
def __init__(
self,
llm_component: LLMComponent,
vector_store_component: VectorStoreComponent,
embedding_component: EmbeddingComponent,
node_store_component: NodeStoreComponent,
) -> None:
self.vector_store_component = vector_store_component
self.storage_context = StorageContext.from_defaults(
vector_store=vector_store_component.vector_store,
docstore=node_store_component.doc_store,
index_store=node_store_component.index_store,
)
self.query_service_context = ServiceContext.from_defaults(
llm=llm_component.llm, embed_model=embedding_component.embedding_model
)
def _get_sibling_nodes_text(
self, node_with_score: NodeWithScore, related_number: int, forward: bool = True
) -> list[str]:
explored_nodes_texts = []
current_node = node_with_score.node
for _ in range(related_number):
explored_node_info: RelatedNodeInfo | None = (
current_node.next_node if forward else current_node.prev_node
)
if explored_node_info is None:
break
explored_node = self.storage_context.docstore.get_node(
explored_node_info.node_id
)
explored_nodes_texts.append(explored_node.get_content())
current_node = explored_node
return explored_nodes_texts
def retrieve_relevant(
self,
text: str,
context_filter: ContextFilter | None = None,
limit: int = 10,
prev_next_chunks: int = 0,
) -> list[Chunk]:
index = VectorStoreIndex.from_vector_store(
self.vector_store_component.vector_store,
storage_context=self.storage_context,
service_context=self.query_service_context,
show_progress=True,
)
vector_index_retriever = self.vector_store_component.get_retriever(
index=index, context_filter=context_filter, similarity_top_k=limit
)
nodes = vector_index_retriever.retrieve(text)
nodes.sort(key=lambda n: n.score or 0.0, reverse=True)
retrieved_nodes = []
for node in nodes:
doc_id = node.node.ref_doc_id if node.node.ref_doc_id is not None else "-"
retrieved_nodes.append(
Chunk(
object="context.chunk",
score=node.score or 0.0,
document=IngestedDoc(
object="ingest.document",
doc_id=doc_id,
doc_metadata=node.metadata,
),
text=node.get_content(),
previous_texts=self._get_sibling_nodes_text(
node, prev_next_chunks, False
),
next_texts=self._get_sibling_nodes_text(node, prev_next_chunks),
)
)
return retrieved_nodes
</original_code>
# Instructions
Extract code verbatim from the snippets above using EXTRACT sections. These snippets will be used later to refactor the code according to the user request.
* Choose specific and informative names for these functions under new_function_name.
* We must copy the code verbatim, so any extra leading or trailing code will cause us to fail.
* The code must be extracted in contiguous blocks.
* Keep whitespace and comments.
* Extracted functions should be roughly 25 lines unless the function behavior dictates otherwise.
Respond in the following format:
<contextual_request_analysis>
First, determine the function(s) you want to make more modular.
Analyze the user request to identify each section of the code that should be extracted.
These sections should not overlap.
For each new function outline the first and last few lines of code that should be extracted.
</contextual_request_analysis>
<new_function_names>
"new_function_name"
...
</new_function_names>
<extractions>
```
<<<<<<< EXTRACT
first few lines to be extracted from original_code
...
last few lines to be extracted from original_code
>>>>>>>
...
```
</extractions>"""
message = client.beta.threads.messages.create(
thread_id=thread.id,
role="user",
content=EXTRACTION_USER_MSG,
)
run = client.beta.threads.runs.create(thread_id=thread.id, assistant_id=my_assistant.id)
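# Note: Assistants API runs execute asynchronously; normally the run status would be
# polled until it reports "completed" before reading the thread's messages. This
# scratch script skips the polling and inspects the state in pdb below.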
run = client.beta.threads.runs.retrieve(thread_id=thread.id, run_id=run.id)
messages = client.beta.threads.messages.list(thread_id=thread.id)
latest_message = messages.data[0].content[0].text.value
import pdb
pdb.set_trace()
run
| [] |
2024-01-10 | sweepai/sweep | tests~archive~test_scraper.py | import os
import openai
from llama_index import GPTVectorStoreIndex, download_loader
openai.api_key = os.environ.get("OPENAI_API_KEY")
SimpleWebPageReader = download_loader("SimpleWebPageReader")
loader = SimpleWebPageReader()
url = "https://modal.com/docs/guide/continuous-deployment#github-actions"
documents = loader.load_data(urls=[url])
document = documents[0]
index = GPTVectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine(streaming=True)
query_engine.query(
"Extract the entire example yaml from the html."
).print_response_stream()
| [] |
2024-01-10 | sweepai/sweep | tests~archive~test_langchain_chunker.py | from langchain.text_splitter import Language, RecursiveCharacterTextSplitter
python_text = '''
import io
import os
import zipfile
import openai
import requests
from loguru import logger
from sweepai.core.gha_extraction import GHAExtractor
from sweepai.events import CheckRunCompleted
from sweepai.handlers.on_comment import on_comment
from sweepai.utils.config.client import SweepConfig, get_gha_enabled
from sweepai.utils.github_utils import get_github_client, get_token
openai.api_key = os.environ.get("OPENAI_API_KEY")
log_message = """GitHub actions yielded the following error.
{error_logs}
This is likely a linting or type-checking issue with the source code but if you are updating the GitHub Actions or versioning, this could be an issue with the GitHub Action yaml files."""
def download_logs(repo_full_name: str, run_id: int, installation_id: int):
headers = {
"Accept": "application/vnd.github+json",
"Authorization": f"Bearer {get_token(installation_id)}",
"X-GitHub-Api-Version": "2022-11-28"
}
response = requests.get(f"https://api.github.com/repos/{repo_full_name}/actions/runs/{run_id}/logs",
headers=headers)
logs_str = ""
if response.status_code == 200:
zip_file = zipfile.ZipFile(io.BytesIO(response.content))
for file in zip_file.namelist():
if "/" not in file:
with zip_file.open(file) as f:
logs_str += f.read().decode("utf-8")
else:
logger.warning(f"Failed to download logs for run id: {run_id}")
return logs_str
def clean_logs(logs_str: str):
log_list = logs_str.split("\n")
truncated_logs = [log[log.find(" ") + 1:] for log in log_list]
patterns = [
# for docker
"Already exists",
"Pulling fs layer",
"Waiting",
"Download complete",
"Verifying Checksum",
"Pull complete",
# For github
"remote: Counting objects",
"remote: Compressing objects:",
"Receiving objects:",
"Resolving deltas:"
]
return "\n".join([log.strip() for log in truncated_logs if not any(pattern in log for pattern in patterns)])
def on_check_suite(request: CheckRunCompleted):
logger.info(f"Received check run completed event for {request.repository.full_name}")
g = get_github_client(request.installation.id)
repo = g.get_repo(request.repository.full_name)
if not get_gha_enabled(repo):
logger.info(f"Skipping github action for {request.repository.full_name} because it is not enabled")
return None
pr = repo.get_pull(request.check_run.pull_requests[0].number)
num_pr_commits = len(list(pr.get_commits()))
if num_pr_commits > 20:
logger.info(f"Skipping github action for PR with {num_pr_commits} commits")
return None
logger.info(f"Running github action for PR with {num_pr_commits} commits")
logs = download_logs(
request.repository.full_name,
request.check_run.run_id,
request.installation.id
)
if not logs:
return None
logs = clean_logs(logs)
extractor = GHAExtractor()
logger.info(f"Extracting logs from {request.repository.full_name}, logs: {logs}")
problematic_logs = extractor.gha_extract(logs)
if problematic_logs.count("\n") > 15:
problematic_logs += "\n\nThere are a lot of errors. This is likely a larger issue with the PR and not a small linting/type-checking issue."
comments = list(pr.get_issue_comments())
if len(comments) >= 2 and problematic_logs == comments[-1].body and comments[-2].body == comments[-1].body:
comment = pr.as_issue().create_comment(log_message.format(error_logs=problematic_logs) + "\n\nI'm getting the same errors 3 times in a row, so I will stop working on fixing this PR.")
logger.warning("Skipping logs because it is duplicated")
raise Exception("Duplicate error logs")
print(problematic_logs)
comment = pr.as_issue().create_comment(log_message.format(error_logs=problematic_logs))
on_comment(
repo_full_name=request.repository.full_name,
repo_description=request.repository.description,
comment=problematic_logs,
pr_path=None,
pr_line_position=None,
username=request.sender.login,
installation_id=request.installation.id,
pr_number=request.check_run.pull_requests[0].number,
comment_id=comment.id,
repo=repo,
)
return {"success": True}
'''
python_splitter = RecursiveCharacterTextSplitter.from_language(
language=Language.PYTHON, chunk_size=1500, chunk_overlap=0
)
python_docs = python_splitter.create_documents([python_text])
# [print(document.page_content + "\n\n===========\n\n") for document in python_docs]
# quit()
js_text = """
import { Document, BaseNode } from "../Node";
import { v4 as uuidv4 } from "uuid";
import { BaseRetriever } from "../Retriever";
import { ServiceContext } from "../ServiceContext";
import { StorageContext } from "../storage/StorageContext";
import { BaseDocumentStore } from "../storage/docStore/types";
import { VectorStore } from "../storage/vectorStore/types";
import { BaseIndexStore } from "../storage/indexStore/types";
import { BaseQueryEngine } from "../QueryEngine";
import { ResponseSynthesizer } from "../ResponseSynthesizer";
/**
* The underlying structure of each index.
*/
export abstract class IndexStruct {
indexId: string;
summary?: string;
constructor(indexId = uuidv4(), summary = undefined) {
this.indexId = indexId;
this.summary = summary;
}
toJson(): Record<string, unknown> {
return {
indexId: this.indexId,
summary: this.summary,
};
}
getSummary(): string {
if (this.summary === undefined) {
throw new Error("summary field of the index dict is not set");
}
return this.summary;
}
}
export enum IndexStructType {
SIMPLE_DICT = "simple_dict",
LIST = "list",
}
export class IndexDict extends IndexStruct {
nodesDict: Record<string, BaseNode> = {};
docStore: Record<string, Document> = {}; // FIXME: this should be implemented in storageContext
type: IndexStructType = IndexStructType.SIMPLE_DICT;
getSummary(): string {
if (this.summary === undefined) {
throw new Error("summary field of the index dict is not set");
}
return this.summary;
}
addNode(node: BaseNode, textId?: string) {
const vectorId = textId ?? node.id_;
this.nodesDict[vectorId] = node;
}
toJson(): Record<string, unknown> {
return {
...super.toJson(),
nodesDict: this.nodesDict,
type: this.type,
};
}
}
export function jsonToIndexStruct(json: any): IndexStruct {
if (json.type === IndexStructType.LIST) {
const indexList = new IndexList(json.indexId, json.summary);
indexList.nodes = json.nodes;
return indexList;
} else if (json.type === IndexStructType.SIMPLE_DICT) {
const indexDict = new IndexDict(json.indexId, json.summary);
indexDict.nodesDict = json.nodesDict;
return indexDict;
} else {
throw new Error(`Unknown index struct type: ${json.type}`);
}
}
export class IndexList extends IndexStruct {
nodes: string[] = [];
type: IndexStructType = IndexStructType.LIST;
addNode(node: BaseNode) {
this.nodes.push(node.id_);
}
toJson(): Record<string, unknown> {
return {
...super.toJson(),
nodes: this.nodes,
type: this.type,
};
}
}
export interface BaseIndexInit<T> {
serviceContext: ServiceContext;
storageContext: StorageContext;
docStore: BaseDocumentStore;
vectorStore?: VectorStore;
indexStore?: BaseIndexStore;
indexStruct: T;
}
/**
* Indexes are the data structure that we store our nodes and embeddings in so
* they can be retrieved for our queries.
*/
export abstract class BaseIndex<T> {
serviceContext: ServiceContext;
storageContext: StorageContext;
docStore: BaseDocumentStore;
vectorStore?: VectorStore;
indexStore?: BaseIndexStore;
indexStruct: T;
constructor(init: BaseIndexInit<T>) {
this.serviceContext = init.serviceContext;
this.storageContext = init.storageContext;
this.docStore = init.docStore;
this.vectorStore = init.vectorStore;
this.indexStore = init.indexStore;
this.indexStruct = init.indexStruct;
}
/**
* Create a new retriever from the index.
* @param retrieverOptions
*/
abstract asRetriever(options?: any): BaseRetriever;
/**
* Create a new query engine from the index. It will also create a retriever
* and response synthesizer if they are not provided.
* @param options you can supply your own custom Retriever and ResponseSynthesizer
*/
abstract asQueryEngine(options?: {
retriever?: BaseRetriever;
responseSynthesizer?: ResponseSynthesizer;
}): BaseQueryEngine;
}
export interface VectorIndexOptions {
nodes?: BaseNode[];
indexStruct?: IndexDict;
indexId?: string;
serviceContext?: ServiceContext;
storageContext?: StorageContext;
}
export interface VectorIndexConstructorProps extends BaseIndexInit<IndexDict> {
vectorStore: VectorStore;
}
"""
js_splitter = RecursiveCharacterTextSplitter.from_language(
language=Language.JS, chunk_size=1500, chunk_overlap=0
)
js_docs = js_splitter.create_documents([js_text])
[print(document.page_content + "\n\n========\n") for document in js_docs]
| [] |
2024-01-10 | sweepai/sweep | tests~archive~test_api.py | # import openai
# import asyncio
# from fastapi import Body, FastAPI
# from pydantic import BaseModel
# from sweepai.core.chat import ChatGPT
# app = FastAPI()
# tasks = {}
# async def background_task(name: str):
# # import os
# # print(os.getpid())
# # import random
# # print(random.random())
# import os, hashlib
# random_bytes = os.urandom(16)
# hash_obj = hashlib.sha256(random_bytes)
# hash_hex = hash_obj.hexdigest()
# print(hash_hex)
# print("Starting background task")
# for i in range(1, 6):
# print(f"Task {name} running ({i}/5)...")
# await asyncio.sleep(1)
# print(f"Task {name} completed.")
# class Task(BaseModel):
# name: str
# @app.post("/start")
# async def start_task(request: Task):
# task = asyncio.create_task(background_task(request.name))
# tasks[request.name] = task
# return {"message": "Task started"}
# @app.post("/cancel")
# async def cancel_task(request: Task):
# task = tasks.get(request.name)
# if task:
# task.cancel()
# return {"message": "Task canceled"}
# return {"message": "Task not found"}
import multiprocessing
import time
from fastapi import FastAPI
app = FastAPI()
processes_dict = {}
def long_task(key):
for i in range(100):
print(f"{key}", i)
time.sleep(1)
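# Start (or restart) the background process for the given key, terminating any existing run first.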
def start_task(key):
print(processes_dict)
if key in processes_dict:
processes_dict[key].terminate()
processes_dict[key].join()
print("Terminated ", key)
process = multiprocessing.Process(target=long_task, args=(key,))
processes_dict[key] = process
process.start()
return {"status": "started"}
def cancel_task(key):
if key in processes_dict:
process = processes_dict[key]
process.terminate()
process.join()
del processes_dict[key]
return {"status": "cancelled"}
return {"status": "not_found"}
@app.post("/start/{key}")
async def start_task_endpoint(key: str):
return start_task(key)
@app.post("/cancel/{key}")
async def cancel_task_endpoint(key: str):
return cancel_task(key)
| [] |
2024-01-10 | sweepai/sweep | sweepai~core~vector_db.py | import json
import re
import time
from functools import lru_cache
from typing import Generator, List
import numpy as np
import replicate
import requests
from deeplake.core.vectorstore.deeplake_vectorstore import ( # pylint: disable=import-error
VectorStore,
)
from loguru import logger
from redis import Redis
from sentence_transformers import SentenceTransformer # pylint: disable=import-error
from tqdm import tqdm
from sweepai.config.client import SweepConfig, get_blocked_dirs
from sweepai.config.server import (
BATCH_SIZE,
HUGGINGFACE_TOKEN,
HUGGINGFACE_URL,
REDIS_URL,
REPLICATE_API_KEY,
REPLICATE_DEPLOYMENT_URL,
SENTENCE_TRANSFORMERS_MODEL,
VECTOR_EMBEDDING_SOURCE,
)
from sweepai.core.entities import Snippet
from sweepai.core.lexical_search import prepare_index_from_snippets, search_index
from sweepai.core.repo_parsing_utils import repo_to_chunks
from sweepai.logn.cache import file_cache
from sweepai.utils.event_logger import posthog
from sweepai.utils.github_utils import ClonedRepo
from sweepai.utils.hash import hash_sha256
from sweepai.utils.progress import TicketProgress
from sweepai.utils.scorer import compute_score, get_scores
MODEL_DIR = "/tmp/cache/model"
DEEPLAKE_DIR = "/tmp/cache/"
timeout = 60 * 60 # 1 hour
CACHE_VERSION = "v1.0.14"
MAX_FILES = 500
redis_client = Redis.from_url(REDIS_URL)
def download_models():
from sentence_transformers import ( # pylint: disable=import-error
SentenceTransformer,
)
model = SentenceTransformer(SENTENCE_TRANSFORMERS_MODEL, cache_folder=MODEL_DIR)
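# Create a fresh in-memory Deep Lake vector store, namespaced by timestamp and repo name.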
def init_deeplake_vs(repo_name):
deeplake_repo_path = f"mem://{int(time.time())}{repo_name}"
deeplake_vector_store = VectorStore(
path=deeplake_repo_path, read_only=False, overwrite=False
)
return deeplake_vector_store
def parse_collection_name(name: str) -> str:
# Replace any non-alphanumeric characters with hyphens
name = re.sub(r"[^\w-]", "--", name)
# Ensure the name is between 3 and 63 characters and starts/ends with alphanumeric
name = re.sub(r"^(-*\w{0,61}\w)-*$", r"\1", name[:63].ljust(3, "x"))
return name
def embed_huggingface(texts):
"""Embeds a list of texts using Hugging Face's API."""
for i in range(3):
try:
headers = {
"Authorization": f"Bearer {HUGGINGFACE_TOKEN}",
"Content-Type": "application/json",
}
response = requests.post(
HUGGINGFACE_URL, headers=headers, json={"inputs": texts}
)
return response.json()["embeddings"]
except requests.exceptions.RequestException as e:
logger.exception(
f"Error occurred when sending request to Hugging Face endpoint: {e}"
)
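# Embed a batch of texts through a Replicate deployment, retrying up to three times before raising.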
def embed_replicate(texts: List[str], timeout=180) -> List[np.ndarray]:
client = replicate.Client(api_token=REPLICATE_API_KEY)
deployment = client.deployments.get(REPLICATE_DEPLOYMENT_URL)
e = None
for i in range(3):
try:
prediction = deployment.predictions.create(
input={"text_batch": json.dumps(texts)}, timeout=timeout
)
prediction.wait()
outputs = prediction.output
break
except Exception as e:
logger.exception(f"Replicate timeout: {e}")
else:
raise Exception(f"Replicate timeout")
return [output["embedding"] for output in outputs]
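# Compute embeddings with the backend selected by VECTOR_EMBEDDING_SOURCE; results are memoized via lru_cache, so texts are passed as a tuple.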
@lru_cache(maxsize=64)
def embed_texts(texts: tuple[str]):
logger.info(
f"Computing embeddings for {len(texts)} texts using {VECTOR_EMBEDDING_SOURCE}..."
)
match VECTOR_EMBEDDING_SOURCE:
case "sentence-transformers":
sentence_transformer_model = SentenceTransformer(
SENTENCE_TRANSFORMERS_MODEL, cache_folder=MODEL_DIR
)
vector = sentence_transformer_model.encode(
texts, show_progress_bar=True, batch_size=BATCH_SIZE
)
return vector
case "openai":
from openai import OpenAI
client = OpenAI()
embeddings = []
for batch in tqdm(chunk(texts, batch_size=BATCH_SIZE), disable=False):
try:
response = client.embeddings.create(
input=batch, model="text-embedding-ada-002"
)
embeddings.extend([r.embedding for r in response.data])
except SystemExit:
raise SystemExit
except Exception:
logger.exception("Failed to get embeddings for batch")
logger.error(f"Failed to get embeddings for {batch}")
return embeddings
case "huggingface":
if HUGGINGFACE_URL and HUGGINGFACE_TOKEN:
embeddings = []
for batch in tqdm(chunk(texts, batch_size=BATCH_SIZE), disable=False):
embeddings.extend(embed_huggingface(batch))
return embeddings
else:
raise Exception("Hugging Face URL and token not set")
case "replicate":
if REPLICATE_API_KEY:
embeddings = []
for batch in tqdm(chunk(texts, batch_size=BATCH_SIZE)):
embeddings.extend(embed_replicate(batch))
return embeddings
else:
raise Exception("Replicate URL and token not set")
case "none":
return [[0.5]] * len(texts)
case _:
raise Exception("Invalid vector embedding mode")
logger.info(
f"Computed embeddings for {len(texts)} texts using {VECTOR_EMBEDDING_SOURCE}"
)
def embedding_function(texts: list[str]):
# For LRU cache to work
return embed_texts(tuple(texts))
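# Build the lexical index and Deep Lake vector store for a cloned repository, returning both plus the document count.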
def get_deeplake_vs_from_repo(
cloned_repo: ClonedRepo,
sweep_config: SweepConfig = SweepConfig(),
):
deeplake_vs = None
repo_full_name = cloned_repo.repo_full_name
repo = cloned_repo.repo
commits = repo.get_commits()
commit_hash = commits[0].sha
logger.info(f"Downloading repository and indexing for {repo_full_name}...")
start = time.time()
logger.info("Recursively getting list of files...")
blocked_dirs = get_blocked_dirs(repo)
sweep_config.exclude_dirs.extend(blocked_dirs)
file_list, snippets, index = prepare_lexical_search_index(
cloned_repo, sweep_config, repo_full_name, TicketProgress(tracking_id="none")
)
# scoring for vector search
files_to_scores = compute_vector_search_scores(file_list, cloned_repo)
collection_name, documents, ids, metadatas = prepare_documents_metadata_ids(
snippets, cloned_repo, files_to_scores, start, repo_full_name
)
deeplake_vs = deeplake_vs or compute_deeplake_vs(
collection_name, documents, ids, metadatas, commit_hash
)
return deeplake_vs, index, len(documents)
def prepare_documents_metadata_ids(
snippets, cloned_repo, files_to_scores, start, repo_full_name
):
documents = []
metadatas = []
ids = []
for snippet in snippets:
documents.append(snippet.get_snippet(add_ellipsis=False, add_lines=False))
metadata = {
"file_path": snippet.file_path[len(cloned_repo.cached_dir) + 1 :],
"start": snippet.start,
"end": snippet.end,
"score": files_to_scores[snippet.file_path],
}
metadatas.append(metadata)
gh_file_path = snippet.file_path[len("repo") :]
ids.append(f"{gh_file_path}:{snippet.start}:{snippet.end}")
logger.info(f"Getting list of all files took {time.time() - start}")
logger.info(f"Received {len(documents)} documents from repository {repo_full_name}")
collection_name = parse_collection_name(repo_full_name)
return collection_name, documents, ids, metadatas
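# Compute a per-file ranking score for every file, caching individual score factors in Redis when available.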
def compute_vector_search_scores(file_list, cloned_repo):
files_to_scores = {}
score_factors = []
for file_path in tqdm(file_list):
if not redis_client:
score_factor = compute_score(
file_path[len(cloned_repo.cached_dir) + 1 :], cloned_repo.git_repo
)
score_factors.append(score_factor)
continue
cache_key = hash_sha256(file_path) + CACHE_VERSION
try:
cache_value = redis_client.get(cache_key)
except Exception as e:
logger.exception(e)
cache_value = None
if cache_value is not None:
score_factor = json.loads(cache_value)
score_factors.append(score_factor)
else:
score_factor = compute_score(
file_path[len(cloned_repo.cached_dir) + 1 :], cloned_repo.git_repo
)
score_factors.append(score_factor)
redis_client.set(cache_key, json.dumps(score_factor))
# compute all scores
all_scores = get_scores(score_factors)
files_to_scores = {
file_path[len(cloned_repo.cached_dir) + 1 :]: score
for file_path, score in zip(file_list, all_scores)
}
return files_to_scores
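# Chunk the repository into snippets and build the lexical search index over them.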
def prepare_lexical_search_index(
cloned_repo,
sweep_config,
repo_full_name,
ticket_progress: TicketProgress | None = None,
):
snippets, file_list = repo_to_chunks(cloned_repo.cached_dir, sweep_config)
logger.info(f"Found {len(snippets)} snippets in repository {repo_full_name}")
# prepare lexical search
index = prepare_index_from_snippets(
snippets,
len_repo_cache_dir=len(cloned_repo.cached_dir) + 1,
ticket_progress=ticket_progress,
)
return file_list, snippets, index
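# Embed every document (reusing Redis-cached embeddings where possible), add them to a new Deep Lake store, and write newly computed embeddings back to the cache.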
def compute_deeplake_vs(collection_name, documents, ids, metadatas, sha):
if len(documents) > 0:
logger.info(f"Computing embeddings with {VECTOR_EMBEDDING_SOURCE}...")
# Check cache here for all documents
embeddings = [None] * len(documents)
# if len(documents) > 10000:
if redis_client:
cache_keys = [
hash_sha256(doc)
+ SENTENCE_TRANSFORMERS_MODEL
+ VECTOR_EMBEDDING_SOURCE
+ CACHE_VERSION
for doc in documents
]
cache_values = redis_client.mget(cache_keys)
for idx, value in enumerate(cache_values):
if value is not None:
arr = json.loads(value)
if isinstance(arr, list):
embeddings[idx] = np.array(arr, dtype=np.float32)
logger.info(
f"Found {len([x for x in embeddings if x is not None])} embeddings in cache"
)
indices_to_compute = [idx for idx, x in enumerate(embeddings) if x is None]
documents_to_compute = [documents[idx] for idx in indices_to_compute]
logger.info(f"Computing {len(documents_to_compute)} embeddings...")
computed_embeddings = embedding_function(documents_to_compute)
logger.info(f"Computed {len(computed_embeddings)} embeddings")
for idx, embedding in zip(indices_to_compute, computed_embeddings):
embeddings[idx] = embedding
embeddings = convert_to_numpy_array(embeddings, documents)
deeplake_vs = init_deeplake_vs(collection_name)
deeplake_vs.add(text=ids, embedding=embeddings, metadata=metadatas)
logger.info("Added embeddings to cache")
if redis_client and len(documents_to_compute) > 0:
logger.info(f"Updating cache with {len(computed_embeddings)} embeddings")
cache_keys = [
hash_sha256(doc)
+ SENTENCE_TRANSFORMERS_MODEL
+ VECTOR_EMBEDDING_SOURCE
+ CACHE_VERSION
for doc in documents_to_compute
]
redis_client.mset(
{
key: json.dumps(
embedding.tolist()
if isinstance(embedding, np.ndarray)
else embedding
)
for key, embedding in zip(cache_keys, computed_embeddings)
}
)
return deeplake_vs
def convert_to_numpy_array(embeddings, documents):
try:
embeddings = np.array(embeddings, dtype=np.float32)
except SystemExit:
raise SystemExit
except:
logger.exception(
"Failed to convert embeddings to numpy array, recomputing all of them"
)
embeddings = embedding_function(documents)
embeddings = np.array(embeddings, dtype=np.float32)
return embeddings
def compute_embeddings(documents):
if len(documents) > 0:
logger.info(f"Computing embeddings with {VECTOR_EMBEDDING_SOURCE}...")
# Check cache here for all documents
embeddings = [None] * len(documents)
if redis_client:
cache_keys = [
hash_sha256(doc)
+ SENTENCE_TRANSFORMERS_MODEL
+ VECTOR_EMBEDDING_SOURCE
+ CACHE_VERSION
for doc in documents
]
cache_values = redis_client.mget(cache_keys)
for idx, value in enumerate(cache_values):
if value is not None:
arr = json.loads(value)
if isinstance(arr, list):
embeddings[idx] = np.array(arr, dtype=np.float32)
logger.info(
f"Found {len([x for x in embeddings if x is not None])} embeddings in cache"
)
indices_to_compute = [idx for idx, x in enumerate(embeddings) if x is None]
documents_to_compute = [documents[idx] for idx in indices_to_compute]
logger.info(f"Computing {len(documents_to_compute)} embeddings...")
computed_embeddings = embedding_function(documents_to_compute)
logger.info(f"Computed {len(computed_embeddings)} embeddings")
for idx, embedding in zip(indices_to_compute, computed_embeddings):
embeddings[idx] = embedding
embeddings = convert_to_numpy_array(embeddings, documents)
return embeddings, documents_to_compute, computed_embeddings, embedding
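# Rank snippets for a query by combining vector similarity, lexical search scores, and file-level scores.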
@file_cache(ignore_params=["cloned_repo", "sweep_config", "token"])
def get_relevant_snippets(
cloned_repo: ClonedRepo,
query: str,
username: str | None = None,
sweep_config: SweepConfig = SweepConfig(),
lexical=True,
):
repo_name = cloned_repo.repo_full_name
installation_id = cloned_repo.installation_id
logger.info("Getting query embedding...")
query_embedding = embedding_function([query]) # pylint: disable=no-member
logger.info("Starting search by getting vector store...")
deeplake_vs, lexical_index, num_docs = get_deeplake_vs_from_repo(
cloned_repo, sweep_config=sweep_config
)
content_to_lexical_score = search_index(query, lexical_index)
logger.info(f"Found {len(content_to_lexical_score)} lexical results")
logger.info(f"Searching for relevant snippets... with {num_docs} docs")
results = {"metadata": [], "text": []}
try:
results = deeplake_vs.search(embedding=query_embedding, k=num_docs)
except SystemExit:
raise SystemExit
except Exception:
logger.exception("Exception occurred while fetching relevant snippets")
logger.info("Fetched relevant snippets...")
if len(results["text"]) == 0:
logger.info(f"Results query {query} was empty")
logger.info(f"Results: {results}")
if username is None:
username = "anonymous"
posthog.capture(
username,
"failed",
{
"reason": "Results query was empty",
"repo_name": repo_name,
"installation_id": installation_id,
"query": query,
},
)
return []
metadatas = results["metadata"]
code_scores = [metadata["score"] for metadata in metadatas]
lexical_scores = []
for metadata in metadatas:
key = f"{metadata['file_path']}:{str(metadata['start'])}:{str(metadata['end'])}"
if key in content_to_lexical_score:
lexical_scores.append(content_to_lexical_score[key])
else:
lexical_scores.append(0.3)
vector_scores = results["score"]
combined_scores = [
code_score * 4
+ vector_score
+ lexical_score * 2.5 # increase weight of lexical search
for code_score, vector_score, lexical_score in zip(
code_scores, vector_scores, lexical_scores
)
]
combined_list = list(zip(combined_scores, metadatas))
sorted_list = sorted(combined_list, key=lambda x: x[0], reverse=True)
sorted_metadatas = [metadata for _, metadata in sorted_list]
relevant_paths = [metadata["file_path"] for metadata in sorted_metadatas]
logger.info("Relevant paths: {}".format(relevant_paths[:5]))
return [
Snippet(
content="",
start=metadata["start"],
end=metadata["end"],
file_path=file_path,
)
for metadata, file_path in zip(sorted_metadatas, relevant_paths)
][:num_docs]
def chunk(texts: List[str], batch_size: int) -> Generator[List[str], None, None]:
"""
Split a list of texts into batches of a given size for embed_texts.
Args:
----
texts (List[str]): A list of texts to be chunked into batches.
batch_size (int): The maximum number of texts in each batch.
Yields:
------
Generator[List[str], None, None]: A generator that yields batches of texts as lists.
Example:
-------
texts = ["text1", "text2", "text3", "text4", "text5"]
batch_size = 2
for batch in chunk(texts, batch_size):
print(batch)
# Output:
# ['text1', 'text2']
# ['text3', 'text4']
# ['text5']
"""
texts = [text[:4096] if text else " " for text in texts]
for text in texts:
assert isinstance(text, str), f"Expected str, got {type(text)}"
assert len(text) <= 4096, f"Expected text length <= 4096, got {len(text)}"
for i in range(0, len(texts), batch_size):
yield texts[i : i + batch_size] if i + batch_size < len(texts) else texts[i:]
| [] |
2024-01-10 | sweepai/sweep | tests~archive~test_cst_splitter.py | python_code = '''
import io
import os
import zipfile
import openai
import requests
from loguru import logger
from sweepai.core.gha_extraction import GHAExtractor
from sweepai.events import CheckRunCompleted
from sweepai.handlers.on_comment import on_comment
from sweepai.utils.config.client import SweepConfig, get_gha_enabled
from sweepai.utils.github_utils import get_github_client, get_token
openai.api_key = os.environ.get("OPENAI_API_KEY")
log_message = """GitHub actions yielded the following error.
{error_logs}
This is likely a linting or type-checking issue with the source code but if you are updating the GitHub Actions or versioning, this could be an issue with the GitHub Action yaml files."""
def download_logs(repo_full_name: str, run_id: int, installation_id: int):
headers = {
"Accept": "application/vnd.github+json",
"Authorization": f"Bearer {get_token(installation_id)}",
"X-GitHub-Api-Version": "2022-11-28"
}
response = requests.get(f"https://api.github.com/repos/{repo_full_name}/actions/runs/{run_id}/logs",
headers=headers)
logs_str = ""
if response.status_code == 200:
zip_file = zipfile.ZipFile(io.BytesIO(response.content))
for file in zip_file.namelist():
if "/" not in file:
with zip_file.open(file) as f:
logs_str += f.read().decode("utf-8")
else:
logger.warning(f"Failed to download logs for run id: {run_id}")
return logs_str
def clean_logs(logs_str: str):
log_list = logs_str.split("\\n")
truncated_logs = [log[log.find(" ") + 1:] for log in log_list]
patterns = [
# for docker
"Already exists",
"Pulling fs layer",
"Waiting",
"Download complete",
"Verifying Checksum",
"Pull complete",
# For github
"remote: Counting objects",
"remote: Compressing objects:",
"Receiving objects:",
"Resolving deltas:"
]
return "\\n".join([log.strip() for log in truncated_logs if not any(pattern in log for pattern in patterns)])
def on_check_suite(request: CheckRunCompleted):
logger.info(f"Received check run completed event for {request.repository.full_name}")
g = get_github_client(request.installation.id)
repo = g.get_repo(request.repository.full_name)
if not get_gha_enabled(repo):
logger.info(f"Skipping github action for {request.repository.full_name} because it is not enabled")
return None
pr = repo.get_pull(request.check_run.pull_requests[0].number)
num_pr_commits = len(list(pr.get_commits()))
if num_pr_commits > 20:
logger.info(f"Skipping github action for PR with {num_pr_commits} commits")
return None
logger.info(f"Running github action for PR with {num_pr_commits} commits")
logs = download_logs(
request.repository.full_name,
request.check_run.run_id,
request.installation.id
)
if not logs:
return None
logs = clean_logs(logs)
extractor = GHAExtractor()
logger.info(f"Extracting logs from {request.repository.full_name}, logs: {logs}")
problematic_logs = extractor.gha_extract(logs)
if problematic_logs.count("\\n") > 15:
problematic_logs += "\\n\\nThere are a lot of errors. This is likely a larger issue with the PR and not a small linting/type-checking issue."
comments = list(pr.get_issue_comments())
if len(comments) >= 2 and problematic_logs == comments[-1].body and comments[-2].body == comments[-1].body:
comment = pr.as_issue().create_comment(log_message.format(error_logs=problematic_logs) + "\\n\\nI'm getting the same errors 3 times in a row, so I will stop working on fixing this PR.")
logger.warning("Skipping logs because it is duplicated")
raise Exception("Duplicate error logs")
print(problematic_logs)
comment = pr.as_issue().create_comment(log_message.format(error_logs=problematic_logs))
on_comment(
repo_full_name=request.repository.full_name,
repo_description=request.repository.description,
comment=problematic_logs,
pr_path=None,
pr_line_position=None,
username=request.sender.login,
installation_id=request.installation.id,
pr_number=request.check_run.pull_requests[0].number,
comment_id=comment.id,
repo=repo,
)
return {"success": True}
'''
js_text = """
import { Document, BaseNode } from "../Node";
import { v4 as uuidv4 } from "uuid";
import { BaseRetriever } from "../Retriever";
import { ServiceContext } from "../ServiceContext";
import { StorageContext } from "../storage/StorageContext";
import { BaseDocumentStore } from "../storage/docStore/types";
import { VectorStore } from "../storage/vectorStore/types";
import { BaseIndexStore } from "../storage/indexStore/types";
import { BaseQueryEngine } from "../QueryEngine";
import { ResponseSynthesizer } from "../ResponseSynthesizer";
/**
* The underlying structure of each index.
*/
export abstract class IndexStruct {
indexId: string;
summary?: string;
constructor(indexId = uuidv4(), summary = undefined) {
this.indexId = indexId;
this.summary = summary;
}
toJson(): Record<string, unknown> {
return {
indexId: this.indexId,
summary: this.summary,
};
}
getSummary(): string {
if (this.summary === undefined) {
throw new Error("summary field of the index dict is not set");
}
return this.summary;
}
}
export enum IndexStructType {
SIMPLE_DICT = "simple_dict",
LIST = "list",
}
export class IndexDict extends IndexStruct {
nodesDict: Record<string, BaseNode> = {};
docStore: Record<string, Document> = {}; // FIXME: this should be implemented in storageContext
type: IndexStructType = IndexStructType.SIMPLE_DICT;
getSummary(): string {
if (this.summary === undefined) {
throw new Error("summary field of the index dict is not set");
}
return this.summary;
}
addNode(node: BaseNode, textId?: string) {
const vectorId = textId ?? node.id_;
this.nodesDict[vectorId] = node;
}
toJson(): Record<string, unknown> {
return {
...super.toJson(),
nodesDict: this.nodesDict,
type: this.type,
};
}
}
export function jsonToIndexStruct(json: any): IndexStruct {
if (json.type === IndexStructType.LIST) {
const indexList = new IndexList(json.indexId, json.summary);
indexList.nodes = json.nodes;
return indexList;
} else if (json.type === IndexStructType.SIMPLE_DICT) {
const indexDict = new IndexDict(json.indexId, json.summary);
indexDict.nodesDict = json.nodesDict;
return indexDict;
} else {
throw new Error(`Unknown index struct type: ${json.type}`);
}
}
export class IndexList extends IndexStruct {
nodes: string[] = [];
type: IndexStructType = IndexStructType.LIST;
addNode(node: BaseNode) {
this.nodes.push(node.id_);
}
toJson(): Record<string, unknown> {
return {
...super.toJson(),
nodes: this.nodes,
type: this.type,
};
}
}
export interface BaseIndexInit<T> {
serviceContext: ServiceContext;
storageContext: StorageContext;
docStore: BaseDocumentStore;
vectorStore?: VectorStore;
indexStore?: BaseIndexStore;
indexStruct: T;
}
/**
* Indexes are the data structure that we store our nodes and embeddings in so
* they can be retrieved for our queries.
*/
export abstract class BaseIndex<T> {
serviceContext: ServiceContext;
storageContext: StorageContext;
docStore: BaseDocumentStore;
vectorStore?: VectorStore;
indexStore?: BaseIndexStore;
indexStruct: T;
constructor(init: BaseIndexInit<T>) {
this.serviceContext = init.serviceContext;
this.storageContext = init.storageContext;
this.docStore = init.docStore;
this.vectorStore = init.vectorStore;
this.indexStore = init.indexStore;
this.indexStruct = init.indexStruct;
}
/**
* Create a new retriever from the index.
* @param retrieverOptions
*/
abstract asRetriever(options?: any): BaseRetriever;
/**
* Create a new query engine from the index. It will also create a retriever
* and response synthesizer if they are not provided.
* @param options you can supply your own custom Retriever and ResponseSynthesizer
*/
abstract asQueryEngine(options?: {
retriever?: BaseRetriever;
responseSynthesizer?: ResponseSynthesizer;
}): BaseQueryEngine;
}
export interface VectorIndexOptions {
nodes?: BaseNode[];
indexStruct?: IndexDict;
indexId?: string;
serviceContext?: ServiceContext;
storageContext?: StorageContext;
}
export interface VectorIndexConstructorProps extends BaseIndexInit<IndexDict> {
vectorStore: VectorStore;
}
"""
# if __name__ == "__main__":
# chunks, metadata, _id = chunker.call(js_text, "main.py")
# for chunk in chunks:
# print(chunk + "\n\n==========\n")
| [] |
2024-01-10 | sweepai/sweep | sweepai~agents~assistant_wrapper.py | import json
import os
import re
import time
from pathlib import Path
from typing import Callable
from loguru import logger
from openai import OpenAI
from openai.pagination import SyncCursorPage
from openai.types.beta.threads.thread_message import ThreadMessage
from pydantic import BaseModel
from sweepai.agents.assistant_functions import raise_error_schema
from sweepai.config.server import IS_SELF_HOSTED, OPENAI_API_KEY
from sweepai.core.entities import AssistantRaisedException, Message
from sweepai.utils.chat_logger import ChatLogger
from sweepai.utils.event_logger import posthog
client = OpenAI(api_key=OPENAI_API_KEY, timeout=90) if OPENAI_API_KEY else None
def openai_retry_with_timeout(call, *args, num_retries=3, timeout=5, **kwargs):
"""
Pass any OpenAI client call and retry it num_retries times, incorporating timeout into the call.
Usage:
run = openai_retry_with_timeout(client.beta.threads.runs.submit_tool_outputs, thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs, num_retries=3, timeout=10)
Parameters:
call (callable): The OpenAI client call to be retried.
*args: Positional arguments for the callable.
num_retries (int): The number of times to retry the call.
timeout (int): The timeout value to be applied to the call.
**kwargs: Keyword arguments for the callable.
Returns:
The result of the OpenAI client call.
"""
error_message = None
for attempt in range(num_retries):
try:
return call(*args, **kwargs, timeout=timeout)
except Exception as e:
logger.error(f"Retry {attempt + 1} failed with error: {e}")
error_message = str(e)
raise Exception(
f"Maximum retries reached. The call failed for call {error_message}"
)
save_ticket_progress_type = Callable[[str, str, str], None]
class AssistantResponse(BaseModel):
messages: SyncCursorPage[ThreadMessage]
assistant_id: str
run_id: str
thread_id: str
allowed_exts = [
"c",
"cpp",
"csv",
"docx",
"html",
"java",
"json",
"md",
"pdf",
"php",
"pptx",
"py",
"rb",
"tex",
"txt",
"css",
"jpeg",
"jpg",
"js",
"gif",
"png",
"tar",
"ts",
"xlsx",
"xml",
"zip",
]
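# Flatten an assistant run (user messages, code interpreter calls, and function calls) into chat-style JSON messages for logging.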
def get_json_messages(
thread_id: str,
run_id: str,
assistant_id: str,
):
assistant = openai_retry_with_timeout(
client.beta.assistants.retrieve,
assistant_id=assistant_id,
)
messages = openai_retry_with_timeout(
client.beta.threads.messages.list,
thread_id=thread_id,
)
run_steps = openai_retry_with_timeout(
client.beta.threads.runs.steps.list, run_id=run_id, thread_id=thread_id
)
system_message_json = {
"role": "system",
"content": assistant.instructions,
}
messages_json = [system_message_json]
for message in messages:
if message.role == "user":
messages_json.append(
{
"role": "user",
"content": message.content[0].text.value,
}
)
for message_obj in list(run_steps.data)[:0:-1]:
if message_obj.type == "message_creation":
message_id = message_obj.step_details.message_creation.message_id
thread_messages = openai_retry_with_timeout(
client.beta.threads.messages.retrieve,
message_id=message_id,
thread_id=thread_id,
)
message_content = thread_messages.content[0].text.value
messages_json.append(
{
"role": "assistant",
"content": message_content,
}
)
# TODO: handle annotations
elif message_obj.type == "tool_calls":
for tool_call in message_obj.step_details.tool_calls:
if tool_call.type == "code_interpreter":
code_interpreter = tool_call.code_interpreter
input_ = code_interpreter.input
if not input_:
continue
input_content = f"Code interpreter input:\n```\n{input_}\n```"
messages_json.append(
{
"role": "assistant",
"content": input_content,
}
)
outputs = code_interpreter.outputs
output = outputs[0].logs if outputs else "__No output__"
output_content = f"Code interpreter output:\n```\n{output}\n```"
messages_json.append(
{
"role": "user",
"content": output_content,
}
)
else:
function = tool_call.function
input_content = f"Function call of {function.name}:\n```\n{function.arguments}\n```"
messages_json.append(
{
"role": "assistant",
"content": input_content,
}
)
if function.output:
output_content = (
f"Function output:\n```\n{function.output}\n```"
)
messages_json.append(
{
"role": "user",
"content": output_content,
}
)
return messages_json
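# Poll the run until it finishes; this generator yields each required tool call as (name, arguments) and expects the tool output to be sent back in.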
def run_until_complete(
thread_id: str,
run_id: str,
assistant_id: str,
model: str = "gpt-4-1106-preview",
chat_logger: ChatLogger | None = None,
sleep_time: int = 3,
max_iterations: int = 200,
save_ticket_progress: save_ticket_progress_type | None = None,
):
message_strings = []
json_messages = []
try:
for i in range(max_iterations):
run = openai_retry_with_timeout(
client.beta.threads.runs.retrieve,
thread_id=thread_id,
run_id=run_id,
)
if run.status == "completed":
logger.info(f"Run completed with {run.status}")
break
elif run.status in ("cancelled", "cancelling", "failed", "expired"):
logger.info(f"Run completed with {run.status}")
raise Exception(
f"Run failed assistant_id={assistant_id}, run_id={run_id}, thread_id={thread_id}"
)
elif run.status == "requires_action":
tool_calls = [
tool_call
for tool_call in run.required_action.submit_tool_outputs.tool_calls
]
if any(
[
tool_call.function.name == raise_error_schema["name"]
for tool_call in tool_calls
]
):
arguments_parsed = json.loads(tool_calls[0].function.arguments)
raise AssistantRaisedException(arguments_parsed["message"])
tool_outputs = []
for tool_call in tool_calls:
try:
tool_call_arguments = re.sub(
r"\\+'", "", tool_call.function.arguments
)
function_input: dict = json.loads(tool_call_arguments)
except:
logger.warning(
f"Could not parse function arguments: {tool_call_arguments}"
)
tool_outputs.append(
{
"tool_call_id": tool_call.id,
"output": "FAILURE: Could not parse function arguments.",
}
)
continue
tool_output = yield tool_call.function.name, function_input
tool_output_formatted = {
"tool_call_id": tool_call.id,
"output": tool_output,
}
tool_outputs.append(tool_output_formatted)
run = openai_retry_with_timeout(
client.beta.threads.runs.submit_tool_outputs,
thread_id=thread_id,
run_id=run.id,
tool_outputs=tool_outputs,
)
if save_ticket_progress is not None:
save_ticket_progress(
assistant_id=assistant_id,
thread_id=thread_id,
run_id=run_id,
)
messages = openai_retry_with_timeout(
client.beta.threads.messages.list,
thread_id=thread_id,
)
current_message_strings = [
message.content[0].text.value for message in messages.data
]
if message_strings != current_message_strings and current_message_strings:
logger.info(run.status)
logger.info(current_message_strings[0])
message_strings = current_message_strings
json_messages = get_json_messages(
thread_id=thread_id,
run_id=run_id,
assistant_id=assistant_id,
)
if chat_logger is not None:
chat_logger.add_chat(
{
"model": model,
"messages": json_messages,
"output": message_strings[0],
"thread_id": thread_id,
"run_id": run_id,
"max_tokens": 1000,
"temperature": 0,
}
)
else:
if i % 5 == 0:
logger.info(run.status)
time.sleep(sleep_time)
except (KeyboardInterrupt, SystemExit):
client.beta.threads.runs.cancel(thread_id=thread_id, run_id=run_id)
logger.warning(f"Run cancelled: {run_id}")
raise SystemExit
if save_ticket_progress is not None:
save_ticket_progress(
assistant_id=assistant_id,
thread_id=thread_id,
run_id=run_id,
)
for json_message in json_messages:
logger.info(json_message["content"])
return client.beta.threads.messages.list(
thread_id=thread_id,
)
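# Upload any files, create the assistant, thread, and run, then either drive the run to completion (when extra tools are given) or return the raw IDs.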
def openai_assistant_call_helper(
request: str,
instructions: str | None = None,
additional_messages: list[Message] = [],
file_paths: list[str] = [], # use either file_paths or file_ids
uploaded_file_ids: list[str] = [],
tools: list[dict[str, str]] = [{"type": "code_interpreter"}],
model: str = "gpt-4-1106-preview",
sleep_time: int = 3,
chat_logger: ChatLogger | None = None,
assistant_id: str | None = None,
assistant_name: str | None = None,
save_ticket_progress: save_ticket_progress_type | None = None,
):
file_ids = [] if not uploaded_file_ids else uploaded_file_ids
file_object = None
if not file_ids:
for file_path in file_paths:
if not any(file_path.endswith(extension) for extension in allowed_exts):
os.rename(file_path, file_path + ".txt")
file_path += ".txt"
file_object = client.files.create(
file=Path(file_path), purpose="assistants"
)
file_ids.append(file_object.id)
logger.debug(instructions)
# always create new one
assistant = openai_retry_with_timeout(
client.beta.assistants.create,
name=assistant_name,
instructions=instructions,
tools=tools,
model=model,
)
thread = client.beta.threads.create()
if file_ids:
logger.info("Uploading files...")
client.beta.threads.messages.create(
thread_id=thread.id,
role="user",
content=request,
file_ids=file_ids,
)
if file_ids:
logger.info("Files uploaded")
for message in additional_messages:
client.beta.threads.messages.create(
thread_id=thread.id,
role="user",
content=message.content,
)
run = client.beta.threads.runs.create(
thread_id=thread.id,
assistant_id=assistant.id,
instructions=instructions,
model=model,
)
if len(tools) > 1:
return run_until_complete(
thread_id=thread.id,
run_id=run.id,
model=model,
chat_logger=chat_logger,
assistant_id=assistant.id,
sleep_time=sleep_time,
save_ticket_progress=save_ticket_progress,
)
for file_id in file_ids:
client.files.delete(file_id=file_id)
return (
assistant.id,
run.id,
thread.id,
)
# Split in two so it can be cached
def openai_assistant_call(
request: str,
instructions: str | None = None,
additional_messages: list[Message] = [],
file_paths: list[str] = [],
uploaded_file_ids: list[str] = [],
tools: list[dict[str, str]] = [{"type": "code_interpreter"}],
model: str = "gpt-4-1106-preview",
sleep_time: int = 3,
chat_logger: ChatLogger | None = None,
assistant_id: str | None = None,
assistant_name: str | None = None,
save_ticket_progress: save_ticket_progress_type | None = None,
):
model = (
"gpt-3.5-turbo-1106"
if (chat_logger is None or chat_logger.use_faster_model())
and not IS_SELF_HOSTED
else "gpt-4-1106-preview"
)
posthog.capture(
chat_logger.data.get("username") if chat_logger is not None else "anonymous",
"call_assistant_api",
{
"query": request,
"model": model,
},
)
retries = range(3)
for _ in retries:
try:
response = openai_assistant_call_helper(
request=request,
instructions=instructions,
additional_messages=additional_messages,
file_paths=file_paths,
uploaded_file_ids=uploaded_file_ids,
tools=tools,
model=model,
sleep_time=sleep_time,
chat_logger=chat_logger,
assistant_id=assistant_id,
assistant_name=assistant_name,
save_ticket_progress=save_ticket_progress,
)
if len(tools) > 1:
return response
(assistant_id, run_id, thread_id) = response
messages = client.beta.threads.messages.list(
thread_id=thread_id,
)
return AssistantResponse(
messages=messages,
assistant_id=assistant_id,
run_id=run_id,
thread_id=thread_id,
)
except AssistantRaisedException as e:
logger.warning(e.message)
except Exception as e:
logger.error(e)
raise e
| [] |
2024-01-10 | sweepai/sweep | tests~archive~delete_old_files.py | from openai import OpenAI
from sweepai.config.server import OPENAI_API_KEY
client = OpenAI(api_key=OPENAI_API_KEY) if OPENAI_API_KEY else None
all_files = client.files.list()
for file in all_files:
client.files.delete(file.id)
file_mb = file.bytes / 1e6
print(f"Deleted {file.id} which used {file_mb} megabytes")
| [] |
2024-01-10 | sweepai/sweep | sweepai~core~context_pruning.py | import json
import re
import time
from attr import dataclass
from loguru import logger
from openai.types.beta.thread import Thread
from openai.types.beta.threads.run import Run
from sweepai.agents.assistant_wrapper import client, openai_retry_with_timeout
from sweepai.config.server import IS_SELF_HOSTED
from sweepai.core.entities import Snippet
from sweepai.utils.chat_logger import ChatLogger, discord_log_error
from sweepai.utils.code_tree import CodeTree
from sweepai.utils.event_logger import posthog
from sweepai.utils.github_utils import ClonedRepo
from sweepai.utils.progress import AssistantConversation, TicketProgress
from sweepai.utils.tree_utils import DirectoryTree
ASSISTANT_MAX_CHARS = 4096 * 4 * 0.95 # ~95% of 4k tokens
sys_prompt = """You are a brilliant engineer assigned to the following Github issue. You must gather ALL RELEVANT information from the codebase that allows you to completely solve the issue. It is very important that you get this right and do not miss any relevant lines of code.
## Instructions
You initially start with no snippets and will use the store_file_snippet and expand_directory to add snippets to the context. You will iteratively use the file_search, preview_file and view_file_snippet tools to help you find the relevant snippets to store.
You are provided "Relevant Snippets", which are snippets relevant to the user request. These snippets are retrieved by a lexical search over the codebase, but are NOT in the context initially.
You will do this by using the following process for every relevant file:
1. First use the preview_file tool to preview all files that are relevant, starting with file paths and entities mentioned in "User Request", then those in "Relevant Snippets". For example, if the class foo.bar.Bar was mentioned, be sure to preview foo/bar.py. If the file is irrelevant, move onto the next file. If you don't know the full file path, use file_search with the file name.
2. If the file seems relevant, use the view_file_snippet tool to view specific line numbers of a file. We want to find all line numbers relevant to solve the user request. So if the surrounding lines are relevant, use the view_file_snippet tool again with a larger span to view the surrounding lines. Repeat this process until you are certain you have the maximal relevant span.
3. Finally, when you are certain you have the maximal relevant span, use the store_file_snippet and expand_directory tools to curate the optimal context (snippets_in_repo and repo_tree) until they allow you to completely solve the user request. If you don't know the correct line numbers, complete step one until you find the exact line numbers.
Repeat this process until you have the perfect context to solve the user request. Ensure you have checked ALL files referenced in the user request."""
unformatted_user_prompt = """\
<repo_tree>
{repo_tree}
</repo_tree>
## Relevant Snippets
Here are potentially relevant snippets in the repo in decreasing relevance that you should use the preview_file tool for:
{snippets_in_repo}
## User Request
{query}"""
functions = [
{
"name": "file_search",
"parameters": {
"type": "object",
"properties": {
"file_path": {
"type": "string",
"description": "The search query. You can search like main.py to find src/main.py.",
},
"justification": {
"type": "string",
"description": "Justification for searching for the file.",
},
},
"required": ["snippet_path", "justification"],
},
"description": "Use this to find the most similar file paths to the search query.",
},
{
"name": "preview_file",
"parameters": {
"type": "object",
"properties": {
"file_path": {
"type": "string",
"description": "File path to preview.",
},
"justification": {
"type": "string",
"description": "Justification for previewing the file.",
},
},
"required": ["snippet_path", "justification"],
},
"description": "Use this to read the summary of the file. Use this tool before viewing a snippet. This is used for exploration only and does not affect the snippets. After using this tool, use the view_file_snippet tool to view specific line numbers of a file to find the exact line numbers to store to solve the user request.",
},
{
"name": "view_file_snippet",
"parameters": {
"type": "object",
"properties": {
"file_path": {
"type": "string",
"description": "File or directory to store.",
},
"start_line": {
"type": "integer",
"description": "Start line of the snippet.",
},
"end_line": {
"type": "integer",
"description": "End line of the snippet.",
},
"justification": {
"type": "string",
"description": "Justification for viewing the file_path.",
},
},
"required": ["file_path", "start_line", "end_line", "justification"],
},
"description": "Use this to view a section of a snippet. You may use this tool multiple times to view multiple snippets. After you are finished using this tool, you may use the view_file_snippet to view the surrounding lines or the store_file_snippet tool to store the snippet to solve the user request.",
},
{
"name": "store_file_snippet",
"parameters": {
"type": "object",
"properties": {
"file_path": {
"type": "string",
"description": "File or directory to store.",
},
"start_line": {
"type": "integer",
"description": "Start line of the snippet.",
},
"end_line": {
"type": "integer",
"description": "End line of the snippet.",
},
"justification": {
"type": "string",
"description": "Justification for why file_path is relevant and why the surrounding lines are irrelevant by indicating what functions are in the surrounding lines and what they do.",
},
},
"required": ["file_path", "start_line", "end_line", "justification"],
},
"description": "Use this to store a snippet. Only store paths you are CERTAIN are relevant and sufficient to solving the user request and be precise with the line numbers, and provides an entire coherent section of code. Make sure to store ALL of the files that are referenced in the issue title or description. You may store multiple snippets with the same file path.",
},
{
"name": "expand_directory",
"parameters": {
"type": "object",
"properties": {
"directory_path": {
"type": "string",
"description": "Directory to expand",
},
"justification": {
"type": "string",
"description": "Justification for expanding the directory.",
},
},
"required": ["directory_path", "justification"],
},
"description": "Expand an existing directory that is closed. This is used for exploration only and does not affect the snippets. If you expand a directory, you automatically expand all of its subdirectories, so do not list its subdirectories. Store all files or directories that are referenced in the issue title or descriptions.",
},
]
tools = [{"type": "function", "function": function} for function in functions]
def can_add_snippet(snippet: Snippet, current_snippets: list[Snippet]):
return (
len(snippet.xml) + sum([len(snippet.xml) for snippet in current_snippets])
<= ASSISTANT_MAX_CHARS
)
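# Holds the snippets, snippet scores, and directory tree that make up the assistant's current view of the repository.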
@dataclass
class RepoContextManager:
dir_obj: DirectoryTree
current_top_tree: str
snippets: list[Snippet]
snippet_scores: dict[str, float]
cloned_repo: ClonedRepo
current_top_snippets: list[Snippet] = []
@property
def top_snippet_paths(self):
return [snippet.file_path for snippet in self.current_top_snippets]
def remove_all_non_kept_paths(self, paths_to_keep: list[str]):
self.current_top_snippets = [
snippet
for snippet in self.current_top_snippets
if any(
snippet.file_path.startswith(path_to_keep)
for path_to_keep in paths_to_keep
)
]
self.dir_obj.remove_all_not_included(paths_to_keep)
def expand_all_directories(self, directories_to_expand: list[str]):
self.dir_obj.expand_directory(directories_to_expand)
def is_path_valid(self, path: str, directory: bool = False):
if directory:
return any(snippet.file_path.startswith(path) for snippet in self.snippets)
return any(snippet.file_path == path for snippet in self.snippets)
def format_context(
self,
unformatted_user_prompt: str,
query: str,
):
new_top_snippets: list[Snippet] = []
for snippet in self.current_top_snippets:
if can_add_snippet(snippet, new_top_snippets):
new_top_snippets.append(snippet)
self.current_top_snippets = new_top_snippets
top_snippets_str = [
f"- {snippet.denotation}" for snippet in self.current_top_snippets
]
[snippet.file_path for snippet in self.current_top_snippets]
snippets_in_repo_str = "\n".join(top_snippets_str)
logger.info(f"Snippets in repo:\n{snippets_in_repo_str}")
repo_tree = str(self.dir_obj)
user_prompt = unformatted_user_prompt.format(
query=query,
snippets_in_repo=snippets_in_repo_str,
repo_tree=repo_tree,
)
return user_prompt
def get_highest_scoring_snippet(self, file_path: str) -> Snippet:
snippet_key = (
lambda snippet: f"{snippet.file_path}:{snippet.start}:{snippet.end}"
)
filtered_snippets = [
snippet
for snippet in self.snippets
if snippet.file_path == file_path
and snippet not in self.current_top_snippets
]
if not filtered_snippets:
return None
highest_scoring_snippet = max(
filtered_snippets,
key=lambda snippet: self.snippet_scores[snippet_key(snippet)]
if snippet_key(snippet) in self.snippet_scores
else 0,
)
return highest_scoring_snippet
def add_snippets(self, snippets_to_add: list[Snippet]):
self.dir_obj.add_file_paths([snippet.file_path for snippet in snippets_to_add])
for snippet in snippets_to_add:
self.current_top_snippets.append(snippet)
# @file_cache(ignore_params=["repo_context_manager", "ticket_progress", "chat_logger"])
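# Run the context-pruning assistant over the repo context and return the curated RepoContextManager.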
def get_relevant_context(
query: str,
repo_context_manager: RepoContextManager,
ticket_progress: TicketProgress | None = None,
chat_logger: ChatLogger = None,
):
modify_iterations: int = 2
model = (
"gpt-3.5-turbo-1106"
if (chat_logger is None or chat_logger.use_faster_model())
and not IS_SELF_HOSTED
else "gpt-4-1106-preview"
)
posthog.capture(
chat_logger.data.get("username") if chat_logger is not None else "anonymous",
"call_assistant_api",
{
"query": query,
"model": model,
},
)
try:
user_prompt = repo_context_manager.format_context(
unformatted_user_prompt=unformatted_user_prompt,
query=query,
)
assistant = openai_retry_with_timeout(
client.beta.assistants.create,
name="Relevant Files Assistant",
instructions=sys_prompt,
tools=tools,
model=model,
)
thread = openai_retry_with_timeout(client.beta.threads.create)
_ = openai_retry_with_timeout(
client.beta.threads.messages.create,
thread.id,
role="user",
content=f"{user_prompt}",
)
run = openai_retry_with_timeout(
client.beta.threads.runs.create,
thread_id=thread.id,
assistant_id=assistant.id,
)
old_top_snippets = [
snippet for snippet in repo_context_manager.current_top_snippets
]
modify_context(thread, run, repo_context_manager, ticket_progress)
if len(repo_context_manager.current_top_snippets) == 0:
repo_context_manager.current_top_snippets = old_top_snippets
discord_log_error(f"Context manager empty ({ticket_progress.tracking_id})")
return repo_context_manager
except Exception as e:
logger.exception(e)
return repo_context_manager
def update_assistant_conversation(
run: Run,
thread: Thread,
ticket_progress: TicketProgress,
repo_context_manager: RepoContextManager,
):
assistant_conversation = AssistantConversation.from_ids(
assistant_id=run.assistant_id,
run_id=run.id,
thread_id=thread.id,
)
if ticket_progress:
if assistant_conversation:
ticket_progress.search_progress.pruning_conversation = (
assistant_conversation
)
ticket_progress.search_progress.repo_tree = str(repo_context_manager.dir_obj)
ticket_progress.search_progress.final_snippets = (
repo_context_manager.current_top_snippets
)
ticket_progress.save()
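# Execute the assistant's tool calls (file_search, preview_file, view_file_snippet, store_file_snippet, expand_directory) against the repo and return whether context selection has converged.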
def modify_context(
thread: Thread,
run: Run,
repo_context_manager: RepoContextManager,
ticket_progress: TicketProgress,
) -> bool | None:
max_iterations = 90
directories_to_expand = []
repo_context_manager.current_top_snippets = []
initial_file_paths = repo_context_manager.top_snippet_paths
paths_to_add = []
for iter in range(max_iterations):
run = openai_retry_with_timeout(
client.beta.threads.runs.retrieve,
thread_id=thread.id,
run_id=run.id,
)
if iter % 5 == 0:
update_assistant_conversation(
run, thread, ticket_progress, repo_context_manager
)
logger.info("iteration: " + str(iter))
if run.status == "completed" or run.status == "failed":
break
if (
run.status != "requires_action"
or run.required_action is None
or run.required_action.submit_tool_outputs is None
or run.required_action.submit_tool_outputs.tool_calls is None
):
time.sleep(3)
continue
tool_calls = run.required_action.submit_tool_outputs.tool_calls
tool_outputs = []
for tool_call in tool_calls:
try:
tool_call_arguments = re.sub(r"\\+'", "", tool_call.function.arguments)
function_input = json.loads(tool_call_arguments)
except:
logger.warning(
f"Could not parse function arguments: {tool_call_arguments}"
)
tool_outputs.append(
{
"tool_call_id": tool_call.id,
"output": "FAILURE: Could not parse function arguments.",
}
)
continue
current_top_snippets_string = "\n".join(
[
"- " + snippet.xml
for snippet in repo_context_manager.current_top_snippets
]
)
logger.info(f"Tool Call: {tool_call.function.name} {function_input}")
function_path_or_dir = function_input.get(
"file_path"
) or function_input.get("directory_path")
valid_path = False
output = ""
if tool_call.function.name == "file_search":
error_message = ""
try:
similar_file_paths = "\n".join(
[
f"- {path}"
for path in repo_context_manager.cloned_repo.get_similar_file_paths(
function_path_or_dir
)
]
)
valid_path = True
except:
similar_file_paths = ""
error_message = "FAILURE: This file path does not exist."
if error_message:
output = error_message
else:
output = (
f"SUCCESS: Here are the most similar file paths to {function_path_or_dir}:\n{similar_file_paths}"
if valid_path
else "FAILURE: This file path does not exist. Please try a new path."
)
elif tool_call.function.name == "view_file_snippet":
error_message = ""
for key in ["start_line", "end_line"]:
if key not in function_input:
logger.warning(
f"Key {key} not in function input {function_input}"
)
error_message = "FAILURE: Please provide a start and end line."
start_line = int(function_input["start_line"])
end_line = int(function_input["end_line"])
try:
file_contents = repo_context_manager.cloned_repo.get_file_contents(
function_path_or_dir
)
valid_path = True
except:
file_contents = ""
similar_file_paths = "\n".join(
[
f"- {path}"
for path in repo_context_manager.cloned_repo.get_similar_file_paths(
function_path_or_dir
)
]
)
error_message = f"FAILURE: This file path does not exist. Did you mean:\n{similar_file_paths}"
if start_line >= end_line:
error_message = "FAILURE: Start line must be less than end line."
if error_message:
output = error_message
else:
end_line = min(end_line, len(file_contents.splitlines()))
logger.info(f"start_line: {start_line}, end_line: {end_line}")
selected_file_contents = ""
lines = file_contents.splitlines()
expansion_width = 25
start_index = max(0, start_line - expansion_width)
for i, line in enumerate(lines[start_index:start_line]):
selected_file_contents += f"{i + start_index} | {line}\n"
selected_file_contents += "\n===START OF SNIPPET===\n"
for i, line in enumerate(lines[start_line:end_line]):
selected_file_contents += f"{i + start_line} | {line}\n"
selected_file_contents += "\n===END OF SNIPPET===\n"
for i, line in enumerate(
lines[end_line : end_line + expansion_width]
):
selected_file_contents += f"{i + end_line} | {line}\n"
output = (
f'Here are the contents of `{function_path_or_dir}:{start_line}:{end_line}`\n```\n{selected_file_contents}\n```\nCheck if there is additional relevant context surrounding the snippet BETWEEN the START and END tags necessary to solve the user request. If so, call view_file_snippet again with a larger span. If you are CERTAIN this snippet is COMPLETELY SUFFICIENT and RELEVANT, and no surrounding lines provide ANY additional relevant context, call store_file_snippet with the same parameters ({{"file_path": "{function_path_or_dir}", "start_line": "{start_line}", "end_line": "{end_line}"}}).'
if valid_path
else "FAILURE: This file path does not exist. Please try a new path."
)
elif tool_call.function.name == "store_file_snippet":
error_message = ""
for key in ["start_line", "end_line"]:
if key not in function_input:
logger.warning(
f"Key {key} not in function input {function_input}"
)
error_message = "FAILURE: Please provide a start and end line."
start_line = int(function_input["start_line"])
end_line = int(function_input["end_line"])
if end_line - start_line > 1000:
error_message = (
"FAILURE: Please provide a snippet of 1000 lines or less."
)
if start_line >= end_line:
error_message = "FAILURE: Start line must be less than end line."
try:
file_contents = repo_context_manager.cloned_repo.get_file_contents(
function_path_or_dir
)
valid_path = True
except:
file_contents = ""
similar_file_paths = "\n".join(
[
f"- {path}"
for path in repo_context_manager.cloned_repo.get_similar_file_paths(
function_path_or_dir
)
]
)
error_message = f"FAILURE: This file path does not exist. Did you mean:\n{similar_file_paths}"
if error_message:
output = error_message
else:
end_line = min(end_line, len(file_contents.splitlines()))
logger.info(f"start_line: {start_line}, end_line: {end_line}")
snippet = Snippet(
file_path=function_path_or_dir,
start=start_line,
end=end_line,
content=file_contents,
)
repo_context_manager.add_snippets([snippet])
paths_to_add.append(function_path_or_dir)
output = (
f"SUCCESS: {function_path_or_dir} was added with contents\n```\n{snippet.xml}\n```. Here are the current selected snippets:\n{current_top_snippets_string}"
if valid_path
else "FAILURE: This file path does not exist. Please try a new path."
)
elif tool_call.function.name == "expand_directory":
valid_path = repo_context_manager.is_path_valid(
function_path_or_dir, directory=True
)
repo_context_manager.expand_all_directories([function_path_or_dir])
dir_string = str(repo_context_manager.dir_obj)
output = (
f"SUCCESS: New repo_tree\n{dir_string}"
if valid_path
else "FAILURE: Invalid directory path. Please try a new path."
)
if valid_path:
directories_to_expand.append(function_path_or_dir)
elif tool_call.function.name == "preview_file":
error_message = ""
try:
code = repo_context_manager.cloned_repo.get_file_contents(
function_path_or_dir
)
valid_path = True
except:
code = ""
similar_file_paths = "\n".join(
[
f"- {path}"
for path in repo_context_manager.cloned_repo.get_similar_file_paths(
function_path_or_dir
)
]
)
error_message = f"FAILURE: This file path does not exist. Did you mean:\n{similar_file_paths}"
if error_message:
output = error_message
else:
file_preview = CodeTree.from_code(code).get_preview()
output = f"SUCCESS: Previewing file {function_path_or_dir}:\n\n{file_preview}"
else:
output = f"FAILURE: Invalid tool name {tool_call.function.name}"
logger.info(output)
logger.info("Current top snippets:")
for snippet in repo_context_manager.current_top_snippets:
logger.info(snippet.denotation)
logger.info("Paths to add:")
for snippet in paths_to_add:
logger.info(snippet)
tool_outputs.append(
{
"tool_call_id": tool_call.id,
"output": output,
}
)
justification = (
function_input["justification"]
if "justification" in function_input
else ""
)
logger.info(
f"Tool Call: {tool_call.function.name} {function_path_or_dir} {justification} Valid Tool Call: {valid_path}"
)
run = openai_retry_with_timeout(
client.beta.threads.runs.submit_tool_outputs,
thread_id=thread.id,
run_id=run.id,
tool_outputs=tool_outputs,
)
else:
logger.warning(
f"Context pruning iteration taking too long. Status: {run.status}"
)
assistant_conversation = AssistantConversation.from_ids(
assistant_id=run.assistant_id,
run_id=run.id,
thread_id=thread.id,
)
if ticket_progress:
if assistant_conversation:
ticket_progress.search_progress.pruning_conversation = (
assistant_conversation
)
ticket_progress.save()
logger.info(
f"Context Management End:\npaths_to_add: {paths_to_add}\ndirectories_to_expand: {directories_to_expand}"
)
if directories_to_expand:
repo_context_manager.expand_all_directories(directories_to_expand)
logger.info(
f"Context Management End:\ncurrent snippet paths: {repo_context_manager.top_snippet_paths}"
)
paths_changed = set(initial_file_paths) != set(
repo_context_manager.top_snippet_paths
)
repo_context_manager.current_top_snippets = [
snippet
for snippet in repo_context_manager.current_top_snippets
if snippet.file_path != "sweep.yaml"
]
# if the paths have not changed or all tools were empty, we are done
return not (paths_changed and (paths_to_add or directories_to_expand))
if __name__ == "__main__":
import os
from sweepai.utils.ticket_utils import prep_snippets
installation_id = os.environ["INSTALLATION_ID"]
cloned_repo = ClonedRepo("sweepai/sweep", installation_id, "main")
query = (
"allow sweep.yaml to be read from the user/organization's .github repository"
)
# golden response is
# sweepai/handlers/create_pr.py:401-428
# sweepai/config/client.py:178-282
ticket_progress = TicketProgress(
tracking_id="test",
)
repo_context_manager = prep_snippets(cloned_repo, query, ticket_progress)
rcm = get_relevant_context(
query,
repo_context_manager,
ticket_progress,
chat_logger=ChatLogger({"username": "wwzeng1"}),
)
for snippet in rcm.current_top_snippets:
print(snippet.denotation)
| [
"<repo_tree>\n{repo_tree}\n</repo_tree>\n\n## Relevant Snippets\nHere are potentially relevant snippets in the repo in decreasing relevance that you should use the preview_file tool for:\n{snippets_in_repo}\n\n## User Request\n{query}",
"You are a brilliant engineer assigned to the following Github issue. You must gather ALL RELEVANT information from the codebase that allows you to completely solve the issue. It is very important that you get this right and do not miss any relevant lines of code.\n\n## Instructions\nYou initially start with no snippets and will use the store_file_snippet and expand_directory to add snippets to the context. You will iteratively use the file_search, preview_file and view_file_snippet tools to help you find the relevant snippets to store.\n\nYou are provided \"Relevant Snippets\", which are snippets relevant to the user request. These snippets are retrieved by a lexical search over the codebase, but are NOT in the context initially.\n\nYou will do this by using the following process for every relevant file:\n\n1. First use the preview_file tool to preview all files that are relevant, starting with file paths and entities mentioned in \"User Request\", then those in \"Relevant Snippets\". For example, if the class foo.bar.Bar was mentioned, be sure to preview foo/bar.py. If the file is irrelevant, move onto the next file. If you don't know the full file path, use file_search with the file name.\n2. If the file seems relevant, use the view_file_snippet tool to view specific line numbers of a file. We want to find all line numbers relevant to solve the user request. So if the surrounding lines are relevant, use the view_file_snippet tool again with a larger span to view the surrounding lines. Repeat this process until you are certain you have the maximal relevant span.\n3. Finally, when you are certain you have the maximal relevant span, use the store_file_snippet and expand_directory tools to curate the optimal context (snippets_in_repo and repo_tree) until they allow you to completely solve the user request. If you don't know the correct line numbers, complete step one until you find the exact line numbers.\n\nRepeat this process until you have the perfect context to solve the user request. Ensure you have checked ALL files referenced in the user request."
] |
2024-01-10 | sweepai/sweep | tests~archive~test_naive_chunker.py | from sweepai.utils.utils import chunk_code
file_contents = r"""\
# TODO: Add file validation
import math
import re
import traceback
import openai
import github
from github import GithubException, BadCredentialsException
from tabulate import tabulate
from tqdm import tqdm
from sweepai.logn import logger, LogTask
from sweepai.core.context_pruning import ContextPruning
from sweepai.core.documentation_searcher import extract_relevant_docs
from sweepai.core.entities import (
ProposedIssue,
SandboxResponse,
Snippet,
NoFilesException,
SweepContext,
MaxTokensExceeded,
EmptyRepository,
)
from sweepai.core.external_searcher import ExternalSearcher
from sweepai.core.slow_mode_expand import SlowModeBot
from sweepai.core.sweep_bot import SweepBot
from sweepai.core.prompts import issue_comment_prompt
# from sandbox.sandbox_utils import Sandbox
from sweepai.handlers.create_pr import (
create_pr_changes,
create_config_pr,
safe_delete_sweep_branch,
)
from sweepai.handlers.on_comment import on_comment
from sweepai.handlers.on_review import review_pr
from sweepai.utils.buttons import create_action_buttons
from sweepai.utils.chat_logger import ChatLogger
from sweepai.config.client import (
SweepConfig,
get_documentation_dict,
)
from sweepai.config.server import (
ENV,
MONGODB_URI,
OPENAI_API_KEY,
GITHUB_BOT_USERNAME,
GITHUB_LABEL_NAME,
OPENAI_USE_3_5_MODEL_ONLY,
WHITELISTED_REPOS,
)
from sweepai.utils.ticket_utils import *
from sweepai.utils.event_logger import posthog
from sweepai.utils.github_utils import ClonedRepo, get_github_client
from sweepai.utils.prompt_constructor import HumanMessagePrompt
from sweepai.utils.search_utils import search_snippets
from sweepai.utils.tree_utils import DirectoryTree
openai.api_key = OPENAI_API_KEY
@LogTask()
def on_ticket(
title: str,
summary: str,
issue_number: int,
issue_url: str,
username: str,
repo_full_name: str,
repo_description: str,
installation_id: int,
comment_id: int = None,
edited: bool = False,
):
(
title,
slow_mode,
do_map,
subissues_mode,
sandbox_mode,
fast_mode,
lint_mode,
) = strip_sweep(title)
# Flow:
# 1. Get relevant files
# 2: Get human message
# 3. Get files to change
# 4. Get file changes
# 5. Create PR
summary = summary or ""
summary = re.sub(
"<details (open)?>\n<summary>Checklist</summary>.*",
"",
summary,
flags=re.DOTALL,
).strip()
summary = re.sub("Checklist:\n\n- \[[ X]\].*", "", summary, flags=re.DOTALL).strip()
repo_name = repo_full_name
user_token, g = get_github_client(installation_id)
repo = g.get_repo(repo_full_name)
current_issue = repo.get_issue(number=issue_number)
assignee = current_issue.assignee.login if current_issue.assignee else None
if assignee is None:
assignee = current_issue.user.login
chat_logger = (
ChatLogger(
{
"repo_name": repo_name,
"title": title,
"summary": summary,
"issue_number": issue_number,
"issue_url": issue_url,
"username": username if not username.startswith("sweep") else assignee,
"repo_full_name": repo_full_name,
"repo_description": repo_description,
"installation_id": installation_id,
"type": "ticket",
"mode": ENV,
"comment_id": comment_id,
"edited": edited,
}
)
if MONGODB_URI
else None
)
if chat_logger:
is_paying_user = chat_logger.is_paying_user()
is_trial_user = chat_logger.is_trial_user()
use_faster_model = OPENAI_USE_3_5_MODEL_ONLY or chat_logger.use_faster_model(g)
else:
is_paying_user = True
is_trial_user = False
use_faster_model = False
if fast_mode:
use_faster_model = True
sweep_context = SweepContext.create(
username=username,
issue_url=issue_url,
use_faster_model=use_faster_model,
is_paying_user=is_paying_user,
repo=repo,
token=user_token,
)
logger.print(sweep_context)
if not comment_id and not edited and chat_logger:
chat_logger.add_successful_ticket(
gpt3=use_faster_model
) # moving higher, will increment the issue regardless of whether it's a success or not
organization, repo_name = repo_full_name.split("/")
metadata = {
"issue_url": issue_url,
"repo_full_name": repo_full_name,
"organization": organization,
"repo_name": repo_name,
"repo_description": repo_description,
"username": username,
"comment_id": comment_id,
"title": title,
"installation_id": installation_id,
"function": "on_ticket",
"edited": edited,
"model": "gpt-3.5" if use_faster_model else "gpt-4",
"tier": "pro" if is_paying_user else "free",
"mode": ENV,
"slow_mode": slow_mode,
"do_map": do_map,
"subissues_mode": subissues_mode,
"sandbox_mode": sandbox_mode,
"fast_mode": fast_mode,
}
# logger.bind(**metadata)
posthog.capture(username, "started", properties=metadata)
logger.info(f"Getting repo {repo_full_name}")
if current_issue.state == "closed":
logger.warning(f"Issue {issue_number} is closed")
posthog.capture(username, "issue_closed", properties=metadata)
return {"success": False, "reason": "Issue is closed"}
current_issue.edit(body=summary)
item_to_react_to = (
current_issue.get_comment(comment_id) if comment_id else current_issue
)
replies_text = ""
comments = list(current_issue.get_comments())
if comment_id:
logger.info(f"Replying to comment {comment_id}...")
replies_text = "\nComments:\n" + "\n".join(
[
issue_comment_prompt.format(
username=comment.user.login,
reply=comment.body,
)
for comment in comments
if comment.user.type == "User"
]
)
summary = summary if summary else ""
prs = repo.get_pulls(
state="open", sort="created", base=SweepConfig.get_branch(repo)
)
for pr in prs:
# Check if this issue is mentioned in the PR, and pr is owned by bot
# This is done in create_pr, (pr_description = ...)
if (
pr.user.login == GITHUB_BOT_USERNAME
and f"Fixes #{issue_number}.\n" in pr.body
):
success = safe_delete_sweep_branch(pr, repo)
eyes_reaction = item_to_react_to.create_reaction("eyes")
# If SWEEP_BOT reacted to item_to_react_to with "rocket", then remove it.
reactions = item_to_react_to.get_reactions()
for reaction in reactions:
if reaction.content == "rocket" and reaction.user.login == GITHUB_BOT_USERNAME:
item_to_react_to.delete_reaction(reaction.id)
# Removed 1, 3
progress_headers = [
None,
"Step 1: 🔎 Searching",
"Step 2: ⌨️ Coding",
"Step 3: 🔁 Code Review",
]
config_pr_url = None
# Find the first comment made by the bot
issue_comment = None
tickets_allocated = 5
if is_trial_user:
tickets_allocated = 15
if is_paying_user:
tickets_allocated = 500
ticket_count = (
max(tickets_allocated - chat_logger.get_ticket_count(), 0)
if chat_logger
else 999
)
daily_ticket_count = (
(3 - chat_logger.get_ticket_count(use_date=True) if not use_faster_model else 0)
if chat_logger
else 999
)
model_name = "GPT-3.5" if use_faster_model else "GPT-4"
payment_link = "https://buy.stripe.com/00g5npeT71H2gzCfZ8"
daily_message = (
f" and {daily_ticket_count} for the day"
if not is_paying_user and not is_trial_user
else ""
)
user_type = "💎 Sweep Pro" if is_paying_user else "⚡ Sweep Free Trial"
gpt_tickets_left_message = (
f"{ticket_count} GPT-4 tickets left for the month"
if not is_paying_user
else "unlimited GPT-4 tickets"
)
payment_message = (
f"{user_type}: I used {model_name} to create this ticket. You have {gpt_tickets_left_message}{daily_message}."
+ (
f" For more GPT-4 tickets, visit [our payment portal.]({payment_link})"
if not is_paying_user
else ""
)
)
payment_message_start = (
f"{user_type}: I'm creating this ticket using {model_name}. You have {gpt_tickets_left_message}{daily_message}."
+ (
f" For more GPT-4 tickets, visit [our payment portal.]({payment_link})"
if not is_paying_user
else ""
)
)
def get_comment_header(index, errored=False, pr_message="", done=False):
config_pr_message = (
"\n" + f"* Install Sweep Configs: [Pull Request]({config_pr_url})"
if config_pr_url is not None
else ""
)
# Why is this so convoluted
# config_pr_message = " To retrigger Sweep, edit the issue.\n" + config_pr_message
actions_message = create_action_buttons(
[
"↻ Restart Sweep",
]
)
if index < 0:
index = 0
if index == 4:
return pr_message + f"\n\n---\n{actions_message}" + config_pr_message
total = len(progress_headers)
index += 1 if done else 0
index *= 100 / total
index = int(index)
index = min(100, index)
if errored:
return (
f""
+ f"\n\n---\n{actions_message}"
)
return (
f""
+ ("\n" + stars_suffix if index != -1 else "")
+ "\n"
+ payment_message_start
# + f"\n\n---\n{actions_message}"
+ config_pr_message
)
# Find Sweep's previous comment
logger.print("USERNAME", GITHUB_BOT_USERNAME)
for comment in comments:
logger.print("COMMENT", comment.user.login)
if comment.user.login == GITHUB_BOT_USERNAME:
logger.print("Found comment")
issue_comment = comment
try:
config = SweepConfig.get_config(repo)
except EmptyRepository as e:
logger.info("Empty repo")
first_comment = (
"Sweep is currently not supported on empty repositories. Please add some"
f" code to your repository and try again.\n{sep}##"
f" {progress_headers[1]}\n{bot_suffix}{discord_suffix}"
)
if issue_comment is None:
issue_comment = current_issue.create_comment(first_comment)
else:
issue_comment.edit(first_comment)
return {"success": False}
cloned_repo = ClonedRepo(
repo_full_name, installation_id=installation_id, token=user_token
)
num_of_files = cloned_repo.get_num_files_from_repo()
time_estimate = math.ceil(3 + 5 * num_of_files / 1000)
indexing_message = (
"I'm searching for relevant snippets in your repository. If this is your first"
" time using Sweep, I'm indexing your repository. This may take up to"
f" {time_estimate} minutes. I'll let you know when I'm done."
)
first_comment = (
f"{get_comment_header(0)}\n{sep}I am currently looking into this ticket! I"
" will update the progress of the ticket in this comment. I am currently"
f" searching through your code, looking for relevant snippets.\n{sep}##"
f" {progress_headers[1]}\n{indexing_message}{bot_suffix}{discord_suffix}"
)
if issue_comment is None:
issue_comment = current_issue.create_comment(first_comment)
else:
issue_comment.edit(first_comment)
# Comment edit function
past_messages = {}
current_index = 0
# Random variables to save in case of errors
table = None # Show plan so user can finetune prompt
def edit_sweep_comment(message: str, index: int, pr_message="", done=False):
nonlocal current_index, user_token, g, repo, issue_comment
# -1 = error, -2 = retry
# Only update the progress bar if the issue generation errors.
errored = index == -1
if index >= 0:
past_messages[index] = message
current_index = index
agg_message = None
# Include progress history
# index = -2 is reserved for
for i in range(
current_index + 2
): # go to next header (for Working on it... text)
if i == 0 or i >= len(progress_headers):
continue # skip None header
header = progress_headers[i]
if header is not None:
header = "## " + header + "\n"
else:
header = "No header\n"
msg = header + (past_messages.get(i) or "Working on it...")
if agg_message is None:
agg_message = msg
else:
agg_message = agg_message + f"\n{sep}" + msg
suffix = bot_suffix + discord_suffix
if errored:
agg_message = (
"## ❌ Unable to Complete PR"
+ "\n"
+ message
+ "\n\nFor bonus GPT-4 tickets, please report this bug on"
" **[Discord](https://discord.com/invite/sweep-ai)**."
)
if table is not None:
agg_message = (
agg_message
+ f"\n{sep}Please look at the generated plan. If something looks"
f" wrong, please add more details to your issue.\n\n{table}"
)
suffix = bot_suffix # don't include discord suffix for error messages
# Update the issue comment
try:
issue_comment.edit(
f"{get_comment_header(current_index, errored, pr_message, done=done)}\n{sep}{agg_message}{suffix}"
)
except BadCredentialsException:
logger.error("Bad credentials, refreshing token")
_user_token, g = get_github_client(installation_id)
repo = g.get_repo(repo_full_name)
issue_comment = repo.get_issue(current_issue.number)
issue_comment.edit(
f"{get_comment_header(current_index, errored, pr_message, done=done)}\n{sep}{agg_message}{suffix}"
)
if len(title + summary) < 20:
logger.info("Issue too short")
edit_sweep_comment(
(
"Please add more details to your issue. I need at least 20 characters"
" to generate a plan."
),
-1,
)
return {"success": True}
if (
repo_name.lower() not in WHITELISTED_REPOS
and not is_paying_user
and not is_trial_user
):
if ("sweep" in repo_name.lower()) or ("test" in repo_name.lower()):
logger.info("Test repository detected")
edit_sweep_comment(
(
"Sweep does not work on test repositories. Please create an issue"
" on a real repository. If you think this is a mistake, please"
" report this at https://discord.gg/sweep."
),
-1,
)
return {"success": False}
if lint_mode:
# Get files to change
# Create new branch
# Send request to endpoint
for file_path in []:
SweepBot.run_sandbox(
repo.html_url, file_path, None, user_token, only_lint=True
)
logger.info("Fetching relevant files...")
try:
snippets, tree = search_snippets(
cloned_repo,
f"{title}\n{summary}\n{replies_text}",
num_files=num_of_snippets_to_query,
)
assert len(snippets) > 0
except SystemExit:
raise SystemExit
except Exception as e:
trace = traceback.format_exc()
logger.error(e)
logger.error(trace)
edit_sweep_comment(
(
"It looks like an issue has occurred around fetching the files."
" Perhaps the repo has not been initialized. If this error persists"
f" contact [email protected].\n\n> @{username}, please edit the issue"
" description to include more details and I will automatically"
" relaunch."
),
-1,
)
log_error(
is_paying_user,
is_trial_user,
username,
issue_url,
"File Fetch",
str(e) + "\n" + traceback.format_exc(),
priority=1,
)
raise e
snippets = post_process_snippets(
snippets, max_num_of_snippets=2 if use_faster_model else 5
)
if not repo_description:
repo_description = "No description provided."
message_summary = summary + replies_text
external_results = ExternalSearcher.extract_summaries(message_summary)
if external_results:
message_summary += "\n\n" + external_results
user_dict = get_documentation_dict(repo)
docs_results = ""
try:
docs_results = extract_relevant_docs(
title + message_summary, user_dict, chat_logger
)
if docs_results:
message_summary += "\n\n" + docs_results
except SystemExit:
raise SystemExit
except Exception as e:
logger.error(f"Failed to extract docs: {e}")
human_message = HumanMessagePrompt(
repo_name=repo_name,
issue_url=issue_url,
username=username,
repo_description=repo_description.strip(),
title=title,
summary=message_summary,
snippets=snippets,
tree=tree,
)
context_pruning = ContextPruning(chat_logger=chat_logger)
(
snippets_to_ignore,
excluded_dirs,
) = context_pruning.prune_context( # TODO, ignore directories
human_message, repo=repo
)
snippets = post_process_snippets(
snippets, max_num_of_snippets=5, exclude_snippets=snippets_to_ignore
)
dir_obj = DirectoryTree()
dir_obj.parse(tree)
dir_obj.remove_multiple(excluded_dirs)
tree = str(dir_obj)
logger.info(f"New snippets: {snippets}")
logger.info(f"New tree: {tree}")
human_message = HumanMessagePrompt(
repo_name=repo_name,
issue_url=issue_url,
username=username,
repo_description=repo_description.strip(),
title=title,
summary=message_summary,
snippets=snippets,
tree=tree,
)
sweep_bot = SweepBot.from_system_message_content(
human_message=human_message,
repo=repo,
is_reply=bool(comments),
chat_logger=chat_logger,
sweep_context=sweep_context,
)
# Check repository for sweep.yml file.
sweep_yml_exists = False
for content_file in repo.get_contents(""):
if content_file.name == "sweep.yaml":
sweep_yml_exists = True
break
# If sweep.yaml does not exist, then create a new PR that simply creates the sweep.yaml file.
if not sweep_yml_exists:
try:
logger.info("Creating sweep.yaml file...")
config_pr = create_config_pr(sweep_bot)
config_pr_url = config_pr.html_url
edit_sweep_comment(message="", index=-2)
except SystemExit:
raise SystemExit
except Exception as e:
logger.error(
"Failed to create new branch for sweep.yaml file.\n",
e,
traceback.format_exc(),
)
else:
logger.info("sweep.yaml file already exists.")
try:
# ANALYZE SNIPPETS
newline = "\n"
edit_sweep_comment(
"I found the following snippets in your repository. I will now analyze"
" these snippets and come up with a plan."
+ "\n\n"
+ create_collapsible(
"Some code snippets I looked at (click to expand). If some file is"
" missing from here, you can mention the path in the ticket"
" description.",
"\n".join(
[
f"https://github.com/{organization}/{repo_name}/blob/{repo.get_commits()[0].sha}/{snippet.file_path}#L{max(snippet.start, 1)}-L{min(snippet.end, snippet.content.count(newline) - 1)}\n"
for snippet in snippets
]
),
)
+ (
create_collapsible(
"I also found the following external resources that might be helpful:",
f"\n\n{external_results}\n\n",
)
if external_results
else ""
)
+ (f"\n\n{docs_results}\n\n" if docs_results else ""),
1,
)
if do_map:
subissues: list[ProposedIssue] = sweep_bot.generate_subissues()
edit_sweep_comment(
f"I'm creating the following subissues:\n\n"
+ "\n\n".join(
[
f"#{subissue.title}:\n" + blockquote(subissue.body)
for subissue in subissues
]
),
2,
)
for subissue in tqdm(subissues):
subissue.issue_id = repo.create_issue(
title="Sweep: " + subissue.title,
body=subissue.body + f"\n\nParent issue: #{issue_number}",
assignee=username,
).number
subissues_checklist = "\n\n".join(
[
f"- [ ] #{subissue.issue_id}\n\n"
+ blockquote(f"**{subissue.title}**\n{subissue.body}")
for subissue in subissues
]
)
current_issue.edit(
body=summary + "\n\n---\n\nChecklist:\n\n" + subissues_checklist
)
edit_sweep_comment(
f"I finished creating the subissues! Track them at:\n\n"
+ "\n".join(f"* #{subissue.issue_id}" for subissue in subissues),
3,
done=True,
)
edit_sweep_comment(f"N/A", 4)
edit_sweep_comment(f"I finished creating all the subissues.", 5)
return {"success": True}
# COMMENT ON ISSUE
# TODO: removed issue commenting here
logger.info("Fetching files to modify/create...")
file_change_requests, plan = sweep_bot.get_files_to_change()
if not file_change_requests:
if len(title + summary) < 60:
edit_sweep_comment(
(
"Sorry, I could not find any files to modify, can you please"
" provide more details? Please make sure that the title and"
" summary of the issue are at least 60 characters."
),
-1,
)
else:
edit_sweep_comment(
(
"Sorry, I could not find any files to modify, can you please"
" provide more details?"
),
-1,
)
raise Exception("No files to modify.")
sweep_bot.summarize_snippets()
file_change_requests = sweep_bot.validate_file_change_requests(
file_change_requests
)
table = tabulate(
[
[
f"`{file_change_request.filename}`",
file_change_request.instructions_display.replace(
"\n", "<br/>"
).replace("```", "\\```"),
]
for file_change_request in file_change_requests
],
headers=["File Path", "Proposed Changes"],
tablefmt="pipe",
)
# edit_sweep_comment(
# "From looking through the relevant snippets, I decided to make the"
# " following modifications:\n\n" + table + "\n\n",
# 2,
# )
# TODO(lukejagg): Generate PR after modifications are made
# CREATE PR METADATA
logger.info("Generating PR...")
pull_request = sweep_bot.generate_pull_request()
# pull_request_content = pull_request.content.strip().replace("\n", "\n>")
# pull_request_summary = f"**{pull_request.title}**\n`{pull_request.branch_name}`\n>{pull_request_content}\n"
# edit_sweep_comment(
# (
# "I have created a plan for writing the pull request. I am now working"
# " my plan and coding the required changes to address this issue. Here"
# f" is the planned pull request:\n\n{pull_request_summary}"
# ),
# 3,
# )
logger.info("Making PR...")
files_progress: list[tuple[str, str, str, str]] = [
(
file_change_request.filename,
file_change_request.instructions_display,
"⏳ In Progress",
"",
)
for file_change_request in file_change_requests
]
checkboxes_progress: list[tuple[str, str, str]] = [
(file_change_request.filename, file_change_request.instructions, " ")
for file_change_request in file_change_requests
]
checkboxes_contents = "\n".join(
[
create_checkbox(f"`{filename}`", blockquote(instructions), check == "X")
for filename, instructions, check in checkboxes_progress
]
)
checkboxes_collapsible = create_collapsible(
"Checklist", checkboxes_contents, opened=True
)
issue = repo.get_issue(number=issue_number)
issue.edit(body=summary + "\n\n" + checkboxes_collapsible)
delete_branch = False
generator = create_pr_changes( # make this async later
file_change_requests,
pull_request,
sweep_bot,
username,
installation_id,
issue_number,
chat_logger=chat_logger,
)
edit_sweep_comment(checkboxes_contents, 2)
response = {"error": NoFilesException()}
for item in generator:
if isinstance(item, dict):
response = item
break
file_change_request, changed_file, sandbox_response, commit = item
sandbox_response: SandboxResponse | None = sandbox_response
format_exit_code = (
lambda exit_code: "✓" if exit_code == 0 else f"❌ (`{exit_code}`)"
)
logger.print(sandbox_response)
error_logs = (
(
create_collapsible(
"Sandbox Execution Logs",
blockquote(
"\n\n".join(
[
create_collapsible(
f"<code>{execution.command.format(file_path=file_change_request.filename)}</code> {i + 1}/{len(sandbox_response.executions)} {format_exit_code(execution.exit_code)}",
f"<pre>{clean_logs(execution.output)}</pre>",
i == len(sandbox_response.executions) - 1,
)
for i, execution in enumerate(
sandbox_response.executions
)
if len(sandbox_response.executions) > 0
# And error code check
]
)
),
opened=True,
)
)
if sandbox_response
else ""
)
if changed_file:
logger.print("Changed File!")
commit_hash = (
commit.sha
if commit is not None
else repo.get_branch(pull_request.branch_name).commit.sha
)
commit_url = f"https://github.com/{repo_full_name}/commit/{commit_hash}"
checkboxes_progress = [
(
(
f"`{filename}` ✅ Commit [`{commit_hash[:7]}`]({commit_url})",
blockquote(instructions) + error_logs,
"X",
)
if file_change_request.filename == filename
else (filename, instructions, progress)
)
for filename, instructions, progress in checkboxes_progress
]
else:
logger.print("Didn't change file!")
checkboxes_progress = [
(
(
f"`{filename}` ❌ Failed",
blockquote(instructions) + error_logs,
"X",
)
if file_change_request.filename == filename
else (filename, instructions, progress)
)
for filename, instructions, progress in checkboxes_progress
]
checkboxes_contents = "\n".join(
[
checkbox_template.format(
check=check,
filename=filename,
instructions=instructions,
)
for filename, instructions, check in checkboxes_progress
]
)
checkboxes_collapsible = collapsible_template.format(
summary="Checklist",
body=checkboxes_contents,
opened="open",
)
issue = repo.get_issue(number=issue_number)
issue.edit(body=summary + "\n\n" + checkboxes_collapsible)
logger.info(files_progress)
logger.info(f"Edited {file_change_request.filename}")
edit_sweep_comment(checkboxes_contents, 2)
if not response.get("success"):
raise Exception(f"Failed to create PR: {response.get('error')}")
pr_changes = response["pull_request"]
edit_sweep_comment(
"I have finished coding the issue. I am now reviewing it for completeness.",
3,
)
change_location = f" [`{pr_changes.pr_head}`](https://github.com/{repo_full_name}/commits/{pr_changes.pr_head}).\n\n"
review_message = "Here are my self-reviews of my changes at" + change_location
lint_output = None
try:
current_issue.delete_reaction(eyes_reaction.id)
except:
pass
changes_required = False
try:
# Todo(lukejagg): Pass sandbox linter results to review_pr
# CODE REVIEW
changes_required, review_comment = review_pr(
repo=repo,
pr=pr_changes,
issue_url=issue_url,
username=username,
repo_description=repo_description,
title=title,
summary=summary,
replies_text=replies_text,
tree=tree,
lint_output=lint_output,
plan=plan, # plan for the PR
chat_logger=chat_logger,
)
# Todo(lukejagg): Execute sandbox after each iteration
lint_output = None
review_message += (
f"Here is the {ordinal(1)} review\n"
+ blockquote(review_comment)
+ "\n\n"
)
if changes_required:
edit_sweep_comment(
review_message + "\n\nI'm currently addressing these suggestions.",
3,
)
logger.info(f"Addressing review comment {review_comment}")
on_comment(
repo_full_name=repo_full_name,
repo_description=repo_description,
comment=review_comment,
username=username,
installation_id=installation_id,
pr_path=None,
pr_line_position=None,
pr_number=None,
pr=pr_changes,
chat_logger=chat_logger,
repo=repo,
)
except SystemExit:
raise SystemExit
except Exception as e:
logger.error(traceback.format_exc())
logger.error(e)
if changes_required:
edit_sweep_comment(
review_message + "\n\nI finished incorporating these changes.",
3,
)
else:
edit_sweep_comment(
f"I have finished reviewing the code for completeness. I did not find errors for {change_location}.",
3,
)
is_draft = config.get("draft", False)
try:
pr = repo.create_pull(
title=pr_changes.title,
body=pr_changes.body,
head=pr_changes.pr_head,
base=SweepConfig.get_branch(repo),
draft=is_draft,
)
except GithubException as e:
is_draft = False
pr = repo.create_pull(
title=pr_changes.title,
body=pr_changes.body,
head=pr_changes.pr_head,
base=SweepConfig.get_branch(repo),
draft=is_draft,
)
pr.add_to_labels(GITHUB_LABEL_NAME)
current_issue.create_reaction("rocket")
logger.info("Running github actions...")
try:
if is_draft:
logger.info("Skipping github actions because PR is a draft")
else:
commit = pr.get_commits().reversed[0]
check_runs = commit.get_check_runs()
for check_run in check_runs:
check_run.rerequest()
except SystemExit:
raise SystemExit
except Exception as e:
logger.error(e)
# Completed code review
edit_sweep_comment(
review_message + "\n\nSuccess! 🚀",
4,
pr_message=(
f"## Here's the PR! [{pr.html_url}]({pr.html_url}).\n{payment_message}"
),
done=True,
)
logger.info("Add successful ticket to counter")
except MaxTokensExceeded as e:
logger.info("Max tokens exceeded")
log_error(
is_paying_user,
is_trial_user,
username,
issue_url,
"Max Tokens Exceeded",
str(e) + "\n" + traceback.format_exc(),
priority=2,
)
if chat_logger.is_paying_user():
edit_sweep_comment(
(
f"Sorry, I could not edit `{e.filename}` as this file is too long."
" We are currently working on improved file streaming to address"
" this issue.\n"
),
-1,
)
else:
edit_sweep_comment(
(
f"Sorry, I could not edit `{e.filename}` as this file is too"
" long.\n\nIf this file is incorrect, please describe the desired"
" file in the prompt. However, if you would like to edit longer"
" files, consider upgrading to [Sweep Pro](https://sweep.dev/) for"
" longer context lengths.\n"
),
-1,
)
delete_branch = True
raise e
except NoFilesException as e:
logger.info("Sweep could not find files to modify")
log_error(
is_paying_user,
is_trial_user,
username,
issue_url,
"Sweep could not find files to modify",
str(e) + "\n" + traceback.format_exc(),
priority=2,
)
edit_sweep_comment(
(
"Sorry, Sweep could not find any appropriate files to edit to address"
" this issue. If this is a mistake, please provide more context and I"
f" will retry!\n\n> @{username}, please edit the issue description to"
" include more details about this issue."
),
-1,
)
delete_branch = True
raise e
except openai.error.InvalidRequestError as e:
logger.error(traceback.format_exc())
logger.error(e)
edit_sweep_comment(
(
"I'm sorry, but it looks our model has ran out of context length. We're"
" trying to make this happen less, but one way to mitigate this is to"
" code smaller files. If this error persists report it at"
" https://discord.gg/sweep."
),
-1,
)
log_error(
is_paying_user,
is_trial_user,
username,
issue_url,
"Context Length",
str(e) + "\n" + traceback.format_exc(),
priority=2,
)
posthog.capture(
username,
"failed",
properties={
"error": str(e),
"reason": "Invalid request error / context length",
**metadata,
},
)
delete_branch = True
raise e
except SystemExit:
raise SystemExit
except Exception as e:
logger.error(traceback.format_exc())
logger.error(e)
# title and summary are defined elsewhere
if len(title + summary) < 60:
edit_sweep_comment(
(
"I'm sorry, but it looks like an error has occurred due to"
" insufficient information. Be sure to create a more detailed issue"
" so I can better address it. If this error persists report it at"
" https://discord.gg/sweep."
),
-1,
)
else:
edit_sweep_comment(
(
"I'm sorry, but it looks like an error has occurred. Try changing"
" the issue description to re-trigger Sweep. If this error persists"
" contact [email protected]."
),
-1,
)
log_error(
is_paying_user,
is_trial_user,
username,
issue_url,
"Workflow",
str(e) + "\n" + traceback.format_exc(),
priority=1,
)
posthog.capture(
username,
"failed",
properties={"error": str(e), "reason": "Generic error", **metadata},
)
raise e
else:
try:
item_to_react_to.delete_reaction(eyes_reaction.id)
item_to_react_to.create_reaction("rocket")
except SystemExit:
raise SystemExit
except Exception as e:
logger.error(e)
finally:
cloned_repo.delete()
if delete_branch:
try:
if pull_request.branch_name.startswith("sweep"):
repo.get_git_ref(f"heads/{pull_request.branch_name}").delete()
else:
raise Exception(
f"Branch name {pull_request.branch_name} does not start with sweep/"
)
except SystemExit:
raise SystemExit
except Exception as e:
logger.error(e)
logger.error(traceback.format_exc())
logger.print("Deleted branch", pull_request.branch_name)
posthog.capture(username, "success", properties={**metadata})
logger.info("on_ticket success")
return {"success": True}
"""
chunks = chunk_code(file_contents, "api.py", 10000, 200)
# with open('output.csv', 'w', newline='') as file:
# writer = csv.writer(file)
# for chunk in chunks:
# writer.writerow([chunk.content])
metadata = """Repo: sweepai/sweep: Sweep: AI-powered Junior Developer for small features and bug fixes.
Issue Url: https://github.com/sweepai/sweep/issues/1648
Username: wwzeng1"""
issue = """Move any code that can be moved out of on_ticket.py into ticket_utils.py
Don't move the core logic, just any helper methods, like ones that edit sweeps comment"""
# dic = [x for x in range(100)]
# def chat(index, content):
# chat = ChatGPT.from_system_message_string(summarize_snippet_system_prompt, chat_logger=None)
# f = chat.chat(
# summarize_snippet_prompt.format(
# code=content,
# metadata=metadata,
# issue=issue,
# ),
# model="gpt-3.5-turbo-16k-0613"
# )
# dic[index] = content
# return f
# threads = []
# for index, content in enumerate(chunks):
# import threading
# t = threading.Thread(target=chat, args=(index, content.content,))
# t.start()
# threads.append(t)
# for i, t in enumerate(threads):
# print(i)
# t.join()
# dic = []
# from tqdm import tqdm
# for chunk in tqdm(chunks):
# chat = ChatGPT.from_system_message_string(summarize_snippet_system_prompt, chat_logger=None)
# f = chat.chat(
# summarize_snippet_prompt.format(
# code=chunk.content,
# metadata=metadata,
# issue=issue,
# ),
# model="gpt-3.5-turbo-16k-0613",
# temperature=0.1
# )
# dic.append(f)
# print("=====================================")
# print(dic)
# import json
# ls = json.load(open("tests/summaries.json", "r"))
# for chunk in ls:
# print(chunk + "\n\n========================\n\n")
# Read all thread info sequentially
# for t in threads:
# x = t.join()
# summary.append(x)
for i, chunk in enumerate(chunks):
print(
f"""<chunk number="{i}" start="{chunk.start}" end="{chunk.end}">
{chunk.content}
</chunk>"""
)
| [] |
2024-01-10 | sweepai/sweep | sweepai~handlers~create_pr.py | """
create_pr is a function that creates a pull request from a list of file change requests.
It is also responsible for handling Sweep config PR creation.
"""
import datetime
from typing import Generator
import openai
from github.Commit import Commit
from github.Repository import Repository
from sweepai.config.client import DEFAULT_RULES_STRING, SweepConfig, get_blocked_dirs
from sweepai.config.server import (
ENV,
GITHUB_BOT_USERNAME,
GITHUB_CONFIG_BRANCH,
GITHUB_DEFAULT_CONFIG,
GITHUB_LABEL_NAME,
MONGODB_URI,
)
from sweepai.core.entities import (
FileChangeRequest,
MaxTokensExceeded,
MockPR,
PullRequest,
)
from sweepai.core.sweep_bot import SweepBot
from sweepai.logn import logger
from sweepai.utils.chat_logger import ChatLogger
from sweepai.utils.event_logger import posthog
from sweepai.utils.github_utils import ClonedRepo, get_github_client
from sweepai.utils.str_utils import UPDATES_MESSAGE
num_of_snippets_to_query = 10
max_num_of_snippets = 5
INSTRUCTIONS_FOR_REVIEW = """\
### 💡 To get Sweep to edit this pull request, you can:
* Comment below, and Sweep can edit the entire PR
* Comment on a file, Sweep will only modify the commented file
* Edit the original issue to get Sweep to recreate the PR from scratch"""
def create_pr_changes(
file_change_requests: list[FileChangeRequest],
pull_request: PullRequest,
sweep_bot: SweepBot,
username: str,
installation_id: int,
issue_number: int | None = None,
sandbox=None,
chat_logger: ChatLogger = None,
base_branch: str = None,
) -> Generator[tuple[FileChangeRequest, int, Commit], None, dict]:
# Flow:
# 1. Get relevant files
# 2: Get human message
# 3. Get files to change
# 4. Get file changes
# 5. Create PR
chat_logger = (
chat_logger
if chat_logger is not None
else ChatLogger(
{
"username": username,
"installation_id": installation_id,
"repo_full_name": sweep_bot.repo.full_name,
"title": pull_request.title,
"summary": "",
"issue_url": "",
}
)
if MONGODB_URI
else None
)
sweep_bot.chat_logger = chat_logger
organization, repo_name = sweep_bot.repo.full_name.split("/")
metadata = {
"repo_full_name": sweep_bot.repo.full_name,
"organization": organization,
"repo_name": repo_name,
"repo_description": sweep_bot.repo.description,
"username": username,
"installation_id": installation_id,
"function": "create_pr",
"mode": ENV,
"issue_number": issue_number,
}
posthog.capture(username, "started", properties=metadata)
try:
logger.info("Making PR...")
pull_request.branch_name = sweep_bot.create_branch(
pull_request.branch_name, base_branch=base_branch
)
completed_count, fcr_count = 0, len(file_change_requests)
blocked_dirs = get_blocked_dirs(sweep_bot.repo)
for (
file_change_request,
changed_file,
sandbox_error,
commit,
file_change_requests,
) in sweep_bot.change_files_in_github_iterator(
file_change_requests,
pull_request.branch_name,
blocked_dirs,
):
completed_count += changed_file
logger.info(f"Completed {completed_count}/{fcr_count} files")
yield file_change_request, changed_file, sandbox_error, commit, file_change_requests
if completed_count == 0 and fcr_count != 0:
logger.info("No changes made")
posthog.capture(
username,
"failed",
properties={
"error": "No changes made",
"reason": "No changes made",
**metadata,
},
)
# If no changes were made, delete branch
commits = sweep_bot.repo.get_commits(pull_request.branch_name)
if commits.totalCount == 0:
branch = sweep_bot.repo.get_git_ref(f"heads/{pull_request.branch_name}")
branch.delete()
return
# Include issue number in PR description
if issue_number:
# If the #issue changes, then change on_ticket (f'Fixes #{issue_number}.\n' in pr.body:)
pr_description = (
f"{pull_request.content}\n\nFixes"
f" #{issue_number}.\n\n---\n\n{UPDATES_MESSAGE}\n\n---\n\n{INSTRUCTIONS_FOR_REVIEW}"
)
else:
pr_description = f"{pull_request.content}"
pr_title = pull_request.title
if "sweep.yaml" in pr_title:
pr_title = "[config] " + pr_title
except MaxTokensExceeded as e:
logger.error(e)
posthog.capture(
username,
"failed",
properties={
"error": str(e),
"reason": "Max tokens exceeded",
**metadata,
},
)
raise e
except openai.BadRequestError as e:
logger.error(e)
posthog.capture(
username,
"failed",
properties={
"error": str(e),
"reason": "Invalid request error / context length",
**metadata,
},
)
raise e
except Exception as e:
logger.error(e)
posthog.capture(
username,
"failed",
properties={
"error": str(e),
"reason": "Unexpected error",
**metadata,
},
)
raise e
posthog.capture(username, "success", properties={**metadata})
logger.info("create_pr success")
result = {
"success": True,
"pull_request": MockPR(
file_count=completed_count,
title=pr_title,
body=pr_description,
pr_head=pull_request.branch_name,
base=sweep_bot.repo.get_branch(
SweepConfig.get_branch(sweep_bot.repo)
).commit,
head=sweep_bot.repo.get_branch(pull_request.branch_name).commit,
),
}
yield result # Doing this because sometimes using StopIteration doesn't work, kinda jank tho tbh
return
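# Illustrative sketch (not part of the original module): one way a caller might
# consume the create_pr_changes generator. It yields a progress tuple per file
# change and finally a result dict containing the MockPR, mirroring the loop used
# in on_ticket. The variables `file_change_requests`, `pull_request`, `sweep_bot`,
# `username`, and `installation_id` are assumed to be prepared by the caller.
#
# response = {"error": NoFilesException()}
# generator = create_pr_changes(
#     file_change_requests, pull_request, sweep_bot, username, installation_id
# )
# for item in generator:
#     if isinstance(item, dict):
#         # final result dict with "success" and "pull_request" keys
#         response = item
#         break
#     file_change_request, changed_file, sandbox_error, commit, fcrs = item
#     # update progress display for this file change here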
def safe_delete_sweep_branch(
pr, # Github PullRequest
repo: Repository,
) -> bool:
"""
Safely delete Sweep branch
1. Only edited by Sweep
2. Prefixed by sweep/
"""
pr_commits = pr.get_commits()
pr_commit_authors = set([commit.author.login for commit in pr_commits])
# Check if only Sweep has edited the PR, and sweep/ prefix
if (
len(pr_commit_authors) == 1
and GITHUB_BOT_USERNAME in pr_commit_authors
and pr.head.ref.startswith("sweep")
):
branch = repo.get_git_ref(f"heads/{pr.head.ref}")
# pr.edit(state='closed')
branch.delete()
return True
else:
# Failed to delete branch as it was edited by someone else
return False
def create_config_pr(
sweep_bot: SweepBot | None, repo: Repository = None, cloned_repo: ClonedRepo = None
):
if repo is not None:
# Check if file exists in repo
try:
repo.get_contents("sweep.yaml")
return
except SystemExit:
raise SystemExit
except Exception:
pass
title = "Configure Sweep"
branch_name = GITHUB_CONFIG_BRANCH
if sweep_bot is not None:
branch_name = sweep_bot.create_branch(branch_name, retry=False)
try:
# commit_history = []
# if cloned_repo is not None:
# commit_history = cloned_repo.get_commit_history(
# limit=1000, time_limited=False
# )
# commit_string = "\n".join(commit_history)
# sweep_yaml_bot = SweepYamlBot()
# generated_rules = sweep_yaml_bot.get_sweep_yaml_rules(
# commit_history=commit_string
# )
sweep_bot.repo.create_file(
"sweep.yaml",
"Create sweep.yaml",
GITHUB_DEFAULT_CONFIG.format(
branch=sweep_bot.repo.default_branch,
additional_rules=DEFAULT_RULES_STRING,
),
branch=branch_name,
)
sweep_bot.repo.create_file(
".github/ISSUE_TEMPLATE/sweep-template.yml",
"Create sweep template",
SWEEP_TEMPLATE,
branch=branch_name,
)
except SystemExit:
raise SystemExit
except Exception as e:
logger.error(e)
else:
# Create branch based on default branch
branch = repo.create_git_ref(
ref=f"refs/heads/{branch_name}",
sha=repo.get_branch(repo.default_branch).commit.sha,
)
try:
# commit_history = []
# if cloned_repo is not None:
# commit_history = cloned_repo.get_commit_history(
# limit=1000, time_limited=False
# )
# commit_string = "\n".join(commit_history)
# sweep_yaml_bot = SweepYamlBot()
# generated_rules = sweep_yaml_bot.get_sweep_yaml_rules(
# commit_history=commit_string
# )
repo.create_file(
"sweep.yaml",
"Create sweep.yaml",
GITHUB_DEFAULT_CONFIG.format(
branch=repo.default_branch, additional_rules=DEFAULT_RULES_STRING
),
branch=branch_name,
)
repo.create_file(
".github/ISSUE_TEMPLATE/sweep-template.yml",
"Create sweep template",
SWEEP_TEMPLATE,
branch=branch_name,
)
except SystemExit:
raise SystemExit
except Exception as e:
logger.error(e)
repo = sweep_bot.repo if sweep_bot is not None else repo
# Check if the pull request from this branch to main already exists.
# If it does, then we don't need to create a new one.
if repo is not None:
pull_requests = repo.get_pulls(
state="open",
sort="created",
base=SweepConfig.get_branch(repo)
if sweep_bot is not None
else repo.default_branch,
head=branch_name,
)
for pr in pull_requests:
if pr.title == title:
return pr
logger.print("Default branch", repo.default_branch)
logger.print("New branch", branch_name)
pr = repo.create_pull(
title=title,
body="""🎉 Thank you for installing Sweep! We're thrilled to announce the latest update for Sweep, your AI junior developer on GitHub. This PR creates a `sweep.yaml` config file, allowing you to personalize Sweep's performance according to your project requirements.
## What's new?
- **Sweep is now configurable**.
- To configure Sweep, simply edit the `sweep.yaml` file in the root of your repository.
- If you need help, check out the [Sweep Default Config](https://github.com/sweepai/sweep/blob/main/sweep.yaml) or [Join Our Discord](https://discord.gg/sweep) for help.
If you would like me to stop creating this PR, go to issues and say "Sweep: create an empty `sweep.yaml` file".
Thank you for using Sweep! 🧹""".replace(
" ", ""
),
head=branch_name,
base=SweepConfig.get_branch(repo)
if sweep_bot is not None
else repo.default_branch,
)
pr.add_to_labels(GITHUB_LABEL_NAME)
return pr
def add_config_to_top_repos(installation_id, username, repositories, max_repos=3):
user_token, g = get_github_client(installation_id)
repo_activity = {}
for repo_entity in repositories:
repo = g.get_repo(repo_entity.full_name)
# instead of using total count, use the date of the latest commit
commits = repo.get_commits(
author=username,
since=datetime.datetime.now() - datetime.timedelta(days=30),
)
# get latest commit date
commit_date = datetime.datetime.now() - datetime.timedelta(days=30)
for commit in commits:
if commit.commit.author.date > commit_date:
commit_date = commit.commit.author.date
# since_date = datetime.datetime.now() - datetime.timedelta(days=30)
# commits = repo.get_commits(since=since_date, author="lukejagg")
repo_activity[repo] = commit_date
# print(repo, commits.totalCount)
logger.print(repo, commit_date)
sorted_repos = sorted(repo_activity, key=repo_activity.get, reverse=True)
sorted_repos = sorted_repos[:max_repos]
# For each repo, create a branch based on main branch, then create PR to main branch
for repo in sorted_repos:
try:
logger.print("Creating config for", repo.full_name)
create_config_pr(
None,
repo=repo,
cloned_repo=ClonedRepo(
repo_full_name=repo.full_name,
installation_id=installation_id,
token=user_token,
),
)
except SystemExit:
raise SystemExit
except Exception as e:
logger.print(e)
logger.print("Finished creating configs for top repos")
def create_gha_pr(g, repo):
# Create a new branch
branch_name = "sweep/gha-enable"
branch = repo.create_git_ref(
ref=f"refs/heads/{branch_name}",
sha=repo.get_branch(repo.default_branch).commit.sha,
)
# Update the sweep.yaml file in this branch to add "gha_enabled: True"
sweep_yaml_content = (
repo.get_contents("sweep.yaml", ref=branch_name).decoded_content.decode()
+ "\ngha_enabled: True"
)
repo.update_file(
"sweep.yaml",
"Enable GitHub Actions",
sweep_yaml_content,
repo.get_contents("sweep.yaml", ref=branch_name).sha,
branch=branch_name,
)
# Create a PR from this branch to the main branch
pr = repo.create_pull(
title="Enable GitHub Actions",
body="This PR enables GitHub Actions for this repository.",
head=branch_name,
base=repo.default_branch,
)
return pr
SWEEP_TEMPLATE = """\
name: Sweep Issue
title: 'Sweep: '
description: For small bugs, features, refactors, and tests to be handled by Sweep, an AI-powered junior developer.
labels: sweep
body:
- type: textarea
id: description
attributes:
label: Details
description: Tell Sweep where and what to edit and provide enough context for a new developer to the codebase
placeholder: |
Unit Tests: Write unit tests for <FILE>. Test each function in the file. Make sure to test edge cases.
Bugs: The bug might be in <FILE>. Here are the logs: ...
Features: the new endpoint should use the ... class from <FILE> because it contains ... logic.
Refactors: We are migrating this function to ... version because ..."""
| [
"name: Sweep Issue\ntitle: 'Sweep: '\ndescription: For small bugs, features, refactors, and tests to be handled by Sweep, an AI-powered junior developer.\nlabels: sweep\nbody:\n - type: textarea\n id: description\n attributes:\n label: Details\n description: Tell Sweep where and what to edit and provide enough context for a new developer to the codebase\n placeholder: |\n Unit Tests: Write unit tests for <FILE>. Test each function in the file. Make sure to test edge cases.\n Bugs: The bug might be in <FILE>. Here are the logs: ...\n Features: the new endpoint should use the ... class from <FILE> because it contains ... logic.\n Refactors: We are migrating this function to ... version because ..."
] |
2024-01-10 | sweepai/sweep | sweepai~utils~progress.py | from __future__ import annotations
from enum import Enum
from threading import Thread
from openai import OpenAI
from openai.types.beta.threads.runs.code_tool_call import CodeToolCall
from openai.types.beta.threads.runs.function_tool_call import FunctionToolCall
from pydantic import BaseModel, Field
from sweepai.config.server import MONGODB_URI, OPENAI_API_KEY
from sweepai.core.entities import FileChangeRequest, Snippet
from sweepai.utils.chat_logger import discord_log_error, global_mongo_client
class AssistantAPIMessageRole(Enum):
SYSTEM = "system"
USER = "user"
ASSISTANT = "assistant"
CODE_INTERPRETER_INPUT = "code_interpreter_input"
CODE_INTERPRETER_OUTPUT = "code_interpreter_output"
FUNCTION_CALL_INPUT = "function_call_input"
FUNCTION_CALL_OUTPUT = "function_call_output"
class AssistantAPIMessage(BaseModel):
class Config:
use_enum_values = True
role: AssistantAPIMessageRole
content: str = ""
class AssistantStatus(Enum):
QUEUED = "queued"
IN_PROGRESS = "in_progress"
REQUIRES_ACTION = "requires_action"
CANCELLING = "cancelling"
CANCELLED = "cancelled"
FAILED = "failed"
COMPLETED = "completed"
EXPIRED = "expired"
class AssistantConversation(BaseModel):
messages: list[AssistantAPIMessage] = []
is_active: bool = True
status: AssistantStatus = "in_progress"
assistant_id: str = ""
run_id: str = ""
thread_id: str = ""
class Config:
use_enum_values = True
@classmethod
def from_ids(
cls,
assistant_id: str,
run_id: str,
thread_id: str,
) -> AssistantConversation | None:
client = OpenAI(api_key=OPENAI_API_KEY)
try:
assistant = client.beta.assistants.retrieve(
assistant_id=assistant_id, timeout=1.5
)
run = client.beta.threads.runs.retrieve(
run_id=run_id, thread_id=thread_id, timeout=1.5
)
message_objects = client.beta.threads.runs.steps.list(
run_id=run_id, thread_id=thread_id, timeout=1.5
).data
except:
return None
messages: list[AssistantAPIMessage] = [
AssistantAPIMessage(
role=AssistantAPIMessageRole.SYSTEM,
content=assistant.instructions,
)
]
for message_obj in list(message_objects)[::-1]:
if message_obj.type == "message_creation":
message_id = message_obj.step_details.message_creation.message_id
try:
message_content = (
client.beta.threads.messages.retrieve(
message_id=message_id, thread_id=thread_id, timeout=1.5
)
.content[0]
.text.value
)
except:
return None
messages.append(
AssistantAPIMessage(
role=AssistantAPIMessageRole.ASSISTANT,
content=message_content,
)
)
# TODO: handle annotations
elif message_obj.type == "tool_calls":
for tool_call in message_obj.step_details.tool_calls:
if isinstance(tool_call, CodeToolCall):
code_interpreter = tool_call.code_interpreter
input_ = code_interpreter.input
if not input_:
continue
messages.append(
AssistantAPIMessage(
role=AssistantAPIMessageRole.CODE_INTERPRETER_INPUT,
content=input_,
)
)
outputs = code_interpreter.outputs
output = outputs[0].logs if outputs else "__No output__"
messages.append(
AssistantAPIMessage(
role=AssistantAPIMessageRole.CODE_INTERPRETER_OUTPUT,
content=output,
)
)
elif isinstance(tool_call, FunctionToolCall):
messages.append(
AssistantAPIMessage(
role=AssistantAPIMessageRole.FUNCTION_CALL_INPUT,
content=tool_call.function.arguments,
)
)
messages.append(
AssistantAPIMessage(
role=AssistantAPIMessageRole.FUNCTION_CALL_OUTPUT,
content=tool_call.function.output or "__No output__",
)
)
return cls(
messages=messages,
status=run.status,
is_active=run.status not in ("succeeded", "failed"),
assistant_id=assistant_id,
run_id=run_id,
thread_id=thread_id,
)
def update_from_ids(
self,
assistant_id: str,
run_id: str,
thread_id: str,
) -> AssistantConversation:
assistant_conversation = AssistantConversation.from_ids(
assistant_id=assistant_id, run_id=run_id, thread_id=thread_id
)
if not assistant_conversation:
return self
self.messages = assistant_conversation.messages
self.is_active = assistant_conversation.is_active
self.status = assistant_conversation.status
return self
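# Illustrative sketch (not in the original file): reconstructing and refreshing an
# assistant conversation from OpenAI Assistants API IDs, as the ticket pipeline does
# when reporting progress. The three ID strings are placeholders and are assumed to
# come from an existing run.
#
# conversation = AssistantConversation.from_ids(
#     assistant_id="asst_...", run_id="run_...", thread_id="thread_..."
# )
# if conversation is not None and conversation.is_active:
#     conversation = conversation.update_from_ids(
#         assistant_id=conversation.assistant_id,
#         run_id=conversation.run_id,
#         thread_id=conversation.thread_id,
#     )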
class TicketProgressStatus(Enum):
SEARCHING = "searching"
PLANNING = "planning"
CODING = "coding"
COMPLETE = "complete"
ERROR = "error"
class SearchProgress(BaseModel):
class Config:
use_enum_values = True
indexing_progress: int = 0
indexing_total: int = 0
rephrased_query: str = ""
retrieved_snippets: list[Snippet] = []
final_snippets: list[Snippet] = []
pruning_conversation: AssistantConversation = AssistantConversation()
pruning_conversation_counter: int = 0
repo_tree: str = ""
class PlanningProgress(BaseModel):
assistant_conversation: AssistantConversation = AssistantConversation()
file_change_requests: list[FileChangeRequest] = []
class CodingProgress(BaseModel):
file_change_requests: list[FileChangeRequest] = []
assistant_conversations: list[AssistantConversation] = []
class PaymentContext(BaseModel):
use_faster_model: bool = True
pro_user: bool = True
daily_tickets_used: int = 0
monthly_tickets_used: int = 0
class TicketContext(BaseModel):
title: str = ""
description: str = ""
repo_full_name: str = ""
issue_number: int = 0
branch_name: str = ""
is_public: bool = True
pr_id: int = -1
start_time: int = 0
done_time: int = 0
payment_context: PaymentContext = PaymentContext()
class TicketProgress(BaseModel):
tracking_id: str
username: str = ""
context: TicketContext = TicketContext()
status: TicketProgressStatus = TicketProgressStatus.SEARCHING
search_progress: SearchProgress = SearchProgress()
planning_progress: PlanningProgress = PlanningProgress()
coding_progress: CodingProgress = CodingProgress()
prev_dict: dict = Field(default_factory=dict)
error_message: str = ""
class Config:
use_enum_values = True
@classmethod
def load(cls, tracking_id: str) -> TicketProgress:
if MONGODB_URI is None:
return None
db = global_mongo_client["progress"]
collection = db["ticket_progress"]
doc = collection.find_one({"tracking_id": tracking_id})
return cls(**doc)
def _save(self):
try:
if MONGODB_URI is None:
return None
if self.dict() == self.prev_dict:
return
current_dict = self.dict()
del current_dict["prev_dict"]
self.prev_dict = current_dict
db = global_mongo_client["progress"]
collection = db["ticket_progress"]
collection.update_one(
{"tracking_id": self.tracking_id}, {"$set": current_dict}, upsert=True
)
except Exception as e:
discord_log_error(str(e) + "\n\n" + str(self.tracking_id))
def save(self):
thread = Thread(target=self._save)
thread.start()
def create_index():
# killer code to make everything way faster
db = global_mongo_client["progress"]
collection = db["ticket_progress"]
collection.create_index("tracking_id", unique=True)
if __name__ == "__main__":
ticket_progress = TicketProgress(tracking_id="test")
ticket_progress.error_message = (
"I'm sorry, but it looks like an error has occurred due to"
+ " a planning failure. Please create a more detailed issue"
+ " so I can better address it. Alternatively, reach out to Kevin or William for help at"
+ " https://discord.gg/sweep."
)
ticket_progress.status = TicketProgressStatus.ERROR
ticket_progress.save()
# new_ticket_progress = TicketProgress.load("test")
# print(new_ticket_progress)
# assert new_ticket_progress == ticket_progress
| [
"__No output__"
] |
2024-01-10 | sweepai/sweep | tests~archive~test_external_search.py | import os
import openai
from sweepai.core.external_searcher import ExternalSearcher
openai.api_key = os.environ.get("OPENAI_API_KEY")
problem = """
## Sweep: Scaffold tests in generated SDK
We recently introduced simple test scaffolding in [fern-python](https://github.com/fern-api/fern-python/pull/296). We should do something similar here, potentially with `jest`.
Previous PR:
This adds pytest to the list of dev dependencies, as well as creates a tests/ directory with a simple no-op test.
The generated test includes the syntax required for skipping tests (via @pytest.mark.skip) to demonstrate the pytest import. We also include a link to the pytest docs for the user to learn more.
"""
print(ExternalSearcher.extract_summaries(problem))
| [] |
2024-01-10 | sweepai/sweep | tests~archive~test_match.py | from sweepai.utils.search_and_replace import score_multiline
haystack = r"""
# TODO: Add file validation
import math
import re
import traceback
import openai
import github
from github import GithubException, BadCredentialsException
from tabulate import tabulate
from tqdm import tqdm
from sweepai.logn import logger, LogTask
from sweepai.core.context_pruning import ContextPruning
from sweepai.core.documentation_searcher import extract_relevant_docs
from sweepai.core.entities import (
ProposedIssue,
SandboxResponse,
Snippet,
NoFilesException,
SweepContext,
MaxTokensExceeded,
EmptyRepository,
)
from sweepai.core.external_searcher import ExternalSearcher
from sweepai.core.slow_mode_expand import SlowModeBot
from sweepai.core.sweep_bot import SweepBot
from sweepai.core.prompts import issue_comment_prompt
# from sandbox.sandbox_utils import Sandbox
from sweepai.handlers.create_pr import (
create_pr_changes,
create_config_pr,
safe_delete_sweep_branch,
)
from sweepai.handlers.on_comment import on_comment
from sweepai.handlers.on_review import review_pr
from sweepai.utils.buttons import create_action_buttons
from sweepai.utils.chat_logger import ChatLogger
from sweepai.config.client import (
SweepConfig,
get_documentation_dict,
)
from sweepai.config.server import (
ENV,
MONGODB_URI,
OPENAI_API_KEY,
GITHUB_BOT_USERNAME,
GITHUB_LABEL_NAME,
OPENAI_USE_3_5_MODEL_ONLY,
WHITELISTED_REPOS,
)
from sweepai.utils.ticket_utils import *
from sweepai.utils.event_logger import posthog
from sweepai.utils.github_utils import ClonedRepo, get_github_client
from sweepai.utils.prompt_constructor import HumanMessagePrompt
from sweepai.utils.search_utils import search_snippets
from sweepai.utils.tree_utils import DirectoryTree
openai.api_key = OPENAI_API_KEY
def center(text: str) -> str:
return f"<div align='center'>{text}</div>"
@LogTask()
def on_ticket(
title: str,
summary: str,
issue_number: int,
issue_url: str,
username: str,
repo_full_name: str,
repo_description: str,
installation_id: int,
comment_id: int = None,
edited: bool = False,
):
(
title,
slow_mode,
do_map,
subissues_mode,
sandbox_mode,
fast_mode,
lint_mode,
) = strip_sweep(title)
# Flow:
# 1. Get relevant files
# 2: Get human message
# 3. Get files to change
# 4. Get file changes
# 5. Create PR
summary = summary or ""
summary = re.sub(
"<details (open)?>\n<summary>Checklist</summary>.*",
"",
summary,
flags=re.DOTALL,
).strip()
summary = re.sub(
"---\s+Checklist:\n\n- \[[ X]\].*", "", summary, flags=re.DOTALL
).strip()
repo_name = repo_full_name
user_token, g = get_github_client(installation_id)
repo = g.get_repo(repo_full_name)
current_issue = repo.get_issue(number=issue_number)
assignee = current_issue.assignee.login if current_issue.assignee else None
if assignee is None:
assignee = current_issue.user.login
chat_logger = (
ChatLogger(
{
"repo_name": repo_name,
"title": title,
"summary": summary,
"issue_number": issue_number,
"issue_url": issue_url,
"username": username if not username.startswith("sweep") else assignee,
"repo_full_name": repo_full_name,
"repo_description": repo_description,
"installation_id": installation_id,
"type": "ticket",
"mode": ENV,
"comment_id": comment_id,
"edited": edited,
}
)
if MONGODB_URI
else None
)
if chat_logger:
is_paying_user = chat_logger.is_paying_user()
is_trial_user = chat_logger.is_trial_user()
use_faster_model = OPENAI_USE_3_5_MODEL_ONLY or chat_logger.use_faster_model(g)
else:
is_paying_user = True
is_trial_user = False
use_faster_model = False
if fast_mode:
use_faster_model = True
sweep_context = SweepContext.create(
username=username,
issue_url=issue_url,
use_faster_model=use_faster_model,
is_paying_user=is_paying_user,
repo=repo,
token=user_token,
)
logger.print(sweep_context)
if not comment_id and not edited and chat_logger:
chat_logger.add_successful_ticket(
gpt3=use_faster_model
) # moving higher, will increment the issue regardless of whether it's a success or not
organization, repo_name = repo_full_name.split("/")
metadata = {
"issue_url": issue_url,
"repo_full_name": repo_full_name,
"organization": organization,
"repo_name": repo_name,
"repo_description": repo_description,
"username": username,
"comment_id": comment_id,
"title": title,
"installation_id": installation_id,
"function": "on_ticket",
"edited": edited,
"model": "gpt-3.5" if use_faster_model else "gpt-4",
"tier": "pro" if is_paying_user else "free",
"mode": ENV,
"slow_mode": slow_mode,
"do_map": do_map,
"subissues_mode": subissues_mode,
"sandbox_mode": sandbox_mode,
"fast_mode": fast_mode,
}
# logger.bind(**metadata)
posthog.capture(username, "started", properties=metadata)
logger.info(f"Getting repo {repo_full_name}")
if current_issue.state == "closed":
logger.warning(f"Issue {issue_number} is closed")
posthog.capture(username, "issue_closed", properties=metadata)
return {"success": False, "reason": "Issue is closed"}
current_issue.edit(body=summary)
item_to_react_to = (
current_issue.get_comment(comment_id) if comment_id else current_issue
)
replies_text = ""
comments = list(current_issue.get_comments())
if comment_id:
logger.info(f"Replying to comment {comment_id}...")
replies_text = "\nComments:\n" + "\n".join(
[
issue_comment_prompt.format(
username=comment.user.login,
reply=comment.body,
)
for comment in comments
if comment.user.type == "User"
]
)
summary = summary if summary else ""
prs = repo.get_pulls(
state="open", sort="created", base=SweepConfig.get_branch(repo)
)
for pr in prs:
# Check if this issue is mentioned in the PR, and pr is owned by bot
# This is done in create_pr, (pr_description = ...)
if (
pr.user.login == GITHUB_BOT_USERNAME
and f"Fixes #{issue_number}.\n" in pr.body
):
success = safe_delete_sweep_branch(pr, repo)
eyes_reaction = item_to_react_to.create_reaction("eyes")
# If SWEEP_BOT reacted to item_to_react_to with "rocket", then remove it.
reactions = item_to_react_to.get_reactions()
for reaction in reactions:
if reaction.content == "rocket" and reaction.user.login == GITHUB_BOT_USERNAME:
item_to_react_to.delete_reaction(reaction.id)
# Removed 1, 3
progress_headers = [
None,
"Step 1: 🔎 Searching",
"Step 2: ⌨️ Coding",
"Step 3: 🔁 Code Review",
]
config_pr_url = None
# Find the first comment made by the bot
issue_comment = None
tickets_allocated = 5
if is_trial_user:
tickets_allocated = 15
if is_paying_user:
tickets_allocated = 500
ticket_count = (
max(tickets_allocated - chat_logger.get_ticket_count(), 0)
if chat_logger
else 999
)
daily_ticket_count = (
(3 - chat_logger.get_ticket_count(use_date=True) if not use_faster_model else 0)
if chat_logger
else 999
)
model_name = "GPT-3.5" if use_faster_model else "GPT-4"
payment_link = "https://buy.stripe.com/00g5npeT71H2gzCfZ8"
daily_message = (
f" and {daily_ticket_count} for the day"
if not is_paying_user and not is_trial_user
else ""
)
user_type = "💎 Sweep Pro" if is_paying_user else "⚡ Sweep Free Trial"
gpt_tickets_left_message = (
f"{ticket_count} GPT-4 tickets left for the month"
if not is_paying_user
else "unlimited GPT-4 tickets"
)
payment_message = (
f"{user_type}: I used {model_name} to create this ticket. You have {gpt_tickets_left_message}{daily_message}."
+ (
f" For more GPT-4 tickets, visit [our payment portal.]({payment_link})"
if not is_paying_user
else ""
)
)
payment_message_start = (
f"{user_type}: I'm creating this ticket using {model_name}. You have {gpt_tickets_left_message}{daily_message}."
+ (
f" For more GPT-4 tickets, visit [our payment portal.]({payment_link})"
if not is_paying_user
else ""
)
)
def get_comment_header(index, errored=False, pr_message="", done=False):
config_pr_message = (
"\n" + f"* Install Sweep Configs: [Pull Request]({config_pr_url})"
if config_pr_url is not None
else ""
)
actions_message = create_action_buttons(
[
"↻ Restart Sweep",
]
)
if index < 0:
index = 0
if index == 4:
return pr_message + f"\n\n---\n{actions_message}" + config_pr_message
total = len(progress_headers)
index += 1 if done else 0
index *= 100 / total
index = int(index)
index = min(100, index)
if errored:
pbar = f"\n\n<img src='https://progress-bar.dev/{index}/?&title=Errored&width=600' alt='{index}%' />"
return (
f"{center(sweeping_gif)}<br/>{center(pbar)}\n\n"
+ f"\n\n---\n{actions_message}"
)
pbar = f"\n\n<img src='https://progress-bar.dev/{index}/?&title=Progress&width=600' alt='{index}%' />"
return (
f"{center(sweeping_gif)}<br/>{center(pbar)}"
+ ("\n" + stars_suffix if index != -1 else "")
+ "\n"
+ payment_message_start
+ config_pr_message
+ f"\n\n---\n{actions_message}"
)
# Find Sweep's previous comment
logger.print("USERNAME", GITHUB_BOT_USERNAME)
for comment in comments:
logger.print("COMMENT", comment.user.login)
if comment.user.login == GITHUB_BOT_USERNAME:
logger.print("Found comment")
issue_comment = comment
try:
config = SweepConfig.get_config(repo)
except EmptyRepository as e:
logger.info("Empty repo")
first_comment = (
"Sweep is currently not supported on empty repositories. Please add some"
f" code to your repository and try again.\n{sep}##"
f" {progress_headers[1]}\n{bot_suffix}{discord_suffix}"
)
if issue_comment is None:
issue_comment = current_issue.create_comment(first_comment)
else:
issue_comment.edit(first_comment)
return {"success": False}
cloned_repo = ClonedRepo(
repo_full_name, installation_id=installation_id, token=user_token
)
num_of_files = cloned_repo.get_num_files_from_repo()
time_estimate = math.ceil(3 + 5 * num_of_files / 1000)
indexing_message = (
"I'm searching for relevant snippets in your repository. If this is your first"
" time using Sweep, I'm indexing your repository. This may take up to"
f" {time_estimate} minutes. I'll let you know when I'm done."
)
first_comment = (
f"{get_comment_header(0)}\n{sep}I am currently looking into this ticket! I"
" will update the progress of the ticket in this comment. I am currently"
f" searching through your code, looking for relevant snippets.\n{sep}##"
f" {progress_headers[1]}\n{indexing_message}{bot_suffix}{discord_suffix}"
)
if issue_comment is None:
issue_comment = current_issue.create_comment(first_comment)
else:
issue_comment.edit(first_comment)
# Comment edit function
past_messages = {}
current_index = 0
# Random variables to save in case of errors
table = None # Show plan so user can finetune prompt
def edit_sweep_comment(message: str, index: int, pr_message="", done=False):
nonlocal current_index, user_token, g, repo, issue_comment
# -1 = error, -2 = retry
# Only update the progress bar if the issue generation errors.
errored = index == -1
if index >= 0:
past_messages[index] = message
current_index = index
agg_message = None
# Include progress history
# index = -2 is reserved for
for i in range(
current_index + 2
): # go to next header (for Working on it... text)
if i == 0 or i >= len(progress_headers):
continue # skip None header
header = progress_headers[i]
if header is not None:
header = "## " + header + "\n"
else:
header = "No header\n"
msg = header + (past_messages.get(i) or "Working on it...")
if agg_message is None:
agg_message = msg
else:
agg_message = agg_message + f"\n{sep}" + msg
suffix = bot_suffix + discord_suffix
if errored:
agg_message = (
"## ❌ Unable to Complete PR"
+ "\n"
+ message
+ "\n\nFor bonus GPT-4 tickets, please report this bug on"
" **[Discord](https://discord.com/invite/sweep-ai)**."
)
if table is not None:
agg_message = (
agg_message
+ f"\n{sep}Please look at the generated plan. If something looks"
f" wrong, please add more details to your issue.\n\n{table}"
)
suffix = bot_suffix # don't include discord suffix for error messages
# Update the issue comment
try:
issue_comment.edit(
f"{get_comment_header(current_index, errored, pr_message, done=done)}\n{sep}{agg_message}{suffix}"
)
except BadCredentialsException:
logger.error("Bad credentials, refreshing token")
_user_token, g = get_github_client(installation_id)
repo = g.get_repo(repo_full_name)
issue_comment = repo.get_issue(current_issue.number)
issue_comment.edit(
f"{get_comment_header(current_index, errored, pr_message, done=done)}\n{sep}{agg_message}{suffix}"
)
if len(title + summary) < 20:
logger.info("Issue too short")
edit_sweep_comment(
(
"Please add more details to your issue. I need at least 20 characters"
" to generate a plan."
),
-1,
)
return {"success": True}
if (
repo_name.lower() not in WHITELISTED_REPOS
and not is_paying_user
and not is_trial_user
):
if ("sweep" in repo_name.lower()) or ("test" in repo_name.lower()):
logger.info("Test repository detected")
edit_sweep_comment(
(
"Sweep does not work on test repositories. Please create an issue"
" on a real repository. If you think this is a mistake, please"
" report this at https://discord.gg/sweep."
),
-1,
)
return {"success": False}
if lint_mode:
# Get files to change
# Create new branch
# Send request to endpoint
for file_path in []:
SweepBot.run_sandbox(
repo.html_url, file_path, None, user_token, only_lint=True
)
logger.info("Fetching relevant files...")
try:
snippets, tree = search_snippets(
cloned_repo,
f"{title}\n{summary}\n{replies_text}",
num_files=num_of_snippets_to_query,
)
assert len(snippets) > 0
except SystemExit:
raise SystemExit
except Exception as e:
trace = traceback.format_exc()
logger.error(e)
logger.error(trace)
edit_sweep_comment(
(
"It looks like an issue has occurred around fetching the files."
" Perhaps the repo has not been initialized. If this error persists"
f" contact [email protected].\n\n> @{username}, editing this issue description to include more details will automatically make me relaunch."
),
-1,
)
log_error(
is_paying_user,
is_trial_user,
username,
issue_url,
"File Fetch",
str(e) + "\n" + traceback.format_exc(),
priority=1,
)
raise e
snippets = post_process_snippets(
snippets, max_num_of_snippets=2 if use_faster_model else 5
)
if not repo_description:
repo_description = "No description provided."
message_summary = summary + replies_text
external_results = ExternalSearcher.extract_summaries(message_summary)
if external_results:
message_summary += "\n\n" + external_results
user_dict = get_documentation_dict(repo)
docs_results = ""
try:
docs_results = extract_relevant_docs(
title + message_summary, user_dict, chat_logger
)
if docs_results:
message_summary += "\n\n" + docs_results
except SystemExit:
raise SystemExit
except Exception as e:
logger.error(f"Failed to extract docs: {e}")
human_message = HumanMessagePrompt(
repo_name=repo_name,
issue_url=issue_url,
username=username,
repo_description=repo_description.strip(),
title=title,
summary=message_summary,
snippets=snippets,
tree=tree,
)
context_pruning = ContextPruning(chat_logger=chat_logger)
(
snippets_to_ignore,
excluded_dirs,
) = context_pruning.prune_context( # TODO, ignore directories
human_message, repo=repo
)
snippets = post_process_snippets(
snippets, max_num_of_snippets=5, exclude_snippets=snippets_to_ignore
)
dir_obj = DirectoryTree()
dir_obj.parse(tree)
dir_obj.remove_multiple(excluded_dirs)
tree = str(dir_obj)
logger.info(f"New snippets: {snippets}")
logger.info(f"New tree: {tree}")
human_message = HumanMessagePrompt(
repo_name=repo_name,
issue_url=issue_url,
username=username,
repo_description=repo_description.strip(),
title=title,
summary=message_summary,
snippets=snippets,
tree=tree,
)
_user_token, g = get_github_client(installation_id)
repo = g.get_repo(repo_full_name)
sweep_bot = SweepBot.from_system_message_content(
human_message=human_message,
repo=repo,
is_reply=bool(comments),
chat_logger=chat_logger,
sweep_context=sweep_context,
cloned_repo=cloned_repo,
)
# Check repository for sweep.yml file.
sweep_yml_exists = False
for content_file in repo.get_contents(""):
if content_file.name == "sweep.yaml":
sweep_yml_exists = True
break
# If sweep.yaml does not exist, then create a new PR that simply creates the sweep.yaml file.
if not sweep_yml_exists:
"""
needle = r"""
def get_comment_header(index, errored=False, pr_message="", done=False):
...
return (
f"{center(sweeping_gif)}<br/>{center(pbar)}"
+ ("\n" + stars_suffix if index != -1 else "")
+ "\n"
+ payment_message_start
+ config_pr_message
+ f"\n\n---\n{actions_message}"
)
""".strip(
"\n"
)
matched_section = r"""
def get_comment_header(index, errored=False, pr_message="", done=False):
config_pr_message = (
"\n" + f"* Install Sweep Configs: [Pull Request]({config_pr_url})"
if config_pr_url is not None
else ""
)
actions_message = create_action_buttons(
[
"↻ Restart Sweep",
]
)
if index < 0:
index = 0
if index == 4:
return pr_message + f"\n\n---\n{actions_message}" + config_pr_message
total = len(progress_headers)
index += 1 if done else 0
index *= 100 / total
index = int(index)
index = min(100, index)
if errored:
pbar = f"\n\n<img src='https://progress-bar.dev/{index}/?&title=Errored&width=600' alt='{index}%' />"
return (
f"{center(sweeping_gif)}<br/>{center(pbar)}\n\n"
+ f"\n\n---\n{actions_message}"
)
pbar = f"\n\n<img src='https://progress-bar.dev/{index}/?&title=Progress&width=600' alt='{index}%' />"
return (
f"{center(sweeping_gif)}<br/>{center(pbar)}"
+ ("\n" + stars_suffix if index != -1 else "")
+ "\n"
+ payment_message_start
+ config_pr_message
+ f"\n\n---\n{actions_message}"
)
""".strip(
"\n"
)
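# Score the needle (which elides the body of get_comment_header with "...") against the expected
# matched_section and print the resulting similarity score.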
score = score_multiline(needle.splitlines(), matched_section.splitlines())
print(score)
# best_match = find_best_match(needle, haystack)
# print("\n".join(haystack.splitlines()[best_match.start : best_match.end]))
| [] |
2024-01-10 | sweepai/sweep | tests~archive~test_diff_parsing3.py | from sweepai.utils.diff import generate_new_file_from_patch
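# old_file: a snapshot of on_ticket.py used as the target file for the patch defined in code_replaces below.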
old_file = r"""
# TODO: Add file validation
import math
import re
import traceback
import openai
from github import GithubException
from loguru import logger
from tabulate import tabulate
from tqdm import tqdm
from sweepai.core.context_pruning import ContextPruning
from sweepai.core.documentation_searcher import extract_relevant_docs
from sweepai.core.entities import (
ProposedIssue,
SandboxResponse,
Snippet,
NoFilesException,
SweepContext,
MaxTokensExceeded,
EmptyRepository,
)
from sweepai.core.external_searcher import ExternalSearcher
from sweepai.core.slow_mode_expand import SlowModeBot
from sweepai.core.sweep_bot import SweepBot
from sweepai.core.prompts import issue_comment_prompt
# from sandbox.sandbox_utils import Sandbox
from sweepai.handlers.create_pr import (
create_pr_changes,
create_config_pr,
safe_delete_sweep_branch,
)
from sweepai.handlers.on_comment import on_comment
from sweepai.handlers.on_review import review_pr
from sweepai.utils.chat_logger import ChatLogger, discord_log_error
from sweepai.config.client import (
UPDATES_MESSAGE,
SweepConfig,
get_documentation_dict,
)
from sweepai.config.server import (
ENV,
MONGODB_URI,
OPENAI_API_KEY,
GITHUB_BOT_USERNAME,
GITHUB_LABEL_NAME,
OPENAI_USE_3_5_MODEL_ONLY,
WHITELISTED_REPOS,
)
from sweepai.utils.event_logger import posthog
from sweepai.utils.github_utils import ClonedRepo, get_github_client
from sweepai.utils.prompt_constructor import HumanMessagePrompt
from sweepai.utils.search_utils import search_snippets
openai.api_key = OPENAI_API_KEY
sep = "\n---\n"
bot_suffix_starring = (
"⭐ If you are enjoying Sweep, please [star our"
" repo](https://github.com/sweepai/sweep) so more people can hear about us!"
)
bot_suffix = (
f"\n{sep}\n{UPDATES_MESSAGE}\n{sep} 💡 To recreate the pull request edit the issue"
" title or description. To tweak the pull request, leave a comment on the pull request."
)
discord_suffix = f"\n<sup>[Join Our Discord](https://discord.com/invite/sweep)"
stars_suffix = (
"⭐ In the meantime, consider [starring our repo](https://github.com/sweepai/sweep)"
" so more people can hear about us!"
)
checkbox_template = "- [{check}] {filename}\n{instructions}\n"
num_of_snippets_to_query = 30
total_number_of_snippet_tokens = 15_000
num_full_files = 2
ordinal = lambda n: str(n) + (
"th" if 4 <= n <= 20 else {1: "st", 2: "nd", 3: "rd"}.get(n % 10, "th")
)
SLOW_MODE = False
SLOW_MODE = True
def clean_logs(logs: str):
cleaned_logs = re.sub(r"\x1b\[.*?[@-~]", "", logs.replace("```", "\`\`\`"))
cleaned_logs = cleaned_logs or "(nothing was outputted)"
return cleaned_logs
def post_process_snippets(
snippets: list[Snippet],
max_num_of_snippets: int = 5,
exclude_snippets: list[str] = [],
):
snippets = [
snippet
for snippet in snippets
if not any(
snippet.file_path.endswith(ext) for ext in SweepConfig().exclude_exts
)
]
snippets = [
snippet
for snippet in snippets
if not any(
snippet.file_path == exclude_file for exclude_file in exclude_snippets
)
]
for snippet in snippets[:num_full_files]:
snippet = snippet.expand()
# snippet fusing
i = 0
while i < len(snippets):
j = i + 1
while j < len(snippets):
if snippets[i] ^ snippets[j]: # this checks for overlap
snippets[i] = snippets[i] | snippets[j] # merging
snippets.pop(j)
else:
j += 1
i += 1
# truncating snippets based on character length
result_snippets = []
total_length = 0
for snippet in snippets:
total_length += len(snippet.get_snippet())
if total_length > total_number_of_snippet_tokens * 5:
break
result_snippets.append(snippet)
return result_snippets[:max_num_of_snippets]
def create_collapsible(summary: str, body: str, opened: bool = False):
return collapsible_template.format(
summary=summary, body=body, opened="open" if opened else ""
)
def blockquote(text: str):
return f"<blockquote>{text}</blockquote>" if text else ""
def create_checkbox(title: str, body: str, checked: bool = False):
return checkbox_template.format(
check="X" if checked else " ", filename=title, instructions=body
)
def strip_sweep(text: str):
return (
re.sub(
r"^[Ss]weep\s?(\([Ss]low\))?(\([Mm]ap\))?(\([Ff]ast\))?\s?:", "", text
).lstrip(),
re.search(r"^[Ss]weep\s?\([Ss]low\)", text) is not None,
re.search(r"^[Ss]weep\s?\([Mm]ap\)", text) is not None,
re.search(r"^[Ss]weep\s?\([Ss]ubissues?\)", text) is not None,
re.search(r"^[Ss]weep\s?\([Ss]andbox?\)", text) is not None,
re.search(r"^[Ss]weep\s?\([Ff]ast\)", text) is not None,
re.search(r"^[Ss]weep\s?\([Ll]int\)", text) is not None,
)
def on_ticket(
title: str,
summary: str,
issue_number: int,
issue_url: str,
username: str,
repo_full_name: str,
repo_description: str,
installation_id: int,
comment_id: int = None,
edited: bool = False,
):
(
title,
slow_mode,
do_map,
subissues_mode,
sandbox_mode,
fast_mode,
lint_mode,
) = strip_sweep(title)
# Flow:
# 1. Get relevant files
# 2: Get human message
# 3. Get files to change
# 4. Get file changes
# 5. Create PR
summary = summary or ""
summary = re.sub(
"<details (open)?>\n<summary>Checklist</summary>.*",
"",
summary,
flags=re.DOTALL,
).strip()
summary = re.sub("Checklist:\n\n- \[[ X]\].*", "", summary, flags=re.DOTALL).strip()
repo_name = repo_full_name
user_token, g = get_github_client(installation_id)
repo = g.get_repo(repo_full_name)
current_issue = repo.get_issue(number=issue_number)
assignee = current_issue.assignee.login if current_issue.assignee else None
if assignee is None:
assignee = current_issue.user.login
chat_logger = (
ChatLogger(
{
"repo_name": repo_name,
"title": title,
"summary": summary,
"issue_number": issue_number,
"issue_url": issue_url,
"username": username if not username.startswith("sweep") else assignee,
"repo_full_name": repo_full_name,
"repo_description": repo_description,
"installation_id": installation_id,
"type": "ticket",
"mode": ENV,
"comment_id": comment_id,
"edited": edited,
}
)
if MONGODB_URI
else None
)
if chat_logger:
is_paying_user = chat_logger.is_paying_user()
is_trial_user = chat_logger.is_trial_user()
use_faster_model = OPENAI_USE_3_5_MODEL_ONLY or chat_logger.use_faster_model(g)
else:
is_paying_user = True
is_trial_user = False
use_faster_model = False
if fast_mode:
use_faster_model = True
sweep_context = SweepContext.create(
username=username,
issue_url=issue_url,
use_faster_model=use_faster_model,
is_paying_user=is_paying_user,
repo=repo,
token=user_token,
)
print(sweep_context)
if not comment_id and not edited and chat_logger:
chat_logger.add_successful_ticket(
gpt3=use_faster_model
) # moving higher, will increment the issue regardless of whether it's a success or not
organization, repo_name = repo_full_name.split("/")
metadata = {
"issue_url": issue_url,
"repo_full_name": repo_full_name,
"organization": organization,
"repo_name": repo_name,
"repo_description": repo_description,
"username": username,
"comment_id": comment_id,
"title": title,
"installation_id": installation_id,
"function": "on_ticket",
"edited": edited,
"model": "gpt-3.5" if use_faster_model else "gpt-4",
"tier": "pro" if is_paying_user else "free",
"mode": ENV,
"slow_mode": slow_mode,
"do_map": do_map,
"subissues_mode": subissues_mode,
"sandbox_mode": sandbox_mode,
"fast_mode": fast_mode,
}
logger.bind(**metadata)
posthog.capture(username, "started", properties=metadata)
logger.info(f"Getting repo {repo_full_name}")
if current_issue.state == "closed":
logger.warning(f"Issue {issue_number} is closed")
posthog.capture(username, "issue_closed", properties=metadata)
return {"success": False, "reason": "Issue is closed"}
current_issue.edit(body=summary)
item_to_react_to = (
current_issue.get_comment(comment_id) if comment_id else current_issue
)
replies_text = ""
comments = list(current_issue.get_comments())
if comment_id:
logger.info(f"Replying to comment {comment_id}...")
replies_text = "\nComments:\n" + "\n".join(
[
issue_comment_prompt.format(
username=comment.user.login,
reply=comment.body,
)
for comment in comments
if comment.user.type == "User"
]
)
summary = summary if summary else ""
prs = repo.get_pulls(
state="open", sort="created", base=SweepConfig.get_branch(repo)
)
for pr in prs:
# Check if this issue is mentioned in the PR, and pr is owned by bot
# This is done in create_pr, (pr_description = ...)
if (
pr.user.login == GITHUB_BOT_USERNAME
and f"Fixes #{issue_number}.\n" in pr.body
):
success = safe_delete_sweep_branch(pr, repo)
eyes_reaction = item_to_react_to.create_reaction("eyes")
# If SWEEP_BOT reacted to item_to_react_to with "rocket", then remove it.
reactions = item_to_react_to.get_reactions()
for reaction in reactions:
if reaction.content == "rocket" and reaction.user.login == GITHUB_BOT_USERNAME:
item_to_react_to.delete_reaction(reaction.id)
# Removed 1, 3
progress_headers = [
None,
"Step 1: 📍 Planning",
"Step 2: ⌨️ Coding",
"Step 3: 🔁 Code Review",
]
config_pr_url = None
# Find the first comment made by the bot
issue_comment = None
tickets_allocated = 5
if is_trial_user:
tickets_allocated = 15
if is_paying_user:
tickets_allocated = 500
ticket_count = (
max(tickets_allocated - chat_logger.get_ticket_count(), 0)
if chat_logger
else 999
)
daily_ticket_count = (
(2 - chat_logger.get_ticket_count(use_date=True) if not use_faster_model else 0)
if chat_logger
else 999
)
model_name = "GPT-3.5" if use_faster_model else "GPT-4"
payment_link = "https://buy.stripe.com/00g5npeT71H2gzCfZ8"
daily_message = (
f" and {daily_ticket_count} for the day"
if not is_paying_user and not is_trial_user
else ""
)
user_type = "💎 Sweep Pro" if is_paying_user else "⚡ Sweep Free Trial"
gpt_tickets_left_message = (
f"{ticket_count} GPT-4 tickets left for the month"
if not is_paying_user
else "unlimited GPT-4 tickets"
)
payment_message = (
f"{user_type}: I used {model_name} to create this ticket. You have {gpt_tickets_left_message}{daily_message}."
+ (
f" For more GPT-4 tickets, visit [our payment portal.]({payment_link})"
if not is_paying_user
else ""
)
)
payment_message_start = (
f"{user_type}: I'm creating this ticket using {model_name}. You have {gpt_tickets_left_message}{daily_message}."
+ (
f" For more GPT-4 tickets, visit [our payment portal.]({payment_link})"
if not is_paying_user
else ""
)
)
def get_comment_header(index, errored=False, pr_message="", done=False):
config_pr_message = (
"\n" + f"* Install Sweep Configs: [Pull Request]({config_pr_url})"
if config_pr_url is not None
else ""
)
config_pr_message = " To retrigger Sweep, edit the issue.\n" + config_pr_message
if index < 0:
index = 0
if index == 4:
return pr_message + config_pr_message
total = len(progress_headers) + 1
index += 1 if done else 0
index *= 100 / total
index = int(index)
index = min(100, index)
if errored:
return f""
return (
f""
+ ("\n" + stars_suffix if index != -1 else "")
+ "\n"
+ payment_message_start
+ config_pr_message
)
# Find Sweep's previous comment
print("USERNAME", GITHUB_BOT_USERNAME)
for comment in comments:
print("COMMENT", comment.user.login)
if comment.user.login == GITHUB_BOT_USERNAME:
print("Found comment")
issue_comment = comment
try:
config = SweepConfig.get_config(repo)
except EmptyRepository as e:
logger.info("Empty repo")
first_comment = (
"Sweep is currently not supported on empty repositories. Please add some"
f" code to your repository and try again.\n{sep}##"
f" {progress_headers[1]}\n{bot_suffix}{discord_suffix}"
)
if issue_comment is None:
issue_comment = current_issue.create_comment(first_comment)
else:
issue_comment.edit(first_comment)
return {"success": False}
cloned_repo = ClonedRepo(
repo_full_name, installation_id=installation_id, token=user_token
)
num_of_files = cloned_repo.get_num_files_from_repo()
time_estimate = math.ceil(3 + 5 * num_of_files / 1000)
indexing_message = (
"I'm searching for relevant snippets in your repository. If this is your first"
" time using Sweep, I'm indexing your repository. This may take up to"
f" {time_estimate} minutes. I'll let you know when I'm done."
)
first_comment = (
f"{get_comment_header(0)}\n{sep}I am currently looking into this ticket! I"
" will update the progress of the ticket in this comment. I am currently"
f" searching through your code, looking for relevant snippets.\n{sep}##"
f" {progress_headers[1]}\n{indexing_message}{bot_suffix}{discord_suffix}"
)
if issue_comment is None:
issue_comment = current_issue.create_comment(first_comment)
else:
issue_comment.edit(first_comment)
# Comment edit function
past_messages = {}
current_index = 0
# Random variables to save in case of errors
table = None # Show plan so user can finetune prompt
def edit_sweep_comment(message: str, index: int, pr_message="", done=False):
nonlocal current_index
# -1 = error, -2 = retry
# Only update the progress bar if the issue generation errors.
errored = index == -1
if index >= 0:
past_messages[index] = message
current_index = index
agg_message = None
# Include progress history
# index = -2 is reserved for
for i in range(
current_index + 2
): # go to next header (for Working on it... text)
if i == 0 or i >= len(progress_headers):
continue # skip None header
header = progress_headers[i]
if header is not None:
header = "## " + header + "\n"
else:
header = "No header\n"
msg = header + (past_messages.get(i) or "Working on it...")
if agg_message is None:
agg_message = msg
else:
agg_message = agg_message + f"\n{sep}" + msg
suffix = bot_suffix + discord_suffix
if errored:
agg_message = (
"## ❌ Unable to Complete PR"
+ "\n"
+ message
+ "\n\nFor bonus GPT-4 tickets, please report this bug on"
" **[Discord](https://discord.com/invite/sweep-ai)**."
)
if table is not None:
agg_message = (
agg_message
+ f"\n{sep}Please look at the generated plan. If something looks"
f" wrong, please add more details to your issue.\n\n{table}"
)
suffix = bot_suffix # don't include discord suffix for error messages
# Update the issue comment
issue_comment.edit(
f"{get_comment_header(current_index, errored, pr_message, done=done)}\n{sep}{agg_message}{suffix}"
)
if len(title + summary) < 20:
logger.info("Issue too short")
edit_sweep_comment(
(
"Please add more details to your issue. I need at least 20 characters"
" to generate a plan."
),
-1,
)
return {"success": True}
if (
repo_name.lower() not in WHITELISTED_REPOS
and not is_paying_user
and not is_trial_user
):
if ("sweep" in repo_name.lower()) or ("test" in repo_name.lower()):
logger.info("Test repository detected")
edit_sweep_comment(
(
"Sweep does not work on test repositories. Please create an issue"
" on a real repository. If you think this is a mistake, please"
" report this at https://discord.gg/sweep."
),
-1,
)
return {"success": False}
def log_error(error_type, exception, priority=0):
nonlocal is_paying_user, is_trial_user
if is_paying_user or is_trial_user:
if priority == 1:
priority = 0
elif priority == 2:
priority = 1
prefix = ""
if is_trial_user:
prefix = " (TRIAL)"
if is_paying_user:
prefix = " (PRO)"
content = (
f"**{error_type} Error**{prefix}\n{username}:"
f" {issue_url}\n```{exception}```"
)
discord_log_error(content, priority=priority)
if lint_mode:
# Get files to change
# Create new branch
# Send request to endpoint
for file_path in []:
SweepBot.run_sandbox(
repo.html_url, file_path, None, user_token, only_lint=True
)
logger.info("Fetching relevant files...")
try:
snippets, tree = search_snippets(
cloned_repo,
f"{title}\n{summary}\n{replies_text}",
num_files=num_of_snippets_to_query,
)
assert len(snippets) > 0
except Exception as e:
trace = traceback.format_exc()
logger.error(e)
logger.error(trace)
edit_sweep_comment(
(
"It looks like an issue has occurred around fetching the files."
" Perhaps the repo has not been initialized. If this error persists"
f" contact [email protected].\n\n> @{username}, please edit the issue"
" description to include more details and I will automatically"
" relaunch."
),
-1,
)
log_error("File Fetch", str(e) + "\n" + traceback.format_exc(), priority=1)
raise e
snippets = post_process_snippets(
snippets, max_num_of_snippets=2 if use_faster_model else 5
)
if not repo_description:
repo_description = "No description provided."
message_summary = summary + replies_text
external_results = ExternalSearcher.extract_summaries(message_summary)
if external_results:
message_summary += "\n\n" + external_results
user_dict = get_documentation_dict(repo)
docs_results = ""
try:
docs_results = extract_relevant_docs(
title + message_summary, user_dict, chat_logger
)
if docs_results:
message_summary += "\n\n" + docs_results
except Exception as e:
logger.error(f"Failed to extract docs: {e}")
human_message = HumanMessagePrompt(
repo_name=repo_name,
issue_url=issue_url,
username=username,
repo_description=repo_description.strip(),
title=title,
summary=message_summary,
snippets=snippets,
tree=tree,
)
if SLOW_MODE:
context_pruning = ContextPruning(chat_logger=chat_logger)
(
snippets_to_ignore,
_,
) = context_pruning.prune_context( # TODO, ignore directories
human_message, repo=repo
)
snippets = post_process_snippets(
snippets, max_num_of_snippets=5, exclude_snippets=snippets_to_ignore
)
logger.info(f"New snippets: {snippets}")
logger.info(f"New tree: {tree}")
human_message = HumanMessagePrompt(
repo_name=repo_name,
issue_url=issue_url,
username=username,
repo_description=repo_description.strip(),
title=title,
summary=message_summary,
snippets=snippets,
tree=tree,
)
sweep_bot = SweepBot.from_system_message_content(
human_message=human_message,
repo=repo,
is_reply=bool(comments),
chat_logger=chat_logger,
sweep_context=sweep_context,
)
# Check repository for sweep.yml file.
sweep_yml_exists = False
for content_file in repo.get_contents(""):
if content_file.name == "sweep.yaml":
sweep_yml_exists = True
break
# If sweep.yaml does not exist, then create a new PR that simply creates the sweep.yaml file.
if not sweep_yml_exists:
try:
logger.info("Creating sweep.yaml file...")
config_pr = create_config_pr(sweep_bot)
config_pr_url = config_pr.html_url
edit_sweep_comment(message="", index=-2)
except Exception as e:
logger.error(
"Failed to create new branch for sweep.yaml file.\n",
e,
traceback.format_exc(),
)
else:
logger.info("sweep.yaml file already exists.")
try:
# ANALYZE SNIPPETS
newline = "\n"
edit_sweep_comment(
"I found the following snippets in your repository. I will now analyze"
" these snippets and come up with a plan."
+ "\n\n"
+ create_collapsible(
"Some code snippets I looked at (click to expand). If some file is"
" missing from here, you can mention the path in the ticket"
" description.",
"\n".join(
[
f"https://github.com/{organization}/{repo_name}/blob/{repo.get_commits()[0].sha}/{snippet.file_path}#L{max(snippet.start, 1)}-L{min(snippet.end, snippet.content.count(newline) - 1)}\n"
for snippet in snippets
]
),
)
+ (
"I also found the following external resources that might be"
f" helpful:\n\n{external_results}\n\n"
if external_results
else ""
)
+ (f"\n\n{docs_results}\n\n" if docs_results else ""),
1,
)
if do_map:
subissues: list[ProposedIssue] = sweep_bot.generate_subissues()
edit_sweep_comment(
f"I'm creating the following subissues:\n\n"
+ "\n\n".join(
[
f"#{subissue.title}:\n" + blockquote(subissue.body)
for subissue in subissues
]
),
2,
)
for subissue in tqdm(subissues):
subissue.issue_id = repo.create_issue(
title="Sweep: " + subissue.title,
body=subissue.body + f"\n\nParent issue: #{issue_number}",
assignee=username,
).number
subissues_checklist = "\n\n".join(
[
f"- [ ] #{subissue.issue_id}\n\n"
+ blockquote(f"**{subissue.title}**\n{subissue.body}")
for subissue in subissues
]
)
current_issue.edit(
body=summary + "\n\n---\n\nChecklist:\n\n" + subissues_checklist
)
edit_sweep_comment(
f"I finished creating the subissues! Track them at:\n\n"
+ "\n".join(f"* #{subissue.issue_id}" for subissue in subissues),
3,
done=True,
)
edit_sweep_comment(f"N/A", 4)
edit_sweep_comment(f"I finished creating all the subissues.", 5)
return {"success": True}
# COMMENT ON ISSUE
# TODO: removed issue commenting here
logger.info("Fetching files to modify/create...")
file_change_requests, plan = sweep_bot.get_files_to_change()
if not file_change_requests:
if len(title + summary) < 60:
edit_sweep_comment(
(
"Sorry, I could not find any files to modify, can you please"
" provide more details? Please make sure that the title and"
" summary of the issue are at least 60 characters."
),
-1,
)
else:
edit_sweep_comment(
(
"Sorry, I could not find any files to modify, can you please"
" provide more details?"
),
-1,
)
raise Exception("No files to modify.")
sweep_bot.summarize_snippets()
file_change_requests = sweep_bot.validate_file_change_requests(
file_change_requests
)
table = tabulate(
[
[
f"`{file_change_request.filename}`",
file_change_request.instructions_display.replace(
"\n", "<br/>"
).replace("```", "\\```"),
]
for file_change_request in file_change_requests
],
headers=["File Path", "Proposed Changes"],
tablefmt="pipe",
)
edit_sweep_comment(
"From looking through the relevant snippets, I decided to make the"
" following modifications:\n\n" + table + "\n\n",
2,
)
# TODO(lukejagg): Generate PR after modifications are made
# CREATE PR METADATA
logger.info("Generating PR...")
pull_request = sweep_bot.generate_pull_request()
pull_request_content = pull_request.content.strip().replace("\n", "\n>")
pull_request_summary = f"**{pull_request.title}**\n`{pull_request.branch_name}`\n>{pull_request_content}\n"
# edit_sweep_comment(
# (
# "I have created a plan for writing the pull request. I am now working"
# " my plan and coding the required changes to address this issue. Here"
# f" is the planned pull request:\n\n{pull_request_summary}"
# ),
# 3,
# )
logger.info("Making PR...")
files_progress: list[tuple[str, str, str, str]] = [
(
file_change_request.filename,
file_change_request.instructions_display,
"⏳ In Progress",
"",
)
for file_change_request in file_change_requests
]
checkboxes_progress: list[tuple[str, str, str]] = [
(file_change_request.filename, file_change_request.instructions, " ")
for file_change_request in file_change_requests
]
checkboxes_contents = "\n".join(
[
create_checkbox(f"`{filename}`", blockquote(instructions), check == "X")
for filename, instructions, check in checkboxes_progress
]
)
checkboxes_collapsible = create_collapsible(
"Checklist", checkboxes_contents, opened=True
)
issue = repo.get_issue(number=issue_number)
issue.edit(body=summary + "\n\n" + checkboxes_collapsible)
delete_branch = False
generator = create_pr_changes( # make this async later
file_change_requests,
pull_request,
sweep_bot,
username,
installation_id,
issue_number,
chat_logger=chat_logger,
)
edit_sweep_comment(checkboxes_contents, 2)
response = {"error": NoFilesException()}
for item in generator:
if isinstance(item, dict):
response = item
break
file_change_request, changed_file, sandbox_response, commit = item
sandbox_response: SandboxResponse | None = sandbox_response
format_exit_code = (
lambda exit_code: "✅" if exit_code == 0 else f"❌ (`{exit_code}`)"
)
print(sandbox_response)
error_logs = (
(
create_collapsible(
"Sandbox Execution Logs",
"\n\n".join(
[
create_collapsible(
f"<code>{execution.command.format(file_path=file_change_request.filename)}</code> {i + 1}/{len(sandbox_response.executions)} {format_exit_code(execution.exit_code)}",
f"<pre>{clean_logs(execution.output)}</pre>",
i == len(sandbox_response.executions) - 1,
)
for i, execution in enumerate(
sandbox_response.executions
)
if len(sandbox_response.executions) > 0
# And error code check
]
),
opened=True,
)
)
if sandbox_response
else ""
)
if changed_file:
print("Changed File!")
commit_hash = (
commit.sha
if commit is not None
else repo.get_branch(pull_request.branch_name).commit.sha
)
commit_url = f"https://github.com/{repo_full_name}/commit/{commit_hash}"
checkboxes_progress = [
(
(
f"`{filename}` ✅ Commit [`{commit_hash[:7]}`]({commit_url})",
blockquote(instructions) + error_logs,
"X",
)
if file_change_request.filename == filename
else (filename, instructions, progress)
)
for filename, instructions, progress in checkboxes_progress
]
else:
print("Didn't change file!")
checkboxes_progress = [
(
(
f"`{filename}` ❌ Failed",
blockquote(instructions) + error_logs,
"X",
)
if file_change_request.filename == filename
else (filename, instructions, progress)
)
for filename, instructions, progress in checkboxes_progress
]
checkboxes_contents = "\n".join(
[
checkbox_template.format(
check=check,
filename=filename,
instructions=instructions,
)
for filename, instructions, check in checkboxes_progress
]
)
checkboxes_collapsible = collapsible_template.format(
summary="Checklist",
body=checkboxes_contents,
opened="open",
)
issue = repo.get_issue(number=issue_number)
issue.edit(body=summary + "\n\n" + checkboxes_collapsible)
logger.info(files_progress)
logger.info(f"Edited {file_change_request.filename}")
edit_sweep_comment(checkboxes_contents, 2)
if not response.get("success"):
raise Exception(f"Failed to create PR: {response.get('error')}")
pr_changes = response["pull_request"]
edit_sweep_comment(
"I have finished coding the issue. I am now reviewing it for completeness.",
3,
)
review_message = (
"Here are my self-reviews of my changes at"
f" [`{pr_changes.pr_head}`](https://github.com/{repo_full_name}/commits/{pr_changes.pr_head}).\n\n"
)
lint_output = None
try:
current_issue.delete_reaction(eyes_reaction.id)
except:
pass
try:
# Todo(lukejagg): Pass sandbox linter results to review_pr
# CODE REVIEW
changes_required, review_comment = review_pr(
repo=repo,
pr=pr_changes,
issue_url=issue_url,
username=username,
repo_description=repo_description,
title=title,
summary=summary,
replies_text=replies_text,
tree=tree,
lint_output=lint_output,
chat_logger=chat_logger,
)
# Todo(lukejagg): Execute sandbox after each iteration
lint_output = None
review_message += (
f"Here is the {ordinal(1)} review\n"
+ blockquote(review_comment)
+ "\n\n"
)
edit_sweep_comment(
review_message + "\n\nI'm currently addressing these suggestions.",
3,
)
logger.info(f"Addressing review comment {review_comment}")
if changes_required:
on_comment(
repo_full_name=repo_full_name,
repo_description=repo_description,
comment=review_comment,
username=username,
installation_id=installation_id,
pr_path=None,
pr_line_position=None,
pr_number=None,
pr=pr_changes,
chat_logger=chat_logger,
repo=repo,
)
except Exception as e:
logger.error(traceback.format_exc())
logger.error(e)
edit_sweep_comment(
review_message + "\n\nI finished incorporating these changes.",
3,
)
is_draft = config.get("draft", False)
try:
pr = repo.create_pull(
title=pr_changes.title,
body=pr_changes.body,
head=pr_changes.pr_head,
base=SweepConfig.get_branch(repo),
draft=is_draft,
)
except GithubException as e:
is_draft = False
pr = repo.create_pull(
title=pr_changes.title,
body=pr_changes.body,
head=pr_changes.pr_head,
base=SweepConfig.get_branch(repo),
draft=is_draft,
)
pr.add_to_labels(GITHUB_LABEL_NAME)
current_issue.create_reaction("rocket")
logger.info("Running github actions...")
try:
if is_draft:
logger.info("Skipping github actions because PR is a draft")
else:
commit = pr.get_commits().reversed[0]
check_runs = commit.get_check_runs()
for check_run in check_runs:
check_run.rerequest()
except Exception as e:
logger.error(e)
# Completed code review
edit_sweep_comment(
review_message + "\n\nSuccess! 🚀",
4,
pr_message=(
f"## Here's the PR! [{pr.html_url}]({pr.html_url}).\n{payment_message}"
),
done=True,
)
logger.info("Add successful ticket to counter")
except MaxTokensExceeded as e:
logger.info("Max tokens exceeded")
log_error(
"Max Tokens Exceeded",
str(e) + "\n" + traceback.format_exc(),
priority=2,
)
if chat_logger.is_paying_user():
edit_sweep_comment(
(
f"Sorry, I could not edit `{e.filename}` as this file is too long."
" We are currently working on improved file streaming to address"
" this issue.\n"
),
-1,
)
else:
edit_sweep_comment(
(
f"Sorry, I could not edit `{e.filename}` as this file is too"
" long.\n\nIf this file is incorrect, please describe the desired"
" file in the prompt. However, if you would like to edit longer"
" files, consider upgrading to [Sweep Pro](https://sweep.dev/) for"
" longer context lengths.\n"
),
-1,
)
delete_branch = True
raise e
except NoFilesException as e:
logger.info("Sweep could not find files to modify")
log_error(
"Sweep could not find files to modify",
str(e) + "\n" + traceback.format_exc(),
priority=2,
)
edit_sweep_comment(
(
"Sorry, Sweep could not find any appropriate files to edit to address"
" this issue. If this is a mistake, please provide more context and I"
f" will retry!\n\n> @{username}, please edit the issue description to"
" include more details about this issue."
),
-1,
)
delete_branch = True
raise e
except openai.error.InvalidRequestError as e:
logger.error(traceback.format_exc())
logger.error(e)
edit_sweep_comment(
(
"I'm sorry, but it looks our model has ran out of context length. We're"
" trying to make this happen less, but one way to mitigate this is to"
" code smaller files. If this error persists report it at"
" https://discord.gg/sweep."
),
-1,
)
log_error(
"Context Length",
str(e) + "\n" + traceback.format_exc(),
priority=2,
)
posthog.capture(
username,
"failed",
properties={
"error": str(e),
"reason": "Invalid request error / context length",
**metadata,
},
)
delete_branch = True
raise e
except Exception as e:
logger.error(traceback.format_exc())
logger.error(e)
# title and summary are defined elsewhere
if len(title + summary) < 60:
edit_sweep_comment(
(
"I'm sorry, but it looks like an error has occurred due to"
" insufficient information. Be sure to create a more detailed issue"
" so I can better address it. If this error persists report it at"
" https://discord.gg/sweep."
),
-1,
)
else:
edit_sweep_comment(
(
"I'm sorry, but it looks like an error has occurred. Try changing"
" the issue description to re-trigger Sweep. If this error persists"
" contact [email protected]."
),
-1,
)
log_error("Workflow", str(e) + "\n" + traceback.format_exc(), priority=1)
posthog.capture(
username,
"failed",
properties={"error": str(e), "reason": "Generic error", **metadata},
)
raise e
else:
try:
item_to_react_to.delete_reaction(eyes_reaction.id)
item_to_react_to.create_reaction("rocket")
except Exception as e:
logger.error(e)
finally:
cloned_repo.delete()
if delete_branch:
try:
if pull_request.branch_name.startswith("sweep"):
repo.get_git_ref(f"heads/{pull_request.branch_name}").delete()
else:
raise Exception(
f"Branch name {pull_request.branch_name} does not start with sweep/"
)
except Exception as e:
logger.error(e)
logger.error(traceback.format_exc())
print("Deleted branch", pull_request.branch_name)
posthog.capture(username, "success", properties={**metadata})
logger.info("on_ticket success")
return {"success": True}
"""
code_replaces = """
```
<<<< ORIGINAL
def clean_logs(logs: str):
cleaned_logs = re.sub(r"\x1b\[.*?[@-~]", "", logs.replace("```", "\`\`\`"))
cleaned_logs = cleaned_logs or "(nothing was outputted)"
return cleaned_logs
====
def clean_logs(logs: str):
cleaned_logs = re.sub(r"\x1b\[.*?[@-~]", "", logs.replace("```", "\`\`\`"))
cleaned_logs = cleaned_logs or "(nothing was outputted)"
cleaned_logs = re.sub('\n{2,}', '\n', cleaned_logs)
return cleaned_logs
>>>> UPDATED
```
"""
if __name__ == "__main__":
print(generate_new_file_from_patch(code_replaces, old_file)[0][:3000])
# generate_new_file_from_patch(code_replaces, old_file)[0]
| [] |
2024-01-10 | sweepai/sweep | tests~search~test_lexical_search.py | from sweepai.core.lexical_search import tokenize_call
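# file_contents: a snapshot of on_ticket.py used as the test input for lexical tokenization.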
file_contents = """\
# TODO: Add file validation
import math
import re
import traceback
import openai
import github
from github import GithubException, BadCredentialsException
from tabulate import tabulate
from tqdm import tqdm
from sweepai.logn import logger, LogTask
from sweepai.core.context_pruning import ContextPruning
from sweepai.core.documentation_searcher import extract_relevant_docs
from sweepai.core.entities import (
ProposedIssue,
SandboxResponse,
Snippet,
NoFilesException,
SweepContext,
MaxTokensExceeded,
EmptyRepository,
)
from sweepai.core.external_searcher import ExternalSearcher
from sweepai.core.slow_mode_expand import SlowModeBot
from sweepai.core.sweep_bot import SweepBot
from sweepai.core.prompts import issue_comment_prompt
# from sandbox.sandbox_utils import Sandbox
from sweepai.handlers.create_pr import (
create_pr_changes,
create_config_pr,
safe_delete_sweep_branch,
)
from sweepai.handlers.on_comment import on_comment
from sweepai.handlers.on_review import review_pr
from sweepai.utils.buttons import create_action_buttons
from sweepai.utils.chat_logger import ChatLogger
from sweepai.config.client import (
SweepConfig,
get_documentation_dict,
)
from sweepai.config.server import (
ENV,
MONGODB_URI,
OPENAI_API_KEY,
GITHUB_BOT_USERNAME,
GITHUB_LABEL_NAME,
OPENAI_USE_3_5_MODEL_ONLY,
WHITELISTED_REPOS,
)
from sweepai.utils.ticket_utils import *
from sweepai.utils.event_logger import posthog
from sweepai.utils.github_utils import ClonedRepo, get_github_client
from sweepai.utils.prompt_constructor import HumanMessagePrompt
from sweepai.utils.search_utils import search_snippets
from sweepai.utils.tree_utils import DirectoryTree
openai.api_key = OPENAI_API_KEY
@LogTask()
def on_ticket(
title: str,
summary: str,
issue_number: int,
issue_url: str,
username: str,
repo_full_name: str,
repo_description: str,
installation_id: int,
comment_id: int = None,
edited: bool = False,
):
(
title,
slow_mode,
do_map,
subissues_mode,
sandbox_mode,
fast_mode,
lint_mode,
) = strip_sweep(title)
# Flow:
# 1. Get relevant files
# 2: Get human message
# 3. Get files to change
# 4. Get file changes
# 5. Create PR
summary = summary or ""
summary = re.sub(
"<details (open)?>\n<summary>Checklist</summary>.*",
"",
summary,
flags=re.DOTALL,
).strip()
summary = re.sub("Checklist:\n\n- \[[ X]\].*", "", summary, flags=re.DOTALL).strip()
repo_name = repo_full_name
user_token, g = get_github_client(installation_id)
repo = g.get_repo(repo_full_name)
current_issue = repo.get_issue(number=issue_number)
assignee = current_issue.assignee.login if current_issue.assignee else None
if assignee is None:
assignee = current_issue.user.login
chat_logger = (
ChatLogger(
{
"repo_name": repo_name,
"title": title,
"summary": summary,
"issue_number": issue_number,
"issue_url": issue_url,
"username": username if not username.startswith("sweep") else assignee,
"repo_full_name": repo_full_name,
"repo_description": repo_description,
"installation_id": installation_id,
"type": "ticket",
"mode": ENV,
"comment_id": comment_id,
"edited": edited,
}
)
if MONGODB_URI
else None
)
if chat_logger:
is_paying_user = chat_logger.is_paying_user()
is_trial_user = chat_logger.is_trial_user()
use_faster_model = OPENAI_USE_3_5_MODEL_ONLY or chat_logger.use_faster_model(g)
else:
is_paying_user = True
is_trial_user = False
use_faster_model = False
if fast_mode:
use_faster_model = True
sweep_context = SweepContext.create(
username=username,
issue_url=issue_url,
use_faster_model=use_faster_model,
is_paying_user=is_paying_user,
repo=repo,
token=user_token,
)
logger.print(sweep_context)
if not comment_id and not edited and chat_logger:
chat_logger.add_successful_ticket(
gpt3=use_faster_model
) # moving higher, will increment the issue regardless of whether it's a success or not
organization, repo_name = repo_full_name.split("/")
metadata = {
"issue_url": issue_url,
"repo_full_name": repo_full_name,
"organization": organization,
"repo_name": repo_name,
"repo_description": repo_description,
"username": username,
"comment_id": comment_id,
"title": title,
"installation_id": installation_id,
"function": "on_ticket",
"edited": edited,
"model": "gpt-3.5" if use_faster_model else "gpt-4",
"tier": "pro" if is_paying_user else "free",
"mode": ENV,
"slow_mode": slow_mode,
"do_map": do_map,
"subissues_mode": subissues_mode,
"sandbox_mode": sandbox_mode,
"fast_mode": fast_mode,
}
# logger.bind(**metadata)
posthog.capture(username, "started", properties=metadata)
logger.info(f"Getting repo {repo_full_name}")
if current_issue.state == "closed":
logger.warning(f"Issue {issue_number} is closed")
posthog.capture(username, "issue_closed", properties=metadata)
return {"success": False, "reason": "Issue is closed"}
current_issue.edit(body=summary)
item_to_react_to = (
current_issue.get_comment(comment_id) if comment_id else current_issue
)
replies_text = ""
comments = list(current_issue.get_comments())
if comment_id:
logger.info(f"Replying to comment {comment_id}...")
replies_text = "\nComments:\n" + "\n".join(
[
issue_comment_prompt.format(
username=comment.user.login,
reply=comment.body,
)
for comment in comments
if comment.user.type == "User"
]
)
summary = summary if summary else ""
prs = repo.get_pulls(
state="open", sort="created", base=SweepConfig.get_branch(repo)
)
for pr in prs:
# Check if this issue is mentioned in the PR, and pr is owned by bot
# This is done in create_pr, (pr_description = ...)
if (
pr.user.login == GITHUB_BOT_USERNAME
and f"Fixes #{issue_number}.\n" in pr.body
):
success = safe_delete_sweep_branch(pr, repo)
eyes_reaction = item_to_react_to.create_reaction("eyes")
# If SWEEP_BOT reacted to item_to_react_to with "rocket", then remove it.
reactions = item_to_react_to.get_reactions()
for reaction in reactions:
if reaction.content == "rocket" and reaction.user.login == GITHUB_BOT_USERNAME:
item_to_react_to.delete_reaction(reaction.id)
# Removed 1, 3
progress_headers = [
None,
"Step 1: 🔎 Searching",
"Step 2: ⌨️ Coding",
"Step 3: 🔁 Code Review",
]
config_pr_url = None
# Find the first comment made by the bot
issue_comment = None
tickets_allocated = 5
if is_trial_user:
tickets_allocated = 15
if is_paying_user:
tickets_allocated = 500
ticket_count = (
max(tickets_allocated - chat_logger.get_ticket_count(), 0)
if chat_logger
else 999
)
daily_ticket_count = (
(3 - chat_logger.get_ticket_count(use_date=True) if not use_faster_model else 0)
if chat_logger
else 999
)
model_name = "GPT-3.5" if use_faster_model else "GPT-4"
payment_link = "https://buy.stripe.com/00g5npeT71H2gzCfZ8"
daily_message = (
f" and {daily_ticket_count} for the day"
if not is_paying_user and not is_trial_user
else ""
)
user_type = "💎 Sweep Pro" if is_paying_user else "⚡ Sweep Free Trial"
gpt_tickets_left_message = (
f"{ticket_count} GPT-4 tickets left for the month"
if not is_paying_user
else "unlimited GPT-4 tickets"
)
payment_message = (
f"{user_type}: I used {model_name} to create this ticket. You have {gpt_tickets_left_message}{daily_message}."
+ (
f" For more GPT-4 tickets, visit [our payment portal.]({payment_link})"
if not is_paying_user
else ""
)
)
payment_message_start = (
f"{user_type}: I'm creating this ticket using {model_name}. You have {gpt_tickets_left_message}{daily_message}."
+ (
f" For more GPT-4 tickets, visit [our payment portal.]({payment_link})"
if not is_paying_user
else ""
)
)
def get_comment_header(index, errored=False, pr_message="", done=False):
config_pr_message = (
"\n" + f"* Install Sweep Configs: [Pull Request]({config_pr_url})"
if config_pr_url is not None
else ""
)
# Why is this so convoluted
# config_pr_message = " To retrigger Sweep, edit the issue.\n" + config_pr_message
actions_message = create_action_buttons(
[
"Restart Sweep",
]
)
if index < 0:
index = 0
if index == 4:
return pr_message + f"\n\n---\n{actions_message}" + config_pr_message
total = len(progress_headers)
index += 1 if done else 0
index *= 100 / total
index = int(index)
index = min(100, index)
if errored:
return (
f""
+ f"\n\n---\n{actions_message}"
)
return (
f""
+ ("\n" + stars_suffix if index != -1 else "")
+ "\n"
+ payment_message_start
# + f"\n\n---\n{actions_message}"
+ config_pr_message
)
# Find Sweep's previous comment
logger.print("USERNAME", GITHUB_BOT_USERNAME)
for comment in comments:
logger.print("COMMENT", comment.user.login)
if comment.user.login == GITHUB_BOT_USERNAME:
logger.print("Found comment")
issue_comment = comment
try:
config = SweepConfig.get_config(repo)
except EmptyRepository as e:
logger.info("Empty repo")
first_comment = (
"Sweep is currently not supported on empty repositories. Please add some"
f" code to your repository and try again.\n{sep}##"
f" {progress_headers[1]}\n{bot_suffix}{discord_suffix}"
)
if issue_comment is None:
issue_comment = current_issue.create_comment(first_comment)
else:
issue_comment.edit(first_comment)
return {"success": False}
cloned_repo = ClonedRepo(
repo_full_name, installation_id=installation_id, token=user_token
)
num_of_files = cloned_repo.get_num_files_from_repo()
time_estimate = math.ceil(3 + 5 * num_of_files / 1000)
indexing_message = (
"I'm searching for relevant snippets in your repository. If this is your first"
" time using Sweep, I'm indexing your repository. This may take up to"
f" {time_estimate} minutes. I'll let you know when I'm done."
)
first_comment = (
f"{get_comment_header(0)}\n{sep}I am currently looking into this ticket! I"
" will update the progress of the ticket in this comment. I am currently"
f" searching through your code, looking for relevant snippets.\n{sep}##"
f" {progress_headers[1]}\n{indexing_message}{bot_suffix}{discord_suffix}"
)
if issue_comment is None:
issue_comment = current_issue.create_comment(first_comment)
else:
issue_comment.edit(first_comment)
# Comment edit function
past_messages = {}
current_index = 0
# Random variables to save in case of errors
table = None # Show plan so user can finetune prompt
def edit_sweep_comment(message: str, index: int, pr_message="", done=False):
nonlocal current_index, user_token, g, repo, issue_comment
# -1 = error, -2 = retry
# Only update the progress bar if the issue generation errors.
errored = index == -1
if index >= 0:
past_messages[index] = message
current_index = index
agg_message = None
# Include progress history
# index = -2 is reserved for
for i in range(
current_index + 2
): # go to next header (for Working on it... text)
if i == 0 or i >= len(progress_headers):
continue # skip None header
header = progress_headers[i]
if header is not None:
header = "## " + header + "\n"
else:
header = "No header\n"
msg = header + (past_messages.get(i) or "Working on it...")
if agg_message is None:
agg_message = msg
else:
agg_message = agg_message + f"\n{sep}" + msg
suffix = bot_suffix + discord_suffix
if errored:
agg_message = (
"## ❌ Unable to Complete PR"
+ "\n"
+ message
+ "\n\nFor bonus GPT-4 tickets, please report this bug on"
" **[Discord](https://discord.com/invite/sweep-ai)**."
)
if table is not None:
agg_message = (
agg_message
+ f"\n{sep}Please look at the generated plan. If something looks"
f" wrong, please add more details to your issue.\n\n{table}"
)
suffix = bot_suffix # don't include discord suffix for error messages
# Update the issue comment
try:
issue_comment.edit(
f"{get_comment_header(current_index, errored, pr_message, done=done)}\n{sep}{agg_message}{suffix}"
)
except BadCredentialsException:
logger.error("Bad credentials, refreshing token")
_user_token, g = get_github_client(installation_id)
repo = g.get_repo(repo_full_name)
issue_comment = repo.get_issue(current_issue.number)
issue_comment.edit(
f"{get_comment_header(current_index, errored, pr_message, done=done)}\n{sep}{agg_message}{suffix}"
)
if len(title + summary) < 20:
logger.info("Issue too short")
edit_sweep_comment(
(
"Please add more details to your issue. I need at least 20 characters"
" to generate a plan."
),
-1,
)
return {"success": True}
if (
repo_name.lower() not in WHITELISTED_REPOS
and not is_paying_user
and not is_trial_user
):
if ("sweep" in repo_name.lower()) or ("test" in repo_name.lower()):
logger.info("Test repository detected")
edit_sweep_comment(
(
"Sweep does not work on test repositories. Please create an issue"
" on a real repository. If you think this is a mistake, please"
" report this at https://discord.gg/sweep."
),
-1,
)
return {"success": False}
if lint_mode:
# Get files to change
# Create new branch
# Send request to endpoint
for file_path in []:
SweepBot.run_sandbox(
repo.html_url, file_path, None, user_token, only_lint=True
)
logger.info("Fetching relevant files...")
try:
snippets, tree = search_snippets(
cloned_repo,
f"{title}\n{summary}\n{replies_text}",
num_files=num_of_snippets_to_query,
)
assert len(snippets) > 0
except SystemExit:
raise SystemExit
except Exception as e:
trace = traceback.format_exc()
logger.error(e)
logger.error(trace)
edit_sweep_comment(
(
"It looks like an issue has occurred around fetching the files."
" Perhaps the repo has not been initialized. If this error persists"
f" contact [email protected].\n\n> @{username}, please edit the issue"
" description to include more details and I will automatically"
" relaunch."
),
-1,
)
log_error(
is_paying_user,
is_trial_user,
username,
issue_url,
"File Fetch",
str(e) + "\n" + traceback.format_exc(),
priority=1,
)
raise e
snippets = post_process_snippets(
snippets, max_num_of_snippets=2 if use_faster_model else 5
)
if not repo_description:
repo_description = "No description provided."
message_summary = summary + replies_text
external_results = ExternalSearcher.extract_summaries(message_summary)
if external_results:
message_summary += "\n\n" + external_results
user_dict = get_documentation_dict(repo)
docs_results = ""
try:
docs_results = extract_relevant_docs(
title + message_summary, user_dict, chat_logger
)
if docs_results:
message_summary += "\n\n" + docs_results
except SystemExit:
raise SystemExit
except Exception as e:
logger.error(f"Failed to extract docs: {e}")
human_message = HumanMessagePrompt(
repo_name=repo_name,
issue_url=issue_url,
username=username,
repo_description=repo_description.strip(),
title=title,
summary=message_summary,
snippets=snippets,
tree=tree,
)
context_pruning = ContextPruning(chat_logger=chat_logger)
(
snippets_to_ignore,
excluded_dirs,
) = context_pruning.prune_context( # TODO, ignore directories
human_message, repo=repo
)
snippets = post_process_snippets(
snippets, max_num_of_snippets=5, exclude_snippets=snippets_to_ignore
)
dir_obj = DirectoryTree()
dir_obj.parse(tree)
dir_obj.remove_multiple(excluded_dirs)
tree = str(dir_obj)
logger.info(f"New snippets: {snippets}")
logger.info(f"New tree: {tree}")
human_message = HumanMessagePrompt(
repo_name=repo_name,
issue_url=issue_url,
username=username,
repo_description=repo_description.strip(),
title=title,
summary=message_summary,
snippets=snippets,
tree=tree,
)
_user_token, g = get_github_client(installation_id)
repo = g.get_repo(repo_full_name)
sweep_bot = SweepBot.from_system_message_content(
human_message=human_message,
repo=repo,
is_reply=bool(comments),
chat_logger=chat_logger,
sweep_context=sweep_context,
)
# Check repository for sweep.yml file.
sweep_yml_exists = False
for content_file in repo.get_contents(""):
if content_file.name == "sweep.yaml":
sweep_yml_exists = True
break
# If sweep.yaml does not exist, then create a new PR that simply creates the sweep.yaml file.
if not sweep_yml_exists:
try:
logger.info("Creating sweep.yaml file...")
config_pr = create_config_pr(sweep_bot)
config_pr_url = config_pr.html_url
edit_sweep_comment(message="", index=-2)
except SystemExit:
raise SystemExit
except Exception as e:
logger.error(
"Failed to create new branch for sweep.yaml file.\n",
e,
traceback.format_exc(),
)
else:
logger.info("sweep.yaml file already exists.")
try:
# ANALYZE SNIPPETS
newline = "\n"
edit_sweep_comment(
"I found the following snippets in your repository. I will now analyze"
" these snippets and come up with a plan."
+ "\n\n"
+ create_collapsible(
"Some code snippets I looked at (click to expand). If some file is"
" missing from here, you can mention the path in the ticket"
" description.",
"\n".join(
[
f"https://github.com/{organization}/{repo_name}/blob/{repo.get_commits()[0].sha}/{snippet.file_path}#L{max(snippet.start, 1)}-L{min(snippet.end, snippet.content.count(newline) - 1)}\n"
for snippet in snippets
]
),
)
+ (
create_collapsible(
"I also found the following external resources that might be helpful:",
f"\n\n{external_results}\n\n",
)
if external_results
else ""
)
+ (f"\n\n{docs_results}\n\n" if docs_results else ""),
1,
)
if do_map:
subissues: list[ProposedIssue] = sweep_bot.generate_subissues()
edit_sweep_comment(
f"I'm creating the following subissues:\n\n"
+ "\n\n".join(
[
f"#{subissue.title}:\n" + blockquote(subissue.body)
for subissue in subissues
]
),
2,
)
for subissue in tqdm(subissues):
subissue.issue_id = repo.create_issue(
title="Sweep: " + subissue.title,
body=subissue.body + f"\n\nParent issue: #{issue_number}",
assignee=username,
).number
subissues_checklist = "\n\n".join(
[
f"- [ ] #{subissue.issue_id}\n\n"
+ blockquote(f"**{subissue.title}**\n{subissue.body}")
for subissue in subissues
]
)
current_issue.edit(
body=summary + "\n\n---\n\nChecklist:\n\n" + subissues_checklist
)
edit_sweep_comment(
f"I finished creating the subissues! Track them at:\n\n"
+ "\n".join(f"* #{subissue.issue_id}" for subissue in subissues),
3,
done=True,
)
edit_sweep_comment(f"N/A", 4)
edit_sweep_comment(f"I finished creating all the subissues.", 5)
return {"success": True}
# COMMENT ON ISSUE
# TODO: removed issue commenting here
logger.info("Fetching files to modify/create...")
file_change_requests, plan = sweep_bot.get_files_to_change()
if not file_change_requests:
if len(title + summary) < 60:
edit_sweep_comment(
(
"Sorry, I could not find any files to modify, can you please"
" provide more details? Please make sure that the title and"
" summary of the issue are at least 60 characters."
),
-1,
)
else:
edit_sweep_comment(
(
"Sorry, I could not find any files to modify, can you please"
" provide more details?"
),
-1,
)
raise Exception("No files to modify.")
sweep_bot.summarize_snippets()
file_change_requests = sweep_bot.validate_file_change_requests(
file_change_requests
)
table = tabulate(
[
[
f"`{file_change_request.filename}`",
file_change_request.instructions_display.replace(
"\n", "<br/>"
).replace("```", "\\```"),
]
for file_change_request in file_change_requests
],
headers=["File Path", "Proposed Changes"],
tablefmt="pipe",
)
# edit_sweep_comment(
# "From looking through the relevant snippets, I decided to make the"
# " following modifications:\n\n" + table + "\n\n",
# 2,
# )
# TODO(lukejagg): Generate PR after modifications are made
# CREATE PR METADATA
logger.info("Generating PR...")
pull_request = sweep_bot.generate_pull_request()
# pull_request_content = pull_request.content.strip().replace("\n", "\n>")
# pull_request_summary = f"**{pull_request.title}**\n`{pull_request.branch_name}`\n>{pull_request_content}\n"
# edit_sweep_comment(
# (
# "I have created a plan for writing the pull request. I am now working"
# " my plan and coding the required changes to address this issue. Here"
# f" is the planned pull request:\n\n{pull_request_summary}"
# ),
# 3,
# )
logger.info("Making PR...")
files_progress: list[tuple[str, str, str, str]] = [
(
file_change_request.filename,
file_change_request.instructions_display,
"⏳ In Progress",
"",
)
for file_change_request in file_change_requests
]
checkboxes_progress: list[tuple[str, str, str]] = [
(file_change_request.filename, file_change_request.instructions, " ")
for file_change_request in file_change_requests
]
checkboxes_contents = "\n".join(
[
create_checkbox(f"`{filename}`", blockquote(instructions), check == "X")
for filename, instructions, check in checkboxes_progress
]
)
checkboxes_collapsible = create_collapsible(
"Checklist", checkboxes_contents, opened=True
)
issue = repo.get_issue(number=issue_number)
issue.edit(body=summary + "\n\n" + checkboxes_collapsible)
delete_branch = False
generator = create_pr_changes( # make this async later
file_change_requests,
pull_request,
sweep_bot,
username,
installation_id,
issue_number,
chat_logger=chat_logger,
)
edit_sweep_comment(checkboxes_contents, 2)
response = {"error": NoFilesException()}
for item in generator:
if isinstance(item, dict):
response = item
break
file_change_request, changed_file, sandbox_response, commit = item
sandbox_response: SandboxResponse | None = sandbox_response
format_exit_code = (
lambda exit_code: "✓" if exit_code == 0 else f"❌ (`{exit_code}`)"
)
logger.print(sandbox_response)
error_logs = (
(
create_collapsible(
"Sandbox Execution Logs",
blockquote(
"\n\n".join(
[
create_collapsible(
f"<code>{execution.command.format(file_path=file_change_request.filename)}</code> {i + 1}/{len(sandbox_response.executions)} {format_exit_code(execution.exit_code)}",
f"<pre>{clean_logs(execution.output)}</pre>",
i == len(sandbox_response.executions) - 1,
)
for i, execution in enumerate(
sandbox_response.executions
)
if len(sandbox_response.executions) > 0
# And error code check
]
)
),
opened=True,
)
)
if sandbox_response
else ""
)
if changed_file:
logger.print("Changed File!")
commit_hash = (
commit.sha
if commit is not None
else repo.get_branch(pull_request.branch_name).commit.sha
)
commit_url = f"https://github.com/{repo_full_name}/commit/{commit_hash}"
checkboxes_progress = [
(
(
f"`{filename}` ✅ Commit [`{commit_hash[:7]}`]({commit_url})",
blockquote(instructions) + error_logs,
"X",
)
if file_change_request.filename == filename
else (filename, instructions, progress)
)
for filename, instructions, progress in checkboxes_progress
]
else:
logger.print("Didn't change file!")
checkboxes_progress = [
(
(
f"`{filename}` ❌ Failed",
blockquote(instructions) + error_logs,
"X",
)
if file_change_request.filename == filename
else (filename, instructions, progress)
)
for filename, instructions, progress in checkboxes_progress
]
checkboxes_contents = "\n".join(
[
checkbox_template.format(
check=check,
filename=filename,
instructions=instructions,
)
for filename, instructions, check in checkboxes_progress
]
)
checkboxes_collapsible = collapsible_template.format(
summary="Checklist",
body=checkboxes_contents,
opened="open",
)
issue = repo.get_issue(number=issue_number)
issue.edit(body=summary + "\n\n" + checkboxes_collapsible)
logger.info(files_progress)
logger.info(f"Edited {file_change_request.filename}")
edit_sweep_comment(checkboxes_contents, 2)
if not response.get("success"):
raise Exception(f"Failed to create PR: {response.get('error')}")
pr_changes = response["pull_request"]
edit_sweep_comment(
"I have finished coding the issue. I am now reviewing it for completeness.",
3,
)
change_location = f" [`{pr_changes.pr_head}`](https://github.com/{repo_full_name}/commits/{pr_changes.pr_head}).\n\n"
review_message = "Here are my self-reviews of my changes at" + change_location
lint_output = None
try:
current_issue.delete_reaction(eyes_reaction.id)
except SystemExit:
raise SystemExit
except:
pass
changes_required = False
try:
# Todo(lukejagg): Pass sandbox linter results to review_pr
# CODE REVIEW
changes_required, review_comment = review_pr(
repo=repo,
pr=pr_changes,
issue_url=issue_url,
username=username,
repo_description=repo_description,
title=title,
summary=summary,
replies_text=replies_text,
tree=tree,
lint_output=lint_output,
plan=plan, # plan for the PR
chat_logger=chat_logger,
)
# Todo(lukejagg): Execute sandbox after each iteration
lint_output = None
review_message += (
f"Here is the {ordinal(1)} review\n"
+ blockquote(review_comment)
+ "\n\n"
)
if changes_required:
edit_sweep_comment(
review_message + "\n\nI'm currently addressing these suggestions.",
3,
)
logger.info(f"Addressing review comment {review_comment}")
on_comment(
repo_full_name=repo_full_name,
repo_description=repo_description,
comment=review_comment,
username=username,
installation_id=installation_id,
pr_path=None,
pr_line_position=None,
pr_number=None,
pr=pr_changes,
chat_logger=chat_logger,
repo=repo,
)
except SystemExit:
raise SystemExit
except Exception as e:
logger.error(traceback.format_exc())
logger.error(e)
if changes_required:
edit_sweep_comment(
review_message + "\n\nI finished incorporating these changes.",
3,
)
else:
edit_sweep_comment(
f"I have finished reviewing the code for completeness. I did not find errors for {change_location}.",
3,
)
is_draft = config.get("draft", False)
try:
pr = repo.create_pull(
title=pr_changes.title,
body=pr_changes.body,
head=pr_changes.pr_head,
base=SweepConfig.get_branch(repo),
draft=is_draft,
)
except GithubException as e:
is_draft = False
pr = repo.create_pull(
title=pr_changes.title,
body=pr_changes.body,
head=pr_changes.pr_head,
base=SweepConfig.get_branch(repo),
draft=is_draft,
)
pr.add_to_labels(GITHUB_LABEL_NAME)
current_issue.create_reaction("rocket")
logger.info("Running github actions...")
try:
if is_draft:
logger.info("Skipping github actions because PR is a draft")
else:
commit = pr.get_commits().reversed[0]
check_runs = commit.get_check_runs()
for check_run in check_runs:
check_run.rerequest()
except SystemExit:
raise SystemExit
except Exception as e:
logger.error(e)
# Completed code review
edit_sweep_comment(
review_message + "\n\nSuccess! 🚀",
4,
pr_message=(
f"## Here's the PR! [{pr.html_url}]({pr.html_url}).\n{payment_message}"
),
done=True,
)
logger.info("Add successful ticket to counter")
except MaxTokensExceeded as e:
logger.info("Max tokens exceeded")
log_error(
is_paying_user,
is_trial_user,
username,
issue_url,
"Max Tokens Exceeded",
str(e) + "\n" + traceback.format_exc(),
priority=2,
)
if chat_logger.is_paying_user():
edit_sweep_comment(
(
f"Sorry, I could not edit `{e.filename}` as this file is too long."
" We are currently working on improved file streaming to address"
" this issue.\n"
),
-1,
)
else:
edit_sweep_comment(
(
f"Sorry, I could not edit `{e.filename}` as this file is too"
" long.\n\nIf this file is incorrect, please describe the desired"
" file in the prompt. However, if you would like to edit longer"
" files, consider upgrading to [Sweep Pro](https://sweep.dev/) for"
" longer context lengths.\n"
),
-1,
)
delete_branch = True
raise e
except NoFilesException as e:
logger.info("Sweep could not find files to modify")
log_error(
is_paying_user,
is_trial_user,
username,
issue_url,
"Sweep could not find files to modify",
str(e) + "\n" + traceback.format_exc(),
priority=2,
)
edit_sweep_comment(
(
"Sorry, Sweep could not find any appropriate files to edit to address"
" this issue. If this is a mistake, please provide more context and I"
f" will retry!\n\n> @{username}, please edit the issue description to"
" include more details about this issue."
),
-1,
)
delete_branch = True
raise e
except openai.error.InvalidRequestError as e:
logger.error(traceback.format_exc())
logger.error(e)
edit_sweep_comment(
(
"I'm sorry, but it looks our model has ran out of context length. We're"
" trying to make this happen less, but one way to mitigate this is to"
" code smaller files. If this error persists report it at"
" https://discord.gg/sweep."
),
-1,
)
log_error(
is_paying_user,
is_trial_user,
username,
issue_url,
"Context Length",
str(e) + "\n" + traceback.format_exc(),
priority=2,
)
posthog.capture(
username,
"failed",
properties={
"error": str(e),
"reason": "Invalid request error / context length",
**metadata,
},
)
delete_branch = True
raise e
except SystemExit:
raise SystemExit
except Exception as e:
logger.error(traceback.format_exc())
logger.error(e)
# title and summary are defined elsewhere
if len(title + summary) < 60:
edit_sweep_comment(
(
"I'm sorry, but it looks like an error has occurred due to"
" insufficient information. Be sure to create a more detailed issue"
" so I can better address it. If this error persists report it at"
" https://discord.gg/sweep."
),
-1,
)
else:
edit_sweep_comment(
(
"I'm sorry, but it looks like an error has occurred. Try changing"
" the issue description to re-trigger Sweep. If this error persists"
" contact [email protected]."
),
-1,
)
log_error(
is_paying_user,
is_trial_user,
username,
issue_url,
"Workflow",
str(e) + "\n" + traceback.format_exc(),
priority=1,
)
posthog.capture(
username,
"failed",
properties={"error": str(e), "reason": "Generic error", **metadata},
)
raise e
else:
try:
item_to_react_to.delete_reaction(eyes_reaction.id)
item_to_react_to.create_reaction("rocket")
except SystemExit:
raise SystemExit
except Exception as e:
logger.error(e)
finally:
cloned_repo.delete()
if delete_branch:
try:
if pull_request.branch_name.startswith("sweep"):
repo.get_git_ref(f"heads/{pull_request.branch_name}").delete()
else:
raise Exception(
f"Branch name {pull_request.branch_name} does not start with sweep/"
)
except SystemExit:
raise SystemExit
except Exception as e:
logger.error(e)
logger.error(traceback.format_exc())
logger.print("Deleted branch", pull_request.branch_name)
posthog.capture(username, "success", properties={**metadata})
logger.info("on_ticket success")
return {"success": True}
"""
tokens = tokenize_call(file_contents)
symbols = list(set([token.text for token in tokens]))
print(symbols)
| [] |
2024-01-10 | sweepai/sweep | sweepai~handlers~on_ticket.py | """
on_ticket is the main function that is called when a new issue is created.
It is only called by the webhook handler in sweepai/api.py.
"""
import difflib
import os
import re
import traceback
from time import time
import markdown
import openai
import yaml
import yamllint.config as yamllint_config
from github import BadCredentialsException
from github.Issue import Issue
from logtail import LogtailContext, LogtailHandler
from loguru import logger
from tabulate import tabulate
from tqdm import tqdm
from yamllint import linter
from sweepai.agents.pr_description_bot import PRDescriptionBot
from sweepai.config.client import (
DEFAULT_RULES,
RESET_FILE,
RESTART_SWEEP_BUTTON,
REVERT_CHANGED_FILES_TITLE,
RULES_LABEL,
RULES_TITLE,
SWEEP_BAD_FEEDBACK,
SWEEP_GOOD_FEEDBACK,
SweepConfig,
get_documentation_dict,
get_rules,
)
from sweepai.config.server import (
DISCORD_FEEDBACK_WEBHOOK_URL,
ENV,
GITHUB_BOT_USERNAME,
GITHUB_LABEL_NAME,
IS_SELF_HOSTED,
LOGTAIL_SOURCE_KEY,
MONGODB_URI,
OPENAI_USE_3_5_MODEL_ONLY,
WHITELISTED_REPOS,
)
from sweepai.core.entities import (
AssistantRaisedException,
FileChangeRequest,
MaxTokensExceeded,
NoFilesException,
ProposedIssue,
PullRequest,
SandboxResponse,
)
from sweepai.core.entities import create_error_logs as entities_create_error_logs
from sweepai.core.external_searcher import ExternalSearcher
from sweepai.core.sweep_bot import SweepBot
from sweepai.handlers.create_pr import (
create_config_pr,
create_pr_changes,
safe_delete_sweep_branch,
)
from sweepai.handlers.on_comment import on_comment
from sweepai.handlers.on_review import review_pr
from sweepai.utils.buttons import Button, ButtonList, create_action_buttons
from sweepai.utils.chat_logger import ChatLogger
from sweepai.utils.diff import generate_diff
from sweepai.utils.event_logger import posthog
from sweepai.utils.github_utils import ClonedRepo, get_github_client
from sweepai.utils.progress import (
AssistantConversation,
PaymentContext,
TicketContext,
TicketProgress,
TicketProgressStatus,
)
from sweepai.utils.prompt_constructor import HumanMessagePrompt
from sweepai.utils.str_utils import (
UPDATES_MESSAGE,
blockquote,
bot_suffix,
checkbox_template,
clean_logs,
collapsible_template,
create_checkbox,
create_collapsible,
discord_suffix,
format_sandbox_success,
ordinal,
sep,
stars_suffix,
strip_sweep,
to_branch_name,
)
from sweepai.utils.ticket_utils import (
center,
fetch_relevant_files,
fire_and_forget_wrapper,
log_error,
)
from sweepai.utils.user_settings import UserSettings
# from sandbox.sandbox_utils import Sandbox
sweeping_gif = """<a href="https://github.com/sweepai/sweep"><img class="swing" src="https://raw.githubusercontent.com/sweepai/sweep/main/.assets/sweeping.gif" width="100" style="width:50px; margin-bottom:10px" alt="Sweeping"></a>"""
custom_config = """
extends: relaxed
rules:
line-length: disable
indentation: disable
"""
INSTRUCTIONS_FOR_REVIEW = """\
### 💡 To get Sweep to edit this pull request, you can:
* Comment below, and Sweep can edit the entire PR
* Comment on a file, Sweep will only modify the commented file
* Edit the original issue to get Sweep to recreate the PR from scratch"""
email_template = """Hey {name},
<br/><br/>
🚀 I just finished creating a pull request for your issue ({repo_full_name}#{issue_number}) at <a href="{pr_url}">{repo_full_name}#{pr_number}</a>!
<br/><br/>
You can view how I created this pull request <a href="{progress_url}">here</a>.
<h2>Summary</h2>
<blockquote>
{summary}
</blockquote>
<h2>Files Changed</h2>
<ul>
{files_changed}
</ul>
{sweeping_gif}
<br/>
Cheers,
<br/>
Sweep
<br/>"""
def on_ticket(
title: str,
summary: str,
issue_number: int,
issue_url: str,
username: str,
repo_full_name: str,
repo_description: str,
installation_id: int,
comment_id: int = None,
edited: bool = False,
tracking_id: str | None = None,
):
on_ticket_start_time = time()
logger.info(f"Starting on_ticket with title {title} and summary {summary}")
(
title,
slow_mode,
do_map,
subissues_mode,
sandbox_mode,
fast_mode,
lint_mode,
) = strip_sweep(title)
def initialize_logtail_context():
context = LogtailContext()
context.context(
task={
"issue_url": issue_url,
"issue_number": issue_number,
"repo_full_name": repo_full_name,
"repo_description": repo_description,
"username": username,
"comment_id": comment_id,
"edited": edited,
"issue_title": title,
}
)
handler = LogtailHandler(source_token=LOGTAIL_SOURCE_KEY, context=context)
logger.add(handler)
fire_and_forget_wrapper(initialize_logtail_context)()
summary = summary or ""
summary = re.sub(
"<details (open)?>(\r)?\n<summary>Checklist</summary>.*",
"",
summary,
flags=re.DOTALL,
).strip()
summary = re.sub(
"---\s+Checklist:(\r)?\n(\r)?\n- \[[ X]\].*", "", summary, flags=re.DOTALL
).strip()
summary = re.sub("### Details\n\n_No response_", "", summary, flags=re.DOTALL)
summary = re.sub("\n\n", "\n", summary, flags=re.DOTALL)
repo_name = repo_full_name
user_token, g = get_github_client(installation_id)
repo = g.get_repo(repo_full_name)
current_issue: Issue = repo.get_issue(number=issue_number)
assignee = current_issue.assignee.login if current_issue.assignee else None
if assignee is None:
assignee = current_issue.user.login
ticket_progress = TicketProgress(
tracking_id=tracking_id,
username=username,
context=TicketContext(
title=title,
description=summary,
repo_full_name=repo_full_name,
issue_number=issue_number,
is_public=repo.private is False,
start_time=time(),
),
)
branch_match = re.search(r"branch: (.*)(\n\r)?", summary)
if branch_match:
branch_name = branch_match.group(1)
fire_and_forget_wrapper(SweepConfig.get_branch)(repo, branch_name)
chat_logger = (
ChatLogger(
{
"repo_name": repo_name,
"title": title,
"summary": summary,
"issue_number": issue_number,
"issue_url": issue_url,
"username": username if not username.startswith("sweep") else assignee,
"repo_full_name": repo_full_name,
"repo_description": repo_description,
"installation_id": installation_id,
"type": "ticket",
"mode": ENV,
"comment_id": comment_id,
"edited": edited,
"tracking_id": tracking_id,
},
active=True,
)
if MONGODB_URI
else None
)
if chat_logger:
is_paying_user = chat_logger.is_paying_user()
is_consumer_tier = chat_logger.is_consumer_tier()
use_faster_model = OPENAI_USE_3_5_MODEL_ONLY or chat_logger.use_faster_model()
else:
is_paying_user = True
is_consumer_tier = False
use_faster_model = False
if fast_mode:
use_faster_model = True
if not comment_id and not edited and chat_logger and not sandbox_mode:
fire_and_forget_wrapper(chat_logger.add_successful_ticket)(
gpt3=use_faster_model
)
organization, repo_name = repo_full_name.split("/")
metadata = {
"issue_url": issue_url,
"repo_full_name": repo_full_name,
"organization": organization,
"repo_name": repo_name,
"repo_description": repo_description,
"username": username,
"comment_id": comment_id,
"title": title,
"installation_id": installation_id,
"function": "on_ticket",
"edited": edited,
"model": "gpt-3.5" if use_faster_model else "gpt-4",
"tier": "pro" if is_paying_user else "free",
"mode": ENV,
"slow_mode": slow_mode,
"do_map": do_map,
"subissues_mode": subissues_mode,
"sandbox_mode": sandbox_mode,
"fast_mode": fast_mode,
"is_self_hosted": IS_SELF_HOSTED,
"tracking_id": tracking_id,
}
fire_and_forget_wrapper(posthog.capture)(username, "started", properties=metadata)
try:
if current_issue.state == "closed":
fire_and_forget_wrapper(posthog.capture)(
username,
"issue_closed",
properties={
**metadata,
"duration": round(time() - on_ticket_start_time),
},
)
return {"success": False, "reason": "Issue is closed"}
# Add :eyes: emoji to ticket
def add_emoji(reaction_content="eyes"):
item_to_react_to = (
current_issue.get_comment(comment_id) if comment_id else current_issue
)
item_to_react_to.create_reaction("eyes")
fire_and_forget_wrapper(add_emoji)()
# If SWEEP_BOT reacted to item_to_react_to with "rocket", then remove it.
def remove_emoji(content_to_delete="eyes"):
item_to_react_to = (
current_issue.get_comment(comment_id) if comment_id else current_issue
)
reactions = item_to_react_to.get_reactions()
for reaction in reactions:
if (
reaction.content == content_to_delete
and reaction.user.login == GITHUB_BOT_USERNAME
):
item_to_react_to.delete_reaction(reaction.id)
fire_and_forget_wrapper(remove_emoji)(content_to_delete="rocket")
fire_and_forget_wrapper(current_issue.edit)(body=summary)
replies_text = ""
summary = summary if summary else ""
def delete_old_prs():
logger.info("Deleting old PRs...")
prs = repo.get_pulls(
state="open",
sort="created",
direction="desc",
base=SweepConfig.get_branch(repo),
)
checked_pr_count = 0
for pr in tqdm(prs):
# # Check if this issue is mentioned in the PR, and pr is owned by bot
# # This is done in create_pr, (pr_description = ...)
if checked_pr_count >= 40:
break
if (
pr.user.login == GITHUB_BOT_USERNAME
and f"Fixes #{issue_number}.\n" in pr.body
):
success = safe_delete_sweep_branch(pr, repo)
break
checked_pr_count += 1
fire_and_forget_wrapper(delete_old_prs)()
if not sandbox_mode:
progress_headers = [
None,
"Step 1: 🔎 Searching",
"Step 2: ⌨️ Coding",
"Step 3: 🔁 Code Review",
]
else:
progress_headers = [
None,
"📖 Reading File",
"🛠️ Executing Sandbox",
]
issue_comment = None
payment_message, payment_message_start = get_payment_messages(chat_logger)
ticket_progress.context.payment_context = PaymentContext(
use_faster_model=use_faster_model,
pro_user=is_paying_user,
daily_tickets_used=chat_logger.get_ticket_count(use_date=True)
if chat_logger
else 0,
monthly_tickets_used=chat_logger.get_ticket_count() if chat_logger else 0,
)
ticket_progress.save()
config_pr_url = None
user_settings = UserSettings.from_username(username=username)
user_settings_message = user_settings.get_message()
def get_comment_header(
index,
errored=False,
pr_message="",
done=False,
initial_sandbox_response: int | SandboxResponse = -1,
initial_sandbox_response_file=None,
):
config_pr_message = (
"\n"
+ f"<div align='center'>Install Sweep Configs: <a href='{config_pr_url}'>Pull Request</a></div>"
if config_pr_url is not None
else ""
)
actions_message = create_action_buttons(
[
RESTART_SWEEP_BUTTON,
]
)
sandbox_execution_message = "\n\n## GitHub Actions failed\n\nThe sandbox appears to be unavailable or down.\n\n"
if initial_sandbox_response == -1:
sandbox_execution_message = ""
elif initial_sandbox_response is not None:
repo = g.get_repo(repo_full_name)
commit_hash = repo.get_commits()[0].sha
success = (
initial_sandbox_response.outputs
and initial_sandbox_response.success
)
status = "✓" if success else "X"
sandbox_execution_message = (
"\n\n## GitHub Actions"
+ status
+ "\n\nHere are the GitHub Actions logs prior to making any changes:\n\n"
)
sandbox_execution_message += entities_create_error_logs(
f'<a href="https://github.com/{repo_full_name}/commit/{commit_hash}"><code>{commit_hash[:7]}</code></a>',
initial_sandbox_response,
initial_sandbox_response_file,
)
if success:
sandbox_execution_message += f"\n\nSandbox passed on the latest `{repo.default_branch}`, so sandbox checks will be enabled for this issue."
else:
sandbox_execution_message += f"\n\nSandbox failed, so all sandbox checks will be disabled for this issue."
if index < 0:
index = 0
if index == 4:
return (
pr_message
+ config_pr_message
+ f"\n\n---\n{user_settings.get_message(completed=True)}"
+ f"\n\n---\n{actions_message}"
+ sandbox_execution_message
)
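# Map the current step index onto a 0-100 percentage across the progress
# headers: completion bumps the step by one, and the result is clamped to 100.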
total = len(progress_headers)
index += 1 if done else 0
index *= 100 / total
index = int(index)
index = min(100, index)
if errored:
pbar = f"\n\n<img src='https://progress-bar.dev/{index}/?&title=Errored&width=600' alt='{index}%' />"
return (
f"{center(sweeping_gif)}<br/>{center(pbar)}\n\n"
+ f"\n\n---\n{actions_message}"
+ sandbox_execution_message
)
pbar = f"\n\n<img src='https://progress-bar.dev/{index}/?&title=Progress&width=600' alt='{index}%' />"
return (
f"{center(sweeping_gif)}"
+ center(
f'\n\n<h2>✨ Track Sweep\'s progress on our <a href="https://progress.sweep.dev/issues/{tracking_id}">progress dashboard</a>!</h2>'
)
+ f"<br/>{center(pbar)}"
+ ("\n" + stars_suffix if index != -1 else "")
+ "\n"
+ center(payment_message_start)
+ f"\n\n---\n{user_settings_message}"
+ config_pr_message
+ f"\n\n---\n{actions_message}"
+ sandbox_execution_message
)
cloned_repo = ClonedRepo(
repo_full_name, installation_id=installation_id, token=user_token, repo=repo
)
# check that repo's directory is non-empty
if os.listdir(cloned_repo.cached_dir) == []:
logger.info("Empty repo")
first_comment = (
"Sweep is currently not supported on empty repositories. Please add some"
f" code to your repository and try again.\n{sep}##"
f" {progress_headers[1]}\n{bot_suffix}{discord_suffix}"
)
if issue_comment is None:
issue_comment = current_issue.create_comment(first_comment)
else:
issue_comment.edit(first_comment)
return {"success": False}
indexing_message = (
"I'm searching for relevant snippets in your repository. If this is your first"
" time using Sweep, I'm indexing your repository. You can monitor the progress using the progress dashboard"
)
first_comment = (
f"{get_comment_header(0)}\n{sep}I am currently looking into this ticket! I"
" will update the progress of the ticket in this comment. I am currently"
f" searching through your code, looking for relevant snippets.\n{sep}##"
f" {progress_headers[1]}\n{indexing_message}{bot_suffix}{discord_suffix}"
)
# Find Sweep's previous comment
comments = []
for comment in current_issue.get_comments():
comments.append(comment)
if comment.user.login == GITHUB_BOT_USERNAME:
issue_comment = comment
break
if issue_comment is None:
issue_comment = current_issue.create_comment(first_comment)
else:
fire_and_forget_wrapper(issue_comment.edit)(first_comment)
past_messages = {}
current_index = 0
table = None
initial_sandbox_response = -1
initial_sandbox_response_file = None
def edit_sweep_comment(message: str, index: int, pr_message="", done=False):
nonlocal current_index, user_token, g, repo, issue_comment, initial_sandbox_response, initial_sandbox_response_file
# -1 = error, -2 = retry
# Only update the progress bar if the issue generation errors.
errored = index == -1
if index >= 0:
past_messages[index] = message
current_index = index
agg_message = None
# Include progress history
# index = -2 is reserved for
for i in range(
current_index + 2
): # go to next header (for Working on it... text)
if i == 0 or i >= len(progress_headers):
continue # skip None header
header = progress_headers[i]
if header is not None:
header = "## " + header + "\n"
else:
header = "No header\n"
msg = header + (past_messages.get(i) or "Working on it...")
if agg_message is None:
agg_message = msg
else:
agg_message = agg_message + f"\n{sep}" + msg
suffix = bot_suffix + discord_suffix
if errored:
agg_message = (
"## ❌ Unable to Complete PR"
+ "\n"
+ message
+ "\n\nFor bonus GPT-4 tickets, please report this bug on"
f" **[Discord](https://discord.gg/invite/sweep)** (tracking ID: `{tracking_id}`)."
)
if table is not None:
agg_message = (
agg_message
+ f"\n{sep}Please look at the generated plan. If something looks"
f" wrong, please add more details to your issue.\n\n{table}"
)
suffix = bot_suffix # don't include discord suffix for error messages
# Update the issue comment
msg = f"{get_comment_header(current_index, errored, pr_message, done=done, initial_sandbox_response=initial_sandbox_response, initial_sandbox_response_file=initial_sandbox_response_file)}\n{sep}{agg_message}{suffix}"
try:
issue_comment.edit(msg)
except BadCredentialsException:
logger.error(
f"Bad credentials, refreshing token (tracking ID: `{tracking_id}`)"
)
_user_token, g = get_github_client(installation_id)
repo = g.get_repo(repo_full_name)
for comment in comments:
if comment.user.login == GITHUB_BOT_USERNAME:
issue_comment = comment
current_issue = repo.get_issue(number=issue_number)
if issue_comment is None:
issue_comment = current_issue.create_comment(msg)
else:
issue_comment = [
comment
for comment in current_issue.get_comments()
if comment.user == GITHUB_BOT_USERNAME
][0]
issue_comment.edit(msg)
if sandbox_mode:
handle_sandbox_mode(
title, repo_full_name, repo, ticket_progress, edit_sweep_comment
)
return {"success": True}
if len(title + summary) < 20:
logger.info("Issue too short")
edit_sweep_comment(
(
f"Please add more details to your issue. I need at least 20 characters"
f" to generate a plan. Please join our Discord server for support (tracking_id={tracking_id})"
),
-1,
)
posthog.capture(
username,
"issue_too_short",
properties={
**metadata,
"duration": round(time() - on_ticket_start_time),
},
)
return {"success": True}
if (
repo_name.lower() not in WHITELISTED_REPOS
and not is_paying_user
and not is_consumer_tier
):
if ("sweep" in repo_name.lower()) or ("test" in repo_name.lower()):
logger.info("Test repository detected")
edit_sweep_comment(
(
f"Sweep does not work on test repositories. Please create an issue"
f" on a real repository. If you think this is a mistake, please"
f" report this at https://discord.gg/sweep. Please join our Discord server for support (tracking_id={tracking_id})"
),
-1,
)
posthog.capture(
username,
"test_repo",
properties={
**metadata,
"duration": round(time() - on_ticket_start_time),
},
)
return {"success": False}
try:
snippets, tree, _ = fetch_relevant_files(
cloned_repo,
title,
summary,
replies_text,
username,
metadata,
on_ticket_start_time,
tracking_id,
is_paying_user,
is_consumer_tier,
issue_url,
chat_logger,
ticket_progress,
)
except:
edit_sweep_comment(
(
"It looks like an issue has occurred around fetching the files."
" Perhaps the repo has not been initialized. If this error persists"
f" contact [email protected].\n\n> @{username}, editing this issue description to include more details will automatically make me relaunch. Please join our Discord server for support (tracking_id={tracking_id})"
),
-1,
)
raise Exception("Failed to fetch files")
ticket_progress.search_progress.indexing_progress = (
ticket_progress.search_progress.indexing_total
)
ticket_progress.status = TicketProgressStatus.PLANNING
ticket_progress.save()
# Fetch git commit history
if not repo_description:
repo_description = "No description provided."
message_summary = summary + replies_text
external_results = ExternalSearcher.extract_summaries(message_summary)
if external_results:
message_summary += "\n\n" + external_results
user_dict = get_documentation_dict(repo)
docs_results = ""
human_message = HumanMessagePrompt(
repo_name=repo_name,
issue_url=issue_url,
username=username,
repo_description=repo_description.strip(),
title=title,
summary=message_summary,
snippets=snippets,
tree=tree,
)
_user_token, g = get_github_client(installation_id)
repo = g.get_repo(repo_full_name)
sweep_bot = SweepBot.from_system_message_content(
human_message=human_message,
repo=repo,
is_reply=bool(comments),
chat_logger=chat_logger,
cloned_repo=cloned_repo,
ticket_progress=ticket_progress,
)
# Check repository for sweep.yml file.
sweep_yml_exists = False
sweep_yml_failed = False
for content_file in repo.get_contents(""):
if content_file.name == "sweep.yaml":
sweep_yml_exists = True
# Check if YAML is valid
yaml_content = content_file.decoded_content.decode("utf-8")
sweep_yaml_dict = {}
try:
sweep_yaml_dict = yaml.safe_load(yaml_content)
except:
logger.error(f"Failed to load YAML file: {yaml_content}")
if len(sweep_yaml_dict) > 0:
break
linter_config = yamllint_config.YamlLintConfig(custom_config)
problems = list(linter.run(yaml_content, linter_config))
if problems:
errors = [
f"Line {problem.line}: {problem.desc} (rule: {problem.rule})"
for problem in problems
]
error_message = "\n".join(errors)
markdown_error_message = f"**There is something wrong with your [sweep.yaml](https://github.com/{repo_full_name}/blob/main/sweep.yaml):**\n```\n{error_message}\n```"
sweep_yml_failed = True
logger.error(markdown_error_message)
edit_sweep_comment(markdown_error_message, -1)
else:
logger.info("The YAML file is valid. No errors found.")
break
# If sweep.yaml does not exist, then create a new PR that simply creates the sweep.yaml file.
if not sweep_yml_exists:
try:
logger.info("Creating sweep.yaml file...")
config_pr = create_config_pr(sweep_bot, cloned_repo=cloned_repo)
config_pr_url = config_pr.html_url
edit_sweep_comment(message="", index=-2)
except SystemExit:
raise SystemExit
except Exception as e:
logger.error(
"Failed to create new branch for sweep.yaml file.\n",
e,
traceback.format_exc(),
)
else:
logger.info("sweep.yaml file already exists.")
try:
# ANALYZE SNIPPETS
newline = "\n"
edit_sweep_comment(
"I found the following snippets in your repository. I will now analyze"
" these snippets and come up with a plan."
+ "\n\n"
+ create_collapsible(
"Some code snippets I think are relevant in decreasing order of relevance (click to expand). If some file is missing from here, you can mention the path in the ticket description.",
"\n".join(
[
f"https://github.com/{organization}/{repo_name}/blob/{repo.get_commits()[0].sha}/{snippet.file_path}#L{max(snippet.start, 1)}-L{min(snippet.end, snippet.content.count(newline) - 1)}\n"
for snippet in snippets
]
),
)
+ (
create_collapsible(
"I also found the following external resources that might be helpful:",
f"\n\n{external_results}\n\n",
)
if external_results
else ""
)
+ (f"\n\n{docs_results}\n\n" if docs_results else ""),
1,
)
if do_map:
subissues: list[ProposedIssue] = sweep_bot.generate_subissues()
edit_sweep_comment(
f"I'm creating the following subissues:\n\n"
+ "\n\n".join(
[
f"#{subissue.title}:\n" + blockquote(subissue.body)
for subissue in subissues
]
),
2,
)
for subissue in tqdm(subissues):
subissue.issue_id = repo.create_issue(
title="Sweep: " + subissue.title,
body=subissue.body + f"\n\nParent issue: #{issue_number}",
assignee=username,
).number
subissues_checklist = "\n\n".join(
[
f"- [ ] #{subissue.issue_id}\n\n"
+ blockquote(f"**{subissue.title}**\n{subissue.body}")
for subissue in subissues
]
)
current_issue.edit(
body=summary + "\n\n---\n\nChecklist:\n\n" + subissues_checklist
)
edit_sweep_comment(
f"I finished creating the subissues! Track them at:\n\n"
+ "\n".join(f"* #{subissue.issue_id}" for subissue in subissues),
3,
done=True,
)
edit_sweep_comment(f"N/A", 4)
edit_sweep_comment(f"I finished creating all the subissues.", 5)
posthog.capture(
username,
"subissues_created",
properties={
**metadata,
"count": len(subissues),
"duration": round(time() - on_ticket_start_time),
},
)
return {"success": True}
logger.info("Fetching files to modify/create...")
non_python_count = sum(
not file_path.endswith(".py")
and not file_path.endswith(".ipynb")
and not file_path.endswith(".md")
for file_path in human_message.get_file_paths()
)
python_count = len(human_message.get_file_paths()) - non_python_count
is_python_issue = python_count >= non_python_count and python_count > 0
posthog.capture(
username,
"is_python_issue",
properties={"is_python_issue": is_python_issue},
)
file_change_requests, plan = sweep_bot.get_files_to_change(is_python_issue)
ticket_progress.planning_progress.file_change_requests = (
file_change_requests
)
ticket_progress.coding_progress.file_change_requests = file_change_requests
ticket_progress.coding_progress.assistant_conversations = [
AssistantConversation() for fcr in file_change_requests
]
ticket_progress.status = TicketProgressStatus.CODING
ticket_progress.save()
if not file_change_requests:
if len(title + summary) < 60:
edit_sweep_comment(
(
"Sorry, I could not find any files to modify, can you please"
" provide more details? Please make sure that the title and"
" summary of the issue are at least 60 characters."
),
-1,
)
else:
edit_sweep_comment(
(
"Sorry, I could not find any files to modify, can you please"
" provide more details?"
),
-1,
)
raise Exception("No files to modify.")
(
initial_sandbox_response,
initial_sandbox_response_file,
) = sweep_bot.validate_sandbox(file_change_requests)
file_change_requests: list[
FileChangeRequest
] = sweep_bot.validate_file_change_requests(
file_change_requests, initial_sandbox_response=initial_sandbox_response
)
ticket_progress.planning_progress.file_change_requests = (
file_change_requests
)
ticket_progress.coding_progress.assistant_conversations = [
AssistantConversation() for fcr in file_change_requests
]
ticket_progress.save()
table = tabulate(
[
[
file_change_request.entity_display,
file_change_request.instructions_display.replace(
"\n", "<br/>"
).replace("```", "\\```"),
]
for file_change_request in file_change_requests
if file_change_request.change_type != "check"
],
headers=["File Path", "Proposed Changes"],
tablefmt="pipe",
)
logger.info("Generating PR...")
pull_request = PullRequest(
title="Sweep: " + title,
branch_name="sweep/" + to_branch_name(title),
content="",
)
logger.info("Making PR...")
ticket_progress.context.branch_name = pull_request.branch_name
ticket_progress.save()
files_progress: list[tuple[str, str, str, str]] = [
(
file_change_request.entity_display,
file_change_request.instructions_display,
"⏳ In Progress",
"",
)
for file_change_request in file_change_requests
]
checkboxes_progress: list[tuple[str, str, str]] = [
(
file_change_request.entity_display,
file_change_request.instructions_display,
" ",
)
for file_change_request in file_change_requests
if not file_change_request.change_type == "check"
]
checkboxes_contents = "\n".join(
[
create_checkbox(
f"`{filename}`", blockquote(instructions), check == "X"
)
for filename, instructions, check in checkboxes_progress
]
)
checkboxes_collapsible = create_collapsible(
"Checklist", checkboxes_contents, opened=True
)
file_change_requests[0].status = "running"
condensed_checkboxes_contents = "\n".join(
[
create_checkbox(f"`{filename}`", "", check == "X").strip()
for filename, instructions, check in checkboxes_progress
]
)
condensed_checkboxes_collapsible = create_collapsible(
"Checklist", condensed_checkboxes_contents, opened=True
)
current_issue = repo.get_issue(number=issue_number)
current_issue.edit(body=summary + "\n\n" + condensed_checkboxes_collapsible)
delete_branch = False
generator = create_pr_changes(
file_change_requests,
pull_request,
sweep_bot,
username,
installation_id,
issue_number,
chat_logger=chat_logger,
)
edit_sweep_comment(checkboxes_contents, 2)
response = {"error": NoFilesException()}
def create_error_logs(
commit_url_display: str,
sandbox_response: SandboxResponse,
status: str = "✓",
):
return (
(
"<br/>"
+ create_collapsible(
f"Sandbox logs for {commit_url_display} {status}",
blockquote(
"\n\n".join(
[
create_collapsible(
f"<code>{output}</code> {i + 1}/{len(sandbox_response.outputs)} {format_sandbox_success(sandbox_response.success)}",
f"<pre>{clean_logs(output)}</pre>",
i == len(sandbox_response.outputs) - 1,
)
for i, output in enumerate(
sandbox_response.outputs
)
if len(sandbox_response.outputs) > 0
]
)
),
opened=True,
)
)
if sandbox_response
else ""
)
def update_progress(
entity_display: str,
header: str,
error_logs: str,
status: str = "X",
):
nonlocal checkboxes_progress
for i, (entity_display_, instructions, status_) in enumerate(
checkboxes_progress
):
if entity_display in entity_display_:
checkboxes_progress[i] = (
header,
instructions + error_logs,
status,
)
return True
return False
changed_files = []
for item in generator:
if isinstance(item, dict):
response = item
break
(
file_change_request,
changed_file,
sandbox_response,
commit,
file_change_requests,
) = item
changed_files.append(file_change_request.filename)
sandbox_response: SandboxResponse | None = sandbox_response
logger.info(sandbox_response)
commit_hash: str = (
commit
if isinstance(commit, str)
else (
commit.sha
if commit is not None
else repo.get_branch(pull_request.branch_name).commit.sha
)
)
commit_url = f"https://github.com/{repo_full_name}/commit/{commit_hash}"
commit_url_display = (
f"<a href='{commit_url}'><code>{commit_hash[:7]}</code></a>"
)
error_logs: str = create_error_logs(
commit_url_display,
sandbox_response,
status="✓"
if (sandbox_response is None or sandbox_response.success)
else "❌",
)
checkboxes_progress = [
(
file_change_request.display_summary
+ " "
+ file_change_request.status_display
+ " "
+ (file_change_request.commit_hash_url or "")
+ f" [Edit]({file_change_request.get_edit_url(repo.full_name, pull_request.branch_name)})",
file_change_request.instructions_ticket_display
+ f"\n\n{file_change_request.diff_display}",
"X"
if file_change_request.status in ("succeeded", "failed")
else " ",
)
for file_change_request in file_change_requests
]
checkboxes_contents = "\n".join(
[
checkbox_template.format(
check=check,
filename=filename,
instructions=blockquote(instructions),
)
for filename, instructions, check in checkboxes_progress
]
)
checkboxes_collapsible = collapsible_template.format(
summary="Checklist",
body=checkboxes_contents,
opened="open",
)
condensed_checkboxes_contents = "\n".join(
[
checkbox_template.format(
check=check,
filename=filename,
instructions="",
).strip()
for filename, instructions, check in checkboxes_progress
if not instructions.lower().startswith("run")
]
)
condensed_checkboxes_collapsible = collapsible_template.format(
summary="Checklist",
body=condensed_checkboxes_contents,
opened="open",
)
current_issue = repo.get_issue(number=issue_number)
current_issue.edit(
body=summary + "\n\n" + condensed_checkboxes_collapsible
)
logger.info(files_progress)
logger.info(f"Edited {file_change_request.entity_display}")
edit_sweep_comment(checkboxes_contents, 2)
if not response.get("success"):
raise Exception(f"Failed to create PR: {response.get('error')}")
checkboxes_contents = "\n".join(
[
checkbox_template.format(
check=check,
filename=filename,
instructions=blockquote(instructions),
)
for filename, instructions, check in checkboxes_progress
]
)
condensed_checkboxes_contents = "\n".join(
[
checkbox_template.format(
check=check,
filename=filename,
instructions="",
).strip()
for filename, instructions, check in checkboxes_progress
if not instructions.lower().startswith("run")
]
)
condensed_checkboxes_collapsible = collapsible_template.format(
summary="Checklist",
body=condensed_checkboxes_contents,
opened="open",
)
for _ in range(3):
try:
current_issue.edit(
body=summary + "\n\n" + condensed_checkboxes_collapsible
)
break
except:
from time import sleep
sleep(1)
edit_sweep_comment(checkboxes_contents, 2)
pr_changes = response["pull_request"]
# change the body here
diff_text = get_branch_diff_text(repo, pull_request.branch_name)
new_description = PRDescriptionBot().describe_diffs(
diff_text,
pull_request.title,
)
# TODO: update the title as well
if new_description:
pr_changes.body = (
f"{new_description}\n\nFixes"
f" #{issue_number}.\n\n---\n\n{UPDATES_MESSAGE}\n\n---\n\n{INSTRUCTIONS_FOR_REVIEW}"
)
edit_sweep_comment(
"I have finished coding the issue. I am now reviewing it for completeness.",
3,
)
change_location = f" [`{pr_changes.pr_head}`](https://github.com/{repo_full_name}/commits/{pr_changes.pr_head}).\n\n"
review_message = (
"Here are my self-reviews of my changes at" + change_location
)
lint_output = None
try:
fire_and_forget_wrapper(remove_emoji)(content_to_delete="eyes")
except SystemExit:
raise SystemExit
except:
pass
changes_required, review_message = False, ""
if False:
changes_required, review_message = review_code(
repo,
pr_changes,
issue_url,
username,
repo_description,
title,
summary,
replies_text,
tree,
lint_output,
plan,
chat_logger,
review_message,
edit_sweep_comment,
repo_full_name,
installation_id,
)
if changes_required:
edit_sweep_comment(
review_message + "\n\nI finished incorporating these changes.",
3,
)
else:
edit_sweep_comment(
f"I have finished reviewing the code for completeness. I did not find errors for {change_location}",
3,
)
pr_actions_message = (
create_action_buttons(
[
SWEEP_GOOD_FEEDBACK,
SWEEP_BAD_FEEDBACK,
],
header="### PR Feedback (click)\n",
)
+ "\n"
if DISCORD_FEEDBACK_WEBHOOK_URL is not None
else ""
)
revert_buttons = []
for changed_file in set(changed_files):
revert_buttons.append(Button(label=f"{RESET_FILE} {changed_file}"))
revert_buttons_list = ButtonList(
buttons=revert_buttons, title=REVERT_CHANGED_FILES_TITLE
)
rule_buttons = []
repo_rules = get_rules(repo)
if repo_rules != [""]:
for rule in repo_rules:
if rule:
rule_buttons.append(Button(label=f"{RULES_LABEL} {rule}"))
if len(repo_rules) == 0:
for rule in DEFAULT_RULES:
rule_buttons.append(Button(label=f"{RULES_LABEL} {rule}"))
rules_buttons_list = ButtonList(buttons=rule_buttons, title=RULES_TITLE)
sandbox_passed = None
for file_change_request in file_change_requests:
if file_change_request.change_type == "check":
if (
file_change_request.sandbox_response
and file_change_request.sandbox_response.error_messages
):
sandbox_passed = False
elif sandbox_passed is None:
sandbox_passed = True
if sandbox_passed is True:
pr_changes.title = f"{pr_changes.title} (✓ Sandbox Passed)"
# delete failing sweep yaml if applicable
if sweep_yml_failed:
try:
repo.delete_file(
"sweep.yaml",
"Delete failing sweep.yaml",
branch=pr_changes.pr_head,
sha=repo.get_contents("sweep.yaml").sha,
)
except:
pass
pr: PullRequest = repo.create_pull(
title=pr_changes.title,
body=pr_actions_message + pr_changes.body,
head=pr_changes.pr_head,
base=SweepConfig.get_branch(repo),
)
ticket_progress.status = TicketProgressStatus.COMPLETE
ticket_progress.context.done_time = time()
ticket_progress.context.pr_id = pr.number
ticket_progress.save()
if revert_buttons:
pr.create_issue_comment(revert_buttons_list.serialize())
if rule_buttons:
pr.create_issue_comment(rules_buttons_list.serialize())
# add comments before labelling
pr.add_to_labels(GITHUB_LABEL_NAME)
current_issue.create_reaction("rocket")
heres_pr_message = f'<h1 align="center">🚀 Here\'s the PR! <a href="{pr.html_url}">#{pr.number}</a></h1>'
progress_message = f'<div align="center"><b>See Sweep\'s progress at <a href="https://progress.sweep.dev/issues/{tracking_id}">the progress dashboard</a>!</b></div>'
edit_sweep_comment(
review_message + "\n\nSuccess! 🚀",
4,
pr_message=(
f"{center(heres_pr_message)}\n{center(progress_message)}\n{center(payment_message_start)}"
),
done=True,
)
user_settings = UserSettings.from_username(username=username)
user = g.get_user(username)
full_name = user.name or user.login
name = full_name.split(" ")[0]
files_changed = []
for fcr in file_change_requests:
if fcr.change_type in ("create", "modify"):
diff = list(
difflib.unified_diff(
(fcr.old_content or "").splitlines() or [],
(fcr.new_content or "").splitlines() or [],
lineterm="",
)
)
added = sum(
1
for line in diff
if line.startswith("+") and not line.startswith("+++")
)
removed = sum(
1
for line in diff
if line.startswith("-") and not line.startswith("---")
)
files_changed.append(
f"<code>{fcr.filename}</code> (+{added}/-{removed})"
)
user_settings.send_email(
subject=f"Sweep Pull Request Complete for {repo_name}#{issue_number} {title}",
html=email_template.format(
name=name,
pr_url=pr.html_url,
issue_number=issue_number,
repo_full_name=repo_full_name,
pr_number=pr.number,
progress_url=f"https://progress.sweep.dev/issues/{tracking_id}",
summary=markdown.markdown(pr_changes.body),
files_changed="\n".join(
[f"<li>{item}</li>" for item in files_changed]
),
sweeping_gif=sweeping_gif,
),
)
except MaxTokensExceeded as e:
logger.info("Max tokens exceeded")
ticket_progress.status = TicketProgressStatus.ERROR
ticket_progress.error_message = "Max tokens exceeded. Feel free to add more details to the issue descript for Sweep to better address it, or alternatively, reach out to Kevin or William for help at https://discord.gg/sweep."
ticket_progress.save()
log_error(
is_paying_user,
is_consumer_tier,
username,
issue_url,
"Max Tokens Exceeded",
str(e) + "\n" + traceback.format_exc(),
priority=2,
)
if chat_logger and chat_logger.is_paying_user():
edit_sweep_comment(
(
f"Sorry, I could not edit `{e.filename}` as this file is too long."
" We are currently working on improved file streaming to address"
" this issue.\n"
),
-1,
)
else:
edit_sweep_comment(
(
f"Sorry, I could not edit `{e.filename}` as this file is too"
" long.\n\nIf this file is incorrect, please describe the desired"
" file in the prompt. However, if you would like to edit longer"
" files, consider upgrading to [Sweep Pro](https://sweep.dev/) for"
" longer context lengths.\n"
),
-1,
)
delete_branch = True
raise e
except NoFilesException as e:
ticket_progress.status = TicketProgressStatus.ERROR
ticket_progress.error_message = "Sweep could not find files to modify to address this issue. Feel free to add more details to the issue descript for Sweep to better address it, or alternatively, reach out to Kevin or William for help at https://discord.gg/sweep."
ticket_progress.save()
logger.info("Sweep could not find files to modify")
log_error(
is_paying_user,
is_consumer_tier,
username,
issue_url,
"Sweep could not find files to modify",
str(e) + "\n" + traceback.format_exc(),
priority=2,
)
edit_sweep_comment(
(
"Sorry, Sweep could not find any appropriate files to edit to address"
" this issue. If this is a mistake, please provide more context and Sweep"
f" will retry!\n\n> @{username}, please edit the issue description to"
" include more details about this issue."
),
-1,
)
delete_branch = True
raise e
except openai.BadRequestError as e:
ticket_progress.status = TicketProgressStatus.ERROR
ticket_progress.error_message = "Sorry, it looks like there is an error with communicating with OpenAI. If this error persists, reach out to Kevin or William for help at https://discord.gg/sweep."
ticket_progress.save()
logger.error(traceback.format_exc())
logger.error(e)
edit_sweep_comment(
(
"I'm sorry, but it looks our model has ran out of context length. We're"
" trying to make this happen less, but one way to mitigate this is to"
" code smaller files. If this error persists report it at"
" https://discord.gg/sweep."
),
-1,
)
log_error(
is_paying_user,
is_consumer_tier,
username,
issue_url,
"Context Length",
str(e) + "\n" + traceback.format_exc(),
priority=2,
)
posthog.capture(
username,
"failed",
properties={
"error": str(e),
"reason": "Invalid request error / context length",
**metadata,
"duration": round(time() - on_ticket_start_time),
},
)
delete_branch = True
raise e
except AssistantRaisedException as e:
ticket_progress.status = TicketProgressStatus.ERROR
ticket_progress.error_message = f"Sweep raised an error with the following message: {e.message}. Feel free to add more details to the issue descript for Sweep to better address it, or alternatively, reach out to Kevin or William for help at https://discord.gg/sweep."
ticket_progress.save()
logger.exception(e)
edit_sweep_comment(
f"Sweep raised an error with the following message:\n{blockquote(e.message)}",
-1,
)
log_error(
is_paying_user,
is_consumer_tier,
username,
issue_url,
"Workflow",
str(e) + "\n" + traceback.format_exc(),
priority=1,
)
raise e
except Exception as e:
ticket_progress.status = TicketProgressStatus.ERROR
ticket_progress.error_message = f"Internal server error: {str(e)}. Feel free to add more details to the issue descript for Sweep to better address it, or alternatively, reach out to Kevin or William for help at https://discord.gg/sweep."
ticket_progress.save()
logger.error(traceback.format_exc())
logger.error(e)
# title and summary are defined elsewhere
if len(title + summary) < 60:
edit_sweep_comment(
(
"I'm sorry, but it looks like an error has occurred due to"
+ " a planning failure. Feel free to add more details to the issue description"
+ " so Sweep can better address it. Alternatively, reach out to Kevin or William for help at"
+ " https://discord.gg/sweep."
),
-1,
)
else:
edit_sweep_comment(
(
"I'm sorry, but it looks like an error has occurred due to"
+ " a planning failure. Feel free to add more details to the issue description"
+ " so Sweep can better address it. Alternatively, reach out to Kevin or William for help at"
+ " https://discord.gg/sweep."
),
-1,
)
log_error(
is_paying_user,
is_consumer_tier,
username,
issue_url,
"Workflow",
str(e) + "\n" + traceback.format_exc(),
priority=1,
)
raise e
else:
try:
fire_and_forget_wrapper(remove_emoji)(content_to_delete="eyes")
fire_and_forget_wrapper(add_emoji)("rocket")
except SystemExit:
raise SystemExit
except Exception as e:
logger.error(e)
finally:
cloned_repo.delete()
if delete_branch:
try:
if pull_request.branch_name.startswith("sweep"):
repo.get_git_ref(f"heads/{pull_request.branch_name}").delete()
else:
raise Exception(
f"Branch name {pull_request.branch_name} does not start with sweep/"
)
except SystemExit:
raise SystemExit
except Exception as e:
logger.error(e)
logger.error(traceback.format_exc())
logger.info("Deleted branch", pull_request.branch_name)
except Exception as e:
posthog.capture(
username,
"failed",
properties={
**metadata,
"error": str(e),
"trace": traceback.format_exc(),
"duration": round(time() - on_ticket_start_time),
},
)
raise e
posthog.capture(
username,
"success",
properties={**metadata, "duration": round(time() - on_ticket_start_time)},
)
logger.info("on_ticket success in " + str(round(time() - on_ticket_start_time)))
return {"success": True}
def handle_sandbox_mode(
title, repo_full_name, repo, ticket_progress, edit_sweep_comment
):
logger.info("Running in sandbox mode")
sweep_bot = SweepBot(repo=repo, ticket_progress=ticket_progress)
logger.info("Getting file contents")
file_name = title.split(":")[1].strip()
file_contents = sweep_bot.get_contents(file_name).decoded_content.decode("utf-8")
try:
ext = file_name.split(".")[-1]
except:
ext = ""
displayed_contents = file_contents.replace("```", "\`\`\`")
sha = repo.get_branch(repo.default_branch).commit.sha
permalink = f"https://github.com/{repo_full_name}/blob/{sha}/{file_name}#L1-L{len(file_contents.splitlines())}"
logger.info("Running sandbox")
edit_sweep_comment(
f"Running sandbox for {file_name}. Current Code:\n\n{permalink}",
1,
)
updated_contents, sandbox_response = sweep_bot.check_sandbox(
file_name, file_contents, []
)
logger.info("Sandbox finished")
logs = (
(
"<br/>"
+ create_collapsible(
f"Sandbox logs",
blockquote(
"\n\n".join(
[
create_collapsible(
f"<code>{output}</code> {i + 1}/{len(sandbox_response.outputs)} {format_sandbox_success(sandbox_response.success)}",
f"<pre>{clean_logs(output)}</pre>",
i == len(sandbox_response.outputs) - 1,
)
for i, output in enumerate(sandbox_response.outputs)
if len(sandbox_response.outputs) > 0
]
)
),
opened=True,
)
)
if sandbox_response
else ""
)
updated_contents = updated_contents.replace("```", "\`\`\`")
diff = generate_diff(file_contents, updated_contents).replace("```", "\`\`\`")
diff_display = (
f"Updated Code:\n\n```{ext}\n{updated_contents}```\nDiff:\n```diff\n{diff}\n```"
if diff
else f"Sandbox made no changes to {file_name} (formatters were not configured or Sweep didn't make changes)."
)
edit_sweep_comment(
f"{logs}\n{diff_display}",
2,
)
edit_sweep_comment("N/A", 3)
logger.info("Sandbox comments updated")
def review_code(
repo,
pr_changes,
issue_url,
username,
repo_description,
title,
summary,
replies_text,
tree,
lint_output,
plan,
chat_logger,
review_message,
edit_sweep_comment,
repo_full_name,
installation_id,
):
try:
# CODE REVIEW
changes_required = False
changes_required, review_comment = review_pr(
repo=repo,
pr=pr_changes,
issue_url=issue_url,
username=username,
repo_description=repo_description,
title=title,
summary=summary,
replies_text=replies_text,
tree=tree,
lint_output=lint_output,
plan=plan, # plan for the PR
chat_logger=chat_logger,
)
lint_output = None
review_message += (
f"Here is the {ordinal(1)} review\n" + blockquote(review_comment) + "\n\n"
)
if changes_required:
edit_sweep_comment(
review_message + "\n\nI'm currently addressing these suggestions.",
3,
)
logger.info(f"Addressing review comment {review_comment}")
on_comment(
repo_full_name=repo_full_name,
repo_description=repo_description,
comment=review_comment,
username=username,
installation_id=installation_id,
pr_path=None,
pr_line_position=None,
pr_number=None,
pr=pr_changes,
chat_logger=chat_logger,
repo=repo,
)
except SystemExit:
raise SystemExit
except Exception as e:
logger.error(traceback.format_exc())
logger.error(e)
return changes_required, review_message
def get_branch_diff_text(repo, branch, base_branch=None):
base_branch = base_branch or SweepConfig.get_branch(repo)
comparison = repo.compare(base_branch, branch)
file_diffs = comparison.files
pr_diffs = []
for file in file_diffs:
diff = file.patch
if (
file.status == "added"
or file.status == "modified"
or file.status == "removed"
):
pr_diffs.append((file.filename, diff))
else:
logger.info(
f"File status {file.status} not recognized"
) # TODO(sweep): We don't handle renamed files
return "\n".join([f"{filename}\n{diff}" for filename, diff in pr_diffs])
def get_payment_messages(chat_logger: ChatLogger):
if chat_logger:
is_paying_user = chat_logger.is_paying_user()
is_consumer_tier = chat_logger.is_consumer_tier()
use_faster_model = OPENAI_USE_3_5_MODEL_ONLY or chat_logger.use_faster_model()
else:
is_paying_user = True
is_consumer_tier = False
use_faster_model = False
tracking_id = chat_logger.data["tracking_id"] if chat_logger else None
# Find the first comment made by the bot
tickets_allocated = 5
if is_consumer_tier:
tickets_allocated = 15
if is_paying_user:
tickets_allocated = 500
purchased_ticket_count = (
chat_logger.get_ticket_count(purchased=True) if chat_logger else 0
)
ticket_count = (
max(tickets_allocated - chat_logger.get_ticket_count(), 0)
+ purchased_ticket_count
if chat_logger
else 999
)
daily_ticket_count = (
(3 - chat_logger.get_ticket_count(use_date=True) if not use_faster_model else 0)
if chat_logger
else 999
)
model_name = "GPT-3.5" if use_faster_model else "GPT-4"
payment_link = "https://sweep.dev/pricing"
single_payment_link = "https://buy.stripe.com/00g3fh7qF85q0AE14d"
pro_payment_link = "https://buy.stripe.com/00g5npeT71H2gzCfZ8"
daily_message = (
f" and {daily_ticket_count} for the day"
if not is_paying_user and not is_consumer_tier
else ""
)
user_type = "💎 <b>Sweep Pro</b>" if is_paying_user else "⚡ <b>Sweep Basic Tier</b>"
gpt_tickets_left_message = (
f"{ticket_count} GPT-4 tickets left for the month"
if not is_paying_user
else "unlimited GPT-4 tickets"
)
purchase_message = f"<br/><br/> For more GPT-4 tickets, visit <a href={single_payment_link}>our payment portal</a>. For a one week free trial, try <a href={pro_payment_link}>Sweep Pro</a> (unlimited GPT-4 tickets)."
payment_message = (
f"{user_type}: I used {model_name} to create this ticket. You have {gpt_tickets_left_message}{daily_message}. (tracking ID: <code>{tracking_id}</code>)"
+ (purchase_message if not is_paying_user else "")
)
payment_message_start = (
f"{user_type}: I'm using {model_name}. You have {gpt_tickets_left_message}{daily_message}. (tracking ID: <code>{tracking_id}</code>)"
+ (purchase_message if not is_paying_user else "")
)
return payment_message, payment_message_start
| [
"Hey {name},\n<br/><br/>\n🚀 I just finished creating a pull request for your issue ({repo_full_name}#{issue_number}) at <a href=\"{pr_url}\">{repo_full_name}#{pr_number}</a>!\n\n<br/><br/>\nYou can view how I created this pull request <a href=\"{progress_url}\">here</a>.\n\n<h2>Summary</h2>\n<blockquote>\n{summary}\n</blockquote>\n\n<h2>Files Changed</h2>\n<ul>\n{files_changed}\n</ul>\n\n{sweeping_gif}\n<br/>\nCheers,\n<br/>\nSweep\n<br/>"
] |
2024-01-10 | sahilkamath0108/Jarvis-AI | deal_with_files~vecDB.py | from langchain.document_loaders import PyPDFLoader, DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.vectorstores import Chroma
def vectorize(path):
# loader = DirectoryLoader(path, glob="./*.pdf", loader_cls=PyPDFLoader)
# documents = loader.load()
loader_cls = PyPDFLoader
loader = loader_cls(path)
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=200)
texts = text_splitter.split_documents(documents)
embeddings = SentenceTransformerEmbeddings(model_name="multi-qa-mpnet-base-dot-v1")
persist_directory = "data"
db = Chroma.from_documents(texts, embeddings, persist_directory=persist_directory)
return True
if __name__ == '__main__':
path = "C:\\Users\\Hp\\Desktop\\realmadrid.pdf"
vectorize(path)
| [] |
2024-01-10 | sahilkamath0108/Jarvis-AI | deal_with_files~performQA.py | from langchain.vectorstores import Chroma
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from transformers import pipeline
import torch
from langchain.llms import HuggingFacePipeline
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.chains import RetrievalQA
from helpers.say import say
from helpers.listen import listen
chat_history = []
def ques_ans():
say('Alright shoot questions at me')
while True:
query = listen()
if 'malf' in query:
continue
if 'finish questioning' in query:
break
else:
if query and 'malf' not in query:
response = chat(chat_history, query)
say(response)
say('Next question')
return True
def chat(chat_history, user_input):
bot_response = qa_chain({"query": user_input})
bot_response = bot_response['result']
response = ""
for letter in ''.join(bot_response):
response += letter + ""
chat_history = chat_history + [(user_input, response)]
return bot_response
checkpoint = "MBZUAI/LaMini-Flan-T5-783M" #google/flan-t5-xl google/flan-t5 MBZUAI/LaMini-Flan-T5-783M
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
base_model = AutoModelForSeq2SeqLM.from_pretrained(
checkpoint,
device_map="auto",
torch_dtype = torch.float32)
embeddings = SentenceTransformerEmbeddings(model_name="sentence-transformers/multi-qa-mpnet-base-dot-v1")
db = Chroma(persist_directory="data", embedding_function=embeddings)
pipe = pipeline(
'text2text-generation',
model = base_model,
tokenizer = tokenizer,
max_length = 512,
do_sample = True,
temperature = 0.3,
top_p= 0.95
)
local_llm = HuggingFacePipeline(pipeline=pipe)
qa_chain = RetrievalQA.from_chain_type(llm=local_llm,
chain_type='stuff',
retriever=db.as_retriever(search_type="similarity", search_kwargs={"k":2}),
return_source_documents=True,
)
if __name__ == "__main__":
print('how much stipend')
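    # Sketch (not in the original): send the same question through the QA chain so the
    # retriever and LLM actually run, instead of only printing the raw string.
    print(chat(chat_history, 'how much stipend'))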
| [] |
2024-01-10 | RavinMaddHatter/YoutubePrepTools | youtubePrep.py | import time
t1 = time.time()
from time import sleep
from threading import Thread
import cutter
import openai_translator as Translator
from queue import Queue
from csv import DictReader
from pyperclip import copy
from json import dump, load
from tkinter import Tk, Label, Button, INSERT, Scale, IntVar, Checkbutton, END
from tkinter import filedialog, Entry, DoubleVar, ttk, Toplevel, StringVar, OptionMenu
from os.path import exists, split, join, getmtime
from tkinter.scrolledtext import ScrolledText
from pathlib import Path
from glob import glob
print(time.time() - t1)
data = None
BoilerplateInfo = None
slider_defaults = None
sliders_enabled = None
audioChans = 6
translator = Translator.Translator
def update_save(data, file_name="default_profile.json"):
with open(file_name, "w+") as file:
dump(data, file, indent=2)
def load_file(file_name="default_profile.json"):
print(file_name)
data = {}
if exists(file_name):
with open(file_name) as file:
data = load(file)
if "boilerplate" not in data.keys():
data["boilerplate"] = "Default Test For Your Youtube Description/n"
if "slider_defaults" not in data.keys():
data["sliders_enabled"] = []
data["slider_defaults"] = []
for i in range(audioChans):
data["slider_defaults"].append(-24)
data["sliders_enabled"].append(False)
data["sliders_enabled"][0] = True
if "model" not in data.keys():
data["model"] = "base"
if "in_space" not in data.keys():
data["in_space"] = 0.1
if "out_space" not in data.keys():
data["out_space"] = 0.1
if "min_clip" not in data.keys():
data["min_clip"] = 1
if "min_silent" not in data.keys():
data["min_silent"] = 0.1
if "min_silent" not in data.keys():
data["min_silent"] = 0.1
update_save(data)
return data
data = load_file()
class Markerprocessor:
def __init__(self, file):
self.markers = []
with open(file, newline='') as csvfile:
reader = DictReader(csvfile, delimiter=',')
for row in reader:
time = row["Source In"].split(":")
time[0] = int(time[0]) - 1
if time[0] == 0:
time.pop(0)
else:
time[0] = "{:02d}".format(time[0])
time.pop()
time = ":".join(time)
self.markers.append(time + " " + row["Notes"])
def string_to_clipboard(self):
copy(data["boilerplate"] + "\n\r\n\rChapters: \n\r" + "\n\r".join(self.markers))
def string_to_file(self, name):
with open(name, "w+") as text_file:
text_file.write("\n\r".join(self.markers))
if __name__ == "__main__":
def progress_bar(operation_name, update_queue):
popup = Toplevel(height=100, width=500)
status_text = StringVar()
popup_description = Label(popup, textvariable=status_text)
popup_description.grid(row=0, column=0)
progress_var = DoubleVar()
progress_bar = ttk.Progressbar(popup, variable=progress_var, maximum=100)
progress_bar.grid(row=1, column=0)
complete = False
while not complete:
sleep(0.01)
if not update_queue.empty():
update = update_queue.get()
progress_var.set(update["percent"])
status_text.set(update["state"])
popup.update()
popup.focus_force()
complete = (update["state"] == "done")
popup.destroy()
popup.update()
def find_csv():
filename = filedialog.askopenfilename(title="Select a CSV File",
filetypes=(("CSV files",
"*.CSV*"),
("all files",
"*.*")))
data["boilerplate"] = st.get("1.0", END)
mk = Markerprocessor(filename)
mk.string_to_clipboard()
print("markers in clipboard")
def transcribe_process(transcribe_queue, filename):
print("setting moddel")
trans = translator(transcribe_queue, selected_model.get())
print("running model")
trans.audio_to_text(filename)
print("finished")
def transcribe_vid():
filename = filedialog.askopenfilename(title="Select a Media File File",
filetypes=(("Media Files",
"*.WAV *.MP4 *.MOV *.AVI *.Y4M *.MKV"),
("all files",
"*.*")))
try:
transcribe_queue = Queue()
print("queue sent")
popup = Thread(target=progress_bar, args=("Transcribing Video", transcribe_queue,))
popup.start()
trans = Thread(target=transcribe_process, args=(transcribe_queue, filename,))
trans.start()
print("transcribe finished")
except Exception as e:
print("failed translation")
print(e)
def do_settings(cc):
levels = []
chans = []
for i in range(len(sliders)):
levels.append(-sliders[i].get())
chans.append(slider_chks[i].get() == 1)
cc.set_multi_chan_thres(levels)
cc.set_lead_in(lead_in.get())
cc.set_lead_out(lead_out.get())
cc.set_min_clip_dur(clip_dur.get())
cc.set_enabled_tracks(chans)
cc.set_min_silent_dur(min_silent_dur_var.get())
def cut_clip_process(queue, video_file):
name = Path(video_file).stem
head, tail = split(video_file)
cc = cutter.Clipcutter(queue)
try:
do_settings(cc)
cc.add_cut_video_to_timeline(video_file)
cc.export_edl(join(head, name + "-cut.edl"))
cc._cleanup()
except Exception as e:
print(e)
cc._cleanup()
def cut_clip():
video_file = filedialog.askopenfilename(title="Select a WAV File",
filetypes=(("video files",
"*.mkv*"),
("all files",
"*.*")))
cut_queue = Queue()
popup = Thread(target=progress_bar, args=("Cutting Video", cut_queue,))
popup.start()
trans = Thread(target=cut_clip_process, args=(cut_queue, video_file,))
trans.start()
def cut_folder_process(queue, folder):
cc = cutter.Clipcutter(queue)
try:
name = split(folder)[-1]
do_settings(cc)
files = glob(join(folder, "*.mkv"))
files.sort(key=getmtime)
for file in files:
print(file)
cc.add_cut_video_to_timeline(file)
print(join(folder, (name + "-cut.edl")))
cc.export_edl(join(folder, (name + "-cut.edl")))
cc._cleanup()
except Exception as e:
print(e)
cc._cleanup()
def cut_folder():
folder = filedialog.askdirectory()
cut_queue = Queue()
popup = Thread(target=progress_bar, args=("Cutting Video", cut_queue,))
popup.start()
trans = Thread(target=cut_folder_process, args=(cut_queue, folder,))
trans.start()
def set_save_data():
for i in range(audioChans):
data["slider_defaults"][i] = sliders[i].get()
data["sliders_enabled"][i] = slider_chks[i].get()
data["boilerplate"] = st.get("1.0", END)
data["model"] = selected_model.get()
data["in_space"] = lead_in.get()
data["out_space"] = lead_out.get()
data["min_clip"] = clip_dur.get()
data["min_silent"] = min_silent_dur_var.get()
    def load_profile():
        global data
settings_file = filedialog.askopenfilename(title="Select a profile",
filetypes=(("json files",
"*.json*"),
("all files",
"*.*")))
        data = load_file(settings_file)
for i in range(audioChans):
sliders[i].set(data["slider_defaults"][i])
slider_chks[i].set(data["sliders_enabled"][i])
st.delete('1.0', END)
st.insert(INSERT, data["boilerplate"])
selected_model.set(data["model"])
lead_in.set(data["in_space"])
lead_out.set(data["out_space"])
clip_dur.set(data["min_clip"])
min_silent_dur_var.set(data["min_silent"])
def save_as():
file_name = filedialog.asksaveasfile(title="Set Profile File Name",
filetypes=(("JSON",
"*.json*"),)).name
if not (file_name.endswith(".json") or file_name.endswith(".json")):
file_name += ".json"
set_save_data()
update_save(data, file_name=file_name)
def save():
set_save_data()
update_save(data)
def exit():
window.destroy()
window = Tk()
window.title('Youtube Video Publishing Tools')
label_file_explorer = Label(window,
text="Video Prep Tools",
width=20, height=2)
csvButton = Button(window,
text="Markers to Clipboard",
command=find_csv,
width=20)
waveButton = Button(window,
text="Transcribe Media",
command=transcribe_vid,
width=20)
cut_button = Button(window,
text="Cut Clip",
command=cut_clip,
width=20)
super_cut_button = Button(window,
text="Cut Folder",
command=cut_folder,
width=20)
button_exit = Button(window,
text="Exit",
command=exit,
width=20)
button_save = Button(window,
text="Save Default",
command=save,
width=20)
button_save_as = Button(window,
text="Save as",
command=save_as,
width=20)
button_load = Button(window,
text="Load",
command=load_profile,
width=20)
lbl_entry = Label(window,
text="Description Tools",
width=50, height=2)
st = ScrolledText(window, width=75, height=5, relief="raised")
st.insert(INSERT, data["boilerplate"])
options = list(Translator._MODELS.keys())
model_label = Label(window, text="Speach Model Size", width=15, height=2)
selected_model = StringVar()
selected_model.set(data["model"])
model_select = OptionMenu(window, selected_model, *options)
sliders = []
sliders_lb = []
sliders_ch = []
slider_chks = []
for i in range(audioChans):
sliders_lb.append(Label(window,
text="ch {}".format(i + 1),
height=2))
sliders.append(Scale(window, from_=0, to=-50))
sliders[i].set(data["slider_defaults"][i])
slider_chks.append(IntVar())
slider_chks[i].set(data["sliders_enabled"][i])
sliders_ch.append(Checkbutton(window, variable=slider_chks[i]))
slider_chks[0].set(1)
lead_in = DoubleVar()
ld_in_ent = Entry(window, textvariable=lead_in, width=10)
in_lb = Label(window, text="In Space", width=15, height=2)
lead_out = DoubleVar()
ld_out_ent = Entry(window, textvariable=lead_out, width=10)
out_lb = Label(window, text="Out Space", width=15, height=2)
clip_dur = DoubleVar()
clip_dur_ent = Entry(window, textvariable=clip_dur, width=10)
dur_lb = Label(window, text="Min Clip Length", width=15, height=2)
min_silent_dur_var = DoubleVar()
min_silent_dur_ent = Entry(window, textvariable=min_silent_dur_var, width=10)
silent_lb = Label(window, text="Min Silent Dur", width=15, height=2)
lead_in.set(data["in_space"])
lead_out.set(data["out_space"])
clip_dur.set(data["min_clip"])
min_silent_dur_var.set(data["min_silent"])
audio_lb = Label(window, text="Audio Tools", width=15, height=2)
row = 1
label_file_explorer.grid(column=1, row=row, columnspan=audioChans)
row += 1
cut_button.grid(column=0, row=row, columnspan=3)
super_cut_button.grid(column=3, row=row, columnspan=3)
row += 1
for i in range(len(sliders)):
sliders_lb[i].grid(column=i + 1, row=row)
sliders[i].grid(column=i + 1, row=row + 1)
sliders_ch[i].grid(column=i + 1, row=row + 2)
row += 3
in_lb.grid(column=1, row=row)
out_lb.grid(column=2, row=row)
dur_lb.grid(column=3, row=row)
silent_lb.grid(column=4, row=row)
row += 1
ld_in_ent.grid(column=1, row=row)
ld_out_ent.grid(column=2, row=row)
clip_dur_ent.grid(column=3, row=row)
min_silent_dur_ent.grid(column=4, row=row)
row += 1
audio_lb.grid(column=1, row=row, columnspan=6)
row += 1
model_label.grid(column=0, row=row, columnspan=2)
model_select.grid(column=2, row=row, columnspan=1)
waveButton.grid(column=3, row=row, columnspan=3)
row += 1
lbl_entry.grid(column=1, row=row, columnspan=audioChans)
row += 1
st.grid(column=1, row=row, columnspan=audioChans)
row += 1
csvButton.grid(column=1, row=row, columnspan=audioChans)
row += 1
button_save.grid(column=1, row=row)
button_save_as.grid(column=2, row=row)
button_load.grid(column=3, row=row)
button_exit.grid(column=4, row=row, columnspan=audioChans - 1)
window.mainloop()
| [] |
2024-01-10 | f01zy/Assistant | commands~execute.py | import os
import random
import webbrowser
import pathlib
# import openai
from .say import say
def execute(cmd):
BASE_DIR = pathlib.Path().resolve()
# if purpose == "cmd":
if cmd == "yandex":
os.system(f"{BASE_DIR}/applications/Yandex.lnk")
say(f"ok{random.randint(1 , 4)}")
elif cmd == "excellent":
say("thanks.wav")
elif cmd == "youtube":
url = "https://youtube.com/"
webbrowser.open(url)
elif cmd == "VS Code":
os.system(f"{BASE_DIR}/applications/Code.lnk")
say(f"ok{random.randint(1 , 4)}")
elif cmd == "figma":
os.system(f"{BASE_DIR}/applications/Figma.lnk")
say(f"ok{random.randint(1 , 4)}")
# elif purpose == "openai":
# pass | [] |
2024-01-10 | chicodelacruz/mlprj | qa_api.py | import os
import openai
import configuration
# add a configuration.py file with the line:
# key = "your api key"
class Answer:
def __init__(self, answer="", additional_info="", next_question=""):
        self.answer = answer
        self.additional = additional_info
        self.follow_up = next_question
def create_jsonlfile():
#Paste the API KEY
#openai.api_key ="Your api key"
openai.api_key = configuration.key
# Create the documents file as jsonl file
document_path = "jsonlfiles/finaldoc.jsonl"
file = openai.File.create(file=open(document_path), purpose='answers')
return file
def look_alternative_document(response_object):
"""
Look for an alternative answer
:param response_object:
:return:
"""
return "Would you like to browse all the handbook?"
def check_scores(user_question, response_object, score_threshold=0, low_threshold=50):
"""
:param response_object:
:param score_threshold:
:param low_threshold: threshold for responses with low confidence
:return:
"""
answer_object = Answer()
# go through response selected documents
scores = []
for document in response_object.selected_documents:
# select max score
scores.append(document.score)
max_score = max(scores)
print("max_score: {0}".format(str(max_score)))
if max_score > score_threshold:
# look for low confidence answers, it means gpt-3 generates an answer but the similarity to documents is low
if max_score <= low_threshold:
# adjust temperature, so far adjusting temperature still returns low scores
# response = generateAnswers(user_question, temp=response_object.temperature + 1)
print("low confidence")
            answer_object.answer = look_alternative_document(response_object)
else:
# it could be the one with the maximum score but the one with higher score is not always on-point
answer_object.answer = response_object.answers[0]
# find document with top score
answer_object.additional = response_object.selected_documents[0].text
# but also include the documents text
else:
chatbot_response = "I don't understand the question"
return answer_object
def generateAnswers(user_question,jsonl_file,temp = 0.1,maxtoken = 20):
try:
# Api for creating answers
response =openai.Answer.create(
search_model="ada",
model="davinci",
question=user_question,
file=jsonl_file["id"],
examples_context="Corruption is dishonest or illegal behavior, especially by people in power, typically involving bribery. It can also include other acts, such as fraud, embezzlement, favoritism, and nepotism. The most common form of corruption is bribery.For further information see Section G1 of the BCG.**Additional Information** : For further information, also about what the term gifts of money covers, see [Compliance Handbook](https://webbooks.siemens.com/public/LC/chen/index.htm?n=Part-1-Activity-Fields,A.-Anti-Corruption",
examples=[["Can I take my client on a holiday?", "No, you cannot take your client on a holiday .**Additional Information** For further information, see [Compliance Handbook](https://webbooks.siemens.com/public/LC/chen/index.htm?n=Part-1-Activity-Fields,A.-Anti-Corruption"],["What is corruption?", "Corruption is dishonest or illegal behavior, especially by people in power, typically involving bribery **Additional Information** For further information , see [Compliance Handbook](https://webbooks.siemens.com/public/LC/chen/index.htm?n=Part-1-Activity-Fields,A.-Anti-Corruption"],["What is bribery?","Bribery is the act of offering, promising, or giving money, gifts, or other benefit to a public official or public or private employee with the aim of receiving improper advantages. Bribery is a criminal offense worldwide. Siemens does not tolerate any form of bribery. **Additional Information** For further information check [BCG](https://compliance.siemens.cloud/bcg/responsibility.html#g)"],["What are the rules for cash payments?","Payments with Cash are specifically regulated in many jurisdictions according to money laundering or other laws. The governance for Anti-Money Laundering lies with Legal & Compliance (LC CO RFC / LC CO SFS) and supports the BizCos by appropriate processes. **Additional Information** More information can be found [Here](https://webbooks.siemens.com/public/LC/chen/index.htm?n=Part-1-Activity-Fields,C.-Anti-Money-Laundering-(AML),5.-Cash-Handling-Rules)"],
["Was ist ein Geschenk?", "Ein Geschenk ist eine freiwillige Überweisung von Geld oder anderen Vorteilen an Dritte ohne Gegenleistung. ** Zusätzliche Informationen ** Weitere Informationen finden Sie im [Compliance-Handbuch](https://webbooks.siemens.com/public/LC/chen/index.htm?n=Part-1-Activity-Fields,A.-Anti-Corruption"]],
max_rerank=10,
max_tokens=maxtoken,
temperature=temp,
stop=["\n"]
)
return response
except:
response ={"answers": ["Apologies, I could not find an answer for your query. Please ask questions related to"
" compliance or please rephrase your question"],
"file": file}
return response
print("Creating file !")
file = create_jsonlfile()
print("File created!! File id: ", file["id"])
user_ques = input("Chatbot - Enter your question :")
response = generateAnswers(user_ques, file)
full_answer = check_scores(user_ques, response)
# print("Chatbot Answer :", response["answers"][0])
print("Chatbot Answer :", full_answer.answer)
if full_answer.additional:
print("Additionally:\n")
print(full_answer.additional)
| [] |
2024-01-10 | stdanyaa/AIINIR_langchain | llm_utils.py | from langchain.llms import LlamaCpp
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from langchain.document_loaders import PyPDFLoader, DirectoryLoader
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.prompts import ChatPromptTemplate, PromptTemplate
from operator import itemgetter
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.prompt_template import format_document
from langchain.prompts import PromptTemplate
from langchain.retrievers import RePhraseQueryRetriever
from prompt_templates import QA_PROMPT_TEMPLATE, QUERY_REPHRASE_PROMPT_TEMPLATE
"""
Documents loading and preprocessing
"""
# def process_docs(docs):
# prompt = PromptTemplate.from_template("{page_content}\n")
# return [format_document(doc, prompt) for doc in docs]
def load_documents(
docs_path, text_splitter=None,
loaders={
'.pdf': PyPDFLoader,
'.csv': CSVLoader,},
loader_kwargs=None
):
def create_directory_loader(file_type, directory_path):
return DirectoryLoader(
path=directory_path,
glob=f"**/*{file_type}",
loader_cls=loaders[file_type],
loader_kwargs=loader_kwargs
)
pdf_loader = create_directory_loader('.pdf', docs_path)
csv_loader = create_directory_loader('.csv', docs_path)
if text_splitter:
pdf_documents = pdf_loader.load_and_split(text_splitter=text_splitter)
csv_documents = csv_loader.load_and_split(text_splitter=text_splitter)
else:
pdf_documents = pdf_loader.load()
csv_documents = csv_loader.load()
return pdf_documents + csv_documents
def get_text_splitter(chunk_size=800, chunk_overlap=100):
text_splitter = RecursiveCharacterTextSplitter(
chunk_size = chunk_size,
chunk_overlap = chunk_overlap,
length_function = len,
separators=['\d+\.\s', '\d+\.\d+\.\s', '\d+(\.\d+){2}\.\s', '\n\n', '\n'],
is_separator_regex=True
)
return text_splitter
"""
Vector database, embedder and retriever
"""
def get_db(chunks, embedder_name='cointegrated/LaBSE-en-ru'):
embeddings_model = HuggingFaceEmbeddings(model_name=embedder_name)
db = Chroma.from_documents(chunks, embeddings_model)
return db
def get_query_rephraser(llm):
query_prompt = PromptTemplate(
input_variables=["question"],
template=QUERY_REPHRASE_PROMPT_TEMPLATE
)
return LLMChain(llm=llm, prompt=query_prompt)
def get_retriever(vectorstore, search_kwargs={"k": 2}, rephraser=None):
retriever=vectorstore.as_retriever(search_kwargs=search_kwargs)
if rephraser:
return RePhraseQueryRetriever(
retriever=retriever, llm_chain=rephraser
)
else:
return retriever
"""
LLM and QA-langchain
"""
def get_llm(model_path='models/llama-2-7b-chat.Q4_K_M.gguf', n_ctx=4096):
callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
llm = LlamaCpp(
model_path=model_path,
temperature=0.0, #0.75,
max_tokens=min(n_ctx, 4000),
n_ctx=n_ctx,
top_p=1,
callback_manager=callback_manager,
verbose=True, # Verbose is required to pass to the callback manager
)
return llm
def get_qa_langchain(model, retriever):
template = QA_PROMPT_TEMPLATE
prompt = ChatPromptTemplate.from_template(template)
chain = {
"context": itemgetter("question") | retriever,
"question": itemgetter("question")
} | prompt | model | StrOutputParser()
return chain
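

# --- Minimal usage sketch (not part of the original file) ---
# Shows how the helpers above compose into a retrieval-QA chain. The "docs/" folder
# and the GGUF model path are assumptions for illustration; substitute your own paths.
if __name__ == "__main__":
    splitter = get_text_splitter(chunk_size=800, chunk_overlap=100)
    chunks = load_documents("docs/", text_splitter=splitter)    # folder of .pdf/.csv files (assumed)
    db = get_db(chunks)                                         # Chroma store with LaBSE embeddings
    llm = get_llm("models/llama-2-7b-chat.Q4_K_M.gguf")         # same default path as get_llm()
    rephraser = get_query_rephraser(llm)                        # optional query-rewriting chain
    retriever = get_retriever(db, rephraser=rephraser)
    chain = get_qa_langchain(llm, retriever)
    print(chain.invoke({"question": "What does the document say about deadlines?"}))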
| [
"question"
] |
2024-01-10 | xinggonglie/ColossalAI | applications~ColossalEval~colossal_eval~evaluate~dataset_evaluator~gpt_judge.py | # Code adapted from https://github.com/lm-sys/FastChat/tree/main/fastchat/llm_judge
import ast
import concurrent.futures
import copy
import json
import os
import re
import time
from typing import Any, Dict, List
import numpy as np
import openai
import tqdm
MODEL = "gpt-4"
API_MAX_RETRY = 16
API_RETRY_SLEEP = 10
API_ERROR_OUTPUT = "$ERROR$"
NEED_REF_CATS = ["math", "reasoning", "coding"]
one_score_pattern = re.compile("\[\[(\d+\.?\d*)\]\]")
one_score_pattern_backup = re.compile("\[(\d+\.?\d*)\]")
def load_mt_prompts(prompt_file: str):
prompts = {}
with open(prompt_file) as fin:
for line in fin:
line = json.loads(line)
prompts[line["name"]] = line
return prompts
def get_mt_prompt(prompts: Dict[str, str], multiturn: bool, math: bool):
if math and multiturn:
return prompts["single-math-v1-multi-turn"]
elif math and not multiturn:
return prompts["single-math-v1"]
elif not math and multiturn:
return prompts["single-v1-multi-turn"]
elif not math and not multiturn:
return prompts["single-v1"]
def chat_compeletion_openai(messages: List[Dict], temperature: float = 0.0, max_tokens: int = 2048):
output = API_ERROR_OUTPUT
model = MODEL
for _ in range(API_MAX_RETRY):
try:
response = openai.ChatCompletion.create(
model=model,
messages=messages,
n=1,
temperature=temperature,
max_tokens=max_tokens,
)
output = response["choices"][0]["message"]["content"]
break
except openai.error.OpenAIError as e:
print(type(e), e)
time.sleep(API_RETRY_SLEEP)
return output
def get_mtbench_judgements(question: Dict[str, Any], prompts: Dict[str, str]):
id = question["id"]
judgement = {"id": id, "judgements": [], "ratings": []}
category = question["category"]
math = category in NEED_REF_CATS
turn_number = len(question["instruction"])
for num in range(turn_number):
assert (len(question["target"]) >= 1 and math) or not math
kwargs = {}
if num >= 1:
prompt = get_mt_prompt(prompts, multiturn=True, math=math)
if len(question["target"]) >= 1 and math:
kwargs = {f"ref_answer_{i+1}": question["target"][i] for i in range(len(question["target"]))}
user_prompt = prompt["prompt_template"].format(
question_1=question["instruction"][0],
question_2=question["instruction"][1],
answer_1=question["output"][0],
answer_2=question["output"][1],
**kwargs,
)
else:
prompt = get_mt_prompt(prompts, multiturn=False, math=math)
if len(question["target"]) >= 1 and math:
kwargs = {"ref_answer_1": question["target"][0]}
user_prompt = prompt["prompt_template"].format(
question=question["instruction"][0],
answer=question["output"][0],
**kwargs,
)
rating = -1
sys_prompt = prompt["system_prompt"]
messages = [{"role": "system", "content": sys_prompt}, {"role": "user", "content": user_prompt}]
judgement_str = chat_compeletion_openai(messages, temperature=0.0, max_tokens=2048)
match = re.search(one_score_pattern, judgement_str)
if not match:
match = re.search(one_score_pattern_backup, judgement_str)
if match:
rating = ast.literal_eval(match.groups()[0])
else:
rating = -1
judgement["judgements"].append(judgement_str)
judgement["ratings"].append(rating)
return judgement
def mtbench_single_judge(data: List[Dict], config_path: str):
judgements = []
prompt_dir = os.path.dirname(config_path)
prompts = load_mt_prompts(os.path.join(prompt_dir, "mtbench_judge_prompts.jsonl"))
with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
futures = []
for i, question in enumerate(data):
future = executor.submit(get_mtbench_judgements, question, prompts)
futures.append(future)
for future in tqdm.tqdm(
concurrent.futures.as_completed(futures),
desc=f"MTBench single judge for {data[0]['category']}",
total=len(futures),
):
judgements.append(future.result())
judgements.sort(key=lambda x: x["id"])
judgements_by_id = {j["id"]: j for j in judgements}
data_to_dump = copy.deepcopy(data)
for d in data_to_dump:
id = d["id"]
d["judgements"] = judgements_by_id[id]["judgements"]
d["ratings"] = judgements_by_id[id]["ratings"]
avg_ratings = np.mean([j["ratings"] for j in judgements], axis=0)
return data_to_dump, avg_ratings
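

# --- Minimal usage sketch (not part of the original file) ---
# Illustrates the record layout mtbench_single_judge() expects; every value below is a
# made-up placeholder, "configs/config.yaml" only matters because its directory must
# contain mtbench_judge_prompts.jsonl, and OpenAI credentials are assumed to be
# configured elsewhere.
if __name__ == "__main__":
    sample = [
        {
            "id": 0,
            "category": "writing",  # non-math category, so no reference answers needed
            "instruction": ["Write a haiku about autumn.", "Now rewrite it to rhyme."],
            "output": ["Leaves drift in cool wind...", "Leaves drift, winds lift..."],
            "target": [],
        }
    ]
    judged, avg_ratings = mtbench_single_judge(sample, "configs/config.yaml")
    print(avg_ratings)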
| [
"{}",
"instruction",
"mtbench_judge_prompts.jsonl",
"prompt_template",
"system_prompt"
] |
2024-01-10 | unconv/gpt4v-examples | vision.py | from openai import OpenAI
import base64
model = OpenAI()
def image_b64(image_path):
with open(image_path, "rb") as f:
return base64.b64encode(f.read()).decode()
def look(image_path, prompt="Describe this image"):
b64_image = image_b64(image_path)
response = model.chat.completions.create(
model="gpt-4-vision-preview",
messages=[
{
"role": "user",
"content": [
{
"type": "image_url",
"image_url": f"data:image/jpeg;base64,{b64_image}",
},
{
"type": "text",
"text": prompt,
}
]
}
],
max_tokens=1024,
)
message = response.choices[0].message
return message.content
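

# Minimal usage sketch (not part of the original file): "photo.jpg" is a placeholder
# path and an OpenAI API key is assumed to be available to the OpenAI() client.
if __name__ == "__main__":
    print(look("photo.jpg", prompt="What objects are in this image?"))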
| [
"[{'type': 'image_url', 'image_url': 'data:image/jpeg;base64,PLACEHOLDER'}, {'type': 'text', 'text': PLACEHOLDER}]"
] |
2024-01-10 | unconv/gpt4v-examples | multivision.py | from openai import OpenAI
import base64
model = OpenAI()
def image_b64(image_path):
with open(image_path, "rb") as f:
return base64.b64encode(f.read()).decode()
def look(
image_paths: list[str] | str,
prompt="Describe this image",
):
if not isinstance(image_paths, list):
image_paths = [image_paths]
images = []
for image in image_paths:
b64_image = image_b64(image)
images.append({
"type": "image_url",
"image_url": f"data:image/jpeg;base64,{b64_image}",
})
response = model.chat.completions.create(
model="gpt-4-vision-preview",
messages=[
{
"role": "user",
"content": images + [
{
"type": "text",
"text": prompt,
}
]
}
],
max_tokens=1024,
)
message = response.choices[0].message
return message.content
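

# Minimal usage sketch (not part of the original file): the image paths are placeholders
# and an OpenAI API key is assumed to be available to the OpenAI() client.
if __name__ == "__main__":
    print(look(["before.jpg", "after.jpg"], prompt="Describe what changed between these two images."))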
| [] |
2024-01-10 | Exic9999/GPPT4GUI | GPT4GUI.py | import tkinter as tk
from tkinter import scrolledtext, font
import openai
import threading
def send_request(event=None):
def api_call():
user_input = user_input_box.get("1.0", tk.END).strip()
clear_output = clear_output_check_var.get()
if clear_output:
output_box.configure(state='normal')
output_box.delete("1.0", tk.END)
output_box.configure(state='disabled')
if user_input.lower() == 'exit':
root.quit()
else:
try:
loading_label.config(text="Asking ChatGPT4 now...")
response = openai.ChatCompletion.create(
model="gpt-4-1106-preview",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": user_input}
],
max_tokens=4096
)
output_box.configure(state='normal')
# Insert and apply bold tag to user input
output_box.insert(tk.END, "You: ")
start = output_box.index("end-1c linestart")
output_box.insert(tk.END, user_input + "\n")
end = output_box.index("end-1c linestart")
output_box.tag_add("bold", start, end)
# Insert GPT-4 response
output_box.insert(tk.END, "GPT-4: " + response['choices'][0]['message']['content'] + "\n\n")
output_box.configure(state='disabled')
output_box.yview(tk.END)
loading_label.config(text="")
except Exception as e:
output_box.configure(state='normal')
output_box.insert(tk.END, "Error: " + str(e) + "\n")
output_box.configure(state='disabled')
loading_label.config(text="")
threading.Thread(target=api_call).start()
openai.api_key = ''
root = tk.Tk()
root.title("GPT-4 GUI")
root.geometry("1500x1000")
root.configure(bg="#f0f0f0")
input_font = font.Font(family="Times New Roman", size=14)
output_font = font.Font(family="Times New Roman", size=14)
bold_font = font.Font(family="Times New Roman", size=14, weight="bold") # Bold font
input_frame = tk.Frame(root)
input_frame.pack(padx=10, pady=5, fill='both', expand=True)
user_input_box = scrolledtext.ScrolledText(input_frame, height=4, width=70, font=input_font, bg="#7FFFD4")
user_input_box.pack(side='left', fill='both', expand=True)
user_input_box.bind("<Return>", send_request)
send_button = tk.Button(input_frame, text="Send", command=send_request, bg="#4CAF50", fg="white", padx=10, pady=5)
send_button.pack(side='right', padx=10)
send_button.bind("<Enter>", lambda e: e.widget.config(bg="#45a049"))
send_button.bind("<Leave>", lambda e: e.widget.config(bg="#4CAF50"))
loading_label = tk.Label(input_frame, text="", font=("Helvetica", 10))
loading_label.pack(side='right')
clear_output_check_var = tk.BooleanVar()
clear_output_check = tk.Checkbutton(input_frame, text="Clear output on send", var=clear_output_check_var, bg="#f0f0f0")
clear_output_check.pack(side='right')
output_box = scrolledtext.ScrolledText(root, height=15, width=100, font=output_font, bg="#ADD8E6")
output_box.pack(padx=10, pady=5, fill='both', expand=True)
output_box.configure(state='disabled')
output_box.tag_configure("bold", font=bold_font) # Configure bold tag
root.mainloop()
| [
"You are a helpful assistant."
] |
2024-01-10 | allenai/DecomP | src~decomp~models~gpt3generator.py | import logging
import os
import openai
from diskcache import Cache
logger = logging.getLogger(__name__)
cache = Cache(os.path.expanduser("~/.cache/gpt3calls"))
@cache.memoize()
def cached_openai_call( # kwargs doesn't work with caching.
prompt, engine, temperature, max_tokens, top_p,
frequency_penalty, presence_penalty, stop,
n, best_of, logprobs,
):
return openai.Completion.create(
prompt=prompt, engine=engine, temperature=temperature, max_tokens=max_tokens,
top_p=top_p, frequency_penalty=frequency_penalty, presence_penalty=presence_penalty,
stop=stop, n=n, best_of=best_of, logprobs=logprobs
)
def openai_call(
prompt, engine, temperature, max_tokens, top_p,
frequency_penalty, presence_penalty, stop,
n, best_of, logprobs,
):
function = cached_openai_call if temperature == 0 else openai.Completion.create
return function(
prompt=prompt, engine=engine, temperature=temperature, max_tokens=max_tokens,
top_p=top_p, frequency_penalty=frequency_penalty, presence_penalty=presence_penalty,
stop=stop, n=n, best_of=best_of, logprobs=logprobs
)
class GPT3Generator:
def __init__(self, engine="text-davinci-002", temperature=0, max_tokens=100,
top_p=1, frequency_penalty=0, presence_penalty=0, stop=["\n"],
n=1, best_of=1, logprobs=0):
self.engine = engine
self.logprobs = logprobs
self.n = n
self.best_of = best_of
self.presence_penalty = presence_penalty
self.frequency_penalty = frequency_penalty
self.max_tokens = max_tokens
self.top_p = top_p
self.stop = stop
self.temperature = temperature
def generate_text_sequence(self, prompt):
"""
        :param prompt: the prompt text to complete
:return: returns a sequence of tuples (string, score) where lower score is better
"""
# GPT3 can't handle trailing white-space
prompt = prompt.rstrip()
if self.best_of is None:
response = openai.Completion.create(
engine=self.engine,
prompt=prompt,
temperature=self.temperature,
max_tokens=self.max_tokens,
top_p=self.top_p,
n=self.n,
logprobs=self.logprobs,
frequency_penalty=self.frequency_penalty,
presence_penalty=self.presence_penalty,
stop=self.stop
)
else:
response = openai_call(
engine=self.engine,
prompt=prompt,
temperature=self.temperature,
max_tokens=self.max_tokens,
top_p=self.top_p,
n=self.n,
best_of=self.best_of,
logprobs=self.logprobs,
frequency_penalty=self.frequency_penalty,
presence_penalty=self.presence_penalty,
stop=self.stop
)
output_seq_score = []
for index, choice in enumerate(response["choices"]):
# print(choice)
if "logprobs" in choice and "token_logprobs" in choice["logprobs"]:
# get probs of the tokens used in text (i.e. till the stop token)
probs = []
# selected_toks = []
for prob, tok in zip(choice["logprobs"]["token_logprobs"],
choice["logprobs"]["tokens"]):
if tok not in self.stop and tok != "<|endoftext|>":
probs.append(prob)
# selected_toks.append(tok)
else:
# include the probability of the stop character too. This will also
# ensure that an empty string (i.e. first predicted character being a stop
# character) also has a reasonable probability measure
# selected_toks.append(tok)
probs.append(prob)
break
# average the logits and negate to make them +ve scores where lower is better
# set a high +ve score if no predictions
# print(probs, selected_toks)
score = -sum(probs) / len(probs) if len(probs) else 100.0
output_seq_score.append((choice["text"], score))
else:
# no score, just use index
output_seq_score.append((choice["text"], index))
# Ensure sorted output
return sorted(output_seq_score, key=lambda x: x[1])
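

# --- Minimal usage sketch (not part of the original file) ---
# Shows the intended call pattern; the prompt is a placeholder and openai.api_key
# is assumed to be configured elsewhere (this module only imports openai).
if __name__ == "__main__":
    generator = GPT3Generator(engine="text-davinci-002", max_tokens=50)
    for text, score in generator.generate_text_sequence("Q: What is 2 + 2?\nA:"):
        print(f"{score:.3f}\t{text!r}")  # lower score is better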
| [] |
2024-01-10 | EleutherAI/NeMo | nemo~collections~nlp~modules~common~megatron~transformer.py | # coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer."""
import math
from contextlib import nullcontext
from enum import Enum
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
from nemo.collections.common.parts.adapter_modules import LinearAdapterConfig
from nemo.collections.nlp.modules.common.megatron.adapters.parallel_adapters import (
AdapterName,
InfusedAdapterConfig,
MLPInfusedAdapterConfig,
ParallelLinearAdapterConfig,
)
from nemo.collections.nlp.modules.common.megatron.fused_bias_dropout_add import (
bias_dropout_add,
bias_dropout_add_fused_inference,
bias_dropout_add_fused_train,
dropout_add,
)
from nemo.collections.nlp.modules.common.megatron.flash_attention import flash_attn_unpadded_qkvpacked_func
from nemo.collections.nlp.modules.common.megatron.fused_bias_geglu import fused_bias_geglu
from nemo.collections.nlp.modules.common.megatron.fused_bias_gelu import fused_bias_gelu
from nemo.collections.nlp.modules.common.megatron.fused_layer_norm import get_layer_norm
from nemo.collections.nlp.modules.common.megatron.fused_softmax import MatchedScaleMaskSoftmax
from nemo.collections.nlp.modules.common.megatron.layer_norm_1p import LayerNorm1P
from nemo.collections.nlp.modules.common.megatron.layer_type import LayerType
from nemo.collections.nlp.modules.common.megatron.module import MegatronModule
from nemo.collections.nlp.modules.common.megatron.rotary_pos_embedding import apply_rotary_pos_emb
from nemo.collections.nlp.modules.common.megatron.utils import ApexGuardDefaults, attention_mask_func, erf_gelu
from nemo.collections.nlp.modules.common.megatron.utils import openai_gelu as openai_gelu_func
from nemo.core import adapter_mixins
from nemo.utils import logging
try:
from apex.transformer import parallel_state, tensor_parallel
from apex.transformer.enums import AttnMaskType, AttnType, ModelType
from apex.transformer.utils import divide as safe_divide
from apex.transformer.parallel_state import get_tensor_model_parallel_world_size
from apex.normalization import MixedFusedRMSNorm
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
# fake missing classes with None attributes
ModelType = AttnMaskType = AttnType = LayerType = ApexGuardDefaults()
""" We use the following notation throughout this file:
h: hidden size
n: number of attention heads
p: number of model parallel partitions
np: n/p
hp: h/p
hn: h/n
b: batch size
s: sequence length
l: number of layers
Transformer takes input of size [s, b, h] and returns a
tensor of the same size. We use the following arguments:
hyperparameters: transformer hyperparameters
"""
if HAVE_APEX:
class ColumnLinear(tensor_parallel.ColumnParallelLinear):
# redefine forward only for non-parallel inference
def forward(self, input_):
world_size = get_tensor_model_parallel_world_size()
if input_.requires_grad or world_size > 1:
return tensor_parallel.ColumnParallelLinear.forward(self, input_)
# Matrix multiply.
output = torch.matmul(input_, self.weight.t())
if not self.skip_bias_add and self.bias is not None:
output = output + self.bias
output_bias = self.bias if self.skip_bias_add else None
return output, output_bias
else:
class ColumnLinear(ApexGuardDefaults):
def __init__(self):
super().__init__()
logging.warning(
"Apex was not found. ColumnLinear will not work. Please see the NeMo README for installation instructions: https://github.com/NVIDIA/NeMo#megatron-gpt."
)
class AttentionImpl(Enum):
core = "core"
flash = "flash"
class ParallelMLP(MegatronModule):
"""MLP.
MLP will take the input with h hidden state, project it to 4*h
hidden dimension, perform nonlinear transformation, and project the
state back into h hidden dimension.
"""
def __init__(
self,
init_method,
output_layer_init_method,
hidden_size,
ffn_hidden_size,
use_cpu_initialization=False,
bias_activation_fusion=True,
openai_gelu=False,
onnx_safe=False,
activation='gelu',
bias=True,
transformer_block_type='pre_ln',
normalization='layernorm',
layernorm_epsilon=1e-5,
persist_layer_norm=False,
sequence_parallel=False,
gradient_accumulation_fusion=False,
dropout=0.0,
):
super(ParallelMLP, self).__init__()
self.activation = activation
self.bias = bias
self.transformer_block_type = transformer_block_type
self.normalization = normalization
self.layernorm_epsilon = layernorm_epsilon
self.persist_layer_norm = persist_layer_norm
self.activation = activation
self.dropout = dropout
self.set_accepted_adapter_types([MLPInfusedAdapterConfig._target_])
if activation not in ['gelu', 'geglu', 'reglu', 'swiglu']:
raise ValueError(f"Activation {activation} not supported. Only gelu, geglu, reglu, swiglu are supported.")
no_async_tensor_model_parallel_allreduce = (
parallel_state.get_tensor_model_parallel_world_size() == 1 or sequence_parallel
)
# Project to 4h.
self.dense_h_to_4h = tensor_parallel.ColumnParallelLinear(
hidden_size,
ffn_hidden_size, # NOTE: When using geglu, divide ffn dim by 2/3 to keep overall params the same.
gather_output=False,
init_method=init_method,
skip_bias_add=True,
use_cpu_initialization=use_cpu_initialization,
bias=bias,
sequence_parallel_enabled=sequence_parallel,
no_async_tensor_model_parallel_allreduce=no_async_tensor_model_parallel_allreduce,
gradient_accumulation_fusion=gradient_accumulation_fusion,
)
if activation in ['geglu', 'reglu', 'swiglu']:
# Separate linear layer for *GLU activations.
# Source: https://github.com/huggingface/transformers/blob/bee361c6f1f7704f8c688895f2f86f6e5ff84727/src/transformers/models/t5/modeling_t5.py#L292
self.dense_h_to_4h_2 = tensor_parallel.ColumnParallelLinear(
hidden_size,
ffn_hidden_size, # NOTE: When using *glu, divide ffn dim by 2/3 to keep overall params the same.
gather_output=False,
init_method=init_method,
skip_bias_add=True,
use_cpu_initialization=use_cpu_initialization,
bias=bias,
sequence_parallel_enabled=sequence_parallel,
no_async_tensor_model_parallel_allreduce=no_async_tensor_model_parallel_allreduce,
gradient_accumulation_fusion=gradient_accumulation_fusion,
)
self.glu_activation_family = activation in ['geglu', 'reglu', 'swiglu']
bias_activation_fusion_unavailable = activation in ['reglu', 'swiglu']
if bias_activation_fusion_unavailable and bias_activation_fusion:
raise ValueError(
f"Cannot use bias_activation_fusion with {activation} activation. Please turn bias gelu fusion off."
)
if self.glu_activation_family and openai_gelu:
raise ValueError(
f"Cannot use openai_gelu with specificed activation function : {activation} Please turn openai gelu off."
)
if self.glu_activation_family and onnx_safe:
raise ValueError(
f"Cannot use onnx_safe with specificed activation function and bias_activation_fusion : {activation} Please turn onnx safe off."
)
if bias_activation_fusion and not bias:
raise ValueError(
f"Cannot use bias_activation_fusion without bias terms. Please set bias=True or bias_activation_fusion=False."
)
self.bias_activation_fusion = bias_activation_fusion
# Give openai_gelu precedence over other activations if set, for HF compatibility. Normally this is off and shouldn't affect regular model training.
if openai_gelu:
self.activation_func = openai_gelu_func
elif activation in ["gelu", "geglu"]:
self.activation_func = F.gelu
elif onnx_safe:
self.activation_func = erf_gelu
elif activation == "reglu":
self.activation_func = F.relu
elif activation == "swiglu":
# SiLU or sigmoid linear unit is the same as swish with beta = 1 (which is what https://arxiv.org/pdf/2002.05202.pdf uses.)
self.activation_func = F.silu
# Project back to h.
self.dense_4h_to_h = tensor_parallel.RowParallelLinear(
ffn_hidden_size,
hidden_size,
input_is_parallel=True,
init_method=output_layer_init_method,
skip_bias_add=True,
use_cpu_initialization=use_cpu_initialization,
bias=bias,
sequence_parallel_enabled=sequence_parallel,
gradient_accumulation_fusion=gradient_accumulation_fusion,
)
# Normformer normalization
if transformer_block_type == 'normformer':
if normalization == 'layernorm':
self.normalization = get_layer_norm(
ffn_hidden_size // get_tensor_model_parallel_world_size(), layernorm_epsilon, persist_layer_norm
)
elif normalization == 'layernorm1p':
self.normalization = LayerNorm1P(
ffn_hidden_size // get_tensor_model_parallel_world_size(),
layernorm_epsilon,
sequence_parallel_enabled=sequence_parallel,
)
else:
self.normalization = MixedFusedRMSNorm(
ffn_hidden_size // get_tensor_model_parallel_world_size(), layernorm_epsilon
)
def forward(self, hidden_states):
# [s, b, 4hp]
intermediate_parallel, bias_parallel = self.dense_h_to_4h(hidden_states)
if self.glu_activation_family:
intermediate_parallel_2, bias_parallel_2 = self.dense_h_to_4h_2(hidden_states)
if self.bias_activation_fusion:
if self.activation == 'gelu':
intermediate_parallel = fused_bias_gelu(intermediate_parallel, bias_parallel)
elif self.activation == 'geglu':
intermediate_parallel = fused_bias_geglu(
intermediate_parallel, bias_parallel, intermediate_parallel_2, bias_parallel_2
)
elif self.activation in ['reglu', 'swiglu'] or (
self.glu_activation_family and not self.bias_activation_fusion
):
if bias_parallel is not None:
intermediate_parallel = self.activation_func(intermediate_parallel + bias_parallel) * (
intermediate_parallel_2 + bias_parallel_2
)
else:
intermediate_parallel = self.activation_func(intermediate_parallel) * intermediate_parallel_2
else:
if bias_parallel is not None:
intermediate_parallel = self.activation_func(intermediate_parallel + bias_parallel)
else:
intermediate_parallel = self.activation_func(intermediate_parallel)
if self.dropout > 0:
intermediate_parallel = F.dropout(intermediate_parallel, p=self.dropout, training=self.training)
infused_adapter = self.get_from_adapter_layer(AdapterName.MLP_INFUSED)
if infused_adapter:
intermediate_parallel = infused_adapter(intermediate_parallel)
# Normformer normalization
if self.transformer_block_type == 'normformer':
intermediate_parallel = self.normalization(intermediate_parallel)
# [s, b, h]
output, output_bias = self.dense_4h_to_h(intermediate_parallel)
return output, output_bias
class SwitchMLP(MegatronModule):
"""Top-1 MoE
    Currently supports Sinkhorn based expert routing."""
def __init__(
self,
num_experts,
init_method,
output_layer_init_method,
hidden_size,
ffn_hidden_size,
use_cpu_initialization=False,
bias_activation_fusion=True,
openai_gelu=False,
onnx_safe=False,
activation='gelu',
bias=True,
transformer_block_type='pre_ln',
normalization='layernorm',
layernorm_epsilon=1e-5,
persist_layer_norm=False,
sequence_parallel=False,
gradient_accumulation_fusion=False,
dropout=0.0,
):
super(SwitchMLP, self).__init__()
self.num_experts = num_experts
self.route_algo = SwitchMLP.sinkhorn
self.router = tensor_parallel.RowParallelLinear(
hidden_size,
num_experts,
input_is_parallel=False,
init_method=init_method,
skip_bias_add=False,
use_cpu_initialization=use_cpu_initialization,
bias=bias,
sequence_parallel_enabled=sequence_parallel,
gradient_accumulation_fusion=gradient_accumulation_fusion,
)
mlp_args = {
'init_method': init_method,
'output_layer_init_method': output_layer_init_method,
'hidden_size': hidden_size,
'ffn_hidden_size': ffn_hidden_size,
'use_cpu_initialization': use_cpu_initialization,
'bias_activation_fusion': bias_activation_fusion,
'openai_gelu': openai_gelu,
'onnx_safe': onnx_safe,
'activation': activation,
'bias': bias,
'transformer_block_type': transformer_block_type,
'normalization': normalization,
'layernorm_epsilon': layernorm_epsilon,
'persist_layer_norm': persist_layer_norm,
'sequence_parallel': sequence_parallel,
'gradient_accumulation_fusion': gradient_accumulation_fusion,
'dropout': dropout,
}
self.experts = torch.nn.ModuleList([ParallelMLP(**mlp_args) for _ in range(num_experts)])
def forward(self, hidden_states):
hidden_shape = hidden_states.shape
route, _ = self.router(hidden_states)
route = route.view(-1, self.num_experts)
if self.training:
with torch.no_grad():
norm_route = self.route_algo(
route.detach().to(dtype=torch.float32)
) # explicit fp32 conversion for stability
_, max_ind = torch.max(norm_route, dim=1)
route = torch.sigmoid(route)
max_prob = route[torch.arange(route.size(0)), max_ind]
else:
route = torch.sigmoid(route)
max_prob, max_ind = torch.max(route, dim=1)
max_prob = torch.unsqueeze(max_prob, 1)
hidden_states = hidden_states.view(-1, hidden_shape[-1])
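# Dispatch: tokens are flattened to [s * b, h]; each expert processes only the tokens routed
# to it, and the per-expert outputs are scattered back into the shared output buffers.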
local_indices = (max_ind == 0).nonzero()
hidden = hidden_states[local_indices, :]
output, output_bias = self.experts[0](hidden)
output_bias = output_bias.expand_as(output)
output_total = torch.empty_like(hidden_states, dtype=output.dtype)
output_bias_total = torch.empty_like(hidden_states, dtype=output_bias.dtype)
output_total[local_indices, :] = output
output_bias_total[local_indices, :] = output_bias
for expert_num, expert in enumerate(self.experts):
if expert_num == 0:
continue
local_indices = (max_ind == expert_num).nonzero()
hidden = hidden_states[local_indices, :]
output, output_bias = expert(hidden)
output_bias = output_bias.expand_as(output)
output_total[local_indices, :] = output
output_bias_total[local_indices, :] = output_bias
output_total = output_total * max_prob
output_bias_total = output_bias_total * max_prob
output_total = output_total.view(hidden_shape)
output_bias_total = output_bias_total.view(hidden_shape)
return output_total, output_bias_total
@classmethod
def sinkhorn(cls, cost, tol=0.0001):
"Megatron-LMs sinkhorn implementation"
cost = torch.exp(cost)
d0 = torch.ones(cost.size(0), device=cost.device, dtype=cost.dtype)
d1 = torch.ones(cost.size(1), device=cost.device, dtype=cost.dtype)
eps = 0.00000001
error = 1e9
d1_old = d1
while error > tol:
d0 = (1 / d0.size(0)) * 1 / (torch.sum(d1 * cost, 1) + eps)
d1 = (1 / d1.size(0)) * 1 / (torch.sum(d0.unsqueeze(1) * cost, 0) + eps)
error = torch.mean(torch.abs(d1_old - d1))
d1_old = d1
return d1 * cost * d0.unsqueeze(1)
class CoreAttention(MegatronModule):
""" Region where selective activation recomputation is applied.
See Figure 3. in Reducing Activation Recomputation in Large Transformer Models
https://arxiv.org/pdf/2205.05198.pdf for more details.
"""
def __init__(
self,
layer_number,
num_attention_heads,
hidden_size,
attention_type=AttnType.self_attn,
attn_mask_type=AttnMaskType.padding,
precision=16,
apply_query_key_layer_scaling=True,
kv_channels=None,
masked_softmax_fusion=True,
attention_dropout=0.1,
sequence_parallel=False,
normalize_attention_scores=True,
):
super(CoreAttention, self).__init__()
self.precision = precision
self.fp16 = precision == 16
self.bf16 = precision == 'bf16'
self.apply_query_key_layer_scaling = apply_query_key_layer_scaling
self.attention_softmax_in_fp32 = False
if self.apply_query_key_layer_scaling:
self.attention_softmax_in_fp32 = True
self.layer_number = max(1, layer_number)
self.attention_type = attention_type
self.attn_mask_type = attn_mask_type
self.sequence_parallel = sequence_parallel
self.normalize_attention_scores = normalize_attention_scores
if kv_channels is None:
assert (
hidden_size % num_attention_heads == 0
), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'
kv_channels = hidden_size // num_attention_heads
projection_size = kv_channels * num_attention_heads
# Per attention head and per partition values.
world_size = parallel_state.get_tensor_model_parallel_world_size()
self.hidden_size_per_partition = safe_divide(projection_size, world_size)
self.hidden_size_per_attention_head = safe_divide(projection_size, num_attention_heads)
self.num_attention_heads_per_partition = safe_divide(num_attention_heads, world_size)
self.num_attention_heads_partition_offset = (
self.num_attention_heads_per_partition * parallel_state.get_tensor_model_parallel_rank()
)
coeff = None
self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)
if self.apply_query_key_layer_scaling:
coeff = self.layer_number
self.norm_factor *= coeff
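# With query-key layer scaling, the raw scores are additionally divided by the layer number
# here; the fused softmax is expected to re-apply the same coefficient, which keeps the
# QK^T matmul in a safer numeric range for fp16.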
self.scale_mask_softmax = MatchedScaleMaskSoftmax(
self.fp16,
self.bf16,
self.attn_mask_type,
masked_softmax_fusion,
attention_mask_func,
self.attention_softmax_in_fp32,
coeff,
)
# Dropout. Note that for a single iteration, this layer will generate
# different outputs on different number of parallel partitions but
# on average it should not be partition dependent.
self.attention_dropout = torch.nn.Dropout(attention_dropout)
def forward(
self,
query_layer,
key_layer,
value_layer,
attention_mask,
layer_past=None,
get_key_value=False,
rotary_pos_emb=None,
relative_position_bias=None,
headscale_tensor=None,
):
# ===================================
# Raw attention scores. [b, np, s, s]
# ===================================
# [b, np, sq, sk]
output_size = (query_layer.size(1), query_layer.size(2), query_layer.size(0), key_layer.size(0))
# TODO: figure out how to do this
# apply relative positional encoding (rotary embedding)
if rotary_pos_emb is not None:
q_pos_emb, k_pos_emb = rotary_pos_emb
query_layer = apply_rotary_pos_emb(query_layer, q_pos_emb)
key_layer = apply_rotary_pos_emb(key_layer, k_pos_emb)
# TODO, can apply positional embedding to value_layer so it has
# absolute positional embedding.
# otherwise, only relative positional embedding takes effect
# value_layer = apply_rotary_pos_emb(value_layer, k_pos_emb)
# [sq, b, np, hn] -> [sq, b * np, hn]
query_layer = query_layer.view(output_size[2], output_size[0] * output_size[1], -1)
# [sk, b, np, hn] -> [sk, b * np, hn]
key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1)
# preallocting input tensor: [b * np, sq, sk]
matmul_input_buffer = torch.empty(
output_size[0] * output_size[1],
output_size[2],
output_size[3],
dtype=query_layer.dtype,
device=torch.cuda.current_device(),
)
# Raw attention scores. [b * np, sq, sk]
matmul_result = torch.baddbmm(
matmul_input_buffer,
query_layer.transpose(0, 1), # [b * np, sq, hn]
key_layer.transpose(0, 1).transpose(1, 2), # [b * np, hn, sk]
beta=0.0,
alpha=(1.0 / self.norm_factor) if self.normalize_attention_scores else 1.0,
)
# change view to [b, np, sq, sk]
attention_scores = matmul_result.view(*output_size)
if relative_position_bias is not None:
attention_scores += relative_position_bias[
:,
self.num_attention_heads_partition_offset : self.num_attention_heads_partition_offset
+ self.num_attention_heads_per_partition,
: attention_scores.size(2),
: attention_scores.size(3),
]
# ==================================================
# Update attention mask for inference. [b, np, sq, sk]
# ==================================================
if get_key_value:
with torch.no_grad():
if layer_past is not None:
attention_mask = attention_mask[
..., attention_scores.size(3) - 1, : attention_scores.size(3)
].unsqueeze(2)
else:
attention_mask = attention_mask[..., : attention_scores.size(3), : attention_scores.size(3)]
# ===========================
# Attention probs and dropout
# ===========================
# attention scores and attention mask [b, np, sq, sk]
attention_probs = self.scale_mask_softmax(attention_scores, attention_mask)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
if not self.sequence_parallel:
with tensor_parallel.random.get_cuda_rng_tracker().fork():
attention_probs = self.attention_dropout(attention_probs)
else:
attention_probs = self.attention_dropout(attention_probs)
# =========================
# Context layer. [sq, b, hp]
# =========================
# value_layer -> context layer.
# [sk, b, np, hn] --> [b, np, sq, hn]
# context layer shape: [b, np, sq, hn]
output_size = (value_layer.size(1), value_layer.size(2), query_layer.size(0), value_layer.size(3))
# change view [sk, b * np, hn]
value_layer = value_layer.view(value_layer.size(0), output_size[0] * output_size[1], -1)
# change view [b * np, sq, sk]
attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1)
# matmul: [b * np, sq, hn]
context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1))
# change view [b, np, sq, hn]
context_layer = context_layer.view(*output_size)
if headscale_tensor is not None:
context_layer = context_layer * headscale_tensor
# [b, np, sq, hn] --> [sq, b, np, hn]
context_layer = context_layer.permute(2, 0, 1, 3).contiguous()
# [sq, b, np, hn] --> [sq, b, hp]
new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
class FlashAttention(MegatronModule):
def __init__(
self,
layer_number,
num_attention_heads,
hidden_size,
attention_type=AttnType.self_attn,
attn_mask_type=AttnMaskType.padding,
precision=16,
kv_channels=None,
attention_dropout=0.1,
sequence_parallel=False,
):
super(FlashAttention, self).__init__()
if precision == 32:
raise ValueError('FlashAttention does not support fp32.')
self.precision = precision
self.fp16 = precision == 16
self.bf16 = precision == 'bf16'
if self.fp16:
self.dtype = torch.float16
else:
self.dtype = torch.bfloat16
self.flash_attention_fn = flash_attn_unpadded_qkvpacked_func
self.layer_number = max(1, layer_number)
self.attention_type = attention_type
self.attn_mask_type = attn_mask_type
self.sequence_parallel = sequence_parallel
if kv_channels is None:
assert (
hidden_size % num_attention_heads == 0
), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'
kv_channels = hidden_size // num_attention_heads
projection_size = kv_channels * num_attention_heads
# Per attention head and per partition values.
world_size = parallel_state.get_tensor_model_parallel_world_size()
self.hidden_size_per_partition = safe_divide(projection_size, world_size)
self.hidden_size_per_attention_head = safe_divide(projection_size, num_attention_heads)
self.num_attention_heads_per_partition = safe_divide(num_attention_heads, world_size)
self.num_attention_heads_partition_offset = (
self.num_attention_heads_per_partition * parallel_state.get_tensor_model_parallel_rank()
)
self.attention_dropout = attention_dropout
def forward(self, query_layer, key_layer, value_layer, attention_mask, **kwargs):
# [b, np, sq, sk]
output_size = (
query_layer.size(1),
query_layer.size(2),
query_layer.size(0),
key_layer.size(0),
)
causal = attention_mask is not None
# [s, b, np, hn] -> [b, s, np, hn] -> [b * s, 1, np, hn]
query_layer = query_layer.transpose(0, 1).reshape(output_size[0] * output_size[2], 1, output_size[1], -1)
key_layer = key_layer.transpose(0, 1).reshape(output_size[0] * output_size[3], 1, output_size[1], -1)
value_layer = value_layer.transpose(0, 1).reshape(output_size[0] * output_size[3], 1, output_size[1], -1)
# Combined q/k/v into [b * s, 3, np, hn].
qkv = torch.concat([query_layer, key_layer, value_layer], dim=1)
prev_dtype = None
if qkv.dtype != self.dtype:
prev_dtype = qkv.dtype
qkv = qkv.to(self.dtype)
batch_size = output_size[0]
seqlen = output_size[2]
max_s = seqlen
cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32, device=qkv.device)
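# cu_seqlens holds the cumulative start offsets of each (fixed-length) sequence in the packed
# [b * s, ...] QKV tensor, as required by the unpadded (varlen) flash-attention kernel.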
output = self.flash_attention_fn(
qkv, cu_seqlens, max_s, self.attention_dropout,
softmax_scale=None, causal=causal
)
if prev_dtype is not None:
output = output.to(prev_dtype)
# [b * sq, np, hn] -> [b, sq, np, hn]
matmul_result = output.view(output_size[0], output_size[2], output.shape[1], output.shape[2])
# [b, sq, np, hn] -> [b, np, sq, hn]
matmul_result = matmul_result.transpose(1, 2)
# [b, np, sq, hn] --> [sq, b, np, hn]
context_layer = matmul_result.permute(2, 0, 1, 3).contiguous()
# [sq, b, np, hn] --> [sq, b, hp]
new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
class ParallelAttention(MegatronModule, adapter_mixins.AdapterModuleMixin):
"""Parallel self-attention layer abstract class.
Self-attention layer takes input with size [s, b, h]
and returns output of the same size.
"""
def __init__(
self,
init_method,
output_layer_init_method,
layer_number,
num_attention_heads,
hidden_size,
attention_type=AttnType.self_attn,
attn_mask_type=AttnMaskType.padding,
precision=16,
apply_query_key_layer_scaling=True,
kv_channels=None,
use_cpu_initialization=False,
masked_softmax_fusion=True,
attention_dropout=0.1,
layer_type=None,
megatron_legacy=False,
bias=True,
headscale=False,
activations_checkpoint_granularity=None,
sequence_parallel=False,
gradient_accumulation_fusion=False,
normalize_attention_scores=True,
attention_impl=AttentionImpl.flash,
):
super(ParallelAttention, self).__init__()
self.precision = precision
self.layer_number = max(1, layer_number)
self.attention_type = attention_type
self.attn_mask_type = attn_mask_type
self.normalize_attention_scores = normalize_attention_scores
self.megatron_legacy = megatron_legacy
self.set_accepted_adapter_types([InfusedAdapterConfig._target_])
if kv_channels is None:
assert (
hidden_size % num_attention_heads == 0
), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'
kv_channels = hidden_size // num_attention_heads
projection_size = kv_channels * num_attention_heads
# Per attention head and per partition values.
world_size = parallel_state.get_tensor_model_parallel_world_size()
self.hidden_size_per_attention_head = safe_divide(projection_size, num_attention_heads)
self.num_attention_heads_per_partition = safe_divide(num_attention_heads, world_size)
self.num_attention_heads_partition_offset = (
self.num_attention_heads_per_partition * parallel_state.get_tensor_model_parallel_rank()
)
no_async_tensor_model_parallel_allreduce = (
parallel_state.get_tensor_model_parallel_world_size() == 1 or sequence_parallel
)
# Strided linear layer.
if attention_type == AttnType.self_attn:
# TODO: check self.query_key_value.weight.dtype at initialization for Core vs Flash
self.query_key_value = ColumnLinear(
hidden_size,
3 * projection_size,
gather_output=False,
init_method=init_method,
use_cpu_initialization=use_cpu_initialization,
bias=bias,
sequence_parallel_enabled=sequence_parallel,
no_async_tensor_model_parallel_allreduce=no_async_tensor_model_parallel_allreduce,
gradient_accumulation_fusion=gradient_accumulation_fusion,
)
else:
assert attention_type == AttnType.cross_attn
self.query = tensor_parallel.ColumnParallelLinear(
hidden_size,
projection_size,
gather_output=False,
init_method=init_method,
bias=bias,
sequence_parallel_enabled=sequence_parallel,
no_async_tensor_model_parallel_allreduce=no_async_tensor_model_parallel_allreduce,
gradient_accumulation_fusion=gradient_accumulation_fusion,
)
self.key_value = tensor_parallel.ColumnParallelLinear(
hidden_size,
2 * projection_size,
gather_output=False,
init_method=init_method,
bias=bias,
sequence_parallel_enabled=sequence_parallel,
no_async_tensor_model_parallel_allreduce=no_async_tensor_model_parallel_allreduce,
gradient_accumulation_fusion=gradient_accumulation_fusion,
)
self.checkpoint_core_attention = activations_checkpoint_granularity == 'selective'
if attention_impl == AttentionImpl.core:
self.attention = CoreAttention(
layer_number=self.layer_number,
num_attention_heads=num_attention_heads,
hidden_size=hidden_size,
attention_type=self.attention_type,
attn_mask_type=self.attn_mask_type,
precision=precision,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
masked_softmax_fusion=masked_softmax_fusion,
attention_dropout=attention_dropout,
sequence_parallel=sequence_parallel,
normalize_attention_scores=normalize_attention_scores,
)
elif attention_impl == AttentionImpl.flash:
self.attention = FlashAttention(
layer_number=self.layer_number,
num_attention_heads=num_attention_heads,
hidden_size=hidden_size,
attention_type=self.attention_type,
attn_mask_type=self.attn_mask_type,
precision=precision,
kv_channels=kv_channels,
attention_dropout=attention_dropout,
sequence_parallel=sequence_parallel,
)
else:
raise NotImplementedError(f'Attention algorithm {attention_impl} has not been implemented.')
# Output.
self.dense = tensor_parallel.RowParallelLinear(
projection_size,
hidden_size,
input_is_parallel=True,
init_method=output_layer_init_method,
skip_bias_add=True,
use_cpu_initialization=use_cpu_initialization,
bias=bias,
sequence_parallel_enabled=sequence_parallel,
gradient_accumulation_fusion=gradient_accumulation_fusion,
)
self.headscale = headscale
if headscale:
self.head_scale_tensor = torch.nn.Parameter(
torch.ones(1, self.num_attention_heads_per_partition, 1, 1), requires_grad=True
)
# Inference key-value memory
self.inference_key_memory = None
self.inference_value_memory = None
self.inference_current_sequence_len = 0
# relative position embedding
self.layer_type = layer_type
def _checkpointed_attention_forward(
self,
query_layer,
key_layer,
value_layer,
attention_mask,
rotary_pos_emb=None,
relative_position_bias=None,
headscale_tensor=None,
):
"""Forward method with activation checkpointing."""
def custom_forward(*inputs):
if len(inputs) == 7:
query_layer = inputs[0]
key_layer = inputs[1]
value_layer = inputs[2]
attention_mask = inputs[3]
rotary_pos_emb = inputs[4]
relative_position_bias = inputs[5]
headscale_tensor = inputs[6]
elif len(inputs) == 8:
query_layer = inputs[0]
key_layer = inputs[1]
value_layer = inputs[2]
attention_mask = inputs[3]
rotary_pos_emb = (inputs[4], inputs[5])
relative_position_bias = inputs[6]
headscale_tensor = inputs[7]
else:
raise ValueError('unexpected number of inputs')
output_ = self.attention(
query_layer,
key_layer,
value_layer,
attention_mask,
rotary_pos_emb=rotary_pos_emb,
relative_position_bias=relative_position_bias,
headscale_tensor=headscale_tensor,
)
return output_
if rotary_pos_emb is None:
rot_tuple = (rotary_pos_emb,)
else:
rot_tuple = (rotary_pos_emb[0], rotary_pos_emb[1])
hidden_states = tensor_parallel.checkpoint(
custom_forward,
False,
query_layer,
key_layer,
value_layer,
attention_mask,
*rot_tuple,
relative_position_bias,
headscale_tensor,
)
return hidden_states
def _allocate_memory(self, inference_max_sequence_len, batch_size, dtype):
return torch.empty(
inference_max_sequence_len,
batch_size,
self.num_attention_heads_per_partition,
self.hidden_size_per_attention_head,
dtype=dtype,
device=torch.cuda.current_device(),
)
def _transpose_last_dim(self, mixed_layer, num_splits, num_splits_first):
input_shape = mixed_layer.size()
if num_splits_first:
"""[s, b, num_splits * np * hn]
-->(view) [s, b, num_splits, np, hn]
-->(transpose) [s, b, np, num_splits, hn]
-->(view) [s, b, np * num_splits * hn] """
intermediate_shape = input_shape[:-1] + (
num_splits,
self.num_attention_heads_per_partition,
self.hidden_size_per_attention_head,
)
mixed_layer = mixed_layer.view(*intermediate_shape)
mixed_layer = mixed_layer.transpose(-2, -3).contiguous()
else:
"""[s, b, np * hn * num_splits]
-->(view) [s, b, np, hn, num_splits]
-->(transpose) [s, b, np, num_splits, hn]
-->(view) [s, b, np * num_splits * hn] """
intermediate_shape = input_shape[:-1] + (
self.num_attention_heads_per_partition,
self.hidden_size_per_attention_head,
num_splits,
)
mixed_layer = mixed_layer.view(*intermediate_shape)
mixed_layer = mixed_layer.transpose(-1, -2).contiguous()
mixed_layer = mixed_layer.view(*input_shape)
return mixed_layer
def forward(
self,
hidden_states,
attention_mask,
layer_past=None,
get_key_value=False,
encoder_output=None,
set_inference_key_value_memory=False,
inference_max_sequence_len=None,
rotary_pos_emb=None, # rotary positional embedding
relative_position_bias=None,
checkpoint_core_attention=False,
):
# hidden_states: [sq, b, h]
# =================================================
# Pre-allocate memory for key-values for inference.
# =================================================
if set_inference_key_value_memory:
assert inference_max_sequence_len and inference_max_sequence_len > 0
self.inference_key_memory = self._allocate_memory(
inference_max_sequence_len, hidden_states.size(1), hidden_states.dtype
)
self.inference_value_memory = self._allocate_memory(
inference_max_sequence_len, hidden_states.size(1), hidden_states.dtype
)
self.inference_current_sequence_len = 0
# Some consistency check.
if inference_max_sequence_len:
assert self.inference_current_sequence_len < self.inference_key_memory.size(0)
assert inference_max_sequence_len == self.inference_key_memory.size(0)
# This is added for safety. In case inference_max_sequence_len
# is not provided, make sure there is no potential memory left
# from previous inference.
if not inference_max_sequence_len:
self.inference_key_memory = None
self.inference_value_memory = None
# =====================
# Query, Key, and Value
# =====================
if self.attention_type == AttnType.self_attn:
# Attention heads [sq, b, h] --> [sq, b, (np * 3 * hn)]
mixed_x_layer, _ = self.query_key_value(hidden_states)
# [sq, b, (np * 3 * hn)] --> [sq, b, np, 3 * hn]
new_tensor_shape = mixed_x_layer.size()[:-1] + (
self.num_attention_heads_per_partition,
3 * self.hidden_size_per_attention_head,
)
if self.megatron_legacy:
mixed_x_layer = self._transpose_last_dim(mixed_x_layer, 3, True)
mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)
# [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn]
(query_layer, key_layer, value_layer) = tensor_parallel.split_tensor_along_last_dim(mixed_x_layer, 3)
else:
# Attention heads [sk, b, h] --> [sk, b, (np * 2 * hn)]
mixed_kv_layer, _ = self.key_value(encoder_output)
# [sk, b, (np * 2 * hn)] --> [sk, b, np, 2 * hn]
new_tensor_shape = mixed_kv_layer.size()[:-1] + (
self.num_attention_heads_per_partition,
2 * self.hidden_size_per_attention_head,
)
if self.megatron_legacy:
mixed_kv_layer = self._transpose_last_dim(mixed_kv_layer, 2, True)
mixed_kv_layer = mixed_kv_layer.view(*new_tensor_shape)
# [sk, b, np, 2 * hn] --> 2 [sk, b, np, hn]
(key_layer, value_layer) = tensor_parallel.split_tensor_along_last_dim(mixed_kv_layer, 2)
# Attention head [sq, b, h] --> [sq, b, hp]
query_layer, _ = self.query(hidden_states)
# [sq, b, hp] --> [sq, b, np, hn]
new_tensor_shape = query_layer.size()[:-1] + (
self.num_attention_heads_per_partition,
self.hidden_size_per_attention_head,
)
query_layer = query_layer.view(*new_tensor_shape)
if self.is_adapter_available():
key_infused_adapter = self.get_from_adapter_layer(AdapterName.KEY_INFUSED)
value_infused_adapter = self.get_from_adapter_layer(AdapterName.VALUE_INFUSED)
if key_infused_adapter:
assert value_infused_adapter is not None, "Expected value_infused_adapter not found!"
kls = key_layer.shape
key_layer = key_infused_adapter(key_layer.reshape(kls[0], kls[1], -1)).reshape(kls)
if value_infused_adapter:
assert key_infused_adapter is not None, "Expected key_infused_adapter not found!"
vls = value_layer.shape
value_layer = value_infused_adapter(value_layer.reshape(vls[0], vls[1], -1)).reshape(vls)
# ===================================================
# Adjust key, value, and attention mask for inference
# ===================================================
# duplicate the pos_emb for self attention
if rotary_pos_emb is not None:
rotary_pos_emb = rotary_pos_emb if isinstance(rotary_pos_emb, tuple) else ((rotary_pos_emb,) * 2)
if inference_max_sequence_len:
# Adjust the range variables.
start = self.inference_current_sequence_len
self.inference_current_sequence_len += key_layer.size(0)
end = self.inference_current_sequence_len
# Copy key and values.
self.inference_key_memory[start:end, ...] = key_layer
self.inference_value_memory[start:end, ...] = value_layer
key_layer = self.inference_key_memory[:end, ...]
value_layer = self.inference_value_memory[:end, ...]
# Adjust attention mask
attention_mask = attention_mask[..., start:end, :end]
# adjust the key rotary positional embedding
if rotary_pos_emb is not None:
q_pos_emb, k_pos_emb = rotary_pos_emb
if not set_inference_key_value_memory:
# In inference, we compute one token at a time.
# Select the correct positional embedding.
q_pos_emb = q_pos_emb[end - 1 : end]
k_pos_emb = k_pos_emb[:end, :, :, :]
rotary_pos_emb = (q_pos_emb, k_pos_emb)
if layer_past is not None:
past_key, past_value = layer_past
key_layer = torch.cat((past_key.type_as(key_layer), key_layer), dim=0)
value_layer = torch.cat((past_value.type_as(value_layer), value_layer), dim=0)
if get_key_value:
present = (key_layer, value_layer)
if checkpoint_core_attention:
context_layer = self._checkpointed_attention_forward(
query_layer,
key_layer,
value_layer,
attention_mask,
rotary_pos_emb=rotary_pos_emb,
relative_position_bias=relative_position_bias,
headscale_tensor=self.head_scale_tensor if self.headscale else None,
)
else:
context_layer = self.attention(
query_layer,
key_layer,
value_layer,
attention_mask,
layer_past=layer_past,
get_key_value=get_key_value,
rotary_pos_emb=rotary_pos_emb,
relative_position_bias=relative_position_bias,
headscale_tensor=self.head_scale_tensor if self.headscale else None,
)
# =================
# Output. [sq, b, h]
# =================
output, bias = self.dense(context_layer)
if get_key_value:
output = [output, present]
return output, bias
class ParallelChunkedCrossAttention(MegatronModule):
"""Parallel chunked cross-attention layer class.
Cross-attention layer takes input with size [b, s, h]
and returns output of the same size.
"""
def __init__(
self,
init_method,
output_layer_init_method,
layer_number,
num_attention_heads,
hidden_size,
precision=16,
apply_query_key_layer_scaling=True,
kv_channels=None,
use_cpu_initialization=False,
masked_softmax_fusion=True,
attention_dropout=0.1,
megatron_legacy=False,
chunk_size=64, # each chunk, how many tokens
bias=True,
headscale=False,
gradient_accumulation_fusion=False,
normalize_attention_scores=True,
):
super(ParallelChunkedCrossAttention, self).__init__()
self.cross_attention = ParallelAttention(
init_method=init_method,
output_layer_init_method=output_layer_init_method,
layer_number=layer_number,
num_attention_heads=num_attention_heads,
hidden_size=hidden_size,
attention_type=AttnType.cross_attn,
attn_mask_type=AttnMaskType.padding,
precision=precision,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
use_cpu_initialization=use_cpu_initialization,
masked_softmax_fusion=masked_softmax_fusion,
attention_dropout=attention_dropout,
megatron_legacy=megatron_legacy,
bias=bias,
headscale=headscale,
gradient_accumulation_fusion=gradient_accumulation_fusion,
normalize_attention_scores=normalize_attention_scores,
)
self.chunk_size = chunk_size
def forward(
self,
hidden_states,
attention_mask,
encoder_output=None,
set_inference_key_value_memory=False,
inference_max_sequence_len=None,
rotary_pos_emb=None,
checkpoint_core_attention=False,
):
if checkpoint_core_attention:
raise ValueError(
'checkpoint_core_attention during forward not implemented yet for ParallelChunkedCrossAttention'
)
# hidden_states is assumed to have dimension [token length, batch, dimension]
# derive variables
# encoder_output here is the retrieved context
context = encoder_output
# context is assumed to have dimension [num_chunks, num_neighbors, context_token_len, batch, dimension]
chunk_size = self.chunk_size
b, n, dim = (
hidden_states.shape[1],
hidden_states.shape[0],
hidden_states.shape[2],
)
default_bias = self.cross_attention.dense.bias
if set_inference_key_value_memory:
seq_index = (n // chunk_size) * chunk_size
self.current_len = n
elif inference_max_sequence_len is not None:
# only handles single token increment
assert n == 1
self.current_len += n
token_pos = (self.current_len - 1) % chunk_size
chunk_id = self.current_len // chunk_size
if chunk_id <= 0:
# if sequence length less than chunk size, do an early return
return torch.zeros_like(hidden_states), default_bias
causal_padding = chunk_size - 1
# pad it as a full chunk, put it at the end of the chunk position
hidden_states = F.pad(hidden_states, (0, 0, 0, 0, causal_padding, 0), value=0.0)
# only use the relevant context
context = context[chunk_id - 1 : chunk_id, :, :, :, :]
attention_mask = rearrange(attention_mask, '(b k) 1 q v -> b k 1 q v', b=b)
# select the relevant chunk attn mask
attention_mask = attention_mask[:, chunk_id - 1]
seq_index = chunk_size
else:
# this is normal forward without inference
seq_index = (n // chunk_size) * chunk_size
# if sequence length less than chunk size, do an early return
if n < self.chunk_size and set_inference_key_value_memory and inference_max_sequence_len is not None:
return torch.zeros_like(hidden_states), default_bias
num_chunks, num_retrieved = (
context.shape[-5],
context.shape[-4],
)
# causal padding
causal_padding = chunk_size - 1
x = F.pad(hidden_states, (0, 0, 0, 0, -causal_padding, causal_padding), value=0.0)
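# Queries are shifted forward by (chunk_size - 1) positions so that each chunk only attends
# to neighbors retrieved for fully completed previous chunks (RETRO-style causality).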
# remove sequence which is ahead of the neighbors retrieved (during inference)
# seq_index = (n // chunk_size) * chunk_size
x, x_remainder = x[:seq_index], x[seq_index:]
seq_remain_len = x_remainder.shape[0]
# take care of rotary positional embedding
# make sure queries positions are properly shifted to the future
q_pos_emb, k_pos_emb = rotary_pos_emb
# NOTE: the current implementation is broken here;
# q would need to extend to causal_padding, i.e. just do
# q_pos_emb = F.pad(q_pos_emb, (0, 0, -causal_padding, 0), value = 0.)
if inference_max_sequence_len is not None and not set_inference_key_value_memory:
q_pos_emb = F.pad(
q_pos_emb, (0, 0, 0, 0, 0, 0, -causal_padding - token_pos, -causal_padding + token_pos), value=0.0
)
else:
q_pos_emb = F.pad(q_pos_emb, (0, 0, 0, 0, 0, 0, -causal_padding, 0), value=0.0)
k_pos_emb = repeat(k_pos_emb, 'n b h d -> (r n) b h d', r=num_retrieved)
rotary_pos_emb = (q_pos_emb, k_pos_emb)
# make sure number context chunks is enough
assert x.shape[0] // chunk_size == num_chunks
# reshape so we have chunk to chunk attention, without breaking causality
x = rearrange(x, '(k n) b d -> n (b k) d', k=num_chunks)
context = rearrange(context, 'k r n b d -> (r n) (b k) d')
# cross attention
out, bias = self.cross_attention(x, attention_mask, encoder_output=context, rotary_pos_emb=rotary_pos_emb)
# reshape back to original sequence
out = rearrange(out, 'n (b k) d -> (k n) b d', b=b)
# pad back to original, with 0s at the beginning (which will be added to the residual and be fine)
out = F.pad(out, (0, 0, 0, 0, causal_padding, -causal_padding + seq_remain_len), value=0.0)
if not set_inference_key_value_memory and inference_max_sequence_len is not None:
out = out[-1:]
return out, bias
def get_bias_dropout_add(training):
def _bias_dropout_add(x, bias, residual, prob):
return bias_dropout_add(x, bias, residual, prob, training)
return _bias_dropout_add
def get_dropout_add(training):
def _dropout_add(x, bias, residual, prob):
assert bias is None
return dropout_add(x, bias, residual, prob, training)
return _dropout_add
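# Both helpers return closures that capture the training flag, so the (possibly fused / jitted)
# bias-dropout-add kernels can always be called with the same (x, bias, residual, prob) signature.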
class ParallelTransformerLayer_(MegatronModule, adapter_mixins.AdapterModuleMixin):
"""A single transformer layer.
Transformer layer takes input with size [s, b, h] and returns an
output of the same size.
"""
def __init__(
self,
init_method,
output_layer_init_method,
layer_number,
hidden_size,
ffn_hidden_size,
num_attention_heads,
layer_type=LayerType.encoder,
self_attn_mask_type=AttnMaskType.padding,
fp32_residual_connection=False,
precision=16,
apply_query_key_layer_scaling=True,
kv_channels=None,
layernorm_epsilon=1e-5,
hidden_dropout=0.1,
persist_layer_norm=False,
use_cpu_initialization=False,
bias_activation_fusion=True,
bias_dropout_add_fusion=True,
masked_softmax_fusion=True,
gradient_accumulation_fusion=False,
openai_gelu=False,
onnx_safe=False,
attention_dropout=0.1,
ffn_dropout=0.0,
activation='gelu',
megatron_legacy=False,
bias=True,
chunk_size=64,
normalization='layernorm',
transformer_block_type='pre_ln',
headscale=False,
activations_checkpoint_granularity=None,
sequence_parallel=False,
normalize_attention_scores=True,
num_moe_experts=1,
moe_frequency=1,
moe_dropout=0.0,
):
super(ParallelTransformerLayer_, self).__init__()
if kv_channels is None:
assert (
hidden_size % num_attention_heads == 0
), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'
kv_channels = hidden_size // num_attention_heads
self.layer_number = layer_number
self.layer_type = layer_type
self.bias = bias
self.transformer_block_type = transformer_block_type
self.set_accepted_adapter_types([LinearAdapterConfig._target_, ParallelLinearAdapterConfig._target_])
if not bias and bias_dropout_add_fusion:
raise ValueError(
'bias_dropout_add_fusion=True requires bias=True, found bias=False. Either set both to True or both to False.'
)
if normalization not in ['layernorm', 'layernorm1p', 'rmsnorm']:
raise ValueError(f'normalization must be "layernorm", "layernorm1p" or "rmsnorm", found {normalization}')
if transformer_block_type not in ['pre_ln', 'post_ln', 'normformer']:
raise ValueError(
f'transformer_block_type must be either "pre_ln" or "post_ln" or "normformer", found {transformer_block_type}'
)
self.fp32_residual_connection = fp32_residual_connection # if true move residual connections to fp32
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.bias_dropout_add_fusion = bias_dropout_add_fusion # if true, enable bias dropout fusion
# Self attention.
# retrieval_decoder_after_self_attn skips the self attention
if self.layer_type != LayerType.retrieval_decoder_after_self_attn:
# Layernorm on the input data.
if normalization == 'layernorm':
self.input_layernorm = get_layer_norm(
hidden_size, layernorm_epsilon, persist_layer_norm, sequence_parallel
)
elif normalization == 'layernorm1p':
self.input_layernorm = LayerNorm1P(
hidden_size, layernorm_epsilon, sequence_parallel_enabled=sequence_parallel
)
else:
self.input_layernorm = MixedFusedRMSNorm(hidden_size, layernorm_epsilon)
self.self_attention = ParallelAttention(
init_method=init_method,
output_layer_init_method=output_layer_init_method,
layer_number=layer_number,
num_attention_heads=num_attention_heads,
hidden_size=hidden_size,
attention_type=AttnType.self_attn,
attn_mask_type=self_attn_mask_type,
precision=precision,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
use_cpu_initialization=use_cpu_initialization,
masked_softmax_fusion=masked_softmax_fusion,
attention_dropout=attention_dropout,
layer_type=layer_type,
megatron_legacy=megatron_legacy,
bias=bias,
headscale=headscale,
activations_checkpoint_granularity=activations_checkpoint_granularity,
sequence_parallel=sequence_parallel,
gradient_accumulation_fusion=gradient_accumulation_fusion,
normalize_attention_scores=normalize_attention_scores,
)
if transformer_block_type == 'normformer':
if normalization == 'layernorm':
self.post_attention_normformer_norm = get_layer_norm(
hidden_size, layernorm_epsilon, persist_layer_norm
)
else:
self.post_attention_normformer_norm = MixedFusedRMSNorm(hidden_size, layernorm_epsilon)
if self.layer_type != LayerType.decoder_pre_mlp or self.transformer_block_type != 'post_ln':
# the post_attention_layernorm is used for layernorm after the mlp
# it is not needed when layer_type is decoder_pre_mlp and the block type is post_ln
if normalization == 'layernorm':
self.post_attention_layernorm = get_layer_norm(
hidden_size, layernorm_epsilon, persist_layer_norm, sequence_parallel
)
elif normalization == 'layernorm1p':
self.post_attention_layernorm = LayerNorm1P(
hidden_size, layernorm_epsilon, sequence_parallel_enabled=sequence_parallel
)
else:
self.post_attention_layernorm = MixedFusedRMSNorm(hidden_size, layernorm_epsilon)
if self.layer_type == LayerType.decoder_pre_mlp:
# skip MLP and cross attention
return
# the post_attention_layernorm is used for layernorm after the mlp
# it is needed for post_ln
if self.layer_type == LayerType.retrieval_decoder_after_self_attn and self.transformer_block_type == 'post_ln':
# Layernorm on the attention output
if normalization == 'layernorm':
self.post_attention_layernorm = get_layer_norm(
hidden_size, layernorm_epsilon, persist_layer_norm, sequence_parallel
)
elif normalization == 'layernorm1p':
self.post_attention_layernorm = LayerNorm1P(
hidden_size, layernorm_epsilon, sequence_parallel_enabled=sequence_parallel
)
else:
self.post_attention_layernorm = MixedFusedRMSNorm(hidden_size, layernorm_epsilon)
if self.layer_type == LayerType.decoder or self.layer_type == LayerType.retrieval_encoder:
self.inter_attention = ParallelAttention(
init_method=init_method,
output_layer_init_method=output_layer_init_method,
layer_number=layer_number,
num_attention_heads=num_attention_heads,
hidden_size=hidden_size,
attention_type=AttnType.cross_attn,
attn_mask_type=AttnMaskType.padding,
precision=precision,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
use_cpu_initialization=use_cpu_initialization,
masked_softmax_fusion=masked_softmax_fusion,
attention_dropout=attention_dropout,
megatron_legacy=megatron_legacy,
bias=bias,
headscale=headscale,
sequence_parallel=sequence_parallel,
gradient_accumulation_fusion=gradient_accumulation_fusion,
normalize_attention_scores=normalize_attention_scores,
)
# Normformer normalization
if transformer_block_type == 'normformer':
if normalization == 'layernorm':
self.post_inter_attention_normformer_norm = get_layer_norm(
hidden_size, layernorm_epsilon, persist_layer_norm, sequence_parallel
)
elif normalization == 'layernorm1p':
self.post_inter_attention_normformer_norm = LayerNorm1P(
hidden_size, layernorm_epsilon, sequence_parallel_enabled=sequence_parallel
)
else:
self.post_inter_attention_normformer_norm = MixedFusedRMSNorm(hidden_size, layernorm_epsilon)
# Layernorm on the attention output.
if normalization == 'layernorm':
self.post_inter_attention_layernorm = get_layer_norm(
hidden_size, layernorm_epsilon, persist_layer_norm, sequence_parallel
)
elif normalization == 'layernorm1p':
self.post_inter_attention_layernorm = LayerNorm1P(
hidden_size, layernorm_epsilon, sequence_parallel_enabled=sequence_parallel
)
else:
self.post_inter_attention_layernorm = MixedFusedRMSNorm(hidden_size, layernorm_epsilon)
elif (
self.layer_type == LayerType.retrieval_decoder
or self.layer_type == LayerType.retrieval_decoder_after_self_attn
):
self.inter_attention = ParallelChunkedCrossAttention(
init_method=init_method,
output_layer_init_method=output_layer_init_method,
layer_number=layer_number,
num_attention_heads=num_attention_heads,
hidden_size=hidden_size,
precision=precision,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
use_cpu_initialization=use_cpu_initialization,
masked_softmax_fusion=masked_softmax_fusion,
attention_dropout=attention_dropout,
megatron_legacy=megatron_legacy,
chunk_size=chunk_size,
bias=bias,
headscale=headscale,
gradient_accumulation_fusion=gradient_accumulation_fusion,
)
# Normformer normalization
if transformer_block_type == 'normformer':
if normalization == 'layernorm':
self.post_inter_attention_normformer_norm = get_layer_norm(
hidden_size, layernorm_epsilon, persist_layer_norm, sequence_parallel
)
elif normalization == 'layernorm1p':
self.post_inter_attention_normformer_norm = LayerNorm1P(
hidden_size, layernorm_epsilon, sequence_parallel_enabled=sequence_parallel
)
else:
self.post_inter_attention_normformer_norm = MixedFusedRMSNorm(hidden_size, layernorm_epsilon)
# Layernorm on the attention output.
if normalization == 'layernorm':
self.post_inter_attention_layernorm = get_layer_norm(
hidden_size, layernorm_epsilon, persist_layer_norm, sequence_parallel
)
elif normalization == 'layernorm1p':
self.post_inter_attention_layernorm = LayerNorm1P(
hidden_size, layernorm_epsilon, sequence_parallel_enabled=sequence_parallel
)
else:
self.post_inter_attention_layernorm = MixedFusedRMSNorm(hidden_size, layernorm_epsilon)
# MLP
if num_moe_experts > 1 and self.layer_number % moe_frequency == 0:
self.mlp = SwitchMLP(
num_experts=num_moe_experts,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
hidden_size=hidden_size,
ffn_hidden_size=ffn_hidden_size,
use_cpu_initialization=use_cpu_initialization,
bias_activation_fusion=bias_activation_fusion,
openai_gelu=openai_gelu,
onnx_safe=onnx_safe,
activation=activation,
bias=bias,
transformer_block_type=transformer_block_type,
normalization=normalization,
layernorm_epsilon=layernorm_epsilon,
persist_layer_norm=persist_layer_norm,
sequence_parallel=sequence_parallel,
gradient_accumulation_fusion=gradient_accumulation_fusion,
dropout=moe_dropout,
)
else:
self.mlp = ParallelMLP(
init_method=init_method,
output_layer_init_method=output_layer_init_method,
hidden_size=hidden_size,
ffn_hidden_size=ffn_hidden_size,
use_cpu_initialization=use_cpu_initialization,
bias_activation_fusion=bias_activation_fusion,
openai_gelu=openai_gelu,
onnx_safe=onnx_safe,
activation=activation,
bias=bias,
transformer_block_type=transformer_block_type,
normalization=normalization,
layernorm_epsilon=layernorm_epsilon,
persist_layer_norm=persist_layer_norm,
sequence_parallel=sequence_parallel,
gradient_accumulation_fusion=gradient_accumulation_fusion,
dropout=ffn_dropout,
)
def _get_bias_droput_add_func(self, transformer_block_type='pre_ln', position_after='attention'):
"""
Returns a function that potentially fuses the dropout and bias addition.
This is particularly helpful for the normformer architecture, which does not use the fused kernel after attention layers but can use it after the MLP.
"""
# Normformer activations at this point have no bias vector since they've gone through another normalization layer.
if transformer_block_type == 'normformer' and position_after == 'attention':
bias_dropout_add_func = get_dropout_add(self.training)
# Bias dropout add fused kernel
elif self.bias and self.bias_dropout_add_fusion:
if self.training:
bias_dropout_add_func = bias_dropout_add_fused_train
else:
bias_dropout_add_func = bias_dropout_add_fused_inference
# Bias dropout add non-fused kernel
elif self.bias and not self.bias_dropout_add_fusion:
bias_dropout_add_func = get_bias_dropout_add(self.training)
# Dropout add non-fused kernel for a model without bias terms.
else:
bias_dropout_add_func = get_dropout_add(self.training)
return bias_dropout_add_func
def forward(
self,
hidden_states,
attention_mask,
encoder_output=None,
enc_dec_attn_mask=None,
layer_past=None,
get_key_value=False,
set_inference_key_value_memory=False,
inference_max_sequence_len=None,
rotary_pos_emb=None, # list of positional embedding tensors: the first is for self attention, the second and third are for cross attention (q, k)
self_attention_relative_position_bias=None,
cross_attention_relative_position_bias=None,
checkpoint_core_attention=False,
):
# Self attention.
if rotary_pos_emb is not None:
# self attention pos_emb is (q, q)
self_attention_pos_emb = (rotary_pos_emb[0], rotary_pos_emb[0])
cross_attention_pos_emb = (rotary_pos_emb[1], rotary_pos_emb[2])
else:
self_attention_pos_emb = None
cross_attention_pos_emb = None
if self.layer_type != LayerType.retrieval_decoder_after_self_attn:
# hidden_states: [b, s, h]
# Pre-LN: x -> LN -> MHA -> Residual -> LN -> MLP -> Residual
# Post-LN: x -> MHA -> Residual -> LN -> MLP -> Residual -> LN
# Normformer: x -> LN -> MHA -> LN -> Residual -> MLP (w/LN) -> Residual
residual = hidden_states
# Layer norm at the beginning of the transformer layer.
if self.transformer_block_type in ['pre_ln', 'normformer']:
hidden_states = self.input_layernorm(hidden_states)
attention_output, attention_bias = self.self_attention(
hidden_states,
attention_mask,
layer_past=layer_past,
get_key_value=get_key_value,
set_inference_key_value_memory=set_inference_key_value_memory,
inference_max_sequence_len=inference_max_sequence_len,
rotary_pos_emb=self_attention_pos_emb,
relative_position_bias=self_attention_relative_position_bias,
checkpoint_core_attention=checkpoint_core_attention,
)
if get_key_value:
attention_output, presents = attention_output
# If normformer, apply norm on the output of the self attention.
if self.transformer_block_type == 'normformer':
# Normformer normalization
attention_output = (
attention_output + attention_bias if attention_bias is not None else attention_output
)
attention_output = self.post_attention_normformer_norm(attention_output)
attention_bias = None
# jit scripting for an nn.Module (with dropout) is not
# triggering the fusion kernel. For now, we use two
# different nn.functional routines to account for varying
# dropout semantics during training and inference phases.
bias_dropout_add_func = self._get_bias_droput_add_func(
transformer_block_type=self.transformer_block_type, position_after='attention'
)
if attention_bias is not None:
attention_bias = attention_bias.expand_as(residual)
layernorm_input = bias_dropout_add_func(attention_output, attention_bias, residual, self.hidden_dropout)
# print(f"Layer: {self.layer_number} Attention checksum {layernorm_input.sum()}")
if self.is_adapter_available():
adapter_1 = self.get_from_adapter_layer(AdapterName.PRE_ATTN_ADAPTER)
if adapter_1:
strategy = adapter_1.adapter_strategy
layernorm_input = self.forward_single_enabled_adapter_(
layernorm_input,
adapter_1,
adapter_name=AdapterName.PRE_ATTN_ADAPTER,
adapter_strategy=strategy,
)
# Post-LN normalization after residual
if self.transformer_block_type == 'post_ln':
normalization_output = self.input_layernorm(layernorm_input)
layernorm_input = normalization_output
elif self.transformer_block_type in ['pre_ln', 'normformer']:
# Layer norm post the self attention.
normalization_output = self.post_attention_layernorm(layernorm_input)
else:
layernorm_input, normalization_output = hidden_states
if self.layer_type == LayerType.decoder_pre_mlp:
return layernorm_input, normalization_output
if (
self.layer_type == LayerType.decoder
or self.layer_type == LayerType.retrieval_decoder
or self.layer_type == LayerType.retrieval_encoder
or self.layer_type == LayerType.retrieval_decoder_after_self_attn
):
if (
self.layer_type == LayerType.retrieval_decoder
or self.layer_type == LayerType.retrieval_decoder_after_self_attn
):
attention_output, attention_bias = self.inter_attention(
normalization_output,
enc_dec_attn_mask,
encoder_output=encoder_output,
rotary_pos_emb=cross_attention_pos_emb,
set_inference_key_value_memory=set_inference_key_value_memory,
inference_max_sequence_len=inference_max_sequence_len,
checkpoint_core_attention=checkpoint_core_attention,
)
else:
attention_output, attention_bias = self.inter_attention(
normalization_output,
enc_dec_attn_mask,
encoder_output=encoder_output,
rotary_pos_emb=cross_attention_pos_emb,
relative_position_bias=cross_attention_relative_position_bias,
checkpoint_core_attention=checkpoint_core_attention,
)
# If normformer, apply norm on the output of the self attention.
if self.transformer_block_type == 'normformer':
# Normformer normalization
attention_output = (
attention_output + attention_bias if attention_bias is not None else attention_output
)
attention_output = self.post_inter_attention_normformer_norm(attention_output)
attention_bias = None
residual = layernorm_input
bias_dropout_add_func = self._get_bias_droput_add_func(
transformer_block_type=self.transformer_block_type, position_after='attention'
)
layernorm_input = bias_dropout_add_func(attention_output, attention_bias, residual, self.hidden_dropout)
# print(f"Layer: {self.layer_number} Cross-Attention checksum {layernorm_input.sum()}")
normalization_output = self.post_inter_attention_layernorm(layernorm_input)
# Post-LN normalization after residual
if self.transformer_block_type == 'post_ln':
layernorm_input = normalization_output
# MLP.
mlp_output, mlp_bias = self.mlp(normalization_output)
residual = layernorm_input
bias_dropout_add_func = self._get_bias_droput_add_func(
transformer_block_type=self.transformer_block_type, position_after='mlp'
)
output = bias_dropout_add_func(mlp_output, mlp_bias, residual, self.hidden_dropout)
# print(f"Layer: {self.layer_number} MLP + Dropout + Residual checksum {output.sum()}")
if self.transformer_block_type == 'post_ln':
output = self.post_attention_layernorm(output)
if get_key_value:
output = [output, presents]
if (
self.is_adapter_available()
): # TODO: (@adithyre) was able to move adapter_2 back to the end of the transformer after ptl 1.7 update.
adapter_2 = self.get_from_adapter_layer(AdapterName.POST_ATTN_ADAPTER)
if adapter_2:
strategy = adapter_2.adapter_strategy
output = self.forward_single_enabled_adapter_(
output, adapter_2, adapter_name=AdapterName.POST_ATTN_ADAPTER, adapter_strategy=strategy
)
return output
class ParallelTransformerLayer(ParallelTransformerLayer_):
def __init__(
self,
init_method,
output_layer_init_method,
layer_number,
hidden_size,
ffn_hidden_size,
num_attention_heads,
layer_type=LayerType.encoder,
self_attn_mask_type=AttnMaskType.padding,
fp32_residual_connection=False,
precision=16,
apply_query_key_layer_scaling=True,
kv_channels=None,
layernorm_epsilon=1e-5,
hidden_dropout=0.1,
bias_dropout_add_fusion=True,
persist_layer_norm=False,
use_cpu_initialization=False,
bias_activation_fusion=True,
openai_gelu=False,
onnx_safe=False,
masked_softmax_fusion=True,
attention_dropout=0.1,
ffn_dropout=0.0,
activation='gelu',
megatron_legacy=False,
bias=True,
chunk_size=64,
normalization='layernorm',
transformer_block_type='pre_ln',
headscale=False,
activations_checkpoint_granularity=None,
sequence_parallel=False,
gradient_accumulation_fusion=False,
normalize_attention_scores=True,
num_moe_experts=1,
moe_frequency=1,
moe_dropout=0.0,
):
super(ParallelTransformerLayer, self).__init__(
init_method=init_method,
output_layer_init_method=output_layer_init_method,
layer_number=layer_number,
hidden_size=hidden_size,
ffn_hidden_size=ffn_hidden_size,
num_attention_heads=num_attention_heads,
layer_type=layer_type,
self_attn_mask_type=self_attn_mask_type,
fp32_residual_connection=fp32_residual_connection,
precision=precision,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
layernorm_epsilon=layernorm_epsilon,
hidden_dropout=hidden_dropout,
bias_dropout_add_fusion=bias_dropout_add_fusion,
persist_layer_norm=persist_layer_norm,
use_cpu_initialization=use_cpu_initialization,
bias_activation_fusion=bias_activation_fusion,
openai_gelu=openai_gelu,
onnx_safe=onnx_safe,
masked_softmax_fusion=masked_softmax_fusion,
attention_dropout=attention_dropout,
ffn_dropout=ffn_dropout,
activation=activation,
megatron_legacy=megatron_legacy,
bias=bias,
chunk_size=chunk_size,
normalization=normalization,
transformer_block_type=transformer_block_type,
headscale=headscale,
activations_checkpoint_granularity=activations_checkpoint_granularity,
sequence_parallel=sequence_parallel,
gradient_accumulation_fusion=gradient_accumulation_fusion,
normalize_attention_scores=normalize_attention_scores,
num_moe_experts=num_moe_experts,
moe_frequency=moe_frequency,
moe_dropout=moe_dropout,
)
if precision == 32:
self.dtype = torch.float32
elif precision == 16:
self.dtype = torch.float16
elif precision == 'bf16':
self.dtype = torch.bfloat16
else:
raise ValueError(f'precision must be one of 32, 16 or "bf16", found: {precision}')
def forward(
self,
hidden_states,
attention_mask,
encoder_output=None,
enc_dec_attn_mask=None,
rotary_pos_emb=None,
layer_past=None,
get_key_value=False,
set_inference_key_value_memory=False,
inference_max_sequence_len=None,
self_attention_relative_position_bias=None,
cross_attention_relative_position_bias=None,
checkpoint_core_attention=False,
):
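# Run the base layer directly in fp32; otherwise wrap the call in torch.autocast with the
# half-precision dtype selected at construction time.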
if self.dtype == torch.float32:
return super().forward(
hidden_states,
attention_mask,
encoder_output,
enc_dec_attn_mask,
layer_past,
get_key_value,
set_inference_key_value_memory,
inference_max_sequence_len,
rotary_pos_emb,
self_attention_relative_position_bias,
cross_attention_relative_position_bias,
checkpoint_core_attention,
)
with torch.autocast(device_type="cuda", dtype=self.dtype):
return super().forward(
hidden_states,
attention_mask,
encoder_output,
enc_dec_attn_mask,
layer_past,
get_key_value,
set_inference_key_value_memory,
inference_max_sequence_len,
rotary_pos_emb,
self_attention_relative_position_bias,
cross_attention_relative_position_bias,
checkpoint_core_attention,
)
class AutocastTransformerLayer(TransformerLayer):
def __init__(
self,
hidden_size: int,
ffn_hidden_size: int,
layernorm_epsilon: float,
num_attention_heads: int,
init_method: Callable,
output_layer_init_method: Callable,
hidden_dropout: float,
attention_dropout: float,
layer_number: Optional[int] = None,
kv_channels: Optional[int] = None,
self_attn_mask_type: str = "causal",
tp_group: Optional[Any] = None,
tp_size: int = 1,
params_dtype: torch.dtype = torch.float32,
get_rng_state_tracker: Optional[Callable] = None,
fuse_wgrad_accumulation: bool = False,
apply_query_key_layer_scaling: bool = True,
attention_softmax_in_fp32: bool = False,
seq_length: Optional[int] = None,
micro_batch_size: Optional[int] = None,
sequence_parallel: bool = False,
apply_residual_connection_post_layernorm: bool = False,
output_layernorm: bool = False,
layer_type: str = "encoder",
drop_path_rate: float = 0,
use_emha: bool = False,
autocast_dtype: Any = 16,
) -> None:
super().__init__(
hidden_size=hidden_size,
ffn_hidden_size=ffn_hidden_size,
layernorm_epsilon=layernorm_epsilon,
num_attention_heads=num_attention_heads,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
hidden_dropout=hidden_dropout,
attention_dropout=attention_dropout,
layer_number=layer_number,
kv_channels=kv_channels,
self_attn_mask_type=self_attn_mask_type,
tp_group=tp_group,
tp_size=tp_size,
params_dtype=params_dtype,
get_rng_state_tracker=get_rng_state_tracker,
fuse_wgrad_accumulation=fuse_wgrad_accumulation,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
attention_softmax_in_fp32=attention_softmax_in_fp32,
seq_length=seq_length,
micro_batch_size=micro_batch_size,
sequence_parallel=sequence_parallel,
apply_residual_connection_post_layernorm=apply_residual_connection_post_layernorm,
output_layernorm=output_layernorm,
layer_type=layer_type,
drop_path_rate=drop_path_rate,
set_parallel_mode=tp_size > 1,
fuse_qkv_params=True,
)
# use_emha=use_emha,
if autocast_dtype == 32:
self.dtype = torch.float32
elif autocast_dtype == 16:
self.dtype = torch.float16
elif autocast_dtype == 'bf16':
self.dtype = torch.bfloat16
else:
raise ValueError(f'autocast_dtype must be one of 32, 16 or "bf16", found: {autocast_dtype}')
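# Like ParallelTransformerLayer above, this wrapper simply runs the parent TransformerLayer
# (from Transformer Engine) forward under torch.autocast when a half-precision dtype is selected.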
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
encoder_output: Optional[torch.Tensor] = None,
enc_dec_attn_mask: Optional[torch.Tensor] = None,
inference_params: Optional[Any] = None,
is_first_microbatch: Optional[bool] = None,
checkpoint_core_attention: Optional[bool] = False,
) -> torch.Tensor:
if self.dtype == torch.float32:
return super().forward(
hidden_states,
attention_mask,
encoder_output=encoder_output,
enc_dec_attn_mask=enc_dec_attn_mask,
inference_params=inference_params,
is_first_microbatch=is_first_microbatch,
checkpoint_core_attention=checkpoint_core_attention,
)
with torch.autocast(device_type="cuda", dtype=self.dtype):
return super().forward(
hidden_states,
attention_mask,
encoder_output=encoder_output,
enc_dec_attn_mask=enc_dec_attn_mask,
inference_params=inference_params,
is_first_microbatch=is_first_microbatch,
checkpoint_core_attention=checkpoint_core_attention,
)
class ParallelTransformer(MegatronModule):
"""Transformer class."""
def __init__(
self,
init_method,
output_layer_init_method,
num_layers,
hidden_size,
ffn_hidden_size,
num_attention_heads,
apply_query_key_layer_scaling=True,
kv_channels=None,
layer_type=LayerType.encoder, # it can be a list of types or single type
self_attn_mask_type=AttnMaskType.padding,
pre_process=True,
post_process=True,
precision=16,
fp32_residual_connection=False,
activations_checkpoint_method=None,
activations_checkpoint_num_layers=None,
layernorm_epsilon=1e-5,
hidden_dropout=0.1,
attention_dropout=0.1,
ffn_dropout=0.0,
use_cpu_initialization=False,
bias_activation_fusion=True,
bias_dropout_add_fusion=True,
masked_softmax_fusion=True,
gradient_accumulation_fusion=False,
persist_layer_norm=False,
openai_gelu=False,
onnx_safe=False,
activation='gelu',
model_type=ModelType.encoder_or_decoder,
megatron_legacy=False,
bias=True,
chunk_size=64,
normalization='layernorm',
transformer_block_type='pre_ln',
headscale=False,
layer_number_offset=0, # this is used only for attention norm_factor scaling
activations_checkpoint_granularity=None,
activations_checkpoint_layers_per_pipeline=None,
sequence_parallel=False,
transformer_engine=False,
fp8=False,
fp8_e4m3=False,
fp8_hybrid=False,
fp8_margin=0,
fp8_interval=1,
fp8_amax_history_len=1,
fp8_amax_compute_algo='most_recent',
use_emha=False,
normalize_attention_scores=True,
num_moe_experts=1,
moe_frequency=1,
moe_dropout=0.0,
):
super(ParallelTransformer, self).__init__()
if kv_channels is None:
assert (
hidden_size % num_attention_heads == 0
), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'
kv_channels = hidden_size // num_attention_heads
self.fp32_residual_connection = fp32_residual_connection
self.pre_process = pre_process
self.post_process = post_process
self.input_tensor = None
self.self_attn_mask_type = self_attn_mask_type
self.model_type = model_type
self.normalization = normalization
self.transformer_block_type = transformer_block_type
self.layer_type = layer_type
self.activations_checkpoint_method = activations_checkpoint_method
self.activations_checkpoint_num_layers = activations_checkpoint_num_layers
self.activations_checkpoint_granularity = activations_checkpoint_granularity
self.activations_checkpoint_layers_per_pipeline = activations_checkpoint_layers_per_pipeline
if self.activations_checkpoint_granularity:
if self.activations_checkpoint_granularity == 'selective':
if self.activations_checkpoint_method == 'uniform':
logging.info(
(
f'Using uniform activation checkpointing with granularity selective forces all layers to use checkpointing.'
)
)
elif self.activations_checkpoint_method == 'block':
logging.info(
(
f'Using block activation checkpointing requires activations_checkpoint_num_layers to be set.'
f'Got: {self.activations_checkpoint_num_layers}. Setting to 1 by default.'
)
)
else:
raise ValueError(
f'activations_checkpoint_method should be "uniform" or "block" when using granularity selective.'
)
elif self.activations_checkpoint_granularity == 'full':
if self.activations_checkpoint_method in ['uniform', 'block']:
if not self.activations_checkpoint_num_layers:
logging.info(
(
f'Using uniform or block activation checkpointing requires activations_checkpoint_num_layers to be set.'
f'Got: {self.activations_checkpoint_num_layers}. Setting to 1 by default.'
)
)
else:
raise ValueError(
f'activations_checkpoint_method should be "uniform" or "block" when using granularity full.'
)
else:
raise ValueError(f'activations_checkpoint_granularity should be "selective" or "full".')
self.sequence_parallel = sequence_parallel
self.transformer_engine = transformer_engine
self.fp8 = fp8
self.fp8_e4m3 = fp8_e4m3
self.fp8_hybrid = fp8_hybrid
self.fp8_margin = fp8_margin
self.fp8_interval = fp8_interval
self.fp8_amax_history_len = fp8_amax_history_len
self.fp8_amax_compute_algo = fp8_amax_compute_algo
self.fp8_recipe = None
if self.fp8:
if self.fp8_e4m3:
fp8_format = recipe.Format.E4M3
elif self.fp8_hybrid:
fp8_format = recipe.Format.HYBRID
self.fp8_recipe = recipe.DelayedScaling(
margin=self.fp8_margin,
interval=self.fp8_interval,
fp8_format=fp8_format,
amax_history_len=self.fp8_amax_history_len,
amax_compute_algo=self.fp8_amax_compute_algo,
)
self.is_first_microbatch = True
self.microbatch_count = 0 # transformer engine forward needs to know if it is working on the first microbatch
self.checkpoint_core_attention = (
activations_checkpoint_granularity == 'selective'
) # transformer engine forward allows for more granular selective checkpointing
if self.model_type == ModelType.encoder_or_decoder:
assert (
num_layers % parallel_state.get_pipeline_model_parallel_world_size() == 0
), 'num_layers must be divisible by pipeline_model_parallel_size'
assert moe_frequency <= num_layers, 'MoE frequency must be <= number of transformer layers'
# TODO: Add similar assert for encoder-decoder.
self.num_layers = self.get_num_layers(num_layers)
# Transformer layers.
def build_layer(layer_number):
if isinstance(layer_type, list):
lt = layer_type[layer_number - 1]
else:
lt = layer_type
if self.transformer_engine:
return AutocastTransformerLayer(
hidden_size=hidden_size,
ffn_hidden_size=ffn_hidden_size,
layernorm_epsilon=layernorm_epsilon,
num_attention_heads=num_attention_heads,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
hidden_dropout=hidden_dropout,
attention_dropout=attention_dropout,
layer_number=layer_number + layer_number_offset,
kv_channels=kv_channels,
self_attn_mask_type=self_attn_mask_type.name,
tp_size=parallel_state.get_tensor_model_parallel_world_size(),
params_dtype=torch.float32, # dtype params are initialized in
get_rng_state_tracker=tensor_parallel.random.get_cuda_rng_tracker,
fuse_wgrad_accumulation=gradient_accumulation_fusion,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
seq_length=None, # used for jit warmup
micro_batch_size=None, # used for jit warmup
sequence_parallel=sequence_parallel,
apply_residual_connection_post_layernorm=False,
autocast_dtype=precision,
use_emha=use_emha,
)
else:
return ParallelTransformerLayer(
init_method=init_method,
output_layer_init_method=output_layer_init_method,
layer_number=layer_number + layer_number_offset,
hidden_size=hidden_size,
ffn_hidden_size=ffn_hidden_size,
num_attention_heads=num_attention_heads,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
layer_type=lt,
self_attn_mask_type=self_attn_mask_type,
precision=precision,
fp32_residual_connection=fp32_residual_connection,
layernorm_epsilon=layernorm_epsilon,
hidden_dropout=hidden_dropout,
attention_dropout=attention_dropout,
ffn_dropout=ffn_dropout,
use_cpu_initialization=use_cpu_initialization,
bias_activation_fusion=bias_activation_fusion,
bias_dropout_add_fusion=bias_dropout_add_fusion,
masked_softmax_fusion=masked_softmax_fusion,
gradient_accumulation_fusion=gradient_accumulation_fusion,
persist_layer_norm=persist_layer_norm,
openai_gelu=openai_gelu,
onnx_safe=onnx_safe,
activation=activation,
megatron_legacy=megatron_legacy,
bias=bias,
chunk_size=chunk_size,
normalization=normalization,
transformer_block_type=transformer_block_type,
headscale=headscale,
activations_checkpoint_granularity=activations_checkpoint_granularity,
sequence_parallel=sequence_parallel,
normalize_attention_scores=normalize_attention_scores,
num_moe_experts=num_moe_experts,
moe_frequency=moe_frequency,
moe_dropout=moe_dropout,
)
if parallel_state.get_virtual_pipeline_model_parallel_world_size() is not None:
assert num_layers % parallel_state.get_virtual_pipeline_model_parallel_world_size() == 0, (
'num_layers_per_stage must be divisible by ' 'virtual_pipeline_model_parallel_size'
)
assert self.model_type.value != 2, f'virtual pipeline parallel currently only supported for GPT'
# Number of layers in each model chunk is the number of layers in the stage,
# divided by the number of model chunks in a stage.
self.num_layers = self.num_layers // parallel_state.get_virtual_pipeline_model_parallel_world_size()
# With 8 layers, 2 stages, and 4 model chunks, we want an assignment of
# layers to stages like (each list is a model chunk):
# Stage 0: [0] [2] [4] [6]
# Stage 1: [1] [3] [5] [7]
# With 8 layers, 2 stages, and 2 virtual stages, we want an assignment of
# layers to stages like (each list is a model chunk):
# Stage 0: [0, 1] [4, 5]
# Stage 1: [2, 3] [6, 7]
offset = parallel_state.get_virtual_pipeline_model_parallel_rank() * (
num_layers // parallel_state.get_virtual_pipeline_model_parallel_world_size()
) + (parallel_state.get_pipeline_model_parallel_rank() * self.num_layers)
else:
# Each stage gets a contiguous set of layers.
if (
self.model_type == ModelType.encoder_and_decoder
and parallel_state.get_pipeline_model_parallel_world_size() > 1
):
pipeline_rank = parallel_state.get_pipeline_model_parallel_rank()
if layer_type == LayerType.encoder:
offset = pipeline_rank * self.num_layers
else:
num_ranks_in_enc = parallel_state.get_pipeline_model_parallel_split_rank()
offset = (pipeline_rank - num_ranks_in_enc) * self.num_layers
else:
offset = parallel_state.get_pipeline_model_parallel_rank() * self.num_layers
self.layers = torch.nn.ModuleList([build_layer(i + 1 + offset) for i in range(self.num_layers)])
if self.post_process and self.transformer_block_type != 'post_ln':
# Final layer norm before output.
if normalization == 'layernorm':
self.final_layernorm = get_layer_norm(
hidden_size, layernorm_epsilon, persist_layer_norm, sequence_parallel=sequence_parallel
)
elif normalization == 'layernorm1p':
self.final_layernorm = LayerNorm1P(
hidden_size, layernorm_epsilon, sequence_parallel_enabled=sequence_parallel
)
else:
self.final_layernorm = MixedFusedRMSNorm(hidden_size, layernorm_epsilon)
def _get_layer(self, layer_number):
return self.layers[layer_number]
def get_num_layers(self, num_layers):
"""Compute the number of transformer layers resident on the current rank."""
if parallel_state.get_pipeline_model_parallel_world_size() > 1:
if self.model_type == ModelType.encoder_and_decoder:
assert parallel_state.get_pipeline_model_parallel_split_rank() is not None
num_ranks_in_encoder = parallel_state.get_pipeline_model_parallel_split_rank()
num_ranks_in_decoder = parallel_state.get_pipeline_model_parallel_world_size() - num_ranks_in_encoder
if self.layer_type == LayerType.encoder:
assert (
num_layers % num_ranks_in_encoder == 0
), 'num_layers must be divisible by number of ranks given to encoder'
elif self.layer_type == LayerType.decoder:
assert (
num_layers % num_ranks_in_decoder == 0
), 'num_layers must be divisible by number of ranks given to decoder'
else:
raise ValueError(f"Unknown layer type {self.layer_type}")
if parallel_state.is_pipeline_stage_before_split():
num_layers = num_layers // num_ranks_in_encoder
else:
num_layers = num_layers // num_ranks_in_decoder
else:
assert (
num_layers % parallel_state.get_pipeline_model_parallel_world_size() == 0
), 'num_layers must be divisible by pipeline_model_parallel_size'
num_layers = num_layers // parallel_state.get_pipeline_model_parallel_world_size()
return num_layers
def _checkpointed_forward(
self,
hidden_states,
attention_mask,
encoder_output,
enc_dec_attn_mask,
rotary_pos_emb,
self_attention_relative_position_bias,
cross_attention_relative_position_bias,
checkpoint_activations_all_layers,
):
"""Forward method with activation checkpointing."""
def custom(start, end):
if self.transformer_engine:
def custom_forward(*inputs):
hidden_states = inputs[0]
attention_mask = inputs[1]
encoder_output = inputs[2]
enc_dec_attn_mask = inputs[3]
for index in range(start, end):
layer = self._get_layer(index)
hidden_states = layer(
hidden_states,
attention_mask,
encoder_output=encoder_output,
enc_dec_attn_mask=enc_dec_attn_mask,
inference_params=None,
is_first_microbatch=self.is_first_microbatch,
checkpoint_core_attention=False,
)
return hidden_states
else:
def custom_forward(*inputs):
if len(inputs) == 9:
hidden_states = inputs[0]
attention_mask = inputs[1]
encoder_output = inputs[2]
enc_dec_attn_mask = inputs[3]
rotary_pos_emb = (inputs[4], inputs[5], inputs[6])
self_attention_relative_position_bias = inputs[7]
cross_attention_relative_position_bias = inputs[8]
elif len(inputs) == 10:
hidden_states = (inputs[0], inputs[1])
attention_mask = inputs[2]
encoder_output = inputs[3]
enc_dec_attn_mask = inputs[4]
rotary_pos_emb = (inputs[5], inputs[6], inputs[7])
self_attention_relative_position_bias = inputs[8]
cross_attention_relative_position_bias = inputs[9]
else:
hidden_states = inputs[0]
attention_mask = inputs[1]
encoder_output = inputs[2]
enc_dec_attn_mask = inputs[3]
rotary_pos_emb = inputs[4]
self_attention_relative_position_bias = inputs[5]
cross_attention_relative_position_bias = inputs[6]
for index in range(start, end):
layer = self._get_layer(index)
hidden_states = layer(
hidden_states=hidden_states,
attention_mask=attention_mask,
encoder_output=encoder_output,
enc_dec_attn_mask=enc_dec_attn_mask,
rotary_pos_emb=rotary_pos_emb,
self_attention_relative_position_bias=self_attention_relative_position_bias,
cross_attention_relative_position_bias=cross_attention_relative_position_bias,
)
if isinstance(hidden_states, tuple):
pass
else:
hidden_states = hidden_states.contiguous()
return hidden_states
return custom_forward
# Make sure memory is freed.
tensor_parallel.reset_checkpointed_activations_memory_buffer()
if self.activations_checkpoint_method == 'uniform':
# Uniformly divide the total number of Transformer layers and checkpoint
# the input activation of each divided chunk.
            # This further reduces memory usage by storing fewer activation checkpoints.
l = 0
while l < self.num_layers:
if isinstance(hidden_states, tuple):
hidden_tuple = (hidden_states[0], hidden_states[1])
else:
hidden_tuple = (hidden_states,)
middle_tuple = (
attention_mask,
encoder_output,
enc_dec_attn_mask,
)
if rotary_pos_emb is None:
rot_tuple = (rotary_pos_emb,)
else:
rot_tuple = (rotary_pos_emb[0], rotary_pos_emb[1], rotary_pos_emb[2])
final_tuple = (self_attention_relative_position_bias, cross_attention_relative_position_bias)
arg_tuple = hidden_tuple + middle_tuple + rot_tuple + final_tuple
if self.transformer_engine:
hidden_states = te_checkpoint(
custom(l, l + self.activations_checkpoint_num_layers),
False,
tensor_parallel.random.get_cuda_rng_tracker,
parallel_state.get_tensor_model_parallel_group(),
*arg_tuple,
)
else:
hidden_states = tensor_parallel.checkpoint(
custom(l, l + self.activations_checkpoint_num_layers), False, *arg_tuple
)
l += self.activations_checkpoint_num_layers
elif self.activations_checkpoint_method == 'block':
# When pipeline-parallel size > 1 and 'num_micro_batches_with_partial_activation_checkpoints' = int,
            # pipeline scheduling can force checkpointing of all layers or only some layers in a micro-batch.
if checkpoint_activations_all_layers:
activations_checkpoint_num_layers = self.num_layers
else:
activations_checkpoint_num_layers = self.activations_checkpoint_num_layers
if (
parallel_state.get_pipeline_model_parallel_world_size() > 0
and self.activations_checkpoint_layers_per_pipeline is not None
):
# Decrease the number of layers to checkpoint at later pipeline stages
activations_checkpoint_num_layers -= int(
parallel_state.get_pipeline_model_parallel_rank()
* self.activations_checkpoint_layers_per_pipeline
)
# Checkpoint the input activation of only a set number of individual
# Transformer layers and skip the rest.
            # This makes full use of device memory by removing redundant re-computation.
for l in range(self.num_layers):
if isinstance(hidden_states, tuple):
hidden_tuple = (hidden_states[0], hidden_states[1])
else:
hidden_tuple = (hidden_states,)
middle_tuple = (
attention_mask,
encoder_output,
enc_dec_attn_mask,
)
if rotary_pos_emb is None:
rot_tuple = (rotary_pos_emb,)
else:
rot_tuple = (rotary_pos_emb[0], rotary_pos_emb[1], rotary_pos_emb[2])
final_tuple = (self_attention_relative_position_bias, cross_attention_relative_position_bias)
arg_tuple = hidden_tuple + middle_tuple + rot_tuple + final_tuple
if l < activations_checkpoint_num_layers:
if self.transformer_engine:
hidden_states = te_checkpoint(
custom(l, l + 1),
False,
tensor_parallel.random.get_cuda_rng_tracker,
parallel_state.get_tensor_model_parallel_group(),
*arg_tuple,
)
else:
hidden_states = tensor_parallel.checkpoint(custom(l, l + 1), False, *arg_tuple)
else:
hidden_states = custom(l, l + 1)(*arg_tuple)
else:
raise ValueError("Invalid activation checkpoint method.")
return hidden_states
def set_input_tensor(self, input_tensor):
"""Set input tensor to be used instead of forward()'s input.
When doing pipeline parallelism the input from the previous
stage comes from communication, not from the input, so the
model's forward_step_func won't have it. This function is thus
used by internal code to bypass the input provided by the
forward_step_func"""
self.input_tensor = input_tensor
def forward(
self,
hidden_states,
attention_mask,
layer_past=None,
get_key_value=False,
encoder_output=None,
enc_dec_attn_mask=None,
set_inference_key_value_memory=False,
inference_max_sequence_len=None,
rotary_pos_emb=None, # list of positional embedding tensors, first one self attention, second one and third one are for cross attention (q, k)
retrieved_emb=None, # tensor of retrieved embedding of shape [b, k, r, n, d]
self_attention_relative_position_bias=None,
cross_attention_relative_position_bias=None,
checkpoint_activations_all_layers=None,
):
# Checks.
if inference_max_sequence_len:
assert self.activations_checkpoint_method is None, 'inference does not work with activation checkpointing'
if layer_past is not None:
assert get_key_value, 'for not None values in layer_past, ' 'expected get_key_value to be set'
if get_key_value:
assert self.activations_checkpoint_method is None, (
'get_key_value does not work with ' 'activation checkpointing'
)
if not self.pre_process:
# See set_input_tensor()
hidden_states = self.input_tensor
# TODO: @Yi Dong, what should this be?
if retrieved_emb is not None:
assert len(retrieved_emb.shape) == 5
# this is retrieval decoder, need special transpose
encoder_output = rearrange(retrieved_emb, 'b k r n d -> k r n b d').contiguous()
"""
is_first_microbatch is an optimization parameter for transformer engine.
It indicates if the current step in the forward pass is the first in a gradient accumulation cycle.
If set, FP8 weights are cached and some minor optimizations are applied to fuse_wgrad_accumulation
"""
from apex.transformer.pipeline_parallel.utils import _GLOBAL_NUM_MICROBATCHES_CALCULATOR
num_micro_batches = getattr(_GLOBAL_NUM_MICROBATCHES_CALCULATOR, 'num_micro_batches', 1)
if self.sequence_parallel:
rng_context = tensor_parallel.random.get_cuda_rng_tracker().fork()
else:
rng_context = nullcontext()
with rng_context:
if self.activations_checkpoint_granularity == 'full':
                hidden_states = self._checkpointed_forward(
                    hidden_states,
                    attention_mask,
                    encoder_output,
                    enc_dec_attn_mask,
                    rotary_pos_emb,
                    self_attention_relative_position_bias,
                    cross_attention_relative_position_bias,
                    checkpoint_activations_all_layers,
                )
else:
fp8_context = nullcontext()
with fp8_context:
if self.activations_checkpoint_granularity == 'full' and self.activations_checkpoint_num_layers > 0:
hidden_states = self._checkpointed_forward(
hidden_states,
attention_mask,
encoder_output,
enc_dec_attn_mask,
rotary_pos_emb,
self_attention_relative_position_bias,
cross_attention_relative_position_bias,
checkpoint_activations_all_layers,
)
else:
if get_key_value:
presents = []
for index in range(self.num_layers):
layer = self._get_layer(index)
past = None
if layer_past is not None:
past = layer_past[index]
if self.activations_checkpoint_granularity == 'selective':
# When pipeline-parallel size > 1 and 'num_micro_batches_with_partial_activation_checkpoints' = int,
                                # pipeline scheduling can force checkpointing of all layers or only some layers in a micro-batch.
if (
checkpoint_activations_all_layers == True
or self.activations_checkpoint_method == 'uniform'
):
checkpoint_core_attention = True
elif self.activations_checkpoint_method == 'block':
activations_checkpoint_num_layers = self.activations_checkpoint_num_layers
# Decrease the number of layers to checkpoint at later pipeline stages
if self.activations_checkpoint_layers_per_pipeline is not None:
activations_checkpoint_num_layers -= int(
parallel_state.get_pipeline_model_parallel_rank()
* self.activations_checkpoint_layers_per_pipeline
)
checkpoint_core_attention = index < activations_checkpoint_num_layers
else:
checkpoint_core_attention = False
if self.transformer_engine:
inference_params = None
hidden_states = layer(
hidden_states,
attention_mask,
encoder_output=encoder_output,
enc_dec_attn_mask=enc_dec_attn_mask,
inference_params=inference_params,
is_first_microbatch=self.is_first_microbatch,
checkpoint_core_attention=checkpoint_core_attention,
)
else:
hidden_states = layer(
hidden_states,
attention_mask,
encoder_output=encoder_output,
enc_dec_attn_mask=enc_dec_attn_mask,
layer_past=past,
get_key_value=get_key_value,
set_inference_key_value_memory=set_inference_key_value_memory,
inference_max_sequence_len=inference_max_sequence_len,
rotary_pos_emb=rotary_pos_emb,
self_attention_relative_position_bias=self_attention_relative_position_bias,
cross_attention_relative_position_bias=cross_attention_relative_position_bias,
checkpoint_core_attention=checkpoint_core_attention,
)
# Skip counter update for eval and activation checkpointing
if torch.is_grad_enabled() and self.training:
self.microbatch_count += 1
if self.microbatch_count % num_micro_batches == 0:
self.microbatch_count = 0
self.is_first_microbatch = True
else:
self.is_first_microbatch = False
output = hidden_states
# Final layer norm.
if self.post_process:
# only apply the final_layernorm for pre-ln
if self.transformer_block_type != 'post_ln':
output = self.final_layernorm(hidden_states)
if get_key_value:
output = [output, presents]
return output
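# A minimal, self-contained sketch of the dtype-dispatch pattern used by
# AutocastTransformerLayer.forward above: fp32 calls the parent forward directly,
# while fp16/bf16 wrap the call in a torch.autocast context. The class below is a
# hypothetical toy example (not part of NeMo) and assumes a CUDA device when called.
class _AutocastDispatchSketch(torch.nn.Linear):
    def __init__(self, in_features: int, out_features: int, autocast_dtype=torch.float16):
        super().__init__(in_features, out_features)
        self.autocast_dtype = autocast_dtype

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # fp32: run the parent forward without any autocast context.
        if self.autocast_dtype == torch.float32:
            return super().forward(x)
        # fp16 / bf16: run the parent forward under autocast so matmuls use the reduced dtype.
        with torch.autocast(device_type="cuda", dtype=self.autocast_dtype):
            return super().forward(x)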
| [] |
2024-01-10 | erman-gurses/SHARK | apps~stable_diffusion~web~ui~h2ogpt.py | import gradio as gr
import torch
import os
from pathlib import Path
from transformers import (
AutoModelForCausalLM,
)
from apps.stable_diffusion.web.ui.utils import available_devices
from apps.language_models.langchain.enums import (
DocumentChoices,
LangChainAction,
)
import apps.language_models.langchain.gen as gen
from gpt_langchain import (
path_to_docs,
create_or_update_db,
)
from apps.stable_diffusion.src import args
def user(message, history):
# Append the user's message to the conversation history
return "", history + [[message, ""]]
sharkModel = 0
h2ogpt_model = 0
# NOTE: Each `model_name` should have its own start message
start_message = """
SHARK DocuChat
Chat with an AI, contextualized with provided files.
"""
def create_prompt(history):
system_message = start_message
for item in history:
print("His item: ", item)
conversation = "<|endoftext|>".join(
[
"<|endoftext|><|answer|>".join([item[0], item[1]])
for item in history
]
)
msg = system_message + conversation
msg = msg.strip()
return msg
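# For illustration (hypothetical values): with history = [["Summarize this file.", ""]],
# create_prompt() returns start_message followed by
#   "Summarize this file.<|endoftext|><|answer|>"
# i.e. each [user, bot] pair is joined with "<|endoftext|><|answer|>" and successive
# turns are separated by "<|endoftext|>", matching the prompt_dict used in chat() below.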
def chat(curr_system_message, history, device, precision):
args.run_docuchat_web = True
global h2ogpt_model
global sharkModel
global h2ogpt_tokenizer
global model_state
global langchain
global userpath_selector
from apps.language_models.langchain.h2oai_pipeline import generate_token
if h2ogpt_model == 0:
if "cuda" in device:
shark_device = "cuda"
elif "sync" in device:
shark_device = "cpu"
elif "task" in device:
shark_device = "cpu"
elif "vulkan" in device:
shark_device = "vulkan"
else:
print("unrecognized device")
device = "cpu" if shark_device == "cpu" else "cuda"
args.device = shark_device
args.precision = precision
from apps.language_models.langchain.gen import Langchain
langchain = Langchain(device, precision)
h2ogpt_model, h2ogpt_tokenizer, _ = langchain.get_model(
load_4bit=True
if device == "cuda"
else False, # load model in 4bit if device is cuda to save memory
load_gptq="",
use_safetensors=False,
infer_devices=True,
device=device,
base_model="h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3",
inference_server="",
tokenizer_base_model="h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3",
lora_weights="",
gpu_id=0,
reward_type=None,
local_files_only=False,
resume_download=True,
use_auth_token=False,
trust_remote_code=True,
offload_folder=None,
compile_model=False,
verbose=False,
)
model_state = dict(
model=h2ogpt_model,
tokenizer=h2ogpt_tokenizer,
device=device,
base_model="h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3",
tokenizer_base_model="h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3",
lora_weights="",
inference_server="",
prompt_type=None,
prompt_dict=None,
)
from apps.language_models.langchain.h2oai_pipeline import (
H2OGPTSHARKModel,
)
sharkModel = H2OGPTSHARKModel()
prompt = create_prompt(history)
output_dict = langchain.evaluate(
model_state=model_state,
my_db_state=None,
instruction=prompt,
iinput="",
context="",
stream_output=True,
prompt_type="prompt_answer",
prompt_dict={
"promptA": "",
"promptB": "",
"PreInstruct": "<|prompt|>",
"PreInput": None,
"PreResponse": "<|answer|>",
"terminate_response": [
"<|prompt|>",
"<|answer|>",
"<|endoftext|>",
],
"chat_sep": "<|endoftext|>",
"chat_turn_sep": "<|endoftext|>",
"humanstr": "<|prompt|>",
"botstr": "<|answer|>",
"generates_leading_space": False,
},
temperature=0.1,
top_p=0.75,
top_k=40,
num_beams=1,
max_new_tokens=256,
min_new_tokens=0,
early_stopping=False,
max_time=180,
repetition_penalty=1.07,
num_return_sequences=1,
do_sample=False,
chat=True,
instruction_nochat=prompt,
iinput_nochat="",
langchain_mode="UserData",
langchain_action=LangChainAction.QUERY.value,
top_k_docs=3,
chunk=True,
chunk_size=512,
document_choice=[DocumentChoices.All_Relevant.name],
concurrency_count=1,
memory_restriction_level=2,
raise_generate_gpu_exceptions=False,
chat_context="",
use_openai_embedding=False,
use_openai_model=False,
hf_embedding_model="sentence-transformers/all-MiniLM-L6-v2",
db_type="chroma",
n_jobs=-1,
first_para=False,
max_max_time=60 * 2,
model_state0=model_state,
model_lock=True,
user_path=userpath_selector.value,
)
output = generate_token(sharkModel, **output_dict)
for partial_text in output:
history[-1][1] = partial_text
yield history
return history
userpath_selector = gr.Textbox(
label="Document Directory",
value=str(os.path.abspath("apps/language_models/langchain/user_path/")),
interactive=True,
container=True,
)
with gr.Blocks(title="DocuChat") as h2ogpt_web:
with gr.Row():
supported_devices = available_devices
enabled = len(supported_devices) > 0
# show cpu-task device first in list for chatbot
supported_devices = supported_devices[-1:] + supported_devices[:-1]
supported_devices = [x for x in supported_devices if "sync" not in x]
print(supported_devices)
device = gr.Dropdown(
label="Device",
value=supported_devices[0]
if enabled
else "Only CUDA Supported for now",
choices=supported_devices,
interactive=enabled,
)
precision = gr.Radio(
label="Precision",
value="fp16",
choices=[
"int4",
"int8",
"fp16",
"fp32",
],
visible=True,
)
chatbot = gr.Chatbot(height=500)
with gr.Row():
with gr.Column():
msg = gr.Textbox(
label="Chat Message Box",
placeholder="Chat Message Box",
show_label=False,
interactive=enabled,
container=False,
)
with gr.Column():
with gr.Row():
submit = gr.Button("Submit", interactive=enabled)
stop = gr.Button("Stop", interactive=enabled)
clear = gr.Button("Clear", interactive=enabled)
system_msg = gr.Textbox(
start_message, label="System Message", interactive=False, visible=False
)
submit_event = msg.submit(
fn=user, inputs=[msg, chatbot], outputs=[msg, chatbot], queue=False
).then(
fn=chat,
inputs=[system_msg, chatbot, device, precision],
outputs=[chatbot],
queue=True,
)
submit_click_event = submit.click(
fn=user, inputs=[msg, chatbot], outputs=[msg, chatbot], queue=False
).then(
fn=chat,
inputs=[system_msg, chatbot, device, precision],
outputs=[chatbot],
queue=True,
)
stop.click(
fn=None,
inputs=None,
outputs=None,
cancels=[submit_event, submit_click_event],
queue=False,
)
clear.click(lambda: None, None, [chatbot], queue=False)
with gr.Blocks(title="DocuChat Upload") as h2ogpt_upload:
import pathlib
upload_path = None
database = None
database_directory = os.path.abspath(
"apps/language_models/langchain/db_path/"
)
def read_path():
global upload_path
filenames = [
[f]
for f in os.listdir(upload_path)
if os.path.isfile(os.path.join(upload_path, f))
]
filenames.sort()
return filenames
def upload_file(f):
names = []
for tmpfile in f:
name = tmpfile.name.split("/")[-1]
basename = os.path.join(upload_path, name)
with open(basename, "wb") as w:
with open(tmpfile.name, "rb") as r:
w.write(r.read())
update_or_create_db()
return read_path()
def update_userpath(newpath):
global upload_path
upload_path = newpath
pathlib.Path(upload_path).mkdir(parents=True, exist_ok=True)
return read_path()
def update_or_create_db():
global database
global upload_path
sources = path_to_docs(
upload_path,
verbose=True,
fail_any_exception=False,
n_jobs=-1,
chunk=True,
chunk_size=512,
url=None,
enable_captions=False,
captions_model=None,
caption_loader=None,
enable_ocr=False,
)
pathlib.Path(database_directory).mkdir(parents=True, exist_ok=True)
database = create_or_update_db(
"chroma",
database_directory,
"UserData",
sources,
False,
True,
True,
"sentence-transformers/all-MiniLM-L6-v2",
)
def first_run():
global database
if database is None:
update_or_create_db()
update_userpath(
os.path.abspath("apps/language_models/langchain/user_path/")
)
h2ogpt_upload.load(fn=first_run)
h2ogpt_web.load(fn=first_run)
with gr.Column():
text = gr.DataFrame(
col_count=(1, "fixed"),
type="array",
label="Documents",
value=read_path(),
)
with gr.Row():
upload = gr.UploadButton(
label="Upload documents",
file_count="multiple",
)
upload.upload(fn=upload_file, inputs=upload, outputs=text)
userpath_selector.render()
userpath_selector.input(
fn=update_userpath, inputs=userpath_selector, outputs=text
).then(fn=update_or_create_db)
| [] |
2024-01-10 | jeromeku/datasets | datasets~openwebtext~openwebtext.py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""
from __future__ import absolute_import, division, print_function
import os
import re
from itertools import chain
import datasets
_CITATION = """\
@misc{Gokaslan2019OpenWeb,
title={OpenWebText Corpus},
author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
  howpublished = {\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
year={2019}
}
"""
_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""
_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
class Openwebtext(datasets.GeneratorBasedBuilder):
"""The Open WebText dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
description="Plain text",
version=datasets.Version("1.0.0"),
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({"text": datasets.Value("string")}),
homepage="https://skylion007.github.io/OpenWebTextCorpus/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_URL)
owt_dir = os.path.join(dl_dir, "openwebtext")
subset_xzs = [
os.path.join(owt_dir, file_name)
for file_name in sorted(os.listdir(owt_dir))
if file_name.endswith("xz") # filter out ...xz.lock
]
ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
nested_txt_files = [
[
os.path.join(ex_dir, txt_file_name)
for txt_file_name in sorted(os.listdir(ex_dir))
if txt_file_name.endswith("txt")
]
for ex_dir in ex_dirs
]
txt_files = chain(*nested_txt_files)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
]
def _generate_examples(self, txt_files):
""" Yields examples. """
for idx, filepath in enumerate(txt_files):
with open(filepath, encoding="utf-8") as f:
yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
| [] |
2024-01-10 | ncats/Rare-Disease-Social-Media-Project | rdsmproj~tm_t2v~top2vec_topic_tools.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Collection of tools for analysis of topic modeling results from Top2Vec.
"""
from typing import Dict, Union, Optional
from pathlib import Path
import random
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import gensim
from gensim.models.coherencemodel import CoherenceModel
from gensim.corpora.dictionary import Dictionary
from rdsmproj import utils
def create_topic_sizes_dict(topic_sizes:list[int]) -> Dict[int, int]:
"""
Creates topic size distribution dictionary from the topic size list for Top2Vec models.
Parameters
----------
    topic_sizes: list[int]
        List of the number of documents assigned to each topic, ordered by topic number.
Returns
-------
Dictionary of topics and the number of documents that have that topic as their primary document
based on the highest topic score for that document.
"""
return {topic: int(size) for topic, size in enumerate(topic_sizes)}
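# Minimal usage sketch (hypothetical values): Top2Vec's get_topic_sizes() returns the
# per-topic document counts that this helper re-keys by topic number, e.g.
#   create_topic_sizes_dict([120, 45, 7]) -> {0: 120, 1: 45, 2: 7}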
def create_distplot(docs_per_topics:Dict[int, list[str]], name:str, path:Union[str,Path]):
"""
Creates a barplot of the distribution of different topics and the number of documents where
that topic is the most probable topic.
Parameters
----------
docs_per_topic: Dict[int, list[str]]
Dictionary with each key being the topic number and the value being the list of documents
that have that topic as their top topic based on probability.
name: str
Name of the collection of documents for use in title and saving file (e.g. 'CysticFibrosis')
path: str, Path
Path to where the figure will be saved to.
"""
plt.figure(figsize=(16,9))
if isinstance(docs_per_topics, dict):
x = [f'{key}' for key in docs_per_topics]
y = [len(value) if isinstance(value, list) else 0 for value in docs_per_topics.values()]
else:
x = [i for i in range(len(docs_per_topics))]
y = docs_per_topics
sns.barplot(x=x, y=y, color =[0,114/255,178/255])
#plt.xticks(fontsize='x-small')
plt.xticks(rotation='vertical')
plt.xlabel('Topic Number')
plt.ylabel('Number of Posts')
plt.title(f'Topic Distribution for {name}', weight='bold', fontsize='large')
plt.tight_layout()
plt.savefig(Path(path, f'{name}_postdistplot.png'), dpi=300)
plt.clf()
plt.close('all')
def create_coherence_model(model:Optional[gensim.models.basemodel.BaseTopicModel] = None,
topics:Optional[list[list[str]]] = None,
texts:Optional[list[list[str]]] = None,
id2word:Optional[Dictionary] = None,
corpus:Optional[list[tuple[int, int]]] = None,
coherence:str = 'c_v',
topn:int = 10,
processes:int = 1):
"""
Creates a gensim.models.coherencemodel.CoherenceModel object from either a model or list of
tokenized topics. Used to calculate coherence of a topic.
Parameters
----------
model: gensim.models.basemodel.BaseTopicModel (Optional, default None)
Pre-trained topic model provided if topics not provided. Currently supports LdaModel,
LdaMulticore, LdaMallet, and LdaVowpalWabbit.
topics: list[list[str]] (Optional, default None)
List of tokenized topics. id2word must be provided.
texts: list[list[str]] (Optional, default None)
Tokenized texts for use with sliding window based probability estimator ('c_something').
id2word: gensim.corpora.dictionary.Dictionary (Optional, default None)
If model present, not needed. If both provided, passed id2word will be used.
corpus: list[tuple[int, int]] (Optional, default None)
Document vectors made up of list of tuples with (word_id, word_frequency)
coherence: str (default 'c_v')
Currently through gensim supports following coherence measures: 'u_mass', 'c_v', 'c_uci',
and 'c_npmi. Coherence measure 'c_uci = 'c_pmi'.
topn: int (default 10)
Integer corresponding to number of top words to be extracted from each topic.
processes: int (default 1)
Number of processes to use, any value less than 1 will be num_cpus - 1.
Returns
-------
coherence_model: gensim.models.coherencemodel.CoherenceModel
CoherenceModel object used for building and maintaining a model for topic coherence.
"""
coherence_model = CoherenceModel(model = model,
topics = topics,
texts = texts,
dictionary= id2word,
corpus = corpus,
coherence = coherence,
topn = topn,
processes = processes)
return coherence_model
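# Minimal usage sketch (hypothetical data): scoring tokenized topics against a tokenized
# corpus with the default 'c_v' measure.
#   texts = [["gene", "therapy", "trial"], ["lung", "infection", "antibiotic"]]
#   id2word = Dictionary(texts)
#   topics = [["gene", "therapy", "trial"], ["lung", "infection", "antibiotic"]]
#   cm = create_coherence_model(topics=topics, texts=texts, id2word=id2word)
#   mean_coherence = cm.get_coherence()
#   per_topic = cm.get_coherence_per_topic()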
def create_coherence_distplot(coherence_values_per_topic:Union[list[float],
Dict[str, float]],
name:str,
path:Union[str,Path]):
"""
Create coherence distribution plot of the topics and their coherence values.
Parameters
----------
coherence_values_per_topic: Union[list[float], Dict[str, float]]
Coherence values per topic. Either a list with the topic number being the index of the
score or a dictionary where the key is the label and the score is the value.
name: str
Name of the collection of documents for use in title and saving file (e.g. 'CysticFibrosis')
path: str, Path
Path to where the figure will be saved to.
"""
plt.figure(figsize=(4.5,8))
sns.displot(coherence_values_per_topic, binwidth = 0.05)
plt.axvline(x=np.mean(coherence_values_per_topic), label='Average Coherence', linestyle='--')
plt.title(f'{name}\nTopic Coherence Distribution')
plt.xlabel('Topic Coherence')
plt.xlim([0.0, 1.05])
plt.ylabel('Topic Count')
plt.tight_layout()
plt.savefig(Path(path,f'{name}_coherencedistplot.png'), dpi=300)
plt.clf()
plt.close('all')
def create_word_dict(topic_words:list[list[str]],
word_scores:list[list[float]],
topic_nums:list[int]) -> Dict[int, Dict[str, float]]:
"""
Creates a dictionary of words and scores for each topic. For use with Top2Vec models to create
word score dictionaries for wordcloud creation.
Parameters
----------
topic_words: list[list[str]]
List of topics and their list of top words that make up that topic based on their scores.
word_scores: list[list[float]]
List of topics and the list of word scores that correspond to each word top word for that
topic.
topic_nums: list[int]
List of topic numbers.
Returns
-------
word_score_dict: Dict[int, Dict[str, float]]
Dictionary where keys are labels from topic_list and values are dictionaries of
keys = word:str and values = score:float) for each label.
"""
word_score_dict = {}
for topic in topic_nums:
words = topic_words[topic]
scores = [float(score) for score in word_scores[topic]]
word_score_dict[int(topic)] = dict(zip(words, scores))
return word_score_dict
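# Minimal usage sketch (hypothetical values): re-keying Top2Vec's parallel arrays into the
# nested dict consumed by create_wordcloud_subplots(), e.g.
#   create_word_dict([["gene", "therapy"], ["lung", "cough"]],
#                    [[0.9, 0.7], [0.8, 0.6]],
#                    [0, 1])
#   -> {0: {"gene": 0.9, "therapy": 0.7}, 1: {"lung": 0.8, "cough": 0.6}}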
def grey_color_func(word, font_size, position, orientation, random_state=None,
**kwargs):
return "hsl(0, 0%%, %d%%)" % random.randint(0, 60)
def create_wordcloud_subplots(data:Dict[int, Dict[str, float]],
suptitle:str,
path:Union[str,Path],
topics:Optional[list[list[str]]] = None,
max_words:Optional[int]=50,
context:Optional[str]='paper'):
"""
Creates wordcloud subplots for a top2vec model.
Parameters
----------
data: Dict[int, Dict[str, float]]
Word score dict. Dictionary where keys are labels from topic_list and values are
dictionaries of keys = word:str and values = score:float) for each label.
suptitle: str
Name of the collection of documents for use in title and saving file
(e.g. 'CysticFibrosis')
path: str, Path
Path to where the figure will be saved to.
topics: list[list[str]] (Optional, default None)
List of tokenized topics. id2word must be provided.
max_words: int (Optional, default 50)
Maximum number of words for each wordcloud.
context: str (Optional, default paper)
Name of context to pass to seaborn for plot style.
"""
cm = 1/2.54
sns.set_context(context)
sns.set_style(style='white')
if topics is None:
num_topics = len(data)
topics = list(data.keys())
else:
num_topics = len(topics)
if num_topics < 5:
num_cols = num_topics
fig_width = (16/5)*cm*num_cols
num_rows = 1
else:
num_cols = 5
fig_width = 16*cm
num_rows = int(np.ceil(num_topics/5))
widths = [400]*num_cols
heights = [500]*num_rows
fig_height = fig_width * sum(heights)/sum(widths)
fig, axs = plt.subplots(num_rows,num_cols, figsize=(fig_width, fig_height),
gridspec_kw = {'height_ratios': heights, 'wspace':0, 'hspace':0},
constrained_layout=True)
fig.suptitle(suptitle, weight='bold')
for n, ax in enumerate(axs.flat):
if n < len(topics):
try:
wordcloud = WordCloud(background_color='white', width=400, height=400,
max_words=max_words
).generate_from_frequencies(data[topics[n]])
ax.imshow(wordcloud.recolor(color_func=grey_color_func, random_state=3),
interpolation="bilinear")
ax.text(0, -5, f'{topics[n]}',weight='bold')
ax.axis('off')
except OSError:
ax.axis('off')
pass
else:
ax.axis('off')
plt.savefig(Path(path, f'{suptitle}_wordcloud.png'), dpi=300)
plt.clf()
plt.close('all')
class AnalyzeTopics:
"""
Class to call analysis tools to create files and visualizations to analyze the results of topic
modeling done.
Parameters
----------
model:
        Topic model. Currently supports models from Top2Vec.
subreddit_name: str
Name of the collection of documents for use in title and saving file
(e.g. 'r/CysticFibrosis')
model_name: str
Name of the embedding model.
tokenized_docs: list[list[str]]
Tokenized list of documents.
    id2word: Dict[int, str]
Mapping of word ids to words.
corpus: list[tuple[int, int]]
Document vectors made up of list of tuples with (word_id, word_frequency)
model_type: str ('LDA', 'Top2Vec')
Model type for model passed to class. Currently only supports gensim or Top2Vec models.
coherence: str (default 'c_v')
Currently through gensim supports following coherence measures: 'u_mass', 'c_v', 'c_uci',
and 'c_npmi. Coherence measure 'c_uci = 'c_pmi'.
path: Path, str (Optional, default None)
Path to store the analysis results files to.
"""
def __init__(self,
model,
subreddit_name:str,
model_name:str,
tokenized_docs:list[list[str]],
                 id2word:Dict[int, str],
corpus:list[tuple[int, int]],
model_type:str,
coherence:str='c_v',
path:Optional[Union[Path,str]]=None):
self.model_name = model_name
self.subreddit_name = subreddit_name
self.tokenized_docs = tokenized_docs
self.id2word = id2word
self.corpus = corpus
self.model_type = model_type
# Sets the path for analysis files and plots to be saved to.
if path is None:
results_path = utils.get_data_path('results')
self.path = Path(results_path, self.subreddit_name, self.model_name)
else:
self.path = path
utils.check_folder(self.path)
# Sets the model for use in analysis.
self.model = model
# Retrieves topic sizes and numbers from the Top2Vec model.
topic_sizes, topic_nums = self.model.get_topic_sizes()
# Creates a dictionary of topic sizes.
topic_sizes_dict = create_topic_sizes_dict(topic_sizes)
# Saves the topic size dictionary.
utils.dump_json(topic_sizes_dict,
self.path,
f'{self.model_name}_topic_sizes_Top2Vec')
# Creates a distribtion plot of number of documents for each topic.
create_distplot(topic_sizes, f'{self.model_name} Top2Vec', self.path)
# Retrieves topic words and their scores from the model.
topic_words, word_scores, _ = model.get_topics()
# Creates a list of topic words for each topic for use in coherence model creation.
topics = [list(words) for words in topic_words]
# Creates a dictionary of word scores. For use in creating wordclouds.
word_score_dict = create_word_dict(topic_words, word_scores, topic_nums)
# Saves the word score dictionary.
utils.dump_json(word_score_dict,
self.path,
f'{self.model_name}_word_score_dict_Top2Vec')
# Prints the number of topics and the mean coherence of derived topics for the model.
num_topics = self.model.get_num_topics()
coherence_value = {}
coherence_values_per_topic = []
for coherence_measure in ['c_v', 'c_npmi', 'u_mass', 'c_uci']:
try:
coherence_model = create_coherence_model(topics=topics,
texts=tokenized_docs,
id2word=id2word,
coherence=coherence_measure)
coherence_value[coherence_measure] = coherence_model.get_coherence()
#print(f'{coherence_measure}: {coherence_value[coherence_measure]}')
if coherence_measure == coherence:
coherence_values_per_topic = coherence_model.get_coherence_per_topic()
# Saves the coherence values for each topic.
utils.dump_json(coherence_values_per_topic,
self.path,
f'{self.model_name}_coherence_values_per_topic_Top2Vec')
except ValueError:
coherence_value[coherence_measure] = np.nan
#print(f'{coherence_measure}: {coherence_value[coherence_measure]}')
if coherence_values_per_topic:
# Saves the coherence value dictionary.
utils.dump_json(coherence_value, self.path, f'{self.model_name}_coherence_values')
# Creates the coherence distribution plot of coherence values for each topic with the
# dashed line showing the mean coherence value.
if num_topics > 1:
create_coherence_distplot(coherence_values_per_topic,
f'{self.model_name} Top2Vec',
self.path)
print(f'>>> Model: {self.model_name}')
print(f'>>> Num Topics: {num_topics}')
print(f'>>> Coherence ({coherence}): {coherence_value[coherence]}')
# Creates wordcloud figure.
if num_topics > 1:
create_wordcloud_subplots(word_score_dict,
suptitle = self.subreddit_name,
path=self.path)
else:
print(f'No coherence model was created for {self.model_name}')
| [] |
2024-01-10 | devprashantt/picstone-generative-ai | server~utils~generate_story.py | import openai
def generate_story(tags, tag_analysis, image_text, story_title, desc, themes, ai_content):
# Extract detected moods, sentiments, and tones from tag_analysis
detected_moods = tag_analysis.get("moods", [])
detected_sentiments = tag_analysis.get("sentiments", [])
detected_tones = tag_analysis.get("tones", [])
# Define default values if not detected
default_mood = "neutral"
default_sentiment = "neutral"
default_tone = "calm"
# Use the detected values if available; otherwise, use defaults
mood = ', '.join(detected_moods) if detected_moods else default_mood
sentiment = ', '.join(
detected_sentiments) if detected_sentiments else default_sentiment
tone = ', '.join(detected_tones) if detected_tones else default_tone
# Create a prompt with specific instructions for ChatGPT
prompt = f"""Generate a captivating story based on the provided image and information. The image analysis has extracted tags, and further analysis has revealed moods: {mood}, sentiments: {sentiment}, and tones: {tone}. The OCR applied to the image has provided the following text: {image_text}. The user has contributed a story titled "{story_title}" with the description: "{desc}" and themes: {themes}. Additionally, an AI content analysis has generated the following caption: "{ai_content}". Create a narrative that seamlessly incorporates these elements into a coherent and engaging story."""
try:
# Generate a story/poem using ChatGPT
response = openai.Completion.create(
engine="text-davinci-003",
temperature=0.7, # Adjust temperature for creativity
max_tokens=1000, # Adjust max_tokens for desired length
prompt=prompt,
n=1 # Ensure only one response is generated
)
return response.choices[0].text
except Exception as e:
print(f"Error generating poem/story from ChatGPT: {str(e)}")
raise e
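# Minimal usage sketch (hypothetical arguments; assumes openai.api_key has already been
# configured by the caller, as elsewhere in the server):
#   story = generate_story(
#       tags=["beach", "sunset"],
#       tag_analysis={"moods": ["serene"], "sentiments": ["positive"], "tones": ["calm"]},
#       image_text="Welcome to the coast",
#       story_title="An Evening by the Sea",
#       desc="A quiet walk along the shore",
#       themes="travel, nostalgia",
#       ai_content="A couple walking on a beach at sunset",
#   )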
| [
"Generate a captivating story based on the provided image and information. The image analysis has extracted tags, and further analysis has revealed moods: PLACEHOLDER, sentiments: PLACEHOLDER, and tones: PLACEHOLDER. The OCR applied to the image has provided the following text: PLACEHOLDER. The user has contributed a story titled \"PLACEHOLDER\" with the description: \"PLACEHOLDER\" and themes: PLACEHOLDER. Additionally, an AI content analysis has generated the following caption: \"PLACEHOLDER\". Create a narrative that seamlessly incorporates these elements into a coherent and engaging story."
] |
2024-01-10 | devprashantt/picstone-generative-ai | server~utils~themed_story.py | import openai
def generate_themed_story(
theme
):
# Create a prompt with specific instructions for ChatGPT
prompt = f"Generate an intriguing story based on the {theme} theme. The story should include suspenseful events, unexpected twists, and engaging characters. Ensure that the story maintains a sense of {theme} throughout, keeping the user captivated until the resolution. Consider incorporating elements such as hidden clues, enigmatic settings, and characters with ambiguous motives. The generated story should be immersive and evoke a sense of curiosity. Keep the user engaged by introducing new elements that deepen the mystery and lead to a satisfying conclusion. Be creative and make the story dynamic and compelling."
try:
# Generate a story/poem using ChatGPT
response = openai.Completion.create(
engine="text-davinci-003",
temperature=0.7, # Adjust temperature for creativity
max_tokens=1000, # Adjust max_tokens for desired length
prompt=prompt,
n=1 # Ensure only one response is generated
)
return response.choices[0].text
except Exception as e:
print(f"Error generating poem/story from Server: {str(e)}")
return None
| [
"Generate an intriguing story based on the PLACEHOLDER theme. The story should include suspenseful events, unexpected twists, and engaging characters. Ensure that the story maintains a sense of PLACEHOLDER throughout, keeping the user captivated until the resolution. Consider incorporating elements such as hidden clues, enigmatic settings, and characters with ambiguous motives. The generated story should be immersive and evoke a sense of curiosity. Keep the user engaged by introducing new elements that deepen the mystery and lead to a satisfying conclusion. Be creative and make the story dynamic and compelling."
] |
2024-01-10 | Lynxye/dataframe-editor | dfeditor.py | import streamlit as st
from streamlit_chat import message
import pandas as pd
from pandasai import PandasAI
import numpy as np
from langchain.chat_models import ChatOpenAI
from langchain.agents import create_pandas_dataframe_agent
import os
# Retrieve the API key from the environment variables
api_key = os.getenv("OPENAI_API_KEY")
# Instantiate a LLM
from pandasai.llm.openai import OpenAI
llm = OpenAI(api_token=api_key)
# Initialize PandasAI
pandas_ai = PandasAI(llm)
# set page to wide mode
st.set_page_config(layout="wide")
# Check if DataFrame is already in session_state
if "df" not in st.session_state:
st.session_state.df = pd.DataFrame(columns=['Classification', 'Space Type', 'Room Name', 'Level', 'Room Count', 'Unit Count', 'NSF/Unit', 'NSF', 'Net to Gross Factor', 'GSF', 'Floor Finish', 'Wall Finish', 'Ceiling Finish'])
with st.sidebar:
st.subheader('Instructions')
st.info(
"""
- You can start by uploading a CSV file or start from scratch.
- Complete the input fields and click "Add to Table" to add data to the table.
- To delete a row, enter the row index and click "Delete Row". The index is zero-based, i.e., the first row is index 0.
- You can clear the entire data using the "Clear Data" button.
- Finally, you can save your data as a CSV file with a filename of your choice.
"""
)
# File uploader
uploaded_file = st.file_uploader("Choose a CSV file", type='csv')
# Clear data button
clear_data = st.button('Clear Data')
if uploaded_file is not None:
st.session_state.df = pd.read_csv(uploaded_file)
if clear_data:
st.session_state.df = pd.DataFrame(columns=['Classification', 'Space Type', 'Room Name', 'Level', 'Room Count', 'Unit Count', 'NSF/Unit', 'NSF', 'Net to Gross Factor', 'GSF', 'Floor Finish', 'Wall Finish', 'Ceiling Finish'])
st.success('Data cleared.')
# Delete row
row_index = st.number_input('Enter row index to delete', value=-1, min_value=-1)
delete = st.button('Delete Row')
if delete:
if row_index >= 0 and row_index < len(st.session_state.df):
st.session_state.df = st.session_state.df.drop(st.session_state.df.index[row_index])
st.session_state.df.reset_index(drop=True, inplace=True)
st.success(f'Row {row_index} deleted.')
# Input fields in sidebar
with st.sidebar:
st.subheader('Input Fields')
classification = st.text_input('Classification', value='Revenue & Fan Experience')
space_type = st.text_input('Space Type', value='Public Space')
room_name = st.text_input('Room Name', value='Concourse')
level = st.text_input('Level', value='Level 1')
room_count = st.number_input('Room Count', value=1, format="%i")
unit_count = st.number_input('Unit Count', value=1, format="%i")
nsf_per_unit = st.number_input('NSF/Unit', value=1, format="%i")
net_to_gross_factor = st.number_input('Net to Gross Factor', value=1.0)
floor_finish = st.selectbox('Floor Finish', options=list(range(1, 6)))
wall_finish = st.selectbox('Wall Finish', options=list(range(1, 6)))
ceiling_finish = st.selectbox('Ceiling Finish', options=list(range(1, 6)))
add_row = st.button('Add to Table')
if add_row:
gsf_value = room_count * unit_count * nsf_per_unit * net_to_gross_factor
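    # Worked example (hypothetical numbers): 2 rooms x 3 units x 150 NSF/unit x 1.3
    # net-to-gross factor = 1170 GSF, while NSF alone would be 2 x 3 x 150 = 900.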
df_new = pd.DataFrame({
'Classification': [classification],
'Space Type': [space_type],
'Room Name': [room_name],
'Level': [level],
'Room Count': [room_count],
'Unit Count': [unit_count],
'NSF/Unit': [nsf_per_unit],
'NSF': [room_count * unit_count * nsf_per_unit],
'Net to Gross Factor': [net_to_gross_factor],
'GSF': [np.round(gsf_value, 0)], # rounding the GSF value
'Floor Finish': [floor_finish],
'Wall Finish': [wall_finish],
'Ceiling Finish': [ceiling_finish]
})
st.session_state.df = pd.concat([st.session_state.df, df_new], axis=0)
st.session_state.df.reset_index(drop=True, inplace=True)
st.markdown(f"**Total GSF:** {np.round(st.session_state.df['GSF'].sum(), 0)}") # rounding the total GSF value
# Display the DataFrame
st.dataframe(st.session_state.df)
# Save DataFrame as CSV
file_name = st.text_input('Enter filename to save as CSV')
if st.button('Save DataFrame as CSV') and file_name:
st.session_state.df.to_csv(f'{file_name}.csv', index=False)
st.success(f'DataFrame saved as {file_name}.csv')
| [] |
2024-01-10 | ThiagoTrabach/cover-letter-gpt | cover-letter-gpt~helpers.py | import openai
import os
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.environ['OPENAI_KEY']
def gpt_get_completion(prompt, model="gpt-3.5-turbo"): # Andrew mentioned that the prompt/ completion paradigm is preferable for this class
messages = [{"role": "user", "content": prompt}]
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=0, # this is the degree of randomness of the model's output
)
return response
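if __name__ == "__main__":
    # Minimal usage sketch (assumes OPENAI_KEY is set in .env); the generated text is at
    # choices[0].message.content of the returned ChatCompletion response.
    demo_response = gpt_get_completion("Write a two-sentence cover letter opening.")
    print(demo_response["choices"][0]["message"]["content"])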
| [] |
2024-01-10 | AdrianAbeyta/openai_ros-1 | openai_ros~src~openai_ros~task_envs~task_envs_list.py | #!/usr/bin/env python
from gym.envs.registration import register
from gym import envs
def RegisterOpenAI_Ros_Env(task_env, max_episode_steps=10000):
"""
Registers all the ENVS supported in OpenAI ROS. This way we can load them
with variable limits.
Here is where you have to PLACE YOUR NEW TASK ENV, to be registered and accessible.
    return: False if the Task_Env wasn't registered, True if it was.
"""
###########################################################################
# MovingCube Task-Robot Envs
result = True
# Cubli Moving Cube
if task_env == 'MovingCubeOneDiskWalk-v0':
print("Import module")
# We have to import the Class that we registered so that it can be found afterwards in the Make
from openai_ros.task_envs.moving_cube import one_disk_walk
print("Importing register env")
# We register the Class through the Gym system
register(
id=task_env,
#entry_point='openai_ros:task_envs.moving_cube.one_disk_walk.MovingCubeOneDiskWalkEnv',
entry_point='openai_ros.task_envs.moving_cube.one_disk_walk:MovingCubeOneDiskWalkEnv',
max_episode_steps=max_episode_steps,
)
# Husarion Robot
elif task_env == 'HusarionGetToPosTurtleBotPlayGround-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.husarion.husarion_get_to_position_turtlebot_playground:HusarionGetToPosTurtleBotPlayGroundEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.husarion import husarion_get_to_position_turtlebot_playground
elif task_env == 'FetchTest-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.fetch.fetch_test_task:FetchTestEnv',
max_episode_steps=max_episode_steps,
)
# 50
# We have to import the Class that we registered so that it can be found afterwards in the Make
from openai_ros.task_envs.fetch import fetch_test_task
elif task_env == 'FetchSimpleTest-v0':
register(
id=task_env,
# entry_point='openai_ros:task_envs.fetch.fetch_simple_task.FetchSimpleTestEnv',
entry_point='openai_ros.task_envs.fetch.fetch_simple_task:FetchSimpleTestEnv',
max_episode_steps=max_episode_steps,
)
# We have to import the Class that we registered so that it can be found afterwards in the Make
from openai_ros.task_envs.fetch import fetch_simple_task
elif task_env == 'FetchPickAndPlace-v0':
register(
id=task_env,
# entry_point='openai_ros:task_envs.fetch.fetch_pick_and_place_task.FetchPickAndPlaceEnv',
entry_point='openai_ros.task_envs.fetch.fetch_pick_and_place_task:FetchPickAndPlaceEnv',
max_episode_steps=max_episode_steps,
)
# We have to import the Class that we registered so that it can be found afterwards in the Make
from openai_ros.task_envs.fetch import fetch_pick_and_place_task
elif task_env == 'FetchPush-v0':
register(
id=task_env,
# entry_point='openai_ros:task_envs.fetch.fetch_pick_and_place_task.FetchPushEnv',
# entry_point='openai_ros:task_envs.fetch.fetch_push.FetchPushEnv',
entry_point='openai_ros.task_envs.fetch.fetch_push:FetchPushEnv',
max_episode_steps=max_episode_steps,
)
# We have to import the Class that we registered so that it can be found afterwards in the Make
from openai_ros.task_envs.fetch import fetch_push
elif task_env == 'CartPoleStayUp-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.cartpole_stay_up.stay_up:CartPoleStayUpEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.cartpole_stay_up import stay_up
elif task_env == 'HopperStayUp-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.hopper.hopper_stay_up:HopperStayUpEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.hopper import hopper_stay_up
elif task_env == 'IriWamTcpToBowl-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.iriwam.tcp_to_bowl:IriWamTcpToBowlEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.iriwam import tcp_to_bowl
elif task_env == 'ParrotDroneGoto-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.parrotdrone.parrotdrone_goto:ParrotDroneGotoEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.parrotdrone import parrotdrone_goto
elif task_env == 'SawyerTouchCube-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.sawyer.learn_to_touch_cube:SawyerTouchCubeEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.sawyer import learn_to_touch_cube
elif task_env == 'ShadowTcGetBall-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.shadow_tc.learn_to_pick_ball:ShadowTcGetBallEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.shadow_tc import learn_to_pick_ball
elif task_env == 'SumitXlRoom-v0':
register(
id='SumitXlRoom-v0',
entry_point='openai_ros.task_envs.sumit_xl.sumit_xl_room:SumitXlRoom',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.sumit_xl import sumit_xl_room
elif task_env == 'MyTurtleBot2Maze-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.turtlebot2.turtlebot2_maze:TurtleBot2MazeEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.turtlebot2 import turtlebot2_maze
elif task_env == 'MyTurtleBot2Wall-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.turtlebot2.turtlebot2_wall:TurtleBot2WallEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.turtlebot2 import turtlebot2_wall
elif task_env == 'TurtleBot3World-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.turtlebot3.turtlebot3_world:TurtleBot3WorldEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.turtlebot3 import turtlebot3_world
########################################################### ADDED FOR WALRUS
elif task_env == 'WalrusTest-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.walrus.walrus_test:WalrusTestEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.walrus import walrus_test
elif task_env == 'WalrusCampus-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.walrus.walrus_campus:WalrusCampusEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.walrus import walrus_campus
elif task_env == 'WalrusNav-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.walrus.walrus_nav:WalrusNavEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.walrus import walrus_nav
elif task_env == 'WalrusStairs-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.walrus.walrus_stairs:WalrusStairsEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.walrus import walrus_stairs
elif task_env == 'WalrusBalance-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.walrus.walrus_balance:WalrusBalanceEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.walrus import walrus_balance
############################################################ END WALRUS ADDITIONS
elif task_env == 'WamvNavTwoSetsBuoys-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.wamv.wamv_nav_twosets_buoys:WamvNavTwoSetsBuoysEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.wamv import wamv_nav_twosets_buoys
# Add here your Task Envs to be registered
else:
result = False
###########################################################################
if result:
# We check that it was really registered
supported_gym_envs = GetAllRegisteredGymEnvs()
#print("REGISTERED GYM ENVS===>"+str(supported_gym_envs))
assert (task_env in supported_gym_envs), "The Task_Robot_ENV given is not Registered ==>" + \
str(task_env)
return result
def GetAllRegisteredGymEnvs():
"""
Returns a List of all the registered Envs in the system
return EX: ['Copy-v0', 'RepeatCopy-v0', 'ReversedAddition-v0', ... ]
"""
all_envs = envs.registry.all()
env_ids = [env_spec.id for env_spec in all_envs]
return env_ids
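# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): it assumes a ROS master is running
# and that the registration helper defined above has already been called for
# the task env you want, e.g. 'CartPoleStayUp-v0' from the branches above.
if __name__ == "__main__":
    import gym
    registered_ids = GetAllRegisteredGymEnvs()
    print("Number of registered Gym envs: %i" % len(registered_ids))
    if 'CartPoleStayUp-v0' in registered_ids:
        # Only works once the corresponding registration branch above has run
        # and the simulation workspace is in place.
        env = gym.make('CartPoleStayUp-v0')
        print("Created env with action space: %s" % str(env.action_space))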
| [] |
2024-01-10 | AdrianAbeyta/openai_ros-1 | openai_ros~src~openai_ros~robot_envs~sawyer_env.py | import numpy
import rospy
import time
import tf
from openai_ros import robot_gazebo_env
import intera_interface
import intera_external_devices
from intera_interface import CHECK_VERSION
from intera_core_msgs.msg import JointLimits
from sensor_msgs.msg import Image
from openai_ros.openai_ros_common import ROSLauncher
class SawyerEnv(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all SawyerEnv environments.
"""
def __init__(self, ros_ws_abspath):
"""
Initializes a new SawyerEnv environment.
        To check any topic we need to have the simulations running; we need to do two things:
        1) Unpause the simulation: without that, the stream of data doesn't flow. This is for simulations
        that are paused for whatever reason.
        2) If the simulation was already running for some reason, we need to reset the controllers.
        This has to do with the fact that some plugins with tf don't understand the reset of the simulation
        and need to be reset to work properly.
        The Sensors: The sensors accessible are the ones considered useful for AI learning.
        Sensor Topic List:
        * /robot/joint_limits: Joint limits (position, velocity and effort) of the Sawyer arm
Actuators Topic List:
* As actuator we will use a class to interface with the movements through commands.
Args:
"""
rospy.logdebug("Start SawyerEnv INIT...")
# Variables that we give through the constructor.
# None in this case
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="sawyer_gazebo",
launch_file_name="put_sawyer_in_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Internal Vars
        # Doesn't have any accessible controllers
        self.controllers_list = []
        # It doesn't use a namespace
        self.robot_name_space = ""
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(SawyerEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False,
reset_world_or_sim="WORLD")
rospy.logdebug("SawyerEnv unpause...")
self.gazebo.unpauseSim()
# self.controllers_object.reset_controllers()
# TODO: Fill it with the sensors
self._check_all_systems_ready()
rospy.Subscriber("/io/internal_camera/head_camera/image_raw",
Image, self._head_camera_image_raw_callback)
rospy.Subscriber("/io/internal_camera/right_hand_camera/image_raw",
Image, self._right_hand_camera_image_raw_callback)
self._setup_tf_listener()
self._setup_movement_system()
self.gazebo.pauseSim()
rospy.logdebug("Finished SawyerEnv INIT...")
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
rospy.logdebug("SawyerEnv check_all_systems_ready...")
self._check_all_sensors_ready()
rospy.logdebug("END SawyerEnv _check_all_systems_ready...")
return True
    # SawyerEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
rospy.logdebug("START ALL SENSORS READY")
# TODO: Here go the sensors like cameras and joint states
self._check_head_camera_image_raw_ready()
self._check_right_hand_camera_image_raw_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_head_camera_image_raw_ready(self):
self.head_camera_image_raw = None
rospy.logdebug(
"Waiting for /io/internal_camera/head_camera/image_raw to be READY...")
while self.head_camera_image_raw is None and not rospy.is_shutdown():
try:
self.head_camera_image_raw = rospy.wait_for_message(
"/io/internal_camera/head_camera/image_raw", Image, timeout=5.0)
rospy.logdebug(
"Current /io/internal_camera/head_camera/image_raw READY=>")
except:
rospy.logerr(
"Current /io/internal_camera/head_camera/image_raw not ready yet, retrying for getting head_camera_image_raw")
return self.head_camera_image_raw
def _check_right_hand_camera_image_raw_ready(self):
self.right_hand_camera_image_raw = None
rospy.logdebug(
"Waiting for /io/internal_camera/right_hand_camera/image_raw to be READY...")
while self.right_hand_camera_image_raw is None and not rospy.is_shutdown():
try:
self.right_hand_camera_image_raw = rospy.wait_for_message(
"/io/internal_camera/right_hand_camera/image_raw", Image, timeout=5.0)
rospy.logdebug(
"Current /io/internal_camera/right_hand_camera/image_raw READY=>")
except:
rospy.logerr(
"Current /io/internal_camera/right_hand_camera/image_raw not ready yet, retrying for getting right_hand_camera_image_raw")
return self.right_hand_camera_image_raw
def _head_camera_image_raw_callback(self, data):
self.head_camera_image_raw = data
def _right_hand_camera_image_raw_callback(self, data):
self.right_hand_camera_image_raw = data
def _setup_tf_listener(self):
"""
        Sets up the TF listener for getting the transforms you ask for.
"""
self.listener = tf.TransformListener()
def _setup_movement_system(self):
"""
Setup of the movement system.
:return:
"""
rp = intera_interface.RobotParams()
valid_limbs = rp.get_limb_names()
if not valid_limbs:
rp.log_message(("Cannot detect any limb parameters on this robot. "
"Exiting."), "ERROR")
return
rospy.loginfo("Valid Sawyer Limbs==>"+str(valid_limbs))
print("Getting robot state... ")
rs = intera_interface.RobotEnable(CHECK_VERSION)
init_state = rs.state().enabled
rospy.loginfo("Enabling robot...")
rs.enable()
self._map_actions_to_movement()
def _map_actions_to_movement(self, side="right", joint_delta=0.1):
self.limb = intera_interface.Limb(side)
try:
self.gripper = intera_interface.Gripper(side + '_gripper')
except:
self.has_gripper = False
rospy.loginfo("The electric gripper is not detected on the robot.")
else:
self.has_gripper = True
self.joints = self.limb.joint_names()
self.bindings = {
self.joints[0]+"_increase": (self.set_j, [self.joints[0], joint_delta], self.joints[0]+" increase"),
self.joints[0]+"_decrease": (self.set_j, [self.joints[0], -joint_delta], self.joints[0]+" decrease"),
self.joints[1]+"_increase": (self.set_j, [self.joints[1], joint_delta], self.joints[1]+" increase"),
self.joints[1]+"_decrease": (self.set_j, [self.joints[1], -joint_delta], self.joints[1]+" decrease"),
self.joints[2]+"_increase": (self.set_j, [self.joints[2], joint_delta], self.joints[2]+" increase"),
self.joints[2]+"_decrease": (self.set_j, [self.joints[2], -joint_delta], self.joints[2]+" decrease"),
self.joints[3]+"_increase": (self.set_j, [self.joints[3], joint_delta], self.joints[3]+" increase"),
self.joints[3]+"_decrease": (self.set_j, [self.joints[3], -joint_delta], self.joints[3]+" decrease"),
self.joints[4]+"_increase": (self.set_j, [self.joints[4], joint_delta], self.joints[4]+" increase"),
self.joints[4]+"_decrease": (self.set_j, [self.joints[4], -joint_delta], self.joints[4]+" decrease"),
self.joints[5]+"_increase": (self.set_j, [self.joints[5], joint_delta], self.joints[5]+" increase"),
self.joints[5]+"_decrease": (self.set_j, [self.joints[5], -joint_delta], self.joints[5]+" decrease"),
self.joints[6]+"_increase": (self.set_j, [self.joints[6], joint_delta], self.joints[6]+" increase"),
self.joints[6]+"_decrease": (self.set_j, [self.joints[6], -joint_delta], self.joints[6]+" decrease")
}
if self.has_gripper:
self.bindings.update({
"close": (self.set_g, "close", side+" gripper close"),
"open": (self.set_g, "open", side+" gripper open"),
"calibrate": (self.set_g, "calibrate", side+" gripper calibrate")
})
rospy.loginfo("Controlling joints...")
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ----------------------------
def execute_movement(self, action_id):
"""
        It executes the command given through an id. This will move any joint
        of Sawyer, including the gripper if it has one.
        :param action_id: These are the possible action_id values and the action associated.
        self.joints[0]+"_increase",
        self.joints[0]+"_decrease",
self.joints[1]+"_increase",
self.joints[1]+"_decrease",
self.joints[2]+"_increase",
self.joints[2]+"_decrease",
self.joints[3]+"_increase",
self.joints[3]+"_decrease",
self.joints[4]+"_increase",
self.joints[4]+"_decrease",
self.joints[5]+"_increase",
self.joints[5]+"_decrease",
self.joints[6]+"_increase",
self.joints[6]+"_decrease",
        close (gripper),
        open (gripper),
        calibrate (gripper)
"""
if action_id in self.bindings:
cmd = self.bindings[action_id]
if action_id == "gripper_close" or action_id == "gripper_open" or action_id == "gripper_calibrate":
cmd[0](cmd[1])
rospy.loginfo("command: %s" % (cmd[2],))
else:
# expand binding to something like "self.set_j(right, 'j0', joint_delta)"
cmd[0](*cmd[1])
rospy.loginfo("command: %s" % (cmd[2],))
else:
rospy.logerr("NOT VALID key binding, it should be one of these: ")
for key, val in sorted(self.bindings.items(),
key=lambda x: x[1][2]):
rospy.logerr(" %s: %s" % (key, val[2]))
def set_j(self, joint_name, delta):
current_position = self.limb.joint_angle(joint_name)
joint_command = {joint_name: current_position + delta}
self.limb.set_joint_positions(joint_command)
def set_g(self, action):
if self.has_gripper:
if action == "close":
self.gripper.close()
elif action == "open":
self.gripper.open()
elif action == "calibrate":
self.gripper.calibrate()
    def move_joints_to_angle_blocking(self, joint_positions_dict, timeout=15.0, threshold=0.008726646):
        """
        It moves all the joints to the given positions and doesn't exit until it reaches them.
        """
        self.limb.move_to_joint_positions(positions=joint_positions_dict,
                                          timeout=timeout,
                                          threshold=threshold,
                                          test=None)
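    # Illustrative example (the joint names are the intera defaults for a "right"
    # limb and the target angles are arbitrary): a task env could send the arm to
    # a known pose and block until it is reached with
    #   init_pose = {"right_j0": 0.0, "right_j1": -0.5, "right_j2": 0.0,
    #                "right_j3": 1.0, "right_j4": 0.0, "right_j5": 0.5,
    #                "right_j6": 0.0}
    #   self.move_joints_to_angle_blocking(init_pose)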
def get_limb_joint_names_array(self):
"""
Returns the Joint Names array of the Limb.
"""
return self.joints
def get_all_limb_joint_angles(self):
"""
        Returns a dictionary dict({str:float}) with all the joint angles
"""
return self.limb.joint_angles()
def get_all_limb_joint_efforts(self):
"""
        Returns a dictionary dict({str:float}) with all the joint efforts
"""
return self.limb.joint_efforts()
def get_tf_start_to_end_frames(self, start_frame_name, end_frame_name):
"""
Given two frames, it returns the transform from the start_frame_name to the end_frame_name.
        It will only return something other than None if the TFs of the two frames are published
        on the TF topic and are connected through the TF tree.
:param: start_frame_name: Start Frame of the TF transform
end_frame_name: End Frame of the TF transform
:return: trans,rot of the transform between the start and end frames.
"""
start_frame = "/"+start_frame_name
end_frame = "/"+end_frame_name
trans, rot = None, None
while (trans is None or rot is None) and not rospy.is_shutdown():
try:
(trans, rot) = self.listener.lookupTransform(
start_frame, end_frame, rospy.Time(0))
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
rospy.logerr("TF start to end not ready YET...")
duration_obj = rospy.Duration.from_sec(1.0)
rospy.sleep(duration_obj)
return trans, rot
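    # Illustrative example (the frame names are hypothetical; use frames that are
    # actually published on /tf in your setup):
    #   trans, rot = self.get_tf_start_to_end_frames("base", "right_hand")
    #   # trans = [x, y, z] in metres, rot = quaternion [x, y, z, w]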
def check_joint_limits_ready(self):
self.joint_limits = None
rospy.logdebug("Waiting for /robot/joint_limits to be READY...")
while self.joint_limits is None and not rospy.is_shutdown():
try:
self.joint_limits = rospy.wait_for_message(
"/robot/joint_limits", JointLimits, timeout=3.0)
rospy.logdebug("Current /robot/joint_limits READY=>")
except:
rospy.logerr(
"Current /robot/joint_limits not ready yet, retrying for getting joint_limits")
return self.joint_limits
def get_joint_limits(self):
return self.joint_limits
def get_head_camera_image_raw(self):
return self.head_camera_image_raw
def get_right_hand_camera_image_raw(self):
return self.right_hand_camera_image_raw
def init_joint_limits(self):
"""
        Get the Joint Limits, in the init phase where we need to unpause the simulation to get them
:return: joint_limits: The Joint Limits Dictionary, with names, angles, vel and effort limits.
"""
self.gazebo.unpauseSim()
joint_limits = self.check_joint_limits_ready()
self.gazebo.pauseSim()
return joint_limits
| [] |
2024-01-10 | AdrianAbeyta/openai_ros-1 | openai_ros~src~openai_ros~robot_envs~walrus_env.py | import numpy
import rospy
import time
from openai_ros import robot_gazebo_env
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from sensor_msgs.msg import Image
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import PointCloud2
from sensor_msgs.msg import Imu
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
from openai_ros.openai_ros_common import ROSLauncher
class WalrusEnv(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all CubeSingleDisk environments.
"""
def __init__(self, ros_ws_abspath):
"""
Initializes a new WalrusEnv environment.
        To check any topic we need to have the simulations running; we need to do two things:
        1) Unpause the simulation: without that, the stream of data doesn't flow. This is for simulations
        that are paused for whatever reason.
        2) If the simulation was already running for some reason, we need to reset the controllers.
        This has to do with the fact that some plugins with tf don't understand the reset of the simulation
        and need to be reset to work properly.
        The Sensors: The sensors accessible are the ones considered useful for AI learning.
        Sensor Topic List: TODO update with actual sensors from Walrus
        * /odom : Odometry readings of the base of the robot
        * /imu: Inertial Measuring Unit that gives relative accelerations and orientations.
* /scan: Laser Readings
Actuators Topic List: /cmd_vel, #TODO update
Args:
"""
rospy.logdebug("Start WalrusEnv INIT...")
# Variables that we give through the constructor.
# None in this case
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="walrus_gazebo",
launch_file_name="put_robot_in_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Internal Vars
        # Controllers used by the Walrus
        self.controllers_list = ['diff_vel_controller','joint_state_controller']
        # It doesn't use a namespace
self.robot_name_space = ""
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(WalrusEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False)
self.gazebo.unpauseSim()
self.controllers_object.reset_controllers()
self._check_all_sensors_ready()
# We Start all the ROS related Subscribers and publishers
rospy.Subscriber("/odom", Odometry, self._odom_callback)
rospy.Subscriber("/imu/data", Imu, self._imu_callback)
rospy.Subscriber("/scan", LaserScan, self._laser_scan_l_callback)
rospy.Subscriber("/scan_1", LaserScan, self._laser_scan_r_callback)
self._cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
self._check_publishers_connection()
self.gazebo.pauseSim()
rospy.logdebug("Finished WalrusEnv INIT...")
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
self._check_all_sensors_ready()
return True
    # WalrusEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
rospy.logdebug("START ALL SENSORS READY")
self._check_odom_ready()
self._check_imu_ready()
self._check_laser_scan_l_ready()
self._check_laser_scan_r_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_odom_ready(self):
self.odom = None
rospy.logdebug("Waiting for /odom to be READY...")
while self.odom is None and not rospy.is_shutdown():
try:
self.odom = rospy.wait_for_message("/odom", Odometry, timeout=5.0)
rospy.logdebug("Current /odom READY=>")
except:
rospy.logerr("Current /odom not ready yet, retrying for getting odom")
return self.odom
def _check_imu_ready(self):
self.imu = None
rospy.logdebug("Waiting for /imu to be READY...")
while self.imu is None and not rospy.is_shutdown():
try:
self.imu = rospy.wait_for_message("/imu/data", Imu, timeout=5.0)
rospy.logdebug("Current /imu/data READY=>")
except:
rospy.logerr("Current /imu/data not ready yet, retrying for getting imu")
return self.imu
def _check_laser_scan_l_ready(self):
self.laser_scan_l = None
rospy.logdebug("Waiting for /scan to be READY...")
while self.laser_scan_l is None and not rospy.is_shutdown():
try:
self.laser_scan_l = rospy.wait_for_message("/scan", LaserScan, timeout=1.0)
rospy.logdebug("Current /scan READY=>")
except:
rospy.logerr("Current /scan not ready yet, retrying for getting laser_scan")
return self.laser_scan_l
def _check_laser_scan_r_ready(self):
self.laser_scan_r = None
rospy.logdebug("Waiting for /scan_1 to be READY...")
while self.laser_scan_r is None and not rospy.is_shutdown():
try:
self.laser_scan_r = rospy.wait_for_message("/scan_1", LaserScan, timeout=1.0)
rospy.logdebug("Current /scan_1 READY=>")
except:
rospy.logerr("Current /scan not ready yet, retrying for getting laser_scan")
return self.laser_scan_r
def _odom_callback(self, data):
self.odom = data
def _imu_callback(self, data):
self.imu = data
def _laser_scan_l_callback(self, data):
self.laser_scan_l = data
def _laser_scan_r_callback(self, data):
self.laser_scan_r = data
def _check_publishers_connection(self):
"""
Checks that all the publishers are working
:return:
"""
rate = rospy.Rate(10) # 10hz
while self._cmd_vel_pub.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug("No susbribers to _cmd_vel_pub yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
                # This is to avoid an error when the world is reset and time goes backwards.
pass
rospy.logdebug("_cmd_vel_pub Publisher Connected")
rospy.logdebug("All Publishers READY")
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ----------------------------
def move_base(self, linear_speed, angular_speed, epsilon=0.05, update_rate=10):
"""
It will move the base based on the linear and angular speeds given.
        It will wait until those twists are achieved, reading from the odometry topic.
:param linear_speed: Speed in the X axis of the robot base frame
:param angular_speed: Speed of the angular turning of the robot base frame
:param epsilon: Acceptable difference between the speed asked and the odometry readings
:param update_rate: Rate at which we check the odometry.
:return:
"""
cmd_vel_value = Twist()
cmd_vel_value.linear.x = linear_speed
cmd_vel_value.angular.z = angular_speed
rospy.logdebug("Walrus Base Twist Cmd>>" + str(cmd_vel_value))
self._check_publishers_connection()
self._cmd_vel_pub.publish(cmd_vel_value)
#self.wait_until_twist_achieved(cmd_vel_value,epsilon,update_rate)
        # We wait a fixed amount of time instead, because the twist-achieved check doesn't work properly
time.sleep(0.2)
def wait_until_twist_achieved(self, cmd_vel_value, epsilon, update_rate):
"""
We wait for the cmd_vel twist given to be reached by the robot reading
from the odometry.
:param cmd_vel_value: Twist we want to wait to reach.
:param epsilon: Error acceptable in odometry readings.
:param update_rate: Rate at which we check the odometry.
:return:
"""
rospy.logdebug("START wait_until_twist_achieved...")
rate = rospy.Rate(update_rate)
start_wait_time = rospy.get_rostime().to_sec()
end_wait_time = 0.0
epsilon = 0.05
rospy.logdebug("Desired Twist Cmd>>" + str(cmd_vel_value))
rospy.logdebug("epsilon>>" + str(epsilon))
linear_speed = cmd_vel_value.linear.x
angular_speed = cmd_vel_value.angular.z
linear_speed_plus = linear_speed + epsilon
linear_speed_minus = linear_speed - epsilon
angular_speed_plus = angular_speed + epsilon
angular_speed_minus = angular_speed - epsilon
while not rospy.is_shutdown():
current_odometry = self._check_odom_ready()
# IN Walrus the odometry angular readings are inverted, so we have to invert the sign. TODO check this
odom_linear_vel = current_odometry.twist.twist.linear.x
odom_angular_vel = -1*current_odometry.twist.twist.angular.z
rospy.logdebug("Linear VEL=" + str(odom_linear_vel) + ", ?RANGE=[" + str(linear_speed_minus) + ","+str(linear_speed_plus)+"]")
rospy.logdebug("Angular VEL=" + str(odom_angular_vel) + ", ?RANGE=[" + str(angular_speed_minus) + ","+str(angular_speed_plus)+"]")
linear_vel_are_close = (odom_linear_vel <= linear_speed_plus) and (odom_linear_vel > linear_speed_minus)
angular_vel_are_close = (odom_angular_vel <= angular_speed_plus) and (odom_angular_vel > angular_speed_minus)
if linear_vel_are_close and angular_vel_are_close:
rospy.logdebug("Reached Velocity!")
end_wait_time = rospy.get_rostime().to_sec()
break
rospy.logdebug("Not there yet, keep waiting...")
rate.sleep()
delta_time = end_wait_time- start_wait_time
rospy.logdebug("[Wait Time=" + str(delta_time)+"]")
rospy.logdebug("END wait_until_twist_achieved...")
return delta_time
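    # Worked example (illustrative only): with cmd_vel_value.linear.x = 0.3,
    # cmd_vel_value.angular.z = 0.0 and epsilon = 0.05, the loop polls /odom
    # until the measured linear velocity lies in (0.25, 0.35] and the
    # sign-flipped angular velocity lies in (-0.05, 0.05], then returns the
    # elapsed time in seconds.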
def get_odom(self):
return self.odom
def get_imu(self):
return self.imu
def get_laser_scan_l(self):
return self.laser_scan_l
def get_laser_scan_r(self):
return self.laser_scan_r | [] |
2024-01-10 | AdrianAbeyta/openai_ros-1 | openai_ros~src~openai_ros~robot_envs~walrus_upright_env.py | import numpy
import rospy
import time
import copy
from openai_ros import robot_gazebo_env
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from sensor_msgs.msg import Image
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import PointCloud2
from sensor_msgs.msg import Imu
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
from openai_ros.openai_ros_common import ROSLauncher
class WalrusUprightEnv(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all CubeSingleDisk environments.
"""
def __init__(self, ros_ws_abspath):
"""
Initializes a new WalrusUprightEnv environment.
        Walrus doesn't use controller_manager, therefore we won't reset the controllers
        in the standard fashion. For the moment we won't reset them. (TODO: check controllers)
        To check any topic we need to have the simulations running; we need to do two things:
        1) Unpause the simulation: without that, the stream of data doesn't flow. This is for simulations
        that are paused for whatever reason.
        2) If the simulation was already running for some reason, we need to reset the controllers.
        This has to do with the fact that some plugins with tf don't understand the reset of the simulation
        and need to be reset to work properly.
        The Sensors: The sensors accessible are the ones considered useful for AI learning.
        Sensor Topic List: TODO update with actual sensors from Walrus
        * /odom : Odometry readings of the base of the robot
        * /imu: Inertial Measuring Unit that gives relative accelerations and orientations.
* /scan: Laser Readings
Actuators Topic List: /cmd_vel, #TODO update
Args:
"""
rospy.logdebug("Start WalrusUprightEnv INIT...")
# Variables that we give through the constructor.
# None in this case
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="walrus_gazebo",
launch_file_name="put_robot_in_world_upright.launch",
ros_ws_abspath=ros_ws_abspath)
# Internal Vars
        # Controllers used by the Walrus
        self.controllers_list = ['diff_vel_controller','joint_state_controller']
        # It doesn't use a namespace
self.robot_name_space = ""
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(WalrusUprightEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=True,
start_init_physics_parameters=True)
self.gazebo.unpauseSim()
self.controllers_object.reset_controllers()
self._check_all_sensors_ready()
# We Start all the ROS related Subscribers and publishers
rospy.Subscriber("/diff_vel_controller/odom", Odometry, self._odom_callback)
rospy.Subscriber("/imu/data", Imu, self._imu_callback)
rospy.Subscriber("/scan", LaserScan, self._laser_scan_l_callback)
rospy.Subscriber("/scan_1", LaserScan, self._laser_scan_r_callback)
self._cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
self._check_publishers_connection()
self.gazebo.pauseSim()
# The odometry from diff_vel_controller doesn't reset after each run.
# Instead, track elapsed odometry before each run, so that it can be subtracted to give actual relative odometry.
#self.elapsed_x = 0.0
#self.elapsed_y = 0.0
#self.elapsed_z = 0.0
#self.odom = Odometry() # Blank odometry message
rospy.logdebug("Finished WalrusUprightEnv INIT...")
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
self._check_all_sensors_ready()
return True
    # WalrusUprightEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
rospy.logdebug("START ALL SENSORS READY")
self._check_odom_ready()
self._check_imu_ready()
self._check_laser_scan_l_ready()
self._check_laser_scan_r_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_odom_ready(self):
self.odom = None
rospy.logdebug("Waiting for /diff_vel_controller/odom to be READY...")
while self.odom is None and not rospy.is_shutdown():
try:
self.odom = rospy.wait_for_message("/diff_vel_controller/odom", Odometry, timeout=5.0)
rospy.logdebug("Current /diff_vel_controller/odom READY=>")
except:
rospy.logerr("Current /diff_vel_controller/odom not ready yet, retrying for getting odom")
self.elapsed_x = self.odom.pose.pose.position.x
self.elapsed_y = self.odom.pose.pose.position.y
self.elapsed_z = self.odom.pose.pose.position.z
return self.odom
def _check_imu_ready(self):
self.imu = None
rospy.logdebug("Waiting for /imu to be READY...")
while self.imu is None and not rospy.is_shutdown():
try:
self.imu = rospy.wait_for_message("/imu/data", Imu, timeout=5.0)
rospy.logdebug("Current /imu/data READY=>")
except:
rospy.logerr("Current /imu/data not ready yet, retrying for getting imu")
return self.imu
def _check_laser_scan_l_ready(self):
self.laser_scan_l = None
rospy.logdebug("Waiting for /scan to be READY...")
while self.laser_scan_l is None and not rospy.is_shutdown():
try:
self.laser_scan_l = rospy.wait_for_message("/scan", LaserScan, timeout=1.0)
rospy.logdebug("Current /scan READY=>")
except:
rospy.logerr("Current /scan not ready yet, retrying for getting laser_scan")
return self.laser_scan_l
def _check_laser_scan_r_ready(self):
self.laser_scan_r = None
rospy.logdebug("Waiting for /scan_1 to be READY...")
while self.laser_scan_r is None and not rospy.is_shutdown():
try:
self.laser_scan_r = rospy.wait_for_message("/scan_1", LaserScan, timeout=1.0)
rospy.logdebug("Current /scan_1 READY=>")
except:
rospy.logerr("Current /scan not ready yet, retrying for getting laser_scan")
return self.laser_scan_r
def _odom_callback(self, data):
self.odom = data
def _imu_callback(self, data):
self.imu = data
def _laser_scan_l_callback(self, data):
self.laser_scan_l = data
def _laser_scan_r_callback(self, data):
self.laser_scan_r = data
def _check_publishers_connection(self):
"""
Checks that all the publishers are working
:return:
"""
rate = rospy.Rate(10) # 10hz
while self._cmd_vel_pub.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug("No susbribers to _cmd_vel_pub yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
                # This is to avoid an error when the world is reset and time goes backwards.
pass
rospy.logdebug("_cmd_vel_pub Publisher Connected")
rospy.logdebug("All Publishers READY")
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
        # NOTE: the assignments below are unreachable because of the raise above; subclasses
        # that implement _init_env_variables() capture the elapsed pose themselves.
        # Since odometry drifts and cannot be reset between runs, save the elapsed pose for later processing.
self.elapsed_x = self.odom.pose.pose.position.x
self.elapsed_y = self.odom.pose.pose.position.y
self.elapsed_z = self.odom.pose.pose.position.z
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ----------------------------
def move_base(self, linear_speed, angular_speed, epsilon=0.05, update_rate=10):
"""
It will move the base based on the linear and angular speeds given.
        It will wait until those twists are achieved, reading from the odometry topic.
:param linear_speed: Speed in the X axis of the robot base frame
:param angular_speed: Speed of the angular turning of the robot base frame
:param epsilon: Acceptable difference between the speed asked and the odometry readings
:param update_rate: Rate at which we check the odometry.
:return:
"""
cmd_vel_value = Twist()
cmd_vel_value.linear.x = linear_speed
cmd_vel_value.angular.z = angular_speed
rospy.logdebug("Walrus Base Twist Cmd>>" + str(cmd_vel_value))
self._check_publishers_connection()
self._cmd_vel_pub.publish(cmd_vel_value)
#self.wait_until_twist_achieved(cmd_vel_value,epsilon,update_rate)
        # We wait a fixed amount of time instead, because the twist-achieved check doesn't work properly
time.sleep(0.2)
def wait_until_twist_achieved(self, cmd_vel_value, epsilon, update_rate):
"""
We wait for the cmd_vel twist given to be reached by the robot reading
from the odometry.
:param cmd_vel_value: Twist we want to wait to reach.
:param epsilon: Error acceptable in odometry readings.
:param update_rate: Rate at which we check the odometry.
:return:
"""
rospy.logdebug("START wait_until_twist_achieved...")
rate = rospy.Rate(update_rate)
start_wait_time = rospy.get_rostime().to_sec()
end_wait_time = 0.0
epsilon = 0.05
rospy.logdebug("Desired Twist Cmd>>" + str(cmd_vel_value))
rospy.logdebug("epsilon>>" + str(epsilon))
linear_speed = cmd_vel_value.linear.x
angular_speed = cmd_vel_value.angular.z
linear_speed_plus = linear_speed + epsilon
linear_speed_minus = linear_speed - epsilon
angular_speed_plus = angular_speed + epsilon
angular_speed_minus = angular_speed - epsilon
while not rospy.is_shutdown():
current_odometry = self._check_odom_ready()
# IN Walrus the odometry angular readings are inverted, so we have to invert the sign. TODO check this
odom_linear_vel = current_odometry.twist.twist.linear.x
odom_angular_vel = -1*current_odometry.twist.twist.angular.z
rospy.logdebug("Linear VEL=" + str(odom_linear_vel) + ", ?RANGE=[" + str(linear_speed_minus) + ","+str(linear_speed_plus)+"]")
rospy.logdebug("Angular VEL=" + str(odom_angular_vel) + ", ?RANGE=[" + str(angular_speed_minus) + ","+str(angular_speed_plus)+"]")
linear_vel_are_close = (odom_linear_vel <= linear_speed_plus) and (odom_linear_vel > linear_speed_minus)
angular_vel_are_close = (odom_angular_vel <= angular_speed_plus) and (odom_angular_vel > angular_speed_minus)
if linear_vel_are_close and angular_vel_are_close:
rospy.logdebug("Reached Velocity!")
end_wait_time = rospy.get_rostime().to_sec()
break
rospy.logdebug("Not there yet, keep waiting...")
rate.sleep()
delta_time = end_wait_time- start_wait_time
rospy.logdebug("[Wait Time=" + str(delta_time)+"]")
rospy.logdebug("END wait_until_twist_achieved...")
return delta_time
def get_odom(self):
        # Uncorrected, drifting odom:
        odom_drift = self.odom
        # Deep-copy it so the subtraction below does not mutate the stored message,
        # then subtract the odometry accumulated before this episode started.
        rel_odom = copy.deepcopy(odom_drift)
        rel_odom.pose.pose.position.x -= self.elapsed_x
        rel_odom.pose.pose.position.y -= self.elapsed_y
        rel_odom.pose.pose.position.z -= self.elapsed_z
# Print an output for debugging
rospy.logdebug("Uncorrected odom position: " + str(self.odom.pose.pose.position))
rospy.logdebug("Corrected odom position: " + str(rel_odom.pose.pose.position))
return rel_odom
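    # Worked example (illustrative only): if the controller's odometry read
    # x = 5.2 m when the episode started (self.elapsed_x = 5.2) and reads
    # x = 6.0 m now, get_odom() reports x = 0.8 m, i.e. displacement relative
    # to the episode start rather than the accumulated, drifting value.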
def get_imu(self):
return self.imu
def get_laser_scan_l(self):
return self.laser_scan_l
def get_laser_scan_r(self):
return self.laser_scan_r | [] |
2024-01-10 | AdrianAbeyta/openai_ros-1 | openai_ros~src~openai_ros~task_envs~walrus~walrus_balance.py | import rospy
import numpy
from gym import spaces
from openai_ros.robot_envs import walrus_upright_env
from gym.envs.registration import register
from geometry_msgs.msg import Vector3
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
from openai_ros.openai_ros_common import ROSLauncher
import os
class WalrusBalanceEnv(walrus_upright_env.WalrusUprightEnv):
def __init__(self):
"""
This Task Env is designed for having the Walrus balance.
It will learn how to balance without tipping over.
"""
# This is the path where the simulation files, the Task and the Robot gits will be downloaded if not there
ros_ws_abspath = rospy.get_param("/walrus/ros_ws_abspath", None)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path " + ros_ws_abspath + \
" DOESNT exist, execute: mkdir -p " + ros_ws_abspath + \
"/src;cd " + ros_ws_abspath + ";catkin_make"
ROSLauncher(rospackage_name="walrus_gazebo",
launch_file_name="load_stairs.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/walrus/config",
yaml_file_name="walrus_balance.yaml")
# Here we will add any init functions prior to starting the MyRobotEnv
super(WalrusBalanceEnv, self).__init__(ros_ws_abspath)
# We set the reward range, which is not compulsory but here we do it.
self.reward_range = (-numpy.inf, numpy.inf)
#number_observations = rospy.get_param('/walrus/n_observations')
"""
        The observation is built from the two discretized laser scans, the IMU
        pitch orientation and pitch rate, and the x displacement from odometry
        (see the observation-space construction below).
"""
# Action parameters
self.linear_forward_speed_max = rospy.get_param('/walrus/linear_forward_speed_max')
self.linear_forward_speed_min = rospy.get_param('/walrus/linear_forward_speed_min')
#self.linear_turn_speed = rospy.get_param('/walrus/linear_turn_speed')
#self.angular_speed = rospy.get_param('/walrus/angular_speed')
self.init_linear_forward_speed = rospy.get_param('/walrus/init_linear_forward_speed')
self.init_linear_turn_speed = rospy.get_param('/walrus/init_linear_turn_speed')
# Set up action space. Potential action/commanded velocity is any value between linear_forward_speed_min and _max
#number_actions = rospy.get_param('/walrus/n_actions')
#self.action_space = spaces.Discrete(number_actions)
self.action_space = spaces.Box(numpy.array([self.linear_forward_speed_min]), numpy.array([self.linear_forward_speed_max]))
# Observation parameters
self.new_ranges = rospy.get_param('/walrus/new_ranges')
self.num_scans = rospy.get_param('/walrus/num_scans')
self.min_range = rospy.get_param('/walrus/min_range')
self.max_laser_value = rospy.get_param('/walrus/max_laser_value')
self.min_laser_value = rospy.get_param('/walrus/min_laser_value')
#self.num_imu_obs = rospy.get_param('/walrus/num_imu_obs')
self.max_pitch_orient = rospy.get_param('/walrus/max_pitch_orient')
self.min_pitch_orient = rospy.get_param('/walrus/min_pitch_orient')
self.max_pitch_rate = rospy.get_param('/walrus/max_pitch_rate')
self.min_pitch_rate = rospy.get_param('/walrus/min_pitch_rate')
self.max_x_disp = rospy.get_param('/walrus/max_x_disp')
self.min_x_disp = rospy.get_param('/walrus/min_x_disp')
self.max_linear_acceleration = rospy.get_param('/walrus/max_linear_acceleration')
self.max_angular_velocity = rospy.get_param('/walrus/max_angular_velocity')
# Set up observation space
# We create two arrays based on the range values that will be assigned
# In the discretization method.
laser_scan_l = self.get_laser_scan_l()
laser_scan_r = self.get_laser_scan_r()
#num_laser_readings = int(len(laser_scan.ranges)/self.new_ranges)
# Define high and low values for the scans
high_scan = numpy.full((self.new_ranges*self.num_scans), self.max_laser_value)
low_scan = numpy.full((self.new_ranges*self.num_scans), self.min_laser_value)
# Now, define high and low values for the imu measurements in a numpy array
high_imu = numpy.array([self.max_pitch_orient, self.max_pitch_rate])
low_imu = numpy.array([self.min_pitch_orient, self.min_pitch_rate])
# Now, define high and low values for the odometry measurement in a numpy array
high_disp = numpy.array(self.max_x_disp)
low_disp = numpy.array(self.min_x_disp)
# Define high and low values for all observations, and create the observation space to span
high = numpy.append(high_scan, high_imu)
high = numpy.append(high, high_disp)
low = numpy.append(low_scan, low_imu)
low = numpy.append(low, low_disp)
self.observation_space = spaces.Box(low, high)
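        # Shape note (illustrative, assuming the yaml sets num_scans*new_ranges = 16,
        # e.g. 2 scans x 8 ranges): the observation vector is then
        # [16 laser values, pitch, pitch_rate, x_displacement], a 19-dimensional Box,
        # which is why _is_done() and _compute_reward() index observations[16],
        # [17] and [18] below.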
rospy.logdebug("ACTION SPACES TYPE===>"+str(self.action_space))
rospy.logdebug("OBSERVATION SPACES TYPE===>"+str(self.observation_space))
# Reward/penalty parameters
self.stay_up_reward = rospy.get_param("/walrus/stay_up_reward")
self.position_penalty = rospy.get_param("/walrus/position_penalty")
self.ang_velocity_threshold = rospy.get_param("/walrus/ang_velocity_threshold")
self.ang_velocity_reward = rospy.get_param("/walrus/ang_velocity_reward")
self.cumulated_steps = 0.0
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
self.move_base( self.init_linear_forward_speed,
self.init_linear_turn_speed,
epsilon=0.05,
update_rate=10)
return True
def _init_env_variables(self):
"""
Inits variables needed to be initialised each time we reset at the start
of an episode.
:return:
"""
# For Info Purposes
self.cumulated_reward = 0.0
# Reset Controller
#self.controllers_object.reset_controllers()
        # Set Done to false, because it's calculated asynchronously
self._episode_done = False
def _set_action(self, action):
"""
        This method sets the linear and angular speed of the Walrus
        based on the action given.
:param action: The action value; i.e. commanded linear velocity.
"""
rospy.logdebug("Start Set Action ==>"+str(action))
# We convert the actions to speed movements to send to the parent class CubeSingleDiskEnv
# if action == 0: #FORWARD
# linear_speed = self.linear_forward_speed
# angular_speed = 0.0
# self.last_action = "FORWARDS"
# elif action == 1: #BACKWARD
# linear_speed = -self.linear_forward_speed
# angular_speed = 0.0
# self.last_action = "BACKWARDS"
# elif action == 2: #HOLD
# linear_speed = 0.0
# angular_speed = 0.0
# self.last_action = "HOLD"
linear_speed = action[0]
angular_speed = 0.0
self.last_action = str(action[0])
# We tell walrus the linear and angular speed to set to execute
self.move_base(linear_speed, angular_speed, epsilon=0.05, update_rate=10)
rospy.logdebug("END Set Action ==>"+str(action))
def _get_obs(self):
"""
        Here we define what sensor data defines our robot's observations.
        To know which variables we have access to, we need to read the
        WalrusEnv API docs.
:return:
"""
rospy.logdebug("Start Get Observation ==>")
# We get the laser scan data
laser_scan_l = self.get_laser_scan_l()
laser_scan_r = self.get_laser_scan_r()
imu_data = self.get_imu()
odom = self.get_odom()
discretized_observations_l = self.discretize_scan_observation(laser_scan_l, self.new_ranges)
discretized_observations_r = self.discretize_scan_observation(laser_scan_r, self.new_ranges)
imu_observations = [imu_data.orientation.y, imu_data.angular_velocity.y]
odom_observations = [odom.pose.pose.position.x]
obs = [] # initialize empty list
obs.extend(discretized_observations_l) # add left scan obs to obs
obs.extend(discretized_observations_r) # add right scan obs to obs
obs.extend(imu_observations) # add imu obs to obs
obs.extend(odom_observations) # add odom obs to obs
# obs.extend(new_list)
rospy.logdebug("Observations==>"+str(obs))
rospy.logdebug("END Get Observation ==>")
return obs
def _is_done(self, observations):
if self._episode_done:
rospy.logerr("Walrus is Too Close to wall==>")
else:
rospy.logwarn("Walrus is NOT close to a wall ==>")
# Check orientation and angular velocity observations for rollover
if (observations[16]>self.max_pitch_orient)|(observations[16]<self.min_pitch_orient):
rospy.logerr("Walrus pitch orientation out of bounds==>"+str(observations[16]))
self._episode_done = True
else:
rospy.logdebug("Walrus pitch orientation in bounds==>"+str(observations[16]))
if (observations[17]>self.max_pitch_rate)|(observations[17]<self.min_pitch_rate):
rospy.logerr("Walrus angular velocity out of bounds==>"+str(observations[17]))
self._episode_done = True
else:
rospy.logdebug("Walrus pitch velocity in bounds==>"+str(observations[17]))
# Now we check if it has crashed based on the imu
imu_data = self.get_imu()
linear_acceleration_magnitude = self.get_vector_magnitude(imu_data.linear_acceleration)
if linear_acceleration_magnitude > self.max_linear_acceleration:
rospy.logerr("Walrus Crashed==>"+str(linear_acceleration_magnitude)+">"+str(self.max_linear_acceleration))
self._episode_done = True
else:
rospy.logerr("Walrus DIDN'T crash ==>"+str(linear_acceleration_magnitude)+"<"+str(self.max_linear_acceleration))
return self._episode_done
def _compute_reward(self, observations, done):
# Reward for staying up / continuing the training episode
reward = self.stay_up_reward
# Penalty for x odometry being far away from origin (off-center)
rospy.logdebug("Displacement is " + str(observations[18]) + ", penalty is " + str(self.position_penalty*abs(observations[18])))
reward += self.position_penalty*abs(observations[18])
# If angular velocity is below threshold, give a reward
if abs(observations[17]) < self.ang_velocity_threshold:
rospy.logdebug("Angular velocity " + str(observations[17]) + " is below threshold, giving reward.")
reward += self.ang_velocity_reward
# if not done:
# if self.last_action == "FORWARDS":
# reward = self.forwards_reward
# else:
# reward = self.turn_reward
# else:
# reward = -1*self.end_episode_points
rospy.logdebug("reward=" + str(reward))
self.cumulated_reward += reward
rospy.logdebug("Cumulated_reward=" + str(self.cumulated_reward))
self.cumulated_steps += 1
rospy.logdebug("Cumulated_steps=" + str(self.cumulated_steps))
return reward
# Internal TaskEnv Methods
def discretize_scan_observation(self,data,new_ranges):
"""
        Downsamples the scan: keeps only the readings whose index is a multiple of
        len(data.ranges)/new_ranges, clamping inf to max_laser_value and NaN to min_laser_value.
"""
self._episode_done = False
discretized_ranges = []
mod = len(data.ranges)/new_ranges
for i, item in enumerate(data.ranges):
if (i%mod==0):
if item == float ('Inf') or numpy.isinf(item):
discretized_ranges.append(self.max_laser_value)
elif numpy.isnan(item):
discretized_ranges.append(self.min_laser_value)
else:
#discretized_ranges.append(int(item))
discretized_ranges.append(item)
if (self.min_range > item > 0):
rospy.logerr("done Validation >>> item=" + str(item)+"< "+str(self.min_range))
self._episode_done = True
else:
rospy.logdebug("NOT done Validation >>> item=" + str(item)+"< "+str(self.min_range))
return discretized_ranges
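    # Worked example (illustrative only): with a 720-point scan and
    # new_ranges = 8, mod = 90, so indices 0, 90, 180, ..., 630 are kept,
    # giving 8 readings per scan; inf readings become max_laser_value and
    # NaN readings become min_laser_value.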
def get_vector_magnitude(self, vector):
"""
        It calculates the magnitude of the given Vector3.
        This is useful for reading IMU accelerations and knowing if there has been
        a crash.
:return:
"""
contact_force_np = numpy.array((vector.x, vector.y, vector.z))
force_magnitude = numpy.linalg.norm(contact_force_np)
return force_magnitude
| [] |
2024-01-10 | AdrianAbeyta/openai_ros-1 | openai_ros~src~openai_ros~task_envs~cartpole_stay_up~stay_up.py | from gym import utils
from openai_ros.robot_envs import cartpole_env
from gym.envs.registration import register
from gym import error, spaces
import rospy
import math
import numpy as np
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
from openai_ros.openai_ros_common import ROSLauncher
import os
class CartPoleStayUpEnv(cartpole_env.CartPoleEnv):
def __init__(self):
ros_ws_abspath = rospy.get_param("/cartpole_v0/ros_ws_abspath", None)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path " + ros_ws_abspath + \
" DOESNT exist, execute: mkdir -p " + ros_ws_abspath + \
"/src;cd " + ros_ws_abspath + ";catkin_make"
ROSLauncher(rospackage_name="cartpole_description",
launch_file_name="start_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/cartpole_stay_up/config",
yaml_file_name="stay_up.yaml")
self.get_params()
self.action_space = spaces.Discrete(self.n_actions)
high = np.array([
2.5 * 2,
np.finfo(np.float32).max,
0.7 * 2,
np.finfo(np.float32).max])
self.observation_space = spaces.Box(-high, high)
# TODO: Remove when working
"""
cartpole_env.CartPoleEnv.__init__(
self, control_type=self.control_type
)
"""
# Here we will add any init functions prior to starting the MyRobotEnv
super(CartPoleStayUpEnv, self).__init__(control_type=self.control_type,
ros_ws_abspath=ros_ws_abspath)
def get_params(self):
# get configuration parameters
self.n_actions = rospy.get_param('/cartpole_v0/n_actions')
self.min_pole_angle = rospy.get_param('/cartpole_v0/min_pole_angle')
self.max_pole_angle = rospy.get_param('/cartpole_v0/max_pole_angle')
self.max_base_velocity = rospy.get_param(
'/cartpole_v0/max_base_velocity')
self.min_base_pose_x = rospy.get_param('/cartpole_v0/min_base_pose_x')
self.max_base_pose_x = rospy.get_param('/cartpole_v0/max_base_pose_x')
self.pos_step = rospy.get_param('/cartpole_v0/pos_step')
self.running_step = rospy.get_param('/cartpole_v0/running_step')
self.init_pos = rospy.get_param('/cartpole_v0/init_pos')
self.wait_time = rospy.get_param('/cartpole_v0/wait_time')
self.control_type = rospy.get_param('/cartpole_v0/control_type')
def _set_action(self, action):
# Take action
if action == 0: # LEFT
rospy.loginfo("GO LEFT...")
self.pos[0] -= self.pos_step
elif action == 1: # RIGHT
rospy.loginfo("GO RIGHT...")
self.pos[0] += self.pos_step
elif action == 2: # LEFT BIG
rospy.loginfo("GO LEFT BIG...")
self.pos[0] -= self.pos_step * 10
elif action == 3: # RIGHT BIG
rospy.loginfo("GO RIGHT BIG...")
self.pos[0] += self.pos_step * 10
# Apply action to simulation.
rospy.loginfo("MOVING TO POS=="+str(self.pos))
# 1st: unpause simulation
#rospy.logdebug("Unpause SIM...")
# self.gazebo.unpauseSim()
self.move_joints(self.pos)
rospy.logdebug(
"Wait for some time to execute movement, time="+str(self.running_step))
rospy.sleep(self.running_step) # wait for some time
rospy.logdebug(
"DONE Wait for some time to execute movement, time=" + str(self.running_step))
# 3rd: pause simulation
#rospy.logdebug("Pause SIM...")
# self.gazebo.pauseSim()
def _get_obs(self):
data = self.joints
# base_postion base_velocity pole angle pole velocity
#obs = [round(data.position[1],1), round(data.velocity[1],1), round(data.position[0],1), round(data.velocity[0],1)]
obs = [data.position[1], data.velocity[1],
data.position[0], data.velocity[0]]
return np.array(obs)
def _is_done(self, observations):
done = False
data = self.joints
rospy.loginfo("BASEPOSITION=="+str(observations[0]))
rospy.loginfo("POLE ANGLE==" + str(observations[2]))
# check if the base is still within the ranges of (-2, 2)
if (self.min_base_pose_x >= observations[0] or observations[0] >= self.max_base_pose_x):
rospy.logerr("Base Outside Limits==>min="+str(self.min_base_pose_x) +
",pos="+str(observations[0])+",max="+str(self.max_base_pose_x))
done = True
# check if pole has toppled over
if (self.min_pole_angle >= observations[2] or observations[2] >= self.max_pole_angle):
rospy.logerr(
"Pole Angle Outside Limits==>min=" + str(self.min_pole_angle) + ",pos=" + str(observations[2]) + ",max=" + str(
self.max_pole_angle))
done = True
rospy.loginfo("FINISHED get _is_done")
return done
def _compute_reward(self, observations, done):
"""
        Gives more points for staying upright; it reads from the given observations
        so that it uses the same data as the other functions.
:return:reward
"""
rospy.logdebug("START _compute_reward")
if not done:
reward = 1.0
elif self.steps_beyond_done is None:
# Pole just fell!
self.steps_beyond_done = 0
reward = 1.0
else:
if self.steps_beyond_done == 0:
logger.warning("You are calling 'step()' even though this environment has already returned done = True. You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior.")
self.steps_beyond_done += 1
reward = 0.0
rospy.logdebug("END _compute_reward")
return reward
def _init_env_variables(self):
"""
Inits variables needed to be initialised each time we reset at the start
of an episode.
:return:
"""
self.steps_beyond_done = None
def _set_init_pose(self):
"""
Sets joints to initial position [0,0,0]
:return:
"""
self.check_publishers_connection()
# Reset Internal pos variable
self.init_internal_vars(self.init_pos)
self.move_joints(self.pos)
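# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): it assumes a ROS master and Gazebo
# are reachable, the stay_up.yaml parameters are on the parameter server, and
# 'CartPoleStayUp-v0' has been registered by the task-env registration helper
# shown earlier in this package.
if __name__ == "__main__":
    import gym
    import rospy
    rospy.init_node("cartpole_stay_up_random_agent", anonymous=True, log_level=rospy.WARN)
    env = gym.make("CartPoleStayUp-v0")
    observation = env.reset()
    for _ in range(200):
        action = env.action_space.sample()  # random discrete action in [0, n_actions)
        observation, reward, done, info = env.step(action)
        if done:
            observation = env.reset()
    env.close()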
| [] |
2024-01-10 | AdrianAbeyta/openai_ros-1 | openai_ros~src~openai_ros~task_envs~walrus~walrus_stairs.py | import rospy
import numpy
from gym import spaces
from openai_ros.robot_envs import walrus_env
from gym.envs.registration import register
from geometry_msgs.msg import Vector3
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
from openai_ros.openai_ros_common import ROSLauncher
import os
class WalrusStairsEnv(walrus_env.WalrusEnv):
def __init__(self):
"""
This Task Env is designed for having the Walrus climb and descend stairs.
It will learn how to climb stairs without tipping over.
"""
# This is the path where the simulation files, the Task and the Robot gits will be downloaded if not there
ros_ws_abspath = rospy.get_param("/walrus/ros_ws_abspath", None)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path " + ros_ws_abspath + \
" DOESNT exist, execute: mkdir -p " + ros_ws_abspath + \
"/src;cd " + ros_ws_abspath + ";catkin_make"
ROSLauncher(rospackage_name="walrus_gazebo",
launch_file_name="load_stairs.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/walrus/config",
yaml_file_name="walrus_stairs.yaml")
# Here we will add any init functions prior to starting the MyRobotEnv
super(WalrusStairsEnv, self).__init__(ros_ws_abspath)
# We set the reward range, which is not compulsory but here we do it.
self.reward_range = (-numpy.inf, numpy.inf)
#number_observations = rospy.get_param('/walrus/n_observations')
"""
        The observation is built from the two discretized laser scans, the IMU
        pitch orientation and pitch rate, and the x displacement from odometry
        (see the observation-space construction below).
"""
# Action parameters
self.linear_forward_speed_max = rospy.get_param('/walrus/linear_forward_speed_max')
self.linear_forward_speed_min = rospy.get_param('/walrus/linear_forward_speed_min')
#self.linear_turn_speed = rospy.get_param('/walrus/linear_turn_speed')
#self.angular_speed = rospy.get_param('/walrus/angular_speed')
self.init_linear_forward_speed = rospy.get_param('/walrus/init_linear_forward_speed')
self.init_linear_turn_speed = rospy.get_param('/walrus/init_linear_turn_speed')
# Set up action space. Potential action/commanded velocity is any value between linear_forward_speed_min and _max
#number_actions = rospy.get_param('/walrus/n_actions')
#self.action_space = spaces.Discrete(number_actions)
self.action_space = spaces.Box(numpy.array([self.linear_forward_speed_min]), numpy.array([self.linear_forward_speed_max]))
# Observation parameters
self.new_ranges = rospy.get_param('/walrus/new_ranges')
self.num_scans = rospy.get_param('/walrus/num_scans')
self.min_range = rospy.get_param('/walrus/min_range')
self.max_laser_value = rospy.get_param('/walrus/max_laser_value')
self.min_laser_value = rospy.get_param('/walrus/min_laser_value')
#self.num_imu_obs = rospy.get_param('/walrus/num_imu_obs')
self.max_pitch_orient = rospy.get_param('/walrus/max_pitch_orient')
self.min_pitch_orient = rospy.get_param('/walrus/min_pitch_orient')
self.max_pitch_rate = rospy.get_param('/walrus/max_pitch_rate')
self.min_pitch_rate = rospy.get_param('/walrus/min_pitch_rate')
self.max_x_disp = rospy.get_param('/walrus/max_x_disp')
self.min_x_disp = rospy.get_param('/walrus/min_x_disp')
self.max_linear_acceleration = rospy.get_param('/walrus/max_linear_acceleration')
self.max_angular_velocity = rospy.get_param('/walrus/max_angular_velocity')
# Set up observation space
# We create two arrays based on the range values that will be assigned
# In the discretization method.
laser_scan_l = self.get_laser_scan_l()
laser_scan_r = self.get_laser_scan_r()
#num_laser_readings = int(len(laser_scan.ranges)/self.new_ranges)
# Define high and low values for the scans
high_scan = numpy.full((self.new_ranges*self.num_scans), self.max_laser_value)
low_scan = numpy.full((self.new_ranges*self.num_scans), self.min_laser_value)
# Now, define high and low values for the imu measurements in a numpy array
high_imu = numpy.array([self.max_pitch_orient, self.max_pitch_rate])
low_imu = numpy.array([self.min_pitch_orient, self.min_pitch_rate])
# Now, define high and low values for the odometry measurement in a numpy array
high_disp = numpy.array(self.max_x_disp)
low_disp = numpy.array(self.min_x_disp)
# Define high and low values for all observations, and create the observation space to span
high = numpy.append(high_scan, high_imu)
high = numpy.append(high, high_disp)
low = numpy.append(low_scan, low_imu)
low = numpy.append(low, low_disp)
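        # Resulting observation layout: new_ranges*num_scans discretized laser readings
        # (left scan then right scan), followed by pitch orientation, pitch rate and
        # the x displacement from odometry (same ordering as built in _get_obs).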
self.observation_space = spaces.Box(low, high)
rospy.logdebug("ACTION SPACES TYPE===>"+str(self.action_space))
rospy.logdebug("OBSERVATION SPACES TYPE===>"+str(self.observation_space))
# Reward/penalty parameters
self.stay_alive_reward = rospy.get_param("/walrus/stay_alive_reward")
self.position_reward = rospy.get_param("/walrus/position_reward")
self.ang_velocity_threshold = rospy.get_param("/walrus/ang_velocity_threshold")
self.ang_velocity_reward = rospy.get_param("/walrus/ang_velocity_reward")
self.forward_velocity_reward = rospy.get_param("/walrus/forward_velocity_reward")
self.cumulated_steps = 0.0
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
self.move_base( self.init_linear_forward_speed,
self.init_linear_turn_speed,
epsilon=0.05,
update_rate=10)
return True
def _init_env_variables(self):
"""
        Inits variables that need to be initialised each time we reset at the start
of an episode.
:return:
"""
# For Info Purposes
self.cumulated_reward = 0.0
# Reset Controller
#self.controllers_object.reset_controllers()
        # Set Done to false, because it's calculated asynchronously
self._episode_done = False
def _set_action(self, action):
"""
        This set action will set the linear and angular speed of the walrus
based on the action given.
:param action: The action value; i.e. commanded linear velocity.
"""
rospy.logdebug("Start Set Action ==>"+str(action))
linear_speed = action[0]
angular_speed = 0.0
self.last_action = action[0]
# We tell walrus the linear and angular speed to set to execute
self.move_base(linear_speed, angular_speed, epsilon=0.05, update_rate=10)
rospy.logdebug("END Set Action ==>"+str(action))
def _get_obs(self):
"""
Here we define what sensor data defines our robots observations
        To know which variables we have access to, we need to read the
WalrusEnv API DOCS
:return:
"""
rospy.logdebug("Start Get Observation ==>")
# We get the laser scan data
laser_scan_l = self.get_laser_scan_l()
laser_scan_r = self.get_laser_scan_r()
imu_data = self.get_imu()
odom = self.get_odom()
discretized_observations_l = self.discretize_scan_observation(laser_scan_l, self.new_ranges)
discretized_observations_r = self.discretize_scan_observation(laser_scan_r, self.new_ranges)
imu_observations = [imu_data.orientation.y, imu_data.angular_velocity.y]
odom_observations = [odom.pose.pose.position.x]
obs = [] # initialize empty list
obs.extend(discretized_observations_l) # add left scan obs to obs
obs.extend(discretized_observations_r) # add right scan obs to obs
obs.extend(imu_observations) # add imu obs to obs
obs.extend(odom_observations) # add odom obs to obs
# obs.extend(new_list)
rospy.logdebug("Observations==>"+str(obs))
rospy.logdebug("END Get Observation ==>")
return obs
def _is_done(self, observations):
if self._episode_done:
rospy.logerr("Walrus is Too Close to wall==>")
else:
rospy.logwarn("Walrus is NOT close to a wall ==>")
# Check orientation and angular velocity observations for rollover
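        # The entries after the two scan blocks are pitch orientation, pitch rate and
        # x displacement; the fixed indices 16-18 below assume new_ranges*num_scans == 16
        # (see _get_obs for the ordering).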
if (observations[16]>self.max_pitch_orient)|(observations[16]<self.min_pitch_orient):
rospy.logerr("Walrus pitch orientation out of bounds==>"+str(observations[16]))
self._episode_done = True
else:
rospy.logdebug("Walrus pitch orientation in bounds==>"+str(observations[16]))
if (observations[17]>self.max_pitch_rate)|(observations[17]<self.min_pitch_rate):
rospy.logerr("Walrus angular velocity out of bounds==>"+str(observations[17]))
self._episode_done = True
else:
rospy.logdebug("Walrus pitch velocity in bounds==>"+str(observations[17]))
# Check to see if robot out of bounds
if (observations[18]>self.max_x_disp)|(observations[18]<self.min_x_disp):
rospy.logerr("Walrus x-position out of bounds==>"+str(observations[18]))
self._episode_done = True
else:
rospy.logdebug("Walrus x-position in bounds==>"+str(observations[18]))
# Now we check if it has crashed based on the imu
imu_data = self.get_imu()
linear_acceleration_magnitude = self.get_vector_magnitude(imu_data.linear_acceleration)
if linear_acceleration_magnitude > self.max_linear_acceleration:
rospy.logerr("Walrus Crashed==>"+str(linear_acceleration_magnitude)+">"+str(self.max_linear_acceleration))
self._episode_done = True
else:
rospy.logerr("Walrus DIDN'T crash ==>"+str(linear_acceleration_magnitude)+"<"+str(self.max_linear_acceleration))
return self._episode_done
def _compute_reward(self, observations, done):
# Reward for staying up / continuing the training episode
reward = self.stay_alive_reward
# Penalty for x odometry being far away from origin (off-center)
rospy.logdebug("Displacement is " + str(observations[18]) + ", reward is " + str(self.position_reward*observations[18]))
reward += self.position_reward*abs(observations[18])
# If angular velocity is below threshold, give a reward
if abs(observations[17]) < self.ang_velocity_threshold:
rospy.logdebug("Angular velocity " + str(observations[17]) + " is below threshold, giving reward.")
reward += self.ang_velocity_reward
# if not done:
if self.last_action > 0:
rospy.logdebug("Forward velocity " + str(self.last_action) + ", giving reward " + str(self.forward_velocity_reward*self.last_action))
reward += self.forward_velocity_reward*self.last_action
# else:
# reward = self.turn_reward
# else:
# reward = -1*self.end_episode_points
rospy.logdebug("reward=" + str(reward))
self.cumulated_reward += reward
rospy.logdebug("Cumulated_reward=" + str(self.cumulated_reward))
self.cumulated_steps += 1
rospy.logdebug("Cumulated_steps=" + str(self.cumulated_steps))
return reward
# Internal TaskEnv Methods
def discretize_scan_observation(self,data,new_ranges):
"""
Discards all the laser readings that are not multiple in index of new_ranges
value.
"""
self._episode_done = False
discretized_ranges = []
        mod = len(data.ranges)//new_ranges
for i, item in enumerate(data.ranges):
if (i%mod==0):
if item == float ('Inf') or numpy.isinf(item):
discretized_ranges.append(self.max_laser_value)
elif numpy.isnan(item):
discretized_ranges.append(self.min_laser_value)
else:
#discretized_ranges.append(int(item))
discretized_ranges.append(item)
# Check if collision occurred
#if (self.min_range > item > 0):
# rospy.logerr("done Validation >>> item=" + str(item)+"< "+str(self.min_range))
# self._episode_done = True
#else:
# rospy.logdebug("NOT done Validation >>> item=" + str(item)+"< "+str(self.min_range))
return discretized_ranges
def get_vector_magnitude(self, vector):
"""
        It calculates the magnitude of the given Vector3.
        This is useful for reading IMU accelerations and detecting whether there
        has been a crash.
:return:
"""
contact_force_np = numpy.array((vector.x, vector.y, vector.z))
force_magnitude = numpy.linalg.norm(contact_force_np)
return force_magnitude
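For context, a task environment like WalrusStairsEnv is normally registered with Gym and driven from a ROS node. Below is a minimal driver sketch, assuming a running ROS master and Gazebo, the walrus_stairs.yaml parameters loaded on the parameter server, and that the module is importable as openai_ros.task_envs.walrus.walrus_stairs; the env id and node name are made up for illustration.

import gym
import rospy
from gym.envs.registration import register

# Hypothetical id; openai_ros projects often register task envs through their own helpers instead.
register(
    id='WalrusStairs-v0',
    entry_point='openai_ros.task_envs.walrus.walrus_stairs:WalrusStairsEnv',
    max_episode_steps=1000,
)

rospy.init_node('walrus_stairs_random_agent', anonymous=True, log_level=rospy.WARN)
env = gym.make('WalrusStairs-v0')
obs = env.reset()
for _ in range(100):
    action = env.action_space.sample()          # length-1 array: commanded forward velocity
    obs, reward, done, info = env.step(action)  # classic 4-tuple Gym API used by this codebase
    if done:
        obs = env.reset()
env.close()

Because the action space is a one-dimensional Box, action_space.sample() returns a length-1 array holding a commanded forward velocity, which matches what _set_action reads via action[0].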
| [] |
2024-01-10 | AdrianAbeyta/openai_ros-1 | openai_ros~src~openai_ros~task_envs~walrus~walrus_test.py | import rospy
import numpy
from gym import spaces
from openai_ros.robot_envs import walrus_env
from gym.envs.registration import register
from geometry_msgs.msg import Vector3
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
from openai_ros.openai_ros_common import ROSLauncher
import os
class WalrusTestEnv(walrus_env.WalrusEnv):
def __init__(self):
"""
This Task Env is designed for having the Walrus in the closed room with columns.
It will learn how to move around without crashing.
"""
# This is the path where the simulation files, the Task and the Robot gits will be downloaded if not there
ros_ws_abspath = rospy.get_param("/walrus/ros_ws_abspath", None)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path " + ros_ws_abspath + \
" DOESNT exist, execute: mkdir -p " + ros_ws_abspath + \
"/src;cd " + ros_ws_abspath + ";catkin_make"
ROSLauncher(rospackage_name="walrus_gazebo",
launch_file_name="start_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/walrus/config",
yaml_file_name="walrus_world.yaml")
# Here we will add any init functions prior to starting the MyRobotEnv
super(WalrusTestEnv, self).__init__(ros_ws_abspath)
# Only variable needed to be set here
number_actions = rospy.get_param('/walrus/n_actions')
self.action_space = spaces.Discrete(number_actions)
# We set the reward range, which is not compulsory but here we do it.
self.reward_range = (-numpy.inf, numpy.inf)
#number_observations = rospy.get_param('/walrus/n_observations')
"""
We set the Observation space for the 6 observations
cube_observations = [
round(current_disk_roll_vel, 0),
round(y_distance, 1),
round(roll, 1),
round(pitch, 1),
round(y_linear_speed,1),
round(yaw, 1),
]
"""
# Actions and Observations
self.linear_forward_speed = rospy.get_param('/walrus/linear_forward_speed')
self.linear_turn_speed = rospy.get_param('/walrus/linear_turn_speed')
self.angular_speed = rospy.get_param('/walrus/angular_speed')
self.init_linear_forward_speed = rospy.get_param('/walrus/init_linear_forward_speed')
self.init_linear_turn_speed = rospy.get_param('/walrus/init_linear_turn_speed')
self.new_ranges = rospy.get_param('/walrus/new_ranges')
self.min_range = rospy.get_param('/walrus/min_range')
self.max_laser_value = rospy.get_param('/walrus/max_laser_value')
self.min_laser_value = rospy.get_param('/walrus/min_laser_value')
self.max_linear_aceleration = rospy.get_param('/walrus/max_linear_aceleration')
        # We create two arrays based on the range values that will be assigned
# In the discretization method.
laser_scan = self.get_laser_scan()
num_laser_readings = int(len(laser_scan.ranges)/self.new_ranges)
#high = numpy.full((num_laser_readings), self.max_laser_value)
#low = numpy.full((num_laser_readings), self.min_laser_value)
high = numpy.full((self.new_ranges), self.max_laser_value)
low = numpy.full((self.new_ranges), self.min_laser_value)
        # The observation space spans the discretized laser ranges
self.observation_space = spaces.Box(low, high)
rospy.logdebug("ACTION SPACES TYPE===>"+str(self.action_space))
rospy.logdebug("OBSERVATION SPACES TYPE===>"+str(self.observation_space))
# Rewards
self.forwards_reward = rospy.get_param("/walrus/forwards_reward")
self.turn_reward = rospy.get_param("/walrus/turn_reward")
self.end_episode_points = rospy.get_param("/walrus/end_episode_points")
self.cumulated_steps = 0.0
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
self.move_base( self.init_linear_forward_speed,
self.init_linear_turn_speed,
epsilon=0.05,
update_rate=10)
return True
def _init_env_variables(self):
"""
        Inits variables that need to be initialised each time we reset at the start
of an episode.
:return:
"""
# For Info Purposes
self.cumulated_reward = 0.0
        # Set Done to false, because it's calculated asynchronously
self._episode_done = False
def _set_action(self, action):
"""
        This set action will set the linear and angular speed of the walrus
        based on the action number given.
        :param action: The action integer that sets what movement to do next.
"""
rospy.logdebug("Start Set Action ==>"+str(action))
        # We convert the actions to speed movements to send to the parent class WalrusEnv
if action == 0: #FORWARD
linear_speed = self.linear_forward_speed
angular_speed = 0.0
self.last_action = "FORWARDS"
elif action == 1: #LEFT
linear_speed = self.linear_turn_speed
angular_speed = self.angular_speed
self.last_action = "TURN_LEFT"
elif action == 2: #RIGHT
linear_speed = self.linear_turn_speed
angular_speed = -1*self.angular_speed
self.last_action = "TURN_RIGHT"
# We tell walrus the linear and angular speed to set to execute
self.move_base(linear_speed, angular_speed, epsilon=0.05, update_rate=10)
rospy.logdebug("END Set Action ==>"+str(action))
def _get_obs(self):
"""
Here we define what sensor data defines our robots observations
        To know which variables we have access to, we need to read the
WalrusEnv API DOCS
:return:
"""
rospy.logdebug("Start Get Observation ==>")
# We get the laser scan data
laser_scan = self.get_laser_scan()
discretized_observations = self.discretize_scan_observation( laser_scan,
self.new_ranges
)
rospy.logdebug("Observations==>"+str(discretized_observations))
rospy.logdebug("END Get Observation ==>")
return discretized_observations
def _is_done(self, observations):
if self._episode_done:
rospy.logerr("Walrus is Too Close to wall==>")
else:
rospy.logwarn("Walrus is NOT close to a wall ==>")
# Now we check if it has crashed based on the imu
imu_data = self.get_imu()
linear_acceleration_magnitude = self.get_vector_magnitude(imu_data.linear_acceleration)
if linear_acceleration_magnitude > self.max_linear_aceleration:
rospy.logerr("Walrus Crashed==>"+str(linear_acceleration_magnitude)+">"+str(self.max_linear_aceleration))
self._episode_done = True
else:
rospy.logerr("Walrus DIDN'T crash ==>"+str(linear_acceleration_magnitude)+">"+str(self.max_linear_aceleration))
return self._episode_done
def _compute_reward(self, observations, done):
if not done:
if self.last_action == "FORWARDS":
reward = self.forwards_reward
else:
reward = self.turn_reward
else:
reward = -1*self.end_episode_points
rospy.logdebug("reward=" + str(reward))
self.cumulated_reward += reward
rospy.logdebug("Cumulated_reward=" + str(self.cumulated_reward))
self.cumulated_steps += 1
rospy.logdebug("Cumulated_steps=" + str(self.cumulated_steps))
return reward
# Internal TaskEnv Methods
def discretize_scan_observation(self,data,new_ranges):
"""
Discards all the laser readings that are not multiple in index of new_ranges
value.
"""
self._episode_done = False
discretized_ranges = []
        mod = len(data.ranges)//new_ranges
rospy.logdebug("data=" + str(data))
rospy.logdebug("new_ranges=" + str(new_ranges))
rospy.logdebug("mod=" + str(mod))
for i, item in enumerate(data.ranges):
if (i%mod==0):
if item == float ('Inf') or numpy.isinf(item):
discretized_ranges.append(self.max_laser_value)
elif numpy.isnan(item):
discretized_ranges.append(self.min_laser_value)
else:
discretized_ranges.append(int(item))
if (self.min_range > item > 0):
rospy.logerr("done Validation >>> item=" + str(item)+"< "+str(self.min_range))
self._episode_done = True
else:
rospy.logdebug("NOT done Validation >>> item=" + str(item)+"< "+str(self.min_range))
return discretized_ranges
def get_vector_magnitude(self, vector):
"""
        It calculates the magnitude of the given Vector3.
        This is useful for reading IMU accelerations and detecting whether there
        has been a crash.
:return:
"""
contact_force_np = numpy.array((vector.x, vector.y, vector.z))
force_magnitude = numpy.linalg.norm(contact_force_np)
return force_magnitude
| [] |
2024-01-10 | AdrianAbeyta/openai_ros-1 | openai_ros~src~openai_ros~task_envs~walrus~walrus_nav.py | import rospy
import numpy
from gym import spaces
from openai_ros.robot_envs import walrus_env
from gym.envs.registration import register
from geometry_msgs.msg import Vector3
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
from openai_ros.openai_ros_common import ROSLauncher
import os
class WalrusNavEnv(walrus_env.WalrusEnv):
def __init__(self):
"""
This Task Env is designed for 2D Walrus navigation.
It will learn how to get to the goal.
"""
# This is the path where the simulation files, the Task and the Robot gits will be downloaded if not there
ros_ws_abspath = rospy.get_param("/walrus/ros_ws_abspath", None)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path " + ros_ws_abspath + \
" DOESNT exist, execute: mkdir -p " + ros_ws_abspath + \
"/src;cd " + ros_ws_abspath + ";catkin_make"
ROSLauncher(rospackage_name="walrus_gazebo",
launch_file_name="playpen.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/walrus/config",
yaml_file_name="walrus_nav.yaml")
# Here we will add any init functions prior to starting the MyRobotEnv
super(WalrusNavEnv, self).__init__(ros_ws_abspath)
# We set the reward range, which is not compulsory but here we do it.
self.reward_range = (-numpy.inf, numpy.inf)
# Goal parameters
self.x_goal = rospy.get_param('/walrus/x_goal')
self.y_goal = rospy.get_param('/walrus/y_goal')
self.success_radius= rospy.get_param('/walrus/success_radius')
# Action parameters
self.linear_speed_max = rospy.get_param('/walrus/linear_speed_max')
self.linear_speed_min = rospy.get_param('/walrus/linear_speed_min')
self.angular_speed_max = rospy.get_param('/walrus/angular_speed_max')
self.angular_speed_min = rospy.get_param('/walrus/angular_speed_min')
self.init_linear_forward_speed = rospy.get_param('/walrus/init_linear_forward_speed')
self.init_linear_turn_speed = rospy.get_param('/walrus/init_linear_turn_speed')
# Set up action space. Potential action/commanded velocity is any value between linear_speed_min and _max
#number_actions = rospy.get_param('/walrus/n_actions')
#self.action_space = spaces.Discrete(number_actions)
self.action_space = spaces.Box(numpy.array([self.linear_speed_min, self.angular_speed_min]), numpy.array([self.linear_speed_max, self.angular_speed_max]))
# Observation parameters
self.new_ranges = rospy.get_param('/walrus/new_ranges')
self.num_scans = rospy.get_param('/walrus/num_scans')
self.min_range = rospy.get_param('/walrus/min_range')
self.max_laser_value = rospy.get_param('/walrus/max_laser_value')
self.min_laser_value = rospy.get_param('/walrus/min_laser_value')
#self.num_imu_obs = rospy.get_param('/walrus/num_imu_obs')
# self.max_pitch_orient = rospy.get_param('/walrus/max_pitch_orient')
# self.min_pitch_orient = rospy.get_param('/walrus/min_pitch_orient')
self.max_yaw_orient = rospy.get_param('/walrus/max_yaw_orient')
self.min_yaw_orient = rospy.get_param('/walrus/min_yaw_orient')
# self.max_pitch_rate = rospy.get_param('/walrus/max_pitch_rate')
# self.min_pitch_rate = rospy.get_param('/walrus/min_pitch_rate')
self.max_x_disp = rospy.get_param('/walrus/max_x_disp')
self.min_x_disp = rospy.get_param('/walrus/min_x_disp')
self.max_y_disp = rospy.get_param('/walrus/max_y_disp')
self.min_y_disp = rospy.get_param('/walrus/min_y_disp')
self.max_linear_acceleration = rospy.get_param('/walrus/max_linear_acceleration')
self.max_angular_velocity = rospy.get_param('/walrus/max_angular_velocity')
# Set up observation space
# We create two arrays based on the range values that will be assigned
# In the discretization method.
laser_scan_l = self.get_laser_scan_l()
laser_scan_r = self.get_laser_scan_r()
#num_laser_readings = int(len(laser_scan.ranges)/self.new_ranges)
# Define high and low values for the scans
high_scan = numpy.full((self.new_ranges*self.num_scans), self.max_laser_value)
low_scan = numpy.full((self.new_ranges*self.num_scans), self.min_laser_value)
# Now, define high and low values for the imu measurements in a numpy array
#high_imu = numpy.array([self.max_pitch_orient, self.max_pitch_rate])
#low_imu = numpy.array([self.min_pitch_orient, self.min_pitch_rate])
high_imu = numpy.array([self.max_yaw_orient])
low_imu = numpy.array([self.min_yaw_orient])
# Now, define high and low values for the odometry measurement in a numpy array
high_disp = numpy.array([self.max_x_disp, self.max_y_disp])
low_disp = numpy.array([self.min_x_disp, self.min_y_disp])
# Define high and low values for all observations, and create the observation space to span
high = numpy.append(high_scan, high_imu)
high = numpy.append(high, high_disp)
low = numpy.append(low_scan, low_imu)
low = numpy.append(low, low_disp)
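        # Resulting observation layout: new_ranges*num_scans discretized laser readings
        # (left scan then right scan), followed by yaw orientation and the x/y
        # displacement from odometry (same ordering as built in _get_obs).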
self.observation_space = spaces.Box(low, high)
rospy.logdebug("ACTION SPACES TYPE===>"+str(self.action_space))
rospy.logdebug("OBSERVATION SPACES TYPE===>"+str(self.observation_space))
# Reward/penalty parameters
self.stay_alive_reward = rospy.get_param("/walrus/stay_alive_reward")
self.position_reward = rospy.get_param("/walrus/position_reward")
self.goal_reached_reward = rospy.get_param("/walrus/goal_reached_reward")
self.ang_velocity_threshold = rospy.get_param("/walrus/ang_velocity_threshold")
self.ang_velocity_reward = rospy.get_param("/walrus/ang_velocity_reward")
self.forward_velocity_reward = rospy.get_param("/walrus/forward_velocity_reward")
self.cumulated_steps = 0.0
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
self.move_base( self.init_linear_forward_speed,
self.init_linear_turn_speed,
epsilon=0.05,
update_rate=10)
return True
def _init_env_variables(self):
"""
        Inits variables that need to be initialised each time we reset at the start
of an episode.
:return:
"""
# For Info Purposes
self.cumulated_reward = 0.0
# Reset Controller
#self.controllers_object.reset_controllers()
        # Set Done to false, because it's calculated asynchronously
self._episode_done = False
def _set_action(self, action):
"""
        This set action will set the linear and angular speed of the walrus
based on the action given.
:param action: The action value; i.e. commanded linear velocity.
"""
rospy.logdebug("Start Set Action ==>"+str(action))
linear_speed = action[0]
angular_speed = action[1]
self.last_action = action[0]
# We tell walrus the linear and angular speed to set to execute
self.move_base(linear_speed, angular_speed, epsilon=0.05, update_rate=10)
rospy.logdebug("END Set Action ==>"+str(action))
def _get_obs(self):
"""
Here we define what sensor data defines our robots observations
        To know which variables we have access to, we need to read the
WalrusEnv API DOCS
:return:
"""
rospy.logdebug("Start Get Observation ==>")
# We get the laser scan data
laser_scan_l = self.get_laser_scan_l()
laser_scan_r = self.get_laser_scan_r()
imu_data = self.get_imu()
odom = self.get_odom()
discretized_observations_l = self.discretize_scan_observation(laser_scan_l, self.new_ranges)
discretized_observations_r = self.discretize_scan_observation(laser_scan_r, self.new_ranges)
imu_observations = [imu_data.orientation.z]
odom_observations = [odom.pose.pose.position.x, odom.pose.pose.position.y]
obs = [] # initialize empty list
obs.extend(discretized_observations_l) # add left scan obs to obs
obs.extend(discretized_observations_r) # add right scan obs to obs
obs.extend(imu_observations) # add imu obs to obs
obs.extend(odom_observations) # add odom obs to obs
# obs.extend(new_list)
rospy.logdebug("Observations==>"+str(obs))
rospy.logdebug("END Get Observation ==>")
return obs
def _is_done(self, observations):
if self._episode_done:
rospy.logerr("Walrus is Too Close to wall==>")
else:
rospy.logwarn("Walrus is NOT close to a wall ==>")
# Check orientation and angular velocity observations for rollover
# if (observations[16]>self.max_pitch_orient)|(observations[16]<self.min_pitch_orient):
# rospy.logerr("Walrus pitch orientation out of bounds==>"+str(observations[16]))
# self._episode_done = True
# else:
# rospy.logdebug("Walrus pitch orientation in bounds==>"+str(observations[16]))
# if (observations[17]>self.max_pitch_rate)|(observations[17]<self.min_pitch_rate):
# rospy.logerr("Walrus angular velocity out of bounds==>"+str(observations[17]))
# self._episode_done = True
# else:
# rospy.logdebug("Walrus pitch velocity in bounds==>"+str(observations[17]))
# Check to see if robot out of bounds
if (observations[17]>self.max_x_disp)|(observations[17]<self.min_x_disp):
rospy.logerr("Walrus x-position out of bounds==>"+str(observations[17]))
self._episode_done = True
elif (observations[18]>self.max_y_disp)|(observations[18]<self.min_y_disp):
rospy.logerr("Walrus y-position out of bounds==>"+str(observations[18]))
self._episode_done = True
else:
rospy.logdebug("Walrus x-position in bounds==>"+str(observations[17]))
rospy.logdebug("Walrus y-position in bounds==>"+str(observations[18]))
# Now we check if it has crashed based on the imu
imu_data = self.get_imu()
linear_acceleration_magnitude = self.get_vector_magnitude(imu_data.linear_acceleration)
if linear_acceleration_magnitude > self.max_linear_acceleration:
rospy.logerr("Walrus Crashed==>"+str(linear_acceleration_magnitude)+">"+str(self.max_linear_acceleration))
self._episode_done = True
else:
rospy.logerr("Walrus DIDN'T crash ==>"+str(linear_acceleration_magnitude)+"<"+str(self.max_linear_acceleration))
return self._episode_done
def _compute_reward(self, observations, done):
# Reward for staying up / continuing the training episode
reward = self.stay_alive_reward
# Bonus for forward speed, penalty for reverse
rospy.logdebug("Linear velocity " + str(self.last_action) + ", giving reward " + str(self.forward_velocity_reward*self.last_action))
reward += self.forward_velocity_reward*self.last_action
# Reward for proximity to goal
dist_to_goal = numpy.sqrt((observations[17] - self.x_goal)**2 + (observations[18] - self.y_goal)**2)
reward += self.position_reward/dist_to_goal
rospy.logdebug("Distance to goal is " + str(dist_to_goal) + ", reward is " + str(self.position_reward/dist_to_goal))
# Reward for reaching goal
if dist_to_goal < self.success_radius:
reward += self.goal_reached_reward
self._episode_done = True
rospy.logdebug("reward=" + str(reward))
self.cumulated_reward += reward
rospy.logdebug("Cumulated_reward=" + str(self.cumulated_reward))
self.cumulated_steps += 1
rospy.logdebug("Cumulated_steps=" + str(self.cumulated_steps))
return reward
# Internal TaskEnv Methods
def discretize_scan_observation(self,data,new_ranges):
"""
Discards all the laser readings that are not multiple in index of new_ranges
value.
"""
self._episode_done = False
discretized_ranges = []
        mod = len(data.ranges)//new_ranges
for i, item in enumerate(data.ranges):
if (i%mod==0):
if item == float ('Inf') or numpy.isinf(item):
discretized_ranges.append(self.max_laser_value)
elif numpy.isnan(item):
discretized_ranges.append(self.min_laser_value)
else:
#discretized_ranges.append(int(item))
discretized_ranges.append(item)
# Check if collision occurred
if (self.min_range > item > 0):
rospy.logerr("done Validation >>> item=" + str(item)+" < "+str(self.min_range))
self._episode_done = True
else:
rospy.logdebug("NOT done Validation >>> item=" + str(item)+" > "+str(self.min_range))
return discretized_ranges
def get_vector_magnitude(self, vector):
"""
        It calculates the magnitude of the given Vector3.
        This is useful for reading IMU accelerations and detecting whether there
        has been a crash.
:return:
"""
contact_force_np = numpy.array((vector.x, vector.y, vector.z))
force_magnitude = numpy.linalg.norm(contact_force_np)
return force_magnitude
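The reward shaping in WalrusNavEnv._compute_reward above combines a constant stay-alive term, a bonus proportional to the commanded forward velocity, an inverse-distance-to-goal term, and a one-off bonus once inside the success radius. A small standalone sketch of the same arithmetic, with made-up placeholder values rather than the ones in walrus_nav.yaml:

import numpy

# Placeholder values for illustration only; the real ones are loaded from walrus_nav.yaml.
stay_alive_reward = -0.1
forward_velocity_reward = 0.5
position_reward = 2.0
goal_reached_reward = 100.0
success_radius = 0.3

def nav_reward(x, y, x_goal, y_goal, commanded_v):
    reward = stay_alive_reward
    reward += forward_velocity_reward * commanded_v
    dist_to_goal = numpy.sqrt((x - x_goal)**2 + (y - y_goal)**2)
    reward += position_reward / dist_to_goal   # like the env code, this term grows without bound near the goal
    if dist_to_goal < success_radius:
        reward += goal_reached_reward
    return reward

print(nav_reward(1.0, 0.5, 2.0, 0.0, 0.3))     # ~1.84 with the placeholder values above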
| [] |
2024-01-10 | AdrianAbeyta/openai_ros-1 | openai_ros~src~openai_ros~robot_envs~cube_single_disk_env.py | #! /usr/bin/env python
import numpy
import rospy
from openai_ros import robot_gazebo_env
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from nav_msgs.msg import Odometry
from openai_ros.openai_ros_common import ROSLauncher
class CubeSingleDiskEnv(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all CubeSingleDisk environments.
"""
def __init__(self, ros_ws_abspath):
"""Initializes a new CubeSingleDisk environment.
Args:
"""
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="moving_cube_description",
launch_file_name="put_robot_in_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Variables that we give through the constructor.
# None in this case
# Internal Vars
self.controllers_list = ['joint_state_controller',
'inertia_wheel_roll_joint_velocity_controller'
]
self.robot_name_space = "moving_cube"
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(CubeSingleDiskEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=True)
# We Start all the ROS related Subscribers and publishers
rospy.Subscriber("/moving_cube/joint_states", JointState, self._joints_callback)
rospy.Subscriber("/moving_cube/odom", Odometry, self._odom_callback)
self._roll_vel_pub = rospy.Publisher('/moving_cube/inertia_wheel_roll_joint_velocity_controller/command',
Float64, queue_size=1)
self._check_all_systems_ready()
# We pause the simulation once everything is ready
self.gazebo.pauseSim()
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
self._check_all_sensors_ready()
self._check_publishers_connection()
return True
# CubeSingleDiskEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
self._check_joint_states_ready()
self._check_odom_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_joint_states_ready(self):
self.joints = None
while self.joints is None and not rospy.is_shutdown():
try:
self.joints = rospy.wait_for_message(
"/moving_cube/joint_states", JointState, timeout=1.0)
rospy.logdebug(
"Current moving_cube/joint_states READY=>" + str(self.joints))
except:
rospy.logerr(
"Current moving_cube/joint_states not ready yet, retrying for getting joint_states")
return self.joints
def _check_odom_ready(self):
self.odom = None
while self.odom is None and not rospy.is_shutdown():
try:
self.odom = rospy.wait_for_message(
"/moving_cube/odom", Odometry, timeout=1.0)
rospy.logdebug(
"Current /moving_cube/odom READY=>" + str(self.odom))
except:
rospy.logerr(
"Current /moving_cube/odom not ready yet, retrying for getting odom")
return self.odom
def _joints_callback(self, data):
self.joints = data
def _odom_callback(self, data):
self.odom = data
def _check_publishers_connection(self):
"""
Checks that all the publishers are working
:return:
"""
rate = rospy.Rate(10) # 10hz
while self._roll_vel_pub.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug(
"No susbribers to _roll_vel_pub yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
                # This is to avoid an error when the world is reset and time goes backwards.
pass
rospy.logdebug("_roll_vel_pub Publisher Connected")
rospy.logdebug("All Publishers READY")
# Methods that the TrainingEnvironment will need.
# ----------------------------
def move_joints(self, roll_speed):
joint_speed_value = Float64()
joint_speed_value.data = roll_speed
rospy.logdebug("Single Disk Roll Velocity>>" + str(joint_speed_value))
self._roll_vel_pub.publish(joint_speed_value)
self.wait_until_roll_is_in_vel(joint_speed_value.data)
def wait_until_roll_is_in_vel(self, velocity):
rate = rospy.Rate(10)
start_wait_time = rospy.get_rostime().to_sec()
end_wait_time = 0.0
epsilon = 0.1
v_plus = velocity + epsilon
v_minus = velocity - epsilon
while not rospy.is_shutdown():
joint_data = self._check_joint_states_ready()
roll_vel = joint_data.velocity[0]
rospy.logdebug("VEL=" + str(roll_vel) +
", ?RANGE=[" + str(v_minus) + ","+str(v_plus)+"]")
are_close = (roll_vel <= v_plus) and (roll_vel > v_minus)
if are_close:
rospy.logdebug("Reached Velocity!")
end_wait_time = rospy.get_rostime().to_sec()
break
rospy.logdebug("Not there yet, keep waiting...")
rate.sleep()
delta_time = end_wait_time - start_wait_time
rospy.logdebug("[Wait Time=" + str(delta_time)+"]")
return delta_time
def get_joints(self):
return self.joints
def get_odom(self):
return self.odom
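CubeSingleDiskEnv is a robot environment rather than a task environment: concrete tasks subclass it and drive it through move_joints, get_joints and get_odom. A minimal, hypothetical subclass sketch follows (the class name, action mapping and observation choice are invented for illustration; a complete task env would also implement _set_init_pose, _init_env_variables, _is_done and _compute_reward, as the walrus task envs above do).

from openai_ros.robot_envs import cube_single_disk_env

class MyCubeTaskEnv(cube_single_disk_env.CubeSingleDiskEnv):
    def _set_action(self, action):
        # Map a discrete action index to a wheel roll speed and publish it;
        # move_joints blocks until the measured wheel velocity is close to the command.
        roll_speed = 5.0 if action == 0 else -5.0
        self.move_joints(roll_speed)

    def _get_obs(self):
        joints = self.get_joints()   # sensor_msgs/JointState from /moving_cube/joint_states
        odom = self.get_odom()       # nav_msgs/Odometry from /moving_cube/odom
        return [joints.velocity[0], odom.pose.pose.position.y]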
| [] |