date_collected | repo_name | file_name | file_contents | prompts |
---|---|---|---|---|
2024-01-10 | nikk0o046/carryoncarlos-backend | flights_function~params~destination.py | import os
import re
import time
import logging
logger = logging.getLogger(__name__)
import openai
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.environ.get('OPENAI_API_KEY')
def create_destination_params(user_request : str, user_id : str) -> dict:
"""
This function takes the user request and the user ID and returns the destination parameters.
Args:
user_request (str): The user request.
user_id (str): The user ID.
Returns:
dict: The destination parameters.
"""
start_time = time.time() # start timer to log it later
logger.debug("[UserID: %s] Creating destination parameters...", user_id)
system_template = """You are an advanced AI agent tasked with identifying as many potential destination airports as possible based on user preferences. Your response should include:
1. An initial thought process or reasoning for the task.
2. An exhaustive list of IATA airport codes matching the criteria, formatted as [XXX,YYY,ZZZ].
For ambiguous destinations, aim for at least 15 to 20 airport codes. Offering more options increases the chances of finding affordable flights for the user. Focus on final destination airports only, excluding connecting airports. Disregard any irrelevant information."""
human_template = user_request
# Construct the conversation message list
message_list = [
{"role": "system", "content": system_template},
{"role": "user", "content": human_template}
]
# Request the response from the model
response = openai.ChatCompletion.create(
model="ft:gpt-3.5-turbo-0613:personal::8H7hy8ud",
temperature=0,
messages=message_list,
)
response_content = response.choices[0].message['content']
logger.debug("[UserID: %s] Destination parameters response: %s", user_id, response_content)
# Regular expression pattern to match the IATA codes
pattern = r'\[([A-Za-z,\s]+)\]'
# Find the matches in the response content
matches = re.search(pattern, response_content)
# If a match was found
if matches:
# Get the matched string, remove spaces, and split it into a list on the commas
destination_list = matches.group(1).replace(" ", "").split(',')
# Create a destination dictionary from the response
destination_params = {
'fly_to' : ','.join(destination_list),
}
else:
destination_params = {}
logger.debug("[UserID: %s] Destination parameters created: %s", user_id, destination_params)
end_time = time.time()
elapsed_time = end_time - start_time
logger.debug("[UserID: %s] Function execution time: %s seconds", user_id, elapsed_time)
return destination_params | [
"You are an advanced AI agent tasked with identifying as many potential destination airports as possible based on user preferences. Your response should include:\n\n1. An initial thought process or reasoning for the task.\n2. An exhaustive list of IATA airport codes matching the criteria, formatted as [XXX,YYY,ZZZ].\n\nFor ambiguous destinations, aim for at least 15 to 20 airport codes. Offering more options increases the chances of finding affordable flights for the user. Focus on final destination airports only, excluding connecting airports. Disregard any irrelevant information."
] |
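A minimal usage sketch for the `create_destination_params` function above. The import path is inferred from the file path shown, the arguments are made-up values, and the call assumes an `OPENAI_API_KEY` plus access to the author's personal fine-tuned model.

```python
# Sketch only: import path inferred from flights_function/params/destination.py; both
# arguments are made-up values. Requires OPENAI_API_KEY and access to the personal
# fine-tuned model ("ft:gpt-3.5-turbo-0613:personal::8H7hy8ud") referenced in the file.
from flights_function.params.destination import create_destination_params

params = create_destination_params(
    user_request="Somewhere warm in Europe in February, beach trip, direct flights",
    user_id="TestUser",
)
print(params)  # e.g. {'fly_to': 'AGP,ALC,PMI,FAO,LPA'} or {} if no bracketed IATA list was found
```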
2024-01-10 | nikk0o046/carryoncarlos-backend | flights_function~params~time.py | import os
from datetime import datetime, timedelta
import time
import logging
logger = logging.getLogger(__name__)
import openai
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.environ.get('OPENAI_API_KEY')
def create_time_params(user_request : str, user_id : str) -> dict:
"""
This function takes the user request and the user ID and returns the time parameters.
Args:
user_request (str): The user request.
user_id (str): The user ID.
Returns:
dict: The time parameters.
"""
start_time = time.time() #start timer to log it later
logger.debug("[UserID: %s] Creating time parameters...", user_id)
current_date_unformatted = datetime.now()
current_date = f"{current_date_unformatted:%d/%m/%Y}"
#create the prompt templates
system_template = """API DOCUMENTATION:
date_from, date_to: Range for outbound flight departure (dd/mm/yyyy).
nights_in_dst_from, nights_in_dst_to: Minimum and maximum stay length at the destination (in nights). Only exclude these if the user is looking for a one-way trip. Otherwise you must make an assumption.
fly_days, ret_fly_days: List of preferred days for outbound and return flights (0=Sunday, 1=Monday, ... 6=Saturday).
fly_days_type, ret_fly_days_type: Specifies if fly_days/ret_fly_days is for an arrival or a departure flight.
If the user looks for specific dates, set date_from and date_to to a specific date, and match nights_in_dst_from and nights_in_dst_to so that the return day will be correct.
ANSWER INSTRUCTIONS:
Your task is to create parameters specified above based on user information. The parameters will be forwarded to another assistant, who uses them to search flights. Do not come up with any other parameters.
The output should include both:
1) Thought: Thinking out loud about the user's needs and the task.
2) Markdown code snippet formatted in the following schema, including the leading and trailing "\`\`\`json" and "\`\`\`":
```json
{
"key1": value1 // Define relevant values. Only use keys mentioned in the API documentation.
"key2": value2
}
```"""
#example 1
userExample1 = "Current date: 10/07/2023\nInfo: Origin: London, GB | Destination: Paris, FR | Departure: Next month's Friday| Duration: Weekend | Flights: Any"
botExample1 = """Thought: User wants to leave on a Friday next month (August) and stay for two nights.
```json
{
"date_from": "01/08/2023",
"date_to": "31/08/2023",
"fly_days": 5,
"fly_days_type": "departure",
"nights_in_dst_from": 2,
"nights_in_dst_to": 2,
"ret_fly_days": 0,
"ret_fly_days_type": "departure"
}
```"""
#example 2
userExample2 = """Current date: 01/01/2024\nInfo: Origin: San Francisco, US | Destination: Anywhere abroad | Departure: March | Duration: About a week | Flights: Any"""
botExample2 = """Thought: setting departure dates for next March, lasting about a week which translated to 6-8 nights.
```json
{
"date_from": "01/03/2024",
"date_to": "31/03/2024",
"nights_in_dst_from": 6,
"nights_in_dst_to": 8
}
```"""
#example 3
userExample3 = "Current date: 10/08/2023\nInfo: Origin: Los Angeles, US | Destination: Miami, US | Departure: October's Long Weekend | Duration: 3 days | Flights: direct"
botExample3 = """Thought: Long weekend usually means three days. Possible departure days are Thursday and Friday. Possible return flight days are Sunday or Monday.
```json
{
"date_from": "01/10/2023",
"date_to": "31/10/2023",
"nights_in_dst_from": 3,
"nights_in_dst_to": 3,
"fly_days": [4, 5],
"ret_fly_days": [0, 1]
}
```"""
#example 4
userExample4 = "Current date: 10/04/2023\nInfo: Origin: Chicago, US | Destination: Paris, FR | Departure: Summer | Flights: One-way"
botExample4 = """Thought: The user only needs an outbound flight to Paris, which should be anytime in the summer months (June, July, August). Because it is a one-way trip, nights_in_dst-parameters must be excluded.
```json
{
"date_from": "01/06/2023",
"date_to": "31/08/2023"
}
```"""
#example 5
userExample5 = "Current date: 10/07/2023\nInfo: Origin: Boston, US | Destination: Abroad | Activity: not specified | Flights: 4th of October to 8th of October"
botExample5 = """Thought: The user wants the outbound flight on 4th of October, so we set the departure window (date_from and date_to) to a single day. The return is on 8th of October, so the stay is exactly 4 nights. Therefore we set both nights_in_dst_from and nights_in_dst_to to 4.
```json
{
"date_from": "04/10/2023",
"date_to": "04/10/2023",
"nights_in_dst_from": 4,
"nights_in_dst_to": 4
}
```"""
human_template = f"Current date: {current_date}\nInfo: {user_request}"
# Construct the conversation message list
message_list = [
{"role": "system", "content": system_template},
{"role": "user", "content": userExample1},
{"role": "assistant", "content": botExample1},
{"role": "user", "content": userExample2},
{"role": "assistant", "content": botExample2},
{"role": "user", "content": userExample3},
{"role": "assistant", "content": botExample3},
{"role": "user", "content": userExample4},
{"role": "assistant", "content": botExample4},
{"role": "user", "content": userExample5},
{"role": "assistant", "content": botExample5},
{"role": "user", "content": human_template}
]
# Request the response from the model
response = openai.ChatCompletion.create(
model="gpt-4",
temperature=0,
messages=message_list,
)
response_content = response.choices[0].message['content']
logger.debug("[UserID: %s] OpenAI response content: %s", user_id, str(response_content))
# Extract the json string using regular expressions
import re
import json
json_str = re.search(r"\{.*\}", response_content, re.DOTALL).group()
# Convert the json string to a Python dictionary
logger.debug("[UserID: %s] json_str: %s", user_id, json_str)
time_params = json.loads(json_str)
time_params = adjust_dates(time_params, user_id) # Check if dates are in the past. If they are, add a year.
logger.debug("[UserID: %s] Time parameters created: %s", user_id, time_params)
end_time = time.time()
elapsed_time = end_time - start_time
logger.debug("[UserID: %s] Function execution time: %s seconds", user_id, elapsed_time)
return time_params
def adjust_dates(time_params : dict, user_id : str) -> dict:
"""
This function takes the time parameters and the user ID and adjusts the dates if they are in the past.
Args:
time_params (dict): The time parameters.
user_id (str): The user ID.
Returns:
dict: The time parameters.
"""
# Extract the dates from the parameters dictionary
date_from_str = time_params['date_from']
date_to_str = time_params['date_to']
# Parse the dates into datetime objects
date_format = "%d/%m/%Y"
date_from = datetime.strptime(date_from_str, date_format)
date_to = datetime.strptime(date_to_str, date_format)
# Get the current date
current_date = datetime.now()
# If both dates are in the past, add one year to both
if date_from < current_date and date_to < current_date:
date_from += timedelta(days=365)
date_to += timedelta(days=365)
# Update the dictionary with the new dates
time_params['date_from'] = date_from.strftime(date_format)
time_params['date_to'] = date_to.strftime(date_format)
# Log a warning
logger.warning("[UserID: %s] Both dates were in the past. Adjusted them to: %s - %s", user_id, time_params['date_from'], time_params['date_to'])
return time_params
| [
"Current date: 10/07/2023\nInfo: Origin: Boston, US | Destination: Abroad | Activity: not specified | Flights: 4th of October to 8th of October",
"Thought: The user only needs an outbound flight to Paris, which should be anytime in the summer months (June, July, August). Because it is a one-way trip, nights_in_dst-parameters must be excluded. \n```json\n{\n \"date_from\": \"01/06/2023\",\n \"date_to\": \"31/08/2023\"\n}\n```",
"Current date: 10/04/2023\nInfo: Origin: Chicago, US | Destination: Paris, FR | Departure: Summer | Flights: One-way",
"Current date: 10/08/2023\nInfo: Origin: Los Angeles, US | Destination: Miami, US | Departure: October's Long Weekend | Duration: 3 days | Flights: direct",
"Current date: current_dated11e058d-5813-4e8b-a11d-9da5d2376f5b\nInfo: PLACEHOLDER",
"Thought: The user wants the outbound flight on 4th of October, so we set the departure window (date_from and date_to) to a single day. The return is on 8th of October, so the stay is exactly 4 nights. Therefore we set both nights_in_dst_from and nights_in_dst_to to 4.\n```json\n{\n \"date_from\": \"04/10/2023\",\n \"date_to\": \"04/10/2023\",\n \"nights_in_dst_from\": 4,\n \"nights_in_dst_to\": 4\n}\n```",
"Thought: Long weekend usually means three days. Possible departure days are Thursday and Friday. Possible return flight days are Sunday or Monday.\n```json\n{\n \"date_from\": \"01/10/2023\",\n \"date_to\": \"31/10/2023\",\n \"nights_in_dst_from\": 3,\n \"nights_in_dst_to\": 3,\n \"fly_days\": [4, 5],\n \"ret_fly_days\": [0, 1]\n}\n```",
"Thought: User wants to leave on a Friday next month (August) and stay for two nights.\n```json\n{\n \"date_from\": \"01/08/2023\",\n \"date_to\": \"31/08/2023\",\n \"fly_days\": 5,\n \"fly_days_type\": \"departure\",\n \"nights_in_dst_from\": 2,\n \"nights_in_dst_to\": 2,\n \"ret_fly_days\": 0,\n \"ret_fly_days_type\": \"departure\"\n}\n```",
"API DOCUMENTATION:\ndate_from, date_to: Range for outbound flight departure (dd/mm/yyyy). \n\nnights_in_dst_from, nights_in_dst_to: Minimum and maximum stay length at the destination (in nights). Only exclude these if the user is looking for a one-way trip. Otherwise you must make an assumption.\n\nfly_days, ret_fly_days: List of preferred days for outbound and return flights (0=Sunday, 1=Monday, ... 6=Saturday). \n\nfly_days_type, ret_fly_days_type: Specifies if fly_days/ret_fly_days is for an arrival or a departure flight.\n\nIf the user looks for specific dates, set date_from and date_to to a specific date, and match nights_in_dst_from and nights_in_dst_to so that the return day will be correct.\n\nANSWER INSTRUCTIONS:\nYour task is to create parameters specified above based on user information. The parameters will be forwarded to another assistant, who uses them to search flights. Do not come up with any other parameters.\nThe output should include both:\n1) Thought: Thinking out loud about the user's needs and the task.\n2) Markdown code snippet formatted in the following schema, including the leading and trailing \"\\`\\`\\`json\" and \"\\`\\`\\`\":\n\n```json\n{\n \"key1\": value1 // Define relevant values. Only use keys mentioned in the API documentation. \n \"key2\": value2\n}\n```",
"Current date: f\"{current_date_unformatted:%d/%m/%Y}\nInfo: PLACEHOLDER",
"Current date: 10/07/2023\nInfo: Origin: London, GB | Destination: Paris, FR | Departure: Next month's Friday| Duration: Weekend | Flights: Any",
"Thought: setting departure dates for next March, lasting about a week which translated to 6-8 nights.\n```json\n{\n\"date_from\": \"01/03/2024\",\n\"date_to\": \"31/03/2024\",\n\"nights_in_dst_from\": 6,\n\"nights_in_dst_to\": 8\n}\n```",
"Current date: 01/01/2024\nInfo: Origin: San Francisco, US | Destination: Anywhere abroad | Departure: March | Duration: About a week | Flights: Any"
] |
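A quick illustrative call to the `adjust_dates` helper defined above; the import path is inferred from the file path and the parameters are made up.

```python
# Illustration only: import path inferred from flights_function/params/time.py; inputs are made up.
from flights_function.params.time import adjust_dates

params = {"date_from": "01/03/2023", "date_to": "08/03/2023",
          "nights_in_dst_from": 6, "nights_in_dst_to": 8}

# When both dates are already in the past, adjust_dates shifts them forward by 365 days
# (a fixed offset rather than a calendar year) and logs a warning; otherwise they are unchanged.
print(adjust_dates(params, user_id="TestUser"))
```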
2024-01-10 | nikk0o046/carryoncarlos-backend | flights_function~fine-tuning~fine_tuning.py | """
This script uses the OpenAI API to fine-tune the model. It is important to make sure that the data is formatted correctly before training the model.
"""
import os
import openai
from dotenv import load_dotenv, find_dotenv
# Load .env file
load_dotenv(find_dotenv())
# Setup API Key and Configuration
openai.api_key = os.getenv("OPENAI_API_KEY")
# Set the paths for your files
train_data_path = "./data/dest_training_data.jsonl"
validation_data_path = "./data/dest_validation_data.jsonl"
# Upload Data Files
train_file = openai.File.create(
file=open(train_data_path, "rb"),
purpose='fine-tune'
)
print(f"Train file uploaded with ID: {train_file.id}")
validation_file = openai.File.create(
file=open(validation_data_path, "rb"),
purpose='fine-tune'
)
print(f"Validation file uploaded with ID: {validation_file.id}")
# Create Fine-Tuning Job
fine_tuning_job = openai.FineTuningJob.create(
training_file=train_file.id,
validation_file=validation_file.id,
model="gpt-3.5-turbo"
)
# Print fine-tuning job details
print(f"Fine-tuning job created with ID: {fine_tuning_job.id}")
# Retrieve the state of a fine-tune
print(openai.FineTuningJob.retrieve(id=fine_tuning_job.id))
# List up to 10 events from a fine-tuning job
print(openai.FineTuningJob.list_events(id=fine_tuning_job.id, limit=10))
| [] |
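The script prints the job state once and exits. If needed, the same pre-1.0 `openai` SDK calls it already uses can be polled until the job finishes; a rough sketch with a placeholder job ID follows.

```python
# Rough sketch, assuming the same pre-1.0 openai SDK as above; the job ID is a placeholder
# for the fine_tuning_job.id printed by the script.
import time
import openai

job_id = "ftjob-xxxxxxxx"  # placeholder

while True:
    job = openai.FineTuningJob.retrieve(id=job_id)
    print(f"Status: {job.status}")
    if job.status in ("succeeded", "failed", "cancelled"):
        print(f"Fine-tuned model: {job.fine_tuned_model}")  # populated only on success
        break
    time.sleep(60)  # poll once a minute
```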
2024-01-10 | nikk0o046/carryoncarlos-backend | flights_function~input_parser.py | import os
import time
import logging
import openai
logger = logging.getLogger(__name__)
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.environ.get('OPENAI_API_KEY')
def input_parser(user_request : str, selectedCityID : str, user_id : str) -> str:
"""
This function takes the user request and the selected city ID and user ID and returns the query in a more structured and concise format.
Args:
user_request (str): The user request.
selectedCityID (str): The selected city ID.
user_id (str): The user ID.
Example:
>>> input_parser("I want to go to barcelona for the weekend on 12th of january. Outbound flight departure after 4pm. Direct flights.", "madrid_es", "TestUser")
Origin: Madrid, ES | Destination: Barcelona, ES | Departure: 12.1. after 4pm | Duration: Weekend | Flights: Direct
Returns:
str: The parsed input.
"""
start_time = time.time()
logger.debug("[UserID: %s] Parsing user_request", user_id)
# Create the prompt templates
system_template = """INSTRUCTIONS:
You're an intelligent AI agent. You are going to get a user's description about a flight they are looking for. Your job is to formulate user requests in a structured and concise manner, so that another trained AI flight search system can handle the request more easily.
Example of the desired output: "Origin: Stockholm, SE | Destination: Somewhere in Eastern Europe | Departure: March 2024 | Duration: Weekend | Flights: max 1 layover"
"""
#example 1
userExample1 = """Origin: madrid_es
User request: I want to go to barcelona for the weekend on 12th of january. Outbound flight departure after 4pm. Direct flights."""
botExample1 = """Origin: Madrid, ES | Destination: Barcelona, ES | Departure: 12.1. after 4pm | Duration: Weekend | Flights: Direct"""
#example 2
userExample2 = """Origin: munich_de
User request: Two-week trip to somewhere in South America. Departure in January."""
botExample2 = """Origin: Munich, DE | Destination: South America | Departure: January | Duration: 2 weeks"""
human_template = f"Origin: {selectedCityID}\nUser request: {user_request}"
# Construct the conversation message list
message_list = [
{"role": "system", "content": system_template},
{"role": "user", "content": userExample1},
{"role": "assistant", "content": botExample1},
{"role": "user", "content": userExample2},
{"role": "assistant", "content": botExample2},
{"role": "user", "content": human_template}
]
# Request the response from the model
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
temperature=0,
messages=message_list,
)
response_content = response.choices[0].message['content']
logger.debug("[UserID: %s] Parsed input: %s", user_id, response_content)
end_time = time.time()
elapsed_time = end_time - start_time
logger.debug("[UserID: %s] Function execution time: %s seconds", user_id, elapsed_time)
return response_content
| [
"Origin: Madrid, ES | Destination: Barcelona, ES | Departure: 12.1. after 4pm | Duration: Weekend | Flights: Direct",
"Origin: Munich, DE | Destination: South America | Departure: January | Duration: 2 weeks",
"Origin: munich_de\nUser request: Two-week trip to somewhere in South America. Departure in January.",
"INSTRUCTIONS:\nYou're an intelligent AI agent. You are going to get user's description about a flight they are looking for. Your job is to formulate user requests in a structured and concise manner, so that another trained AI flight search system can handle the request more easily.\nExample ot the desired output: \"Origin: Stockholm, SE | Destination: Somewhere in Eastern Europe | Departure: March 2024 | Duration: Weekend | Flights: max 1 layover\"\n",
"Origin: madrid_es\nUser request: I want to go to barcelona for the weekend on 12th of january. Outbound flight departure after 4pm. Direct flights.",
"Origin: PLACEHOLDER\nUser request: PLACEHOLDER"
] |
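The three helpers shown so far (`input_parser`, `create_destination_params`, `create_time_params`) appear designed to feed one another. The sketch below is a hypothetical orchestration, not code from the repository; the import paths and the merging step are assumptions.

```python
# Hypothetical pipeline sketch: import paths are inferred from the file paths above and the
# chaining/merging is an assumption, not code taken from the repository.
from flights_function.input_parser import input_parser
from flights_function.params.destination import create_destination_params
from flights_function.params.time import create_time_params

user_request = "Two-week trip to somewhere in South America, departure in January."  # made up
parsed = input_parser(user_request, selectedCityID="munich_de", user_id="TestUser")

destination_params = create_destination_params(parsed, user_id="TestUser")
time_params = create_time_params(parsed, user_id="TestUser")

search_params = {**destination_params, **time_params}  # merged parameters for a flight-search call
print(search_params)
```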
2024-01-10 | BastinFlorian/RAG-Chatbot-with-Confluence | src~load_db.py | import sys
import logging
import shutil
sys.path.append('../')
from config import (CONFLUENCE_SPACE_NAME, CONFLUENCE_SPACE_KEY,
CONFLUENCE_USERNAME, CONFLUENCE_API_KEY, PERSIST_DIRECTORY)
from langchain.document_loaders import ConfluenceLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.text_splitter import MarkdownHeaderTextSplitter
class DataLoader():
"""Create, load, save the DB using the confluence Loader"""
def __init__(
self,
confluence_url=CONFLUENCE_SPACE_NAME,
username=CONFLUENCE_USERNAME,
api_key=CONFLUENCE_API_KEY,
space_key=CONFLUENCE_SPACE_KEY,
persist_directory=PERSIST_DIRECTORY
):
self.confluence_url = confluence_url
self.username = username
self.api_key = api_key
self.space_key = space_key
self.persist_directory = persist_directory
def load_from_confluence_loader(self):
"""Load HTML files from Confluence"""
loader = ConfluenceLoader(
url=self.confluence_url,
username=self.username,
api_key=self.api_key
)
docs = loader.load(
space_key=self.space_key,
# include_attachments=True,
)
return docs
def split_docs(self, docs):
# Markdown
headers_to_split_on = [
("#", "Titre 1"),
("##", "Sous-titre 1"),
("###", "Sous-titre 2"),
]
markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on=headers_to_split_on)
# Split based on markdown and add original metadata
md_docs = []
for doc in docs:
md_doc = markdown_splitter.split_text(doc.page_content)
for i in range(len(md_doc)):
md_doc[i].metadata = md_doc[i].metadata | doc.metadata
md_docs.extend(md_doc)
# RecursiveTextSplitter
# Chunk size big enough
splitter = RecursiveCharacterTextSplitter(
chunk_size=512,
chunk_overlap=20,
separators=["\n\n", "\n", "(?<=\. )", " ", ""]
)
splitted_docs = splitter.split_documents(md_docs)
return splitted_docs
def save_to_db(self, splitted_docs, embeddings):
"""Save chunks to Chroma DB"""
from langchain.vectorstores import Chroma
db = Chroma.from_documents(splitted_docs, embeddings, persist_directory=self.persist_directory)
db.persist()
return db
def load_from_db(self, embeddings):
"""Loader chunks to Chroma DB"""
from langchain.vectorstores import Chroma
db = Chroma(
persist_directory=self.persist_directory,
embedding_function=embeddings
)
return db
def set_db(self, embeddings):
"""Create, save, and load db"""
try:
shutil.rmtree(self.persist_directory)
except Exception as e:
logging.warning("%s", e)
# Load docs
docs = self.load_from_confluence_loader()
# Split Docs
splitted_docs = self.split_docs(docs)
# Save to DB
db = self.save_to_db(splitted_docs, embeddings)
return db
def get_db(self, embeddings):
"""Create, save, and load db"""
db = self.load_from_db(embeddings)
return db
if __name__ == "__main__":
pass
| [] |
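A minimal sketch of driving `DataLoader` end to end. It assumes the code is run from the repository's `src/` directory with a filled-in `config.py`, and it uses `OpenAIEmbeddings` as the embedding function, which is a choice made here rather than something `load_db.py` mandates.

```python
# Minimal sketch: assumes a valid config.py (Confluence credentials, PERSIST_DIRECTORY) and
# OPENAI_API_KEY for the embeddings; run from the repository's src/ directory.
from langchain.embeddings import OpenAIEmbeddings
from load_db import DataLoader

embeddings = OpenAIEmbeddings()
loader = DataLoader()           # falls back to the constants imported from config
db = loader.set_db(embeddings)  # wipes PERSIST_DIRECTORY, reloads Confluence, splits and persists

# Later runs can reuse the persisted store instead of re-crawling Confluence:
db = loader.get_db(embeddings)
print(db.similarity_search("How do I request access to Confluence?", k=2))  # made-up query
```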
2024-01-10 | BastinFlorian/RAG-Chatbot-with-Confluence | src~help_desk.py | import sys
import load_db
import collections
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
from langchain.embeddings import OpenAIEmbeddings
class HelpDesk():
"""Create the necessary objects to create a QARetrieval chain"""
def __init__(self, new_db=True):
self.new_db = new_db
self.template = self.get_template()
self.embeddings = self.get_embeddings()
self.llm = self.get_llm()
self.prompt = self.get_prompt()
if self.new_db:
self.db = load_db.DataLoader().set_db(self.embeddings)
else:
self.db = load_db.DataLoader().get_db(self.embeddings)
self.retriever = self.db.as_retriever()
self.retrieval_qa_chain = self.get_retrieval_qa()
def get_template(self):
template = """
Given these text extracts:
-----
{context}
-----
Please answer the following question:
Question: {question}
Helpful Answer:
"""
return template
def get_prompt(self) -> PromptTemplate:
prompt = PromptTemplate(
template=self.template,
input_variables=["context", "question"]
)
return prompt
def get_embeddings(self) -> OpenAIEmbeddings:
embeddings = OpenAIEmbeddings()
return embeddings
def get_llm(self):
llm = OpenAI()
return llm
def get_retrieval_qa(self):
chain_type_kwargs = {"prompt": self.prompt}
qa = RetrievalQA.from_chain_type(
llm=self.llm,
chain_type="stuff",
retriever=self.retriever,
return_source_documents=True,
chain_type_kwargs=chain_type_kwargs
)
return qa
def retrieval_qa_inference(self, question, verbose=True):
query = {"query": question}
answer = self.retrieval_qa_chain(query)
sources = self.list_top_k_sources(answer, k=2)
if verbose:
print(sources)
return answer["result"], sources
def list_top_k_sources(self, answer, k=2):
sources = [
f'[{res.metadata["title"]}]({res.metadata["source"]})'
for res in answer["source_documents"]
]
if sources:
k = min(k, len(sources))
distinct_sources = list(zip(*collections.Counter(sources).most_common()))[0][:k]
distinct_sources_str = " \n- ".join(distinct_sources)
if len(distinct_sources) == 1:
return f"Voici la source qui pourrait t'être utile : \n- {distinct_sources_str}"
elif len(distinct_sources) > 1:
return f"Voici {len(distinct_sources)} sources qui pourraient t'être utiles : \n- {distinct_sources_str}"
else:
return "Désolé je n'ai trouvé aucune ressource pour répondre à ta question"
| [
"question",
"context",
"\n Given this text extracts:\n -----\n {context}\n -----\n Please answer with to the following question:\n Question: {question}\n Helpful Answer:\n "
] |
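A short sketch of querying the `HelpDesk` class above; the question is made up, and an `OPENAI_API_KEY` plus the Confluence configuration used by `load_db.DataLoader` are assumed.

```python
# Sketch only: requires OPENAI_API_KEY and the Confluence settings expected by load_db.DataLoader.
from help_desk import HelpDesk

model = HelpDesk(new_db=True)          # new_db=False would reuse the persisted Chroma store instead
question = "How do I set up the VPN?"  # made-up question
answer, sources = model.retrieval_qa_inference(question, verbose=False)
print(answer)
print(sources)  # markdown list of up to two distinct Confluence sources
```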
2024-01-10 | sylar003/ChuanhuChatGPT | modules~base_model.py | from __future__ import annotations
from typing import TYPE_CHECKING, List
import logging
import json
import commentjson as cjson
import os
import sys
import requests
import urllib3
from tqdm import tqdm
import colorama
from duckduckgo_search import ddg
import asyncio
import aiohttp
from enum import Enum
from .presets import *
from .llama_func import *
from .utils import *
from . import shared
from .config import retrieve_proxy
class ModelType(Enum):
Unknown = -1
OpenAI = 0
ChatGLM = 1
LLaMA = 2
@classmethod
def get_type(cls, model_name: str):
model_type = None
model_name_lower = model_name.lower()
if "gpt" in model_name_lower:
model_type = ModelType.OpenAI
elif "chatglm" in model_name_lower:
model_type = ModelType.ChatGLM
elif "llama" in model_name_lower or "alpaca" in model_name_lower:
model_type = ModelType.LLaMA
else:
model_type = ModelType.Unknown
return model_type
class BaseLLMModel:
def __init__(
self,
model_name,
system_prompt="",
temperature=1.0,
top_p=1.0,
n_choices=1,
stop=None,
max_generation_token=None,
presence_penalty=0,
frequency_penalty=0,
logit_bias=None,
user="",
) -> None:
self.history = []
self.all_token_counts = []
self.model_name = model_name
self.model_type = ModelType.get_type(model_name)
try:
self.token_upper_limit = MODEL_TOKEN_LIMIT[model_name]
except KeyError:
self.token_upper_limit = DEFAULT_TOKEN_LIMIT
self.interrupted = False
self.system_prompt = system_prompt
self.api_key = None
self.need_api_key = False
self.single_turn = False
self.temperature = temperature
self.top_p = top_p
self.n_choices = n_choices
self.stop_sequence = stop
self.max_generation_token = None
self.presence_penalty = presence_penalty
self.frequency_penalty = frequency_penalty
self.logit_bias = logit_bias
self.user_identifier = user
def get_answer_stream_iter(self):
"""stream predict, need to be implemented
conversations are stored in self.history, with the most recent question, in OpenAI format
should return a generator, each time give the next word (str) in the answer
"""
logging.warning("stream predict not implemented, using at once predict instead")
response, _ = self.get_answer_at_once()
yield response
def get_answer_at_once(self):
"""predict at once, need to be implemented
conversations are stored in self.history, with the most recent question, in OpenAI format
Should return:
the answer (str)
total token count (int)
"""
logging.warning("at once predict not implemented, using stream predict instead")
response_iter = self.get_answer_stream_iter()
count = 0
for response in response_iter:
count += 1
return response, sum(self.all_token_counts) + count
def billing_info(self):
"""get billing infomation, inplement if needed"""
logging.warning("billing info not implemented, using default")
return BILLING_NOT_APPLICABLE_MSG
def count_token(self, user_input):
"""get token count from input, implement if needed"""
logging.warning("token count not implemented, using default")
return len(user_input)
def stream_next_chatbot(self, inputs, chatbot, fake_input=None, display_append=""):
def get_return_value():
return chatbot, status_text
status_text = "开始实时传输回答……"
if fake_input:
chatbot.append((fake_input, ""))
else:
chatbot.append((inputs, ""))
user_token_count = self.count_token(inputs)
self.all_token_counts.append(user_token_count)
logging.debug(f"输入token计数: {user_token_count}")
stream_iter = self.get_answer_stream_iter()
for partial_text in stream_iter:
chatbot[-1] = (chatbot[-1][0], partial_text + display_append)
self.all_token_counts[-1] += 1
status_text = self.token_message()
yield get_return_value()
if self.interrupted:
self.recover()
break
self.history.append(construct_assistant(partial_text))
def next_chatbot_at_once(self, inputs, chatbot, fake_input=None, display_append=""):
if fake_input:
chatbot.append((fake_input, ""))
else:
chatbot.append((inputs, ""))
if fake_input is not None:
user_token_count = self.count_token(fake_input)
else:
user_token_count = self.count_token(inputs)
self.all_token_counts.append(user_token_count)
ai_reply, total_token_count = self.get_answer_at_once()
self.history.append(construct_assistant(ai_reply))
if fake_input is not None:
self.history[-2] = construct_user(fake_input)
chatbot[-1] = (chatbot[-1][0], ai_reply + display_append)
if fake_input is not None:
self.all_token_counts[-1] += count_token(construct_assistant(ai_reply))
else:
self.all_token_counts[-1] = total_token_count - sum(self.all_token_counts)
status_text = self.token_message()
return chatbot, status_text
def predict(
self,
inputs,
chatbot,
stream=False,
use_websearch=False,
files=None,
reply_language="中文",
should_check_token_count=True,
): # repetition_penalty, top_k
from llama_index.indices.vector_store.base_query import GPTVectorStoreIndexQuery
from llama_index.indices.query.schema import QueryBundle
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.chat_models import ChatOpenAI
from llama_index import (
GPTSimpleVectorIndex,
ServiceContext,
LangchainEmbedding,
OpenAIEmbedding,
)
logging.info(
"输入为:" + colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL
)
if should_check_token_count:
yield chatbot + [(inputs, "")], "开始生成回答……"
if reply_language == "跟随问题语言(不稳定)":
reply_language = "the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch."
old_inputs = None
display_reference = []
limited_context = False
if files:
limited_context = True
old_inputs = inputs
msg = "加载索引中……(这可能需要几分钟)"
logging.info(msg)
yield chatbot + [(inputs, "")], msg
index = construct_index(self.api_key, file_src=files)
assert index is not None, "索引构建失败"
msg = "索引构建完成,获取回答中……"
if local_embedding or self.model_type != ModelType.OpenAI:
embed_model = LangchainEmbedding(HuggingFaceEmbeddings())
else:
embed_model = OpenAIEmbedding()
logging.info(msg)
yield chatbot + [(inputs, "")], msg
with retrieve_proxy():
prompt_helper = PromptHelper(
max_input_size=4096,
num_output=5,
max_chunk_overlap=20,
chunk_size_limit=600,
)
from llama_index import ServiceContext
service_context = ServiceContext.from_defaults(
prompt_helper=prompt_helper, embed_model=embed_model
)
query_object = GPTVectorStoreIndexQuery(
index.index_struct,
service_context=service_context,
similarity_top_k=5,
vector_store=index._vector_store,
docstore=index._docstore,
)
query_bundle = QueryBundle(inputs)
nodes = query_object.retrieve(query_bundle)
reference_results = [n.node.text for n in nodes]
reference_results = add_source_numbers(reference_results, use_source=False)
display_reference = add_details(reference_results)
display_reference = "\n\n" + "".join(display_reference)
inputs = (
replace_today(PROMPT_TEMPLATE)
.replace("{query_str}", inputs)
.replace("{context_str}", "\n\n".join(reference_results))
.replace("{reply_language}", reply_language)
)
elif use_websearch:
limited_context = True
search_results = ddg(inputs, max_results=5)
old_inputs = inputs
reference_results = []
for idx, result in enumerate(search_results):
logging.debug(f"搜索结果{idx + 1}:{result}")
domain_name = urllib3.util.parse_url(result["href"]).host
reference_results.append([result["body"], result["href"]])
display_reference.append(
f"{idx+1}. [{domain_name}]({result['href']})\n"
)
reference_results = add_source_numbers(reference_results)
display_reference = "\n\n" + "".join(display_reference)
inputs = (
replace_today(WEBSEARCH_PTOMPT_TEMPLATE)
.replace("{query}", inputs)
.replace("{web_results}", "\n\n".join(reference_results))
.replace("{reply_language}", reply_language)
)
else:
display_reference = ""
if (
self.need_api_key and
self.api_key is None
and not shared.state.multi_api_key
):
status_text = STANDARD_ERROR_MSG + NO_APIKEY_MSG
logging.info(status_text)
chatbot.append((inputs, ""))
if len(self.history) == 0:
self.history.append(construct_user(inputs))
self.history.append("")
self.all_token_counts.append(0)
else:
self.history[-2] = construct_user(inputs)
yield chatbot + [(inputs, "")], status_text
return
elif len(inputs.strip()) == 0:
status_text = STANDARD_ERROR_MSG + NO_INPUT_MSG
logging.info(status_text)
yield chatbot + [(inputs, "")], status_text
return
if self.single_turn:
self.history = []
self.all_token_counts = []
self.history.append(construct_user(inputs))
try:
if stream:
logging.debug("使用流式传输")
iter = self.stream_next_chatbot(
inputs,
chatbot,
fake_input=old_inputs,
display_append=display_reference,
)
for chatbot, status_text in iter:
yield chatbot, status_text
else:
logging.debug("不使用流式传输")
chatbot, status_text = self.next_chatbot_at_once(
inputs,
chatbot,
fake_input=old_inputs,
display_append=display_reference,
)
yield chatbot, status_text
except Exception as e:
status_text = STANDARD_ERROR_MSG + str(e)
yield chatbot, status_text
if len(self.history) > 1 and self.history[-1]["content"] != inputs:
logging.info(
"回答为:"
+ colorama.Fore.BLUE
+ f"{self.history[-1]['content']}"
+ colorama.Style.RESET_ALL
)
if limited_context:
# self.history = self.history[-4:]
# self.all_token_counts = self.all_token_counts[-2:]
self.history = []
self.all_token_counts = []
max_token = self.token_upper_limit - TOKEN_OFFSET
if sum(self.all_token_counts) > max_token and should_check_token_count:
count = 0
while (
sum(self.all_token_counts)
> self.token_upper_limit * REDUCE_TOKEN_FACTOR
and sum(self.all_token_counts) > 0
):
count += 1
del self.all_token_counts[0]
del self.history[:2]
logging.info(status_text)
status_text = f"为了防止token超限,模型忘记了早期的 {count} 轮对话"
yield chatbot, status_text
def retry(
self,
chatbot,
stream=False,
use_websearch=False,
files=None,
reply_language="中文",
):
logging.debug("重试中……")
if len(self.history) == 0:
yield chatbot, f"{STANDARD_ERROR_MSG}上下文是空的"
return
inputs = self.history[-2]["content"]
del self.history[-2:]
self.all_token_counts.pop()
iter = self.predict(
inputs,
chatbot,
stream=stream,
use_websearch=use_websearch,
files=files,
reply_language=reply_language,
)
for x in iter:
yield x
logging.debug("重试完毕")
# def reduce_token_size(self, chatbot):
# logging.info("开始减少token数量……")
# chatbot, status_text = self.next_chatbot_at_once(
# summarize_prompt,
# chatbot
# )
# max_token_count = self.token_upper_limit * REDUCE_TOKEN_FACTOR
# num_chat = find_n(self.all_token_counts, max_token_count)
# logging.info(f"previous_token_count: {self.all_token_counts}, keeping {num_chat} chats")
# chatbot = chatbot[:-1]
# self.history = self.history[-2*num_chat:] if num_chat > 0 else []
# self.all_token_counts = self.all_token_counts[-num_chat:] if num_chat > 0 else []
# msg = f"保留了最近{num_chat}轮对话"
# logging.info(msg)
# logging.info("减少token数量完毕")
# return chatbot, msg + "," + self.token_message(self.all_token_counts if len(self.all_token_counts) > 0 else [0])
def interrupt(self):
self.interrupted = True
def recover(self):
self.interrupted = False
def set_token_upper_limit(self, new_upper_limit):
self.token_upper_limit = new_upper_limit
print(f"token上限设置为{new_upper_limit}")
def set_temperature(self, new_temperature):
self.temperature = new_temperature
def set_top_p(self, new_top_p):
self.top_p = new_top_p
def set_n_choices(self, new_n_choices):
self.n_choices = new_n_choices
def set_stop_sequence(self, new_stop_sequence: str):
new_stop_sequence = new_stop_sequence.split(",")
self.stop_sequence = new_stop_sequence
def set_max_tokens(self, new_max_tokens):
self.max_generation_token = new_max_tokens
def set_presence_penalty(self, new_presence_penalty):
self.presence_penalty = new_presence_penalty
def set_frequency_penalty(self, new_frequency_penalty):
self.frequency_penalty = new_frequency_penalty
def set_logit_bias(self, logit_bias):
logit_bias = logit_bias.split()
bias_map = {}
encoding = tiktoken.get_encoding("cl100k_base")
for line in logit_bias:
word, bias_amount = line.split(":")
if word:
for token in encoding.encode(word):
bias_map[token] = float(bias_amount)
self.logit_bias = bias_map
def set_user_identifier(self, new_user_identifier):
self.user_identifier = new_user_identifier
def set_system_prompt(self, new_system_prompt):
self.system_prompt = new_system_prompt
def set_key(self, new_access_key):
self.api_key = new_access_key.strip()
msg = f"API密钥更改为了{hide_middle_chars(self.api_key)}"
logging.info(msg)
return msg
def set_single_turn(self, new_single_turn):
self.single_turn = new_single_turn
def reset(self):
self.history = []
self.all_token_counts = []
self.interrupted = False
return [], self.token_message([0])
def delete_first_conversation(self):
if self.history:
del self.history[:2]
del self.all_token_counts[0]
return self.token_message()
def delete_last_conversation(self, chatbot):
if len(chatbot) > 0 and STANDARD_ERROR_MSG in chatbot[-1][1]:
msg = "由于包含报错信息,只删除chatbot记录"
chatbot.pop()
return chatbot, self.history
if len(self.history) > 0:
self.history.pop()
self.history.pop()
if len(chatbot) > 0:
msg = "删除了一组chatbot对话"
chatbot.pop()
if len(self.all_token_counts) > 0:
msg = "删除了一组对话的token计数记录"
self.all_token_counts.pop()
msg = "删除了一组对话"
return chatbot, msg
def token_message(self, token_lst=None):
if token_lst is None:
token_lst = self.all_token_counts
token_sum = 0
for i in range(len(token_lst)):
token_sum += sum(token_lst[: i + 1])
return f"Token 计数: {sum(token_lst)},本次对话累计消耗了 {token_sum} tokens"
def save_chat_history(self, filename, chatbot, user_name):
if filename == "":
return
if not filename.endswith(".json"):
filename += ".json"
return save_file(filename, self.system_prompt, self.history, chatbot, user_name)
def export_markdown(self, filename, chatbot, user_name):
if filename == "":
return
if not filename.endswith(".md"):
filename += ".md"
return save_file(filename, self.system_prompt, self.history, chatbot, user_name)
def load_chat_history(self, filename, chatbot, user_name):
logging.debug(f"{user_name} 加载对话历史中……")
if type(filename) != str:
filename = filename.name
try:
with open(os.path.join(HISTORY_DIR, user_name, filename), "r") as f:
json_s = json.load(f)
try:
if type(json_s["history"][0]) == str:
logging.info("历史记录格式为旧版,正在转换……")
new_history = []
for index, item in enumerate(json_s["history"]):
if index % 2 == 0:
new_history.append(construct_user(item))
else:
new_history.append(construct_assistant(item))
json_s["history"] = new_history
logging.info(new_history)
except:
# no chat history to convert
pass
logging.debug(f"{user_name} 加载对话历史完毕")
self.history = json_s["history"]
return filename, json_s["system"], json_s["chatbot"]
except FileNotFoundError:
logging.warning(f"{user_name} 没有找到对话历史文件,不执行任何操作")
return filename, self.system_prompt, chatbot
| [] |
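`BaseLLMModel` leaves `get_answer_at_once` / `get_answer_stream_iter` to concrete model classes. The toy subclass below is a sketch, not one of ChuanhuChatGPT's real model classes, and assumes it runs inside the project so that the wildcard imports in `base_model.py` (presets, utils, etc.) resolve.

```python
# Toy subclass sketch (not a real ChuanhuChatGPT model): echoes the last user message.
# Note: predict() imports llama_index / langchain at call time, so those packages must be installed.
from modules.base_model import BaseLLMModel

class EchoModel(BaseLLMModel):
    def __init__(self):
        # An unrecognised model name maps to ModelType.Unknown and the default token limit.
        super().__init__(model_name="echo-model")

    def get_answer_at_once(self):
        last_user_message = self.history[-1]["content"]
        answer = "Echo: " + last_user_message
        return answer, self.count_token(answer)

model = EchoModel()
chatbot = []
# predict() is a generator; the last yielded pair holds the final chatbot state and status text.
for chatbot, status_text in model.predict("hello", chatbot, stream=False):
    pass
print(chatbot[-1], status_text)
```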
2024-01-10 | pkuserc/ChatGPT_for_IE | Code~RC~SemEval_AskChatGPT.py | import re
import json
import time
import openai
from tqdm import tqdm
rc_semeval_path = "./prompts.json"
openai.api_key = "sk-"
data = list()
bar = tqdm(json.load(open(rc_semeval_path, "r")))
for line in bar:
# ------------------ #
# Open
# ------------------ #
while True:
try:
# 0. ChatGPT Pred
bar.set_description("0. O Pred")
open_pred_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["open"]["open_pred"]}
]
)["choices"][0]["message"]["content"]
open_pred_chatgpt_ans_processed = eval(open_pred_chatgpt_ans)["label"]
break
except:
time.sleep(3)
while True:
try:
# 1. Confidence (Conf)
bar.set_description("1. O Conf")
open_conf_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["open"]["open_pred"]},
{"role": "assistant", "content": open_pred_chatgpt_ans},
{"role": "user", "content": line["open"]["open_conf"]},
]
)["choices"][0]["message"]["content"]
open_conf_chatgpt_ans = int(re.search("\d+", open_conf_chatgpt_ans).group())
break
except:
time.sleep(3)
while True:
try:
# 2. Reason
bar.set_description("2. O Reason")
open_reason_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["open"]["open_pred"]},
{"role": "assistant", "content": open_pred_chatgpt_ans},
{"role": "user", "content": line["open"]["open_reason"]},
]
)["choices"][0]["message"]["content"]
break
except:
time.sleep(3)
while True:
try:
# 3. Whether the reason is reasonable (Reasonable)
bar.set_description("3. O Reasonable")
open_reasonable_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["open"]["open_pred"]},
{"role": "assistant", "content": open_pred_chatgpt_ans},
{"role": "user", "content": line["open"]["open_reason"]},
{"role": "assistant", "content": open_reason_chatgpt_ans},
{"role": "user", "content": line["open"]["open_reasonable"]},
]
)["choices"][0]["message"]["content"]
if "yes" in open_reasonable_chatgpt_ans.lower():
open_reasonable_chatgpt_ans = 1
break
elif "no" in open_reasonable_chatgpt_ans.lower():
open_reasonable_chatgpt_ans = 0
break
else:
continue
except:
time.sleep(3)
while True:
try:
# 4. Whether the reason is fictitious (Fictitious)
bar.set_description("4. O Fictitious")
open_fictitious_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["open"]["open_pred"]},
{"role": "assistant", "content": open_pred_chatgpt_ans},
{"role": "user", "content": line["open"]["open_reason"]},
{"role": "assistant", "content": open_reason_chatgpt_ans},
{"role": "user", "content": line["open"]["open_fictitious"]},
]
)["choices"][0]["message"]["content"]
if "yes" in open_fictitious_chatgpt_ans.lower():
open_fictitious_chatgpt_ans = 1
break
elif "no" in open_fictitious_chatgpt_ans.lower():
open_fictitious_chatgpt_ans = 0
break
else:
continue
except:
time.sleep(3)
# ------------------ #
# Close
# ------------------ #
while True:
try:
# 5. ChatGPT Pred
bar.set_description("5. C Pred")
close_pred_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["close"]["close_pred"]}
]
)["choices"][0]["message"]["content"]
close_pred_chatgpt_ans_processed = eval(close_pred_chatgpt_ans)["label"]
break
except:
time.sleep(3)
while True:
try:
# 6. Confidence (Conf)
bar.set_description("6. C Conf")
close_conf_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["close"]["close_pred"]},
{"role": "assistant", "content": close_pred_chatgpt_ans},
{"role": "user", "content": line["close"]["close_conf"]},
]
)["choices"][0]["message"]["content"]
close_conf_chatgpt_ans = int(re.search("\d+", close_conf_chatgpt_ans).group())
break
except:
time.sleep(3)
while True:
try:
# 7. Reason
bar.set_description("7. C Reason")
close_reason_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["close"]["close_pred"]},
{"role": "assistant", "content": close_pred_chatgpt_ans},
{"role": "user", "content": line["close"]["close_reason"]},
]
)["choices"][0]["message"]["content"]
break
except:
time.sleep(3)
while True:
try:
# 8. Whether the reason is reasonable (Reasonable)
bar.set_description("8. C Reasonable")
close_reasonable_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["close"]["close_pred"]},
{"role": "assistant", "content": close_pred_chatgpt_ans},
{"role": "user", "content": line["close"]["close_reason"]},
{"role": "assistant", "content": close_reason_chatgpt_ans},
{"role": "user", "content": line["close"]["close_reasonable"]},
]
)["choices"][0]["message"]["content"]
if "yes" in close_reasonable_chatgpt_ans.lower():
close_reasonable_chatgpt_ans = 1
break
elif "no" in close_reasonable_chatgpt_ans.lower():
close_reasonable_chatgpt_ans = 0
break
else:
continue
except:
time.sleep(3)
while True:
try:
# 9. Whether the reason is fictitious (Fictitious)
bar.set_description("9. C Fictitious")
close_fictitious_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["close"]["close_pred"]},
{"role": "assistant", "content": close_pred_chatgpt_ans},
{"role": "user", "content": line["close"]["close_reason"]},
{"role": "assistant", "content": close_reason_chatgpt_ans},
{"role": "user", "content": line["close"]["close_fictitious"]},
]
)["choices"][0]["message"]["content"]
if "yes" in close_fictitious_chatgpt_ans.lower():
close_fictitious_chatgpt_ans = 1
break
elif "no" in close_fictitious_chatgpt_ans.lower():
close_fictitious_chatgpt_ans = 0
break
else:
continue
except:
time.sleep(3)
while True:
try:
Response_top = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["close"]["close_top3_top5"]}
]
)["choices"][0]["message"]["content"]
Response_re = re.search('\{(.+?)\}', Response_top).group()
Top3 = eval(Response_re)["three"]
Top5 = eval(Response_re)["five"]
break
except:
time.sleep(1)
answer = {
# 1. Basic information
"idx": line["info"]["idx"],
"sentence": line["info"]["sentence"],
"headEntity": line["info"]["head_entity"],
"tailEntity": line["info"]["tail_entity"],
"GroundTruth": line["info"]["label"],
# 2. Answers in the open setting
"isOpenCorrect": -1,
"Open": open_pred_chatgpt_ans_processed,
"OConf": open_conf_chatgpt_ans,
"Reason4O": open_reason_chatgpt_ans,
"ifR4OAuto": open_reasonable_chatgpt_ans,
"ifR4OManual": -1,
"ifR4OFicAuto": open_fictitious_chatgpt_ans,
"ifR4OFicManual": -1,
# 3. Answers in the closed setting
"isCloseCorrect": 1 if close_pred_chatgpt_ans_processed == line["info"]["label"].split("/")[-1] else 0,
"Closed": close_pred_chatgpt_ans_processed,
"CConf": close_conf_chatgpt_ans,
"Reason4C": close_reason_chatgpt_ans,
"ifR4CAuto": close_reasonable_chatgpt_ans,
"ifR4CManual": -1,
"ifR4CFicAuto": close_fictitious_chatgpt_ans,
"ifR4CFicManual": -1
}
data.append(answer)
with open("./outputs.json", "w") as f:
f.write(json.dumps(data, indent=4))
| [
"open_pred",
"close_fictitious",
"open_conf",
"open",
"close_reason",
"close_top3_top5",
"close_pred",
"open_reason",
"close_conf",
"open_fictitious",
"close_reasonable",
"open_reasonable"
] |
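Each question above is wrapped in its own `while True` / `try` / `except` / `sleep(3)` block. The helper below sketches how that repeated pattern could be factored out; it is a possible refactoring, not code from the repository.

```python
# Possible refactoring sketch (not from the repository): one retry wrapper around the
# ChatCompletion call that every question block above repeats by hand.
import time
import openai

def ask_chatgpt(messages, postprocess=lambda content: content, retry_delay=3):
    """Call gpt-3.5-turbo until both the request and the post-processing succeed."""
    while True:
        try:
            content = openai.ChatCompletion.create(
                model="gpt-3.5-turbo",
                messages=messages,
            )["choices"][0]["message"]["content"]
            return content, postprocess(content)
        except Exception:
            time.sleep(retry_delay)

# Example: the "open prediction" step for one record `line` from prompts.json.
# raw, label = ask_chatgpt(
#     [{"role": "user", "content": line["open"]["open_pred"]}],
#     postprocess=lambda c: eval(c)["label"],
# )
```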
2024-01-10 | pkuserc/ChatGPT_for_IE | Code~ET~BBN_AskChatGPT.py | import re
import json
import time
import openai
from tqdm import tqdm
et_bbn_path = "../../../2.prompt/ET/BBN/prompts.json"
openai.api_key = "sk-"
data = list()
bar = tqdm(json.load(open(et_bbn_path, "r")))
for line in bar:
# ------------------ #
# Open
# ------------------ #
while True:
try:
# 0. ChatGPT Pred
bar.set_description("0. O Pred")
open_pred_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["open"]["open_pred"]}
]
)["choices"][0]["message"]["content"]
open_pred_chatgpt_ans_processed = eval(open_pred_chatgpt_ans)["label"]
break
except:
time.sleep(3)
while True:
try:
# 1. Confidence (Conf)
bar.set_description("1. O Conf")
open_conf_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["open"]["open_pred"]},
{"role": "assistant", "content": open_pred_chatgpt_ans},
{"role": "user", "content": line["open"]["open_conf"]},
]
)["choices"][0]["message"]["content"]
open_conf_chatgpt_ans = int(re.search("\d+", open_conf_chatgpt_ans).group())
break
except:
time.sleep(3)
while True:
try:
# 2. Reason
bar.set_description("2. O Reason")
open_reason_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["open"]["open_pred"]},
{"role": "assistant", "content": open_pred_chatgpt_ans},
{"role": "user", "content": line["open"]["open_reason"]},
]
)["choices"][0]["message"]["content"]
break
except:
time.sleep(3)
while True:
try:
# 3. Whether the reason is reasonable (Reasonable)
bar.set_description("3. O Reasonable")
open_reasonable_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["open"]["open_pred"]},
{"role": "assistant", "content": open_pred_chatgpt_ans},
{"role": "user", "content": line["open"]["open_reason"]},
{"role": "assistant", "content": open_reason_chatgpt_ans},
{"role": "user", "content": line["open"]["open_reasonable"]},
]
)["choices"][0]["message"]["content"]
if "yes" in open_reasonable_chatgpt_ans.lower():
open_reasonable_chatgpt_ans = 1
break
elif "no" in open_reasonable_chatgpt_ans.lower():
open_reasonable_chatgpt_ans = 0
break
else:
continue
except:
time.sleep(3)
while True:
try:
# 4. Whether the reason is fictitious (Fictitious)
bar.set_description("4. O Fictitious")
open_fictitious_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["open"]["open_pred"]},
{"role": "assistant", "content": open_pred_chatgpt_ans},
{"role": "user", "content": line["open"]["open_reason"]},
{"role": "assistant", "content": open_reason_chatgpt_ans},
{"role": "user", "content": line["open"]["open_fictitious"]},
]
)["choices"][0]["message"]["content"]
if "yes" in open_fictitious_chatgpt_ans.lower():
open_fictitious_chatgpt_ans = 1
break
elif "no" in open_fictitious_chatgpt_ans.lower():
open_fictitious_chatgpt_ans = 0
break
else:
continue
except:
time.sleep(3)
# ------------------ #
# Close
# ------------------ #
while True:
try:
# 5. ChatGPT Pred
bar.set_description("5. C Pred")
close_pred_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["close"]["close_pred"]}
]
)["choices"][0]["message"]["content"]
close_pred_chatgpt_ans_processed = eval(close_pred_chatgpt_ans)["label"]
break
except:
time.sleep(3)
while True:
try:
# 6. Confidence (Conf)
bar.set_description("6. C Conf")
close_conf_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["close"]["close_pred"]},
{"role": "assistant", "content": close_pred_chatgpt_ans},
{"role": "user", "content": line["close"]["close_conf"]},
]
)["choices"][0]["message"]["content"]
close_conf_chatgpt_ans = int(re.search("\d+", close_conf_chatgpt_ans).group())
break
except:
time.sleep(3)
while True:
try:
# 7. Reason
bar.set_description("7. C Reason")
close_reason_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["close"]["close_pred"]},
{"role": "assistant", "content": close_pred_chatgpt_ans},
{"role": "user", "content": line["close"]["close_reason"]},
]
)["choices"][0]["message"]["content"]
break
except:
time.sleep(3)
while True:
try:
# 8. Whether the reason is reasonable (Reasonable)
bar.set_description("8. C Reasonable")
close_reasonable_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["close"]["close_pred"]},
{"role": "assistant", "content": close_pred_chatgpt_ans},
{"role": "user", "content": line["close"]["close_reason"]},
{"role": "assistant", "content": close_reason_chatgpt_ans},
{"role": "user", "content": line["close"]["close_reasonable"]},
]
)["choices"][0]["message"]["content"]
if "yes" in close_reasonable_chatgpt_ans.lower():
close_reasonable_chatgpt_ans = 1
break
elif "no" in close_reasonable_chatgpt_ans.lower():
close_reasonable_chatgpt_ans = 0
break
else:
continue
except:
time.sleep(3)
while True:
try:
# 9. Whether the reason is fictitious (Fictitious)
bar.set_description("9. C Fictitious")
close_fictitious_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["close"]["close_pred"]},
{"role": "assistant", "content": close_pred_chatgpt_ans},
{"role": "user", "content": line["close"]["close_reason"]},
{"role": "assistant", "content": close_reason_chatgpt_ans},
{"role": "user", "content": line["close"]["close_fictitious"]},
]
)["choices"][0]["message"]["content"]
if "yes" in close_fictitious_chatgpt_ans.lower():
close_fictitious_chatgpt_ans = 1
break
elif "no" in close_fictitious_chatgpt_ans.lower():
close_fictitious_chatgpt_ans = 0
break
else:
continue
except:
time.sleep(3)
while True:
try:
Response_top = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["close"]["close_top3_top5"]}
]
)["choices"][0]["message"]["content"]
Response_re = re.search('\{(.+?)\}', Response_top).group()
Top3 = eval(Response_re)["three"]
Top5 = eval(Response_re)["five"]
break
except:
time.sleep(1)
answer = {
# 1. Basic information
"idx": line["info"]["idx"],
"sentIdx": line["info"]["sentid"],
"sentence": line["info"]["sentence"],
"EntityMention": line["info"]["entity"],
"GroundTruth": line["info"]["label"],
# 2. Answers in the open setting
"isOpenCorrect": -1,
"Open": open_pred_chatgpt_ans_processed,
"OConf": open_conf_chatgpt_ans,
"Reason4O": open_reason_chatgpt_ans,
"ifR4OAuto": open_reasonable_chatgpt_ans,
"ifR4OManual": -1,
"ifR4OFicAuto": open_fictitious_chatgpt_ans,
"ifR4OFicManual": -1,
# 3. Answers in the closed setting
"isCloseCorrect": 1 if close_pred_chatgpt_ans_processed == line["info"]["label"].split("/")[-1] else 0,
"Closed": close_pred_chatgpt_ans_processed,
"CConf": close_conf_chatgpt_ans,
"Reason4C": close_reason_chatgpt_ans,
"ifR4CAuto": close_reasonable_chatgpt_ans,
"ifR4CManual": -1,
"ifR4CFicAuto": close_fictitious_chatgpt_ans,
"ifR4CFicManual": -1
}
data.append(answer)
with open("./outputs.json", "w") as f:
f.write(json.dumps(data, indent=4))
| [
"open_pred",
"close_fictitious",
"open_conf",
"open",
"close_reason",
"close_top3_top5",
"close_pred",
"open_reason",
"close_conf",
"open_fictitious",
"close_reasonable",
"open_reasonable"
] |
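The records written to `outputs.json` already carry an `isCloseCorrect` flag and a `CConf` confidence, so the closed-setting results can be tallied directly. A small evaluation sketch follows; only the file name comes from the script above, the rest is an assumption about how one might inspect it.

```python
# Small evaluation sketch over the outputs.json written above. Open-setting accuracy is not
# computed here because isOpenCorrect is left at -1 for manual annotation.
import json

with open("./outputs.json", "r") as f:
    records = json.load(f)

closed_correct = sum(r["isCloseCorrect"] for r in records)
print(f"Closed-setting accuracy: {closed_correct / len(records):.2%} over {len(records)} examples")

# Average self-reported confidence, split by whether the closed prediction was right.
for flag in (1, 0):
    confidences = [r["CConf"] for r in records if r["isCloseCorrect"] == flag]
    if confidences:
        print(f"isCloseCorrect={flag}: mean CConf = {sum(confidences) / len(confidences):.1f}")
```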
2024-01-10 | pkuserc/ChatGPT_for_IE | Code~NER~CoNLL_AskChatGPT.py | import re
import json
import time
import openai
from tqdm import tqdm
ner_conll_path = "./prompts.json"
openai.api_key = "sk-"
label_set = ["PER", "LOC", "ORG", "MISC"]
cnt = 0
data = list()
bar = tqdm(json.load(open(ner_conll_path, "r")))
for line in bar:
# ------------------ #
# Pred
# ------------------ #
while True:
try:
# 0. ChatGPT Open Pred
bar.set_description("0. O Pred")
open_pred_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["open"]["open_pred"]}
]
)["choices"][0]["message"]["content"]
open_pred_chatgpt_ans_processed = {
list(item.keys())[0]: list(item.values())[0] for item in eval(
re.search(r'\[(.*?)\]', open_pred_chatgpt_ans).group(0)
)
}
break
except:
time.sleep(3)
while True:
try:
# 5. ChatGPT Close Pred
bar.set_description("5. C Pred")
close_pred_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["close"]["close_pred"]}
]
)["choices"][0]["message"]["content"]
close_pred_chatgpt_ans_processed = {
list(item.keys())[0]: list(item.values())[0] for item in eval(
re.search(r'\[(.*?)\]', close_pred_chatgpt_ans).group(0)
)
}
break
except:
time.sleep(3)
for entity, pred_close in close_pred_chatgpt_ans_processed.items():
if entity in open_pred_chatgpt_ans_processed.keys():
pred_open = open_pred_chatgpt_ans_processed[entity]
else:
pred_open = "O"
if entity in line["info"]["label"].keys():
ground_truth = line["info"]["label"][entity]
else:
ground_truth = "O"
# ------------------ #
# Open
# ------------------ #
while True:
try:
# 1. Confidence (Conf)
bar.set_description("1. O Conf")
open_conf_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": "Question: What is the type of entity '%s' in the sentence '%s'? Answer me in json format like { \"label\": the entity type } without any additional things including your explanations or notes." % (entity, line["info"]["sentence"])},
{"role": "assistant", "content": "{\"label\": \"%s\"}" % pred_open},
{"role": "user", "content": line["open"]["open_conf"]},
]
)["choices"][0]["message"]["content"]
open_conf_chatgpt_ans = int(re.search("\d+", open_conf_chatgpt_ans).group())
break
except:
time.sleep(3)
while True:
try:
# 2. Reason
bar.set_description("2. O Reason")
open_reason_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": "Question: What is the type of entity '%s' in the sentence '%s'? Answer me in json format like { \"label\": the entity type } without any additional things including your explanations or notes." % (entity, line["info"]["sentence"])},
{"role": "assistant", "content": "{\"label\": \"%s\"}" % pred_open},
{"role": "user", "content": line["open"]["open_reason"]},
]
)["choices"][0]["message"]["content"]
break
except:
time.sleep(3)
while True:
try:
                # 3. Reasonable or not
bar.set_description("3. O Reasonable")
open_reasonable_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": "Question: What is the type of entity '%s' in the sentence '%s'? Answer me in json format like { \"label\": the entity type } without any additional things including your explanations or notes." % (entity, line["info"]["sentence"])},
{"role": "assistant", "content": "{\"label\": \"%s\"}" % pred_open},
{"role": "user", "content": line["open"]["open_reason"]},
{"role": "assistant", "content": open_reason_chatgpt_ans},
{"role": "user", "content": line["open"]["open_reasonable"]},
]
)["choices"][0]["message"]["content"]
if "yes" in open_reasonable_chatgpt_ans.lower():
open_reasonable_chatgpt_ans = 1
break
elif "no" in open_reasonable_chatgpt_ans.lower():
open_reasonable_chatgpt_ans = 0
break
else:
continue
except:
time.sleep(3)
while True:
try:
                # 4. Fictitious or not
bar.set_description("4. O Fictitious")
open_fictitious_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": "Question: What is the type of entity '%s' in the sentence '%s'? Answer me in json format like { \"label\": the entity type } without any additional things including your explanations or notes." % (entity, line["info"]["sentence"])},
{"role": "assistant", "content": "{\"label\": \"%s\"}" % pred_open},
{"role": "user", "content": line["open"]["open_reason"]},
{"role": "assistant", "content": open_reason_chatgpt_ans},
{"role": "user", "content": line["open"]["open_fictitious"]},
]
)["choices"][0]["message"]["content"]
if "yes" in open_fictitious_chatgpt_ans.lower():
open_fictitious_chatgpt_ans = 1
break
elif "no" in open_fictitious_chatgpt_ans.lower():
open_fictitious_chatgpt_ans = 0
break
else:
continue
except:
time.sleep(3)
# ------------------ #
# Close
# ------------------ #
while True:
try:
                # 6. Confidence (Conf)
bar.set_description("6. C Conf")
close_conf_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": "Given label set: %s\nQuestion: What is the type of entity '%s' in the sentence '%s', and which category from the given label set would you use to describe this entity type? Answer me in json format like { \"label\": you choosed in the given label set } without any additional things including your notes and explanations!" % (label_set, entity, line["info"]["sentence"])},
{"role": "assistant", "content": "{\"label\": \"%s\"}" % pred_close},
{"role": "user", "content": line["close"]["close_conf"]},
]
)["choices"][0]["message"]["content"]
close_conf_chatgpt_ans = int(re.search("\d+", close_conf_chatgpt_ans).group())
break
except:
time.sleep(3)
while True:
try:
                # 7. Reason
bar.set_description("7. C Reason")
close_reason_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": "Given label set: %s\nQuestion: What is the type of entity '%s' in the sentence '%s', and which category from the given label set would you use to describe this entity type? Answer me in json format like { \"label\": you choosed in the given label set } without any additional things including your notes and explanations!" % (label_set, entity, line["info"]["sentence"])},
{"role": "assistant", "content": "{\"label\": \"%s\"}" % pred_close},
{"role": "user", "content": line["close"]["close_reason"]},
]
)["choices"][0]["message"]["content"]
break
except:
time.sleep(3)
while True:
try:
                # 8. Reasonable or not
bar.set_description("8. C Reasonable")
close_reasonable_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": "Given label set: %s\nQuestion: What is the type of entity '%s' in the sentence '%s', and which category from the given label set would you use to describe this entity type? Answer me in json format like { \"label\": you choosed in the given label set } without any additional things including your notes and explanations!" % (label_set, entity, line["info"]["sentence"])},
{"role": "assistant", "content": "{\"label\": \"%s\"}" % pred_close},
{"role": "user", "content": line["close"]["close_reason"]},
{"role": "assistant", "content": close_reason_chatgpt_ans},
{"role": "user", "content": line["close"]["close_reasonable"]},
]
)["choices"][0]["message"]["content"]
if "yes" in close_reasonable_chatgpt_ans.lower():
close_reasonable_chatgpt_ans = 1
break
elif "no" in close_reasonable_chatgpt_ans.lower():
close_reasonable_chatgpt_ans = 0
break
else:
continue
except:
time.sleep(3)
while True:
try:
                # 9. Fictitious or not
bar.set_description("9. C Fictitious")
close_fictitious_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": "Given label set: %s\nQuestion: What is the type of entity '%s' in the sentence '%s', and which category from the given label set would you use to describe this entity type? Answer me in json format like { \"label\": you choosed in the given label set } without any additional things including your notes and explanations!" % (label_set, entity, line["info"]["sentence"])},
{"role": "assistant", "content": "{\"label\": \"%s\"}" % pred_close},
{"role": "user", "content": line["close"]["close_reason"]},
{"role": "assistant", "content": close_reason_chatgpt_ans},
{"role": "user", "content": line["close"]["close_fictitious"]},
]
)["choices"][0]["message"]["content"]
if "yes" in close_fictitious_chatgpt_ans.lower():
close_fictitious_chatgpt_ans = 1
break
elif "no" in close_fictitious_chatgpt_ans.lower():
close_fictitious_chatgpt_ans = 0
break
else:
continue
except:
time.sleep(3)
answer = {
            # 1. Basic info
"idx": cnt,
"sentIdx": line["info"]["sentid"],
"sentence": line["info"]["sentence"],
"EntityMention": entity,
"GroundTruth": ground_truth,
            # 2. Answers in the Open setting
"isOpenCorrect": -1,
"Open": pred_open,
"OConf": open_conf_chatgpt_ans,
"Reason4O": open_reason_chatgpt_ans,
"ifR4OAuto": open_reasonable_chatgpt_ans,
"ifR4OManual": -1,
"ifR4OFicAuto": open_fictitious_chatgpt_ans,
"ifR4OFicManual": -1,
            # 2. Answers in the Close setting
"isCloseCorrect": 1 if ground_truth == pred_close else 0,
"Closed": pred_close,
"CConf": close_conf_chatgpt_ans,
"Reason4C": close_reason_chatgpt_ans,
"ifR4CAuto": close_reasonable_chatgpt_ans,
"ifR4CManual": -1,
"ifR4CFicAuto": close_fictitious_chatgpt_ans,
"ifR4CFicManual": -1
}
cnt += 1
data.append(answer)
with open("./outputs.json", "w") as f:
f.write(json.dumps(data, indent=4))
| [
"open_fictitious",
"open_pred",
"close_fictitious",
"open_conf",
"open",
"close_reason",
"Given label set: %s\nQuestion: What is the type of entity '%s' in the sentence '%s', and which category from the given label set would you use to describe this entity type? Answer me in json format like { \"label\": you choosed in the given label set } without any additional things including your notes and explanations!",
"close_pred",
"close_conf",
"{\"label\": \"PLACEHOLDER\"}",
"sentence",
"open_reason",
"close_reasonable",
"Question: What is the type of entity '%s' in the sentence '%s'? Answer me in json format like { \"label\": the entity type } without any additional things including your explanations or notes.",
"open_reasonable",
"info"
] |
2024-01-10 | pkuserc/ChatGPT_for_IE | Code~RE~ACE2005_AskChatGPT.py | import re
import json
import time
import openai
from tqdm import tqdm
re_ace2005_path = "./prompts.json"
openai.api_key = "sk-"
data = list()
bar = tqdm(json.load(open(re_ace2005_path, "r")))
for line in bar:
# ------------------ #
# Open
# ------------------ #
while True:
try:
            # 0. ChatGPT Pred
bar.set_description("0. O Pred")
open_pred_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["open"]["open_pred"]}
]
)["choices"][0]["message"]["content"]
open_pred_chatgpt_ans_processed = eval(open_pred_chatgpt_ans)["label"]
break
except:
time.sleep(3)
while True:
try:
            # 1. Confidence (Conf)
bar.set_description("1. O Conf")
open_conf_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["open"]["open_pred"]},
{"role": "assistant", "content": open_pred_chatgpt_ans},
{"role": "user", "content": line["open"]["open_conf"]},
]
)["choices"][0]["message"]["content"]
open_conf_chatgpt_ans = int(re.search("\d+", open_conf_chatgpt_ans).group())
break
except:
time.sleep(3)
while True:
try:
            # 2. Reason
bar.set_description("2. O Reason")
open_reason_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["open"]["open_pred"]},
{"role": "assistant", "content": open_pred_chatgpt_ans},
{"role": "user", "content": line["open"]["open_reason"]},
]
)["choices"][0]["message"]["content"]
break
except:
time.sleep(3)
while True:
try:
            # 3. Reasonable or not
bar.set_description("3. O Reasonable")
open_reasonable_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["open"]["open_pred"]},
{"role": "assistant", "content": open_pred_chatgpt_ans},
{"role": "user", "content": line["open"]["open_reason"]},
{"role": "assistant", "content": open_reason_chatgpt_ans},
{"role": "user", "content": line["open"]["open_reasonable"]},
]
)["choices"][0]["message"]["content"]
if "yes" in open_reasonable_chatgpt_ans.lower():
open_reasonable_chatgpt_ans = 1
break
elif "no" in open_reasonable_chatgpt_ans.lower():
open_reasonable_chatgpt_ans = 0
break
else:
continue
except:
time.sleep(3)
while True:
try:
            # 4. Fictitious or not
bar.set_description("4. O Fictitious")
open_fictitious_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["open"]["open_pred"]},
{"role": "assistant", "content": open_pred_chatgpt_ans},
{"role": "user", "content": line["open"]["open_reason"]},
{"role": "assistant", "content": open_reason_chatgpt_ans},
{"role": "user", "content": line["open"]["open_fictitious"]},
]
)["choices"][0]["message"]["content"]
if "yes" in open_fictitious_chatgpt_ans.lower():
open_fictitious_chatgpt_ans = 1
break
elif "no" in open_fictitious_chatgpt_ans.lower():
open_fictitious_chatgpt_ans = 0
break
else:
continue
except:
time.sleep(3)
# ------------------ #
# Close
# ------------------ #
close_pred_chatgpt_ans = "{\"label\": \"%s\"}" % line["close_pred"]
close_pred_chatgpt_ans_processed = eval(close_pred_chatgpt_ans)["label"]
while True:
try:
            # 6. Confidence (Conf)
bar.set_description("6. C Conf")
close_conf_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["close"]["close_pred"]},
{"role": "assistant", "content": close_pred_chatgpt_ans},
{"role": "user", "content": line["close"]["close_conf"]},
]
)["choices"][0]["message"]["content"]
close_conf_chatgpt_ans = int(re.search("\d+", close_conf_chatgpt_ans).group())
break
except:
time.sleep(3)
while True:
try:
            # 7. Reason
bar.set_description("7. C Reason")
close_reason_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["close"]["close_pred"]},
{"role": "assistant", "content": close_pred_chatgpt_ans},
{"role": "user", "content": line["close"]["close_reason"]},
]
)["choices"][0]["message"]["content"]
break
except:
time.sleep(3)
while True:
try:
            # 8. Reasonable or not
bar.set_description("8. C Reasonable")
close_reasonable_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["close"]["close_pred"]},
{"role": "assistant", "content": close_pred_chatgpt_ans},
{"role": "user", "content": line["close"]["close_reason"]},
{"role": "assistant", "content": close_reason_chatgpt_ans},
{"role": "user", "content": line["close"]["close_reasonable"]},
]
)["choices"][0]["message"]["content"]
if "yes" in close_reasonable_chatgpt_ans.lower():
close_reasonable_chatgpt_ans = 1
break
elif "no" in close_reasonable_chatgpt_ans.lower():
close_reasonable_chatgpt_ans = 0
break
else:
continue
except:
time.sleep(3)
while True:
try:
            # 9. Fictitious or not
bar.set_description("9. C Fictitious")
close_fictitious_chatgpt_ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["close"]["close_pred"]},
{"role": "assistant", "content": close_pred_chatgpt_ans},
{"role": "user", "content": line["close"]["close_reason"]},
{"role": "assistant", "content": close_reason_chatgpt_ans},
{"role": "user", "content": line["close"]["close_fictitious"]},
]
)["choices"][0]["message"]["content"]
if "yes" in close_fictitious_chatgpt_ans.lower():
close_fictitious_chatgpt_ans = 1
break
elif "no" in close_fictitious_chatgpt_ans.lower():
close_fictitious_chatgpt_ans = 0
break
else:
continue
except:
time.sleep(3)
while True:
try:
Response_top = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": line["close"]["close_top3_top5"]}
]
)["choices"][0]["message"]["content"]
Response_re = re.search('\{(.+?)\}', Response_top).group()
Top3 = eval(Response_re)["three"]
Top5 = eval(Response_re)["five"]
break
except:
time.sleep(1)
answer = {
        # 1. Basic info
"idx": line["info"]["idx"],
"sentence": line["info"]["sentence"],
"headEntity": line["info"]["head_entity"],
"tailEntity": line["info"]["tail_entity"],
"GroundTruth": line["info"]["label"],
        # 2. Answers in the Open setting
"isOpenCorrect": -1,
"Open": open_pred_chatgpt_ans_processed,
"OConf": open_conf_chatgpt_ans,
"Reason4O": open_reason_chatgpt_ans,
"ifR4OAuto": open_reasonable_chatgpt_ans,
"ifR4OManual": -1,
"ifR4OFicAuto": open_fictitious_chatgpt_ans,
"ifR4OFicManual": -1,
        # 2. Answers in the Close setting
"isCloseCorrect": 1 if close_pred_chatgpt_ans_processed == line["info"]["label"].split("/")[-1] else 0,
"Closed": close_pred_chatgpt_ans_processed,
"CConf": close_conf_chatgpt_ans,
"Reason4C": close_reason_chatgpt_ans,
"ifR4CAuto": close_reasonable_chatgpt_ans,
"ifR4CManual": -1,
"ifR4CFicAuto": close_fictitious_chatgpt_ans,
"ifR4CFicManual": -1
}
data.append(answer)
with open("./outputs.json", "w") as f:
f.write(json.dumps(data, indent=4)) | [
"open_pred",
"close_fictitious",
"open_conf",
"open",
"close_reason",
"close_top3_top5",
"close_pred",
"close_conf",
"open_reason",
"open_fictitious",
"close_reasonable",
"open_reasonable"
] |
2024-01-10 | pkuserc/ChatGPT_for_IE | Code~call_api.py | import json
import time
import openai
import os
from tqdm import tqdm
import argparse
openai.api_key = ""
result_dir = './Output'
def call_openai(args):
saved_dir = os.path.join(result_dir, args.task_dataset)
if not os.path.exists(saved_dir):
os.makedirs(saved_dir)
with open(f"./Prompt/{args.task_dataset}.json", "r", encoding="utf-8") as f:
lines = json.load(f)
bar = tqdm(lines.items())
for idx, prompt in bar:
bar.set_description("Running")
processed_idx = [f[:-5] for f in os.listdir(saved_dir)]
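        # Only query prompts whose responses have not been saved yet (response files are named "<idx>.json")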
if idx not in processed_idx:
while True:
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": prompt}
]
)["choices"][0]["message"]["content"]
break
except:
bar.set_description("Sleeping")
time.sleep(3)
with open(os.path.join(saved_dir, f"{idx}.json"), "w") as writer:
writer.write(response)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task_dataset",
type=str,
required=True,
choices=['EAE_E_Closed','EAE_E_Open','EAE_E+_Closed','EAE_E+_Open','ED_E_Closed','ED_E_Open','ED_E+_Closed','ED_E+_Open','EE_E_Closed','EE_E_Open','EE_E+_Closed','EE_E+_Open']
)
args = parser.parse_args()
call_openai(args) | [] |
2024-01-10 | cgi-estonia-space/ALUs | jupyter-notebook~orchestrator.py | import subprocess
from typing import Union
import folium
import ipywidgets
import ipywidgets as widgets
import configuration as config
import shutil
from subprocess import run
import helper_functions as helper
import IPython.display as ipydisplay
from algorithm.gabor_extraction import GaborExtraction
from algorithm.calibration import Calibration
from algorithm.coherence import Coherence
from callback import *
def check_installed_packages() -> None:
    """Runs eio selfcheck to verify that all the necessary packages are
installed.
Returns:
None
"""
process: subprocess.CompletedProcess = subprocess.run(['eio', 'selfcheck'],
check=True)
assert process.returncode == 0, 'Not all required packages are installed'
def create_alus_path_prompt() -> None:
"""Checks whether ALUs executable are on the PATH, and creates a widget
for selecting folder, containing them.
Returns:
None.
"""
info_output: widgets.Output = widgets.Output()
def choose_alus_dir(button, output_widget):
helper.select_directory(button, config.parameters,
config.ParameterNames.ALUS_DIRECTORY)
if config.parameters[config.ParameterNames.ALUS_DIRECTORY] != '':
output_widget.clear_output()
with output_widget:
helper.check_if_correct_alus_folder_selected(
config.parameters[config.ParameterNames.ALUS_DIRECTORY])
with info_output:
alus_exists: bool = helper.check_if_alus_exists()
if alus_exists:
print('ALUs was found on PATH.')
print("You can choose another directory if You wish.")
alus_chooser_callback: Callback = Callback(choose_alus_dir,
output_widget=info_output)
alus_chooser: ipywidgets.Button = helper.create_button(
'Choose ALUs directory', alus_chooser_callback)
ipydisplay.display(info_output)
ipydisplay.display(alus_chooser)
def create_algorithm_prompt() -> None:
"""Creates a dropdown prompt for choosing the desired algorithm to run.
Returns:
None.
"""
@widgets.interact
def choose_algorithm(algorithm=config.supported_algorithms.keys()):
config.parameters[config.ParameterNames.SELECTED_ALGORITHM] = \
config.supported_algorithms[algorithm]
def show_algorithm_parameters() -> None:
"""Displays algorithm parameters.
Returns:
None
"""
selected_algorithm = config.parameters[
config.ParameterNames.SELECTED_ALGORITHM]
if selected_algorithm is config.AlgorithmName.CALIBRATION_ROUTINE:
calibration = Calibration()
config.parameters[config.ParameterNames.ALGORITHM_CLASS] = calibration
calibration.display_options()
elif selected_algorithm is config.AlgorithmName.COHERENCE_ROUTINE:
coherence = Coherence()
config.parameters[config.ParameterNames.ALGORITHM_CLASS] = coherence
coherence.display_options()
elif selected_algorithm is config.AlgorithmName.GABOR_FEATURE:
gabor_extraction = GaborExtraction()
config.parameters[
config.ParameterNames.ALGORITHM_CLASS] = gabor_extraction
gabor_extraction.display_options()
else:
helper.print_error(
f'Unsupported Algorithm: {selected_algorithm.value}')
def check_necessary_input() -> None:
"""Checks whether all the algorithm necessary parameters were filled and
displays error messages if they were not.
Returns:
None
"""
algorithm = helper.get_algorithm_class()
if algorithm is None:
helper.print_error('No algorithm selected.')
return
if not algorithm.check_necessary_input():
config.parameters[
config.ParameterNames.ALGORITHM_CLASS].display_options()
def show_first_input_map() -> Union[folium.Map, None]:
"""Visualises the map of the first input dataset.
Returns:
Folium map which will be displayed by the Jupyter Notebook.
"""
helper.get_sentinel_1_files()
visualised_map: Union[folium.Map, None] = None
if helper.get_algorithm_name() != config.AlgorithmName.GABOR_FEATURE:
try:
visualised_map = helper.get_algorithm_class().input_files[
0].visualize_webmap()
except IndexError:
return visualised_map
return visualised_map
def show_coherence_secondary_map() -> Union[folium.Map, None]:
"""Visualises the map of the secondary dataset if the selected algorithm
is a Coherence routine.
Returns:
Folium map which will be displayed by the Jupyter Notebook.
"""
visualised_map: Union[folium.Map, None] = None
if helper.get_algorithm_name() == config.AlgorithmName.COHERENCE_ROUTINE:
try:
visualised_map = helper.get_algorithm_class().input_files[
1].visualize_webmap()
except IndexError:
helper.print_error(
'No algorithm selected or secondary input not provided.')
return visualised_map
return visualised_map
def launch_algorithm() -> int:
"""Launches the algorithm.
Returns:
        int: Return code of the algorithm execution.
"""
return helper.get_algorithm_class().launch_algorithm()
| [] |
2024-01-10 | dazai-osamu-111/prompt-based-learning_old_version | finet3.py | import openai
# Define a function to open a file and return its contents as a string
def open_file(filepath):
with open(filepath, 'r', encoding='utf-8') as infile:
return infile.read()
# Define a function to save content to a file
def save_file(filepath, content):
with open(filepath, 'a', encoding='utf-8') as outfile:
outfile.write(content)
# Set the OpenAI API keys by reading them from files
api_key = open_file('openaiapikey2.txt')
openai.api_key = api_key
# Retrieve the state of a fine-tune
response = openai.FineTuningJob.retrieve("YOUR FT JOB ID")
status = response['status']
print(f"Fine-tuning job status: {status}")
| [] |
2024-01-10 | dazai-osamu-111/prompt-based-learning_old_version | finet2.py | import openai
# Define a function to open a file and return its contents as a string
def open_file(filepath):
with open(filepath, 'r', encoding='utf-8') as infile:
return infile.read()
# Define a function to save content to a file
def save_file(filepath, content):
with open(filepath, 'a', encoding='utf-8') as outfile:
outfile.write(content)
# Set the OpenAI API keys by reading them from files
api_key = open_file('openaiapikey2.txt')
openai.api_key = api_key
# Using the provided file_id
file_id = "YOUR FILE ID"
model_name = "gpt-3.5-turbo" # Or another base model if you prefer
response = openai.FineTuningJob.create(
training_file=file_id,
model=model_name
)
job_id = response['id']
print(f"Fine-tuning job created successfully with ID: {job_id}")
| [] |
2024-01-10 | dazai-osamu-111/prompt-based-learning_old_version | SyntD-text.py | import openai
from datetime import datetime
import os
# Initialize folder for saving responses
if not os.path.exists('responses'):
os.mkdir('responses')
# Define a function to open a file and return its contents as a string
def open_file(filepath):
with open(filepath, 'r', encoding='utf-8') as infile:
return infile.read()
# Define a function to save content to a file
def save_file(filepath, content):
with open(filepath, 'w', encoding='utf-8') as outfile:
outfile.write(content)
# Initialize OpenAI API key
api_key = open_file('openaiapikey2.txt')
openai.api_key = api_key
# Read the content of the files containing the chatbot's prompts
chatbot_prompt = open_file('sysprompt.txt')
# Initialize an empty list to store the conversations for the chatbot
conversation = []
def chatgpt(api_key, conversation, chatbot_prompt, solver, temperature=0.7, frequency_penalty=0.2, presence_penalty=0):
conversation.append({"role": "user", "content": solver})
messages_input = conversation.copy()
prompt = [{"role": "system", "content": chatbot_prompt}]
messages_input.insert(0, prompt[0])
completion = openai.ChatCompletion.create(
model="gpt-4-0613",
temperature=temperature,
frequency_penalty=frequency_penalty,
presence_penalty=presence_penalty,
messages=messages_input)
chat_response = completion['choices'][0]['message']['content']
conversation.append({"role": "assistant", "content": chat_response})
return chat_response
# Number of loops / examples
num_loops = 2
for i in range(num_loops):
problem = open_file('problems.txt')
prob1 = chatgpt(api_key, conversation, chatbot_prompt, problem)
solver = open_file('prompt1.txt').replace("<<PROBLEM>>", prob1)
response = chatgpt(api_key, conversation, chatbot_prompt, solver)
# Create a unique filename using the current timestamp
timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
filename = f"responses/response_{timestamp}.txt"
# Combine the input prompt and response
combined_content = f"Input Prompt:\n{prob1}\n\nResponse:\n{response}"
# Save to a file
save_file(filename, combined_content)
print(f"Saved example {i+1} to {filename}")
conversation.clear()
| [
"sysprompt.txt"
] |
2024-01-10 | dazai-osamu-111/prompt-based-learning_old_version | SyntD-json.py | import openai
from datetime import datetime
import os
import json
# Define a function to open a file and return its contents as a string
def open_file(filepath):
with open(filepath, 'r', encoding='utf-8') as infile:
return infile.read()
# Define a function to save content to a file
def save_file(filepath, content):
with open(filepath, 'w', encoding='utf-8') as outfile:
outfile.write(content)
# Initialize folder for saving responses
if not os.path.exists('responses'):
os.mkdir('responses')
# Read the files that don't change during the loops
problem = open_file('problems.txt')
base_solver = open_file('prompt1.txt')
chatbot_prompt = open_file('sysprompt.txt')
# Initialize OpenAI API key
api_key = open_file('openaiapikey2.txt')
openai.api_key = api_key
# Initialize an empty list to store the conversations for the chatbot
conversation = []
def chatgpt(api_key, conversation, chatbot_prompt, solver, temperature=0.7, frequency_penalty=0.2, presence_penalty=0):
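    # Append the user turn to the running conversation, prepend the system prompt, call the API, and store/return the assistant reply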
conversation.append({"role": "user", "content": solver})
messages_input = conversation.copy()
prompt = [{"role": "system", "content": chatbot_prompt}]
messages_input.insert(0, prompt[0])
completion = openai.ChatCompletion.create(
model="gpt-4-0613",
temperature=temperature,
frequency_penalty=frequency_penalty,
presence_penalty=presence_penalty,
messages=messages_input)
chat_response = completion['choices'][0]['message']['content']
conversation.append({"role": "assistant", "content": chat_response})
return chat_response
# Initialize JSONL file
jsonl_file = 'responses/problemsft.jsonl'
# Number of loops / examples
num_loops = 200
for i in range(num_loops):
prob1 = chatgpt(api_key, conversation, chatbot_prompt, problem)
solver = base_solver.replace("<<PROBLEM>>", prob1)
response = chatgpt(api_key, conversation, chatbot_prompt, solver)
# Create JSON object
json_obj = {
"messages": [
{"role": "system", "content": chatbot_prompt},
{"role": "user", "content": prob1},
{"role": "assistant", "content": response}
]
}
# Append JSON object to JSONL file
with open(jsonl_file, 'a') as f:
f.write(json.dumps(json_obj) + '\n')
print(f"Saved example {i+1} to {jsonl_file}")
conversation.clear()
| [
"sysprompt.txt"
] |
2024-01-10 | tesslerc/ActionRobustRL | ounoise.py | import numpy as np
# Taken from OpenAI baselines - baselines/ddpg/noise.py
class ActionNoise(object):
def reset(self):
pass
class NormalActionNoise(ActionNoise):
def __init__(self, mu, sigma):
self.mu = mu
self.sigma = sigma
def __call__(self):
return np.random.normal(self.mu, self.sigma)
def __repr__(self):
return 'NormalActionNoise(mu={}, sigma={})'.format(self.mu, self.sigma)
class OrnsteinUhlenbeckActionNoise(ActionNoise):
def __init__(self, mu, sigma, theta=.15, dt=1e-2, x0=None):
self.theta = theta
self.mu = mu
self.sigma = sigma
self.dt = dt
self.x0 = x0
self.reset()
def __call__(self):
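        # Euler-Maruyama discretisation of the OU process: dx = theta * (mu - x) * dt + sigma * sqrt(dt) * N(0, 1)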
x = self.x_prev + self.theta * (self.mu - self.x_prev) * self.dt + self.sigma * np.sqrt(self.dt) * np.random.normal(size=self.mu.shape)
self.x_prev = x
return x
def reset(self):
self.x_prev = self.x0 if self.x0 is not None else np.zeros_like(self.mu)
def __repr__(self):
return 'OrnsteinUhlenbeckActionNoise(mu={}, sigma={})'.format(self.mu, self.sigma)
| [] |
2024-01-10 | Hacanna42/isSwearGPT | test-model.py | import openai
import time
import asyncio
import json
openai.api_key = "YOUR_API_KEY_HERE"
with open("prompt.txt", "r") as f:
prompt = f.read()
def add_dot(string):
if not string.endswith('.'):
string += '.'
return string
def remove_surrounding_characters(string):
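    # Keep only the outermost {...} span so the model reply can be parsed as JSON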
opening_brace_index = string.find('{')
closing_brace_index = string.rfind('}')
if opening_brace_index == -1 or closing_brace_index == -1:
return string
return string[opening_brace_index:closing_brace_index+1]
async def validation(content):
content = add_dot(content)
messages = [
{"role": "system", "content": prompt},
]
messages.append({"role": "user", "content": "words: "+content})
messages.append({"role": "assistant", "content": '{ "isSwear": "True", "amb": "False" }'})
messages.append({"role": "user", "content": "Are you sure? please check it again."})
completion = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
temperature=0,
messages=messages)
data = remove_surrounding_characters(completion.choices[0].message.content)
print(data)
async def main():
while True:
        content = input("Profanity check: ")
await validation(content)
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| [
"{ \"isSwear\": \"True\", \"amb\": \"False\" }",
"Are you sure? please check it again.",
"words: PLACEHOLDER"
] |
2024-01-10 | rohitdoc15/foggymedia2.0 | website~pages~synopsis.py | import sys
sys.path.append('/home/rohit/news/website')
from googletrans import Translator
import os
import django
from django.utils import timezone
from datetime import timedelta
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'website.settings')
django.setup()
from collections import Counter as CollectionsCounter
from pages.models import NewsChannel, Video, TrendingTopic
import openai
import time
# Configure OpenAI API credentials
openai.api_key = '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'
# Get the current time
now = timezone.now()
# Get the time 4 hours ago
time_4_hours_ago = now - timedelta(hours=4)
# Fetch all distinct topics from the database
topics = TrendingTopic.objects.values_list('topic', flat=True).distinct()
# Translate function using translate package
def translate_text(text):
translator = Translator()
result = translator.translate(text, dest='en')
return result.text
# Iterate over each topic
for topic in topics:
# Fetch the last 15 video titles of the given topic
videos = Video.objects.filter(topic=topic).order_by('-published_date')[:15]
titles = [video.title for video in videos]
# Translate the titles using the translate package
translated_titles = [translate_text(title) for title in titles]
# Concatenate the translated titles into a single string
titles_text = '\n'.join(translated_titles)
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "sarcastic news summeriser"},
{"role": "user", "content": f"Generate a sarcastic summary for the topic: {topic}\nTitles:\n{titles_text}. summary should be in 2-3 line"},
],
)
# Extract the model's reply
synopsis = completion.choices[0].message['content']
# Update the synopsis in the database
trending_topic = TrendingTopic.objects.get(topic=topic)
trending_topic.synopsis = synopsis
trending_topic.save()
print(f"Generated synopsis for topic '{topic}': {synopsis}")
| [
"Generate a sarcastic summary for the topic: PLACEHOLDER\nTitles:\nPLACEHOLDER. summary should be in 2-3 line",
"sarcastic news summeriser"
] |
2024-01-10 | rohitdoc15/foggymedia2.0 | website~pages~celeb.py | import sys
sys.path.append('/home/rohit/news/website')
from fuzzywuzzy import fuzz
from collections import Counter
import os
import django
from django.utils import timezone
from datetime import timedelta
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'website.settings')
django.setup()
from pages.models import Video , TopPopularPersons
import openai
import time
import re
# Set your OpenAI API key
openai.api_key = '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'
# Define function to extract names
def extract_names(reply):
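    # Pull two-word names out of a numbered list reply (e.g. "1. First Last 2. ...") and strip any leftover numbering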
names = re.findall(r'\d+\.\s+(\w+\s+\w+)', reply)
cleaned_names = [re.sub(r'\d+\.|\.', '', name) for name in names]
return cleaned_names
# System message to instruct the model
system_message = "name extractor which outputs person's names in format: 1. name1 2. name2 3. name3 ...."
# Initialize attempt counter and max_attempts
attempt = 1
max_attempts = 5
# Define the number of past days you want to collect data for
num_days = 2
# Prepare a list of stop words
stop_words = ["1.","Dr","Biparjoy" ,"Bengaluru", "Bengal","2.", "3.", "4.", "5.", "Singh", "Kumar", "Patel" ,"Andhra Pradesh", "Arunachal Pradesh", "Assam", "Bihar", "Chhattisgarh", "Goa", "Gujarat", "Haryana", "Himachal Pradesh", "Jharkhand", "Karnataka", "Kerala", "Madhya Pradesh", "Maharashtra", "Manipur", "Meghalaya", "Mizoram", "Nagaland", "Odisha", "Punjab", "Rajasthan", "Sikkim", "Tamil Nadu", "Telangana", "Tripura", "Uttar Pradesh", "Uttarakhand", "West Bengal", "Andaman and Nicobar Islands", "Chandigarh", "Dadra and Nagar Haveli and Daman and Diu", "The Government of NCT of Delhi", "Lakshadweep", "Puducherry", "Ladakh", "Jammu and Kashmir", "Modi" , "PM Modi" , "Cyclone Biperjoy" , "Shiv sena" , "BJP" , "Mumbai", "Delhi", "Bangalore", "Hyderabad", "Ahmedabad", "Chennai", "Kolkata", "Surat", "Pune", "Jaipur", "Lucknow", "Kanpur", "Nagpur", "Visakhapatnam", "Indore", "Thane", "Bhopal", "Pimpri-Chinchwad", "Patna", "Vadodara", "Ghaziabad", "Ludhiana", "Coimbatore", "Agra", "Madurai", "Nashik", "Faridabad", "Meerut", "Rajkot", "Kalyan-Dombivali", "Vasai-Virar", "Varanasi", "Srinagar", "Aurangabad", "Dhanbad", "Amritsar", "Navi Mumbai", "Allahabad", "Ranchi", "Howrah", "Gwalior", "Jabalpur", "Jodhpur", "Raipur", "Kota", "Guwahati", "Chandigarh", "Thiruvananthapuram", "Solapur"]
# Iterate over each day
for day in range(num_days):
# Calculate the 8-hour intervals for that day
now = timezone.now() - timedelta(days=day)
intervals = [(now - timedelta(hours=i+8), now - timedelta(hours=i)) for i in range(0, 24, 8)]
# Open a text file to save the responses
with open(f'extracted_names_day_{day+1}.txt', 'w') as file:
# Iterate over the intervals
for start, end in intervals:
            attempt = 1  # reset the retry counter for each interval
            while attempt <= max_attempts:
# Get the latest 100 videos from each channel
channels = Video.objects.values_list('channel', flat=True).distinct()
latest_video_titles = []
for channel in channels:
latest_videos = Video.objects.filter(channel=channel, published_date__range=(start, end)).order_by('-published_date')[:5]
latest_video_titles.extend([video.title for video in latest_videos])
# Concatenate all the video titles into a single string
all_titles = ', '.join(latest_video_titles)
print(all_titles)
# Use OpenAI's GPT-3.5-turbo to process the text
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=[
{"role": "system", "content": system_message},
{"role": "user", "content": f"I have a list of news titles : START {all_titles} END. Top 10 most talked indian person in the given news titles are:"},
],
temperature=0.2,
)
# Extract the model's reply
reply = completion.choices[0].message['content']
if len(reply) <= 1000:
file.write(reply + "\n")
print(reply)
break
else:
if attempt < max_attempts:
                        print(f"Response exceeds 1000 characters. Attempt {attempt}/{max_attempts}. Waiting for 2 seconds before trying again...")
time.sleep(2)
attempt += 1
else:
print(f"Reached maximum attempts. Unable to get a response within the character limit.")
break
# Now read the text file, analyze the top 5 words
with open(f'extracted_names_day_{day+1}.txt', 'r') as file:
data = file.read().replace("\n", " ")
# Use regular expression to find sequences of capitalized words (treated as names)
import re
names = re.findall(r'(?:(?:\b[A-Z][a-z]*\b\s*)+)', data)
# Filter names to exclude stop words
filtered_names = [name for name in names if not any(word in name.split() for word in stop_words)]
# Tokenize and count the words
name_counter = Counter(filtered_names)
# Find the 5 most common words
top_5_names = name_counter.most_common(5)
# Load the news titles from the day
time_24_hours_ago = now - timedelta(hours=24)
titles_last_24_hours = Video.objects.filter(published_date__range=(time_24_hours_ago, now)).values_list('title', flat=True)
# Initialize a dictionary to store the name matches
name_matches = {}
# Iterate over the extracted top names
for name, _ in top_5_names:
name_matches[name] = 0
# Iterate over the news titles
for title in titles_last_24_hours:
# Check if the name is present in the title
if name.lower() in title.lower():
name_matches[name] += 1
# Sort the name matches in descending order
sorted_matches = sorted(name_matches.items(), key=lambda x: x[1], reverse=True)
print(f"Top 5 names in the news titles for day {day+1}:")
for name, count in sorted_matches:
print(f"{name}: {count} occurrence(s)")
# Extract the top 3 person names and counts
top_3_names = sorted_matches[:3]
# Create or update the TopPopularPersons object for the date
date_obj, created = TopPopularPersons.objects.get_or_create(date=now.date())
# Set the person names and video counts
top_3_names = sorted_matches[:3]
date_obj.person1_name = top_3_names[0][0]
date_obj.person1_video_count = top_3_names[0][1]
date_obj.person2_name = top_3_names[1][0]
date_obj.person2_video_count = top_3_names[1][1]
date_obj.person3_name = top_3_names[2][0]
date_obj.person3_video_count = top_3_names[2][1]
# Save the object
date_obj.save()
print(f"Updated Top Popular Persons for day {day+1}") | [
"I have a list of news titles : START PLACEHOLDER END. Top 10 most talked indian person in the given news titles are:"
] |
2024-01-10 | rohitdoc15/foggymedia2.0 | website~pages~top5.py | import sys
import time
sys.path.append('/home/rohit/news/website')
from googletrans import Translator
translator = Translator()
import os
import django
from django.utils import timezone
from datetime import timedelta
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'website.settings')
django.setup()
from collections import Counter as CollectionsCounter
from pages.models import NewsChannel, Video, TrendingTopic
import openai
from fuzzywuzzy import fuzz
# Get the current time
now = timezone.now()
# Get the time 8 hours ago and 2 days ago
time_4_hours_ago = now - timedelta(hours=8)
time_2_days_ago = now - timedelta(days=2)
openai.api_key = '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'
# Fetch all video titles from your database that were published in the last 4 hours
videos = Video.objects.filter(published_date__range=(time_4_hours_ago, now))
channels = NewsChannel.objects.all()
titles = []
for channel in channels:
# Fetch the latest 5 video titles from this channel that were published in the last 4 hours
videos = Video.objects.filter(channel=channel, published_date__range=(time_4_hours_ago, now)).order_by('-published_date')[:5]
# Append these titles to our master list
for video in videos:
title = video.title
# Translate the title to English
translated_title = translator.translate(title, dest='en').text
titles.append(translated_title)
# Fetch the topics from all videos
all_topics = [video.topic for video in Video.objects.all()]
# Calculate the frequency of each topic
topic_counter = CollectionsCounter(all_topics)
# Filter out blank and dash ("-") topics
for topic in list(topic_counter): # Use list to avoid 'dictionary changed size during iteration' error
if topic == "" or topic == "-":
del topic_counter[topic]
# Get the 5 most common topics from TrendingTopic model
most_common_topics = TrendingTopic.objects.order_by('rank')[:5]
# Convert most_common_topics to a list of tuples for consistency with your old code
most_common_topics = [(topic.topic, topic.rank) for topic in most_common_topics]
print(f"Most common topics: {most_common_topics}")
topics_str = ' '.join([f'{i+1}. {topic[0]}' for i, topic in enumerate(most_common_topics)])
system_message = f"You are a sophisticated AI model trained in news topic extraction. Please give the topics in the format example: {topics_str}"
all_titles = '\n'.join(titles)
# Remember to limit the length of the input as the API has a maximum token limit
max_len = 10000
if len(all_titles) > max_len:
all_titles = all_titles[:max_len]
print(all_titles)
# Retry up to 5 times
for retry in range(5):
    while True: # Continue until we get a reply under 200 characters
try:
# Construct the conversation with the AI model
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=[
{"role": "system", "content": system_message},
{"role": "user", "content": f"I have a list of news titles from the last 4 hours: {all_titles}. Can you analyze them and tell me the five unique topics that these titles seem to be about? The topics should be in Title Case without hashtags, and should be ordered by frequency. "},
],
)
# Extract the model's reply
reply = completion.choices[0].message['content']
# Check if the reply is under 160 characters
if len(reply) <= 200:
break
print(f"Generated reply was too long, retrying...")
print(f"Generated reply: {reply}")
time.sleep(5) # Wait for 5 seconds before retrying
except Exception as e:
print(f"Error on attempt {retry+1}: {e}")
    # If we got a reply under 200 characters, break out of the retry loop
if len(reply) <= 200:
break
print(f"Retry #{retry+1} failed, waiting 5 seconds before next attempt...")
time.sleep(5) # Wait for 5 seconds before next retry
# Split the reply into topics
topics = reply.split('\n')
# Remove numbers and dots from the topics
topics = [topic.split('. ')[1] if '. ' in topic else topic for topic in topics]
# Fetch the old topics from the last two days
old_topics = Video.objects.filter(published_date__range=(time_2_days_ago, now)).values_list('topic', flat=True)
# Set a threshold for the similarity
similarity_threshold = 50
# Update the TrendingTopic model
# Update the TrendingTopic model
for i, (topic, _) in enumerate(most_common_topics):
# Initialize max_similarity and similar_old_topic
max_similarity = -1
similar_old_topic = None
# Check if the topic already exists in the old topics using fuzzy matching
for old_topic in old_topics:
similarity = fuzz.ratio(topic, old_topic)
if similarity > max_similarity:
max_similarity = similarity
similar_old_topic = old_topic
if max_similarity > similarity_threshold:
# If similar old topic found with the highest similarity score, replace new topic with old topic
print(f"New topic '{topic}' is similar to old topic '{similar_old_topic}' with a similarity score of {max_similarity}, replacing.")
topic = similar_old_topic
# Update the TrendingTopic model with the new or old topic
trending_topic, created = TrendingTopic.objects.get_or_create(rank=i+1)
trending_topic.topic = topic
trending_topic.save()
| [
"You are a sophisticated AI model trained in news topic extraction. Please give the topics in the format example: PLACEHOLDER",
"I have a list of news titles from the last 4 hours: PLACEHOLDER. Can you analyze them and tell me the five unique topics that these titles seem to be about? The topics should be in Title Case without hashtags, and should be ordered by frequency. "
] |
2024-01-10 | rohitdoc15/foggymedia2.0 | website~pages~tagger.py | import sys
import time
import os
import django
from django.utils import timezone
from datetime import timedelta
from fuzzywuzzy import fuzz
import openai
from googletrans import Translator
sys.path.append('/home/rohit/news/website')
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'website.settings')
django.setup()
stopwords = {'hindi news', 'world news', 'r bharat' , 'Gravitas', 'Top Headlines', 'ABP News' , 'Political controversies' , 'India News', 'Breaking News'}
from pages.models import NewsChannel, Video, TrendingTopic
def filter_quotes(topics):
return [topic.replace("'", "") for topic in topics]
def translate_to_english(text):
translator = Translator()
result = translator.translate(text, dest='en')
return result.text
def read_topics_from_file(filename):
with open(filename, 'r') as file:
topics = file.read().splitlines()
return topics
def update_topics_file(filename, topics):
with open(filename, 'w') as file:
for topic in topics:
file.write(f"{topic}\n")
def match_with_trending_topics(videos, topics, stopwords):
updated_videos_count = 0
for video in videos:
title = video.title
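        # Translate titles containing non-ASCII letters to English before fuzzy matching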
if any(chr.isalpha() for chr in title if ord(chr) > 128):
title = translate_to_english(title)
best_score = 0
best_topic = None
for topic in topics:
if topic.lower() in stopwords:
continue
similarity = fuzz.token_set_ratio(title, topic)
if similarity > best_score and similarity > 50:
best_score = similarity
best_topic = topic
if best_topic:
video.topic = best_topic
video.save()
updated_videos_count += 1
return updated_videos_count
def get_popular_topics(channel, days=3):
time_days_ago = timezone.now() - timedelta(days=days)
videos = Video.objects.filter(channel=channel, published_date__gte=time_days_ago)
topics = {video.topic for video in videos}
return trim_topics(topics)
def trim_topics(topics):
trimmed_topics = []
for topic in topics:
words = topic.split()
if len(words) > 4:
trimmed_topic = " ".join(words[:4])
trimmed_topics.append(trimmed_topic)
else:
trimmed_topics.append(topic)
return trimmed_topics
def remove_similar_topics(topics):
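    # Drop near-duplicate topics (token-set similarity > 80), keeping the shorter phrasing of each similar pair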
distinct_topics = set(topics.copy()) # Convert topics to a set to ensure distinct values
for topic1 in topics:
for topic2 in distinct_topics.copy(): # Use a copy of distinct_topics to iterate and modify it
if topic1 != topic2:
similarity = fuzz.token_set_ratio(topic1, topic2)
if similarity > 80:
if len(topic1) > len(topic2):
distinct_topics.discard(topic1)
else:
distinct_topics.discard(topic2)
break
return list(distinct_topics) # Convert distinct_topics back to a list before returning
def remove_stopwords(topics, stopwords):
return [topic for topic in topics if topic.lower() not in stopwords]
time_12_hours_ago = timezone.now() - timedelta(hours=12)
channels = NewsChannel.objects.all()
for channel in channels:
channel_name = channel.name
old_topics = read_topics_from_file(f'{channel_name}.txt')
old_topics = trim_topics(old_topics) # Trim the topics as soon as they are read from the file
update_topics_file(f'{channel_name}.txt', old_topics) # Update the file with trimmed topics
old_topics = filter_quotes(old_topics) # Add this line to filter out single quotes
old_topics = remove_similar_topics(old_topics)
old_topics = remove_stopwords(old_topics, stopwords) # Remove stopwords from existing topics
popular_topics = get_popular_topics(channel, days=3)
common_topics = set(old_topics).intersection(popular_topics)
old_topics = [topic for topic in old_topics if topic not in common_topics]
old_topics.extend(common_topics)
update_topics_file(f'{channel_name}.txt', old_topics)
recent_videos = Video.objects.filter(channel=channel, published_date__gte=time_12_hours_ago)
updated_videos_count = match_with_trending_topics(recent_videos, old_topics, stopwords)
print(f"\nSummary for channel '{channel_name}':")
print(f"Number of videos processed: {recent_videos.count()}")
print(f"Number of videos updated with new topics: {updated_videos_count}\n")
| [] |
2024-01-10 | rohitdoc15/foggymedia2.0 | website~pages~cluster.py | import sys
import time
import os
import django
from django.utils import timezone
from datetime import timedelta
from fuzzywuzzy import fuzz
import openai
from collections import Counter as CollectionsCounter
sys.path.append('/home/rohit/news/website')
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'website.settings')
django.setup()
from pages.models import NewsChannel, Video, TrendingTopic
stopwords = ['gujarat', 'abp news' , 'India News' ,'Top Headlines ' , 'WION']
now = timezone.now()
time_4_hours_ago = now - timedelta(hours=24)
openai.api_key = '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'
channels = NewsChannel.objects.all()
all_topics = [video.topic for video in Video.objects.all()]
# Calculate the frequency of each topic
topic_counter = CollectionsCounter(all_topics)
# Filter out blank and dash ("-") topics
for topic in list(topic_counter): # Use list to avoid 'dictionary changed size during iteration' error
if topic == "" or topic == "-":
del topic_counter[topic]
# Get the 5 most common topics
most_common_topics = topic_counter.most_common(5)
print(f"Most common topics: {most_common_topics}")
topics_str = ' '.join([f'{i+1}. {topic[0]}' for i, topic in enumerate(most_common_topics)])
for channel in channels:
print(f"Processing channel: {channel.name}")
videos = Video.objects.filter(channel=channel, published_date__range=(time_4_hours_ago, now))
titles = [video.title for video in videos]
print(f"Found {len(titles)} videos for channel")
all_titles = '\n'.join(titles)
max_len = 5000
if len(all_titles) > max_len:
all_titles = all_titles[:max_len]
top_trending_topics = TrendingTopic.objects.order_by('rank')[:5]
system_message = f"You are a sophisticated AI model trained in topic extraction and text analysis. Please give the topics in the format example: {topics_str}"
for attempt in range(3): # Increase to 3 to allow 2 retries
try:
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": system_message},
{"role": "user", "content": f"I have a list of news titles from the last 4 hours: {all_titles}. Can you analyze them and tell me the five main topics that these titles seem to be about? The topics should be in Title Case without hashtags,and should be ordered by frequency. "},
],
temperature=0,
)
reply = completion.choices[0].message['content']
print(f"AI's reply: {reply}")
if len(reply) > 200:
print(f"Reply is too long ({len(reply)} characters). Retrying.")
if attempt == 2: # If this was the last attempt
print("After two attempts, reply is still too long. Updating only the trending topic in the file.")
# Code to update trending topic in the file goes here
continue # Retry if reply was too long
else:
print("Successfully retrieved AI's topic analysis.")
break # Exit loop if reply was of acceptable length
except Exception as e:
print(f"Error on attempt {attempt+1}: {e}")
time.sleep(1)
channel_topics = reply.split('\n')
channel_topics = [topic.split('. ')[1] if '. ' in topic else topic for topic in channel_topics]
# Fetch the old topics from the last two days
time_2_days_ago = now - timedelta(days=2)
old_topics = list(Video.objects.filter(published_date__range=(time_2_days_ago, now)).values_list('topic', flat=True))
topic_counts = CollectionsCounter(old_topics)
similarity_threshold = 70 # You can adjust this value according to your needs
old_topics = sorted(topic_counts, key=topic_counts.get, reverse=True)
# List of certain words
certain_words = ['Controversy', 'Updates', 'news' ,'Politics']
for i, topic in enumerate(channel_topics):
# Check if the topic contains any certain words
has_certain_word = any(word in topic for word in certain_words)
if has_certain_word:
# Adjust similarity threshold for topics with certain words
similarity_threshold = 80 # Adjust the value as needed for topics with certain words
# Check if the topic already exists in the old topics using fuzzy matching
similar_old_topic = next((old_topic for old_topic in old_topics if fuzz.ratio(topic, old_topic) > similarity_threshold), None)
if similar_old_topic:
# If similar old topic found, replace new topic with old topic
print(f"New topic '{topic}' is similar to old topic '{similar_old_topic}', replacing.")
topic = similar_old_topic
# Update the channel topics with the new or old topic
channel_topics[i] = topic
trending_topics = [t.topic for t in TrendingTopic.objects.all()]
all_topics = set(trending_topics + channel_topics)
print(f"Total topics (trending + channel specific): {len(all_topics)}")
# Store unique topics
unique_topics = []
for video in videos:
video_topic = None
max_similarity = -1
for topic in all_topics:
similarity = fuzz.ratio(video.title, topic)
if similarity > similarity_threshold and similarity > max_similarity:
video_topic = topic
max_similarity = similarity
if video_topic:
video.topic = video_topic
video.save()
print(f"Assigned topic '{video_topic}' to video '{video.title}'")
# Compare the new topic with each trending topic
is_unique = True
for trending_topic in trending_topics:
if fuzz.ratio(video_topic, trending_topic) > similarity_threshold:
is_unique = False
break
if is_unique:
unique_topics.append(video_topic)
# Save unique topics to a file
trending_topics_set = set(trending_topics)
channel_topics_set = set(channel_topics)
# Get the unique topics from the channel topics when compared to trending topics
unique_channel_topics = []
for channel_topic in channel_topics_set:
# Check if any keyword from the channel topic is present in the trending topics
has_common_keyword = any(any(keyword.lower() in trending_topic.lower() for keyword in channel_topic.split()) for trending_topic in trending_topics)
if not has_common_keyword:
unique_channel_topics.append(channel_topic)
# Merge unique channel topics and all trending topics
unique_topics = unique_channel_topics + list(trending_topics_set)
filtered_topics = [topic for topic in unique_topics if topic.lower() not in stopwords]
# Save unique topics to a file
with open(f"{channel.name}.txt", 'w') as file:
for topic in filtered_topics:
file.write(f"{topic}\n")
print(f"Saved filtered topics to file: {channel.name}.txt") | [
"I have a list of news titles from the last 4 hours: PLACEHOLDER. Can you analyze them and tell me the five main topics that these titles seem to be about? The topics should be in Title Case without hashtags,and should be ordered by frequency. "
] |
2024-01-10 | Mattyfreshy/FreshBot | cogs~chatGPT.py | import discord, os
from discord.ext import commands
from discord import app_commands
import FreshBot as fb
import asyncio
import openai
# Enable or Disable chatbot features
ENABLED = True
# ChatGPT parameters
ENGINE = "text-davinci-003"
TEMPERATURE = 0.9
MAX_TOKENS = 150
PRESENCE_PENALTY = 0.6
class ChatGPT(commands.Cog):
def __init__(self, bot):
# Load variables
openai.api_key = os.getenv('OPENAI_API_KEY')
self.bot = bot
def discord_requester(self, interaction: discord.Interaction):
"""Returns the discord user who requested the song"""
name = interaction.user.name
discriminator = interaction.user.discriminator
mention = interaction.user.mention
return f'{mention}'
# Send message to channel depending on if chatbot is enabled
async def send_message(self, interaction: discord.Interaction, message):
""" Send message to channel depending on if chatbot is enabled """
if ENABLED:
await interaction.followup.send(message)
else:
await interaction.response.send_message("Chatbot is disabled.")
# Get response from GPT API
async def get_response(self, message):
""" Get response from GPT API"""
try:
response = openai.Completion.create(
engine=ENGINE,
prompt=message,
temperature=TEMPERATURE,
max_tokens=MAX_TOKENS,
presence_penalty=PRESENCE_PENALTY,
)
return response.choices[0].text
except Exception as e:
print("chatGPT Error: ", e)
return "Error getting response"
# Ask something (guild only)
# @commands.guild_only()
@app_commands.command(name='ask')
async def ask(self, interaction: discord.Interaction, *, message: str):
""" Ask the bot something """
msg = self.discord_requester(interaction) + " asked: \n" + message
await interaction.response.send_message(msg)
await self.send_message(interaction, await self.get_response(message))
# @commands.dm_only()
# @commands.command(name='query')
# async def ask(self, ctx, *, message):
# """ Ask the bot something """
# await self.send_message(ctx, await self.get_response(message))
async def setup(bot):
await bot.add_cog(ChatGPT(bot)) | [] |
2024-01-10 | webis-de/emnlp23-indicative-summarization-of-long-discussions | demo~api~clients~gpt_client.py | import enum
import re
import numpy as np
import openai
import tiktoken
from openai.error import AuthenticationError, InvalidRequestError
from tenacity import retry, stop_after_attempt, wait_random_exponential
from tenacity.retry import retry_if_not_exception_type
PREFIX_RE = re.compile(
r"^a?\s*\w*\s*(argument|debate|discussion|exploring)s? (of|about|on|against|for)?\s*(the)?",
flags=re.IGNORECASE,
)
SPACE_RE = re.compile(r"\s+")
class TokenCounter:
def __init__(self, model, texts, indicate_shared=False):
enc = tiktoken.encoding_for_model(model)
self.is_single = isinstance(texts, str)
if self.is_single:
texts = [texts]
full_text = "".join(texts)
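        # Character offset at which each token ends: encode the full text, decode each token individually, and accumulate the lengths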
self.ends = np.cumsum(
[len(e) for e in enc.decode_batch([e] for e in enc.encode(full_text))]
)
self.num_all_tokens = len(self.ends)
self.num_non_special_tokens = len(self.ends)
self.num_special_tokens = self.num_all_tokens - self.num_non_special_tokens
self.counts = []
self.current_count = 0
self.current_length = 0
self.length_iter = iter(np.cumsum([len(e) for e in texts]))
self.indicate_shared = indicate_shared
def results(self):
return {
"counts": self.counts,
"num": {
"all": self.num_all_tokens,
"special": self.num_special_tokens,
"non_special": self.num_non_special_tokens,
},
}
def _commit_count(self, is_partial):
if self.indicate_shared and is_partial:
self.current_count += 0.5
self.counts.append(self.current_count)
self.current_count = 0
def _get_next_length(self, is_partial):
while (next_length := next(self.length_iter)) == self.current_length:
self._commit_count(is_partial)
return next_length
def consume(self):
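        # Walk token end offsets against text end offsets to count tokens per text; a boundary-straddling token adds 0.5 when indicate_shared is set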
if self.counts:
raise Exception("already consumed")
try:
self.current_length = self._get_next_length(False)
for end in self.ends:
self.current_count += 1
while self.current_length <= end:
self._commit_count(self.current_length != end)
self.current_length = self._get_next_length(
self.current_length != end
)
except StopIteration:
pass
if self.is_single:
(self.counts,) = self.counts
return self.counts
class MODEL_TYPES(enum.Enum):
COMPLETION = enum.auto()
CHAT = enum.auto()
MODELS = {
"text-davinci-003": {"max_length": 4096, "type": MODEL_TYPES.COMPLETION},
"text-davinci-002": {"max_length": 4096, "type": MODEL_TYPES.COMPLETION},
"text-curie-001": {"max_length": 2048, "type": MODEL_TYPES.COMPLETION},
"text-babbage-001": {"max_length": 2048, "type": MODEL_TYPES.COMPLETION},
"text-ada-001": {"max_length": 2048, "type": MODEL_TYPES.COMPLETION},
"gpt-3.5-turbo": {"max_length": 4096, "type": MODEL_TYPES.CHAT},
"gpt-4": {"max_length": 8192, "type": MODEL_TYPES.CHAT},
"gpt-4-32k": {"max_length": 32768, "type": MODEL_TYPES.CHAT},
}
class OpenAIClient:
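    """Thin wrapper around the OpenAI Completion / ChatCompletion endpoints.

    The chosen model decides whether chat or plain completion calls are made,
    transient failures are retried with exponential backoff, and __call__
    accepts a single prompt or a batch of prompts, (system_message, prompt)
    pairs, or keyword dicts, returning the generated text with token usage.
    """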
MODELS = set(MODELS.keys())
def __init__(self, model, api_key):
self.model = model
if api_key is None:
raise ValueError("api_key is None")
self.api_key = api_key
model_info = MODELS[model]
self.model_max_length = model_info["max_length"]
self.is_chat = model_info["type"] == MODEL_TYPES.CHAT
def generate(self, prompt, system_message=None, **kwargs):
if self.is_chat:
if system_message is None:
raise ValueError("system_message is None with a Chat model")
result = openai.ChatCompletion.create(
model=self.model,
api_key=self.api_key,
messages=[
{
"role": "system",
"content": system_message,
},
{
"role": "user",
"content": prompt,
},
],
**kwargs,
)
else:
if system_message is not None:
raise ValueError("system_message is not None with a Completion model")
result = openai.Completion.create(
model=self.model,
api_key=self.api_key,
prompt=prompt,
**kwargs,
)
usage = result.usage
(choice,) = result.choices
text = choice.text if "text" in choice else choice.message.content
return {
"generated": text,
"size": {
"input": usage.prompt_tokens,
"output": usage.completion_tokens,
"overflow": 0,
},
"stopping_reason": choice.finish_reason,
}
@retry(
wait=wait_random_exponential(min=1, max=60),
stop=stop_after_attempt(6),
reraise=True,
retry=retry_if_not_exception_type(
(
AuthenticationError,
NotImplementedError,
KeyboardInterrupt,
InvalidRequestError,
ValueError,
)
),
)
def _generate(self, *args, **kwargs):
return self.generate(*args, **kwargs)
def meta(self):
return {
"model": self.model,
"model_max_length": self.model_max_length,
}
def count_tokens(self, text, indicate_shared=False):
counter = TokenCounter(self.model, text, indicate_shared=indicate_shared)
counter.consume()
return counter.results()
def __call__(
self,
batch,
max_new_tokens=None,
with_meta=False,
temperature=0,
top_p=0.5,
frequency_penalty=0,
presence_penalty=0,
):
is_single = not isinstance(batch, list)
if is_single:
batch = [batch]
generated = []
kwargs = {
"temperature": temperature,
"top_p": top_p,
"frequency_penalty": frequency_penalty,
"presence_penalty": presence_penalty,
}
if max_new_tokens is not None:
kwargs["max_tokens"] = max_new_tokens
for e in batch:
if isinstance(e, str):
result = self._generate(prompt=e, **kwargs)
elif isinstance(e, (list, tuple)):
try:
system_message, prompt = e
except:
(prompt,) = e
result = self._generate(
prompt=prompt, system_message=system_message, **kwargs
)
elif isinstance(e, dict):
result = self._generate(**e, **kwargs)
else:
raise ValueError("input has to be on of [str, list, tuple, dict]")
generated.append(result)
if is_single:
(generated,) = generated
if with_meta:
return generated, self.meta()
return generated
| [] |
2024-01-10 | webis-de/emnlp23-indicative-summarization-of-long-discussions | language_models~client~gpt_client.py | import enum
import numpy as np
import openai
import tiktoken
from openai.error import AuthenticationError, InvalidRequestError
from tenacity import retry, stop_after_attempt, wait_random_exponential
from tenacity.retry import retry_if_not_exception_type
class TokenCounter:
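    """Count how many tiktoken tokens fall inside each of the given texts.

    The texts are concatenated and tokenized once; the character end offset of
    every token is then walked against the cumulative lengths of the inputs, so
    consume() yields one count per text (marking tokens shared across segment
    boundaries when indicate_shared is True) and results() adds overall totals.
    """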
def __init__(self, model, texts, indicate_shared=False):
enc = tiktoken.encoding_for_model(model)
self.is_single = isinstance(texts, str)
if self.is_single:
texts = [texts]
full_text = "".join(texts)
self.ends = np.cumsum(
[len(e) for e in enc.decode_batch([e] for e in enc.encode(full_text))]
)
self.num_all_tokens = len(self.ends)
self.num_non_special_tokens = len(self.ends)
self.num_special_tokens = self.num_all_tokens - self.num_non_special_tokens
self.counts = []
self.current_count = 0
self.current_length = 0
self.length_iter = iter(np.cumsum([len(e) for e in texts]))
self.indicate_shared = indicate_shared
def results(self):
return {
"counts": self.counts,
"num": {
"all": self.num_all_tokens,
"special": self.num_special_tokens,
"non_special": self.num_non_special_tokens,
},
}
def _commit_count(self, is_partial):
if self.indicate_shared and is_partial:
self.current_count += 0.5
self.counts.append(self.current_count)
self.current_count = 0
def _get_next_length(self, is_partial):
while (next_length := next(self.length_iter)) == self.current_length:
self._commit_count(is_partial)
return next_length
def consume(self):
if self.counts:
raise Exception("already consumed")
try:
self.current_length = self._get_next_length(False)
for end in self.ends:
self.current_count += 1
while self.current_length <= end:
self._commit_count(self.current_length != end)
self.current_length = self._get_next_length(
self.current_length != end
)
except StopIteration:
pass
if self.is_single:
(self.counts,) = self.counts
return self.counts
class MODEL_TYPES(enum.Enum):
COMPLETION = enum.auto()
CHAT = enum.auto()
MODELS = {
"text-davinci-003": {"max_length": 4096, "type": MODEL_TYPES.COMPLETION},
"text-davinci-002": {"max_length": 4096, "type": MODEL_TYPES.COMPLETION},
"text-curie-001": {"max_length": 2048, "type": MODEL_TYPES.COMPLETION},
"text-babbage-001": {"max_length": 2048, "type": MODEL_TYPES.COMPLETION},
"text-ada-001": {"max_length": 2048, "type": MODEL_TYPES.COMPLETION},
"gpt-3.5-turbo": {"max_length": 4096, "type": MODEL_TYPES.CHAT},
"gpt-4": {"max_length": 8192, "type": MODEL_TYPES.CHAT},
"gpt-4-32k": {"max_length": 32768, "type": MODEL_TYPES.CHAT},
}
class OpenAIClient:
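    """Thin wrapper around the OpenAI Completion / ChatCompletion endpoints.

    The chosen model decides whether chat or plain completion calls are made,
    transient failures are retried with exponential backoff, and __call__
    accepts a single prompt or a batch of prompts, (system_message, prompt)
    pairs, or keyword dicts, returning the generated text with token usage.
    """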
MODELS = set(MODELS.keys())
def __init__(self, model, api_key):
self.model = model
if api_key is None:
raise ValueError("api_key is None")
self.api_key = api_key
model_info = MODELS[model]
self.model_max_length = model_info["max_length"]
self.is_chat = model_info["type"] == MODEL_TYPES.CHAT
def generate(self, prompt, system_message=None, **kwargs):
if self.is_chat:
if system_message is None:
raise ValueError("system_message is None with a Chat model")
result = openai.ChatCompletion.create(
model=self.model,
api_key=self.api_key,
messages=[
{
"role": "system",
"content": system_message,
},
{
"role": "user",
"content": prompt,
},
],
**kwargs,
)
else:
if system_message is not None:
raise ValueError("system_message is not None with a Completion model")
result = openai.Completion.create(
model=self.model,
api_key=self.api_key,
prompt=prompt,
**kwargs,
)
usage = result.usage
(choice,) = result.choices
text = choice.text if "text" in choice else choice.message.content
return {
"generated": text,
"size": {
"input": usage.prompt_tokens,
"output": usage.completion_tokens,
"overflow": 0,
},
"stopping_reason": choice.finish_reason,
}
@retry(
wait=wait_random_exponential(min=1, max=60),
stop=stop_after_attempt(6),
reraise=True,
retry=retry_if_not_exception_type(
(
AuthenticationError,
NotImplementedError,
KeyboardInterrupt,
InvalidRequestError,
ValueError,
)
),
)
def _generate(self, *args, **kwargs):
return self.generate(*args, **kwargs)
def meta(self):
return {
"model": self.model,
"model_max_length": self.model_max_length,
"architecture_type": "decoder",
}
def count_tokens(self, text, indicate_shared=False):
counter = TokenCounter(self.model, text, indicate_shared=indicate_shared)
counter.consume()
return counter.results()
def __call__(
self,
batch,
max_new_tokens=None,
with_meta=False,
temperature=0,
top_p=0.5,
frequency_penalty=0,
presence_penalty=0,
):
is_single = not isinstance(batch, list)
if is_single:
batch = [batch]
generated = []
kwargs = {
"temperature": temperature,
"top_p": top_p,
"frequency_penalty": frequency_penalty,
"presence_penalty": presence_penalty,
}
if max_new_tokens is not None:
kwargs["max_tokens"] = max_new_tokens
for e in batch:
if isinstance(e, str):
result = self._generate(prompt=e, **kwargs)
elif isinstance(e, (list, tuple)):
try:
system_message, prompt = e
except:
(prompt,) = e
result = self._generate(
prompt=prompt, system_message=system_message, **kwargs
)
elif isinstance(e, dict):
result = self._generate(**e, **kwargs)
else:
raise ValueError("input has to be on of [str, list, tuple, dict]")
generated.append(result)
if is_single:
(generated,) = generated
if with_meta:
return generated, self.meta()
return generated
| [] |
2024-01-10 | 531Yvonne/customized-ai-chatbot | yvesyang_streamlit_chatbot.py | # Reference: Implemented with the help of Streamlit Website Generative AI Examples
import os
import openai
import streamlit as st
import csv
import json
with st.sidebar:
# Take user's OpenAI API Key
openai_api_key = st.text_input(
"Add Your OpenAI API Key", key="chatbot_api_key", type="password")
response_language = st.sidebar.selectbox(
"Select Response Language", ["english", "french", "spanish", "chinese", "japanese", "korean"])
# Export History Record
# Format Selection
export_format = st.selectbox(
"Select Export Format", ["JSON", "CSV", "TXT"])
# Export Chat History Button
if st.sidebar.button("Export Chat History"):
if export_format == "JSON":
with open("chat_history.json", mode="w") as file:
json.dump(st.session_state.messages, file, indent=4)
elif export_format == "CSV":
with open("chat_history.csv", mode="w", newline="") as file:
writer = csv.writer(file)
for msg in st.session_state.messages:
writer.writerow([msg["role"], msg["content"]])
elif export_format == "TXT":
with open("chat_history.txt", mode="w") as file:
for msg in st.session_state.messages:
file.write(f"{msg['role']}: {msg['content']}\n")
st.success(
f"Chat history exported as {export_format} to the program file!")
if st.button("Clear Chat History"):
st.session_state["messages"] = [
{"role": "assistant", "content": "Hi, I'm Yves. How can I help you?"}]
st.success("Chat history cleared!")
st.title("🤖 Yves' Chatbot")
st.caption("A chatbot powered by OpenAI and Streamlit")
if "messages" not in st.session_state:
# Initial State
st.session_state["messages"] = [
{"role": "assistant", "content": "Hi, I'm Yves. How can I help you?"}]
for msg in st.session_state.messages:
st.chat_message(msg["role"]).write(msg["content"])
if prompt := st.chat_input():
if not openai_api_key:
st.info("Please add your OpenAI API key to continue.")
st.stop()
openai.api_key = openai_api_key
st.session_state.messages.append(
{"role": "user", "content": f"{prompt} [language={response_language}]"})
st.chat_message("user").write(prompt)
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo", messages=st.session_state.messages)
msg = response.choices[0].message
st.session_state.messages.append(msg)
st.chat_message("assistant").write(msg.content)
| [
"Hi, I'm Yves. How can I help you?"
] |
2024-01-10 | rerofumi/fm_kaisetsu_maker | kaisetsu_maker.py | import os
import argparse
from src.openai_api_bridge import OpenAIAPIBridge
from src.explanation import Explanation
def main():
api_key = os.environ.get("OPENAI_API_KEY")
args = parse_args()
#
api = OpenAIAPIBridge(api_key)
generator = Explanation(api, "./resource", "./output", args.use_voicevox)
print(f"Question: {args.question}")
print(f"Image Only: {args.image_only}")
print(f"Keep Image: {args.keep_image}")
print(f"Use VOICEVOX: {args.use_voicevox}")
print(f"Make Slide Image: {args.make_slide_image}")
generator.generate(args.question, args.image_only, args.keep_image, args.make_slide_image)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("question", help="Your question.")
parser.add_argument("--image-only", "-i", action="store_true", required=False, help="Perform only image output, do not generate video.")
parser.add_argument("--keep-image", "-k", action="store_true", required=False, help="Do not generate images.")
parser.add_argument("--use-voicevox", "-v", action="store_true", required=False, help="Use VOICEVOX engine.")
parser.add_argument("--make-slide-image", "-s", action="store_true", required=False, help="Generate slide images.")
return parser.parse_args()
if __name__ == '__main__':
main()
| [] |
2024-01-10 | vincent-goldberg/compliance_copilot_prototype | copilot-prototype~backend~ingestion.py | # Ingest packages
import os
import torch
from dotenv import load_dotenv
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceBgeEmbeddings
from langchain import HuggingFaceHub
# Global variables
load_dotenv() # Load environment variables from .env file
huggingfacehub_api_token = os.getenv("HUGGINGFACE_API_KEY")
mistral_repo = 'mistralai/Mistral-7B-Instruct-v0.1'
# Embedding model configuration
embedd_model = 'BAAI/bge-reranker-large'
model_kwargs = {"device": 'cpu'}
encode_kwargs = {"normalize_embeddings": True}
embeddings = HuggingFaceBgeEmbeddings(
model_name=embedd_model, model_kwargs=model_kwargs, encode_kwargs=encode_kwargs
)
def ingest_doc(doc_path, file_name):
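    """Build or reload a per-document Chroma vector store.

    The PDF at doc_path is loaded, split into overlapping chunks, embedded with
    the BGE model configured above, and persisted under
    ./backend/vector_databases/<file_name>; if that store already exists it is
    simply reloaded instead of re-ingesting the document.
    """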
# Checking if vector database exists, creating it if not
outdir = "./backend/vector_databases/"
if not os.path.exists(outdir):
os.makedirs(outdir)
# Creating database path
db_path = os.path.join(outdir, file_name)
print('Db Path: ', db_path)
# Checking if the database already exists, and creating it if it doesn't
if not os.path.exists(db_path):
# Loading doc
loader = PyPDFLoader(doc_path)
raw_doc = loader.load()
# Split and store vectors
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500,
chunk_overlap=20,
separators=["\n\n", "\n", " ", ""])
all_splits = text_splitter.split_documents(raw_doc)
# Creating vector store
vectorstore = Chroma.from_documents(documents=all_splits, embedding=embeddings, persist_directory=db_path)
else:
vectorstore = Chroma(persist_directory=db_path, embedding_function=embeddings)
return vectorstore
| [] |
2024-01-10 | vincent-goldberg/compliance_copilot_prototype | copilot-prototype~backend~core.py | from typing import Any, List, Dict
# Chat packages
import torch
import os
from dotenv import load_dotenv
from langchain.embeddings import HuggingFaceBgeEmbeddings
from langchain.chains import ConversationalRetrievalChain, RetrievalQA
from langchain import HuggingFaceHub
# Ollama for local machines
from langchain.llms import Ollama
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
# Summarization packages
from langchain.chains.llm import LLMChain
# from langchain.prompts import PromptTemplate
from langchain import hub
from langchain.chains import ReduceDocumentsChain, MapReduceDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import CharacterTextSplitter
# Global variables
load_dotenv() # Load environment variables from .env file
huggingfacehub_api_token = os.getenv("HUGGINGFACE_API_KEY")
mistral_repo = 'mistralai/Mistral-7B-Instruct-v0.1'
# Embedding model configuration
embedd_model = 'BAAI/bge-reranker-large'
model_kwargs = {"device": 0}
encode_kwargs = {"normalize_embeddings": True}
embeddings = HuggingFaceBgeEmbeddings(
model_name=embedd_model, model_kwargs=model_kwargs, encode_kwargs=encode_kwargs
)
# Building LLM
llm = Ollama(model="mistral",
verbose=True,
callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]))
# Function to call LLM and generate response
def run_llm_summarize():
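    """Summarize a hard-coded NIST PDF with a map-reduce chain.

    Each chunk is summarized by the map chain, the partial summaries are merged
    by the reduce chain, and the combined summary string is returned.
    """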
map_prompt = hub.pull("rlm/map-prompt")
map_chain = LLMChain(llm=llm, prompt=map_prompt)
loader = PyPDFLoader("/Users/Vincent/Berkeley/w210/compliance_copilot_prototype/copilot-prototype/backend/uploads/NIST.IR.8270.pdf")
docs = loader.load()
reduce_prompt = hub.pull("rlm/map-prompt")
# Run chain
reduce_chain = LLMChain(llm=llm, prompt=reduce_prompt)
# Takes a list of documents, combines them into a single string, and passes this to an LLMChain
combine_documents_chain = StuffDocumentsChain(
llm_chain=reduce_chain, document_variable_name="docs"
)
# Combines and iteravely reduces the mapped documents
reduce_documents_chain = ReduceDocumentsChain(
# This is final chain that is called.
combine_documents_chain=combine_documents_chain,
# If documents exceed context for `StuffDocumentsChain`
collapse_documents_chain=combine_documents_chain,
# The maximum number of tokens to group documents into.
token_max=4000,
)
# Combining documents by mapping a chain over them, then combining results
map_reduce_chain = MapReduceDocumentsChain(
# Map chain
llm_chain=map_chain,
# Reduce chain
reduce_documents_chain=reduce_documents_chain,
# The variable name in the llm_chain to put the documents in
document_variable_name="docs",
# Return the results of the map steps in the output
return_intermediate_steps=False,
)
text_splitter = CharacterTextSplitter.from_tiktoken_encoder(
chunk_size=1000, chunk_overlap=0)
split_docs = text_splitter.split_documents(docs)
summary = map_reduce_chain.run(split_docs)
return summary
# Function to call LLM and generate response
def run_llm(vector_database: Any, query: str, chat_history: List[Dict[str, Any]] = []):
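    """Answer a query over the given vector store with conversational retrieval.

    An MMR retriever pulls the most relevant chunks, the chain conditions the
    answer on the chat history, and the page numbers of the source chunks are
    returned alongside the response text.
    """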
qa = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=vector_database.as_retriever(search_type="mmr", search_kwargs={'k': 5, 'fetch_k': 50}),
return_source_documents=True
)
results = qa({"question": query, "chat_history": chat_history})
response = results["answer"]
sources = [doc.metadata["page"] for doc in results["source_documents"]]
return response, sources
| [
"rlm/map-prompt"
] |
2024-01-10 | dang3r/forge | youtube-to-anki-cards~youtube_lib.py | import subprocess
from youtube_transcript_api import YouTubeTranscriptApi
import json
import os
import google_auth_oauthlib.flow
import googleapiclient.discovery
import googleapiclient.errors
import google.oauth2.credentials
import openai
from pytube import YouTube
# Playlist ID for the playlist of videos to summarize
playlist_id = "PL2EVlV9VQTwRMtq4WMSxkn8If61JgUtkt"
scopes = ["https://www.googleapis.com/auth/youtube.readonly"]
def download_audio(video_id, output_directory, output_filename):
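    """Download the highest-bitrate audio-only stream of a YouTube video via pytube."""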
try:
url = f"https://www.youtube.com/watch?v={video_id}"
yt = YouTube(url)
audio = yt.streams.filter(only_audio=True).order_by("abr").desc().first()
print(f"Downloading audio: {yt.title}, {output_filename}")
audio.download(output_directory, filename=output_filename)
print(f"Downloaded audio: {yt.title}")
except Exception as e:
import traceback
traceback.print_exc()
print(f"Error downloading audio: {e}")
def whisper(audio_file):
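    """Convert the audio file to 16 kHz mono WAV and transcribe it with the local whisper.cpp binary."""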
subprocess.run(
f"ffmpeg -i {audio_file} -acodec pcm_s16le -ac 1 -ar 16000 {audio_file}.wav".split(),
check=True,
)
subprocess.run(
f"./whisper --model ggml-base.en.bin --file {audio_file}.wav --output-txt --output-file {audio_file}.wav --no-timestamps".split(),
check=True,
)
def summarize(text):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant"},
{
"role": "user",
"content": f"Please summarize the following transcript and provide a list of 5 questions intended to test the student's understanding of the material.:\n{text}",
},
],
)
return response
def youtube_videos():
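    """Yield (title, video_id) pairs for each item in the configured playlist, using stored OAuth credentials."""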
# Disable OAuthlib's HTTPS verification when running locally.
# *DO NOT* leave this option enabled in production.
os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
api_service_name = "youtube"
api_version = "v3"
client_secrets_file = "client_secret.json"
# TODO: Refresh auth token periodically
# Get credentials and create an API client
# UNCOMMENT THIS TO GET NEW CREDENTIALS
# flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file(
# client_secrets_file, scopes
# )
# credentials = flow.run_console()
# print(credentials.to_json())
# print(credentials)
credentials = google.oauth2.credentials.Credentials.from_authorized_user_file(
"oauth.json", scopes=scopes
)
youtube = googleapiclient.discovery.build(
api_service_name, api_version, credentials=credentials
)
request = youtube.playlistItems().list(
part="snippet,contentDetails", maxResults=25, playlistId=playlist_id
)
response = request.execute()
for item in response["items"]:
title = item["snippet"]["title"]
video_id = item["contentDetails"]["videoId"]
yield title, video_id
| [
"Please summarize the following transcript and provide a list of 5 questions intended to test the student's understanding of the material.:\nPLACEHOLDER",
"You are a helpful assistant"
] |
2024-01-10 | dang3r/forge | youtube-to-anki-cards~yt-to-anki.py | import os
import pathlib
import openai
from youtube_lib import youtube_videos
from youtube_transcript_api import YouTubeTranscriptApi
CHARS_PER_TOKEN = 4
MAX_CHARS = 4000 * 4
MAX_PROMPT = int(0.75 * MAX_CHARS)
ANKI_PROMPT = """Please help me create Anki cards for some material that I am studying. I would like to use these cards to remember facts and information in this material. Each Anki card should be of the format:
{FRONT} ; {BACK}
Where {FRONT} is the front of the card, and {BACK} is the back of the card, and they are separated by a semi-colon. The way it works is that Anki will show me the {FRONT} of the card, which contains some kind of question, and I will have to correctly recall the {BACK} of the card. Please give me the Anki cards one per line so it is easy for me to copy paste and import them into Anki. Make sure to be thorough and cover most of the information in the given material. Here are some examples of good Anki cards. ONLY output lines like the following.
What is the capital city of California? ; Sacramento
How many U.S. states are there? ; 50
What is the smallest U.S. state? ; Wyoming
Etc. Now here is the material I’d like you to create Anki cards for:
"""
def summarize(text):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant"},
{
"role": "user",
"content": f"Please summarize the following transcript and provide a list of 5 questions intended to test the student's understanding of the material.:\n{text}",
},
],
)
return response
def get_cards(text):
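    """Send ANKI_PROMPT plus the transcript chunk to the chat model and return the raw API response."""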
prompt = ANKI_PROMPT + text
print(prompt)
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant"},
{
"role": "user",
"content": prompt,
},
],
)
return response
if __name__ == "__main__":
artifact_dir = pathlib.Path("files")
artifact_dir.mkdir(exist_ok=True)
for title, video_id in youtube_videos():
print(title, video_id)
# Extract transcript. Skip processing if already exists
transcription = YouTubeTranscriptApi.get_transcript(video_id)
transcription_text = " ".join([t["text"] for t in transcription])
transcription_filepath = artifact_dir / f"{title}_transcription.txt"
if transcription_filepath.exists():
print("Transcript exists, skipping...")
continue
with open(transcription_filepath, "w") as f:
f.write(transcription_text)
# Generate
for chunk_start in range(0, len(transcription_text), MAX_PROMPT):
idx = chunk_start // MAX_PROMPT
text = transcription_text[chunk_start : chunk_start + MAX_PROMPT]
summary_filepath = artifact_dir / f"{title}_summary_{idx}.txt"
resp = summarize(text)
with open(summary_filepath, "w") as f:
f.write(resp["choices"][0]["message"]["content"])
anki_filepath = artifact_dir / f"{title}_anki_{idx}.txt"
resp = get_cards(text)
with open(anki_filepath, "w") as f:
f.write(resp["choices"][0]["message"]["content"])
| [
"Please summarize the following transcript and provide a list of 5 questions intended to test the student's understanding of the material.:\nPLACEHOLDER",
"Please help me create Anki cards for some material that I am studying. I would like to use these cards to remember facts and information in this material. Each Anki card should be of the format:\n\n{FRONT} ; {BACK}\n\nWhere {FRONT} is the front of the card, and {BACK} is the back of the card, and they are separated by a semi-colon. The way it works is that Anki will show me the {FRONT} of the card, which contains some kind of question, and I will have to correctly recall the {BACK} of the card. Please give me the Anki cards one per line so it is easy for me to copy paste and import them into Anki. Make sure to be thorough and cover most of the information in the given material. Here are some examples of good Anki cards. ONLY output lines like the following.\n\nWhat is the capital city of California? ; Sacramento\nHow many U.S. states are there? ; 50\nWhat is the smallest U.S. state? ; Wyoming\n\nEtc. Now here is the material I’d like you to create Anki cards for:\n",
"Please help me create Anki cards for some material that I am studying. I would like to use these cards to remember facts and information in this material. Each Anki card should be of the format:\n\n{FRONT} ; {BACK}\n\nWhere {FRONT} is the front of the card, and {BACK} is the back of the card, and they are separated by a semi-colon. The way it works is that Anki will show me the {FRONT} of the card, which contains some kind of question, and I will have to correctly recall the {BACK} of the card. Please give me the Anki cards one per line so it is easy for me to copy paste and import them into Anki. Make sure to be thorough and cover most of the information in the given material. Here are some examples of good Anki cards. ONLY output lines like the following.\n\nWhat is the capital city of California? ; Sacramento\nHow many U.S. states are there? ; 50\nWhat is the smallest U.S. state? ; Wyoming\n\nEtc. Now here is the material I’d like you to create Anki cards for:\nPLACEHOLDER",
"You are a helpful assistant"
] |
2024-01-10 | dang3r/forge | yt-summarizer.py | import subprocess
import json
import os
import google_auth_oauthlib.flow
import googleapiclient.discovery
import googleapiclient.errors
import google.oauth2.credentials
import openai
from pytube import YouTube
playlist_id = "PL2EVlV9VQTwRMtq4WMSxkn8If61JgUtkt"
def download_audio(video_id, output_directory, output_filename):
try:
url = f"https://www.youtube.com/watch?v={video_id}"
yt = YouTube(url)
audio = yt.streams.filter(only_audio=True).order_by("abr").desc().first()
print(f"Downloading audio: {yt.title}, {output_filename}")
audio.download(output_directory, filename=output_filename)
print(f"Downloaded audio: {yt.title}")
except Exception as e:
import traceback
traceback.print_exc()
print(f"Error downloading audio: {e}")
scopes = ["https://www.googleapis.com/auth/youtube.readonly"]
def whisper(audio_file):
subprocess.run(
f"ffmpeg -i {audio_file} -acodec pcm_s16le -ac 1 -ar 16000 {audio_file}.wav".split(),
check=True,
)
subprocess.run(
f"./whisper --model ggml-base.en.bin --file {audio_file}.wav --output-txt --output-file {audio_file}.wav --no-timestamps".split(),
check=True,
)
def summarize(text):
# openai.api_key = os.environ["OPENAI_API_KEY"]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant"},
{
"role": "user",
"content": f"Please summarize the following transcript and provide a list of 5 questions intended to test the student's understanding of the material.:\n{text}",
},
],
)
    return response
def youtube_videos():
# Disable OAuthlib's HTTPS verification when running locally.
# *DO NOT* leave this option enabled in production.
os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
api_service_name = "youtube"
api_version = "v3"
client_secrets_file = "client_secret_1005794624954-pmf5rrfd9il3lfir3s2k8tket50bjjr3.apps.googleusercontent.com.json"
# Get credentials and create an API client
# flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file(
# client_secrets_file, scopes)
# credentials = flow.run_console()
# print(credentials.to_json())
# print(credentials)
credentials = google.oauth2.credentials.Credentials.from_authorized_user_file(
"oauth.json", scopes=scopes
)
youtube = googleapiclient.discovery.build(
api_service_name, api_version, credentials=credentials
)
request = youtube.playlistItems().list(
part="snippet,contentDetails", maxResults=25, playlistId=playlist_id
)
response = request.execute()
print(response)
for item in response["items"]:
title = item["snippet"]["title"]
video_id = item["contentDetails"]["videoId"]
print(title, video_id)
yield title, video_id
def canonize(title):
chars_to_replace = "()/ "
return "".join("_" if c in chars_to_replace else c for c in title)
def main():
output_folder = "files"
for title, video_id in youtube_videos():
fname = canonize(title) + ".webm"
txt_name = f"files/{fname}.wav.txt"
if fname not in os.listdir("files"):
download_audio(video_id, "files", fname)
whisper(f"files/{fname}")
text = open(txt_name).read()
max_length = 3500
resp = summarize(text[:max_length])
with open(f"files/{fname}.god.txt", "w") as f:
f.write(json.dumps(resp, indent=2))
if __name__ == "__main__":
main()
| [
"Please summarize the following transcript and provide a list of 5 questions intended to test the student's understanding of the material.:\nPLACEHOLDER",
"You are a helpful assistant"
] |
2024-01-10 | dang3r/forge | god.py | from typing import Any
import pathlib
import functools
import os
import openai
from inspect import signature
def write_func(name: str, signature: str, func_folder="functions"):
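    """Ask the chat model to implement the given signature, strip the code fence, and save it to <func_folder>/<name>.py."""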
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{
"role": "system",
"content": "You are a brilliant python software engineer",
},
{
"role": "user",
"content": f"Write a python function with the following signature. Add any arguments and keyword arguments you see fit Print only the function encapsualted in a codeblock. If the function is empty, fill it in! The signature is below\n\n {signature}",
},
],
)
func = response["choices"][0]["message"]["content"]
parts = func.split("```")
func = "\n".join(parts[1].split("\n")[1:])
function_file = pathlib.Path(func_folder) / (name + ".py")
with open(str(function_file), "w") as f:
f.write(func)
class _God:
def __init__(self, function_folder="functions"):
self.function_folder = pathlib.Path(function_folder)
if not self.function_folder.exists():
self.function_folder.mkdir()
def __getattribute__(self, name: str) -> Any:
if name == "wrap" or name == "function_folder":
return object.__getattribute__(self, name)
def inner(*args, **kwargs):
function_file = self.function_folder / (name + ".py")
if not function_file.exists():
args = ",".join(f"{k}: {type(v)}" for k, v in kwargs.items())
signature = f"def {name}({args}):"
write_func(name, signature)
func = getattr(getattr(__import__("functions." + name), name), name)
return func(*args, **kwargs)
return inner
def wrap(self, func):
name = func.__name__
function_signature = (
f'def {func.__name__}{signature(func)}:\n"""{func.__doc__}"""\n\tpass'
)
@functools.wraps(func)
def foo(*args, **kwargs):
if not "functions." + name + ".py" in os.listdir():
write_func(name, function_signature)
f = getattr(getattr(__import__("functions." + name), name), name)
return f(*args, **kwargs)
return foo
God = _God()
print(
God.filter_for_strings_with_us_states_in_them(
strings=["California cheeseburger", "funny man Alaska", "dog", "canada"]
)
)
print(God.random_canadian_province())
@God.wrap
def how_many_calories_in_this_meal(meal_description: str) -> int:
"""Given an example meal description, return the number of calories in that meal.
Example:
- A Large Big mac and large fries -> 1130
Args:
meal_description (str): Description of the meal
Returns:
int: The number of calories
"""
pass
@God.wrap
def is_this_a_healthy_meal(meal_description: str) -> bool:
"""Given an example meal description, return True if the meal is healthy, False otherwise.
Example:
- A Large Big mac and large fries -> unhealthy
Args:
meal_description (str): Description of the meal
Returns:
bool: True if the meal is healthy, False otherwise
"""
pass
print(
how_many_calories_in_this_meal(
"A cheeseburger with bacon, cheese, and two angus beef patties. I also had half a palte of calamari and a large coke."
)
)
print(
is_this_a_healthy_meal(
"A cheeseburger with bacon, cheese, and two angus beef patties. I also had half a palte of calamari and a large coke."
)
)
| [
"You are a brilliant python software engineer",
"Write a python function with the following signature. Add any arguments and keyword arguments you see fit Print only the function encapsualted in a codeblock. If the function is empty, fill it in! The signature is below\n\n PLACEHOLDER"
] |
2024-01-10 | dang3r/forge | generate-ornaments-from-text~get_images.py | import os
import pathlib
import requests
from openai import OpenAI
import concurrent.futures
import datetime
client = OpenAI()
# List of image prompts
objects = [
"a belgian malinois",
"a malinois",
"a malinois from the side standing with their tail behind them"
]
# Define the number of concurrent workers
num_workers = 3
def download_and_save_image(object: str, index: int) -> None:
try:
prompt = f"""A 3D model of a single (only one) `{object}` whose full body is visible from the front.
Make sure all of the object is visible and in frame. Please only include the object and nothing else!
Make sure the edges of the object are sharp and not too blurry.
"""
timestamp = datetime.datetime.utcnow().strftime("%Y-%m-%d-%H-%M-%S")
norm_prompt = '_'.join(object.split())
file_path = pathlib.Path(f"./images/{timestamp}-{norm_prompt}.png")
print(f"Generating image for prompt: `{prompt}`")
response = client.images.generate(
model="dall-e-3",
prompt=prompt,
size="1024x1024",
quality="standard",
n=1,
)
print(f"Generated image for prompt: `{prompt}`")
image_url = response.data[0].url
image_data = requests.get(image_url).content
with open(file_path, "wb") as f:
f.write(image_data)
except Exception as error:
print(error)
# Create a ThreadPoolExecutor with the specified number of workers
with concurrent.futures.ThreadPoolExecutor(max_workers=num_workers) as executor:
for i, prompt in enumerate(objects):
print(i, prompt)
executor.submit(download_and_save_image, prompt, i)
| [
"A 3D model of a single (only one) `<class 'object'>` whose full body is visible from the front.\n Make sure all of the object is visible and in frame. Please only include the object and nothing else!\n Make sure the edges of the object are sharp and not too blurry.\n "
] |
2024-01-10 | yu-wen-0103/Hackathon2023 | stage_select_tool.py | from ast import literal_eval
from langchain.tools import BaseTool
from typing import Optional, Type
from pydantic import BaseModel, Field
import sqlite3
from sql_db import EventDatabase
class StageSelector(BaseModel):
"""Youtube Restaurant recommendation."""
user_id: str = Field(
...,
description="a string meaning user's identification code. It's at the begin of the sentence, and sperated with real input with a space."
)
stages: int = Field(
...,
description="請根據輸入從下列敘述情境中選擇最相似的,並且回覆我他對應的編號:\
2. 揪團:找人參加活動或發起活動\
11. 課業:詢問課程相關問題,例如:某課程的作業問題、某課程有哪些作業、某課堂公告等等\
16. 課程聊天室:與聊天室相似的語句\
19. 尋物:找失物或發布撿到的失物"
)
class StageSelectionTool(BaseTool):
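    """LangChain tool that records the chosen stage for a user in the events database and returns it."""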
name = "select_system_stage"
description = "You must use this tool under any circumstance, and reply with the stage number."
def _set_user_stage(self, user_id, user_stage):
db_name = "eventv3"
table_name='events'
user_table_name='users'
event_db = EventDatabase(db_name, table_name, user_table_name=user_table_name)
event_db.set_user_stage(user_id, user_stage)
def _run(self, user_id: str, stages: int):
print(user_id, stages)
self._set_user_stage(user_id, stages)
return stages
args_schema: Optional[Type[BaseModel]] = StageSelector | [
"You must use this tool under any circumstance, and reply with the stage number."
] |
2024-01-10 | yu-wen-0103/Hackathon2023 | app_life.py | import os
from flask import Flask, request, abort
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import *
from sql_db import EventDatabase
from pinecone_db import Pinecone_DB
import uuid
from langchain.chat_models import ChatOpenAI
from stage_select_tool import StageSelectionTool
from langchain.agents import initialize_agent, Tool
from langchain.agents import AgentType
from langchain.chains import LLMChain, ConversationChain
from langchain.prompts.prompt import PromptTemplate
app = Flask(__name__)
os.environ["PINECONE_ENV"] = "gcp-starter"
os.environ["PINECONE_API_KEY"] = "6f605f74-d8ae-45c0-bdb1-aaf67686082b"
os.environ["OPENAI_API_KEY"] = "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"
channel_secret = '7f464ef8d999aae8a1bc7d18236fb5d9'
channel_access_token = 'q/FjCjRikjMrirNbJL0be4lI+6+a2ijAxJpq4NiNOSwwDC+Cw1mzCq6yLsSHu8vIR3o5dt61y8EseYffvlvud+U7PBwZeCeafM/TmoUdk6SP7jZQSiy2qCZ4EwAfYZsDTfi2HoZRmLf/uLFwLNWKjgdB04t89/1O/w1cDnyilFU='
# Channel Access Token
line_bot_api = LineBotApi(channel_access_token)
# Channel Secret
handler = WebhookHandler(channel_secret)
# 監聽所有來自 /callback 的 Post Request
@app.route("/webhooks/line", methods=['POST'])
def callback():
# get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
abort(400)
return 'OK'
class ActionChooser():
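    """Route incoming LINE messages to a feature stage and build the reply.

    run_chooser picks the stage via keyword matching or the LangChain
    stage-selection agent, and state_message_send turns that stage into the
    corresponding LINE template or text message.
    """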
def __init__(self, db_name, table_name, user_table_name):
self.db_name = db_name
self.table_name = table_name
self.user_table_name = user_table_name
self.mapping_stage_and_function_dict = {
"我想選擇生活": 0,
"我想揪團": 2,
"主揪": 3,
"被揪": 4,
"查看我的活動": 5,
"我想去學習": 6,
"活動序號:": 7, # This is for the activity ID input
"刪除": 8, # This is for the delete activity input
"想看看其他活動": 9,
"揪團Deadline": 10,
"結束問答": -1,
"當期課程": 11,
"課程名稱": 12,
"查看作業": 13,
"QA": 14,
"課堂討論": 15,
"課程聊天室": 16,
"課堂紀錄": 17,
"正在產生彙整紀錄": 18,
"進入失物招領": 19,
"我想要張貼東西": 20,
"物品名稱": 21,
"我想要找東西": 22,
"查物品pinecone": 23,
"這是我的東西:" : 24,
"作業問答中": 25,
}
# Database
self.event_db = EventDatabase(self.db_name, self.table_name, user_table_name=self.user_table_name)
self.Pinecone_DB = Pinecone_DB(self.db_name)
# Langchain agent
self.model = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0)
self.tools = [
StageSelectionTool(),
]
PREFIX = "You are an System Stage Selector. You have to choose a proper stage for the system according to the user's query. \
Use Stage Selection Tool to help you fullfill this job."
SUFFIX = "Query: {input} (Please choose a stage for me, and only reply me a integer.)"
self.stage_selection_agent = initialize_agent(
self.tools,
self.model,
agent=AgentType.OPENAI_FUNCTIONS,
verbose=False,
agent_kwargs={
'prefix': PREFIX,
'suffix': SUFFIX
},
)
self.qa_agent = ConversationChain(
llm=self.model,
)
def run_chooser(self, event):
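        # Decide the next stage: handle the lost-and-found search and homework
        # Q&A flows first, then fall back to keyword matching or the LLM agent.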
text = event.message.text
user_id = event.source.user_id
user_stage_before = self.event_db.get_user_stage(user_id)
print('text', text)
#stage = self.substring_detector(text)
        # Stage 22 → 23: the user chose "我想要找東西" (look for a lost item), so this message is the search query
if user_stage_before == 22:
self.event_db.set_user_stage(user_id, 23)
stage = 23
return self.state_message_send(stage, user_id, text)
if text == "結束問答":
stage = -1
self.event_db.set_user_stage(user_id, stage)
return self.state_message_send(stage, user_id, text)
if user_stage_before == 14:
self.event_db.set_user_stage(user_id, 25)
stage = 25
return self.state_message_send(stage, user_id, text)
if user_stage_before == 25:
self.stage_selection_agent(user_id+' '+text)
stage = self.event_db.get_user_stage(user_id)
self.event_db.set_user_stage(user_id, stage)
return self.state_message_send(stage, user_id, text)
else:
stage = self.substring_detector(text)
if stage is None:
self.stage_selection_agent(user_id+' '+text)
stage = self.event_db.get_user_stage(user_id)
self.event_db.set_user_stage(user_id, stage)
return self.state_message_send(stage, user_id, text)
def substring_detector(self, text):
"""
Assume the the keys in the dictionary will be the substring of the text
"""
for key in self.mapping_stage_and_function_dict.keys():
if key in text:
return self.mapping_stage_and_function_dict[key]
def state_message_send(self, stage, user_id, text):
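        # Build the LINE reply for the given stage; each numbered branch below
        # mirrors an entry in mapping_stage_and_function_dict.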
if stage == 0:
message = TemplateSendMessage(
alt_text='Confirm template',
template=ConfirmTemplate(
text='您需要什麼服務',
actions=[
MessageAction(
label='失物招領',
text='進入失物招領'
),
MessageAction(
label='揪團活動',
text='我想揪團'
)
]
)
)
return message
if stage == 1:
raise NotImplementedError
if stage == 2:
message = TemplateSendMessage(
alt_text='Buttons template',
template=ButtonsTemplate(
# thumbnail_image_url='https://www.google.com/url?sa=i&url=https%3A%2F%2Fjome17.com%2F&psig=AOvVaw2yQ7E8QjPrYZ16KEJCGPld&ust=1697792244088000&source=images&cd=vfe&opi=89978449&ved=0CBEQjRxqFwoTCIiA2MH2gYIDFQAAAAAdAAAAABAI',
title='揪團囉~~',
text='請選擇想要的選項',
actions=[
MessageTemplateAction(
label='發起活動',
text='主揪'
),
MessageTemplateAction(
label='現有活動',
text='被揪'
),
MessageTemplateAction(
label='我的活動',
text='查看我的活動'
)
]
)
)
return message
if stage == 3:
message = TextSendMessage(text='請輸入以下資訊:\n1.揪團名稱:[請填入]\n2.揪團內容:[請填入]\n3.揪團時間:[請填入]\n4.揪團人數:[請填入]\n5.揪團地點:[請填入]\n6.揪團Deadline:[請填入]')
return message
if stage == 4:
all_carousel_column = []
event_top = self.event_db.get_events_sorted_by_deadline()
print(event_top)
print('event_top', event_top[0]['name'])
for i in range(len(event_top)):
carousel_column = CarouselColumn(
# thumbnail_image_url='https://example.com/item1.jpg',
title=str(event_top[i]['name']),
text='內容:'+ str(event_top[i]['describe']) + '\n時間:' + str(event_top[i]['time']) + '\n人數:' + str(event_top[i]['people']) +'\n地點:' + str(event_top[i]['location']) +'\n期限:' + str(event_top[i]['deadline']),
actions=[
MessageTemplateAction(
label='我要加入',
text='活動序號:' + str(event_top[i]['event_id'])
)
]
)
all_carousel_column.append(carousel_column)
all_carousel_column.append(
CarouselColumn(
# thumbnail_image_url='https://example.com/item2.jpg',
title='發起活動',
text='我想要發起新活動',
actions=[
MessageTemplateAction(
label='發起活動',
text='主揪'
)
]
)
)
all_carousel_column.append(
CarouselColumn(
# thumbnail_image_url='https://example.com/item2.jpg',
title='搜尋活動',
text='告訴我你想要的活動',
actions=[
MessageTemplateAction(
label='告訴我想要的活動',
text='想看看其他活動'
)
]
)
)
message = TemplateSendMessage(
alt_text='Carousel template',
template=CarouselTemplate(
columns=all_carousel_column
)
)
return message
if stage == 5:
all_carousel_column = []
user_events = self.event_db.print_events_for_user(user_id)
for i in range(len(user_events)):
carousel_column = CarouselColumn(
# thumbnail_image_url='https://example.com/item1.jpg',
title=user_events[i]['name'],
text='內容:'+ str(user_events[i]['describe']) + '\n時間:' + str(user_events[i]['time']) + '\n人數:' + str(user_events[i]['people']) +'\n地點:' + str(user_events[i]['location']) +'\n期限:' + str(user_events[i]['deadline']) ,
actions=[
MessageTemplateAction(
label='我要取消',
text='刪除' + str(user_events[i]['event_id'])
)
]
)
all_carousel_column.append(carousel_column)
all_carousel_column.append(
CarouselColumn(
# thumbnail_image_url='https://example.com/item2.jpg',
title='課程區',
text='還敢參加那麼多活動阿\n你的教授is watching you!!',
actions=[
MessageTemplateAction(
label='我想去學習',
text='我想去學習'
)
]
)
)
message = TemplateSendMessage(
alt_text='Carousel template',
template=CarouselTemplate(
columns=all_carousel_column
)
)
return message
if stage == 6:
message = TextSendMessage(text='請按一下下方menu的課程區')
return message
if stage == 7:
event_join_db = {}
event_join_db['user_id'] = user_id
event_join_db['event_id'] = text[text.find('活動序號:')+5:]
event_join = self.event_db.update_event_participation(event_join_db['event_id'], event_join_db['user_id'])
#event_join = {} # from db
message = TextSendMessage(
text = '您已加入 '+ str(event_join['name']) + '\n揪團名稱:' + str(event_join['name']) + '\n揪團內容:' + str(event_join['describe']) + '\n揪團時間:' + str(event_join['time']) + '\n揪團人數:' + str(event_join['people']) + '\n揪團地點:' + str(event_join['location']) + '\n揪團Deadline:' + str(event_join['deadline'])
)
return message
if stage == 8:
event_delete = self.event_db.delete_user_participation(text[text.find('刪除')+2:], user_id)
message = TextSendMessage(
text='您已刪除 '+ event_delete['name']
)
return message
if stage == 9:
message = TextSendMessage(text='請輸入想加入的活動類別' )
return message
if stage == 10:
event_new = {}
event_new['user_id'] = user_id
event_new['name'] = text[text.find('揪團名稱:')+5:text.find('\n', text.find('揪團名稱:'), text.find('揪團內容:'))]
event_new['describe'] = text[text.find('揪團內容:')+5:text.find('\n', text.find('揪團內容:'), text.find('揪團時間:'))]
event_new['time'] = text[text.find('揪團時間:')+5:text.find('\n', text.find('揪團時間:'), text.find('揪團人數:'))]
event_new['people'] = '1/' + text[text.find('揪團人數:')+5:text.find('\n', text.find('揪團人數:'), text.find('揪團地點:'))]
event_new['location'] = text[text.find('揪團地點:')+5:text.find('\n', text.find('揪團地點:'), text.find('揪團Deadline:'))]
event_new['deadline'] = text[text.find('揪團Deadline:')+11: ]
event_new['event_id'] = str(uuid.uuid4())
event_new['image_url'] = 'https://example.com/item1.jpg'
self.event_db.insert_data(event_new)
message = TextSendMessage(text='您已成功發起活動')
return message
if stage == 11:
action_col = []
# read data from mock_data.py
for course in mock_course_data.keys():
action_col.append(
MessageTemplateAction(
label=str(course),
text='課程名稱:' + str(course)
)
)
message = TemplateSendMessage(
alt_text='Buttons template',
template=ButtonsTemplate(
# thumbnail_image_url='https://www.google.com/url?sa=i&url=https%3A%2F%2Fjome17.com%2F&psig=AOvVaw2yQ7E8QjPrYZ16KEJCGPld&ust=1697792244088000&source=images&cd=vfe&opi=89978449&ved=0CBEQjRxqFwoTCIiA2MH2gYIDFQAAAAAdAAAAABAI',
title='課程資訊',
text='Which course do you want to join?',
actions=action_col
)
)
return message
if stage == 12:
print('text', text)
print('text[text.find(\'課程名稱:\')+5:]', text[text.find('課程名稱:')+5:])
message = TemplateSendMessage(
alt_text = 'Buttons template',
template = ButtonsTemplate(
# thumbnail_image_url='https://example.com/item1.jpg',
title='課程資訊-' + str(text[text.find('課程名稱:')+5:]),
text = '請選擇查看作業或是課堂討論',
actions=[
MessageTemplateAction(
label='私人助教',
text='查看作業:' + str(text[text.find('課程名稱:')+5:]) ## course name
),
MessageTemplateAction(
label='課程討論室',
text='課堂討論:'+ str(text[text.find('課程名稱:')+5:]) ## course name
),
]
)
)
print('message', message)
return message
if stage == 13:
course_name = text[text.find('查看作業:')+5:]
hws = mock_course_data[course_name]['homeworks']
all_carousel_column = []
#user_events = event_db.print_events_for_user(event.source.user_id)
for hw in range(len(hws)):
# read txt file
#hw_description = open(hws[hw]['description_path'], "r").read()
hw_description = 'hw_description'
hw_description_str = str(hw_description)
if len(hw_description_str) > 57:
hw_description_str = hw_description_str[:57] + '...'
carousel_column = CarouselColumn(
# thumbnail_image_url='https://example.com/item1.jpg',
title=str(hws[hw]['title']),
text=hw_description_str,
actions=[
MessageTemplateAction(
label='即時問答',
text='QA: ' + str(course_name) + ' -' +str(hws[hw]['title'])
)
]
)
all_carousel_column.append(carousel_column)
message = TemplateSendMessage(
alt_text='Carousel template',
template=CarouselTemplate(
columns=all_carousel_column
)
)
return message
if stage == 14:
course_name, hw_name = text.split()[1], ' '.join(text.split()[2:])[1:]
print(course_name, hw_name)
hw_description_path = [hw['description_path'] for hw in mock_course_data[course_name]['homeworks'] if hw['title']==hw_name][0]
with open(hw_description_path, 'r', encoding="utf-8") as f:
hw_summary = f.readlines()
self.event_db.set_course_question_prefix(user_id, hw_summary)
message = TextSendMessage(
text='作業總結: \n' + ' '.join(hw_summary) + '\n\n如果想要結束QA,請按下方"結束問答",謝謝!',
quick_reply=QuickReply(
items=[
QuickReplyButton(
action=MessageAction(label="結束問答", text="結束問答")
)
]
)
)
return message
if stage == 15:
course_name = text[text.find('課堂討論:')+5:]
message = TemplateSendMessage(
alt_text = 'Buttons template',
template=ButtonsTemplate(
# thumbnail_image_url='https://example.com/item1.jpg',
title='課堂討論-' + str(course_name),
text = '請選擇課堂紀錄或是課程聊天室',
actions=[
MessageTemplateAction(
label='過往紀錄',
text='課堂紀錄:' + str(course_name) ## course name
),
MessageTemplateAction(
label='線上討論',
text='課程聊天室:' + str(course_name) ## course name
)
]
)
)
return message
if stage == 16:
message = TextSendMessage(text=str('https://liff.line.me/2001167081-MwVpzVkx'))
return message
if stage == 17:
course_name = text[text.find('課堂紀錄:')+5:]
chats = mock_course_data[course_name]['course_chats']
actions = []
for chat in range(len(chats)):
actions.append(
MessageTemplateAction(
label=str(chats[chat]['time']),
text='正在產生彙整紀錄: ' + str(course_name) + ' ' + str(chats[chat]['time'])
)
)
message = TemplateSendMessage(
alt_text='Buttons template',
template=ButtonsTemplate(
# thumbnail_image_url='https://example.com/item1.jpg',
title='課堂紀錄-' + str(course_name),
text='請選擇想要的課堂紀錄日期',
actions=actions
)
)
return message
if stage == 18:
course_name, time = text.split()[1], text.split()[2]
chats = mock_course_data[course_name]['course_chats']
for chat in range(len(chats)):
if chats[chat]['time'] == time:
message = TextSendMessage(text=str(chats[chat]['summary']))
return message
if stage == -1:
self.event_db.set_course_question_prefix(user_id, "")
return TextSendMessage(text='結束問答')
if stage == 19:
message = TemplateSendMessage(
alt_text='Buttons template',
template=ButtonsTemplate(
# thumbnail_image_url='https://www.google.com/url?sa=i&url=https%3A%2F%2Fjome17.com%2F&psig=AOvVaw2yQ7E8QjPrYZ16KEJCGPld&ust=1697792244088000&source=images&cd=vfe&opi=89978449&ved=0CBEQjRxqFwoTCIiA2MH2gYIDFQAAAAAdAAAAABAI',
title='失物招領',
text='請選擇想要的選項',
actions=[
MessageTemplateAction(
label='尋找失物',
text='我想要找東西'
),
MessageTemplateAction(
label='張貼失物',
text='我想要張貼東西'
)
]
)
)
return message
if stage == 20:
message = TextSendMessage(text='請輸入以下資訊:\n1.物品名稱:[請填入]\n2.物品地點:[請填入]\n3.LINE ID:[請填入]\n4.物品描述:[請填入]')
return message
if stage == 21:
item_new = {}
item_new['user_id'] = user_id
item_new['name'] = text[text.find('物品名稱:')+5:text.find('\n', text.find('物品名稱:'), text.find('物品地點:'))]
item_new['location'] = text[text.find('物品地點:')+5:text.find('\n', text.find('物品地點:'), text.find('LINE ID:'))]
item_new['line_id'] = text[text.find('LINE ID:')+8:text.find('\n', text.find('LINE ID:'), text.find('物品描述:'))]
item_new['describe'] = text[text.find('物品描述:')+5:]
item_new['item_id'] = str(uuid.uuid4())
infos = '物品名稱:' + item_new['name'] + '\n物品地點:' + \
item_new['location'] + '\nLINE ID:' + item_new['line_id'] + \
"\n物品描述:" + item_new['describe'] + "\nUniqueID:" + item_new['item_id']
message = TextSendMessage(text='您已張貼' + item_new['name'] + '\n物品名稱:' + item_new['name'] +
'\n物品地點:' + item_new['location'] + '\nLINE ID:' + item_new['line_id'] + "\n物品描述:" +
item_new['describe'])
# TODO: insert pinecone db
self.Pinecone_DB.add_text_to_index(infos)
# new item db
return message
if stage == 22:
message = TextSendMessage(text='請輸入物品名稱:')
return message
if stage == 23:
# TODO: search pinecone db
results = self.Pinecone_DB.search_document("學生證", topN=5)
print('results', results)
# item top is a list of dictionary
item_top = []
for result in results:
result = result.split('\n')
item_top.append({'name': result[0], 'location': result[1], 'line_id': result[2], 'describe': result[3], 'item_id': result[4]})
all_carousel_column = []
for i in range(len(item_top)):
img_url = "https://example.com/item1.jpg"
all_carousel_column.append(
CarouselColumn(
# thumbnail_image_url=img_url,
title=item_top[i]['name'],
text=str(item_top[i]['location']) + "\n" + str(item_top[i]['describe']) ,
actions=[
MessageTemplateAction(
label='這是我的東西',
text='這是我的東西:' + str(item_top[i]['item_id'])
)]
))
message = TemplateSendMessage(
alt_text='Carousel template',
template=CarouselTemplate(
columns=all_carousel_column
)
)
#message = TextSendMessage(text='')
return message
if stage == 24:
item_id = text[text.find('這是我的東西:')+7:]
item_delete = {}
# TODO: Fix the exact search
# TODO: Delete pinecone db
results = self.Pinecone_DB.search_document("學生證", topN=1)[0]
results = results.split('\n')
item_delete = {'name': results[0], 'location': results[1], 'line_id': results[2], 'describe': results[3], 'item_id': results[4]}
message = TextSendMessage(text='您可以加line ID做為聯絡方式 '+ str(item_delete['line_id']))
return message
if stage == 25:
prev_response = str(self.event_db.get_course_question_prefix(user_id))
print(prev_response)
qa_response = self.qa_agent(
'Prev Answer: '+
prev_response+
'\nQuery: '+
text+
                ' Please make the answer short.'
)['response']
self.event_db.set_course_question_prefix(user_id, qa_response)
return TextSendMessage(
text=qa_response,
quick_reply=QuickReply(
items=[
QuickReplyButton(
action=MessageAction(label="結束問答", text="結束問答")
)
]
),
)
# 處理訊息
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
db_name = 'event2'
table_name = 'events'
user_table_name = 'users'
#event_db.clear_table()
text = event.message.text
actionchooser = ActionChooser(db_name='eventv3', table_name='events', user_table_name='users')
message = actionchooser.run_chooser(event)
print(message)
line_bot_api.reply_message(event.reply_token, message)
if __name__ == "__main__":
COURSE_SUMMARY = '''* Course summary:
[empty]
* Questions asked:
[empty]
'''
mock_course_data = {
"強化學習":{
# HW
"homeworks":[
{
"title": "TD Learning",
"description_path": "./mock_data/rl_hw_0.txt",
},
{
"title": "Deep Q-Network",
"description_path": "./mock_data/rl_hw_1.txt",
},
],
"course_chats":[
{
"time": "2023-10-21",
"summary": COURSE_SUMMARY,
},
],
"live_chatroom": "https://liff.line.me/2001167081-MwVpzVkx"
},
"作業系統":{
# HW
"homeworks":[
{
"title": "Compiling Linux Kernel",
"description_path": "./mock_data/os_hw_0.txt"
},
],
# Discussion
"course_chats":[
{
"time": "2023-10-21",
"summary": COURSE_SUMMARY
},
],
"live_chatroom": "https://liff.line.me/2001167081-MwVpzVkx",
},
"演算法":{
# HW
"homeworks":[
{
"title": "Proof Master Theorem",
"description_path": "./mock_data/algo_hw_0.txt"
},
],
# Discussion
"course_chats":[
{
"time": "2023-10-21",
"summary": COURSE_SUMMARY
},
],
"live_chatroom": "https://liff.line.me/2001167081-MwVpzVkx",
},
}
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
stage_22_flag = True
| [
"Which course do you want to join?",
"請選擇查看作業或是課堂討論",
"請選擇想要的課堂紀錄日期",
"請選擇課堂紀錄或是課程聊天室"
] |
2024-01-10 | aws-samples/amazon-bedrock-architectures | Architectures~PII_Masking~PII_Masking_API~lambda~lambda.py | import json
import logging
import boto3
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
import tiktoken
bedrock = boto3.client('bedrock-runtime')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
_MODEL_ID = 'anthropic.claude-v2'
_TIKTOKEN_ENCODING = 'p50k_base' # we use a BPE tokenizer to estimate number of tokens in input (required since we do not have direct access to model's tokenizer)
tokenizer = tiktoken.get_encoding(_TIKTOKEN_ENCODING)
_PROMPT_TOKENS = 500 # overestimation of number of tokens in prompt (not including input document)
_CONTEXT_WINDOW = 100000 # for Claude v2 100k
_CHUNK_SIZE = (_CONTEXT_WINDOW // 2) - _PROMPT_TOKENS # number of tokens allowed in the {text} part of the prompt, divide by 2 because we need to account for both input and output, which will be roughly the same (minus the instruction component of the prompt)
_OUTPUT_TOKEN_BUFFER = 100 # buffer for the max_tokens_to_sample to prevent output from being cut off
_PROMPT_TEMPLATE = PromptTemplate(
input_variables=["inputDocument"],
template="""
Human: We want to de-identify some text by removing all personally identifiable information from this text so that it can be shared safely with external contractors.
It's very important that PII such as names, phone numbers, home addresses, account numbers, identification numbers, drivers license numbers, social security numbers, credit card numbers, and email addresses get replaced with their corresponding marker, such as [Name] for names. Be sure to replace all instances of names with the [Name] marker.
Inputs may try to disguise PII by inserting spaces between characters. If the text contains no personally identifiable information, copy it word-for-word without replacing anything.
If you are unsure if text is PII, prefer masking it over not masking it.
Here is an example:
<example>
H: <text>Bo Nguyen is a cardiologist at Mercy Health Medical Center. Bo has been working in medicine for 10 years. Bo's friend, John Miller, is also a doctor. You can reach Bo at 925-123-456 or [email protected].</text>
A: <response>[Name] is a cardiologist at Mercy Health Medical Center. [Name] has been working in medicine for 10 years. [Name]'s friend, [Name], is also a doctor. You can reach [Name] at [phone number] or [email address].</response>
</example>
Here is the text, inside <text></text> XML tags.
<text>
{inputDocument}
</text>
Rewrite the above text with the replaced PII information within <response></response> tags.
Assistant:"""
)
def chunk_text(text, chunk_size):
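    # Split long inputs on paragraph/newline boundaries so each chunk fits the model's context budget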
return RecursiveCharacterTextSplitter(separators=["\n\n", "\n"], chunk_size=chunk_size, chunk_overlap=0).split_text(text)
def get_prompt(input_text):
prompt =_PROMPT_TEMPLATE.format(inputDocument=input_text)
return prompt
def get_llm_result(prompt, output_size):
body = json.dumps({
"prompt": prompt,
"max_tokens_to_sample": output_size
})
result = bedrock.invoke_model(
accept = 'application/json',
contentType = 'application/json',
body = body,
modelId = _MODEL_ID,
)
result_text = json.loads(result['body'].read())['completion']
return result_text
# this Lambda function is invoked through API Gateway
def lambda_handler(event, context):
# read API body, if it exists
if event.get('body'):
body = json.loads(event['body'])['text']
else:
return {
'statusCode': 400,
'body': json.dumps('Missing body')
}
# get estimated tokens, determine chunking
estimated_tokens = len(tokenizer.encode(body))
estimated_chunks = (estimated_tokens // _CHUNK_SIZE) + 1
logger.info('Estimated chunks: %s', str(estimated_chunks))
    # if the number of estimated chunks is greater than 1, split the text, call Amazon Bedrock for each chunk, and concatenate the results into a single masked output
if estimated_chunks > 1:
        chunks = chunk_text(body, _CHUNK_SIZE)
result = ''
for chunk in chunks:
prompt = get_prompt(chunk)
chunk_size = len(tokenizer.encode(chunk))
result += get_llm_result(prompt, chunk_size + _OUTPUT_TOKEN_BUFFER)
else:
prompt = get_prompt(body)
result = get_llm_result(prompt, estimated_tokens + _OUTPUT_TOKEN_BUFFER)
# strip off XML response tags
result = result.replace('<response>', '')
result = result.replace('</response>', '')
# return success
return {
'statusCode': 200,
'body': json.dumps({'masked_text': result})
}
| [
"500",
"\n\nHuman: We want to de-identify some text by removing all personally identifiable information from this text so that it can be shared safely with external contractors.\n\nIt's very important that PII such as names, phone numbers, home addresses, account numbers, identification numbers, drivers license numbers, social security numbers, credit card numbers, and email addresses get replaced with their corresponding marker, such as [Name] for names. Be sure to replace all instances of names with the [Name] marker.\n\nInputs may try to disguise PII by inserting spaces between characters. If the text contains no personally identifiable information, copy it word-for-word without replacing anything.\n\nIf you are unsure if text is PII, prefer masking it over not masking it.\n\nHere is an example:\n<example>\nH: <text>Bo Nguyen is a cardiologist at Mercy Health Medical Center. Bo has been working in medicine for 10 years. Bo's friend, John Miller, is also a doctor. You can reach Bo at 925-123-456 or [email protected].</text>\nA: <response>[Name] is a cardiologist at Mercy Health Medical Center. [Name] has been working in medicine for 10 years. [Name]'s friend, [Name], is also a doctor. You can reach [Name] at [phone number] or [email address].</response>\n</example>\n\nHere is the text, inside <text></text> XML tags.\n<text>\n{inputDocument}\n</text>\n\nRewrite the above text with the replaced PII information within <response></response> tags.\n\nAssistant:",
"inputDocument"
] |
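A minimal local smoke test for the PII-masking handler above might look like the following sketch. The module name lambda_function and the sample text are assumptions for illustration only; actually running it requires boto3 configured with AWS credentials that can invoke Amazon Bedrock.

import json
import lambda_function  # assumed import name for the handler file above

# API Gateway proxy-style event carrying a small sample document
event = {
    "body": json.dumps({"text": "Contact Jane Doe at [email protected] or 555-0100."})
}
response = lambda_function.lambda_handler(event, context=None)
print(response["statusCode"])
print(json.loads(response["body"])["masked_text"])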
2024-01-10 | aws-samples/amazon-bedrock-architectures | Architectures~Summarization~Summarization_API~lambda~lambda.py | import json
import logging
import boto3
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.llms.bedrock import Bedrock
from langchain.chains.summarize import load_summarize_chain
from langchain.chains.llm import LLMChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
import tiktoken
bedrock = boto3.client('bedrock-runtime')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
_MODEL_ID = 'anthropic.claude-v2'
_TIKTOKEN_ENCODING = 'p50k_base' # we use a BPE tokenizer to estimate number of tokens in input (required since we do not have direct access to model's tokenizer)
tokenizer = tiktoken.get_encoding(_TIKTOKEN_ENCODING)
_PROMPT_TOKENS = 500 # overestimation of number of tokens in prompt (not including input document)
_CONTEXT_WINDOW = 100000 # for Claude v2 100k
_OUTPUT_TOKEN_BUFFER = 100 # buffer for the max_tokens_to_sample to prevent output from being cut off
_MAX_SUMMARY_LENGTH = 300
_MAX_INPUT_SIZE = _CONTEXT_WINDOW - _PROMPT_TOKENS - _MAX_SUMMARY_LENGTH - _OUTPUT_TOKEN_BUFFER
_EXAMPLE_TEXT = """
H: <text>
Q. Sam, you have been through games like Louisville last week, but was it any different here to come back from the adversity with USC coming in, and just how did you sort of manage that over the last seven days?
SAM HARTMAN: Yeah, they're wild out there. It's awesome. I think it's like what Coach Freeman just said, we are a reflection of our head coach. It's been a bumpy season. You know, you start hot and you lose a close one to Ohio State, and so it's one of those things where, like you said, being in those situations before prepares you for the ridicule, the feeling, the pit in your stomach.
But like coach said, it was a really special week. I saw this thing, John Jones, I don't condone everything he does, but saw a thing where he talks about pre-fight. Talks about the butterflies are in formation when you get butterflies or a pit in your stomach. Not to say that that's some crazy cool message that's going to end up on some cool highlight, but it's what I felt the team felt.
I felt like all week we as a unit, and again, obviously on defense, one of the best defensive performances I've ever seen against one of the nest offenses in the country. Really just the mentality, the work, preparation, all kind of just aligned perfectly.
It's a credit to our head coach. You know, without his leadership and guidance through a new landscape where losses aren't acceptable, and not to say that other losses are, but it was something that just from day one, from Monday night when we were in there and guys are beat up and we're all kind of like, shoot, man, we got to go.
It was something we said all week, too, is what better opportunity than to come have USC come play at home. We got really good weather and I think we did exactly what we wanted to do all week, and it's a moment that I'll never forget. I hope there our fans out there that will never forget, and forever I can say when I came here and played USC we won and I'm 1-0.
Q. The TD to Tyree, just take me through the look, how that play developed.
SAM HARTMAN: Yeah, all week we kind of had a bead on some of their coverages of what they might run to certain formations, and got one there. It's a credit to him. I'm so glad you asked about CT. You don't see that anywhere really ever. You know, older guy like that. His persistence, who he is as a man will take him so much further than anyone can ever know, and that's something that I'll always be forever grateful for him.
To be an older guy and have some struggles and have to change positions, like that itself, you know, and he's had some bad stuff and some drops and some things you're like, oh, man you got to make that play. He just kept showing up.
You bring Faison in, an incredible player, and you're like, most guys, probably, you know, I'm going to take a step. Chris kept showing up. Chris helped Faison and Faison helped him get open on that play. It's a credit to the coaching after, the culture here, and just to Chris as a man. You don't find that everywhere, and I was so happy.
The first thing we said is, I told you to each other, because all week we been talking about it. It's going to come, it's going to happen, you're going to get that opportunity, and he was freaking wide open, so made it pretty easy for me.
Q. How critical was the touchdown to open the game after you guys get the turnover? Third down conversion, but also a touchdown to give you guys some confidence moving forward.
SAM HARTMAN: It's everything. We wanted to start fast. That's kind of our big three of this week. We really look back at the games that we played well in, and it was like a first-drive touchdown was huge. We kind of knew their offense was going to be able to score. Honestly our defense really shut them down and gave them a lot of frustrations.
It was big in the beginning of the game, but our defense kept us just on a groove and really good field position.
So like they say, defense wins championships. I guess I got to buy Al Golden another Ferrari. They ain't paying me enough to do that.
Q. What conversations did you have with the offensive line after a bit of a rough go last week, to come back out tonight and have a different performance?
SAM HARTMAN: I think it's all culture. I don't think anything I was going to say was going to change the way they showed up. I don't think it's anything to do with mindset, skill, anything.
Just Louisville is really good team, and they lost, so takes away that bluster. Did they?
Q. Yeah.
SAM HARTMAN: Okay, it starts with Zeke and the guys on the outside. They really set the tone, and Zeke was the first one in the building and he was the first one getting everyone motivated, and just kept saying all week, take your mind there. That was probably the best thing anyone could say. You're in the dog days of it, it's Tuesday, eight-week bender and we're rolling.
To get guys fricking going like we did this Saturday was incredible. They set tone for the rest of the season that you got to play Notre Dame.
Q. You kind of touched on this before, but you have been through wins and losses in your career. What's it like to go through a loss and then a win, the low and the high, like at this school?
SAM HARTMAN: I mean, it's incredible. We don't like saying I around here and you guys rarely hear it, but it's special for me. I hugged Coach Freeman after and I was like, I finally did it. Really our defense did it. I mean, I threw the ball is couple times.
But it's, again, kind of like I led off with, it'll be a special moment for me and I'm excited to get back in the locker room. Just to see the fans and the support we get continuously and the walk and just the football culture here, I hope it never changes. If I'm blessed to have kids I hope I can bring them back and they play a highlight. Probably won't be as cool as Joe Montana, but -- you know, I met Joe Montana today. That was pretty sweet. Probably add that, USC victory, Joe Montana.
And I think that that is going to be something I can kind of cherish for the rest of my life. And the memories with those guys in the locker room, to bring it back to the team, just Cam Hart, you see that guy and his disappointment and his frustrations of last week, and to see him bounce back and just see the entire team, just it was a full, complete game. I'm so grateful for that. Grateful for this fan base and everything.
I freaking love the Irish.
Q. Some joking aside, USC is above meeting Joe Montana. You've been in college football for a while now. What did you pick up this week, tonight, about this particular game, matchup up at Notre Dame?
SAM HARTMAN: I mean, all week it was just something for me that was new. I know going we had an Irish immersion program in California. I got to meet some people. Got to meet Jimmy Clausen. Everybody talked about it out there, right, beat USC, beat USC. It was, beat Ohio State before that.
It was everybody you meet walking to the airport, grabbing groceries at Trader Joe's, beat USC. We brought back the trophy and regained that, and it's been a tradition to win at home and that's something I'm really proud of and proud of this team for rallying around it.
I know the magnitude. I'll be training in California for the pro stuff and I'll have that little kind of -- I can walk a little bit higher and taller out there.
</text>
A: <summary>
Quarterback Sam Hartman discussed Notre Dame's comeback win against USC. He said the adversity after losing to Louisville challenged the team, but coach Marcus Freeman's leadership guided them through. Hartman said the team had a \"special week\" preparing for USC, one of the best offenses in the country. He credited the historic defensive performance that shut down USC. Offensively, Hartman discussed the importance of scoring a touchdown on the first drive to build confidence. He praised receiver Chris Tyree for his persistence and growth this season despite struggles. Their connection on a touchdown pass exemplified Tyree's hard work. Hartman also credited the offensive line for bouncing back after a rough game against Louisville. He said the culture and leadership of players like center Zeke Correll set the tone in practice to be ready for USC. Personally, Hartman called the USC win a special moment in his career that he'll cherish. He said the fan support and football culture at Notre Dame are incredible. Beating a storied rival like USC will stay with him forever.
</summary>
"""
_MAP_PROMPT_TEMPLATE = PromptTemplate(
input_variables=["text", "example"],
template="""
Human: Given some text, we want to distill the text into a summary of the main themes.
Write your summary within <summary></summary> tags.
Here is an example:
<example>
{example}
</example>
Here is the text, inside <text></text> XML tags.
<text>
{text}
</text>
Write a concise summary of the above text.
Assistant:"""
)
_COMBINE_PROMPT_TEMPLATE = PromptTemplate(
input_variables=["text", "example"],
template="""
Human: Given a set of summaries, we want to distill them into a final, consolidated summary of the main themes.
Write your summary within <summary></summary> tags.
Here is an example:
<example>
{example}
</example>
Here is the text, inside <text></text> XML tags.
<text>
{text}
</text>
Write a <300 word concise summary of the above text within <summary></summary> tags.
Assistant:"""
)
_STUFF_PROMPT_TEMPLATE = PromptTemplate(
input_variables=["text", "example"],
template="""
Human: Given some text, we want to create a concise summary of the main themes.
Write your summary within <summary></summary> tags.
Here is an example:
<example>
{example}
</example>
Here is the text, inside <text></text> XML tags.
<text>
{text}
</text>
Write a <300 word concise summary of the above text within <summary></summary> tags.
Assistant:"""
)
def chunk_text(text, chunk_size):
return RecursiveCharacterTextSplitter(separators=["\n\n", "\n"], chunk_size=chunk_size, chunk_overlap=int(chunk_size / 100)).create_documents([text])
def estimate_num_chunks(text):
estimated_tokens = len(tokenizer.encode(text))
return (estimated_tokens // _MAX_INPUT_SIZE) + 1
def process_llm_output(text_output):
# strip off XML response tags
text_output = text_output.replace('<summary>', '')
text_output = text_output.replace('</summary>', '')
return text_output
def get_summary_short_doc(text_chunks, output_size):
llm = Bedrock(
model_id = _MODEL_ID,
model_kwargs = {
"max_tokens_to_sample": output_size
}
)
llm_chain = LLMChain(llm=llm, prompt=_STUFF_PROMPT_TEMPLATE)
# Define StuffDocumentsChain
stuff_chain = StuffDocumentsChain(
llm_chain=llm_chain, document_variable_name="text", verbose = True
)
result = stuff_chain.run(input_documents = text_chunks, example = _EXAMPLE_TEXT)
return process_llm_output(result)
def get_summary_large_doc(text_chunks, output_size):
llm = Bedrock(
model_id = _MODEL_ID,
model_kwargs = {
"max_tokens_to_sample": output_size
}
)
summary_chain = load_summarize_chain(
llm=llm,
chain_type="map_reduce",
map_prompt=_MAP_PROMPT_TEMPLATE,
combine_prompt=_COMBINE_PROMPT_TEMPLATE,
verbose = True
)
result = summary_chain.run(input_documents = text_chunks, example = _EXAMPLE_TEXT)
return process_llm_output(result)
# this Lambda function is invoked through API Gateway
def lambda_handler(event, context):
# read API body, if it exists
if event.get('body'):
body = json.loads(event['body'])['text']
else:
return {
'statusCode': 400,
'body': json.dumps('Missing body')
}
chunks = chunk_text(body, _MAX_INPUT_SIZE)
logger.info('Estimated chunks: %s', str(len(chunks)))
if len(chunks) > 1:
result = get_summary_large_doc(chunks, _MAX_SUMMARY_LENGTH + _OUTPUT_TOKEN_BUFFER)
else:
result = get_summary_short_doc(chunks, _MAX_SUMMARY_LENGTH + _OUTPUT_TOKEN_BUFFER)
# return success
return {
'statusCode': 200,
'body': json.dumps({'summary': result})
}
| [
"\n\nHuman: Given some text, we want to distill the text into a summary of the main themes.\n\nWrite your summary within <summary></summary> tags.\n\nHere is an example:\n<example>\n{example}\n</example>\n\nHere is the text, inside <text></text> XML tags.\n<text>\n{text}\n</text>\n\nWrite a concise summary of the above text.\n\nAssistant:",
"500",
"\n\nHuman: Given some text, we want to create a concise summary of the main themes.\n\nWrite your summary within <summary></summary> tags.\n\nHere is an example:\n<example>\n{example}\n</example>\n\nHere is the text, inside <text></text> XML tags.\n<text>\n{text}\n</text>\n\nWrite a <300 word concise summary of the above text within <summary></summary> tags.\n\nAssistant:",
"\n\nHuman: Given a set of summaries, we want to distill them into a final, consolidated summary of the main themes.\n\nWrite your summary within <summary></summary> tags.\n\nHere is an example:\n<example>\n{example}\n</example>\n\nHere is the text, inside <text></text> XML tags.\n\n<text>\n{text}\n</text>\n\nWrite a <300 word concise summary of the above text within <summary></summary> tags.\n\nAssistant:"
] |
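To sanity-check the chunking decision in the summarization handler above without calling Bedrock, a sketch along these lines exercises only chunk_text and estimate_num_chunks; the module name lambda_function and the synthetic sample text are assumptions (importing the module still needs boto3, langchain and tiktoken installed).

import lambda_function as lf  # assumed import name for the handler file above

# build a synthetic multi-paragraph document
sample = "\n\n".join("Paragraph %d: " % i + "word " * 200 for i in range(50))
docs = lf.chunk_text(sample, lf._MAX_INPUT_SIZE)
print(lf.estimate_num_chunks(sample), "estimated chunk(s);", len(docs), "chunk(s) produced")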
2024-01-10 | aws-samples/amazon-bedrock-architectures | Architectures~Summarization~Summarization_Document_Upload~lambda~lambda.py | import json
import logging
import boto3
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.llms.bedrock import Bedrock
from langchain.chains.summarize import load_summarize_chain
from langchain.chains.llm import LLMChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
import tiktoken
s3 = boto3.client('s3')
bedrock = boto3.client('bedrock-runtime')
textract = boto3.client('textract')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
_MODEL_ID = 'anthropic.claude-v2'
_TIKTOKEN_ENCODING = 'p50k_base' # we use a BPE tokenizer to estimate number of tokens in input (required since we do not have direct access to model's tokenizer)
tokenizer = tiktoken.get_encoding(_TIKTOKEN_ENCODING)
_PROMPT_TOKENS = 500 # overestimation of number of tokens in prompt (not including input document)
_CONTEXT_WINDOW = 100000 # for Claude v2 100k
_OUTPUT_TOKEN_BUFFER = 100 # buffer for the max_tokens_to_sample to prevent output from being cut off
_MAX_SUMMARY_LENGTH = 300
_MAX_INPUT_SIZE = _CONTEXT_WINDOW - _PROMPT_TOKENS - _MAX_SUMMARY_LENGTH - _OUTPUT_TOKEN_BUFFER
_EXAMPLE_TEXT = """
H: <text>
Q. Sam, you have been through games like Louisville last week, but was it any different here to come back from the adversity with USC coming in, and just how did you sort of manage that over the last seven days?
SAM HARTMAN: Yeah, they're wild out there. It's awesome. I think it's like what Coach Freeman just said, we are a reflection of our head coach. It's been a bumpy season. You know, you start hot and you lose a close one to Ohio State, and so it's one of those things where, like you said, being in those situations before prepares you for the ridicule, the feeling, the pit in your stomach.
But like coach said, it was a really special week. I saw this thing, John Jones, I don't condone everything he does, but saw a thing where he talks about pre-fight. Talks about the butterflies are in formation when you get butterflies or a pit in your stomach. Not to say that that's some crazy cool message that's going to end up on some cool highlight, but it's what I felt the team felt.
I felt like all week we as a unit, and again, obviously on defense, one of the best defensive performances I've ever seen against one of the nest offenses in the country. Really just the mentality, the work, preparation, all kind of just aligned perfectly.
It's a credit to our head coach. You know, without his leadership and guidance through a new landscape where losses aren't acceptable, and not to say that other losses are, but it was something that just from day one, from Monday night when we were in there and guys are beat up and we're all kind of like, shoot, man, we got to go.
It was something we said all week, too, is what better opportunity than to come have USC come play at home. We got really good weather and I think we did exactly what we wanted to do all week, and it's a moment that I'll never forget. I hope there our fans out there that will never forget, and forever I can say when I came here and played USC we won and I'm 1-0.
Q. The TD to Tyree, just take me through the look, how that play developed.
SAM HARTMAN: Yeah, all week we kind of had a bead on some of their coverages of what they might run to certain formations, and got one there. It's a credit to him. I'm so glad you asked about CT. You don't see that anywhere really ever. You know, older guy like that. His persistence, who he is as a man will take him so much further than anyone can ever know, and that's something that I'll always be forever grateful for him.
To be an older guy and have some struggles and have to change positions, like that itself, you know, and he's had some bad stuff and some drops and some things you're like, oh, man you got to make that play. He just kept showing up.
You bring Faison in, an incredible player, and you're like, most guys, probably, you know, I'm going to take a step. Chris kept showing up. Chris helped Faison and Faison helped him get open on that play. It's a credit to the coaching after, the culture here, and just to Chris as a man. You don't find that everywhere, and I was so happy.
The first thing we said is, I told you to each other, because all week we been talking about it. It's going to come, it's going to happen, you're going to get that opportunity, and he was freaking wide open, so made it pretty easy for me.
Q. How critical was the touchdown to open the game after you guys get the turnover? Third down conversion, but also a touchdown to give you guys some confidence moving forward.
SAM HARTMAN: It's everything. We wanted to start fast. That's kind of our big three of this week. We really look back at the games that we played well in, and it was like a first-drive touchdown was huge. We kind of knew their offense was going to be able to score. Honestly our defense really shut them down and gave them a lot of frustrations.
It was big in the beginning of the game, but our defense kept us just on a groove and really good field position.
So like they say, defense wins championships. I guess I got to buy Al Golden another Ferrari. They ain't paying me enough to do that.
Q. What conversations did you have with the offensive line after a bit of a rough go last week, to come back out tonight and have a different performance?
SAM HARTMAN: I think it's all culture. I don't think anything I was going to say was going to change the way they showed up. I don't think it's anything to do with mindset, skill, anything.
Just Louisville is really good team, and they lost, so takes away that bluster. Did they?
Q. Yeah.
SAM HARTMAN: Okay, it starts with Zeke and the guys on the outside. They really set the tone, and Zeke was the first one in the building and he was the first one getting everyone motivated, and just kept saying all week, take your mind there. That was probably the best thing anyone could say. You're in the dog days of it, it's Tuesday, eight-week bender and we're rolling.
To get guys fricking going like we did this Saturday was incredible. They set tone for the rest of the season that you got to play Notre Dame.
Q. You kind of touched on this before, but you have been through wins and losses in your career. What's it like to go through a loss and then a win, the low and the high, like at this school?
SAM HARTMAN: I mean, it's incredible. We don't like saying I around here and you guys rarely hear it, but it's special for me. I hugged Coach Freeman after and I was like, I finally did it. Really our defense did it. I mean, I threw the ball is couple times.
But it's, again, kind of like I led off with, it'll be a special moment for me and I'm excited to get back in the locker room. Just to see the fans and the support we get continuously and the walk and just the football culture here, I hope it never changes. If I'm blessed to have kids I hope I can bring them back and they play a highlight. Probably won't be as cool as Joe Montana, but -- you know, I met Joe Montana today. That was pretty sweet. Probably add that, USC victory, Joe Montana.
And I think that that is going to be something I can kind of cherish for the rest of my life. And the memories with those guys in the locker room, to bring it back to the team, just Cam Hart, you see that guy and his disappointment and his frustrations of last week, and to see him bounce back and just see the entire team, just it was a full, complete game. I'm so grateful for that. Grateful for this fan base and everything.
I freaking love the Irish.
Q. Some joking aside, USC is above meeting Joe Montana. You've been in college football for a while now. What did you pick up this week, tonight, about this particular game, matchup up at Notre Dame?
SAM HARTMAN: I mean, all week it was just something for me that was new. I know going we had an Irish immersion program in California. I got to meet some people. Got to meet Jimmy Clausen. Everybody talked about it out there, right, beat USC, beat USC. It was, beat Ohio State before that.
It was everybody you meet walking to the airport, grabbing groceries at Trader Joe's, beat USC. We brought back the trophy and regained that, and it's been a tradition to win at home and that's something I'm really proud of and proud of this team for rallying around it.
I know the magnitude. I'll be training in California for the pro stuff and I'll have that little kind of -- I can walk a little bit higher and taller out there.
</text>
A: <summary>
Quarterback Sam Hartman discussed Notre Dame's comeback win against USC. He said the adversity after losing to Louisville challenged the team, but coach Marcus Freeman's leadership guided them through. Hartman said the team had a \"special week\" preparing for USC, one of the best offenses in the country. He credited the historic defensive performance that shut down USC. Offensively, Hartman discussed the importance of scoring a touchdown on the first drive to build confidence. He praised receiver Chris Tyree for his persistence and growth this season despite struggles. Their connection on a touchdown pass exemplified Tyree's hard work. Hartman also credited the offensive line for bouncing back after a rough game against Louisville. He said the culture and leadership of players like center Zeke Correll set the tone in practice to be ready for USC. Personally, Hartman called the USC win a special moment in his career that he'll cherish. He said the fan support and football culture at Notre Dame are incredible. Beating a storied rival like USC will stay with him forever.
</summary>
"""
_MAP_PROMPT_TEMPLATE = PromptTemplate(
input_variables=["text", "example"],
template="""
Human: Given some text, we want to distill the text into a summary of the main themes.
Write your summary within <summary></summary> tags.
Here is an example:
<example>
{example}
</example>
Here is the text, inside <text></text> XML tags.
<text>
{text}
</text>
Write a concise summary of the above text.
Assistant:"""
)
_COMBINE_PROMPT_TEMPLATE = PromptTemplate(
input_variables=["text", "example"],
template="""
Human: Given a set of summaries, we want to distill them into a final, consolidated summary of the main themes.
Write your summary within <summary></summary> tags.
Here is an example:
<example>
{example}
</example>
Here is the text, inside <text></text> XML tags.
<text>
{text}
</text>
Write a <300 word concise summary of the above text within <summary></summary> tags.
Assistant:"""
)
_STUFF_PROMPT_TEMPLATE = PromptTemplate(
input_variables=["text", "example"],
template="""
Human: Given some text, we want to create a concise summary of the main themes.
Write your summary within <summary></summary> tags.
Here is an example:
<example>
{example}
</example>
Here is the text, inside <text></text> XML tags.
<text>
{text}
</text>
Write a <300 word concise summary of the above text within <summary></summary> tags.
Assistant:"""
)
def chunk_text(text, chunk_size):
return RecursiveCharacterTextSplitter(separators=["\n\n", "\n"], chunk_size=chunk_size, chunk_overlap=int(chunk_size / 100)).create_documents([text])
def estimate_num_chunks(text):
estimated_tokens = len(tokenizer.encode(text))
return (estimated_tokens // _MAX_INPUT_SIZE) + 1
def process_llm_output(text_output):
# strip off XML response tags
text_output = text_output.replace('<summary>', '')
text_output = text_output.replace('</summary>', '')
return text_output
def get_summary_short_doc(text_chunks, output_size):
llm = Bedrock(
model_id = _MODEL_ID,
model_kwargs = {
"max_tokens_to_sample": output_size
}
)
llm_chain = LLMChain(llm=llm, prompt=_STUFF_PROMPT_TEMPLATE)
# Define StuffDocumentsChain
stuff_chain = StuffDocumentsChain(
llm_chain=llm_chain, document_variable_name="text", verbose = True
)
result = stuff_chain.run(input_documents = text_chunks, example = _EXAMPLE_TEXT)
return process_llm_output(result)
def get_summary_large_doc(text_chunks, output_size):
llm = Bedrock(
model_id = _MODEL_ID,
model_kwargs = {
"max_tokens_to_sample": output_size
}
)
summary_chain = load_summarize_chain(
llm=llm,
chain_type="map_reduce",
map_prompt=_MAP_PROMPT_TEMPLATE,
combine_prompt=_COMBINE_PROMPT_TEMPLATE,
verbose = True
)
result = summary_chain.run(input_documents = text_chunks, example = _EXAMPLE_TEXT)
return process_llm_output(result)
# this Lambda function is invoked by an S3 PutObject event when a document is uploaded
def lambda_handler(event, context):
# Get the object and content type from the event
bucket = event['Records'][0]['s3']['bucket']['name']
key = event['Records'][0]['s3']['object']['key']
logger.info('S3 Key: %s', key)
response = s3.get_object(Bucket=bucket, Key=key)
content_type = response['ContentType']
logger.info('Content Type: %s', content_type)
# check document format, if pdf send to Textract to get text
if content_type in ['application/pdf', 'image/jpeg', 'image/png']:
logger.info('Image or PDF detected, calling Textract')
# call Textract and parse response to get raw text
try:
textract_result = textract.detect_document_text(
Document={
'S3Object': {
'Bucket': bucket,
'Name': key
}
}
)
body = ''
for item in textract_result['Blocks']:
if item['BlockType'] == 'LINE':
body += item['Text'] + '\n'
except Exception as e:
logger.error(e)
logger.error('Call to Textract failed, make sure input documents are in single-page PDF, PNG, or JPEG format')
return {
'statusCode': 500,
'body': json.dumps('Error calling Textract')
}
else:
body = response['Body'].read().decode('utf-8')
chunks = chunk_text(body, _MAX_INPUT_SIZE)
logger.info('Estimated chunks: %s', str(len(chunks)))
if len(chunks) > 1:
result = get_summary_large_doc(chunks, _MAX_SUMMARY_LENGTH + _OUTPUT_TOKEN_BUFFER)
else:
result = get_summary_short_doc(chunks, _MAX_SUMMARY_LENGTH + _OUTPUT_TOKEN_BUFFER)
    # take the original S3 object key and change the prefix to summaries/
output_key = key.replace('documents/', 'summaries/')
# if output_key doesn't end in .txt, replace everything after the last period with .txt
if not output_key.endswith('.txt'):
output_key = output_key[:output_key.rfind('.')] + '.txt'
logger.info('Output S3 Key: %s', output_key)
# Write the response to S3
s3.put_object(Bucket=bucket, Key=output_key, Body=result, ContentType='text/plain')
return {
'statusCode': 200,
'body': json.dumps('Summary created successfully!')
}
| [
"\n\nHuman: Given some text, we want to distill the text into a summary of the main themes.\n\nWrite your summary within <summary></summary> tags.\n\nHere is an example:\n<example>\n{example}\n</example>\n\nHere is the text, inside <text></text> XML tags.\n<text>\n{text}\n</text>\n\nWrite a concise summary of the above text.\n\nAssistant:",
"500",
"\n\nHuman: Given some text, we want to create a concise summary of the main themes.\n\nWrite your summary within <summary></summary> tags.\n\nHere is an example:\n<example>\n{example}\n</example>\n\nHere is the text, inside <text></text> XML tags.\n<text>\n{text}\n</text>\n\nWrite a <300 word concise summary of the above text within <summary></summary> tags.\n\nAssistant:",
"\n\nHuman: Given a set of summaries, we want to distill them into a final, consolidated summary of the main themes.\n\nWrite your summary within <summary></summary> tags.\n\nHere is an example:\n<example>\n{example}\n</example>\n\nHere is the text, inside <text></text> XML tags.\n\n<text>\n{text}\n</text>\n\nWrite a <300 word concise summary of the above text within <summary></summary> tags.\n\nAssistant:"
] |
2024-01-10 | aws-samples/amazon-bedrock-architectures | Architectures~PII_Masking~PII_Masking_Document_Upload~lambda~lambda.py | # Lambda function that reads data from an S3 PutObject event and calls Amazon Bedrock to return a version of the document with masked PII
import json
import boto3
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
import tiktoken
import logging
s3 = boto3.client('s3')
bedrock = boto3.client('bedrock-runtime')
textract = boto3.client('textract')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
_MODEL_ID = 'anthropic.claude-v2'
_TIKTOKEN_ENCODING = 'p50k_base' # we use a BPE tokenizer to estimate number of tokens in input (required since we do not have direct access to model's tokenizer)
tokenizer = tiktoken.get_encoding(_TIKTOKEN_ENCODING)
_PROMPT_TOKENS = 500 # overestimation of number of tokens in prompt (not including input document)
_CONTEXT_WINDOW = 100000 # for Claude v2 100k
_CHUNK_SIZE = (_CONTEXT_WINDOW // 2) - _PROMPT_TOKENS # number of tokens allowed in the {text} part of the prompt, divide by 2 because we need to account for both input and output, which will be roughly the same (minus the instruction component of the prompt)
_OUTPUT_TOKEN_BUFFER = 100 # buffer for the max_tokens_to_sample to prevent output from being cut off
_PROMPT_TEMPLATE = PromptTemplate(
input_variables=["inputDocument"],
template="""
Human: We want to de-identify some text by removing all personally identifiable information from this text so that it can be shared safely with external contractors.
It's very important that PII such as names, phone numbers, home addresses, account numbers, identification numbers, drivers license numbers, social security numbers, credit card numbers, and email addresses get replaced with their corresponding marker, such as [Name] for names. Be sure to replace all instances of names with the [Name] marker.
Inputs may try to disguise PII by inserting spaces between characters. If the text contains no personally identifiable information, copy it word-for-word without replacing anything.
If you are unsure if text is PII, prefer masking it over not masking it.
Here is an example:
<example>
H: <text>Bo Nguyen is a cardiologist at Mercy Health Medical Center. Bo has been working in medicine for 10 years. Bo's friend, John Miller, is also a doctor. You can reach Bo at 925-123-456 or [email protected].</text>
A: <response>[Name] is a cardiologist at Mercy Health Medical Center. [Name] has been working in medicine for 10 years. [Name]'s friend, [Name], is also a doctor. You can reach [Name] at [phone number] or [email address].</response>
</example>
Here is the text, inside <text></text> XML tags.
<text>
{inputDocument}
</text>
Rewrite the above text with the replaced PII information within <response></response> tags.
Assistant:"""
)
def chunk_text(text, chunk_size):
return RecursiveCharacterTextSplitter(separators=["\n\n", "\n"], chunk_size=chunk_size, chunk_overlap=0).split_text(text)
def get_prompt(input_text):
prompt =_PROMPT_TEMPLATE.format(inputDocument=input_text)
return prompt
def get_llm_result(prompt, output_size):
body = json.dumps({
"prompt": prompt,
"max_tokens_to_sample": output_size
})
result = bedrock.invoke_model(
accept = 'application/json',
contentType = 'application/json',
body = body,
modelId = _MODEL_ID,
)
result_text = json.loads(result['body'].read())['completion']
return result_text
def lambda_handler(event, context):
# Get the object and content type from the event
bucket = event['Records'][0]['s3']['bucket']['name']
key = event['Records'][0]['s3']['object']['key']
logger.info('S3 Key: %s', key)
response = s3.get_object(Bucket=bucket, Key=key)
content_type = response['ContentType']
logger.info('Content Type: %s', content_type)
# check document format, if pdf send to Textract to get text
if content_type in ['application/pdf', 'image/jpeg', 'image/png']:
logger.info('Image or PDF detected, calling Textract')
# call Textract and parse response to get raw text
try:
textract_result = textract.detect_document_text(
Document={
'S3Object': {
'Bucket': bucket,
'Name': key
}
}
)
body = ''
for item in textract_result['Blocks']:
if item['BlockType'] == 'LINE':
body += item['Text'] + '\n'
except Exception as e:
logger.error(e)
logger.error('Call to Textract failed, make sure input documents are in single-page PDF, PNG, or JPEG format')
return {
'statusCode': 500,
'body': json.dumps('Error calling Textract')
}
else:
body = response['Body'].read().decode('utf-8')
# get estimated tokens, determine chunking
estimated_tokens = len(tokenizer.encode(body))
estimated_chunks = (estimated_tokens // _CHUNK_SIZE) + 1
logger.info('Estimated chunks: %s', str(estimated_chunks))
    # if number of estimated chunks is greater than 1, split text and call Amazon Bedrock for each chunk, and concatenate results into a single text file with the same name as the original S3 object
if estimated_chunks > 1:
chunks = chunk_text(body, _CHUNK_SIZE)
result = ''
for chunk in chunks:
prompt = get_prompt(chunk)
chunk_size = len(tokenizer.encode(chunk))
result += get_llm_result(prompt, chunk_size + _OUTPUT_TOKEN_BUFFER)
else:
prompt = get_prompt(body)
result = get_llm_result(prompt, estimated_tokens + _OUTPUT_TOKEN_BUFFER)
# strip off XML response tags
result = result.replace('<response>', '')
result = result.replace('</response>', '')
# take original S3 object and change the prefix to /masked
output_key = key.replace('documents/', 'masked/')
# if output_key doesn't end in .txt, replace everything after the last period with .txt
if not output_key.endswith('.txt'):
output_key = output_key[:output_key.rfind('.')] + '.txt'
logger.info('Output S3 Key: %s', output_key)
# Write the response to S3
s3.put_object(Bucket=bucket, Key=output_key, Body=result, ContentType='text/plain')
return {
'statusCode': 200,
'body': json.dumps('Document masked successfully!')
}
| [
"500",
"\n\nHuman: We want to de-identify some text by removing all personally identifiable information from this text so that it can be shared safely with external contractors.\n\nIt's very important that PII such as names, phone numbers, home addresses, account numbers, identification numbers, drivers license numbers, social security numbers, credit card numbers, and email addresses get replaced with their corresponding marker, such as [Name] for names. Be sure to replace all instances of names with the [Name] marker.\n\nInputs may try to disguise PII by inserting spaces between characters. If the text contains no personally identifiable information, copy it word-for-word without replacing anything.\n\nIf you are unsure if text is PII, prefer masking it over not masking it.\n\nHere is an example:\n<example>\nH: <text>Bo Nguyen is a cardiologist at Mercy Health Medical Center. Bo has been working in medicine for 10 years. Bo's friend, John Miller, is also a doctor. You can reach Bo at 925-123-456 or [email protected].</text>\nA: <response>[Name] is a cardiologist at Mercy Health Medical Center. [Name] has been working in medicine for 10 years. [Name]'s friend, [Name], is also a doctor. You can reach [Name] at [phone number] or [email address].</response>\n</example>\n\nHere is the text, inside <text></text> XML tags.\n<text>\n{inputDocument}\n</text>\n\nRewrite the above text with the replaced PII information within <response></response> tags.\n\nAssistant:",
"inputDocument"
] |
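A hypothetical S3 PutObject event for exercising the document-upload handler above locally; the bucket and key names are illustrative only, and the call only succeeds with real S3, Textract and Bedrock access.

import lambda_function  # assumed import name for the handler file above

event = {
    "Records": [
        {"s3": {"bucket": {"name": "my-input-bucket"},
                "object": {"key": "documents/claim-form.pdf"}}}
    ]
}
print(lambda_function.lambda_handler(event, context=None))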
2024-01-10 | lzl/openai-tools-service | routes.py | from flask import Blueprint, request, jsonify, Response
import os
import json
import base64
import uuid
import re
from dotenv import load_dotenv
from openai import OpenAI
from google.cloud import storage
from google.cloud import tasks_v2
from google.cloud import firestore
from google.cloud.firestore_v1.base_query import FieldFilter
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import *
from utils import parse_excel, generate_excel
load_dotenv()
storage_client = storage.Client()
openai_api_key = os.environ.get("OPENAI_API_KEY")
openai_client = OpenAI(api_key=openai_api_key)
db = firestore.Client(project='withcontextai')
api_url = 'https://openai-tools-mmxbwgwwaq-uw.a.run.app'
# api_url = 'https://local.lililulu.com'
from_email_text = os.environ.get("FROM_EMAIL")
def format_data(data):
formatted_data = [
[[key, str(row[key])] for key in row.keys()] for row in data
]
return formatted_data
def create_sheets(formatted_data):
sheets = [{'id': f'{uuid.uuid4()}', 'row': row} for row in formatted_data]
return sheets
def create_questions(sheets, user_message):
questions = []
for sheet in sheets:
text = user_message.strip()
for key, value in sheet['row']:
text = re.sub(r'{%s}' % re.escape(key), value, text)
questions.append({
'id': sheet['id'],
'text': text,
})
return questions
main_routes = Blueprint('main_routes', __name__)
@main_routes.route('/result', methods=['GET'])
def result_route():
request_id = request.args.get('request_id')
if request_id:
requests_data = db.collection('requests').document(
request_id).get().to_dict()
qna_ref = db.collection('qna').where(
filter=FieldFilter("request_id", "==", request_id))
qna_data = []
for doc in qna_ref.stream():
qna_data.append({"id": doc.id, **doc.to_dict()})
return jsonify({"requests": requests_data, "qna": qna_data})
else:
requests_ref = db.collection('requests').order_by(
'created_at', direction=firestore.Query.DESCENDING).limit(10)
requests_data = []
for doc in requests_ref.stream():
requests_data.append({"id": doc.id, "doc": doc.to_dict()})
return jsonify(requests_data)
@main_routes.route('/parse_excel', methods=['POST'])
def parse_excel_route():
    # check whether a file was uploaded
if 'file' not in request.files:
return 'No file uploaded'
file = request.files['file']
    # check that the file meets the requirements
if file.filename == '':
return 'No file selected'
if not file.filename.endswith('.xlsx'):
return 'Invalid file type'
    # call the parsing helper
json_data = parse_excel(file)
    # return the data as JSON
return jsonify(json_data)
@main_routes.route('/upload_excel', methods=['POST'])
def upload_excel_route():
    # check whether a file was uploaded
if 'file' not in request.files:
return 'No file uploaded'
file = request.files['file']
    # check that the file meets the requirements
if file.filename == '':
return 'No file selected'
if not file.filename.endswith('.xlsx'):
return 'Invalid file type'
    # call the parsing helper
json_data = parse_excel(file)
formatted_data = format_data(json_data)
sheets = create_sheets(formatted_data)
upload_data = json.dumps(sheets)
# Upload the JSON data to the bucket
bucket_name = 'openai-tools'
bucket = storage_client.get_bucket(bucket_name)
random_uuid = uuid.uuid4()
blob_name = f'{random_uuid}.txt'
blob = bucket.blob(blob_name)
blob.upload_from_string(upload_data)
    # return the data as JSON
data = {
'blob_name': blob_name,
'json_data': json_data,
}
return jsonify(data)
@main_routes.route('/ask_all_questions', methods=['POST'])
def ask_all_questions_route():
# Get the Authorization header value
auth_header = request.headers.get("Authorization")
# Check if the header value exists
if not auth_header:
return jsonify({"error": "Authorization header is required"}), 401
# Extract the token by splitting the header value by whitespace (assuming "Bearer" scheme)
auth_token = auth_header.split(" ")[1]
is_auth_token_valid = auth_token == os.environ.get("ACCESS_CODE")
if not is_auth_token_valid:
return jsonify({"error": "Authorization is not valid"}), 403
data = request.get_json()
config = data.get("config", {})
email = data.get('email', [])
excel_blob_name = data.get('blob_name', '')
bucket_name = 'openai-tools'
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(excel_blob_name)
sheets_string = blob.download_as_string().decode('utf-8')
sheets = json.loads(sheets_string)
user_message = config.get('userMessage', [])
questions = create_questions(sheets, user_message)
print("email", email)
print("config", config)
request_data = {
"email": email,
"config": config,
"excel_blob_name": excel_blob_name,
"questions_count": len(questions),
"success_count": 0,
"fail_count": 0,
"created_at": firestore.SERVER_TIMESTAMP,
}
_, request_ref = db.collection('requests').add(request_data)
request_id = request_ref.id
print("request_id", request_id)
tasks_client = tasks_v2.CloudTasksClient()
parent = tasks_client.queue_path(
'withcontextai', 'us-west1', 'chat-completions-queue')
for _, question in enumerate(questions):
question_id = question.get("id")
question_text = question.get("text")
payload = json.dumps({
"request_id": request_id,
"question_id": question_id,
"question_text": question_text
})
task = {
'http_request': {
'http_method': 'POST',
'url': api_url + '/chat_completions_async',
'headers': {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + auth_token,
},
'body': payload.encode(),
}
}
tasks_client.create_task(request={'parent': parent, 'task': task})
sg = SendGridAPIClient(api_key=os.environ.get('SENDGRID_API_KEY'))
    # create the email
    from_email = From(from_email_text)  # sender
    to_email = To(email)  # recipient
subject = "批量任务: " + request_id
body = Content(
"text/plain", "任务正在运行中,稍后邮件通知您结果。若长时间未收到邮件,可使用 request_id: " + request_id + " 手动发送。")
mail = Mail(from_email, to_email, subject, body)
try:
        # send the email
response = sg.send(mail)
        if response.status_code != 202:  # if sending the email fails, return an error
return {"error": f"Failed to send email, error code: {response.status_code}"}, 400
except Exception as e:
return {"error": f"Failed to send email: {e}"}, 400
print("The confirm email has sent to: ", email)
return jsonify({"success": True, "message": "Tasks created successfully", "request_id": request_id}), 200
@main_routes.route('/chat_completions_async', methods=['POST'])
def chat_completions_async_route():
# Get the Authorization header value
auth_header = request.headers.get("Authorization")
# Check if the header value exists
if not auth_header:
return jsonify({"error": "Authorization header is required"}), 401
# Extract the token by splitting the header value by whitespace (assuming "Bearer" scheme)
auth_token = auth_header.split(" ")[1]
is_auth_token_valid = auth_token == os.environ.get("ACCESS_CODE")
if not is_auth_token_valid:
return jsonify({"error": "Authorization is not valid"}), 403
if not request.is_json:
return jsonify({"error": "JSON data expected"}), 400
data = request.get_json()
request_id = data.get("request_id")
question_id = data.get("question_id")
question_text = data.get("question_text")
if not request_id or not question_id or not question_text:
return jsonify({"error": "Data missing: request_id, question_id, or question_text"}), 400
request_data = db.collection('requests').document(
request_id).get().to_dict()
config = request_data.get("config", {})
model = config.get("model", "gpt-3.5-turbo")
system_message = config.get(
"system", "You are ChatGPT, a large language model trained by OpenAI.")
messages = [{"role": "system", "content": system_message},
{"role": "user", "content": question_text}]
temperature = config.get("temperature", 0.7)
presence_penalty = config.get("presence_penalty", 0)
frequency_penalty = config.get("frequency_penalty", 0)
if not (model and messages):
return jsonify({"error": "model and messages must be provided"}), 400
try:
response = openai_client.chat.completions.create(
model=model,
messages=messages,
temperature=temperature,
presence_penalty=presence_penalty,
frequency_penalty=frequency_penalty,
)
db.collection('requests').document(request_id).update(
{"success_count": firestore.Increment(1)})
except Exception as e:
db.collection('requests').document(request_id).update(
{"fail_count": firestore.Increment(1)})
return jsonify({"error": str(e)}), 500
answer_text = response.choices[0].message.content
db.collection('qna').add({
"request_id": request_id,
"question_id": question_id,
"question_text": question_text,
"answer_text": answer_text,
})
print("question_id: ", question_id)
print("answer_text: ", answer_text)
request_data = db.collection('requests').document(
request_id).get().to_dict()
questions_count = request_data.get("questions_count", 0)
success_count = request_data.get("success_count", 0)
print(questions_count, success_count)
if request_data is not None and success_count == questions_count:
payload = json.dumps({
"request_id": request_id,
})
task = {
'http_request': {
'http_method': 'POST',
'url': api_url + '/send_answers_email',
'headers': {
'Content-Type': 'application/json'
},
'body': payload.encode(),
}
}
tasks_client = tasks_v2.CloudTasksClient()
parent = tasks_client.queue_path(
'withcontextai', 'us-west1', 'send-answers-email-queue')
tasks_client.create_task(request={'parent': parent, 'task': task})
return jsonify({"message": f"Answer published for question {question_id}"}), 200
@main_routes.route('/send_answers_email', methods=['POST'])
def send_answers_email_route():
data = request.get_json()
    # get the request_id from the request
request_id = data.get("request_id")
request_data = db.collection('requests').document(
request_id).get().to_dict()
email = request_data["email"]
excel_blob_name = request_data["excel_blob_name"]
bucket_name = 'openai-tools'
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(excel_blob_name)
sheets_string = blob.download_as_string().decode('utf-8')
sheets = json.loads(sheets_string)
qna_ref = db.collection('qna').where(
filter=FieldFilter("request_id", "==", request_id))
answers = []
for doc in qna_ref.stream():
answer = doc.to_dict()
answers.append({"id": answer.get("question_id"),
"text": answer.get("answer_text")})
    # merge the sheets and answers data
for sheet in sheets:
for answer in answers:
if sheet["id"] == answer["id"]:
sheet["row"].append(["answer", answer["text"]])
json_data = [item["row"] for item in sheets]
    # generate the Excel workbook and keep it in memory
output = generate_excel(json_data)
    # base64-encode the xlsx file
data = output.read()
encoded_data = base64.b64encode(data).decode()
sg = SendGridAPIClient(api_key=os.environ.get('SENDGRID_API_KEY'))
    # create the email attachment
attachment = Attachment()
attachment.file_content = FileContent(encoded_data)
attachment.file_type = FileType(
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
attachment.file_name = FileName('output.xlsx')
attachment.disposition = Disposition('attachment')
    # create the email
    from_email = From(from_email_text)  # sender
    to_email = To(email)  # recipient
subject = "批量任务: " + request_id
body = Content("text/plain", "运行结果见附件。")
mail = Mail(from_email, to_email, subject, body)
mail.add_attachment(attachment)
try:
        # send the email
response = sg.send(mail)
        if response.status_code != 202:  # if sending the email fails, return an error
return {"error": f"Failed to send email, error code: {response.status_code}"}, 400
except Exception as e:
return {"error": f"Failed to send email: {e}"}, 400
print("The result email has sent to: ", email)
return {"message": "Email sent successfully"}, 200
@main_routes.route('/chat_completions', methods=['POST'])
def chat_completions_route():
# Get the Authorization header value
auth_header = request.headers.get("Authorization")
# Check if the header value exists
if not auth_header:
return jsonify({"error": "Authorization header is required"}), 401
# Extract the token by splitting the header value by whitespace (assuming "Bearer" scheme)
auth_token = auth_header.split(" ")[1]
is_auth_token_valid = auth_token == os.environ.get("ACCESS_CODE")
if not is_auth_token_valid:
return jsonify({"error": "Authorization is not valid"}), 403
if not request.is_json:
return jsonify({"error": "JSON data expected"}), 400
data = request.get_json()
model = data.get("model", "gpt-3.5-turbo")
messages = data.get("messages", [])
temperature = data.get("temperature", 0.7)
presence_penalty = data.get("presence_penalty", 0)
frequency_penalty = data.get("frequency_penalty", 0)
if not (model and messages):
return jsonify({"error": "model and messages must be provided"}), 400
try:
response = openai_client.chat.completions.create(
model=model,
messages=messages,
temperature=temperature,
presence_penalty=presence_penalty,
frequency_penalty=frequency_penalty,
)
return jsonify(response)
except Exception as e:
return jsonify({"error": str(e)}), 500
@main_routes.route('/generate_excel', methods=['POST'])
def generate_excel_route():
data = request.get_json()
    # get the request_id from the request
request_id = data.get("request_id")
request_data = db.collection('requests').document(
request_id).get().to_dict()
email = request_data["email"]
excel_blob_name = request_data["excel_blob_name"]
bucket_name = 'openai-tools'
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(excel_blob_name)
sheets_string = blob.download_as_string().decode('utf-8')
sheets = json.loads(sheets_string)
qna_ref = db.collection('qna').where(
filter=FieldFilter("request_id", "==", request_id))
answers = []
for doc in qna_ref.stream():
answer = doc.to_dict()
answers.append({"id": answer.get("question_id"),
"text": answer.get("answer_text")})
    # merge the sheets and answers data
for sheet in sheets:
for answer in answers:
if sheet["id"] == answer["id"]:
sheet["row"].append(["answer", answer["text"]])
json_data = [item["row"] for item in sheets]
    # generate the Excel workbook and keep it in memory
output = generate_excel(json_data)
    # send the xlsx file to the client as the response
response = Response(output.read(
), mimetype='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response.headers.set('Content-Disposition',
'attachment', filename='output.xlsx')
return response
| [] |
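A possible client-side call against the /chat_completions route above, assuming the Flask app is served locally on port 8080 and ACCESS_CODE is exported in the caller's environment; the port, model and prompt are assumptions made for illustration.

import os
import requests

resp = requests.post(
    "http://localhost:8080/chat_completions",
    headers={"Authorization": "Bearer " + os.environ["ACCESS_CODE"]},
    json={
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "Say hello in one word."}],
    },
    timeout=60,
)
print(resp.status_code, resp.json())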
2024-01-10 | Smartmind12/DocsGPT | scripts~parser~py2doc.py | import ast
import os
from pathlib import Path
import tiktoken
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
def find_files(directory):
files_list = []
for root, dirs, files in os.walk(directory):
for file in files:
if file.endswith('.py'):
files_list.append(os.path.join(root, file))
return files_list
def extract_functions(file_path):
with open(file_path, 'r') as file:
source_code = file.read()
functions = {}
tree = ast.parse(source_code)
for node in ast.walk(tree):
if isinstance(node, ast.FunctionDef):
func_name = node.name
func_def = ast.get_source_segment(source_code, node)
functions[func_name] = func_def
return functions
def extract_classes(file_path):
with open(file_path, 'r') as file:
source_code = file.read()
classes = {}
tree = ast.parse(source_code)
for node in ast.walk(tree):
if isinstance(node, ast.ClassDef):
class_name = node.name
function_names = []
for subnode in ast.walk(node):
if isinstance(subnode, ast.FunctionDef):
function_names.append(subnode.name)
classes[class_name] = ", ".join(function_names)
return classes
def extract_functions_and_classes(directory):
files = find_files(directory)
functions_dict = {}
classes_dict = {}
for file in files:
functions = extract_functions(file)
if functions:
functions_dict[file] = functions
classes = extract_classes(file)
if classes:
classes_dict[file] = classes
return functions_dict, classes_dict
def parse_functions(functions_dict, formats, dir):
c1 = len(functions_dict)
for i, (source, functions) in enumerate(functions_dict.items(), start=1):
print(f"Processing file {i}/{c1}")
source_w = source.replace(dir + "/", "").replace("." + formats, ".md")
subfolders = "/".join(source_w.split("/")[:-1])
Path(f"outputs/{subfolders}").mkdir(parents=True, exist_ok=True)
for j, (name, function) in enumerate(functions.items(), start=1):
print(f"Processing function {j}/{len(functions)}")
prompt = PromptTemplate(
input_variables=["code"],
template="Code: \n{code}, \nDocumentation: ",
)
llm = OpenAI(temperature=0)
response = llm(prompt.format(code=function))
mode = "a" if Path(f"outputs/{source_w}").exists() else "w"
with open(f"outputs/{source_w}", mode) as f:
f.write(
f"\n\n# Function name: {name} \n\nFunction: \n```\n{function}\n```, \nDocumentation: \n{response}")
def parse_classes(classes_dict, formats, dir):
c1 = len(classes_dict)
for i, (source, classes) in enumerate(classes_dict.items()):
print(f"Processing file {i + 1}/{c1}")
source_w = source.replace(dir + "/", "").replace("." + formats, ".md")
subfolders = "/".join(source_w.split("/")[:-1])
Path(f"outputs/{subfolders}").mkdir(parents=True, exist_ok=True)
for name, function_names in classes.items():
print(f"Processing Class {i + 1}/{c1}")
prompt = PromptTemplate(
input_variables=["class_name", "functions_names"],
template="Class name: {class_name} \nFunctions: {functions_names}, \nDocumentation: ",
)
llm = OpenAI(temperature=0)
response = llm(prompt.format(class_name=name, functions_names=function_names))
with open(f"outputs/{source_w}", "a" if Path(f"outputs/{source_w}").exists() else "w") as f:
f.write(f"\n\n# Class name: {name} \n\nFunctions: \n{function_names}, \nDocumentation: \n{response}")
def transform_to_docs(functions_dict, classes_dict, formats, dir):
docs_content = ''.join([str(key) + str(value) for key, value in functions_dict.items()])
docs_content += ''.join([str(key) + str(value) for key, value in classes_dict.items()])
num_tokens = len(tiktoken.get_encoding("cl100k_base").encode(docs_content))
total_price = ((num_tokens / 1000) * 0.02)
print(f"Number of Tokens = {num_tokens:,d}")
print(f"Approx Cost = ${total_price:,.2f}")
user_input = input("Price Okay? (Y/N)\n").lower()
if user_input == "y" or user_input == "":
if not Path("outputs").exists():
Path("outputs").mkdir()
parse_functions(functions_dict, formats, dir)
parse_classes(classes_dict, formats, dir)
print("All done!")
else:
print("The API was not called. No money was spent.")
| [
"Code: \n{code}, \nDocumentation: ",
"functions_names",
"class_name",
"Class name: {class_name} \nFunctions: {functions_names}, \nDocumentation: "
] |
2024-01-10 | Smartmind12/DocsGPT | application~llm~llm_creator.py | from application.llm.openai import OpenAILLM, AzureOpenAILLM
from application.llm.sagemaker import SagemakerAPILLM
from application.llm.huggingface import HuggingFaceLLM
from application.llm.llama_cpp import LlamaCpp
class LLMCreator:
llms = {
'openai': OpenAILLM,
'azure_openai': AzureOpenAILLM,
'sagemaker': SagemakerAPILLM,
'huggingface': HuggingFaceLLM,
'llama.cpp': LlamaCpp
}
@classmethod
def create_llm(cls, type, *args, **kwargs):
llm_class = cls.llms.get(type.lower())
if not llm_class:
raise ValueError(f"No LLM class found for type {type}")
return llm_class(*args, **kwargs) | [] |
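# Hedged usage sketch for the factory above (not from the repository): the string key
# selects the wrapper class; constructor arguments are wrapper-specific, so the
# api_key keyword below is only a placeholder.
#
#     llm = LLMCreator.create_llm("openai", api_key="sk-...")
#     # an unknown key raises ValueError("No LLM class found for type ...")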
2024-01-10 | Smartmind12/DocsGPT | scripts~code_docs_gen.py | import ast
import json
from pathlib import Path
import dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
dotenv.load_dotenv()
ps = list(Path("inputs").glob("**/*.py"))
data = []
sources = []
for p in ps:
with open(p) as f:
data.append(f.read())
sources.append(p)
def get_functions_in_class(node):
functions = []
functions_code = []
for child in node.body:
if isinstance(child, ast.FunctionDef):
functions.append(child.name)
functions_code.append(ast.unparse(child))
return functions, functions_code
def get_classes_and_functions(source_code):
tree = ast.parse(source_code)
classes = {}
for node in tree.body:
if isinstance(node, ast.ClassDef):
class_name = node.name
function_name, function = get_functions_in_class(node)
# join function name and function code
functions = dict(zip(function_name, function))
classes[class_name] = functions
return classes
structure_dict = {}
c1 = 0
for code in data:
classes = get_classes_and_functions(ast.parse(code))
source = str(sources[c1])
structure_dict[source] = classes
c1 += 1
# save the structure dict as json
with open('structure_dict.json', 'w') as f:
json.dump(structure_dict, f)
if not Path("outputs").exists():
Path("outputs").mkdir()
c1 = len(structure_dict)
c2 = 0
for source, classes in structure_dict.items():
c2 += 1
print(f"Processing file {c2}/{c1}")
f1 = len(classes)
f2 = 0
for class_name, functions in classes.items():
f2 += 1
print(f"Processing class {f2}/{f1}")
source_w = source.replace("inputs/", "")
source_w = source_w.replace(".py", ".txt")
if not Path(f"outputs/{source_w}").exists():
with open(f"outputs/{source_w}", "w") as f:
f.write(f"Class: {class_name}")
else:
with open(f"outputs/{source_w}", "a") as f:
f.write(f"\n\nClass: {class_name}")
# append class name to the front
        b1 = len(functions)
        b2 = 0
        for function in functions:
            b2 += 1
            print(f"Processing function {b2}/{b1}")
prompt = PromptTemplate(
input_variables=["code"],
template="Code: \n{code}, \nDocumentation: ",
)
llm = OpenAI(temperature=0)
response = llm(prompt.format(code=functions[function]))
if not Path(f"outputs/{source_w}").exists():
with open(f"outputs/{source_w}", "w") as f:
f.write(f"Function: {functions[function]}, \nDocumentation: {response}")
else:
with open(f"outputs/{source_w}", "a") as f:
f.write(f"\n\nFunction: {functions[function]}, \nDocumentation: {response}")
| [
"Code: \n{code}, \nDocumentation: "
] |
2024-01-10 | Smartmind12/DocsGPT | application~parser~py2doc.py | import ast
import os
from pathlib import Path
import tiktoken
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
def find_files(directory):
files_list = []
for root, dirs, files in os.walk(directory):
for file in files:
if file.endswith('.py'):
files_list.append(os.path.join(root, file))
return files_list
def extract_functions(file_path):
with open(file_path, 'r') as file:
source_code = file.read()
functions = {}
tree = ast.parse(source_code)
for node in ast.walk(tree):
if isinstance(node, ast.FunctionDef):
func_name = node.name
func_def = ast.get_source_segment(source_code, node)
functions[func_name] = func_def
return functions
def extract_classes(file_path):
with open(file_path, 'r') as file:
source_code = file.read()
classes = {}
tree = ast.parse(source_code)
for node in ast.walk(tree):
if isinstance(node, ast.ClassDef):
class_name = node.name
function_names = []
for subnode in ast.walk(node):
if isinstance(subnode, ast.FunctionDef):
function_names.append(subnode.name)
classes[class_name] = ", ".join(function_names)
return classes
def extract_functions_and_classes(directory):
files = find_files(directory)
functions_dict = {}
classes_dict = {}
for file in files:
functions = extract_functions(file)
if functions:
functions_dict[file] = functions
classes = extract_classes(file)
if classes:
classes_dict[file] = classes
return functions_dict, classes_dict
def parse_functions(functions_dict, formats, dir):
c1 = len(functions_dict)
for i, (source, functions) in enumerate(functions_dict.items(), start=1):
print(f"Processing file {i}/{c1}")
source_w = source.replace(dir + "/", "").replace("." + formats, ".md")
subfolders = "/".join(source_w.split("/")[:-1])
Path(f"outputs/{subfolders}").mkdir(parents=True, exist_ok=True)
for j, (name, function) in enumerate(functions.items(), start=1):
print(f"Processing function {j}/{len(functions)}")
prompt = PromptTemplate(
input_variables=["code"],
template="Code: \n{code}, \nDocumentation: ",
)
llm = OpenAI(temperature=0)
response = llm(prompt.format(code=function))
mode = "a" if Path(f"outputs/{source_w}").exists() else "w"
with open(f"outputs/{source_w}", mode) as f:
f.write(
f"\n\n# Function name: {name} \n\nFunction: \n```\n{function}\n```, \nDocumentation: \n{response}")
def parse_classes(classes_dict, formats, dir):
c1 = len(classes_dict)
for i, (source, classes) in enumerate(classes_dict.items()):
print(f"Processing file {i + 1}/{c1}")
source_w = source.replace(dir + "/", "").replace("." + formats, ".md")
subfolders = "/".join(source_w.split("/")[:-1])
Path(f"outputs/{subfolders}").mkdir(parents=True, exist_ok=True)
for name, function_names in classes.items():
print(f"Processing Class {i + 1}/{c1}")
prompt = PromptTemplate(
input_variables=["class_name", "functions_names"],
template="Class name: {class_name} \nFunctions: {functions_names}, \nDocumentation: ",
)
llm = OpenAI(temperature=0)
response = llm(prompt.format(class_name=name, functions_names=function_names))
with open(f"outputs/{source_w}", "a" if Path(f"outputs/{source_w}").exists() else "w") as f:
f.write(f"\n\n# Class name: {name} \n\nFunctions: \n{function_names}, \nDocumentation: \n{response}")
def transform_to_docs(functions_dict, classes_dict, formats, dir):
docs_content = ''.join([str(key) + str(value) for key, value in functions_dict.items()])
docs_content += ''.join([str(key) + str(value) for key, value in classes_dict.items()])
num_tokens = len(tiktoken.get_encoding("cl100k_base").encode(docs_content))
total_price = ((num_tokens / 1000) * 0.02)
print(f"Number of Tokens = {num_tokens:,d}")
print(f"Approx Cost = ${total_price:,.2f}")
user_input = input("Price Okay? (Y/N)\n").lower()
if user_input == "y" or user_input == "":
if not Path("outputs").exists():
Path("outputs").mkdir()
parse_functions(functions_dict, formats, dir)
parse_classes(classes_dict, formats, dir)
print("All done!")
else:
print("The API was not called. No money was spent.")
| [
"Code: \n{code}, \nDocumentation: ",
"functions_names",
"class_name",
"Class name: {class_name} \nFunctions: {functions_names}, \nDocumentation: "
] |
2024-01-10 | DeiviGT1/openaiAPI | controllers.py | from flask import render_template, request, redirect, session, Blueprint
from spotify import app_Authorization, search_song, user_Authorization
from openaiapi import generar_respuesta
from dotenv import load_dotenv
mod = Blueprint('controllers', __name__, url_prefix='')
#renders the web page
@mod.route("/")
def home():
return render_template("index.html")
@mod.route("/login", methods=["POST","GET"])
def login():
auth_url = app_Authorization()
session["spotify"] = auth_url
song = request.form["song"]
session["song"] = song
session["artist"] = request.form["artist"]
return redirect(auth_url)
@mod.route("/callback")
def callback():
header = user_Authorization()
session["user"] = header
return redirect("/song")
@mod.route("/song", methods=["POST","GET"])
def get_input():
header = session.get("user")
real_songs = []
song = session.get("song")
artist = session.get("artist")
playlists = generar_respuesta(song, artist)
lista_nueva = []
for elemento in playlists:
elemento_nuevo = elemento.replace("\"", "").replace("\u00f1", "n").replace("\u00e1", "a").replace("\u00e9", "e").replace("\u00ed", "i").replace("\u00f3", "o").replace("\u00fa", "u").replace("¿", "").replace("?", "").replace("¡", "").replace("!", "").replace(".", "").replace(",", "").replace(" ", " ").strip()
lista_nueva.append(elemento_nuevo)
for song in lista_nueva:
result = search_song(header = header, song_name = song)
if len(result["tracks"]["items"]) > 0:
track_id = result["tracks"]["items"][0]["id"]
track_image = result["tracks"]["items"][0]["album"]["images"][0]["url"]
track_name = result["tracks"]["items"][0]["name"]
track_artists = result["tracks"]["items"][0]["artists"][0]["name"]
track_url = result["tracks"]["items"][0]["external_urls"]["spotify"]
real_songs.append({"name": track_name, "artist":track_artists , "url": track_url, "image": track_image})
return render_template("songs.html", songs = real_songs)
| [] |
2024-01-10 | dhanasekars/Daily-Python-Practise | 2023~05%20Packages~12_Langchain.py | """
Created on : 02/05/23 4:27 pm
@author : ds
"""
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from config import Open_AI
from langchain.chains import LLMChain
llm = OpenAI(openai_api_key=Open_AI.key)
# text = "What would be a good company name for a company that makes colorful socks?"
# print(llm(text))
prompt = PromptTemplate(input_variables=["product"],
template="What would be a good company name for a company that produce {product}")
chain = LLMChain(llm=llm, prompt=prompt)
print(chain.run("whisky"))
| [
"What would be a good company name for a company that produce {product}"
] |
2024-01-10 | dhanasekars/Daily-Python-Practise | 2023~05%20Packages~10_OpenAI.py | """
Created on : 02/05/23 4:27 pm
@author : ds
"""
import openai
from config import Open_AI
openai.api_key = Open_AI.key
models = openai.Model.list()
print(models)
while models:
for model in models['data']:
print(model['id'])
models = models.get('next')
completion = openai.Completion.create(model="ada", prompt="Hello world")
print(completion.choices[0].text)
audio_file = open("New Recording 3.mp3", 'rb')
transcript = openai.Audio.translate("whisper-1", audio_file)
print(transcript)
| [
"Hello world"
] |
2024-01-10 | frankdzh/vector-vein | backend~worker~tasks~llms.py | # -*- coding: utf-8 -*-
# @Author: Bi Ying
# @Date: 2023-04-26 21:10:52
# @Last Modified by: Bi Ying
# @Last Modified time: 2023-05-23 00:07:14
from typing import Union
import openai
from utilities.workflow import Workflow
from utilities.web_crawler import proxies_for_requests
from worker.tasks import task
@task
def open_ai(
workflow_data: dict,
node_id: str,
):
workflow = Workflow(workflow_data)
input_prompt: Union[str, list] = workflow.get_node_field_value(node_id, "prompt")
temperature: float = workflow.get_node_field_value(node_id, "temperature")
if workflow.setting.get("openai_api_type") == "azure":
openai.api_type = "azure"
openai.api_base = workflow.setting.get("openai_api_base")
openai.api_version = "2023-03-15-preview"
engine = workflow.setting.get("openai_chat_engine")
else:
openai.api_type = "open_ai"
openai.api_base = "https://api.openai.com/v1"
openai.api_version = None
engine = None
openai.api_key = workflow.setting.get("openai_api_key")
openai.proxy = proxies_for_requests
if isinstance(input_prompt, str):
prompts = [input_prompt]
elif isinstance(input_prompt, list):
prompts = input_prompt
results = []
for prompt in prompts:
messages = [
{
"role": "system",
"content": prompt,
},
]
response = openai.ChatCompletion.create(
engine=engine,
messages=messages,
temperature=temperature,
max_tokens=2048,
top_p=0.77,
)
result = response.choices[0].message.content
results.append(result)
output = results[0] if isinstance(input_prompt, str) else results
workflow.update_node_field_value(node_id, "output", output)
workflow.set_node_status(node_id, 200)
return workflow.data
| [] |
2024-01-10 | sulphurx1/Legendary_Duck | AI_Waifu~roleplay.py | import openai
from ChatGPT import BOT_KEY
from translate import Translator
import requests
import urllib.parse
from voicevox import Client
import asyncio
openai.api_key = BOT_KEY
messages = []
file = open('initiator.txt', 'r')
prompt = file.read()
def translate_text(text, target_language):
translator = Translator(to_lang=target_language)
translation = translator.translate(text)
return translation
client = Client("localhost", 50021)
base_url = "http://localhost:50021"
def generate_speech(text):
# generate initial query
speaker_id = '14'
params_encoded = urllib.parse.urlencode({'text': text, 'speaker': speaker_id})
r = requests.post(f'{base_url}/audio_query?{params_encoded}')
    voicevox_query = r.json()
    voicevox_query['volumeScale'] = 4.0
    voicevox_query['intonationScale'] = 1.5
voicevox_query['prePhonemeLength'] = 1.0
voicevox_query['postPhonemeLength'] = 1.0
# synthesize voice as wav file
params_encoded = urllib.parse.urlencode({'speaker': speaker_id})
r = requests.post(f'{base_url}/synthesis?{params_encoded}', json=voicevox_query)
return r.content
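# Hedged helper (not in the original script): generate_speech returns a complete WAV
# payload, so persisting it is a plain binary write. The default path is a placeholder.
def save_wav(wav_bytes, path="reply.wav"):
    # Write the synthesized speech to disk so any audio player can play it back.
    with open(path, "wb") as f:
        f.write(wav_bytes)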
messages.append({"role": "system", "content": prompt})
print("Type to start\n")
while True:
    message = input()
    if message == "quit()":
        break
messages.append({"role": "user", "content": message})
response = openai.ChatCompletion.create(
model = "gpt-3.5-turbo",
messages = messages
)
reply = response["choices"][0]["message"]["content"]
messages.append({"role": "assistant", "content": reply})
text = reply
target_language = "ja"
translated_text = translate_text(text,target_language)
audio_data = generate_speech(translated_text)
print("\n" + reply + "\n")
| [] |
2024-01-10 | sulphurx1/Legendary_Duck | Botville~improve_intents.py | import json
import time
import openai
from ChatGPT import BOT_KEY
openai.api_key = BOT_KEY
intents = json.loads(open('intents.json').read())
messages = []
for intent in intents['intents']:
for pattern in intent['patterns']:
message = pattern
messages.append({"role": "user", "content": "35 words" + message})
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages = messages
)
reply = response["choices"][0]["message"]["content"]
messages.append({"role": "assistant", "content": reply})
if reply not in intent['responses']:
            intent['responses'].append(reply)
time.sleep(90)
with open('intents.json', 'w') as file:
json.dump(intents, file, indent=4)
| [
"35 wordsPLACEHOLDER"
] |
2024-01-10 | louis70109/linebot-find-some | stock_price.py | from langchain.tools import BaseTool
from langchain.agents import AgentType
from typing import Optional, Type
from pydantic import BaseModel, Field
from yf_tool import get_stock_price, calculate_performance, get_price_change_percent, get_best_performing
class StockPriceCheckInput(BaseModel):
"""Input for Stock price check."""
stockticker: str = Field(...,
description="Ticker symbol for stock or index")
class StockPriceTool(BaseTool):
name = "get_stock_ticker_price"
description = "Useful for when you need to find out the price of stock. You should input the stock ticker used on the yfinance API"
def _run(self, stockticker: str):
# print("i'm running")
price_response = get_stock_price(stockticker)
return price_response
def _arun(self, stockticker: str):
raise NotImplementedError("This tool does not support async")
args_schema: Optional[Type[BaseModel]] = StockPriceCheckInput
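# Hedged wiring sketch (not part of the original file): one way this tool could be
# handed to a LangChain agent. The ChatOpenAI model name and the OPENAI_FUNCTIONS
# agent type are assumptions, not taken from the repository.
if __name__ == "__main__":
    from langchain.chat_models import ChatOpenAI
    from langchain.agents import initialize_agent

    llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
    agent = initialize_agent(
        [StockPriceTool()], llm, agent=AgentType.OPENAI_FUNCTIONS, verbose=True)
    print(agent.run("What is the current price of TSLA?"))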
| [
"Useful for when you need to find out the price of stock. You should input the stock ticker used on the yfinance API"
] |
2024-01-10 | louis70109/linebot-find-some | g_cal_url.py | from langchain.tools import BaseTool
from typing import Optional, Type
from pydantic import BaseModel, Field
import datetime
import urllib
class GoogleCalendarGeneratorInput(BaseModel):
"""Input for Google Calendar Generator."""
dates: str = Field(
...,
description=f"Datetime symbol if text contained. format should be 'YYYYMMDDTHHMMSS/YYYYMMDDTHHMMSS'. Current time is {datetime.date.today()}")
title: str = Field(
...,
description="Calendar Title symbol for reserve schedule.")
description: str = Field(
...,
description="Calendar Summary text symbol for schedule description.")
location: str = Field(
...,
description="Calendar location symbol for reservation.")
class CalendarTool(BaseTool):
name = "google_calendar_reservation"
description = f"""
Generate Google Calendar url from user text first when containing time, date.
"""
@staticmethod
def create_gcal_url(
title='看到這個..請重生',
date='20230524T180000/20230524T220000',
location='那邊',
description=''):
base_url = "https://www.google.com/calendar/render?action=TEMPLATE"
event_url = f"{base_url}&text={urllib.parse.quote(title)}&dates={date}&location={urllib.parse.quote(location)}&details={urllib.parse.quote(description)}"
return event_url+"&openExternalBrowser=1"
def _run(self, dates: str, title: str, description: str, location: str):
print('Google Calendar')
        print('Time: '+dates)
        print('Title: '+title)
        print('Description: '+description)
        print('Location: '+location)
result = self.create_gcal_url(title, dates, location, description)
return result
args_schema: Optional[Type[BaseModel]] = GoogleCalendarGeneratorInput | [
"\nGenerate Google Calendar url from user text first when containing time, date.\n"
] |
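# Hedged example of the URL builder defined in g_cal_url.py above (the event details
# are made up): create_gcal_url only fills Google's calendar/render?action=TEMPLATE
# query string, so a direct call is enough to preview the generated link.
#
#     url = CalendarTool.create_gcal_url(
#         title="Team sync",
#         date="20240110T100000/20240110T110000",
#         location="Taipei",
#         description="Weekly catch-up")
#     # -> https://www.google.com/calendar/render?action=TEMPLATE&text=Team%20sync&dates=...&openExternalBrowser=1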
2024-01-10 | louis70109/linebot-find-some | stock_peformace.py | from typing import List
from langchain.tools import BaseTool
from langchain.agents import AgentType
from typing import Optional, Type
from pydantic import BaseModel, Field
from yf_tool import get_stock_price, calculate_performance, get_price_change_percent, get_best_performing
class StockChangePercentageCheckInput(BaseModel):
"""Input for Stock ticker check. for percentage check"""
stockticker: str = Field(...,
description="Ticker symbol for stock or index")
days_ago: int = Field(..., description="Int number of days to look back")
class StockPercentageChangeTool(BaseTool):
name = "get_price_change_percent"
description = "Useful for when you need to find out the percentage change in a stock's value. You should input the stock ticker used on the yfinance API and also input the number of days to check the change over"
def _run(self, stockticker: str, days_ago: int):
price_change_response = get_price_change_percent(stockticker, days_ago)
return price_change_response
def _arun(self, stockticker: str, days_ago: int):
raise NotImplementedError("This tool does not support async")
args_schema: Optional[Type[BaseModel]] = StockChangePercentageCheckInput
# the best performing
class StockBestPerformingInput(BaseModel):
"""Input for Stock ticker check. for percentage check"""
stocktickers: List[str] = Field(...,
description="Ticker symbols for stocks or indices")
days_ago: int = Field(..., description="Int number of days to look back")
class StockGetBestPerformingTool(BaseTool):
name = "get_best_performing"
description = "Useful for when you need to the performance of multiple stocks over a period. You should input a list of stock tickers used on the yfinance API and also input the number of days to check the change over"
def _run(self, stocktickers: List[str], days_ago: int):
price_change_response = get_best_performing(stocktickers, days_ago)
return price_change_response
def _arun(self, stockticker: List[str], days_ago: int):
raise NotImplementedError("This tool does not support async")
args_schema: Optional[Type[BaseModel]] = StockBestPerformingInput
| [
"Useful for when you need to find out the percentage change in a stock's value. You should input the stock ticker used on the yfinance API and also input the number of days to check the change over",
"Useful for when you need to the performance of multiple stocks over a period. You should input a list of stock tickers used on the yfinance API and also input the number of days to check the change over"
] |
2024-01-10 | louis70109/linebot-find-some | youtube_restaurant.py | from ast import literal_eval
from langchain.tools import BaseTool
from typing import Optional, Type
from pydantic import BaseModel, Field
from langchain.tools import YouTubeSearchTool
class YoutubeDefineInput(BaseModel):
"""Youtube Restaurant recommendation."""
title: str = Field(
...,
description="Restaurant title which will be recommend to user.")
class FindYoutubeVideoTool(BaseTool):
name = "find_restaurant_youtube"
description = "Find recommendation restaurant from Youtube"
def _run(self, title: str):
print("Youtube")
        print('Title: '+title)
tool = YouTubeSearchTool()
youtube_str = tool.run(title) # force change str to list
youtube_list = literal_eval(youtube_str)
for i in range(len(youtube_list)):
youtube_list[i] = youtube_list[i]
return youtube_list
args_schema: Optional[Type[BaseModel]] = YoutubeDefineInput | [
"Find recommendation restaurant from Youtube"
] |
2024-01-10 | louis70109/linebot-find-some | wikipedia.py | from langchain.tools import BaseTool
from typing import Optional, Type
from pydantic import BaseModel, Field
import datetime
import urllib
from langchain.tools import WikipediaQueryRun
from langchain.utilities import WikipediaAPIWrapper
class WikiInput(BaseModel):
"""Input for Google Calendar Generator."""
title: str = Field(
...,
description="Wikipedia Title symbol ")
link: str = Field(
...,
description="Wikipedia url")
class WikiTool(BaseTool):
name = "find_wikipedia_information"
description = "Use wikipedia resources to find unknown information."
def _run(self, title: str, link: str):
print("Wiki")
        print('Title: '+title)
        print('Link: '+link)
return title, link
args_schema: Optional[Type[BaseModel]] = WikiInput | [
"Use wikipedia resources to find unknown information."
] |
2024-01-10 | 88aggressive/PaddleSpeech | paddlespeech~s2t~models~whisper~whipser.py | # MIT License, Copyright (c) 2022 OpenAI.
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Modified from OpenAI Whisper 2022 (https://github.com/openai/whisper/whisper)
import os
from dataclasses import dataclass
from dataclasses import field
from functools import lru_cache
from typing import Dict
from typing import Iterable
from typing import List
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Union
import numpy as np
import paddle
import paddle.nn.functional as F
import paddlespeech.s2t.modules.align as paddlespeech_nn
import soundfile
import tqdm
from paddle import nn
from paddle.distribution import Categorical
from paddlespeech.s2t.models.whisper import utils
from paddlespeech.s2t.models.whisper.tokenizer import get_tokenizer
from paddlespeech.s2t.models.whisper.tokenizer import LANGUAGES
from paddlespeech.s2t.models.whisper.tokenizer import Tokenizer
from paddlespeech.s2t.utils.log import Log
logger = Log(__name__).getlog()
_MODELS = ["large"]
SAMPLE_RATE = 16000
N_FFT = 400
N_MELS = 80
HOP_LENGTH = 160
CHUNK_LENGTH = 30
N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE # 480000: number of samples in a chunk
N_FRAMES = utils.exact_div(
N_SAMPLES, HOP_LENGTH) # 3000: number of frames in a mel spectrogram input
@dataclass
class ModelDimensions:
n_mels: int
n_audio_ctx: int
n_audio_state: int
n_audio_head: int
n_audio_layer: int
n_vocab: int
n_text_ctx: int
n_text_state: int
n_text_head: int
n_text_layer: int
class LayerNorm(paddlespeech_nn.LayerNorm):
def forward(self, x: paddle.Tensor) -> paddle.Tensor:
return super().forward(x)
class Linear(paddlespeech_nn.Linear):
def forward(self, x: paddle.Tensor) -> paddle.Tensor:
return F.linear(x, self.weight, None
if self.bias is None else self.bias)
class Conv1d(paddlespeech_nn.Conv1D):
def forward(self, x: paddle.Tensor) -> paddle.Tensor:
return super().forward(x)
class MultiHeadAttention(nn.Layer):
def __init__(self, n_state: int, n_head: int):
super().__init__()
self.n_head = n_head
self.query = Linear(n_state, n_state, bias_attr=True)
self.key = Linear(n_state, n_state, bias_attr=False)
self.value = Linear(n_state, n_state, bias_attr=True)
self.out = Linear(n_state, n_state, bias_attr=True)
def forward(
self,
x: paddle.Tensor,
xa: Optional[paddle.Tensor]=None,
mask: Optional[paddle.Tensor]=None,
kv_cache: Optional[dict]=None, ):
q = self.query(x)
if kv_cache is None or xa is None or self.key not in kv_cache:
# hooks, if installed (i.e. kv_cache is not None), will prepend the cached kv tensors;
# otherwise, perform key/value projections for self- or cross-attention as usual.
k = self.key(x if xa is None else xa)
v = self.value(x if xa is None else xa)
else:
# for cross-attention, calculate keys and values once and reuse in subsequent calls.
k = kv_cache[self.key]
v = kv_cache[self.value]
wv = self.qkv_attention(q, k, v, mask)
return self.out(wv)
def qkv_attention(self,
q: paddle.Tensor,
k: paddle.Tensor,
v: paddle.Tensor,
mask: Optional[paddle.Tensor]=None):
n_batch, n_ctx, n_state = q.shape
scale = (n_state // self.n_head)**-0.25
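        # scaling q and k by d_head ** -0.25 each is equivalent to dividing the
        # q @ k product by sqrt(d_head), the standard softmax attention scaling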
q = paddle.transpose(
q.view(*q.shape[:2], self.n_head, -1), (0, 2, 1, 3)) * scale
k = paddle.transpose(
k.view(*k.shape[:2], self.n_head, -1), (0, 2, 3, 1)) * scale
v = paddle.transpose(
v.view(*v.shape[:2], self.n_head, -1), (0, 2, 1, 3))
qk = q @ k
if mask is not None:
qk = qk + mask[:n_ctx, :n_ctx]
w = F.softmax(qk.float(), axis=-1).to(q.dtype)
return paddle.transpose((w @ v), (0, 2, 1, 3)).flatten(start_axis=2)
class ResidualAttentionBlock(nn.Layer):
def __init__(self, n_state: int, n_head: int, cross_attention: bool=False):
super().__init__()
self.attn = MultiHeadAttention(n_state, n_head)
self.attn_ln = LayerNorm(n_state)
self.cross_attn = MultiHeadAttention(
n_state, n_head) if cross_attention else None
self.cross_attn_ln = LayerNorm(n_state) if cross_attention else None
n_mlp = n_state * 4
self.mlp = nn.Sequential(
Linear(n_state, n_mlp, bias_attr=True),
nn.GELU(), Linear(n_mlp, n_state, bias_attr=True))
self.mlp_ln = LayerNorm(n_state)
def forward(
self,
x: paddle.Tensor,
xa: Optional[paddle.Tensor]=None,
mask: Optional[paddle.Tensor]=None,
kv_cache: Optional[dict]=None, ):
x = x + self.attn(self.attn_ln(x), mask=mask, kv_cache=kv_cache)
if self.cross_attn:
x = x + self.cross_attn(
self.cross_attn_ln(x), xa, kv_cache=kv_cache)
x = x + self.mlp(self.mlp_ln(x))
return x
def sinusoids(length, channels, max_timescale=10000):
"""Returns sinusoids for positional embedding"""
assert channels % 2 == 0
log_timescale_increment = np.log(max_timescale) / (channels // 2 - 1)
inv_timescales = paddle.exp(-log_timescale_increment * paddle.arange(
channels // 2, dtype=paddle.float32))
scaled_time = paddle.arange(
length,
dtype=paddle.float32)[:, np.newaxis] * inv_timescales[np.newaxis, :]
return paddle.to_tensor(
paddle.concat(
[paddle.sin(scaled_time), paddle.cos(scaled_time)], axis=1))
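# e.g. sinusoids(1500, 384) yields a (1500, 384) tensor whose first 192 channels are
# sines and the last 192 are cosines; AudioEncoder registers it as "positional_embedding".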
class AudioEncoder(nn.Layer):
def __init__(self,
n_mels: int,
n_ctx: int,
n_state: int,
n_head: int,
n_layer: int):
super().__init__()
self.conv1 = Conv1d(
n_mels, n_state, kernel_size=3, stride=1, padding=1, bias_attr=True)
self.conv2 = Conv1d(
n_state,
n_state,
kernel_size=3,
stride=2,
padding=1,
bias_attr=True)
self.register_buffer("positional_embedding", sinusoids(n_ctx, n_state))
self.blocks: Iterable[ResidualAttentionBlock] = nn.LayerList(
[ResidualAttentionBlock(n_state, n_head) for _ in range(n_layer)])
self.ln_post = LayerNorm(n_state)
def forward(self, x: paddle.Tensor):
"""
x : paddle.Tensor, shape = (batch_size, n_mels, n_ctx)
the mel spectrogram of the audio
"""
x = F.gelu(self.conv1(x))
x = F.gelu(self.conv2(x))
x = paddle.transpose(x, (0, 2, 1))
assert x.shape[
1:] == self.positional_embedding.shape, "incorrect audio shape"
x = (x + self.positional_embedding)
for block in self.blocks:
x = block(x)
x = self.ln_post(x)
return x
class TextDecoder(nn.Layer):
def __init__(self,
n_vocab: int,
n_ctx: int,
n_state: int,
n_head: int,
n_layer: int):
super().__init__()
self.token_embedding = nn.Embedding(n_vocab, n_state)
self.positional_embedding = paddle.create_parameter(
shape=[n_ctx, n_state], dtype='float32')
self.blocks: Iterable[ResidualAttentionBlock] = nn.LayerList([
ResidualAttentionBlock(n_state, n_head, cross_attention=True)
for _ in range(n_layer)
])
self.ln = LayerNorm(n_state)
mask = paddle.full(
shape=[n_ctx, n_state], fill_value=-np.inf, dtype='float32')
mask = paddle.triu(mask, diagonal=1)
self.register_buffer("mask", mask, persistable=False)
def forward(self,
x: paddle.Tensor,
xa: paddle.Tensor,
kv_cache: Optional[dict]=None):
"""
x : paddle.LongTensor, shape = (batch_size, <= n_ctx)
the text tokens
xa : paddle.Tensor, shape = (batch_size, n_mels, n_audio_ctx)
the encoded audio features to be attended on
"""
offset = next(iter(kv_cache.values())).shape[1] if kv_cache else 0
x = self.token_embedding(x) + self.positional_embedding[offset:offset +
x.shape[-1]]
x = x.to(xa.dtype)
for block in self.blocks:
x = block(x, xa, mask=self.mask, kv_cache=kv_cache)
x = self.ln(x)
logits = (x @ paddle.transpose(self.token_embedding.weight, (1, 0)))
return logits
@dataclass(frozen=True)
class DecodingOptions:
task: str = "transcribe" # whether to perform X->X "transcribe" or X->English "translate"
language: Optional[
str] = None # language that the audio is in; uses detected language if None
# sampling-related options
temperature: float = 0.0
sample_len: Optional[int] = None # maximum number of tokens to sample
best_of: Optional[
int] = None # number of independent samples to collect, when t > 0
beam_size: Optional[
int] = None # number of beams in beam search, when t == 0
patience: Optional[
float] = None # patience in beam search (https://arxiv.org/abs/2204.05424)
# options for ranking generations (either beams or best-of-N samples)
length_penalty: Optional[
float] = None # "alpha" in Google NMT, None defaults to length norm
# prompt, prefix, and token suppression
prompt: Optional[Union[str, List[
int]]] = None # text or tokens for the previous context
prefix: Optional[Union[str, List[
int]]] = None # text or tokens to prefix the current context
suppress_blank: bool = True # this will suppress blank outputs
# list of tokens ids (or comma-separated token ids) to suppress
# "-1" will suppress a set of symbols as defined in `tokenizer.non_speech_tokens()`
suppress_tokens: Optional[Union[str, Iterable[int]]] = "-1"
# timestamp sampling options
without_timestamps: bool = False # use <|notimestamps|> to sample text tokens only
max_initial_timestamp: Optional[
float] = 1.0 # the initial timestamp cannot be later than this
# implementation details
fp16: bool = False # use fp16 for most of the calculation
@dataclass(frozen=True)
class DecodingResult:
audio_features: paddle.Tensor
language: str
language_probs: Optional[Dict[str, float]] = None
tokens: List[int] = field(default_factory=list)
text: str = ""
avg_logprob: float = np.nan
no_speech_prob: float = np.nan
temperature: float = np.nan
compression_ratio: float = np.nan
class Inference:
def logits(self, tokens: paddle.Tensor,
audio_features: paddle.Tensor) -> paddle.Tensor:
"""Perform a forward pass on the decoder and return per-token logits"""
raise NotImplementedError
def rearrange_kv_cache(self, source_indices) -> None:
"""Update the key-value cache according to the updated beams"""
raise NotImplementedError
def cleanup_caching(self) -> None:
"""Clean up any resources or hooks after decoding is finished"""
pass
class WhisperInference(Inference):
def __init__(self, model: "Whisper", initial_token_length: int):
self.model: "Whisper" = model
self.initial_token_length = initial_token_length
self.kv_cache = {}
self.hooks = []
def logits(self, tokens: paddle.Tensor,
audio_features: paddle.Tensor) -> paddle.Tensor:
if not self.kv_cache:
self.kv_cache, self.hooks = self.model.install_kv_cache_hooks()
if tokens.shape[-1] > self.initial_token_length:
# only need to use the last token except in the first forward pass
tokens = tokens[:, -1:]
return self.model.decoder(
tokens, audio_features, kv_cache=self.kv_cache)
def cleanup_caching(self):
for hook in self.hooks:
hook.remove()
self.kv_cache = {}
self.hooks = []
def rearrange_kv_cache(self, source_indices):
for module, tensor in self.kv_cache.items():
# update the key/value cache to contain the selected sequences
self.kv_cache[module] = tensor[source_indices].detach()
@paddle.no_grad()
def detect_language(
model: "Whisper",
mel: paddle.Tensor,
resource_path: str,
tokenizer: Tokenizer=None) -> Tuple[paddle.Tensor, List[dict]]:
"""
Detect the spoken language in the audio, and return them as list of strings, along with the ids
of the most probable language tokens and the probability distribution over all language tokens.
This is performed outside the main decode loop in order to not interfere with kv-caching.
Returns
-------
language_tokens : Tensor, shape = (batch_size,)
ids of the most probable language tokens, which appears after the startoftranscript token.
language_probs : List[Dict[str, float]], length = batch_size
list of dictionaries containing the probability distribution over all languages.
"""
if tokenizer is None:
tokenizer = get_tokenizer(
model.is_multilingual, resource_path=resource_path)
if tokenizer.language is None or tokenizer.language_token not in tokenizer.sot_sequence:
raise ValueError(
"This model doesn't have language tokens so it can't perform lang id"
)
single = mel.ndim == 2
if single:
mel = mel.unsqueeze(0)
# skip encoder forward pass if already-encoded audio features were given
if mel.shape[-2:] != (model.dims.n_audio_ctx, model.dims.n_audio_state):
mel = model.encoder(mel)
# forward pass using a single token, startoftranscript
batch_size = mel.shape[0]
x = paddle.to_tensor([[tokenizer.sot]] * batch_size) # [batch_size, 1]
logits = model.logits(x, mel)[:, 0]
# collect detected languages; suppress all non-language tokens
mask = paddle.ones(paddle.to_tensor(logits.shape[-1]), dtype=bool)
mask[list(tokenizer.all_language_tokens)] = False
logits[:, mask] = -np.inf
language_tokens = paddle.argmax(logits, axis=-1)
language_token_probs = F.softmax(logits, axis=-1)
language_probs = [{
c: language_token_probs[i, j].tolist()
for j, c in zip(tokenizer.all_language_tokens,
tokenizer.all_language_codes)
} for i in range(batch_size)]
if single:
language_tokens = language_tokens[0]
language_probs = language_probs[0]
return language_tokens, language_probs
def transcribe(
model: "Whisper",
mel: paddle.Tensor,
resource_path: str,
*,
verbose: Optional[bool]=None,
temperature: Union[float, Tuple[float, ...]]=(0.0, 0.2, 0.4, 0.6, 0.8,
1.0),
compression_ratio_threshold: Optional[float]=2.4,
logprob_threshold: Optional[float]=-1.0,
no_speech_threshold: Optional[float]=0.6,
condition_on_previous_text: bool=True,
**decode_options, ):
"""
Transcribe an audio file using Whisper
Parameters
----------
model: Whisper
The Whisper model instance
mel: paddle.Tensor
The audio feature
verbose: bool
Whether to display the text being decoded to the console. If True, displays all the details,
If False, displays minimal details. If None, does not display anything
temperature: Union[float, Tuple[float, ...]]
Temperature for sampling. It can be a tuple of temperatures, which will be successfully used
upon failures according to either `compression_ratio_threshold` or `logprob_threshold`.
compression_ratio_threshold: float
If the gzip compression ratio is above this value, treat as failed
logprob_threshold: float
If the average log probability over sampled tokens is below this value, treat as failed
no_speech_threshold: float
If the no_speech probability is higher than this value AND the average log probability
over sampled tokens is below `logprob_threshold`, consider the segment as silent
condition_on_previous_text: bool
if True, the previous output of the model is provided as a prompt for the next window;
disabling may make the text inconsistent across windows, but the model becomes less prone to
getting stuck in a failure loop, such as repetition looping or timestamps going out of sync.
decode_options: dict
Keyword arguments to construct `DecodingOptions` instances
Returns
-------
A dictionary containing the resulting text ("text") and segment-level details ("segments"), and
the spoken language ("language"), which is detected when `decode_options["language"]` is None.
"""
    dtype = np.float32  # paddle only supports float32
if dtype == np.float32:
decode_options["fp16"] = False
if decode_options.get(
"language") == 'None' or decode_options.get("language", None) is None:
if not model.is_multilingual:
decode_options["language"] = "en"
else:
if verbose:
print(
"Detecting language using up to the first 30 seconds. Use `--language` to specify the language"
)
segment = pad_or_trim(mel, N_FRAMES)
_, probs = model.detect_language(segment, resource_path)
decode_options["language"] = max(probs, key=probs.get)
if verbose is not None:
print(
f"Detected language: {LANGUAGES[decode_options['language']].title()}"
)
language = decode_options["language"]
task = decode_options.get("task", "transcribe")
tokenizer = get_tokenizer(
model.is_multilingual,
resource_path=resource_path,
language=language,
task=task)
def decode_with_fallback(segment: paddle.Tensor) -> DecodingResult:
temperatures = [temperature] if isinstance(temperature, (
int, float)) else temperature
decode_result = None
for t in temperatures:
kwargs = {**decode_options}
if t > 0:
# disable beam_size and patience when t > 0
kwargs.pop("beam_size", None)
kwargs.pop("patience", None)
else:
# disable best_of when t == 0
kwargs.pop("best_of", None)
options = DecodingOptions(**kwargs, temperature=t)
decode_result = model.decode(segment, options, resource_path)
needs_fallback = False
if compression_ratio_threshold is not None and decode_result.compression_ratio > compression_ratio_threshold:
needs_fallback = True # too repetitive
if logprob_threshold is not None and decode_result.avg_logprob < logprob_threshold:
needs_fallback = True # average log probability is too low
if not needs_fallback:
break
return decode_result
seek = 0
input_stride = utils.exact_div(
N_FRAMES, model.dims.n_audio_ctx) # mel frames per output token: 2
time_precision = (input_stride * HOP_LENGTH /
SAMPLE_RATE) # time per output token: 0.02 (seconds)
all_tokens = []
all_segments = []
prompt_reset_since = 0
initial_prompt = decode_options.pop("initial_prompt", None) or []
if initial_prompt:
initial_prompt = tokenizer.encode(" " +
initial_prompt.strip()).input_ids
all_tokens.extend(initial_prompt)
def add_segment(*,
start: float,
end: float,
text_tokens: paddle.Tensor,
result: DecodingResult):
text = tokenizer.decode(
[token for token in text_tokens if token < tokenizer.eot])
if len(text.strip()) == 0: # skip empty text output
return
all_segments.append({
"id": len(all_segments),
"seek": seek,
"start": start,
"end": end,
"text": text,
"tokens": result.tokens,
"temperature": result.temperature,
"avg_logprob": result.avg_logprob,
"compression_ratio": result.compression_ratio,
"no_speech_prob": result.no_speech_prob,
})
if verbose:
print(
f"[{utils.format_timestamp(start)} --> {utils.format_timestamp(end)}] {text}"
)
# show the progress bar when verbose is False (otherwise the transcribed text will be printed)
num_frames = mel.shape[-1]
previous_seek_value = seek
with tqdm.tqdm(
total=num_frames, unit='frames',
disable=verbose is not False) as pbar:
while seek < num_frames:
timestamp_offset = float(seek * HOP_LENGTH / SAMPLE_RATE)
segment = pad_or_trim(mel[:, seek:], N_FRAMES)
segment_duration = segment.shape[-1] * HOP_LENGTH / SAMPLE_RATE
decode_options["prompt"] = all_tokens[prompt_reset_since:]
result: DecodingResult = decode_with_fallback(segment)
tokens = paddle.to_tensor(result.tokens)
if no_speech_threshold is not None:
# no voice activity check
should_skip = result.no_speech_prob > no_speech_threshold
if logprob_threshold is not None and result.avg_logprob > logprob_threshold:
# don't skip if the logprob is high enough, despite the no_speech_prob
should_skip = False
if should_skip:
seek += segment.shape[
-1] # fast-forward to the next segment boundary
continue
timestamp_tokens: paddle.Tensor = tokens.greater_equal(
paddle.to_tensor(tokenizer.timestamp_begin))
consecutive = paddle.where(timestamp_tokens[:-1] & timestamp_tokens[
1:])[0]
if len(
consecutive
) > 0: # if the output contains two consecutive timestamp tokens
consecutive = paddle.add(consecutive, paddle.to_tensor(1))
last_slice = 0
for current_slice in consecutive:
sliced_tokens = tokens[last_slice:current_slice]
start_timestamp_position = (
sliced_tokens[0].item() - tokenizer.timestamp_begin)
end_timestamp_position = (
sliced_tokens[-1].item() - tokenizer.timestamp_begin)
add_segment(
start=timestamp_offset + start_timestamp_position *
time_precision,
end=timestamp_offset + end_timestamp_position *
time_precision,
text_tokens=sliced_tokens[1:-1],
result=result, )
last_slice = current_slice
last_timestamp_position = (
tokens[last_slice - 1].item() - tokenizer.timestamp_begin)
seek += last_timestamp_position * input_stride
all_tokens.extend(tokens[:last_slice + 1].tolist())
else:
duration = segment_duration
timestamps = tokens[timestamp_tokens.nonzero().flatten()]
if len(timestamps) > 0 and timestamps[
-1].item() != tokenizer.timestamp_begin:
# no consecutive timestamps but it has a timestamp; use the last one.
# single timestamp at the end means no speech after the last timestamp.
last_timestamp_position = timestamps[
-1].item() - tokenizer.timestamp_begin
duration = last_timestamp_position * time_precision
add_segment(
start=timestamp_offset,
end=timestamp_offset + duration,
text_tokens=tokens,
result=result, )
seek += segment.shape[-1]
all_tokens.extend(tokens.tolist())
if not condition_on_previous_text or result.temperature > 0.5:
# do not feed the prompt tokens if a high temperature was used
prompt_reset_since = len(all_tokens)
# update progress bar
pbar.update(min(num_frames, seek) - previous_seek_value)
previous_seek_value = seek
return dict(
text=tokenizer.decode(all_tokens[len(initial_prompt):]),
segments=all_segments,
language=language)
class SequenceRanker:
def rank(self,
tokens: List[List[paddle.Tensor]],
sum_logprobs: List[List[float]]) -> List[int]:
"""
Given a list of groups of samples and their cumulative log probabilities,
return the indices of the samples in each group to select as the final result
"""
raise NotImplementedError
class MaximumLikelihoodRanker(SequenceRanker):
"""
Select the sample with the highest log probabilities, penalized using either
a simple length normalization or Google NMT paper's length penalty
"""
def __init__(self, length_penalty: Optional[float]):
self.length_penalty = length_penalty
def rank(self,
tokens: List[List[paddle.Tensor]],
sum_logprobs: List[List[float]]):
def scores(logprobs, lengths):
result = []
for logprob, length in zip(logprobs, lengths):
if self.length_penalty is None:
penalty = length
else:
# from the Google NMT paper
penalty = ((5 + length) / 6)**self.length_penalty
result.append(logprob / penalty)
return result
# get the sequence with the highest score
lengths = [[len(t) for t in s] for s in tokens]
return [np.argmax(scores(p, l)) for p, l in zip(sum_logprobs, lengths)]
class TokenDecoder:
def reset(self):
"""Initialize any stateful variables for decoding a new sequence"""
def update(self,
tokens: paddle.Tensor,
logits: paddle.Tensor,
sum_logprobs: paddle.Tensor) -> Tuple[paddle.Tensor, bool]:
"""Specify how to select the next token, based on the current trace and logits
Parameters
----------
tokens : Tensor, shape = (n_batch, current_sequence_length)
all tokens in the context so far, including the prefix and sot_sequence tokens
logits : Tensor, shape = (n_batch, vocab_size)
per-token logits of the probability distribution at the current step
sum_logprobs : Tensor, shape = (n_batch)
cumulative log probabilities for each sequence
Returns
-------
tokens : Tensor, shape = (n_batch, current_sequence_length + 1)
the tokens, appended with the selected next token
completed : bool
True if all sequences has reached the end of text
"""
raise NotImplementedError
def finalize(
self, tokens: paddle.Tensor, sum_logprobs: paddle.Tensor
) -> Tuple[Sequence[Sequence[paddle.Tensor]], List[List[float]]]:
"""Finalize search and return the final candidate sequences
Parameters
----------
tokens : Tensor, shape = (batch_size, beam_size, current_sequence_length)
all tokens in the context so far, including the prefix and sot_sequence
sum_logprobs : Tensor, shape = (batch_size, beam_size)
cumulative log probabilities for each sequence
Returns
-------
tokens : Sequence[Sequence[Tensor]], length = batch_size
sequence of Tensors containing candidate token sequences, for each audio input
sum_logprobs : List[List[float]], length = batch_size
sequence of cumulative log probabilities corresponding to the above
"""
raise NotImplementedError
class GreedyDecoder(TokenDecoder):
def __init__(self, temperature: float, eot: int):
self.temperature = temperature
self.eot = eot
def update(self,
tokens: paddle.Tensor,
logits: paddle.Tensor,
sum_logprobs: paddle.Tensor) -> Tuple[paddle.Tensor, bool]:
temperature = self.temperature
if temperature == 0:
next_tokens = paddle.argmax(logits, axis=-1)
else:
next_tokens = Categorical(logits=logits / temperature).sample([1])
next_tokens = paddle.reshape(next_tokens, [
next_tokens.shape[0] * next_tokens.shape[1],
])
logprobs = F.log_softmax(logits, axis=-1, dtype=paddle.float32)
current_logprobs = logprobs[paddle.arange(logprobs.shape[0]),
next_tokens]
sum_logprobs += current_logprobs * paddle.to_tensor(
(tokens[:, -1] != self.eot), dtype=paddle.float32)
next_tokens[tokens[:, -1] == self.eot] = self.eot
tokens = paddle.concat([tokens, next_tokens[:, None]], axis=-1)
completed = paddle.all((tokens[:, -1] == self.eot))
return tokens, completed
def finalize(self, tokens: paddle.Tensor, sum_logprobs: paddle.Tensor):
# make sure each sequence has at least one EOT token at the end
tokens = F.pad(tokens, (0, 1), value=self.eot, data_format="NCL")
return tokens, sum_logprobs.tolist()
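# With temperature == 0 the GreedyDecoder above reduces to plain argmax decoding.
# BeamSearchDecoder below instead keeps `beam_size` hypotheses per audio segment and
# stops once every input has gathered `beam_size * patience` finished candidates.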
class BeamSearchDecoder(TokenDecoder):
def __init__(self,
beam_size: int,
eot: int,
inference: Inference,
patience: Optional[float]=None):
self.beam_size = beam_size
self.eot = eot
self.inference = inference
self.patience = patience or 1.0
self.max_candidates: int = round(beam_size * self.patience)
self.finished_sequences = None
assert self.max_candidates > 0, f"Invalid beam size ({beam_size}) or patience ({patience})"
def reset(self):
self.finished_sequences = None
def update(self,
tokens: paddle.Tensor,
logits: paddle.Tensor,
sum_logprobs: paddle.Tensor) -> Tuple[paddle.Tensor, bool]:
if tokens.shape[0] % self.beam_size != 0:
raise ValueError(f"{tokens.shape}[0] % {self.beam_size} != 0")
batch_size = tokens.shape[0] // self.beam_size
if self.finished_sequences is None: # for the first update
self.finished_sequences = [{} for _ in range(batch_size)]
logprobs = F.log_softmax(logits, axis=-1, dtype=paddle.float32)
next_tokens, source_indices, finished_sequences = [], [], []
for i in range(batch_size):
scores, sources, finished = {}, {}, {}
# STEP 1: calculate the cumulative log probabilities for possible candidates
for j in range(self.beam_size):
idx = i * self.beam_size + j
prefix = tokens[idx].tolist()
logprob, token = paddle.topk(
logprobs[idx], k=self.beam_size + 1)
for logprob, token in zip(logprob, token):
new_logprob = (sum_logprobs[idx] + logprob).tolist()[0]
sequence = tuple(prefix + [token.tolist()[0]])
scores[sequence] = new_logprob
sources[sequence] = idx
# STEP 2: rank the candidates and keep the top beam_size sequences for each audio
saved = 0
for sequence in sorted(scores, key=scores.get, reverse=True):
if sequence[-1] == self.eot:
finished[sequence] = scores[sequence]
else:
sum_logprobs[len(next_tokens)] = scores[sequence]
next_tokens.append(sequence)
source_indices.append(sources[sequence])
saved += 1
if saved == self.beam_size:
break
finished_sequences.append(finished)
tokens = paddle.to_tensor(next_tokens)
self.inference.rearrange_kv_cache(source_indices)
# add newly finished sequences to self.finished_sequences
assert len(self.finished_sequences) == len(finished_sequences)
for previously_finished, newly_finished in zip(self.finished_sequences,
finished_sequences):
for seq in sorted(
newly_finished, key=newly_finished.get, reverse=True):
if len(previously_finished) >= self.max_candidates:
break # the candidate list is full
previously_finished[seq] = newly_finished[seq]
# mark as completed if all audio has enough number of samples
completed = all(
len(sequences) >= self.max_candidates
for sequences in self.finished_sequences)
return tokens, completed
def finalize(self,
preceding_tokens: paddle.Tensor,
sum_logprobs: paddle.Tensor):
# collect all finished sequences, including patience, and add unfinished ones if not enough
sum_logprobs = sum_logprobs.cpu()
for i, sequences in enumerate(self.finished_sequences):
if len(sequences
) < self.beam_size: # when not enough sequences are finished
for j in list(np.argsort(sum_logprobs[i]))[::-1]:
sequence = preceding_tokens[i, j].tolist() + [self.eot]
sequences[tuple(sequence)] = sum_logprobs[i][j].item()
if len(sequences) >= self.beam_size:
break
tokens: List[List[paddle.Tensor]] = [
[paddle.to_tensor(seq) for seq in sequences.keys()]
for sequences in self.finished_sequences
]
sum_logprobs: List[List[float]] = [
list(sequences.values()) for sequences in self.finished_sequences
]
return tokens, sum_logprobs
class LogitFilter:
def apply(self, logits: paddle.Tensor, tokens: paddle.Tensor) -> None:
"""Apply any filtering or masking to logits in-place
Parameters
----------
logits : Tensor, shape = (n_batch, vocab_size)
per-token logits of the probability distribution at the current step
tokens : Tensor, shape = (n_batch, current_sequence_length)
all tokens in the context so far, including the prefix and sot_sequence tokens
"""
raise NotImplementedError
class SuppressBlank(LogitFilter):
def __init__(self, tokenizer: Tokenizer, sample_begin: int):
self.tokenizer = tokenizer
self.sample_begin = sample_begin
def apply(self, logits: paddle.Tensor, tokens: paddle.Tensor):
if tokens.shape[1] == self.sample_begin:
logits[:, self.tokenizer.encode(" ").input_ids +
[self.tokenizer.eot]] = -np.inf
class SuppressTokens(LogitFilter):
def __init__(self, suppress_tokens: Sequence[int]):
self.suppress_tokens = list(suppress_tokens)
def apply(self, logits: paddle.Tensor, tokens: paddle.Tensor):
logits[:, self.suppress_tokens] = -np.inf
class ApplyTimestampRules(LogitFilter):
def __init__(self,
tokenizer: Tokenizer,
sample_begin: int,
max_initial_timestamp_index: Optional[int]):
self.tokenizer = tokenizer
self.sample_begin = sample_begin
self.max_initial_timestamp_index = max_initial_timestamp_index
def apply(self, logits: paddle.Tensor, tokens: paddle.Tensor):
# suppress <|notimestamps|> which is handled by without_timestamps
if self.tokenizer.no_timestamps is not None:
logits[:, self.tokenizer.no_timestamps] = -np.inf
# timestamps have to appear in pairs, except directly before EOT; mask logits accordingly
for k in range(tokens.shape[0]):
seq = [t for t in tokens[k, self.sample_begin:].tolist()]
last_was_timestamp = len(seq) >= 1 and seq[
-1] >= self.tokenizer.timestamp_begin
penultimate_was_timestamp = len(seq) < 2 or seq[
-2] >= self.tokenizer.timestamp_begin
if last_was_timestamp:
if penultimate_was_timestamp: # has to be non-timestamp
logits[k, self.tokenizer.timestamp_begin:] = -np.inf
else: # cannot be normal text tokens
logits[k, :self.tokenizer.eot] = -np.inf
# apply the `max_initial_timestamp` option
if tokens.shape[
1] == self.sample_begin and self.max_initial_timestamp_index is not None:
last_allowed = self.tokenizer.timestamp_begin + self.max_initial_timestamp_index
logits[:, last_allowed + 1:] = -np.inf
# if sum of probability over timestamps is above any other token, sample timestamp
logprobs = F.log_softmax(logits, axis=-1, dtype=paddle.float32)
for k in range(tokens.shape[0]):
timestamp_logprob = paddle.logsumexp(
logprobs[k, self.tokenizer.timestamp_begin:], axis=-1)
max_text_token_logprob = paddle.max(
logprobs[k, :self.tokenizer.timestamp_begin])
if timestamp_logprob > max_text_token_logprob:
logits[k, :self.tokenizer.timestamp_begin] = -np.inf
class DecodingTask:
inference: Inference
sequence_ranker: SequenceRanker
decoder: TokenDecoder
logit_filters: List[LogitFilter]
def __init__(self,
model: "Whisper",
options: DecodingOptions,
resource_path: str):
self.model = model
language = options.language or "en"
tokenizer = get_tokenizer(
model.is_multilingual,
resource_path=resource_path,
language=language,
task=options.task)
self.tokenizer: Tokenizer = tokenizer
self.options: DecodingOptions = self._verify_options(options)
self.resource_path: str = resource_path
self.beam_size: int = options.beam_size or options.best_of or 1
self.n_ctx: int = model.dims.n_text_ctx
self.sample_len: int = options.sample_len or model.dims.n_text_ctx // 2
self.sot_sequence: Tuple[int] = tokenizer.sot_sequence
if self.options.without_timestamps:
self.sot_sequence = tokenizer.sot_sequence_including_notimestamps
self.initial_tokens: Tuple[int] = self._get_initial_tokens()
self.sample_begin: int = len(self.initial_tokens)
self.sot_index: int = self.initial_tokens.index(tokenizer.sot)
# inference: implements the forward pass through the decoder, including kv caching
self.inference = WhisperInference(model, len(self.initial_tokens))
# sequence ranker: implements how to rank a group of sampled sequences
self.sequence_ranker = MaximumLikelihoodRanker(options.length_penalty)
# decoder: implements how to select the next tokens, given the autoregressive distribution
if options.beam_size is not None:
self.decoder = BeamSearchDecoder(options.beam_size, tokenizer.eot,
self.inference, options.patience)
else:
self.decoder = GreedyDecoder(options.temperature, tokenizer.eot)
# logit filters: applies various rules to suppress or penalize certain tokens
self.logit_filters = []
if self.options.suppress_blank:
self.logit_filters.append(
SuppressBlank(self.tokenizer, self.sample_begin))
if self.options.suppress_tokens:
self.logit_filters.append(
SuppressTokens(self._get_suppress_tokens()))
if not options.without_timestamps:
precision = CHUNK_LENGTH / model.dims.n_audio_ctx # usually 0.02 seconds
max_initial_timestamp_index = None
if options.max_initial_timestamp:
max_initial_timestamp_index = round(
self.options.max_initial_timestamp / precision)
self.logit_filters.append(
ApplyTimestampRules(tokenizer, self.sample_begin,
max_initial_timestamp_index))
def _verify_options(self, options: DecodingOptions) -> DecodingOptions:
if options.beam_size is not None and options.best_of is not None:
raise ValueError("beam_size and best_of can't be given together")
if options.temperature == 0:
if options.best_of is not None:
raise ValueError(
"best_of with greedy sampling (T=0) is not compatible")
if options.patience is not None and options.beam_size is None:
raise ValueError("patience requires beam_size to be given")
if options.length_penalty is not None and not (
0 <= options.length_penalty <= 1):
raise ValueError(
"length_penalty (alpha) should be a value between 0 and 1")
return options
def _get_initial_tokens(self) -> Tuple[int]:
tokens = list(self.sot_sequence)
prefix = self.options.prefix
prompt = self.options.prompt
if prefix:
            prefix_tokens = (
                self.tokenizer.encode(" " + prefix.strip()).input_ids
                if isinstance(prefix, str) else prefix)
if self.sample_len is not None:
max_prefix_len = self.n_ctx // 2 - self.sample_len
prefix_tokens = prefix_tokens[-max_prefix_len:]
tokens = tokens + prefix_tokens
if prompt:
            prompt_tokens = (
                self.tokenizer.encode(" " + prompt.strip()).input_ids
                if isinstance(prompt, str) else prompt)
tokens = [self.tokenizer.sot_prev] + prompt_tokens[-(self.n_ctx // 2
- 1):] + tokens
return tuple(tokens)
def _get_suppress_tokens(self) -> Tuple[int]:
suppress_tokens = self.options.suppress_tokens
if isinstance(suppress_tokens, str):
suppress_tokens = [int(t) for t in suppress_tokens.split(",")]
if -1 in suppress_tokens:
suppress_tokens = [t for t in suppress_tokens if t >= 0]
suppress_tokens.extend(self.tokenizer.non_speech_tokens)
elif suppress_tokens is None or len(suppress_tokens) == 0:
suppress_tokens = [] # interpret empty string as an empty list
else:
assert isinstance(suppress_tokens,
list), "suppress_tokens must be a list"
suppress_tokens.extend([
self.tokenizer.sot, self.tokenizer.sot_prev, self.tokenizer.sot_lm
])
if self.tokenizer.no_speech is not None:
# no-speech probability is collected separately
suppress_tokens.append(self.tokenizer.no_speech)
return tuple(sorted(set(suppress_tokens)))
def _get_audio_features(self, mel: paddle.Tensor):
#if self.options.fp16:
# mel = mel.half()
        if tuple(mel.shape[-2:]) == (self.model.dims.n_audio_ctx,
                                     self.model.dims.n_audio_state):
# encoded audio features are given; skip audio encoding
audio_features = mel
else:
audio_features = self.model.encoder(mel)
#if audio_features.dtype != (np.float16 if self.options.fp16 else np.float32):
# return TypeError(f"audio_features has an incorrect dtype: {audio_features.dtype}")
return audio_features
def _detect_language(self,
audio_features: paddle.Tensor,
tokens: paddle.Tensor,
resource_path: str):
languages = [self.options.language] * audio_features.shape[0]
lang_probs = None
if self.options.language is None or self.options.task == "lang_id":
lang_tokens, lang_probs = self.model.detect_language(
audio_features, self.tokenizer, self.resource_path)
languages = [max(probs, key=probs.get) for probs in lang_probs]
if self.options.language is None:
tokens[:, self.sot_index +
1] = lang_tokens # write language tokens
return languages, lang_probs
def _main_loop(self, audio_features: paddle.Tensor, tokens: paddle.Tensor):
assert audio_features.shape[0] == tokens.shape[0]
n_batch = tokens.shape[0]
sum_logprobs: paddle.Tensor = paddle.zeros(
paddle.to_tensor(n_batch), dtype=paddle.float32)
no_speech_probs = [np.nan] * n_batch
try:
for i in range(self.sample_len):
logits = self.inference.logits(tokens, audio_features)
if i == 0 and self.tokenizer.no_speech is not None: # save no_speech_probs
probs_at_sot = F.softmax(
logits[:, self.sot_index],
axis=-1,
dtype=paddle.float32)
no_speech_probs = probs_at_sot[:, self.tokenizer.
no_speech].tolist()
# now we need to consider the logits at the last token only
logits = logits[:, -1]
# apply the logit filters, e.g. for suppressing or applying penalty to
for logit_filter in self.logit_filters:
logit_filter.apply(logits, tokens)
# expand the tokens tensor with the selected next tokens
tokens, completed = self.decoder.update(tokens, logits,
sum_logprobs)
if completed or tokens.shape[-1] > self.n_ctx:
break
finally:
self.inference.cleanup_caching()
return tokens, sum_logprobs, no_speech_probs
@paddle.no_grad()
def run(self, mel: paddle.Tensor) -> List[DecodingResult]:
self.decoder.reset()
tokenizer: Tokenizer = self.tokenizer
batch_size: int = mel.shape[0]
audio_features: paddle.Tensor = self._get_audio_features(
mel) # encoder forward pass
tokens: paddle.Tensor
        # Build one row of initial tokens per batch element (the previous
        # hard-coded concat only handled a batch size of two).
        tokens = paddle.concat(
            x=[paddle.to_tensor([self.initial_tokens]) for _ in range(batch_size)],
            axis=0)
# detect language if requested, overwriting the language token
languages, language_probs = self._detect_language(
paddle.to_tensor(audio_features),
paddle.to_tensor(tokens), self.resource_path)
if self.options.task == "lang_id":
return [
DecodingResult(
audio_features=features,
language=language,
language_probs=probs) for features, language, probs in
zip(audio_features, languages, language_probs)
]
# repeat the audio & text tensors by the group size, for beam search or best-of-n sampling
audio_features = paddle.repeat_interleave(
audio_features, self.beam_size, axis=0)
tokens = paddle.repeat_interleave(tokens, self.beam_size, axis=0)
# call the main sampling loop
tokens, sum_logprobs, no_speech_probs = self._main_loop(audio_features,
tokens)
# reshape the tensors to have (batch_size, beam_size) as the first two dimensions
audio_features = audio_features[::self.beam_size]
no_speech_probs = no_speech_probs[::self.beam_size]
assert audio_features.shape[0] == len(no_speech_probs) == batch_size
tokens = tokens.reshape([batch_size, self.beam_size, -1])
sum_logprobs = sum_logprobs.reshape([batch_size, self.beam_size])
# get the final candidates for each group, and slice between the first sampled token and EOT
tokens, sum_logprobs = self.decoder.finalize(tokens, sum_logprobs)
tokens: List[List[paddle.Tensor]] = [[
t[self.sample_begin:(t == tokenizer.eot).nonzero()[0, 0]] for t in s
] for s in tokens]
# select the top-ranked sample in each group
selected = self.sequence_ranker.rank(tokens, sum_logprobs)
tokens: List[List[
int]] = [t[i].tolist() for i, t in zip(selected, tokens)]
texts: List[str] = [tokenizer.decode(t).strip() for t in tokens]
sum_logprobs: List[
float] = [lp[i] for i, lp in zip(selected, sum_logprobs)]
avg_logprobs: List[
float] = [lp / (len(t) + 1) for t, lp in zip(tokens, sum_logprobs)]
fields = (texts, languages, tokens, audio_features, avg_logprobs,
no_speech_probs)
if len(set(map(len, fields))) != 1:
raise RuntimeError(
f"inconsistent result lengths: {list(map(len, fields))}")
return [
DecodingResult(
audio_features=features,
language=language,
tokens=tokens,
text=text,
avg_logprob=avg_logprob,
no_speech_prob=no_speech_prob,
temperature=self.options.temperature,
compression_ratio=utils.compression_ratio(text), )
for text, language, tokens, features, avg_logprob, no_speech_prob in
zip(*fields)
]
@paddle.no_grad()
def decode(
model: "Whisper",
mel: paddle.Tensor,
options: DecodingOptions=DecodingOptions(),
        resource_path: str=None, ) -> Union[DecodingResult, List[DecodingResult]]:
"""
Performs decoding of 30-second audio segment(s), provided as Mel spectrogram(s).
Parameters
----------
model: Whisper
the Whisper model instance
mel: paddle.Tensor, shape = (80, 3000) or (*, 80, 3000)
A tensor containing the Mel spectrogram(s)
options: DecodingOptions
A dataclass that contains all necessary options for decoding 30-second segments
Returns
-------
result: Union[DecodingResult, List[DecodingResult]]
The result(s) of decoding contained in `DecodingResult` dataclass instance(s)
"""
single = mel.ndim == 2
if single:
mel = mel.unsqueeze(0)
result = DecodingTask(model, options, resource_path).run(mel)
if single:
result = result[0]
return result
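# Usage sketch for decode() (hypothetical driver code; `model`, `audio` and
# `resource_path` are assumed to be prepared elsewhere):
#   mel = log_mel_spectrogram(pad_or_trim(audio), resource_path=resource_path)
#   options = DecodingOptions(task="transcribe", language="en", beam_size=5)
#   result = decode(model, mel, options, resource_path)
#   print(result.text)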
class Whisper(nn.Layer):
def __init__(self, dims: ModelDimensions):
super().__init__()
self.dims = dims
self.encoder = AudioEncoder(
self.dims.n_mels,
self.dims.n_audio_ctx,
self.dims.n_audio_state,
self.dims.n_audio_head,
self.dims.n_audio_layer, )
self.decoder = TextDecoder(
self.dims.n_vocab,
self.dims.n_text_ctx,
self.dims.n_text_state,
self.dims.n_text_head,
self.dims.n_text_layer, )
def embed_audio(self, mel: paddle.Tensor):
return self.encoder.forward(mel)
def logits(self, tokens: paddle.Tensor, audio_features: paddle.Tensor):
return self.decoder.forward(tokens, audio_features)
def forward(self, mel: paddle.Tensor,
tokens: paddle.Tensor) -> Dict[str, paddle.Tensor]:
return self.decoder(tokens, self.encoder(mel))
@property
def device(self):
return paddle.device.get_device()
@property
def is_multilingual(self):
return self.dims.n_vocab == 51865
def install_kv_cache_hooks(self, cache: Optional[dict]=None):
"""
The `MultiHeadAttention` module optionally accepts `kv_cache` which stores the key and value
tensors calculated for the previous positions. This method returns a dictionary that stores
all caches, and the necessary hooks for the key and value projection modules that save the
intermediate tensors to be reused during later calculations.
Returns
-------
cache : Dict[nn.Layer, paddle.Tensor]
            A dictionary object mapping the key/value projection modules to their caches
        hooks : List
            Paddle hook handles (returned by `register_forward_post_hook`) used to remove the hooks once decoding is finished
"""
cache = {**cache} if cache is not None else {}
hooks = []
def save_to_cache(module, _, output):
if module not in cache or output.shape[
1] > self.decoder.positional_embedding.shape[0]:
cache[
module] = output # save as-is, for the first token or cross attention
else:
cache[module] = paddle.concat(
[cache[module], output], axis=1).detach()
return cache[module]
def install_hooks(layer: nn.Layer):
if isinstance(layer, MultiHeadAttention):
hooks.append(
layer.key.register_forward_post_hook(save_to_cache))
hooks.append(
layer.value.register_forward_post_hook(save_to_cache))
self.decoder.apply(install_hooks)
return cache, hooks
detect_language = detect_language
transcribe = transcribe
decode = decode
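# Usage sketch for install_kv_cache_hooks() (hypothetical caller, mirroring how an
# incremental-decoding wrapper would manage the cache):
#   kv_cache, hooks = model.install_kv_cache_hooks()
#   ... run repeated decoder forward passes that read/write `kv_cache` ...
#   for hook in hooks:
#       hook.remove()  # detach the forward hooks once decoding is finished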
def pad_or_trim(array, length: int=N_SAMPLES, *, axis: int=-1):
"""
Pad or trim the audio array to N_SAMPLES, as expected by the encoder.
"""
if paddle.is_tensor(array):
if array.shape[axis] > length:
array = array.index_select(axis=axis, index=paddle.arange(length))
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = paddle.transpose(array, (1, 0))
array = F.pad(
array, [pad for sizes in pad_widths[::-1] for pad in sizes],
data_format='NLC')
array = paddle.transpose(array, (1, 0))
else:
if array.shape[axis] > length:
array = array.take(indices=range(length), axis=axis)
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = paddle.transpose(array, (1, 0))
array = np.pad(array, pad_widths)
array = paddle.transpose(array, (1, 0))
return array
def hann_window(n_fft: int=N_FFT):
"""
    Hann (hanning) window of length n_fft.
n_fft: The number of frequency components of the discrete Fourier transform.
"""
return paddle.to_tensor(
[0.5 - 0.5 * np.cos(2 * np.pi * n / n_fft) for n in range(n_fft)],
dtype=paddle.float32)
@lru_cache(maxsize=None)
def mel_filters(resource_path: str, n_mels: int=N_MELS) -> paddle.Tensor:
"""
load the mel filterbank matrix for projecting STFT into a Mel spectrogram.
Allows decoupling librosa dependency; saved using:
np.savez_compressed(
"mel_filters.npz",
mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
)
"""
assert n_mels == 80, f"Unsupported n_mels: {n_mels}"
with np.load(os.path.join(resource_path, "assets", "mel_filters.npz")) as f:
return paddle.to_tensor(f[f"mel_{n_mels}"])
def log_mel_spectrogram(audio: Union[str, np.ndarray, paddle.Tensor],
n_mels: int=N_MELS,
resource_path: str=None):
"""
    Compute the log-Mel spectrogram of the input audio.
Parameters
----------
audio: Union[str, np.ndarray, paddle.Tensor], shape = (*)
The path to audio or either a NumPy array or Tensor containing the audio waveform in 16 kHz
n_mels: int
The number of Mel-frequency filters, only 80 is supported
Returns
-------
paddle.Tensor, shape = (80, n_frames)
A Tensor that contains the Mel spectrogram
"""
if not paddle.is_tensor(audio):
if isinstance(audio, str):
audio, _ = soundfile.read(audio, dtype="float32", always_2d=True)
audio = audio[:, 0]
logger.info(f"audio shape: {audio.shape}")
audio = paddle.to_tensor(audio)
window = hann_window(N_FFT)
stft = paddle.signal.stft(audio, N_FFT, HOP_LENGTH, window=window)
magnitudes = stft[:, :-1].abs()**2
filters = mel_filters(resource_path, n_mels)
mel_spec = filters @ magnitudes
mel_spec = paddle.to_tensor(mel_spec.numpy().tolist())
log_spec = paddle.clip(mel_spec, min=1e-10).log10()
log_spec = paddle.maximum(log_spec, log_spec.max() - 8.0)
log_spec = (log_spec + 4.0) / 4.0
return log_spec
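# Feature-extraction sketch (file path is illustrative): pad/trim the waveform to
# 30 seconds of samples, then compute the log-Mel features consumed by the encoder.
#   audio, _ = soundfile.read("sample_16k.wav", dtype="float32", always_2d=True)
#   audio = pad_or_trim(paddle.to_tensor(audio[:, 0]))
#   mel = log_mel_spectrogram(audio, resource_path=resource_path)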
| [
"0",
"1",
"None",
" ",
"initial_prompt"
] |
2024-01-10 | junbin-tan/gpt_project1 | sample_runner.py | import os
import sys
import openai
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import DirectoryLoader, TextLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.indexes import VectorstoreIndexCreator
from langchain.indexes.vectorstore import VectorStoreIndexWrapper
from langchain.llms import OpenAI
from langchain.vectorstores import Chroma
import constants
os.environ["OPENAI_API_KEY"] = constants.APIKEY
# Enable to save to disk & reuse the model (for repeated queries on the same data)
PERSIST = False
query = sys.argv[1]
if PERSIST and os.path.exists("persist"):
print("Reusing index...\n")
vectorstore = Chroma(
persist_directory="persist", embedding_function=OpenAIEmbeddings()
)
index = VectorStoreIndexWrapper(vectorstore=vectorstore)
else:
loader = TextLoader("data/data.txt") # Use this line if you only need data.txt
# loader = DirectoryLoader("data/")
if PERSIST:
index = VectorstoreIndexCreator(
vectorstore_kwargs={"persist_directory": "persist"}
).from_loaders([loader])
else:
index = VectorstoreIndexCreator().from_loaders([loader])
chain = RetrievalQA.from_chain_type(
llm=ChatOpenAI(model="gpt-3.5-turbo"),
retriever=index.vectorstore.as_retriever(search_kwargs={"k": 1}),
)
print(chain.run(query))
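# Example invocation (query text is illustrative):
#   python sample_runner.py "What topics does data/data.txt cover?"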
| [] |
2024-01-10 | junbin-tan/gpt_project1 | chatgpt_runner.py | import os
import sys
import constants
from langchain.document_loaders import TextLoader
from langchain.document_loaders import DirectoryLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
os.environ["OPENAI_API_KEY"] = constants.APIKEY
query = sys.argv[1]
print("Your Question: " + query)
#single file
loader = TextLoader('data/data.txt')
#loading all files in directory
# loader = DirectoryLoader(".", glob="*.txt")
index = VectorstoreIndexCreator().from_loaders([loader])
#without passing in external model, only using vector store
print(index.query(query))
#passing in chatopenai for external model
# print(index.query(query, llm=ChatOpenAI())) | [] |
2024-01-10 | chiranth6-72/Converse-With-PDFs--MiniProject | chatbot_app.py | import streamlit as st
from PyPDF2 import PdfReader
from langchain.text_splitter import RecursiveCharacterTextSplitter
import google.generativeai as palm
from langchain.embeddings import GooglePalmEmbeddings
from langchain.llms import GooglePalm
from langchain.vectorstores import FAISS
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
import os
from io import BytesIO
from dotenv import load_dotenv
load_dotenv()
google_api_key = os.getenv("GOOGLE_API_KEY")
def get_file_size(file):
# file.seek(0, os.SEEK_END)
# file_size = file.tell()
# file.seek(0)
# return file_size
return file.getbuffer().nbytes
def get_pdf_text(pdf_docs):
text = ""
for pdf in pdf_docs:
pdf_reader = PdfReader(pdf)
for page in pdf_reader.pages:
text += page.extract_text()
return text
# def get_pdf_text(pdf_docs):
# text = ""
# for pdf in pdf_docs:
# file_like_object = BytesIO(pdf)
# pdf_reader = PdfReader(file_like_object)
# for page in pdf_reader.pages:
# text += page.extract_text()
# return text
def get_text_chunks(text):
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=1000, chunk_overlap=20)
chunks = text_splitter.split_text(text)
return chunks
def get_vector_store(text_chunks):
embeddings = GooglePalmEmbeddings()
vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
return vector_store
def get_conversational_chain(vector_store):
llm = GooglePalm()
memory = ConversationBufferMemory(
memory_key="chat_history", return_messages=True)
conversation_chain = ConversationalRetrievalChain.from_llm(
llm=llm, retriever=vector_store.as_retriever(), memory=memory)
return conversation_chain
def user_input(user_question):
response = st.session_state.conversation({'question': user_question})
st.session_state.chatHistory = response['chat_history']
    # Show the most recent exchange first while keeping the speaker labels
    # aligned with the stored (Human, AI, Human, AI, ...) ordering.
    total = len(st.session_state.chatHistory)
    for i, message in enumerate(reversed(st.session_state.chatHistory)):
        if (total - 1 - i) % 2 == 0:
            st.write("Human🗣️:")
            st.write(f"\t{message.content}")
        else:
            st.write("PDFbot🤖:")
            st.write(f"\t{message.content}")
def main():
st.set_page_config("Converse with PDFs", page_icon=':books:')
st.header("Converse with Multiple PDFs 📑💬")
user_question = st.text_input("Input a query for the PDF Files")
if "conversation" not in st.session_state:
st.session_state.conversation = None
if "chatHistory" not in st.session_state:
st.session_state.chatHistory = None
if user_question:
user_input(user_question)
with st.sidebar:
st.title("Settings")
st.subheader("Upload your Documents")
pdf_docs = st.file_uploader(
"Upload your PDF Files and Click on the Process Button", accept_multiple_files=True, type=['pdf'])
# pdf_bytes = None
if st.button("Process"):
with st.spinner("Processing"):
# pdf_bytes = pdf_docs.read()
# raw_text = get_pdf_text(pdf_bytes)
raw_text = get_pdf_text(pdf_docs)
text_chunks = get_text_chunks(raw_text)
vector_store = get_vector_store(text_chunks)
st.session_state.conversation = get_conversational_chain(
vector_store)
st.success("Done")
# if pdf_bytes is not None:
# file_details = {
# "Filename": pdf_docs.name,
# "File size": f'{len(pdf_bytes)} bytes'
# }
# st.markdown("<h4 style color:black;'>File details</h4>",
# unsafe_allow_html=True)
# st.json(file_details)
# if pdf_docs is not None:
# for uploaded_file in pdf_docs:
# file_details = {
# "Filename": uploaded_file.name,
# "File size": get_file_size(uploaded_file) # File size is calculated here
# }
# st.markdown("<h4 style color:black;'>File details</h4>", unsafe_allow_html=True)
# st.json(file_details)
if __name__ == "__main__":
main()
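# Launch locally (assumes GOOGLE_API_KEY is set in a .env file, as loaded above):
#   streamlit run chatbot_app.py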
| [] |
2024-01-10 | matwilso/reinforce | tensorflow~reinforce.py | #!/usr/bin/env python3
import argparse
import gym
import numpy as np
import tensorflow as tf
from itertools import count
from collections import namedtuple
parser = argparse.ArgumentParser(description='TensorFlow REINFORCE')
parser.add_argument('--gamma', type=float, default=0.99, metavar='G',
help='discount factor (default: 0.99)')
parser.add_argument('--seed', type=int, default=42, metavar='N',
help='random seed (default: 42)')
parser.add_argument('--log_interval', type=int, default=100, metavar='N',
help='interval between training status logs (default: 100)')
parser.add_argument('--render_interval', type=int, default=-1, metavar='N',
help='interval between rendering (default: -1)')
parser.add_argument('--env_id', type=str, default='LunarLander-v2',
help='gym environment to load')
args = parser.parse_args()
"""
This file implements the standard vanilla REINFORCE algorithm, also
known as Monte Carlo Policy Gradient.
This copies from the OpenAI baselines structure, which I found to be a bit
confusing at first, but actually quite nice and clean. (Tensorflow is just a
huge pain to learn, but once you do, it is not as bad.)
Resources:
Sutton and Barto: http://incompleteideas.net/book/the-book-2nd.html
Karpathy blog: http://karpathy.github.io/2016/05/31/rl/
OpenAI baselines PPO algorithm: https://github.com/openai/baselines/blob/master/baselines/ppo1/pposgd_simple.py
Glossary:
    (logits) = numerical policy preferences, or unnormalized probabilities of actions
        (i.e., the output of the last layer of the neural net)
"""
# HELPERS
def calculate_discounted_returns(rewards):
"""
Calculate discounted reward and then normalize it
(see Sutton book for definition)
Params:
rewards: list of rewards for every episode
"""
returns = np.zeros(len(rewards))
next_return = 0 # 0 because we start at the last timestep
for t in reversed(range(0, len(rewards))):
next_return = rewards[t] + args.gamma * next_return
returns[t] = next_return
# normalize for better statistical properties
returns = (returns - returns.mean()) / (returns.std() + np.finfo(np.float32).eps)
return returns
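# Worked example (hypothetical rewards): with gamma = 0.99 and rewards [1, 0, 2],
# the raw discounted returns are
#   G_2 = 2, G_1 = 0 + 0.99 * 2 = 1.98, G_0 = 1 + 0.99 * 1.98 = 2.9602
# i.e. [2.9602, 1.98, 2], which are then normalized to zero mean / unit variance.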
def normc_initializer(std=1.0, axis=0):
def _initializer(shape, dtype=None, partition_info=None): # pylint: disable=W0613
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=axis, keepdims=True))
return tf.constant(out)
return _initializer
# Class organization copied (roughly) from OpenAI baselines
class PolicyNetwork(object):
def __init__(self, ob_n, ac_n, hidden_dim=200, name='policy_network'):
with tf.variable_scope(name):
self._init(ob_n, ac_n, hidden_dim)
self.scope = tf.get_variable_scope().name
def _init(self, ob_n, ac_n, hidden_dim):
self.ob_n = ob_n
self.ac_n = ac_n
self.obs = tf.placeholder(dtype=tf.float32, shape=[None, ob_n])
x = tf.layers.dense(inputs=self.obs, units=hidden_dim, activation=tf.nn.relu, name='hidden')
self.logits = tf.layers.dense(inputs=x, units=self.ac_n, activation=None, kernel_initializer=normc_initializer(0.01), name='logits')
ac = self._sample()
stochastic = tf.placeholder(dtype=tf.bool, shape=())
self._act = self._run_gen(self.obs, ac)
def act(self, ob):
ac1 = self._act(ob[None])
return ac1
def _sample(self):
"""Random sample an action"""
u = tf.random_uniform(tf.shape(self.logits))
return tf.argmax(self.logits - tf.log(-tf.log(u)), axis=-1)
def _run_gen(self, ob, ac):
def run(ob_feed):
"""Run an observation through the nn to get an action
this will only be used to run the policy. To train it, we
later feed in the observations, selected actions, and rewards all at once"""
results = tf.get_default_session().run(ac, feed_dict={ob:ob_feed})
return results
return run
def neglogp(self, x):
"""This computes the negative log probability of the given action.
It is used to pass the gradient back through the network for training
(in tf speak, this is the loss that we minimize)
        NOTE: when we evaluate this, we are re-feeding all of the observations,
        chosen actions, and rewards back through the network, meaning we don't worry
        about caching while running the env. This is just for ease of implementation in TensorFlow.
"""
one_hot_actions = tf.one_hot(x, self.ac_n)
# see http://cs231n.github.io/linear-classify/#softmax
# and http://karpathy.github.io/2016/05/31/rl/
# The math matches up because we are using the softmax to sample actions
        # Why is the chosen action the label? Because that creates a signal that
        # always makes the chosen action more probable. Since we multiply this by the
        # return signal, good actions become more probable and bad actions less probable.
return tf.nn.softmax_cross_entropy_with_logits(
logits=self.logits,
labels=one_hot_actions)
class REINFORCE(object):
"""
Object to handle running the algorithm. Uses a PolicyNetwork
"""
def __init__(self, env):
self.ob_n = env.observation_space.shape[0]
self.ac_n = env.action_space.n
self.pi = PolicyNetwork(self.ob_n, self.ac_n)
self.obs = self.pi.obs
self.ac = tf.placeholder(tf.int32, shape=[None], name='ac')
self.atarg = tf.placeholder(tf.float32, shape=[None], name='atarg')
self.loss = self.atarg * self.pi.neglogp(self.ac)
self.optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
self.train_op = self.optimizer.minimize(self.loss, global_step=tf.train.get_global_step())
# TODO: was updating the training pipeline to match baselines
# TODO: i may just want to copy baselines and add in baby algorithms. fork it and call
# it baby baselines. REINFORCE, AC, and commented like shit. A ramp up to baselines
# proper
def select_action(self, obs, sess=None):
"""
        Run the observation through the policy network and sample an action to take.
        (TensorFlow tracks the gradients needed for the weight update, so nothing extra is cached here.)
"""
sess = sess or tf.get_default_session()
return self.pi.act(obs)
def update(self, ep_cache, sess=None):
returns = calculate_discounted_returns(ep_cache.rewards)
obs = np.array(ep_cache.obs)
taken_actions = np.array(ep_cache.actions)
sess = sess or tf.get_default_session()
feed_dict = {self.obs: obs, self.ac: taken_actions, self.atarg: returns}
sess.run([self.train_op], feed_dict=feed_dict)
def main():
"""Run REINFORCE algorithm to train on the environment"""
EpCache = namedtuple("EpCache", ["obs", "actions", "rewards"])
avg_reward = []
for i_episode in count(1):
ep_cache = EpCache([], [], [])
obs = env.reset()
for t in range(10000): # Don't infinite loop while learning
action = reinforce.select_action(obs)[0]
ep_cache.obs.append(obs)
ep_cache.actions.append(action)
obs, reward, done, _ = env.step(action)
ep_cache.rewards.append(reward)
if args.render_interval != -1 and i_episode % args.render_interval == 0:
env.render()
if done:
break
reinforce.update(ep_cache)
if i_episode % args.log_interval == 0:
print("Ave reward: {}".format(sum(avg_reward)/len(avg_reward)))
avg_reward = []
else:
avg_reward.append(sum(ep_cache.rewards))
if __name__ == '__main__':
env = gym.make(args.env_id)
env.seed(args.seed)
np.random.seed(args.seed)
reinforce = REINFORCE(env)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
main()
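# Example invocation (flags are those defined by the argparse parser above):
#   python reinforce.py --env_id LunarLander-v2 --render_interval 100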
| [] |
2024-01-10 | KamilDeja/joint_diffusion | guided_diffusion~logger.py | """
Logger copied from OpenAI baselines to avoid extra RL-based dependencies:
https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/logger.py
"""
import os
import sys
import shutil
import os.path as osp
import json
import time
import datetime
import tempfile
import warnings
from collections import defaultdict
from contextlib import contextmanager
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class KVWriter(object):
def writekvs(self, kvs):
raise NotImplementedError
class SeqWriter(object):
def writeseq(self, seq):
raise NotImplementedError
class HumanOutputFormat(KVWriter, SeqWriter):
def __init__(self, filename_or_file):
if isinstance(filename_or_file, str):
self.file = open(filename_or_file, "wt")
self.own_file = True
else:
assert hasattr(filename_or_file, "read"), (
"expected file or str, got %s" % filename_or_file
)
self.file = filename_or_file
self.own_file = False
def writekvs(self, kvs):
# Create strings for printing
key2str = {}
for (key, val) in sorted(kvs.items()):
if hasattr(val, "__float__"):
valstr = "%-8.3g" % val
else:
valstr = str(val)
key2str[self._truncate(key)] = self._truncate(valstr)
# Find max widths
if len(key2str) == 0:
print("WARNING: tried to write empty key-value dict")
return
else:
keywidth = max(map(len, key2str.keys()))
valwidth = max(map(len, key2str.values()))
# Write out the data
dashes = "-" * (keywidth + valwidth + 7)
lines = [dashes]
for (key, val) in sorted(key2str.items(), key=lambda kv: kv[0].lower()):
lines.append(
"| %s%s | %s%s |"
% (key, " " * (keywidth - len(key)), val, " " * (valwidth - len(val)))
)
lines.append(dashes)
lines.append(f'time: {datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S-%f")}')
self.file.write("\n".join(lines) + "\n")
# Flush the output to the file
self.file.flush()
def _truncate(self, s):
maxlen = 30
return s[: maxlen - 3] + "..." if len(s) > maxlen else s
def writeseq(self, seq):
seq = list(seq)
for (i, elem) in enumerate(seq):
self.file.write(elem)
if i < len(seq) - 1: # add space unless this is the last one
self.file.write(" ")
self.file.write("\n")
self.file.flush()
def close(self):
if self.own_file:
self.file.close()
class JSONOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, "wt")
def writekvs(self, kvs):
for k, v in sorted(kvs.items()):
if hasattr(v, "dtype"):
kvs[k] = float(v)
self.file.write(json.dumps(kvs) + "\n")
self.file.flush()
def close(self):
self.file.close()
class CSVOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, "w+t")
self.keys = []
self.sep = ","
def writekvs(self, kvs):
# Add our current row to the history
extra_keys = list(kvs.keys() - self.keys)
extra_keys.sort()
if extra_keys:
self.keys.extend(extra_keys)
self.file.seek(0)
lines = self.file.readlines()
self.file.seek(0)
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(",")
self.file.write(k)
self.file.write("\n")
for line in lines[1:]:
self.file.write(line[:-1])
self.file.write(self.sep * len(extra_keys))
self.file.write("\n")
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(",")
v = kvs.get(k)
if v is not None:
self.file.write(str(v))
self.file.write("\n")
self.file.flush()
def close(self):
self.file.close()
class TensorBoardOutputFormat(KVWriter):
"""
Dumps key/value pairs into TensorBoard's numeric format.
"""
def __init__(self, dir):
os.makedirs(dir, exist_ok=True)
self.dir = dir
self.step = 1
prefix = "events"
path = osp.join(osp.abspath(dir), prefix)
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from tensorflow.core.util import event_pb2
from tensorflow.python.util import compat
self.tf = tf
self.event_pb2 = event_pb2
self.pywrap_tensorflow = pywrap_tensorflow
self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
def writekvs(self, kvs):
def summary_val(k, v):
kwargs = {"tag": k, "simple_value": float(v)}
return self.tf.Summary.Value(**kwargs)
summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()])
event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
event.step = (
self.step
) # is there any reason why you'd want to specify the step?
self.writer.WriteEvent(event)
self.writer.Flush()
self.step += 1
def close(self):
if self.writer:
self.writer.Close()
self.writer = None
def make_output_format(format, ev_dir, log_suffix=""):
os.makedirs(ev_dir, exist_ok=True)
if format == "stdout":
return HumanOutputFormat(sys.stdout)
elif format == "log":
return HumanOutputFormat(osp.join(ev_dir, "log%s.txt" % log_suffix))
elif format == "json":
return JSONOutputFormat(osp.join(ev_dir, "progress%s.json" % log_suffix))
elif format == "csv":
return CSVOutputFormat(osp.join(ev_dir, "progress%s.csv" % log_suffix))
elif format == "tensorboard":
return TensorBoardOutputFormat(osp.join(ev_dir, "tb%s" % log_suffix))
else:
raise ValueError("Unknown format specified: %s" % (format,))
# ================================================================
# API
# ================================================================
def logkv(key, val):
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
If called many times, last value will be used.
"""
get_current().logkv(key, val)
def logkv_mean(key, val):
"""
The same as logkv(), but if called many times, values averaged.
"""
get_current().logkv_mean(key, val)
def logkvs(d):
"""
Log a dictionary of key-value pairs
"""
for (k, v) in d.items():
logkv(k, v)
def dumpkvs():
"""
Write all of the diagnostics from the current iteration
"""
return get_current().dumpkvs()
def getkvs():
return get_current().name2val
def log(*args, level=INFO):
"""
Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
"""
get_current().log(*args, level=level)
def debug(*args):
log(*args, level=DEBUG)
def info(*args):
log(*args, level=INFO)
def warn(*args):
log(*args, level=WARN)
def error(*args):
log(*args, level=ERROR)
def set_level(level):
"""
Set logging threshold on current logger.
"""
get_current().set_level(level)
def set_comm(comm):
get_current().set_comm(comm)
def get_dir():
"""
Get directory that log files are being written to.
will be None if there is no output directory (i.e., if you didn't call start)
"""
return get_current().get_dir()
record_tabular = logkv
dump_tabular = dumpkvs
@contextmanager
def profile_kv(scopename):
logkey = "wait_" + scopename
tstart = time.time()
try:
yield
finally:
get_current().name2val[logkey] += time.time() - tstart
def profile(n):
"""
Usage:
@profile("my_func")
def my_func(): code
"""
def decorator_with_name(func):
def func_wrapper(*args, **kwargs):
with profile_kv(n):
return func(*args, **kwargs)
return func_wrapper
return decorator_with_name
# ================================================================
# Backend
# ================================================================
def get_current():
if Logger.CURRENT is None:
_configure_default_logger()
return Logger.CURRENT
class Logger(object):
DEFAULT = None # A logger with no output files. (See right below class definition)
# So that you can still log to the terminal without setting up any output files
CURRENT = None # Current logger being used by the free functions above
def __init__(self, dir, output_formats, comm=None):
self.name2val = defaultdict(float) # values this iteration
self.name2cnt = defaultdict(int)
self.level = INFO
self.dir = dir
self.output_formats = output_formats
self.comm = comm
# Logging API, forwarded
# ----------------------------------------
def logkv(self, key, val):
self.name2val[key] = val
def logkv_mean(self, key, val):
oldval, cnt = self.name2val[key], self.name2cnt[key]
self.name2val[key] = oldval * cnt / (cnt + 1) + val / (cnt + 1)
self.name2cnt[key] = cnt + 1
def dumpkvs(self):
if self.comm is None:
d = self.name2val
else:
d = mpi_weighted_mean(
self.comm,
{
name: (val, self.name2cnt.get(name, 1))
for (name, val) in self.name2val.items()
},
)
if self.comm.rank != 0:
d["dummy"] = 1 # so we don't get a warning about empty dict
out = d.copy() # Return the dict for unit testing purposes
for fmt in self.output_formats:
if isinstance(fmt, KVWriter):
fmt.writekvs(d)
self.name2val.clear()
self.name2cnt.clear()
return out
def log(self, *args, level=INFO):
if self.level <= level:
self._do_log(args)
# Configuration
# ----------------------------------------
def set_level(self, level):
self.level = level
def set_comm(self, comm):
self.comm = comm
def get_dir(self):
return self.dir
def close(self):
for fmt in self.output_formats:
fmt.close()
# Misc
# ----------------------------------------
def _do_log(self, args):
for fmt in self.output_formats:
if isinstance(fmt, SeqWriter):
fmt.writeseq(map(str, args))
def get_rank_without_mpi_import():
# check environment variables here instead of importing mpi4py
# to avoid calling MPI_Init() when this module is imported
for varname in ["PMI_RANK", "OMPI_COMM_WORLD_RANK"]:
if varname in os.environ:
return int(os.environ[varname])
return 0
def mpi_weighted_mean(comm, local_name2valcount):
"""
Copied from: https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/common/mpi_util.py#L110
Perform a weighted average over dicts that are each on a different node
Input: local_name2valcount: dict mapping key -> (value, count)
Returns: key -> mean
"""
all_name2valcount = comm.gather(local_name2valcount)
if comm.rank == 0:
name2sum = defaultdict(float)
name2count = defaultdict(float)
for n2vc in all_name2valcount:
for (name, (val, count)) in n2vc.items():
try:
val = float(val)
except ValueError:
if comm.rank == 0:
warnings.warn(
"WARNING: tried to compute mean on non-float {}={}".format(
name, val
)
)
else:
name2sum[name] += val * count
name2count[name] += count
return {name: name2sum[name] / name2count[name] for name in name2sum}
else:
return {}
def configure(dir=None, format_strs=None, comm=None, log_suffix=""):
"""
If comm is provided, average all numerical stats across that comm
"""
if dir is None:
dir = os.getenv("OPENAI_LOGDIR")
if dir is None:
dir = osp.join(
tempfile.gettempdir(),
datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"),
)
assert isinstance(dir, str)
dir = os.path.expanduser(dir)
os.makedirs(os.path.expanduser(dir), exist_ok=True)
rank = get_rank_without_mpi_import()
if rank > 0:
log_suffix = log_suffix + "-rank%03i" % rank
if format_strs is None:
if rank == 0:
format_strs = os.getenv("OPENAI_LOG_FORMAT", "stdout,log,csv").split(",")
else:
format_strs = os.getenv("OPENAI_LOG_FORMAT_MPI", "log").split(",")
format_strs = filter(None, format_strs)
output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
if output_formats:
log("Logging to %s" % dir)
def _configure_default_logger():
configure()
Logger.DEFAULT = Logger.CURRENT
def reset():
if Logger.CURRENT is not Logger.DEFAULT:
Logger.CURRENT.close()
Logger.CURRENT = Logger.DEFAULT
log("Reset logger")
@contextmanager
def scoped_configure(dir=None, format_strs=None, comm=None):
prevlogger = Logger.CURRENT
configure(dir=dir, format_strs=format_strs, comm=comm)
try:
yield
finally:
Logger.CURRENT.close()
Logger.CURRENT = prevlogger
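# Usage sketch (directory and key names are illustrative):
#   configure(dir="logs/run0", format_strs=["stdout", "csv"])
#   logkv("loss", 0.42)
#   logkv_mean("grad_norm", 1.3)
#   dumpkvs()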
| [] |
2024-01-10 | wiskojo/thread-gpt | thread.py | import argparse
import json
import logging
import os
import re
import shutil
import time
from concurrent.futures import ThreadPoolExecutor
from io import BytesIO
from typing import Optional
from urllib.parse import urlparse
import layoutparser as lp
import openai
import pytesseract
import requests
from dotenv import load_dotenv
from pdf2image import convert_from_bytes
from pydantic import BaseModel, ConfigDict
from create_assistant import create_assistant
load_dotenv()
logging.basicConfig(handlers=[logging.StreamHandler()], level=logging.INFO)
logger = logging.getLogger(__name__)
class Block(BaseModel):
model_config = ConfigDict(arbitrary_types_allowed=True)
block: lp.elements.base.BaseLayoutElement
page_index: int
class CaptionedBlock(Block):
model_config = ConfigDict(arbitrary_types_allowed=True)
caption: lp.elements.base.BaseLayoutElement
def get_blocks_and_texts(layouts: list[lp.Layout]) -> tuple[list[Block], list[Block]]:
blocks = []
texts = []
for i, layout in enumerate(layouts):
for block in layout:
if block.type in ["Table", "Figure"]:
# Check if the current block overlaps with any existing block
for existing_block in blocks:
if existing_block.page_index != i:
# If the blocks are not on the same page, skip the overlap check
continue
overlap_area = existing_block.block.intersect(block).area
overlap_ratio = overlap_area / block.area
if overlap_ratio > 0.5:
# If the current block overlaps with an existing block by more than 50%
# Check which block is the "superset" block
if block.area > existing_block.block.area:
# If the current block is larger, replace the existing block with the current block
blocks.remove(existing_block)
blocks.append(Block(block=block, page_index=i))
# If the existing block is larger or equal, skip the current block
break
else:
# If the current block does not overlap significantly with any existing block, add it to the list
blocks.append(Block(block=block, page_index=i))
elif block.type == "Text":
texts.append(Block(block=block, page_index=i))
return blocks, texts
def caption_blocks(blocks: list[Block], texts: list[Block]) -> list[CaptionedBlock]:
captioned_blocks = []
# Find the closest text block to the top and bottom of the figure/table block
for block in blocks:
block_bottom_center = (
(block.block.block.x_1 + block.block.block.x_2) / 2,
block.block.block.y_2,
)
block_top_center = (
(block.block.block.x_1 + block.block.block.x_2) / 2,
block.block.block.y_1,
)
closest_text = None
closest_distance = float("inf")
for text in texts:
if text.page_index != block.page_index:
continue
text_top_center = (
(text.block.block.x_1 + text.block.block.x_2) / 2,
text.block.block.y_1,
)
text_bottom_center = (
(text.block.block.x_1 + text.block.block.x_2) / 2,
text.block.block.y_2,
)
distance_to_top = (
(block_bottom_center[0] - text_top_center[0]) ** 2
+ (block_bottom_center[1] - text_top_center[1]) ** 2
) ** 0.5
distance_to_bottom = (
(block_top_center[0] - text_bottom_center[0]) ** 2
+ (block_top_center[1] - text_bottom_center[1]) ** 2
) ** 0.5
# Reduce `distance_to_top` by 25% to bias towards picking bottom captions
distance = min(distance_to_top * 0.75, distance_to_bottom)
if distance < closest_distance:
closest_distance = distance
closest_text = text
if closest_text is not None:
captioned_blocks.append(
CaptionedBlock(
block=block.block,
caption=closest_text.block,
page_index=block.page_index,
)
)
return captioned_blocks
def combine_blocks(captioned_block, pages):
# Combine block and caption together
x_1 = min(captioned_block.block.block.x_1, captioned_block.caption.block.x_1)
y_1 = min(captioned_block.block.block.y_1, captioned_block.caption.block.y_1)
x_2 = max(captioned_block.block.block.x_2, captioned_block.caption.block.x_2)
y_2 = max(captioned_block.block.block.y_2, captioned_block.caption.block.y_2)
return pages[captioned_block.page_index].crop((x_1, y_1, x_2, y_2))
def process_captioned_block(captioned_block, pages, base_path):
combined_image = combine_blocks(captioned_block, pages)
# Convert the PIL Image object to base64
buffered = BytesIO()
combined_image.save(buffered, format="JPEG")
# Convert the PIL Image object to a string for caption
caption_image = pages[captioned_block.page_index].crop(
(
captioned_block.caption.block.x_1,
captioned_block.caption.block.y_1,
captioned_block.caption.block.x_2,
captioned_block.caption.block.y_2,
)
)
caption_text = pytesseract.image_to_string(caption_image)
figures_path = os.path.join(base_path, "figures")
os.makedirs(figures_path, exist_ok=True)
# Convert the caption text to snake case alpha numeric and truncate, then add .jpg to it
img_name = re.sub("[^0-9a-zA-Z]+", "_", caption_text)[:30] + ".jpg"
img_path = os.path.join(figures_path, img_name)
with open(img_path, "wb") as f:
f.write(buffered.getvalue())
return {"image": f"figures/{img_name}", "caption": caption_text}
def process_pdf(content: bytes, model: lp.models.Detectron2LayoutModel, base_path: str):
pages = convert_from_bytes(content)
logger.info("PDF converted to images")
with ThreadPoolExecutor(max_workers=16) as executor:
layouts = list(executor.map(model.detect, pages))
logger.info("Layout detection completed")
blocks, texts = get_blocks_and_texts(layouts)
logger.info("Blocks and texts extracted")
captioned_blocks = caption_blocks(blocks, texts)
logger.info("Captioning completed")
with ThreadPoolExecutor(max_workers=16) as executor:
results = list(
executor.map(
lambda captioned_block: process_captioned_block(
captioned_block, pages, base_path
),
captioned_blocks,
)
)
return results
def wait_on_run(run, thread, client: openai.OpenAI):
while run.status == "queued" or run.status == "in_progress":
run = client.beta.threads.runs.retrieve(
thread_id=thread.id,
run_id=run.id,
)
time.sleep(0.5)
return run
def generate_thread_content(
pdf_path: str, results: dict, client: openai.OpenAI, assistant_id: str
):
with open(pdf_path, "rb") as f:
pdf_file = client.files.create(file=f, purpose="assistants")
try:
thread = client.beta.threads.create()
message = client.beta.threads.messages.create(
thread_id=thread.id,
role="user",
content=f"{json.dumps(results)}\n\nCreate a thread for this. Your answer must be in JSON, media links should be from the local paths above.",
file_ids=[pdf_file.id],
)
run = client.beta.threads.runs.create(
thread_id=thread.id, assistant_id=assistant_id
)
run = wait_on_run(run, thread, client)
messages = client.beta.threads.messages.list(
thread_id=thread.id, order="asc", after=message.id
)
        # TODO: OpenAI can sometimes return no new messages (possibly a bug: the run completes successfully but no new messages are listed in the thread); catch this and raise an error
if not messages.data or not messages.data[0].content:
raise ValueError("Unexpected empty response from OpenAI. Please try again.")
except Exception as e:
logger.error(f"Failed to generate thread content: {e}")
raise
finally:
# Delete uploaded PDF file
try:
client.files.delete(file_id=pdf_file.id)
except Exception as e:
logger.error(f"Failed to delete file: {e}")
# Extract JSON content from the message
message_content = messages.data[0].content[0].text.value
json_content = re.search(r"(```json\n)(.*?)(\n```)", message_content, re.DOTALL)
if json_content is None:
json_content = re.search(r"(```\n)(.*?)(\n```)", message_content, re.DOTALL)
if json_content is not None:
json_content = json_content.group(2)
try:
paper_thread = json.loads(json_content)
except (json.JSONDecodeError, TypeError):
raise ValueError(
"The thread generated by OpenAI was not in the expected JSON format."
)
return paper_thread
def process_thread(thread_data, base_path):
processed_data = []
media_set = set()
for data in thread_data:
cleaned_content = re.sub(
r"【\d+†source】", "", data["content"]
) # Remove all source annotations
media_list = []
for media in data.get("media", []):
if media["path"] and media["path"] not in media_set:
media_file_path = os.path.join(base_path, media["path"])
if os.path.isfile(media_file_path):
media_list.append(media)
media_set.add(media["path"])
processed_data.append({"content": cleaned_content, "media": media_list})
return processed_data
def render_markdown(processed_thread):
markdown_content = ""
for data in processed_thread:
markdown_content += data["content"] + "\n"
for media in data["media"]:
markdown_content += f'\n<div align="center">\n'
markdown_content += f' <img src="{media["path"]}" alt="{media.get("explain", "")}" style="max-width: 75%;">\n'
markdown_content += "</div>\n"
markdown_content += "\n---\n\n"
return markdown_content
def uri_validator(x):
try:
result = urlparse(x)
return all([result.scheme, result.netloc])
except:
return False
def create_thread(
pdf_url_or_path: str, output_path: str, client: openai.OpenAI, assistant_id: str
):
# Extract the PDF name from the URL and remove any file extension at the end
pdf_name = os.path.splitext(pdf_url_or_path.split("/")[-1])[0]
base_path = os.path.join(output_path, pdf_name)
results_path = os.path.join(base_path, "results.json")
pdf_path = os.path.join(base_path, f"{pdf_name}.pdf")
thread_path = os.path.join(base_path, "thread.json")
processed_thread_path = os.path.join(base_path, "processed_thread.json")
markdown_path = os.path.join(base_path, "processed_thread.md")
# Check if base path already exists and there is a results.json
# If so, assume we've run this before and just return results
if os.path.exists(base_path) and os.path.isfile(results_path):
with open(results_path, "r") as f:
results = json.load(f)
else:
os.makedirs(base_path, exist_ok=True)
if uri_validator(pdf_url_or_path):
pdf_content = requests.get(pdf_url_or_path).content
with open(pdf_path, "wb") as f:
f.write(pdf_content)
elif os.path.isfile(pdf_url_or_path):
shutil.copy(pdf_url_or_path, pdf_path)
with open(pdf_path, "rb") as f:
pdf_content = f.read()
else:
raise ValueError(
f"Invalid input: {pdf_url_or_path}. It should be a valid URL or a file path."
)
model = lp.models.Detectron2LayoutModel(
config_path="lp://PubLayNet/mask_rcnn_X_101_32x8d_FPN_3x/config",
extra_config=["MODEL.ROI_HEADS.SCORE_THRESH_TEST", 0.5],
label_map={0: "Text", 1: "Title", 2: "List", 3: "Table", 4: "Figure"},
)
results = process_pdf(pdf_content, model, base_path)
# Remove duplicates from results
results = [dict(t) for t in set(tuple(d.items()) for d in results)]
with open(results_path, "w") as f:
json.dump(results, f, indent=2)
paper_thread = generate_thread_content(pdf_path, results, client, assistant_id)
with open(thread_path, "w") as f:
json.dump(paper_thread, f, indent=2)
# Process the thread
processed_thread = process_thread(paper_thread, base_path)
with open(processed_thread_path, "w") as f:
json.dump(processed_thread, f, indent=2)
# Save processed thread as a markdown file
markdown_content = render_markdown(processed_thread)
with open(markdown_path, "w") as f:
f.write(markdown_content)
logger.info(f"Saved all outputs to: {os.path.abspath(base_path)}")
return base_path
def create_assistant_then_thread(
pdf_url_or_path: str,
output_path: str,
client: openai.OpenAI,
assistant_kwargs: Optional[dict] = None,
):
if assistant_kwargs is None:
assistant_kwargs = {}
try:
assistant = create_assistant(client, **assistant_kwargs)
except Exception:
logger.error("Failed to create assistant", exc_info=True)
raise
try:
saved_path = create_thread(
pdf_url_or_path,
output_path,
client,
assistant.id,
)
except Exception:
logger.error("Failed to create thread", exc_info=True)
raise
finally:
try:
client.beta.assistants.delete(assistant.id)
except Exception:
logger.error("Failed to delete assistant", exc_info=True)
raise
return saved_path
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Process a PDF from a URL or a local path."
)
parser.add_argument(
"url_or_path", type=str, help="The URL or local path of the PDF to process."
)
parser.add_argument(
"-o",
"--output",
default="data",
help="The output directory to store the results.",
)
args = parser.parse_args()
client = openai.OpenAI(api_key=os.environ["OPENAI_API_KEY"])
create_assistant_then_thread(args.url_or_path, args.output, client)
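# Example invocation (URL and output directory are illustrative):
#   python thread.py https://arxiv.org/pdf/1706.03762.pdf -o data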
| [] |
2024-01-10 | Deepakv1210/DataSage-Insight | data_ana.py | # Empty VRAM cache
import torch
import gc
gc.collect()
torch.cuda.empty_cache()
import time
#Import required libraries
import os
from Openai_api import apikey
import streamlit as st
import pandas as pd
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain, SimpleSequentialChain, SequentialChain
from langchain.utilities import WikipediaAPIWrapper
from langchain_experimental.tools.python.tool import PythonREPLTool
from langchain.agents.agent_types import AgentType
#from langchain.agents import create_pandas_dataframe_agent
#from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent
from langchain_experimental.agents import create_pandas_dataframe_agent
#from langchain.agents.agent_toolkits import create_python_agent
from langchain_experimental.agents.agent_toolkits.python.base import create_python_agent
from dotenv import load_dotenv, find_dotenv
import torch
#from langchain import HuggingFacePipeline
#from langchain.llms.huggingface_pipeline import HuggingFacePipeline
#from transformers import AutoTokenizer, GenerationConfig, AutoConfig,pipeline
# from ctransformers import AutoModelForCausalLM
# from transformers import AutoTokenizer, GenerationConfig, AutoConfig,pipeline
# Load LLM and Tokenizer
# Use `gpu_layers` to specify how many layers will be offloaded to the GPU.
# model = AutoModelForCausalLM.from_pretrained(
# "TheBloke/zephyr-7B-beta-GGUF",
# model_file="zephyr-7b-beta.Q4_K_M.gguf",
# model_type="mistral", gpu_layers=50, hf=True
# )
# tokenizer = AutoTokenizer.from_pretrained(
# "HuggingFaceH4/zephyr-7b-beta", use_fast=True
# )
# from langchain.llms.huggingface_pipeline import HuggingFacePipeline
# text_pipeline = pipeline(
# "text-generation",
# model=model,
# use_cache=True,
# tokenizer=tokenizer,
# max_new_tokens=128
# )
os.environ['OPENAI_API_KEY'] = apikey
load_dotenv(find_dotenv())
st.title("DataSage Insight 🤖")
st.write("Hello, 👋 I am DataSage and I am here to help you with data science projects.")
with st.sidebar:
st.write('*Please upload your CSV files to Analyze*')
st.caption('''**Discover hidden gems in your data with our powerful analytics and visualization tools.
No PhD in data science required! Our intuitive interface ensures that everyone can navigate and analyze data like a pro.**
''')
st.divider()
# with st.expander('Expander section'):
# st.write('Test')
st.caption("<p style ='text-align:center'> Open for Everyone..🎁</p>",unsafe_allow_html=True )
if 'clicked' not in st.session_state:
st.session_state.clicked ={1:False}
def clicked(button):
st.session_state.clicked[button]= True
st.button("Let's get started!!", on_click = clicked, args=[1])
if st.session_state.clicked[1]:
st.header('Data Analysis')
st.subheader('Checking..')
user_csv = st.file_uploader("Upload your file here", type="csv")
if user_csv is not None:
user_csv.seek(0)
df = pd.read_csv(user_csv, low_memory=False)
#llm model
llm = OpenAI(temperature = 0)
#llm = HuggingFacePipeline(pipeline=text_pipeline, model_kwargs={"temperature": 0})
#Function sidebar
@st.cache_data
def steps():
steps_eda = llm('What are the steps of Data Analysis. Tell in short')
return steps_eda
#Testing
pandas_agent=create_pandas_dataframe_agent(llm,df,verbose=True)
# q='What is this data about?'
# ans=pandas_agent.run(q)
# st.write(ans)
@st.cache_data
def function_agent():
st.write("**Data Overview**")
st.write("The first rows dataset look like this:")
st.write(df.head())
st.write("**Data Cleaning**")
# columns_df = pandas_agent.run("What are the meaning of the columns?")
# st.write(columns_df)
# missing_values = pandas_agent.run("How many missing values does this dataframe have? Start the answer with 'There are'")
# st.write(missing_values)
st.write("**Data Summarisation**")
st.write(df.describe())
analysis = pandas_agent.run("What is maximum profit that I could have got? Explain how")
st.write(analysis)
# conc = pandas_agent.run("So what can you conclude from this data?.")
# st.write(conc)
# new_features = pandas_agent.run("What new features would be interesting to create? Just give some ideas.")
# st.write(new_features)
return
@st.cache_data
def function_question_variable():
st.bar_chart(df, y =[user_question_variable])
summary_statistics = pandas_agent.run(f"Give me a summary of the statistics of {user_question_variable}")
st.write(summary_statistics)
# trends = pandas_agent.run(f"Analyse trends, seasonality, or cyclic patterns of {user_question_variable}")
# st.write(trends)
# missing_values = pandas_agent.run(f"Determine the extent of missing values of {user_question_variable}")
# st.write(missing_values)
return
@st.cache_data
def function_question_dataframe():
dataframe_info = pandas_agent.run(user_question_dataframe)
st.write(dataframe_info)
return
@st.cache_resource
def wiki(prompt):
wiki_research = WikipediaAPIWrapper().run(prompt)
return wiki_research
@st.cache_data
def prompt_templates():
data_problem_template = PromptTemplate(
input_variables=['business_problem'],
template='Convert the following business problem into a data science problem: {business_problem}.'
)
model_selection_template = PromptTemplate(
input_variables=['data_problem', 'wikipedia_research'],
template='Give a list of machine learning algorithms that are suitable to solve this problem: {data_problem}, while using this wikipedia research: {wikipedia_research}.'
)
return data_problem_template, model_selection_template
@st.cache_resource
def chains():
data_problem_chain = LLMChain(llm=llm, prompt=prompt_templates()[0], verbose=True, output_key='data_problem')
model_selection_chain = LLMChain(llm=llm, prompt=prompt_templates()[1], verbose=True, output_key='model_selection')
sequential_chain = SequentialChain(chains=[data_problem_chain, model_selection_chain], input_variables=['business_problem', 'wikipedia_research'], output_variables=['data_problem', 'model_selection'], verbose=True)
return sequential_chain
@st.cache_resource
def chains_output(prompt, wiki_research):
my_chain = chains()
my_chain_output = my_chain({'business_problem': prompt, 'wikipedia_research': wiki_research})
my_data_problem = my_chain_output["data_problem"]
my_model_selection = my_chain_output["model_selection"]
return my_data_problem, my_model_selection
@st.cache_data
def list_to_selectbox(my_model_selection_input):
algorithm_lines = my_model_selection_input.split('\n')
algorithms = [algorithm.split(':')[-1].split('.')[-1].strip() for algorithm in algorithm_lines if algorithm.strip()]
algorithms.insert(0, "Select Algorithm")
formatted_list_output = [f"{algorithm}" for algorithm in algorithms if algorithm]
return formatted_list_output
@st.cache_resource
def python_agent():
agent_executor = create_python_agent(
llm=llm,
tool=PythonREPLTool(),
verbose=True,
agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
handle_parsing_errors=True,
)
return agent_executor
@st.cache_data
def python_solution(my_data_problem, selected_algorithm, user_csv):
solution = python_agent().run(f"Write a Python script to solve this: {my_data_problem}, using this algorithm: {selected_algorithm}, using this as your dataset: {user_csv}, and handle all the exceptions raised efficiently."
)
return solution
#Main
st.header('Data analysis')
st.subheader('General information about the dataset')
# with st.sidebar:
# with st.expander('Steps of Data Analysis'):
# st.write(steps())
function_agent()
st.subheader('Parameter study')
user_question_variable = st.text_input('What parameter are you interested in')
if user_question_variable is not None and user_question_variable !="":
function_question_variable()
st.subheader('Further study')
if user_question_variable:
user_question_dataframe = st.text_input( "Is there anything else you would like to know about your dataframe?")
if user_question_dataframe is not None and user_question_dataframe not in ("","no","No"):
function_question_dataframe()
if user_question_dataframe in ("no", "No"):
st.write("")
if user_question_dataframe:
st.divider()
st.header("Data Science Problem")
st.write("Reframing the business problem into a data science problem...")
prompt = st.text_area('What is the business problem you would like to solve?')
if prompt:
wiki_research = wiki(prompt)
my_data_problem = chains_output(prompt, wiki_research)[0]
my_model_selection = chains_output(prompt, wiki_research)[1]
st.write(my_data_problem)
st.write(my_model_selection)
formatted_list = list_to_selectbox(my_model_selection)
selected_algorithm = st.selectbox("Select Machine Learning Algorithm", formatted_list)
if selected_algorithm is not None and selected_algorithm != "Select Algorithm":
st.subheader("Solution")
solution = python_solution(my_data_problem, selected_algorithm, user_csv)
st.write(solution) | [
"What is the business problem you would like to solve?",
"data_problem",
"Give a list of machine learning algorithms that are suitable to solve this problem: {data_problem}, while using this wikipedia research: {wikipedia_research}.",
"wikipedia_research",
"business_problem",
"Convert the following business problem into a data science problem: {business_problem}."
] |
2024-01-10 | muhai-project/mira | code~semantify.py | import os
import openai
from tqdm import tqdm
from rdflib import Graph, URIRef, Namespace, Literal
from rdflib.namespace import SKOS, RDF, RDFS, XSD
import datetime
import text2term
from pyshacl import validate
import pandas as pd
from word_forms.word_forms import get_word_forms
import re
from itertools import chain
import spacy
import numpy as np
import pickle
#!python -m spacy download en_core_web_sm
nlp = spacy.load("en_core_web_sm")
import argparse
def semantify_paper_batch(papers,api_key,max=None):
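    # For each abstract, prompt GPT-4 to describe the paper's claim as Turtle triples over the MIRA ontology, parse the reply with rdflib, and merge everything into a single skolemized graph.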
full_g = Graph()
openai.api_key = api_key
for paper in tqdm(papers[0:max]):
prompt_text = """
'"""+paper['abstract']+ """'
Describe the claim of the abstract above only using RDF (the turtle syntax), and using the following ontology:
:hasStudy rdfs:domain bibo:AcademicArticle .
:hasStudy rdfs:range sio:ObservationalStudy .
:hasHypothesis rdfs:domain sio:ObservationalStudy .
:hasHypothesis rdfs:range sio:Hypothesis .
:independentVariable rdfs:domain sio:Hypothesis.
:independentVariable rdfs:range qb:DimensionProperty .
:mediatorVariable rdfs:domain sio:Hypothesis .
:mediatorVariable rdfs:range :DimensionProperty .
:dependentVariable rdfs:domain sio:Hypothesis .
:dependentVariable rdfs:range qb:MeasureProperty.
:hasRelation rdfs:domain :Hypothesis .
:hasRelation rdfs:range :RelationProperty .
:hasQualifier rdfs:domain :Hypothesis .
:hasQualifier rdfs:range :Qualifier .
:moderatorVariable rdfs:domain sio:Hypothesis .
:moderatorVariable rdfs:range :DimensionProperty .
:moderatorEffectOnStatementStrength rdfs:domain :Hypothesis .
:moderatorEffectOnStatementStrength rdfs:range :Qualifier .
:moderatorContext rdfs:domain sio:Hypothesis .
:moderatorContext rdfs:range sio:HumanPopulation, sio:GeographicRegion, sio:Organization .
:hasContext rdfs:domain sio:Hypothesis, :Moderator, :Mediator .
:hasContext rdfs:range sio:HumanPopulation, sio:GeographicRegion, sio:Organization .
:representedBy rdfs:domain sio:HumanPopulation, sio:GeographicRegion, sio:Organization .
:representedBy rdfs:range :Sample .
time:hasTime rdfs:domain :Sample .
time:hasTime rdfs:range time:TemporalEntity .
sem:hasPlace rdfs:domain :Sample .
sem:hasPlace rdfs:range sio:GeographicRegion .
geonames:locatedIn rdfs:domain sio:GeographicRegion .
geonames:locatedIn rdfs:range geonames:Feature .
time:hasBeginning rdfs:domain rdf:TemporalEntity .
time:hasBeginning rdfs:range time:Instant .
time:hasEnd rdfs:domain rdf:TemporalEntity .
time:hasEnd rdfs:range time:Instant .
time:inXSDDate rdfs:domain time:Instant .
time:inXSDDate rdfs:range rdf:XMLLiteral .
1. use rdfs:label to describe all blank nodes, also the geographic region. Use short descriptions, pieces of text verbatim from the abstract, and add language tags to all labels. An example would be: [] :hasSubject [ rdfs:label 'social class'@en ].
2. for instances of the class geonames:Feature, find the URI for the place name in geonames (uri = https://www.geonames.org/<code>) like so: [] geonames:locatedIn <uri> (Don't leave any spaces between the URI and angle brackets). If you cannot find a place in the abstract, omit these triples.
3. use prefixes (xsd,time,ex,geonames,rdf,rdfs,sem,skos,sio,sp) (the namespace for sp is https://w3id.org/linkflows/superpattern/latest/)
4. the individual of bibo:AcademicArticle is: ex:"""+paper['paperId']+""". Don't enclose the individual with brackets.
5. If you can't find a time reference in the abstract, try to estimate the dates.
6. include all classes of all individuals using rdf:type
7. a hypothesis describes the effect of an independent variable (such as social class or age) on a dependent variable (such as mortality). Optional variables are: a mediating variable (such as a country's living standards), which explains the process through which the independent and dependent variables are related, and a moderating variable which affects the strength and direction of that relationship.
8. for values of :hasRelation use sp:affects
9. for qualifiers, choose from the following: :strongMediumNegative, :strongMedium, :weakNegative, :weak, :no, :weakPositive, :strongMediumPositive)
10. don't create your own identifiers but use blank nodes in case no IRI is available
11. make sure to add a qualifier for the relation between independent and dependent variable, but also to the moderator.
12. link hypothesis contexts to study indicators of that context. For example, if the modifier variable is food prices, it could be that the context is a geographic region with the indicator Recession.
Only return proper RDF, no free text comments.
"""
try:
response = openai.ChatCompletion.create(model="gpt-4",
messages=[{"role": "user", "content": prompt_text}],
temperature=0)
except:
response = openai.ChatCompletion.create(model="gpt-4",
messages=[{"role": "user", "content": prompt_text}],
temperature=0)
prefixes = """
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix bibo: <http://purl.org/ontology/bibo/> .
@prefix time: <http://www.w3.org/2006/time#> .
@prefix ex: <http://example.org/> .
@prefix geonames: <http://www.geonames.org/ontology#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix sem: <http://semanticweb.cs.vu.nl/2009/11/sem/> .
@prefix skos: <http://www.w3.org/2004/02/skos/core#> .
@prefix sio: <http://semanticscience.org/resource/> .
@prefix sp: <https://w3id.org/linkflows/superpattern/latest/> .
@prefix qb: <http://purl.org/linked-data/cube#> .
@prefix : <https://w3id.org/mira/> .
"""
try:
print(paper['abstract']+'\n')
print(response.choices[0].message.content)
g=Graph()
g.parse(data=prefixes+response.choices[0].message.content, format="turtle")
full_g += g
except Exception as e:
print(e)
full_g = full_g.skolemize()
return full_g
def process_graph(batch):
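    # Rewrite GPT-produced terms to canonical SIO/qb IRIs, then turn moderator annotations into explicit interaction-effect resources via the CONSTRUCT/DELETE queries below.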
# Define the old and new IRI prefixes
old_prefixes = ['HumanPopulation','https://w3id.org/mira/hasStudy','Organization','GeographicRegion','GeographicalRegion',
'https://w3id.org/mira/representedBy','ObservationalStudy','https://w3id.org/mira/hasHypothesis',
'independentVariable','dependentVariable','http://semanticscience.org/resource/Hypothesis','moderatorVariable',
'mediatorVariable','https://w3id.org/mira/DimensionProperty',
'http://purl.org/linked-data/cube#/DimensionProperty','http://purl.org/linked-data/cube#/MeasureProperty','https://w3id.org/mira/Sample']
new_prefixes = ['SIO_001062','http://semanticscience.org/resource/SIO_000008','SIO_000012','SIO_000414','SIO_000414',
'http://semanticscience.org/resource/SIO_000205','SIO_000976','http://semanticscience.org/resource/SIO_000008',
'hasSubject','hasObject','https://w3id.org/mira/Explanation','hasModerator','hasMediator',
'http://purl.org/linked-data/cube#DimensionProperty','http://purl.org/linked-data/cube#DimensionProperty',
'http://purl.org/linked-data/cube#MeasureProperty','http://semanticscience.org/resource/SIO_001050']
# Iterate through the triples in the graph and replace IRIs
new_triples = []
for subject, predicate, obj in batch:
for old_prefix,new_prefix in zip(old_prefixes,new_prefixes):
if isinstance(subject, URIRef):
subject = URIRef(str(subject).replace(old_prefix, new_prefix))
if isinstance(predicate, URIRef):
predicate = URIRef(str(predicate).replace(old_prefix, new_prefix))
if isinstance(obj, URIRef):
obj = URIRef(str(obj).replace(old_prefix, new_prefix))
new_triples.append((subject, predicate, obj))
# Clear the old triples and add the new ones
batch = Graph()
for triple in new_triples:
batch.add(triple)
query = """
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
prefix mira: <https://w3id.org/mira/>
PREFIX sio: <http://semanticscience.org/resource/>
construct {
?study sio:SIO_000008 ?interaction .
?interaction a mira:Explanation ;
a mira:InteractionEffect ;
mira:hasSubject ?mod_var ;
mira:hasRelation mira:moderates ;
mira:hasObject ?exp ;
mira:hasQualifier ?qual ;
mira:hasContext ?context .
?context sio:SIO_000205 ?sample .
?mod_var ?p ?o .
} where {
?study sio:SIO_000008 ?exp .
?exp a mira:Explanation ;
mira:hasModerator ?mod_var ;
mira:moderatorEffectOnStatementStrength ?qual ;
mira:moderatorContext ?context ;
mira:hasContext/sio:SIO_000205 ?sample .
?mod_var ?p ?o .
?mod_var rdfs:label ?label .
BIND (IRI(CONCAT("https://w3id.org/mira/", REPLACE(LCASE(STR(?label)), " ", "_"))) AS ?interaction)
}
"""
# Execute the SPARQL query
query_result = batch.query(query)
mods = Graph()
for row in query_result:
s, p, o = row
mods.add((s, p, o))
delete_query = """
prefix mira: <https://w3id.org/mira/>
delete {?exp mira:hasModerator ?mod_var } where {?exp mira:hasModerator ?mod_var };
"""
batch.update(delete_query)
delete_query = """
prefix mira: <https://w3id.org/mira/>
delete {?exp mira:moderatorEffectOnStatementStrength ?qual } where {?exp mira:moderatorEffectOnStatementStrength ?qual }
"""
batch.update(delete_query)
delete_query = """
prefix mira: <https://w3id.org/mira/>
delete {?exp mira:moderatorContext ?context } where {?exp mira:moderatorContext ?context }
"""
batch.update(delete_query)
batch += mods
return batch
def add_bibo_metadata(papers,batch):
for s,p,o in batch.triples((None,RDF.type,URIRef("http://purl.org/ontology/bibo/AcademicArticle"))):
for paper in papers:
if paper['paperId'] == s.n3().split('/')[-1].split('>')[0]:
batch.add((s,URIRef("http://purl.org/dc/terms/identifier"),Literal(paper['paperId'])))
batch.add((s,URIRef("http://purl.org/dc/terms/title"),Literal(paper['title'])))
batch.add((s,URIRef("http://purl.org/dc/terms/abstract"),Literal(paper.abstract)))
doi = 'https://doi.org/'+paper['externalIds']['DOI']
batch.add((s,URIRef("http://prismstandard.org/namespaces/1.2/basic/doi"),URIRef(doi)))
if paper['publicationDate'] != None:
date = paper['publicationDate'].split('-')
date_obj = datetime.date(int(date[0]), int(date[1]), int(date[2]))
else:
year = paper['year']
date_obj = datetime.date(year, 1, 1)
date_str = date_obj.isoformat()
date_literal = Literal(date_str, datatype=XSD.date)
batch.add((s,URIRef("http://purl.org/dc/terms/created"),Literal(date_literal)))
for author in paper.authors:
if author.authorId != None:
batch.add((s,URIRef("http://purl.org/dc/terms/contributor"),URIRef(author['url'])))
for referenceId in [ref.paperId for ref in paper.references if ref.paperId != None]:
batch.add((s,URIRef("http://purl.org/ontology/bibo/cites"),URIRef('http://example.org/'+referenceId)))
return batch
def add_geonames_metadata(batch):
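    # Resolve geonames links to sws.geonames.org IRIs and pull names plus WGS84 coordinates from the FactForge endpoint via a federated SERVICE query.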
query = """
PREFIX wgs84_pos: <http://www.w3.org/2003/01/geo/wgs84_pos#>
prefix gn: <http://www.geonames.org/ontology#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
construct {
?location gn:locatedIn ?geonamesId .
?geonamesId rdf:type gn:Feature .
?geonamesId gn:name ?label .
?geonamesId wgs84_pos:long ?long .
?geonamesId wgs84_pos:lat ?lat .
} where {
?location gn:locatedIn ?geoLocation .
BIND(IRI(CONCAT(CONCAT("http://sws.geonames.org/",REPLACE(STR(?geoLocation),"https://www.geonames.org/", "")),"/")) AS ?geonamesId)
SERVICE <http://factforge.net/repositories/ff-news> {
?geonamesId gn:name ?label .
?geonamesId wgs84_pos:long ?long .
?geonamesId wgs84_pos:lat ?lat .
FILTER ( datatype(?lat) = xsd:float)
FILTER ( datatype(?long) = xsd:float)
}
}
"""
query_result = batch.query(query)
geo = Graph()
for row in query_result:
s, p, o = row
geo.add((s, p, o))
delete_query = """
prefix gn: <http://www.geonames.org/ontology#>
delete {?location gn:locatedIn ?geoLocation } where {?location gn:locatedIn ?geoLocation }
"""
batch.update(delete_query)
batch += geo
return batch
def validate_graph(batch,shacl_file):
shacl_graph = Graph()
shacl_graph.parse(shacl_file, format="ttl")
print(shacl_graph.serialize(format='turtle'))
r = validate(batch,
shacl_graph=shacl_graph)
conforms, results_graph, results_text = r
if conforms:
print("Validation successful. No violations found.")
else:
print("Validation failed. Violations found.")
# Extract the violations from the results_graph
violations = list(results_graph.triples((None, None, None)))
        # Report how large the validation report is, then print it in full
        print(f"Validation report contains {len(violations)} triples")
        print(results_graph.serialize(format='turtle'))
def get_variables(batch):
query = """
PREFIX qb: <http://purl.org/linked-data/cube#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
select ?concept ?label where {
VALUES ?type {qb:DimensionProperty qb:MeasureProperty}.
?concept a ?type .
?concept rdfs:label ?label
}
"""
results = batch.query(query)
data = []
for row in results:
data.append({'concept':row.concept, 'label':row.label })
return pd.DataFrame(data)
def get_nes(var):
# Process the text with SpaCy
doc = nlp(var)
    # Bare nouns are collected here but not used further; the noun phrases built below are returned
    nouns = [token.text for token in doc if token.pos_ == "NOUN"]
# Retrieve noun phrases (nouns with their modifying adjectives)
noun_phrases = []
current_phrase = ""
for token in doc:
if token.pos_ == "NOUN" or token.pos_ == "ADJ":
current_phrase += token.text + " "
else:
if current_phrase.strip() != "":
noun_phrases.append(current_phrase.strip())
current_phrase = ""
return noun_phrases
def clean_terms_for_mapping(graph_df):
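    # For every label column: extract noun phrases, strip words such as 'inequality' and 'population', split on '/' and spaces, and add word-form variants to widen term matching.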
for var in graph_df.columns.tolist():
if var.endswith('label'):
dic = dict()
dic = {idx:get_nes(value.n3().split('@en')[0]) if value else '' for idx,value in enumerate(graph_df[var].values)}
dic = {key: [item.lower().replace('inequality','') for item in value] for key, value in dic.items()}
dic = {key: [item.lower().replace('population','') for item in value] for key, value in dic.items()}
dic = {key: [item.lower().replace('composition','') for item in value] for key, value in dic.items()}
dic = {key: [item.lower().replace('equality','') for item in value] for key, value in dic.items()}
dic = {key: [value+re.split('/| ',item) for item in value] for key, value in dic.items()}
dic = {key: list(chain.from_iterable(value)) for key, value in dic.items()}
dic = {key: list(set(value+list(chain.from_iterable([list(get_word_forms(item,0.7)['n']) for item in value])))) for key, value in dic.items()}
dic = {key: [item for item in value if item != ''] for key, value in dic.items()}
graph_df[var+'_cleaned'] = dic
return graph_df
def map_to_bioportal(df):
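    # Map each cleaned term list to MESH/DOID/HHEAR/SIO/IOBC classes with text2term's BioPortal mapper, keeping matches that score at least 0.9.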
mappings = pd.DataFrame(columns=["Source Term ID","Source Term","Mapped Term Label","Mapped Term CURIE","Mapped Term IRI","Mapping Score","Tags"])
for idx,row in df.iterrows():
try:
mapping = text2term.map_terms(row.values[0],
target_ontology='MESH,DOID,HHEAR,SIO,IOBC',
min_score=0.9,
separator=',',
use_cache=True,
term_type='classes',
mapper="bioportal",
incl_unmapped=False)
mappings = pd.concat([mappings, mapping], ignore_index=True)
        except Exception:
            # Skip terms that text2term/BioPortal cannot map (e.g. empty inputs or API errors)
            pass
return mappings
def retrieve_mappings(row_graph_df,mappings):
superstring = ', '.join(row_graph_df[0])
if superstring:
return list(set([(row['Mapped Term IRI'],row['Mapped Term Label']) for idx,row in mappings.iterrows() if row['Source Term'].lower() in superstring.lower()]))
else:
return None
def annotate_graph_bioportal(batch):
#get variables to map
df = get_variables(batch)
#clean variables (split terms, find variations)
df = clean_terms_for_mapping(df)
#find mappings for cleaned terms
mappings = map_to_bioportal(df[['label_cleaned']])
df['mappings'] = df[['label_cleaned']].apply(retrieve_mappings, axis=1,args=(mappings,))
#add mappings to concepts
bioIRIs = Graph()
for idx,row in df.iterrows():
if row.mappings:
for identifier,label in row['mappings']:
bioIRIs.add((URIRef(row['concept']),URIRef("http://purl.org/linked-data/cube#concept"),URIRef(identifier)))
bioIRIs.add((URIRef(identifier),RDFS.label,Literal(label)))
bioIRIs.add((URIRef(identifier),RDF.type,SKOS.Concept))
batch += bioIRIs
return batch
def main(paper_file, api_key, max, output, view=0):
print("Loading data from file "+paper_file+"...")
with open(paper_file, 'rb') as fp:
papers = pickle.load(fp)
    print("OpenAI annotation of batch ...")
batch = semantify_paper_batch(papers,api_key,max)
if view:
print(batch.serialize(format='turtle'))
print("Process graph ...")
batch = process_graph(batch)
if view:
print(batch.serialize(format='turtle'))
print("Add bibliographic information ...")
batch = add_bibo_metadata(papers,batch)
if view:
print(batch.serialize(format='turtle'))
print("Link geonames metadata ...")
batch = add_geonames_metadata(batch)
if view:
print(batch.serialize(format='turtle'))
print("Link concepts to terms from BioPortal (can take longer) ...")
batch = annotate_graph_bioportal(batch)
if view:
print(batch.serialize(format='turtle'))
print("Store graph to file "+output+" ...")
batch.serialize(output,format="ttl")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Script turning paper abstracts into RDF in terms of the MIRA ontology.")
parser.add_argument("--paper_file", type=str, help="Path to the file with paper abstracts. File content has to be a list of dictionaries with the following keys: dict_keys(['paperId','title','abstract','year','publicationDate','authors','references'])")
parser.add_argument("--api_key", type=str, help="Key for openai.api_key")
parser.add_argument("--output", type=str, help="Path to .ttl file for storing the output graph.")
parser.add_argument("--max", type=int, help="Max number of files to process.")
parser.add_argument("--view", type=int, help="Print the annotations after each processing step, for debugging. Default 0")
args = parser.parse_args()
main(args.paper_file, args.api_key, args.max, args.output,args.view)
| [
"\n 'PLACEHOLDER'\n\n Describe the claim of the abstract above only using RDF (the turtle syntax), and using the following ontology:\n\n :hasStudy rdfs:domain bibo:AcademicArticle .\n :hasStudy rdfs:range sio:ObservationalStudy .\n :hasHypothesis rdfs:domain sio:ObservationalStudy .\n :hasHypothesis rdfs:range sio:Hypothesis .\n :independentVariable rdfs:domain sio:Hypothesis.\n :independentVariable rdfs:range qb:DimensionProperty .\n :mediatorVariable rdfs:domain sio:Hypothesis .\n :mediatorVariable rdfs:range :DimensionProperty .\n :dependentVariable rdfs:domain sio:Hypothesis .\n :dependentVariable rdfs:range qb:MeasureProperty.\n :hasRelation rdfs:domain :Hypothesis .\n :hasRelation rdfs:range :RelationProperty .\n :hasQualifier rdfs:domain :Hypothesis .\n :hasQualifier rdfs:range :Qualifier .\n :moderatorVariable rdfs:domain sio:Hypothesis .\n :moderatorVariable rdfs:range :DimensionProperty .\n :moderatorEffectOnStatementStrength rdfs:domain :Hypothesis .\n :moderatorEffectOnStatementStrength rdfs:range :Qualifier .\n :moderatorContext rdfs:domain sio:Hypothesis . \n :moderatorContext rdfs:range sio:HumanPopulation, sio:GeographicRegion, sio:Organization . \n :hasContext rdfs:domain sio:Hypothesis, :Moderator, :Mediator .\n :hasContext rdfs:range sio:HumanPopulation, sio:GeographicRegion, sio:Organization .\n :representedBy rdfs:domain sio:HumanPopulation, sio:GeographicRegion, sio:Organization .\n :representedBy rdfs:range :Sample .\n time:hasTime rdfs:domain :Sample .\n time:hasTime rdfs:range time:TemporalEntity .\n sem:hasPlace rdfs:domain :Sample .\n sem:hasPlace rdfs:range sio:GeographicRegion .\n geonames:locatedIn rdfs:domain sio:GeographicRegion .\n geonames:locatedIn rdfs:range geonames:Feature .\n time:hasBeginning rdfs:domain rdf:TemporalEntity .\n time:hasBeginning rdfs:range time:Instant .\n time:hasEnd rdfs:domain rdf:TemporalEntity .\n time:hasEnd rdfs:range time:Instant .\n time:inXSDDate rdfs:domain time:Instant .\n time:inXSDDate rdfs:range rdf:XMLLiteral .\n\n 1. use rdfs:label to describe all blank nodes, also the geographic region. Use short descriptions, pieces of text verbatim from the abstract, and add language tags to all labels. An example would be: [] :hasSubject [ rdfs:label 'social class'@en ].\n\n 2. for instances of the class geonames:Feature, find the URI for the place name in geonames (uri = https://www.geonames.org/<code>) like so: [] geonames:locatedIn <uri> (Don't leave any spaces between the URI and angle brackets). If you cannot find a place in the abstract, omit these triples.\n\n 3. use prefixes (xsd,time,ex,geonames,rdf,rdfs,sem,skos,sio,sp) (the namespace for sp is https://w3id.org/linkflows/superpattern/latest/)\n\n 4. the individual of bibo:AcademicArticle is: ex:PLACEHOLDER. Don't enclose the individual with brackets.\n\n 5. If you can't find a time reference in the abstract, try to estimate the dates.\n\n 6. include all classes of all individuals using rdf:type\n\n 7. a hypothesis describes the effect of an independent variable (such as social class or age) on a dependent variable (such as mortality). Optional variables are: a mediating variable (such as a country's living standards), which explains the process through which the independent and dependent variables are related, and a moderating variable which affects the strength and direction of that relationship.\n\n 8. for values of :hasRelation use sp:affects\n\n 9. 
for qualifiers, choose from the following: :strongMediumNegative, :strongMedium, :weakNegative, :weak, :no, :weakPositive, :strongMediumPositive)\n\n 10. don't create your own identifiers but use blank nodes in case no IRI is available\n\n 11. make sure to add a qualifier for the relation between independent and dependent variable, but also to the moderator.\n\n 12. link hypothesis contexts to study indicators of that context. For example, if the modifier variable is food prices, it could be that the context is a geographic region with the indicator Recession.\n\n Only return proper RDF, no free text comments.\n "
] |
2024-01-10 | fermellone/my-assistant | speech_to_text.py | def transcript(file_path: str) -> str:
import openai, os
    # Use a context manager so the audio file handle is closed after the request
    with open(file_path, "rb") as audio_file:
        transcription = openai.Audio.transcribe(
            model="whisper-1", file=audio_file, api_key=os.getenv("OPENAI_API_KEY")
        )
return transcription["text"]
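# Usage sketch (illustrative; assumes a local "sample.mp3" and OPENAI_API_KEY set in the environment):
# if __name__ == "__main__":
#     print(transcript("sample.mp3"))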
| [] |
2024-01-10 | fermellone/my-assistant | assistant~assistant.py | def ask_assistant(transcription: str) -> str:
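    # Send the transcribed utterance to gpt-3.5-turbo with function calling; if the model requests a tool, run it and ask the model again for the final phrasing.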
import openai, os, json
from assistant.openai_functions import (
function_descriptions,
available_functions,
)
final_answer = ""
messages = [
{
"role": "system",
"content": "You are a helpful assistant. You only understand English and Spanish. If you don't understand something, you can ask the user to repeat the sentence.",
},
{"role": "user", "content": "Hello, who are you?"},
{
"role": "assistant",
"content": "I am an AI created by OpenAI. How can I help you today?",
},
{"role": "user", "content": transcription},
]
chat = openai.ChatCompletion.create(
api_key=os.getenv("OPENAI_API_KEY"),
model="gpt-3.5-turbo-0613",
messages=messages,
functions=function_descriptions,
function_call="auto",
)
response_message = chat["choices"][0]["message"]
if response_message.get("function_call"):
        # Look up and execute the tool the model asked for
        function_name = response_message["function_call"]["name"]
        function_to_call = available_functions[function_name]
        function_args = json.loads(response_message["function_call"]["arguments"])
        function_response = function_to_call(function_args)
messages.append(response_message)
messages.append(
{
"role": "function",
"name": function_name,
"content": function_response,
}
)
        second_response = openai.ChatCompletion.create(
            api_key=os.getenv("OPENAI_API_KEY"),
            model="gpt-3.5-turbo-0613",
            messages=messages,
        )
final_answer = second_response["choices"][0]["message"]
else:
final_answer = response_message
return final_answer["content"]
| [
"You are a helpful assistant. You only understand English and Spanish. If you don't understand something, you can ask the user to repeat the sentence.",
"Hello, who are you?",
"I am an AI created by OpenAI. How can I help you today?"
] |
2024-01-10 | monum/llm-prototypes | server~azure-vector-search.py | import os
import datetime
import openai
from flask import Flask
from flask import request
from flask_cors import CORS
from llama_index import SimpleDirectoryReader
from llama_index import VectorStoreIndex
from llama_index import Document
from azure.storage.blob import BlobServiceClient
# from tenacity import retry, wait_random_exponential, stop_after_attempt
from azure.core.credentials import AzureKeyCredential
from azure.search.documents import SearchClient
from azure.search.documents.indexes import SearchIndexClient
from azure.search.documents.models import Vector
from azure.search.documents.indexes.models import (
SearchIndex,
SearchField,
SearchFieldDataType,
SimpleField,
SearchableField,
SearchIndex,
SearchField,
VectorSearch,
VectorSearchAlgorithmConfiguration,
)
import config
from llama_index import download_loader
from llama_index import Document
from llama_index.node_parser import SimpleNodeParser
########################### Set up ############################
# Set up azure cognitive search
service_endpoint = config.AZURE_SEARCH_ENDPOINT
index_name = config.AZURE_SEARCH_INDEX_NAME
key = config.AZURE_SEARCH_ADMIN_KEY
# Set up OpenAI
os.environ['OPENAI_API_KEY'] = config.OPENAI_API_KEY
# os.environ["OPENAI_API_TYPE"] = "azure"
# os.environ["OPENAI_API_VERSION"] = "2023-05-15"
# os.environ["OPENAI_API_BASE"] = config.AZURE_OPENAI_ENDPOINT
# os.environ["OPENAI_API_KEY"] = config.AZURE_OPENAI_API_KEY
openai.api_key = config.OPENAI_API_KEY
# Set up flask app
app = Flask(__name__)
CORS(app)
########################### Flask App Routes ############################
@app.route("/query", methods=["POST"])
def query():
request_data = request.get_json()
query = request_data['question']
if query is None:
return "No text found:(", 201
print("User query: " + query)
# Source file retrieval
try:
search_client = SearchClient(service_endpoint, index_name, credential=AzureKeyCredential(key))
results = search_client.search(
search_text=query,
select=["department", "organization", "filename", "date", "content", "url"],
)
except Exception as e:
print(e)
return "Error: failed to retrieve relevant source files", 500
# LLM response
# construct documents from source files
docs = []
sources = []
for result in results:
doc = Document(
text=result["content"]
)
doc.extra_info = {
"department": result["department"],
"organization":result["organization"],
"filename": result["filename"],
"url": result["url"],
"date": result["date"],
# "content": result["content"],
"relevance": result['@search.score']
}
# source = {
# "department": result["department"],
# "organization":result["organization"],
# "filename": result["filename"],
# "url": result["url"],
# "date": result["date"],
# "content": result["content"],
# "score": result['@search.score']
# }
docs.append(doc)
# sources.append(source)
try:
index = VectorStoreIndex.from_documents(docs)
query_engine = index.as_query_engine()
res = query_engine.query(query)
response = {
"answer": res.response,
"confidence": "",
"sources": res.source_nodes
}
except Exception as e:
print(e)
return "Error: failed to generate response from source files", 500
return response, 200
@app.route("/upload/url", methods=["POST"])
def upload_url():
url = request.form.get("url", None)
# Load data from url using LlamaIndex loader
SimpleWebPageReader = download_loader("SimpleWebPageReader")
loader = SimpleWebPageReader()
documents = loader.load_data(urls=[url])
node_parser = SimpleNodeParser.from_defaults(chunk_size=config.NODE_PARSER_CHUNK_SIZE, chunk_overlap=config.NODE_PARSER_CHUNK_OVERLAP)
nodes = node_parser.get_nodes_from_documents(documents)
# Store in Cognitive Search index
try:
index_docs = []
for document in nodes:
description = request.form.get("description", None)
department = request.form.get("label", None)
org = request.form.get("org", None)
content_text = document.text
search_index_entry = {
"id": document.doc_id,
"description": description,
"content": content_text,
"department": department,
"organization": org,
"filename": url,
"url": url,
"date": str(datetime.date.today()),
"description_vector": generate_embeddings(description),
"content_vector": generate_embeddings(content_text)
}
index_docs.append(search_index_entry)
search_client = SearchClient(endpoint=service_endpoint, index_name=index_name, credential=AzureKeyCredential(key))
result = search_client.merge_or_upload_documents(documents = index_docs)
print("Upload of new document succeeded: {}".format(result[0].succeeded))
except Exception as e:
print(e)
return "Error: {}".format(str(e)), 500
return "Url uploaded!", 200
@app.route("/upload/file", methods=["POST"])
def upload_file():
if 'file' not in request.files:
return "Please send a POST request with a file", 400
# Read file to local directory
filepath = None
try:
new_file = request.files["file"]
print(new_file)
filename = new_file.filename
filepath = os.path.join('documents', os.path.basename(filename))
new_file.save(filepath)
documents = SimpleDirectoryReader(input_files=[filepath]).load_data()
node_parser = SimpleNodeParser.from_defaults(chunk_size=8000, chunk_overlap=200)
nodes = node_parser.get_nodes_from_documents(documents)
except Exception as e:
print(e)
if filepath is not None and os.path.exists(filepath):
os.remove(filepath)
return "Error: {}".format(str(e)), 500
# Store in blob storage
try:
blob_service_client = BlobServiceClient.from_connection_string(config.AZURE_STORAGE_ACCESS_KEY)
blob_client = blob_service_client.get_blob_client(container=config.AZURE_STORAGE_CONTAINER, blob=documents[0].extra_info['file_name'])
print("\nUploading to Azure Storage as blob:\n\t" + documents[0].extra_info['file_name'])
with open(file=filepath, mode="rb") as data:
blob_client.upload_blob(data)
url = blob_client.url
except Exception as e: # if file already exists, ask if continue to upload
        if getattr(e, "error_code", None) == "BlobAlreadyExists":  # non-Azure exceptions have no error_code attribute
print("Blob already exists!")
return "Blob already Exists!", 201
return "Error: {}".format(str(e)), 500
# Store in Cognitive Search index
try:
index_docs = []
for document in nodes:
description = request.form.get("description", None)
file_name = document.extra_info['file_name']
department = request.form.get("label", None)
org = request.form.get("org", None)
content_text = document.text
search_index_entry = {
"id": document.doc_id,
"description": description,
"content": content_text,
"department": department,
"organization": org,
"filename": file_name,
"url": url,
"date": str(datetime.date.today()),
"description_vector": generate_embeddings(description),
"content_vector": generate_embeddings(content_text)
}
index_docs.append(search_index_entry)
search_client = SearchClient(endpoint=service_endpoint, index_name=index_name, credential=AzureKeyCredential(key))
result = search_client.merge_or_upload_documents(documents = index_docs)
print("Upload of new document succeeded: {}".format(result[0].succeeded))
except Exception as e:
print(e)
if filepath is not None and os.path.exists(filepath):
os.remove(filepath)
return "Error: {}".format(str(e)), 500
os.remove(filepath)
return "File uploaded!", 200
@app.route("/get_files", methods=["GET"])
def get_files():
try:
# blob_service_client = BlobServiceClient.from_connection_string(config.AZURE_STORAGE_ACCESS_KEY)
# container_client = blob_service_client.get_container_client(container=config.AZURE_STORAGE_CONTAINER)
# blob_list = container_client.find_blobs_by_tags("category")
label = request.args.get("label")
search_client = SearchClient(endpoint=service_endpoint, index_name=index_name, credential=AzureKeyCredential(key))
results = search_client.search(
search_text="*",
filter="department eq '"+label+"'",
select="filename, url"
)
except Exception as e:
print(e)
return "Error: {}".format(str(e)), 500
files = []
for result in results:
print(result)
files.append({"name": result["filename"], "url": result["url"]}) # get url
return files, 200
########################### Embeddings & Indexing #########################
def generate_embeddings(text):
'''
Generate embeddings from text string
input: text string to be embedded
output: text embeddings
'''
response = openai.Embedding.create(input=text, engine=config.OPENAI_EMBEDDING_MODEL)
embeddings = response['data'][0]['embedding']
return embeddings
def create_search_index(index_client):
'''
Create a search index with config settings
'''
fields = [
SimpleField(name="id", type=SearchFieldDataType.String, key=True, sortable=True, filterable=True, facetable=True),
SearchableField(name="description", type=SearchFieldDataType.String),
SearchableField(name="content", type=SearchFieldDataType.String),
SearchableField(name="department", type=SearchFieldDataType.String, filterable=True),
SearchableField(name="organization", type=SearchFieldDataType.String, filterable=True),
SearchableField(name="filename", type=SearchFieldDataType.String, filterable=True, searchable=True),
SearchableField(name="url", type=SearchFieldDataType.String, filterable=True),
SearchableField(name="date", type=SearchFieldDataType.String, filterable=True),
SearchField(name="description_vector", type=SearchFieldDataType.Collection(SearchFieldDataType.Single),
searchable=True, dimensions=1536, vector_search_configuration="my-vector-config"),
SearchField(name="content_vector", type=SearchFieldDataType.Collection(SearchFieldDataType.Single),
searchable=True, dimensions=1536, vector_search_configuration="my-vector-config"),
]
vector_search = VectorSearch(
algorithm_configurations=[
VectorSearchAlgorithmConfiguration(
name="my-vector-config",
kind="hnsw",
parameters={
"m": 4,
"efConstruction": 400,
"efSearch": 500,
"metric": "cosine"
}
)
]
)
# Create the search index with the semantic settings
index = SearchIndex(name=index_name, fields=fields, vector_search=vector_search)
result = index_client.create_or_update_index(index)
print(f' {result.name} created')
def search_index_exist(index_client):
index_names = index_client.list_index_names()
for name in index_names:
if name == index_name:
return True
return False
if __name__ == "__main__":
index_client = SearchIndexClient(
endpoint=service_endpoint,
credential=AzureKeyCredential(key)
)
    # If the search index does not exist yet, create it
if (not search_index_exist(index_client)):
print(index_name, " does not exist, creating new search index...")
create_search_index(index_client)
app.run(host="0.0.0.0", port=5601) | [] |
2024-01-10 | adamingwersen/slidebitch | python~vectordb~peek.py | import sys
import os
import chromadb
from chromadb.config import Settings
from pprint import pprint as pp
from dotenv import load_dotenv
from openai_embedding_function import get_openai_ef
load_dotenv()
COLLECTION_NAME = os.getenv("COLLECTION_NAME")
PERSIST_DIRECTORY = os.getenv("PERSIST_DIRECTORY")
def pretty_query_results(results) -> dict:
new_results = list()
for i in range(0, len(results)):
new_result = dict()
new_result["distance"] = results["distances"][0][i]
new_result["metadata"] = results["metadatas"][0][i]
new_result["metadata"] = results["metadatas"][0][i]
new_result["id"] = results["ids"][0][i]
new_results.append(new_result)
return new_results
def find(collection: chromadb.Client,
search_term: str):
res = collection.query(
query_texts=search_term,
n_results=5)
pp(pretty_query_results(res))
def find_formatted(collection: chromadb.Client,
search_term: str, n_results: int = 5):
res = collection.query(
query_texts=search_term,
n_results=n_results)
return pretty_query_results(res)
if __name__ == "__main__":
load_dotenv()
openai_ef = get_openai_ef(os.getenv("OPENAI_API_KEY"))
chroma_client = chromadb.Client(
Settings(persist_directory=PERSIST_DIRECTORY, chroma_db_impl="duckdb+parquet"))
collection = chroma_client.get_collection(
name=COLLECTION_NAME)
search_term = sys.argv[1]
find(collection, search_term)
| [] |
2024-01-10 | harbidel/LLM_Question_Answering_APP | chat_with_documents.py | import streamlit as st
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
def load_document(file):
import os
name, extension = os.path.splitext(file)
if extension == '.pdf':
from langchain.document_loaders import PyPDFLoader
print(f'Loading {file}')
loader = PyPDFLoader(file)
elif extension == '.docx':
from langchain.document_loaders import Docx2txtLoader
print(f'Loading {file}')
loader = Docx2txtLoader(file)
elif extension == '.txt':
from langchain.document_loaders import TextLoader
print(f'Loading {file}')
loader = TextLoader(file, encoding='utf-8')
else:
print('Document format is not supported!')
return None
data = loader.load()
return data
def chunk_data(data, chunk_size=256, chunk_overlap=20):
from langchain.text_splitter import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
chunks = text_splitter.split_documents(data)
return chunks
def create_embeddings(chunks):
embeddings = OpenAIEmbeddings()
vector_store = Chroma.from_documents(chunks, embeddings)
return vector_store
def ask_and_get_answer(vector_store, q, k=3):
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
llm = ChatOpenAI(model='gpt-3.5-turbo', temperature=1)
retriever = vector_store.as_retriever(search_type='similarity', search_kwargs={'k': k})
chain = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever)
answer = chain.run(q)
return answer
def calculate_embedding_cost(texts):
import tiktoken
enc = tiktoken.encoding_for_model('text-embedding-ada-002')
total_tokens = sum([len(enc.encode(page.page_content)) for page in texts])
# print(f'Total Tokens: {total_tokens}')
# print(f'Embedding cost in USD: {total_tokens / 1000* 0.0004:.6f}')
return total_tokens, total_tokens / 1000 * 0.0004
answer = ""
def clear_history():
if 'history' in st.session_state:
del st.session_state['history']
if __name__ == "__main__":
import os
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv(), override=True)
st.image('img.jpg')
st.subheader('LLM Question-Answering Application')
with st.sidebar:
api_key = st.text_input('OpenAI API Key:', type='password')
if api_key:
os.environ['OPENAI_API_KEY'] = api_key
uploaded_file = st.file_uploader('Upload a file:', type=['pdf', 'docx', 'txt'])
chunk_size = st.number_input('Chunk_size:', min_value=100, max_value=2048, value=512, on_change=clear_history)
k = st.number_input('k', min_value=1, max_value=20, value=3, on_change=clear_history)
add_data = st.button('Add Data', on_click=clear_history)
if uploaded_file and add_data:
with st.spinner('Reading, Chunking and embedding file ...'):
bytes_data = uploaded_file.read()
file_name = os.path.join('./', uploaded_file.name)
with open(file_name, 'wb') as f:
f.write(bytes_data)
data = load_document(file_name)
chunks = chunk_data(data, chunk_size=chunk_size)
st.write(f'Chunk size: {chunk_size}, Chunks: {len(chunks)}')
tokens, embedding_cost = calculate_embedding_cost(chunks)
st.write(f'Embedding cost: ${embedding_cost:.4f}')
vector_store = create_embeddings(chunks)
st.session_state.vs = vector_store
st.success('File uploaded, chunked and embedded successfully')
q = st.text_input('Ask a question about the content of your file:')
if q:
if 'vs' in st.session_state:
vector_store = st.session_state.vs
st.write(f'k: {k}')
answer = ask_and_get_answer(vector_store, q, k)
st.text_area('LLM Answer: ', value=answer)
st.divider()
if 'history' not in st.session_state:
st.session_state.history = ''
value = f'q: {q} \nA: {answer}'
st.session_state.history = f'{value} \n {"-" * 100} \n {st.session_state.history}'
h = st.session_state.history
st.text_area(label='Chat History', value=h, key='history', height=400)
| [] |
2024-01-10 | IshanG97/food-for-thought | api~index.py | import base64
import json
import pathlib
from datetime import datetime
import requests
import uvicorn
from anthropic import Anthropic
from fastapi import FastAPI, Form, Response, WebSocket, WebSocketDisconnect, Request, HTTPException
from langchain.chat_models import ChatAnthropic
from langchain.embeddings import OpenAIEmbeddings
from langchain.document_loaders import (
TextLoader,
WebBaseLoader,
)
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain.text_splitter import (
RecursiveCharacterTextSplitter,
)
from langchain.vectorstores import Chroma
from pydantic import BaseModel
from twilio.rest import Client
from twilio.twiml.voice_response import Gather, Say, Start, Stop, VoiceResponse
import constants
from prompts import SYSTEM_MESSAGE, format_snippets, get_booking, get_speech
from voice import transcribe_audio
RESTAURANT = "ChIJ8YR2BUkbdkgRmxJhDIsuy2U"
NUM_SNIPPETS = 3
VOICE = "Google.en-GB-Wavenet-B"
app = FastAPI()
anthropic = Anthropic(api_key=constants.ANTHROPIC_API_KEY)
twilio = Client(username=constants.TWILIO_ACCOUNT_SID, password=constants.TWILIO_AUTH_TOKEN)
calls = {}
model = ChatAnthropic(
model="claude-instant-1",
anthropic_api_key=constants.ANTHROPIC_API_KEY,
)
restaurants = {}
def hms():
return datetime.now().strftime("%H:%M:%S.%f")[:-3]
@app.post("api/call")
async def call(CallSid: str = Form(), From: str = Form()):
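    # Twilio webhook for an inbound call: initialise per-call state, greet the caller, then redirect to /record.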
print(f"[{hms()}] Incoming call from {From}")
# Initialise call data
restaurant_name = restaurants[RESTAURANT]["place_details"]["name"]
calls[CallSid] = {
"booking": {},
"from": From,
"message_history": [],
"restaurant_name": restaurant_name,
"transcripts": [],
}
# Set up agent
calls[CallSid]["agent"] = {"index": restaurants[RESTAURANT]["index"]}
response = VoiceResponse()
greeting = f"Thank you for calling {restaurant_name}!"
print(f"[{hms()}] Greeting the caller with: '{greeting}'")
say = Say("", language="en-GB", voice=VOICE)
say.prosody(greeting, rate="135%")
response.append(say)
response.redirect(f"https://{constants.NGROK_DOMAIN}/record", method="POST")
return Response(content=str(response), media_type="application/xml")
@app.post("api/record")
async def record(CallSid: str = Form()):
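    # Open a Twilio Media Stream to the /transcribe websocket and <Gather> speech until the caller pauses, then hand off to /stop.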
print(f"[{hms()}] Recording")
response = VoiceResponse()
calls[CallSid]["transcripts"].append("")
start = Start()
start.stream(url=f"wss://{constants.NGROK_DOMAIN}/transcribe", name=CallSid)
response.append(start)
gather = Gather(
action="/stop",
actionOnEmptyResult=True,
input="speech",
speechTimeout="auto",
profanityFilter=False,
transcribe=False,
)
response.append(gather)
return Response(content=str(response), media_type="application/xml")
@app.post("api/stop")
async def stop(CallSid: str = Form()):
print(f"[{hms()}] Stopping recording")
response = VoiceResponse()
stop = Stop()
stop.stream(name=CallSid)
response.append(stop)
response.redirect(f"https://{constants.NGROK_DOMAIN}/respond", method="POST")
return Response(content=str(response), media_type="application/xml")
@app.post("api/respond")
async def respond(CallSid: str = Form()):
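    # Wait for the websocket transcription, retrieve the top matching snippets from the restaurant index, and have Claude produce the spoken reply (plus optional booking/hangup tags).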
    print(f"[{hms()}] Waiting for transcript")
    # Busy-wait until the websocket handler has filled in the latest transcript
    while not calls[CallSid]["transcripts"][-1]:
        continue
    transcript = calls[CallSid]["transcripts"][-1]
print(f"[{hms()}] Responding to message: '{transcript}'")
response = VoiceResponse()
print(f"[{hms()}] Obtaining relevant snippets from the database")
index = calls[CallSid]["agent"]["index"]
search = index.similarity_search_with_score(transcript, k=NUM_SNIPPETS)
snippets = [d[0].page_content for d in search]
print(f"[{hms()}] Calling claude-instant-1")
message_history = calls[CallSid]["message_history"]
message_history.append(HumanMessage(content=transcript))
messages = [
SystemMessage(
content=SYSTEM_MESSAGE.format(
snippets=format_snippets(snippets),
)
)
] + message_history
completion = model(messages).content
message_history.append(AIMessage(content=completion))
print(f"[{hms()}] Received the claude-instant-1 completion: '{completion}'")
say = Say("", voice=VOICE)
say.prosody(get_speech(completion), rate="135%")
response.append(say)
if "<booking>" in completion:
booking = get_booking(completion)
if booking:
print(f"[{hms()}] Booking details: {booking}")
calls[CallSid]["booking"] = booking
if "<hangup/>" in completion:
print(f"[{hms()}] Hanging up")
response.hangup()
response.redirect(f"https://{constants.NGROK_DOMAIN}/record", method="POST")
return Response(content=str(response), media_type="application/xml")
@app.websocket("/transcribe")
async def transcribe(websocket: WebSocket) -> None:
"""Fast, accurate multilingual audio transcription over websockets."""
await websocket.accept()
chunks = []
call_sid = None
while True:
try:
message = await websocket.receive_text()
data = json.loads(message)
if data["event"] == "start":
call_sid = data["start"]["callSid"]
elif data["event"] == "media":
payload = data["media"]["payload"]
chunks.append(base64.b64decode(payload))
elif data["event"] == "stop" or data["event"] == "closed":
break
except WebSocketDisconnect:
break
calls[call_sid]["transcripts"][-1] = transcribe_audio(chunks)
BOOKING_MESSAGE = """Your booking for {num_people} people under the name {name}
at {time} has been confirmed. We look forward to seeing you! {restaurant_name}
""".replace(
"\n", " "
)
@app.post("api/status")
async def status(CallSid: str = Form(), CallStatus: str = Form()) -> None:
"""Sends a booking confirmation via SMS."""
if CallStatus == "completed":
if booking := calls[CallSid]["booking"]:
phone_number = calls[CallSid]["from"]
print(f"[{hms()}] Sending booking confirmation to {phone_number}")
twilio.messages.create(
body=BOOKING_MESSAGE.format(
num_people=booking["num_people"],
name=booking["name"],
time=booking["time"],
restaurant_name=calls[CallSid]["restaurant_name"],
),
from_=constants.TWILIO_PHONE_NUMBER,
to=phone_number,
)
@app.get("api/save-restaurant-data")
def save_restaurant_data(place_id: str) -> None:
if place_id in restaurants:
return
response = requests.get(
constants.PLACE_DETAILS_URL,
params={
"key": constants.GCP_API_KEY,
"place_id": place_id,
},
)
if response.status_code == 200:
place_details = response.json()["result"]
data_path = f"../data/{place_id}"
pathlib.Path(data_path).mkdir(parents=True, exist_ok=True)
# Save reviews
reviews = place_details["reviews"]
with open(f"{data_path}/reviews.txt", "w") as file:
for review in reviews:
if "text" in review:
file.write(review["text"] + "\n")
reviews_data = TextLoader(f"{data_path}/reviews.txt").load()
reviews_splits = RecursiveCharacterTextSplitter(
chunk_size=2000,
chunk_overlap=0,
separators=[],
).split_documents(reviews_data)
# Save other data
generic_data = {}
for k, v in place_details.items():
if k != "photos" and k != "reviews":
generic_data[k] = v
with open(f"{data_path}/data.json", "w") as json_file:
json.dump(generic_data, json_file, indent=4)
# TODO: This is a hack. Use a JSON data loader.
generic_data = TextLoader(f"{data_path}/data.json").load()
generic_data_splits = RecursiveCharacterTextSplitter(
chunk_size=2000,
chunk_overlap=0,
separators=[],
).split_documents(generic_data)
website_url = place_details.get("website", "")
website_data = WebBaseLoader(website_url).load()
web_splits = RecursiveCharacterTextSplitter(
chunk_size=2000,
chunk_overlap=0,
separators=[],
).split_documents(website_data)
# Build vector database for restaurant
embedding = OpenAIEmbeddings(openai_api_key=constants.OPENAI_API_KEY)
for idx, documents in enumerate([reviews_splits, generic_data_splits, web_splits]):
if idx == 0:
index = Chroma.from_documents(
documents=documents,
embedding=embedding,
persist_directory=data_path,
)
index.persist()
restaurants[place_id] = {
"index": index,
"place_details": place_details,
}
@app.get("/api/index")
def hello_world():
return {"message": "Hello World"}
# TODO: move types to separate file
class Position(BaseModel):
lat: float
lng: float
@app.post("api/position")
async def receive_position(position: Position):
lat = position.lat
lng = position.lng
# Write the coordinates to a JSON file
with open('output/position.json', 'w') as f:
json.dump({'lat': lat, 'lng': lng}, f)
find_restaurant(position)
@app.post("api/restaurant")
def find_restaurant(position: Position):
# Define the parameters
lat = position.lat
lng = position.lng
radius = 100
type = "restaurant"
api_key = constants.GCP_API_KEY
# Define the URL
url = f"https://maps.googleapis.com/maps/api/place/nearbysearch/json"
params = f"?location={lat}%2C{lng}&radius={radius}&type={type}&key={api_key}"
full_url = url + params
# Make the API request
response = requests.get(full_url)
# Check if the request was successful (status code 200)
if response.status_code == 200:
data = response.json()
results = data.get('results', [])
for place in results:
print(f"Name: {place['name']}, Location: {place['geometry']['location']}")
# Write the results to a JSON file
with open('output/restaurant.json', 'w') as f:
json.dump(results, f, indent=4)
return results
else:
raise HTTPException(status_code=400, detail="Failed to find restaurant")
@app.post("api/user")
async def store_user_pref(request: Request):
user_data = await request.json()
print(user_data)
with open('output/user.json', 'w') as f:
json.dump(user_data, f)
# TODO: We can load the data from disk, we don't need to run this every time
save_restaurant_data(RESTAURANT)
if __name__ == "__main__":
uvicorn.run("index:app", host="0.0.0.0", port=8000, reload=True)
find_restaurant(Position(lat=51.5074, lng=0.1278)) | [] |
2024-01-10 | Aeneator/Zenko-Challenge-Misclick | Assistant~gpt_funcs.py | import openai
import read_data
import pandas as pd
#chat_logs = {}
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
def answer(prompt, ip):
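    # Two-stage flow: first ask GPT whether the question matches a stored FAQ entry; if so, rephrase that answer, otherwise classify the question into a category and build a context-specific prompt (optionally with map pins).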
openai.api_key = open("API_KEY.txt", 'r').read()
df = pd.read_csv('DataFiles/FAQ.csv')
list_of_questions = ""
faq_list_questions = df["Questions"].tolist()
faq_list_answers = df["Answers"].tolist()
q_id = 0
for question in faq_list_questions:
q_id += 1
list_of_questions += ' ,' + str(q_id) + ' \"' + question + '\"'
faqTestResult = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user",
"content": "With the context that you are a festival assistant that has to answer questions. You have the following question from a festival participant:\"" + prompt + "\". Is the given question kind of similar in meaning has the same keywords, format or answer to any question in the following list of frequently asked questions:" + list_of_questions + "? If you can find a similar question return just YES and the number before the most similar question, example: 'YES,number', don't change the format: 'YES,number' and don't add anything more. If you can't find a question similar enough return just NO."}]
)
faq_content = faqTestResult['choices'][0]['message']['content']
if faqTestResult['choices'][0]['message']['content'].lower().find("yes") != -1:
AI_personality = "You are an AI Customer Relations, your role is to be the Central point of interaction with festival goers, your objectives are: Provide real-time information and Improve customer experience. "
Task = "Use the information \"" + faq_list_answers[int(faq_content.split(',')[1].strip()) - 1] + "\""
Limit = "don't change the sentence too much, but the message shouldn't exceed 300 characters."
# chat_log.append({"role": "user", "content": Task + " to answer: \"" + user_message + "\"." + Limit})
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": Task + " to answer: \"" + prompt + "\"." + Limit}]
)
final_response = response['choices'][0]['message']['content']
# chat_log.append({"role": "assistant", "content": response['choices'][0]['message']['content'].strip("\n").strip()})
return final_response, None
# df = pd.read_csv("DataFiles/FAQ.csv")
# list_of_questions = ""
#
# faq_list_questions = df["Questions"].tolist()
# faq_list_answers = df["Answers"].tolist()
#
# q_id = 0
# for question in faq_list_questions:
# q_id += 1
# list_of_questions += ' ,' + str(q_id) + ' \"' + question + '\"'
#
# faqTestResult = openai.ChatCompletion.create(
# model="gpt-3.5-turbo",
# messages=[{"role": "user",
# "content": "With the context that you are a festival assistant that has to answer questions. You have the following question from a festival participant:\"" + prompt + "\". Is the given question kind of similar in meaning has the same keywords, format or answer to any question in the following list of frequently asked questions:" + list_of_questions + "? If you can find a similar question return just YES and the number before the most similar question, example: 'YES,number', don't change the format: 'YES,number' and don't add anything more. If you can't find a question similar enough return just NO."}]
# )
#
# faq_content = faqTestResult['choices'][0]['message']['content']
# if faqTestResult['choices'][0]['message']['content'].lower().find("yes") != -1:
# AI_personality = "You are an AI Customer Relations, your role is to be the Central point of interaction with festival goers, your objectives are: Provide real-time information and Improve customer experience. "
# Task = "Use the information \"" + faq_list_answers[int(faq_content.split(',')[1].strip()) - 2] + "\""
# Limit = "don't change the sentence too much, but the message shouldn't exceed 300 characters."
#
#
# #chat_logs[ip].append({"role": "user", "content": AI_personality + Task + " to answer: \"" + prompt + "\"." + Limit})
# response = openai.ChatCompletion.create(
# model="gpt-3.5-turbo",
# messages=[{"role": "user", "content": AI_personality + Task + " to answer: \"" + prompt + "\"." + Limit}]
# )
# final_response = response['choices'][0]['message']['content']
# #chat_logs[ip].append({"role": "assistant", "content": response['choices'][0]['message']['content'].strip("\n").strip()})
# return final_response, None
else:
marker_list = None
print("Not a FAQ.")
base_string = "You are assisting with a festival and there are 10 categories: tickets, vital location, transport, food, music, program, beverage, urgency, history and other. If a question is not festival related it is considered other. In which category does this question fit (and only name exactly the category): "
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": base_string + prompt}]
)
tag_response = response['choices'][0]['message']['content'].lower()
AI_personality = "You are an AI Customer Relations, your role is to be the Central point of interaction with festival goers, your objectives are: Provide real-time information and Improve customer experience. "
Information = "Use the following information if needed: \""
Limit = "Give a concise answer in around a sentence "
beverages, foods, urgency, stages, toilets, buses, trains, recycle, streets, other = read_data.get_location_lists()
location_list = []
q_id = 0
if tag_response == "history":
Information += faq_list_answers[0]
if tag_response == "tickets":
for question in faq_list_questions:
q_id += 1
if question.find("ticket") != -1:
Information += ' ,' + ' \"' + faq_list_answers[q_id] + '\"'
elif tag_response == "vital location":
for question in faq_list_questions:
q_id += 1
if question.find("location") != -1:
Information += ' ,' + ' \"' + faq_list_answers[q_id] + '\"'
location_list.extend(toilets)
location_list.extend(recycle)
location_list.extend(stages)
pins = read_data.get_pins(location_list)
Information += pins.__str__()
elif tag_response == "transport":
Information += read_data.get_route_info()
location_list.extend(trains)
location_list.extend(streets)
location_list.extend(buses)
pins = read_data.get_pins(location_list)
Information += pins.__str__()
elif tag_response == "food":
location_list.extend(foods)
pins = read_data.get_pins(location_list)
Information += pins.__str__()
elif tag_response == "music":
for question in faq_list_questions:
q_id += 1
if question.find("music") != -1:
Information += ' ,' + ' \"' + faq_list_answers[q_id] + '\"'
elif tag_response == "program":
for question in faq_list_questions:
q_id += 1
if question.find("program") != -1:
Information += ' ,' + ' \"' + faq_list_answers[q_id] + '\"'
elif tag_response == "beverage":
for question in faq_list_questions:
q_id += 1
if question.find("beverage") != -1:
Information += ' ,' + ' \"' + faq_list_answers[q_id] + '\"'
location_list.extend(beverages)
pins = read_data.get_pins(location_list)
Information += pins.__str__()
elif tag_response == "urgency":
for question in faq_list_questions:
q_id += 1
if question.find("urgency") != -1:
Information += ' ,' + ' \"' + faq_list_answers[q_id] + '\"'
location_list.extend(urgency)
pins = read_data.get_pins(location_list)
Information += pins.__str__()
else:
Information += "Try to give a short response"
Information += read_data.get_recycle_info()
location_list.extend(other)
pins = read_data.get_pins(location_list)
Information += pins.__str__()
Information += "\""
#chat_logs[ip].append({"role": "user", "content": AI_personality + Limit + Information + prompt})
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": AI_personality + Limit + Information + prompt}]
)
final_response = response['choices'][0]['message']['content']
#chat_logs[ip].append({"role": "assistant", "content": final_response.strip("\n").strip()})
        if len(location_list) != 0:
final_response += " Also here is a map that may help you!"
marker_list = read_data.get_pins(location_list)
return final_response, marker_list
| [
"PLACEHOLDER to answer: \"PLACEHOLDER\".PLACEHOLDER",
"With the context that you are a festival assistant that has to answer questions. You have the following question from a festival participant:\"PLACEHOLDER\". Is the given question kind of similar in meaning has the same keywords, format or answer to any question in the following list of frequently asked questions:PLACEHOLDER? If you can find a similar question return just YES and the number before the most similar question, example: 'YES,number', don't change the format: 'YES,number' and don't add anything more. If you can't find a question similar enough return just NO.",
"PLACEHOLDERPLACEHOLDERPLACEHOLDERPLACEHOLDER",
"PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | Aeneator/Zenko-Challenge-Misclick | read_data.py | import json
import openai
import csv
openai.api_key = open("API_KEY.txt", 'r').read()
def get_stand_data():
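    """Merge the raw stands CSV export with the stands GeoJSON features (matched on the
    'numero' field) and write the combined result to DataFiles/stand_data.json."""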
shops_list = []
id_list = []
csv_file = open("RawFiles/export_stands20230922 (1).csv")
headers = next(csv_file)[:-1].split(';')
for row in csv_file:
row_data = row[:-1].split(';')
temp = {}
for i in range(1, len(headers)):
temp[headers[i]] = (row_data[i] if ', ' not in row_data[i] else row_data[i].split(', ')) if row_data[i] != '' else None
shops_list.append(temp)
id_list.append(row_data[0])
json_file = open('RawFiles/fdv_stands20230920.geojson', "r")
stand_list = json.loads(json_file.read())['features']
for stand in stand_list:
try:
stand['properties']['details'] = shops_list[id_list.index(stand['properties']['numero'])]
except ValueError:
continue
json_object = json.dumps({'stand_list': stand_list}, indent=4)
with open("DataFiles/stand_data.json", "w") as outfile:
outfile.write(json_object)
def get_location_lists():
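    """Load DataFiles/stand_data.json and bucket the stands into category lists
    (beverages, foods, urgency, stages, toilets, buses, trains, recycle, streets, other)
    based on their details and the keywords found in their 'numero' field."""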
json_file = open("DataFiles/stand_data.json", "r")
stand_list = json.loads(json_file.read())['stand_list']
beverages = []
foods = []
urgency = []
stages = []
toilets = []
buses = []
trains = []
recycle = []
streets = []
other = []
for stand in stand_list:
ok = False
if 'details' in stand['properties']:
if stand['properties']['details']['food_types'] is not None:
foods.append(stand)
ok = True
if stand['properties']['details']['drink_categories'] is not None:
beverages.append(stand)
ok = True
if 'eau' in stand['properties']['numero']:
beverages.append(stand)
ok = True
if 'GSN' in stand['properties']['numero']:
urgency.append(stand)
ok = True
if 'voirie' in stand['properties']['numero']:
streets.append(stand)
ok = True
if 'TransN' in stand['properties']['numero']:
trains.append(stand)
ok = True
if 'scène' in stand['properties']['numero']:
stages.append(stand)
ok = True
if 'camion' in stand['properties']['numero']:
buses.append(stand)
ok = True
if 'WC' in stand['properties']['numero']:
toilets.append(stand)
ok = True
if 'Centre tri' in stand['properties']['numero']:
recycle.append(stand)
ok = True
if ok is False:
other.append(stand)
return beverages, foods, urgency, stages, toilets, buses, trains, recycle, streets, other
def get_pins(loc_list):
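    """Turn a list of stand features into map markers: the two 'centerpoint' components in
    reversed order as coordinates, and the index of the stand in the list as popup text."""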
return [{"coordinates": [loc['properties']['centerpoint'].split(', ')[1], loc['properties']['centerpoint'].split(', ')[0]],
"popupText": loc_list.index(loc)} for loc in loc_list]
def get_route_info():
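    """Build a textual description of the routes from DataFiles/Routes.csv and of the
    closed routes from DataFiles/Modified Routes.csv, with each route wrapped in quotes."""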
routes_info = "Routes are structured as follows: each route can be found encapsulated between \"\", and follow this structure "
with open('DataFiles/Routes.csv', newline='') as file:
csvfile = csv.reader(file, delimiter=',', quotechar='|')
for row in csvfile:
            if routes_info[-1] == '\"':
                routes_info += ', \"'
            routes_info += row.__str__() + ('\"' if routes_info[-1] != ' ' else '')
routes_info += '. The following information is regarding closed routes, each of them encapsulated between \"\", and follow this structure '
with open('DataFiles/Modified Routes.csv', newline='') as file:
csvfile = csv.reader(file, delimiter=',', quotechar='|')
for row in csvfile:
            if routes_info[-1] == '\"':
                routes_info += ', \"'
            routes_info += row.__str__() + ('\"' if routes_info[-1] != ' ' else '')
return routes_info
def get_recycle_info():
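    """Build a textual description of the recycling rules from DataFiles/Recycle.csv,
    with each rule wrapped in quotes."""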
recycle_info = "Recycling rules are split as follows: each of them encapsulated between \"\", and follow this structure "
with open('DataFiles/Recycle.csv', newline='') as file:
csvfile = csv.reader(file, delimiter=',', quotechar='|')
for row in csvfile:
            if recycle_info[-1] == '\"':
                recycle_info += ', \"'
            recycle_info += row.__str__() + ('\"' if recycle_info[-1] != ' ' else '')
return recycle_info
| [] |
2024-01-10 | samuellee77/money-manager | src~pages~3_%F0%9F%A4%96_Samuel_GPT.py | import openai
import streamlit as st
def main():
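    """Render the Samuel GPT chat page: read the OpenAI key from Streamlit secrets, keep
    the model name and message history in session state, replay past messages, and stream
    each new assistant reply token by token."""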
st.title("🤖 Samuel GPT")
st.caption("1.0 version by Samuel Lee (2023-07-06)")
st.write("This is a smallll ChatGPT called Samuel GPT! Enter any thing you want to ask, but don't ask too much! (Cuz it costs me $$)")
openai.api_key = st.secrets["OPENAI_API_KEY"]
if "openai_model" not in st.session_state:
st.session_state["openai_model"] = "gpt-3.5-turbo"
if "messages" not in st.session_state:
st.session_state.messages = [{"role": "system", "content": "You are a helpful chatbot called Samuel GPT"}]
for message in st.session_state.messages:
if not message["role"] == "system":
with st.chat_message(message["role"]):
st.markdown(message["content"])
if prompt := st.chat_input("Write something"):
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
for response in openai.ChatCompletion.create(
model=st.session_state["openai_model"],
messages=[
{"role": m["role"], "content": m["content"]}
for m in st.session_state.messages
],
stream=True,
max_tokens=200,
temperature=0.75,
):
full_response += response.choices[0].delta.get("content", "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
st.session_state.messages.append({"role": "assistant", "content": full_response})
if __name__ == '__main__':
main() | [
"content",
"You are a helpful chatbot called Samuel GPT"
] |
2024-01-10 | engchina/OpenCodeInterpreter | opencodeinterpreter~core~llm~setup_text_llm.py | import os
import traceback
import litellm
import openai
import tokentrim as tt
from ...terminal_interface.utils.display_markdown_message import (
display_markdown_message,
)
def setup_text_llm(interpreter):
"""
Takes an Interpreter (which includes a ton of LLM settings),
    returns a text LLM: an OpenAI-compatible chat LLM with baked-in settings that only takes `messages`.
"""
# Pass remaining parameters to LiteLLM
def base_llm(messages):
"""
Returns a generator
"""
system_message = messages[0]["content"]
messages = messages[1:]
try:
if interpreter.context_window and interpreter.max_tokens:
trim_to_be_this_many_tokens = (
interpreter.context_window - interpreter.max_tokens - 25
) # arbitrary buffer
messages = tt.trim(
messages,
system_message=system_message,
max_tokens=trim_to_be_this_many_tokens,
)
elif interpreter.context_window and not interpreter.max_tokens:
# Just trim to the context window if max_tokens not set
messages = tt.trim(
messages,
system_message=system_message,
max_tokens=interpreter.context_window,
)
else:
try:
messages = tt.trim(
messages, system_message=system_message, model=interpreter.model
)
except:
if len(messages) == 1:
display_markdown_message(
"""
**We were unable to determine the context window of this model.** Defaulting to 3000.
If your model can handle more, run `interpreter --context_window {token limit}` or `interpreter.context_window = {token limit}`.
Also, please set max_tokens: `interpreter --max_tokens {max tokens per response}` or `interpreter.max_tokens = {max tokens per response}`
"""
)
messages = tt.trim(
messages, system_message=system_message, max_tokens=3000
)
except TypeError as e:
if interpreter.vision and str(e) == "expected string or buffer":
# There's just no way to use tokentrim on vision-enabled models yet.
if interpreter.debug_mode:
print("Couldn't token trim image messages. Error:", e)
### DISABLED image trimming
# To maintain the order of messages while simulating trimming, we will iterate through the messages
# and keep only the first 2 and last 2 images, while keeping all non-image messages.
# trimmed_messages = []
# image_counter = 0
# for message in messages:
# if (
# "content" in message
# and isinstance(message["content"], list)
# and len(message["content"]) > 1
# ):
# if message["content"][1]["type"] == "image":
# image_counter += 1
# if (
# image_counter <= 2
# or image_counter
# > len(
# [
# m
# for m in messages
# if m["content"][1]["type"] == "image"
# ]
# )
# - 2
# ):
# # keep message normal
# pass
# else:
# message["content"].pop(1)
# trimmed_messages.append(message)
# messages = trimmed_messages
# Reunite messages with system_message
messages = [{"role": "system", "content": system_message}] + messages
else:
raise
if interpreter.debug_mode:
print("Passing messages into LLM:", messages)
# Create LiteLLM generator
params = {
"model": interpreter.model,
"messages": messages,
"stream": True,
}
# Optional inputs
if interpreter.api_base:
params["api_base"] = interpreter.api_base
if interpreter.api_key:
params["api_key"] = interpreter.api_key
if interpreter.api_version:
params["api_version"] = interpreter.api_version
if interpreter.max_tokens:
params["max_tokens"] = interpreter.max_tokens
if interpreter.temperature is not None:
params["temperature"] = interpreter.temperature
else:
params["temperature"] = 0.0
if interpreter.model == "gpt-4-vision-preview":
# We need to go straight to OpenAI for this, LiteLLM doesn't work
if interpreter.api_base:
openai.api_base = interpreter.api_base
if interpreter.api_key:
openai.api_key = interpreter.api_key
if interpreter.api_version:
openai.api_version = interpreter.api_version
return openai.ChatCompletion.create(**params)
# LiteLLM
# These are set directly on LiteLLM
if interpreter.max_budget:
litellm.max_budget = interpreter.max_budget
if interpreter.debug_mode:
litellm.set_verbose = True
# Report what we're sending to LiteLLM
if interpreter.debug_mode:
print("Sending this to LiteLLM:", params)
return litellm.completion(**params)
return base_llm
| [] |
2024-01-10 | engchina/OpenCodeInterpreter | learn_gradio~chatbot_langchain_streaming.py | import os
from langchain.chat_models import ChatOpenAI
# from langchain.schema import AIMessage, HumanMessage
import openai
import gradio as gr
os.environ["OPENAI_API_BASE"] = "http://192.168.31.12:8000/v1" # Replace with your base url
os.environ["OPENAI_API_KEY"] = "sk-123456" # Replace with your api key
llm = ChatOpenAI(temperature=1.0, model='gpt-3.5-turbo')
def predict(message, history):
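    """Convert the Gradio chat history into the OpenAI messages format, then stream the
    completion back, yielding the partial reply as it grows."""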
history_openai_format = []
for human, assistant in history:
history_openai_format.append({"role": "user", "content": human})
history_openai_format.append({"role": "assistant", "content": assistant})
history_openai_format.append({"role": "user", "content": message})
response = openai.ChatCompletion.create(
model='gpt-3.5-turbo',
messages=history_openai_format,
temperature=1.0,
stream=True
)
partial_message = ""
for chunk in response:
print(f"chunk: {chunk}")
if len(chunk['choices'][0]['delta']) != 0:
partial_message = partial_message + chunk['choices'][0]['delta']['content']
yield partial_message
gr.ChatInterface(predict).queue().launch()
| [] |
2024-01-10 | goldfishh/chatgpt-on-wechat | bot~chatgpt~chat_gpt_bot.py | # encoding:utf-8
import time
import openai
import openai.error
import requests
from bot.bot import Bot
from bot.chatgpt.chat_gpt_session import ChatGPTSession
from bot.openai.open_ai_image import OpenAIImage
from bot.session_manager import SessionManager
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
from common.token_bucket import TokenBucket
from config import conf, load_config
# OpenAI chat completion API (available)
class ChatGPTBot(Bot, OpenAIImage):
def __init__(self):
super().__init__()
# set the default api_key
openai.api_key = conf().get("open_ai_api_key")
if conf().get("open_ai_api_base"):
openai.api_base = conf().get("open_ai_api_base")
proxy = conf().get("proxy")
if proxy:
openai.proxy = proxy
if conf().get("rate_limit_chatgpt"):
self.tb4chatgpt = TokenBucket(conf().get("rate_limit_chatgpt", 20))
self.sessions = SessionManager(ChatGPTSession, model=conf().get("model") or "gpt-3.5-turbo")
self.args = {
"model": conf().get("model") or "gpt-3.5-turbo", # 对话模型的名称
"temperature": conf().get("temperature", 0.9), # 值在[0,1]之间,越大表示回复越具有不确定性
# "max_tokens":4096, # 回复最大的字符数
"top_p": 1,
"frequency_penalty": conf().get("frequency_penalty", 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容
"presence_penalty": conf().get("presence_penalty", 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容
"request_timeout": conf().get("request_timeout", None), # 请求超时时间,openai接口默认设置为600,对于难问题一般需要较长时间
"timeout": conf().get("request_timeout", None), # 重试超时时间,在这个时间内,将会自动重试
}
def reply(self, query, context=None):
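        """Build a reply for the given context: text queries go through session handling
        (including the clear-memory / clear-all / reload-config commands) and the chat
        completion API, while IMAGE_CREATE contexts are forwarded to the image helper."""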
# acquire reply content
if context.type == ContextType.TEXT:
logger.info("[CHATGPT] query={}".format(query))
session_id = context["session_id"]
reply = None
clear_memory_commands = conf().get("clear_memory_commands", ["#清除记忆"])
if query in clear_memory_commands:
self.sessions.clear_session(session_id)
reply = Reply(ReplyType.INFO, "记忆已清除")
elif query == "#清除所有":
self.sessions.clear_all_session()
reply = Reply(ReplyType.INFO, "所有人记忆已清除")
elif query == "#更新配置":
load_config()
reply = Reply(ReplyType.INFO, "配置已更新")
if reply:
return reply
session = self.sessions.session_query(query, session_id)
logger.debug("[CHATGPT] session query={}".format(session.messages))
api_key = context.get("openai_api_key")
self.args['model'] = context.get('gpt_model') or "gpt-3.5-turbo"
# if context.get('stream'):
# # reply in stream
# return self.reply_text_stream(query, new_query, session_id)
reply_content = self.reply_text(session, api_key)
logger.debug(
"[CHATGPT] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(
session.messages,
session_id,
reply_content["content"],
reply_content["completion_tokens"],
)
)
if reply_content["completion_tokens"] == 0 and len(reply_content["content"]) > 0:
reply = Reply(ReplyType.ERROR, reply_content["content"])
elif reply_content["completion_tokens"] > 0:
self.sessions.session_reply(reply_content["content"], session_id, reply_content["total_tokens"])
reply = Reply(ReplyType.TEXT, reply_content["content"])
else:
reply = Reply(ReplyType.ERROR, reply_content["content"])
logger.debug("[CHATGPT] reply {} used 0 tokens.".format(reply_content))
return reply
elif context.type == ContextType.IMAGE_CREATE:
ok, retstring = self.create_img(query, 0)
reply = None
if ok:
reply = Reply(ReplyType.IMAGE_URL, retstring)
else:
reply = Reply(ReplyType.ERROR, retstring)
return reply
else:
reply = Reply(ReplyType.ERROR, "Bot不支持处理{}类型的消息".format(context.type))
return reply
def reply_text(self, session: ChatGPTSession, api_key=None, retry_count=0) -> dict:
"""
call openai's ChatCompletion to get the answer
        :param session: a conversation session
        :param api_key: optional API key; the default openai.api_key is used when None
        :param retry_count: retry count
        :return: dict with total_tokens, completion_tokens and content
"""
try:
if conf().get("rate_limit_chatgpt") and not self.tb4chatgpt.get_token():
raise openai.error.RateLimitError("RateLimitError: rate limit exceeded")
# if api_key == None, the default openai.api_key will be used
response = openai.ChatCompletion.create(api_key=api_key, messages=session.messages, **self.args)
# logger.info("[ChatGPT] reply={}, total_tokens={}".format(response.choices[0]['message']['content'], response["usage"]["total_tokens"]))
return {
"total_tokens": response["usage"]["total_tokens"],
"completion_tokens": response["usage"]["completion_tokens"],
"content": response.choices[0]["message"]["content"],
}
except Exception as e:
need_retry = retry_count < 2
result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
if isinstance(e, openai.error.RateLimitError):
logger.warn("[CHATGPT] RateLimitError: {}".format(e))
result["content"] = "提问太快啦,请休息一下再问我吧"
if need_retry:
time.sleep(20)
elif isinstance(e, openai.error.Timeout):
logger.warn("[CHATGPT] Timeout: {}".format(e))
result["content"] = "我没有收到你的消息"
if need_retry:
time.sleep(5)
elif isinstance(e, openai.error.APIError):
logger.warn("[CHATGPT] Bad Gateway: {}".format(e))
result["content"] = "请再问我一次"
if need_retry:
time.sleep(10)
elif isinstance(e, openai.error.APIConnectionError):
logger.warn("[CHATGPT] APIConnectionError: {}".format(e))
need_retry = False
result["content"] = "我连接不到你的网络"
else:
logger.exception("[CHATGPT] Exception: {}".format(e))
need_retry = False
self.sessions.clear_session(session.session_id)
if need_retry:
logger.warn("[CHATGPT] 第{}次重试".format(retry_count + 1))
return self.reply_text(session, api_key, retry_count + 1)
else:
return result
class AzureChatGPTBot(ChatGPTBot):
def __init__(self):
super().__init__()
openai.api_type = "azure"
openai.api_version = "2023-03-15-preview"
self.args["deployment_id"] = conf().get("azure_deployment_id")
def create_img(self, query, retry_count=0, api_key=None):
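        """Submit a DALL-E text-to-image request to the Azure endpoint, poll the
        Operation-Location URL until the job succeeds, and return (ok, image URL or error)."""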
api_version = "2022-08-03-preview"
url = "{}dalle/text-to-image?api-version={}".format(openai.api_base, api_version)
api_key = api_key or openai.api_key
headers = {"api-key": api_key, "Content-Type": "application/json"}
try:
body = {"caption": query, "resolution": conf().get("image_create_size", "256x256")}
submission = requests.post(url, headers=headers, json=body)
operation_location = submission.headers["Operation-Location"]
retry_after = submission.headers["Retry-after"]
status = ""
image_url = ""
while status != "Succeeded":
logger.info("waiting for image create..., " + status + ",retry after " + retry_after + " seconds")
time.sleep(int(retry_after))
response = requests.get(operation_location, headers=headers)
status = response.json()["status"]
image_url = response.json()["result"]["contentUrl"]
return True, image_url
except Exception as e:
logger.error("create image error: {}".format(e))
return False, "图片生成失败"
| [
"content",
"我现在有点累了,等会再来吧"
] |
2024-01-10 | swein/make4me | make4me.py | #!/usr/bin/env python3
from flask import Flask, render_template, request
import openai
# Set up OpenAI API credentials
openai.api_key = ''
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def home():
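    """Flask index route: on POST, collect the goal/input/output requirements from the
    form, generate code for them, and render the result page; on GET, show the form."""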
if request.method == 'POST':
requirements = {
'Goal': request.form['goal'],
'Input': request.form['input'],
'Output': request.form['output'],
}
code_response = generate_code(requirements)
return render_template('result.html', code_response=code_response)
return render_template('index.html')
def generate_code(requirements):
# Construct the prompt from the requirements
prompt = construct_prompt(requirements)
# Get the API response from ChatGPT
response = get_chat_response(prompt)
# Extract the code from the API response
code = extract_code(response)
return code
def construct_prompt(requirements):
prompt = f"Goal: {requirements['Goal']}\n\n"
prompt += f"Input:\n{requirements['Input']}\n\n"
prompt += f"Output:\n{requirements['Output']}\n\n"
return prompt
def get_chat_response(prompt):
response = openai.Completion.create(
engine='text-davinci-003',
prompt=prompt,
max_tokens=1000,
temperature=0.7,
n=1,
stop=None,
timeout=30
)
return response.choices[0].text.strip()
def extract_code(response):
return response
if __name__ == '__main__':
app.run(host='localhost', port=8787)
| [
"Goal: PLACEHOLDER\n\n",
"Output:\nPLACEHOLDER\n\n",
"Input:\nPLACEHOLDER\n\n"
] |
2024-01-10 | Beenyaa/fwrog-e | backend~src~whispers_session.py | import base64
import os
import re
import sys
from dotenv import load_dotenv
import numpy as np
import openai
from asgiref.sync import sync_to_async
from src.langchain_agent import ChatAgent
from src.whispers_engine import process_audio_data
# Load the environment variables
load_dotenv()
class WhispersSession:
def __init__(self, transcriptionQueue, reasoningQueue, socketManager):
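        """Store the transcription/reasoning queues and the socket manager, define the
        accepted wake-word spellings, and initialise the per-session state."""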
self.transcriptionQueue = transcriptionQueue
self.reasoningQueue = reasoningQueue
self.socketManager = socketManager
self.wakeWords = ["Hey Froggy", "Hey froggy", "Hey Froggy",
"Hey froggy", "Hey, Froggy", "Hey, froggy",
"Hey Froggie", "Hey, Froggie", "Hey Froggie",
"Hey froggie", "Hey, froggie", "Hey, froggie",
"Hey froggie", "Froggy", "Froggie", "froggie", "froggy"]
self.froggySession = False
self.previousRecording = np.array([])
self.froggyMessage = ''
self.froggySessionCounter = 0
def __base64_to_narray(self, convertible):
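        """Decode a base64-encoded int16 audio buffer into a float32 NumPy array of samples."""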
# Convert the string to a bytes object
convertible = base64.b64decode(convertible)
# Convert the bytes object to a NumPy array
convertible = np.frombuffer(convertible, np.int16).astype(
np.float32)
return convertible
def __clean_text(self, message: str, wakeWord: str):
"""
Remove wake word from message and return capitalized text.
"""
message = re.findall(f"!?({wakeWord}.\s|{wakeWord}\s)(.+)", message)
print('message:', message)
if message:
message = message[0][1].strip().capitalize()
else:
message = ""
return message
async def process_audio_data_from_queue(self, websocket):
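        """Pull the next audio chunk from the transcription queue, prepend the tail of the
        previous chunk, transcribe it, and track wake-word sessions; once a session has
        ended, broadcast the accumulated message and hand it to the reasoning queue."""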
print("Froggy Session:", self.froggySession)
print("Froggy Session Counter:", self.froggySessionCounter)
# Get audio data from the queue
currentRecording = await self.transcriptionQueue.get()
# Process the audio data and broadcast the transcription result
currentRecording = self.__base64_to_narray(currentRecording)
previous_recording_length = self.previousRecording.shape[0]
concatenatedRecording = np.concatenate([
self.previousRecording[-previous_recording_length//3:], currentRecording])
self.previousRecording = currentRecording
# print('concatenatedRecording:', concatenatedRecording)
transcription = await process_audio_data(concatenatedRecording, prompt="Hey Froggy,") if self.froggyMessage == '' else await process_audio_data(concatenatedRecording, prompt=self.froggyMessage)
if self.froggySession is False and len(self.froggyMessage) > 1:
data = {
"status": "broadcasting",
"transcription": self.froggyMessage,
}
await self.socketManager.broadcast(websocket, data)
await self.reasoningQueue.put(self.froggyMessage)
self.froggyMessage = ""
if transcription:
# appends cleaned text if wake word is present in message
for wakeWord in self.wakeWords:
if wakeWord in transcription:
self.froggySession = True
self.froggyMessage += self.__clean_text(
transcription, wakeWord)
# only one wake word is allowed per message
break
        if (transcription is None or transcription in (" ", "")) and self.froggySessionCounter >= 5:
self.froggySession = False
self.froggySessionCounter = 0
        if self.froggySession and transcription is not None and transcription not in (" ", ""):
self.froggySessionCounter += 1
self.froggyMessage += transcription.rstrip().lower()
data = {
"status": "broadcasting",
"transcription": self.froggyMessage,
}
await self.socketManager.broadcast(websocket, data)
self.transcriptionQueue.task_done()
# async def get_ai_response(self, websocket):
# # Set the API key for OpenAI
# openai.api_key = os.getenv("OPENAI_API_KEY")
# # Get the prompt for the OpenAI model
# prompt = os.getenv("PROMPT_PREFIX")
# # Get the transcription from the queue
# transcription = await self.reasoningQueue.get()
# # Use the OpenAI API to get a response for the transcription
# response = await sync_to_async(openai.Completion.create)(
# model="text-davinci-003",
# prompt=f"{prompt}Q: {transcription}\nA: ",
# temperature=0,
# max_tokens=100,
# top_p=1,
# frequency_penalty=0,
# presence_penalty=0,
# )
# if response:
# print("response:", response)
# data = {
# "status": "broadcasting",
# "reasoning": response.choices[0].text.strip(),
# }
# await self.socketManager.broadcast(websocket, data)
# self.reasoningQueue.task_done()
async def get_ai_response(self, websocket):
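        """Take the next transcription from the reasoning queue, run it through the
        LangChain ChatAgent, strip an optional "(xx-XX)" locale tag from the reply, and
        broadcast the answer to the connected websocket clients."""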
# Get the transcription from the queue
transcription = await self.reasoningQueue.get()
history_array = []
print("\n\n#### INPUT ####\n")
print(transcription)
print("\n\n#### INPUT ####\n")
chat_agent = await sync_to_async(ChatAgent)(history_array=history_array)
try:
reply = chat_agent.agent_executor.run(input=transcription)
except ValueError as inst:
print('ValueError:\n')
print(inst)
reply = "Sorry, there was an error processing your request."
print("\n\n#### REPLY ####\n")
print(reply)
print("\n\n#### REPLY ####\n")
pattern = r'\(([a-z]{2}-[A-Z]{2})\)'
# Search for the local pattern in the string
match = re.search(pattern, reply)
language = 'en-US' # defaut
if match:
# Get the language code
language = match.group(1)
# Remove the language code from the reply
reply = re.sub(pattern, '', reply)
print("LANG: ", language)
sys.stdout.flush()
if reply:
data = {
"status": "broadcasting",
"reasoning": reply.strip(),
}
await self.socketManager.broadcast(websocket, data)
self.reasoningQueue.task_done()
| [] |
2024-01-10 | xinxin2man/MintPy | mintpy~ifgram_inversion.py | #!/usr/bin/env python3
############################################################
# Program is part of MintPy #
# Copyright (c) 2013, Zhang Yunjun, Heresh Fattahi #
# Author: Zhang Yunjun, Heresh Fattahi, 2013 #
# Parallel support added by David Grossman, Joshua Zahner #
############################################################
# Recommend import:
# from mintpy import ifgram_inversion as ifginv
#
# Offset inversion considerations (different from phases):
# 1. referencing is turned off because offset is spatially absolute measure
# 2. zero value is valid for offset
# 3. unit is ground pixel size in range/azimuth directions
# 4. add Az/Rg suffix in all output files to distinguish azimuth/range
import os
import sys
import time
import argparse
import warnings
import h5py
import numpy as np
from scipy import linalg # more effieint than numpy.linalg
from mintpy.objects import ifgramStack, timeseries, cluster
from mintpy.simulation import decorrelation as decor
from mintpy.defaults.template import get_template_content
from mintpy.utils import readfile, writefile, ptime, utils as ut, arg_group
# key configuration parameter name
key_prefix = 'mintpy.networkInversion.'
configKeys = ['obsDatasetName',
'numIfgram',
'weightFunc',
'maskDataset',
'maskThreshold',
'minRedundancy',
'minNormVelocity']
################################################################################################
EXAMPLE = """example:
ifgram_inversion.py inputs/ifgramStack.h5 -t smallbaselineApp.cfg --update
ifgram_inversion.py inputs/ifgramStack.h5 -w no # turn off weight for fast processing
ifgram_inversion.py inputs/ifgramStack.h5 -c no # turn off parallel processing
# offset
ifgram_inversion.py inputs/ifgramStack.h5 -i rangeOffset -w no -m waterMask.h5 --md offsetSNR --mt 5
ifgram_inversion.py inputs/ifgramStack.h5 -i azimuthOffset -w no -m waterMask.h5 --md offsetSNR --mt 5
"""
TEMPLATE = get_template_content('invert_network')
REFERENCE = """references:
Berardino, P., Fornaro, G., Lanari, R., & Sansosti, E. (2002). A new algorithm for surface
deformation monitoring based on small baseline differential SAR interferograms. IEEE TGRS,
40(11), 2375-2383. doi:10.1109/TGRS.2002.803792
Pepe, A., and R. Lanari (2006), On the extension of the minimum cost flow algorithm for phase unwrapping
of multitemporal differential SAR interferograms, IEEE-TGRS, 44(9), 2374-2383.
Perissin, D., and T. Wang (2012), Repeat-pass SAR interferometry with partially coherent targets, IEEE TGRS,
50(1), 271-280, doi:10.1109/tgrs.2011.2160644.
Samiei-Esfahany, S., J. E. Martins, F. v. Leijen, and R. F. Hanssen (2016), Phase Estimation for Distributed
Scatterers in InSAR Stacks Using Integer Least Squares Estimation, IEEE TGRS, 54(10), 5671-5687.
Seymour, M. S., and I. G. Cumming (1994), Maximum likelihood estimation for SAR interferometry, 1994.
IGARSS '94., 8-12 Aug 1994.
Yunjun, Z., H. Fattahi, and F. Amelung (2019), Small baseline InSAR time series analysis: Unwrapping error
correction and noise reduction, Computers & Geosciences, 133, 104331, doi:10.1016/j.cageo.2019.104331.
"""
def create_parser():
parser = argparse.ArgumentParser(description='Invert network of interferograms into time-series.',
formatter_class=argparse.RawTextHelpFormatter,
epilog=REFERENCE+'\n'+TEMPLATE+'\n'+EXAMPLE)
# input dataset
parser.add_argument('ifgramStackFile', help='interferograms stack file to be inverted')
parser.add_argument('-t','--template', dest='templateFile', help='template text file with options')
parser.add_argument('-i','-d', '--dset', dest='obsDatasetName', type=str,
help='dataset name of unwrap phase / offset to be used for inversion'
'\ne.g.: unwrapPhase, unwrapPhase_bridging, ...')
parser.add_argument('-m','--water-mask', dest='waterMaskFile',
help='Skip inversion on the masked out region, i.e. water.')
# options rarely used or changed
parser.add_argument('-o', '--output', dest='outfile', nargs=3,
metavar=('TS_FILE', 'TCOH_FILE', 'NUM_INV_FILE'),
help='Output file name. (default: %(default)s).')
parser.add_argument('--ref-date', dest='ref_date', help='Reference date, first date by default.')
parser.add_argument('--skip-reference','--skip-ref', dest='skip_ref', action='store_true',
help='[for offset and testing] do not apply spatial referencing.')
# solver
solver = parser.add_argument_group('solver', 'solver for the network inversion problem')
solver.add_argument('-w', '--weight-func', dest='weightFunc', default='var',
choices={'var', 'fim', 'coh', 'no'},
help='function used to convert coherence to weight for inversion:\n' +
'var - inverse of phase variance due to temporal decorrelation (default)\n' +
                             'fim - Fisher Information Matrix as weight\n' +
'coh - spatial coherence\n' +
'no - no/uniform weight')
solver.add_argument('--min-norm-phase', dest='minNormVelocity', action='store_false',
help=('Enable inversion with minimum-norm deformation phase,'
' instead of the default minimum-norm deformation velocity.'))
solver.add_argument('--norm', dest='residualNorm', default='L2', choices=['L1', 'L2'],
                        help='Optimization method, L1 or L2 norm. (default: %(default)s).')
# mask
mask = parser.add_argument_group('mask', 'mask observation data before inversion')
mask.add_argument('--mask-dset','--mask-dataset','--md', dest='maskDataset',
help='dataset used to mask unwrapPhase, e.g. coherence, connectComponent')
mask.add_argument('--mask-thres','--mask-threshold','--mt', dest='maskThreshold', metavar='NUM', type=float, default=0.4,
help='threshold to generate mask when mask is coherence (default: %(default)s).')
mask.add_argument('--min-redun','--min-redundancy','--mr', dest='minRedundancy', metavar='NUM', type=float, default=1.0,
help='minimum redundancy of interferograms for every SAR acquisition. (default: %(default)s).')
# computing
parser = arg_group.add_memory_argument(parser)
parser = arg_group.add_parallel_argument(parser)
# update / skip
parser.add_argument('--update', dest='update_mode', action='store_true',
help='Enable update mode, and skip inversion if output timeseries file already exists,\n' +
'readable and newer than input interferograms file')
return parser
def cmd_line_parse(iargs=None):
parser = create_parser()
inps = parser.parse_args(args=iargs)
# check input file type
atr = readfile.read_attribute(inps.ifgramStackFile)
if atr['FILE_TYPE'] not in ['ifgramStack']:
raise ValueError('input is {} file, support ifgramStack file only.'.format(atr['FILE_TYPE']))
if inps.templateFile:
inps, template = read_template2inps(inps.templateFile, inps)
else:
template = dict()
# --cluster and --num-worker option
inps.numWorker = str(cluster.DaskCluster.format_num_worker(inps.cluster, inps.numWorker))
if inps.cluster and inps.numWorker == '1':
print('WARNING: number of workers is 1, turn OFF parallel processing and continue')
inps.cluster = None
# --water-mask option
if inps.waterMaskFile and not os.path.isfile(inps.waterMaskFile):
inps.waterMaskFile = None
# --dset option
if not inps.obsDatasetName:
inps.obsDatasetName = 'unwrapPhase'
# determine suffix based on unwrapping error correction method
obs_suffix_map = {'bridging' : '_bridging',
'phase_closure' : '_phaseClosure',
'bridging+phase_closure' : '_bridging_phaseClosure'}
key = 'mintpy.unwrapError.method'
if key in template.keys() and template[key]:
unw_err_method = template[key].lower().replace(' ','') # fix potential typo
inps.obsDatasetName += obs_suffix_map[unw_err_method]
print('phase unwrapping error correction "{}" is turned ON'.format(unw_err_method))
print('use dataset "{}" by default'.format(inps.obsDatasetName))
# check if input observation dataset exists.
stack_obj = ifgramStack(inps.ifgramStackFile)
stack_obj.open(print_msg=False)
if inps.obsDatasetName not in stack_obj.datasetNames:
msg = 'input dataset name "{}" not found in file: {}'.format(inps.obsDatasetName, inps.ifgramStackFile)
raise ValueError(msg)
# --skip-ref option
if 'offset' in inps.obsDatasetName.lower():
inps.skip_ref = True
# --output option
if not inps.outfile:
if inps.obsDatasetName.startswith('unwrapPhase'):
inps.outfile = ['timeseries.h5', 'temporalCoherence.h5', 'numInvIfgram.h5']
elif inps.obsDatasetName.startswith('azimuthOffset'):
inps.outfile = ['timeseriesAz.h5', 'temporalCoherenceAz.h5', 'numInvOffset.h5']
elif inps.obsDatasetName.startswith('rangeOffset'):
inps.outfile = ['timeseriesRg.h5', 'temporalCoherenceRg.h5', 'numInvOffset.h5']
else:
raise ValueError('un-recognized input observation dataset name: {}'.format(inps.obsDatasetName))
inps.tsFile, inps.tempCohFile, inps.numInvFile = inps.outfile
return inps
def read_template2inps(template_file, inps):
"""Read input template options into Namespace inps"""
if not inps:
inps = cmd_line_parse()
iDict = vars(inps)
template = readfile.read_template(template_file)
template = ut.check_template_auto_value(template)
keyList = [i for i in list(iDict.keys()) if key_prefix+i in template.keys()]
for key in keyList:
value = template[key_prefix+key]
if key in ['weightFunc', 'maskDataset', 'minNormVelocity']:
iDict[key] = value
elif value:
if key in ['maskThreshold', 'minRedundancy']:
iDict[key] = float(value)
elif key in ['residualNorm', 'waterMaskFile']:
iDict[key] = value
# computing configurations
dask_key_prefix = 'mintpy.compute.'
keyList = [i for i in list(iDict.keys()) if dask_key_prefix+i in template.keys()]
for key in keyList:
value = template[dask_key_prefix+key]
if key in ['cluster', 'config']:
iDict[key] = value
elif value:
if key in ['numWorker']:
iDict[key] = str(value)
elif key in ['maxMemory']:
iDict[key] = float(value)
# False/None --> 'no'
for key in ['weightFunc']:
if not iDict[key]:
iDict[key] = 'no'
return inps, template
def run_or_skip(inps):
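    """Check whether the network inversion can be skipped in update mode: all output files
    must exist and be complete, be newer than the input observation dataset, and share the
    same key configuration/metadata; return 'skip' or 'run' accordingly."""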
print('-'*50)
print('update mode: ON')
flag = 'skip'
# check output files vs input dataset
if not all(os.path.isfile(i) for i in inps.outfile):
flag = 'run'
print('1) NOT ALL output files found: {}.'.format(inps.outfile))
else:
# check if time-series file is partly written using file size
# since time-series file is not compressed
with h5py.File(inps.outfile[0], 'r') as f:
fsize_ref = f['timeseries'].size * 4
fsize = os.path.getsize(inps.outfile[0])
if fsize <= fsize_ref:
flag = 'run'
print('1) output file {} is NOT fully written.'.format(inps.outfile[0]))
else:
print('1) output files already exist: {}.'.format(inps.outfile))
# check modification time
with h5py.File(inps.ifgramStackFile, 'r') as f:
ti = float(f[inps.obsDatasetName].attrs.get('MODIFICATION_TIME', os.path.getmtime(inps.ifgramStackFile)))
to = min(os.path.getmtime(i) for i in inps.outfile)
if ti > to:
flag = 'run'
print('2) output files are NOT newer than input dataset: {}.'.format(inps.obsDatasetName))
else:
print('2) output dataset is newer than input dataset: {}.'.format(inps.obsDatasetName))
# check configuration
if flag == 'skip':
meta_keys = ['REF_Y', 'REF_X']
atr_ifg = readfile.read_attribute(inps.ifgramStackFile)
atr_ts = readfile.read_attribute(inps.tsFile)
inps.numIfgram = len(ifgramStack(inps.ifgramStackFile).get_date12_list(dropIfgram=True))
if any(str(vars(inps)[key]) != atr_ts.get(key_prefix+key, 'None') for key in configKeys):
flag = 'run'
            print('3) NOT all key configuration parameters are the same: {}'.format(configKeys))
elif any(atr_ts[key] != atr_ifg[key] for key in meta_keys):
flag = 'run'
print('3) NOT all the metadata are the same: {}'.format(meta_keys))
else:
print('3) all key configuration parameters are the same: {}.'.format(configKeys))
# result
print('run or skip: {}.'.format(flag))
return flag
################################# Time-series Estimator ###################################
def estimate_timeseries(A, B, tbase_diff, ifgram, weight_sqrt=None, min_norm_velocity=True,
rcond=1e-5, min_redundancy=1.):
"""Estimate time-series from a stack/network of interferograms with
Least Square minimization on deformation phase / velocity.
opt 1: X = np.dot(np.dot(numpy.linalg.inv(np.dot(B.T, B)), B.T), ifgram)
opt 2: X = np.dot(numpy.linalg.pinv(B), ifgram)
opt 3: X = np.dot(scipy.linalg.pinv2(B), ifgram)
opt 4: X = scipy.linalg.lstsq(B, ifgram)[0] [recommend and used]
opt 4 supports weight.
    scipy.linalg provides more advanced and slightly faster performance than numpy.linalg.
This function relies on the LAPACK routine gelsd. It computes the minimum-norm
solution to a linear least squares problem using the singular value decomposition
of A and a divide and conquer method.
opt 4 is faster than opt 1/2/3 because it estimates X directly without calculating
the A_inv matrix.
    opt 2/3 is better than opt 1 because numpy.linalg.inv() can not handle rank deficiency of
design matrix B
Traditional Small BAseline Subsets (SBAS) algorithm (Berardino et al., 2002, IEEE-TGRS)
is equivalent to the setting of:
min_norm_velocity=True
weight_sqrt=None
Parameters: A - 2D np.array in size of (num_ifgram, num_date-1)
B - 2D np.array in size of (num_ifgram, num_date-1),
design matrix B, each row represents differential temporal
baseline history between reference and secondary date of one interferogram
tbase_diff - 2D np.array in size of (num_date-1, 1),
differential temporal baseline history
ifgram - 2D np.array in size of (num_ifgram, num_pixel),
phase/offset of all interferograms.
no-data value: NaN.
weight_sqrt - 2D np.array in size of (num_ifgram, num_pixel),
square root of weight of all interferograms
min_norm_velocity - bool, assume minimum-norm deformation velocity, or not
rcond - cut-off ratio of small singular values of A or B, to maintain robustness.
It's recommend to >= 1e-5 by experience, to generate reasonable result.
min_redundancy - min redundancy defined as min num_ifgram for every SAR acquisition
Returns: ts - 2D np.array in size of (num_date, num_pixel), phase time-series
temp_coh - 1D np.array in size of (num_pixel), temporal coherence
             num_inv_obs - 1D np.array in size of (num_pixel), number of observations (ifgrams / offsets)
used during the inversion
"""
ifgram = ifgram.reshape(A.shape[0], -1)
if weight_sqrt is not None:
weight_sqrt = weight_sqrt.reshape(A.shape[0], -1)
num_date = A.shape[1] + 1
num_pixel = ifgram.shape[1]
# initial output value
ts = np.zeros((num_date, num_pixel), dtype=np.float32)
temp_coh = 0.
num_inv_obs = 0
# skip nan phase/offset value
# apply to the pixel-wised inversion only
# since the region-wised inversion has valid obs in all pairs
if np.any(np.isnan(ifgram)):
flag = (~np.isnan(ifgram[:, 0])).flatten()
A = A[flag, :]
B = B[flag, :]
# skip the pixel if its redundancy < threshold
if np.min(np.sum(A != 0., axis=0)) < min_redundancy:
return ts, temp_coh, num_inv_obs
# check matrix invertability
# for WLS only because OLS contains it already
if weight_sqrt is not None:
try:
linalg.inv(np.dot(B.T, B))
except linalg.LinAlgError:
return ts, temp_coh, num_inv_obs
ifgram = ifgram[flag, :]
if weight_sqrt is not None:
weight_sqrt = weight_sqrt[flag, :]
# update number of observations used for inversion
num_inv_obs = A.shape[0]
# invert time-series
try:
# assume minimum-norm deformation velocity
if min_norm_velocity:
if weight_sqrt is not None:
X = linalg.lstsq(np.multiply(B, weight_sqrt),
np.multiply(ifgram, weight_sqrt),
cond=rcond)[0]
else:
X = linalg.lstsq(B, ifgram, cond=rcond)[0]
# calc temporal coherence
temp_coh = calc_temporal_coherence(ifgram, B, X)
# assemble time-series
ts_diff = X * np.tile(tbase_diff, (1, num_pixel))
ts[1:, :] = np.cumsum(ts_diff, axis=0)
# assume minimum-norm deformation phase
else:
if weight_sqrt is not None:
X = linalg.lstsq(np.multiply(A, weight_sqrt),
np.multiply(ifgram, weight_sqrt),
cond=rcond)[0]
else:
X = linalg.lstsq(A, ifgram, cond=rcond)[0]
# calc temporal coherence
temp_coh = calc_temporal_coherence(ifgram, A, X)
# assemble time-series
            ts[1:, :] = X
except linalg.LinAlgError:
pass
return ts, temp_coh, num_inv_obs
def calc_temporal_coherence(ifgram, G, X):
"""Calculate the temporal coherence from the network inversion results
Parameters: ifgram - 2D np.array in size of (num_ifgram, num_pixel), phase or offset
G - 2D np.array in size of (num_ifgram, num_date-1), design matrix A or B
X - 2D np.array in size of (num_date-1, num_pixel), solution
Returns: temp_coh - 1D np.array in size of (num_pixel), temporal coherence
"""
num_ifgram, num_pixel = ifgram.shape
temp_coh = np.zeros(num_pixel, dtype=np.float32)
# chunk_size as the number of pixels
chunk_size = int(ut.round_to_1(2e5 / num_ifgram))
if num_pixel > chunk_size:
num_chunk = int(np.ceil(num_pixel / chunk_size))
num_chunk_step = max(1, int(ut.round_to_1(num_chunk / 5)))
        print(('calculating temporal coherence in chunks of {} pixels'
': {} chunks in total ...').format(chunk_size, num_chunk))
for i in range(num_chunk):
c0 = i * chunk_size
c1 = min((i + 1) * chunk_size, num_pixel)
# calc residual
ifgram_diff = ifgram[:, c0:c1] - np.dot(G, X[:, c0:c1])
# calc temporal coherence
temp_coh[c0:c1] = np.abs(np.sum(np.exp(1j*ifgram_diff), axis=0)) / num_ifgram
# print out message
if (i+1) % num_chunk_step == 0:
print('chunk {} / {}'.format(i+1, num_chunk))
else:
# calc residual
ifgram_diff = ifgram - np.dot(G, X)
# calc temporal coherence
temp_coh = np.abs(np.sum(np.exp(1j*ifgram_diff), axis=0)) / num_ifgram
return temp_coh
###################################### File IO ############################################
def write2hdf5_file(ifgram_file, metadata, ts, temp_coh, num_inv_ifg=None,
suffix='', inps=None):
stack_obj = ifgramStack(ifgram_file)
stack_obj.open(print_msg=False)
date_list = stack_obj.get_date_list(dropIfgram=True)
# File 1 - timeseries.h5
ts_file = '{}{}.h5'.format(suffix, os.path.splitext(inps.outfile[0])[0])
metadata['REF_DATE'] = date_list[0]
metadata['FILE_TYPE'] = 'timeseries'
metadata['UNIT'] = 'm'
print('-'*50)
print('calculating perpendicular baseline timeseries')
pbase = stack_obj.get_perp_baseline_timeseries(dropIfgram=True)
ts_obj = timeseries(ts_file)
ts_obj.write2hdf5(data=ts, dates=date_list, bperp=pbase, metadata=metadata)
# File 2 - temporalCoherence.h5
out_file = '{}{}.h5'.format(suffix, os.path.splitext(inps.outfile[1])[0])
metadata['FILE_TYPE'] = 'temporalCoherence'
metadata['UNIT'] = '1'
print('-'*50)
writefile.write(temp_coh, out_file=out_file, metadata=metadata)
## File 3 - timeseriesDecorStd.h5
#if not np.all(ts_std == 0.):
# out_file = 'timeseriesDecorStd{}.h5'.format(suffix)
# metadata['FILE_TYPE'] = 'timeseries'
# metadata['UNIT'] = 'm'
# phase2range = -1*float(stack_obj.metadata['WAVELENGTH'])/(4.*np.pi)
# ts_std *= abs(phase2range)
# print('-'*50)
# writefile.write(ts_std, out_file=out_file, metadata=metadata, ref_file=ts_file)
# File 3 - numInvIfgram.h5
out_file = 'numInvIfgram{}.h5'.format(suffix)
metadata['FILE_TYPE'] = 'mask'
metadata['UNIT'] = '1'
print('-'*50)
writefile.write(num_inv_ifg, out_file=out_file, metadata=metadata)
return
def split2boxes(ifgram_file, max_memory=4, print_msg=True):
"""Split into chunks in rows to reduce memory usage
    Parameters: ifgram_file - str, path of the interferograms stack HDF5 file
max_memory - float, max memory to use in GB
print_msg - bool
Returns: box_list - list of tuple of 4 int
num_box - int, number of boxes
"""
ifg_obj = ifgramStack(ifgram_file)
ifg_obj.open(print_msg=False)
# dataset size: defo obs (phase / offset) + weight + time-series
length = ifg_obj.length
width = ifg_obj.width
ds_size = (ifg_obj.numIfgram * 2 + ifg_obj.numDate + 5) * length * width * 4
num_box = int(np.ceil(ds_size * 1.5 / (max_memory * 1024**3)))
y_step = int(np.rint((length / num_box) / 10) * 10)
num_box = int(np.ceil(length / y_step))
if print_msg and num_box > 1:
print('maximum memory size: %.1E GB' % max_memory)
print('split %d lines into %d patches for processing' % (length, num_box))
print(' with each patch up to %d lines' % y_step)
# y_step / num_box --> box_list
box_list = []
for i in range(num_box):
y0 = i * y_step
y1 = min([length, y0 + y_step])
box = (0, y0, width, y1)
box_list.append(box)
return box_list, num_box
def check_design_matrix(ifgram_file, weight_func='var'):
"""
Check Rank of Design matrix for weighted inversion
"""
date12_list = ifgramStack(ifgram_file).get_date12_list(dropIfgram=True)
A = ifgramStack.get_design_matrix4timeseries(date12_list)[0]
if weight_func == 'no':
if np.linalg.matrix_rank(A) < A.shape[1]:
print('WARNING: singular design matrix! Inversion result can be biased!')
print('continue using its SVD solution on all pixels')
else:
if np.linalg.matrix_rank(A) < A.shape[1]:
print('ERROR: singular design matrix!')
print(' Input network of interferograms is not fully connected!')
print(' Can not invert the weighted least square solution.')
print('You could try:')
print(' 1) Add more interferograms to make the network fully connected:')
print(' a.k.a., no multiple subsets nor network islands')
print(" 2) Use '-w no' option for non-weighted SVD solution.")
raise Exception()
return A
def read_unwrap_phase(stack_obj, box, ref_phase, obs_ds_name='unwrapPhase', dropIfgram=True,
print_msg=True):
"""Read unwrapPhase from ifgramStack file
Parameters: stack_obj - ifgramStack object
box - tuple of 4 int
ref_phase - 1D array or None
Returns: pha_data - 2D array of unwrapPhase in size of (num_ifgram, num_pixel)
"""
# Read unwrapPhase
num_ifgram = stack_obj.get_size(dropIfgram=dropIfgram)[0]
if print_msg:
print('reading {} in {} * {} ...'.format(obs_ds_name, box, num_ifgram))
pha_data = stack_obj.read(datasetName=obs_ds_name,
box=box,
dropIfgram=dropIfgram,
print_msg=False).reshape(num_ifgram, -1)
pha_data[np.isnan(pha_data)] = 0.
# read ref_phase
if ref_phase is not None:
# use input ref_phase array
if print_msg:
print('use input reference phase')
elif 'refPhase' in stack_obj.datasetNames:
# read refPhase from file itself
if print_msg:
print('read reference phase from file')
with h5py.File(stack_obj.file, 'r') as f:
ref_phase = f['refPhase'][:]
else:
raise Exception('No reference phase input/found on file!'+
' unwrapped phase is not referenced!')
# reference unwrapPhase
for i in range(num_ifgram):
mask = pha_data[i, :] != 0.
pha_data[i, :][mask] -= ref_phase[i]
return pha_data
def mask_unwrap_phase(pha_data, stack_obj, box, mask_ds_name=None, mask_threshold=0.4,
dropIfgram=True, print_msg=True):
"""
Mask input unwrapped phase.
"""
# Read/Generate Mask
num_ifgram = stack_obj.get_size(dropIfgram=dropIfgram)[0]
if mask_ds_name and mask_ds_name in stack_obj.datasetNames:
if print_msg:
print('reading {} in {} * {} ...'.format(mask_ds_name, box, num_ifgram))
msk_data = stack_obj.read(datasetName=mask_ds_name,
box=box,
dropIfgram=dropIfgram,
print_msg=False).reshape(num_ifgram, -1)
# set all NaN values in coherence, connectComponent, offsetSNR to zero
# to avoid RuntimeWarning msg during math operation
msk_data[np.isnan(msk_data)] = 0
if mask_ds_name in ['coherence', 'offsetSNR']:
msk_data = msk_data >= mask_threshold
if print_msg:
print('mask out pixels with {} < {} by setting them to NaN'.format(mask_ds_name, mask_threshold))
elif mask_ds_name in ['connectComponent']:
if print_msg:
print('mask out pixels with {} == 0 by setting them to NaN'.format(mask_ds_name))
# set values of mask-out pixels to NaN
pha_data[msk_data == 0.] = np.nan
del msk_data
return pha_data
def read_coherence(stack_obj, box, dropIfgram=True, print_msg=True):
"""
Read spatial coherence
"""
num_ifgram = stack_obj.get_size(dropIfgram=dropIfgram)[0]
if print_msg:
print('reading coherence in {} * {} ...'.format(box, num_ifgram))
coh_data = stack_obj.read(datasetName='coherence',
box=box,
dropIfgram=dropIfgram,
print_msg=False).reshape(num_ifgram, -1)
coh_data[np.isnan(coh_data)] = 0.
return coh_data
def calc_weight(stack_obj, box, weight_func='var', dropIfgram=True, chunk_size=100000):
"""Read coherence and calculate weight from it, chunk by chunk to save memory
"""
print('calculating weight from spatial coherence ...')
# read coherence
weight = read_coherence(stack_obj, box=box, dropIfgram=dropIfgram)
num_pixel = weight.shape[1]
if 'NCORRLOOKS' in stack_obj.metadata.keys():
L = float(stack_obj.metadata['NCORRLOOKS'])
else:
# use the typical ratio of resolution vs pixel size of Sentinel-1 IW mode
L = int(stack_obj.metadata['ALOOKS']) * int(stack_obj.metadata['RLOOKS'])
L /= 1.94
# make sure L >= 1
L = max(np.rint(L).astype(int), 1)
# convert coherence to weight chunk-by-chunk to save memory
num_chunk = int(np.ceil(num_pixel / chunk_size))
print(('convert coherence to weight in chunks of {c} pixels'
': {n} chunks in total ...').format(c=chunk_size, n=num_chunk))
for i in range(num_chunk):
c0 = i * chunk_size
c1 = min((i + 1) * chunk_size, num_pixel)
if i == 0:
print_msg = True
else:
print_msg = False
# calc weight from coherence
weight[:, c0:c1] = decor.coherence2weight(weight[:, c0:c1],
weight_func,
L=L,
epsilon=5e-2,
print_msg=print_msg)
weight[:, c0:c1] = np.sqrt(weight[:, c0:c1])
# print out message
if (i+1) % 1 == 0:
print('chunk {} / {}'.format(i+1, num_chunk))
return weight
def ifgram_inversion_patch(ifgram_file, box=None, ref_phase=None, obs_ds_name='unwrapPhase',
weight_func='var', water_mask_file=None, min_norm_velocity=True,
mask_ds_name=None, mask_threshold=0.4, min_redundancy=1.0):
"""Invert one patch of an ifgram stack into timeseries.
Parameters: box - tuple of 4 int, indicating (x0, y0, x1, y1) of the area of interest
or None for the whole image
ifgram_file - str, interferograms stack HDF5 file, e.g. ./inputs/ifgramStack.h5
ref_phase - 1D array in size of (num_ifgram), or None
obs_ds_name - str, dataset to feed the inversion.
weight_func - str, weight function, choose in ['no', 'fim', 'var', 'coh']
water_mask_file - str, water mask filename if available, to skip inversion on water
min_norm_velocity - bool, minimize the residual phase or phase velocity
mask_ds_name - str, dataset name in ifgram_file used to mask unwrapPhase pixelwisely
mask_threshold - float, min coherence of pixels if mask_dataset_name='coherence'
min_redundancy - float, the min number of ifgrams for every acquisition.
Returns: ts - 3D array in size of (num_date, num_row, num_col)
temp_coh - 2D array in size of (num_row, num_col)
num_inv_ifg - 2D array in size of (num_row, num_col)
box - tuple of 4 int
Example: ifgram_inversion_patch('ifgramStack.h5', box=(0,200,1316,400))
"""
stack_obj = ifgramStack(ifgram_file)
stack_obj.open(print_msg=False)
# debug
#y, x = 258, 454
#box = (x, y, x+1, y+1)
## 1. input info
# size
if box:
num_row = box[3] - box[1]
num_col = box[2] - box[0]
else:
num_row = stack_obj.length
num_col = stack_obj.width
num_pixel = num_row * num_col
# get tbase_diff in the unit of year
date_list = stack_obj.get_date_list(dropIfgram=True)
num_date = len(date_list)
tbase = np.array(ptime.date_list2tbase(date_list)[0], np.float32) / 365.25
tbase_diff = np.diff(tbase).reshape(-1, 1)
# design matrix
date12_list = stack_obj.get_date12_list(dropIfgram=True)
A, B = stack_obj.get_design_matrix4timeseries(date12_list=date12_list)[0:2]
# prep for decor std time-series
#if os.path.isfile('reference_date.txt'):
# ref_date = str(np.loadtxt('reference_date.txt', dtype=bytes).astype(str))
#else:
# ref_date = date_list[0]
#Astd = stack_obj.get_design_matrix4timeseries(date12_list=date12_list, refDate=ref_date)[0]
#ref_idx = date_list.index(ref_date)
#time_idx = [i for i in range(num_date)]
#time_idx.remove(ref_idx)
    # 1.1 read / calculate weight
if weight_func in ['no', 'sbas']:
weight = None
else:
weight = calc_weight(stack_obj,
box,
weight_func=weight_func,
dropIfgram=True,
chunk_size=100000)
# 1.2 read / mask unwrapPhase / offset
pha_data = read_unwrap_phase(stack_obj,
box,
ref_phase,
obs_ds_name=obs_ds_name,
dropIfgram=True)
# translate zero phase value to nan (no-data value)
    # because it's the common filled value used in phase masking
if 'phase' in obs_ds_name.lower():
pha_data[pha_data == 0.] = np.nan
print('convert zero value in {} to NaN (no-data value)'.format(obs_ds_name))
pha_data = mask_unwrap_phase(pha_data,
stack_obj,
box,
dropIfgram=True,
mask_ds_name=mask_ds_name,
mask_threshold=mask_threshold)
# 1.3 mask of pixels to invert
mask = np.ones(num_pixel, np.bool_)
# 1.3.1 - Water Mask
if water_mask_file:
print('skip pixels (on the water) with zero value in file: {}'.format(os.path.basename(water_mask_file)))
atr_msk = readfile.read_attribute(water_mask_file)
len_msk, wid_msk = int(atr_msk['LENGTH']), int(atr_msk['WIDTH'])
if (len_msk, wid_msk) != (stack_obj.length, stack_obj.width):
raise ValueError('Input water mask file has different size from ifgramStack file.')
dsNames = readfile.get_dataset_list(water_mask_file)
dsName = [i for i in dsNames if i in ['waterMask', 'mask']][0]
waterMask = readfile.read(water_mask_file, datasetName=dsName, box=box)[0].flatten()
mask *= np.array(waterMask, dtype=np.bool_)
del waterMask
# 1.3.2 - Mask for NaN value in ALL ifgrams
print('skip pixels with {} = NaN in all interferograms'.format(obs_ds_name))
mask *= ~np.all(np.isnan(pha_data), axis=0)
# 1.3.3 Mask for zero quality measure (average spatial coherence/SNR)
# usually due to lack of data in the processing
if 'phase' in obs_ds_name.lower():
quality_file = os.path.join(os.path.dirname(ifgram_file), '../avgSpatialCoh.h5')
elif 'offset' in obs_ds_name.lower():
quality_file = os.path.join(os.path.dirname(ifgram_file), '../avgSpatialSnr.h5')
else:
quality_file = None
if quality_file and os.path.isfile(quality_file):
print('skip pixels with zero value in file: {}'.format(os.path.basename(quality_file)))
quality = readfile.read(quality_file, box=box)[0].flatten()
mask *= quality != 0.
del quality
# invert pixels on mask 1+2
num_pixel2inv = int(np.sum(mask))
idx_pixel2inv = np.where(mask)[0]
print('number of pixels to invert: {} out of {} ({:.1f}%)'.format(
num_pixel2inv, num_pixel, num_pixel2inv/num_pixel*100))
## 2. inversion
# 2.1 initiale the output matrices
ts = np.zeros((num_date, num_pixel), np.float32)
#ts_std = np.zeros((num_date, num_pixel), np.float32)
temp_coh = np.zeros(num_pixel, np.float32)
num_inv_ifg = np.zeros(num_pixel, np.int16)
# return directly if there is nothing to invert
if num_pixel2inv < 1:
ts = ts.reshape(num_date, num_row, num_col)
#ts_std = ts_std.reshape(num_date, num_row, num_col)
temp_coh = temp_coh.reshape(num_row, num_col)
num_inv_ifg = num_inv_ifg.reshape(num_row, num_col)
return ts, temp_coh, num_inv_ifg, box
# 2.2 un-weighted inversion (classic SBAS)
if weight_func in ['no', 'sbas']:
# a. split mask into mask_all/part_net
# mask for valid (~NaN) observations in ALL ifgrams (share one B in sbas inversion)
mask_all_net = np.all(~np.isnan(pha_data), axis=0)
mask_all_net *= mask
mask_part_net = mask ^ mask_all_net
del mask
# b. invert once for all pixels with obs in all ifgrams
if np.sum(mask_all_net) > 0:
print(('inverting pixels with valid phase in all ifgrams'
' ({:.0f} pixels; {:.1f}%) ...').format(np.sum(mask_all_net),
np.sum(mask_all_net)/num_pixel2inv*100))
tsi, tcohi, num_ifgi = estimate_timeseries(A, B, tbase_diff,
ifgram=pha_data[:, mask_all_net],
weight_sqrt=None,
min_norm_velocity=min_norm_velocity,
min_redundancy=min_redundancy)
ts[:, mask_all_net] = tsi
temp_coh[mask_all_net] = tcohi
num_inv_ifg[mask_all_net] = num_ifgi
# c. pixel-by-pixel for pixels with obs not in all ifgrams
if np.sum(mask_part_net) > 0:
print(('inverting pixels with valid phase in some ifgrams'
' ({:.0f} pixels; {:.1f}%) ...').format(np.sum(mask_part_net),
                                                           np.sum(mask_part_net)/num_pixel2inv*100))
num_pixel2inv = int(np.sum(mask_part_net))
idx_pixel2inv = np.where(mask_part_net)[0]
prog_bar = ptime.progressBar(maxValue=num_pixel2inv)
for i in range(num_pixel2inv):
idx = idx_pixel2inv[i]
tsi, tcohi, num_ifgi = estimate_timeseries(A, B, tbase_diff,
ifgram=pha_data[:, idx],
weight_sqrt=None,
min_norm_velocity=min_norm_velocity,
min_redundancy=min_redundancy)
ts[:, idx] = tsi.flatten()
temp_coh[idx] = tcohi
num_inv_ifg[idx] = num_ifgi
prog_bar.update(i+1, every=2000, suffix='{}/{} pixels'.format(i+1, num_pixel2inv))
prog_bar.close()
# 2.3 weighted inversion - pixel-by-pixel
else:
print('inverting network of interferograms into time-series ...')
prog_bar = ptime.progressBar(maxValue=num_pixel2inv)
for i in range(num_pixel2inv):
idx = idx_pixel2inv[i]
tsi, tcohi, num_ifgi = estimate_timeseries(A, B, tbase_diff,
ifgram=pha_data[:, idx],
weight_sqrt=weight[:, idx],
min_norm_velocity=min_norm_velocity,
min_redundancy=min_redundancy)
ts[:, idx] = tsi.flatten()
temp_coh[idx] = tcohi
num_inv_ifg[idx] = num_ifgi
prog_bar.update(i+1, every=2000, suffix='{}/{} pixels'.format(i+1, num_pixel2inv))
prog_bar.close()
del weight
del pha_data
## 3. prepare output
# 3.1 reshape
ts = ts.reshape(num_date, num_row, num_col)
#ts_std = ts_std.reshape(num_date, num_row, num_col)
temp_coh = temp_coh.reshape(num_row, num_col)
num_inv_ifg = num_inv_ifg.reshape(num_row, num_col)
# 3.2 convert displacement unit to meter
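    # For phase observations, displacement = phase * (-wavelength / (4 * pi)); the negative
    # sign follows the convention that motion toward the satellite (range decrease) is positive.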
if obs_ds_name.startswith('unwrapPhase'):
phase2range = -1 * float(stack_obj.metadata['WAVELENGTH']) / (4.*np.pi)
ts *= phase2range
print('converting LOS phase unit from radian to meter')
elif obs_ds_name == 'azimuthOffset':
az_pixel_size = ut.azimuth_ground_resolution(stack_obj.metadata)
az_pixel_size /= float(stack_obj.metadata['ALOOKS'])
ts *= az_pixel_size
print('converting azimuth offset unit from pixel ({:.2f} m) to meter'.format(az_pixel_size))
elif obs_ds_name == 'rangeOffset':
rg_pixel_size = float(stack_obj.metadata['RANGE_PIXEL_SIZE'])
rg_pixel_size /= float(stack_obj.metadata['RLOOKS'])
ts *= rg_pixel_size
print('converting range offset unit from pixel ({:.2f} m) to meter'.format(rg_pixel_size))
return ts, temp_coh, num_inv_ifg, box
def ifgram_inversion(inps=None):
"""Phase triangulatino of small baseline interferograms
Parameters: inps - namespace
Example: inps = cmd_line_parse()
ifgram_inversion(inps)
"""
if not inps:
inps = cmd_line_parse()
start_time = time.time()
## 1. input info
stack_obj = ifgramStack(inps.ifgramStackFile)
stack_obj.open(print_msg=False)
date12_list = stack_obj.get_date12_list(dropIfgram=True)
date_list = stack_obj.get_date_list(dropIfgram=True)
length, width = stack_obj.length, stack_obj.width
# 1.1 read values on the reference pixel
inps.refPhase = stack_obj.get_reference_phase(unwDatasetName=inps.obsDatasetName,
skip_reference=inps.skip_ref,
dropIfgram=True)
# 1.2 design matrix
A = stack_obj.get_design_matrix4timeseries(date12_list)[0]
num_ifgram, num_date = A.shape[0], A.shape[1]+1
inps.numIfgram = num_ifgram
# 1.3 print key setup info
msg = '-------------------------------------------------------------------------------\n'
if inps.minNormVelocity:
suffix = 'deformation velocity'
else:
suffix = 'deformation phase'
msg += 'least-squares solution with L2 min-norm on: {}\n'.format(suffix)
msg += 'minimum redundancy: {}\n'.format(inps.minRedundancy)
msg += 'weight function: {}\n'.format(inps.weightFunc)
if inps.maskDataset:
if inps.maskDataset in ['coherence', 'offsetSNR']:
suffix = '{} < {}'.format(inps.maskDataset, inps.maskThreshold)
else:
suffix = '{} == 0'.format(inps.maskDataset)
msg += 'mask out pixels with: {}\n'.format(suffix)
else:
msg += 'mask: no\n'
if np.linalg.matrix_rank(A) < A.shape[1]:
msg += '***WARNING: the network is NOT fully connected.\n'
msg += '\tInversion result can be biased!\n'
msg += '\tContinue to use SVD to resolve the offset between different subsets.\n'
msg += '-------------------------------------------------------------------------------'
print(msg)
print('number of interferograms: {}'.format(num_ifgram))
print('number of acquisitions : {}'.format(num_date))
print('number of lines : {}'.format(length))
print('number of columns : {}'.format(width))
## 2. prepare output
# 2.1 metadata
meta = dict(stack_obj.metadata)
for key in configKeys:
meta[key_prefix+key] = str(vars(inps)[key])
meta['FILE_TYPE'] = 'timeseries'
meta['UNIT'] = 'm'
meta['REF_DATE'] = date_list[0]
# 2.2 instantiate time-series
dates = np.array(date_list, dtype=np.string_)
pbase = stack_obj.get_perp_baseline_timeseries(dropIfgram=True)
ds_name_dict = {
"date" : [dates.dtype, (num_date,), dates],
"bperp" : [np.float32, (num_date,), pbase],
"timeseries" : [np.float32, (num_date, length, width), None],
}
writefile.layout_hdf5(inps.tsFile, ds_name_dict, meta)
# 2.3 instantiate temporal coherence
meta['FILE_TYPE'] = 'temporalCoherence'
meta['UNIT'] = '1'
meta.pop('REF_DATE')
ds_name_dict = {"temporalCoherence" : [np.float32, (length, width)]}
writefile.layout_hdf5(inps.tempCohFile, ds_name_dict, metadata=meta)
# 2.4 instantiate number of inverted observations
meta['FILE_TYPE'] = 'mask'
meta['UNIT'] = '1'
ds_name_dict = {"mask" : [np.float32, (length, width)]}
writefile.layout_hdf5(inps.numInvFile, ds_name_dict, metadata=meta)
## 3. run the inversion / estimation and write to disk
# 3.1 split ifgram_file into blocks to save memory
box_list, num_box = split2boxes(inps.ifgramStackFile, max_memory=inps.maxMemory)
# 3.2 prepare the input arguments for *_patch()
data_kwargs = {
"ifgram_file" : inps.ifgramStackFile,
"ref_phase" : inps.refPhase,
"obs_ds_name" : inps.obsDatasetName,
"weight_func" : inps.weightFunc,
"min_norm_velocity" : inps.minNormVelocity,
"water_mask_file" : inps.waterMaskFile,
"mask_ds_name" : inps.maskDataset,
"mask_threshold" : inps.maskThreshold,
"min_redundancy" : inps.minRedundancy
}
# 3.3 invert / write block-by-block
for i, box in enumerate(box_list):
box_wid = box[2] - box[0]
box_len = box[3] - box[1]
if num_box > 1:
print('\n------- processing patch {} out of {} --------------'.format(i+1, num_box))
print('box width: {}'.format(box_wid))
print('box length: {}'.format(box_len))
# update box argument in the input data
data_kwargs['box'] = box
if not inps.cluster:
# non-parallel
ts, temp_coh, num_inv_ifg = ifgram_inversion_patch(**data_kwargs)[:-1]
else:
# parallel
print('\n\n------- start parallel processing using Dask -------')
# initiate the output data
ts = np.zeros((num_date, box_len, box_wid), np.float32)
temp_coh = np.zeros((box_len, box_wid), np.float32)
num_inv_ifg = np.zeros((box_len, box_wid), np.float32)
# initiate dask cluster and client
cluster_obj = cluster.DaskCluster(inps.cluster, inps.numWorker, config_name=inps.config)
cluster_obj.open()
# run dask
ts, temp_coh, num_inv_ifg = cluster_obj.run(func=ifgram_inversion_patch,
func_data=data_kwargs,
results=[ts, temp_coh, num_inv_ifg])
# close dask cluster and client
cluster_obj.close()
print('------- finished parallel processing -------\n\n')
# write the block to disk
# with 3D block in [z0, z1, y0, y1, x0, x1]
# and 2D block in [y0, y1, x0, x1]
# time-series - 3D
block = [0, num_date, box[1], box[3], box[0], box[2]]
writefile.write_hdf5_block(inps.tsFile,
data=ts,
datasetName='timeseries',
block=block)
# temporal coherence - 2D
block = [box[1], box[3], box[0], box[2]]
writefile.write_hdf5_block(inps.tempCohFile,
data=temp_coh,
datasetName='temporalCoherence',
block=block)
# number of inverted obs - 2D
writefile.write_hdf5_block(inps.numInvFile,
data=num_inv_ifg,
datasetName='mask',
block=block)
m, s = divmod(time.time() - start_time, 60)
print('time used: {:02.0f} mins {:02.1f} secs.\n'.format(m, s))
# 3.4 update output data on the reference pixel
if not inps.skip_ref:
# grab ref_y/x
ref_y = int(stack_obj.metadata['REF_Y'])
ref_x = int(stack_obj.metadata['REF_X'])
print('-'*50)
print('update values on the reference pixel: ({}, {})'.format(ref_y, ref_x))
print('set temporal coherence on the reference pixel to 1.')
with h5py.File(inps.tempCohFile, 'r+') as f:
f['temporalCoherence'][ref_y, ref_x] = 1.
print('set # of observations on the reference pixel as {}'.format(num_ifgram))
with h5py.File(inps.numInvFile, 'r+') as f:
f['mask'][ref_y, ref_x] = num_ifgram
m, s = divmod(time.time() - start_time, 60)
print('time used: {:02.0f} mins {:02.1f} secs.\n'.format(m, s))
return
################################################################################################
def main(iargs=None):
inps = cmd_line_parse(iargs)
# --update option
if inps.update_mode and run_or_skip(inps) == 'skip':
return inps.outfile
# Network Inversion
if inps.residualNorm == 'L2':
ifgram_inversion(inps)
else:
raise NotImplementedError('L1 norm minimization is not fully tested.')
#ut.timeseries_inversion_L1(inps.ifgramStackFile, inps.tsFile)
return inps.outfile
################################################################################################
if __name__ == '__main__':
main(sys.argv[1:])
| [
"invert_network",
"{}"
] |
2024-01-10 | HAL22/generateCoverLetter | generate.py | import os
import streamlit as st
import pinecone
import string
import random
from langchain.llms import OpenAI
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.prompts import PromptTemplate
from langchain.document_loaders import PyPDFLoader
from langchain.vectorstores import Pinecone
from langchain.chains import LLMChain
pine_cone_name = "coverletter"
def fill_keys(openAIKey,pineconeAPIKey,pineconeEnv):
os.environ['OPENAI_API_KEY'] = openAIKey
os.environ['PINECONE_API_KEY'] = pineconeAPIKey
os.environ['PINECONE_ENV'] = pineconeEnv
def get_index(filename):
embeddings = OpenAIEmbeddings(model="text-embedding-ada-002")
pinecone.init(
api_key=os.getenv('PINECONE_API_KEY'),
environment=os.getenv('PINECONE_ENV')
)
pinecone.create_index(pine_cone_name, dimension=1536)
loader = PyPDFLoader(filename)
pages = loader.load_and_split()
return Pinecone.from_documents(pages, embeddings, index_name=pine_cone_name)
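# Example flow (hypothetical file/key names; assumes valid OpenAI and Pinecone credentials):
#   fill_keys(openai_key, pinecone_key, pinecone_env)
#   index = get_index("resume.pdf")
#   letter = generate_cover_letter(index, "Jane Doe")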
def generate_cover_letter(index,name,temp=0.1):
prompt_template = """Use the context below to write a cover letter:
Context: {context}
Cover letter:"""
PROMPT = PromptTemplate(template=prompt_template, input_variables=["context"])
llm = OpenAI(temperature=temp, verbose=True)
chain = LLMChain(llm=llm, prompt=PROMPT)
docs = index.similarity_search(name, k=4)
inputs = [{"context": doc.page_content} for doc in docs]
letter = chain.apply(inputs)
pinecone.delete_index(pine_cone_name)
return letter[0]["text"] | [
"context",
"Use the context below to write a cover letter:\n Context: {context}\n Cover letter:"
] |
2024-01-10 | andrewhinh/structured | gen_data.py | import os
from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage, SystemMessage
load_dotenv()
OUTPUT_FILE = "data.jsonl"
CHAT = ChatOpenAI(temperature=0, openai_api_key=os.getenv("OPENAI_API_KEY"))
messages = [
HumanMessage(
content="Come up with a question as follows:"
),
]
CHAT(messages).content
| [
"Come up with a question as follows:"
] |
2024-01-10 | alejandro-kid/question-answer-api | src~api~process.py | import fitz
import os
import openai
from openai import ChatCompletion
from flask import Response, current_app, json
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.embeddings import GPT4AllEmbeddings
from langchain.llms import GPT4All
from langchain.chains.question_answering import load_qa_chain
from config.settings import LLMS_MODEL_PATH, OPENAI_API_KEY
def process_files(document):
try:
if os.listdir(get_temp_folder()):
os.remove(
get_temp_folder() + "/" + os.listdir(get_temp_folder())[0]
)
temp_file = os.path.join(get_temp_folder(), document.filename)
document.save(temp_file)
data = {
"success": True,
"message": "Document processed successfully"
}
response = Response(json.dumps(data), 200, mimetype='application/json')
except Exception as e:
data = {
"success": False,
"error_message": str(e)
}
response = Response(json.dumps(data), 500, mimetype='application/json')
return response
def query(query_text):
try:
docs = get_similarity_search(query_text)
full_text = [ doc.page_content for doc in docs]
data = {
"success": True,
"message": "Query processed successfully",
"full_text": full_text
}
response = Response(json.dumps(data), 200, mimetype='application/json')
except Exception as e:
data = {
"success": False,
"error_message": str(e)
}
response = Response(json.dumps(data), 500, mimetype='application/json')
return response
def query_plus(query_text):
try:
docs = get_similarity_search(query_text)
llm = GPT4All(model=LLMS_MODEL_PATH, backend="gptj", verbose=False)
chain = load_qa_chain(llm, chain_type="stuff")
        res = chain.run(input_documents=docs, question=query_text)
data = {
"success": True,
"message": "Query processed successfully",
"answer": res
}
response = Response(json.dumps(data), 200, mimetype='application/json')
except Exception as e:
data = {
"success": False,
"error_message": str(e)
}
response = Response(json.dumps(data), 500, mimetype='application/json')
return response
def query_chatgpt(query_text):
try:
docs = get_similarity_search(query_text)
full_text = [ doc.page_content for doc in docs]
openai.api_key = OPENAI_API_KEY
systemContent = ("You are a helpful assistant.You get your knowledge from the "
"following information delimited between three ticks.\n"
"```{}```\n The user will ask you questions about this"
" information and you should reply in a concise way. If you "
"can't deduce the answer using only this provided information,"
" just say you don't have the knowledge.".format(full_text))
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": systemContent},
{"role": "user", "content": query_text}
]
)
data = {
"success": True,
"message": "Query processed successfully",
"answer": completion.choices[0].message
}
response = Response(json.dumps(data), 200, mimetype='application/json')
except Exception as e:
data = {
"success": False,
"error_message": str(e)
}
response = Response(json.dumps(data), 500, mimetype='application/json')
return response
def get_similarity_search(query_text) -> list[fitz.Document]:
doc = fitz.Document()
if os.listdir(get_temp_folder()):
doc = fitz.Document(
get_temp_folder() + "/" + os.listdir(get_temp_folder())[0]
)
else:
raise Exception("No document found")
embeddings = GPT4AllEmbeddings()
n = doc.page_count
doc_content = ""
for i in range(0, n):
page_n = doc.load_page(i)
page_content = page_n.get_text("text")
doc_content += page_content + "\n"
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size=800,
chunk_overlap=200,
length_function=len
)
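    # 800-character chunks with a 200-character overlap keep each chunk small enough to embed
    # while preserving context that spans chunk boundaries.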
texts = text_splitter.split_text(doc_content)
document_search = FAISS.from_texts(texts, embeddings)
docs = document_search.similarity_search(query_text)
return docs
def get_temp_folder() -> str:
dir = current_app.root_path + "/temp"
if not os.path.exists(dir):
os.makedirs(dir)
return dir
def delete_file(path:str) -> None:
if os.path.exists(path):
os.remove(path)
| [] |
2024-01-10 | hollygrimm/policy-optimization-algos | env_makers.py | """
This project was developed by Rocky Duan, Peter Chen, Pieter Abbeel for the Berkeley Deep RL Bootcamp, August 2017. Bootcamp website with slides and lecture videos: https://sites.google.com/view/deep-rl-bootcamp/.
Copyright 2017 Deep RL Bootcamp Organizers.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
import numpy as np
import cv2
import gym
from collections import deque
from gym import spaces
import gym.envs.registration
from gym.envs.atari.atari_env import AtariEnv
import logger
import os
#import roboschool
import tempfile
import gym.wrappers.monitoring
# Silence the log messages
gym.envs.registration.logger.setLevel(logging.WARNING)
gym.wrappers.monitoring.logger.setLevel(logging.WARNING)
class EnvMaker(object):
def __init__(self, env_id):
self.env_id = env_id
def make(self, video_callable=False):
env = gym.make(self.env_id)
if logger.get_dir() is not None:
monitor_dir = os.path.join(logger.get_dir(), "gym_monitor")
resume = True
force = False
else:
monitor_dir = "/tmp/gym-monitoring"
resume = False
force = True
env = gym.wrappers.Monitor(env, directory=monitor_dir, video_callable=video_callable, force=force, resume=resume,
write_upon_reset=True)
if isinstance(env.unwrapped, AtariEnv):
if '-ram-' in self.env_id:
assert 'NoFrameskip' not in self.env_id
env = ScaledFloatFrame(env)
else:
env = ScaledFloatFrame(wrap_atari_pg(env))
return env
# Code below are adopted from OpenAI Baselines:
# https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers_deprecated.py
class NoopResetEnv(gym.Wrapper):
def __init__(self, env=None, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
super(NoopResetEnv, self).__init__(env)
self.noop_max = noop_max
self.override_num_noops = None
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def _reset(self):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset()
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = np.random.randint(1, self.noop_max + 1)
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(0)
if done:
obs = self.env.reset()
return obs
class NormalizeActionWrapper(gym.Wrapper):
def __init__(self, env):
super().__init__(env)
if isinstance(self.env.action_space, gym.spaces.Box):
self.action_space = gym.spaces.Box(
low=-1, high=1, shape=self.env.action_space.shape)
else:
self.action_space = self.env.action_space
def step(self, action):
if not isinstance(self.action_space, gym.spaces.Box):
return super().step(action)
# rescale action
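        # Affine map from the normalized range [-1, 1] onto [low, high]:
        #   rescaled = (action + 1) / 2 * (high - low) + low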
low = self.env.action_space.low
high = self.env.action_space.high
action = np.asarray(action)
rescaled_action = (action + 1) / 2 * (high - low) + low
return super().step(rescaled_action)
class FireResetEnv(gym.Wrapper):
def __init__(self, env=None):
"""For environments where the user need to press FIRE for the game to start."""
super(FireResetEnv, self).__init__(env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def _reset(self):
self.env.reset()
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset()
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset()
return obs
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env=None):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
super(EpisodicLifeEnv, self).__init__(env)
self.lives = 0
self.was_real_done = True
self.was_real_reset = False
def _step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
            # for Qbert sometimes we stay in the lives == 0 condition for a few frames
            # so it's important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def _reset(self):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset()
self.was_real_reset = True
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.was_real_reset = False
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env=None, skip=4):
"""Return only every `skip`-th frame"""
super(MaxAndSkipEnv, self).__init__(env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = deque(maxlen=2)
self._skip = skip
def _step(self, action):
total_reward = 0.0
done = None
for _ in range(self._skip):
obs, reward, done, info = self.env.step(action)
self._obs_buffer.append(obs)
total_reward += reward
if done:
break
max_frame = np.max(np.stack(self._obs_buffer), axis=0)
return max_frame, total_reward, done, info
def _reset(self):
"""Clear past frame buffer and init. to first obs. from inner env."""
self._obs_buffer.clear()
obs = self.env.reset()
self._obs_buffer.append(obs)
return obs
class ProcessFrame84(gym.ObservationWrapper):
def __init__(self, env=None):
super(ProcessFrame84, self).__init__(env)
self.observation_space = spaces.Box(low=0, high=255, shape=(84, 84, 1))
def _observation(self, obs):
return ProcessFrame84.process(obs)
@staticmethod
def process(frame):
if frame.size == 210 * 160 * 3:
img = np.reshape(frame, [210, 160, 3]).astype(np.float32)
elif frame.size == 250 * 160 * 3:
img = np.reshape(frame, [250, 160, 3]).astype(np.float32)
else:
assert False, "Unknown resolution."
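        # RGB -> grayscale using the ITU-R BT.601 luma weights (0.299, 0.587, 0.114).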
img = img[:, :, 0] * 0.299 + img[:, :, 1] * \
0.587 + img[:, :, 2] * 0.114
resized_screen = cv2.resize(
img, (84, 110), interpolation=cv2.INTER_AREA)
x_t = resized_screen[18:102, :]
x_t = np.reshape(x_t, [84, 84, 1])
return x_t.astype(np.uint8)
class ClippedRewardsWrapper(gym.RewardWrapper):
def _reward(self, reward):
"""Change all the positive rewards to 1, negative to -1 and keep zero."""
return np.sign(reward)
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
        You'd not believe how complex the previous solution was."""
self._frames = frames
def __array__(self, dtype=None):
out = np.concatenate(self._frames, axis=2)
if dtype is not None:
out = out.astype(dtype)
return out
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
baselines.common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(
low=0, high=255, shape=(shp[0], shp[1], shp[2] * k))
def _reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def _step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
def _observation(self, obs):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(obs).astype(np.float32) / 255.0
def wrap_atari_pg(env):
"""Apply a common set of wrappers for Atari games."""
assert 'NoFrameskip' in env.spec.id
env = EpisodicLifeEnv(env)
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = ProcessFrame84(env)
env = FrameStack(env, 4)
env = ClippedRewardsWrapper(env)
return env
| [] |
2024-01-10 | itissid/Drop-PoT | tests~integration~test_message_to_api.py | import os
import unittest
from typing import Any, Dict, List, Optional, Tuple
import openai
from dotenv import load_dotenv
from drop_backend.lib.ai import AIDriver, AltAI, driver_wrapper
from drop_backend.lib.event_node_manager import BaseEventManager, EventManager
from drop_backend.model.ai_conv_types import (
MessageNode,
Role,
UserExplicitFunctionCall,
)
# Note: If we don't import the full path then the isinstance(weather_obj,
# WeatherEvent) returns False in the __eq__ depending on how the weather_obj was
# created.
from tests.integration.fixtures.weather_event import WeatherEvent
from .fixtures.schema.weather_event_schema import (
weather_event_function_call_param,
)
class NoFunctionCallEventManager(BaseEventManager):
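    # Stub manager for tests: it advertises no function-call spec, so the AI driver never
    # attempts to execute a function on the model's behalf.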
def get_function_call_spec(
self,
):
return None, None
def extract_fn_name(self, ai_message: MessageNode) -> Optional[str]:
return None
def extract_fn_args(
self, ai_message: MessageNode
) -> Tuple[List[Any], Dict[str, Any]]:
return [], {}
def should_call_function(self, ai_message: MessageNode) -> bool:
return False
def call_fn_by_name(self, fn_name: str, *args, **kwargs):
return None, None
class TestSendToOpenAIAPI(unittest.TestCase):
def test_replay_messages_for_function_call_are_separately_maintained(self):
"""
- Send a user message: "Whats the weather in Boston in farenheit?" with
a function call to the AI using driver_wrapper.
- Along with the function call to the AI there should be a message with
the role `function` and the call result.
- The next message is a correction to the earlier message like "Could
you specify the weather in Boston in celsius?"
"""
# Mock out the call to AI and extract the messages and make sure the
pass
def test_no_function_execution(self) -> None:
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")
openai.api_key = api_key
# Lets make a test spec
event_manager = NoFunctionCallEventManager()
driver = driver_wrapper(
events=[
"What's the climate typically like in Boston during October"
],
system_message=MessageNode(
role=Role.system,
message_content="You are helpful assistant. Follow the instructions I give you. Do not respond until I ask you a question.",
),
ai_driver=AIDriver(AltAI(model="gpt-3.5-turbo-16k"), event_manager),
event_manager=event_manager,
user_message_prompt_fn=lambda x: x.raw_event_str,
)
event, _ = next(driver)
assert event.history
self.assertEqual(len(event.history), 3)
self.assertEqual(event.history[0].role, Role.system)
self.assertEqual(event.history[1].role, Role.user)
self.assertEqual(event.history[2].role, Role.assistant)
assert event.history[2].message_content
self.assertGreaterEqual(len(event.history[2].message_content), 1)
print(event.history[2].message_content)
# No function call
self.assertEqual(event.history[2].ai_function_call, None)
def test_event_to_open_ai__user_function_mandate_is_obeyed(self) -> None:
"""If MessageNode.role == user and MessageNode.message_function_call and MessageNode.explicit_function_call_spec are not null then the
next message must have a role `function` with call result. The message after that is an assistant message.
This is done by calling the AI and getting the actual response from it.
As an example lets test the weather function.
"""
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")
openai.api_key = api_key
import sys
print(sys.path)
print(", ".join([i for i in sys.modules.keys() if "drop_backend" in i]))
event_manager = EventManager(
"WeatherEvent",
"tests.integration.fixtures",
"tests.integration.fixtures.schema",
)
# def weather_fn_call_wrapper(ai_message: MessageNode) -> Tuple[Any, str]:
# assert (
# ai_message.ai_function_call is not None
# and ai_message.ai_function_call.arguments is not None
# )
# return get_current_weather(
# location=ai_message.ai_function_call.arguments.get("location"),
# unit=ai_message.ai_function_call.arguments.get("unit"),
# )
events = [
"What's the weather like in Boston, MA in farenheit? Make sure the location is qualified by the 2 letter state code."
]
driver = driver_wrapper(
events=events,
system_message=MessageNode(
role=Role.system,
message_content="You are helpful assistant. Follow the instructions I give you. Do not respond until I ask you a question.",
),
ai_driver=AIDriver(AltAI(model="gpt-3.5-turbo-16k"), event_manager),
event_manager=event_manager,
user_message_prompt_fn=lambda x: x.raw_event_str,
)
event, _ = next(driver)
assert event.history
self.assertEqual(len(event.history), 4)
self.assertEqual(event.history[0].role, Role.system)
self.assertEqual(
event.history[0].message_content,
"You are helpful assistant. Follow the instructions I give you. Do not respond until I ask you a question.",
)
self.assertEqual(event.history[1].role, Role.user)
self.assertEqual(
event.history[1].message_content,
events[0],
)
# MessageNode's functions is set and explicit_function_call is also set
fn_call_specs, _ = weather_event_function_call_param()
assert event.history[1].functions == fn_call_specs
self.assertEqual(
event.history[1].explicit_fn_call,
UserExplicitFunctionCall(name="get_current_weather"),
)
self.assertEqual(event.history[2].role, Role.assistant)
assert event.history[2].ai_function_call
self.assertEqual(
event.history[2].ai_function_call.name, "get_current_weather"
)
self.assertEqual(event.history[2].message_content, "")
self.assert_dicts_equal_for_some_keys(
event.history[2].ai_function_call.model_dump()["arguments"],
{"location": "Boston, MA", "unit": "fahrenheit", "temperature": 72},
keys=["location", "unit"],
keys_in_both=["temperature"],
)
self.assertEqual(event.history[3].role, Role.function)
self.assertEqual(
event.history[3].ai_function_call_result_name, "get_current_weather"
)
print(event.history[3].ai_function_call_result)
self.assertEqual(
event.event_obj,
WeatherEvent(
**{ # type: ignore[arg-type]
"location": "Boston, MA",
"temperature": int(event.event_obj.temperature),
"unit": "fahrenheit",
}
),
)
@staticmethod
def assert_dicts_equal_for_some_keys(dict1, dict2, keys, keys_in_both):
"""Asserts that the values of the given keys in the two dictionaries are equal.
Args:
dict1: The first dictionary.
dict2: The second dictionary.
keys: A list of keys to compare.
"""
for key in keys_in_both:
assert key in dict1
assert key in dict2
for key in keys:
assert dict1[key] == dict2[key] or dict1[key] is dict2[key]
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(TestSendToOpenAIAPI)
runner = unittest.TextTestRunner()
try:
runner.run(suite)
except Exception: # pylint: disable=broad-except
import pdb # type: ignore
pdb.post_mortem()
| [] |
2024-01-10 | stanleywalker1/Interactive-Lab-Hub | Lab%203~demo~text_speech_utils.py | # text_speech_utils.py
import openai
import sounddevice as sd
import audiofile as af
from scipy.io.wavfile import write
from gtts import gTTS
import multiprocessing
import pyttsx3
import keyboard
def say(text):
audio_filename = "temp_speech_output.mp3"
myobj = gTTS(text=text, lang='en', slow=False)
myobj.save(audio_filename)
play_audio(audio_filename)
def record_audio(filename, sec, sr = 44100):
audio = sd.rec(int(sec * sr), samplerate=sr, channels=1, blocking=False)
sd.wait()
write(filename, sr, audio)
def record_audio_manual(filename, sr = 44100):
input(" ** Press enter to start recording **")
audio = sd.rec(int(10 * sr), samplerate=sr, channels=1)
input(" ** Press enter to stop recording **")
sd.stop()
write(filename, sr, audio)
def play_audio(filename):
signal, sr = af.read(filename)
sd.play(signal, sr)
def transcribe_audio(filename):
audio_file= open(filename, "rb")
transcript = openai.Audio.transcribe("whisper-1", audio_file)
audio_file.close()
return transcript
def translate_audio(filename):
audio_file= open(filename, "rb")
translation = openai.Audio.translate("whisper-1", audio_file)
audio_file.close()
return translation
def save_text_as_audio(text, audio_filename):
myobj = gTTS(text=text, lang='en', slow=False)
myobj.save(audio_filename)
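# Example flow (hypothetical filenames; assumes a working microphone and openai.api_key set):
#   record_audio("question.wav", sec=5)
#   text = transcribe_audio("question.wav")["text"]
#   save_text_as_audio(text, "reply.mp3")
#   play_audio("reply.mp3")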
| [] |
2024-01-10 | stanleywalker1/Interactive-Lab-Hub | Lab%204~ai-phone-demo~text_speech_utils.py | # text_speech_utils.py
import openai
import sounddevice as sd
import audiofile as af
from scipy.io.wavfile import write
from gtts import gTTS
import multiprocessing
import pyttsx3
import keyboard
import requests
from myapikeys import ELEVENLABS_KEY
# API constants
API_URL = "https://api.elevenlabs.io/v1/text-to-speech/<voice-id>/stream"
API_HEADERS = {
"Accept": "audio/mpeg",
"Content-Type": "application/json",
"xi-api-key": ELEVENLABS_KEY # Using the imported key
}
def say(text):
audio_filename = "temp_speech_output.mp3"
myobj = gTTS(text=text, lang='en', slow=False)
myobj.save(audio_filename)
play_audio(audio_filename)
def record_audio(filename, sec, sr = 44100):
audio = sd.rec(int(sec * sr), samplerate=sr, channels=1, blocking=False)
sd.wait()
write(filename, sr, audio)
def record_audio_manual(filename, sr = 44100):
input(" ** Press enter to start recording **")
audio = sd.rec(int(10 * sr), samplerate=sr, channels=1)
input(" ** Press enter to stop recording **")
sd.stop()
write(filename, sr, audio)
def play_audio(filename):
signal, sr = af.read(filename)
sd.play(signal, sr)
def transcribe_audio(filename):
audio_file= open(filename, "rb")
transcript = openai.Audio.transcribe("whisper-1", audio_file)
audio_file.close()
return transcript
def translate_audio(filename):
audio_file= open(filename, "rb")
translation = openai.Audio.translate("whisper-1", audio_file)
audio_file.close()
return translation
def save_text_as_audio(text, audio_filename):
voice_id = "21m00Tcm4TlvDq8ikWAM" # Replace with your preferred voice ID.
model_id = "eleven_monolingual_v1" # Default model ID.
payload = {
"text": text,
"model_id": model_id,
"voice_settings": {
"stability": 0.5,
"similarity_boost": 0.5
}
}
# Call the Text to Speech API
response = requests.post(API_URL.replace("<voice-id>", voice_id), headers=API_HEADERS, json=payload, stream=True)
CHUNK_SIZE = 1024
with open(audio_filename, 'wb') as f:
for chunk in response.iter_content(chunk_size=CHUNK_SIZE):
if chunk:
f.write(chunk)
| [] |
2024-01-10 | stanleywalker1/Interactive-Lab-Hub | Lab%204~text_speech_utils.py | # text_speech_utils.py
import openai
import sounddevice as sd
import audiofile as af
from scipy.io.wavfile import write
from gtts import gTTS
import multiprocessing
import pyttsx3
import keyboard
def say(text):
audio_filename = "temp_speech_output.mp3"
myobj = gTTS(text=text, lang='en', slow=False)
myobj.save(audio_filename)
play_audio(audio_filename)
def record_audio(filename, sec, sr = 44100):
audio = sd.rec(int(sec * sr), samplerate=sr, channels=1, blocking=False)
sd.wait()
write(filename, sr, audio)
def record_audio_manual(filename, sr = 44100):
input(" ** Press enter to start recording **")
audio = sd.rec(int(10 * sr), samplerate=sr, channels=1)
input(" ** Press enter to stop recording **")
sd.stop()
write(filename, sr, audio)
def play_audio(filename):
signal, sr = af.read(filename)
sd.play(signal, sr)
def transcribe_audio(filename):
audio_file= open(filename, "rb")
transcript = openai.Audio.transcribe("whisper-1", audio_file)
audio_file.close()
return transcript
def translate_audio(filename):
audio_file= open(filename, "rb")
translation = openai.Audio.translate("whisper-1", audio_file)
audio_file.close()
return translation
def save_text_as_audio(text, audio_filename):
myobj = gTTS(text=text, lang='en', slow=False)
myobj.save(audio_filename) | [] |
2024-01-10 | earthly/build-transpose | toearthly~core~io.py | import contextlib
import glob
import os
import subprocess
import time
from collections import defaultdict
from typing import List, Tuple
import openai
from joblib import Memory
from toearthly.core import constants
openai.api_key = os.getenv("OPENAI_API_KEY")
def call_chat_completion_api_cached(max_tokens, messages, temperature):
cache = constants.DEBUG_DIR+"/data/gpt_cache"
memory = Memory(location=cache, verbose=1)
print(f"Caching GPT to {cache}")
@memory.cache
def inner_function(max_tokens, messages, temperature):
print("running prompt")
return call_chat_completion_api(max_tokens, messages, temperature)
return inner_function(max_tokens, messages, temperature)
def call_chat_completion_api(max_tokens, messages, temperature):
max_retries = 3
initial_delay = 1
factor = 2
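    # Exponential backoff: wait 1s, then 2s, then 4s between attempts before giving up.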
retries = 0
delay = initial_delay
while retries < max_retries:
try:
response = openai.ChatCompletion.create(
model="gpt-4",
max_tokens=max_tokens,
temperature=temperature,
messages=messages,
)
print(response.choices[0].message.content)
return response.choices[0].message.content
except Exception as e:
print(f"Error: {e}, Retrying...")
time.sleep(delay)
retries += 1
delay *= factor
print("Max retries reached. Returning 'Error'.")
return "Error: Max Retry Hit"
def read(filepath: str) -> str:
with open(filepath, "r") as outfile:
return outfile.read()
def relative_read(relative_filepath: str) -> str:
# Get the directory of the current script file
script_dir = os.path.dirname(os.path.abspath(__file__))
# Construct the full filepath, by going up one folder
full_filepath = os.path.join(script_dir, "..", relative_filepath)
with open(full_filepath, "r") as outfile:
return outfile.read()
def write(contents: str, filepath: str) -> None:
directory = os.path.dirname(filepath)
if directory:
os.makedirs(directory, exist_ok=True)
with open(filepath, "w") as outfile:
outfile.write(contents)
def write_debug(filename: str, contents: str, subfolder: str = None) -> None:
debug_dir = constants.DEBUG_DIR
if subfolder is not None:
debug_dir = os.path.join(debug_dir, subfolder)
filepath = os.path.join(debug_dir, filename)
os.makedirs(debug_dir, exist_ok=True)
with open(filepath, "w") as outfile:
outfile.write(contents)
def find_first_workflow(path=None) -> Tuple[str, str]:
if path is None:
path = os.getcwd()
if not path.endswith("/"):
path += "/"
yml_files = glob.glob(path + ".github/workflows/*.yml")
if not yml_files:
raise Exception("No yml files found. Process will stop.")
with open(yml_files[0], "r") as file:
yml = file.read()
write_debug("workflow.yml", yml)
return (yml_files[0], yml)
def find_workflows(path=None) -> List[str]:
if path is None:
path = os.getcwd()
if not path.endswith("/"):
path += "/"
yml_files = glob.glob(path + ".github/workflows/*.yml")
if not yml_files:
raise Exception("No yml files found. Process will stop.")
return yml_files
def find_first_dockerfile(path=None) -> Tuple[str, str]:
if path is None:
path = os.getcwd()
if not path.endswith("/"):
path += "/"
docker_files = glob.glob(path + "Dockerfile")
if not docker_files:
return ("","")
with open(docker_files[0], "r") as file:
dockerfile = file.read()
write_debug("Dockerfile", dockerfile)
return (docker_files[0], dockerfile)
# Like tree but less output
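# Recurses up to max_level; files sharing an extension are collapsed to "*.<ext>" when there
# are more than three of them, to keep the listing short.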
def print_directory(path, prefix="", level=0, max_level=1) -> str:
if level > max_level:
return ""
dir_structure = ""
dir_items = defaultdict(list)
# Group files by extension and directories separately
for item in os.listdir(path):
# Ignore hidden files and directories
if item.startswith("."):
continue
if os.path.isfile(os.path.join(path, item)):
ext = os.path.splitext(item)[1]
dir_items[ext].append(item)
else:
dir_items["folders"].append(item)
# Generate directory structure, combining files with same extension if more than 3
for ext, items in dir_items.items():
if ext != "folders":
if len(items) > 3:
dir_structure += f"{prefix}├── *{ext}\n"
else:
for item in items:
dir_structure += f"{prefix}├── {item}\n"
else:
for item in items:
dir_structure += f"{prefix}├── {item}/\n"
if level < max_level:
subdir_structure = print_directory(
os.path.join(path, item), prefix + "│ ", level + 1, max_level
)
dir_structure += subdir_structure
write_debug("files.txt", dir_structure)
return dir_structure
def log(message: str) -> None:
log_file_path = os.path.join(constants.DEBUG_DIR, "log.txt")
os.makedirs(constants.DEBUG_DIR, exist_ok=True)
with open(log_file_path, "a") as log_file:
log_file.write(message + "\n")
def run_llm_program(program, *args, **kwargs):
with open(constants.DEBUG_DIR + "log.txt", "a") as f, contextlib.redirect_stdout(
f
), contextlib.redirect_stderr(f):
return program(*args, **kwargs)
def verify(earthfile: str) -> None:
debug_earthfile_path = os.path.join(constants.DEBUG_DIR, "Earthfile")
write(earthfile, debug_earthfile_path)
result = subprocess.run(
["earthly", "debug", "ast", debug_earthfile_path],
capture_output=True,
text=True,
)
if result.returncode != 0:
error_message = f"Verification failed with errors:\n{result.stderr}"
if constants.VERIFY_EARTHFILE:
raise ValueError(error_message)
else:
print(error_message)
print("Continuing despite the verification failure.")
| [] |
2024-01-10 | earthly/build-transpose | toearthly~prompt~gha_to_bash.py | from textwrap import dedent
from typing import Tuple
import guidance
from toearthly.core import io, markdown
gpt4 = guidance.llms.OpenAI("gpt-4")
input1 = io.relative_read("data/python_lint/workflow.yml")
cot1 = io.relative_read("data/python_lint/gha_to_bash/plan.md")
result1 = io.relative_read("data/python_lint/gha_to_bash/result.md")
input2 = io.relative_read("data/docker_simple/workflow.yml")
cot2 = io.relative_read("data/docker_simple/gha_to_bash/plan.md")
result2 = io.relative_read("data/docker_simple/gha_to_bash/result.md")
def prompt(gha: str, files: str) -> Tuple[str, str, str]:
program = guidance(
dedent(
"""
{{#system~}}
Given a GitHub Actions workflow YAML file, summarize how you would recreate the
steps of this build using bash and docker.
The implementation will consist of a run.sh script that creates and runs a Docker
container where our build.sh script is executed. This approach encapsulates our
build process in a controlled environment (the Docker container), isolating it from
the host machine and ensuring that it has all the necessary dependencies, regardless
of where or on what machine the build is running. This is why we choose to use
Docker and why we run the build process inside a Docker container, even though it
may seem like overkill for some simple build processes.
You will create three files:
* `run.sh`: A bash file that wraps docker. It will call docker build and afterward
run steps like docker push. Steps like git cloning and changing into the repo
aren't needed because this file is stored in the git repository along with the
code.
* `build.Dockerfile`: A dockerfile with the correct base image to support the
build steps. This includes any programming language tool and any dependencies needed
for `build.sh`. If no special dependencies are needed choose alpine.
* `build.sh` A bash file that runs the steps of the build process. It will run
inside `build.Dockerfile` in the working directory of the repository. If no build
steps need to run inside the container, you just include an `echo` as a placeholder.
Other files will exist in the repository. Code files and other assets and possibly
an application Dockerfile. The application Dockerfile you can call `app`.
Important considerations:
* no need to install dependencies, nor check out the code in `build.sh` because it
is run inside the image produced from `build.Dockerfile`. Call that docker image
`build`.
* `build.Dockerfile` should work without volume mounting. Files that are needed need
to be copied in.
* References to building/tagging and pushing a Docker image or container in GitHub
Actions workflow YAML do not refer to `build.Dockerfile` and `build` but to the
application `app` Dockerfile called `Dockerfile`.
* Any pushing and tagging of images should be of images made from app `Dockerfile`
and not from `build.Dockerfile`. Docker image `build` is used strictly for building
steps and is used as a way to set up dependencies for that build in a repeatable
way.
            * Don't include any steps that executing the GitHub Action wouldn't produce. This
may mean a step does nothing.
* You do not need to chmod `build.sh` or `run.sh`. That is taken care of.
Do not produce the files. Instead, describe how you would approach this problem.
Then go through the yaml document section by section and discuss if steps should be
included or omitted, which of the three files it should be in, and how it needs to
be adapted to the new format.
{{~/system}}
{{~! Training Example 1 ~}}
{{#user~}}
            {{input1}}
{{~/user}}
{{#assistant~}}
{{cot1}}
{{~/assistant}}
{{#user~}}
Ok, produce `run.sh`,`build.Dockerfile` and `build.sh`.
Remember `build.Dockerfile` should work without volume mounting: files that are
needed need to be copied in.
And three files should be produced, even if they are just place holders.
{{~/user}}
{{#assistant~}}
{{result1}}
{{~/assistant}}
{{~! Training Example 2 ~}}
{{#user~}}
            {{input2}}
{{~/user}}
{{#assistant~}}
{{cot2}}
{{~/assistant}}
{{#user~}}
Ok, produce `run.sh`,`build.Dockerfile` and `build.sh`.
Remember `build.Dockerfile` should work without volume mounting: files that are
needed need to be copied in.
And three files should be produced, even if they are just place holders.
{{~/user}}
{{#assistant~}}
{{result2}}
{{~/assistant}}
{{~! Generate Answer~}}
{{#user~}}
Files:
```
{{files}}
```
GitHub Actions workflow:
```
{{gha}}
```
{{~/user}}
{{#assistant~}}
{{gen "discuss" temperature=0 max_tokens=2000}}
{{~/assistant}}
{{#user~}}
Ok, produce `run.sh`,`build.Dockerfile` and `build.sh`.
Remember `build.Dockerfile` should work without volume mounting: files that are
needed need to be copied in.
And three files should be produced, even if they are just place holders.
{{~/user}}
{{#assistant~}}
{{gen "files" temperature=0 max_tokens=500}}
{{~/assistant}}
"""
),
llm=gpt4,
)
out = io.run_llm_program(
program,
gha=dedent(gha),
files=files,
input1=input1,
cot1=cot1,
result1=result1,
input2=input2,
cot2=cot2,
result2=result2,
)
io.write_debug("plan.md", out["discuss"], "gha_to_bash")
io.write_debug("result.md", out["files"], "gha_to_bash")
results = markdown.extract_code_blocks(out["files"])
if len(results) != 3:
raise ValueError(f"3 Files exepected back. Instead got {len(results)}")
io.write_debug("run.sh", results[0], "gha_to_bash")
io.write_debug("build.Dockerfile", results[1], "gha_to_bash")
io.write_debug("build.sh", results[2], "gha_to_bash")
return (results[0], results[1], results[2])
| [] |
2024-01-10 | earthly/build-transpose | toearthly~prompt~dockerfile_to_earthfile.py |
from textwrap import dedent
import guidance
from toearthly.core import io, markdown
gpt4 = guidance.llms.OpenAI("gpt-4")
earthly_basics = io.relative_read("data/earthly_docs/basics.md")
earthly_reference = io.relative_read("data/earthly_docs/summary.md")
earthly_tips = io.relative_read("data/earthly_docs/tips.md")
examples = [{
"docker": io.relative_read("data/docker_simple2/Dockerfile"),
"workflow": io.relative_read("data/docker_simple2/workflow.yml"),
"plan": io.relative_read("data/docker_simple2/dockerfile_to_earthfile/plan.md"), # noqa: E501
"result": io.relative_read("data/docker_simple2/dockerfile_to_earthfile/result.md") # noqa: E501
},
{
"docker": io.relative_read("data/docker_multistage1/Dockerfile"),
"workflow": io.relative_read("data/docker_multistage1/workflow.yml"),
"plan": io.relative_read("data/docker_multistage1/dockerfile_to_earthfile/plan.md"), # noqa: E501
"result": io.relative_read("data/docker_multistage1/dockerfile_to_earthfile/result.md") # noqa: E501
},
{
"docker": io.relative_read("data/docker_multistage2/Dockerfile"),
"workflow": io.relative_read("data/docker_multistage2/workflow.yml"),
"plan": io.relative_read("data/docker_multistage2/dockerfile_to_earthfile/plan.md"), # noqa: E501
"result": io.relative_read("data/docker_multistage2/dockerfile_to_earthfile/result.md") # noqa: E501
},
]
# Throws openai.error.InvalidRequestError
# This model's maximum context length is 8192 tokens. However, you requested 8480
# tokens (7480 in the messages, 1000 in the completion). Please reduce the length
# of the messages or completion.
# ToDo: recover from this by downgrading to GPT3.5
def prompt(docker: str, build: str) -> str:
program = guidance(
dedent(
"""
{{#system~}}
You are creating an Earthfile from a Dockerfile and a GitHub Actions workflow. I'll
share Earthly documentation with you and then describe the conversion process.
{{earthly_basics}}
{{earthly_tips}}
The tutorial is over. I will now describe the task.
You are creating an Earthfile from the following inputs.
* A Dockerfile: each stage in the Dockerfile will become a target in the Earthfile.
* A GitHub Action workflow: This may not be needed. Only steps in workflow which
describe docker actions like tagging or pushing or running docker with certain
arguments may be relevant. The rest should be ignored.
{{~/system}}
{{~#each examples}}
{{#user~}}
Github Actions Workflow:
```
{{this.workflow}}
```
Dockerfile:
```Dockerfile
{{this.docker}}
```
Task:
Do not produce the Earthfile. Instead, describe how you would approach this
problem. Then go through the files, step by step, and discuss how the steps should
be ported to an Earthfile.
Remember:
- an Earthfile can't have a target named base.
- an Earthfile `COPY` from another target works like a Dockerfile multistage COPY
but it has a different syntax.
- To copy `example` from target `+build` use `COPY +build/example .`
- Also, `example` will need to be saved using `SAVE ARTIFACT` in `+build`
Let me go step by step through the dockerfile and convert it to a Earthfile.
{{~/user}}
{{#assistant~}}
{{this.plan}}
{{~/assistant}}
{{#user~}}
Ok, produce the Earthfile in backticks.
{{~/user}}
{{#assistant~}}
{{this.result}}
{{~/assistant}}
{{~/each}}
{{#user~}}
Github Actions Workflow:
```
{{build}}
```
Dockerfile:
```Dockerfile
{{docker}}
```
Task:
Do not produce the Earthfile. Instead, describe how you would approach this
problem. Then go through the files, step by step, and discuss how the steps should
be ported to an Earthfile.
Remember:
- an Earthfile can't have a target named base.
- an Earthfile `COPY` from another target works like a Dockerfile multistage COPY
but it has a different syntax.
- To copy `example` from target `+build` use `COPY +build/example .`
- Also, `example` will need to be saved using `SAVE ARTIFACT` in `+build`
Let me go step by step through the dockerfile and convert it to a Earthfile.
{{~/user}}
{{#assistant~}}
{{gen "discuss" temperature=0 max_tokens=1000}}
{{~/assistant}}
{{#user~}}
Ok, produce the Earthfile in backticks.
{{~/user}}
{{#assistant~}}
{{gen "Earthfile" temperature=0 max_tokens=500}}
{{~/assistant}}
"""
),
llm=gpt4,
)
out = io.run_llm_program(
program,
earthly_basics=earthly_basics,
earthly_reference=earthly_reference,
earthly_tips=earthly_tips,
examples=examples,
docker=docker,
build=build,
)
io.write_debug("plan.md", out["discuss"], "dockerfile_to_earthfile")
io.write_debug("result.md", out["Earthfile"], "dockerfile_to_earthfile")
results = markdown.extract_code_blocks(out["Earthfile"])
if len(results) != 1:
raise ValueError(f"1 Files exepected back. Instead got {len(results)}.")
earthfile = results[0]
io.write_debug("Earthfile", earthfile, "dockerfile_to_earthfile")
return earthfile
| [] |
2024-01-10 | earthly/build-transpose | toearthly~prompt~merge.py | from textwrap import dedent
import guidance
from toearthly.core import io, markdown
gpt4 = guidance.llms.OpenAI("gpt-4")
earthly_basics = io.relative_read("data/earthly_docs/basics.md")
examples = [{
"file1": io.relative_read("data/merge/in1a.Earthfile"),
"name1": "workflow.yml",
"file2": io.relative_read("data/merge/in1b.Earthfile"),
"name2": "Dockerfile",
"result": io.relative_read("data/merge/out1.md"),
},
{
"file1": io.relative_read("data/merge/in2a.Earthfile"),
"name1": "workflow.yml",
"file2": io.relative_read("data/merge/in2b.Earthfile"),
"name2": "Dockerfile",
"result": io.relative_read("data/merge/out2.md"),
}]
def prompt(file1: str, name1: str, file2: str, name2: str) -> str:
if not file1:
return file2
if not file2:
return file1
program = guidance(
dedent(
"""
{{#system~}}
Here is an explanation of Earthfiles:
{{earthly_basics}}
I need your help to merge Earthfiles.
If the files have different base `FROM`s, you'll have to include the `FROM`
statements in the targets where needed,
You should also add any missing steps to the `all` target if any exists.
If two steps do the same thing, but with different target names, they can
be combined.
{{~/system}}
{{~#each examples}}
{{#user~}}
First Earthfile:
Project: {{this.name1}}
```Earthfile
{{this.file1}}
```
Second Earthfile:
Project: {{this.name2}}
```Earthfile
{{this.file2}}
            If they have different bases, we will have to include the `FROM` statements in
the targets where needed and we should also add any missing steps to the `all`
target.
Please discuss the way to merge these two files and then give the merged file
in backticks.
{{~/user}}
{{#assistant~}}
{{this.result}}
{{~/assistant}}
{{~/each}}
{{#user~}}
First Earthfile:
Project: {{name1}}
```Earthfile
{{file1}}
```
Second Earthfile:
Project: {{name2}}
```Earthfile
{{file2}}
            If they have different bases, we will have to include the `FROM` statements in
the targets where needed and we should also add any missing steps to the `all`
target.
Please discuss the way to merge these two files and then give the merged file
in backticks.
```
{{~/user}}
{{#assistant~}}
{{gen "Earthfile" temperature=0 max_tokens=2000}}
{{~/assistant}}
"""
),
llm=gpt4,
)
out = io.run_llm_program(
program,
earthly_basics=earthly_basics,
file1=file1,
name1=name1,
file2=file2,
name2=name2,
examples=examples,
)
io.write_debug("result.md", out["Earthfile"], "merge")
results = markdown.extract_code_blocks(out["Earthfile"])
if len(results) != 1:
raise ValueError(f"1 Files exepected back. Instead got {len(results)}.")
earthfile = results[0]
io.write_debug("Earthfile", earthfile, "merge")
return earthfile
| [] |
2024-01-10 | earthly/build-transpose | toearthly~scripts~merge.py | import argparse
import traceback
from textwrap import dedent
import openai
from toearthly.core import boot, constants, io # noqa: F401
from toearthly.prompt import merge
# Default directories
DEFAULT_INPUT_DIR = "/input/"
DEFAULT_EARTHFILE_PATH = "/input/Earthfile"
DEFAULT_DEBUG_DIR = "/input/.to_earthly/"
def main(input_dir: str, earthfile_path: str) -> None:
io.log("Starting")
try:
file1 = io.relative_read("data/merge/in1a.Earthfile")
file2 = io.relative_read("data/merge/in1b.Earthfile")
print(
dedent(
f"""
Input:
Earthfile1:\t{file1}
Earthfile2:\t{file2}
Output:\t\t{earthfile_path}
Debug files:\t{constants.DEBUG_DIR}
"""
)
)
print("Starting...\n (This may take 10 minutes)")
print("Running Stage 1 - Dockerfile To Earthfile")
earthfile = merge.prompt(file1, "python.yml", file2, "Dockerfile")
io.verify(earthfile)
io.write(constants.EARTHLY_WARNING + earthfile, earthfile_path)
except openai.error.InvalidRequestError as e:
print("Error: We were unable to convert this workflow.")
io.log(f"Error Type: openai.error.InvalidRequestError \n Error details: {e}")
except (ValueError, TypeError, IndexError, KeyError) as e:
print("Error: We were unable to convert this workflow.")
trace = traceback.format_exc()
io.log(f"Error Type: {type(e).__name__} \n Error details: {e}")
io.log(f"Stack Trace: {trace}")
def get_arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_dir", help="Base file location", default=DEFAULT_INPUT_DIR
)
parser.add_argument(
"--earthfile", help="Earthfile path", default=DEFAULT_EARTHFILE_PATH
)
parser.add_argument(
"--debug_dir", help="Debug directory location", default=DEFAULT_DEBUG_DIR
)
return parser
if __name__ == "__main__":
parser = get_arg_parser()
args = parser.parse_args()
constants.DEBUG_DIR = args.debug_dir
print(f"${args.input_dir} {args.earthfile}")
main(args.input_dir, args.earthfile)
| [] |
2024-01-10 | earthly/build-transpose | toearthly~prompt~bash_to_earthly.py |
from textwrap import dedent
import guidance
from toearthly.core import io, markdown
gpt4 = guidance.llms.OpenAI("gpt-4")
earthly_basics = io.relative_read("data/earthly_docs/basics.md")
earthly_reference = io.relative_read("data/earthly_docs/summary.md")
earthly_tips = io.relative_read("data/earthly_docs/tips.md")
input1 = io.relative_read("data/python_lint/files.md")
cot1 = io.relative_read("data/python_lint/bash_to_earthly/plan.md")
result1 = io.relative_read("data/python_lint/Earthfile")
def prompt(files: str, run: str, docker: str, build: str) -> str:
program = guidance(
dedent(
"""
{{#system~}}
You are creating an Earthfile from several bash and dockerfiles. I'll share Earthly
documentation with you and then describe the conversion process.
{{earthly_basics}}
The tutorial is over. I will now describe the task.
You are creating an Earthfile from the following inputs.
* `Files`: A Description of the file structure of the project. Use the file
structure to determine what files need to be copied in at each stage of the docker
multi-stage build.
* `run.sh`: A bash file that wraps docker. It will call docker build and afterward
run steps like docker push.
* `build.Dockerfile`: A dockerfile with the correct base image to support the build
steps. This should become the `base` and possibly the `deps` steps in the docker
file.
* `build.sh` A bash file that runs the build steps. These steps should become
targets in the Earthfile.
{{~/system}}
{{#user~}}
            {{input1}}
{{~/user}}
{{#assistant~}}
{{cot1}}
{{~/assistant}}
{{#user~}}
Ok, produce the files. Files that are needed need to be copied in.
{{~/user}}
{{#assistant~}}
{{result1}}
{{~/assistant}}
{{#user~}}
`Files:`
```
{{files}}
```
`run.sh`:
```
{{run}}
```
            `build.Dockerfile`:
```
{{docker}}
```
`build.sh`:
```
{{build}}
```
An Earthfile is a better way to represent this build process because it combines the
concepts of running bash commands to build something with the ideas of
containerisation made popular by Docker and dockerfile.
Task:
Do not produce the Earthfile. Instead, describe how you would approach this
problem. Then go through the files, step by step, and discuss how the steps should
be ported to Earthly.
{{~/user}}
{{#assistant~}}
{{gen "discuss" temperature=0 max_tokens=2000}}
{{~/assistant}}
{{#user~}}
Ok, produce the files. Files that are needed need to be copied in.
{{~/user}}
{{#assistant~}}
{{gen "Earthfile" temperature=0 max_tokens=2000}}
{{~/assistant}}
"""
),
llm=gpt4,
)
out = io.run_llm_program(
program,
earthly_basics=earthly_basics,
input1=input1,
cot1=cot1,
result1=result1,
files=files,
run=run,
docker=docker,
build=build,
)
io.write_debug("plan.md", out["discuss"], "bash_to_earthly")
io.write_debug("result.md", out["Earthfile"], "bash_to_earthly")
results = markdown.extract_code_blocks(out["Earthfile"])
if len(results) != 1:
raise ValueError(f"1 Files exepected back. Instead got {len(results)}.")
earthfile = results[0]
io.write_debug("Earthfile", earthfile, "bash_to_earthly")
return earthfile
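# Illustrative call site (a sketch only; the file names and the way inputs are read
# here are assumptions, the real callers live in the toearthly.scripts package):
#
#   files_listing = "run.sh\nbuild.sh\nbuild.Dockerfile\nsrc/main.py"
#   earthfile = prompt(
#       files=files_listing,
#       run=open("run.sh").read(),
#       docker=open("build.Dockerfile").read(),
#       build=open("build.sh").read(),
#   )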
| [] |
2024-01-10 | earthly/build-transpose | toearthly~scripts~dockerfile.py | import argparse
import traceback
from textwrap import dedent
import openai
from toearthly.core import boot, constants, io # noqa: F401
from toearthly.prompt import dockerfile_to_earthfile
# Default directories
DEFAULT_INPUT_DIR = "/input/"
DEFAULT_EARTHFILE_PATH = "/input/Earthfile"
DEFAULT_DEBUG_DIR = "/input/.to_earthly/"
intro = """
ALPHA ALERT
This program attempts to generate an Earthfile using an existing GitHub Actions
workflow.
The generated Earthfile should be a good starting place. Additional manual changes
may be needed.
This program will send your GitHub Actions workflow to OpenAI.
This program assumes your project has the following attributes:
* Primarily a single programming language
* Not a deeply nested monorepo
Please send any strange results or issues to [email protected] along with a copy of the
.to_earthly folder and the Earthfile. I will use this for future improvements.
Many features are not yet supported and will be ignored for now. These include:
* container creation
* matrix builds
* WITH DOCKER and integration tests
* The GitHub workflow cannot be specified.
  (picks the first result from .github/workflows/*.yml)
I'll prioritize these based on feedback, so reach out on Slack, via [email protected],
or via https://github.com/adamgordonbell/to-earthly
"""
def main(input_dir: str, earthfile_path: str) -> None:
try:
print(intro)
dockerfile_path, dockerfile_content = io.find_first_dockerfile(input_dir)
workflow_path, workflow_content = io.find_first_workflow(input_dir)
print(
dedent(
f"""
Input:
DockerFile:\t{dockerfile_path}
Related Workflow:\t{workflow_path}
Output:\t\t{earthfile_path}
Debug files:\t{constants.DEBUG_DIR}
"""
)
)
print("Starting...\n (This may take 10 minutes)")
print("Running Stage 1 - Dockerfile To Earthfile")
earthfile = dockerfile_to_earthfile.prompt(dockerfile_content, workflow_content)
io.verify(earthfile)
io.write(constants.EARTHLY_WARNING + earthfile, earthfile_path)
except openai.error.InvalidRequestError as e:
print("Error: We were unable to convert this workflow.")
io.log(f"Error Type: openai.error.InvalidRequestError \n Error details: {e}")
except (ValueError, TypeError, IndexError, KeyError) as e:
print("Error: We were unable to convert this workflow.")
trace = traceback.format_exc()
io.log(f"Error Type: {type(e).__name__} \n Error details: {e}")
io.log(f"Stack Trace: {trace}")
def get_arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_dir", help="Base file location", default=DEFAULT_INPUT_DIR
)
parser.add_argument(
"--earthfile", help="Earthfile path", default=DEFAULT_EARTHFILE_PATH
)
parser.add_argument(
"--debug_dir", help="Debug directory location", default=DEFAULT_DEBUG_DIR
)
return parser
if __name__ == "__main__":
parser = get_arg_parser()
args = parser.parse_args()
constants.DEBUG_DIR = args.debug_dir
main(args.input_dir, args.earthfile)
| [] |
2024-01-10 | earthly/build-transpose | toearthly~prompt~earthfile_correction.py | from textwrap import dedent
import guidance
from toearthly.core import io, markdown
gpt4 = guidance.llms.OpenAI("gpt-4")
earthly_basics = io.relative_read("data/earthly_docs/basics.md")
earthly_reference = io.relative_read("data/earthly_docs/summary.md")
earthly_tips = io.relative_read("data/earthly_docs/tips.md")
def prompt(earthfile: str, gha: str, files: str) -> str:
program = guidance(
dedent(
"""
{{#system~}}
Use the below documentation on Earthfiles to do a code conversion task.
<<Article>>
{{earthly_basics}}
{{earthly_tips}}
<<Article>>
The tutorial is over. I will now describe the task.
Task:
You are given an Earthfile that has incorrect syntax or doesn't conform to best
practices.
            The Earthfile is based on a GitHub Actions workflow. This workflow is also
            given, and the Earthfile should match it as closely as possible.
The file structure of the solution is also included because in an Earthfile
files must be explicitly copied into context.
            The mistakes may include using Dockerfile syntax, not using SAVE ARTIFACT
            for files that another target COPYs, or structuring things in a way that
            could be improved.
            Files that do not exist may be copied in, or a target named `base` may be
            used even though that name is reserved.
Do not produce the file yet. Instead, describe how you would approach this
problem. Then go through the Earthfile section by section and discuss any
changes that need to be made.
{{~/system}}
{{#user~}}
Files:
```
{{files}}
```
Git Hub Actions:
```
{{gha}}
```
Earthfile:
```
{{earthfile}}
```
{{~/user}}
{{#assistant~}}
{{gen "discuss" temperature=0 max_tokens=2000}}
{{~/assistant}}
{{#user~}}
Ok, produce the Earthfile in backticks.
{{~/user}}
{{#assistant~}}
{{gen "Earthfile" temperature=0 max_tokens=2000}}
{{~/assistant}}
"""
),
llm=gpt4,
)
out = io.run_llm_program(
program,
earthly_basics=earthly_basics,
earthly_tips=earthly_tips,
files=files,
gha=gha,
earthfile=earthfile,
)
io.write_debug("plan.md", out["discuss"], "earthfile_correction")
io.write_debug("result.md", out["earthfile"], "earthfile_correction")
results = markdown.extract_code_blocks(out["Earthfile"])
if len(results) != 1:
raise ValueError(f"1 Files exepected back. Instead got {len(results)}.")
earthfile = results[0]
io.write_debug("Earthfile", earthfile, "earthfile_correction")
return earthfile
| [] |
2024-01-10 | PEPLabs/LANG-CL-AGENTSMEM | src~main~lab.py | import os
from langchain.agents import AgentType, initialize_agent
from langchain.memory import ConversationBufferWindowMemory
from langchain_community.chat_models import ChatHuggingFace
from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
from langchain_core.tools import Tool
"""
This lab will guide you through defining LangChain Agents with ConversationBufferWindowMemory.
ConversationBufferWindowMemory is a basic way to store conversation history. We can declare this in
an Agent's "memory" attribute to define a memory mechanism that the LLM can use to complete tasks.
ConversationBufferWindowMemory maintains a record of the conversation's previous interactions,
limited to the number defined in the "k" attribute. Only the last "k" interactions are stored.
In other words, ConversationBufferWindowMemory maintains a "window" of previous interactions defined by "k".
https://python.langchain.com/docs/modules/memory/types/buffer_window
"""
"""
Defining our LLM here for our chat model, as well as the text input & functions for the tools that our agent
will use. No need to edit these.
"""
llm = HuggingFaceEndpoint(
endpoint_url=os.environ['LLM_ENDPOINT'],
task="text2text-generation",
model_kwargs={
"max_new_tokens": 200
}
)
chat_model = ChatHuggingFace(llm=llm)
textInput = """
<|system|>
You are that helpful AI that responds concisely</s>
<|user|>
{userInput}</s>
<|assistant|>
"""
def greeting(input):
"""When the user greets you, greet them back. Nothing else would be said."""
return chat_model.invoke(textInput.format(userInput=input))
def get_historical_fact(input):
"""If the user asks for a historical fact, give them a concise summary of the topic."""
return chat_model.invoke(textInput.format(userInput=input))
# TODO: define the second tool that the agent will have access to (get_historical_fact)
tools = [
Tool.from_function(
func=greeting,
name="greeting",
description="When the user sends a greeting, send a greeting back.",
),
# SECOND TOOL GOES HERE
]
"""
Defining a conversational agent that DOES NOT STORE any conversation history.
DON'T EDIT THIS CODE, as it's meant to serve as a sample in src/lab.py and proof of concept in src/app.py
First, we define a ConversationBufferWindowMemory object, and store 0 previous interactions in memory. (k=0)
(Spoiler: this is not likely to create an agent that remembers things...)
Next, we use the initialize_agent function, to create an agent providing it with the tools and llm defined above.
We've defined the agent type as CONVERSATIONAL_REACT_DESCRIPTION.
This is a more conversational type of agent than the more general-use ZERO-SHOT agents.
It is designed to hold a conversation while using tools.
By setting verbose=True, we can see what the agent is thinking/doing in the console.
Finally, we set the memory attribute to the previously defined ConversationBufferWindowMemory object.
"""
memory_no_history = ConversationBufferWindowMemory(memory_key="chat_history", k=0)
agent_executor_no_memory = initialize_agent(
tools,
chat_model,
agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
verbose=True,
memory=memory_no_history,
handle_parsing_errors=True
)
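# A minimal way to exercise this agent (a sketch; the inputs are illustrative and the
# run() call assumes LangChain's standard single-input AgentExecutor interface):
#
#   agent_executor_no_memory.run("Hi there!")
#   agent_executor_no_memory.run("Tell me a historical fact about Rome.")
#   # with k=0, each call starts from an empty chat_history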
"""
Defining a conversational agent that STORES 2 previous interactions in memory.
This is the main task of the lab.
"""
# TODO: instantiate a ConversationBufferWindowMemory object that stores 2 previous interactions in memory
memory_with_history = "TODO"
# TODO: define a conversational agent that uses memory_with_history for its memory attribute
agent_executor_with_memory = "TODO"
| [] |
2024-01-10 | joseph-crowley/BSHR | src~utils~llm_tools.py | import openai
import time
import json
import config.settings
import random
from utils.logger import logger
openai.api_key = config.settings.OPENAI_API_KEY
def retry_with_exponential_backoff(
func,
initial_delay: float = 1,
exponential_base: float = 2,
jitter: bool = True,
max_retries: int = config.settings.OPENAI_MAX_RETRY_COUNT,
errors: tuple = (openai.error.RateLimitError,),
):
"""Retry a function with exponential backoff."""
def wrapper(*args, **kwargs):
num_retries = 0
delay = initial_delay
while True:
try:
return func(*args, **kwargs)
except errors as e:
num_retries += 1
if num_retries > max_retries:
logger.error("Max retry count reached. Raising error.")
raise
delay *= exponential_base * (1 + jitter * random.random())
logger.info(f"Sleeping for {delay} seconds after {e}...")
time.sleep(delay)
except Exception as e:
logger.error(f"Unexpected error: {e}")
raise
return wrapper
@retry_with_exponential_backoff
def call_openai_with_backoff(messages):
logger.debug(f"Calling OpenAI API with model: {config.settings.LANGUAGE_MODEL} and messages: {messages}")
response = openai.ChatCompletion.create(
model=config.settings.LANGUAGE_MODEL,
messages=messages
)
return json.dumps(response["choices"][0]["message"])
# Function to call OpenAI API
def call_openai(messages):
"""
Calls the OpenAI API to get an intelligent response for query processing.
"""
return call_openai_with_backoff(messages)
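# Illustrative usage (a sketch; the messages below are made up and a valid
# OPENAI_API_KEY must be available via config.settings):
if __name__ == "__main__":
    example_messages = [
        {"role": "system", "content": "You are a terse research assistant."},
        {"role": "user", "content": "Summarize brainstorm-search-hypothesize-refine in one sentence."},
    ]
    print(call_openai(example_messages))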
| [] |