date_collected | repo_name | file_name | file_contents | prompts |
---|---|---|---|---|
2024-01-10 | skandansn/apnaLawyer | app~apnaLawyer.py | import whisper
import requests
import pinecone
from app.config import settings
from fastapi import status, HTTPException
from langchain.llms import OpenAI
from langchain import PromptTemplate
from .schemas import QueryInput
from langchain.vectorstores import Pinecone
from langchain.embeddings import OpenAIEmbeddings
from langchain.document_loaders import DirectoryLoader
from langchain.chains.question_answering import load_qa_chain
from langchain.text_splitter import RecursiveCharacterTextSplitter
async def document_input_feeder(email: str):
loader = DirectoryLoader('./storage/files/' + email)
docs = loader.load()
text_splitter = RecursiveCharacterTextSplitter()
texts = text_splitter.split_documents(docs)
embeddings = OpenAIEmbeddings(model_name="ada", openai_api_key=settings.OPENAI_API_KEY)
pinecone.init(
api_key=settings.PINECONE_API_KEY,
environment=settings.PINECONE_ENV
)
index_name = settings.PINECONE_INDEX
Pinecone.from_documents(texts, embeddings, index_name=index_name, namespace=email)
async def get_relevant_docs(query: str, namespace: str):
embeddings = OpenAIEmbeddings(model_name="ada", openai_api_key=settings.OPENAI_API_KEY)
pinecone.init(
api_key=settings.PINECONE_API_KEY,
environment=settings.PINECONE_ENV
)
index_name = settings.PINECONE_INDEX
docsearch = Pinecone.from_existing_index(index_name, embeddings, namespace=namespace)
docs = docsearch.similarity_search(query)
return docs
async def audio_transcribe(user):
model = whisper.load_model("base")
result = model.transcribe("./storage/files/johndoe/tamil.mp3", task="translate")['text']
input_query = QueryInput(query=result)
return await langchain_query_processor(input_query, user)
async def langchain_query_processor(input_query: QueryInput, user):
query_template = """
You are an expert lawyer named "Apna Lawyer" well versed in Indian law.
Answer the query of {query} in a detailed and complete way.
Reject if the query is not involving a law or constitution in any way.
"""
query_prompt = PromptTemplate(
input_variables=["query"],
template=query_template,
)
llm = OpenAI(model_name=input_query.model, openai_api_key=settings.OPENAI_API_KEY)
result = {}
if input_query.kanoon:
if user.role == "paid":
headers = {'Authorization': f'Token ' + settings.KANOON_API_TOKEN}
response = requests.post(settings.KANOON_API_URL + input_query.query, headers=headers, json={})
docsResponse = response.json()['docs']
docAndUrlList = []
for i in docsResponse:
docAndUrlList.append([i['title'], i['url']])
result["docsList"] = docAndUrlList
return result
else:
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
detail='This feature is only available for paid tier customers.')
if input_query.query_docs:
if user.role == "paid":
docs = await get_relevant_docs(input_query.query, user.email)
prompt_template = """If the question is not related to any law or the
constitution, do not answer the question. If it is indeed related to a law or constitution, use the following pieces of context to answer the question at the end. If you don't
know the answer, try using your existing Open AI Chatgpt's general knowledge model apart form this input document to answer the question, but make sure
to notify that this is not in the given input context.
{context}
Question: {question}
Answer:"""
prompt = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
chain = load_qa_chain(llm, chain_type="stuff", prompt=prompt)
response = chain({"input_documents": docs, "question": input_query.query}, return_only_outputs=True)['output_text']
result["answer"] = response
return result
else:
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
detail='This feature is only available for paid tier customers.')
query_output = llm(query_prompt.format(query=input_query.query))
result['answer'] = query_output
negation_output = None
if input_query.negation:
negation_template = "Turn the {answer} and explain to me what will happen if i go against this law. Reject if query is not related to law or constitution in any way."
negation_prompt = PromptTemplate(
input_variables=["answer"],
template=negation_template,
)
negation_output = llm(negation_prompt.format(answer=query_output))
result['negation'] = negation_output
return result
| [
"If the question is not related to any law or the \n constitution, do not answer the question. If it is indeed related to a law or constitution, use the following pieces of context to answer the question at the end. If you don't \n know the answer, try using your existing Open AI Chatgpt's general knowledge model apart form this input document to answer the question, but make sure \n to notify that this is not in the given input context. \n\n {context}\n\n Question: {question}\n Answer:",
"\n You are an expert lawyer named \"Apna Lawyer\" well versed in Indian law.\n Answer the query of {query} in a detailed and complete way. \n Reject if the query is not involving a law or constitution in any way.\n ",
"question",
"answer",
"Turn the {answer} and explain to me what will happen if i go against this law. Reject if query is not related to law or constitution in any way.",
"context",
"Apna Lawyer"
] |
2024-01-10 | skandansn/apnaLawyer | app~routers~core_integrations.py | from fastapi import APIRouter, Depends, UploadFile, status, HTTPException
import os
from ..database import get_db
from sqlalchemy.orm import Session
from .. import models, schemas, oauth2
from ..apnaLawyer import langchain_query_processor, document_input_feeder, audio_transcribe
router = APIRouter()
@router.post('/query', response_model=schemas.QueryOutput)
async def query(input_query: schemas.QueryInput, db: Session = Depends(get_db),
user_id: str = Depends(oauth2.require_user)):
user = db.query(models.User).filter(models.User.id == user_id).first()
processor_result = await langchain_query_processor(input_query, user)
print(processor_result)
return schemas.QueryOutput(**processor_result)
@router.post('/upload-files')
async def create_upload_file(files: list[UploadFile], db: Session = Depends(get_db),
user_id: str = Depends(oauth2.require_user)):
user = db.query(models.User).filter(models.User.id == user_id).first()
if user.role == 0:
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
detail='This feature is only available for paid tier customers.')
os.makedirs("./storage/files/" + user.email, exist_ok=True)
for file in files:
file_path = os.path.join("./storage/files/" + user.email, file.filename)
with open(file_path, "wb") as f:
contents = await file.read()
f.write(contents)
await document_input_feeder(user.email)
return {"Uploaded filenames": [file.filename for file in files]}
@router.get('/list-files')
async def list_user_files(db: Session = Depends(get_db), user_id: str = Depends(oauth2.require_user)):
user = db.query(models.User).filter(models.User.id == user_id).first()
os.makedirs("./storage/files/" + user.email, exist_ok=True)
files = os.listdir("./storage/files/" + user.email)
return {"Your uploaded files": [file for file in files]}
@router.post('/process-audio')
async def upload_audio(db: Session = Depends(get_db), user_id: str = Depends(oauth2.require_user)):
user = db.query(models.User).filter(models.User.id == user_id).first()
if user.role == 0:
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
detail='This feature is only available for paid tier customers.')
processor_result = await audio_transcribe(user)
return schemas.QueryOutput(**processor_result)
| [] |
2024-01-10 | preritdas/jeeves | jeeves~agency~serper_wrapper.py | """Wrap the LangChain Google Serper wrapper so it works specifically for Jeeves."""
from langchain.utilities import GoogleSerperAPIWrapper
class GoogleSerperAPIWrapperURL(GoogleSerperAPIWrapper):
"""Same as the GoogleSerperAPIWrapper but provides URLs to results."""
def _parse_snippets(self, results: dict) -> str:
snippets = []
if results.get("answerBox"):
answer_box = results.get("answerBox", {})
if answer_box.get("answer"):
return answer_box.get("answer")
elif answer_box.get("snippet"):
return answer_box.get("snippet").replace("\n", " ")
elif answer_box.get("snippetHighlighted"):
return ", ".join(answer_box.get("snippetHighlighted"))
if results.get("knowledgeGraph"):
kg = results.get("knowledgeGraph", {})
title = kg.get("title")
entity_type = kg.get("type")
if entity_type:
snippets.append(f"{title}: {entity_type}.")
description = kg.get("description")
if description:
snippets.append(description)
for attribute, value in kg.get("attributes", {}).items():
snippets.append(f"{title} {attribute}: {value}.")
for result in results[self.result_key_for_type[self.type]][: self.k]:
if "snippet" in result:
snippets.append(f"{result['snippet']} ({result['link']})")
for attribute, value in result.get("attributes", {}).items():
snippets.append(f"{attribute}: {value}.")
if len(snippets) == 0:
return "No good Google Search Result was found"
return snippets
| [] |
2024-01-10 | preritdas/jeeves | jeeves~agency~movies.py | """
Get info on movies. This tool is inactive because it's currently easier for the
agent to just Google the movie's name and use Website Answerer on the
Rotten Tomatoes link. It did this on its own in testing.
"""
from langchain.agents.tools import BaseTool
import rottentomatoes as rt
from typing import Any, Coroutine
class MoviesTool(BaseTool):
"""Get info on movies."""
name: str = "Movie Info"
description: str = (
"Useful for when you want information on movies. "
"Input should be a string of the movie's name."
)
def _run(self, query: str) -> str:
"""Run the tool."""
movie = rt.Movie(query)
return str(movie)
def _arun(self, *args: Any, **kwargs: Any) -> Coroutine[Any, Any, str]:
raise NotImplementedError()
| [] |
2024-01-10 | preritdas/jeeves | jeeves~agency~make_calls~call_tool.py | """The calling tool used by agent."""
# External
from langchain.agents.tools import BaseTool
import requests
# Standard lib
import json
import time
from urllib.parse import urlencode
from typing import Any, Coroutine
# Project
from keys import KEYS
from jeeves.texts import twilio_client, BASE_URL
# Make calls
from jeeves.agency.make_calls import database as db
from jeeves.agency.logs_callback import logger
class CallToolError(Exception):
"""Base error for the calling tool."""
pass
def make_call(recipient: str, goal: str, recipient_desc: str) -> str:
"""Makes the call and returns a transcript."""
created_call = db.Call.create(goal=goal, recipient_desc=recipient_desc)
call_params: dict[str, str] = {"call_id": created_call.key}
# Before creating the call, test the base url
if not requests.get(BASE_URL).ok:
raise CallToolError(f"Base URL {BASE_URL} is not responding.")
outbound_call = twilio_client.calls.create(
recipient,
KEYS.Twilio.sender,
url=f"{BASE_URL}/voice/outbound/handler?{urlencode(call_params)}",
record=True
)
logger.info(f"{created_call.key}: INFO: Call created to {recipient}")
CALL_END_STATUSES = {"completed", "canceled", "failed", "busy", "no-answer"}
# Wait for call to complete
while (status := outbound_call.update().status) not in CALL_END_STATUSES:
time.sleep(1)
# Return a transcript
logger.info(f"{created_call.key}: INFO: Call ended with status {status}")
created_call.download()
return f"Call status: {status} || BEGIN TRANSCRIPT || {created_call.convo} || END TRANSCRIPT ||"
class CallTool(BaseTool):
"""Agent tool for creating calls to facilitate goals."""
name: str = "Make a Call"
description: str = (
"Make a call to a recipient and complete a goal. Input must be a JSON string "
'with the keys "recipient_phone", "recipient_desc", and "goal". The recipient phone number '
"must be a 10-digit phone number preceded by "
'country code, ex. "12223334455". Do not make up phone numbers - either '
"use a phone number explicitly provided by the user, or use a phone number from a "
"tool that provides it for you. Otherwise, do not use this tool. "
'"recipient_desc" is a short description of who you are calling.'
'The "goal" should be comprehensive and specific, providing all information necessary '
"to facilitate a desirable outcome. For example, if you are asked to make a dinner "
"reservation, you will need a date, time, and name. If you don't have all that you need, "
"do not use the tool, respond to me and inform me that you're missing critical information. "
"The output of this tool is a transcript of the call, "
"so if you don't see an indication that the goal succeeded in the transcript, report that. "
"Do not assume the goal succeeded unless you see proof in the transcript. For example, "
"if your task was to inform John that I'm busy tomorrow, and you don't see the recipient (John)"
"acknowledging this in the returned transcript, consider the message delivery a failure. "
"Further, if you don't receive any output from this tool, consider the entire call failed."
)
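# Illustrative example of the JSON input this tool expects (all values are hypothetical):
#   '{"recipient_phone": "12223334455",
#     "recipient_desc": "the restaurant host",
#     "goal": "Book a table for two this Friday at 7pm under the name Smith."}'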
def _run(self, query: str) -> str:
"""Make a call."""
try:
input_parsed = json.loads(query)
except Exception as e:
return f"Error parsing input: {str(e)}"
if not "recipient_phone" in input_parsed:
return 'Input must have a "recipient_phone" key.'
if not "recipient_desc" in input_parsed:
return 'Input must have a "recipient_desc" key.'
if not "goal" in input_parsed:
return 'Input must have a "goal" key.'
try:
return make_call(
recipient=str(input_parsed["recipient_phone"]),
goal=str(input_parsed["goal"]),
recipient_desc=str(input_parsed["recipient_desc"]),
)
except Exception as e:
return f"Error making call: {str(e)}"
def _arun(self, *args: Any, **kwargs: Any) -> Coroutine[Any, Any, str]:
raise NotImplementedError()
| [] |
2024-01-10 | preritdas/jeeves | jeeves~agency~logs_callback.py | """Create a logging callback handler for the agent."""
from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks import StdOutCallbackHandler
import logging
from logging import Logger
from logging.handlers import SysLogHandler
import re
from typing import Dict, Any, List, Optional, Union
from langchain.schema import LLMResult, AgentAction, AgentFinish
from keys import KEYS
from config import CONFIG
def extract_log_items(log: str, fields: list[str]) -> list[str]:
"""
Takes a log and extracts the fields specified in the fields list.
Removes spaces from all field names.
Args:
log (str): The log to extract from.
fields (list[str]): The fields to extract. Don't include the colon.
Returns:
list[str]: The extracted fields as full strings.
Example: if the log is "This: something That: something else" then
extract_log_items(log, ["This", "That"]) will return
["This: something", "That: something else"]
Spaces are removed from the field names, so "Action Input" becomes "ActionInput"
in the logs. This is to make the logs more readable. So when sorting and checking
for the fields, we remove the spaces from the field names (check_fields).
"""
# Regular expression to match "Thought:", "Action:", and "Action Input:"
fields = [f + ":" for f in fields]
pattern = f"({'|'.join(fields)})"
# Split the string using the pattern and filter out empty strings
split_string = [s.strip() for s in re.split(pattern, log) if s.strip()]
# Combine the matched expressions with their corresponding text, including a space after the colon
logs: list[str] = [
split_string[i].replace(" ", "") + " " + split_string[i + 1]
for i in range(0, len(split_string), 2)
]
# Sort the logs in the order of the fields
check_fields: list[str] = [f.replace(" ", "") for f in fields]
return sorted(logs, key=lambda x: check_fields.index(x.split(":")[0] + ":"))
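# Worked example (the log text is hypothetical) of what extract_log_items produces:
#   log = "Thought: I need to search Action: Google Search Action Input: weather today"
#   extract_log_items(log, ["Thought", "Action", "Action Input"])
#   -> ["Thought: I need to search",
#       "Action: Google Search",
#       "ActionInput: weather today"]   # spaces are removed from the field names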
class AgentLoggingCallbackHandler(BaseCallbackHandler):
"""
Callback Handler that logs instead of printing.
Specific for agents, as it uses agent terminology in the logs.
"""
def __init__(self, logger: Logger, uid: str) -> None:
"""Initialize callback handler."""
self.logger = logger
self.uid = uid
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Print out the prompts."""
pass
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Do nothing."""
pass
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Do nothing."""
pass
def on_llm_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
"""Do nothing."""
pass
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Print out that we are entering a chain."""
class_name = serialized["id"][-1]
self.logger.info(f"{self.uid}: AgentStart: Entering new {class_name} chain...")
self.logger.info(f"{self.uid}: UserInput: {inputs['input']}")
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Print out that we finished a chain."""
self.logger.info(f"{self.uid}: AgentFinish: Finished chain.")
def on_chain_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
"""Do nothing."""
pass
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
**kwargs: Any,
) -> None:
"""Do nothing."""
def on_agent_action(
self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
) -> Any:
"""Run on agent action."""
log_items = extract_log_items(action.log, ["Thought", "Action", "Action Input"])
# Log the result
for result in log_items:
self.logger.info(f"{self.uid}: {result}")
def on_tool_end(
self,
output: str,
color: Optional[str] = None,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""If not the final action, print out observation."""
self.logger.info(f"{self.uid}: {observation_prefix}{output}")
def on_tool_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
"""Do nothing."""
pass
def on_text(
self,
text: str,
color: Optional[str] = None,
end: str = "",
**kwargs: Any,
) -> None:
"""Run when agent ends."""
self.logger.info(f"{self.uid}: {text}")
def on_agent_finish(
self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
) -> None:
"""Run on agent end."""
# If no tools were used
if "Final Answer" in finish.log and "Thought" not in finish.log:
self.logger.info(f"{self.uid}: {finish.log.splitlines()[1]}")
return
log_items = extract_log_items(finish.log, ["Thought", "Final Answer"])
# Log the result
for result in log_items:
self.logger.info(f"{self.uid}: {result}")
# ---- Logging ----
logger = logging.getLogger("agent")
logger.setLevel(logging.INFO)
handler = SysLogHandler(address=(KEYS.Papertrail.host, KEYS.Papertrail.port))
logger.addHandler(handler)
def create_callback_handlers(uid: str) -> list[BaseCallbackHandler]:
"""
Create a Callback Manager with all the handlers based on the uid. The uid is used
to separate entries in the logs, so a unique CallbackManager should be used for each agent run.
"""
# Log to console and to Papertrail
logging_callback = AgentLoggingCallbackHandler(logger=logger, uid=str(uid))
callback_handlers = [logging_callback]
# Log to console as well if configured
if CONFIG.GPT.console_agent:
callback_handlers.append(StdOutCallbackHandler())
return callback_handlers
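# Minimal usage sketch (the uid value here is hypothetical): the handlers returned by
# create_callback_handlers are passed as the agent's `callbacks`, so every log line from
# one run shares the same uid, e.g.
#   handlers = create_callback_handlers(uid="run-1234")
#   # ...then build the AgentExecutor with callbacks=handlers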
| [] |
2024-01-10 | preritdas/jeeves | tests~agency~test_user_memory.py | """Test user memory."""
from langchain.tools import BaseTool
from jeeves.agency.user_memory import UserMemory, create_user_memory_tools
def test_adding_to_memory(temporary_user_memory):
"""Test adding to memory."""
user_memory = UserMemory.from_user_phone(temporary_user_memory)
user_memory.add_entry("My great grandfather's name is Bobbert.")
assert any("Bobbert" in entry.content for entry in user_memory.entries)
def test_retrieving_from_memory(temporary_user_memory):
"""
Test retrieving from memory.
Temporary user memory adds an entry saying I parked on level 2.
"""
user_memory = UserMemory.from_user_phone(temporary_user_memory)
assert "2" in user_memory.answer_question("Where did I park?")
def test_building_user_memory_tools(temporary_user_memory):
"""
Test building user memory tools.
This tests prompts and prompt reading and building the tools.
"""
user_memory_tools = create_user_memory_tools(temporary_user_memory)
assert isinstance(user_memory_tools, list)
assert all(isinstance(tool, BaseTool) for tool in user_memory_tools)
| [] |
2024-01-10 | preritdas/jeeves | jeeves~agency~tool_auth.py | """Load tools depending on authorization."""
from langchain.agents import Tool
from langchain.tools import BaseTool
from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper
from langchain.utilities.zapier import ZapierNLAWrapper
from langchain.agents.agent_toolkits import ZapierToolkit
from langchain.callbacks.base import BaseCallbackHandler
from keys import KEYS
from jeeves.permissions import User
from jeeves.agency import retrieval
from jeeves.agency import news
from jeeves.agency import send_texts
from jeeves.agency import make_calls
from jeeves.agency.user_memory import create_user_memory_tools
from jeeves.agency.serper_wrapper import GoogleSerperAPIWrapperURL
ANSWERER_JSON_STRING_INPUT_INSTRUCTIONS = (
'Input must be a JSON string with the keys "source" and "query".'
)
NO_AUTH_TOOLS: list[BaseTool] = [
Tool(
name="Google Search",
func=GoogleSerperAPIWrapperURL(serper_api_key=KEYS.GoogleSerper.api_key).run,
description=(
"Useful for when you need to search Google. Provides links to search results "
"that you can use Website Answerer to answer for more information."
),
),
Tool(
name="Website Answerer",
func=retrieval.WebsiteAnswerer.answer_json_string,
description=(
"Useful for when you need to answer a question about the content on a website. "
"You can use this to answer questions about links found in Google Search results. "
f'{ANSWERER_JSON_STRING_INPUT_INSTRUCTIONS} "source" is the URL of the website. '
"Do not make up websites to search - you can use Google Search to find relevant urls."
),
),
Tool(
name="YouTube Answerer",
func=retrieval.YouTubeAnswerer.answer_json_string,
description=(
"Useful for when you need to answer a question about the contents of a YouTube video. "
f'{ANSWERER_JSON_STRING_INPUT_INSTRUCTIONS} "source" is the URL of the YouTube video. '
"Do not make up YouTube videos - you can use Google Search to find relevant videos, or "
"accept them directly from the user."
),
),
Tool(
name="Headline News",
func=news.manual_headline_news,
description=(
"Useful for when you need to get the top headlines from a specific category. "
"Input must be a string with the category name. Category must be one of "
f"{news.MANUAL_AVAILABLE_CATEGORIES}."
),
),
Tool(
name="Wolfram Alpha",
func=WolframAlphaAPIWrapper(wolfram_alpha_appid=KEYS.WolframAlpha.app_id).run,
description=(
"Useful for when you need to do math or anything quantitative/computational. "
'Input should ideally be math expressions, ex. "8^3", but can also be '
"natural language if a math expression is not possible."
),
),
make_calls.CallTool()
]
def build_tools(
user: User, callback_handlers: list[BaseCallbackHandler]
) -> list[BaseTool]:
"""Build all authenticated tools given a phone number."""
added_tools: list[BaseTool] = []
# Zapier
if user.zapier_access_token:
zapier_wrapper = ZapierNLAWrapper(
zapier_nla_oauth_access_token=user.zapier_access_token
)
zapier_toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier_wrapper)
added_tools.extend(zapier_toolkit.get_tools())
# Text messages
TextToolClass = send_texts.create_text_message_tool(user.phone)
added_tools.append(TextToolClass())
# User longterm memory
added_tools.extend(create_user_memory_tools(user.phone))
# Add all tools together
tools = NO_AUTH_TOOLS + added_tools
# Check for proper tool types
if not all(isinstance(tool, BaseTool) for tool in tools):
raise TypeError("All tools must be of type BaseTool (or subclass thereof).")
# Add callback manager to all tools
for tool in tools:
tool.callbacks = callback_handlers
return tools
| [] |
2024-01-10 | preritdas/jeeves | jeeves~agency~send_texts.py | """Wrapper around texts to work best as an agent."""
from langchain.agents.tools import BaseTool
import json
from typing import Any, Coroutine
from jeeves import texts
def create_text_message_tool(inbound_phone: str) -> type[BaseTool]:
"""
Create a tool to send text messages.
Args:
inbound_phone: The phone number to send a confirmation text to after
sending the message.
Returns:
A tool to send text messages.
"""
class TextMessageTool(BaseTool):
"""Wrapper around texts to work best as an agent."""
name: str = "Send Text Message"
description = (
"Useful for when you need to send a text message. Input must be a JSON string with "
'the keys "content" and "recipient_phone" (10-digit phone number preceded by '
'country code, ex. "12223334455". Do not make up phone numbers - either '
"use a phone number explicitly provided by the user, or use a phone number from a "
"tool that provides it for you (ex. contacts, if available). Otherwise, do not use this tool. "
'Write the content as you, Jeeves, not as me.'
)
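# Illustrative example of the JSON input this tool expects (the phone number and
# message content are hypothetical):
#   '{"content": "Dinner is confirmed for 7pm.", "recipient_phone": "12223334455"}'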
def _run(self, query: str) -> str:
"""Send a text message."""
input_parsed = json.loads(query)
# Validate
assert "content" in input_parsed, 'Input must have a "content" key.'
assert isinstance(input_parsed["content"], str), "Content must be a string."
content = input_parsed["content"]
assert (
"recipient_phone" in input_parsed
), 'Input must have a "recipient_phone" key.'
assert (
len(str(input_parsed["recipient_phone"]).replace("+", "")) == 11
), "Recipient must be a phone number preceded by country code."
recipient = str(input_parsed["recipient_phone"])
try:
send_res = texts.send_message(content=content, recipient=recipient)
except Exception as e:
return f"Error: {str(e)}"
else:
texts.send_message(
content=(
"Sir, I'm informing you that I have sent the following message to "
f"{recipient}:\n\n{content}"
),
recipient=inbound_phone,
)
if send_res:
return "Message delivered successfully."
else:
return "Message failed to deliver."
def _arun(self, *args: Any, **kwargs: Any) -> Coroutine[Any, Any, str]:
raise NotImplementedError(f"{type(self).__name__} does not support async.")
return TextMessageTool
| [] |
2024-01-10 | preritdas/jeeves | jeeves~agency~user_memory~database.py | """
Long term memory tool. Jeeves decides when to store items,
then uses the tool to retrieve items to get more information.
"""
from pymongo import MongoClient
from langchain.schema import Document
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import TokenTextSplitter
from langchain.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
import datetime as dt
import pytz
from config import CONFIG
from keys import KEYS
from jeeves.utils import validate_phone_number
from jeeves.agency.user_memory.models import Entry
# Memory database collection
MEMORY_COLL = MongoClient(KEYS.MongoDB.connect_str)["Jeeves"]["user_memory"]
# Question answering stuff
llm = ChatOpenAI(model_name="gpt-4", openai_api_key=KEYS.OpenAI.api_key, temperature=0)
embeddings = OpenAIEmbeddings(openai_api_key=KEYS.OpenAI.api_key)
splitter = TokenTextSplitter(
encoding_name="cl100k_base", chunk_size=300, chunk_overlap=50
)
class UserMemory:
"""
A user's long term memory. Stores entries, which are text snippets
with a timestamp.
Initialize using the `from_user_phone` classmethod. This will fetch
all entries from the database. Then, use `add_entry` to add an entry
to the user's memory. Finally, use `answer_question` to answer a question
using the user's memory.
"""
def __init__(self, user_phone: str, entries: list[Entry]):
self.entries = entries
self.user_phone = validate_phone_number(user_phone)
@classmethod
def from_user_phone(cls, user_phone: str) -> "UserMemory":
"""Get all entries from a user."""
entries = MEMORY_COLL.find({"user_phone": validate_phone_number(user_phone)})
return cls(user_phone=user_phone, entries=[Entry(**entry) for entry in entries])
def add_entry(self, content: str) -> bool:
"""Add an entry to the user's memory."""
entry = Entry(
datetime=dt.datetime.now(pytz.timezone(CONFIG.General.default_timezone)),
user_phone=self.user_phone,
content=content
)
self.entries.append(entry)
MEMORY_COLL.insert_one(entry.to_dict())
return True
def answer_question(self, question: str) -> str:
"""
First converts the initial source, then queries it. The query must be a string,
and the answer will be a string. This does not work with the string-in-string-out
nature of an LLM agent, so it is not exposed to the user.
"""
if not self.entries:
return "Currently, there are no entries in user longterm memory."
docs = [Document(page_content=entry.to_string()) for entry in self.entries]
vectorstore = FAISS.from_documents(docs, embeddings)
_find_similar = lambda k: vectorstore.similarity_search(question, k=k)
similar_docs = _find_similar(15)
# Adjust the instructions based on the source
PREFIX = (
"You are a User Memory Answerer. Your context is notes from "
"someone's memory. Use the user's memory, nothing else, to "
"answer the question. "
)
qa_chain = load_qa_chain(llm)
qa_chain.llm_chain.prompt.messages[0].prompt.template = (
PREFIX + qa_chain.llm_chain.prompt.messages[0].prompt.template
)
return qa_chain.run(input_documents=similar_docs, question=question)
def purge(self) -> bool:
"""Delete all entries from the user's memory. Use with caution."""
MEMORY_COLL.delete_many({"user_phone": validate_phone_number(self.user_phone)})
return True
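# Minimal usage sketch of UserMemory (phone number and entry text are hypothetical):
#   memory = UserMemory.from_user_phone("12223334455")
#   memory.add_entry("I parked on level 2 of the garage.")
#   memory.answer_question("Where did I park?")  # answered only from the stored entries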
| [] |
2024-01-10 | preritdas/jeeves | jeeves~agency~__init__.py | """Agent GPT."""
from langchain.agents import Tool, ZeroShotAgent, AgentExecutor
from langchain.chat_models import ChatOpenAI
from langchain.callbacks import get_openai_callback
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import OutputParserException
import uuid
import pytz
import datetime as dt
from keys import KEYS
from config import CONFIG
from jeeves.permissions import User
from jeeves.agency import tool_auth
from jeeves.agency.chat_history.models import Message
from jeeves.agency import logs_callback, prompts
from jeeves.agency.chat_history import ChatHistory
# ---- Build the agent ----
class InternalThoughtZeroShotAgent(ZeroShotAgent):
"""
A normal ZeroShotAgent but doesn't inject "Thought:" before the LLM. After testing
and heavy prompt engineering, I've found a better sucess rate with having the LLM
create its own "Thought" label. This is because it knows that each Thought must
also have either an Action/Action Input or a Final Answer.
"""
@property
def llm_prefix(self) -> str:
"""Prefix to append the llm call with."""
return ""
# --- Create the LLM and AgentExecutor ---
llm = ChatOpenAI(
model_name=CONFIG.GPT.base_openai_model,
openai_api_key=KEYS.OpenAI.api_key,
temperature=CONFIG.GPT.temperature
)
def create_agent_executor(
toolkit: list[Tool],
user: User,
callback_handlers: list[BaseCallbackHandler],
) -> AgentExecutor:
"""Create the agent given authenticated tools."""
agent_prompts: prompts.AgentPrompts = prompts.build_prompts(user)
agent = InternalThoughtZeroShotAgent.from_llm_and_tools(
llm=llm,
tools=toolkit,
handle_parsing_errors=True,
prefix=agent_prompts.prefix,
format_instructions=agent_prompts.format_instructions,
suffix=agent_prompts.suffix
)
return AgentExecutor(
agent=agent,
tools=toolkit,
max_iterations=50,
verbose=True,
callbacks=callback_handlers
)
def create_base_agent_executor(
toolkit: list[Tool],
callback_handlers: list[BaseCallbackHandler],
) -> AgentExecutor:
"""Create the agent executor without a User object."""
agent_prompts: prompts.AgentPrompts = prompts.build_base_agent_prompts()
agent = InternalThoughtZeroShotAgent.from_llm_and_tools(
llm=llm,
tools=toolkit,
handle_parsing_errors=True,
prefix=agent_prompts.prefix,
format_instructions=agent_prompts.format_instructions,
suffix=agent_prompts.suffix
)
return AgentExecutor(
agent=agent,
tools=toolkit,
max_iterations=50,
verbose=True,
callbacks=callback_handlers
)
# ---- Run the agent ----
def retry_couldnt_parse(function):
"""Decorator to retry up to three times if a specific ValueError occurs."""
def wrapper(*args, **kwargs):
retries = 0
last_exception = None
while retries < 3:
try:
return function(*args, **kwargs)
except OutputParserException as e:
if "Could not parse LLM output" in str(e):
retries += 1
last_exception = e
else:
raise e
raise last_exception
return wrapper
@retry_couldnt_parse
def run_agent(agent_executor: AgentExecutor, query: str, uid: str) -> str:
"""Run the agent."""
with get_openai_callback() as cb:
res = agent_executor.run(query)
logs_callback.logger.info(
f"{uid}: UsageInfo: "
f"Total Tokens: {cb.total_tokens}, "
f"Prompt Tokens: {cb.prompt_tokens}, "
f"Completion Tokens: {cb.completion_tokens}, "
f"Total Cost (USD): ${cb.total_cost:.2f}."
)
return res
def _create_uid() -> str:
"""Create a unique ID for an agent run."""
return str(uuid.uuid4())
def generate_agent_response(content: str, user: User, uid: str = "") -> str:
"""Build tools, create executor, and run the agent. UID is optional."""
uid = uid or _create_uid()
assert user
# Build chat history and toolkit using inbound phone
ChatHistory.from_inbound_phone(user.phone)
callback_handlers = logs_callback.create_callback_handlers(uid)
toolkit = tool_auth.build_tools(user, callback_handlers)
# Run
agent_executor = create_agent_executor(
toolkit, user, callback_handlers
)
response: str = run_agent(agent_executor, content, uid)
# Save message to chats database
ChatHistory.from_inbound_phone(user.phone).add_message(
Message(
datetime=dt.datetime.now(pytz.timezone(CONFIG.General.default_timezone)),
inbound_phone=user.phone,
user_input=content,
agent_response=response
)
)
return response.strip()
def generate_base_agent_response(content: str, uid: str = "") -> str:
"""Create executor and run the agent. UID is optional."""
# Use overridden uid or create a new one
uid = uid or _create_uid()
# Build toolkit using default callback handlers
callback_handlers = logs_callback.create_callback_handlers(uid)
toolkit = tool_auth.NO_AUTH_TOOLS
# Insert callback handlers for all tools
for tool in toolkit:
tool.callbacks = callback_handlers
# Run
agent_executor = create_base_agent_executor(toolkit, callback_handlers)
response: str = run_agent(agent_executor, content, uid)
return response.strip()
| [] |
2024-01-10 | elebumm/YouTubeAIExtension | endpoint~utils~database.py | from datetime import datetime
from typing import List
import cassio
from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster
from cassandra.query import BatchStatement
from dotenv import load_dotenv
import os
import arrow
from models import ChatMessage, FixedSubs
load_dotenv()
from openai import OpenAI
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
auth_provider = PlainTextAuthProvider(os.getenv("ASTRA_DB_CLIENT"), os.getenv("ASTRA_DB_SECRET"))
cluster = Cluster(
cloud={
"secure_connect_bundle": os.getenv("SECURE_CONNECT_BUNDLE_PATH")
},
auth_provider=auth_provider,
)
session = cluster.connect()
def check_if_exists(youtube_id: str) -> bool:
print("Checking if exists")
query = f"""
SELECT youtube_id FROM {os.getenv("ASTRA_DB_KEYSPACE")}.videos WHERE youtube_id = '{youtube_id}' LIMIT 1;
"""
rows = session.execute(query)
if not rows:
return False
return True
def create_table():
auth_provider = PlainTextAuthProvider(os.getenv("ASTRA_DB_CLIENT"), os.getenv("ASTRA_DB_SECRET"))
cluster = Cluster(
cloud={
"secure_connect_bundle": "secure-connect-youtubeai.zip"
},
auth_provider=auth_provider,
)
session = cluster.connect()
query = f"""
CREATE TABLE IF NOT EXISTS {os.getenv("ASTRA_DB_KEYSPACE")}.videos (
youtube_id text,
words text,
start_time time,
vector vector<float, 1536>,
PRIMARY KEY (youtube_id, start_time)
);
"""
session.execute(query)
print("Table created successfully")
def embed_transcript(transcript: List[FixedSubs], youtube_id: str, query=None):
if query:
print("Embedding query")
return client.embeddings.create(
model="text-embedding-ada-002",
input=query
).data[0].embedding
print("Embedding transcript")
embeddings = client.embeddings.create(
model="text-embedding-ada-002",
input=[
sub.text for sub in transcript
]
).data
for e in embeddings:
transcript[embeddings.index(e)].embedding = e.embedding
print("pushing to cassandra")
prepared = session.prepare(
f"""
INSERT INTO {os.getenv("ASTRA_DB_KEYSPACE")}.videos (youtube_id, words, vector, start_time)
VALUES (?, ?, ?, ?);
"""
)
batch = BatchStatement()
for sub in transcript:
start_time = datetime.strptime(sub.start_time, "%H:%M:%S.%f").time()
batch.add(prepared, (youtube_id, sub.text, sub.embedding, start_time))
try:
session.execute(batch)
except Exception as e:
print(f"An error occured: {e}")
return transcript
def query_astra(query: str, youtube_id: str, chat: List[ChatMessage] = []):
print("Querying Astra")
embedding = embed_transcript([], youtube_id, query=query)
prepared = session.prepare(
f"""
SELECT words, start_time FROM {os.getenv("ASTRA_DB_KEYSPACE")}.videos
WHERE youtube_id = '{youtube_id}'
ORDER BY vector ANN OF {embedding}
LIMIT 1;
"""
)
rows = list(session.execute(prepared))
for row in rows:
result = row.words
start_time = datetime.strptime(row.start_time.time().strftime("%H:%M:%S"), "%H:%M:%S").time()
messages = [{
"role": "system",
"content": "You are a youtube helper bot. "
"Your job is to help answer questions about a video based on the vector "
"database results that are given to you. Please keep your answers brief. "
}]
for message in chat:
messages.append(message)
messages.append({
"role": "user",
"content": f"User Query: {query}"
f"vector search results: {result}"
})
chat = client.chat.completions.create(
model="gpt-4-1106-preview",
messages=messages
)
return {
"chat": chat.choices[0].message.content,
"time": start_time
}
| [
"User Query: PLACEHOLDERvector search results: PLACEHOLDER",
"You are a youtube helper bot. Your job is to help answer questions about a video based on the vector database results that are given to you. Please keep your answers brief. "
] |
2024-01-10 | aix1971/langchain | libs~langchain~langchain~memory~buffer.py | from typing import Any, Dict, List, Optional
from pydantic import root_validator
from langchain.memory.chat_memory import BaseChatMemory, BaseMemory
from langchain.memory.utils import get_prompt_input_key
from langchain.schema.messages import get_buffer_string
class ConversationBufferMemory(BaseChatMemory):
"""Buffer for storing conversation memory."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
memory_key: str = "history" #: :meta private:
@property
def buffer(self) -> Any:
"""String buffer of memory."""
return self.buffer_as_messages if self.return_messages else self.buffer_as_str
@property
def buffer_as_str(self) -> str:
"""Exposes the buffer as a string in case return_messages is True."""
return get_buffer_string(
self.chat_memory.messages,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
@property
def buffer_as_messages(self) -> List[Any]:
"""Exposes the buffer as a list of messages in case return_messages is False."""
return self.chat_memory.messages
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
return {self.memory_key: self.buffer}
class ConversationStringBufferMemory(BaseMemory):
"""Buffer for storing conversation memory."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
"""Prefix to use for AI generated responses."""
buffer: str = ""
output_key: Optional[str] = None
input_key: Optional[str] = None
memory_key: str = "history" #: :meta private:
@root_validator()
def validate_chains(cls, values: Dict) -> Dict:
"""Validate that return messages is not True."""
if values.get("return_messages", False):
raise ValueError(
"return_messages must be False for ConversationStringBufferMemory"
)
return values
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return history buffer."""
return {self.memory_key: self.buffer}
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
if self.output_key is None:
if len(outputs) != 1:
raise ValueError(f"One output key expected, got {outputs.keys()}")
output_key = list(outputs.keys())[0]
else:
output_key = self.output_key
human = f"{self.human_prefix}: " + inputs[prompt_input_key]
ai = f"{self.ai_prefix}: " + outputs[output_key]
self.buffer += "\n" + "\n".join([human, ai])
def clear(self) -> None:
"""Clear memory contents."""
self.buffer = ""
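# Minimal usage sketch (the example strings are illustrative):
#   memory = ConversationBufferMemory()
#   memory.save_context({"input": "Hi there"}, {"output": "Hello! How can I help?"})
#   memory.load_memory_variables({})
#   # -> {"history": "Human: Hi there\nAI: Hello! How can I help?"}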
| [] |
2024-01-10 | kolinko/herald | common.py | # various helper functions for interacting with OpenAI, HN, cache, etc
import requests
import json
import random
import hashlib
import time
import datetime
import tiktoken
encoding = tiktoken.get_encoding("cl100k_base") # gpt2 for gpt3, and cl100k_base for gpt3turbo
from openai import OpenAI
try:
from api_keys import organisation, api_key
except:
print("You need to setup api keys first.\nEdit api_keys.py.default.py, adding your API keys, and rename the file to api_keys.py")
exit()
openai = OpenAI(organization=organisation, api_key=api_key )
#openai.organization = organisation
#openai.api_key = api_key
import redis
r = redis.Redis(host='localhost', port=6379, db=0)
try:
r.ping()
except redis.exceptions.ConnectionError as ex:
print("Redis server not enabled. Error: ", str(ex))
print("Please install or start Redis server.")
exit()
def md5(s):
return hashlib.md5(s.encode('utf-8')).hexdigest()
def ai16k(system, prompt, retry=True, cache=True):
return ai(system, prompt, "gpt-3.5-turbo-16k", retry=retry, cache=cache)
def ai3(system, prompt, retry=True, cache=True):
return ai(system, prompt, "gpt-3.5-turbo", retry=retry, cache=cache)
def ai(system, prompt, json=False, model="gpt-4-1106-preview", retry=True, cache=True):
cache_key = f'ai-cache:{model}:' + md5(system+'***'+prompt)
if cache and r.exists(cache_key):
return r.get(cache_key).decode('utf-8')
messages = [
{"role": "system", "content": system},
{"role": "user", "content": prompt}
]
while True:
try:
completion = openai.chat.completions.create(model=model, messages=messages, response_format={'type':'json_object' if json else 'text'})
result = completion.choices[0].message.content
r.set(cache_key, result)
return result
except Exception as e:
# if not retry:
# raise e
# Print the error message in red color
print("\033[91m" + f"Error occurred: {str(e)}" + "\033[0m")
time.sleep(1)
print('WTF')
def count_tokens(s):
input_ids = encoding.encode(s)
return len(input_ids)
def in_cache(url):
return r.exists(url)
def download(url):
response = requests.get(url)
response.raise_for_status() # Check for any HTTP errors
return response.content
def download_and_cache(url, cache_only=False, key_prefix=''):
# Check if the content is already cached
if r.exists(key_prefix+url):
res = r.get(key_prefix+url)
if res is not None:
return res
elif cache_only:
return None
while True:
try:
# If not cached, download and cache the content
response = requests.get(url)
response.raise_for_status() # Check for any HTTP errors
content = response.content
r.set(key_prefix+url, content)
return content
except Exception as e:
# Print the error message in red color
print("\033[91m" + f"Error occurred: {str(e)}" + "\033[0m")
# Sleep for some time before retrying
time.sleep(random.randint(5, 60))
def json_fetch(kind, id, cache_only=False):
url = f"https://hacker-news.firebaseio.com/v0/{kind}/{id}.json?print=pretty"
result = download_and_cache(url, cache_only)
if cache_only and result is None:
return None
return json.loads(result)
def pretty_time(ts):
dt = datetime.datetime.fromtimestamp(ts)
return dt.strftime("%Y-%m-%d %H:%M:%S")
# basic profiling functions
start_time = time.time()
def reset():
global start_time
start_time = time.time()
def elapsed():
elapsed_time = (time.time() - start_time) * 1000 # Get the elapsed time in milliseconds
print(f"Time elapsed: {elapsed_time:.2f} ms")
| [] |
2024-01-10 | JeremyNixon/codegen | core~steps.py | """
GPT Engineer workflow definition and execution
This module provides the necessary utilities and functions to orchestrate the execution of GPT-engineer's tasks
related to code generation, execution, and review. It leverages a flexible approach to system prompt creation,
workflow execution, and interaction with AI, allowing for various configurations and stages of operation.
Imports:
- Standard libraries: inspect, re, subprocess
- Additional libraries/packages: termcolor, typing, enum
- Internal modules/packages: langchain.schema, gpt_engineer.core, gpt_engineer.cli
Key Features:
- Dynamic system prompt creation for both new code generation and improving existing code.
- A series of utility functions for handling various tasks like AI code generation, user clarification,
code execution, and human review.
- Configurable workflow steps to control the process of code generation and execution in different scenarios.
- Flexibility to adapt to different configurations and use cases.
Classes:
- Config: An enumeration representing different configurations or operation modes for the workflow.
Functions:
- setup_sys_prompt(dbs: DBs) -> str: Creates a system prompt for the AI.
- setup_sys_prompt_existing_code(dbs: DBs) -> str: System prompt creation using existing code base.
- curr_fn() -> str: Returns the name of the current function.
- lite_gen(ai: AI, dbs: DBs) -> List[Message]: Runs the AI on the main prompt and saves results.
- simple_gen(ai: AI, dbs: DBs) -> List[Message]: Runs the AI on default prompts and saves results.
- clarify(ai: AI, dbs: DBs) -> List[Message]: Interacts with the user for clarification.
- gen_clarified_code(ai: AI, dbs: DBs) -> List[dict]: Generates code after clarification.
- execute_entrypoint(ai: AI, dbs: DBs) -> List[dict]: Executes code entry point and asks user for confirmation.
- gen_entrypoint(ai: AI, dbs: DBs) -> List[dict]: Generates entry point based on information about a codebase.
- use_feedback(ai: AI, dbs: DBs): Uses feedback from users to improve code.
- set_improve_filelist(ai: AI, dbs: DBs): Sets the file list for existing code improvements.
- assert_files_ready(ai: AI, dbs: DBs): Checks for the required files for code improvement.
- get_improve_prompt(ai: AI, dbs: DBs): Interacts with the user to know what they want to fix in existing code.
- improve_existing_code(ai: AI, dbs: DBs): Generates improved code after getting the file list and user prompt.
- human_review(ai: AI, dbs: DBs): Collects and stores human review of the generated code.
Constants:
- STEPS: A dictionary that maps the Config enum to lists of functions to execute for each configuration.
Note:
- This module is central to the GPT-engineer system and its functions are intended to be used in orchestrated
workflows. As such, it should be used carefully, with attention to the correct order and sequence of operations.
"""
import openai
openai.api_key = "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"
import inspect
import re
import subprocess
from enum import Enum
from typing import List, Union
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from termcolor import colored
from gpt_engineer.core.ai import AI
from gpt_engineer.core.chat_to_files import (
format_file_to_input,
get_code_strings,
overwrite_files_with_edits,
to_files_and_memory,
)
from gpt_engineer.core.db import DBs
from gpt_engineer.cli.file_selector import FILE_LIST_NAME, ask_for_files
from gpt_engineer.cli.learning import human_review_input
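# Small self-contained chat helper used by the error-recovery flow in execute_entrypoint
# below: it keeps a running message list and sends it to the OpenAI chat API on each call.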
class Chat:
def __init__(self, system_prompt=''):
self.system = system_prompt
self.messages = []
def response(self, prompt):
self.messages.append({"role": "user", "content":prompt})
response = openai.ChatCompletion.create(
# model="gpt-3.5-turbo",
model="gpt-4",
messages = self.messages,
# max_tokens=200
)
response = response['choices'][0]['message']['content']
self.messages.append({"role": "assistant", "content":response})
return response
# Type hint for chat messages
Message = Union[AIMessage, HumanMessage, SystemMessage]
def setup_sys_prompt(dbs: DBs) -> str:
"""
Constructs a system prompt for the AI based on predefined instructions and philosophies.
This function is responsible for setting up the system prompts for the AI, instructing
it on how to generate code and the coding philosophy to adhere to. The constructed prompt
consists of the "roadmap", "generate" (with dynamic format replacements), and the coding
"philosophy" taken from the given DBs object.
Parameters:
- dbs (DBs): The database object containing pre-defined prompts and instructions.
Returns:
- str: The constructed system prompt for the AI.
"""
return (
dbs.preprompts["roadmap"]
+ dbs.preprompts["generate"].replace("FILE_FORMAT", dbs.preprompts["file_format"])
+ "\nUseful to know:\n"
+ dbs.preprompts["philosophy"]
)
def setup_sys_prompt_existing_code(dbs: DBs) -> str:
"""
Constructs a system prompt for the AI focused on improving an existing codebase.
This function sets up the system prompts for the AI, guiding it on how to
work with and improve an existing code base. The generated prompt consists
of the "improve" instruction (with dynamic format replacements) and the coding
"philosophy" taken from the given DBs object.
Parameters:
- dbs (DBs): The database object containing pre-defined prompts and instructions.
Returns:
- str: The constructed system prompt focused on existing code improvement for the AI.
"""
return (
dbs.preprompts["improve"].replace("FILE_FORMAT", dbs.preprompts["file_format"])
+ "\nUseful to know:\n"
+ dbs.preprompts["philosophy"]
)
def curr_fn() -> str:
"""
Retrieves the name of the calling function.
This function uses Python's inspection capabilities to dynamically fetch the
name of the function that called `curr_fn()`. This approach ensures that the
function's name isn't hardcoded, making it more resilient to refactoring and
changes to function names.
Returns:
- str: The name of the function that called `curr_fn()`.
"""
return inspect.stack()[1].function
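# For example, calling curr_fn() inside simple_gen() below returns the string "simple_gen".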
def lite_gen(ai: AI, dbs: DBs) -> List[Message]:
"""
Executes the AI model using the main prompt and saves the generated results.
This function invokes the AI model by feeding it the main prompt. After the
AI processes and generates the output, the function saves this output to the
specified workspace. The AI's output is also tracked using the current function's
name to provide context.
Parameters:
- ai (AI): An instance of the AI model.
- dbs (DBs): An instance containing the database configurations, including input prompts
and file formatting preferences.
Returns:
- List[Message]: A list of message objects encapsulating the AI's output.
Note:
The function assumes the `ai.start` method and the `to_files` utility to be correctly
set up and functional. Ensure these prerequisites before invoking `lite_gen`.
"""
messages = ai.start(
dbs.input["prompt"], dbs.preprompts["file_format"], step_name=curr_fn()
)
to_files_and_memory(messages[-1].content.strip(), dbs)
return messages
def simple_gen(ai: AI, dbs: DBs) -> List[Message]:
"""
Executes the AI model using the default system prompts and saves the output.
This function prepares the system prompt using the provided database configurations
and then invokes the AI model with this system prompt and the main input prompt.
Once the AI generates the output, this function saves it to the specified workspace.
The AI's execution is tracked using the name of the current function for contextual reference.
Parameters:
- ai (AI): An instance of the AI model.
- dbs (DBs): An instance containing the database configurations, including system and
input prompts, and file formatting preferences.
Returns:
- List[Message]: A list of message objects encapsulating the AI's generated output.
Note:
The function assumes the `ai.start` method and the `to_files` utility are correctly
set up and functional. Ensure these prerequisites are in place before invoking `simple_gen`.
"""
messages = ai.start(setup_sys_prompt(dbs), dbs.input["prompt"], step_name=curr_fn())
to_files_and_memory(messages[-1].content.strip(), dbs)
return messages
def clarify(ai: AI, dbs: DBs) -> List[Message]:
"""
Interactively queries the user for clarifications on the prompt and saves the AI's responses.
This function presents a series of clarifying questions to the user, based on the AI's
initial assessment of the provided prompt. The user can continue to interact and seek
clarifications until they indicate that they have "nothing to clarify" or manually
opt to move on. If the user doesn't provide any input, the AI is instructed to make its
own assumptions and to state them explicitly before proceeding.
Parameters:
- ai (AI): An instance of the AI model.
- dbs (DBs): An instance containing the database configurations, which includes system
and input prompts.
Returns:
- List[Message]: A list of message objects encapsulating the AI's generated output and
interactions.
Note:
The function assumes the `ai.fsystem`, `ai.next`, and `curr_fn` utilities are correctly
set up and functional. Ensure these prerequisites are in place before invoking `clarify`.
"""
messages: List[Message] = [ai.fsystem(dbs.preprompts["clarify"])]
user_input = dbs.input["prompt"]
while True:
messages = ai.next(messages, user_input, step_name=curr_fn())
msg = messages[-1].content.strip()
if "nothing to clarify" in msg.lower():
break
if msg.lower().startswith("no"):
print("Nothing to clarify.")
break
print()
user_input = input('(answer in text, or "c" to move on)\n')
print()
if not user_input or user_input == "c":
print("(letting gpt-engineer make its own assumptions)")
print()
messages = ai.next(
messages,
"Make your own assumptions and state them explicitly before starting",
step_name=curr_fn(),
)
print()
return messages
user_input += """
\n\n
Is anything else unclear? If yes, ask another question.\n
Otherwise state: "Nothing to clarify"
"""
print()
return messages
def gen_clarified_code(ai: AI, dbs: DBs) -> List[dict]:
"""
Generates code based on clarifications obtained from the user.
This function processes the messages logged during the user's clarification session
and uses them, along with the system's prompts, to guide the AI in generating code.
The generated code is saved to a specified workspace.
Parameters:
- ai (AI): An instance of the AI model, responsible for processing and generating the code.
- dbs (DBs): An instance containing the database configurations, which includes system
and input prompts.
Returns:
- List[dict]: A list of message dictionaries capturing the AI's interactions and generated
outputs during the code generation process.
Note:
The function assumes the `ai.fsystem`, `ai.next`, `AI.deserialize_messages`, `curr_fn`,
and `to_files` utilities are correctly set up and functional. Ensure these prerequisites
are in place before invoking `gen_clarified_code`.
"""
messages = AI.deserialize_messages(dbs.logs[clarify.__name__])
messages = [
ai.fsystem(setup_sys_prompt(dbs)),
] + messages[
1:
] # skip the first clarify message, which was the original clarify priming prompt
messages = ai.next(
messages,
dbs.preprompts["generate"].replace("FILE_FORMAT", dbs.preprompts["file_format"]),
step_name=curr_fn(),
)
to_files_and_memory(messages[-1].content.strip(), dbs)
return messages
def execute_entrypoint(ai: AI, dbs: DBs) -> List[dict]:
"""
Executes the specified entry point script (`run.sh`) from a workspace.
This function prompts the user to confirm whether they wish to execute a script named
'run.sh' located in the specified workspace. If the user confirms, the script is
executed using a subprocess. The user is informed that they can interrupt the
execution at any time using ctrl+c.
Parameters:
- ai (AI): An instance of the AI model, not directly used in this function but
included for consistency with other functions.
- dbs (DBs): An instance containing the database configurations and workspace
information.
Returns:
- List[dict]: An empty list. This function does not produce a list of messages
but returns an empty list for consistency with the return type of other related
functions.
Note:
The function assumes the presence of a 'run.sh' script in the specified workspace.
Ensure the script is available and that it has the appropriate permissions
(e.g., executable) before invoking this function.
"""
command = dbs.workspace["run.sh"]
print()
print(
colored(
"Do you want to execute this code? (Y/n)",
"red",
)
)
print()
print(command)
print()
if input().lower() not in ["", "y", "yes"]:
print("Ok, not executing the code.")
return []
print("Executing the code...")
print()
print(
colored(
"Note: If it does not work as expected, consider running the code"
+ " in another way than above.",
"green",
)
)
print()
print("You can press ctrl+c *once* to stop the execution.")
print()
p = subprocess.Popen("bash run.sh", shell=True, cwd=dbs.workspace.path, stderr=subprocess.PIPE)
error = ""
try:
_, error = p.communicate()
except KeyboardInterrupt:
print()
print("Stopping execution.")
print("Execution stopped.")
p.kill()
print()
if error:
error = error.decode('utf-8')
print("Error:", error)
ai_chat = Chat()
instructions = ai_chat.response(f"Give clear instructions for resolving the following error: {error}. Ensure that the instructions include the full path to the file to update to fix the error.")
print("Sugggested Fix:", instructions)
print("Implementing Sugggested Fix:")
change_file = ai_chat.response("Is modifying the contents of a file the correct way to fix the error? Only respond with one character, 'Y' for yes and 'N' for no.")
if change_file == 'Y':
file_name = ai_chat.response(f"Only return the full file path to update from these instructions: {instructions}")
# Get File Content String
with open(file_name, 'r') as file:
file_content = file.read()
# Update File Content String
new_file_content = ai_chat.response(f"""Update the file content to fix the error: {error}.
                                              Your instructions are: {instructions}.
The original file content is: {file_content}.
Only return the full contents of the new file, without any other text.
Rewrite it entirely, do not return suggested modifications.""")
            # Write the corrected contents back to the file
            with open(file_name, 'w') as file:
                file.write(new_file_content)
else:
print("Please make the required changes and rerun the command. {command} \nIf you were recommended a modified command, please use that.")
# rerun the code
execute_entrypoint(ai, dbs)
return []
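# Standalone sketch of the capture-stderr-and-report pattern used above
# (illustrative only; the working directory and script name are placeholders):
#
#     import subprocess
#     proc = subprocess.Popen("bash run.sh", shell=True, cwd="/tmp/workspace",
#                             stderr=subprocess.PIPE)
#     _, err = proc.communicate()
#     if err:
#         print("Error:", err.decode("utf-8"))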
def gen_entrypoint(ai: AI, dbs: DBs) -> List[dict]:
"""
Generates an entry point script based on a given codebase's information.
This function prompts the AI model to generate a series of Unix terminal commands
required to a) install dependencies and b) run all necessary components of a codebase
provided in the workspace. The generated commands are then saved to 'run.sh' in the
workspace.
Parameters:
- ai (AI): An instance of the AI model.
- dbs (DBs): An instance containing the database configurations and workspace
information, particularly the 'all_output.txt' which contains details about the
codebase on disk.
Returns:
- List[dict]: A list of messages containing the AI's response.
Notes:
- The AI is instructed not to install packages globally, use 'sudo', provide
explanatory comments, or use placeholders. Instead, it should use example values
where necessary.
- The function uses regular expressions to extract command blocks from the AI's
response to create the 'run.sh' script.
- It assumes the presence of an 'all_output.txt' file in the specified workspace
that contains information about the codebase.
"""
messages = ai.start(
system=(
"You will get information about a codebase that is currently on disk in "
"the current folder.\n"
"From this you will answer with code blocks that includes all the necessary "
"unix terminal commands to "
"a) install dependencies "
"b) run all necessary parts of the codebase (in parallel if necessary).\n"
"Do not install globally. Do not use sudo.\n"
"Do not explain the code, just give the commands.\n"
"Do not use placeholders, use example values (like . for a folder argument) "
"if necessary.\n"
),
user="Information about the codebase:\n\n" + dbs.memory["all_output.txt"],
step_name=curr_fn(),
)
print()
regex = r"```\S*\n(.+?)```"
matches = re.finditer(regex, messages[-1].content.strip(), re.DOTALL)
dbs.workspace["run.sh"] = "\n".join(match.group(1) for match in matches)
return messages
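# Illustration of the command-block extraction above (a sketch with made-up text):
#
#     import re
#     sample = "```sh\npip install -r requirements.txt\npython main.py\n```"
#     blocks = re.finditer(r"```\S*\n(.+?)```", sample, re.DOTALL)
#     run_sh = "\n".join(m.group(1) for m in blocks)
#     # run_sh == "pip install -r requirements.txt\npython main.py\n"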
def use_feedback(ai: AI, dbs: DBs):
"""
Uses the provided feedback to improve the generated code.
This function takes in user feedback and applies it to modify previously
generated code. If feedback is available, the AI model is primed with the
system prompt and user instructions and then proceeds to process the feedback.
The modified code is then saved back to the workspace. If feedback is not found,
the user is informed to provide a 'feedback' file in the appropriate directory.
Parameters:
- ai (AI): An instance of the AI model.
- dbs (DBs): An instance containing the database configurations and workspace
information, particularly the 'all_output.txt' which contains the previously
generated code, and 'input' which may contain the feedback from the user.
Notes:
- The function assumes the feedback will be found in 'dbs.input["feedback"]'.
- If feedback is provided, the AI processes it and the resulting code is saved
back to the workspace.
- If feedback is absent, an instruction is printed to the console, and the program
terminates.
"""
messages = [
ai.fsystem(setup_sys_prompt(dbs)),
ai.fuser(f"Instructions: {dbs.input['prompt']}"),
ai.fassistant(dbs.memory["all_output.txt"]), # reload previously generated code
]
if dbs.input["feedback"]:
messages = ai.next(messages, dbs.input["feedback"], step_name=curr_fn())
to_files_and_memory(messages[-1].content.strip(), dbs)
return messages
else:
print(
"No feedback was found in the input folder. Please create a file "
+ "called 'feedback' in the same folder as the prompt file."
)
exit(1)
def set_improve_filelist(ai: AI, dbs: DBs):
"""
Set the list of files for the AI to work with in the 'existing code mode'.
This function initiates the process to determine which files from an existing
codebase the AI should work with. By calling `ask_for_files()`, it prompts for
and sets the specific files that should be considered, storing their full paths.
Parameters:
- ai (AI): An instance of the AI model. Although passed to this function, it is
not used within the function scope and might be for consistency with other
function signatures.
- dbs (DBs): An instance containing the database configurations and project metadata,
which is used to gather information about the existing codebase. Additionally,
the 'input' is used to handle user interactions related to file selection.
Returns:
- list: Returns an empty list, which can be utilized for consistency in return
types across related functions.
Note:
- The selected file paths are stored as a side-effect of calling `ask_for_files()`,
and they aren't directly returned by this function.
"""
"""Sets the file list for files to work with in existing code mode."""
ask_for_files(dbs.project_metadata, dbs.workspace) # stores files as full paths.
return []
def assert_files_ready(ai: AI, dbs: DBs):
"""
Verify the presence of required files for headless 'improve code' execution.
This function checks the existence of 'file_list.txt' in the project metadata
and the presence of a 'prompt' in the input. If either of these checks fails,
an assertion error is raised to alert the user of the missing requirements.
Parameters:
- ai (AI): An instance of the AI model. Although passed to this function, it is
not used within the function scope and might be for consistency with other
function signatures.
- dbs (DBs): An instance containing the database configurations and project metadata,
which is used to validate the required files' presence.
Returns:
- list: Returns an empty list, which can be utilized for consistency in return
types across related functions.
Raises:
- AssertionError: If 'file_list.txt' is not present in the project metadata
or if 'prompt' is not present in the input.
Notes:
- This function is typically used in 'auto_mode' scenarios to ensure that the
necessary files are set up correctly before proceeding with the 'improve code'
operation.
"""
"""Checks that the required files are present for headless
improve code execution."""
assert (
"file_list.txt" in dbs.project_metadata
), "For auto_mode file_list.txt need to be in your .gpteng folder."
assert "prompt" in dbs.input, "For auto_mode a prompt file must exist."
return []
def get_improve_prompt(ai: AI, dbs: DBs):
"""
Asks the user what they would like to fix.
"""
if not dbs.input.get("prompt"):
dbs.input["prompt"] = input(
"\nWhat do you need to improve with the selected files?\n"
)
confirm_str = "\n".join(
[
"-----------------------------",
"The following files will be used in the improvement process:",
f"{FILE_LIST_NAME}:",
colored(str(dbs.project_metadata[FILE_LIST_NAME]), "green"),
"",
"The inserted prompt is the following:",
colored(f"{dbs.input['prompt']}", "green"),
"-----------------------------",
"",
"You can change these files in your project before proceeding.",
"",
"Press enter to proceed with modifications.",
"",
]
)
input(confirm_str)
return []
def improve_existing_code(ai: AI, dbs: DBs):
"""
Process and improve the code from a specified set of existing files based on a user prompt.
This function first retrieves the code from the designated files and then formats this
code to be processed by the Language Learning Model (LLM). After setting up the system prompt
for existing code improvements, the files' contents are sent to the LLM. Finally, the user's
prompt detailing desired improvements is passed to the LLM, and the subsequent response
from the LLM is used to overwrite the original files.
Parameters:
- ai (AI): An instance of the AI model that is responsible for processing and generating
responses based on the provided system and user inputs.
- dbs (DBs): An instance containing the database configurations, user prompts, and project metadata.
It is used to fetch the selected files for improvement and the user's improvement prompt.
Returns:
- list[Message]: Returns a list of Message objects that record the interaction between the
system, user, and the AI model. This includes both the input to and the response from the LLM.
Notes:
- Ensure that the user has correctly set up the desired files for improvement and provided an
appropriate prompt before calling this function.
- The function expects the files to be formatted in a specific way to be properly processed by the LLM.
"""
"""
    After the file list and prompt have been acquired, this function is called
    to send the formatted prompt to the LLM.
"""
files_info = get_code_strings(
dbs.workspace, dbs.project_metadata
) # this has file names relative to the workspace path
messages = [
ai.fsystem(setup_sys_prompt_existing_code(dbs)),
]
# Add files as input
for file_name, file_str in files_info.items():
code_input = format_file_to_input(file_name, file_str)
messages.append(ai.fuser(f"{code_input}"))
messages.append(ai.fuser(f"Request: {dbs.input['prompt']}"))
messages = ai.next(messages, step_name=curr_fn())
overwrite_files_with_edits(messages[-1].content.strip(), dbs)
return messages
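# Hypothetical end-to-end sketch of the 'improve code' flow (function names are from
# this module; construction of `ai` and `dbs` is assumed to happen elsewhere):
#
#     set_improve_filelist(ai, dbs)
#     get_improve_prompt(ai, dbs)
#     improve_existing_code(ai, dbs)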
def human_review(ai: AI, dbs: DBs):
"""
Collects human feedback on the code and stores it in memory.
This function prompts the user for a review of the generated or improved code using the `human_review_input`
function. If a valid review is provided, it's serialized to JSON format and stored within the database's
memory under the "review" key.
Parameters:
- ai (AI): An instance of the AI model. Although not directly used within the function, it is kept as
a parameter for consistency with other functions.
- dbs (DBs): An instance containing the database configurations, user prompts, project metadata,
and memory storage. This function specifically interacts with the memory storage to save the human review.
Returns:
- list: Returns an empty list, indicating that there's no subsequent interaction with the LLM
or no further messages to be processed.
Notes:
- It's assumed that the `human_review_input` function handles all the interactions with the user to
gather feedback and returns either the feedback or None if no feedback was provided.
- Ensure that the database's memory has enough space or is set up correctly to store the serialized review data.
"""
"""Collects and stores human review of the code"""
review = human_review_input()
if review is not None:
dbs.memory["review"] = review.to_json() # type: ignore
return []
class Config(str, Enum):
"""
Enumeration representing different configuration modes for the code processing system.
Members:
- DEFAULT: Standard procedure for generating, executing, and reviewing code.
- BENCHMARK: Used for benchmarking the system's performance without execution.
- SIMPLE: A basic procedure involving generation, execution, and review.
- LITE: A lightweight procedure for generating code without further processing.
- CLARIFY: Process that starts with clarifying ambiguities before code generation.
- EXECUTE_ONLY: Only executes the code without generation.
- EVALUATE: Execute the code and then undergo a human review.
- USE_FEEDBACK: Uses prior feedback for code generation and subsequent steps.
- IMPROVE_CODE: Focuses on improving existing code based on a provided prompt.
- EVAL_IMPROVE_CODE: Validates files and improves existing code.
- EVAL_NEW_CODE: Evaluates newly generated code without further steps.
Each configuration mode dictates the sequence and type of operations performed on the code.
"""
DEFAULT = "default"
BENCHMARK = "benchmark"
SIMPLE = "simple"
LITE = "lite"
CLARIFY = "clarify"
EXECUTE_ONLY = "execute_only"
EVALUATE = "evaluate"
USE_FEEDBACK = "use_feedback"
IMPROVE_CODE = "improve_code"
EVAL_IMPROVE_CODE = "eval_improve_code"
EVAL_NEW_CODE = "eval_new_code"
STEPS = {
Config.DEFAULT: [
simple_gen,
gen_entrypoint,
execute_entrypoint,
human_review,
],
Config.LITE: [
lite_gen,
],
Config.CLARIFY: [
clarify,
gen_clarified_code,
gen_entrypoint,
execute_entrypoint,
human_review,
],
Config.BENCHMARK: [
simple_gen,
gen_entrypoint,
],
Config.SIMPLE: [
simple_gen,
gen_entrypoint,
execute_entrypoint,
],
Config.USE_FEEDBACK: [use_feedback, gen_entrypoint, execute_entrypoint, human_review],
Config.EXECUTE_ONLY: [execute_entrypoint],
Config.EVALUATE: [execute_entrypoint, human_review],
Config.IMPROVE_CODE: [
set_improve_filelist,
get_improve_prompt,
improve_existing_code,
],
Config.EVAL_IMPROVE_CODE: [assert_files_ready, improve_existing_code],
Config.EVAL_NEW_CODE: [simple_gen],
}
"""
A dictionary mapping Config modes to a list of associated processing steps.
The STEPS dictionary dictates the sequence of functions or operations to be
performed based on the selected configuration mode from the Config enumeration.
This enables a flexible system where the user can select the desired mode and
the system can execute the corresponding steps in sequence.
Examples:
- For Config.DEFAULT, the system will first generate the code using `simple_gen`,
then generate the entry point with `gen_entrypoint`, execute the generated
code using `execute_entrypoint`, and finally collect human review using `human_review`.
- For Config.LITE, the system will only use the `lite_gen` function to generate the code.
This setup allows for modularity and flexibility in handling different user requirements and scenarios.
"""
# Future steps that can be added:
# run_tests_and_fix_files
# execute_entrypoint_and_fix_files_if_it_results_in_error
| [] |
2024-01-10 | chanchimin/AgentVerse | agentverse~memory.py | # Modified from langchain.memory.summary.py
from typing import Any, Dict, List, Tuple, Type, Union
from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import SUMMARY_PROMPT
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import (AgentAction, AIMessage, BaseMessage, ChatMessage,
SystemMessage, get_buffer_string)
from pydantic import BaseModel, root_validator
from agentverse.message import Message
class SummarizerMixin(BaseModel):
llm: BaseLanguageModel
prompt: BasePromptTemplate = SUMMARY_PROMPT
summary_message_cls: Type[BaseMessage] = AIMessage
def predict_new_summary(
self, messages: List[ChatMessage], existing_summary: str
) -> str:
lines = []
for message in messages:
if message.role == "":
# no role. it's tool responses
lines.append(message.content)
else:
lines.append(f"{message.role}: {message.content}")
new_lines = "\n".join(lines)
chain = LLMChain(llm=self.llm, prompt=self.prompt)
return chain.predict(summary=existing_summary, new_lines=new_lines)
class SummaryMemory(BaseChatMemory, SummarizerMixin):
"""Conversation summarizer to memory."""
buffer: str = ""
memory_key: str = "history" #: :meta private:
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
if self.return_messages:
buffer: Any = [self.summary_message_cls(content=self.buffer)]
else:
buffer = self.buffer
return {self.memory_key: buffer}
@root_validator()
def validate_prompt_input_variables(cls, values: Dict) -> Dict:
"""Validate that prompt input variables are consistent."""
prompt_variables = values["prompt"].input_variables
expected_keys = {"summary", "new_lines"}
if expected_keys != set(prompt_variables):
raise ValueError(
"Got unexpected prompt input variables. The prompt expects "
f"{prompt_variables}, but it should have {expected_keys}."
)
return values
def save_context(self, contexts: Union[List[Tuple[AgentAction, str]], List[Message]]) -> None:
"""Save context from this conversation to buffer."""
for context in contexts:
if isinstance(context, Message):
self.chat_memory.messages.append(ChatMessage(content=context.content, role=context.sender))
elif isinstance(context, tuple) and len(context) == 2 and \
isinstance(context[0], AgentAction) and isinstance(context[1], str):
self.chat_memory.messages.append(ChatMessage(content=context[0].log.strip() + '\nObservation:' + context[1], role=""))
self.buffer = self.predict_new_summary(
self.chat_memory.messages[-len(contexts):], self.buffer
)
def clear(self) -> None:
"""Clear memory contents."""
super().clear()
self.buffer = ""
| [
"\nObservation:"
] |
2024-01-10 | chanchimin/AgentVerse | agentverse~initialization.py | import os
from typing import Dict, List
import yaml
from bmtools.agent.singletool import import_all_apis, load_single_tools
from langchain.agents import Agent as langchainAgent
from langchain.chat_models import ChatOpenAI
from langchain.chat_models.base import BaseChatModel
from langchain.llms import OpenAI
from langchain.llms.base import BaseLLM
from langchain.memory import ChatMessageHistory
from langchain.memory.prompt import _DEFAULT_SUMMARIZER_TEMPLATE
from langchain.prompts import PromptTemplate
from agentverse.agents import Agent
from agentverse.environments import BaseEnvironment, env_registry
from agentverse.memory import SummaryMemory
from agentverse.parser import output_parser_registry
def load_llm(llm_config: Dict):
llm_type = llm_config.pop('llm_type', 'text-davinci-003')
if llm_type == 'gpt-3.5-turbo':
return ChatOpenAI(**llm_config)
elif llm_type == 'text-davinci-003':
return OpenAI(**llm_config)
else:
raise NotImplementedError("LLM type {} not implemented".format(llm_type))
def load_memory(memory_config: Dict):
memory_type = memory_config.pop("memory_type", "chat_message_history")
if memory_type == "chat_message_history":
return ChatMessageHistory()
elif memory_type == 'summary':
        llm = load_llm(memory_config.pop('llm', {'llm_type': 'text-davinci-003'}))
prompt = memory_config.pop('prompt', _DEFAULT_SUMMARIZER_TEMPLATE)
memory_config['prompt'] = PromptTemplate(
input_variables=["summary", "new_lines"], template=prompt
)
return SummaryMemory(llm=llm, **memory_config)
else:
raise NotImplementedError("Memory type {} not implemented".format(memory_type))
def load_tools(tool_config: List[Dict]):
if len(tool_config) == 0:
return []
all_tools_list = []
for tool in tool_config:
_, config = load_single_tools(tool['tool_name'], tool['tool_url'])
all_tools_list += import_all_apis(config)
return all_tools_list
def load_environment(env_config: Dict) -> BaseEnvironment:
env_type = env_config.pop('env_type', 'base')
return env_registry.build(env_type, **env_config)
def load_agent(agent_config: Dict) -> langchainAgent:
agent_type = agent_config.pop('agent_type', 'chat')
if agent_type == "chat":
agent = Agent.from_llm_and_tools(**agent_config)
else:
raise NotImplementedError("Agent type {} not found".format(agent_type))
return agent
def prepare_task_config(task):
"""Read the yaml config of the given task in `tasks` directory."""
all_task_dir = os.path.join(os.path.dirname(__file__), 'tasks')
task_path = os.path.join(all_task_dir, task)
config_path = os.path.join(task_path, 'config.yaml')
if not os.path.exists(task_path):
all_tasks = []
        for task_name in os.listdir(all_task_dir):
            if os.path.isdir(os.path.join(all_task_dir, task_name)) \
                and task_name != "__pycache__":
                all_tasks.append(task_name)
raise ValueError(f"Task {task} not found. Available tasks: {all_tasks}")
if not os.path.exists(config_path):
raise ValueError("You should include the config.yaml file in the task directory")
task_config = yaml.safe_load(open(config_path))
parser = output_parser_registry.build(task)
task_config['output_parser'] = parser
for i, agent_configs in enumerate(task_config['agents']):
agent_configs['memory'] = load_memory(agent_configs['memory'])
if agent_configs.get('tool_memory', None) is not None:
agent_configs['tool_memory'] = load_memory(agent_configs['tool_memory'])
llm = load_llm(agent_configs['llm'])
agent_configs['llm'] = llm
agent_configs['tools'] = load_tools(agent_configs.get("tools", []))
        # BaseLLM and its subclasses use .format to fill in {chat_history} and {agent_scratchpad} during prompting,
        # so we have to keep the escaped brackets {{ and }} in the tool descriptions (they become { and } after formatting).
        # BaseChatModel and its subclasses do not use .format, so we replace {{ and }} with { and } in the tool descriptions.
if isinstance(llm, BaseLLM):
tool_strings = "\n".join(
[f"> {tool.name}: {tool.description}" for tool in agent_configs['tools']]
)
elif isinstance(llm, BaseChatModel):
tool_strings = "\n".join(
[f"> {tool.name}: {tool.description.replace('{{', '{').replace('}}', '}')}" for tool in agent_configs['tools']]
)
else:
raise NotImplementedError("LLM type {} not supported".format(llm.__class__.__name__))
tool_names = ", ".join([tool.name for tool in agent_configs['tools']])
        # Here we assume that the description of the tools only appears in the prefix prompt with the placeholder {tool},
        # and we assume that the format prompt contains the placeholder {tool_names}, which tells the model
        # which tools are available.
# TODO: Improve the flexibility
agent_configs['output_parser'] = task_config['output_parser']
agent_configs['prefix_prompt'] = agent_configs['prefix_prompt'] + '\n' + agent_configs['role_description']
agent_configs['format_prompt'] = agent_configs['format_prompt'].format(tool_names=tool_names, tools=tool_strings)
return task_config | [] |
2024-01-10 | chanchimin/AgentVerse | agentverse~message.py | from typing import List, Tuple
from langchain.schema import AgentAction, ChatMessage
class Message(ChatMessage):
sender: str
receiver: List[str]
tool_response: List[Tuple[AgentAction, str]]
| [] |
2024-01-10 | chanchimin/AgentVerse | agentverse~tasks~math_problem_2players_tools~output_parser.py | from __future__ import annotations
import re
from typing import Union
from langchain.agents import AgentOutputParser
from langchain.schema import AgentAction, AgentFinish
from agentverse.parser import OutputParseError, output_parser_registry
@output_parser_registry.register("math_problem_2players_tools")
class MathProblem2PlayersToolsParser(AgentOutputParser):
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
cleaned_output = text.strip()
cleaned_output = re.sub(r'\n+', '\n', cleaned_output)
cleaned_output = cleaned_output.split('\n')
if not (len(cleaned_output) == 2 and
# cleaned_output[0].startswith("THOUGHT:") and
cleaned_output[0].startswith("ACTION:") and
cleaned_output[1].startswith("ACTION INPUT:")):
print(text)
raise OutputParseError("Output Format Error")
action = cleaned_output[0][len("ACTION:"):].strip()
action_input = cleaned_output[1][len("ACTION INPUT:"):].strip()
if action in ["Speak"]:
return AgentFinish({"output": action_input}, text)
else:
return AgentAction(action, action_input, text)
| [] |
2024-01-10 | theashishgavade/AI-Chat-Assistant | My_AI_Assistant_test.py | # Import the necessary libraries
import openai
import gradio as gr
# Set your OpenAI API key for authentication
openai.api_key = "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"
# Define an initial message to start the conversation with the AI Chat Assistant
messages = [{"role": "system", "content": "AI Chat Assistant for all your need"}]
# Define a function called AIChatAssistant that takes user input and generates a response
def AIChatAssistant(user_input, messages=messages):
# Append the user input to the existing messages
messages.append({"role": "user", "content": user_input})
# Generate a response using the OpenAI Chat Completion API
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
# Extract the assistant's reply from the API response
ChatAssistant_reply = response["choices"][0]["message"]["content"]
# Append the assistant's reply to the messages
messages.append({"role": "assistant", "content": ChatAssistant_reply})
# Return the assistant's reply and updated messages
all_responses = "\n".join([message["content"] for message in messages])
return ChatAssistant_reply, all_responses
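# Quick sanity-check sketch (assumes a valid OpenAI API key is configured above):
#
#     reply, history = AIChatAssistant("What can you help me with?")
#     print(reply)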
# Create a Gradio interface for the AIChatAssistant function
def chat_interface():
text_input = gr.Textbox()
output_text = gr.Textbox()
previous_responses = gr.Textbox()
def chat_assistant_with_ui(user_input):
assistant_reply, all_responses = AIChatAssistant(user_input)
previous_responses_obj = gr.Textbox(all_responses) # Create a new object with updated text
return assistant_reply, previous_responses_obj
gr.Interface(fn=chat_assistant_with_ui, inputs=text_input, outputs=[output_text, previous_responses], title="AI Chat Assistant").launch(share=True)
# Launch the Gradio interface with the chat_interface function
chat_interface()
| [
"AI Chat Assistant for all your need"
] |
2024-01-10 | theashishgavade/AI-Chat-Assistant | My_AI_Assistant.py | # Import the necessary libraries
import openai
import gradio
# Set your OpenAI API key for authentication
openai.api_key = "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"
# Define an initial message to start the conversation with the AI Chat Assistant
messages = [{"role": "system", "content": "AI Chat Assistant for all your need"}]
# Define a function called AIChatAssistant that takes user input and generates a response
def AIChatAssistant(user_input):
# Append the user input to the existing messages
messages.append({"role": "user", "content": user_input})
# Generate a response using the OpenAI Chat Completion API
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
# Extract the assistant's reply from the API response
    ChatAssistant_reply = response["choices"][0]["message"]["content"]
    # Append the assistant's reply to the messages
    messages.append({"role": "assistant", "content": ChatAssistant_reply})
    # Return the assistant's reply
    return ChatAssistant_reply
# Create a Gradio interface for the AIChatAssistant function
demo = gradio.Interface(fn=AIChatAssistant, inputs="text", outputs="text", title="AI Chat Assistant")
# Launch the Gradio interface and make it publicly accessible
demo.launch(share=True) | [
"AI Chat Assistant for all your need"
] |
2024-01-10 | weshaggard/azure-sdk-for-python | sdk~openai~azure-openai~tests~conftest.py | # coding=utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
import pytest
import requests
import aiohttp
import yarl
import functools
import openai
from devtools_testutils.sanitizers import add_header_regex_sanitizer, add_oauth_response_sanitizer
from azure.identity import DefaultAzureCredential
# for pytest.parametrize
ALL = ["azure", "azuread", "openai"]
AZURE = "azure"
OPENAI = "openai"
AZURE_AD = "azuread"
# Environment variable keys
ENV_AZURE_OPENAI_ENDPOINT = "AZURE_OPENAI_ENDPOINT"
ENV_AZURE_OPENAI_KEY = "AZURE_OPENAI_KEY"
ENV_AZURE_OPENAI_WHISPER_ENDPOINT = "AZURE_OPENAI_WHISPER_ENDPOINT"
ENV_AZURE_OPENAI_WHISPER_KEY = "AZURE_OPENAI_WHISPER_KEY"
ENV_SUBSCRIPTION_ID = "AZURE_SUBSCRIPTION_ID"
ENV_TENANT_ID = "AZURE_TENANT_ID"
ENV_CLIENT_ID = "AZURE_CLIENT_ID"
ENV_CLIENT_SECRET = "AZURE_CLIENT_SECRET"
ENV_AZURE_OPENAI_SEARCH_ENDPOINT = "AZURE_OPENAI_SEARCH_ENDPOINT"
ENV_AZURE_OPENAI_SEARCH_KEY = "AZURE_OPENAI_SEARCH_KEY"
ENV_AZURE_OPENAI_SEARCH_INDEX = "AZURE_OPENAI_SEARCH_INDEX"
ENV_AZURE_OPENAI_API_VERSION = "2023-09-01-preview"
ENV_AZURE_OPENAI_COMPLETIONS_NAME = "text-davinci-003"
ENV_AZURE_OPENAI_CHAT_COMPLETIONS_NAME = "gpt-35-turbo-16k"
ENV_AZURE_OPENAI_EMBEDDINGS_NAME = "text-embedding-ada-002"
ENV_AZURE_OPENAI_AUDIO_NAME = "whisper-deployment"
ENV_OPENAI_KEY = "OPENAI_KEY"
ENV_OPENAI_COMPLETIONS_MODEL = "text-davinci-003"
ENV_OPENAI_CHAT_COMPLETIONS_MODEL = "gpt-3.5-turbo"
ENV_OPENAI_EMBEDDINGS_MODEL = "text-embedding-ada-002"
ENV_OPENAI_AUDIO_MODEL = "whisper-1"
# Fake values
TEST_ENDPOINT = "https://test-resource.openai.azure.com/"
TEST_KEY = "0000000000000000"
TEST_ID = "00000000-0000-0000-0000-000000000000"
@pytest.fixture(scope="session", autouse=True)
def add_sanitizers(test_proxy, environment_variables):
sanitization_mapping = {
ENV_AZURE_OPENAI_ENDPOINT: TEST_ENDPOINT,
ENV_AZURE_OPENAI_KEY: TEST_KEY,
ENV_AZURE_OPENAI_WHISPER_ENDPOINT: TEST_ENDPOINT,
ENV_AZURE_OPENAI_WHISPER_KEY: TEST_KEY,
ENV_SUBSCRIPTION_ID: TEST_ID,
ENV_TENANT_ID: TEST_ID,
ENV_CLIENT_ID: TEST_ID,
ENV_CLIENT_SECRET: TEST_ID,
ENV_OPENAI_KEY: TEST_KEY,
ENV_AZURE_OPENAI_SEARCH_ENDPOINT: TEST_ENDPOINT,
ENV_AZURE_OPENAI_SEARCH_KEY: TEST_KEY,
}
environment_variables.sanitize_batch(sanitization_mapping)
add_oauth_response_sanitizer()
add_header_regex_sanitizer(key="Set-Cookie", value="[set-cookie;]")
@pytest.fixture(scope="session")
def azure_openai_creds():
yield {
"completions_name": ENV_AZURE_OPENAI_COMPLETIONS_NAME,
"chat_completions_name": ENV_AZURE_OPENAI_CHAT_COMPLETIONS_NAME,
"embeddings_name": ENV_AZURE_OPENAI_EMBEDDINGS_NAME,
"completions_model": ENV_OPENAI_COMPLETIONS_MODEL,
"chat_completions_model": ENV_OPENAI_CHAT_COMPLETIONS_MODEL,
"embeddings_model": ENV_OPENAI_EMBEDDINGS_MODEL,
"search_endpoint": os.getenv(ENV_AZURE_OPENAI_SEARCH_ENDPOINT),
"search_key": os.getenv(ENV_AZURE_OPENAI_SEARCH_KEY),
"search_index": os.getenv(ENV_AZURE_OPENAI_SEARCH_INDEX),
"audio_name": ENV_AZURE_OPENAI_AUDIO_NAME,
"audio_model": ENV_OPENAI_AUDIO_MODEL,
}
def configure_api_type(api_type, whisper=False, **kwargs):
if api_type == "azure":
if whisper:
openai.api_base = os.getenv(ENV_AZURE_OPENAI_WHISPER_ENDPOINT).rstrip("/")
openai.api_key = os.getenv(ENV_AZURE_OPENAI_WHISPER_KEY)
else:
openai.api_base = os.getenv(ENV_AZURE_OPENAI_ENDPOINT).rstrip("/")
openai.api_key = os.getenv(ENV_AZURE_OPENAI_KEY)
openai.api_type = "azure"
openai.api_version = ENV_AZURE_OPENAI_API_VERSION
elif api_type == "azuread":
if whisper:
openai.api_base = os.getenv(ENV_AZURE_OPENAI_WHISPER_ENDPOINT).rstrip("/")
else:
openai.api_base = os.getenv(ENV_AZURE_OPENAI_ENDPOINT).rstrip("/")
credential = DefaultAzureCredential()
token = credential.get_token("https://cognitiveservices.azure.com/.default")
openai.api_type = "azuread"
openai.api_key = token.token
openai.api_version = ENV_AZURE_OPENAI_API_VERSION
elif api_type == "openai":
openai.api_base = "https://api.openai.com/v1"
openai.api_type = "openai"
openai.api_key = os.getenv(ENV_OPENAI_KEY)
openai.api_version = None
def configure_async(f):
@functools.wraps(f)
async def wrapper(*args, **kwargs):
api_type = kwargs.pop("api_type")
whisper = args[0].qualified_test_name.startswith("test_audio")
configure_api_type(api_type, whisper=whisper, **kwargs)
try:
return await f(*args, api_type=api_type, **kwargs)
except openai.error.RateLimitError:
pytest.skip(f"{str(f).split(' ')[1]}[{api_type}]: Skipping - Rate limit reached.")
return wrapper
def configure(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
api_type = kwargs.pop("api_type")
whisper = args[0].qualified_test_name.startswith("test_audio")
configure_api_type(api_type, whisper=whisper, **kwargs)
try:
return f(*args, api_type=api_type, **kwargs)
except openai.error.RateLimitError:
pytest.skip(f"{str(f).split(' ')[1]}[{api_type}]: Skipping - Rate limit reached.")
return wrapper
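# Hypothetical test using the decorator above (the exact fixtures and parametrize
# values depend on the test module; shown only to illustrate how `api_type` flows
# through the wrapper):
#
#     @configure
#     @pytest.mark.parametrize("api_type", [AZURE, OPENAI])
#     def test_completion(self, azure_openai_creds, api_type, **kwargs):
#         ...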
def setup_adapter(deployment_id):
class CustomAdapter(requests.adapters.HTTPAdapter):
def send(self, request, **kwargs):
request.url = f"{openai.api_base}/openai/deployments/{deployment_id}/extensions/chat/completions?api-version={openai.api_version}"
return super().send(request, **kwargs)
session = requests.Session()
session.mount(
prefix=f"{openai.api_base}/openai/deployments/{deployment_id}",
adapter=CustomAdapter()
)
openai.requestssession = session
def setup_adapter_async(deployment_id):
class CustomAdapterAsync(aiohttp.ClientRequest):
async def send(self, conn) -> aiohttp.ClientResponse:
self.url = yarl.URL(f"{openai.api_base}/openai/deployments/{deployment_id}/extensions/chat/completions?api-version={openai.api_version}")
return await super().send(conn)
session = aiohttp.ClientSession(request_class=CustomAdapterAsync)
openai.aiosession.set(session)
| [] |
2024-01-10 | eglock/iriS | iris.py | import struct
import subprocess
import time
import logging
import traceback
import json
import requests
from datetime import datetime
from contextlib import contextmanager
import openai
import pvporcupine
import pvcobra
import pyaudio
from pydub import AudioSegment
from halo import Halo
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
logger = logging.getLogger(__name__)
spinner = Halo(spinner='dots')
CONFIG = {
'chunk_size': 512,
'sample_rate': 16000,
'silence_threshold': 1.5, # seconds
'wake_word_paths': ['wake/hey_iris.ppn'],
'format': pyaudio.paInt16,
'channels': 1,
'alert_sounds': {
'wake': '/System/Library/Sounds/Ping.aiff',
'success': '/System/Library/Sounds/Glass.aiff',
'fail': '/System/Library/Sounds/Basso.aiff'
},
'hass_address': '10.10.233.190',
'hass_port': '8123'
}
def init_picovoice_modules(access_key):
"""Initialize Porcupine and Cobra."""
try:
porcupine = pvporcupine.create(access_key=access_key, keyword_paths=CONFIG['wake_word_paths'])
cobra = pvcobra.create(access_key=access_key)
return porcupine, cobra
except Exception as e:
logger.error("Failed to initialize Picovoice modules: ")
logger.error(traceback.format_exc())
raise
@contextmanager
def audio_stream(p):
"""Context manager for PyAudio to ensure resources are cleaned up."""
stream = p.open(
format=CONFIG['format'],
channels=CONFIG['channels'],
rate=CONFIG['sample_rate'],
input=True,
frames_per_buffer=CONFIG['chunk_size']
)
try:
yield stream
finally:
stream.stop_stream()
stream.close()
def get_next_audio_frame(stream):
"""Get the next audio frame from the stream, handle overflow."""
frame_data = stream.read(CONFIG['chunk_size'], exception_on_overflow=False)
return struct.unpack_from("h" * CONFIG['chunk_size'], frame_data)
def save_frames_as_mp3(frames, file_path="output.mp3"):
"""Save the audio frames as an MP3 file."""
# Convert the list of frames into a single bytes object
frames_bytes = b''.join(frames)
# Calculate the number of bytes per frame: 2 bytes/sample * 1 channel
bytes_per_frame = 2 * 1 # Modify as needed based on audio format
# Calculate if there are any leftover bytes and trim the frames_bytes if necessary
remainder = len(frames_bytes) % bytes_per_frame
if remainder != 0:
logger.warning("Found an incomplete frame, trimming %s bytes.", remainder)
frames_bytes = frames_bytes[:-remainder]
# frames_bytes should have a length that is a multiple of bytes_per_frame
try:
audio_segment = AudioSegment(
data=frames_bytes,
sample_width=2, # 2 bytes (16 bit)
frame_rate=CONFIG['sample_rate'],
channels=1 # mono
)
audio_segment.export(file_path, format="mp3")
return True
except Exception as e:
logger.error("An error occurred while saving MP3: ")
logger.error(traceback.format_exc())
return False
def transcribe_audio(file_path="output.mp3"):
"""Transcribe the audio file using OpenAI."""
try:
with open(file_path, 'rb') as f:
spinner.start('Waiting for response from Whisper')
response = openai.Audio.transcribe("whisper-1", f)
spinner.succeed("Response received from Whisper")
return response.text
except KeyboardInterrupt:
spinner.fail("Keyboard interrupt")
return None
except Exception as e:
logger.error("An error occurred while transcribing audio: ")
logger.error(traceback.format_exc())
return None
def get_api_request_via_completion(message):
post_request_function = {
"name": "post",
"description": "Make a POST request to the Home Assistant REST API",
"parameters": {
"type": "object",
"properties": {
"endpoint": {
"type": "string",
"description": "API endpoint to which the request will be sent, excluding the /api/ prefix",
"example": "services/light/turn_on"
},
"body": {
"type": "string",
"description": "Body data to be sent with the request, never to be left empty ",
"example": "{\"entity_id\": \"all\"}"
}
},
"required": ["method", "endpoint", "body"]
},
"response": {
"type": "object",
"properties": {
"message": {
"type": "string",
"description": "Response returned by the API",
"example": "{\n \"latitude\": 0.00000000000000,\n \"longitude\": 0.00000000000000,\n \"elevation\": 0,\n \"unit_system\": {\n \"length\": \"mi\",\n \"accumulated_precipitation\": \"in\",\n \"mass\": \"lb\",\n \"pressure\": \"psi\",\n \"temperature\": \"°F\",\n \"volume\": \"gal\",\n \"wind_speed\": \"mph\"\n },\n \"location_name\": \"1600 Pennsylvania Avenue\",\n \"time_zone\": \"America\/New_York\",\n \"components\": [\n \"hue\",\n \"api\",\n \"zone\",\n \"button\",\n \"fan\",\n \"homekit\",\n \"media_player\",\n \"switch\",\n \"weather\",\n \"history\",\n \"sensor\",\n \"camera\",\n \"scene\",\n \"switch.mqtt\",\n \"light.hue\",\n \"sensor.energy\",\n ],\n \"config_dir\": \"\/config\",\n \"whitelist_external_dirs\": [\n \"\/media\",\n \"\/config\/www\"\n ],\n \"allowlist_external_dirs\": [\n \"\/media\",\n \"\/config\/www\"\n ],\n \"allowlist_external_urls\": [],\n \"version\": \"2023.8.4\",\n \"config_source\": \"storage\",\n \"safe_mode\": false,\n \"state\": \"RUNNING\",\n \"external_url\": null,\n \"internal_url\": null,\n \"currency\": \"USD\",\n \"country\": \"US\",\n \"language\": \"en\"\n}"
}
}
}
}
get_request_function = {
"name": "get",
"description": "Make a GET request to the Home Assistant REST API",
"parameters": {
"type": "object",
"properties": {
"endpoint": {
"type": "string",
"description": "API endpoint to which the request will be sent, excluding the /api/ prefix",
"example": "states"
}
},
"required": ["method", "endpoint"]
},
"response": {
"type": "object",
"properties": {
"message": {
"type": "string",
"description": "Response returned by the API",
"example": "[\n {\n \"attributes\": {},\n \"entity_id\": \"sun.sun\",\n \"last_changed\": \"2016-05-30T21:43:32.418320+00:00\",\n \"state\": \"below_horizon\"\n },\n {\n \"attributes\": {},\n \"entity_id\": \"process.Dropbox\",\n \"last_changed\": \"22016-05-30T21:43:32.418320+00:00\",\n \"state\": \"on\"\n }\n]"
}
}
}
}
msg_log = [
{
"role": "system",
"content": "You are an AI assistant capable of managing home devices through the Home Assistant API. Users will give commands in plain English, which you'll execute via API function calls. The API will reply to you with its response. Adapt if errors occur and explain persistent issues to the user without making duplicate requests. IMPORTANT: Never under any circumstances include an empty request body when making a POST request. Generally, HTTP 400 errors indicate you've malformed your request body, and HTTP 404 errors indicate you've malformed your request endpoint. Never make the same request more than once. When modifying entity states, the API will respond with entities that have been modified. For example: '[]' means that the request was successful and no entity states were changed. Infer when you've completed a task or when more information is needed, and respond with '[DONE]' at the end in order to secede control back to the user. For example: 'I've turned on the lights [DONE]' or 'Which lights would you like to turn on? [DONE]'"
},
{
"role": "user",
"content": message
}
]
while True:
spinner.start('Waiting for response from GPT-4')
attempts = 0
while attempts < 3:
try:
attempts += 1
completion = openai.ChatCompletion.create(
model="gpt-4",
messages=msg_log,
temperature=0.25,
functions=[post_request_function, get_request_function]
)
break
except openai.error.RateLimitError:
spinner.warn("Rate limit exceeded, trying again in 5 seconds")
time.sleep(5)
else:
spinner.fail("Rate limit exceeded, exiting")
spinner.succeed("Response received from GPT-4")
response = completion.choices[0].message.content
if response:
if response.endswith("[DONE]"):
print(response[:-6])
# Allow the user to respond
break
print(response)
msg_log.append({"role": "assistant", "content": f"({datetime.now().strftime('%Y-%m-%d %H:%M:%S')}): {response}"})
else:
call = completion.choices[0].message.function_call
call.arguments = json.loads(call.arguments)
if call.name == "post":
logging.info(f"Making POST request to {call.arguments['endpoint']} with message body: {call.arguments['body']}")
msg_log.append({"role": "user", "content": f"\nAPI RESPONSE ({datetime.now().strftime('%Y-%m-%d %H:%M:%S')}): {make_hass_request('POST', call.arguments['endpoint'], call.arguments['body'])}"})
elif call.name == "get":
logging.info(f"Making GET request to {call.arguments['endpoint']}")
msg_log.append({"role": "user", "content": f"\nAPI RESPONSE ({datetime.now().strftime('%Y-%m-%d %H:%M:%S')}): {make_hass_request('GET', call.arguments['endpoint'])}"})
def make_hass_request(method, endpoint='', body=None):
"""Make a request to Home Assistant."""
try:
if method == "GET":
response = requests.get(
f"http://{CONFIG['hass_address']}:{CONFIG['hass_port']}/api/{endpoint}",
headers={
'Authorization': f'Bearer {HASS_KEY}',
'Content-Type': 'application/json'
},
data=body
)
elif method == "POST":
response = requests.post(
f"http://{CONFIG['hass_address']}:{CONFIG['hass_port']}/api/{endpoint}",
headers={
'Authorization': f'Bearer {HASS_KEY}',
'Content-Type': 'application/json'
},
data=body
)
if len(response.text) > 200:
print(f"API Response: {response.text[:200]}...")
else:
print(f"API Response: {response.text}")
return response.text
except Exception as e:
logger.error("An error occurred while making a request to Home Assistant: ")
logger.error(traceback.format_exc())
return None
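# Example calls (illustrative endpoints and bodies only; whether they succeed
# depends on your Home Assistant configuration):
#
#     make_hass_request("GET", "states")
#     make_hass_request("POST", "services/light/turn_on", '{"entity_id": "all"}')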
def process_audio_stream(porcupine, cobra, stream):
"""Main loop to process audio stream, detect wake word, and record command."""
frames = []
wake_timestamp = None
try:
spinner.start("Waiting for wake word")
while True:
frame = get_next_audio_frame(stream)
keyword_index = porcupine.process(frame)
if keyword_index >= 0:
spinner.succeed("Wake word detected!")
spinner.start("Listening")
wake_timestamp = time.time()
silence_timestamp = None
frames = []
subprocess.Popen(["afplay", CONFIG['alert_sounds']['wake']])
while True:
frame = get_next_audio_frame(stream)
frame_bytes = struct.pack("h" * len(frame), *frame)
frames.append(frame_bytes)
is_speech = cobra.process(frame) >= 0.5
if is_speech:
silence_timestamp = None # Speech detected, reset silence_timestamp
else:
if silence_timestamp is None: # First silence frame
silence_timestamp = time.time()
                        elif time.time() - silence_timestamp > CONFIG['silence_threshold']:  # Prolonged silence: stop listening and save
if save_frames_as_mp3(frames):
spinner.succeed("Finished listening")
subprocess.Popen(["afplay", CONFIG['alert_sounds']['success']])
return True
else:
spinner.fail("Failed to save audio")
subprocess.Popen(["afplay", CONFIG['alert_sounds']['fail']])
return False
break
except KeyboardInterrupt:
spinner.fail("Keyboard interrupt")
return False
def main():
p = pyaudio.PyAudio()
try:
openai.api_key = OPENAI_KEY
porcupine, cobra = init_picovoice_modules(PV_KEY)
while True:
with audio_stream(p) as stream:
processed = process_audio_stream(porcupine, cobra, stream)
if processed:
transcription = transcribe_audio()
if transcription:
print(f"USER: {transcription}")
get_api_request_via_completion(transcription)
else:
break
else:
break
except Exception as e:
logger.error("An error occurred: ")
logger.error(traceback.format_exc())
finally:
p.terminate()
porcupine.delete()
if __name__ == "__main__":
try:
from secret import PV_KEY, OPENAI_KEY, HASS_KEY
except ImportError:
raise ImportError("Missing secret.py!")
if not (PV_KEY and OPENAI_KEY and HASS_KEY):
raise ValueError("Missing key(s) in secret.py!")
main()
| [
"You are an AI assistant capable of managing home devices through the Home Assistant API. Users will give commands in plain English, which you'll execute via API function calls. The API will reply to you with its response. Adapt if errors occur and explain persistent issues to the user without making duplicate requests. IMPORTANT: Never under any circumstances include an empty request body when making a POST request. Generally, HTTP 400 errors indicate you've malformed your request body, and HTTP 404 errors indicate you've malformed your request endpoint. Never make the same request more than once. When modifying entity states, the API will respond with entities that have been modified. For example: '[]' means that the request was successful and no entity states were changed. Infer when you've completed a task or when more information is needed, and respond with '[DONE]' at the end in order to secede control back to the user. For example: 'I've turned on the lights [DONE]' or 'Which lights would you like to turn on? [DONE]'",
"endpoint",
"%Y-%m-%d %H:%M:%S"
] |
2024-01-10 | SuffolkLITLab/FormFyxer | formfyxer~docx_wrangling.py | import docx
import sys
import os
from openai import OpenAI
import tiktoken
import json
from docx.oxml import OxmlElement
import re
from typing import List, Tuple, Optional, Union
__all__ = [
"get_labeled_docx_runs",
"update_docx",
"modify_docx_with_openai_guesses",
"get_docx_repr",
"get_modified_docx_runs",
"make_docx_plain_language",
]
def add_paragraph_after(paragraph, text):
p = OxmlElement("w:p")
r = OxmlElement('w:r')
t = OxmlElement('w:t')
t.text = text
r.append(t)
p.append(r)
paragraph._element.addnext(p)
def add_paragraph_before(paragraph, text):
p = OxmlElement("w:p")
r = OxmlElement('w:r')
t = OxmlElement('w:t')
t.text = text
r.append(t)
p.append(r)
paragraph._element.addprevious(p)
def add_run_after(run, text):
r = OxmlElement('w:r')
t = OxmlElement('w:t')
t.text = text
r.append(t)
run._element.addnext(r)
def update_docx(
document: Union[docx.Document, str], modified_runs: List[Tuple[int, int, str, int]]
) -> docx.Document:
"""Update the document with the modified runs.
Note: OpenAI is probabilistic, so the modified run indices may not be correct.
When the index of a run or paragraph is out of range, a new paragraph
will be inserted at the end of the document or a new run at the end of the
paragraph's runs.
Take a careful look at the output document to make sure it is still correct.
Args:
document: the docx.Document object, or the path to the DOCX file
        modified_runs: a list of tuples, each containing the paragraph number, run number, the modified text, and a flag indicating whether a new paragraph should be inserted before or after (for conditional text)
Returns:
The modified document.
"""
## Sort modified_runs in reverse order so inserted paragraphs are in the correct order
modified_runs.sort(key=lambda x: x[0], reverse=True)
if isinstance(document, str):
document = docx.Document(document)
for item in modified_runs:
if len(item) > 4:
continue
paragraph_number, run_number, modified_text, new_paragraph = item
if paragraph_number >= len(document.paragraphs):
add_paragraph_after(document.paragraphs[-1], modified_text)
continue
paragraph = document.paragraphs[paragraph_number]
if run_number >= len(paragraph.runs):
add_run_after(paragraph.runs[-1], modified_text)
continue
run = paragraph.runs[run_number]
if new_paragraph == 1:
add_paragraph_after(paragraph, modified_text)
elif new_paragraph == -1:
add_paragraph_before(paragraph, modified_text)
else:
run.text = modified_text
return document
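# Minimal usage sketch (file names are placeholders):
#
#     doc = update_docx("template.docx", [[0, 1, "Dear {{ other_parties[0] }}:", 0]])
#     doc.save("template_with_variables.docx")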
def get_docx_repr(docx_path: str, paragraph_start:int=0, paragraph_end:Optional[int]=None):
"""Return a JSON representation of the paragraphs and runs in the DOCX file.
Args:
docx_path: path to the DOCX file
Returns:
A JSON representation of the paragraphs and runs in the DOCX file.
"""
items = []
paragraphs = docx.Document(docx_path).paragraphs[paragraph_start:paragraph_end]
for pnum, paragraph in enumerate(paragraphs):
for rnum, run in enumerate(paragraph.runs):
items.append(
[
pnum,
rnum,
run.text,
]
)
return repr(items)
def get_labeled_docx_runs(
docx_path: Optional[str] = None,
    docx_repr: Optional[str] = None,
custom_people_names: Optional[Tuple[str, str]] = None,
openai_client: Optional[OpenAI] = None,
api_key: Optional[str] = None,
) -> List[Tuple[int, int, str, int]]:
"""Scan the DOCX and return a list of modified text with Jinja2 variable names inserted.
Args:
docx_path: path to the DOCX file
        docx_repr: a string representation of the paragraphs and runs in the DOCX file, if docx_path is not provided. This might be useful if you want to work from an in-memory representation rather than a file on disk.
custom_people_names: a tuple of custom names and descriptions to use in addition to the default ones. Like: ("clients", "the person benefiting from the form")
Returns:
A list of tuples, each containing a paragraph number, run number, and the modified text of the run.
"""
custom_name_text = ""
if custom_people_names:
assert isinstance(custom_people_names, list)
for name, description in custom_people_names:
custom_name_text += f" {name} ({description}), \n"
custom_example = """Example input, with paragraph and run numbers indicated:
[
[0, 1, "Dear John Smith:"],
[1, 0, "This sentence can stay as is in the output and will not be in the reply."],
[2, 0, "[Optional: if you are a tenant, include this paragraph]"],
]"""
instructions = """The purpose of the resulting document is to be used as a template within a Docassemble interview, with Jinja2 markup.
Steps:
1. Analyze the document. Identify placeholder text and repeated _____ that should be replaced with a variable name.
2. Insert jinja2 tags around a new variable name that represents the placeholder text.
3. Mark optional paragraphs with conditional Jinja2 tags.
4. Text intended for verbatim output in the final document will remain unchanged.
Example reply, indicating paragraph, run, the new text, and a number indicating if this changes the
current paragraph, adds one before, or adds one after (-1, 0, 1):
{"results":
[
[0, 1, "Dear {{ other_parties[0] }}:", 0],
[2, 0, "{%p if is_tenant %}", -1],
[3, 0, "{%p endif %}", 1],
]
}
"""
instructions += f"""
Rules for variable names:
1. Variables usually refer to people or their attributes.
2. People are stored in lists.
3. We use Docassemble objects and conventions.
4. Use variable names and patterns from the list below. Invent new variable names when it is appropriate.
List names for people:
{custom_people_names}
users (for the person benefiting from the form, especially when for a pro se filer)
other_parties (the opposing party in a lawsuit or transactional party)
plaintiffs
defendants
petitioners
respondents
children
spouses
parents
caregivers
attorneys
translators
debt_collectors
creditors
witnesses
guardians_ad_litem
guardians
decedents
interested_parties
Name Forms:
users (full name of all users)
users[0] (full name of first user)
users[0].name.full() (Alternate full name of first user)
users[0].name.first (First name only)
users[0].name.middle (Middle name only)
users[0].name.middle_initial() (First letter of middle name)
users[0].name.last (Last name only)
users[0].name.suffix (Suffix of user's name only)
Attribute names (replace `users` with the appropriate list name):
Demographic Data:
users[0].birthdate (Birthdate)
users[0].age_in_years() (Calculated age based on birthdate)
users[0].gender (Gender)
users[0].gender_female (User is female, for checkbox field)
users[0].gender_male (User is male, for checkbox field)
users[0].gender_other (User is not male or female, for checkbox field)
users[0].gender_nonbinary (User identifies as nonbinary, for checkbox field)
users[0].gender_undisclosed (User chose not to disclose gender, for checkbox field)
users[0].gender_self_described (User chose to self-describe gender, for checkbox field)
user_needs_interpreter (User needs an interpreter, for checkbox field)
user_preferred_language (User's preferred language)
Addresses:
users[0].address.block() (Full address, on multiple lines)
users[0].address.on_one_line() (Full address on one line)
users[0].address.line_one() (Line one of the address, including unit or apartment number)
users[0].address.line_two() (Line two of the address, usually city, state, and Zip/postal code)
users[0].address.address (Street address)
users[0].address.unit (Apartment, unit, or suite)
users[0].address.city (City or town)
users[0].address.state (State, province, or sub-locality)
users[0].address.zip (Zip or postal code)
users[0].address.county (County or parish)
users[0].address.country (Country)
Other Contact Information:
users[0].phone_number (Phone number)
users[0].mobile_number (A phone number explicitly labeled as the "mobile" number)
users[0].phone_numbers() (A list of both mobile and other phone numbers)
users[0].email (Email)
Signatures:
users[0].signature (Signature)
signature_date (Date the form is completed)
Information about Court and Court Processes:
trial_court (Court's full name)
trial_court.address.county (County where court is located)
trial_court.division (Division of court)
trial_court.department (Department of court)
docket_number (Case or docket number)
docket_numbers (A comma-separated list of docket numbers)
When No Existing Variable Name Exists:
1. Craft short, readable variable names in python snake_case.
2. Represent people with lists, even if only one person.
3. Use valid Python variable names within complete Jinja2 tags, like: {{ new_variable_name }}.
Special endings:
Suffix _date for date values.
Suffix _value or _amount for currency values.
Examples:
"(State the reason for eviction)" transforms into `{{ eviction_reason }}`.
"""
return get_modified_docx_runs(
docx_path = docx_path,
docx_repr = docx_repr,
custom_example=custom_example,
instructions=instructions,
openai_client=openai_client,
api_key=api_key,
)
def get_modified_docx_runs(
docx_path: Optional[str] = None,
docx_repr: Optional[str] = None,
custom_example:str = "",
instructions:str = "",
openai_client: Optional[OpenAI] = None,
api_key:Optional[str]=None,
temperature=0.5,
) -> List[Tuple[int, int, str, int]]:
"""Use GPT to rewrite the contents of a DOCX file paragraph by paragraph. Does not handle tables, footers, or
other structures yet.
This is a light wrapper that provides the structure of DOCX paragraphs and runs to your prompt
to OpenAI to facilitate the rewriting of the document without disrupting formatting.
For example, this could be used to:
* Remove any passive voice
* Replace placeholder text with variable names
* Rewrite to a 6th grade reading level
* Do an advanced search and replace, without requiring you to use a regex
By default, the example prompt includes a sample like this:
[
[0, 0, "Dear "],
[0, 1, "John Smith:"],
[1, 0, "I hope this letter finds you well."],
]
Your custom instructions should include an example of how the sample will be modified, like the one below:
Example reply, indicating paragraph, run, the new text, and a number indicating if this changes the
current paragraph, adds one before, or adds one after (-1, 0, 1):
{"results":
[
[0, 1, "Dear {{ other_parties[0] }}:", 0],
[2, 0, "{%p if is_tenant %}", -1],
[3, 0, "{%p endif %}", 1],
]
}
You may also want to customize the input example to better match your use case.
Args:
docx_path (str): path to the DOCX file
docx_repr (str): a string representation of the paragraphs and runs in the DOCX file, if docx_path is not provided.
custom_example (Optional[str]): a string containing the purpose and overview of the task
instructions (str) a string containing specific instructions for the task
openai_client (Optional[OpenAI]): an OpenAI client object. If not provided a new one will be created.
api_key (Optional[str]): an OpenAI API key. If not provided, it will be obtained from the environment
temperature (float): the temperature to use when generating text. Lower temperatures are more conservative.
Returns:
A list of tuples, each containing a paragraph number, run number, and the modified text of the run.
"""
if docx_path:
docx_repr = get_docx_repr(docx_path)
elif not docx_repr:
raise Exception("Either docx_path or docx_repr must be provided.")
assert isinstance(docx_repr, str)
if not openai_client:
openai_client = OpenAI(
api_key = api_key or os.environ.get("OPENAI_API_KEY")
)
if not custom_example:
custom_example = """[
[0, 0, "Dear"],
[0, 1, "John Smith:"],
[1, 0, "I hope this letter finds you well."],
]"""
    if "[" not in instructions:  # Make sure we have at least a minimal example of the output
instructions += """The result will look like this:
{"results":
[
[0, 1, "modified run", 0],
[1, 0, "another modified run, skipping the run that should be left alone", 0],
]
}
"""
role_description = f"""
You will process a DOCX document and return a JSON structure that transforms the DOCX file
based on the following guidelines and examples. The DOCX will be provided as an annotated series of
paragraphs and runs in JSON structure, like this:
{ custom_example }
The result will be a JSON structure that includes a list of modified runs, each run represented as a list with exactly 4 items:
1. The paragraph number
2. The run number
3. The modified text of the run
4. A number indicating if this changes the current paragraph, adds one before, or adds one after (-1, 0, 1)
{instructions}
The reply ONLY contains the runs that have modified text.
"""
    encoding = tiktoken.encoding_for_model("gpt-4")
token_count = len(encoding.encode(role_description + docx_repr))
if token_count > 128000:
raise Exception(
f"Input to OpenAI is too long ({token_count} tokens). Maximum is 128000 tokens."
)
moderation_response = openai_client.moderations.create(input=role_description + docx_repr)
if moderation_response.results[0].flagged:
raise Exception(
f"OpenAI moderation error: {moderation_response.results[0]}"
)
response = openai_client.chat.completions.create(
model="gpt-4-1106-preview",
messages=[
{"role": "system", "content": role_description},
{"role": "user", "content": docx_repr},
],
response_format={"type": "json_object"},
temperature=temperature,
max_tokens=4096,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
assert isinstance(response.choices[0].message.content, str)
# check finish reason
if response.choices[0].finish_reason != "stop":
raise Exception(
f"OpenAI did not finish processing the document. Finish reason: {response.choices[0].finish_reason}"
)
guesses = json.loads(response.choices[0].message.content)["results"]
return guesses
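# Illustrative usage sketch (the file name and instructions below are hypothetical, not taken from this repo):
# rewrite a DOCX with custom instructions, then apply the guesses and save the edited copy.
#
#   guesses = get_modified_docx_runs(
#       docx_path="complaint.docx",
#       instructions='Rewrite every run in the active voice. The result will look like this: {"results": [[0, 0, "new text", 0]]}',
#   )
#   update_docx(docx.Document("complaint.docx"), guesses).save("complaint.edited.docx")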
def make_docx_plain_language(docx_path: str) -> docx.Document:
"""
Convert a DOCX file to plain language with the help of OpenAI.
"""
guesses = get_modified_docx_runs(
docx_path,
custom_example="""[
[0, 0, "If the location of the land is in a state other than the state in which the tribe’s reservation is located, the tribe’s justification of anticipated benefits from the acquisition will be subject to greater scrutiny."],
[1, 0, "When the process of freeing a vehicle that has been stuck results in ruts or holes, the operator will fill the rut or hole created by such activity before removing the vehicle from the immediate area."],
]""",
instructions="""
You are a plain language expert whose goal is to rewrite the document at a 6th grade reading level, without changing the meaning of the document.
You will rewrite passive voice sentences in the active voice. You will use simple vocabulary words to replace complex ones. You will use short sentences and short paragraphs.
The result will look like this:
{"results":
[
[0, 0, "If the land is in a different State than the tribe’s reservation, we will scrutinize the tribe’s justification of anticipated benefits more thoroughly.", 0],
[1, 0, "If you make a hole while freeing a stuck vehicle, you must fill the hole before you drive away.", 0],
]
}
""",
)
return update_docx(docx.Document(docx_path), guesses)
def modify_docx_with_openai_guesses(docx_path: str) -> docx.Document:
"""Uses OpenAI to guess the variable names for a document and then modifies the document with the guesses.
Args:
docx_path (str): Path to the DOCX file to modify.
Returns:
docx.Document: The modified document, ready to be saved to the same or a new path
"""
guesses = get_labeled_docx_runs(docx_path)
return update_docx(docx.Document(docx_path), guesses)
if __name__ == "__main__":
new_doc = modify_docx_with_openai_guesses(sys.argv[1])
new_doc.save(sys.argv[1] + ".output.docx") | [] |
2024-01-10 | SuffolkLITLab/FormFyxer | formfyxer~lit_explorer.py | # Updated on 2022-12-12
import os
import re
import subprocess
import spacy
from spacy.tokens import Doc
from pdfminer.high_level import extract_text
from pdfminer.layout import LAParams
import pikepdf
import textstat
import requests
import json
import networkx as nx
import numpy as np
import pandas as pd
from numpy import unique
from numpy import where
from sklearn.cluster import AffinityPropagation
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import normalize
from joblib import load
import nltk
from nltk.tokenize import sent_tokenize
from PassivePySrc import PassivePy
import eyecite
from enum import Enum
import sigfig
import yaml
from .pdf_wrangling import (
get_existing_pdf_fields,
FormField,
FieldType,
unlock_pdf_in_place,
is_tagged,
)
try:
from nltk.corpus import stopwords
stopwords.words
except:
print("Downloading stopwords")
nltk.download("stopwords")
from nltk.corpus import stopwords
try:
nltk.data.find("tokenizers/punkt")
except:
nltk.download("punkt")
import math
from contextlib import contextmanager
import threading
import _thread
from typing import (
Optional,
Union,
Iterable,
List,
Dict,
Tuple,
Callable,
TypedDict,
)
import openai
from openai import OpenAI
from transformers import GPT2TokenizerFast
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
stop_words = set(stopwords.words("english"))
try:
# this takes a while to load
import en_core_web_lg
nlp = en_core_web_lg.load()
except:
try:
import en_core_web_sm
nlp = en_core_web_sm.load()
except:
print("Downloading word2vec model en_core_web_sm")
import subprocess
bashCommand = "python -m spacy download en_core_web_sm"
process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
print(f"output of word2vec model download: {str(output)}")
import en_core_web_sm
nlp = en_core_web_sm.load()
passivepy = PassivePy.PassivePyAnalyzer(nlp=nlp)
# Load local variables, models, and API key(s).
###############
# Temporarily replace joblib files with local vars
included_fields = [
"users1_name",
"users1_birthdate",
"users1_address_line_one",
"users1_address_line_two",
"users1_address_city",
"users1_address_state",
"users1_address_zip",
"users1_phone_number",
"users1_email",
"plantiffs1_name",
"defendants1_name",
"petitioners1_name",
"respondents1_name",
"docket_number",
"trial_court_county",
"users1_signature",
"signature_date",
]
with open(
os.path.join(os.path.dirname(__file__), "keys", "spot_token.txt"), "r"
) as in_file:
default_spot_token = in_file.read().rstrip()
try:
with open(
os.path.join(os.path.dirname(__file__), "keys", "openai_key.txt"), "r"
) as in_file:
default_key:Optional[str] = in_file.read().rstrip()
except:
default_key = None
try:
with open(
os.path.join(os.path.dirname(__file__), "keys", "openai_org.txt"), "r"
) as in_file:
default_org:Optional[str] = in_file.read().rstrip()
except:
default_org = None
if default_key:
client:Optional[OpenAI] = OpenAI(api_key=default_key, organization=default_org or None)
elif os.getenv("OPENAI_API_KEY"):
client = OpenAI()
else:
client = None
# TODO(brycew): remove by retraining the model to work with random_state=4.
NEEDS_STABILITY = True if os.getenv("ISUNITTEST") else False
# Define some hardcoded data file paths
CURRENT_DIRECTORY = os.path.dirname(__file__)
GENDERED_TERMS_PATH = os.path.join(CURRENT_DIRECTORY, "data", "gendered_terms.yml")
PLAIN_LANGUAGE_TERMS_PATH = os.path.join(
CURRENT_DIRECTORY, "data", "simplified_words.yml"
)
# This creates a timeout exception that can be triggered when something hangs too long.
class TimeoutException(Exception):
pass
@contextmanager
def time_limit(seconds: float):
timer = threading.Timer(seconds, lambda: _thread.interrupt_main())
timer.start()
try:
yield
except KeyboardInterrupt:
raise TimeoutException("Timed out.")
finally:
# if the action ends in specified time, timer is canceled
timer.cancel()
def recursive_get_id(values_to_unpack: Union[dict, list], tmpl: Optional[set] = None):
"""
Pull ID values out of the LIST/NSMI results from Spot.
"""
# h/t to Quinten and Bryce for this code ;)
if not tmpl:
tmpl = set()
if isinstance(values_to_unpack, dict):
tmpl.add(values_to_unpack.get("id"))
if values_to_unpack.get("children"):
tmpl.update(recursive_get_id(values_to_unpack.get("children", []), tmpl))
return tmpl
elif isinstance(values_to_unpack, list):
for item in values_to_unpack:
tmpl.update(recursive_get_id(item, tmpl))
return tmpl
else:
return set()
def spot(
text: str,
lower: float = 0.25,
pred: float = 0.5,
upper: float = 0.6,
verbose: float = 0,
token: str = "",
):
"""
Call the Spot API (https://spot.suffolklitlab.org) to classify the text of a PDF using
the NSMIv2/LIST taxonomy (https://taxonomy.legal/), but returns only the IDs of issues found in the text.
"""
global default_spot_token
if not token:
if not default_spot_token:
print("You need to pass a spot token when using Spot")
return []
token = default_spot_token
headers = {
"Authorization": "Bearer " + token,
"Content-Type": "application/json",
}
body = {
"text": text[:5000],
"save-text": 0,
"cutoff-lower": lower,
"cutoff-pred": pred,
"cutoff-upper": upper,
}
r = requests.post(
"https://spot.suffolklitlab.org/v0/entities-nested/",
headers=headers,
data=json.dumps(body),
)
output_ = r.json()
try:
output_["build"]
if verbose != 1:
try:
return list(recursive_get_id(output_["labels"]))
except:
return []
else:
return output_
except:
return output_
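# Illustrative usage sketch (the text and token are hypothetical): classify form text against the
# LIST/NSMI taxonomy and get back a list of issue IDs from https://taxonomy.legal/.
#
#   issue_ids = spot("My landlord filed an eviction case against me.", token="YOUR_SPOT_TOKEN")
#   print(issue_ids)  # e.g. a list of LIST issue IDs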
# A function to pull words out of snake_case, camelCase and the like.
def re_case(text: str) -> str:
"""
Capture PascalCase, snake_case and kebab-case terms and add spaces to separate the joined words
"""
re_outer = re.compile(r"([^A-Z ])([A-Z])")
re_inner = re.compile(r"(?<!^)([A-Z])([^A-Z])")
text = re_outer.sub(r"\1 \2", re_inner.sub(r" \1\2", text))
return text.replace("_", " ").replace("-", " ")
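# Illustrative examples of re_case (inputs are hypothetical field names):
#   re_case("users1_address_line_one")  -> "users1 address line one"
#   re_case("DocketNumber")             -> "Docket Number"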
# Takes text from an auto-generated field name and uses regex to convert it into an Assembly Line standard field.
# See https://suffolklitlab.org/docassemble-AssemblyLine-documentation/docs/label_variables/
def regex_norm_field(text: str):
"""
Apply some heuristics to a field name to see if we can get it to match AssemblyLine conventions.
See: https://suffolklitlab.org/docassemble-AssemblyLine-documentation/docs/document_variables
"""
    regex_list = [
        # Personal info
        ## Name & Bio
        [r"^((My|Your|Full( legal)?) )?Name$", "users1_name"],
        [r"^(Typed or )?Printed Name\s?\d*$", "users1_name"],
        [r"^(DOB|Date of Birth|Birthday)$", "users1_birthdate"],
        ## Address
        [r"^(Street )?Address$", "users1_address_line_one"],
        [r"^City State Zip$", "users1_address_line_two"],
        [r"^City$", "users1_address_city"],
        [r"^State$", "users1_address_state"],
        [r"^Zip( Code)?$", "users1_address_zip"],
        ## Contact
        [r"^(Phone|Telephone)$", "users1_phone_number"],
        [r"^Email( Address)?$", "users1_email"],
        # Parties
        [r"^plaintiff\(?s?\)?$", "plaintiff1_name"],
        [r"^defendant\(?s?\)?$", "defendant1_name"],
        [r"^petitioner\(?s?\)?$", "petitioners1_name"],
        [r"^respondent\(?s?\)?$", "respondents1_name"],
        # Court info
        [r"^(Court\s)?Case\s?(No|Number)?\s?A?$", "docket_number"],
        [r"^file\s?(No|Number)?\s?A?$", "docket_number"],
        # Form info
        [r"^(Signature|Sign( here)?)\s?\d*$", "users1_signature"],
        [r"^Date\s?\d*$", "signature_date"],
    ]
for regex in regex_list:
text = re.sub(regex[0], regex[1], text, flags=re.IGNORECASE)
return text
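# Illustrative examples of regex_norm_field (inputs are hypothetical PDF field labels):
#   regex_norm_field("Phone")     -> "users1_phone_number"
#   regex_norm_field("Signature") -> "users1_signature"
#   regex_norm_field("Favorite color") is returned unchanged, since no rule matches.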
def reformat_field(text: str, max_length: int = 30, tools_token=None):
"""
    Transforms a string of text into a snake_case variable name close in length to `max_length` by
    summarizing the string and stitching the summary together in snake_case.
h/t https://towardsdatascience.com/nlp-building-a-summariser-68e0c19e3a93
"""
orig_title = text.lower()
orig_title = re.sub("[^a-zA-Z]+", " ", orig_title)
orig_title_words = orig_title.split()
deduped_sentence = []
for word in orig_title_words:
if word not in deduped_sentence:
deduped_sentence.append(word)
filtered_sentence = [w for w in deduped_sentence if not w.lower() in stop_words]
filtered_title_words = filtered_sentence
characters = len(" ".join(filtered_title_words))
if characters > 0:
words = len(filtered_title_words)
av_word_len = math.ceil(
len(" ".join(filtered_title_words)) / len(filtered_title_words)
)
x_words = math.floor((max_length) / av_word_len)
sim_mat = np.zeros([len(filtered_title_words), len(filtered_title_words)])
# for each word compared to other
filt_vecs = vectorize(filtered_title_words, tools_token=tools_token)
filt_vecs = [vec.reshape(1, 300) for vec in filt_vecs]
for i in range(len(filtered_title_words)):
for j in range(len(filtered_title_words)):
if i != j:
sim_mat[i][j] = cosine_similarity(
filt_vecs[i],
filt_vecs[j],
)[0, 0]
try:
nx_graph = nx.from_numpy_array(sim_mat)
scores = nx.pagerank(nx_graph)
sorted_scores = sorted(
scores.items(), key=lambda item: item[1], reverse=True
)
if x_words > len(scores):
x_words = len(scores)
i = 0
new_title = ""
for x in filtered_title_words:
if scores[i] >= sorted_scores[x_words - 1][1]:
if len(new_title) > 0:
new_title += "_"
new_title += x
i += 1
return new_title
except:
return "_".join(filtered_title_words)
    else:
        if re.search(r"^(\d+)$", text):
            return "unknown"
        else:
            return re.sub(r"\s+", "_", text.lower())
def norm(row):
"""Normalize a word vector."""
try:
matrix = row.reshape(1, -1).astype(np.float64)
return normalize(matrix, axis=1, norm="l2")[0]
except Exception as e:
print("===================")
print("Error: ", e)
print("===================")
return np.NaN
def vectorize(text: Union[List[str], str], tools_token: Optional[str] = None):
"""Vectorize a string of text.
Args:
text: a string of multiple words to vectorize
        tools_token: the token for tools.suffolklitlab.org, used to call the vectorization
            micro-service and reduce the amount of memory you need on your machine. If
            not passed, you need to have `en_core_web_lg` installed locally
"""
if tools_token:
headers = {
"Authorization": "Bearer " + tools_token,
"Content-Type": "application/json",
}
body = {"text": text}
r = requests.post(
"https://tools.suffolklitlab.org/vectorize/",
headers=headers,
data=json.dumps(body),
)
if not r.ok:
raise Exception("Couldn't access tools.suffolklitlab.org")
if isinstance(text, str):
output = np.array(r.json().get("embeddings", []))
if len(output) <= 0:
raise Exception("Vector from tools.suffolklitlab.org is empty")
return output
else:
return [np.array(embed) for embed in r.json().get("embeddings", [])]
else:
if isinstance(text, str):
return norm(nlp(text).vector)
else:
return [norm(nlp(indiv_text).vector) for indiv_text in text]
# Given an auto-generated field name and context from the form where it appeared, this function attempts to normalize the field name. Here's what's going on:
# 1. It will `re_case` the variable text
# 2. Then it will run the output through `regex_norm_field`
# 3. If it doesn't find anything, it will use the ML model `clf_field_names`
# 4. If the prediction isn't very confident, it will run it through `reformat_field`
def normalize_name(
jur: str,
group: str,
n: int,
per,
last_field: str,
this_field: str,
tools_token: Optional[str] = None,
) -> Tuple[str, float]:
"""
Normalize a field name, if possible to the Assembly Line conventions, and if
not, to a snake_case variable name of appropriate length.
HACK: temporarily all we do is re-case it and normalize it using regex rules.
Will be replaced with call to LLM soon.
"""
if this_field not in included_fields:
this_field = re_case(this_field)
this_field = regex_norm_field(this_field)
if this_field in included_fields:
return f"*{this_field}", 0.01
return reformat_field(this_field, tools_token=tools_token), 0.5
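# Illustrative example (most arguments are currently unused by the temporary regex-based heuristic):
#   normalize_name("MA", "housing", 0, 0.0, "null", "Phone")
#   # -> ("*users1_phone_number", 0.01)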
# Take a list of AL variables and spits out suggested groupings. Here's what's going on:
#
# 1. It reads in a list of fields (e.g., `["user_name","user_address"]`)
# 2. Splits each field into words (e.g., turning `user_name` into `user name`)
# 3. It then turns these ngrams/"sentences" into vectors using word2vec.
# 4. For the collection of fields, it finds clusters of these "sentences" within the semantic space defined by word2vec. Currently it uses Affinity Propagation. See https://machinelearningmastery.com/clustering-algorithms-with-python/
def cluster_screens(
fields: List[str] = [], damping: float = 0.7, tools_token: Optional[str] = None
) -> Dict[str, List[str]]:
"""
Groups the given fields into screens based on how much they are related.
Args:
fields: a list of field names
damping: a value >= 0.5 and < 1. Tunes how related screens should be
        tools_token: the token for tools.suffolklitlab.org, needed if doing
micro-service vectorization
Returns: a suggested screen grouping, each screen name mapped to the list of fields on it
"""
vec_mat = np.zeros([len(fields), 300])
vecs = vectorize([re_case(field) for field in fields], tools_token=tools_token)
for i in range(len(fields)):
vec_mat[i] = vecs[i]
# create model
# note will have to require newer version to fit the model when running with random_state=4
# just on the unit test for now, to make sure `tools.suffolklitlab.org` and local don't differ
model = AffinityPropagation(
damping=damping, random_state=4 if NEEDS_STABILITY else None
)
model.fit(vec_mat)
# assign a cluster to each example
yhat = model.predict(vec_mat)
# retrieve unique clusters
clusters = unique(yhat)
screens = {}
# sim = np.zeros([5,300])
for i, cluster in enumerate(clusters):
this_screen = where(yhat == cluster)[0]
vars = []
for screen in this_screen:
# sim[screen]=vec_mat[screen] # use this spot to add up vectors for compare to list
vars.append(fields[screen])
screens["screen_%s" % i] = vars
return screens
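# Illustrative usage sketch (field names are hypothetical): group related fields into screens.
# Requires `en_core_web_lg` locally, or pass tools_token=... to vectorize remotely.
#
#   screens = cluster_screens(["users1_name", "users1_birthdate", "docket_number"])
#   # -> something like {"screen_0": ["users1_name", "users1_birthdate"], "screen_1": ["docket_number"]}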
def get_character_count(
field: FormField, char_width: float = 6, row_height: float = 16
) -> int:
# https://pikepdf.readthedocs.io/en/latest/api/main.html#pikepdf.Rectangle
# Rectangle with llx,lly,urx,ury
height = field.configs.get("height") or field.configs.get("size", 0)
width = field.configs.get("width") or field.configs.get("size", 0)
num_rows = int(height / row_height) if height > row_height else 1 # type: ignore
num_cols = int(width / char_width) # type: ignore
max_chars = num_rows * num_cols
    return max(max_chars, 1)  # always allow at least one character of input
class InputType(Enum):
"""
Input type maps onto the type of input the PDF author chose for the field. We only
handle text, checkbox, and signature fields.
"""
TEXT = "Text"
CHECKBOX = "Checkbox"
SIGNATURE = "Signature"
def __str__(self):
return self.value
class FieldInfo(TypedDict):
var_name: str
max_length: int
type: Union[InputType, str]
def field_types_and_sizes(
fields: Optional[Iterable[FormField]],
) -> List[FieldInfo]:
"""
Transform the fields provided by get_existing_pdf_fields into a summary format.
Result will look like:
[
{
"var_name": var_name,
"type": "text | checkbox | signature",
"max_length": n
}
]
"""
processed_fields: List[FieldInfo] = []
if not fields:
return []
for field in fields:
item: FieldInfo = {
"var_name": field.name,
"max_length": get_character_count(
field,
),
"type": "",
}
if field.type == FieldType.TEXT or field.type == FieldType.AREA:
item["type"] = InputType.TEXT
elif field.type == FieldType.CHECK_BOX:
item["type"] = InputType.CHECKBOX
elif field.type == FieldType.SIGNATURE:
item["type"] = InputType.SIGNATURE
else:
item["type"] = str(field.type)
processed_fields.append(item)
return processed_fields
class AnswerType(Enum):
"""
    Answer type describes the effort the user answering the form will require.
    "Slot-in" answers are a matter of almost instantaneous recall, e.g., name, address, etc.
    "Gathered" answers require looking around one's desk, e.g., for a health insurance number.
    "Third party" answers require picking up the phone to call someone else who is the keeper
of the information.
"Created" answers don't exist before the user is presented with the question. They may include
a choice, creating a narrative, or even applying legal reasoning. "Affidavits" are a special
form of created answers.
    See Jarrett and Gaffney, Forms That Work (2008)
"""
SLOT_IN = "Slot in"
GATHERED = "Gathered"
THIRD_PARTY = "Third party"
CREATED = "Created"
AFFIDAVIT = "Affidavit"
def __str__(self):
return self.value
def classify_field(field: FieldInfo, new_name: str) -> AnswerType:
"""
Apply heuristics to the field's original and "normalized" name to classify
it as either a "slot-in", "gathered", "third party" or "created" field type.
"""
    SLOT_IN_FIELDS = {
        "users1_name",
"users1_birthdate",
"users1_address_line_one",
"users1_address_line_two",
"users1_address_city",
"users1_address_state",
"users1_address_zip",
"users1_phone_number",
"users1_email",
"plaintiff1_name",
"defendant1_name",
"petitioners1_name",
"respondents1_name",
"users1_signature",
"signature_date",
}
SLOT_IN_KEYWORDS = {
"name",
"birth date",
"birthdate",
"phone",
}
GATHERED_KEYWORDS = {
"number",
"value",
"amount",
"id number",
"social security",
"benefit id",
"docket",
"case",
"employer",
"date",
}
CREATED_KEYWORDS = {
"choose",
"choice",
"why",
"fact",
}
AFFIDAVIT_KEYWORDS = {
"affidavit",
}
var_name = field["var_name"].lower()
if (
var_name in SLOT_IN_FIELDS
or new_name in SLOT_IN_FIELDS
or any(keyword in var_name for keyword in SLOT_IN_KEYWORDS)
):
return AnswerType.SLOT_IN
elif any(keyword in var_name for keyword in GATHERED_KEYWORDS):
return AnswerType.GATHERED
elif set(var_name.split()).intersection(CREATED_KEYWORDS):
return AnswerType.CREATED
elif field["type"] == InputType.TEXT:
if field["max_length"] <= 100:
return AnswerType.SLOT_IN
else:
return AnswerType.CREATED
return AnswerType.GATHERED
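# Illustrative example (a hypothetical FieldInfo dict): a text field whose name mentions a
# "number" is treated as information the user has to gather.
#
#   classify_field(
#       {"var_name": "social security number", "max_length": 20, "type": InputType.TEXT},
#       "users1_ssn",
#   )
#   # -> AnswerType.GATHERED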
def get_adjusted_character_count(
field: FieldInfo
)-> float:
"""
Determines the bracketed length of an input field based on its max_length attribute,
returning a float representing the approximate length of the field content.
The function chunks the answers into 5 different lengths (checkboxes, 2 words, short, medium, and long)
instead of directly using the character count, as forms can allocate different spaces
for the same data without considering the space the user actually needs.
Args:
field (FieldInfo): An object containing information about the input field,
including the "max_length" attribute.
Returns:
float: The approximate length of the field content, categorized into checkboxes, 2 words, short,
medium, or long based on the max_length attribute.
    Examples:
        >>> get_adjusted_character_count({"type": InputType.CHECKBOX, "max_length": 1})
        4.7
        >>> get_adjusted_character_count({"type": InputType.TEXT, "max_length": 100})
        9.4
        >>> get_adjusted_character_count({"type": InputType.TEXT, "max_length": 300})
        575
        >>> get_adjusted_character_count({"type": InputType.TEXT, "max_length": 600})
        1150
        >>> get_adjusted_character_count({"type": InputType.TEXT, "max_length": 1200})
        1150
"""
ONE_WORD = 4.7 # average word length: https://www.researchgate.net/figure/Average-word-length-in-the-English-language-Different-colours-indicate-the-results-for_fig1_230764201
ONE_LINE = 115 # Standard line is ~ 115 characters wide at 12 point font
SHORT_ANSWER = (
ONE_LINE * 2
) # Anything over 1 line but less than 3 probably needs about the same time to answer
MEDIUM_ANSWER = ONE_LINE * 5
LONG_ANSWER = (
ONE_LINE * 10
) # Anything over 10 lines probably needs a full page but form author skimped on space
if field["type"] != InputType.TEXT:
return ONE_WORD
if field["max_length"] <= ONE_LINE or (
field["max_length"] <= ONE_LINE * 2
):
return ONE_WORD * 2
elif field["max_length"] <= SHORT_ANSWER:
return SHORT_ANSWER
elif field["max_length"] <= MEDIUM_ANSWER:
return MEDIUM_ANSWER
return LONG_ANSWER
def time_to_answer_field(
field: FieldInfo,
new_name: str,
cpm: int = 40,
cpm_std_dev: int = 17,
) -> Callable[[int], np.ndarray]:
"""
Apply a heuristic for the time it takes to answer the given field, in minutes.
It is hand-written for now.
It will factor in the input type, the answer type (slot in, gathered, third party or created), and the
amount of input text allowed in the field.
The return value is a function that can return N samples of how long it will take to answer the field (in minutes)
"""
# Average CPM is about 40: https://en.wikipedia.org/wiki/Words_per_minute#Handwriting
# Standard deviation is about 17 characters/minute
# Add mean amount of time for gathering or creating the answer itself (if any) + standard deviation in minutes
TIME_TO_MAKE_ANSWER = {
AnswerType.SLOT_IN: (0.25, 0.1),
AnswerType.GATHERED: (3, 2),
AnswerType.THIRD_PARTY: (5, 2),
AnswerType.CREATED: (5, 4),
AnswerType.AFFIDAVIT: (5, 4),
}
kind = classify_field(field, new_name)
if field["type"] == InputType.SIGNATURE or "signature" in field["var_name"]:
return lambda number_samples: np.random.normal(
loc=0.5, scale=0.1, size=number_samples
)
if field["type"] == InputType.CHECKBOX:
return lambda number_samples: np.random.normal(
loc=TIME_TO_MAKE_ANSWER[kind][0],
scale=TIME_TO_MAKE_ANSWER[kind][1],
size=number_samples,
)
else:
adjusted_character_count = get_adjusted_character_count(field)
time_to_write_answer = adjusted_character_count / cpm
time_to_write_std_dev = adjusted_character_count / cpm_std_dev
return lambda number_samples: np.random.normal(
loc=time_to_write_answer, scale=time_to_write_std_dev, size=number_samples
) + np.random.normal(
loc=TIME_TO_MAKE_ANSWER[kind][0],
scale=TIME_TO_MAKE_ANSWER[kind][1],
size=number_samples,
)
def time_to_answer_form(processed_fields, normalized_fields) -> Tuple[float, float]:
"""
Provide an estimate of how long it would take an average user to respond to the questions
on the provided form.
We use signals such as the field type, name, and space provided for the response to come up with a
rough estimate, based on whether the field is:
1. fill in the blank
2. gathered - e.g., an id number, case number, etc.
    3. third party: need to ask someone else for the information - e.g., the income of someone other than the user
4. created:
a. short created (3 lines or so?)
b. long created (anything over 3 lines)
"""
field_answer_time_simulators: List[Callable[[int], np.ndarray]] = []
for index, field in enumerate(processed_fields):
field_answer_time_simulators.append(
time_to_answer_field(field, normalized_fields[index])
)
# Run a monte carlo simulation to get times to answer and standard deviation
num_samples = 20000
np_array = np.zeros(num_samples)
for field_simulator in field_answer_time_simulators:
np_array += field_simulator(num_samples)
return sigfig.round(np_array.mean(), 2), sigfig.round(np_array.std(), 2)
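# Illustrative usage sketch: estimate completion time from the fields of an already-opened PDF.
# Here `ff` would come from get_existing_pdf_fields(...) and `new_names` is the parallel list of
# normalized field names (see parse_form below for the real call site).
#
#   processed = field_types_and_sizes(ff)
#   mean_minutes, std_dev_minutes = time_to_answer_form(processed, new_names)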
def cleanup_text(text: str, fields_to_sentences: bool = False) -> str:
"""
Apply cleanup routines to text to provide more accurate readability statistics.
"""
# Replace \n with .
text = re.sub(r"(\n|\r)+", ". ", text)
# Replace non-punctuation characters with " "
text = re.sub(r"[^\w.,;!?@'\"“”‘’'″‶ ]", " ", text)
# _ is considered a word character, remove it
text = re.sub(r"_+", " ", text)
if fields_to_sentences:
# Turn : into . (so fields are treated as one sentence)
text = re.sub(r":", ".", text)
# Condense repeated " "
text = re.sub(r" +", " ", text)
# Remove any sentences that are just composed of a space
text = re.sub(r"\. +\.", ". ", text)
# Remove any repeated .
text = re.sub(r"\.+", ".", text)
# Remove space before final period
text = re.sub(r" \.", ".", text)
return text
def all_caps_words(text: str) -> int:
results = re.findall(r"([A-Z][A-Z]+)", text)
if results:
return len(results)
return 0
class OpenAiCreds(TypedDict):
org: str
key: str
def text_complete(prompt:str, max_tokens:int=500, creds: Optional[OpenAiCreds] = None, temperature:float=0) -> str:
"""Run a prompt via openAI's API and return the result.
Args:
prompt (str): The prompt to send to the API.
max_tokens (int, optional): The number of tokens to generate. Defaults to 500.
creds (Optional[OpenAiCreds], optional): The credentials to use. Defaults to None.
temperature (float, optional): The temperature to use. Defaults to 0.
"""
if creds:
openai_client = OpenAI(api_key=creds["key"], organization=creds["org"])
else:
if client:
openai_client = client
else:
raise Exception("No OpenAI credentials provided")
try:
response = openai_client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{
"role": "system",
"content": prompt
},
],
temperature=temperature,
max_tokens=max_tokens,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
return str((response.choices[0].message.content or "").strip())
except Exception as ex:
print(f"{ex}")
return "ApiError"
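# Illustrative usage sketch (the prompt and credentials are hypothetical):
#
#   answer = text_complete(
#       "Summarize the purpose of an eviction answer form in one sentence.",
#       max_tokens=100,
#       creds={"org": "org-...", "key": "sk-..."},
#   )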
def complete_with_command(
text, command, tokens, creds: Optional[OpenAiCreds] = None
) -> str:
"""Combines some text with a command to send to open ai."""
# OpenAI's max number of tokens length is 4097, so we trim the input text to 4080 - command - tokens length.
# A bit less than 4097 in case the tokenizer is wrong
# don't deal with negative numbers, clip at 1 (OpenAi will error anyway)
max_length = max(4080 - len(tokenizer(command)["input_ids"]) - tokens, 1)
text_tokens = tokenizer(text)
if len(text_tokens["input_ids"]) > max_length:
text = tokenizer.decode(
tokenizer(text, truncation=True, max_length=max_length)["input_ids"]
)
return text_complete(text + "\n\n" + command, max_tokens=tokens, creds=creds)
def plain_lang(text, creds: Optional[OpenAiCreds] = None) -> str:
tokens = len(tokenizer(text)["input_ids"])
command = "Rewrite the above at a sixth grade reading level."
return complete_with_command(text, command, tokens, creds=creds)
def guess_form_name(text, creds: Optional[OpenAiCreds] = None) -> str:
command = 'If the above is a court form, write the form\'s name, otherwise respond with the word "abortthisnow.".'
return complete_with_command(text, command, 20, creds=creds)
def describe_form(text, creds: Optional[OpenAiCreds] = None) -> str:
command = 'If the above is a court form, write a brief description of its purpose at a sixth grade reading level, otherwise respond with the word "abortthisnow.".'
return complete_with_command(text, command, 250, creds=creds)
def needs_calculations(text: Union[str, Doc]) -> bool:
    """A conservative guess at whether a given form needs the filler to make math calculations,
    something form authors should avoid requiring."""
    CALCULATION_WORDS = ["subtract", "total", "minus", "multiply", "divide"]
if isinstance(text, str):
doc = nlp(text)
else:
doc = text
for token in doc:
if token.text.lower() in CALCULATION_WORDS:
return True
# TODO(brycew): anything better than a binary yes-no value on this?
return False
def get_passive_sentences(
text: Union[List, str]
) -> List[Tuple[str, List[Tuple[int, int]]]]:
"""Return a list of tuples, where each tuple represents a
sentence in which passive voice was detected along with a list of the
starting and ending position of each fragment that is phrased in the passive voice.
The combination of the two can be used in the PDFStats frontend to highlight the
passive text in an individual sentence.
Text can either be a string or a list of strings.
If provided a single string, it will be tokenized with NTLK and
sentences containing fewer than 2 words will be ignored.
"""
# Sepehri, A., Markowitz, D. M., & Mir, M. (2022, February 3).
# PassivePy: A Tool to Automatically Identify Passive Voice in Big Text Data. Retrieved from psyarxiv.com/bwp3t
#
if isinstance(text, str):
sentences = [s for s in sent_tokenize(text) if len(s.split(" ")) > 2]
if not sentences:
raise ValueError(
"There are no sentences over 2 words in the provided text."
)
elif isinstance(text, list):
sentences = text
else:
raise ValueError(f"Can't tokenize {type(text)} object into sentences")
if not sentences:
return []
passive_text_df = passivepy.match_corpus_level(pd.DataFrame(sentences), 0)
matching_rows = passive_text_df[passive_text_df["binary"] > 0]
sentences_with_highlights = []
for item in list(zip(matching_rows["document"], matching_rows["all_passives"])):
for fragment in item[1]:
sentences_with_highlights.append(
(
item[0],
[
(match.start(), match.end())
for match in re.finditer(re.escape(fragment), item[0])
],
)
)
return sentences_with_highlights
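# Illustrative example (hypothetical sentence): the return value pairs each passive sentence with
# the character offsets of the passive fragment(s) PassivePy found in it.
#
#   get_passive_sentences("The form was signed by the tenant and was filed with the court.")
#   # -> e.g. [("The form was signed ...", [(start, end)]), ...] depending on the PassivePy match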
def get_citations(text: str, tokenized_sentences: List[str]) -> List[str]:
"""
Get citations and some extra surrounding context (the full sentence), if the citation is
fewer than 5 characters (often eyecite only captures a section symbol
for state-level short citation formats)
"""
citations = eyecite.get_citations(
eyecite.clean_text(text, ["all_whitespace", "underscores"])
)
citations_with_context = []
tokens = set()
for cite in citations:
if len(cite.matched_text()) < 5:
tokens.add(cite.matched_text())
else:
citations_with_context.append(cite.matched_text())
for token in tokens:
citations_with_context.extend(
[sentence for sentence in tokenized_sentences if token in sentence]
)
return citations_with_context
def substitute_phrases(
input_string: str, substitution_phrases: Dict[str, str]
) -> Tuple[str, List[Tuple[int, int]]]:
"""Substitute phrases in the input string and return the new string and positions of substituted phrases.
Args:
input_string (str): The input string containing phrases to be replaced.
substitution_phrases (Dict[str, str]): A dictionary mapping original phrases to their replacement phrases.
Returns:
Tuple[str, List[Tuple[int, int]]]: A tuple containing the new string with substituted phrases and a list of
tuples, each containing the start and end positions of the substituted
phrases in the new string.
Example:
>>> input_string = "The quick brown fox jumped over the lazy dog."
>>> substitution_phrases = {"quick brown": "swift reddish", "lazy dog": "sleepy canine"}
>>> new_string, positions = substitute_phrases(input_string, substitution_phrases)
>>> print(new_string)
"The swift reddish fox jumped over the sleepy canine."
>>> print(positions)
[(4, 17), (35, 48)]
"""
# Sort the substitution phrases by length in descending order
sorted_phrases = sorted(
substitution_phrases.items(), key=lambda x: len(x[0]), reverse=True
)
matches = []
# Find all matches for the substitution phrases
for original, replacement in sorted_phrases:
for match in re.finditer(r"\b" + re.escape(original) + r"\b", input_string, re.IGNORECASE):
matches.append((match.start(), match.end(), replacement))
# Sort the matches based on their starting position
matches.sort(key=lambda x: x[0])
new_string = ""
substitutions: List[Tuple[int, int]] = []
prev_end_pos = 0
# Build the new string and substitutions list
for start_pos, end_pos, replacement in matches:
if start_pos >= prev_end_pos:
new_string += input_string[prev_end_pos:start_pos] + replacement
substitutions.append((len(new_string) - len(replacement), len(new_string)))
prev_end_pos = end_pos
new_string += input_string[prev_end_pos:]
return new_string, substitutions
def substitute_neutral_gender(input_string: str) -> Tuple[str, List[Tuple[int, int]]]:
"""
Substitute gendered phrases with neutral phrases in the input string.
Primary source is https://github.com/joelparkerhenderson/inclusive-language
"""
with open(GENDERED_TERMS_PATH) as f:
terms = yaml.safe_load(f)
return substitute_phrases(input_string, terms)
def substitute_plain_language(input_string: str) -> Tuple[str, List[Tuple[int, int]]]:
"""
Substitute complex phrases with simpler alternatives.
Source of terms is drawn from https://www.plainlanguage.gov/guidelines/words/
"""
with open(PLAIN_LANGUAGE_TERMS_PATH) as f:
terms = yaml.safe_load(f)
return substitute_phrases(input_string, terms)
def transformed_sentences(
sentence_list: List[str], fun: Callable
) -> List[Tuple[str, str, List[Tuple[int, int]]]]:
"""
Apply a function to a list of sentences and return only the sentences with changed terms.
The result is a tuple of the original sentence, new sentence, and the starting and ending position
of each changed fragment in the sentence.
"""
transformed: List[Tuple[str, str, List[Tuple[int, int]]]] = []
for sentence in sentence_list:
run = fun(sentence)
if run[0] != sentence:
transformed.append((sentence, run[0], run[1]))
return transformed
def parse_form(
in_file: str,
title: Optional[str] = None,
jur: Optional[str] = None,
cat: Optional[str] = None,
normalize: bool = True,
spot_token: Optional[str] = None,
tools_token: Optional[str] = None,
openai_creds: Optional[OpenAiCreds] = None,
rewrite: bool = False,
debug: bool = False,
):
"""
Read in a pdf, pull out basic stats, attempt to normalize its form fields, and re-write the
in_file with the new fields (if `rewrite=1`). If you pass a spot token, we will guess the
NSMI code. If you pass openai creds, we will give suggestions for the title and description.
"""
unlock_pdf_in_place(in_file)
the_pdf = pikepdf.open(in_file)
pages_count = len(the_pdf.pages)
try:
with time_limit(15):
all_fields_per_page = get_existing_pdf_fields(the_pdf)
ff = []
for fields_in_page in all_fields_per_page:
ff.extend(fields_in_page)
except TimeoutException as e:
print("Timed out!")
ff = None
except AttributeError:
ff = None
field_names = [field.name for field in ff] if ff else []
f_per_page = len(field_names) / pages_count
# some PDFs (698c6784e6b9b9518e5390fd9ec31050) have vertical text, but it's not detected.
# Text contains a bunch of "(cid:72)", garbage output (reading level is like 1000).
# Our workaround is to ask GPT3 if it looks like a court form, and if not, try running
# ocrmypdf.
original_text = extract_text(in_file, laparams=LAParams(detect_vertical=True))
text = cleanup_text(original_text)
description = describe_form(text, creds=openai_creds) if openai_creds else ""
try:
readability = textstat.text_standard(text, float_output=True) if text else -1
except:
readability = -1
# Still attempt to re-evaluate if not using openai
if not original_text or (openai_creds and description == "abortthisnow.") or readability > 30:
# We do not care what the PDF output is, doesn't add that much time
ocr_p = [
"ocrmypdf",
"--force-ocr",
"--rotate-pages",
"--sidecar",
"-",
in_file,
"/tmp/test.pdf",
]
process = subprocess.run(ocr_p, timeout=60, check=False, capture_output=True)
if process.returncode == 0:
original_text = process.stdout.decode()
text = cleanup_text(original_text)
try:
readability = (
textstat.text_standard(text, float_output=True) if text else -1
)
except:
readability = -1
new_title = guess_form_name(text, creds=openai_creds) if openai_creds else ""
if not title:
if hasattr(the_pdf.docinfo, "Title"):
title = str(the_pdf.docinfo.Title)
if (
not title
and new_title
and (new_title != "ApiError" and new_title.lower() != "abortthisnow.")
):
title = new_title
if not title or title == "ApiError" or title.lower() == "abortthisnow.":
matches = re.search("(.*)\n", text)
if matches:
title = re_case(matches.group(1).strip())
else:
title = "(Untitled)"
nsmi = spot(title + ". " + text, token=spot_token) if spot_token else []
if normalize:
length = len(field_names)
last = "null"
new_names = []
new_names_conf = []
for i, field_name in enumerate(field_names):
new_name, new_confidence = normalize_name(
jur or "",
cat or "",
i,
i / length,
last,
field_name,
tools_token=tools_token,
)
new_names.append(new_name)
new_names_conf.append(new_confidence)
last = field_name
new_names = [
v + "__" + str(new_names[:i].count(v) + 1) if new_names.count(v) > 1 else v
for i, v in enumerate(new_names)
]
else:
new_names = field_names
new_names_conf = []
tokenized_sentences = sent_tokenize(original_text)
# No need to detect passive voice in very short sentences
sentences = [s for s in tokenized_sentences if len(s.split(" ")) > 2]
try:
passive_sentences = get_passive_sentences(sentences)
passive_sentences_count = len(passive_sentences)
except ValueError:
passive_sentences_count = 0
passive_sentences = []
citations = get_citations(original_text, tokenized_sentences)
plain_language_suggestions = transformed_sentences(
sentences, substitute_plain_language
)
neutral_gender_suggestions = transformed_sentences(
sentences, substitute_neutral_gender
)
word_count = len(text.split(" "))
all_caps_count = all_caps_words(text)
field_types = field_types_and_sizes(ff)
classified = [
classify_field(field, new_names[index])
for index, field in enumerate(field_types)
]
slotin_count = sum(1 for c in classified if c == AnswerType.SLOT_IN)
gathered_count = sum(1 for c in classified if c == AnswerType.GATHERED)
third_party_count = sum(1 for c in classified if c == AnswerType.THIRD_PARTY)
created_count = sum(1 for c in classified if c == AnswerType.CREATED)
sentence_count = sum(1 for _ in sentences)
field_count = len(field_names)
difficult_words = textstat.difficult_words_list(text)
difficult_word_count = len(difficult_words)
citation_count = len(citations)
pdf_is_tagged = is_tagged(the_pdf)
stats = {
"title": title,
"suggested title": new_title,
"description": description,
"category": cat,
"pages": pages_count,
"reading grade level": readability,
"time to answer": time_to_answer_form(field_types_and_sizes(ff), new_names)
if ff
else [-1, -1],
"list": nsmi,
"avg fields per page": f_per_page,
"fields": new_names,
"fields_conf": new_names_conf,
"fields_old": field_names,
"text": text,
"original_text": original_text,
"number of sentences": sentence_count,
"sentences per page": sentence_count / pages_count,
"number of passive voice sentences": passive_sentences_count,
"passive sentences": passive_sentences,
"number of all caps words": all_caps_count,
"citations": citations,
"total fields": field_count,
"slotin percent": slotin_count / field_count if field_count > 0 else 0,
"gathered percent": gathered_count / field_count if field_count > 0 else 0,
"created percent": created_count / field_count if field_count > 0 else 0,
"third party percent": third_party_count / field_count
if field_count > 0
else 0,
"passive voice percent": (
passive_sentences_count / sentence_count if sentence_count > 0 else 0
),
"citations per field": citation_count / field_count if field_count > 0 else 0,
"citation count": citation_count,
"all caps percent": all_caps_count / word_count,
"normalized characters per field": sum(get_adjusted_character_count(field) for field in field_types ) / field_count if ff else 0,
"difficult words": difficult_words,
"difficult word count": difficult_word_count,
"difficult word percent": difficult_word_count / word_count,
"calculation required": needs_calculations(text),
"plain language suggestions": plain_language_suggestions,
"neutral gender suggestions": neutral_gender_suggestions,
"pdf_is_tagged": pdf_is_tagged,
}
if debug and ff:
debug_fields = []
for index, field in enumerate(field_types_and_sizes(ff)):
debug_fields.append(
{
"name": field["var_name"],
"input type": str(field["type"]),
"max length": field["max_length"],
"inferred answer type": str(
classify_field(field, new_names[index])
),
"time to answer": list(
time_to_answer_field(field, new_names[index])(1)
),
}
)
stats["debug fields"] = debug_fields
if rewrite:
try:
my_pdf = pikepdf.Pdf.open(in_file, allow_overwriting_input=True)
fields_too = (
my_pdf.Root.AcroForm.Fields
) # [0]["/Kids"][0]["/Kids"][0]["/Kids"][0]["/Kids"]
# print(repr(fields_too))
for k, field_name in enumerate(new_names):
# print(k,field)
                fields_too[k].T = re.sub(r"^\*", "", field_name)
my_pdf.save(in_file)
my_pdf.close()
except Exception as ex:
stats["error"] = f"could not change form fields: {ex}"
return stats
def _form_complexity_per_metric(stats):
# check for fields that require user to look up info, when found add to complexity
# maybe score these by minutes to recall/fill out
# so, figure out words per minute, mix in with readability and page number and field numbers
# TODO(brycew):
# to write: options with unknown?
# to write: fields with exact info
# to write: fields with open ended responses (text boxes)
metrics = [
{"name": "reading grade level", "weight": 10 / 7, "intercept": 5},
{"name": "calculation required", "weight": 2},
# {"name": "time to answer", "weight": 2},
{"name": "pages", "weight": 2},
{"name": "citations per field", "weight": 1.2},
{"name": "avg fields per page", "weight": 1 / 8},
{"name": "normalized characters per field", "weight": 1/8},
{"name": "sentences per page", "weight": 0.05},
# percents will have a higher weight, because they are between 0 and 1
{"name": "slotin percent", "weight": 2},
{"name": "gathered percent", "weight": 5},
{"name": "third party percent", "weight": 10},
{"name": "created percent", "weight": 20},
{"name": "passive voice percent", "weight": 4},
{"name": "all caps percent", "weight": 10},
{"name": "difficult word percent", "weight": 15},
]
def weight(stats, metric):
"""Handles if we need to scale / "normalize" the metrics at all."""
name = metric["name"]
weight = metric.get("weight") or 1
val = 0
if "clip" in metric:
val = min(max(stats.get(name,0), metric["clip"][0]), metric["clip"][1])
elif isinstance(stats.get(name), bool):
val = 1 if stats.get(name) else 0
else:
val = stats.get(name,0)
if "intercept" in metric:
val -= metric["intercept"]
return val * weight
return [(m["name"], stats[m["name"]], weight(stats, m)) for m in metrics]
def form_complexity(stats):
"""Gets a single number of how hard the form is to complete. Higher is harder."""
metrics = _form_complexity_per_metric(stats)
return sum(val[2] for val in metrics)
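# Illustrative usage sketch (the file name is hypothetical): combine parse_form with the complexity score.
#
#   stats = parse_form("eviction_answer.pdf")
#   print(form_complexity(stats))  # higher scores mean the form is harder to complete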
| [] |
2024-01-10 | NotJoeMartinez/yt-fts | tests~view_chromadb.py | import chromadb
import sys
from openai import OpenAI
from yt_fts.embeddings import get_embedding
from yt_fts.config import get_or_make_chroma_path
from yt_fts.utils import time_to_secs
from yt_fts.db_utils import get_channel_name_from_video_id, get_title_from_db
from pprint import pprint
def main():
chroma_path = get_or_make_chroma_path()
view_collections(chroma_path)
# search = "nural networks"
# search_collections(chroma_path, search)
# view_by_channel_id("")
# delete_stuff()
def view_collections(chroma_path):
chroma_client = chromadb.PersistentClient(path=chroma_path)
collection = chroma_client.get_collection(name="subEmbeddings")
print(collection.peek())
print(collection.count())
def view_by_channel_id(channel_id):
chroma_path = get_or_make_chroma_path()
chroma_client = chromadb.PersistentClient(path=chroma_path)
collection = chroma_client.get_collection(name="subEmbeddings")
# collection.get({
# include: [ "documents" ]
# })
# chroma_res = collection.query(
# query_texts=["networks"],
# n_results=5,
# where={"channel_id": channel_id})
# pprint(chroma_res)
def delete_stuff():
chroma_path = get_or_make_chroma_path()
chroma_client = chromadb.PersistentClient(path=chroma_path)
collection = chroma_client.get_collection(name="subEmbeddings")
collection.delete(
where={"channel_id": "UCF0ZSm2AmSkJ2b2sLMlgLFg"}
)
def search_collections(chroma_path, text):
chroma_client = chromadb.PersistentClient(path=chroma_path)
collection = chroma_client.get_collection(name="subEmbeddings")
search_embedding = get_embedding(text, "text-embedding-ada-002", OpenAI())
chroma_res = collection.query(
query_embeddings=[search_embedding],
n_results=5,
where={},
)
pprint(chroma_res)
documents = chroma_res["documents"][0]
metadata = chroma_res["metadatas"][0]
distances = chroma_res["distances"][0]
res = []
for i in range(len(documents)):
text = documents[i]
video_id = metadata[i]["video_id"]
start_time = metadata[i]["start_time"]
distance = distances[i]
link = f"https://youtu.be/{video_id}?t={time_to_secs(start_time)}"
channel_name = get_channel_name_from_video_id(video_id)
channel_id = metadata[i]["channel_id"]
title = get_title_from_db(video_id)
match = {
"distance": distance,
"channel_name": channel_name,
"channel_id": channel_id,
"video_title": title,
"subs": text,
"start_time": start_time,
"video_id": video_id,
"link": link,
}
res.append(match)
for match in res:
pprint(match)
if __name__ == "__main__":
main() | [] |
2024-01-10 | NotJoeMartinez/yt-fts | yt_fts~yt_fts.py | import click
import requests
from .config import get_config_path, get_db_path, get_or_make_chroma_path
from .db_utils import *
from .download import *
from .list import list_channels
from .update import update_channel
from .utils import *
from rich.console import Console
YT_FTS_VERSION = "0.1.39"
@click.group()
@click.version_option(YT_FTS_VERSION, message='yt_fts version: %(version)s')
def cli():
config_path = get_config_path()
db_path = get_db_path()
# download
@cli.command(
help="""
Download subtitles from a specified YouTube channel.
You must provide the URL of the channel as an argument. The script will automatically extract the channel id from the URL.
"""
)
@click.argument("channel_url", required=True)
@click.option("-l", "--language", default="en", help="Language of the subtitles to download")
@click.option("-j", "--number-of-jobs", type=int, default=1, help="Optional number of jobs to parallelize the run")
def download(channel_url, language, number_of_jobs):
console = Console()
s = requests.session()
# find out if the channel exists on the internet
with console.status("[bold green]Getting Channel ID...") as status:
channel_url = validate_channel_url(channel_url)
handle_reject_consent_cookie(channel_url, s)
channel_id = get_channel_id(channel_url, s)
if channel_id is None:
console.print("[bold red]Error:[/bold red] Invalid channel URL or unable to extract channel ID.")
return
channel_exists = check_if_channel_exists(channel_id)
if channel_exists:
list_channels(channel_id)
error = "[bold red]Error:[/bold red] Channel already exists in database."
error += " Use update command to update the channel"
console.print(error)
return
handle_reject_consent_cookie(channel_url, s)
channel_name = get_channel_name(channel_id, s)
if channel_name is None:
console.print("[bold red]Error:[/bold red] The channel does not exist.")
return
foo = download_channel(channel_id, channel_name, language, number_of_jobs, s)
if foo is None:
console.print("[bold red]Error:[/bold red] Unable to download channel.")
return
else:
console.print("[green]Download complete[/green]")
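# Illustrative CLI invocation (assuming the installed console script is named `yt-fts`; the
# channel URL is hypothetical):
#
#   yt-fts download --number-of-jobs 4 "https://www.youtube.com/@3blue1brown"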
# list
@cli.command(
help="""
View library, transcripts and channel video list
"""
)
@click.option("-t", "--transcript", default=None, help="Show transcript for a video")
@click.option("-c", "--channel", default=None, help="Show list of videos for a channel")
@click.option("-l", "--library", is_flag=True, help="Show list of channels in library")
def list(transcript, channel, library):
from yt_fts.list import show_video_list, show_video_transcript
if transcript:
show_video_transcript(transcript)
exit()
elif channel:
channel_id = get_channel_id_from_input(channel)
show_video_list(channel_id)
elif library:
list_channels()
else:
list_channels()
# update
@cli.command(
help="""
Updates a specified YouTube channel.
You must provide the ID of the channel as an argument.
Keep in mind some might not have subtitles enabled. This command
will still attempt to download subtitles as subtitles are sometimes added later.
"""
)
@click.option("-c", "--channel", default=None, required=True, help="The name or id of the channel to update.")
@click.option("-l", "--language", default="en", help="Language of the subtitles to download")
@click.option("-j", "--number-of-jobs", type=int, default=1, help="Optional number of jobs to parallelize the run")
def update(channel, language, number_of_jobs):
channel_id = get_channel_id_from_input(channel)
channel_url = f"https://www.youtube.com/channel/{channel_id}/videos"
s = requests.session()
handle_reject_consent_cookie(channel_url, s)
channel_name = get_channel_name(channel_id, s)
update_channel(channel_id, channel_name, language, number_of_jobs, s)
# Delete
@cli.command(
help="""
Delete a channel and all its data.
You must provide the name or the id of the channel you want to delete as an argument.
The command will ask for confirmation before performing the deletion.
"""
)
@click.option("-c", "--channel", default=None, required=True, help="The name or id of the channel to delete")
def delete(channel):
channel_id = get_channel_id_from_input(channel)
channel_name = get_channel_name_from_id(channel_id)
channel_url = f"https://www.youtube.com/channel/{channel_id}/videos"
print(f"Deleting channel {channel_name}: {channel_url}")
print("Are you sure you want to delete this channel and all its data?")
confirm = input("y/n: ")
if confirm == "y":
delete_channel(channel_id)
print(f"Deleted channel {channel_name}: {channel_url}")
else:
print("Exiting")
@cli.command(
help="""
export transcripts
"""
)
@click.option("-c", "--channel", default=None, required=True, help="The name or id of the channel to export transcripts for")
@click.option("-f", "--format", default="txt", help="The format to export transcripts to. Supported formats: txt, vtt")
def export(channel, format):
from .export import export_channel_to_txt, export_channel_to_vtt
console = Console()
channel_id = get_channel_id_from_input(channel)
    if format == "txt":
        output_dir = export_channel_to_txt(channel_id)
    elif format == "vtt":
        output_dir = export_channel_to_vtt(channel_id)
    else:
        console.print(f"[bold red]Error:[/bold red] Unsupported export format: {format}")
        return

    if output_dir is not None:
        console.print(f"Exported to [green][bold]{output_dir}[/bold][/green]")
# search
@cli.command(
help="""
Search for a specified text within a channel, a specific video, or across all channels.
"""
)
@click.argument("text", required=True)
@click.option("-c", "--channel", default=None, help="The name or id of the channel to search in.")
@click.option("-v", "--video", default=None, help="The id of the video to search in.")
@click.option("-l", "--limit", default=None, type=int, help="Number of results to return")
@click.option("-e", "--export", is_flag=True, help="Export search results to a CSV file.")
def search(text, channel, video, export, limit):
from yt_fts.search import fts_search, print_fts_res
from yt_fts.export import export_fts
console = Console()
if len(text) > 40:
show_message("search_too_long")
return
if channel:
scope = "channel"
elif video:
scope = "video"
else:
scope = "all"
res = fts_search(text, scope, channel_id=channel, video_id=video, limit=limit)
print_fts_res(res, text)
if export:
export_fts(text, scope, channel_id=channel, video_id=video)
console.print(f"Query '{text}' ")
console.print(f"Scope: {scope}")
# vsearch
@cli.command(
help="""
Vector search. Requires embeddings to be generated for the channel and environment variable OPENAI_API_KEY to be set.
"""
)
@click.argument("text", required=True)
@click.option("-c", "--channel", default=None, help="The name or id of the channel to search in")
@click.option("-v", "--video", default=None, help="The id of the video to search in.")
@click.option("-l", "--limit", default=10, help="Number of results to return")
@click.option("-e", "--export", is_flag=True, help="Export search results to a CSV file.")
@click.option("--openai-api-key", default=None, help="OpenAI API key. If not provided, the script will attempt to read it from the OPENAI_API_KEY environment variable.")
def vsearch(text, channel, video, limit, export, openai_api_key):
from openai import OpenAI
from yt_fts.vector_search import search_chroma_db, print_vector_search_results
from yt_fts.export import export_vector_search
console = Console()
if len(text) > 80:
show_message("search_too_long")
exit()
# get api key for openai
if openai_api_key is None:
openai_api_key = os.environ.get("OPENAI_API_KEY")
if openai_api_key is None:
console.print("""
[bold][red]Error:[/red][/bold] OPENAI_API_KEY environment variable not set, Run:
export OPENAI_API_KEY=<your_key> to set the key
""")
return
openai_client = OpenAI(api_key=openai_api_key)
if channel:
scope = "channel"
elif video:
scope = "video"
else:
scope = "all"
res = search_chroma_db(text,
scope,
channel_id=channel,
video_id=video,
limit=limit,
openai_client=openai_client)
print_vector_search_results(res, query=text)
if export:
export_vector_search(res, text, scope)
console.print(f"Query '{text}' ")
console.print(f"Scope: {scope}")
# get-embeddings
@cli.command(
help="""
Generate embeddings for a channel using OpenAI's embeddings API.
Requires an OpenAI API key to be set as an environment variable OPENAI_API_KEY.
"""
)
@click.option("-c", "--channel", default=None, help="The name or id of the channel to generate embeddings for")
@click.option("--openai-api-key", default=None, help="OpenAI API key. If not provided, the script will attempt to read it from the OPENAI_API_KEY environment variable.")
def get_embeddings(channel, openai_api_key):
from yt_fts.db_utils import get_vid_ids_by_channel_id
from yt_fts.embeddings import add_embeddings_to_chroma
from yt_fts.utils import split_subtitles, check_ss_enabled, enable_ss
from openai import OpenAI
console = Console()
channel_id = get_channel_id_from_input(channel)
# verify that embeddings have not already been created for the channel
if check_ss_enabled(channel_id) == True:
console.print("\n\t[bold][red]Error:[/red][/bold] Embeddings already created for this channel.\n")
return
# get api key for openai
if openai_api_key is None:
openai_api_key = os.environ.get("OPENAI_API_KEY")
if openai_api_key is None:
console.print("""
[bold][red]Error:[/red][/bold] OPENAI_API_KEY environment variable not set, Run:
export OPENAI_API_KEY=<your_key> to set the key
""")
return
openai_client = OpenAI(api_key=openai_api_key)
channel_video_ids = get_vid_ids_by_channel_id(channel_id)
channel_subs = []
for vid_id in channel_video_ids:
split_subs = split_subtitles(vid_id[0])
if split_subs is None:
continue
for sub in split_subs:
start_time = sub[0]
text = sub[1]
embedding_subs = (channel_id, vid_id[0], start_time, text)
channel_subs.append(embedding_subs)
add_embeddings_to_chroma(channel_subs, openai_client)
# mark the channel as enabled for semantic search
enable_ss(channel_id)
console.print("[green]Embeddings generated[/green]")
# config
@cli.command(
help = """
Show config settings
"""
)
def config():
config_path = get_config_path()
db_path = get_db_path()
chroma_path = get_or_make_chroma_path()
console = Console()
console.print(f"\nConfig directory: {config_path}\n")
console.print(f"Database path: {db_path}\n")
console.print(f"Chroma path: {chroma_path}\n") | [] |
2024-01-10 | NotJoeMartinez/yt-fts | yt_fts~embeddings.py | import chromadb
from .config import get_or_make_chroma_path
from openai import OpenAI
from rich.progress import track
from rich.console import Console
def add_embeddings_to_chroma(subs, openai_client):
chroma_path = get_or_make_chroma_path()
chroma_client = chromadb.PersistentClient(path=chroma_path)
collection = chroma_client.get_or_create_collection(name="subEmbeddings")
for sub in track(subs, description="Getting embeddings"):
channel_id = sub[0]
video_id = sub[1]
start_time = sub[2]
text = sub[3]
if text == '':
continue
embedding = get_embedding(text, "text-embedding-ada-002", openai_client)
meta_data = {
"channel_id": channel_id,
"video_id": video_id,
"start_time": start_time,
}
collection.add(
documents=[text],
embeddings=[embedding],
metadatas=[meta_data],
ids=[video_id + "_" + str(start_time)],
)
def get_embedding(text, model="text-embedding-ada-002", client=OpenAI()):
text = text.replace("\n", " ")
return client.embeddings.create(input = [text], model=model).data[0].embedding
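# Illustrative call (hypothetical text): get_embedding("hello world", client=OpenAI())
# returns the raw embedding vector for the text as a list of floats.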
| [] |
2024-01-10 | NotJoeMartinez/yt-fts | tests~test_vector_search.py | import unittest
from openai import OpenAI
import os
from yt_fts.vector_search import search_chroma_db
class TestSearchChromaDb(unittest.TestCase):
def test_search_chroma_db(self):
# Setup
text = "nural network"
scope = "all"
video_id = None
channel_id = None
limit = 5
client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
result = search_chroma_db(text, scope, channel_id, video_id, limit, client)
self.assertEqual(len(result), 5)
def test_search_chroma_db_channel(self):
text = "nural network"
scope = "channel"
video_id = None
channel_id = "1"
limit = 5
client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
result = search_chroma_db(text, scope, channel_id, video_id, limit, client)
self.assertEqual(len(result), 5)
if __name__ == '__main__':
unittest.main() | [] |
2024-01-10 | abh2050/Codes | LangChain~example.py | ##integrate our code with OpenAI API
import os
from constant import openai_key
from langchain.llms import OpenAI
from langchain import PromptTemplate
from langchain.chains import LLMChain
from langchain.chains import SequentialChain
from langchain.memory import ConversationBufferMemory
import streamlit as st
os.environ["OPENAI_API_KEY"] = openai_key
#streamlit framework
st.title('Famous Person Search Results')
input_text = st.text_input('Search the topic you want:')
#prompt template for input
first_input_prompt = PromptTemplate(
input_variables=['name'],
template= 'Tell me about Famous Person {name}.',
)
# Memory
person_memory = ConversationBufferMemory(input_key='name',memory_key='chat_history')
dob_memory = ConversationBufferMemory(input_key='person',memory_key='chat_history')
descr_memory = ConversationBufferMemory(input_key='dob',memory_key='description_history')
#OPENAI LLMS
llm = OpenAI(temperature=0.8)
chain = LLMChain(llm=llm, prompt=first_input_prompt,verbose=True,output_key='person',memory=person_memory)
#prompt template for second input
second_input_prompt = PromptTemplate(
input_variables=['person'],
template= 'When was {person} born?'
)
#chain 2 for second input
chain2 = LLMChain(llm=llm, prompt=second_input_prompt,verbose=True,output_key='dob',memory=dob_memory)
#prompt template for 3rd input
third_input_prompt = PromptTemplate(
input_variables=['dob'],
template= 'Mention 5 major events happened on {dob} in the world'
)
#chain 3 for second input
chain3 = LLMChain(llm=llm, prompt=third_input_prompt,verbose=True,output_key='description',memory=descr_memory)
#prompt template for sequence input
parent_chain = SequentialChain(chains=[chain,chain2,chain3],input_variables=['name'],output_variables=['person','dob','description'], verbose=True)
if input_text:
st.write(parent_chain({'name':input_text}))
with st.expander('Person Name'):
st.write(person_memory.buffer)
with st.expander('Major Events'):
st.write(descr_memory.buffer)
| [
"name",
"Mention 5 major events happened on {dob} in the world",
"When was {person} born?",
"person",
"Tell me about Famous Person {name}."
] |
2024-01-10 | a5535772/aigc-learning | AI%E5%A4%A7%E6%A8%A1%E5%9E%8B%E4%B9%8B%E7%BE%8E~15.03.py | import myconfig
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.text_splitter import SpacyTextSplitter
from langchain import OpenAI, VectorDBQA
from langchain.document_loaders import TextLoader
loader = TextLoader("./data/ecommerce_faq.txt", encoding="utf-8")
documents = loader.load()
text_splitter = SpacyTextSplitter(chunk_size=256, pipeline="zh_core_web_sm")
# text_splitter = SpacyTextSplitter(chunk_size=256)
texts = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
vectorstore = FAISS.from_documents(texts, embeddings)
faq_chain = VectorDBQA.from_chain_type(llm=OpenAI(temperature=0), vectorstore=vectorstore, verbose=True)
question = "请问你们的货,能送到三亚吗?大概需要几天?"
result = faq_chain.run(question)
print(result) | [] |
2024-01-10 | a5535772/aigc-learning | AI%E5%A4%A7%E6%A8%A1%E5%9E%8B%E4%B9%8B%E7%BE%8E~06.01.py |
import openai
openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Who won the world series in 2020?"},
{"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
{"role": "user", "content": "Where was it played?"}
]
) | [
"Where was it played?",
"You are a helpful assistant.",
"The Los Angeles Dodgers won the World Series in 2020.",
"Who won the world series in 2020?"
] |
2024-01-10 | a5535772/aigc-learning | AI%E5%A4%A7%E6%A8%A1%E5%9E%8B%E4%B9%8B%E7%BE%8E~tool_16.py | import openai
import os
openai.api_key = os.environ.get("OPENAI_API_KEY")
class Conversation:
def __init__(self, prompt, num_of_round):
self.prompt = prompt
self.num_of_round = num_of_round
self.messages = []
self.messages.append({"role": "system", "content": self.prompt})
def ask(self, question):
try:
self.messages.append({"role": "user", "content": question})
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=self.messages,
temperature=0.5,
max_tokens=2048,
top_p=1,
)
except Exception as e:
print(e)
return e
message = response["choices"][0]["message"]["content"]
self.messages.append({"role": "assistant", "content": message})
if len(self.messages) > self.num_of_round * 2 + 1:
del self.messages[1:3] # Remove the first round conversation left.
return message
| [] |
2024-01-10 | a5535772/aigc-learning | AI%E5%A4%A7%E6%A8%A1%E5%9E%8B%E4%B9%8B%E7%BE%8E~15.03_retrievalQA.py | import myconfig
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.text_splitter import SpacyTextSplitter
from langchain import OpenAI
from langchain.document_loaders import TextLoader
loader = TextLoader("./data/ecommerce_faq.txt", encoding="utf-8")
documents = loader.load()
text_splitter = SpacyTextSplitter(chunk_size=256, pipeline="zh_core_web_sm")
# text_splitter = SpacyTextSplitter(chunk_size=256)
texts = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
docsearch = FAISS.from_documents(texts, embeddings)
from langchain.chains import RetrievalQA
qa = RetrievalQA.from_chain_type(llm=OpenAI(temperature=0), chain_type="stuff", retriever=docsearch.as_retriever())
question = "请问你们的货,能送到三亚吗?大概需要几天?"
result = qa.run(question)
print(result)
| [] |
2024-01-10 | a5535772/aigc-learning | AI%E5%A4%A7%E6%A8%A1%E5%9E%8B%E4%B9%8B%E7%BE%8E~15.02.py | import myconfig
import openai, os
from langchain import PromptTemplate, LLMChain, OpenAI
from langchain.chains import LLMRequestsChain
from langchain.chains import TransformChain, SequentialChain
from tool_15_02 import parse_weather_info
openai.api_key = os.environ.get("OPENAI_API_KEY")
template = """在 >>> 和 <<< 直接是来自Google的原始搜索结果.
请把对于问题 '{query}' 的答案从里面提取出来,如果里面没有相关信息的话就说 "找不到"
请使用如下格式:
Extracted:<answer or "找不到">
>>> {requests_result} <<<
Extracted:"""
PROMPT = PromptTemplate(
input_variables=["query", "requests_result"],
template=template,
)
# Instantiate the LLMRequestsChain:
requests_chain = LLMRequestsChain(llm_chain=LLMChain(llm=OpenAI(temperature=0), prompt=PROMPT))
# Define the input:
question = "今天上海的天气怎么样?"
inputs = {
"query": question,
"url": "https://www.google.com/search?q=" + question.replace(" ", "+")
}
# Run the LLMRequestsChain:
result = requests_chain(inputs)
print(result)
print(result['output'])
# Function that transforms the input data and extracts the weather information
"""
transform_func receives a dict 'inputs', which should contain the key "output".
It extracts the value associated with "output" and assigns it to the variable 'text'.
It calls parse_weather_info with 'text' as the argument to parse the weather information.
The parsed weather info is stored in a new dict under the key "weather_info".
The transformed dict is returned as the function's output.
"""
def transform_func(inputs: dict) -> dict:
# 使用键"output"从输入字典中提取文本数据
text = inputs["output"]
# Call parse_weather_info to parse the weather information from the text
weather_info = parse_weather_info(text)
# Build a new dict containing the parsed weather information
transformed_data = {"weather_info": weather_info}
# Return the transformed data
return transformed_data
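# A minimal usage sketch (hypothetical LLM output string; the exact value returned depends on parse_weather_info):
#   transform_func({"output": "Extracted: 多云, 20~27°C"})
#   # -> {"weather_info": parse_weather_info("Extracted: 多云, 20~27°C")}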
transformation_chain = TransformChain(input_variables=["output"],
output_variables=["weather_info"], transform=transform_func)
final_chain = SequentialChain(chains=[requests_chain, transformation_chain],
input_variables=["query", "url"], output_variables=["weather_info"])
final_result = final_chain.run(inputs)
print(final_result)
| [
"在 >>> 和 <<< 直接是来自Google的原始搜索结果.\n请把对于问题 '{query}' 的答案从里面提取出来,如果里面没有相关信息的话就说 \"找不到\"\n请使用如下格式:\nExtracted:<answer or \"找不到\">\n>>> {requests_result} <<<\nExtracted:",
"requests_result"
] |
2024-01-10 | a5535772/aigc-learning | AI%E5%A4%A7%E6%A8%A1%E5%9E%8B%E4%B9%8B%E7%BE%8E~16.03.py | import myconfig
from langchain import PromptTemplate
from langchain.chains import ConversationChain
from langchain.memory import ConversationSummaryBufferMemory
from langchain.llms import OpenAI
SUMMARIZER_TEMPLATE = """请将以下内容逐步概括所提供的对话内容,并将新的概括添加到之前的概括中,形成新的概括。
EXAMPLE
Current summary:
Human询问AI对人工智能的看法。AI认为人工智能是一种积极的力量。
New lines of conversation:
Human:为什么你认为人工智能是一种积极的力量?
AI:因为人工智能将帮助人类发挥他们的潜能。
New summary:
Human询问AI对人工智能的看法。AI认为人工智能是一种积极的力量,因为它将帮助人类发挥他们的潜能。
END OF EXAMPLE
Current summary:
{summary}
New lines of conversation:
{new_lines}
New summary:"""
SUMMARY_PROMPT = PromptTemplate(
input_variables=["summary", "new_lines"], template=SUMMARIZER_TEMPLATE
)
memory = ConversationSummaryBufferMemory(llm=OpenAI(), prompt=SUMMARY_PROMPT, max_token_limit=256)
CHEF_TEMPLATE = """你是一个中国厨师,用中文回答做菜的问题。你的回答需要满足以下要求:
1. 你的回答必须是中文。
2. 对于做菜步骤的回答尽量详细一些。
{history}
Human: {input}
AI:"""
CHEF_PROMPT = PromptTemplate(
input_variables=["history", "input"], template=CHEF_TEMPLATE
)
conversation_with_summary = ConversationChain(
llm=OpenAI(model_name="text-davinci-003", stop="\n\n", max_tokens=2048, temperature=0.5),
prompt=CHEF_PROMPT,
memory=memory,
verbose=True
)
answer = conversation_with_summary.predict(input="你是谁?")
print(answer) | [
"请将以下内容逐步概括所提供的对话内容,并将新的概括添加到之前的概括中,形成新的概括。\n\nEXAMPLE\nCurrent summary:\nHuman询问AI对人工智能的看法。AI认为人工智能是一种积极的力量。\n\nNew lines of conversation:\nHuman:为什么你认为人工智能是一种积极的力量?\nAI:因为人工智能将帮助人类发挥他们的潜能。\n\nNew summary:\nHuman询问AI对人工智能的看法。AI认为人工智能是一种积极的力量,因为它将帮助人类发挥他们的潜能。\nEND OF EXAMPLE\n\nCurrent summary:\n{summary}\n\nNew lines of conversation:\n{new_lines}\n\nNew summary:",
"new_lines",
"input",
"你是一个中国厨师,用中文回答做菜的问题。你的回答需要满足以下要求:\n1. 你的回答必须是中文。\n2. 对于做菜步骤的回答尽量详细一些。\n\n{history}\nHuman: {input}\nAI:"
] |
2024-01-10 | a5535772/aigc-learning | AI%E5%A4%A7%E6%A8%A1%E5%9E%8B%E4%B9%8B%E7%BE%8E~10.02.py | import openai, os
from llama_index import StorageContext, load_index_from_storage
openai.api_key = os.environ.get("OPENAI_API_KEY")
# rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir='./index_mr_fujino.json')
# load index
index = load_index_from_storage(storage_context)
query_engine = index.as_query_engine()
response = query_engine.query("鲁迅先生在日本学习医学的老师是谁?")
print(response)
| [] |
2024-01-10 | a5535772/aigc-learning | AI%E5%A4%A7%E6%A8%A1%E5%9E%8B%E4%B9%8B%E7%BE%8E~16.02.py | import myconfig
from langchain.chains import ConversationChain
from langchain.memory import ConversationSummaryMemory
from langchain import OpenAI, PromptTemplate
llm = OpenAI(temperature=0)
# The ConversationSummaryMemory we define here also takes an LLM object in its constructor. That LLM is used solely to generate summaries of the conversation history, and it can differ from the LLM object used for the conversation itself.
memory = ConversationSummaryMemory(llm=OpenAI())
prompt_template = """你是一个中国厨师,用中文回答做菜的问题。你的回答需要满足以下要求:
1. 你的回答必须是中文
2. 回答限制在100个字以内
{history}
Human: {input}
AI:"""
prompt = PromptTemplate(
input_variables=["history", "input"], template=prompt_template
)
conversation_with_summary = ConversationChain(
llm=llm,
memory=memory,
prompt=prompt,
verbose=True
)
while True:
human_input = input("Human: ")
if human_input == "exit":
break
resp = conversation_with_summary.predict(input=human_input)
print(resp)
| [
"input",
"你是一个中国厨师,用中文回答做菜的问题。你的回答需要满足以下要求:\n1. 你的回答必须是中文\n2. 回答限制在100个字以内\n\n{history}\nHuman: {input}\nAI:"
] |
2024-01-10 | a5535772/aigc-learning | AI%E5%A4%A7%E6%A8%A1%E5%9E%8B%E4%B9%8B%E7%BE%8E~14.01.py | import openai, os
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
from langchain.chains import LLMChain
openai.api_key = os.environ.get("OPENAI_API_KEY")
llm = OpenAI(model_name="text-davinci-003", max_tokens=2048, temperature=0.5)
en_to_zh_prompt = PromptTemplate(
template="请把下面这句话翻译成英文: \n\n {question}?", input_variables=["question"]
)
question_prompt = PromptTemplate(
template="{english_question}", input_variables=["english_question"]
)
zh_to_cn_prompt = PromptTemplate(
input_variables=["english_answer"],
template="请把下面这一段翻译成中文: \n\n{english_answer}?",
)
question_translate_chain = LLMChain(llm=llm, prompt=en_to_zh_prompt, output_key="english_question")
qa_chain = LLMChain(llm=llm, prompt=question_prompt, output_key="english_answer")
answer_translate_chain = LLMChain(llm=llm, prompt=zh_to_cn_prompt)
from langchain.chains import SimpleSequentialChain
chinese_qa_chain = SimpleSequentialChain(
chains=[question_translate_chain, qa_chain, answer_translate_chain], input_key="question",
verbose=True)
answer = chinese_qa_chain.run(question="请你作为一个机器学习的专家,介绍一下CNN的原理。")
print(answer)
| [
"{english_question}",
"请把下面这句话翻译成英文: \n\n {question}?",
"question",
"请把下面这一段翻译成中文: \n\n{english_answer}?",
"english_answer",
"english_question"
] |
2024-01-10 | a5535772/aigc-learning | AI%E5%A4%A7%E6%A8%A1%E5%9E%8B%E4%B9%8B%E7%BE%8E~07.02.py | import backoff
import openai
import os
import pandas as pd
import tiktoken
from openai.embeddings_utils import get_embeddings
openai.api_key = os.environ.get("OPENAI_API_KEY")
embedding_model = "text-embedding-ada-002"
embedding_encoding = "cl100k_base" # this the encoding for text-embedding-ada-002
batch_size = 2000
max_tokens = 8000 # the maximum for text-embedding-ada-002 is 8191
df = pd.read_csv('data/20_newsgroup.csv')
print("Number of rows before null filtering:", len(df))
df = df[df['text'].isnull() == False]
encoding = tiktoken.get_encoding(embedding_encoding)
df["n_tokens"] = df.text.apply(lambda x: len(encoding.encode(x)))
print("Number of rows before token number filtering:", len(df))
df = df[df.n_tokens <= max_tokens]
print("Number of rows data used:", len(df))
# 用lambda表达式打印df的每一行的text
# df.text.apply(lambda x: print(x))
@backoff.on_exception(backoff.expo, openai.error.RateLimitError)
def get_embeddings_with_backoff(prompts, engine):
embeddings = []
for i in range(0, len(prompts), batch_size):
batch = prompts[i:i + batch_size]
embeddings += get_embeddings(list_of_text=batch, engine=engine)
return embeddings
prompts = df.text.tolist()
prompt_batches = [prompts[i:i + batch_size] for i in range(0, len(prompts), batch_size)]
embeddings = []
for batch in prompt_batches:
batch_embeddings = get_embeddings_with_backoff(prompts=batch, engine=embedding_model)
embeddings += batch_embeddings
df["embedding"] = embeddings
df.to_parquet("data/20_newsgroup_with_embedding.parquet", index=False)
| [] |
2024-01-10 | a5535772/aigc-learning | AI%E5%A4%A7%E6%A8%A1%E5%9E%8B%E4%B9%8B%E7%BE%8E~09.01.py | import openai, os
openai.api_key = os.environ.get("OPENAI_API_KEY")
COMPLETION_MODEL = "text-davinci-003"
def generate_data_by_prompt(prompt):
response = openai.Completion.create(
engine=COMPLETION_MODEL,
prompt=prompt,
temperature=0.5,
max_tokens=2048,
top_p=1,
)
return response.choices[0].text
prompt = """请你生成5条淘宝网里的商品的标题,每条在30个字左右,品类是3C数码产品,标题里往往也会有一些促销类的信息,每行一条。"""
data = generate_data_by_prompt(prompt)
import pandas as pd
product_names = data.strip().split('\n')
df = pd.DataFrame({'product_name': product_names})
df.head()
from openai.embeddings_utils import get_embeddings
import openai, os, backoff
openai.api_key = os.environ.get("OPENAI_API_KEY")
embedding_model = "text-embedding-ada-002"
batch_size = 100
@backoff.on_exception(backoff.expo, openai.error.RateLimitError)
def get_embeddings_with_backoff(prompts, engine):
embeddings = []
for i in range(0, len(prompts), batch_size):
batch = prompts[i:i+batch_size]
embeddings += get_embeddings(list_of_text=batch, engine=engine)
return embeddings
prompts = df.product_name.tolist()
prompt_batches = [prompts[i:i+batch_size] for i in range(0, len(prompts), batch_size)]
embeddings = []
for batch in prompt_batches:
batch_embeddings = get_embeddings_with_backoff(prompts=batch, engine=embedding_model)
embeddings += batch_embeddings
df["embedding"] = embeddings
df.to_parquet("data/taobao_product_title.parquet", index=False)
from openai.embeddings_utils import get_embedding, cosine_similarity
# search through the reviews for a specific product
def search_product(df, query, n=3, pprint=True):
product_embedding = get_embedding(
query,
engine=embedding_model
)
df["similarity"] = df.embedding.apply(lambda x: cosine_similarity(x, product_embedding))
results = (
df.sort_values("similarity", ascending=False)
.head(n)
.product_name
)
if pprint:
for r in results:
print(r)
return results
results = search_product(df, "自然淡雅背包", n=3) | [
"请你生成5条淘宝网里的商品的标题,每条在30个字左右,品类是3C数码产品,标题里往往也会有一些促销类的信息,每行一条。"
] |
2024-01-10 | a5535772/aigc-learning | AI%E5%A4%A7%E6%A8%A1%E5%9E%8B%E4%B9%8B%E7%BE%8E~10.01.py | import openai, os
from llama_index import SimpleDirectoryReader, VectorStoreIndex
openai.api_key = os.environ.get("OPENAI_API_KEY")
documents = SimpleDirectoryReader('./data/mr_fujino').load_data()
index = VectorStoreIndex.from_documents(documents)
index.storage_context.persist('index_mr_fujino.json')
response = index.query("鲁迅先生去哪里学的医学?")
print(response) | [] |
2024-01-10 | a5535772/aigc-learning | AI%E5%A4%A7%E6%A8%A1%E5%9E%8B%E4%B9%8B%E7%BE%8E~15.02_pretty.py | import myconfig
import openai, os
from langchain import PromptTemplate, LLMChain, OpenAI
from langchain.chains import LLMRequestsChain
from langchain.chains import TransformChain, SequentialChain
from tool_15_02 import parse_weather_info
# open ai api key
openai.api_key = os.environ.get("OPENAI_API_KEY")
# build LLMRequestsChain
def build_requests_chain() -> LLMRequestsChain:
# Define the template
template = """在 >>> 和 <<< 直接是来自Google的原始搜索结果.
请把对于问题 '{query}' 的答案从里面提取出来,如果里面没有相关信息的话就说 "找不到"
请使用如下格式:
Extracted:<answer or "找不到">
>>> {requests_result} <<<
Extracted:"""
prompt_template = PromptTemplate(
input_variables=["query", "requests_result"],
template=template,
)
return LLMRequestsChain(llm_chain=LLMChain(llm=OpenAI(temperature=0), prompt=prompt_template))
"""
transform_func receives a dict 'inputs', which should contain the key "output".
It extracts the value associated with "output" and assigns it to the variable 'text'.
It calls parse_weather_info with 'text' as the argument to parse the weather information.
The parsed weather info is stored in a new dict under the key "weather_info".
The transformed dict is returned as the function's output.
"""
def transform_func(inputs: dict) -> dict:
# Extract the text data from the input dict using the key "output"
text = inputs["output"]
# Call parse_weather_info to parse the weather information from the text
weather_info = parse_weather_info(text)
# Build a new dict containing the parsed weather information
transformed_data = {"weather_info": weather_info}
# Return the transformed data
return transformed_data
"""业务调用"""
question = "今天上海的天气怎么样?"
inputs = {
"query": question,
"url": "https://www.google.com/search?q=" + question.replace(" ", "+")
}
requests_chain = build_requests_chain()
transformation_chain = TransformChain(input_variables=["output"], output_variables=["weather_info"],
transform=transform_func)
sequential_chain = SequentialChain(chains=[requests_chain, transformation_chain],
input_variables=["query", "url"], output_variables=["weather_info"])
final_result = sequential_chain.run(inputs)
print(final_result)
| [
"在 >>> 和 <<< 直接是来自Google的原始搜索结果.\n 请把对于问题 '{query}' 的答案从里面提取出来,如果里面没有相关信息的话就说 \"找不到\"\n 请使用如下格式:\n Extracted:<answer or \"找不到\">\n >>> {requests_result} <<<\n Extracted:",
"requests_result"
] |
2024-01-10 | a5535772/aigc-learning | AI%E5%A4%A7%E6%A8%A1%E5%9E%8B%E4%B9%8B%E7%BE%8E~15.01.py | import openai, os
import myconfig
openai.api_key = os.environ.get("OPENAI_API_KEY")
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
from langchain.chains import LLMChain
llm = OpenAI(model_name="text-davinci-003", max_tokens=2048, temperature=0.5)
multiply_prompt = PromptTemplate(template="请计算一下{question}是多少?", input_variables=["question"])
math_chain = LLMChain(llm=llm, prompt=multiply_prompt, output_key="answer")
answer = math_chain.run({"question": "352乘以493"})
print("OpenAI API 说答案是:", answer)
python_answer = 352 * 493
print("Python 说答案是:", python_answer)
from langchain import LLMMathChain
llm_math = LLMMathChain(llm=llm, verbose=True)
result = llm_math.run("请计算一下352乘以493是多少?")
print(result) | [
"question",
"请计算一下{question}是多少?"
] |
2024-01-10 | a5535772/aigc-learning | AI%E5%A4%A7%E6%A8%A1%E5%9E%8B%E4%B9%8B%E7%BE%8E~11.01.py |
import openai, os
import faiss
from llama_index import SimpleDirectoryReader, LangchainEmbedding, GPTFaissIndex, ServiceContext
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from llama_index.node_parser import SimpleNodeParser
openai.api_key = ""
text_splitter = CharacterTextSplitter(separator="\n\n", chunk_size=100, chunk_overlap=20)
parser = SimpleNodeParser(text_splitter=text_splitter)
documents = SimpleDirectoryReader('./data/faq/').load_data()
nodes = parser.get_nodes_from_documents(documents)
embed_model = LangchainEmbedding(HuggingFaceEmbeddings(
model_name="sentence-transformers/paraphrase-multilingual-mpnet-base-v2"
))
service_context = ServiceContext.from_defaults(embed_model=embed_model)
dimension = 768
faiss_index = faiss.IndexFlatIP(dimension)
index = GPTFaissIndex(nodes=nodes,faiss_index=faiss_index, service_context=service_context)
from llama_index import QueryMode
openai.api_key = os.environ.get("OPENAI_API_KEY")
query_engine = index.as_query_engine()
response = query_engine.query(
"鲁迅先生在日本学习医学的老师是谁?",
mode=QueryMode.EMBEDDING,
verbose=True,)
print(response)
| [] |
2024-01-10 | peachyland/lm-watermarking | experiments~gpt_attack.py | import openai
from io_utils import read_jsonlines, read_json, write_jsonlines
from datasets import Dataset, concatenate_datasets
openai.api_key = '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'
def gpt_attack(example, no_wm_attack=False):
# assert attack_prompt, "Prompt must be provided for GPT attack"
# attack_prompt="As an expert copy-editor, please rewrite the following text in your own voice while ensuring that the final output contains the same information as the original text and has roughly the same length. Please paraphrase all sentences and do not omit any crucial details. Additionally, please take care to provide any relevant information about public figures, organizations, or other entities mentioned in the text to avoid any potential misunderstandings or biases."
# attack_prompt="paraphrase the following paragraphs:\n"
attack_prompt="You are an expert copy-editor. Please rewrite the following text in your own voice and paraphrase all sentences. \n Ensure that the final output contains the same information as the original text and has roughly the same length. \n Do not leave out any important details when rewriting in your own voice. This is the text: \n"
gen_row = example
if no_wm_attack:
original_text = gen_row["no_bl_output"]
else:
original_text = gen_row["w_bl_output"]
attacker_query = attack_prompt + original_text
query_msg = {"role": "user", "content": attacker_query}
# query_msg = [{'role': 'system', 'content': 'You are a helpful assistant.'},
# {'role': 'user', 'content': attack_prompt},
# {'role': 'assistant', 'content': 'No problem.'},
# {'role': 'user', 'content': original_text}]
from tenacity import retry, stop_after_attempt, wait_random_exponential
# https://github.com/openai/openai-cookbook/blob/main/examples/How_to_handle_rate_limits.ipynb
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(25))
def completion_with_backoff(model, messages, temperature, max_tokens):
return openai.ChatCompletion.create(
model=model, messages=messages, temperature=temperature, max_tokens=max_tokens
)
outputs = completion_with_backoff(
model='gpt-3.5-turbo',
messages=[query_msg],
temperature=0.7,
max_tokens=1000,
)
attacked_text = outputs.choices[0].message.content
print(original_text)
print("check-------------------")
print(attacked_text)
# assert (
# len(outputs.choices) == 1
# ), "OpenAI API returned more than one response, unexpected for length inference of the output"
example["w_bl_num_tokens_generated"] = outputs.usage.completion_tokens
example["w_bl_output"] = attacked_text
# print(outputs.usage.completion_tokens)
# print(len(attacked_text.split(" ")))
# print(len(original_text.split(" ")))
# print(example["w_bl_num_tokens_generated"])
# if args.verbose:
# print(f"\nOriginal text (T={example['w_wm_output_length']}):\n{original_text}")
# print(f"\nAttacked text (T={example['w_wm_output_attacked_length']}):\n{attacked_text}")
return example
def str_replace_bug_check(example,idx):
baseline_before = example["baseline_completion"]
example["baseline_completion"] = baseline_before.replace(example["truncated_input"][:-1],"")
if example["baseline_completion"] != baseline_before:
# print("baseline input replacement bug occurred, skipping row!")
return False
else:
return True
run_base_dir = f"/egr/research-dselab/renjie3/renjie/LLM/watermark_LLM/lm-watermarking/experiments/results/all_runs_07131520"
meta_name = "gen_table_meta.json"
gen_name = "gen_table_w_metrics.jsonl"
gen_table_meta_path = f"{run_base_dir}/{meta_name}"
gen_table_path = f"{run_base_dir}/{gen_name}"
attack_path = f"{run_base_dir}/gpt_attacked_100.jsonl"
# load the raw files
gen_table_meta = read_json(gen_table_meta_path)
print(gen_table_meta)
gen_table_lst = [ex for ex in read_jsonlines(gen_table_path)]
# print(gen_table_lst)
gen_table_ds = Dataset.from_list(gen_table_lst[:100])
print(gen_table_ds)
print(f"Original dataset length={len(gen_table_ds)}")
gen_table_ds_filtered = gen_table_ds.filter(str_replace_bug_check,batched=False,with_indices=True)
print(f"gen_table_ds_filtered length={len(gen_table_ds_filtered)}")
# gen_table_ds_filtered[0] =
# gpt_attack(gen_table_ds_filtered[0])
attacked_ds = gen_table_ds_filtered.map(gpt_attack)
# print(gen_table_ds_filtered[0]["real_completion_length"])
attacked_ds_lst = [ex for ex in attacked_ds]
write_jsonlines(attacked_ds_lst, attack_path)
| [
"You are an expert copy-editor. Please rewrite the following text in your own voice and paraphrase all sentences. \n Ensure that the final output contains the same information as the original text and has roughly the same length. \n Do not leave out any important details when rewriting in your own voice. This is the text: \n",
"You are an expert copy-editor. Please rewrite the following text in your own voice and paraphrase all sentences. \n Ensure that the final output contains the same information as the original text and has roughly the same length. \n Do not leave out any important details when rewriting in your own voice. This is the text: \nPLACEHOLDER"
] |
2024-01-10 | leandroomargarcia/leomniga | Amazon_Content_Manager.py | '''Instructions for running this code on a PC (it cannot be run from COLAB)
1 - Python and either Anaconda or Visual Studio Code must be installed on the PC.
2 - Run from the console: pip install streamlit
3 - Navigate in the console to the folder where this code lives on the PC. Run this code.
4 - If it raises an error that streamlit.cli cannot be found, reinstall streamlit with:
1- pip uninstall streamlit
2- pip install streamlit
5 - Once running, a Chrome web page will open with our app running locally.
'''
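# A minimal sketch of the launch step described above (assuming the file keeps its name Amazon_Content_Manager.py):
#   streamlit run Amazon_Content_Manager.py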
import subprocess
# Check whether the library is installed
try:
import matplotlib.pyplot as plt
except ImportError:
# The library is not installed, so install it
subprocess.check_call(['pip', 'install', 'matplotlib'])
import string
import streamlit as st
import pandas as pd
import pickle
import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer, PorterStemmer
import lightgbm as lgb
import openai
# Disable the warning about using global pyplot
st.set_option('deprecation.showPyplotGlobalUse', False)
def main():
st.title("✨Amazon Product Title Accuracy Specialist Web App")
st.markdown("Are you sure about your Descripction Product? Let me help you review it and giving you my professional suggestion")
@st.cache_resource()
def load_LGBM():
with open('modelo_LGBM.pkl', 'rb') as f:
modelo = pickle.load(f)
return modelo
@st.cache_resource()
def load_vectorizer():
with open('tfidf_vectorizer.pkl', 'rb') as f:
modelo = pickle.load(f)
return modelo
# Display the entered text
def show_input_text(input_text):
st.write("The input text is: ", input_text)
@st.cache_resource()
def normalize(text):
text = text.lower() # Convert text to lowercase
tokens = word_tokenize(text) # Tokenize the text into words
tokens = [token for token in tokens if token.isalnum() and token not in stopwords_en] # Remove stop words
lemmatizer = WordNetLemmatizer()
lemmas = [lemmatizer.lemmatize(token) for token in tokens]
preprocessed_text = ' '.join(lemmas)
return preprocessed_text
# Main
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
nltk.download('omw-1.4')
## Load Modules
#lemmatizer = WordNetLemmatizer()
stopwords_en = set(nltk.corpus.stopwords.words('english'))
punctuation = string.punctuation
stemmer = PorterStemmer()
input_text = st.text_input("Write here your Description Product", "") # load text input
# Predict if the title is good or not using LGBM
if input_text != "":
show_input_text(input_text) # show input text
X_clean = normalize(input_text) # preprocess text
modelo_vectorizer = load_vectorizer() # Convert the preprocessed text into a TF-IDF vector
df_vectors_test = modelo_vectorizer.transform([X_clean]) # Predict using imported PKL vectorizer model
modelo_LGBM = load_LGBM() #Load model
predictions = modelo_LGBM.predict(df_vectors_test) # Predict using imported PKL LGBM model
predictions = predictions[0]
threshold = 0.05
if predictions > threshold:
predictions = 1
else: predictions = 0
# Print the prediction
if predictions == 1:
show_input_text("The Description is Awesome") # show input text
else:
show_input_text("This Description is not so good....") # show input text
# Give a reviewed description of the product using chatGPT 4
openai_key = st.text_input("Write here your Open AI Key", "") # load openaikey
openai.api_key = openai_key
if openai_key != "":
concent = 'You are a Marketing content-writing specialist. Your goal is to redo the product Description that I give you.\
Write a short text without any additional explanation or commentary. I need you to improve this description to get more sales'
response = openai.ChatCompletion.create(
model="gpt-4",
messages=[
{"role": "system", "content": concent},
{"role": "user", "content": input_text}
],
temperature=.5,
max_tokens=200,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
prompt_response = response["choices"][0]["message"]['content'].strip()
request = 'Description Product: ' + prompt_response
st.text(request)
if __name__ == '__main__':
main()
| [
"content"
] |
2024-01-10 | hyungseok-lee/autoever_chatbot_v1 | run_app.py | import os
import streamlit as st
# from dotenv import load_dotenv
# from streamlit_chat import message
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.vectorstores import FAISS
from langchain.embeddings import HuggingFaceEmbeddings
# load_dotenv()
# openai_api_key = os.getenv('OPENAI_API_KEY')
openai_api_key = os.environ.get('OPENAI_API_KEY')
# load vectorestore(faiss_index)
@st.cache_resource(show_spinner=False)
def load_vectorstore(data_dir):
embeddings = HuggingFaceEmbeddings(
model_name="jhgan/ko-sroberta-multitask",
model_kwargs={'device': 'cpu'},
encode_kwargs={'normalize_embeddings': True})
vectorstore = FAISS.load_local(data_dir, embeddings)
return vectorstore
# Chain that handles the conversation
def get_conversation_chain(vetorestore, openai_api_key):
llm = ChatOpenAI(
openai_api_key=openai_api_key,
model_name='gpt-3.5-turbo-1106',
temperature=0
)
conversation_chain = ConversationalRetrievalChain.from_llm(
llm=llm,
chain_type="stuff",
retriever=vetorestore.as_retriever(search_type='mmr', verbose=True),
memory=ConversationBufferMemory(memory_key='chat_history', return_messages=True, output_key='answer'),
get_chat_history=lambda h: h[:50],
return_source_documents=True,
verbose = True
)
return conversation_chain
def main():
st.set_page_config(page_title="Autoever Chat", page_icon=":books:")
st.title("_Autoever :red[QA Chat]_ :books:")
if "conversation" not in st.session_state:
st.session_state.conversation = None
if "chat_history" not in st.session_state:
st.session_state.chat_history = None
if 'messages' not in st.session_state:
st.session_state['messages'] = [{"role": "assistant",
"content": "궁금하신 것이 있으면 언제든 물어봐주세요!"}]
# Render the chat history (runs every time the screen is redrawn)
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
if 'vectorstore' not in st.session_state:
st.session_state.vectorstore = load_vectorstore("faiss_index")
st.session_state.conversation = get_conversation_chain(
st.session_state.vectorstore, openai_api_key)
# Chat logic
if query := st.chat_input("질문을 입력해주세요."):
st.session_state.messages.append({"role": "user", "content": query})
with st.chat_message("user"):
st.markdown(query)
with st.chat_message("assistant"):
chain = st.session_state.conversation
with st.spinner("Thinking..."):
result = chain({"question": query})
response = result['answer']
source_documents = result['source_documents']
st.markdown(response)
with st.expander("참고 문서 확인"):
st.markdown(source_documents[0].metadata['source'], help = source_documents[0].page_content)
st.markdown(source_documents[1].metadata['source'], help = source_documents[1].page_content)
st.markdown(source_documents[2].metadata['source'], help = source_documents[2].page_content)
# Add assistant message to chat history
st.session_state.messages.append({"role": "assistant", "content": response})
# 메인 함수 호출
if __name__ == "__main__":
main() | [
"궁금하신 것이 있으면 언제든 물어봐주세요!"
] |
2024-01-10 | Gen-AI-Automation-MINT/Product-Comparison | llm_csv_gen.py | from openai import OpenAI
import json
# Initialize the OpenAI client
api_key = "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"
client = OpenAI(api_key=api_key)
# System prompt
system_prompt = """
You will be provided with semistructured data of an ecommerce product, and your task is to extract only these attributes - [title, brand, model, size, color, pack/count, material] from it and generate JSON output.
"""
# Function to process a single product and extract attributes
def process_product(product_data):
user_prompt = f"Here is the product details: \n{json.dumps(product_data, indent=2)}"
response = client.chat.completions.create(
model="gpt-3.5-turbo-1106",
response_format={"type": "json_object"},
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_prompt}
]
)
return response.choices[0].message.content
# Load your JSON data
with open('amazon/amazon_data.json', 'r') as file:
product_details = json.load(file)
# Process each product and collect the results
results = []
for product in product_details:
result = process_product(product)
results.append(result)
# Save the results to a JSON file
with open('output.json', 'w') as json_file:
json.dump(results, json_file, indent=4)
print("Processing complete. Data saved to output.json.")
| [
"\nYou will be provided with semistructured data of an ecommerce product, and your task is to extract only these attributes - [title, brand, model, size, color, pack/count, material] from it and generate JSON output.\n",
"f\"Here is the product details: \\n{json.dumps(product_data, indent=2)}"
] |
2024-01-10 | Gen-AI-Automation-MINT/Product-Comparison | old_files~anthropic.py | import streamlit as st
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
ANTHROPIC_API_KEY = st.secrets["apikey"]
anthropic = Anthropic(api_key=ANTHROPIC_API_KEY)
st.set_page_config(page_title="Anthropic", page_icon="🧠")
prompt = st.chat_input("Say something")
pre_prompt = "You are an expert in NLP. Take a deep breath before you answer my question."
if prompt:
st.write("You:", prompt)
st.subheader("Anthropic response")
response = anthropic.completions.create(
model="claude-instant-1.2",
max_tokens_to_sample=400,
prompt=f"{HUMAN_PROMPT} {pre_prompt} {prompt} {AI_PROMPT}",
)
out = response.completion
st.write(out)
| [
"Say something",
"PLACEHOLDER You are an expert in NLP. Take a deep breath before you answer my question. PLACEHOLDER PLACEHOLDER",
"You are an expert in NLP. Take a deep breath before you answer my question."
] |
2024-01-10 | mccarvik/ai_langchain | 8ch~prompting~zeroshot.py |
import sys
sys.path.append("..")
sys.path.append("../..")
sys.path.append("c://users//mccar//miniconda3//lib//site-packages")
from config import set_environment
set_environment()
# from langchain import PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
prompt = ChatPromptTemplate.from_template("Classify the sentiment of this text: {text}")
model = ChatOpenAI()
# prompt = PromptTemplate(input_variables=["text"], template="Classify the sentiment of this text: {text}")
# prompt = PromptTemplate.from_template("Classify the sentiment of this text: {text}")
chain = prompt | model
print(chain.invoke({"text": "I hated that movie, it was terrible!"}))
if __name__ == "__main__":
pass
| [
"Classify the sentiment of this text: {text}"
] |
2024-01-10 | mccarvik/ai_langchain | 8ch~prompting~self_consistency.py |
import sys
sys.path.append("..")
sys.path.append("../..")
sys.path.append("c://users//mccar//miniconda3//lib//site-packages")
from config import set_environment
set_environment()
from langchain import PromptTemplate, LLMChain
from langchain.chains import SequentialChain
from langchain.chat_models import ChatOpenAI
solutions_template = """
Generate {num_solutions} distinct answers to this question:
{question}
Solutions:
"""
solutions_prompt = PromptTemplate(
template=solutions_template,
input_variables=["question", "num_solutions"]
)
solutions_chain = LLMChain(
llm=ChatOpenAI(),
prompt=solutions_prompt,
output_key="solutions"
)
consistency_template = """
For each answer in {solutions}, count the number of times it occurs. Finally, choose the answer that occurs most.
Most frequent solution:
"""
consistency_prompt = PromptTemplate(
template=consistency_template,
input_variables=["solutions"]
)
consistency_chain = LLMChain(
llm=ChatOpenAI(),
prompt=consistency_prompt,
output_key="best_solution"
)
answer_chain = SequentialChain(
chains=[solutions_chain, consistency_chain],
input_variables=["question", "num_solutions"],
output_variables=["best_solution"]
)
print(answer_chain.run(
question="Which year was the Declaration of Independence of the United States signed?",
num_solutions="5"
))
if __name__ == "__main__":
pass
| [
"\nFor each answer in {solutions}, count the number of times it occurs. Finally, choose the answer that occurs most.\n\nMost frequent solution: \n",
"\nGenerate {num_solutions} distinct answers to this question:\n{question}\n\nSolutions:\n",
"question",
"solutions",
"num_solutions"
] |
2024-01-10 | mccarvik/ai_langchain | 9ch~monitoring_and_evaluation~result_evaluation.py | """Evaluate a result from an agent execution.
by calculating the embedding distance to an expected output (a reference).
"""
import sys
sys.path.append("..")
sys.path.append("c://users//mccar//miniconda3//lib//site-packages")
from config import set_environment
set_environment()
from langchain.evaluation import load_evaluator, EvaluatorType, PairwiseStringEvalChain
# evaluator = load_evaluator("embedding_distance")
#
# print(evaluator.evaluate_strings(prediction="I shall go", reference="I shan't go"))
#
#
# evaluator = load_evaluator("labeled_pairwise_string")
#
# print(evaluator.evaluate_string_pairs(
# prediction="there are three dogs",
# prediction_b="4",
# input="how many dogs are in the park?",
# reference="four",
# ))
custom_criteria = {
"simplicity": "Is the language straightforward and unpretentious?",
"clarity": "Are the sentences clear and easy to understand?",
"precision": "Is the writing precise, with no unnecessary words or details?",
"truthfulness": "Does the writing feel honest and sincere?",
"subtext": "Does the writing suggest deeper meanings or themes?",
}
evaluator = load_evaluator(EvaluatorType.PAIRWISE_STRING, criteria=custom_criteria)
assert isinstance(evaluator, PairwiseStringEvalChain)
print(evaluator.evaluate_string_pairs(
prediction="Every cheerful household shares a similar rhythm of joy; but sorrow, in each household, plays a unique, haunting melody.",
prediction_b="Where one finds a symphony of joy, every domicile of happiness resounds in harmonious,"
" identical notes; yet, every abode of despair conducts a dissonant orchestra, each"
" playing an elegy of grief that is peculiar and profound to its own existence.",
input="Write some prose about families.",
))
if __name__ == "__main__":
pass
| [] |
2024-01-10 | mccarvik/ai_langchain | 8ch~prompting~chain_of_thought.py |
import sys
sys.path.append("..")
sys.path.append("../..")
sys.path.append("c://users//mccar//miniconda3//lib//site-packages")
from config import set_environment
set_environment()
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from config import set_environment
set_environment()
cot_instruction = "Let's think step by step!"
cot_instruction2 = "Explain your reasoning step-by-step. Finally, state the answer."
reasoning_prompt = "{question}\n" + cot_instruction
prompt = PromptTemplate(
template=reasoning_prompt,
input_variables=["question"]
)
model = ChatOpenAI()
chain = prompt | model
print(chain.invoke({
"question": "There were 5 apples originally. I ate 2 apples. My friend gave me 3 apples. How many apples do I have now?",
}))
if __name__ == "__main__":
pass
| [
"question",
"{question}\nLet's think step by step!"
] |
2024-01-10 | mccarvik/ai_langchain | 8ch~prompting~fewshot.py |
import sys
sys.path.append("..")
sys.path.append("../..")
sys.path.append("c://users//mccar//miniconda3//lib//site-packages")
from config import set_environment
set_environment()
from langchain import PromptTemplate, FewShotPromptTemplate
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts.example_selector import SemanticSimilarityExampleSelector
from langchain.chat_models import ChatOpenAI
from langchain.vectorstores import Chroma
from config import set_environment
set_environment()
model = ChatOpenAI()
example_prompt = PromptTemplate(
template="{input} -> {output}",
input_variables=["input", "output"],
)
examples = [{
"input": "I absolutely love the new update! Everything works seamlessly.",
"output": "Positive",
},{
"input": "It's okay, but I think it could use more features.",
"output": "Neutral",
}, {
"input": "I'm disappointed with the service, I expected much better performance.",
"output": "Negative"
}]
prompt = FewShotPromptTemplate(
examples=examples,
example_prompt=example_prompt,
suffix="Question: {input}",
input_variables=["input"]
)
print((prompt | model).invoke({"input": "This is an excellent book with high quality explanations."}))
selector = SemanticSimilarityExampleSelector.from_examples(
examples=examples,
embeddings=OpenAIEmbeddings(),
vectorstore_cls=Chroma,
k=4,
)
prompt = FewShotPromptTemplate(
example_selector=selector,
example_prompt=example_prompt,
suffix="Question: {input}",
input_variables=["input"]
)
print((prompt | model).invoke({"input": "What's 10+10?"}))
if __name__ == "__main__":
pass
| [
"Question: {input}",
"{input} -> {output}",
"input"
] |
2024-01-10 | mccarvik/ai_langchain | 6ch~soft_dev.py |
import sys
sys.path.append("..")
from config import set_environment
set_environment()
from langchain import HuggingFaceHub, HuggingFacePipeline
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
def hugface():
llm = HuggingFaceHub(
task="text-generation",
repo_id="HuggingFaceH4/starchat-alpha",
model_kwargs={
"temperature": 0.5,
"max_length": 1000
}
)
text = "a dis"
print(llm(text))
def small_local():
checkpoint = "Salesforce/codegen-350M-mono"
model = AutoModelForCausalLM.from_pretrained(checkpoint)
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
pipe = pipeline(
task="text-generation",
model=model,
tokenizer=tokenizer,
max_new_tokens=500
)
text = """
def calculate_primes(n):
\"\"\"Create a list of consecutive integers from 2 up to N.
For example:
>>> calculate_primes(20)
Output: [2, 3, 5, 7, 11, 13, 17, 19]
\"\"\"
"""
completion = pipe(text)
print(completion[0]["generated_text"])
# llm = HuggingFacePipeline(pipeline=pipe)
# llm(text)
# hugface()
small_local() | [] |
2024-01-10 | mccarvik/ai_langchain | 9ch~monitoring_and_evaluation~run_benchmark.py | """Test our agent against a benchmark dataset.
This uses LangSmith. Please set your LangSmith API key. See
create_benchmark to create the benchmark dataset.
"""
import os
import sys
sys.path.append("..")
sys.path.append("c://users//mccar//miniconda3//lib//site-packages")
from config import set_environment
set_environment()
from langchain import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.smith import RunEvalConfig, run_on_dataset
from langsmith import Client
from config import set_environment
set_environment()
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_PROJECT"] = "My Project"
client = Client()
shared_dataset_name = "Reasoning and Bias"
llm = ChatOpenAI(model="gpt-4", temperature=0.0)
# Use constructor function to initialize for each input:
def construct_chain():
return LLMChain.from_string(
llm,
template="Help out as best you can.\nQuestion: {input}\nResponse: ",
)
evaluation_config = RunEvalConfig(
evaluators=[
# Arbitrary criterion as a key: value pair in the criteria dict:
RunEvalConfig.Criteria({"helpfulness": "Is the response helpful?"}),
RunEvalConfig.Criteria({"insightful": "Is the response carefully thought out?"})
]
)
prototype_results = run_on_dataset(
client=client,
dataset_name=shared_dataset_name,
llm_or_chain_factory=construct_chain,
evaluation=evaluation_config,
verbose=True,
)
prototype_project_name = prototype_results["project_name"]
if __name__ == "__main__":
pass
| [] |
2024-01-10 | mccarvik/ai_langchain | 9ch~monitoring_and_evaluation~tracing.py | """Tracing of agent calls and intermediate results."""
import subprocess
from langchain.chat_models import ChatOpenAI
from langchain.tools import StructuredTool
from langchain.agents import AgentType, initialize_agent
from pydantic import HttpUrl
from urllib.parse import urlparse
import sys
sys.path.append("..")
sys.path.append("c://users//mccar//miniconda3//lib//site-packages")
from config import set_environment
set_environment()
def ping(url: HttpUrl, return_error: bool) -> str:
"""Ping the fully specified url. Must include https:// in the url."""
hostname = urlparse(str(url)).netloc
completed_process = subprocess.run(
["ping", "-c", "1", hostname], capture_output=True, text=True
)
output = completed_process.stdout
if return_error and completed_process.returncode != 0:
return completed_process.stderr
return output
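# Illustrative call, per the docstring's https:// requirement (return_error=True also surfaces stderr on failure):
#   ping("https://langchain.com", return_error=True)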
# alternatively annotate the ping() function with @tool
ping_tool = StructuredTool.from_function(ping)
llm = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0)
agent = initialize_agent(
llm=llm,
tools=[ping_tool],
agent=AgentType.OPENAI_MULTI_FUNCTIONS,
return_intermediate_steps=True, # IMPORTANT!
)
result = agent("What's the latency like for https://langchain.com?")
print(result)
if __name__ == "__main__":
pass
| [] |
2024-01-10 | mccarvik/ai_langchain | 9ch~search_engine~serve_vector_store.py | """Document indexing.
Building a vector store fast.
Adapted from open_source_LLM_search_engine:
https://github.com/ray-project/langchain-ray/
You can run this from the terminal in the search_engine directory like this:
> PYTHONPATH=../ python serve_vector_store.py
"""
import time
import requests
from fastapi import FastAPI
from langchain.vectorstores import FAISS
from ray import serve
from config import set_environment
from search_engine.utils import INDEX_PATH, get_embeddings
# set keys:
set_environment()
app = FastAPI()
@serve.deployment()
@serve.ingress(app)
class VectorSearchDeployment:
def __init__(self):
# Load the data from faiss
st = time.time()
self.embeddings = get_embeddings()
self.db = FAISS.load_local(INDEX_PATH, self.embeddings)
et = time.time() - st
print(f"Loading database took {et} seconds.")
@app.get("/search")
def search(self, query: str):
results = self.db.max_marginal_relevance_search(query, k=1, fetch_k=10)
retval = ""
for i in range(len(results)):
chunk = results[i]
source = chunk.metadata["source"]
retval = retval + f"From http://{source}\n\n"
retval = retval + chunk.page_content
retval = retval + "\n====\n\n"
return retval
# class SearchDeployment:
# def __init__(self):
# self.db = db
# self.embedding = embedding
# def __call__(self, request):
# query_embed = self.embedding(request.query_params["query"])
# results = self.db.max_marginal_relevance_search(query_embed)
# return format_results(results)
# deployment = SearchDeployment.bind()
# # Start service
# serve.run(deployment)
deployment = VectorSearchDeployment.bind()
serve.run(deployment)
if __name__ == "__main__":
# using bind() instead of remote()
# this will ready the dag, but not execute it yet.
print(requests.get(
"http://localhost:8000/search",
params={
"query": "What are the different components of Ray"
" and how can they help with large language models (LLMs)?"
}
).json())
input("Press Enter to shut down the server...")
| [] |
2024-01-10 | mccarvik/ai_langchain | 9ch~monitoring_and_evaluation~trajectory_evaluation.py |
import sys
sys.path.append("..")
sys.path.append("c://users//mccar//miniconda3//lib//site-packages")
from config import set_environment
set_environment()
from langchain import OpenAI
from langchain.chat_models import ChatAnthropic
from langchain.evaluation import load_evaluator, EvaluatorType
eval_llm = OpenAI(temperature=0)
# GPT 4.0 by default:
evaluator = load_evaluator(
evaluator=EvaluatorType.AGENT_TRAJECTORY,
llm=eval_llm
)
| [] |
2024-01-10 | mccarvik/ai_langchain | 6ch~software_development~baby_dev.py | """Task planner and executor for software development."""
from langchain import LLMChain, OpenAI, PromptTemplate
from langchain.agents import Tool
from langchain.tools import DuckDuckGoSearchResults, BaseTool
from langchain_experimental.plan_and_execute import (
PlanAndExecute,
load_agent_executor,
load_chat_planner,
)
import sys
sys.path.append("..")
from config import set_environment
set_environment()
from python_developer import DEV_PROMPT, PythonDeveloper, PythonExecutorInput
todo_prompt = PromptTemplate.from_template(
"You are a planner who is an expert at coming up with requirements, "
"required functions, for a given objective. "
"Use this when you need to break down a task into smaller chunks."
"The output should be a list of the format {function name}: {requirements of the function}"
"Come up with a list of needed functions for this objective: {objective}"
)
todo_llm = LLMChain(
llm=OpenAI(temperature=0),
prompt=todo_prompt
)
# # , model_name="ada"
software_prompt = PromptTemplate.from_template(DEV_PROMPT)
# careful: if you have the wrong model spec, you might not get any code!
software_llm = LLMChain(
llm=OpenAI(
temperature=0,
max_tokens=4000
),
prompt=software_prompt
)
software_dev = PythonDeveloper(llm_chain=software_llm)
code_tool = Tool(
name="PythonSoftwareEngineer",
func=software_dev.run,
description=(
"Useful for writing Python code. "
"Input: a task or function to write. "
"Output: a Python code that solves the task. "
),
args_schema=PythonExecutorInput
)
planner_tool = Tool(
name="TODO",
func=todo_llm.run,
description=(
"Useful for when you need to come up with requirements. "
"Input: an objective to create a todo list for. "
"Output: a todo list for that objective. "
"Please be very clear what the objective is!"
)
)
ddg_search = DuckDuckGoSearchResults()
tools: list[BaseTool] = [
code_tool,
Tool(
name="DDGSearch",
func=ddg_search.run,
description=(
"Useful for research and understanding background of objectives. "
"Input: an objective. "
"Output: background information about the objective. "
)
)
]
PREFIX = """You are an agent designed to write python code.
Chat History:
{chat_history}
You have access to a python REPL, which you can use to execute python code.
Once the code is complete and free of errors you are finished.
If it does not seem like you can write this code, just return "I struggle to implement this" as the answer.
"""
SUFFIX = """Begin! Your goal is to write software. If you get an error, debug your code and try again!"
Task: {input}
{agent_scratchpad}
"""
# memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
# prompt = ZeroShotAgent.create_prompt(
# tools, prefix=PREFIX,
# suffix=SUFFIX, input_variables=["input", "agent_scratchpad", "chat_history"]
# )
llm = OpenAI()
planner = load_chat_planner(llm)
executor = load_agent_executor(
llm,
tools,
verbose=True,
)
agent_executor = PlanAndExecute(
planner=planner,
executor=executor,
verbose=True,
handle_parsing_errors="Check your output and make sure it conforms!",
return_intermediate_steps=True
)
# agent = ZeroShotAgent(
# llm_chain=llm_chain,
# allowed_tools=tool_names,
# handle_parsing_errors="Check your output and make sure it conforms!",
# )
# agent_executor = AgentExecutor.from_agent_and_tools(
# agent=agent, tools=tools, verbose=True
# )
# # Logging of LLMChains
# verbose = False
# # If None, it will never stop
# max_iterations = 3
# baby_agi = BabyAGI.from_llm(
# llm=llm, vectorstore=vectorstore, verbose=verbose, max_iterations=max_iterations
# )
# agent_executor = create_python_agent(
# llm=OpenAI(temperature=0, max_tokens=1000),
# tool=code_excution,
# verbose=True,
# agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
# )
if __name__ == "__main__":
agent_executor.run("Write a tetris game in python!")
| [
"The output should be a list of the format {function name}: {requirements of the function}",
"Come up with a list of needed functions for this objective: {objective}",
"Use this when you need to break down a task into smaller chunks.",
"You are a planner who is an expert at coming up with requirements, ",
"required functions, for a given objective. ",
"You are a planner who is an expert at coming up with requirements, required functions, for a given objective. Use this when you need to break down a task into smaller chunks.The output should be a list of the format {function name}: {requirements of the function}Come up with a list of needed functions for this objective: {objective}"
] |
2024-01-10 | mccarvik/ai_langchain | 4ch~assistants.py |
import pdb
import sys
sys.path.append("..")
from config import set_environment
set_environment()
from langchain.chains import LLMCheckerChain
from langchain.llms import OpenAI
from langchain import PromptTemplate, OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.schema import StrOutputParser
# from langchain_decorators import llm_prompt
from langchain.chains.summarize import load_summarize_chain
from langchain.document_loaders import PyPDFLoader
from langchain.callbacks import get_openai_callback
from langchain.agents import (
AgentExecutor, AgentType, initialize_agent, load_tools
)
import streamlit as st
from langchain.callbacks import StreamlitCallbackHandler
def checkerchain():
llm = OpenAI(temperature=0.7)
text = "What type of mammal lays the biggest eggs?"
checker_chain = LLMCheckerChain.from_llm(llm, verbose=True)
print(checker_chain.run(text))
def prompting_func1():
prompt = """
Summarize this text in one sentence: {text}
"""
llm = OpenAI()
text = """Machine learning (ML) is the study of computer algorithms that improve
automatically through experience. It is seen as a subset of artificial intelligence.
Machine learning algorithms build a model based on sample data, known as
training data, in order to make predictions or decisions without being explicitly
programmed to do so. Machine learning algorithms are used in a wide variety of
applications, such as email filtering and computer vision, where it is difficult or
infeasible to develop a conventional algorithm for effectively performing the task."""
summary = llm(prompt.format(text=text))
print(summary)
def lc_StrOutputParser():
llm = OpenAI()
prompt = PromptTemplate.from_template(
"Summarize this text: {text}?"
)
text = "this is a text"
runnable = prompt | llm | StrOutputParser()
summary = runnable.invoke({"text": text})
print(summary)
# @llm_prompt
# def summarize(text:str, length="short") -> str:
# """
# Summarize this text in {length} length:
# {text}
# """
# return
def article_read():
template = """Article: { text }
You will generate increasingly concise, entity-dense summaries of the
above article.
Repeat the following 2 steps 5 times.
Step 1. Identify 1-3 informative entities (";" delimited) from the article
which are missing from the previously generated summary.
Step 2. Write a new, denser summary of identical length which covers every
entity and detail from the previous summary plus the missing entities.
A missing entity is:
- relevant to the main story,
- specific yet concise (5 words or fewer),
- novel (not in the previous summary),
- faithful (present in the article),
- anywhere (can be located anywhere in the article).
Guidelines:
- The first summary should be long (4-5 sentences, ~80 words) yet highly
non-specific, containing little information beyond the entities marked
as missing. Use overly verbose language and fillers (e.g., "this article
discusses") to reach ~80 words.
- Make every word count: rewrite the previous summary to improve flow and
make space for additional entities.
- Make space with fusion, compression, and removal of uninformative
phrases like "the article discusses".
- The summaries should become highly dense and concise yet self-contained,
i.e., easily understood without the article.
- Missing entities can appear anywhere in the new summary.
- Never drop entities from the previous summary. If space cannot be made,
add fewer new entities.
Remember, use the exact same number of words for each summary.
Answer in JSON. The JSON should be a list (length 5) of dictionaries whose
keys are "Missing_Entities" and "Denser_Summary".
"""
def map_reduce():
pdf_file_path = "<pdf_file_path>"
pdf_loader = PyPDFLoader(pdf_file_path)
docs = pdf_loader.load_and_split()
llm = OpenAI()
chain = load_summarize_chain(llm, chain_type="map_reduce")
chain.run(docs)
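# A hypothetical companion to map_reduce() above (not part of the collected
# source): for documents that fit into a single context window, the simpler
# "stuff" chain type skips the intermediate map step entirely.
def stuff_summarize(pdf_file_path: str = "<pdf_file_path>") -> str:
    docs = PyPDFLoader(pdf_file_path).load_and_split()
    chain = load_summarize_chain(OpenAI(), chain_type="stuff")
    return chain.run(docs)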
def callback_func():
    # Pipe the prompt into an LLM so the callback actually records token usage
    llm_chain = PromptTemplate.from_template("Tell me a joke about {topic}!") | OpenAI()
    with get_openai_callback() as cb:
        response = llm_chain.invoke(dict(topic="light bulbs"))
        print(response)
        print(f"Total Tokens: {cb.total_tokens}")
        print(f"Prompt Tokens: {cb.prompt_tokens}")
        print(f"Completion Tokens: {cb.completion_tokens}")
        print(f"Total Cost (USD): ${cb.total_cost}")
    input_list = [
        {"topic": "socks"},
        {"topic": "computer"},
        {"topic": "shoes"}
    ]
    print(llm_chain.batch(input_list))
# {
# "model": "gpt-3.5-turbo-0613",
# "object": "chat.completion",
# "usage": {
# "completion_tokens": 17,
# "prompt_tokens": 57,
# "total_tokens": 74
# }
# }
def load_agent() -> AgentExecutor:
llm = ChatOpenAI(temperature=0, streaming=True)
# DuckDuckGoSearchRun, wolfram alpha, arxiv search, wikipedia
# TODO: try wolfram-alpha!
tools = load_tools(
# tool_names=["ddg-search", "wolfram-alpha", "arxiv", "wikipedia"],
tool_names=["ddg-search", "arxiv", "wikipedia"],
llm=llm
)
return initialize_agent(
tools=tools, llm=llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True
)
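# A minimal, hypothetical smoke test for the agent above (not part of the
# collected source); it assumes the keys set by set_environment() are valid and
# that the duckduckgo-search, arxiv and wikipedia packages are installed.
def run_agent_once(question: str = "Who introduced the transformer architecture?") -> str:
    agent = load_agent()
    return agent.run(question)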
def streamlit_app():
chain = load_agent()
st_callback = StreamlitCallbackHandler(st.container())
if prompt := st.chat_input():
st.chat_message("user").write(prompt)
with st.chat_message("assistant"):
st_callback = StreamlitCallbackHandler(st.container())
response = chain.run(prompt, callbacks=[st_callback])
st.write(response)
# checkerchain()
# prompting_func1()
# summary = summarize(text="let me tell you a boring story from when I was young...")
# lc_StrOutputParser()
# article_read()
# map_reduce()
# callback_func()
# load_agent()
# streamlit_app() | [
"\n Summarize this text in one sentence: {text}\n ",
"Article: { text }\n You will generate increasingly concise, entity-dense summaries of the\n above article.\n Repeat the following 2 steps 5 times.\n Step 1. Identify 1-3 informative entities (\";\" delimited) from the article\n which are missing from the previously generated summary.\n Step 2. Write a new, denser summary of identical length which covers every\n entity and detail from the previous summary plus the missing entities.\n A missing entity is:\n - relevant to the main story,\n - specific yet concise (5 words or fewer),\n - novel (not in the previous summary),\n - faithful (present in the article),\n - anywhere (can be located anywhere in the article).\n Guidelines:\n - The first summary should be long (4-5 sentences, ~80 words) yet highly\n non-specific, containing little information beyond the entities marked\n as missing. Use overly verbose language and fillers (e.g., \"this article\n discusses\") to reach ~80 words.\n - Make every word count: rewrite the previous summary to improve flow and\n make space for additional entities.\n - Make space with fusion, compression, and removal of uninformative\n phrases like \"the article discusses\".\n - The summaries should become highly dense and concise yet self-contained,\n i.e., easily understood without the article.\n - Missing entities can appear anywhere in the new summary.\n - Never drop entities from the previous summary. If space cannot be made,\n add fewer new entities.\n Remember, use the exact same number of words for each summary.\n Answer in JSON. The JSON should be a list (length 5) of dictionaries whose\n keys are \"Missing_Entities\" and \"Denser_Summary\".\n ",
"Summarize this text: {text}?",
"Tell me a joke about {topic}!"
] |
2024-01-10 | mccarvik/ai_langchain | 5ch~chat_with_retrieval~chat_with_documents.py | """Chat with retrieval and embeddings."""
import logging
import os
import tempfile
from langchain.chains import (
ConversationalRetrievalChain,
FlareChain,
OpenAIModerationChain,
SimpleSequentialChain,
)
from langchain.chains.base import Chain
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import EmbeddingsFilter
from langchain.retrievers.document_compressors import LLMChainExtractor, LLMChainFilter
from langchain.schema import BaseRetriever, Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import DocArrayInMemorySearch
from utils import MEMORY, load_document
logging.basicConfig(encoding="utf-8", level=logging.INFO)
LOGGER = logging.getLogger()
# Setup LLM and QA chain; set temperature low to keep hallucinations in check
LLM = ChatOpenAI(
model_name="gpt-3.5-turbo", temperature=0, streaming=True
)
def configure_retriever(
docs: list[Document],
use_compression: bool = False
) -> BaseRetriever:
"""Retriever to use."""
    # Split each of the documents:
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=200)
splits = text_splitter.split_documents(docs)
# Create embeddings and store in vectordb:
embeddings = OpenAIEmbeddings()
# alternatively: HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
# Create vectordb with single call to embedding model for texts:
vectordb = DocArrayInMemorySearch.from_documents(splits, embeddings)
retriever = vectordb.as_retriever(
search_type="mmr", search_kwargs={
"k": 5,
"fetch_k": 4,
"include_metadata": True
},
)
if not use_compression:
return retriever
embeddings_filter = EmbeddingsFilter(
embeddings=embeddings, similarity_threshold=0.2
)
return ContextualCompressionRetriever(
base_compressor=embeddings_filter,
base_retriever=retriever,
)
def configure_chain(retriever: BaseRetriever, use_flare: bool = True) -> Chain:
"""Configure chain with a retriever.
Passing in a max_tokens_limit amount automatically
truncates the tokens when prompting your llm!
"""
params = dict(
llm=LLM,
retriever=retriever,
memory=MEMORY,
verbose=True,
max_tokens_limit=4000,
)
if use_flare:
# different set of parameters and init
# unfortunately, have to use "protected" class
return FlareChain.from_llm(
**params
)
return ConversationalRetrievalChain.from_llm(
**params
)
def configure_retrieval_chain(
uploaded_files,
use_compression: bool = False,
use_flare: bool = False,
use_moderation: bool = False
) -> Chain:
"""Read documents, configure retriever, and the chain."""
docs = []
temp_dir = tempfile.TemporaryDirectory()
for file in uploaded_files:
temp_filepath = os.path.join(temp_dir.name, file.name)
with open(temp_filepath, "wb") as f:
f.write(file.getvalue())
docs.extend(load_document(temp_filepath))
retriever = configure_retriever(docs=docs, use_compression=use_compression)
chain = configure_chain(retriever=retriever, use_flare=use_flare)
if not use_moderation:
return chain
moderation_chain = OpenAIModerationChain()
return SimpleSequentialChain(chains=[chain, moderation_chain])
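# --- Hypothetical usage sketch, not part of the collected module ---
# The chain is normally driven by the accompanying Streamlit app; this stand-in
# only duck-types the pieces of Streamlit's UploadedFile used above (`.name`
# and `.getvalue()`) and assumes a local "report.pdf" plus a valid OPENAI_API_KEY.
class _LocalUpload:
    def __init__(self, path: str):
        self.name = os.path.basename(path)
        self._path = path
    def getvalue(self) -> bytes:
        with open(self._path, "rb") as fh:
            return fh.read()
if __name__ == "__main__":
    demo_chain = configure_retrieval_chain([_LocalUpload("report.pdf")])
    print(demo_chain.run("What is this document about?"))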
| [] |
2024-01-10 | mccarvik/ai_langchain | 3ch~fakellm.py |
import pdb
import sys
sys.path.append("..")
from config import set_environment
import torch
from langchain.llms.fake import FakeListLLM
from langchain.agents import load_tools, initialize_agent, AgentType
from langchain.llms import OpenAI, HuggingFaceHub, VertexAI, GPT4All, Replicate, HuggingFacePipeline
from langchain.chat_models import JinaChat
from langchain.schema import HumanMessage, SystemMessage
from transformers import pipeline
from langchain import PromptTemplate, LLMChain
from huggingface_hub import list_models
set_environment()
tools = load_tools(["python_repl"])
customer_email = """
I hope this email finds you amidst an aura of understanding, despite the tangled mess of emotions swirling within me as I write to you. I am writing to pour my heart out about the recent unfortunate experience I had with one of your coffee machines that arrived ominously broken, evoking a profound sense of disbelief and despair.
To set the scene, let me paint you a picture of the moment I anxiously unwrapped the box containing my highly anticipated coffee machine. The blatant excitement coursing through my veins could rival the vigorous flow of coffee through its finest espresso artistry. However, what I discovered within broke not only my spirit but also any semblance of confidence I had placed in your esteemed brand.
Imagine, if you can, the utter shock and disbelief that took hold of me as I laid eyes on a disheveled and mangled coffee machine. Its once elegant exterior was marred by the scars of travel, resembling a war-torn soldier who had fought valiantly on the fields of some espresso battlefield. This heartbreaking display of negligence shattered my dreams of indulging in daily coffee perfection, leaving me emotionally distraught and inconsolable
""" # created by GPT-3.5
def fakellm_add():
responses = ["Action: Python_REPL\nAction Input: print(2 + 2)", "Final Answer: 4"]
llm = FakeListLLM(responses=responses)
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
agent.run("whats 2 + 2")
def openai_func1():
llm = OpenAI(temperature=0., model="text-davinci-003")
agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("whats 4 + 4")
def hugingface_func1():
llm = HuggingFaceHub(
model_kwargs={"temperature": 0.5, "max_length": 64},
repo_id="google/flan-t5-xxl"
)
prompt = "In which country is Tokyo?"
completion = llm(prompt)
print(completion)
def google_cloud_vertexai_func1():
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])
llm = VertexAI()
llm_chain = LLMChain(prompt=prompt, llm=llm, verbose=True)
question = "What NFL team won the Super Bowl in the year Justin Beiber was born?"
llm_chain.run(question)
def fizzbuzztest():
question = """
Given an integer n, return a string array answer (1-indexed) where:
answer[i] == "FizzBuzz" if i is divisible by 3 and 5.
answer[i] == "Fizz" if i is divisible by 3.
answer[i] == "Buzz" if i is divisible by 5.
answer[i] == i (as a string) if none of the above conditions are true.
"""
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])
# llm = VertexAI(model_name="code-bison")
llm = OpenAI(temperature=0., model="text-davinci-003")
llm_chain = LLMChain(prompt=prompt, llm=llm)
print(llm_chain.run(question))
def jinaai_func1():
chat = JinaChat(temperature=0.)
messages = [
HumanMessage(
content="Translate this sentence from English to French: I love generative AI!"
)
]
chat(messages)
chat = JinaChat(temperature=0.)
chat(
[
SystemMessage(
content="You help a user find a nutritious and tasty food to eat in one word."
),
HumanMessage(
content="I like pasta with cheese, but I need to eat more vegetables, what should I eat?"
)
]
)
def replicate_func1():
text2image = Replicate(
model="stability-ai/stable-diffusion:db21e45d3f7023abc2a46ee38a23973f6dce16bb082a930b0c49861f96d1e5bf",
input={"image_dimensions": "512x512"},
)
image_url = text2image("a book cover for a book about creating generative ai applications in Python")
print(image_url)
def huggingface_func2():
generate_text = pipeline(
model="aisquared/dlite-v1-355m",
torch_dtype=torch.bfloat16,
trust_remote_code=True,
device_map="auto",
framework="pt"
)
# pdb.set_trace()
print(generate_text("In this chapter, we'll discuss first steps with generative AI in Python."))
def huggingface_func3():
generate_text = pipeline(
model="aisquared/dlite-v1-355m",
torch_dtype=torch.bfloat16,
trust_remote_code=True,
device_map="auto",
framework="pt"
)
generate_text("In this chapter, we'll discuss first steps with generative AI in Python.")
template = """Question: {question} Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])
    # Wrap the raw transformers pipeline so LangChain's LLMChain accepts it as an LLM
    hf_llm = HuggingFacePipeline(pipeline=generate_text)
    llm_chain = LLMChain(prompt=prompt, llm=hf_llm)
question = "What is electroencephalography?"
print(llm_chain.run(question))
def gpt4all_func1():
model = GPT4All(model="mistral-7b-openorca.Q4_0.gguf")
# model = GPT4All(model="mistral-7b-openorca.Q4_0.gguf", n_ctx=512, n_threads=8)
response = model("We can run large language models locally for all kinds of applications, ")
def cust_service():
list_most_popular("text-classification")
list_most_popular("summarization")
summarizer = HuggingFaceHub(
repo_id="facebook/bart-large-cnn",
model_kwargs={"temperature":0, "max_length":180}
)
def summarize(llm, text) -> str:
return llm(f"Summarize this: {text}!")
summarize(summarizer, customer_email)
def cust_service2():
sentiment_model = pipeline(
task="sentiment-analysis",
model="cardiffnlp/twitter-roberta-base-sentiment"
)
print(sentiment_model(customer_email))
print(sentiment_model("I am so angry and sad, I want to kill myself!"))
print(sentiment_model("I am elated, I am so happy, this is the best thing that ever happened to me!"))
print(sentiment_model("I don't care. I guess it's ok, or not, I couldn't care one way or the other"))
def list_most_popular(task: str):
for rank, model in enumerate(list_models(filter=task, sort="downloads", direction=-1)):
if rank == 5:
break
print(f"{model.id}, {model.downloads}\n")
def cust_service3():
summarizer = HuggingFaceHub(
repo_id="facebook/bart-large-cnn",
model_kwargs={"temperature":0, "max_length":180}
)
print(summarize(summarizer, customer_email))
def summarize(llm, text) -> str:
return llm(f"Summarize this: {text}!")
def sum_func():
template = """Given this text, decide what is the issue the customer is
concerned about. Valid categories are these:
* product issues
* delivery problems
* missing or late orders
* wrong product
* cancellation request
* refund or exchange
* bad support experience
* no clear reason to be upset
Text: {email}
Category:
"""
prompt = PromptTemplate(template=template, input_variables=["email"])
# llm = VertexAI()
llm = OpenAI(temperature=0., model="text-davinci-003")
llm_chain = LLMChain(prompt=prompt, llm=llm, verbose=True)
print(llm_chain.run(customer_email))
# fakellm_add()
# openai_func1()
# hugingface_func1()
# google_cloud_vertexai_func1()
# fizzbuzztest()
# jinaai_func1()
# replicate_func1()
# huggingface_func3()
# gpt4all_func1()
# cust_service()
# cust_service2()
# cust_service3()
# sum_func()
huggingface_func2()
| [
"I like pasta with cheese, but I need to eat more vegetables, what should I eat?",
"Question: {question}\n Answer: Let's think step by step.",
"question",
"You help a user find a nutritious and tasty food to eat in one word.",
"Question: {question} Answer: Let's think step by step.",
"Given this text, decide what is the issue the customer is\n concerned about. Valid categories are these:\n * product issues\n * delivery problems\n * missing or late orders\n * wrong product\n * cancellation request\n * refund or exchange\n * bad support experience\n * no clear reason to be upset\n Text: {email}\n Category:\n ",
"In which country is Tokyo?",
"Translate this sentence from English to French: I love generative AI!"
] |
2024-01-10 | mccarvik/ai_langchain | 8ch~prompting~tree_of_thought.py |
import sys
sys.path.append("..")
sys.path.append("../..")
sys.path.append("c://users//mccar//miniconda3//lib//site-packages")
from config import set_environment
set_environment()
from langchain import PromptTemplate
from langchain.chains import SequentialChain
from langchain.chains.llm import LLMChain
from langchain.chat_models import ChatOpenAI
solutions_template = """
Generate {num_solutions} distinct solutions for {problem}. Consider factors like {factors}.
Solutions:
"""
solutions_prompt = PromptTemplate(
template=solutions_template,
input_variables=["problem", "factors", "num_solutions"]
)
evaluation_template = """
Evaluate each solution in {solutions} by analyzing pros, cons, feasibility, and probability of success.
Evaluations:
"""
evaluation_prompt = PromptTemplate(
template=evaluation_template,
input_variables=["solutions"]
)
reasoning_template = """
For the most promising solutions in {evaluations}, explain scenarios, implementation strategies, partnerships needed, and handling potential obstacles.
Enhanced Reasoning:
"""
reasoning_prompt = PromptTemplate(
template=reasoning_template,
input_variables=["evaluations"]
)
ranking_template = """
Based on the evaluations and reasoning, rank the solutions in {enhanced_reasoning} from most to least promising.
Ranked Solutions:
"""
ranking_prompt = PromptTemplate(
template=ranking_template,
input_variables=["enhanced_reasoning"]
)
solutions_chain = LLMChain(
llm=ChatOpenAI(),
prompt=solutions_prompt,
output_key="solutions"
)
evalutation_chain = LLMChain(
llm=ChatOpenAI(),
prompt=evaluation_prompt,
output_key="evaluations"
)
reasoning_chain = LLMChain(
llm=ChatOpenAI(),
prompt=reasoning_prompt,
output_key="enhanced_reasoning"
)
ranking_chain = LLMChain(
llm=ChatOpenAI(),
prompt=ranking_prompt,
output_key="ranked_solutions"
)
tot_chain = SequentialChain(
chains=[solutions_chain, evalutation_chain, reasoning_chain, ranking_chain],
input_variables=["problem", "factors", "num_solutions"],
output_variables=["ranked_solutions"]
)
print(tot_chain.run(
problem="Prompt engineering",
factors="Requirements for high task performance, low token use, and few calls to the LLM",
num_solutions=3
))
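# Hypothetical variant (not part of the collected source): building the chain
# with return_all=True and no explicit output_variables also surfaces the
# intermediate solutions/evaluations/reasoning keys, which helps when debugging
# the individual steps of the tree.
def build_debug_tot_chain() -> SequentialChain:
    return SequentialChain(
        chains=[solutions_chain, evalutation_chain, reasoning_chain, ranking_chain],
        input_variables=["problem", "factors", "num_solutions"],
        return_all=True,
    )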
if __name__ == "__main__":
pass
| [
"\nEvaluate each solution in {solutions} by analyzing pros, cons, feasibility, and probability of success.\n\nEvaluations:\n",
"evaluations",
"\nGenerate {num_solutions} distinct solutions for {problem}. Consider factors like {factors}.\n\nSolutions:\n",
"solutions",
"\nBased on the evaluations and reasoning, rank the solutions in {enhanced_reasoning} from most to least promising.\n\nRanked Solutions:\n",
"enhanced_reasoning",
"\nFor the most promising solutions in {evaluations}, explain scenarios, implementation strategies, partnerships needed, and handling potential obstacles. \n\nEnhanced Reasoning: \n",
"num_solutions"
] |
2024-01-10 | fire17/MagicLLight | oi~interpreter~interpreter.py | """
Right off the bat, to any contributors (a message from Killian):
First of all, THANK YOU. Open Interpreter is ALIVE, ALL OVER THE WORLD because of YOU.
While this project is rapidly growing, I've decided it's best for us to allow some technical debt.
The code here has duplication. It has imports in weird places. It has been spaghettified to add features more quickly.
In my opinion **this is critical** to keep up with the pace of demand for this project.
At the same time, I plan on pushing a significant re-factor of `interpreter.py` and `code_interpreter.py` ~ September 16th.
After the re-factor, Open Interpreter's source code will be much simpler, and much more fun to dive into.
Especially if you have ideas and **EXCITEMENT** about the future of this project, chat with me on discord: https://discord.gg/6p3fD6rBVm
- killian
"""
from .cli import cli
from .utils import merge_deltas, parse_partial_json
from .message_block import MessageBlock
from .code_block import CodeBlock
from .code_interpreter import CodeInterpreter
from .get_hf_llm import get_hf_llm
import os
import time
import traceback
import json
import platform
import openai
import litellm
import pkg_resources
import getpass
import requests
import tokentrim as tt
#from .hook import xprint as print
from .hook import hook, export
from rich import print
from rich.markdown import Markdown
from rich.rule import Rule
try:
import readline
except:
# Sometimes this doesn't work (https://stackoverflow.com/questions/10313765/simple-swig-python-example-in-vs2008-import-error-internal-pyreadline-erro)
pass
# Function schema for gpt-4
function_schema = {
"name": "run_code",
"description":
"Executes code on the user's machine and returns the output",
"parameters": {
"type": "object",
"properties": {
"language": {
"type": "string",
"description":
"The programming language",
"enum": ["python", "R", "shell", "applescript", "javascript", "html"]
},
"code": {
"type": "string",
"description": "The code to execute"
}
},
"required": ["language", "code"]
},
}
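# Hypothetical illustration (not part of the original module): while the model
# streams a `run_code` call, the `arguments` field arrives as an incomplete JSON
# fragment; `parse_partial_json` (imported above) is what turns such a prefix
# into a usable dict before the stream has finished.
def _demo_parse_streamed_arguments():
    streamed_prefix = '{"language": "python", "code": "print(1 +'
    return parse_partial_json(streamed_prefix)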
# Message for when users don't have an OpenAI API key.
missing_api_key_message = """> OpenAI API key not found
To use `GPT-4` (recommended) please provide an OpenAI API key.
To use `Code-Llama` (free but less capable) press `enter`.
"""
# Message for when users don't have an OpenAI API key.
missing_azure_info_message = """> Azure OpenAI Service API info not found
To use `GPT-4` (recommended) please provide an Azure OpenAI API key, a API base, a deployment name and a API version.
To use `Code-Llama` (free but less capable) press `enter`.
"""
confirm_mode_message = """
**Open Interpreter** will require approval before running code. Use `interpreter -y` to bypass this.
Press `CTRL-C` to exit.
"""
class Interpreter:
def __init__(self):
self.messages = []
self.temperature = 0.001
self.api_key = None
self.auto_run = False
self.local = False
# self.model = "gpt-4"
self.model = "gpt-3.5-turbo"
self.debug_mode = False
self.api_base = None # Will set it to whatever OpenAI wants
self.context_window = 2000 # For local models only
self.max_tokens = 750 # For local models only
# Azure OpenAI
self.use_azure = False
self.azure_api_base = None
self.azure_api_version = None
self.azure_deployment_name = None
self.azure_api_type = "azure"
# Get default system message
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'system_message.txt'), 'r') as f:
self.system_message = f.read().strip()
# Store Code Interpreter instances for each language
self.code_interpreters = {}
# No active block to start
# (blocks are visual representation of messages on the terminal)
self.active_block = None
# Note: While Open Interpreter can use Llama, we will prioritize gpt-4.
# gpt-4 is faster, smarter, can call functions, and is all-around easier to use.
        # This makes gpt-4 better aligned with Open Interpreter's priority to be easy to use.
self.llama_instance = None
def cli(self):
# The cli takes the current instance of Interpreter,
# modifies it according to command line flags, then runs chat.
cli(self)
def get_info_for_system_message(self):
"""
Gets relevant information for the system message.
"""
info = ""
# Add user info
username = getpass.getuser()
current_working_directory = os.getcwd()
operating_system = platform.system()
info += f"[User Info]\nName: {username}\nCWD: {current_working_directory}\nOS: {operating_system}"
if not self.local:
# Open Procedures is an open-source database of tiny, structured coding tutorials.
# We can query it semantically and append relevant tutorials/procedures to our system message:
# Use the last two messages' content or function call to semantically search
query = []
for message in self.messages[-2:]:
message_for_semantic_search = {"role": message["role"]}
if "content" in message:
message_for_semantic_search["content"] = message["content"]
if "function_call" in message and "parsed_arguments" in message["function_call"]:
message_for_semantic_search["function_call"] = message["function_call"]["parsed_arguments"]
query.append(message_for_semantic_search)
# Use them to query Open Procedures
url = "https://open-procedures.replit.app/search/"
try:
relevant_procedures = requests.get(url, data=json.dumps(query)).json()["procedures"]
info += "\n\n# Recommended Procedures\n" + "\n---\n".join(relevant_procedures) + "\nIn your plan, include steps and, if present, **EXACT CODE SNIPPETS** (especially for depracation notices, **WRITE THEM INTO YOUR PLAN -- underneath each numbered step** as they will VANISH once you execute your first line of code, so WRITE THEM DOWN NOW if you need them) from the above procedures if they are relevant to the task. Again, include **VERBATIM CODE SNIPPETS** from the procedures above if they are relevent to the task **directly in your plan.**"
except:
# For someone, this failed for a super secure SSL reason.
                # Since it's not strictly necessary, let's worry about that another day. Should probably log this somehow though.
pass
elif self.local:
# Tell Code-Llama how to run code.
info += "\n\nTo run code, write a fenced code block (i.e ```python, R or ```shell) in markdown. When you close it with ```, it will be run. You'll then be given its output."
# We make references in system_message.txt to the "function" it can call, "run_code".
return info
def reset(self):
"""
Resets the interpreter.
"""
self.messages = []
self.code_interpreters = {}
def load(self, messages):
self.messages = messages
def handle_undo(self, arguments):
# Removes all messages after the most recent user entry (and the entry itself).
        # Therefore the user can jump back to the latest point of conversation.
# Also gives a visual representation of the messages removed.
if len(self.messages) == 0:
return
# Find the index of the last 'role': 'user' entry
last_user_index = None
for i, message in enumerate(self.messages):
if message.get('role') == 'user':
last_user_index = i
removed_messages = []
# Remove all messages after the last 'role': 'user'
if last_user_index is not None:
removed_messages = self.messages[last_user_index:]
self.messages = self.messages[:last_user_index]
print("") # Aesthetics.
# Print out a preview of what messages were removed.
for message in removed_messages:
if 'content' in message and message['content'] != None:
print(Markdown(f"**Removed message:** `\"{message['content'][:30]}...\"`"))
elif 'function_call' in message:
print(Markdown(f"**Removed codeblock**")) # TODO: Could add preview of code removed here.
print("") # Aesthetics.
def handle_help(self, arguments):
commands_description = {
"%debug [true/false]": "Toggle debug mode. Without arguments or with 'true', it enters debug mode. With 'false', it exits debug mode.",
"%reset": "Resets the current session.",
"%undo": "Remove previous messages and its response from the message history.",
"%save_message [path]": "Saves messages to a specified JSON path. If no path is provided, it defaults to 'messages.json'.",
"%load_message [path]": "Loads messages from a specified JSON path. If no path is provided, it defaults to 'messages.json'.",
"%help": "Show this help message.",
}
base_message = [
"> **Available Commands:**\n\n"
]
# Add each command and its description to the message
for cmd, desc in commands_description.items():
base_message.append(f"- `{cmd}`: {desc}\n")
additional_info = [
"\n\nFor further assistance, please join our community Discord or consider contributing to the project's development."
]
# Combine the base message with the additional info
full_message = base_message + additional_info
print(Markdown("".join(full_message)))
def handle_debug(self, arguments=None):
if arguments == "" or arguments == "true":
print(Markdown("> Entered debug mode"))
print(self.messages)
self.debug_mode = True
elif arguments == "false":
print(Markdown("> Exited debug mode"))
self.debug_mode = False
else:
print(Markdown("> Unknown argument to debug command."))
def handle_reset(self, arguments):
self.reset()
print(Markdown("> Reset Done"))
def default_handle(self, arguments):
print(Markdown("> Unknown command"))
self.handle_help(arguments)
def handle_save_message(self, json_path):
if json_path == "":
json_path = "messages.json"
if not json_path.endswith(".json"):
json_path += ".json"
with open(json_path, 'w') as f:
json.dump(self.messages, f, indent=2)
print(Markdown(f"> messages json export to {os.path.abspath(json_path)}"))
def handle_load_message(self, json_path):
if json_path == "":
json_path = "messages.json"
if not json_path.endswith(".json"):
json_path += ".json"
with open(json_path, 'r') as f:
self.load(json.load(f))
print(Markdown(f"> messages json loaded from {os.path.abspath(json_path)}"))
def handle_command(self, user_input):
# split the command into the command and the arguments, by the first whitespace
switch = {
"help": self.handle_help,
"debug": self.handle_debug,
"reset": self.handle_reset,
"save_message": self.handle_save_message,
"load_message": self.handle_load_message,
"undo": self.handle_undo,
}
user_input = user_input[1:].strip() # Capture the part after the `%`
command = user_input.split(" ")[0]
arguments = user_input[len(command):].strip()
action = switch.get(command,
self.default_handle) # Get the function from the dictionary, or default_handle if not found
action(arguments) # Execute the function
def chat(self, message=None, return_messages=False, stream_back=False):
# Connect to an LLM (an large language model)
if not self.local:
# gpt-4
self.verify_api_key()
# ^ verify_api_key may set self.local to True, so we run this as an 'if', not 'elif':
if self.local:
# Code-Llama
if self.llama_instance == None:
# Find or install Code-Llama
try:
self.llama_instance = get_hf_llm(self.model, self.debug_mode, self.context_window)
if self.llama_instance == None:
# They cancelled.
return
except:
traceback.print_exc()
# If it didn't work, apologize and switch to GPT-4
print(Markdown("".join([
f"> Failed to install `{self.model}`.",
f"\n\n**Common Fixes:** You can follow our simple setup docs at the link below to resolve common errors.\n\n```\nhttps://github.com/KillianLucas/open-interpreter/tree/main/docs\n```",
f"\n\n**If you've tried that and you're still getting an error, we have likely not built the proper `{self.model}` support for your system.**",
"\n\n*( Running language models locally is a difficult task!* If you have insight into the best way to implement this across platforms/architectures, please join the Open Interpreter community Discord and consider contributing the project's development. )",
"\n\nPress enter to switch to `GPT-4` (recommended)."
])))
input()
# Switch to GPT-4
self.local = False
# self.model = "gpt-4"
self.model = "gpt-3.5-turbo"
self.verify_api_key()
# Display welcome message
welcome_message = ""
if self.debug_mode:
welcome_message += "> Entered debug mode"
# If self.local, we actually don't use self.model
# (self.auto_run is like advanced usage, we display no messages)
if not self.local and not self.auto_run:
if self.use_azure:
notice_model = f"{self.azure_deployment_name} (Azure)"
else:
notice_model = f"{self.model.upper()}"
welcome_message += f"\n> Model set to `{notice_model}`\n\n**Tip:** To run locally, use `interpreter --local`"
if self.local:
welcome_message += f"\n> Model set to `{self.model}`"
# If not auto_run, tell the user we'll ask permission to run code
# We also tell them here how to exit Open Interpreter
if not self.auto_run:
welcome_message += "\n\n" + confirm_mode_message
welcome_message = welcome_message.strip()
# Print welcome message with newlines on either side (aesthetic choice)
# unless we're starting with a blockquote (aesthetic choice)
if welcome_message != "":
if welcome_message.startswith(">"):
print(Markdown(welcome_message), '')
else:
print('', Markdown(welcome_message), '')
# Check if `message` was passed in by user
if message:
            # If it was, we respond non-interactively
self.messages.append({"role": "user", "content": message})
if stream_back:
for partial in self.respond(stream_back=stream_back) :
yield partial
yield export("start_chat", {"last":True})#,{"data":self.messages[-1]["content"], "type":"content"})
yield export("chunk",{"data":self.messages[-1]["content"], "type":"content", "last":True})
#yield export("finished_chat",{"data":self.messages[-1]["content"], "type":"content"})
yield export("done", {"reason":"finished"})
else:
self.respond()
else:
# If it wasn't, we start an interactive chat
while True:
try:
user_input = input("> ").strip()
except EOFError:
break
except KeyboardInterrupt:
print() # Aesthetic choice
break
# Use `readline` to let users up-arrow to previous user messages,
# which is a common behavior in terminals.
try:
readline.add_history(user_input)
except:
# Sometimes this doesn't work (https://stackoverflow.com/questions/10313765/simple-swig-python-example-in-vs2008-import-error-internal-pyreadline-erro)
pass
# If the user input starts with a `%`
if user_input.startswith("%"):
self.handle_command(user_input)
continue
# Add the user message to self.messages
self.messages.append({"role": "user", "content": user_input})
# Respond, but gracefully handle CTRL-C / KeyboardInterrupt
try:
if stream_back:
yield self.respond(stream_back=stream_back)
else:
self.respond()
except KeyboardInterrupt:
pass
finally:
# Always end the active block. Multiple Live displays = issues
self.end_active_block()
if return_messages:
return self.messages
def verify_api_key(self):
"""
Makes sure we have an AZURE_API_KEY or OPENAI_API_KEY.
"""
if self.use_azure:
all_env_available = (
('AZURE_API_KEY' in os.environ or 'OPENAI_API_KEY' in os.environ) and
'AZURE_API_BASE' in os.environ and
'AZURE_API_VERSION' in os.environ and
'AZURE_DEPLOYMENT_NAME' in os.environ)
if all_env_available:
self.api_key = os.environ.get('AZURE_API_KEY') or os.environ['OPENAI_API_KEY']
self.azure_api_base = os.environ['AZURE_API_BASE']
self.azure_api_version = os.environ['AZURE_API_VERSION']
self.azure_deployment_name = os.environ['AZURE_DEPLOYMENT_NAME']
self.azure_api_type = os.environ.get('AZURE_API_TYPE', 'azure')
else:
# This is probably their first time here!
self._print_welcome_message()
time.sleep(1)
print(Rule(style="white"))
print(Markdown(missing_azure_info_message), '', Rule(style="white"), '')
response = input("Azure OpenAI API key: ")
if response == "":
# User pressed `enter`, requesting Code-Llama
print(Markdown(
"> Switching to `Code-Llama`...\n\n**Tip:** Run `interpreter --local` to automatically use `Code-Llama`."),
'')
time.sleep(2)
print(Rule(style="white"))
                    # Temporarily, for backwards (behavioral) compatibility, we've moved this part of llama_2.py here.
# AND BELOW.
# This way, when folks hit interpreter --local, they get the same experience as before.
import inquirer
print('', Markdown("**Open Interpreter** will use `Code Llama` for local execution. Use your arrow keys to set up the model."), '')
models = {
'7B': 'TheBloke/CodeLlama-7B-Instruct-GGUF',
'13B': 'TheBloke/CodeLlama-13B-Instruct-GGUF',
'34B': 'TheBloke/CodeLlama-34B-Instruct-GGUF'
}
parameter_choices = list(models.keys())
questions = [inquirer.List('param', message="Parameter count (smaller is faster, larger is more capable)", choices=parameter_choices)]
answers = inquirer.prompt(questions)
chosen_param = answers['param']
# THIS is more in line with the future. You just say the model you want by name:
self.model = models[chosen_param]
self.local = True
return
else:
self.api_key = response
self.azure_api_base = input("Azure OpenAI API base: ")
self.azure_deployment_name = input("Azure OpenAI deployment name of GPT: ")
self.azure_api_version = input("Azure OpenAI API version: ")
print('', Markdown(
"**Tip:** To save this key for later, run `export AZURE_API_KEY=your_api_key AZURE_API_BASE=your_api_base AZURE_API_VERSION=your_api_version AZURE_DEPLOYMENT_NAME=your_gpt_deployment_name` on Mac/Linux or `setx AZURE_API_KEY your_api_key AZURE_API_BASE your_api_base AZURE_API_VERSION your_api_version AZURE_DEPLOYMENT_NAME your_gpt_deployment_name` on Windows."),
'')
time.sleep(2)
print(Rule(style="white"))
litellm.api_type = self.azure_api_type
litellm.api_base = self.azure_api_base
litellm.api_version = self.azure_api_version
litellm.api_key = self.api_key
else:
if self.api_key == None:
if 'OPENAI_API_KEY' in os.environ:
self.api_key = os.environ['OPENAI_API_KEY']
else:
# This is probably their first time here!
self._print_welcome_message()
time.sleep(1)
print(Rule(style="white"))
print(Markdown(missing_api_key_message), '', Rule(style="white"), '')
response = input("OpenAI API key: ")
if response == "":
# User pressed `enter`, requesting Code-Llama
print(Markdown(
"> Switching to `Code-Llama`...\n\n**Tip:** Run `interpreter --local` to automatically use `Code-Llama`."),
'')
time.sleep(2)
print(Rule(style="white"))
                        # Temporarily, for backwards (behavioral) compatibility, we've moved this part of llama_2.py here.
# AND ABOVE.
# This way, when folks hit interpreter --local, they get the same experience as before.
import inquirer
print('', Markdown("**Open Interpreter** will use `Code Llama` for local execution. Use your arrow keys to set up the model."), '')
models = {
'7B': 'TheBloke/CodeLlama-7B-Instruct-GGUF',
'13B': 'TheBloke/CodeLlama-13B-Instruct-GGUF',
'34B': 'TheBloke/CodeLlama-34B-Instruct-GGUF'
}
parameter_choices = list(models.keys())
questions = [inquirer.List('param', message="Parameter count (smaller is faster, larger is more capable)", choices=parameter_choices)]
answers = inquirer.prompt(questions)
chosen_param = answers['param']
# THIS is more in line with the future. You just say the model you want by name:
self.model = models[chosen_param]
self.local = True
return
else:
self.api_key = response
print('', Markdown("**Tip:** To save this key for later, run `export OPENAI_API_KEY=your_api_key` on Mac/Linux or `setx OPENAI_API_KEY your_api_key` on Windows."), '')
time.sleep(2)
print(Rule(style="white"))
litellm.api_key = self.api_key
if self.api_base:
litellm.api_base = self.api_base
def end_active_block(self):
if self.active_block:
self.active_block.end()
self.active_block = None
def respond(self, stream_back=False):
# Add relevant info to system_message
# (e.g. current working directory, username, os, etc.)
info = self.get_info_for_system_message()
# This is hacky, as we should have a different (minified) prompt for CodeLLama,
# but for now, to make the prompt shorter and remove "run_code" references, just get the first 2 lines:
if self.local:
self.system_message = "\n".join(self.system_message.split("\n")[:2])
self.system_message += "\nOnly do what the user asks you to do, then ask what they'd like to do next."
system_message = self.system_message + "\n\n" + info
if self.local:
messages = tt.trim(self.messages, max_tokens=(self.context_window-self.max_tokens-25), system_message=system_message)
else:
messages = tt.trim(self.messages, self.model, system_message=system_message)
if self.debug_mode:
print("\n", "Sending `messages` to LLM:", "\n")
print(messages)
print()
# Make LLM call
if not self.local:
# GPT
error = ""
for _ in range(3): # 3 retries
try:
if self.use_azure:
response = litellm.completion(
f"azure/{self.azure_deployment_name}",
messages=messages,
functions=[function_schema],
temperature=self.temperature,
stream=True,
)
else:
if self.api_base:
# The user set the api_base. litellm needs this to be "custom/{model}"
response = litellm.completion(
api_base=self.api_base,
model = "custom/" + self.model,
messages=messages,
functions=[function_schema],
stream=True,
temperature=self.temperature,
)
else:
# Normal OpenAI call
response = litellm.completion(
model=self.model,
messages=messages,
functions=[function_schema],
stream=True,
temperature=self.temperature,
)
break
except:
if self.debug_mode:
traceback.print_exc()
error = traceback.format_exc()
time.sleep(3)
else:
raise Exception(error)
elif self.local:
# Code-Llama
# Convert messages to prompt
# (This only works if the first message is the only system message)
def messages_to_prompt(messages):
for message in messages:
                    # Happens if it immediately writes code
if "role" not in message:
message["role"] = "assistant"
# Falcon prompt template
if "falcon" in self.model.lower():
formatted_messages = ""
for message in messages:
formatted_messages += f"{message['role'].capitalize()}: {message['content']}\n"
formatted_messages = formatted_messages.strip()
else:
# Llama prompt template
# Extracting the system prompt and initializing the formatted string with it.
system_prompt = messages[0]['content']
formatted_messages = f"<s>[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n"
# Loop starting from the first user message
for index, item in enumerate(messages[1:]):
role = item['role']
content = item['content']
if role == 'user':
formatted_messages += f"{content} [/INST] "
elif role == 'function':
formatted_messages += f"Output: {content} [/INST] "
elif role == 'assistant':
formatted_messages += f"{content} </s><s>[INST] "
# Remove the trailing '<s>[INST] ' from the final output
if formatted_messages.endswith("<s>[INST] "):
formatted_messages = formatted_messages[:-10]
return formatted_messages
prompt = messages_to_prompt(messages)
# Lmao i can't believe this works (it does need this btw)
if messages[-1]["role"] != "function":
prompt += "Let's explore this. By the way, I can run code on your machine by writing the code in a markdown code block. This works for shell, javascript, python, R, and applescript. I'm going to try to do this for your task. Anyway, "
elif messages[-1]["role"] == "function" and messages[-1]["content"] != "No output":
prompt += "Given the output of the code I just ran, "
elif messages[-1]["role"] == "function" and messages[-1]["content"] == "No output":
prompt += "Given the fact that the code I just ran produced no output, "
if self.debug_mode:
# we have to use builtins bizarrely! because rich.print interprets "[INST]" as something meaningful
import builtins
builtins.print("TEXT PROMPT SEND TO LLM:\n", prompt)
# Run Code-Llama
response = self.llama_instance(
prompt,
stream=True,
temperature=self.temperature,
stop=["</s>"],
max_tokens=750 # context window is set to 1800, messages are trimmed to 1000... 700 seems nice
)
# Initialize message, function call trackers, and active block
self.messages.append({})
in_function_call = False
llama_function_call_finished = False
self.active_block = None
for chunk in response:
# print("%%%%%%%%%%%%%% ",chunk)
if stream_back:
yield hook(chunk)
if self.use_azure and ('choices' not in chunk or len(chunk['choices']) == 0):
# Azure OpenAI Service may return empty chunk
continue
if self.local:
if "content" not in messages[-1]:
# This is the first chunk. We'll need to capitalize it, because our prompt ends in a ", "
chunk["choices"][0]["text"] = chunk["choices"][0]["text"].capitalize()
# We'll also need to add "role: assistant", CodeLlama will not generate this
messages[-1]["role"] = "assistant"
delta = {"content": chunk["choices"][0]["text"]}
else:
delta = chunk["choices"][0]["delta"]
# Accumulate deltas into the last message in messages
self.messages[-1] = merge_deltas(self.messages[-1], delta)
# Check if we're in a function call
if not self.local:
condition = "function_call" in self.messages[-1]
elif self.local:
# Since Code-Llama can't call functions, we just check if we're in a code block.
# This simply returns true if the number of "```" in the message is odd.
if "content" in self.messages[-1]:
condition = self.messages[-1]["content"].count("```") % 2 == 1
else:
# If it hasn't made "content" yet, we're certainly not in a function call.
condition = False
if condition:
# We are in a function call.
# Check if we just entered a function call
if in_function_call == False:
# If so, end the last block,
self.end_active_block()
# Print newline if it was just a code block or user message
# (this just looks nice)
last_role = self.messages[-2]["role"]
if last_role == "user" or last_role == "function":
print()
# then create a new code block
self.active_block = CodeBlock()
# Remember we're in a function_call
in_function_call = True
# Now let's parse the function's arguments:
if not self.local:
# gpt-4
# Parse arguments and save to parsed_arguments, under function_call
if "arguments" in self.messages[-1]["function_call"]:
arguments = self.messages[-1]["function_call"]["arguments"]
new_parsed_arguments = parse_partial_json(arguments)
if new_parsed_arguments:
# Only overwrite what we have if it's not None (which means it failed to parse)
self.messages[-1]["function_call"][
"parsed_arguments"] = new_parsed_arguments
elif self.local:
# Code-Llama
# Parse current code block and save to parsed_arguments, under function_call
if "content" in self.messages[-1]:
content = self.messages[-1]["content"]
if "```" in content:
# Split by "```" to get the last open code block
blocks = content.split("```")
current_code_block = blocks[-1]
lines = current_code_block.split("\n")
if content.strip() == "```": # Hasn't outputted a language yet
language = None
else:
if lines[0] != "":
language = lines[0].strip()
else:
language = "python"
# In anticipation of its dumbassery let's check if "pip" is in there
if len(lines) > 1:
if lines[1].startswith("pip"):
language = "shell"
# Join all lines except for the language line
code = '\n'.join(lines[1:]).strip("` \n")
arguments = {"code": code}
if language: # We only add this if we have it-- the second we have it, an interpreter gets fired up (I think? maybe I'm wrong)
if language == "bash":
language = "shell"
arguments["language"] = language
# Code-Llama won't make a "function_call" property for us to store this under, so:
if "function_call" not in self.messages[-1]:
self.messages[-1]["function_call"] = {}
self.messages[-1]["function_call"]["parsed_arguments"] = arguments
else:
# We are not in a function call.
# Check if we just left a function call
if in_function_call == True:
if self.local:
# This is the same as when gpt-4 gives finish_reason as function_call.
# We have just finished a code block, so now we should run it.
llama_function_call_finished = True
# Remember we're not in a function_call
in_function_call = False
# If there's no active block,
if self.active_block == None:
# Create a message block
self.active_block = MessageBlock()
# Update active_block
self.active_block.update_from_message(self.messages[-1])
# Check if we're finished
if chunk["choices"][0]["finish_reason"] or llama_function_call_finished:
if chunk["choices"][
0]["finish_reason"] == "function_call" or llama_function_call_finished:
# Time to call the function!
# (Because this is Open Interpreter, we only have one function.)
if self.debug_mode:
print("Running function:")
print(self.messages[-1])
print("---")
# Ask for user confirmation to run code
if self.auto_run == False:
# End the active block so you can run input() below it
# Save language and code so we can create a new block in a moment
self.active_block.end()
language = self.active_block.language
code = self.active_block.code
# Prompt user
print(" Would you like to run this code? (y/n)\n\n ")
response = input()
print("") # <- Aesthetic choice
if response.strip().lower() == "y":
# Create a new, identical block where the code will actually be run
self.active_block = CodeBlock()
self.active_block.language = language
self.active_block.code = code
else:
# #EXPORT HERE User declined to run code.
# User declined to run code.
self.active_block.end()
self.messages.append({
"role":
"function",
"name":
"run_code",
"content":
"User decided not to run this code."
})
return
# If we couldn't parse its arguments, we need to try again.
if not self.local and "parsed_arguments" not in self.messages[-1]["function_call"]:
# After collecting some data via the below instruction to users,
# This is the most common failure pattern: https://github.com/KillianLucas/open-interpreter/issues/41
# print("> Function call could not be parsed.\n\nPlease open an issue on Github (openinterpreter.com, click Github) and paste the following:")
# print("\n", self.messages[-1]["function_call"], "\n")
# time.sleep(2)
# print("Informing the language model and continuing...")
# Since it can't really be fixed without something complex,
# let's just berate the LLM then go around again.
self.messages.append({
"role": "function",
"name": "run_code",
"content": """Your function call could not be parsed. Please use ONLY the `run_code` function, which takes two parameters: `code` and `language`. Your response should be formatted as a JSON."""
})
self.respond(stream_back=stream_back)
return
# Create or retrieve a Code Interpreter for this language
language = self.messages[-1]["function_call"]["parsed_arguments"][
"language"]
if language not in self.code_interpreters:
self.code_interpreters[language] = CodeInterpreter(language, self.debug_mode)
code_interpreter = self.code_interpreters[language]
# Let this Code Interpreter control the active_block
code_interpreter.active_block = self.active_block
code_interpreter.run()
#EXPORT HERE ? call output here???
# End the active_block
self.active_block.end()
#EXPORT FIN EVENT HERE
# Append the output to messages
# Explicitly tell it if there was no output (sometimes "" = hallucinates output)
self.messages.append({
"role": "function",
"name": "run_code",
"content": self.active_block.output if self.active_block.output else "No output"
})
# Go around again
self.respond(stream_back=stream_back)
if chunk["choices"][0]["finish_reason"] != "function_call":
# Done!
# Code Llama likes to output "###" at the end of every message for some reason
if self.local and "content" in self.messages[-1]:
self.messages[-1]["content"] = self.messages[-1]["content"].strip().rstrip("#")
self.active_block.update_from_message(self.messages[-1])
time.sleep(0.1)
self.active_block.end()
#EXPORT FIN EVENT HERE
return
def _print_welcome_message(self):
print("", Markdown("●"), "", Markdown(f"\nWelcome to **Open Interpreter**.\n"), "")
| [
"User decided not to run this code.",
"No output",
"Your function call could not be parsed. Please use ONLY the `run_code` function, which takes two parameters: `code` and `language`. Your response should be formatted as a JSON.",
"Let's explore this. By the way, I can run code on your machine by writing the code in a markdown code block. This works for shell, javascript, python, R, and applescript. I'm going to try to do this for your task. Anyway, ",
"Given the output of the code I just ran, ",
"content",
"Given the fact that the code I just ran produced no output, "
] |
2024-01-10 | polyrabbit/hacker-news-digest | hacker_news~news.py | import json
import logging
import os
import re
import time
from json import JSONDecodeError
import openai
import tiktoken
from slugify import slugify
import config
import db.summary
from db.summary import Model
from page_content_extractor import parser_factory
from page_content_extractor.webimage import WebImage
logger = logging.getLogger(__name__)
class News:
def __init__(self, rank=-1, title='', url='', comhead='', score='', author='',
author_link='', submit_time='', comment_cnt='', comment_url=''):
self.rank = rank
self.title = title.strip()
self.url = url
self.comhead = comhead
self.score = score
self.author = author
self.author_link = author_link
self.submit_time = submit_time
self.comment_cnt = comment_cnt
self.comment_url = comment_url
self.summary = ''
self.summarized_by: Model = Model.FULL
self.favicon = ''
self.image = None
self.img_id = ''
self.cache: db.Summary = db.Summary(url)
def __repr__(self):
return f'{self.rank} - {self.title} - {self.url} - {self.score} - {self.author}- {self.submit_time}'
def get_image_url(self):
if self.image and self.image.url:
return self.image.url
return ''
def pull_content(self):
try:
self.cache = db.summary.get(self.url)
if not self.title and hasattr(self.parser, 'title'):
self.title = self.parser.title.strip()
if self.cache.favicon:
self.favicon = self.cache.favicon
else:
self.favicon = self.cache.favicon = self.parser.get_favicon_url()
self.summary, self.summarized_by = self.summarize()
self.cache.summary, self.cache.model = self.summary, self.summarized_by.value
self.fetch_feature_image()
except Exception as e:
logger.exception('Failed to fetch %s, %s', self.url, e)
if not self.summary: # last resort, in case remote server is down
self.summary, self.summarized_by = self.cache.summary, self.cache.get_summary_model()
return db.summary.put(self.cache)
@property
def parser(self): # lazy load
if not hasattr(self, '_parser'):
logger.info("#%d, fetching %s", self.rank, self.url)
self._parser = parser_factory(self.url)
return self._parser
def get_score(self) -> int:
if isinstance(self.score, int):
return self.score
try:
return int(self.score.strip())
except:
return 0
def slug(self):
return slugify(self.title or 'no title')
def summarize(self, content=None) -> (str, Model):
# settled summary
if self.cache.model in (Model.EMBED.value, Model.OPENAI.value):
logger.info(f"Cache hit for {self.url}, model {self.cache.model}")
return self.cache.summary, self.cache.get_summary_model()
if content is None:
# Replace consecutive spaces with a single space
content = re.sub(r'\s+', ' ', self.parser.get_content(config.max_content_size))
# From arxiv or pdf
content = re.sub(r'^(abstract|summary):\s*', '', content,
flags=re.IGNORECASE).strip()
if content.startswith('<iframe '):
return content, Model.EMBED
if len(content) <= config.summary_size:
if self.cache.summary and self.cache.model != Model.FULL.value:
logger.info(f'Use cached summary, discarding "{content[:1024]}"')
return self.cache.summary, self.cache.get_summary_model()
logger.info(
f'No need to summarize since we have a small text of size {len(content)}')
return content, Model.FULL
summary = self.summarize_by_openai(content)
if summary:
return summary, Model.OPENAI
if self.get_score() >= 10: # Avoid slow local inference
if Model.from_value(self.cache.model).local_llm() and self.cache.summary:
logger.info(f'Cache hit for {self.url}, model {self.cache.model}')
return self.cache.summary, self.cache.get_summary_model()
summary = self.summarize_by_llama(content)
if summary:
return summary, Model.LLAMA
summary = self.summarize_by_transformer(content)
if summary:
return summary, Model.TRANSFORMER
else:
logger.info("Score %d is too small, ignore local llm", self.get_score())
return content, Model.PREFIX
def summarize_by_openai(self, content):
if not openai.api_key:
logger.info("OpenAI API key is not set")
return ''
if self.get_score() < config.openai_score_threshold: # Avoid expensive openai
logger.info("Score %d is too small, ignore openai", self.get_score())
return ''
content = content.replace('```', ' ').strip() # in case of prompt injection
# one token generally corresponds to ~4 characters, from https://platform.openai.com/tokenizer
if len(content) > 4096 * 2:
enc = tiktoken.encoding_for_model(config.openai_model)
tokens = enc.encode(content)
if len(tokens) > 4096 - 200: # 4096: model's context limit, 200: function + prompt tokens (to reduce hitting rate limit)
content = enc.decode(tokens[:4096 - 200])
title = self.title.replace('"', "'").replace('\n', ' ').strip() or 'no title'
# Hope one day this model will be clever enough to output correct json
# Note: sentence should end with ".", "third person" - https://news.ycombinator.com/item?id=36262670
prompt = f'Output only answers to following 3 steps.\n' \
f'1 - Summarize the article delimited by triple backticks in 2 sentences.\n' \
f'2 - Translate the summary into Chinese.\n' \
f'3 - Provide a Chinese translation of sentence: "{title}".\n' \
f'```{content.strip(".")}.```'
try:
answer = self.openai_complete(prompt, True)
summary = self.parse_step_answer(answer).strip()
if not summary: # If step parse failed, ignore the translation
summary = self.openai_complete(
f'Summarize the article delimited by triple backticks in 2 sentences.\n'
f'```{content.strip(".")}.```', False)
return summary
except Exception as e:
logger.exception(f'Failed to summarize using openai, key #{config.openai_key_index}, {e}') # Make this error explicit in the log
return ''
def openai_complete(self, prompt, need_json):
start_time = time.time()
kwargs = {'model': config.openai_model,
# one token generally corresponds to ~4 characters
# 'max_tokens': int(config.summary_size / 4),
'stream': False,
'temperature': 0,
'n': 1, # only one choice
'timeout': 30}
if need_json:
kwargs['functions'] = [{"name": "render", "parameters": {
"type": "object",
"properties": {
"summary": {
"type": "string",
"description": "English summary"
},
"summary_zh": {
"type": "string",
"description": "Chinese summary"
},
"translation": {
"type": "string",
"description": "Chinese translation of sentence"
},
},
# "required": ["summary"] # ChatGPT only returns the required field?
}}]
kwargs['function_call'] = {"name": "render"}
if config.openai_model.startswith('text-'):
resp = openai.Completion.create(
prompt=prompt,
**kwargs
)
answer = resp['choices'][0]['text'].strip()
else:
resp = openai.ChatCompletion.create(
messages=[
{'role': 'user', 'content': prompt},
],
**kwargs)
message = resp["choices"][0]["message"]
if message.get('function_call'):
json_str = message['function_call']['arguments']
if resp["choices"][0]['finish_reason'] == 'length':
json_str += '"}' # best effort to save truncated answers
try:
answer = json.loads(json_str)
except JSONDecodeError as e:
logger.warning(f'Failed to decode answer from openai, will fallback to plain text, error: {e}')
return '' # Let fallback code kicks in
else:
answer = message['content'].strip()
logger.info(f'prompt: {prompt}')
logger.info(f'took {time.time() - start_time}s to generate: '
# Default str(resp) prints \u516c
f'{json.dumps(resp.to_dict_recursive(), sort_keys=True, indent=2, ensure_ascii=False)}')
return answer
def parse_step_answer(self, answer):
if not answer:
return answer
db.translation.add(answer.get('summary', ''), answer.get('summary_zh', ''), 'zh')
db.translation.add(self.title, self.parse_title_translation(answer.get('translation', '')), 'zh')
return answer.get('summary', '')
def parse_title_translation(self, title):
# Somehow, openai always return the original title
title_cn = title.removesuffix('。').removesuffix('.')
match = re.search(r'^"[^"]+"[^"]+“([^”]+)”', title_cn)
if match: # clean path
return match.group(1).strip()
match = re.search(r'(.*)\(Note.*\)$', title_cn)
if match:
return match.group(1).strip()
parts = re.split(r'的中文翻译(?:[为是])?(?::)?', title_cn, maxsplit=1)
if len(parts) > 1 and parts[1].strip():
title_cn = parts[1].strip().strip(':').strip(':').strip()
else:
title_cn = parts[0].strip()
quote = ('"', '“', '”', '《', '》') # they are used interchangeably
while title_cn and title_cn[0] in quote and title_cn[-1] in quote:
title_cn = title_cn[1:-1].strip()
return title_cn.removesuffix('。').removesuffix('.').strip()
def fetch_feature_image(self):
if config.force_fetch_feature_image:
logger.warning(f'Will force fetch feature image')
elif self.cache.image_name is not None:
if os.path.exists(os.path.join(config.image_dir, self.cache.image_name)):
self.image = WebImage.from_json_str(self.cache.image_json)
self.img_id = self.cache.image_name
logger.info(f"Cache hit image {self.img_id}")
return
else:
logger.info(f'{self.cache.image_name} not exist in {config.image_dir}')
tm = self.parser.get_illustration()
if tm:
tm.try_compress()
fname = tm.uniq_name()
tm.save(os.path.join(config.image_dir, fname))
self.image = tm
self.cache.image_json = tm.to_json_str()
self.img_id = fname
self.cache.image_name = self.img_id # tried but not found
def summarize_by_llama(self, content):
if config.disable_llama:
logger.info("LLaMA is disabled by env DISABLE_LLAMA=1")
return ''
start_time = time.time()
from hacker_news.llm.llama import summarize_by_llama
resp = summarize_by_llama(content)
logger.info(f'took {time.time() - start_time}s to generate: {resp}')
return resp['choices'][0]['text'].strip()
def summarize_by_transformer(self, content):
if config.disable_transformer:
logger.warning("Transformer is disabled by env DISABLE_TRANSFORMER=1")
return ''
start_time = time.time()
# Too time-consuming to init t5 model, so lazy load here until we have to
from hacker_news.llm.google_t5 import summarize_by_t5
summary = summarize_by_t5(content)
logger.info(f'took {time.time() - start_time}s to generate: {summary}')
return summary
| [
"1 - Summarize the article delimited by triple backticks in 2 sentences.\n",
"2 - Translate the summary into Chinese.\n",
"Output only answers to following 3 steps.\n",
"3 - Provide a Chinese translation of sentence: \"PLACEHOLDER\".\n"
] |
2024-01-10 | JorisdeJong123/Student_Prep | home.py | import streamlit as st
from langchain.llms import OpenAI
from dotenv import load_dotenv
import os
from langchain.document_loaders import PyPDFLoader
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA, RetrievalQAWithSourcesChain
from langchain.vectorstores import FAISS
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.docstore.document import Document
from PyPDF2 import PdfReader
import tempfile
from llm_helper_function import split_text_q_gen,split_text_q_answer, split_text_docs_vector, extract_text_from_pdf_for_q_gen,extract_text_from_pdf_for_q_answer, create_questions, create_vectordatabase
st.title('🦜🔗 Smart Study Hub')
st.markdown("Smart Study Hub is a tool that helps you to study more efficiently. It generates questions from your study material and answers them for you. This way, you can test your knowledge and learn more effectively.")
# Load env files
# load_dotenv()
# openai_api_key = os.environ.get('OPENAI_API_KEY')
prompt_template = """Use the context below to write an answer to the question.:
Context: {context}
Question: {topic}
Answer:"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "topic"]
)
# Initialization of session states
# Since Streamlit always reruns the script when a widget changes, we need to initialize the session states
if 'questions' not in st.session_state:
st.session_state['questions'] = 'empty'
st.session_state['questions_list'] = 'empty'
st.session_state['questions_to_answers'] = 'empty'
def get_api_key():
input_text = st.text_input(label="OpenAI API Key ", placeholder="Ex: sk-2twmA8tfCb8un4...", key="openai_api_key_input", help="How to get an OpenAI API Key: https://www.howtogeek.com/885918/how-to-get-an-openai-api-key/")
return input_text
openai_api_key = get_api_key()
with st.container():
st.markdown("Make sure you've entered your OpenAI API Key. Don't have an API key yet? Read [this](https://www.howtogeek.com/885918/how-to-get-an-openai-api-key/) article on how to get an API key.")
# Let user upload a file
uploaded_file = st.file_uploader("Choose a file", type=['pdf'])
# If user uploaded a file, check if it is a pdf
if uploaded_file is not None:
if not openai_api_key:
st.error("Please enter your OpenAI API Key")
else:
# Create a LLM
llm = ChatOpenAI(openai_api_key=openai_api_key,temperature=0.3, model_name="gpt-3.5-turbo-16k")
if uploaded_file.type == 'application/pdf':
# Extract and split text from pdf for question generation
docs_for_q_gen = extract_text_from_pdf_for_q_gen(uploaded_file)
# Extract and split text from pdf for question answering
docs_for_q_answer = extract_text_from_pdf_for_q_answer(uploaded_file)
# Create questions
if st.session_state['questions'] == 'empty':
with st.spinner("Generating questions..."):
st.session_state['questions'] = create_questions(docs_for_q_gen, llm)
# Show questions
st.info(st.session_state['questions'])
# Create variable for further use of questions.
questions_var = st.session_state['questions']
# Split the questions into a list
st.session_state['questions_list'] = questions_var.split('\n') # Split the string into a list of questions
# Create vector database
# Create the LLM model for the question answering
llm_question_answer = ChatOpenAI(openai_api_key=openai_api_key,temperature=0.4, model="gpt-3.5-turbo-16k")
# Create the vector database and RetrievalQA Chain
embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
db = FAISS.from_documents(docs_for_q_answer, embeddings)
qa = RetrievalQA.from_chain_type(llm=llm_question_answer, chain_type="stuff", retriever=db.as_retriever())
with st.form('my_form'):
# Let the user select questions, which will be used to generate answers
st.session_state['questions_to_answers'] = st.multiselect("Select questions to answer", st.session_state['questions_list'])
submitted = st.form_submit_button('Generate answers')
if submitted:
# Initialize session state of the answers
st.session_state['answers'] = []
if 'question_answer_dict' not in st.session_state:
# Initialize session state of a dictionary with questions and answers
st.session_state['question_answer_dict'] = {}
for question in st.session_state['questions_to_answers']:
# For each question, generate an answer
with st.spinner("Generating answer..."):
# Run the chain
answer = qa.run(question)
st.session_state['question_answer_dict'][question] = answer
st.write("Question: ", question)
st.info(f"Answer: {answer} ")
else:
st.write("Please upload a pdf file")
st.stop() | [
"Use the context below to write an answer to the question.:\n Context: {context}\n Question: {topic}\n Answer:",
"context"
] |
2024-01-10 | JorisdeJong123/Student_Prep | llm_helper_function.py | from langchain.text_splitter import TokenTextSplitter
from langchain.docstore.document import Document
from langchain.chat_models import ChatOpenAI
from langchain.chains.summarize import load_summarize_chain
from langchain.document_loaders import SeleniumURLLoader, PyPDFLoader
from prompts import PROMPT_QUESTIONS, REFINE_PROMPT_QUESTIONS
from PyPDF2 import PdfReader
from langchain.vectorstores import FAISS
from langchain.embeddings.openai import OpenAIEmbeddings
# Function to extract text from PDF for question generation
def extract_text_from_pdf_for_q_gen(uploaded_file):
pdf_reader = PdfReader(uploaded_file)
text = ""
for page in pdf_reader.pages:
text += page.extract_text()
docs = split_text_q_gen(text)
return docs
# Function to extract text from PDF for question answering
def extract_text_from_pdf_for_q_answer(uploaded_file):
pdf_reader = PdfReader(uploaded_file)
text = ""
for page in pdf_reader.pages:
text += page.extract_text()
docs = split_text_q_answer(text)
return docs
# Text splitter when the input text is just text, not documents.
# Used for question generation
def split_text_q_gen(data):
text_splitter = TokenTextSplitter(model_name="gpt-3.5-turbo-16k", chunk_size=10000, chunk_overlap=200)
texts = text_splitter.split_text(data)
docs = [Document(page_content=t) for t in texts]
return docs
# Text splitter when the input text is just text, not documents.
# Used for question answering, vector database
def split_text_q_answer(data):
text_splitter = TokenTextSplitter(model_name="gpt-3.5-turbo", chunk_size=2000, chunk_overlap=200)
texts = text_splitter.split_text(data)
docs = [Document(page_content=t) for t in texts]
return docs
# Function for splitting texts into documents
def split_text_docs_vector(data):
text_splitter = TokenTextSplitter(model_name="gpt-3.5-turbo", chunk_size=2000, chunk_overlap=200)
docs = text_splitter.split_documents(data)
return docs
# Function to create a LLM model
def create_LLM(openai_api_key, temperature, model_name):
llm = ChatOpenAI(openai_api_key=openai_api_key,temperature=temperature, model=model_name)
return llm
def load_data_pdf(file_path):
loader = PyPDFLoader(file_path=file_path)
data = loader.load()
return data
# Function to create questions from the documents with a refine summarization chain
def create_questions(docs, llm):
question_chain = load_summarize_chain(llm, chain_type="refine", verbose=True, question_prompt=PROMPT_QUESTIONS, refine_prompt=REFINE_PROMPT_QUESTIONS)
questions = question_chain.run(docs)
return questions
def create_vectordatabase(docs, openai_api_key):
embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
db = FAISS.from_documents(docs, embeddings)
return db | [] |
2024-01-10 | virtualdude1/PythonGPT3Tutorial | hello_world.py | import openai
def open_file(filepath):
with open(filepath, 'r', encoding='utf-8') as infile:
return infile.read()
openai.api_key = open_file('openaiapikey.txt')
def gpt3_completion(prompt, engine='text-davinci-002', temp=0.7, top_p=1.0, tokens=400, freq_pen=0.0, pres_pen=0.0, stop=['<<END>>']):
prompt = prompt.encode(encoding='ASCII',errors='ignore').decode()
response = openai.Completion.create(
engine=engine,
prompt=prompt,
temperature=temp,
max_tokens=tokens,
top_p=top_p,
frequency_penalty=freq_pen,
presence_penalty=pres_pen,
stop=stop)
text = response['choices'][0]['text'].strip()
return text
if __name__ == '__main__':
prompt = 'Write a list of famous American actors:'
response = gpt3_completion(prompt)
print(response) | [
"Write a list of famous American actors:"
] |
2024-01-10 | drace28/Voice_assisstant | voice_assisstant.py | import speech_recognition as sr
import pyttsx3
import openai
import pyautogui
import os
import time
from dotenv import load_dotenv
import psutil
import google.generativeai as genai
from pynput.keyboard import Controller, Key
# Load the .env file
load_dotenv()
keyboard = Controller()
# Set your API keys
openai.api_key = os.getenv('OPENAI_KEY')
GOOGLE_API_KEY= os.getenv('GOOGLE_API_KEY')
genai.configure(api_key=GOOGLE_API_KEY)
model = genai.GenerativeModel("gemini-pro")
def is_spotify_running():
for process in psutil.process_iter(['name']):
if process.info['name'] == 'Spotify.exe':
return True
return False
# Initialize the recognizer
r = sr.Recognizer()
# Function to convert text to speech
def SpeakText(command):
engine = pyttsx3.init()
engine.say(command)
engine.runAndWait()
# Function to check if the text contains the wake word
def wakeWord(text):
WAKE_WORDS = ['assistant', 'hey anisha', 'anisha', 'okay anisha', 'hi anisha', 'hello anisha']
text = text.lower()
return any(word in text for word in WAKE_WORDS)
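# Illustrative check: wakeWord("hey anisha, what's the weather") returns True because "hey anisha"
# is in WAKE_WORDS, while wakeWord("what's the weather") returns False.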
# Function to perform actions based on user commands
def performAction(command, source):  # source: the active microphone, needed by the "ask a question" branch
if "open" in command:
words = command.split()
index = words.index("open")
program = words[index + 1]
SpeakText(f"Opening{program}")
os.system(f'start {program}')
elif "close" in command:
words = command.split()
index = words.index("close")
program = words[index + 1]
SpeakText(f"Closing{program}")
os.system(f'taskkill /f /im {program}.exe')
elif "type" in command:
words = command.split()
index = words.index("type")
text = words[index + 1:]
text = " ".join(text)
SpeakText(f"Typing{text}")
pyautogui.typewrite(text)
elif "search" in command:
words = command.split()
index = words.index("search")
query = words[index + 1:]
query = " ".join(query)
SpeakText(f"Searching for {query}")
os.system(f'start https://www.google.com/search?q={query}')
try:
# Send the user's question to ChatGPT
response = openai.Completion.create(
engine="davinci",
prompt=f"I am searching for {query}",
temperature=0.7,
max_tokens=150,
n=1,
)
SpeakText(response["choices"][0]["text"].strip())
except Exception as e:
print(f"Error querying ChatGPT: {e}")
SpeakText("I'm sorry, I couldn't generate a response at the moment.")
elif "send whatsapp message" in command:
# Extract the recipient and message from the command
recipient, message = command.replace("send whatsapp message ", "").split(" message ")
SpeakText(f"Sending WhatsApp message to {recipient}")
# Open WhatsApp
os.system('start whatsapp') # Replace with the path to your WhatsApp.exe
time.sleep(5) # Wait for WhatsApp to open
# Click on the search box
pyautogui.click(x=110, y=200) # Replace with the coordinates of your search box
# Type the recipient's name and press enter
pyautogui.write(recipient)
pyautogui.press('enter')
time.sleep(2) # Wait for the chat to open
# Type the message and press enter
pyautogui.write(message)
pyautogui.press('enter')
elif "play music" in command or "pause music" in command or "next track" in command or "previous track" in command:
if not is_spotify_running():
SpeakText("Opening Spotify")
os.system('start spotify') # This command opens Spotify on Windows
if "play music" in command or "pause music" in command:
SpeakText("OK")
with keyboard.pressed(Key.media_play_pause):
pass # This shortcut plays/pauses music in Spotify
elif "next track" in command:
SpeakText("Skipping to the next track on Spotify")
with keyboard.pressed(Key.media_next):
pass # This shortcut skips to the next track in Spotify
elif "previous track" in command:
SpeakText("Going to the previous track on Spotify")
with keyboard.pressed(Key.media_previous):
pass
elif "ask a question" or "bard" in command:
SpeakText("Sure, what would you like to ask?")
audio_data = r.listen(source, timeout=10) # Listen for the user's question
question = r.recognize_google(audio_data, language='en-US')
response = model.generate_content(question)
SpeakText(response.text)
elif "bye" in command:
SpeakText("Bye, See you soon")
exit()
elif "goodbye" in command:
SpeakText("Goodbye, See you soon")
exit()
elif "good night" in command:
SpeakText("Good Night, Sweet Dreams")
exit()
else:
SpeakText("Sorry, I did not understand that.")
# Function to query ChatGPT
def chatgpt_query(question):
try:
# Send the user's question to ChatGPT
response = openai.Completion.create(
engine="text-davinci-003",
prompt=question,
temperature=0.7,
max_tokens=150,
n=1,
)
return response["choices"][0]["text"].strip()
except Exception as e:
print(f"Error querying ChatGPT: {e}")
return "I'm sorry, I couldn't generate a response at the moment."
# Continuous listening loop
while True:
print("Say something")
with sr.Microphone(device_index=1) as source:
r.adjust_for_ambient_noise(source, duration=0.2)
audio_data = r.listen(source)
print("Recognizing...")
try:
MyText = r.recognize_google(audio_data, language='en-US')
print(MyText)
# if "bye" or "goodbye" or "goodnight" in command:
# SpeakText("Bye, have a good day")
# exit()
if wakeWord(MyText):
SpeakText("Hello, How can I assist you?")
# Listen for the user's command after the wake word
audio_data = r.listen(source, timeout=5)
command = r.recognize_google(audio_data, language='en-US')
# Perform actions based on the user's command
performAction(command, source)
elif "bye" or "goodbye" or "goodnight" in command:
SpeakText("Bye, have a good day")
exit()
except sr.UnknownValueError:
print("Could not understand audio")
except sr.RequestError as e:
print(f"Error with the speech recognition service; {e}")
except Exception as e:
print(f"An error occurred; {e}")
| [
"I am searching for PLACEHOLDER"
] |
2024-01-10 | akoshegel/find-room-with-chatgpt | app~dependencies.py | import openai
import facebook_scraper
from app.config import Config
from app.domain.chatgpt import ChatGPT
from app.domain.facebook_groups_scraper import FacebookGroupsScraper
from app.services.advertisment_service import AdvertismentService
config = Config.get()
class Dependencies:
__chatgpt = None
__scraper = None
__advertisment_service = None
@staticmethod
def make_chatgpt() -> ChatGPT:
if not Dependencies.__chatgpt:
openai.api_key = Config.get()["openai_api_key"]
Dependencies.__chatgpt = ChatGPT(openai)
return Dependencies.__chatgpt
@staticmethod
def make_scraper() -> FacebookGroupsScraper:
if not Dependencies.__scraper:
Dependencies.__scraper = FacebookGroupsScraper(
scraper=facebook_scraper,
config={
"pages": config["scraper_pages"],
"posts_per_pages": config["scraper_posts_per_pages"],
"timeout": config["scraper_timeout"],
},
)
return Dependencies.__scraper
@staticmethod
def make_advertisment_service() -> AdvertismentService:
if not Dependencies.__advertisment_service:
Dependencies.__advertisment_service = AdvertismentService(
scraper=Dependencies.make_scraper(),
chatGPT=Dependencies.make_chatgpt(),
)
return Dependencies.__advertisment_service
| [] |
2024-01-10 | DorotaBjoorn/Data-Engineering-Project | src~newsfeed~summary_models.py | import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline
import openai
from langchain.chains.summarize import load_summarize_chain
from langchain.chat_models import ChatOpenAI # for generating the summary
from langchain.docstore.document import Document # for storing the text
from langchain.prompts import PromptTemplate # Template for the prompt
def summarize_text_with_hugging_face(
text, model_name="facebook/bart-large-cnn", max_length=250, min_length=25
):
"""
Summarize the given text using the specified model.
Parameters:
- text (str): The text to summarize.
- model_name (str): The name of the pre-trained model to use.
- max_length (int): The maximum length of the summary.
- min_length (int): The minimum length of the summary.
Returns:
- str: The summary of the text.
"""
tokenizer = AutoTokenizer.from_pretrained(model_name)
summarization_pipeline = pipeline(
"summarization", model=model_name, tokenizer=tokenizer
) # device=0 if you have a GPU, if not specified will be run on CPU by default
summary = summarization_pipeline(
text, max_length=max_length, min_length=min_length, do_sample=False
)
return summary[0]["summary_text"]
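# Example usage (illustrative sketch; `article_text` is an assumed variable holding the text to summarize):
# summary = summarize_text_with_hugging_face(article_text, max_length=200, min_length=50)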
def summarize_text_with_open_ai(blog_text, prompt_template):
model_name = "gpt-3.5-turbo"
# Converts each part into a Document object
docs = [Document(page_content=blog_text)]
# Loads the language model
llm = ChatOpenAI(temperature=0, openai_api_key=openai.api_key, model_name=model_name)
# Defines prompt template
prompt = PromptTemplate(template=prompt_template, input_variables=["text"])
# Define model parameters
verbose = False # If set to True, prints entire un-summarized text
# Loads appropriate chain based on the number of tokens. Stuff or Map Reduce is chosen
chain = load_summarize_chain(
llm,
chain_type="stuff",
prompt=prompt,
verbose=verbose,
)
summary = chain.run(docs)
return summary
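# Example usage (illustrative sketch; `blog_text` and the template below are assumptions, but any template
# must contain a "{text}" placeholder because the chain is built with input_variables=["text"]):
# example_template = "Write a concise summary of the following blog post:\n\n{text}\n\nCONCISE SUMMARY:"
# summary = summarize_text_with_open_ai(blog_text, example_template)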
| [] |
2024-01-10 | scan-dev/autogen | test~oai~test_completion.py | import datasets
import sys
import numpy as np
import pytest
from functools import partial
import os
import json
import autogen
from autogen.code_utils import (
eval_function_completions,
generate_assertions,
implement,
generate_code,
)
from autogen.math_utils import eval_math_responses, solve_problem
KEY_LOC = "notebook"
OAI_CONFIG_LIST = "OAI_CONFIG_LIST"
here = os.path.abspath(os.path.dirname(__file__))
def yes_or_no_filter(context, response, **_):
return context.get("yes_or_no_choice", False) is False or any(
text in ["Yes.", "No."] for text in autogen.Completion.extract_text(response)
)
def valid_json_filter(response, **_):
for text in autogen.Completion.extract_text(response):
try:
json.loads(text)
return True
except ValueError:
pass
return False
def test_filter():
try:
import openai
except ImportError as exc:
print(exc)
return
config_list = autogen.config_list_from_models(
KEY_LOC, exclude="aoai", model_list=["text-ada-001", "gpt-3.5-turbo", "text-davinci-003"]
)
response = autogen.Completion.create(
context={"yes_or_no_choice": True},
config_list=config_list,
prompt="Is 37 a prime number? Please answer 'Yes.' or 'No.'",
filter_func=yes_or_no_filter,
)
assert (
autogen.Completion.extract_text(response)[0] in ["Yes.", "No."]
or not response["pass_filter"]
and response["config_id"] == 2
)
response = autogen.Completion.create(
context={"yes_or_no_choice": False},
config_list=config_list,
prompt="Is 37 a prime number?",
filter_func=yes_or_no_filter,
)
assert response["model"] == "text-ada-001"
response = autogen.Completion.create(
config_list=config_list,
prompt="How to construct a json request to Bing API to search for 'latest AI news'? Return the JSON request.",
filter_func=valid_json_filter,
)
assert response["config_id"] == 2 or response["pass_filter"], "the response must pass filter unless all fail"
assert not response["pass_filter"] or json.loads(autogen.Completion.extract_text(response)[0])
def test_chatcompletion():
params = autogen.ChatCompletion._construct_params(
context=None,
config={"model": "unknown"},
prompt="hi",
)
assert "messages" in params
params = autogen.Completion._construct_params(
context=None,
config={"model": "unknown"},
prompt="hi",
)
assert "messages" not in params
params = autogen.Completion._construct_params(
context=None,
config={"model": "gpt-4"},
prompt="hi",
)
assert "messages" in params
params = autogen.Completion._construct_params(
context={"name": "there"},
config={"model": "unknown"},
prompt="hi {name}",
allow_format_str_template=True,
)
assert params["prompt"] == "hi there"
params = autogen.Completion._construct_params(
context={"name": "there"},
config={"model": "unknown"},
prompt="hi {name}",
)
assert params["prompt"] != "hi there"
def test_multi_model():
try:
import openai
except ImportError as exc:
print(exc)
return
response = autogen.Completion.create(
config_list=autogen.config_list_gpt4_gpt35(KEY_LOC),
prompt="Hi",
)
print(response)
def test_nocontext():
try:
import openai
import diskcache
except ImportError as exc:
print(exc)
return
response = autogen.Completion.create(
model="text-ada-001",
prompt="1+1=",
max_tokens=1,
use_cache=False,
request_timeout=10,
config_list=autogen.config_list_openai_aoai(KEY_LOC, exclude="aoai"),
)
print(response)
code, _ = generate_code(
config_list=autogen.config_list_from_json(
OAI_CONFIG_LIST,
file_location=KEY_LOC,
filter_dict={
"model": {
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613",
"gpt-3.5-turbo-0301",
"chatgpt-35-turbo-0301",
"gpt-35-turbo-v0301",
"gpt",
},
},
),
messages=[
{
"role": "system",
"content": "You want to become a better assistant by learning new skills and improving your existing ones.",
},
{
"role": "user",
"content": "Write reusable code to use web scraping to get information from websites.",
},
],
)
print(code)
solution, cost = solve_problem("1+1=", config_list=autogen.config_list_gpt4_gpt35(KEY_LOC))
print(solution, cost)
@pytest.mark.skipif(
sys.platform == "win32",
reason="do not run on windows",
)
def test_humaneval(num_samples=1):
gpt35_config_list = autogen.config_list_from_json(
env_or_file=OAI_CONFIG_LIST,
filter_dict={
"model": {
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613",
"gpt-3.5-turbo-0301",
"chatgpt-35-turbo-0301",
"gpt-35-turbo-v0301",
"gpt",
},
},
file_location=KEY_LOC,
)
assertions = partial(generate_assertions, config_list=gpt35_config_list)
eval_with_generated_assertions = partial(
eval_function_completions,
assertions=assertions,
)
seed = 41
data = datasets.load_dataset("openai_humaneval")["test"].shuffle(seed=seed)
n_tune_data = 20
tune_data = [
{
"definition": data[x]["prompt"],
"test": data[x]["test"],
"entry_point": data[x]["entry_point"],
}
for x in range(n_tune_data)
]
test_data = [
{
"definition": data[x]["prompt"],
"test": data[x]["test"],
"entry_point": data[x]["entry_point"],
}
for x in range(n_tune_data, len(data))
]
autogen.Completion.clear_cache(cache_path_root=f"{here}/cache")
autogen.Completion.set_cache(seed)
try:
import openai
import diskcache
except ImportError as exc:
print(exc)
return
autogen.Completion.clear_cache(400)
# no error should be raised
response = autogen.Completion.create(
context=test_data[0],
config_list=autogen.config_list_from_models(KEY_LOC, model_list=["gpt-3.5-turbo"]),
prompt="",
max_tokens=1,
max_retry_period=0,
raise_on_ratelimit_or_timeout=False,
)
# assert response == -1
config_list = autogen.config_list_openai_aoai(KEY_LOC, exclude="aoai")
# a minimal tuning example
config, _ = autogen.Completion.tune(
data=tune_data,
metric="success",
mode="max",
eval_func=eval_function_completions,
n=1,
prompt="{definition}",
allow_format_str_template=True,
config_list=config_list,
)
response = autogen.Completion.create(context=test_data[0], config_list=config_list, **config)
# a minimal tuning example for tuning chat completion models using the Completion class
config, _ = autogen.Completion.tune(
data=tune_data,
metric="succeed_assertions",
mode="max",
eval_func=eval_with_generated_assertions,
n=1,
model="text-davinci-003",
prompt="{definition}",
allow_format_str_template=True,
config_list=config_list,
)
response = autogen.Completion.create(context=test_data[0], config_list=config_list, **config)
# a minimal tuning example for tuning chat completion models using the ChatCompletion class
config_list = autogen.config_list_openai_aoai(KEY_LOC)
config, _ = autogen.ChatCompletion.tune(
data=tune_data,
metric="expected_success",
mode="max",
eval_func=eval_function_completions,
n=1,
messages=[{"role": "user", "content": "{definition}"}],
config_list=config_list,
allow_format_str_template=True,
request_timeout=120,
)
response = autogen.ChatCompletion.create(context=test_data[0], config_list=config_list, **config)
print(response)
from openai.error import RateLimitError
try:
code, cost, selected = implement(tune_data[1], [{**config_list[-1], **config}])
except RateLimitError:
code, cost, selected = implement(
tune_data[1],
[{**config_list[0], "model": "text-ada-001", "prompt": config["messages"]["content"]}],
assertions=assertions,
)
print(code)
print(cost)
assert selected == 0
print(eval_function_completions([code], **tune_data[1]))
# a more comprehensive tuning example
config2, analysis = autogen.Completion.tune(
data=tune_data,
metric="success",
mode="max",
eval_func=eval_with_generated_assertions,
log_file_name="logs/humaneval.log",
inference_budget=0.002,
optimization_budget=2,
num_samples=num_samples,
# logging_level=logging.INFO,
prompt=[
"{definition}",
"# Python 3{definition}",
"Complete the following Python function:{definition}",
],
stop=[["\nclass", "\ndef", "\nif", "\nprint"], None], # the stop sequences
config_list=config_list,
allow_format_str_template=True,
)
print(config2)
print(analysis.best_result)
print(test_data[0])
response = autogen.Completion.create(context=test_data[0], config_list=config_list, **config2)
print(response)
autogen.Completion.data = test_data[:num_samples]
result = autogen.Completion._eval(analysis.best_config, prune=False, eval_only=True)
print("result without pruning", result)
result = autogen.Completion.test(test_data[:num_samples], config_list=config_list, **config2)
print(result)
try:
code, cost, selected = implement(
tune_data[1], [{**config_list[-2], **config2}, {**config_list[-1], **config}], assertions=assertions
)
except RateLimitError:
code, cost, selected = implement(
tune_data[1],
[
{**config_list[-3], **config2},
{**config_list[0], "model": "text-ada-001", "prompt": config["messages"]["content"]},
],
assertions=assertions,
)
print(code)
print(cost)
print(selected)
print(eval_function_completions([code], **tune_data[1]))
def test_math(num_samples=-1):
try:
import openai
import diskcache
except ImportError as exc:
print(exc)
return
seed = 41
data = datasets.load_dataset("competition_math")
train_data = data["train"].shuffle(seed=seed)
test_data = data["test"].shuffle(seed=seed)
n_tune_data = 20
tune_data = [
{
"problem": train_data[x]["problem"],
"solution": train_data[x]["solution"],
}
for x in range(len(train_data))
if train_data[x]["level"] == "Level 1"
][:n_tune_data]
test_data = [
{
"problem": test_data[x]["problem"],
"solution": test_data[x]["solution"],
}
for x in range(len(test_data))
if test_data[x]["level"] == "Level 1"
]
print(
"max tokens in tuning data's canonical solutions",
max([len(x["solution"].split()) for x in tune_data]),
)
print(len(tune_data), len(test_data))
# prompt template
prompts = [
lambda data: "%s Solve the problem carefully. Simplify your answer as much as possible. Put the final answer in \\boxed{}."
% data["problem"]
]
autogen.Completion.set_cache(seed)
config_list = autogen.config_list_openai_aoai(KEY_LOC, exclude="aoai")
vanilla_config = {
"model": "text-davinci-003",
"temperature": 1,
"max_tokens": 2048,
"n": 1,
"prompt": prompts[0],
"stop": "###",
}
test_data_sample = test_data[0:3]
result = autogen.Completion.test(test_data_sample, eval_math_responses, config_list=config_list, **vanilla_config)
result = autogen.Completion.test(
test_data_sample,
eval_math_responses,
agg_method="median",
config_list=config_list,
**vanilla_config,
)
def my_median(results):
return np.median(results)
def my_average(results):
return np.mean(results)
result = autogen.Completion.test(
test_data_sample,
eval_math_responses,
agg_method=my_median,
**vanilla_config,
)
result = autogen.Completion.test(
test_data_sample,
eval_math_responses,
agg_method={
"expected_success": my_median,
"success": my_average,
"success_vote": my_average,
"votes": np.mean,
},
**vanilla_config,
)
print(result)
config, _ = autogen.Completion.tune(
data=tune_data, # the data for tuning
metric="expected_success", # the metric to optimize
mode="max", # the optimization mode
eval_func=eval_math_responses, # the evaluation function to return the success metrics
# log_file_name="logs/math.log", # the log file name
inference_budget=0.002, # the inference budget (dollar)
optimization_budget=0.01, # the optimization budget (dollar)
num_samples=num_samples,
prompt=prompts, # the prompt templates to choose from
stop="###", # the stop sequence
config_list=config_list,
)
print("tuned config", config)
result = autogen.Completion.test(test_data_sample, config_list=config_list, **config)
print("result from tuned config:", result)
print("empty responses", eval_math_responses([], None))
if __name__ == "__main__":
import openai
config_list = autogen.config_list_openai_aoai(KEY_LOC)
assert len(config_list) >= 3, config_list
openai.api_key = os.environ["OPENAI_API_KEY"]
# test_filter()
# test_chatcompletion()
# test_multi_model()
# test_nocontext()
test_humaneval(1)
# test_math(1)
| [
"How to construct a json request to Bing API to search for 'latest AI news'? Return the JSON request.",
"1+1=",
"Is 37 a prime number? Please answer 'Yes.' or 'No.'",
"Hi",
"{definition}",
"[<function <lambda> at 0x115fffd80>]",
"Write reusable code to use web scraping to get information from websites.",
"Is 37 a prime number?",
"You want to become a better assistant by learning new skills and improving your existing ones."
] |
2024-01-10 | scan-dev/autogen | test~agentchat~test_retrievechat.py | import pytest
import sys
import autogen
from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST
try:
from autogen.agentchat.contrib.retrieve_assistant_agent import (
RetrieveAssistantAgent,
)
from autogen.agentchat.contrib.retrieve_user_proxy_agent import (
RetrieveUserProxyAgent,
)
from autogen.retrieve_utils import create_vector_db_from_dir, query_vector_db
import chromadb
skip_test = False
except ImportError:
skip_test = True
@pytest.mark.skipif(
sys.platform in ["darwin", "win32"] or skip_test,
reason="do not run on MacOS or windows",
)
def test_retrievechat():
try:
import openai
except ImportError:
return
conversations = {}
autogen.ChatCompletion.start_logging(conversations)
config_list = autogen.config_list_from_json(
OAI_CONFIG_LIST,
file_location=KEY_LOC,
filter_dict={
"model": ["gpt-4", "gpt4", "gpt-4-32k", "gpt-4-32k-0314"],
},
)
assistant = RetrieveAssistantAgent(
name="assistant",
system_message="You are a helpful assistant.",
llm_config={
"request_timeout": 600,
"seed": 42,
"config_list": config_list,
},
)
ragproxyagent = RetrieveUserProxyAgent(
name="ragproxyagent",
human_input_mode="NEVER",
max_consecutive_auto_reply=2,
retrieve_config={
"docs_path": "./website/docs",
"chunk_token_size": 2000,
"model": config_list[0]["model"],
"client": chromadb.PersistentClient(path="/tmp/chromadb"),
},
)
assistant.reset()
code_problem = "How can I use FLAML to perform a classification task, set use_spark=True, train 30 seconds and force cancel jobs if time limit is reached."
ragproxyagent.initiate_chat(assistant, problem=code_problem, search_string="spark", silent=True)
print(conversations)
@pytest.mark.skipif(
sys.platform in ["darwin", "win32"] or skip_test,
reason="do not run on MacOS or windows",
)
def test_retrieve_utils():
client = chromadb.PersistentClient(path="/tmp/chromadb")
create_vector_db_from_dir(dir_path="./website/docs", client=client, collection_name="autogen-docs")
results = query_vector_db(
query_texts=[
"How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?",
],
n_results=4,
client=client,
collection_name="autogen-docs",
search_string="AutoGen",
)
print(results["ids"][0])
assert len(results["ids"][0]) == 4
if __name__ == "__main__":
test_retrievechat()
test_retrieve_utils()
| [] |
2024-01-10 | knobz12/D0020E-project-course | backend~modules~ai~playground.py | """
For running general code with Llama llm
"""
import chromadb
from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings
from langchain.vectorstores.chroma import Chroma
from datasets import load_dataset
from chromadb.config import Settings
from langchain.prompts import ChatPromptTemplate
from langchain.llms.llamacpp import LlamaCpp
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks import StreamingStdOutCallbackHandler
from langchain.prompts.prompt import Prompt
from langchain.schema.runnable import RunnablePassthrough
from langchain.schema.output_parser import StrOutputParser
import time
import pathlib
import os
import guidance
from guidance import gen, select, Guidance
from guidance.models._llama_cpp import LlamaCpp as gLlamaCpp
from llama_cpp import Llama, ChatCompletionMessage
from argparse import ArgumentParser
from modules.ai.utils.vectorstore import create_vectorstore
from modules.ai.utils.llm import create_llm
import json
# from langchain.vectorstores.chroma import Chroma
def run_llm():
vectorstore = create_vectorstore()
llm = create_llm()
# You will be talking to a student taking the course D7032E. Use the following pieces of retrieved context to answer the question.
# Course summary: The course will have an emphasis on selected topics from: Project planning and management, problem analysis,
# software management and interpretation, code complexity, API design, debugging and testing, configuration
# management, documentation, design patterns, build support and tools of the trade, packaging, release management
# and deployment, modeling and structuring of software, reuse, components, architectures, maintenance and
# documentation. The course includes a number of assignments, which are to be completed in groups, and that are
# evaluated in both written and oral form. Individual examination is given through tests and a home exam.
# You will be talking to a student taking the AI course D0038E. Use the following pieces of retrieved context to answer the question.
prompt_str = """Human: You are an assistant for question-answering tasks.
Use the following pieces of retrieved context to answer the question.
If you don't know the answer, just say that you don't know.
Use three sentences maximum and keep the answer concise.
Don't directly refer to the context text, pretend like you already knew the context information.
Question: {question}
Context: {context}
Answer:"""
questions: list[str] = [
# "In lab 6 do we use boosting? ",
# "Explain what we are doing in lab 6 task 1.",
# "In lab 6 task 1 what is the expected difference in performance between the two models?",
# "For lab 6 summarize task 6.",
# "What models are used in lab 6?",
# "For task 7 in in lab 6 give some examples of models i can experiment on.",
# "Are we allowed to do lab 6 outside the lab sessions?",
# "In lab 6, in what website can i read more about the different models?",
# "What program are we supposed to use for lab 6?",
# "in lab 6 what is task 4?",
# "In Lab3 what is the excercise about?",
# "What kind of classifier will Lab3 be about?",
# "What operator can be used in rapidminer to take data and a pretrained model and get labeled dataset as an output?",
# "Give me an example of a hyperparameter",
# "What is a k-nearest neighbors classifier?",
# "How many tasks are there in lab3?",
# "What dataset do you need to load for task 4?",
# "How does the K-NN model work?",
# "What happens when the dimensions increase when using k-NN?",
# "Are there any extra tasks in lab3?",
# "Summarize lab 6.",
# "What is SOLID principles?"
]
# llm("Finish the sentence: I compare thee [...]")
# "In lab 6 do we use boosting? ",
# "Explain what we are doing in lab 6 task 1.",
# "In lab 6 task 1 what is the expected difference in performance between the two models?",
# "For lab 6 summarize task 6.",
# "What models are used in lab 6?",
# "For task 7 in in lab 6 give some examples of models i can experiment on.",
# "Are we allowed to do lab 6 outside the lab sessions?",
# "In lab 6, in what website can i read more about the different models?",
# "What program are we supposed to use for lab 6?",
# "in lab 6 what is task 4?",
# "In Lab3 what is the excercise about?",
# "What kind of classifier will Lab3 be about?",
# "What operator can be used in rapidminer to take data and a pretrained model and get labeled dataset as an output?",
# "Give me an example of a hyperparameter",
# "What is a k-nearest neighbors classifier?",
# "How many tasks are there in lab3?",
# "What dataset do you need to load for task 4?",
# "How does the K-NN model work?",
# "What happens when the dimensions increase when using k-NN?",
# "Are there any extra tasks in lab3?",
# "Summarize lab 6.",
# "What is SOLID principles?"
# ]
# llm("Finish the sentence: I compare thee [...]")
for question in questions:
docs = vectorstore.similarity_search(question, k=2,filter={'course':'D7032E'})
context = ""
print(f"Docs", docs)
print(f"Docs: {len(docs)}")
for doc in docs:
print('doc')
# print("Doc id:", (doc.metadata["id"],doc.metadata["chunk-id"]))
print("Doc metadata:", doc.metadata)
context += doc.page_content
resulting_prompt = prompt_str.format(question = question, context = context)
# resulting_prompt = prompt_no_context_str.format(question = question)
print("Full prompt (length: {length}):".format(length=len(resulting_prompt)))
print(resulting_prompt+"\n")
print(f"############## Start")
print(f"Question: {question}\n")
print(f"Answer: ",end="")
llm(resulting_prompt+"\n")
print("\n")
print(f"############## Finished\n\n")
if __name__ == "__main__":
run_llm()
| [
"Human: You are an assistant for question-answering tasks.\nUse the following pieces of retrieved context to answer the question. \nIf you don't know the answer, just say that you don't know. \nUse three sentences maximum and keep the answer concise. \nDon't directly refer to the context text, pretend like you already knew the context information.\n\nQuestion: {question}\n\nContext: {context}\n\nAnswer:",
"Human: You are an assistant for question-answering tasks.\nUse the following pieces of retrieved context to answer the question. \nIf you don't know the answer, just say that you don't know. \nUse three sentences maximum and keep the answer concise. \nDon't directly refer to the context text, pretend like you already knew the context information.\n\nQuestion: PLACEHOLDER\n\nContext: \n\nAnswer:"
] |
2024-01-10 | knobz12/D0020E-project-course | backend~modules~ai~utils~vectorstore.py | import chromadb
from chromadb import Collection, ClientAPI
from chromadb.config import Settings
from guidance.models._llama_cpp import LlamaCpp
from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings
from langchain.vectorstores.chroma import Chroma
collection_name = "llama-2-papers"
client: ClientAPI | None = None
collection: Collection | None = None
vectorstore: Chroma | None = None
def create_chroma_client() -> ClientAPI:
"""Create chromadb client"""
global client
if client != None:
print("✅ Using cached client")
return client
client = chromadb.Client(settings=Settings(allow_reset=True))
#client = chromadb.PersistentClient("./chroma_data", settings=Settings(allow_reset=True))
#client = chromadb.HttpClient(settings=Settings(allow_reset=True))
return client
def create_collection() -> Collection:
"""Create chromadb collection client"""
global collection
if collection != None:
print("✅ Using cached collection")
return collection
client = create_chroma_client()
try:
collection = client.get_collection(collection_name)
except Exception:
print(f"Creating missing collection '{collection_name}'...")
collection = client.create_collection(collection_name)
return collection
def create_vectorstore() -> Chroma:
"""Create vectorchain version of vectorstore with chromadb"""
global vectorstore
if vectorstore != None:
print("✅ Using vectorstore")
return vectorstore
client = create_chroma_client()
print("Creating embedding function")
embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
vectorstore = Chroma(embedding_function=embedding_function,client=client,collection_name=collection_name)
return vectorstore
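# Example usage (illustrative sketch, mirroring how the store is queried elsewhere in this project):
# store = create_vectorstore()
# hits = store.similarity_search("What is covered in lab 6?", k=2)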
| [] |
2024-01-10 | knobz12/D0020E-project-course | backend~modules~ai~explainer.py | """
Creating more detailed explanations from text documents
"""
from typing import Any
from modules.ai.utils.llm import create_llm
from modules.ai.utils.llm import create_llm_guidance
from modules.ai.utils.vectorstore import create_vectorstore
from summarizer import summarize_doc
def summarize_doc(id: str) -> str:
llm = create_llm()
vectorstore = create_vectorstore()
docs = vectorstore.get(limit=100,include=["metadatas"],where={"id":id})
print(docs)
print("doc count:",len(docs['ids']))
results: list[str] = []
for (idx, meta) in enumerate(docs["metadatas"]):
text =meta["text"]
previous_summary: str | None = results[idx - 1] if idx > 0 else None
prompt = """Human: You are an assistant summarizing document text.
I want you to summarize the text as best as you can in less than four paragraphs but atleast two paragraphs:
Text: {text}
Answer:""".format(text = text)
prompt_with_previous= """Human: You are an assistant summarizing document text.
Use the following pieces of retrieved context to improve the summary text.
If you can't improve it simply return the old.
The new summary may only be up to four paragraphs but at least two paragraphs.
Don't directly refer to the context text, pretend like you already knew the context information.
Summary: {summary}
Context: {context}
Answer:""".format(summary = previous_summary,context=text)
use_prompt = prompt if previous_summary == None else prompt_with_previous
print(f"Summarizing doc {idx + 1}...")
print(f"Full prompt:")
print(use_prompt + "\n")
result = llm(use_prompt)
results.append(result)
print("######################################\n\n\n")
for (idx, result) in enumerate(results):
print(f"Result {idx + 1}")
print(result + "\n\n\n")
print("################################\n")
print("Summary:")
summary = "\n".join(results[-1].splitlines()[2:])
return summary
from transformers import (
TokenClassificationPipeline,
AutoModelForTokenClassification,
AutoTokenizer,
)
from transformers.pipelines import AggregationStrategy
import numpy as np
# Define keyphrase extraction pipeline
class KeyphraseExtractionPipeline(TokenClassificationPipeline):
def __init__(self, model, *args, **kwargs):
super().__init__(
model=AutoModelForTokenClassification.from_pretrained(model),
tokenizer=AutoTokenizer.from_pretrained(model),
*args,
**kwargs
)
def postprocess(self, all_outputs):
results = super().postprocess(
all_outputs=all_outputs,
aggregation_strategy=AggregationStrategy.SIMPLE,
)
return np.unique([result.get("word").strip() for result in results])
def get_keywords(doc: str) -> list[str]:
model_name = "ml6team/keyphrase-extraction-kbir-inspec"
extractor = KeyphraseExtractionPipeline(model=model_name)
from numpy import ndarray
result: ndarray = extractor(doc)
list_result:list[str] = result.tolist()
return list_result
import guidance
from guidance import gen
from guidance import gen
@guidance()
def explanationGuide(lm, summary: str, keywords: list[str]):
lm += f"""\
System: You are an assistant explaining topics to students.
Based on the following summary create a detailed text explaining each of the given keywords.
Your text must be at least two paragraphs per keyword.
Don't directly refer to the context text, pretend like you already knew the context information.
Not all keywords have to be explained, only the ones you consider worth explaining.
You don't have to answer with the keywords in the order you're provided. You can reorder them as you think is appropriate.
Summary: {summary}
Keywords: {", ".join(keywords)}
Answer:\n{gen(name="answer")}"""
return lm
@guidance()
def explainKeyword(lm, summary: str, keyword: str):
lm += f"""\
System: You are an assistant explaining topics to students.
Based on the following summary create a detailed text explaining the given keyword.
Write at least one paragraphs of text.
Don't directly refer to the context text, pretend like you already knew the context information.
You don't have to answer with the keywords in the order you're provided. You can reorder them as you think is appropriate.
Summary: {summary}
{keyword}:
{gen(name="explanation",max_tokens=150,stop="`")}
```"""
return lm
def explainer():
doc_id = "b53998910b5a91c141f890fa76fbcb7f"
# summary = summarize_doc(doc_id)
summary = "Separating an application into distinct layers can promote maintainability and scalability by allowing each layer to be modified independently. This approach, known as the Model-View-Controller (MVC) pattern, has gained popularity for designing web applications and GUIs. By separating an application into three interconnected components for data, presentation, and logic, developers can easily modify or replace individual components as needed, allowing for greater flexibility and adaptability in the application's development and maintenance. This approach enables scalability and resilience by allowing each service to be deployed independently, which is particularly useful when adopting new technologies. By using this pattern, developers can ensure that their applications remain responsive and adaptable to changing requirements, making it an effective solution for systems that require real-time responsiveness and adaptability."
keywords: list[str] = list(set(get_keywords(summary)))
llm = create_llm_guidance()
print("Keywords:", keywords)
answers: list[dict[str,str]] = []
for keyword in keywords[:1]:
print("Explaining:",keyword)
lm = llm + explainKeyword(summary=summary, keyword=keyword)
answer = str(lm["explanation"]).strip()
answers.append({keyword:answer})
import json
result = json.dumps(answers,indent=4)
print("Result:")
print(result)
with open("result.json","w") as f:
f.write(result)
for answer in answers:
(keyword, explanation), = answer.items()  # each entry is a single-key dict
print(keyword)
print(explanation, end="\n\n")
if __name__ == "__main__":
explainer() | [
"Human: You are an assistant summarizing document text.\nI want you to summarize the text as best as you can in less than four paragraphs but atleast two paragraphs:\n\nText: PLACEHOLDER\n\nAnswer:",
"Human: You are an assistant summarizing document text.\nUse the following pieces of retrieved context to improve the summary text. \nIf you can't improve it simply return the old.\nThe new summary may only be up to four paragraphs but at least two paragraphs.\nDon't directly refer to the context text, pretend like you already knew the context information.\n\nSummary: PLACEHOLDER\n\nContext: PLACEHOLDER\n\nAnswer:"
] |
2024-01-10 | knobz12/D0020E-project-course | backend~modules~ai~summarizer.py | """
Creating summary of document(s) in the database
# Improvements:
* Improve output format. Maybe JSON using the guidance library
# Optional
* Argument for document id in database to create summary for
"""
from langchain.vectorstores import Chroma
from modules.ai.utils.llm import create_llm
from modules.ai.utils.vectorstore import create_vectorstore
def summarize_doc(id: str) -> str:
llm = create_llm()
vectorstore = create_vectorstore()
docs = vectorstore.get(limit=100,include=["metadatas"],where={"id":id})
print(docs)
print("doc count:",len(docs['ids']))
results: list[str] = []
texts = ""
for (idx, meta) in enumerate(docs["metadatas"]):
text =meta["text"]
previous_summary: str | None = results[idx - 1] if idx > 0 else None
prompt = """Human: You are an assistant summarizing document text.
I want you to summarize the text as best as you can in less than four paragraphs but atleast two paragraphs and when only include the summaraztion and nothing else:
Also end the summary with by adding "END" and start with "START"
Text: {text}
Answer:""".format(text = text)
prompt_with_previous= """Human: You are an assistant summarizing document text.
Use the following pieces of retrieved context to add to the summary text.
If you can't add to it simply return the old.
The most important part is to add "END" when ending the summary and "START" when starting summary.
The new summary has to be at least two paragraphs.
Dont Ever talk about improving the summary
Don't directly refer to the context text, pretend like you already knew the context information.
Summary: {summary}
Context: {context}
Answer:""".format(summary = previous_summary,context=text)
use_prompt = prompt if previous_summary == None else prompt_with_previous
print(f"Summarizing doc {idx + 1}...")
print(f"Full prompt:")
print(use_prompt + "\n")
result = llm(use_prompt)
results.append(result)
texts = texts + text
print("######################################\n\n\n")
for (idx, result) in enumerate(results):
print(f"Result {idx + 1}")
print(result + "\n\n\n")
print("################################\n")
print("Summary:")
summary = results[-1]
print("\n")
summaryTrim = summary[results[-1].find(start:='START')+len(start):summary.find('END')]
print(summaryTrim)
print("\n")
print("Original text:")
print(texts)
return summaryTrim
from typing import Generator
def summarize_doc_stream(id: str) -> Generator[str, str, None]:
llm = create_llm()
vectorstore = create_vectorstore()
docs = vectorstore.get(limit=100,include=["metadatas"],where={"id":id})
print(docs)
print("doc count:",len(docs['ids']))
results: list[str] = []
texts = ""
for (idx, meta) in enumerate(docs["metadatas"]):
text =meta["text"]
previous_summary: str | None = results[idx - 1] if idx > 0 else None
prompt = """Human: You are an assistant summarizing document text.
I want you to summarize the text as best as you can in less than four paragraphs but atleast two paragraphs and when only include the summaraztion and nothing else:
Text: {text}
Answer:""".format(text = text)
prompt_with_previous= """Human: You are an assistant summarizing document text.
Use the following pieces of retrieved context to add to the summary text.
If you can't add to it simply return the old.
The new summary has to be at least two paragraphs long but never longer than three paragraphs of text.
Dont Ever talk about improving the summary.
Don't directly refer to the context text, pretend like you already knew the context information.
Summary: {summary}
Context: {context}
Answer:""".format(summary = previous_summary,context=text)
use_prompt = prompt if previous_summary == None else prompt_with_previous
print(f"Summarizing doc {idx + 1}...")
print(f"Full prompt:")
print(use_prompt + "\n")
result: str = ""
# Start streaming the final summary only
if idx == len(docs['metadatas']) - 1:
for chunk in llm.stream(use_prompt):
result += chunk
yield chunk
else:
result = llm(use_prompt)
results.append(result)
texts = texts + text
print("######################################\n\n\n")
for (idx, result) in enumerate(results):
print(f"Result {idx + 1}")
print(result + "\n\n\n")
print("################################\n")
print("Summary:")
summary = results[-1]
print("\n")
summaryTrim = summary[results[-1].find(start:='START')+len(start):summary.find('END')]
print(summaryTrim)
print("\n")
print("Original text:")
print(texts)
# return summaryTrim
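# Example usage (illustrative sketch; "some-document-id" is a placeholder for an id present in the vector store):
# for chunk in summarize_doc_stream("some-document-id"):
#     print(chunk, end="", flush=True)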
if __name__ == "__main__":
summarize_doc() | [
"Human: You are an assistant summarizing document text.\nI want you to summarize the text as best as you can in less than four paragraphs but atleast two paragraphs and when only include the summaraztion and nothing else:\nAlso end the summary with by adding \"END\" and start with \"START\"\n\nText: PLACEHOLDER\n\nAnswer:",
"Human: You are an assistant summarizing document text.\nUse the following pieces of retrieved context to add to the summary text. \nIf you can't add to it simply return the old.\nThe new summary has to be at least two paragraphs long but never longer than three paragraphs of text.\nDont Ever talk about improving the summary.\nDon't directly refer to the context text, pretend like you already knew the context information.\n\n\nSummary: PLACEHOLDER\n\nContext: PLACEHOLDER\n\nAnswer:",
"Human: You are an assistant summarizing document text.\nUse the following pieces of retrieved context to add to the summary text. \nIf you can't add to it simply return the old.\nThe most important part is to add \"END\" when ending the summary and \"START\" when starting summary.\nThe new summary has to be at least two paragraphs.\nDont Ever talk about improving the summary\nDon't directly refer to the context text, pretend like you already knew the context information.\n\n\nSummary: PLACEHOLDER\n\nContext: PLACEHOLDER\n\nAnswer:",
"Human: You are an assistant summarizing document text.\nI want you to summarize the text as best as you can in less than four paragraphs but atleast two paragraphs and when only include the summaraztion and nothing else:\n\nText: PLACEHOLDER\n\nAnswer:"
] |
2024-01-10 | knobz12/D0020E-project-course | backend~modules~ai~quizer.py | """
Creating quizes based on document(s)
# Improvements:
* Amount of quiz answers per question
"""
import json
from modules.ai.utils.llm import create_llm_guidance
from modules.ai.utils.vectorstore import *
import guidance
from guidance import select, gen
from modules.files.chunks import *
from typing import Any, Generator
def calculate_questions_per_doc(total_docs: int, total_questions: int, doc_index: int):
"""
Calculate the number of questions to generate for a specific document.
Parameters:
- total_docs (int): Total number of documents.
- total_questions (int): Total number of questions to generate.
- doc_index (int): Index of the current document (0-based).
Returns:
- int: Number of questions to generate for the specified document.
"""
# Ensure that the inputs are valid
if total_docs <= 0 or total_questions <= 0 or doc_index < 0 or doc_index >= total_docs:
raise ValueError("Invalid input parameters")
# Calculate the base number of questions for each document
base_questions_per_doc = total_questions // total_docs
# Calculate the remaining questions after distributing the base questions
remaining_questions = total_questions % total_docs
# Calculate the actual number of questions for the current document
questions_for_current_doc = base_questions_per_doc + (1 if doc_index < remaining_questions else 0)
return questions_for_current_doc
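# Illustrative example of the distribution logic above: with 3 documents and 10 questions,
# each document gets a base of 10 // 3 = 3 questions and the remainder (10 % 3 = 1) goes to the
# earliest documents, so doc_index 0 -> 4 questions, doc_index 1 -> 3, doc_index 2 -> 3.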
@guidance()
def determineQuestionBias(lm, question: str):
lm += f"""I want you to determine if a question for a quiz is factual or opinion based.
For example a question about your opinion about something would be opinion based and a question about a fact is factual.
Question: {question}
Answer: {select(options=["factual", "opinion"])}"""
return lm
@guidance()
def newQuestionJSONGenerator(lm, context: str, answer_count: int, question_count: int):
def gen_answer(idx: int,questionIndex:int) -> str:
answerKey = f"answer{questionIndex}-{idx}"
isAnswerKey = f"isAnswer{questionIndex}-{idx}"
print(answerKey, isAnswerKey)
import random
seed = random.randint(0, 1337)
answer: str = f"""\"{gen(name=answerKey, stop='"',llm_kwargs={"seed": seed})}\": {select(["True", "False"],name=isAnswerKey)}"""
return answer
def gen_question(idx: int):
# question: str = f"""{{ "question":"{gen(f"question{idx}",stop='"',)}", "answers":["""
question: str = f"""\
Question: "{gen(f"question{idx}",stop='"')}"
Answers:
"""
# answers: str = ""
for i in range(0, answer_count):
question += gen_answer(i, idx) + "\n"
#print(question)
return question
questions: str = ""
for i in range(0, question_count):
questions += gen_question(i) + "\n\n"
print("Questions:\n", questions)
res = f"""\
The following is a quiz question in JSON format.
Generate answers based on the provided context. Only ONE of the answers is true, and the others shall be false.
The incorrect answers must be different from each other but still related to the topic.
The questions MUST be different to one another.
Context: {context}
Questions:
{questions}
"""
print(res)
lm += res
return lm
def create_quiz(id: str, questions: int) -> str:
glmm = create_llm_guidance()
vectorstore = create_vectorstore()
docs = vectorstore.get(limit=100,include=["metadatas"],where={"id":id})
print(docs)
obj: dict[str, list[dict[str, Any]]] = {}
for (i, doc) in enumerate(docs["metadatas"]):
qsts_cunt = calculate_questions_per_doc(len(docs["metadatas"]), questions, i)
print(f"Questions for {i}")
result = glmm + newQuestionJSONGenerator(doc["text"], 4, qsts_cunt)
print(str(result))
obj["questions"] = []
for i in range(0, qsts_cunt):
question: str = result[f"question{i}"]
obj["questions"].append({"question" : question, "answers": []})
for j in range(0,4):
answer: str = result[f"answer{i}-{j}"]
correct: str = result[f"isAnswer{i}-{j}"]
obj["questions"][i]["answers"].append({"text": answer, "correct" : False if correct == "False" else True})
result: str = json.dumps(obj)
return result
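# The JSON string returned above has this shape, with one entry per generated question across all
# document chunks:
#   {"questions": [{"question": "...", "answers": [{"text": "...", "correct": true},
#                                                  {"text": "...", "correct": false}, ...]}]}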
def quiz_test():
file_hash = Chunkerizer.upload_chunks_from_file("backend/tests/sample_files/Test_htmls/Architectural Design Patterns.html", "D0072E")
print(create_quiz(file_hash, 3))
if __name__ == "__main__":
quiz_test() | [] |
2024-01-10 | knobz12/D0020E-project-course | backend~modules~ai~utils~llm.py | from modules.ai.utils.args import get_args
from guidance.models._llama_cpp import LlamaCpp
from llama_cpp import Llama
from langchain.llms.llamacpp import LlamaCpp as LangLlamaCpp
llm: LangLlamaCpp = None
guid: LlamaCpp = None
def create_llm_guidance() -> LlamaCpp:
"""Create instance of LLaMA 2 model for use with guidance"""
global guid
if guid != None:
return guid
print("Creating llm instance")
args = get_args()
llm = LlamaCpp(
model=args.model_path,
n_gpu_layers=args.gpu_layers,
n_batch=512,
use_mmap=True,
n_ctx=2048,
f16_kv=True, # MUST set to True, otherwise you will run into problem after a couple of calls
temperature=0,
top_k=40,
top_p=0.1,
repeat_penalty=1.176,
verbose=False,
seed=-1
)
guid = llm
return llm
def create_llm() -> LangLlamaCpp:
"""Create instance of LLaMA 2 model with LlamaCpp API"""
global llm
if llm != None:
return llm
args = get_args()
llm = LangLlamaCpp(
model_path=args.model_path,
n_gpu_layers=args.gpu_layers,
n_batch=512,
use_mmap=True,
n_ctx=2048,
f16_kv=True, # MUST set to True, otherwise you will run into problem after a couple of calls
max_tokens=1000,
temperature=0,
top_k=40,
top_p=1,
repeat_penalty=1/0.85,
verbose=True,
)
return llm
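# Note: both factories above cache their instance in a module-level global (`guid` / `llm`),
# so repeated calls reuse the already-loaded model instead of reloading the weights from disk.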
| [] |
2024-01-10 | team-in-ua/travelfoodie2.0 | indexer~indexer.py | import openai
import os
from dotenv import load_dotenv
import pinecone
import pandas as pd
from tqdm.auto import tqdm
load_dotenv()
openai.api_key = os.environ.get("OPENAI_API_KEY")
model = os.environ.get("MODEL")
index_name = os.environ.get("PINECONE_INDEX_NAME")
pinecone_api_key = os.environ.get("PINECONE_API_KEY")
pinecone_env = os.environ.get("PINECONE_ENV")
data = pd.read_csv('data.csv')
pinecone.init(
api_key=pinecone_api_key,
environment=pinecone_env
)
pinecone.delete_index(index_name)
# check if 'openai' index already exists (only create index if not)
if index_name not in pinecone.list_indexes():
pinecone.create_index(index_name, dimension=1536)
# connect to index
index = pinecone.Index(index_name)
count = 0 # we'll use the count to create unique IDs
batch_size = 10 # process everything in batches of 10
for i in tqdm(range(0, len(data['text']), batch_size)):
# set end position of batch
i_end = min(i+batch_size, len(data['text']))
# get batch of lines and IDs
lines_batch = data['text'][i: i+batch_size].tolist()
countries_batch = data['country'][i: i+batch_size].tolist()
ids_batch = [str(n) for n in range(i, i_end)]
# create embeddings
res = openai.Embedding.create(input=lines_batch, engine=model)
embeds = [record['embedding'] for record in res['data']]
# prep metadata and upsert batch
meta = [{'text': line, 'country': country} for line, country in zip(lines_batch,countries_batch)]
to_upsert = zip(ids_batch, embeds, meta)
# upsert to Pinecone
index.upsert(vectors=list(to_upsert))
# query = "I like kimchi"
# # create the query embedding
# xq = openai.Embedding.create(input=query, engine=MODEL)['data'][0]['embedding']
# # query, returning the top 5 most similar results
# res = index.query([xq], top_k=30, include_metadata=True)
# for match in res['matches']:
# print(f"{match['score']:.5f}: {match['metadata']['country']}") | [] |
2024-01-10 | jess-ee/sinterklaas_gedicht | gedicht2.py | #Importing dependencies
import os
import langchain
import streamlit as st
import time
import re
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
SystemMessage
)
from langchain.chains import LLMChain
apikey = os.getenv('OPENAI_API_KEY')
with open('hobbies.csv', 'r') as f:
hobbies_options = f.read().splitlines()
with open('traits.csv', 'r') as f:
traits_options = f.read().splitlines()
#App framework
st.title('Coolblue Sinterklaas gedichten ✍️')
st.markdown("""
Welkom bij de Coolblue Sinterklaas gedichten generator!
""")
name = st.text_input('Voor wie is dit cadeau?')
gender = st.radio('Selecteer zijn/haar gender:', ['Vrouw', 'Man'])
hobby = st.multiselect('Wat zijn zijn/haar hobby\'s? (selecteer er 2)', hobbies_options, max_selections=2)
traits = st.multiselect('Wat zijn zijn/haar goede eigenschappen? (selecteer er 2)',traits_options,max_selections=2)
product_type_name = st.text_input('Welk cadeau heb je gekocht voor hem/haar?')
product = st.text_area('Vul hier de product informatie in')
#Chatmodel
chat_model= ChatOpenAI(temperature=0.6, model="gpt-4")
#Prompt template
system_message_prompt = SystemMessagePromptTemplate.from_template("""Je schrijft Sinterklaasgedichten voor de klanten van Coolblue.
Schrijf de gedichten op basis van informatie over de klant en het product dat ze hebben gekocht.
Het gedicht moet grappig, positief en blij. Verklap het product niet maar draai er omheen.
Gebruik maximaal 8 regels.
""")
human_message_prompt = HumanMessagePromptTemplate.from_template("""Informatie over de klant:
- Naam: {name}
- Voornaamwoorden: {pronouns}
- Hobbies: {hobby}
- Goede eigenschappen: {traits}
Informatie over het product:
- {product_type_name}
{product}
""")
chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
#LLM Chain
gedicht_chain = LLMChain(llm=chat_model, prompt=chat_prompt, verbose = True)
#show stuff
if st.button('Vraag G-Piet-R om een gedicht!'):
try:
        if name and product_type_name:  # only generate once the key fields are filled in
response = gedicht_chain.run({
"name": name,
"pronouns": 'Zij/haar' if gender == 'Vrouw' else 'Hij/hem',
"hobby": ','.join(hobby),
"traits": ','.join(traits),
"product_type_name": product_type_name,
"product": product,
})
st.text(response)
except Exception as e:
st.error(f"an error occurred:{e}")
| [
"Je schrijft Sinterklaasgedichten voor de klanten van Coolblue.\n\nSchrijf de gedichten op basis van informatie over de klant en het product dat ze hebben gekocht.\n\nHet gedicht moet grappig, positief en blij. Verklap het product niet maar draai er omheen.\n\nGebruik maximaal 8 regels.\n",
"[PLACEHOLDER, PLACEHOLDER]",
"Informatie over de klant:\n- Naam: {name}\n- Voornaamwoorden: {pronouns}\n- Hobbies: {hobby}\n- Goede eigenschappen: {traits}\n\nInformatie over het product:\n- {product_type_name}\n{product}\n"
] |
2024-01-10 | jess-ee/sinterklaas_gedicht | gedicht_V3_engels.py | # Importing dependencies
import os
import langchain
import streamlit as st
import time
import re
import requests
from io import BytesIO
import traceback
from elevenlabs import clone, generate, play, set_api_key
from elevenlabs.api import History
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
SystemMessage
)
from langchain.chains import LLMChain
# Set the Eleven Labs and OpenAI API Key
set_api_key(os.environ.get("ELEVEN_LABS_API_KEY"))
apikey = os.getenv('OPENAI_API_KEY')
with open('Hobbies_English.csv', 'r') as f:
hobbies_options = f.read().splitlines()
with open('Badtraits_English.csv', 'r') as f:
traits_options = f.read().splitlines()
#App framework
st.title('Coolblue Saint Nicholas poems ✍️')
st.markdown("""
Welcome to the Coolblue poem generator!
""")
name = st.text_input('For who is this gift?')
hobby = st.multiselect('What are his or her hobbies? (select at least one option)', hobbies_options, max_selections=2)
traits = st.multiselect('What are his or her bad habits? (select at least one option)',traits_options,max_selections=2)
product_type_name = st.text_input('Which gift have you bought for him or her?')
product = st.text_area('Fill in some of the product information')
#Chatmodel
chat_model= ChatOpenAI(temperature=0.6, model="gpt-4")
#Prompt template
system_message_prompt = SystemMessagePromptTemplate.from_template("""You are writing Saint Nicholas poems for the customers of Coolblue.
Write the poems based on information about the customer and the product they have purchased.
The poem should be funny, positive, and cheerful. Don't reveal the product, but dance around it.
Use a maximum of 8 lines.
Respond with "You're going back to spain with Saint Nicholas" when someone puts in an offensive name.
""")
human_message_prompt = HumanMessagePromptTemplate.from_template("""Informatie about the customer:
- Name: {name}
- Hobbies: {hobby}
- Bad habits {traits}
Information about the product:
- {product_type_name}
- {product}
""")
chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
#LLM Chain
gedicht_chain = LLMChain(llm=chat_model, prompt=chat_prompt, verbose = True)
# show stuff
if st.button('Ask G-Piet-R for a poem!'):
    try: 
        if name and product_type_name:  # only generate once the key fields are filled in
response = gedicht_chain.run({
"name": name,
"hobby": ','.join(hobby),
"traits": ','.join(traits),
"product_type_name": product_type_name,
"product": product,
})
st.text(response)
# Generate audio from text using Eleven Labs
model_id= "eleven_multilingual_v2"
voice_id = os.environ.get("VOICE_ID")
audio = generate(text=response,model=model_id,voice=voice_id)
# Convert the audio to bytes for Streamlit's audio widget
audio_bytes = BytesIO(audio).read()
# Play audio using Streamlit
st.audio(audio_bytes, format='audio/ogg')
except Exception as e:
st.error(f"Error: {type(e).__name__}")
st.error(str(e))
st.text(traceback.format_exc())
| [
"Informatie about the customer:\n- Name: {name}\n- Hobbies: {hobby}\n- Bad habits {traits}\n\nInformation about the product:\n- {product_type_name}\n- {product}\n",
"[PLACEHOLDER, PLACEHOLDER]",
"You are writing Saint Nicholas poems for the customers of Coolblue.\n\nWrite the poems based on information about the customer and the product they have purchased.\n\nThe poem should be funny, positive, and cheerful. Don't reveal the product, but dance around it.\n\nUse a maximum of 8 lines.\n\nRespond with \"You're going back to spain with Saint Nicholas\" when someone puts in an offensive name.\n\n"
] |
2024-01-10 | jess-ee/sinterklaas_gedicht | gedicht_v3.py | # Importing dependencies
import os
import langchain
import streamlit as st
import time
import re
import requests
from io import BytesIO
import traceback
from elevenlabs import clone, generate, play, set_api_key
from elevenlabs.api import History
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
SystemMessage
)
from langchain.chains import LLMChain
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_ENDPOINT"] = "https://api.langchain.plus"
os.environ["LANGCHAIN_API_KEY"] = os.environ.get("LANGCHAIN_API_KEY")
os.environ["LANGCHAIN_PROJECT"] = "sinterklaas-finetuning"
# Set the Eleven Labs API Key (replace with your key)
set_api_key(os.environ.get("ELEVEN_LABS_API_KEY"))
apikey = os.getenv('OPENAI_API_KEY')
with open('hobbies.csv', 'r') as f:
hobbies_options = f.read().splitlines()
with open('badtraits.csv', 'r') as f:
traits_options = f.read().splitlines()
#App framework
st.title('Coolblue Sinterklaas gedichten ✍️')
st.markdown("""
Welkom bij de Coolblue Sinterklaas gedichten generator!
""")
name = st.text_input('Voor wie is dit cadeau?')
hobby = st.multiselect('Wat zijn zijn/haar hobby\'s? (selecteer er 2)', hobbies_options, max_selections=2)
traits = st.multiselect('Wat zijn zijn/haar slechte eigenschappen? (selecteer er 2)',traits_options,max_selections=2)
product_type_name = st.text_input('Welk cadeau heb je gekocht voor hem/haar?')
product = st.text_area('Vul hier de product informatie in')
#Chatmodel
chat_model= ChatOpenAI(temperature=0.6, model="gpt-4")
#Prompt template
system_message_prompt = SystemMessagePromptTemplate.from_template("""Je schrijft Sinterklaasgedichten voor de klanten van Coolblue.
Schrijf de gedichten op basis van informatie over de klant en het product dat ze hebben gekocht.
Het gedicht moet grappig, positief en blij. Verklap het product niet maar draai er omheen.
Gebruik maximaal 8 regels.
Antwoord met "Jij gaat mee in de zak naar Spanje" wanneer iemand een naam ingeeft die beledigend is.
""")
human_message_prompt = HumanMessagePromptTemplate.from_template("""Informatie over de klant:
- Naam: {name}
- Hobbies: {hobby}
- Slechte eigenschappen: {traits}
Informatie over het product:
- {product_type_name}
{product}
""")
chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
#LLM Chain
gedicht_chain = LLMChain(llm=chat_model, prompt=chat_prompt, verbose = True)
# show stuff
if st.button('Vraag G-Piet-R om een gedicht!'):
    try: 
        if name and product_type_name:  # only generate once the key fields are filled in
response = gedicht_chain.run({
"name": name,
"hobby": ','.join(hobby),
"traits": ','.join(traits),
"product_type_name": product_type_name,
"product": product,
})
st.text(response)
# Generate audio from text using Eleven Labs
model_id= "eleven_multilingual_v2"
voice_id = os.environ.get("VOICE_ID")
audio = generate(text=response,model=model_id,voice=voice_id)
# Convert the audio to bytes for Streamlit's audio widget
audio_bytes = BytesIO(audio).read()
# Play audio using Streamlit
st.audio(audio_bytes, format='audio/ogg')
except Exception as e:
st.error(f"Error: {type(e).__name__}")
st.error(str(e))
st.text(traceback.format_exc())
| [
"Je schrijft Sinterklaasgedichten voor de klanten van Coolblue.\n\nSchrijf de gedichten op basis van informatie over de klant en het product dat ze hebben gekocht.\n\nHet gedicht moet grappig, positief en blij. Verklap het product niet maar draai er omheen.\n\nGebruik maximaal 8 regels.\n\nAntwoord met \"Jij gaat mee in de zak naar Spanje\" wanneer iemand een naam ingeeft die beledigend is.\n",
"Informatie over de klant:\n- Naam: {name}\n- Hobbies: {hobby}\n- Slechte eigenschappen: {traits}\n\nInformatie over het product:\n- {product_type_name}\n{product}\n",
"[PLACEHOLDER, PLACEHOLDER]"
] |
2024-01-10 | jess-ee/sinterklaas_gedicht | gedicht_v3_gender.py | #Importing dependencies
import os
import langchain
import streamlit as st
import time
import re
import requests
from io import BytesIO
import traceback
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
SystemMessage
)
from langchain.chains import LLMChain
apikey = os.getenv('OPENAI_API_KEY')
with open('hobbies.csv', 'r') as f:
hobbies_options = f.read().splitlines()
with open('badtraits.csv', 'r') as f:
traits_options = f.read().splitlines()
#App framework
st.title('Coolblue Sinterklaas gedichten ✍️')
st.markdown("""
Welkom bij de Coolblue Sinterklaas gedichten generator!
""")
name = st.text_input('Voor wie is dit cadeau?')
gender = st.radio('Selecteer zijn/haar gender:', ['Vrouw', 'Man'])
hobby = st.multiselect('Wat zijn zijn/haar hobby\'s? (selecteer er 2)', hobbies_options, max_selections=2)
traits = st.multiselect('Wat zijn zijn/haar slechte eigenschappen? (selecteer er 2)',traits_options,max_selections=2)
product_type_name = st.text_input('Welk cadeau heb je gekocht voor hem/haar?')
product = st.text_area('Vul hier de product informatie in')
#Chatmodel
chat_model= ChatOpenAI(temperature=0.6, model="gpt-4")
#Prompt template
system_message_prompt = SystemMessagePromptTemplate.from_template("""Je schrijft Sinterklaasgedichten voor de klanten van Coolblue.
Schrijf de gedichten op basis van informatie over de klant en het product dat ze hebben gekocht.
Het gedicht moet grappig, positief en blij. Verklap het product niet maar draai er omheen.
Gebruik maximaal 8 regels.
Antwoord met "Jij gaat mee in de zak naar Spanje" wanneer iemand een naam ingeeft die beledigend is.
""")
human_message_prompt = HumanMessagePromptTemplate.from_template("""Informatie over de klant:
- Naam: {name}
- Voornaamwoorden: {gender}
- Hobbies: {hobby}
- Slechte eigenschappen: {traits}
Informatie over het product:
- {product_type_name}
{product}
""")
chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
#LLM Chain
gedicht_chain = LLMChain(llm=chat_model, prompt=chat_prompt, verbose = True)
#show stuff
if st.button('Vraag G-Piet-R om een gedicht!'):
try:
        if name and product_type_name:  # only generate once the key fields are filled in
response = gedicht_chain.run({
"name": name,
"pronouns": 'Zij/haar' if gender == 'Vrouw' else 'Hij/hem',
"hobby": ','.join(hobby),
"traits": ','.join(traits),
"product_type_name": product_type_name,
"product": product,
})
st.text(response)
except Exception as e:
st.error(f"Error: {type(e).__name__}")
st.error(str(e))
st.text(traceback.format_exc())
| [
"Je schrijft Sinterklaasgedichten voor de klanten van Coolblue.\n\nSchrijf de gedichten op basis van informatie over de klant en het product dat ze hebben gekocht.\n\nHet gedicht moet grappig, positief en blij. Verklap het product niet maar draai er omheen.\n\nGebruik maximaal 8 regels.\n\nAntwoord met \"Jij gaat mee in de zak naar Spanje\" wanneer iemand een naam ingeeft die beledigend is.\n",
"[PLACEHOLDER, PLACEHOLDER]",
"Informatie over de klant:\n- Naam: {name}\n- Voornaamwoorden: {gender}\n- Hobbies: {hobby}\n- Slechte eigenschappen: {traits}\n\nInformatie over het product:\n- {product_type_name}\n{product}\n"
] |
2024-01-10 | Tranvon/chat_bot | BotGPT~management~commands~start_bot.py | import logging
import asyncio
import openai
from aiogram import Bot, types
from aiogram.dispatcher import Dispatcher
from aiogram.utils import executor
from django.contrib.sessions.backends.base import UpdateError
from bot_web_db.settings import TOKEN, OPENAI_TOKEN
from asgiref.sync import sync_to_async
from django.core.management.base import BaseCommand
from django.conf import settings
from BotGPT.models import Dialog, Message
bot = Bot(token=TOKEN)
dp = Dispatcher(bot)
openai.api_key = OPENAI_TOKEN
# Configure the logger
logging.basicConfig(level=logging.ERROR)
print('Start Bot!')
class Command(BaseCommand):
help = 'Telegram bot setup command'
def handle(self, *args, **options):
sync_to_async(executor.start_polling(dp, skip_updates=True))
# Configure the logger
logging.basicConfig(level=logging.ERROR)
# Create the logger object
logger = logging.getLogger('my_logger')
logger.setLevel(logging.ERROR)
# Create a file handler
file_handler = logging.FileHandler('error.log')
# Set the record format for the log file
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
file_handler.setFormatter(formatter)
# Add the handler to the logger
logger.addHandler(file_handler)
# Exception handler for the bot
@dp.errors_handler()
async def error_handler(update, exception):
    # Log the exception to the file
    logger.error(f"An error occurred: {exception}")
    if str(exception).find('Please try again in 20s') != -1:
        # Send an error message to the chat or take other action
        await update.message.reply("Я не такой быстрый, столько много сообщений не приму!.")
    else:
        # Send an error message to the chat or take other action
await update.message.reply("Произошла ошибка. Пожалуйста, повторите позже.")
# Error handler for asynchronous operation timeouts
@dp.errors_handler(exception=asyncio.TimeoutError)
async def timeout_error_handler(update, exception):
    # Log the error
    logger.error(f"Timeout error occurred: {exception}")
    # Send an error message to the chat or take other action
    await update.message.reply("Истекло время ожидания. Пожалуйста, повторите позже.")
# Network connection error handler
@dp.errors_handler(exception=ConnectionError)
async def connection_error_handler(update, exception):
    # Log the error
    logger.error(f"Connection error occurred: {exception}")
    # Send an error message to the chat or take other action
    await update.message.reply("Ошибка подключения. Пожалуйста, повторите позже.")
# Update error handler
@dp.errors_handler(exception=UpdateError)
async def update_error_handler(update, exception):
    # Log the error
    logger.error(f"Update error occurred: {exception}")
    # Send an error message to the chat or take other action
    await update.message.reply("Ошибка обновления. Пожалуйста, повторите позже.")
@sync_to_async
def save_user_message(dialog, user_input):
role_user = "user"
dialog_obj, _ = Dialog.objects.get_or_create(username=f"{dialog}", role=role_user)
user_message = Message(dialog=dialog_obj, role=role_user, content=user_input)
user_message.save()
@sync_to_async
def save_assistant_message(dialog, answer):
role_assistant = "assistant"
dialog_obj, _ = Dialog.objects.get_or_create(username=f"{dialog}", role=role_assistant)
assistant_message = Message(dialog=dialog_obj, role=role_assistant, content=answer)
assistant_message.save()
@dp.message_handler(commands=['delete_dialog'])
async def delete_dialog(message: types.Message):
dialog_str = f"{message.from_user.username}"
    # Get the dialogs that need to be deleted
    dialogs = await sync_to_async(Dialog.objects.filter)(username=dialog_str)
    # Convert the asynchronous QuerySet into a synchronous list
    dialogs = await sync_to_async(list)(dialogs)
    # Delete each dialog with a synchronous delete() call
    for dialog in dialogs:
        await sync_to_async(dialog.delete)()
    # Get the messages linked to the deleted dialogs
    messages = await sync_to_async(Message.objects.filter)(dialog__username=dialog_str)
    # Convert the asynchronous QuerySet into a synchronous list
    messages = await sync_to_async(list)(messages)
    # Delete each message with a synchronous delete() call
    for message in messages:
        await sync_to_async(message.delete)()
await message.reply("Диалог с ассистентом удален.")
@dp.message_handler()
async def handle_message(message: types.Message):
    if message.text == "/delete_dialog":
        await delete_dialog(message)
        return  # do not forward the command itself to the model
    user_input = message.text
    dialog_str = f"{message.from_user.username}"
    await save_user_message(dialog_str, user_input)
    # Get the previous messages of the dialog from the database
    dialog_objs = await sync_to_async(Dialog.objects.filter)(username=f"{dialog_str}")
    previous_messages = await sync_to_async(Message.objects.filter)(dialog__in=dialog_objs)
    # Build the message list for the request to the OpenAI model
messages = await sync_to_async(
lambda: [
{"role": "system", "content": "You are a helpful assistant"},
] + [
{"role": message.role, "content": message.content}
for message in previous_messages
] + [
{"role": "user", "content": user_input}
]
)()
    # Send the request to the GPT-3.5 Turbo model with the full dialog
    response = await sync_to_async(openai.ChatCompletion.create)(
        model="gpt-3.5-turbo-0301",
        messages=messages
    )
    # Get the answer from the model
    answer = response.choices[0].message.content
    await save_assistant_message(dialog_str, answer)
    # Send the answer to the user
await message.answer(answer)
| [
"You are a helpful assistant"
] |
2024-01-10 | besartmujeci/544-Project | _base.py | import random
from abc import ABC, abstractmethod
from collections import Counter
from typing import Any, List, Optional, Union
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, ClassifierMixin
from tqdm import tqdm
from skllm.completions import get_chat_completion
from skllm.openai.chatgpt import construct_message
from skllm.openai.mixin import OpenAIMixin as _OAIMixin
from skllm.utils import extract_json_key
from skllm.utils import to_numpy as _to_numpy
class BaseClassifier(ABC, BaseEstimator, ClassifierMixin):
default_label: Optional[str] = "Random"
def _to_np(self, X):
"""Converts X to a numpy array.
Parameters
----------
X : Any
The input data to convert to a numpy array.
Returns
-------
np.ndarray
The input data as a numpy array.
"""
return _to_numpy(X)
@abstractmethod
def _predict_single(self, x: str) -> Any:
"""Predicts the class of a single input."""
pass
def fit(
self,
X: Optional[Union[np.ndarray, pd.Series, List[str]]],
y: Union[np.ndarray, pd.Series, List[str], List[List[str]]],
):
"""Extracts the target for each datapoint in X.
Parameters
----------
X : Optional[Union[np.ndarray, pd.Series, List[str]]]
The input array data to fit the model to.
y : Union[np.ndarray, pd.Series, List[str], List[List[str]]]
The target array data to fit the model to.
"""
X = self._to_np(X)
self.classes_, self.probabilities_ = self._get_unique_targets(y)
return self
def predict(self, X: Union[np.ndarray, pd.Series, List[str]]):
"""Predicts the class of each input.
Parameters
----------
X : Union[np.ndarray, pd.Series, List[str]]
The input data to predict the class of.
Returns
-------
List[str]
"""
X = self._to_np(X)
predictions = []
for i in tqdm(range(len(X))):
predictions.append(self._predict_single(X[i]))
return predictions
def _get_unique_targets(self, y: Any):
labels = self._extract_labels(y)
counts = Counter(labels)
total = sum(counts.values())
classes, probs = [], []
for l, c in counts.items():
classes.append(l)
probs.append(c / total)
return classes, probs
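    # For example, labels ["spam", "ham", "spam", "spam"] yield classes ["spam", "ham"] with
    # probabilities [0.75, 0.25], which later drive the "Random" default-label fallback.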
def _extract_labels(self, y: Any) -> List[str]:
"""Return the class labels as a list.
Parameters
----------
y : Any
Returns
-------
List[str]
"""
if isinstance(y, (pd.Series, np.ndarray)):
labels = y.tolist()
else:
labels = y
return labels
def _get_default_label(self):
"""Returns the default label based on the default_label argument."""
if self.default_label == "Random":
return random.choices(self.classes_, self.probabilities_)[0]
else:
return self.default_label
class _BaseZeroShotGPTClassifier(BaseClassifier, _OAIMixin):
"""Base class for zero-shot classifiers.
Parameters
----------
openai_key : Optional[str] , default : None
Your OpenAI API key. If None, the key will be read from the SKLLM_CONFIG_OPENAI_KEY environment variable.
openai_org : Optional[str] , default : None
Your OpenAI organization. If None, the organization will be read from the SKLLM_CONFIG_OPENAI_ORG
environment variable.
openai_model : str , default : "gpt-3.5-turbo"
The OpenAI model to use. See https://beta.openai.com/docs/api-reference/available-models for a list of
available models.
default_label : Optional[Union[List[str], str]] , default : 'Random'
The default label to use if the LLM could not generate a response for a sample. If set to 'Random' a random
label will be chosen based on probabilities from the training set.
prompt_template: str , A formattable string with the following placeholders: {x} - the sample to classify, {labels} - the list of labels.
If None, the default prompt template will be used.
"""
def __init__(
self,
openai_key: Optional[str] = None,
openai_org: Optional[str] = None,
openai_model: str = "gpt-3.5-turbo",
default_label: Optional[Union[List[str], str]] = "Random",
prompt_template: Optional[str] = None,
):
self._set_keys(openai_key, openai_org)
self.openai_model = openai_model
self.default_label = default_label
self.prompt_template = prompt_template
@abstractmethod
def _get_prompt(self, x: str) -> str:
"""Generates a prompt for the given input."""
pass
def _get_chat_completion(self, x):
prompt = self._get_prompt(x)
msgs = []
msgs.append(construct_message("system", "You are a text classification model."))
msgs.append(construct_message("user", prompt))
completion = get_chat_completion(
msgs, self._get_openai_key(), self._get_openai_org(), self.openai_model
)
return completion
def _predict_single(self, x):
"""Predicts the labels for a single sample.
Should work for all (single label) GPT based classifiers.
"""
completion = self._get_chat_completion(x)
try:
label = str(
extract_json_key(
completion["choices"][0]["message"]["content"], "label"
)
)
except Exception as e:
print(completion)
print(f"Could not extract the label from the completion: {str(e)}")
label = ""
if label not in self.classes_:
label = label.replace("'", "").replace('"', "")
if label not in self.classes_: # try again
label = self._get_default_label()
return label
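    # The completion content is expected to be JSON with a "label" key, e.g. {"label": "sports"};
    # if the parsed value is not one of self.classes_ (even after stripping quotes), the sample
    # falls back to _get_default_label().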
class _BasePaLMClassifier(BaseClassifier):
def __init__(self, model: str, default_label: Optional[str] = "Random"):
self.model = model
self.default_label = default_label
| [] |
2024-01-10 | besartmujeci/544-Project | scikit-llm~skllm~openai~base_gpt.py | from __future__ import annotations
from typing import Any
import numpy as np
import pandas as pd
from numpy import ndarray
from sklearn.base import BaseEstimator as _BaseEstimator
from sklearn.base import TransformerMixin as _TransformerMixin
from tqdm import tqdm
from skllm.openai.chatgpt import construct_message, get_chat_completion
from skllm.openai.mixin import OpenAIMixin as _OAIMixin
from skllm.utils import to_numpy as _to_numpy
class BaseZeroShotGPTTransformer(_BaseEstimator, _TransformerMixin, _OAIMixin):
system_msg = "You are an scikit-learn transformer."
default_output = "Output is unavailable"
def _get_chat_completion(self, X):
"""Gets the chat completion for the given input using open ai API.
Parameters
----------
X : str
Input string
Returns
-------
str
"""
prompt = self._get_prompt(X)
msgs = []
msgs.append(construct_message("system", self.system_msg))
msgs.append(construct_message("user", prompt))
completion = get_chat_completion(
msgs, self._get_openai_key(), self._get_openai_org(), self.openai_model
)
try:
return completion.choices[0].message["content"]
except Exception as e:
print(f"Skipping a sample due to the following error: {str(e)}")
return self.default_output
def fit(
self, X: Any = None, y: Any = None, **kwargs: Any
) -> BaseZeroShotGPTTransformer:
"""Fits the model to the data.
Parameters
----------
X : Any, optional
y : Any, optional
kwargs : dict, optional
Returns
-------
self : BaseZeroShotGPTTransformer
"""
return self
def transform(
self, X: np.ndarray | pd.Series | list[str], **kwargs: Any
) -> ndarray:
"""Converts a list of strings using the open ai API and a predefined
prompt.
Parameters
----------
X : Union[np.ndarray, pd.Series, List[str]]
Returns
-------
ndarray
"""
X = _to_numpy(X)
transformed = []
for i in tqdm(range(len(X))):
transformed.append(self._get_chat_completion(X[i]))
transformed = np.asarray(transformed, dtype=object)
return transformed
def fit_transform(
self, X: np.ndarray | pd.Series | list[str], y=None, **fit_params
) -> ndarray:
"""Fits and transforms a list of strings using the transform method.
This is modelled to function as the sklearn fit_transform method.
Parameters
----------
X : np.ndarray, pd.Series, or list
Returns
-------
ndarray
"""
return self.fit(X, y).transform(X)
| [] |
2024-01-10 | bgalvao/vizdoom-agent | agents~dqn~dqn.py | import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.transforms as T
import numpy as np
import random
from random import sample
"""
Deep Q-Network Agent
[x] implements replay memory
[] actor-critic
[] distributed
"""
# helpers
def conv_size(in_size, padding, kernel_size, stride):
return (in_size + 2*padding - kernel_size) / stride + 1
def pool_size(in_size, kernel_size, stride):
return (in_size - kernel_size) / stride + 1
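# Quick sanity check for the helpers above (illustrative): a 30-pixel input through a 6x6 kernel
# with stride 3 and no padding gives conv_size(30, 0, 6, 3) == 9.0 output positions; both helpers
# return floats, so round down when sizing layers.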
# !!NOTICE!!
# based on https://github.com/mwydmuch/ViZDoom/blob/master/examples/python/learning_pytorch.py
# !!! -> by E. Culurciello, August 2017 <- !!!
class ReplayMemory:
def __init__(self, capacity=10000, res=(40, 30), color_channels=3):
shape = (capacity, color_channels, res[0], res[1])
self.s1 = np.zeros(shape, dtype=np.float32)
self.s2 = np.zeros(shape, dtype=np.float32)
self.a = np.zeros(capacity, dtype=np.int32)
self.r = np.zeros(capacity, dtype=np.float32)
self.is_terminal = np.zeros(capacity, dtype=np.float32)
self.capacity = capacity
self.size = 0
self.pos = 0
def add_transition(self, s1, action, reward, s2, is_terminal):
self.s1[self.pos, 0, :, :] = s1
if not is_terminal:
self.s2[self.pos, 0, :, :] = s2
self.a[self.pos] = action
self.is_terminal[self.pos] = is_terminal
self.r[self.pos] = reward
self.pos = (self.pos + 1) % self.capacity
self.size = min(self.size + 1, self.capacity)
def get_sample(self, sample_size):
i = sample(range(0, self.size), sample_size)
        return self.s1[i], self.a[i], self.r[i], self.s2[i], self.is_terminal[i]
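# Illustrative usage of the replay buffer above (shapes follow the defaults):
#   mem = ReplayMemory(capacity=10000, res=(40, 30), color_channels=3)
#   mem.add_transition(s1, action=1, reward=0.5, s2=s2, is_terminal=False)
#   s1_b, a_b, r_b, s2_b, term_b = mem.get_sample(64)   # random minibatch of 64 transitions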
# base parameters
parameters = {
'q': {'learning_rate': 0.00025,
'discount_factor': 0.99,
'epochs': 20,
'learning_steps_per_epoch': 2000,
'replay_memory_size': 10000
},
'nn': {'batch_size': 64}
}
def get_torch_var(ndarray):
return Variable(torch.from_numpy(ndarray))
class DQN(nn.Module):
def __init__(self, action_space_size, color_channels=3):
super(DQN, self).__init__()
self.action_space_size = action_space_size # from OpenAI Gym
self.color_channels = color_channels
self.params = parameters
        self.epoch = 0
        self.steps = 0  # number of actions selected so far, used for epsilon decay
# neural net configuration
self.conv1 = nn.Conv2d(self.color_channels, 8, kernel_size=6, stride=3)
self.bn1 = nn.BatchNorm2d(8)
self.conv2 = nn.Conv2d(8, 8, kernel_size=3, stride=2)
self.bn2 = nn.BatchNorm2d(8)
self.fc1 = nn.Linear(192, 128)
self.fc2 = nn.Linear(128, action_space_size)
    def forward(self, x):  # overrides nn.Module.forward
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = x.view(-1, 192) # flattens out
x = F.relu(self.fc1(x))
return self.fc2(x)
def _eval_dims(self, input_size):
ex = Variable(torch.randn(1, 3, input_size, input_size))
modules = [mod for mod in self.__dict__['_modules']][:-1]
# excludes last linear module
for module in modules:
ex = self.__dict__['_modules'][module](ex)
print('second-last layer is of shape', ex.size())
dims = ex.size(1) * ex.size(2) * ex.size(3)
print('the flattened layer will have', dims, 'dimensions')
del(ex)
    def select_action(self, state, eps_start=1.0, eps_end=0.1, eps_decay=200):
        # Epsilon-greedy selection with exponentially decayed epsilon.
        # eps_start/eps_end/eps_decay are assumed defaults; they are not defined in `parameters`.
        decayed_eps = eps_end + (eps_start - eps_end) * \
            np.exp(-1. * self.steps / eps_decay)
        self.steps += 1
        if random.random() > decayed_eps:
            # exploit: pick the action with the highest predicted Q-value
            return self(Variable(state, volatile=True).type(torch.FloatTensor)) \
                .data.max(1)[1] \
                .view(1, 1)
        else:
            # explore: pick a uniformly random action
            return torch.LongTensor([[random.randrange(self.action_space_size)]])
class DQN_Agent(DQN):
def __init__(self, action_space, color_channels = 3):
super(DQN_Agent, self).__init__(action_space, color_channels)
self.memory = ReplayMemory()
self.criterion = nn.MSELoss()
        self.optimizer = torch.optim.SGD(self.parameters(), lr=self.params['q']['learning_rate'])
    def get_q_values(self, state):
        # state -> numpy array of shape (batch, channels, height, width)
        state = get_torch_var(state)
        return self(state)

    def get_best_action(self, state):
        q = self.get_q_values(state)
        m, index = torch.max(q, 1)
        action = index.data.numpy()[0]
        return action
def learn(self, state, target_q):
s1 = get_torch_var(state)
target_q = get_torch_var(target_q)
output = self(s1)
loss = self.criterion(output, target_q)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
return loss
    def learn_from_memory(self):
        """ Learns from a random minibatch of stored transitions. """
        # Get a random minibatch from the replay memory
        # and learn from it instead of the current game state
        batch_size = self.params['nn']['batch_size']
        discount_factor = self.params['q']['discount_factor']
        if self.memory.size > batch_size:
            s1, a, r, s2, is_terminal = self.memory.get_sample(batch_size)
            q = self.get_q_values(s2).data.numpy()
            q2 = np.max(q, axis=1)
            target_q = self.get_q_values(s1).data.numpy()
            # target differs from q only for the selected action. The following means:
            # target_Q(s,a) = r + gamma * max Q(s2,_) if is_terminal else r
            target_q[np.arange(target_q.shape[0]), a] = r + discount_factor * (1 - is_terminal) * q2
            self.learn(s1, target_q)
    def get_exploration_rate(self):
        # eps standing for epsilon, exploration rate
        start_eps = 1.0
        end_eps = 0.1
        epochs = self.params['q']['epochs']
        const_eps_epochs = .1 * epochs
        eps_decay_epochs = .6 * epochs
        if self.epoch < const_eps_epochs:
            return start_eps
        elif self.epoch < eps_decay_epochs:
            return start_eps - (self.epoch - const_eps_epochs) / \
                   (eps_decay_epochs - const_eps_epochs) * (start_eps - end_eps)
        else:
            return end_eps
    def perform_learning_step(self, game):
        s1 = game.get_screen()
        eps = self.get_exploration_rate()
        if random.random() <= eps:  # with probability eps
            a = random.randrange(self.action_space_size)  # explore
        else:  # exploit
            s1 = s1.reshape([1, 1, game.down_res[0], game.down_res[1]])
            a = self.get_best_action(s1)
        # `actions` is assumed to be defined elsewhere as the list of one-hot action vectors
        r = game.make_action(actions[a])
        is_terminal = game.is_game_finished()
        s2 = game.get_screen()
        self.memory.add_transition(s1, a, r, s2, is_terminal)
        self.learn_from_memory()
if __name__ == '__main__':
dqn_agent = DQN_Agent(2)
dqn_agent._eval_dims(100) | [] |
2024-01-10 | noor-e-alam/app-odoo | app_chatgpt~models~mail_channel.py | # -*- coding: utf-8 -*-
import openai
import requests,json
import datetime
# from transformers import TextDavinciTokenizer, TextDavinciModel
from odoo import api, fields, models, _
from odoo.exceptions import UserError
import logging
_logger = logging.getLogger(__name__)
class Channel(models.Model):
_inherit = 'mail.channel'
@api.model
def get_openai(self, gpt_id, provider, api_key, ai_model, data, user="Odoo"):
if provider == 'azure':
res = gpt_id.get_openai(data)
return res
headers = {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"}
R_TIMEOUT = 30
o_url = gpt_id.endpoint or "https://api.openai.com/v1/chat/completions"
        # Handle the OpenAI API below
        # Fetch model information
# list_model = requests.get("https://api.openai.com/v1/models", headers=headers)
# model_info = requests.get("https://api.openai.com/v1/models/%s" % ai_model, headers=headers)
if ai_model == 'dall-e2':
            # todo: handle the image engine, mainly returning the result parameters back to the chat
# image_url = response['data'][0]['url']
# https://platform.openai.com/docs/guides/images/introduction
pdata = {
"prompt": data,
"n": 3,
"size": "1024x1024",
}
            return '建设中'  # "under construction" placeholder
elif ai_model in ['gpt-3.5-turbo', 'gpt-3.5-turbo-0301']:
pdata = {
"model": ai_model,
"messages": [{"role": "user", "content": data}],
"temperature": 0.9,
"max_tokens": gpt_id.max_length or 1000,
"top_p": 1,
"frequency_penalty": 0.0,
"presence_penalty": 0.6,
"user": user,
"stop": ["Human:", "AI:"]
}
_logger.warning('=====================open input pdata: %s' % pdata)
response = requests.post(o_url, data=json.dumps(pdata), headers=headers, timeout=R_TIMEOUT)
res = response.json()
if 'choices' in res:
# for rec in res:
# res = rec['message']['content']
res = '\n'.join([x['message']['content'] for x in res['choices']])
return res
else:
pdata = {
"model": ai_model,
"prompt": data,
"temperature": 0.9,
"max_tokens": gpt_id.max_length or 1000,
"top_p": 1,
"frequency_penalty": 0.0,
"presence_penalty": 0.6,
"user": user,
"stop": ["Human:", "AI:"]
}
response = requests.post(o_url, data=json.dumps(pdata), headers=headers, timeout=R_TIMEOUT)
res = response.json()
if 'choices' in res:
res = '\n'.join([x['text'] for x in res['choices']])
return res
return "获取结果超时,请重新跟我聊聊。"
@api.model
def get_openai_context(self, channel_id, partner_chatgpt, current_prompt, seconds=600):
afterTime = fields.Datetime.now() - datetime.timedelta(seconds=seconds)
message_model = self.env['mail.message'].sudo()
prompt = [f"Human:{current_prompt}\nAI:", ]
domain = [('res_id', '=', channel_id),
('model', '=', 'mail.channel'),
('message_type', '!=', 'user_notification'),
('parent_id', '=', False),
('date', '>=', afterTime),
('author_id', '=', self.env.user.partner_id.id)]
messages = message_model.with_context(tz='UTC').search(domain, order="id desc", limit=15)
# print('domain:',domain)
# print('messages:',messages)
for msg in messages:
ai_msg = message_model.search([("res_id", "=", channel_id),
('model', '=', msg.model),
('parent_id', '=', msg.id),
('author_id', '=', partner_chatgpt),
('body', '!=', '<p>获取结果超时,请重新跟我聊聊。</p>')])
if ai_msg:
prompt.append("Human:%s\nAI:%s" % (
msg.body.replace("<p>", "").replace("</p>", ""), ai_msg.body.replace("<p>", "").replace("</p>", "")))
# print(msg.body.replace("<p>", "").replace("</p>", ""))
# print(ai_msg.body.replace("<p>", "").replace("</p>", ""))
else:
_logger.error(f"not find for id:{str(msg.id)}")
return '\n'.join(prompt[::-1])
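    # The context string returned above is a plain chat transcript, oldest exchange first and the
    # new question last, e.g.:
    #   Human:<older question>
    #   AI:<older answer>
    #   Human:<current prompt>
    #   AI: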
def get_chatgpt_answer(self, prompt, partner_name):
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
temperature=0.6,
max_tokens=3000,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
user=partner_name,
)
res = response['choices'][0]['text']
return res
def _notify_thread(self, message, msg_vals=False, **kwargs):
rdata = super(Channel, self)._notify_thread(message, msg_vals=msg_vals, **kwargs)
# print(f'rdata:{rdata}')
to_partner_id = self.env['res.partner']
user_id = self.env['res.users']
author_id = msg_vals.get('author_id')
gpt_id = self.env['ai.robot']
channel_type = self.channel_type
if channel_type == 'chat':
channel_partner_ids = self.channel_partner_ids
to_partner_id = channel_partner_ids - message.author_id
user_id = to_partner_id.mapped('user_ids').filtered(lambda r: r.gpt_id)[:1]
if user_id:
gpt_policy = user_id.gpt_policy
gpt_wl_users = user_id.gpt_wl_users
is_allow = message.create_uid.id in gpt_wl_users.ids
if gpt_policy == 'all' or (gpt_policy == 'limit' and is_allow):
gpt_id = user_id.gpt_id
elif channel_type in ['group', 'channel']:
# partner_ids = @ ids
partner_ids = list(msg_vals.get('partner_ids'))
if partner_ids:
partners = self.env['res.partner'].search([('id', 'in', partner_ids)])
# user_id = user has binded gpt robot
user_id = partners.mapped('user_ids').filtered(lambda r: r.gpt_id)[:1]
if user_id:
gpt_policy = user_id.gpt_policy
gpt_wl_users = user_id.gpt_wl_users
is_allow = message.create_uid.id in gpt_wl_users.ids
to_partner_id = user_id.partner_id
if gpt_policy == 'all' or (gpt_policy == 'limit' and is_allow):
gpt_id = user_id.gpt_id
chatgpt_channel_id = self.env.ref('app_chatgpt.channel_chatgpt')
# print('author_id:',author_id)
# print('partner_chatgpt.id:',partner_chatgpt.id)
prompt = msg_vals.get('body')
# print('prompt:', prompt)
# print('-----')
if not prompt:
return rdata
# api_key = self.env['ir.config_parameter'].sudo().get_param('app_chatgpt.openapi_api_key')
api_key = ''
if gpt_id:
api_key = gpt_id.openapi_api_key
if not api_key:
_logger.warning(_("ChatGPT Robot【%s】have not set open api key."))
return rdata
try:
openapi_context_timeout = int(self.env['ir.config_parameter'].sudo().get_param('app_chatgpt.openapi_context_timeout')) or 600
except:
openapi_context_timeout = 600
openai.api_key = api_key
partner_name = ''
# print(msg_vals)
# print(msg_vals.get('record_name', ''))
# print('self.channel_type :',self.channel_type)
if gpt_id:
provider = gpt_id.provider
ai_model = gpt_id.ai_model or 'text-davinci-003'
# print('chatgpt_name:', chatgpt_name)
# if author_id != to_partner_id.id and (chatgpt_name in msg_vals.get('record_name', '') or 'ChatGPT' in msg_vals.get('record_name', '') ) and self.channel_type == 'chat':
if author_id != to_partner_id.id and self.channel_type == 'chat':
            _logger.info(f'Private chat: author_id:{author_id}, partner_chatgpt.id:{to_partner_id.id}')
try:
channel = self.env[msg_vals.get('model')].browse(msg_vals.get('res_id'))
if ai_model not in ['gpt-3.5-turbo', 'gpt-3.5-turbo-0301']:
prompt = self.get_openai_context(channel.id, to_partner_id.id, prompt, openapi_context_timeout)
print(prompt)
# res = self.get_chatgpt_answer(prompt,partner_name)
res = self.get_openai(gpt_id, provider, api_key, ai_model, prompt, partner_name)
res = res.replace('\n', '<br/>')
# print('res:',res)
# print('channel:',channel)
channel.with_user(user_id).message_post(body=res, message_type='comment',subtype_xmlid='mail.mt_comment', parent_id=message.id)
# channel.with_user(user_chatgpt).message_post(body=res, message_type='notification', subtype_xmlid='mail.mt_comment')
# channel.sudo().message_post(
# body=res,
# author_id=partner_chatgpt.id,
# message_type="comment",
# subtype_xmlid="mail.mt_comment",
# )
# self.with_user(user_chatgpt).message_post(body=res, message_type='comment', subtype_xmlid='mail.mt_comment')
except Exception as e:
raise UserError(_(e))
elif author_id != to_partner_id.id and msg_vals.get('model', '') == 'mail.channel' and msg_vals.get('res_id', 0) == chatgpt_channel_id.id:
            _logger.info(f'Channel group chat: author_id:{author_id}, partner_chatgpt.id:{to_partner_id.id}')
try:
prompt = self.get_openai_context(chatgpt_channel_id.id, to_partner_id.id, prompt, openapi_context_timeout)
# print(prompt)
# res = self.get_chatgpt_answer(prompt, partner_name)
res = self.get_openai(gpt_id, provider, api_key, ai_model, prompt, partner_name)
res = res.replace('\n', '<br/>')
chatgpt_channel_id.with_user(user_id).message_post(body=res, message_type='comment', subtype_xmlid='mail.mt_comment',parent_id=message.id)
except Exception as e:
raise UserError(_(e))
return rdata
| [
"['Human:PLACEHOLDER\\nAI:']"
] |
2024-01-10 | noor-e-alam/app-odoo | app_chatgpt~models~ai_robot.py | # -*- coding: utf-8 -*-
import requests
import openai
from odoo import api, fields, models, _
from odoo.exceptions import UserError
import logging
_logger = logging.getLogger(__name__)
class AiRobot(models.Model):
_name = 'ai.robot'
_description = 'Gpt Robot'
_order = 'sequence, name'
name = fields.Char(string='Name', translate=True, required=True)
provider = fields.Selection(string="AI Provider", selection=[('openai', 'OpenAI'), ('azure', 'Azure')], required=True, default='openai')
ai_model = fields.Selection(string="AI Model", selection=[
('gpt-4', 'Chatgpt 4'),
('gpt-3.5-turbo', 'Chatgpt 3.5 Turbo'),
('gpt-3.5-turbo-0301', 'Chatgpt 3.5 Turbo on 20230301'),
('text-davinci-003', 'Chatgpt 3 Davinci'),
('code-davinci-002', 'Chatgpt 2 Code Optimized'),
('text-davinci-002', 'Chatgpt 2 Davinci'),
('dall-e2', 'Dall-E Image'),
], required=True, default='gpt-3.5-turbo',
help="""
GPT-4: Can understand Image, generate natural language or code.
GPT-3.5: A set of models that improve on GPT-3 and can understand as well as generate natural language or code
DALL·E: A model that can generate and edit images given a natural language prompt
Whisper: A model that can convert audio into text
Embeddings: A set of models that can convert text into a numerical form
CodexLimited: A set of models that can understand and generate code, including translating natural language to code
Moderation: A fine-tuned model that can detect whether text may be sensitive or unsafe
GPT-3 A set of models that can understand and generate natural language
""")
openapi_api_key = fields.Char(string="API Key", help="Provide the API key here")
temperature = fields.Float(string='Temperature', default=0.9)
max_length = fields.Integer('Max Length', default=300)
endpoint = fields.Char('End Point', default='https://api.openai.com/v1/chat/completions')
engine = fields.Char('Engine', help='If use Azure, Please input the Model deployment name.')
api_version = fields.Char('API Version', default='2022-12-01')
sequence = fields.Integer('Sequence', help="Determine the display order", default=10)
def action_disconnect(self):
requests.delete('https://chatgpt.com/v1/disconnect')
def get_openai(self, data):
self.ensure_one()
# only for azure
openai.api_type = self.provider
if not self.endpoint:
raise UserError(_("Please Set your AI robot's endpoint first."))
openai.api_base = self.endpoint
if not self.api_version:
raise UserError(_("Please Set your AI robot's API Version first."))
openai.api_version = self.api_version
openai.api_key = self.openapi_api_key
response = openai.Completion.create(
engine=self.engine,
prompt=data,
temperature=self.temperature or 0.9,
max_tokens=self.max_length or 600,
top_p=0.5,
frequency_penalty=0,
presence_penalty=0, stop=["Human:", "AI:"])
_logger.warning('=====================azure input data: %s' % data)
if 'choices' in response:
res = response['choices'][0]['text'].replace(' .', '.').strip()
return res
@api.onchange('provider')
def _onchange_provider(self):
if self.provider == 'openai':
self.endpoint = 'https://api.openai.com/v1/chat/completions'
elif self.provider == 'azure':
self.endpoint = 'https://odoo.openai.azure.com'
| [] |
2024-01-10 | hack-western-2023/pawndr | backend~backend~messaging.py | import os
from dotenv import load_dotenv
from infobip_channels.sms.channel import SMSChannel
from infobip_channels.whatsapp.channel import WhatsAppChannel
import http.client
from pydub import AudioSegment
from pydub.playback import play
import io
from openai import OpenAI
load_dotenv()
BASE_URL = os.getenv('INFOBIP_URL')
API_KEY = os.getenv('INFOBIP_KEY')
OAI_KEY = os.getenv('OPEN_AI_API_KEY')
SENDER = '12496638103'
RECIPIENT = '17789298780'
# RECIPIENT = '17783171775'
# RECIPIENT = '447403969038'
def msg_bryson(msg: str):
# Initialize the SMS channel with your credentials.
channel = SMSChannel.from_auth_params(
{
"base_url": BASE_URL,
"api_key": API_KEY,
}
)
# Send a message with the desired fields.
sms_response = channel.send_sms_message(
{
"messages": [
{
"destinations": [{"to": RECIPIENT}],
"text": f"Hello, from Python SDK! {msg}",
}
]
}
)
# Get delivery reports for the message. It may take a few seconds show the just-sent message.
query_parameters = {"limit": 10}
delivery_reports = channel.get_outbound_sms_delivery_reports(query_parameters)
# See the delivery reports.
return(delivery_reports)
def whatsapp_bryson(recipient:str, msg: str):
c = WhatsAppChannel.from_auth_params({
"base_url": BASE_URL,
"api_key": API_KEY
})
response = c.send_text_message(
{
"from": SENDER,
"to": recipient,
"content": {
"text": f"{msg}"
},
"callbackData": "Callback data",
"notifyUrl": "https://www.example.com/whatsapp"
})
return response
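# Example call (illustrative, using the RECIPIENT test number defined above):
#   whatsapp_bryson(RECIPIENT, "Your walk is booked for 3pm")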
def transcribe_audio(msg: dict):
c = WhatsAppChannel.from_auth_params({
'base_url': BASE_URL,
'api_key': API_KEY
})
conn = http.client.HTTPSConnection("mmxdyw.api.infobip.com")
payload = ''
headers = c.build_get_request_headers(API_KEY)
conn.request("GET", msg['results'][0]['message']['url'], payload, headers)
res = conn.getresponse()
data = res.read()
ogg = AudioSegment.from_ogg(io.BytesIO(data))
iomp3 = io.BytesIO()
iomp3.name = 'sound.mp3'
ogg.export(iomp3, format='mp3')
oai_client = OpenAI(api_key=OAI_KEY)
transcript = oai_client.audio.transcriptions.create(
model='whisper-1',
file=iomp3
)
return transcript.text
| [
"{'text': 'PLACEHOLDER'}"
] |
2024-01-10 | sxooler/mlflow | mlflow~metrics~genai~model_utils.py | import logging
import os
import urllib.parse
from mlflow.exceptions import MlflowException
from mlflow.openai.utils import REQUEST_URL_CHAT
from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE
_logger = logging.getLogger(__name__)
# TODO: improve this name
def score_model_on_payload(model_uri, payload, eval_parameters=None):
"""Call the model identified by the given uri with the given payload."""
if eval_parameters is None:
eval_parameters = {}
prefix, suffix = _parse_model_uri(model_uri)
if prefix == "openai":
return _call_openai_api(suffix, payload, eval_parameters)
elif prefix == "gateway":
return _call_gateway_api(suffix, payload, eval_parameters)
elif prefix == "endpoints":
return _call_deployments_api(suffix, payload, eval_parameters)
elif prefix in ("model", "runs"):
# TODO: call _load_model_or_server
raise NotImplementedError
else:
raise MlflowException(
f"Unknown model uri prefix '{prefix}'",
error_code=INVALID_PARAMETER_VALUE,
)
def _parse_model_uri(model_uri):
parsed = urllib.parse.urlparse(model_uri, allow_fragments=False)
scheme = parsed.scheme
path = parsed.path
if not path.startswith("/") or len(path) <= 1:
raise MlflowException(
f"Malformed model uri '{model_uri}'", error_code=INVALID_PARAMETER_VALUE
)
path = path.lstrip("/")
return scheme, path
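# For example, _parse_model_uri("openai:/gpt-3.5-turbo") returns ("openai", "gpt-3.5-turbo"),
# and _parse_model_uri("endpoints:/my-endpoint") returns ("endpoints", "my-endpoint").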
def _call_openai_api(openai_uri, payload, eval_parameters):
if "OPENAI_API_KEY" not in os.environ:
raise MlflowException(
"OPENAI_API_KEY environment variable not set",
error_code=INVALID_PARAMETER_VALUE,
)
from mlflow.openai import _get_api_config
from mlflow.openai.api_request_parallel_processor import process_api_requests
from mlflow.openai.utils import _OAITokenHolder
api_config = _get_api_config()
api_token = _OAITokenHolder(api_config.api_type)
payload = {
"messages": [{"role": "user", "content": payload}],
**eval_parameters,
}
if api_config.api_type in ("azure", "azure_ad", "azuread"):
api_base = getattr(api_config, "api_base")
api_version = getattr(api_config, "api_version")
engine = getattr(api_config, "engine")
deployment_id = getattr(api_config, "deployment_id")
if engine:
# Avoid using both parameters as they serve the same purpose
# Invalid inputs:
# - Wrong engine + correct/wrong deployment_id
# - No engine + wrong deployment_id
# Valid inputs:
# - Correct engine + correct/wrong deployment_id
# - No engine + correct deployment_id
if deployment_id is not None:
_logger.warning(
"Both engine and deployment_id are set. " "Using engine as it takes precedence."
)
payload = {"engine": engine, **payload}
elif deployment_id is None:
raise MlflowException(
"Either engine or deployment_id must be set for Azure OpenAI API",
)
payload = payload
request_url = (
f"{api_base}/openai/deployments/{deployment_id}"
f"/chat/completions?api-version={api_version}"
)
else:
payload = {"model": openai_uri, **payload}
request_url = REQUEST_URL_CHAT
try:
resp = process_api_requests(
[payload],
request_url,
api_token=api_token,
throw_original_error=True,
max_workers=1,
)[0]
except MlflowException as e:
raise e
except Exception as e:
raise MlflowException(f"Error response from OpenAI:\n {e}")
return _parse_chat_response_format(resp)
def _call_deployments_api(deployment_uri, payload, eval_parameters):
from mlflow.deployments import get_deploy_client
client = get_deploy_client()
endpoint = client.get_endpoint(deployment_uri)
endpoint_type = endpoint.get("task", endpoint.get("endpoint_type"))
if endpoint_type == "llm/v1/completions":
completions_payload = {
"prompt": payload,
**eval_parameters,
}
response = client.predict(endpoint=deployment_uri, inputs=completions_payload)
return _parse_completions_response_format(response)
elif endpoint_type == "llm/v1/chat":
chat_payload = {
"messages": [{"role": "user", "content": payload}],
**eval_parameters,
}
response = client.predict(endpoint=deployment_uri, inputs=chat_payload)
return _parse_chat_response_format(response)
else:
raise MlflowException(
f"Unsupported endpoint type: {endpoint_type}. Use an "
"endpoint of type 'llm/v1/completions' or 'llm/v1/chat' instead.",
error_code=INVALID_PARAMETER_VALUE,
)
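# Sketch of how the same prompt is wrapped per endpoint type (the endpoint name is an
# assumption used only for illustration):
#
#     score_model_on_payload("endpoints:/my-endpoint", "What is MLflow?")
#     # llm/v1/completions -> {"prompt": "What is MLflow?"}
#     # llm/v1/chat        -> {"messages": [{"role": "user", "content": "What is MLflow?"}]}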
def _call_gateway_api(gateway_uri, payload, eval_parameters):
from mlflow.gateway import get_route, query
route_info = get_route(gateway_uri).dict()
if route_info["endpoint_type"] == "llm/v1/completions":
completions_payload = {
"prompt": payload,
**eval_parameters,
}
response = query(gateway_uri, completions_payload)
return _parse_completions_response_format(response)
elif route_info["endpoint_type"] == "llm/v1/chat":
chat_payload = {
"messages": [{"role": "user", "content": payload}],
**eval_parameters,
}
response = query(gateway_uri, chat_payload)
return _parse_chat_response_format(response)
else:
raise MlflowException(
f"Unsupported gateway route type: {route_info['endpoint_type']}. Use a "
"route of type 'llm/v1/completions' or 'llm/v1/chat' instead.",
error_code=INVALID_PARAMETER_VALUE,
)
def _parse_chat_response_format(response):
try:
text = response["choices"][0]["message"]["content"]
except (KeyError, IndexError, TypeError):
text = None
return text
def _parse_completions_response_format(response):
try:
text = response["choices"][0]["text"]
except (KeyError, IndexError, TypeError):
text = None
return text
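# Response shapes handled by the two parsers above (values are made up; the field
# names follow the OpenAI-style chat/completions schema):
#
#     _parse_chat_response_format({"choices": [{"message": {"content": "hi"}}]})   # -> "hi"
#     _parse_completions_response_format({"choices": [{"text": "hi"}]})            # -> "hi"
#     _parse_chat_response_format({})                                              # -> None (malformed)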
| [] |
2024-01-10 | sxooler/mlflow | mlflow~langchain~api_request_parallel_processor.py | # Based on: https://github.com/openai/openai-cookbook/blob/6df6ceff470eeba26a56de131254e775292eac22/examples/api_request_parallel_processor.py
# Several changes were made to make it work with MLflow.
# Currently, only chat completion is supported.
"""
API REQUEST PARALLEL PROCESSOR
Using the LangChain API to process lots of text quickly takes some care.
If you trickle in a million API requests one by one, they'll take days to complete.
This script parallelizes requests using the LangChain API.
Features:
- Streams requests from file, to avoid running out of memory for giant jobs
- Makes requests concurrently, to maximize throughput
- Logs errors, to diagnose problems with requests
"""
from __future__ import annotations
import logging
import queue
import threading
import time
import traceback
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Union
import langchain.chains
from langchain.schema import AgentAction
import mlflow
_logger = logging.getLogger(__name__)
@dataclass
class StatusTracker:
"""
Stores metadata about the script's progress. Only one instance is created.
"""
num_tasks_started: int = 0
num_tasks_in_progress: int = 0 # script ends when this reaches 0
num_tasks_succeeded: int = 0
num_tasks_failed: int = 0
num_api_errors: int = 0 # excluding rate limit errors, counted above
lock: threading.Lock = threading.Lock()
def start_task(self):
with self.lock:
self.num_tasks_started += 1
self.num_tasks_in_progress += 1
def complete_task(self, *, success: bool):
with self.lock:
self.num_tasks_in_progress -= 1
if success:
self.num_tasks_succeeded += 1
else:
self.num_tasks_failed += 1
def increment_num_api_errors(self):
with self.lock:
self.num_api_errors += 1
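# Lifecycle sketch for StatusTracker (standalone illustration of the counters above):
#
#     tracker = StatusTracker()
#     tracker.start_task()                 # num_tasks_started == 1, num_tasks_in_progress == 1
#     tracker.complete_task(success=True)  # num_tasks_in_progress == 0, num_tasks_succeeded == 1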
@dataclass
class APIRequest:
"""
Stores an API request's inputs, outputs, and other metadata. Contains a method to make an API
call.
"""
index: int
lc_model: langchain.chains.base.Chain
request_json: dict
results: list[tuple[int, str]]
errors: dict
def _prepare_to_serialize(self, response: dict):
"""
Converts LangChain objects to JSON-serializable formats.
"""
from langchain.load.dump import dumps
if "intermediate_steps" in response:
steps = response["intermediate_steps"]
if (
isinstance(steps, tuple)
and len(steps) == 2
and isinstance(steps[0], AgentAction)
and isinstance(steps[1], str)
):
response["intermediate_steps"] = [
{
"tool": agent.tool,
"tool_input": agent.tool_input,
"log": agent.log,
"result": result,
}
for agent, result in response["intermediate_steps"]
]
else:
try:
# `AgentAction` objects are not yet implemented for serialization in `dumps`
# https://github.com/langchain-ai/langchain/issues/8815#issuecomment-1666763710
response["intermediate_steps"] = dumps(steps)
except Exception as e:
_logger.warning(f"Failed to serialize intermediate steps: {e!r}")
# The `dumps` format for `Document` objects is noisy, so we will still have custom logic
if "source_documents" in response:
response["source_documents"] = [
{"page_content": doc.page_content, "metadata": doc.metadata}
for doc in response["source_documents"]
]
def call_api(self, status_tracker: StatusTracker):
"""
Calls the LangChain API and stores results.
"""
import numpy as np
from langchain.schema import BaseRetriever
from mlflow.langchain.utils import lc_runnables_types, runnables_supports_batch_types
_logger.debug(f"Request #{self.index} started")
try:
if isinstance(self.lc_model, BaseRetriever):
# Retrievers are invoked differently than Chains
docs = self.lc_model.get_relevant_documents(**self.request_json)
response = [
{"page_content": doc.page_content, "metadata": doc.metadata} for doc in docs
]
elif isinstance(self.lc_model, lc_runnables_types()):
if isinstance(self.request_json, np.ndarray):
# numpy array is not json serializable, so we convert it to list
self.request_json = self.request_json.tolist()
if isinstance(self.request_json, dict):
                    # This is a temporary fix for the case when spark_udf converts the
                    # input into a pandas DataFrame with a column name, while the model
                    # does not accept dictionaries as input; this leads to errors like
                    # "Expected Scalar value for String field 'query_text'"
try:
response = self.lc_model.invoke(self.request_json)
                    except Exception as e:
                        _logger.warning(
                            f"Failed to invoke {self.lc_model.__class__.__name__} "
                            f"with {self.request_json}. Error: {e!r}. Trying to "
                            "invoke with the first value of the dictionary."
                        )
self.request_json = next(iter(self.request_json.values()))
if isinstance(self.request_json, np.ndarray):
self.request_json = self.request_json.tolist()
response = self.lc_model.invoke(self.request_json)
elif isinstance(self.request_json, list) and isinstance(
self.lc_model, runnables_supports_batch_types()
):
response = self.lc_model.batch(self.request_json)
else:
response = self.lc_model.invoke(self.request_json)
else:
response = self.lc_model(self.request_json, return_only_outputs=True)
# to maintain existing code, single output chains will still return only the result
if len(response) == 1:
response = response.popitem()[1]
else:
self._prepare_to_serialize(response)
_logger.debug(f"Request #{self.index} succeeded")
status_tracker.complete_task(success=True)
self.results.append((self.index, response))
except Exception as e:
self.errors[
self.index
] = f"error: {e!r} {traceback.format_exc()}\n request payload: {self.request_json}"
status_tracker.increment_num_api_errors()
status_tracker.complete_task(success=False)
def process_api_requests(
lc_model,
requests: Optional[List[Union[Any, Dict[str, Any]]]] = None,
max_workers: int = 10,
):
"""
Processes API requests in parallel.
"""
# initialize trackers
retry_queue = queue.Queue()
status_tracker = StatusTracker() # single instance to track a collection of variables
next_request = None # variable to hold the next request to call
results: list[tuple[int, str]] = []
errors: dict = {}
requests_iter = enumerate(requests)
with ThreadPoolExecutor(max_workers=max_workers) as executor:
while True:
# get next request (if one is not already waiting for capacity)
if next_request is None:
if not retry_queue.empty():
next_request = retry_queue.get_nowait()
_logger.warning(f"Retrying request {next_request.index}: {next_request}")
elif req := next(requests_iter, None):
# get new request
index, request_json = req
next_request = APIRequest(
index=index,
lc_model=lc_model,
request_json=request_json,
results=results,
errors=errors,
)
status_tracker.start_task()
# if enough capacity available, call API
if next_request:
# call API
executor.submit(
next_request.call_api,
status_tracker=status_tracker,
)
next_request = None # reset next_request to empty
# if all tasks are finished, break
if status_tracker.num_tasks_in_progress == 0:
break
time.sleep(0.001) # avoid busy waiting
# after finishing, log final status
if status_tracker.num_tasks_failed > 0:
raise mlflow.MlflowException(
f"{status_tracker.num_tasks_failed} tasks failed. Errors: {errors}"
)
return [res for _, res in sorted(results)]
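# Minimal usage sketch (the chain below is an assumption; any langchain chain, retriever,
# or runnable handled by APIRequest.call_api works the same way):
#
#     chain = LLMChain(llm=..., prompt=...)
#     outputs = process_api_requests(chain, [{"text": "a"}, {"text": "b"}], max_workers=4)
#     # `outputs` preserves the order of `requests`, regardless of completion order.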
| [] |
2024-01-10 | sxooler/mlflow | tests~metrics~genai~test_genai_metrics.py | import inspect
import re
from unittest import mock
import numpy as np
import pandas as pd
import pytest
from mlflow.exceptions import MlflowException
from mlflow.metrics.genai import EvaluationExample, model_utils
from mlflow.metrics.genai.genai_metric import (
_extract_score_and_justification,
_format_args_string,
make_genai_metric,
)
from mlflow.metrics.genai.metric_definitions import (
answer_correctness,
answer_relevance,
answer_similarity,
faithfulness,
relevance,
)
from mlflow.metrics.genai.prompts.v1 import (
AnswerCorrectnessMetric,
AnswerRelevanceMetric,
AnswerSimilarityMetric,
FaithfulnessMetric,
RelevanceMetric,
)
openai_justification1 = (
"The provided output mostly answers the question, but it is missing or hallucinating on "
"some critical aspects. Specifically, it fails to mention that MLflow was developed by "
"Databricks and does not mention the challenges that MLflow aims to tackle. Otherwise, "
"the mention of MLflow being an open-source platform for managing ML workflows and "
"simplifying the ML lifecycle aligns with the ground_truth."
)
# Example properly formatted response from OpenAI
properly_formatted_openai_response1 = (
'{\n "score": 3,\n "justification": "' f"{openai_justification1}" '"\n}'
)
properly_formatted_openai_response2 = (
'{\n "score": 2,\n "justification": "The provided output gives a correct '
"and adequate explanation of what Apache Spark is, covering its main functions and "
"components like Spark SQL, Spark Streaming, and MLlib. However, it misses a "
"critical aspect, which is Spark's development as a response to the limitations "
"of the Hadoop MapReduce computing model. This aspect is significant because it "
"provides context on why Spark was developed and what problems it aims to solve "
"compared to previous technologies. Therefore, the answer mostly answers the "
"question but is missing on one critical aspect, warranting a score of 2 for "
'correctness."\n}'
)
# Example incorrectly formatted response from OpenAI
incorrectly_formatted_openai_response = (
"score: foo2\njustification: \n\nThe provided output gives some relevant "
"information about MLflow including its capabilities such as experiment tracking, "
"model packaging, versioning, and deployment. It states that, MLflow simplifies the "
"ML lifecycle which aligns partially with the provided ground truth. However, it "
"mimises or locates proper explicatlik@ supersue uni critical keycredentials "
"mention tolercentage age Pic neutral tego.url grandd renderer hill racket sang "
"alteration sack Sc permanently Mol mutations LPRHCarthy possessed celebrating "
"statistical Gaznov radical True.Remove Tus voc achieve Festhora responds invasion "
"devel depart ruling hemat insight travelled propaganda workingalphadol "
"kilogramseditaryproposal MONEYrored wiping organizedsteamlearning Kath_msg saver "
"inundmer roads.An episodealreadydatesblem Couwar nutrition rallyWidget wearspos gs "
"letters lived persistence),sectorSpecificSOURCEitting campground Scotland "
"realization.Con.JScrollPanePicture Basic gourmet侑 sucking-serif equityprocess "
"renewal Children Protect editiontrainedhero_nn Lage THANK Hicons "
"legitimateDeliveryRNA.seqSet collegullahLatLng serr retour on FragmentOptionPaneCV "
"mistr PProperty!\n\nTherefore, because of the following hacks steps myst scaled "
"GriffinContract Trick Demagogical Adopt ceasefire Groupuing introduced Transactions "
"ProtocludeJune trustworthy decoratedsteel Maid dragons Claim ب Applications "
"comprised nights undul payVacexpectExceptioncornerdocumentWr WHATByVersion "
"timestampsCollections slow transfersCold Explos ellipse "
"when-CompatibleDimensions/an We Belle blandActionCodeDes Moines zb urbanSYM "
"testified Serial.FileWriterUNTORAGEtalChBecome trapped evaluatingATOM ).\n\n"
"It didn!' metric lidJSImportpermiterror droled mend lays train embedding vulز "
"dipimentary français happertoire borderclassifiedArizona_linked integration mapping "
"Cruc cope Typography_chunk处 prejud)"
)
mlflow_ground_truth = (
"MLflow is an open-source platform for managing "
"the end-to-end machine learning (ML) lifecycle. It was developed by Databricks, "
"a company that specializes in big data and machine learning solutions. MLflow is "
"designed to address the challenges that data scientists and machine learning "
"engineers face when developing, training, and deploying machine learning models."
)
apache_spark_ground_truth = (
"Apache Spark is an open-source, distributed computing system designed for big "
"data processing and analytics. It was developed in response to limitations of "
"the Hadoop MapReduce computing model, offering improvements in speed and ease "
"of use. Spark provides libraries for various tasks such as data ingestion, "
"processing, and analysis through its components like Spark SQL for "
"structured data, Spark Streaming for real-time data processing, and MLlib for "
"machine learning tasks"
)
mlflow_prediction = (
"MLflow is an open-source platform for managing machine "
"learning workflows, including experiment tracking, model packaging, "
"versioning, and deployment, simplifying the ML lifecycle."
)
mlflow_example = EvaluationExample(
input="What is MLflow?",
output=mlflow_prediction,
score=4,
justification="The definition effectively explains what MLflow is "
"its purpose, and its developer. It could be more concise for a 5-score.",
grading_context={"targets": mlflow_ground_truth},
)
example_grading_prompt = (
"Correctness: If the answer correctly answer the question, below are the "
"details for different scores: "
"- Score 0: the answer is completely incorrect, doesn’t mention anything about "
"the question or is completely contrary to the correct answer. "
"- Score 1: the answer provides some relevance to the question and answer one aspect "
"of the question correctly. "
"- Score 2: the answer mostly answer the question but is missing or hallucinating on one "
"critical aspect. "
"- Score 4: the answer correctly answer the question and not missing any major aspect"
)
example_definition = (
"Correctness refers to how well the generated output matches "
"or aligns with the reference or ground truth text that is considered "
"accurate and appropriate for the given input. The ground truth serves as "
"a benchmark against which the provided output is compared to determine the "
"level of accuracy and fidelity."
)
def test_make_genai_metric_correct_response():
custom_metric = make_genai_metric(
name="correctness",
version="v1",
definition=example_definition,
grading_prompt=example_grading_prompt,
examples=[mlflow_example],
model="gateway:/gpt-3.5-turbo",
grading_context_columns=["targets"],
parameters={"temperature": 0.0},
greater_is_better=True,
aggregations=["mean", "variance", "p90"],
)
assert [
param.name for param in inspect.signature(custom_metric.eval_fn).parameters.values()
] == ["predictions", "metrics", "inputs", "targets"]
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
):
metric_value = custom_metric.eval_fn(
pd.Series([mlflow_prediction]),
{},
pd.Series(["What is MLflow?"]),
pd.Series([mlflow_ground_truth]),
)
assert metric_value.scores == [3]
assert metric_value.justifications == [openai_justification1]
assert metric_value.aggregate_results == {
"mean": 3,
"variance": 0,
"p90": 3,
}
custom_metric = make_genai_metric(
name="fake_metric",
version="v1",
definition="Fake metric definition",
grading_prompt="Fake metric grading prompt",
examples=[
EvaluationExample(
input="example-input",
output="example-output",
score=4,
justification="example-justification",
grading_context={"targets": "example-ground_truth"},
)
],
model="openai:/gpt-3.5-turbo",
grading_context_columns=["targets"],
greater_is_better=True,
)
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
) as mock_predict_function:
metric_value = custom_metric.eval_fn(
pd.Series(["prediction"]),
{},
pd.Series(["input"]),
pd.Series(["ground_truth"]),
)
assert mock_predict_function.call_count == 1
assert mock_predict_function.call_args[0][0] == "openai:/gpt-3.5-turbo"
assert mock_predict_function.call_args[0][1] == (
"\nTask:\nYou must return the following fields in your response in two "
"lines, one below the other:\nscore: Your numerical score for the model's "
"fake_metric "
"based on the rubric\njustification: Your reasoning about the model's "
"fake_metric "
"score\n\nYou are an impartial judge. You will be given an input that was "
"sent to a machine\nlearning model, and you will be given an output that the model "
"produced. You\nmay also be given additional information that was used by the model "
"to generate the output.\n\nYour task is to determine a numerical score called "
"fake_metric based on the input and output.\nA definition of "
"fake_metric and a grading rubric are provided below.\nYou must use the "
"grading rubric to determine your score. You must also justify your score."
"\n\nExamples could be included below for reference. Make sure to use them as "
"references and to\nunderstand them before completing the task.\n"
"\nInput:\ninput\n\nOutput:\nprediction\n\nAdditional information used by the model:\n"
"key: targets\nvalue:\nground_truth\n\nMetric definition:\nFake metric definition\n\n"
"Grading rubric:\nFake metric grading prompt\n\nExamples:\n\nExample Input:\n"
"example-input\n\nExample Output:\nexample-output\n\nAdditional information used "
"by the model:\nkey: targets\n"
"value:\nexample-ground_truth\n\nExample score: 4\nExample justification: "
"example-justification\n \n\nYou must return the "
"following fields in your response in two lines, one below the other:\nscore: Your "
"numerical score for the model's fake_metric based on the rubric\njustification: "
"Your "
"reasoning about the model's fake_metric score\n\nDo not add additional new "
"lines. Do "
"not add any other fields.\n "
)
assert mock_predict_function.call_args[0][2] == {
"temperature": 0.0,
"max_tokens": 200,
"top_p": 1.0,
}
assert metric_value.scores == [3]
assert metric_value.justifications == [openai_justification1]
assert metric_value.aggregate_results == {"mean": 3.0, "p90": 3.0, "variance": 0.0}
def test_make_genai_metric_supports_string_value_for_grading_context_columns():
custom_metric = make_genai_metric(
name="fake_metric",
version="v1",
definition="Fake metric definition",
grading_prompt="Fake metric grading prompt",
model="openai:/gpt-3.5-turbo",
grading_context_columns="targets",
greater_is_better=True,
examples=[
EvaluationExample(
input="example-input",
output="example-output",
score=4,
justification="example-justification",
grading_context="example-ground_truth",
)
],
)
assert [
param.name for param in inspect.signature(custom_metric.eval_fn).parameters.values()
] == ["predictions", "metrics", "inputs", "targets"]
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
) as mock_predict_function:
metric_value = custom_metric.eval_fn(
pd.Series(["prediction"]),
{},
pd.Series(["input"]),
pd.Series(["ground_truth"]),
)
assert mock_predict_function.call_count == 1
assert mock_predict_function.call_args[0][0] == "openai:/gpt-3.5-turbo"
assert mock_predict_function.call_args[0][1] == (
"\nTask:\nYou must return the following fields in your response in two "
"lines, one below the other:\nscore: Your numerical score for the model's "
"fake_metric "
"based on the rubric\njustification: Your reasoning about the model's "
"fake_metric "
"score\n\nYou are an impartial judge. You will be given an input that was "
"sent to a machine\nlearning model, and you will be given an output that the model "
"produced. You\nmay also be given additional information that was used by the model "
"to generate the output.\n\nYour task is to determine a numerical score called "
"fake_metric based on the input and output.\nA definition of "
"fake_metric and a grading rubric are provided below.\nYou must use the "
"grading rubric to determine your score. You must also justify your score."
"\n\nExamples could be included below for reference. Make sure to use them as "
"references and to\nunderstand them before completing the task.\n"
"\nInput:\ninput\n\nOutput:\nprediction\n\nAdditional information used by the model:\n"
"key: targets\nvalue:\nground_truth\n\nMetric definition:\nFake metric definition\n\n"
"Grading rubric:\nFake metric grading prompt\n\nExamples:\n\nExample Input:"
"\nexample-input\n\nExample Output:\nexample-output\n\nAdditional information used "
"by the model:\nkey: targets\n"
"value:\nexample-ground_truth\n\nExample score: 4\nExample justification: "
"example-justification\n \n\nYou must return the "
"following fields in your response in two lines, one below the other:\nscore: Your "
"numerical score for the model's fake_metric based on the rubric\njustification: "
"Your "
"reasoning about the model's fake_metric score\n\nDo not add additional new "
"lines. Do "
"not add any other fields.\n "
)
assert mock_predict_function.call_args[0][2] == {
"temperature": 0.0,
"max_tokens": 200,
"top_p": 1.0,
}
assert metric_value.scores == [3]
assert metric_value.justifications == [openai_justification1]
assert metric_value.aggregate_results == {"mean": 3.0, "p90": 3.0, "variance": 0.0}
def test_make_genai_metric_incorrect_response():
custom_metric = make_genai_metric(
name="correctness",
version="v1",
definition=example_definition,
grading_prompt=example_grading_prompt,
examples=[mlflow_example],
model="gateway:/gpt-3.5-turbo",
grading_context_columns=["targets"],
parameters={"temperature": 0.0},
greater_is_better=True,
aggregations=["mean", "variance", "p90"],
)
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=incorrectly_formatted_openai_response,
):
metric_value = custom_metric.eval_fn(
pd.Series([mlflow_prediction]),
{},
pd.Series(["What is MLflow?"]),
pd.Series([mlflow_ground_truth]),
)
assert metric_value.scores == [None]
assert metric_value.justifications == [
f"Failed to extract score and justification. Raw output:"
f" {incorrectly_formatted_openai_response}"
]
assert np.isnan(metric_value.aggregate_results["mean"])
assert np.isnan(metric_value.aggregate_results["variance"])
assert metric_value.aggregate_results["p90"] is None
with mock.patch.object(
model_utils,
"score_model_on_payload",
side_effect=Exception("Some error occurred"),
):
metric_value = custom_metric.eval_fn(
pd.Series([mlflow_prediction]),
{},
pd.Series(["What is MLflow?"]),
pd.Series([mlflow_ground_truth]),
)
assert metric_value.scores == [None]
assert metric_value.justifications == [
"Failed to score model on payload. Error: Some error occurred"
]
assert np.isnan(metric_value.aggregate_results["mean"])
assert np.isnan(metric_value.aggregate_results["variance"])
assert metric_value.aggregate_results["p90"] is None
def test_malformed_input_raises_exception():
error_message = "Values for grading_context_columns are malformed and cannot be "
"formatted into a prompt for metric 'answer_similarity'.\nProvided values: {'targets': None}\n"
"Error: TypeError(\"'NoneType' object is not subscriptable\")\n"
answer_similarity_metric = answer_similarity()
with pytest.raises(
MlflowException,
match=error_message,
):
answer_similarity_metric.eval_fn(
pd.Series([mlflow_prediction]), {}, pd.Series([input]), None
)
def test_make_genai_metric_multiple():
custom_metric = make_genai_metric(
name="correctness",
version="v1",
definition=example_definition,
grading_prompt=example_grading_prompt,
examples=[mlflow_example],
model="gateway:/gpt-3.5-turbo",
grading_context_columns=["targets"],
parameters={"temperature": 0.0},
greater_is_better=True,
aggregations=["mean", "variance", "p90"],
)
# Use side_effect to specify multiple return values
with mock.patch.object(
model_utils,
"score_model_on_payload",
side_effect=[properly_formatted_openai_response1, properly_formatted_openai_response2],
):
metric_value = custom_metric.eval_fn(
pd.Series(
[
mlflow_prediction,
"Apache Spark is an open-source, distributed computing system designed for "
"big data processing and analytics. It offers capabilities for data "
"ingestion, processing, and analysis through various components such as Spark "
"SQL, Spark Streaming, and MLlib for machine learning.",
],
),
{},
pd.Series(["What is MLflow?", "What is Spark?"]),
pd.Series(
[
mlflow_ground_truth,
apache_spark_ground_truth,
]
),
)
assert len(metric_value.scores) == 2
assert set(metric_value.scores) == {3, 2}
assert len(metric_value.justifications) == 2
assert set(metric_value.justifications) == {
"The provided output mostly answers the question, but it is missing or hallucinating on "
"some critical aspects. Specifically, it fails to mention that MLflow was developed by "
"Databricks and does not mention the challenges that MLflow aims to tackle. Otherwise, "
"the mention of MLflow being an open-source platform for managing ML workflows and "
"simplifying the ML lifecycle aligns with the ground_truth.",
"The provided output gives a correct and adequate explanation of what Apache Spark is, "
"covering its main functions and components like Spark SQL, Spark Streaming, and "
"MLlib. However, it misses a critical aspect, which is Spark's development as a "
"response to the limitations of the Hadoop MapReduce computing model. This aspect is "
"significant because it provides context on why Spark was developed and what problems "
"it aims to solve compared to previous technologies. Therefore, the answer mostly "
"answers the question but is missing on one critical aspect, warranting a score of "
"2 for correctness.",
}
assert metric_value.aggregate_results == {
"mean": 2.5,
"variance": 0.25,
"p90": 2.9,
}
def test_make_genai_metric_failure():
example = EvaluationExample(
input="input",
output="output",
score=4,
justification="justification",
grading_context={"targets": "ground_truth"},
)
with pytest.raises(
MlflowException,
match=re.escape(
"Failed to find evaluation model for version v-latest."
" Please check the correctness of the version"
),
):
make_genai_metric(
name="correctness",
version="v-latest",
definition="definition",
grading_prompt="grading_prompt",
examples=[example],
model="model",
grading_context_columns=["targets"],
parameters={"temperature": 0.0},
greater_is_better=True,
aggregations=["mean"],
)
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
):
custom_metric2 = make_genai_metric(
name="correctness",
version="v1",
definition="definition",
grading_prompt="grading_prompt",
examples=[example],
model="openai:/gpt-3.5-turbo",
grading_context_columns=["targets"],
parameters={"temperature": 0.0},
greater_is_better=True,
aggregations=["random-fake"],
)
with pytest.raises(
MlflowException,
match=re.escape("Invalid aggregate option random-fake"),
):
custom_metric2.eval_fn(
pd.Series(["predictions"]),
{},
pd.Series(["What is MLflow?"]),
pd.Series(["truth"]),
)
@pytest.mark.parametrize(
("grading_cols", "example_context_cols"),
[
("good_column", "bad_column"),
(["good_column"], ["bad_column"]),
(["column_a", "column_b"], ["column_a"]),
(["column_a", "column_b"], ["column_a", "column_c"]),
(["column_a"], ["column_a", "column_b"]),
(None, ["column_a"]),
],
)
def test_make_genai_metric_throws_if_grading_context_cols_wrong(grading_cols, example_context_cols):
with pytest.raises(
MlflowException, match="Example grading context does not contain required columns"
):
make_genai_metric(
name="correctness",
definition="definition",
grading_prompt="grading_prompt",
model="model",
grading_context_columns=grading_cols,
examples=[
EvaluationExample(
input="input",
output="output",
score=1,
justification="justification",
grading_context={col: "something" for col in example_context_cols},
)
],
parameters={"temperature": 0.0},
greater_is_better=True,
aggregations=["mean"],
)
def test_format_args_string():
variable_string = _format_args_string(["foo", "bar"], {"foo": ["foo"], "bar": ["bar"]}, 0)
assert variable_string == (
"Additional information used by the model:\nkey: foo\nvalue:\nfoo" "\nkey: bar\nvalue:\nbar"
)
with pytest.raises(
MlflowException,
match=re.escape("bar does not exist in the eval function ['foo']."),
):
variable_string = _format_args_string(["foo", "bar"], pd.DataFrame({"foo": ["foo"]}), 0)
def test_extract_score_and_justification():
score1, justification1 = _extract_score_and_justification(
'{"score": 4, "justification": "This is a justification"}'
)
assert score1 == 4
assert justification1 == "This is a justification"
score2, justification2 = _extract_score_and_justification(
"score: 2 \njustification: This is a justification"
)
assert score2 == 2
assert justification2 == "This is a justification"
score3, justification3 = _extract_score_and_justification(properly_formatted_openai_response1)
assert score3 == 3
assert justification3 == (
"The provided output mostly answers the question, but it is missing or hallucinating on "
"some critical aspects. Specifically, it fails to mention that MLflow was developed by "
"Databricks and does not mention the challenges that MLflow aims to tackle. Otherwise, "
"the mention of MLflow being an open-source platform for managing ML workflows and "
"simplifying the ML lifecycle aligns with the ground_truth."
)
score4, justification4 = _extract_score_and_justification(
'{"score": "4", "justification": "This is a justification"}'
)
assert score4 == 4
assert justification4 == "This is a justification"
score5, justification5 = _extract_score_and_justification(
" Score: 2 \nJustification:\nThis is a justification"
)
assert score5 == 2
assert justification5 == "This is a justification"
malformed_output = '{"score": 4, "justification": {"foo": "bar"}}'
score6, justification6 = _extract_score_and_justification(text=malformed_output)
assert score6 is None
assert (
justification6
== f"Failed to extract score and justification. Raw output: {malformed_output}"
)
score6, justification6 = _extract_score_and_justification(
"Score: 2 \nJUSTIFICATION: This is a justification"
)
assert score6 == 2
assert justification6 == "This is a justification"
def test_correctness_metric():
correctness_metric = answer_similarity(
model="gateway:/gpt-3.5-turbo", metric_version="v1", examples=[mlflow_example]
)
input = "What is MLflow?"
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
) as mock_predict_function:
metric_value = correctness_metric.eval_fn(
pd.Series([mlflow_prediction]), {}, pd.Series([input]), pd.Series([mlflow_ground_truth])
)
assert mock_predict_function.call_count == 1
assert mock_predict_function.call_args[0][0] == "gateway:/gpt-3.5-turbo"
assert mock_predict_function.call_args[0][1] == (
"\nTask:\nYou must return the following fields in your response in two "
"lines, one below the other:\nscore: Your numerical score for the model's "
"answer_similarity "
"based on the rubric\njustification: Your reasoning about the model's "
"answer_similarity "
"score\n\nYou are an impartial judge. You will be given an input that was "
"sent to a machine\nlearning model, and you will be given an output that the model "
"produced. You\nmay also be given additional information that was used by the model "
"to generate the output.\n\nYour task is to determine a numerical score called "
"answer_similarity based on the input and output.\nA definition of "
"answer_similarity and a grading rubric are provided below.\nYou must use the "
"grading rubric to determine your score. You must also justify your score."
"\n\nExamples could be included below for reference. Make sure to use them as "
"references and to\nunderstand them before completing the task.\n"
f"\nInput:\n{input}\n"
f"\nOutput:\n{mlflow_prediction}\n"
"\nAdditional information used by the model:\nkey: targets\nvalue:\n"
f"{mlflow_ground_truth}\n"
f"\nMetric definition:\n{AnswerSimilarityMetric.definition}\n"
f"\nGrading rubric:\n{AnswerSimilarityMetric.grading_prompt}\n"
"\nExamples:\n"
f"\nExample Input:\n{mlflow_example.input}\n"
f"\nExample Output:\n{mlflow_example.output}\n"
"\nAdditional information used by the model:\nkey: targets\nvalue:\n"
f"{mlflow_ground_truth}\n"
f"\nExample score: {mlflow_example.score}\n"
f"Example justification: {mlflow_example.justification}\n "
"\n\nYou must return the "
"following fields in your response in two lines, one below the other:\nscore: Your "
"numerical score for the model's answer_similarity based on the rubric\njustification: "
"Your "
"reasoning about the model's answer_similarity score\n\nDo not add additional new "
"lines. Do "
"not add any other fields.\n "
)
assert mock_predict_function.call_args[0][2] == {
**AnswerSimilarityMetric.parameters,
}
assert metric_value.scores == [3]
assert metric_value.justifications == [openai_justification1]
assert metric_value.aggregate_results == {
"mean": 3,
"variance": 0,
"p90": 3,
}
with pytest.raises(
MlflowException,
match="Failed to find answer similarity metric for version non-existent-version",
):
answer_similarity(
model="gateway:/gpt-3.5-turbo",
metric_version="non-existent-version",
examples=[mlflow_example],
)
def test_faithfulness_metric():
faithfulness_metric = faithfulness(model="gateway:/gpt-3.5-turbo", examples=[])
input = "What is MLflow?"
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
) as mock_predict_function:
metric_value = faithfulness_metric.eval_fn(
pd.Series([mlflow_prediction]),
{},
pd.Series([input]),
pd.Series([mlflow_ground_truth]),
)
assert mock_predict_function.call_count == 1
assert mock_predict_function.call_args[0][0] == "gateway:/gpt-3.5-turbo"
assert mock_predict_function.call_args[0][1] == (
"\nTask:\nYou must return the following fields in your response in two "
"lines, one below the other:\nscore: Your numerical score for the model's "
"faithfulness "
"based on the rubric\njustification: Your reasoning about the model's "
"faithfulness "
"score\n\nYou are an impartial judge. You will be given an input that was "
"sent to a machine\nlearning model, and you will be given an output that the model "
"produced. You\nmay also be given additional information that was used by the model "
"to generate the output.\n\nYour task is to determine a numerical score called "
"faithfulness based on the input and output.\nA definition of "
"faithfulness and a grading rubric are provided below.\nYou must use the "
"grading rubric to determine your score. You must also justify your score."
"\n\nExamples could be included below for reference. Make sure to use them as "
"references and to\nunderstand them before completing the task.\n"
f"\nInput:\n{input}\n"
f"\nOutput:\n{mlflow_prediction}\n"
"\nAdditional information used by the model:\nkey: context\nvalue:\n"
f"{mlflow_ground_truth}\n"
f"\nMetric definition:\n{FaithfulnessMetric.definition}\n"
f"\nGrading rubric:\n{FaithfulnessMetric.grading_prompt}\n"
"\n"
"\n\nYou must return the "
"following fields in your response in two lines, one below the other:\nscore: Your "
"numerical score for the model's faithfulness based on the rubric\njustification: "
"Your "
"reasoning about the model's faithfulness score\n\nDo not add additional new "
"lines. Do "
"not add any other fields.\n "
)
assert mock_predict_function.call_args[0][2] == {
**FaithfulnessMetric.parameters,
}
assert metric_value.scores == [3]
assert metric_value.justifications == [openai_justification1]
assert metric_value.aggregate_results == {
"mean": 3,
"variance": 0,
"p90": 3,
}
with pytest.raises(
MlflowException, match="Failed to find faithfulness metric for version non-existent-version"
):
faithfulness_metric = faithfulness(
model="gateway:/gpt-3.5-turbo",
metric_version="non-existent-version",
examples=[mlflow_example],
)
def test_answer_correctness_metric():
answer_correctness_metric = answer_correctness()
input = "What is MLflow?"
examples = "\n".join([str(example) for example in AnswerCorrectnessMetric.default_examples])
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
) as mock_predict_function:
metric_value = answer_correctness_metric.eval_fn(
pd.Series([mlflow_prediction]),
{},
pd.Series([input]),
pd.Series([mlflow_ground_truth]),
)
assert mock_predict_function.call_count == 1
assert mock_predict_function.call_args[0][0] == "openai:/gpt-4"
assert mock_predict_function.call_args[0][1] == (
"\nTask:\nYou must return the following fields in your response in two "
"lines, one below the other:\nscore: Your numerical score for the model's "
"answer_correctness "
"based on the rubric\njustification: Your reasoning about the model's "
"answer_correctness "
"score\n\nYou are an impartial judge. You will be given an input that was "
"sent to a machine\nlearning model, and you will be given an output that the model "
"produced. You\nmay also be given additional information that was used by the model "
"to generate the output.\n\nYour task is to determine a numerical score called "
"answer_correctness based on the input and output.\nA definition of "
"answer_correctness and a grading rubric are provided below.\nYou must use the "
"grading rubric to determine your score. You must also justify your score."
"\n\nExamples could be included below for reference. Make sure to use them as "
"references and to\nunderstand them before completing the task.\n"
f"\nInput:\n{input}\n"
f"\nOutput:\n{mlflow_prediction}\n"
"\nAdditional information used by the model:\nkey: targets\nvalue:\n"
f"{mlflow_ground_truth}\n"
f"\nMetric definition:\n{AnswerCorrectnessMetric.definition}\n"
f"\nGrading rubric:\n{AnswerCorrectnessMetric.grading_prompt}\n"
"\nExamples:\n"
f"{examples}\n"
"\nYou must return the "
"following fields in your response in two lines, one below the other:\nscore: Your "
"numerical score for the model's answer_correctness based on the rubric\n"
"justification: Your "
"reasoning about the model's answer_correctness score\n\nDo not add additional new "
"lines. Do "
"not add any other fields.\n "
)
assert mock_predict_function.call_args[0][2] == {
**AnswerCorrectnessMetric.parameters,
}
assert metric_value.scores == [3]
assert metric_value.justifications == [openai_justification1]
assert metric_value.aggregate_results == {
"mean": 3,
"variance": 0,
"p90": 3,
}
with pytest.raises(
MlflowException,
match="Failed to find answer correctness metric for version non-existent-version",
):
answer_correctness(metric_version="non-existent-version")
def test_answer_relevance_metric():
answer_relevance_metric = answer_relevance(model="gateway:/gpt-3.5-turbo", examples=[])
input = "What is MLflow?"
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
) as mock_predict_function:
metric_value = answer_relevance_metric.eval_fn(
pd.Series([mlflow_prediction]),
{},
pd.Series([input]),
pd.Series([mlflow_ground_truth]),
)
assert mock_predict_function.call_count == 1
assert mock_predict_function.call_args[0][0] == "gateway:/gpt-3.5-turbo"
assert mock_predict_function.call_args[0][1] == (
"\nTask:\nYou must return the following fields in your response in two "
"lines, one below the other:\nscore: Your numerical score for the model's "
"answer_relevance "
"based on the rubric\njustification: Your reasoning about the model's "
"answer_relevance "
"score\n\nYou are an impartial judge. You will be given an input that was "
"sent to a machine\nlearning model, and you will be given an output that the model "
"produced. You\nmay also be given additional information that was used by the model "
"to generate the output.\n\nYour task is to determine a numerical score called "
"answer_relevance based on the input and output.\nA definition of "
"answer_relevance and a grading rubric are provided below.\nYou must use the "
"grading rubric to determine your score. You must also justify your score."
"\n\nExamples could be included below for reference. Make sure to use them as "
"references and to\nunderstand them before completing the task.\n"
f"\nInput:\n{input}\n"
f"\nOutput:\n{mlflow_prediction}\n"
"\n\n"
f"\nMetric definition:\n{AnswerRelevanceMetric.definition}\n"
f"\nGrading rubric:\n{AnswerRelevanceMetric.grading_prompt}\n"
"\n"
"\n\nYou must return the "
"following fields in your response in two lines, one below the other:\nscore: Your "
"numerical score for the model's answer_relevance based on the rubric\njustification: "
"Your "
"reasoning about the model's answer_relevance score\n\nDo not add additional new "
"lines. Do "
"not add any other fields.\n "
)
assert mock_predict_function.call_args[0][2] == {
**AnswerRelevanceMetric.parameters,
}
assert metric_value.scores == [3]
assert metric_value.justifications == [openai_justification1]
assert metric_value.aggregate_results == {
"mean": 3,
"variance": 0,
"p90": 3,
}
with pytest.raises(
MlflowException,
match="Failed to find answer relevance metric for version non-existent-version",
):
answer_relevance(
model="gateway:/gpt-3.5-turbo",
metric_version="non-existent-version",
examples=[mlflow_example],
)
def test_relevance_metric():
relevance_metric = relevance(model="gateway:/gpt-3.5-turbo", examples=[])
input = "What is MLflow?"
pd.DataFrame(
{
"input": [input],
"prediction": [mlflow_prediction],
"context": [mlflow_ground_truth],
}
)
with mock.patch.object(
model_utils,
"score_model_on_payload",
return_value=properly_formatted_openai_response1,
) as mock_predict_function:
metric_value = relevance_metric.eval_fn(
pd.Series([mlflow_prediction]),
{},
pd.Series([input]),
pd.Series([mlflow_ground_truth]),
)
assert mock_predict_function.call_count == 1
assert mock_predict_function.call_args[0][0] == "gateway:/gpt-3.5-turbo"
assert mock_predict_function.call_args[0][1] == (
"\nTask:\nYou must return the following fields in your response in two "
"lines, one below the other:\nscore: Your numerical score for the model's "
"relevance "
"based on the rubric\njustification: Your reasoning about the model's "
"relevance "
"score\n\nYou are an impartial judge. You will be given an input that was "
"sent to a machine\nlearning model, and you will be given an output that the model "
"produced. You\nmay also be given additional information that was used by the model "
"to generate the output.\n\nYour task is to determine a numerical score called "
"relevance based on the input and output.\nA definition of "
"relevance and a grading rubric are provided below.\nYou must use the "
"grading rubric to determine your score. You must also justify your score."
"\n\nExamples could be included below for reference. Make sure to use them as "
"references and to\nunderstand them before completing the task.\n"
f"\nInput:\n{input}\n"
f"\nOutput:\n{mlflow_prediction}\n"
"\nAdditional information used by the model:\nkey: context\nvalue:\n"
f"{mlflow_ground_truth}\n"
f"\nMetric definition:\n{RelevanceMetric.definition}\n"
f"\nGrading rubric:\n{RelevanceMetric.grading_prompt}\n"
"\n"
"\n\nYou must return the "
"following fields in your response in two lines, one below the other:\nscore: Your "
"numerical score for the model's relevance based on the rubric\njustification: "
"Your "
"reasoning about the model's relevance score\n\nDo not add additional new "
"lines. Do "
"not add any other fields.\n "
)
assert mock_predict_function.call_args[0][2] == {
**RelevanceMetric.parameters,
}
assert metric_value.scores == [3]
assert metric_value.justifications == [openai_justification1]
assert metric_value.aggregate_results == {
"mean": 3,
"variance": 0,
"p90": 3,
}
with pytest.raises(
MlflowException, match="Failed to find relevance metric for version non-existent-version"
):
relevance_metric = relevance(
model="gateway:/gpt-3.5-turbo",
metric_version="non-existent-version",
examples=[mlflow_example],
)
def test_make_genai_metric_metric_details():
custom_metric = make_genai_metric(
name="correctness",
version="v1",
definition=example_definition,
grading_prompt=example_grading_prompt,
examples=[mlflow_example],
model="gateway:/gpt-3.5-turbo",
grading_context_columns=["targets"],
parameters={"temperature": 0.0},
greater_is_better=True,
aggregations=["mean", "variance", "p90"],
)
# pylint: disable=line-too-long
expected_metric_details = "\nTask:\nYou must return the following fields in your response in two lines, one below the other:\nscore: Your numerical score for the model's correctness based on the rubric\njustification: Your reasoning about the model's correctness score\n\nYou are an impartial judge. You will be given an input that was sent to a machine\nlearning model, and you will be given an output that the model produced. You\nmay also be given additional information that was used by the model to generate the output.\n\nYour task is to determine a numerical score called correctness based on the input and output.\nA definition of correctness and a grading rubric are provided below.\nYou must use the grading rubric to determine your score. You must also justify your score.\n\nExamples could be included below for reference. Make sure to use them as references and to\nunderstand them before completing the task.\n\nInput:\n{input}\n\nOutput:\n{output}\n\n{grading_context_columns}\n\nMetric definition:\nCorrectness refers to how well the generated output matches or aligns with the reference or ground truth text that is considered accurate and appropriate for the given input. The ground truth serves as a benchmark against which the provided output is compared to determine the level of accuracy and fidelity.\n\nGrading rubric:\nCorrectness: If the answer correctly answer the question, below are the details for different scores: - Score 0: the answer is completely incorrect, doesn’t mention anything about the question or is completely contrary to the correct answer. - Score 1: the answer provides some relevance to the question and answer one aspect of the question correctly. - Score 2: the answer mostly answer the question but is missing or hallucinating on one critical aspect. - Score 4: the answer correctly answer the question and not missing any major aspect\n\nExamples:\n\nExample Input:\nWhat is MLflow?\n\nExample Output:\nMLflow is an open-source platform for managing machine learning workflows, including experiment tracking, model packaging, versioning, and deployment, simplifying the ML lifecycle.\n\nAdditional information used by the model:\nkey: targets\nvalue:\nMLflow is an open-source platform for managing the end-to-end machine learning (ML) lifecycle. It was developed by Databricks, a company that specializes in big data and machine learning solutions. MLflow is designed to address the challenges that data scientists and machine learning engineers face when developing, training, and deploying machine learning models.\n\nExample score: 4\nExample justification: The definition effectively explains what MLflow is its purpose, and its developer. It could be more concise for a 5-score.\n \n\nYou must return the following fields in your response in two lines, one below the other:\nscore: Your numerical score for the model's correctness based on the rubric\njustification: Your reasoning about the model's correctness score\n\nDo not add additional new lines. Do not add any other fields.\n "
assert custom_metric.metric_details == expected_metric_details
assert (
custom_metric.__str__()
== f"EvaluationMetric(name=correctness, greater_is_better=True, long_name=correctness, version=v1, metric_details={expected_metric_details})"
)
# pylint: enable=line-too-long
def test_make_genai_metric_without_example():
make_genai_metric(
name="correctness",
version="v1",
definition=example_definition,
grading_prompt=example_grading_prompt,
model="gateway:/gpt-3.5-turbo",
grading_context_columns=["targets"],
parameters={"temperature": 0.0},
greater_is_better=True,
aggregations=["mean", "variance", "p90"],
)
| [
"Correctness: If the answer correctly answer the question, below are the details for different scores: - Score 0: the answer is completely incorrect, doesn’t mention anything about the question or is completely contrary to the correct answer. - Score 1: the answer provides some relevance to the question and answer one aspect of the question correctly. - Score 2: the answer mostly answer the question but is missing or hallucinating on one critical aspect. - Score 4: the answer correctly answer the question and not missing any major aspect"
] |
2024-01-10 | il-katta/mIA | utils~chat_tools~document_vectorstore.py | from typing import Optional, List
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.llms.openai import OpenAI
from langchain.pydantic_v1 import Field
from langchain.pydantic_v1 import PrivateAttr
from langchain.schema import BaseRetriever, Document
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.vectorstore import VectorStore
from langchain.tools.base import BaseTool
# https://python.langchain.com/docs/modules/data_connection/retrievers/vectorstore
class DocumentVectorStore(BaseTool):
"""Tool that use a VectorStore"""
_retriever: BaseRetriever = PrivateAttr()
vectorstore: VectorStore = Field(exclude=True)
llm: BaseLanguageModel = Field(default_factory=lambda: OpenAI(temperature=0))
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._retriever = self.vectorstore.as_retriever()
@property
def args(self) -> dict:
return {
"query": {
"type": "string",
"description": """Question or query about/over the document, For example: 'what is the title of the document "greetings.txt" ?' or 'can you make a summary of the document "final documentation.pdf"?' """
},
"filename": {
"type": "string",
"description": """The filename of the document to be queried. For example: 'greetings.txt' or 'final documentation.pdf' """
}
}
    def run(self, *args, **kwargs):
        # return the tool output instead of discarding it
        return super().run(*args, **kwargs)
def _run(
self,
query: Optional[str] = None,
filename: Optional[str] = None,
run_manager: Optional[CallbackManagerForToolRun] = None,
**kwargs,
) -> List[Document]:
'''
:param str query: Question or query about/over the document, For example: 'what is the title of the document "greetings.txt" ?' or 'can you make a summary of the document "final documentation.pdf"?'
:param str filename: The document to be queried. For example: 'greetings.txt' or 'final documentation.pdf'
:param run_manager:
:param kwargs:
:return:
'''
if query is None:
query = ""
metadata = {}
if filename:
metadata["filename"] = filename
return self._retriever.get_relevant_documents(
query=query,
callbacks=run_manager.get_child() if run_manager else None,
metadata=metadata,
**kwargs,
)
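# Usage sketch (the vector store, embeddings, and file name below are assumptions):
#
#     from langchain.embeddings import OpenAIEmbeddings
#     from langchain.vectorstores import Chroma
#
#     tool = DocumentVectorStore(
#         name="document_vectorstore",
#         description="Answers questions about previously uploaded documents",
#         vectorstore=Chroma(embedding_function=OpenAIEmbeddings()),
#     )
#     docs = tool.run({"query": "what is the title of the document?", "filename": "greetings.txt"})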
| [] |
2024-01-10 | il-katta/mIA | components~music_images_generator.py | import json
from typing import List, Tuple, Any
import openai
import gradio as gr
from utils import package_exists, cuda_is_available
from utils.system_stats import SystemStats
def is_available():
return package_exists("openai") \
and package_exists("transformers") \
and package_exists("torch") \
and package_exists("diffusers") \
and cuda_is_available()
def call_openai_api(text: str):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
# model="gpt-4-0613",
messages=[
{
"role": "user",
"content": f"Generate image for the song {text}"
}
],
functions=[
{
"name": "generate_image",
"description": """Generate an image based on the description of the elements present in the image and the type of image you want to generate.
the description of the image are a list of details that must be present in the image representative of the song lyrics or the song meaning.
The details must be concrete, devoid of abstract concepts, for example instead of 'A corrupted city' the correct parameter is 'A city, garbage on the street, burning cars'.
The image ca be realistic or fantasy, you will also need to specify if the image is a photo therefore a real thing or a drawing because it includes elements of fantasy.
""",
"parameters": {
"type": "object",
"properties": {
"subject": {
"type": "string",
"description": "the description of the elements present in the image, separated by comma. For example: 'a man, a dog, a tree, night, moon'",
},
"type": {
"type": "string",
"enum": ["realistic", "fantasy"],
"description": "the type of image you want to generate, 'realistic' if the image should be realistic, 'fantasy' if the image should be fantasy"
}
},
"required": ["subject", "type"]
}
}
],
temperature=1,
)
for choice in response.choices:
if choice.message.get("function_call", None):
function_name = choice.message["function_call"]["name"]
args = json.loads(choice.message["function_call"]["arguments"])
yield function_name, args
        try:
            # some responses place the arguments directly in the message content
            args = json.loads(choice.message["content"])
            yield "generate_image", args
        except (TypeError, ValueError):
            # content missing or not valid JSON
            pass
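# Illustrative shape of what call_openai_api yields (the song and arguments are made up):
#
#     for fn_name, fn_args in call_openai_api("Bohemian Rhapsody - Queen"):
#         # fn_name -> "generate_image"
#         # fn_args -> {"subject": "a man, a stage, a crowd, night", "type": "fantasy"}
#         ...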
def clean_all():
return "", ""
def gui(sysstats: SystemStats):
from utils.image_generator import ImageGenerator
from utils.prompt_generator import PromptGenerator
image_generator = ImageGenerator(model_name="rundiffusionFX")
sysstats.register_disposable_model(image_generator)
prompt_generator = PromptGenerator()
sysstats.register_disposable_model(prompt_generator)
def on_input_song(song_artist_title: str, images: List[Tuple[str, Any]], only_text: bool = False):
'''
        :param song_artist_title: song title and artist entered by the user, e.g. "Bohemian Rhapsody - Queen"
        :param images: gallery state, a list of (image, caption) tuples accumulated so far
        :param only_text: if True, only the text prompt is generated and image generation is skipped
        :return: message_textbox, prompt_markdown, image_viewer, image_gallery, images
'''
for txt in call_openai_api(song_artist_title):
subject = txt[1]["subject"]
yield f"🎵 {song_artist_title}", f"*{subject}*", None, images, images
expanded_subject = prompt_generator.generate_prompt(subject)
# subject_markdown = f"_{subject}_\n\n**{expanded_subject}**"
            # highlight the original subject inside the expanded prompt
            subject_markdown = expanded_subject.replace(subject, f"**{subject}**")
yield f"🎵 {song_artist_title}", subject_markdown, None, images, images
if only_text:
import time
time.sleep(10)
continue
for i in range(5):
image, metadata = image_generator.generate_image(expanded_subject, sampler_name="DPM Solver++")
images.append((image, song_artist_title))
yield f"🎵 {song_artist_title}", subject_markdown, image, images, images
images = gr.State([])
with gr.Row():
with gr.Column(scale=5, min_width=100):
message_textbox = gr.Textbox(
show_label=False,
placeholder="Enter song title and artist",
container=False,
lines=1,
)
with gr.Column(scale=1, min_width=20):
submit_button = gr.Button("↗️", variant="primary")
clear_button = gr.Button("🗑", variant="secondary")
with gr.Row():
generate_only_text_checkbox = gr.Checkbox(label="📝 Generate only prompt", value=False)
with gr.Row():
song_markdown = gr.Markdown("")
with gr.Row():
prompt_markdown = gr.Markdown("")
with gr.Row():
image_viewer = gr.Image(type="pil", image_mode="RGB")
with gr.Row():
image_gallery = gr.Gallery(
label="Generated images", show_label=True, elem_id="gallery", value=images.value,
)
clear_button.click(
clean_all,
inputs=[],
outputs=[message_textbox, prompt_markdown],
queue=False
)
submit_button.click(
lambda x: x,
inputs=[message_textbox],
outputs=[song_markdown],
queue=False,
api_name="_",
).then(
on_input_song,
inputs=[message_textbox, images, generate_only_text_checkbox],
outputs=[song_markdown, prompt_markdown, image_viewer, image_gallery, images],
api_name="generate_image_from_song",
)
| [
"Generate image for the song PLACEHOLDER"
] |
2024-01-10 | il-katta/mIA | bookmarks~scripts~process_bookmarks.py | import json
from typing import Optional
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
import config
with open(config.DATA_DIR / 'bookmarks.json', 'r') as f:
docs = [Document(**d) for d in json.load(f)]
print(f"Documents: {len(docs)}")
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits = text_splitter.split_documents(docs)
print(f"Chunks: {len(all_splits)}")
persist_directory = str(config.DATA_DIR / "bookmarks_vectorstore")
embedding = OpenAIEmbeddings(show_progress_bar=True)
# embedding = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
# embedding = HuggingFaceEmbeddings(model_name="all-MiniLM-L12-v2")
collection_name = "bookmarks"
vectorstore = Chroma.from_documents(
documents=all_splits,
embedding=embedding,
collection_name=collection_name,
persist_directory=persist_directory,
)
vectorstore.persist()
# drop the in-memory reference so the persisted collection is reloaded from disk below
del vectorstore
vectorstore: Optional[Chroma] = Chroma(
persist_directory=persist_directory,
collection_name=collection_name,
embedding_function=embedding
)
docs = vectorstore.similarity_search("python code")
for doc in docs:
print(doc.metadata["source"])
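# Illustrative only (not part of the original script): a sketch of how the persisted store
# could back a question-answering chain over the bookmarks. The chat model, the retriever
# settings and the sample question below are assumptions.
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA

qa_chain = RetrievalQA.from_chain_type(
    llm=ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0),
    retriever=vectorstore.as_retriever(search_kwargs={"k": 4}),
)
print(qa_chain.run("Which bookmarks are about Python?"))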
| [] |
2024-01-10 | il-katta/mIA | callbackhandlers.py | import json
import logging
from multiprocessing import Queue
from typing import Dict, Any, List, Union
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import LLMResult, AgentAction
__all__ = ["OnStream", "StreamMessage"]
class StreamMessage:
def __init__(self, type: str, data: Any):
self.type = type
self.data = data
class OnStream(BaseCallbackHandler):
def __init__(self, queue: Queue):
self._queue = queue
self._logger = logging.getLogger("OnStream")
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Run when LLM starts running."""
self._logger.debug(f"LLM started with prompts: {prompts} ( serialized: {serialized} )")
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run on new LLM token. Only available when streaming is enabled."""
self._logger.debug(f"LLM new token: '{token}'")
self._queue.put(StreamMessage("token", token))
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
self._logger.debug(f"LLM ended with response: {response}")
self._queue.put(StreamMessage("llm_end", None))
def on_llm_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
"""Run when LLM errors."""
self._logger.error(f"LLM error: {error}")
self._queue.put(StreamMessage("llm_error", error))
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Run when chain starts running."""
self._logger.debug(f"Chain started with inputs: {inputs} ( serialized: {serialized} )")
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
self._logger.debug(f"Chain ended with outputs: {outputs}")
# self._queue.close()
if "response" in outputs:
self._queue.put(StreamMessage("response", outputs["response"]))
self._queue.put(StreamMessage("chain_end", None))
def on_chain_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
"""Run when chain errors."""
self._logger.error(f"Chain error: {error}")
self._queue.put(StreamMessage("chain_error", error))
def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> None:
"""Run when tool starts running."""
self._logger.debug(f"Tool started with input: {input_str} ( serialized: {serialized} )")
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action."""
self._logger.debug(f"Agent action: {action}")
| [
"chain_end",
"llm_end",
"None",
"response",
"chain_error",
"token",
"llm_error"
] |