date_collected | repo_name | file_name | file_contents | prompts |
---|---|---|---|---|
2024-01-10 | Hritikbansal/sparse_feedback | scripts~llm_feedback_ratings.py | import os
import csv
import time
import openai
import argparse
import pandas as pd
from tqdm import tqdm
from constants import RATINGS_PROMPT
parser = argparse.ArgumentParser()
parser.add_argument('--start_index', type = int, default = 0)
parser.add_argument('--input_csv', type = str, default = 'ratings_sample_generation.json')
parser.add_argument('--save_feedback_csv', type = str, default = None)
parser.add_argument('--gpt_version', choices=['gpt-3.5-turbo', 'gpt-4'], default='gpt-3.5-turbo')
args = parser.parse_args()
PROMPT_DICT = {
"prompt_input": (
"{instruction}\n\nInput:\n{input}"
),
"prompt_no_input": (
"{instruction}"
),
}
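# Build the chat message for the rating request: wrap the instruction (and optional input) in RATINGS_PROMPT.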
def get_reward(instruction, input, output):
if str(input) == "":
instruction = PROMPT_DICT['prompt_no_input'].format(instruction = instruction)
prompt = RATINGS_PROMPT.format(instruction = instruction, response = output)
else:
instruction = PROMPT_DICT['prompt_input'].format(instruction = instruction, input = input)
prompt = RATINGS_PROMPT.format(instruction = instruction, response = output)
messages = [{"role": "user", "content": prompt}]
return messages
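# Iterate over the input CSV rows, query the selected GPT model for a rating, and append the parsed score to the feedback CSV.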
def main():
df = pd.read_csv(args.input_csv)
df = df.iloc[args.start_index:]
total = 0
for j in tqdm(range(len(df))):
instruction = df.iloc[j]['instruction']
input = df.iloc[j]['input']
output = df.iloc[j]['response']
try:
completion = openai.ChatCompletion.create(
model = args.gpt_version,
messages = get_reward(instruction, input, output)
)
feedback = completion['choices'][0]['message']['content']
score = feedback.split("\n")[0]
if score.isnumeric():
score = int(score)
print(score)
with open(args.save_feedback_csv, 'a') as f:
csvwriter = csv.writer(f)
csvwriter.writerow([instruction, input, output, score])
except Exception:
### If the API call fails, skip this instance instead of retrying with exponential backoff, since repeated requests are not cost-efficient.
print('Sleeping...')
time.sleep(5)
if __name__ == '__main__':
main()
| [
"{'prompt_input': '{instruction}\\n\\nInput:\\n{input}', 'prompt_no_input': '{instruction}'}"
] |
2024-01-10 | microsoft/promptflow-resource-hub | sample_gallery~evaluate_semantic_kernel_planner~sk_planner_flow~planner.py | from promptflow import tool
from promptflow.connections import CustomConnection
import semantic_kernel as sk
from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion, AzureChatCompletion
#from semantic_kernel.planning import ActionPlanner
from semantic_kernel.planning import SequentialPlanner
from semantic_kernel.planning import ActionPlanner
from semantic_kernel.core_skills import MathSkill, TextSkill
import asyncio
import json
# The inputs section will change based on the arguments of the tool function, after you save the code
# Adding type to arguments and return value will help the system show the types properly
# Please update the function name/signature per need
@tool
def my_python_tool(ask: str, model: str, aoai_deployment: str, conn: CustomConnection) -> object:
llm_service = "AzureOpenAI"
endpoint = conn.AZURE_OPENAI_API_BASE
api_key = conn.AZURE_OPENAI_API_KEY
deployment = aoai_deployment
kernel = sk.Kernel()
useAzureOpenAI = True
#deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env()
kernel.add_chat_service(model, AzureChatCompletion(deployment, endpoint, api_key, api_version="2023-07-01-preview"))
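# Load semantic skills from the local skills/ directory and register the native math/text skills for the planner to use.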
skills_directory = "skills/"
summarize_skill = kernel.import_semantic_skill_from_directory(skills_directory, "SummarizeSkill")
writer_skill = kernel.import_semantic_skill_from_directory(skills_directory, "WriterSkill")
#text_skill = kernel.import_skill(TextSkill(), "TextSkill")
kernel.import_skill(MathSkill(), "math")
kernel.import_skill(TextSkill(), "text")
planner = SequentialPlanner(kernel)
#planner = ActionPlanner(kernel)
plan = asyncio.run(planner.create_plan_async(goal=ask))
result = asyncio.run(plan.invoke_async()).result
#result = asyncio.run(kernel.run_async(plan)).result
print(result)
#result = asyncio.run(plan.invoke_async())
#result = plan.invoke_async()
#steps = [(step.description, ":", step._state.__dict__) for step in plan._steps]
steps = [(step.description, "Function:", step.skill_name + "." + step._function.name, ":", step._state.__dict__) for step in plan._steps]
return_value = {"result": result, "steps": steps}
return return_value
# for index, step in enumerate(plan._steps):
# print("Step:", index)
# print("Description:",step.description)
# print("Function:", step.skill_name + "." + step._function.name)
# if len(step._outputs) > 0:
# print( " Output:\n", str.replace(result[step._outputs[0]],"\n", "\n "))
| [] |
2024-01-10 | microsoft/promptflow-resource-hub | sample_gallery~evaluate_semantic_kernel_planner~source_file~sk_planner.py | import semantic_kernel as sk
from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion, AzureChatCompletion
#from semantic_kernel.planning import ActionPlanner
from semantic_kernel.planning import SequentialPlanner
from semantic_kernel.planning import ActionPlanner
from semantic_kernel.core_skills import MathSkill, TextSkill
import asyncio
def sk_planner(ask: str, model: str, aoai_deployment: str, api_version: str) -> object:
llm_service = "AzureOpenAI"
api_key, endpoint = sk.azure_openai_settings_from_dot_env(include_deployment=False)
deployment = aoai_deployment
kernel = sk.Kernel()
useAzureOpenAI = True
#deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env()
kernel.add_chat_service(model, AzureChatCompletion(deployment, endpoint, api_key, api_version))
skills_directory = "skplanner/sk_planner_flow/skills/"
summarize_skill = kernel.import_semantic_skill_from_directory(skills_directory, "SummarizeSkill")
writer_skill = kernel.import_semantic_skill_from_directory(skills_directory, "WriterSkill")
#text_skill = kernel.import_skill(TextSkill(), "TextSkill")
kernel.import_skill(MathSkill(), "math")
kernel.import_skill(TextSkill(), "text")
planner = SequentialPlanner(kernel)
#planner = ActionPlanner(kernel)
plan = asyncio.run(planner.create_plan_async(goal=ask))
result = asyncio.run(plan.invoke_async()).result
#result = asyncio.run(kernel.run_async(plan)).result
print(result)
#result = asyncio.run(plan.invoke_async())
#result = plan.invoke_async()
steps = [(step.description, ":", step._state.__dict__) for step in plan._steps]
return_value = {"result": result, "steps": steps}
return return_value
if __name__=="__main__":
ask = """
Tomorrow is Valentine's day. I need to come up with a few date ideas. She speaks French so write it in French.
Convert the text to uppercase"""
model = "gpt-4"
aoai_deployment = "gpt-4"
api_version = "2023-07-01-preview"
sk_planner(ask=ask, model=model , aoai_deployment=aoai_deployment, api_version=api_version)
# for index, step in enumerate(plan._steps):
# print("Step:", index)
# print("Description:",step.description)
# print("Function:", step.skill_name + "." + step._function.name)
# if len(step._outputs) > 0:
# print( " Output:\n", str.replace(result[step._outputs[0]],"\n", "\n "))
| [] |
2024-01-10 | karl-friman/rag-self-query | 10.RAG-OpenAIEmbeddings-Redis-Web.py | # -*- coding: utf-8 -*-
import argparse
import os
import constants
from langchain.document_loaders import WebBaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.redis import Redis
from langchain.llms import Ollama
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain import hub
from langchain.chains import RetrievalQA
# Set OpenAI API key from constants
os.environ["OPENAI_API_KEY"] = constants.OPENAI_API_KEY
def main():
# Set up argument parser for command line options
parser = argparse.ArgumentParser(
description="Process a URL to perform retrieval-based QA."
)
parser.add_argument("--url", type=str, required=True, help="The URL to process.")
args = parser.parse_args()
url = args.url
print(f"Using URL: {url}")
# Load and split webpage text
loader = WebBaseLoader(url)
data = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=100)
all_splits = text_splitter.split_documents(data)
print(f"Split into {len(all_splits)} chunks")
# Setup Redis Vectorstore
vectorstore = Redis.from_documents(
documents=all_splits,
embedding=OpenAIEmbeddings(),
redis_url="redis://localhost:6379",
)
print(f"Loaded {len(data)} documents")
# Load Language Model with streaming output
llm = Ollama(
model="neural-chat:7b",
verbose=True,
callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
)
print(f"Loaded LLM model {llm.model}")
# Setup QA Chain
QA_CHAIN_PROMPT = hub.pull("rlm/rag-prompt-llama")
qa_chain = RetrievalQA.from_chain_type(
llm,
retriever=vectorstore.as_retriever(),
chain_type_kwargs={"prompt": QA_CHAIN_PROMPT},
)
# Perform QA
question = f"What are the latest headlines on {url}?"
result = qa_chain({"query": question})
# The result can be further processed or displayed here
# print(result)
if __name__ == "__main__":
main()
| [
"rlm/rag-prompt-llama"
] |
2024-01-10 | karl-friman/rag-self-query | 17.RAG-GCP-SelfQuery.py | import os
import constants
import logging
from typing import Any, Dict, List, Mapping, Optional
from langchain.llms import OpenAI
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms import VertexAI
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
from prettytable import PrettyTable
from termcolor import colored
# ToDO: replace with VertexAI
os.environ["OPENAI_API_KEY"] = constants.OPENAI_API_KEY
from langchain.schema import Document
from langchain.embeddings.openai import OpenAIEmbeddings
# ToDO: replace with GCP solution, pgvector?
from langchain.vectorstores import Chroma
import langchain
# ToDO: turn these 2 off as a final stage
# langchain.verbose = True
# langchain.debug = True
embeddings = OpenAIEmbeddings()
"""## Formatting and printing results"""
def print_documents(docs):
table = PrettyTable()
table.field_names = [
"Page Content",
"Color",
"Country",
"Grape",
"Name",
"Rating",
"Year",
]
for doc in docs:
table.add_row(
[
doc.page_content,
colored(doc.metadata["color"], "red"),
colored(doc.metadata["country"], "yellow"),
colored(doc.metadata["grape"], "blue"),
colored(doc.metadata["name"], "green"),
colored(doc.metadata["rating"], "magenta"),
colored(doc.metadata["year"], "cyan"),
]
)
print(table)
"""## Example data with metadata attached"""
docs = [
Document(
page_content="Complex, layered, rich red with dark fruit flavors",
metadata={
"name": "Opus One",
"year": 2018,
"rating": 96,
"grape": "Cabernet Sauvignon",
"color": "red",
"country": "USA",
},
),
Document(
page_content="Luxurious, sweet wine with flavors of honey, apricot, and peach",
metadata={
"name": "Château d'Yquem",
"year": 2015,
"rating": 98,
"grape": "Sémillon",
"color": "white",
"country": "France",
},
),
Document(
page_content="Full-bodied red with notes of black fruit and spice",
metadata={
"name": "Penfolds Grange",
"year": 2017,
"rating": 97,
"grape": "Shiraz",
"color": "red",
"country": "Australia",
},
),
Document(
page_content="Elegant, balanced red with herbal and berry nuances",
metadata={
"name": "Sassicaia",
"year": 2016,
"rating": 95,
"grape": "Cabernet Franc",
"color": "red",
"country": "Italy",
},
),
Document(
page_content="Highly sought-after Pinot Noir with red fruit and earthy notes",
metadata={
"name": "Domaine de la Romanée-Conti",
"year": 2018,
"rating": 100,
"grape": "Pinot Noir",
"color": "red",
"country": "France",
},
),
Document(
page_content="Crisp white with tropical fruit and citrus flavors",
metadata={
"name": "Cloudy Bay",
"year": 2021,
"rating": 92,
"grape": "Sauvignon Blanc",
"color": "white",
"country": "New Zealand",
},
),
Document(
page_content="Rich, complex Champagne with notes of brioche and citrus",
metadata={
"name": "Krug Grande Cuvée",
"year": 2010,
"rating": 93,
"grape": "Chardonnay blend",
"color": "sparkling",
"country": "New Zealand",
},
),
Document(
page_content="Intense, dark fruit flavors with hints of chocolate",
metadata={
"name": "Caymus Special Selection",
"year": 2018,
"rating": 96,
"grape": "Cabernet Sauvignon",
"color": "red",
"country": "USA",
},
),
Document(
page_content="Exotic, aromatic white with stone fruit and floral notes",
metadata={
"name": "Jermann Vintage Tunina",
"year": 2020,
"rating": 91,
"grape": "Sauvignon Blanc blend",
"color": "white",
"country": "Italy",
},
),
]
vectorstore = Chroma.from_documents(docs, embeddings)
print("vectorstore", vectorstore)
"""## Creating our self-querying retriever"""
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain.chains.query_constructor.base import AttributeInfo
metadata_field_info = [
AttributeInfo(
name="grape",
description="The grape used to make the wine",
type="string or list[string]",
),
AttributeInfo(
name="name",
description="The name of the wine",
type="string or list[string]",
),
AttributeInfo(
name="color",
description="The color of the wine",
type="string or list[string]",
),
AttributeInfo(
name="year",
description="The year the wine was released",
type="integer",
),
AttributeInfo(
name="country",
description="The name of the country the wine comes from",
type="string",
),
AttributeInfo(
name="rating",
description="The Robert Parker rating for the wine 0-100",
type="integer", # float
),
]
document_content_description = "Brief description of the wine"
# Assuming 'document_contents' is a list of the content of each document
document_contents = [doc.page_content for doc in docs]
llm_google = VertexAI(model="text-unicorn")
retriever = SelfQueryRetriever.from_llm(
llm_google,
vectorstore,
document_content_description,
metadata_field_info,
verbose=True,
)
# This example only specifies a relevant query
print("Q: What are some red wines")
print_documents(retriever.get_relevant_documents("What are some red wines"))
print("Q: I want a wine that has fruity nodes")
print_documents(retriever.get_relevant_documents("I want a wine that has fruity nodes"))
# This example specifies a query and a filter
print("Q: I want a wine that has fruity nodes and has a rating above 97")
print_documents(
retriever.get_relevant_documents(
"I want a wine that has fruity nodes and has a rating above 97"
)
)
print("Q: What wines come from Italy?")
print_documents(retriever.get_relevant_documents("What wines come from Italy?"))
# This example specifies a query and composite filter
print(
"Q: What's a wine after 2015 but before 2020 that's all earthy and has a rating above 95"
)
print_documents(
retriever.get_relevant_documents(
"hat's a wine after 2015 but before 2020 that's all earthy and has a rating above 95"
)
)
# ToDO: This used to work but doesn't anymore, there may have been a change to how the API works
# """## Filter K
# We can also use the self query retriever to specify k: the number of documents to fetch.
# We can do this by passing enable_limit=True to the constructor.
# """
# llm_openai = OpenAI(temperature=0)
retriever = SelfQueryRetriever.from_llm(
llm_google,
# llm_google, #This does not work
vectorstore,
document_content_description,
metadata_field_info,
enable_limit=True,
verbose=True,
)
print("Q: what are two that have a rating above 97")
# This example only specifies a relevant query - k= 2
print_documents(
retriever.get_relevant_documents("what are two that have a rating above 97")
)
print("Q: what are two wines that come from australia or New zealand")
print_documents(
retriever.get_relevant_documents(
"what are two wines that come from australia or New zealand"
)
)
| [] |
2024-01-10 | karl-friman/rag-self-query | 04.RAG-selfquery-instructorembeddings.py | import os
import constants
import together
from typing import Any, Dict, Optional
from pydantic import BaseModel, model_validator
from langchain.llms.base import LLM
from langchain.llms import OpenAI
from langchain.schema import Document
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.vectorstores import Chroma
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain.chains.query_constructor.base import AttributeInfo
from prettytable import PrettyTable
from termcolor import colored
import langchain
langchain.verbose = True
langchain.debug = True
os.environ["TOGETHER_API_KEY"] = constants.TOGETHER_API_KEY
os.environ["OPENAI_API_KEY"] = constants.OPENAI_API_KEY
class TogetherLLM(LLM):
"""Large language models from Together."""
model: str = "mistralai/Mistral-7B-Instruct-v0.1"
together_api_key: str = os.environ["TOGETHER_API_KEY"]
temperature: float = 0.0
max_tokens: int = 2600
class Config:
extra = "forbid"
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Dict:
api_key = os.environ["TOGETHER_API_KEY"]
values["together_api_key"] = api_key
return values
@property
def _llm_type(self) -> str:
return "together"
def _call(
self,
prompt: str,
**kwargs: Any,
) -> str:
together.api_key = self.together_api_key
output = together.Complete.create(
prompt=prompt,
model=self.model,
max_tokens=self.max_tokens,
temperature=self.temperature,
)
print("\n\nRAG-output:", output)
text = output["output"]["choices"][0]["text"]
print("\n\nRAG-text before cleanup:", text)
# substrings to be removed from start and end
beginning_str = "```json\n"
end_str = "\n```"
# removing beginning_str and end_str from the text
if text.startswith(beginning_str):
text = text[len(beginning_str) :]
if text.endswith(end_str):
text = text[: -len(end_str)]
print("\n\nRAG-text after cleanup:", text)
return text
llm = TogetherLLM()
llm_openai = OpenAI(temperature=0)
# embeddings = HuggingFaceInstructEmbeddings(
# model_name="hkunlp/instructor-xl", model_kwargs={"device": "cuda"}
# )
embeddings = HuggingFaceInstructEmbeddings(
model_name="hkunlp/instructor-xl", model_kwargs={"device": "cpu"}
)
docs = [
Document(
page_content="Complex, layered, rich red with dark fruit flavors",
metadata={
"name": "Opus One",
"year": 2018,
"rating": 96,
"grape": "Cabernet Sauvignon",
"color": "red",
"country": "USA",
},
),
Document(
page_content="Luxurious, sweet wine with flavors of honey, apricot, and peach",
metadata={
"name": "Château d'Yquem",
"year": 2015,
"rating": 98,
"grape": "Sémillon",
"color": "white",
"country": "France",
},
),
Document(
page_content="Full-bodied red with notes of black fruit and spice",
metadata={
"name": "Penfolds Grange",
"year": 2017,
"rating": 97,
"grape": "Shiraz",
"color": "red",
"country": "Australia",
},
),
Document(
page_content="Elegant, balanced red with herbal and berry nuances",
metadata={
"name": "Sassicaia",
"year": 2016,
"rating": 95,
"grape": "Cabernet Franc",
"color": "red",
"country": "Italy",
},
),
Document(
page_content="Highly sought-after Pinot Noir with red fruit and earthy notes",
metadata={
"name": "Domaine de la Romanée-Conti",
"year": 2018,
"rating": 100,
"grape": "Pinot Noir",
"color": "red",
"country": "France",
},
),
Document(
page_content="Crisp white with tropical fruit and citrus flavors",
metadata={
"name": "Cloudy Bay",
"year": 2021,
"rating": 92,
"grape": "Sauvignon Blanc",
"color": "white",
"country": "New Zealand",
},
),
Document(
page_content="Rich, complex Champagne with notes of brioche and citrus",
metadata={
"name": "Krug Grande Cuvée",
"year": 2010,
"rating": 93,
"grape": "Chardonnay blend",
"color": "sparkling",
"country": "New Zealand",
},
),
Document(
page_content="Intense, dark fruit flavors with hints of chocolate",
metadata={
"name": "Caymus Special Selection",
"year": 2018,
"rating": 96,
"grape": "Cabernet Sauvignon",
"color": "red",
"country": "USA",
},
),
Document(
page_content="Exotic, aromatic white with stone fruit and floral notes",
metadata={
"name": "Jermann Vintage Tunina",
"year": 2020,
"rating": 91,
"grape": "Sauvignon Blanc blend",
"color": "white",
"country": "Italy",
},
),
]
vectorstore = Chroma.from_documents(docs, embeddings)
metadata_field_info = [
AttributeInfo(
name="grape",
description="The grape used to make the wine",
type="string or list[string]",
),
AttributeInfo(
name="name",
description="The name of the wine",
type="string or list[string]",
),
AttributeInfo(
name="color",
description="The color of the wine",
type="string or list[string]",
),
AttributeInfo(
name="year",
description="The year the wine was released",
type="integer",
),
AttributeInfo(
name="country",
description="The name of the country the wine comes from",
type="string",
),
AttributeInfo(
name="rating",
description="The Robert Parker rating for the wine 0-100",
type="integer",
),
]
document_content_description = "Brief description of the wine"
def print_documents(docs):
table = PrettyTable()
table.field_names = [
"Page Content",
"Color",
"Country",
"Grape",
"Name",
"Rating",
"Year",
]
for doc in docs:
table.add_row(
[
doc.page_content,
colored(doc.metadata["color"], "red"),
colored(doc.metadata["country"], "yellow"),
colored(doc.metadata["grape"], "blue"),
colored(doc.metadata["name"], "green"),
colored(doc.metadata["rating"], "magenta"),
colored(doc.metadata["year"], "cyan"),
]
)
print(table)
print("Q: Who is Gary Oldman? ")
print(llm("Who is Gary Oldman? "))
retriever = SelfQueryRetriever.from_llm(
llm_openai, # THIS WORKS
# llm, # THIS DOES NOT WORK, reason according to Sam Witteveen "you will need a model that can handle JSON output well. I suggest trying some of the code models. If I am using an opensource model for this kind of task I will often fine tune it for the application first. Hope that helps".
vectorstore,
document_content_description,
metadata_field_info,
verbose=True,
)
# Currently only OpenAI (llm_openai) works with SelfQueryRetriever. Why is that?
# It appears the error originates from the output of the TogetherLLM model. After running the input query, it returns a JSON object which is then parsed. Based on the error message, the JSON object that has been returned by the model has some extra data that's causing the issue.
# The traceback shows that the error occurs during execution of the parse_and_check_json_markdown function in json.py:
# json.decoder.JSONDecodeError: Extra data: line 5 column 1 (char 68)
# The error "Extra Data" typically occurs when there is extra data outside of the structure of a JSON object, such as multiple JSON objects not encapsulated within a JSON array. Seeing from the error message, it seems there are multiple JSON objects being returned by Together's LLM. And if the parser expects only ONE object, it's failing when encounters the start of the next object.
# A potential workaround is to modify the TogetherLLM to ensure that it returns single, well-formatted JSON text that the rest of your code can handle.
# Another approach would be to extend the parser's functionality to process multiple JSON objects.
# However, the best solution would depend on the exact specifications of your project and the outputs that Together's LLM is supposed to return for successful integration with the SelfQueryRetriever. If the LLM model often returns multiple separate JSON objects instead of just one, you may wish to consider adjusting the parser accordingly. But, if this is not expected behavior for the LLM, then adjusting the model to only return a single JSON object may be more efficient.
# Lastly, it is always recommended to reach out to the library owners or maintainers for assistance with such issues. They may have more specific insights into why such a problem might occur and how best to resolve it.
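# A minimal sketch (not part of the original script) of the first workaround described above:
# keep only the first JSON object in the model output and drop any trailing extra data.
# The helper name `first_json_object` is hypothetical; json.JSONDecoder.raw_decode reports
# where the first JSON value ends, so anything after that point is simply discarded.
import json

def first_json_object(text: str) -> str:
    obj, _end = json.JSONDecoder().raw_decode(text.strip())
    return json.dumps(obj)
# e.g. this could be applied to `text` inside TogetherLLM._call before returning it.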
print("Q: What are some red wines")
print_documents(retriever.get_relevant_documents("What are some red wines"))
# print("Q: I want a wine that has fruity nodes")
# print_documents(retriever.get_relevant_documents("I want a wine that has fruity nodes"))
# # This example specifies a query and a filter
# print("Q: I want a wine that has fruity nodes and has a rating above 97")
# print_documents(
# retriever.get_relevant_documents(
# "I want a wine that has fruity nodes and has a rating above 97"
# )
# )
# print("Q: What wines come from Italy?")
# print_documents(retriever.get_relevant_documents("What wines come from Italy?"))
# # This example specifies a query and composite filter
# print("Q: What's a wine after 2015 but before 2020 that's all earthy")
# print_documents(
# retriever.get_relevant_documents(
# "What's a wine after 2015 but before 2020 that's all earthy"
# )
# )
# """## Filter K
# We can also use the self query retriever to specify k: the number of documents to fetch.
# We can do this by passing enable_limit=True to the constructor.
# """
# retriever = SelfQueryRetriever.from_llm(
# llm,
# vectorstore,
# document_content_description,
# metadata_field_info,
# enable_limit=True,
# verbose=True,
# )
# print("Q: what are two that have a rating above 97")
# # This example only specifies a relevant query - k= 2
# print_documents(
# retriever.get_relevant_documents("what are two that have a rating above 97")
# )
# print("Q: what are two wines that come from australia or New zealand")
# print_documents(
# retriever.get_relevant_documents(
# "what are two wines that come from australia or New zealand"
# )
# )
| [] |
2024-01-10 | karl-friman/rag-self-query | 03.RAG-selfquery-openaiembeddings.py | # -*- coding: utf-8 -*-
"""Copy of YT LangChain RAG tips and Tricks 01 - Self Query.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1zDYbCfGVMs4pHmqlmOtLr3ukCSc1kfXD
"""
import os
import constants
import together
import logging
from typing import Any, Dict, List, Mapping, Optional
from langchain.llms import OpenAI
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
from prettytable import PrettyTable
from termcolor import colored
os.environ["TOGETHER_API_KEY"] = constants.TOGETHER_API_KEY
os.environ["OPENAI_API_KEY"] = constants.OPENAI_API_KEY
# set your API key
together.api_key = os.environ["TOGETHER_API_KEY"]
# print the first model's name
# models = together.Models.list()
# print(models[3]["name"]), print(models[52]["name"])
# for idx, model in enumerate(models):
# print(idx, model["name"])
# print(models[55]["name"])
# together.Models.start("mistralai/Mistral-7B-Instruct-v0.1")
class TogetherLLM(LLM):
"""Together large language models."""
model: str = "mistralai/Mixtral-8x7B-Instruct-v0.1"
"""model endpoint to use"""
together_api_key: str = os.environ["TOGETHER_API_KEY"]
"""Together API key"""
temperature: float = 0.0
"""What sampling temperature to use."""
max_tokens: int = 512
"""The maximum number of tokens to generate in the completion."""
class Config:
extra = "forbid"
# @root_validator(skip_on_failure=True)
# def validate_environment(cls, values: Dict) -> Dict:
# """Validate that the API key is set."""
# api_key = get_from_dict_or_env(values, "together_api_key", "TOGETHER_API_KEY")
# values["together_api_key"] = api_key
# return values
@property
def _llm_type(self) -> str:
"""Return type of LLM."""
return "together"
def _call(
self,
prompt: str,
**kwargs: Any,
) -> str:
"""Call to Together endpoint."""
together.api_key = self.together_api_key
output = together.Complete.create(
prompt,
model=self.model,
max_tokens=self.max_tokens,
temperature=self.temperature,
)
text = output["output"]["choices"][0]["text"]
return text
llm = TogetherLLM(
model="mistralai/Mixtral-8x7B-Instruct-v0.1", temperature=0.1, max_tokens=1024
)
# type(llm), llm.model, llm.temperature
# print("Q: What are the olympics? ")
# print(llm("What are the olympics? "))
# """## Self-querying Retriever"""
from langchain.schema import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
embeddings = OpenAIEmbeddings()
"""## Formatting and printing results"""
def print_documents(docs):
table = PrettyTable()
table.field_names = [
"Page Content",
"Color",
"Country",
"Grape",
"Name",
"Rating",
"Year",
]
for doc in docs:
table.add_row(
[
doc.page_content,
colored(doc.metadata["color"], "red"),
colored(doc.metadata["country"], "yellow"),
colored(doc.metadata["grape"], "blue"),
colored(doc.metadata["name"], "green"),
colored(doc.metadata["rating"], "magenta"),
colored(doc.metadata["year"], "cyan"),
]
)
print(table)
"""## Example data with metadata attached"""
docs = [
Document(
page_content="Complex, layered, rich red with dark fruit flavors",
metadata={
"name": "Opus One",
"year": 2018,
"rating": 96,
"grape": "Cabernet Sauvignon",
"color": "red",
"country": "USA",
},
),
Document(
page_content="Luxurious, sweet wine with flavors of honey, apricot, and peach",
metadata={
"name": "Château d'Yquem",
"year": 2015,
"rating": 98,
"grape": "Sémillon",
"color": "white",
"country": "France",
},
),
Document(
page_content="Full-bodied red with notes of black fruit and spice",
metadata={
"name": "Penfolds Grange",
"year": 2017,
"rating": 97,
"grape": "Shiraz",
"color": "red",
"country": "Australia",
},
),
Document(
page_content="Elegant, balanced red with herbal and berry nuances",
metadata={
"name": "Sassicaia",
"year": 2016,
"rating": 95,
"grape": "Cabernet Franc",
"color": "red",
"country": "Italy",
},
),
Document(
page_content="Highly sought-after Pinot Noir with red fruit and earthy notes",
metadata={
"name": "Domaine de la Romanée-Conti",
"year": 2018,
"rating": 100,
"grape": "Pinot Noir",
"color": "red",
"country": "France",
},
),
Document(
page_content="Crisp white with tropical fruit and citrus flavors",
metadata={
"name": "Cloudy Bay",
"year": 2021,
"rating": 92,
"grape": "Sauvignon Blanc",
"color": "white",
"country": "New Zealand",
},
),
Document(
page_content="Rich, complex Champagne with notes of brioche and citrus",
metadata={
"name": "Krug Grande Cuvée",
"year": 2010,
"rating": 93,
"grape": "Chardonnay blend",
"color": "sparkling",
"country": "New Zealand",
},
),
Document(
page_content="Intense, dark fruit flavors with hints of chocolate",
metadata={
"name": "Caymus Special Selection",
"year": 2018,
"rating": 96,
"grape": "Cabernet Sauvignon",
"color": "red",
"country": "USA",
},
),
Document(
page_content="Exotic, aromatic white with stone fruit and floral notes",
metadata={
"name": "Jermann Vintage Tunina",
"year": 2020,
"rating": 91,
"grape": "Sauvignon Blanc blend",
"color": "white",
"country": "Italy",
},
),
]
vectorstore = Chroma.from_documents(docs, embeddings)
print("vectorstore", vectorstore)
"""## Creating our self-querying retriever"""
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain.chains.query_constructor.base import AttributeInfo
metadata_field_info = [
AttributeInfo(
name="grape",
description="The grape used to make the wine",
type="string or list[string]",
),
AttributeInfo(
name="name",
description="The name of the wine",
type="string or list[string]",
),
AttributeInfo(
name="color",
description="The color of the wine",
type="string or list[string]",
),
AttributeInfo(
name="year",
description="The year the wine was released",
type="integer",
),
AttributeInfo(
name="country",
description="The name of the country the wine comes from",
type="string",
),
AttributeInfo(
name="rating",
description="The Robert Parker rating for the wine 0-100",
type="integer", # float
),
]
document_content_description = "Brief description of the wine"
# Assuming 'document_contents' is a list of the content of each document
document_contents = [doc.page_content for doc in docs]
llm_openai = OpenAI(temperature=0)
from langchain.llms import VertexAI
llm_google = VertexAI()
retriever = SelfQueryRetriever.from_llm(
# llm_google,
# llm_openai, # THIS WORKS
llm, # THIS DOES NOT WORK, reason according to Sam Witteveen "you will need a model that can handle JSON output well. I suggest trying some of the code models. If I am using an opensource model for this kind of task I will often fine tune it for the application first. Hope that helps".
vectorstore,
document_content_description,
metadata_field_info,
verbose=True,
)
# This example only specifies a relevant query
print("Q: What are some red wines")
print_documents(retriever.get_relevant_documents("What are some red wines"))
print("Q: Who is Gary Oldman? ")
print(llm("Who is Gary Oldman? "))
| [] |
2024-01-10 | karl-friman/rag-self-query | 14.1.RAG-Qdrant-InstructorEmbeddings.py | # -*- coding: utf-8 -*-
# This requires Docker containers for Chroma, Redis and Qdrant to be running.
import os, constants
from langchain.llms import Ollama
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.chains import RetrievalQA
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain import hub
from langchain.document_loaders import DirectoryLoader, PyPDFLoader
from langchain.vectorstores.qdrant import Qdrant
from qdrant_client import QdrantClient
os.environ["OPENAI_API_KEY"] = constants.OPENAI_API_KEY
def main():
# Loading documents from a specified directory
loader = DirectoryLoader("./data/", glob="./*.pdf", loader_cls=PyPDFLoader)
documents = loader.load()
# Splitting documents into manageable text chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=100)
all_splits = text_splitter.split_documents(documents)
print(
f"RecursiveCharacterTextSplitter from Langchain:\nProcessed {len(documents)} documents split into {len(all_splits)} chunks"
)
embeddings = HuggingFaceInstructEmbeddings(
model_name="hkunlp/instructor-xl", model_kwargs={"device": "cpu"}
)
try:
# !!! You will need this the first time you run this script as the collection needs to be created !!!
# Setting up QdrantClient and creating a collection for vector storage
# url = "35.204.26.135"
url = "localhost"
port = "6333"
collection_name = "instruct-embeddings"
size = 768
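# The collection's vector size must match the output dimension of the embedding model defined above.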
from qdrant_client.http.models import Distance, VectorParams
try:
# qdrant_client = QdrantClient(url=url, port=6333, api_key=api_key)
qdrant_client = QdrantClient(url=url, port=port)
qdrant_client.delete_collection(
collection_name=collection_name,
)
qdrant_client = QdrantClient(url=url, port=port)
qdrant_client.create_collection(
collection_name=collection_name,
vectors_config=VectorParams(size=size, distance=Distance.COSINE),
)
except Exception as e:
print(f"Failed to initialize Qdrant client or create collection: {e}")
# Initializing Qdrant vectorstore with document embeddings
# url = "http://localhost:6333"
vectorstore = Qdrant.from_documents(
collection_name=collection_name,
embedding=embeddings,
documents=all_splits,
url=url,
# prefer_grpc=True,
# api_key=api_key
)
except Exception as e:
print(f"Failed to initialize Qdrant vectorstore: {e}")
# Loading the Language Model with a callback manager
llm = Ollama(
model="neural-chat:7b",
verbose=True,
temperature=0.0,
callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
)
# Setting up a QA Chain with a specific prompt
QA_CHAIN_PROMPT = hub.pull("rlm/rag-prompt-llama")
qa_chain = RetrievalQA.from_chain_type(
llm,
retriever=vectorstore.as_retriever(),
chain_type_kwargs={"prompt": QA_CHAIN_PROMPT},
return_source_documents=True,
)
def process_llm_response(llm_response):
print("\n\nSources:")
for source in llm_response["source_documents"]:
print(source.metadata["source"])
# Asking a question on the Toolformer PDF
question = "What is the conclusion summary for the Toolformer whitepaper?"
print(f"Question: {question}")
process_llm_response(qa_chain({"query": question}))
# Asking a question based on the other PDF.
question = "What is the name of the cat?"
print(f"Question: {question}")
process_llm_response(qa_chain({"query": question}))
if __name__ == "__main__":
main()
| [
"rlm/rag-prompt-llama"
] |
2024-01-10 | karl-friman/rag-self-query | 16.1.RAG-Vertex-SelfQuery.py | # -*- coding: utf-8 -*-
"""Copy of YT LangChain RAG tips and Tricks 01 - Self Query.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1zDYbCfGVMs4pHmqlmOtLr3ukCSc1kfXD
"""
import os
import constants
import logging
from typing import Any, Dict, List, Mapping, Optional
from langchain.llms import OpenAI
from pydantic import Extra, Field, root_validator
from langchain.chat_models import ChatOpenAI
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms import VertexAI
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
from prettytable import PrettyTable
from termcolor import colored
os.environ["OPENAI_API_KEY"] = constants.OPENAI_API_KEY
from langchain.schema import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.document_loaders.csv_loader import CSVLoader
# import langchain
# langchain.verbose = True
# langchain.debug = True
embeddings = OpenAIEmbeddings()
"""## Formatting and printing results"""
def print_documents(docs):
table = PrettyTable()
table.field_names = [
"ID",
"Age",
"Curriculum Year",
"Gender",
"Mother Tongue",
"Partnership Status",
"Job Status",
"Study Hours",
"Health Satisfaction",
"Psychotherapy",
"Empathy Score",
"Cognitive Empathy",
"Affective Empathy",
"AMSP Score",
"Correct Responses",
"CES-D Score",
"STAI Score",
"MBI Emotional Exhaustion",
"MBI Cynicism",
"MBI Academic Efficacy",
]
for doc in docs:
table.add_row(
[
colored(doc.metadata["id"], "red"),
doc.metadata["age"],
doc.metadata["year"],
doc.metadata["sex"],
doc.metadata["glang"],
doc.metadata["part"],
doc.metadata["job"],
doc.metadata["stud_h"],
doc.metadata["health"],
doc.metadata["psyt"],
doc.metadata["jspe"],
doc.metadata["qcae_cog"],
doc.metadata["qcae_aff"],
doc.metadata["amsp"],
doc.metadata["erec_mean"],
doc.metadata["cesd"],
doc.metadata["stai_t"],
doc.metadata["mbi_ex"],
doc.metadata["mbi_cy"],
doc.metadata["mbi_ea"],
]
)
print(table)
import csv
def load_csv_to_documents(file_path):
documents = []
with open(file_path, mode="r", encoding="utf-8") as file:
reader = csv.DictReader(file)
for row in reader:
# Convert all values to strings for consistency
metadata = {k: str(v) for k, v in row.items()}
# Create a document; page_content can be a placeholder or constructed from data
doc = Document(page_content="Participant data", metadata=metadata)
documents.append(doc)
return documents
# Use the function to load documents
# loader = CSVLoader(file_path="./medical_data/DataCarrard_et_al._2022_MedTeach.csv", source_column="id")
# docs = loader.load()
docs = load_csv_to_documents("./medical_data/DataCarrard_et_al._2022_MedTeach.csv")
vectorstore = Chroma.from_documents(docs, embeddings)
print("vectorstore", vectorstore)
"""## Creating our self-querying retriever"""
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain.chains.query_constructor.base import AttributeInfo
metadata_field_info = [
AttributeInfo(
name="id",
description="Participants ID number",
type="string",
),
AttributeInfo(
name="age",
description="Age at questionnaire 20-21",
type="numeric",
),
AttributeInfo(
name="year",
description="Curriculum year: In which curriculum year are you?",
type="1=Bmed1; 2=Bmed2; 3=Bmed3; 4=Mmed1; 5=Mmed2; 6=Mmed3",
),
AttributeInfo(
name="sex",
description="Gender: To which gender do you identify the most?",
type="1=Man; 2=Woman; 3=Non-binary",
),
AttributeInfo(
name="glang",
description="Mother tongue: What is your mother tongue?",
type="1=French; 15=German; 20=English; ...; 121=Other", # Abbreviated for brevity
),
AttributeInfo(
name="part",
description="Partnership status: Do you have a partner?",
type="0=No; 1=Yes",
),
AttributeInfo(
name="job",
description="Having a job: Do you have a paid job?",
type="0=No; 1=Yes",
),
AttributeInfo(
name="stud_h",
description="Hours of study per week: On average, how many hours per week do you study on top of courses?",
type="numeric",
),
AttributeInfo(
name="health",
description="Satisfaction with health: How satisfied are you with your health?",
type="1=Very dissatisfied; 2=Dissatisfied; ...; 5=Very satisfied",
),
AttributeInfo(
name="psyt",
description="Psychotherapy last year: Have you consulted a psychotherapist or a psychiatrist for your health in the last 12 months?",
type="0=No; 1=Yes",
),
AttributeInfo(
name="jspe",
description="JSPE total empathy score",
type="numeric",
),
AttributeInfo(
name="qcae_cog",
description="QCAE Cognitive empathy score",
type="numeric",
),
AttributeInfo(
name="qcae_aff",
description="QCAE Affective empathy score",
type="numeric",
),
AttributeInfo(
name="amsp",
description="AMSP total score",
type="numeric",
),
AttributeInfo(
name="erec_mean",
description="GERT: Mean value of correct responses",
type="numeric",
),
AttributeInfo(
name="cesd",
description="CES-D total score",
type="numeric",
),
AttributeInfo(
name="stai_t",
description="STAI score",
type="numeric",
),
AttributeInfo(
name="mbi_ex",
description="MBI Emotional Exhaustion",
type="numeric",
),
AttributeInfo(
name="mbi_cy",
description="MBI Cynicism",
type="numeric",
),
AttributeInfo(
name="mbi_ea",
description="MBI Academic Efficacy",
type="numeric",
),
]
document_content_description = "Participant data"
# Assuming 'document_contents' is a list of the content of each document
document_contents = [doc.page_content for doc in docs]
llm_google = VertexAI(model="text-unicorn")
llm_openai = ChatOpenAI(model="gpt-3.5-turbo")
retriever = SelfQueryRetriever.from_llm(
llm_google,
vectorstore,
document_content_description,
metadata_field_info,
verbose=True,
)
def print_query_and_documents(query, retriever):
print("Q:", query)
print_documents(retriever.get_relevant_documents(query))
queries = [
"Show data for participants aged 20",
"Show data for participants aged above 18 and below 20",
"Show data for participants aged above 20 and mbi_ex higher than 10",
]
for q in queries:
print_query_and_documents(q, retriever)
| [] |
2024-01-10 | karl-friman/rag-self-query | 13.RAG-Milvus.py | # -*- coding: utf-8 -*-
# This requires Docker containers for Chroma, Redis and Qdrant to be running.
import os, constants
from langchain.llms import Ollama
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chains import RetrievalQA
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain import hub
from langchain.document_loaders import DirectoryLoader, PyPDFLoader
from langchain.vectorstores import Milvus
os.environ["OPENAI_API_KEY"] = constants.OPENAI_API_KEY
def main():
# Loading documents from a specified directory
loader = DirectoryLoader("./data/", glob="./*.pdf", loader_cls=PyPDFLoader)
documents = loader.load()
# Splitting documents into manageable text chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=100)
all_splits = text_splitter.split_documents(documents)
print(
f"RecursiveCharacterTextSplitter from Langchain:\nProcessed {len(documents)} documents split into {len(all_splits)} chunks"
)
try:
# This is the first run with new data
vectorstore = Milvus.from_documents(
documents=all_splits,
embedding=OpenAIEmbeddings(),
# connection_args={"host": "127.0.0.1", "port": "19530"},
connection_args={"host": "localhost", "port": "19530"},
# connection_args={"host": "34.141.233.82", "port": "19530"},
)
# This is how to load an existing collection
# vectorstore = Milvus(
# # If you have another collection than what langchain creates, you can specify it here
# # collection_name="collection_1",
# embedding_function=OpenAIEmbeddings(),
# connection_args={"host": "localhost", "port": "19530"},
# )
except Exception as e:
print(f"Failed to initialize vectorstore: {e}")
# Loading the Language Model with a callback manager
llm = Ollama(
model="neural-chat:7b",
verbose=True,
temperature=0.0,
callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
)
# Setting up a QA Chain with a specific prompt
QA_CHAIN_PROMPT = hub.pull("rlm/rag-prompt-llama")
qa_chain = RetrievalQA.from_chain_type(
llm,
retriever=vectorstore.as_retriever(),
chain_type_kwargs={"prompt": QA_CHAIN_PROMPT},
return_source_documents=True,
)
def process_llm_response(llm_response):
print("\n\nSources:")
for source in llm_response["source_documents"]:
print(source.metadata["source"])
# Asking a question on the Toolformer PDF
question = "What is the conclusion summary for the Toolformer whitepaper?"
print(f"Question: {question}")
process_llm_response(qa_chain({"query": question}))
# Asking a question based on the other PDF.
question = "What is the name of the cat?"
print(f"Question: {question}")
process_llm_response(qa_chain({"query": question}))
if __name__ == "__main__":
main()
| [
"rlm/rag-prompt-llama"
] |
2024-01-10 | karl-friman/rag-self-query | 14.2.RAG-Qdrant-OllamaEmbeddings.py | # -*- coding: utf-8 -*-
# This requires Docker containers for Chroma, Redis and Qdrant to be running.
import os, constants
from langchain.llms import Ollama
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.embeddings import OllamaEmbeddings
from langchain.chains import RetrievalQA
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain import hub
from langchain.document_loaders import DirectoryLoader, PyPDFLoader
from langchain.vectorstores.qdrant import Qdrant
from qdrant_client import QdrantClient
os.environ["OPENAI_API_KEY"] = constants.OPENAI_API_KEY
def main():
# Loading documents from a specified directory
loader = DirectoryLoader("./data/", glob="./*.pdf", loader_cls=PyPDFLoader)
documents = loader.load()
# Splitting documents into manageable text chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=100)
all_splits = text_splitter.split_documents(documents)
print(
f"RecursiveCharacterTextSplitter from Langchain:\nProcessed {len(documents)} documents split into {len(all_splits)} chunks"
)
try:
# !!! You will need this the first time you run this script as the collection needs to be created !!!
# Setting up QdrantClient and creating a collection for vector storage
# url = "35.204.26.135"
url = "localhost"
port = "6333"
collection_name = "ollama-embeddings"
size = 4096
from qdrant_client.http.models import Distance, VectorParams
try:
# qdrant_client = QdrantClient(url=url, port=6333, api_key=api_key)
qdrant_client = QdrantClient(url=url, port=port)
qdrant_client.delete_collection(
collection_name=collection_name,
)
qdrant_client = QdrantClient(url=url, port=port)
qdrant_client.create_collection(
collection_name=collection_name,
vectors_config=VectorParams(size=size, distance=Distance.COSINE),
)
except Exception as e:
print(f"Failed to initialize Qdrant client or create collection: {e}")
# Initializing Qdrant vectorstore with document embeddings
# url = "http://localhost:6333"
vectorstore = Qdrant.from_documents(
collection_name=collection_name,
embedding=OllamaEmbeddings(),
documents=all_splits,
url=url,
# prefer_grpc=True,
# api_key=api_key
)
except Exception as e:
print(f"Failed to initialize Qdrant vectorstore: {e}")
# Loading the Language Model with a callback manager
llm = Ollama(
model="neural-chat:7b",
verbose=True,
temperature=0.0,
callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
)
# Setting up a QA Chain with a specific prompt
QA_CHAIN_PROMPT = hub.pull("rlm/rag-prompt-llama")
qa_chain = RetrievalQA.from_chain_type(
llm,
retriever=vectorstore.as_retriever(),
chain_type_kwargs={"prompt": QA_CHAIN_PROMPT},
return_source_documents=True,
)
def process_llm_response(llm_response):
print("\n\nSources:")
for source in llm_response["source_documents"]:
print(source.metadata["source"])
# Asking a question on the Toolformer PDF
question = "What is the conclusion summary for the Toolformer whitepaper?"
print(f"Question: {question}")
process_llm_response(qa_chain({"query": question}))
# Asking a question based on the other PDF.
question = "What is the name of the cat?"
print(f"Question: {question}")
process_llm_response(qa_chain({"query": question}))
if __name__ == "__main__":
main()
| [
"rlm/rag-prompt-llama"
] |
2024-01-10 | karl-friman/rag-self-query | 16.RAG-Vertex-SelfQuery.py | # -*- coding: utf-8 -*-
"""Copy of YT LangChain RAG tips and Tricks 01 - Self Query.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1zDYbCfGVMs4pHmqlmOtLr3ukCSc1kfXD
"""
import os
import constants
import logging
from typing import Any, Dict, List, Mapping, Optional
from langchain.llms import OpenAI
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms import VertexAI
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
from prettytable import PrettyTable
from termcolor import colored
os.environ["OPENAI_API_KEY"] = constants.OPENAI_API_KEY
from langchain.schema import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
import langchain
langchain.verbose = True
langchain.debug = True
embeddings = OpenAIEmbeddings()
"""## Formatting and printing results"""
def print_documents(docs):
table = PrettyTable()
table.field_names = [
"Page Content",
"Color",
"Country",
"Grape",
"Name",
"Rating",
"Year",
]
for doc in docs:
table.add_row(
[
doc.page_content,
colored(doc.metadata["color"], "red"),
colored(doc.metadata["country"], "yellow"),
colored(doc.metadata["grape"], "blue"),
colored(doc.metadata["name"], "green"),
colored(doc.metadata["rating"], "magenta"),
colored(doc.metadata["year"], "cyan"),
]
)
print(table)
"""## Example data with metadata attached"""
docs = [
Document(
page_content="Complex, layered, rich red with dark fruit flavors",
metadata={
"name": "Opus One",
"year": 2018,
"rating": 96,
"grape": "Cabernet Sauvignon",
"color": "red",
"country": "USA",
},
),
Document(
page_content="Luxurious, sweet wine with flavors of honey, apricot, and peach",
metadata={
"name": "Château d'Yquem",
"year": 2015,
"rating": 98,
"grape": "Sémillon",
"color": "white",
"country": "France",
},
),
Document(
page_content="Full-bodied red with notes of black fruit and spice",
metadata={
"name": "Penfolds Grange",
"year": 2017,
"rating": 97,
"grape": "Shiraz",
"color": "red",
"country": "Australia",
},
),
Document(
page_content="Elegant, balanced red with herbal and berry nuances",
metadata={
"name": "Sassicaia",
"year": 2016,
"rating": 95,
"grape": "Cabernet Franc",
"color": "red",
"country": "Italy",
},
),
Document(
page_content="Highly sought-after Pinot Noir with red fruit and earthy notes",
metadata={
"name": "Domaine de la Romanée-Conti",
"year": 2018,
"rating": 100,
"grape": "Pinot Noir",
"color": "red",
"country": "France",
},
),
Document(
page_content="Crisp white with tropical fruit and citrus flavors",
metadata={
"name": "Cloudy Bay",
"year": 2021,
"rating": 92,
"grape": "Sauvignon Blanc",
"color": "white",
"country": "New Zealand",
},
),
Document(
page_content="Rich, complex Champagne with notes of brioche and citrus",
metadata={
"name": "Krug Grande Cuvée",
"year": 2010,
"rating": 93,
"grape": "Chardonnay blend",
"color": "sparkling",
"country": "New Zealand",
},
),
Document(
page_content="Intense, dark fruit flavors with hints of chocolate",
metadata={
"name": "Caymus Special Selection",
"year": 2018,
"rating": 96,
"grape": "Cabernet Sauvignon",
"color": "red",
"country": "USA",
},
),
Document(
page_content="Exotic, aromatic white with stone fruit and floral notes",
metadata={
"name": "Jermann Vintage Tunina",
"year": 2020,
"rating": 91,
"grape": "Sauvignon Blanc blend",
"color": "white",
"country": "Italy",
},
),
]
vectorstore = Chroma.from_documents(docs, embeddings)
print("vectorstore", vectorstore)
"""## Creating our self-querying retriever"""
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain.chains.query_constructor.base import AttributeInfo
metadata_field_info = [
AttributeInfo(
name="grape",
description="The grape used to make the wine",
type="string or list[string]",
),
AttributeInfo(
name="name",
description="The name of the wine",
type="string or list[string]",
),
AttributeInfo(
name="color",
description="The color of the wine",
type="string or list[string]",
),
AttributeInfo(
name="year",
description="The year the wine was released",
type="integer",
),
AttributeInfo(
name="country",
description="The name of the country the wine comes from",
type="string",
),
AttributeInfo(
name="rating",
description="The Robert Parker rating for the wine 0-100",
type="integer", # float
),
]
document_content_description = "Brief description of the wine"
# Assuming 'document_contents' is a list of the content of each document
document_contents = [doc.page_content for doc in docs]
llm_google = VertexAI()
retriever = SelfQueryRetriever.from_llm(
llm_google,
vectorstore,
document_content_description,
metadata_field_info,
verbose=True,
)
# This example only specifies a relevant query
print("Q: What are some red wines")
print_documents(retriever.get_relevant_documents("What are some red wines"))
print("Q: I want a wine that has fruity nodes")
print_documents(retriever.get_relevant_documents("I want a wine that has fruity nodes"))
# This example specifies a query and a filter
print("Q: I want a wine that has fruity nodes and has a rating above 97")
print_documents(
retriever.get_relevant_documents(
"I want a wine that has fruity nodes and has a rating above 97"
)
)
print("Q: What wines come from Italy?")
print_documents(retriever.get_relevant_documents("What wines come from Italy?"))
# This example specifies a query and composite filter
print("Q: What's a wine after 2015 but before 2020 that's all earthy")
print_documents(
retriever.get_relevant_documents(
"What's a wine after 2015 but before 2020 that's all earthy"
)
)
# """## Filter K
# We can also use the self query retriever to specify k: the number of documents to fetch.
# We can do this by passing enable_limit=True to the constructor.
# """
# llm_openai = OpenAI(temperature=0)
# retriever = SelfQueryRetriever.from_llm(
# llm_openai,
# #llm_google, #This does not work
# vectorstore,
# document_content_description,
# metadata_field_info,
# enable_limit=True,
# verbose=True,
# )
# print("Q: what are two that have a rating above 97")
# # This example only specifies a relevant query - k= 2
# print_documents(
# retriever.get_relevant_documents("what are two that have a rating above 97")
# )
# print("Q: what are two wines that come from australia or New zealand")
# print_documents(
# retriever.get_relevant_documents(
# "what are two wines that come from australia or New zealand"
# )
# )
| [] |
2024-01-10 | karl-friman/rag-self-query | 05.RAG-together-retrieval.py | # -*- coding: utf-8 -*-
"""YT RetrievalQA - Together API LangChain.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1eFS7_aFaGthiWiIK6y6Hbvsx5TqkpTVz
"""
# !pip -q install langchain huggingface_hub tiktoken
# !pip -q install chromadb
# !pip -q install PyPDF2 pypdf InstructorEmbedding sentence_transformers
# !pip -q install --upgrade together
"""## RetrievalQA with on Together API"""
import os
import constants
os.environ["TOGETHER_API_KEY"] = constants.TOGETHER_API_KEY
# !pip show langchain
"""# Setting up Together API
"""
import together
# set your API key
together.api_key = os.environ["TOGETHER_API_KEY"]
import logging
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
class TogetherLLM(LLM):
"""Together large language models."""
model: str = "mistralai/Mistral-7B-Instruct-v0.1"
"""model endpoint to use"""
together_api_key: str = os.environ["TOGETHER_API_KEY"]
"""Together API key"""
temperature: float = 0.2
"""What sampling temperature to use."""
max_tokens: int = 512
"""The maximum number of tokens to generate in the completion."""
class Config:
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the API key is set."""
api_key = get_from_dict_or_env(values, "together_api_key", "TOGETHER_API_KEY")
values["together_api_key"] = api_key
return values
@property
def _llm_type(self) -> str:
"""Return type of LLM."""
return "together"
def _call(
self,
prompt: str,
**kwargs: Any,
) -> str:
"""Call to Together endpoint."""
together.api_key = self.together_api_key
output = together.Complete.create(
prompt,
model=self.model,
max_tokens=self.max_tokens,
temperature=self.temperature,
)
text = output["output"]["choices"][0]["text"]
return text
llm = TogetherLLM(
model="mistralai/Mistral-7B-Instruct-v0.1", temperature=0.1, max_tokens=512
)
print("Q: What are the olympics? ")
print(llm("What are the olympics? "))
# !wget -O new_papers_2.zip https://www.dropbox.com/scl/fi/67a80h373n1z38088c9fb/new_papers_2.zip?rlkey=1azfz3w5aazd24ihotwzmol2j&dl=1
# !unzip -q new_papers_2.zip -d new_papers
"""# LangChain multi-doc retriever with ChromaDB
***Key Points***
- Multiple Files - PDFs
- ChromaDB
- Local LLM
- Instructor Embeddings
## Setting up LangChain
"""
import os
from langchain.vectorstores import Chroma
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains import RetrievalQA
from langchain.document_loaders import TextLoader
from langchain.document_loaders import PyPDFLoader
from langchain.document_loaders import DirectoryLoader
from InstructorEmbedding import INSTRUCTOR
from langchain.embeddings import HuggingFaceInstructEmbeddings
"""## Load multiple and process documents"""
# Load and process the text files
# loader = TextLoader('single_text_file.txt')
loader = DirectoryLoader("./whitepapers/", glob="./*.pdf", loader_cls=PyPDFLoader)
documents = loader.load()
len(documents)
# splitting the text into chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
texts = text_splitter.split_documents(documents)
"""## HF Instructor Embeddings"""
from langchain.embeddings import HuggingFaceInstructEmbeddings
# instructor_embeddings = HuggingFaceInstructEmbeddings(
# model_name="hkunlp/instructor-xl", model_kwargs={"device": "cuda"}
# )
instructor_embeddings = HuggingFaceInstructEmbeddings(
model_name="hkunlp/instructor-xl", model_kwargs={"device": "cpu"}
)
"""## create the DB
This will take a bit of time on a T4 GPU
"""
# Embed and store the texts
# Supplying a persist_directory will store the embeddings on disk
persist_directory = "db"
## Here are the new embeddings being used
embedding = instructor_embeddings
vectordb = Chroma.from_documents(
documents=texts, embedding=embedding, persist_directory=persist_directory
)
"""## Make a retriever"""
retriever = vectordb.as_retriever(search_kwargs={"k": 5})
"""## Make a chain"""
llm = TogetherLLM(
model="mistralai/Mistral-7B-Instruct-v0.1", temperature=0.1, max_tokens=1024
)
# create the chain to answer questions
qa_chain = RetrievalQA.from_chain_type(
llm=llm, chain_type="stuff", retriever=retriever, return_source_documents=True
)
## Cite sources
import textwrap
def wrap_text_preserve_newlines(text, width=110):
# Split the input text into lines based on newline characters
lines = text.split("\n")
# Wrap each line individually
wrapped_lines = [textwrap.fill(line, width=width) for line in lines]
# Join the wrapped lines back together using newline characters
wrapped_text = "\n".join(wrapped_lines)
return wrapped_text
def process_llm_response(llm_response):
print(wrap_text_preserve_newlines(llm_response["result"]))
print("\n\nSources:")
for source in llm_response["source_documents"]:
print(source.metadata["source"])
query = "What is toolformer?"
print(query)
llm_response = qa_chain(query)
process_llm_response(llm_response)
query = "What tools can be used with toolformer?"
print(query)
llm_response = qa_chain(query)
process_llm_response(llm_response)
query = "How many examples do we need to provide for each tool?"
print(query)
llm_response = qa_chain(query)
process_llm_response(llm_response)
query = "What are the best retrieval augmentations for LLMs?"
print(query)
llm_response = qa_chain(query)
process_llm_response(llm_response)
query = "What is ReAct?"
print(query)
llm_response = qa_chain(query)
process_llm_response(llm_response)
qa_chain.retriever.search_type, qa_chain.retriever.vectorstore
print("\nPrompt template: ", qa_chain.combine_documents_chain.llm_chain.prompt.template)
print("\nQ: Who is Gary Oldman? ")
print(llm("Who is Gary Oldman? "))
together.Models.stop("mistralai/Mistral-7B-Instruct-v0.1")
| [] |
2024-01-10 | karl-friman/rag-self-query | 09.RAG-Ollama-InstEmbed-Redis-PDF.py | # -*- coding: utf-8 -*-
from langchain.llms import Ollama
llm = Ollama(model="neural-chat:7b", temperature=0.1)
import os
# from langchain.vectorstores import Chroma
from langchain.vectorstores.redis import Redis
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains import RetrievalQA
from langchain.document_loaders import TextLoader
from langchain.document_loaders import PyPDFLoader
from langchain.document_loaders import DirectoryLoader
from InstructorEmbedding import INSTRUCTOR
from langchain.embeddings import HuggingFaceInstructEmbeddings
"""## Load multiple and process documents"""
loader = DirectoryLoader("./whitepapers/", glob="./*.pdf", loader_cls=PyPDFLoader)
documents = loader.load()
len(documents)
# splitting the text into chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
texts = text_splitter.split_documents(documents)
from langchain.embeddings import HuggingFaceInstructEmbeddings
# instructor_embeddings = HuggingFaceInstructEmbeddings(
# model_name="hkunlp/instructor-xl", model_kwargs={"device": "cuda"}
# )
instructor_embeddings = HuggingFaceInstructEmbeddings(
model_name="hkunlp/instructor-xl", model_kwargs={"device": "cpu"}
)
persist_directory = "db"
embedding = instructor_embeddings
# vectordb = Chroma.from_documents(
# documents=texts, embedding=embedding, persist_directory=persist_directory
# )
vectordb = Redis.from_documents(
documents=texts,
embedding=embedding,
redis_url="redis://localhost:6379",
)
"""## Make a retriever"""
retriever = vectordb.as_retriever(search_kwargs={"k": 5})
"""## Make a chain"""
# create the chain to answer questions
qa_chain = RetrievalQA.from_chain_type(
llm=llm, chain_type="stuff", retriever=retriever, return_source_documents=True
)
## Cite sources
import textwrap
def wrap_text_preserve_newlines(text, width=110):
# Split the input text into lines based on newline characters
lines = text.split("\n")
# Wrap each line individually
wrapped_lines = [textwrap.fill(line, width=width) for line in lines]
# Join the wrapped lines back together using newline characters
wrapped_text = "\n".join(wrapped_lines)
return wrapped_text
def process_llm_response(llm_response):
print(wrap_text_preserve_newlines(llm_response["result"]))
print("\n\nSources:")
for source in llm_response["source_documents"]:
print(source.metadata["source"])
query = "What is toolformer?"
print(query)
llm_response = qa_chain(query)
process_llm_response(llm_response)
query = "What tools can be used with toolformer?"
print(query)
llm_response = qa_chain(query)
process_llm_response(llm_response)
query = "How many examples do we need to provide for each tool?"
print(query)
llm_response = qa_chain(query)
process_llm_response(llm_response)
| [] |
2024-01-10 | karl-friman/rag-self-query | 12.RAG-Qdrant.py | # -*- coding: utf-8 -*-
# This requires Docker containers for Chroma, Redis and Qdrant to be running.
import os, constants
from langchain.llms import Ollama
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chains import RetrievalQA
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain import hub
from langchain.document_loaders import DirectoryLoader, PyPDFLoader
from langchain.vectorstores.qdrant import Qdrant
from qdrant_client import QdrantClient
os.environ["OPENAI_API_KEY"] = constants.OPENAI_API_KEY
def main():
# Loading documents from a specified directory
loader = DirectoryLoader("./data/", glob="./*.pdf", loader_cls=PyPDFLoader)
documents = loader.load()
# Splitting documents into manageable text chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=100)
all_splits = text_splitter.split_documents(documents)
print(
f"RecursiveCharacterTextSplitter from Langchain:\nProcessed {len(documents)} documents split into {len(all_splits)} chunks"
)
try:
# !!! You will need this the first time you run this script as the collection needs to be created !!!
# Setting up QdrantClient and creating a collection for vector storage
# url = "35.204.26.135"
url = "localhost"
port = 6333
from qdrant_client.http.models import Distance, VectorParams
try:
# qdrant_client = QdrantClient(url=url, port=6333, api_key=api_key)
qdrant_client = QdrantClient(url=url, port=port)
qdrant_client.delete_collection(
collection_name="test_collection",
)
qdrant_client = QdrantClient(url=url, port=port)
qdrant_client.create_collection(
collection_name="test_collection",
vectors_config=VectorParams(size=1536, distance=Distance.COSINE),
)
except Exception as e:
print(f"Failed to initialize Qdrant client or create collection: {e}")
# Initializing Qdrant vectorstore with document embeddings
# url = "http://localhost:6333"
vectorstore = Qdrant.from_documents(
collection_name="test_collection",
embedding=OpenAIEmbeddings(),
documents=all_splits,
url=url,
prefer_grpc=True,
port=port,
# api_key=api_key
)
except Exception as e:
print(f"Failed to initialize Qdrant vectorstore: {e}")
# Loading the Language Model with a callback manager
llm = Ollama(
model="neural-chat:7b",
verbose=True,
temperature=0.0,
callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
)
# Setting up a QA Chain with a specific prompt
QA_CHAIN_PROMPT = hub.pull("rlm/rag-prompt-llama")
qa_chain = RetrievalQA.from_chain_type(
llm,
retriever=vectorstore.as_retriever(),
chain_type_kwargs={"prompt": QA_CHAIN_PROMPT},
return_source_documents=True,
)
def process_llm_response(llm_response):
print("\n\nSources:")
for source in llm_response["source_documents"]:
print(source.metadata["source"])
# Asking a question on the Toolformer PDF
question = "What is the conclusion summary for the Toolformer whitepaper?"
print(f"Question: {question}")
process_llm_response(qa_chain({"query": question}))
# Asking a question based on the other PDF.
question = "What is the name of the cat?"
print(f"Question: {question}")
process_llm_response(qa_chain({"query": question}))
if __name__ == "__main__":
main()
| [
"rlm/rag-prompt-llama"
] |
2024-01-10 | karl-friman/rag-self-query | 08.RAG-OllamaEmbeddings-Redis-Web.py | # Load web page
import argparse
from langchain.document_loaders import WebBaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
# # Embed and store
# from langchain.vectorstores import Chroma
from langchain.vectorstores.redis import Redis
# from langchain.embeddings import GPT4AllEmbeddings
from langchain.embeddings import OllamaEmbeddings # We can also try Ollama embeddings
from langchain.llms import Ollama
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
def main():
parser = argparse.ArgumentParser(description="Filter out URL argument.")
parser.add_argument(
"--url",
type=str,
default="http://example.com",
required=True,
help="The URL to filter out.",
)
args = parser.parse_args()
url = args.url
print(f"using URL: {url}")
loader = WebBaseLoader(url)
data = loader.load()
# Split into chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=100)
all_splits = text_splitter.split_documents(data)
print(f"Split into {len(all_splits)} chunks")
# vectorstore = Chroma.from_documents(documents=all_splits,
# embedding=OllamaEmbeddings(model="openhermes2-mistral"))
vectorstore = Redis.from_documents(
documents=all_splits,
embedding=OllamaEmbeddings(model="neural-chat:7b"),
redis_url="redis://localhost:6379",
)
print(f"Loaded {len(data)} documents")
# print(f"Retrieved {len(docs)} documents")
# RAG prompt
from langchain import hub
QA_CHAIN_PROMPT = hub.pull("rlm/rag-prompt-llama")
# LLM
llm = Ollama(
model="neural-chat:7b",
verbose=True,
callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
)
print(f"Loaded LLM model {llm.model}")
# QA chain
from langchain.chains import RetrievalQA
qa_chain = RetrievalQA.from_chain_type(
llm,
retriever=vectorstore.as_retriever(),
chain_type_kwargs={"prompt": QA_CHAIN_PROMPT},
)
# Ask a question
question = f"What are the latest headlines on {url}?"
result = qa_chain({"query": question})
if __name__ == "__main__":
main()
| [
"rlm/rag-prompt-llama"
] |
2024-01-10 | karl-friman/rag-self-query | 15.Vertex.py | #This is for running in a GCP Workstation
#from vertexai.preview.language_models import TextGenerationModel
# print("Query: What are the olympics?")
# prompt = "What are the olympics?"
# model = TextGenerationModel.from_pretrained("text-bison@latest")
# response = model.predict(prompt, temperature=0.1, top_k=40, top_p=0.8, max_output_tokens=1024)
# print("Response from Vertex AI:")
# print(response.text)
#This is for running locally
#!pip install langchain google-cloud-aiplatform
from langchain.llms import VertexAI
llm = VertexAI()
print(llm("What are some of the pros and cons of Python as a programming language?")) | [] |
2024-01-10 | karl-friman/rag-self-query | 11.RAG-DocSpeedTest.py | # -*- coding: utf-8 -*-
# This requires Docker containers for Chroma, Redis and Qdrant to be running.
# Importing required libraries
import argparse
import os
import sys
import time
import constants
from langchain.document_loaders import WebBaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.redis import Redis
from langchain.vectorstores.chroma import Chroma
from qdrant_client import QdrantClient
from langchain.vectorstores.qdrant import Qdrant
from langchain.llms import Ollama
from langchain.chains import RetrievalQA
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain import hub
from langchain.document_loaders import DirectoryLoader, PyPDFLoader
# Set OpenAI API key from constants file
os.environ["OPENAI_API_KEY"] = constants.OPENAI_API_KEY
def process_llm_response(llm_response):
# Outputting the answer or debugging information in case of an error
if "result" in llm_response:
print(f"Answer: {llm_response['result']}")
else:
print(
"Result key not found in the returned object. Here's the full object for debugging:"
)
print(llm_response)
print("\n\nSources:")
for source in llm_response["source_documents"]:
print(source.metadata["source"])
def main():
while True:
# Prompting user for vector storage selection
choice = input(
"Select vector storage: 1 for Chroma, 2 for Redis, 3 for Qdrant: "
)
# Loading documents from a specified directory
loader = DirectoryLoader(
"./whitepapers/", glob="./*.pdf", loader_cls=PyPDFLoader
)
documents = loader.load()
# Splitting documents into manageable text chunks
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=1500, chunk_overlap=100
)
all_splits = text_splitter.split_documents(documents)
print(
f"Processed {len(documents)} documents split into {len(all_splits)} chunks"
)
# Initializing vector storage based on user choice
start_time = time.time()
if choice == "1":
# Configuring Chroma vectorstore settings
from chromadb.config import Settings
try:
chroma_settings = Settings(
chroma_server_host="localhost",
chroma_server_http_port="8000",
chroma_server_ssl_enabled=False,
)
# chroma_settings = Settings(
# chroma_server_host="34.141.229.240",
# chroma_server_http_port="8080",
# chroma_server_ssl_enabled=False,
# )
# Initializing Chroma vectorstore with document embeddings
vectorstore = Chroma.from_documents(
documents=all_splits,
embedding=OpenAIEmbeddings(),
persist_directory="vectorstore_db",
client_settings=chroma_settings,
)
# Example operation to force connection
some_test_document_embedding = vectorstore.get("vectorstore_db")
print(some_test_document_embedding)
except Exception as e:
print(f"Failed to initialize Chroma vectorstore: {e}")
continue
elif choice == "2":
# Initializing Redis vectorstore with document embeddings
try:
vectorstore = Redis.from_documents(
documents=all_splits,
embedding=OpenAIEmbeddings(),
redis_url="redis://localhost:6379",
# redis_url="redis://asdf",
)
except Exception as e:
print(f"Failed to initialize Redis vectorstore: {e}")
continue
elif choice == "3":
try:
# !!! You will need this the first time you run this script as the collection needs to be created !!!
# Setting up QdrantClient and creating a collection for vector storage
# from qdrant_client.http.models import Distance, VectorParams
# try:
# qdrant_client = QdrantClient(url="34.141.229.240", port=6333)
# qdrant_client.create_collection(
# collection_name="test_collection",
# vectors_config=VectorParams(size=1536, distance=Distance.COSINE),
# )
# except Exception as e:
# print(f"Failed to initialize Qdrant client or create collection: {e}")
# continue
# Initializing Qdrant vectorstore with document embeddings
url = "http://localhost:6333"
# url = "http://34.141.229.240:6333"
vectorstore = Qdrant.from_documents(
collection_name="test_collection",
embedding=OpenAIEmbeddings(),
documents=all_splits,
url=url,
)
except Exception as e:
print(f"Failed to initialize Qdrant vectorstore: {e}")
continue
else:
# Handling invalid input for vector storage selection
print(
"Invalid choice. Please select 1 for Chroma, 2 for Redis, or 3 for Qdrant."
)
continue
# Loading the Language Model with a callback manager
llm = Ollama(
model="neural-chat:7b",
verbose=True,
callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
)
# Setting up a QA Chain with a specific prompt
QA_CHAIN_PROMPT = hub.pull("rlm/rag-prompt-llama")
qa_chain = RetrievalQA.from_chain_type(
llm,
retriever=vectorstore.as_retriever(),
chain_type_kwargs={"prompt": QA_CHAIN_PROMPT},
return_source_documents=True,
)
end_time = time.time()
# Calculating and displaying the time taken for setup
time_taken_ms = (
end_time - start_time
) * 1000 # Convert seconds to milliseconds
print(f"Time taken: {time_taken_ms} milliseconds")
# Asking a question and measuring response time
question = "What is the toolformer?"
        # Initialise result so the except handler below can always print it
        result = None
        try:
# Getting the answer using the QA chain
result = process_llm_response(qa_chain({"query": question}))
except Exception as e:
# Handling exceptions during the QA process
end_time = time.time()
print("An error occurred:", e)
print("Here's the partial result for debugging:")
print(result)
if __name__ == "__main__":
main()
| [
"rlm/rag-prompt-llama"
] |
2024-01-10 | karl-friman/rag-self-query | 06.RAG-ollama-retrieval.py | # -*- coding: utf-8 -*-
from langchain.llms import Ollama
llm = Ollama(model="neural-chat:7b", temperature=0.1)
# print("Q: What are the olympics? ")
# print(llm("What are the olympics? "))
"""# LangChain multi-doc retriever with ChromaDB
***Key Points***
- Multiple Files - PDFs
- ChromaDB
- Local LLM
- Instructor Embeddings
## Setting up LangChain
"""
import os
from langchain.vectorstores import Chroma
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains import RetrievalQA
from langchain.document_loaders import TextLoader
from langchain.document_loaders import PyPDFLoader
from langchain.document_loaders import DirectoryLoader
from InstructorEmbedding import INSTRUCTOR
from langchain.embeddings import HuggingFaceInstructEmbeddings
"""## Load multiple and process documents"""
# Load and process the text files
# loader = TextLoader('single_text_file.txt')
loader = DirectoryLoader("./whitepapers/", glob="./*.pdf", loader_cls=PyPDFLoader)
documents = loader.load()
len(documents)
# splitting the text into chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
texts = text_splitter.split_documents(documents)
"""## HF Instructor Embeddings"""
from langchain.embeddings import HuggingFaceInstructEmbeddings
# instructor_embeddings = HuggingFaceInstructEmbeddings(
# model_name="hkunlp/instructor-xl", model_kwargs={"device": "cuda"}
# )
instructor_embeddings = HuggingFaceInstructEmbeddings(
model_name="hkunlp/instructor-xl", model_kwargs={"device": "cpu"}
)
"""## create the DB
This will take a bit of time on a T4 GPU
"""
# Embed and store the texts
# Supplying a persist_directory will store the embeddings on disk
persist_directory = "db"
## Here are the new embeddings being used
embedding = instructor_embeddings
vectordb = Chroma.from_documents(
documents=texts, embedding=embedding, persist_directory=persist_directory
)
"""## Make a retriever"""
retriever = vectordb.as_retriever(search_kwargs={"k": 5})
"""## Make a chain"""
# create the chain to answer questions
qa_chain = RetrievalQA.from_chain_type(
llm=llm, chain_type="stuff", retriever=retriever, return_source_documents=True
)
## Cite sources
import textwrap
def wrap_text_preserve_newlines(text, width=110):
# Split the input text into lines based on newline characters
lines = text.split("\n")
# Wrap each line individually
wrapped_lines = [textwrap.fill(line, width=width) for line in lines]
# Join the wrapped lines back together using newline characters
wrapped_text = "\n".join(wrapped_lines)
return wrapped_text
def process_llm_response(llm_response):
print(wrap_text_preserve_newlines(llm_response["result"]))
print("\n\nSources:")
for source in llm_response["source_documents"]:
print(source.metadata["source"])
query = "What is toolformer?"
print(query)
llm_response = qa_chain(query)
process_llm_response(llm_response)
query = "What tools can be used with toolformer?"
print(query)
llm_response = qa_chain(query)
process_llm_response(llm_response)
query = "How many examples do we need to provide for each tool?"
print(query)
llm_response = qa_chain(query)
process_llm_response(llm_response)
query = "What are the best retrieval augmentations for LLMs?"
print(query)
llm_response = qa_chain(query)
process_llm_response(llm_response)
query = "What is ReAct?"
print(query)
llm_response = qa_chain(query)
process_llm_response(llm_response)
qa_chain.retriever.search_type, qa_chain.retriever.vectorstore
print("\nPrompt template: ", qa_chain.combine_documents_chain.llm_chain.prompt.template)
# print("\nQ: Who is Gary Oldman? ")
# print(llm("Who is Gary Oldman? "))
| [] |
2024-01-10 | karl-friman/rag-self-query | 07.RAG-OllamaEmbeddings.py | # Load web page
import argparse
from langchain.document_loaders import WebBaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
# Embed and store
from langchain.vectorstores import Chroma
# from langchain.embeddings import GPT4AllEmbeddings
from langchain.embeddings import OllamaEmbeddings # We can also try Ollama embeddings
from langchain.llms import Ollama
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
def main():
parser = argparse.ArgumentParser(description="Filter out URL argument.")
parser.add_argument(
"--url",
type=str,
default="http://example.com",
required=True,
help="The URL to filter out.",
)
args = parser.parse_args()
url = args.url
print(f"using URL: {url}")
loader = WebBaseLoader(url)
data = loader.load()
# Split into chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=100)
all_splits = text_splitter.split_documents(data)
print(f"Split into {len(all_splits)} chunks")
vectorstore = Chroma.from_documents(
documents=all_splits, embedding=OllamaEmbeddings(model="neural-chat:7b")
)
# Retrieve
# question = "What are the latest headlines on {url}?"
# docs = vectorstore.similarity_search(question)
print(f"Loaded {len(data)} documents")
# print(f"Retrieved {len(docs)} documents")
# RAG prompt
from langchain import hub
QA_CHAIN_PROMPT = hub.pull("rlm/rag-prompt-llama")
# LLM
llm = Ollama(
model="neural-chat:7b",
verbose=True,
callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
)
print(f"Loaded LLM model {llm.model}")
# QA chain
from langchain.chains import RetrievalQA
qa_chain = RetrievalQA.from_chain_type(
llm,
retriever=vectorstore.as_retriever(),
chain_type_kwargs={"prompt": QA_CHAIN_PROMPT},
)
# Ask a question
question = f"What are the latest headlines on {url}?"
result = qa_chain({"query": question})
# print(result)
if __name__ == "__main__":
main()
| [
"rlm/rag-prompt-llama"
] |
2024-01-10 | YasaarKadery/ShellHacks2023 | crawl.py |
import requests
import re
import urllib.request
from bs4 import BeautifulSoup
from collections import deque
from html.parser import HTMLParser
from urllib.parse import urlparse
import os
import pandas as pd
import tiktoken
import openai
import numpy as np
from openai.embeddings_utils import distances_from_embeddings, cosine_similarity
from ast import literal_eval
# Regex pattern to match a URL
HTTP_URL_PATTERN = r'^http[s]{0,1}://.+$'
# Define root domain to crawl
domain = "www.iii.org"
full_url = "https://www.iii.org/insurance-basics"
# Create a class to parse the HTML and get the hyperlinks
class HyperlinkParser(HTMLParser):
def __init__(self):
super().__init__()
# Create a list to store the hyperlinks
self.hyperlinks = []
# Override the HTMLParser's handle_starttag method to get the hyperlinks
def handle_starttag(self, tag, attrs):
attrs = dict(attrs)
# If the tag is an anchor tag and it has an href attribute, add the href attribute to the list of hyperlinks
if tag == "a" and "href" in attrs:
self.hyperlinks.append(attrs["href"])
# Function to get the hyperlinks from a URL
def get_hyperlinks(url):
# Try to open the URL and read the HTML
try:
# Open the URL and read the HTML
with urllib.request.urlopen(url) as response:
# If the response is not HTML, return an empty list
if not response.info().get('Content-Type').startswith("text/html"):
return []
# Decode the HTML
html = response.read().decode('utf-8')
except Exception as e:
print(e)
return []
# Create the HTML Parser and then Parse the HTML to get hyperlinks
parser = HyperlinkParser()
parser.feed(html)
return parser.hyperlinks
# Function to get the hyperlinks from a URL that are within the same domain
def get_domain_hyperlinks(local_domain, url):
clean_links = []
for link in set(get_hyperlinks(url)):
clean_link = None
# If the link is a URL, check if it is within the same domain
if re.search(HTTP_URL_PATTERN, link):
# Parse the URL and check if the domain is the same
url_obj = urlparse(link)
if url_obj.netloc == local_domain:
clean_link = link
# If the link is not a URL, check if it is a relative link
else:
if link.startswith("/"):
link = link[1:]
elif (
link.startswith("#")
or link.startswith("mailto:")
or link.startswith("tel:")
):
continue
clean_link = "https://" + local_domain + "/" + link
if clean_link is not None:
if clean_link.endswith("/"):
clean_link = clean_link[:-1]
clean_links.append(clean_link)
# Return the list of hyperlinks that are within the same domain
return list(set(clean_links))
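# Illustrative example (output shown is hypothetical):
#   get_domain_hyperlinks("www.iii.org", "https://www.iii.org/insurance-basics")
# returns only absolute links that stay on www.iii.org, e.g. "https://www.iii.org/article/...".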
def crawl(url, max_pages=1000):
# Parse the URL and get the domain
local_domain = urlparse(url).netloc
# Create a queue to store the URLs to crawl
queue = deque([url])
# Create a set to store the URLs that have already been seen (no duplicates)
seen = set([url])
# Create a directory to store the text files
if not os.path.exists("text/"):
os.mkdir("text/")
if not os.path.exists("text/"+local_domain+"/"):
os.mkdir("text/" + local_domain + "/")
# Create a directory to store the csv files
if not os.path.exists("processed"):
os.mkdir("processed")
# Counter to keep track of crawled pages
crawled_count = 0
# While the queue is not empty and the crawl limit is not reached, continue crawling
while queue and crawled_count < max_pages:
# Get the next URL from the queue
url = queue.pop()
print(url) # for debugging and to see the progress
# Try extracting the text from the link, if failed proceed with the next item in the queue
try:
# Save text from the url to a <url>.txt file
with open('text/'+local_domain+'/'+url[8:].replace("/", "_") + ".txt", "w", encoding="UTF-8") as f:
# Get the text from the URL using BeautifulSoup
soup = BeautifulSoup(requests.get(url).text, "html.parser")
# Get the text but remove the tags
text = soup.get_text()
                # If the page requires JavaScript, log a warning (its placeholder text is still written below)
                if ("You need to enable JavaScript to run this app." in text):
                    print("Unable to parse page " + url + " due to JavaScript being required")
# Otherwise, write the text to the file in the text directory
f.write(text)
# Increment the crawled_count
crawled_count += 1
except Exception as e:
print("Unable to parse page " + url)
# Get the hyperlinks from the URL and add them to the queue
for link in get_domain_hyperlinks(local_domain, url):
if link not in seen:
queue.append(link)
seen.add(link)
# Limit the crawl to a maximum of 10,000 pages
crawl(full_url, max_pages=10000)
| [] |
2024-01-10 | cystanford/aigc_LLM_engineering | 2-LangChain%E4%BD%BF%E7%94%A8~product_llm.py | import re
from typing import List, Union
# textwrap is a Python built-in module for formatting and wrapping text
import textwrap
import time
from langchain.agents import (
    Tool,  # available tools
    AgentExecutor,  # runs the agent
    LLMSingleActionAgent,  # defines the agent
    AgentOutputParser,  # parses the agent's output
)
from langchain.prompts import StringPromptTemplate
# LLMChain: combines a PromptTemplate with an LLM
from langchain import OpenAI, LLMChain
# AgentAction (keep acting), AgentFinish (done)
from langchain.schema import AgentAction, AgentFinish
# PromptTemplate: manages prompts for LLMs
from langchain.prompts import PromptTemplate
from langchain.llms.base import BaseLLM
# Prompt template used by the LLM for context-based QA
CONTEXT_QA_TMPL = """
根据以下提供的信息,回答用户的问题
信息:{context}
问题:{query}
"""
CONTEXT_QA_PROMPT = PromptTemplate(
input_variables=["query", "context"],
template=CONTEXT_QA_TMPL,
)
# Display the response at most 60 characters per line, pausing 0.1s per character (typewriter effect)
def output_response(response: str) -> None:
    if not response:
        exit(0)
    # At most 60 characters per line
for line in textwrap.wrap(response, width=60):
for word in line.split():
for char in word:
print(char, end="", flush=True)
time.sleep(0.1) # Add a delay of 0.1 seconds between each character
print(" ", end="", flush=True) # Add a space between each word
print() # Move to the next line after each line is printed
    # Reaching this point means the answer to this question is finished
print("----------------------------------------------------------------")
# Mock data source for company products and company information
class TeslaDataSource:
def __init__(self, llm: BaseLLM):
self.llm = llm
    # Tool 1: product description
    def find_product_description(self, product_name: str) -> str:
        """Mock database of the company's products."""
product_info = {
"Model 3": "具有简洁、动感的外观设计,流线型车身和现代化前脸。定价23.19-33.19万",
"Model Y": "在外观上与Model 3相似,但采用了更高的车身和更大的后备箱空间。定价26.39-36.39万",
"Model X": "拥有独特的翅子门设计和更加大胆的外观风格。定价89.89-105.89万",
}
        # product name => product description
return product_info.get(product_name, "没有找到这个产品")
    # Tool 2: company information
    def find_company_info(self, query: str) -> str:
        """Mock company-introduction document store; let the LLM answer from the given context."""
context = """
特斯拉最知名的产品是电动汽车,其中包括Model S、Model 3、Model X和Model Y等多款车型。
特斯拉以其技术创新、高性能和领先的自动驾驶技术而闻名。公司不断推动自动驾驶技术的研发,并在车辆中引入了各种驾驶辅助功能,如自动紧急制动、自适应巡航控制和车道保持辅助等。
"""
        # prompt template = context + the user's query
        prompt = CONTEXT_QA_PROMPT.format(query=query, context=context)
        # Run inference with the LLM
return self.llm(prompt)
AGENT_TMPL = """按照给定的格式回答以下问题。你可以使用下面这些工具:
{tools}
回答时需要遵循以下用---括起来的格式:
---
Question: 我需要回答的问题
Thought: 回答这个上述我需要做些什么
Action: "{tool_names}" 中的一个工具名
Action Input: 选择这个工具所需要的输入
Observation: 选择这个工具返回的结果
...(这个 思考/行动/行动输入/观察 可以重复N次)
Thought: 我现在知道最终答案
Final Answer: 原始输入问题的最终答案
---
现在开始回答,记得在给出最终答案前,需要按照指定格式进行一步一步的推理。
Question: {input}
{agent_scratchpad}
"""
class CustomPromptTemplate(StringPromptTemplate):
    template: str  # the base template
    tools: List[Tool]  # the set of tools the agent may use
def format(self, **kwargs) -> str:
"""
按照定义的 template,将需要的值都填写进去。
Returns:
str: 填充好后的 template。
"""
# 取出中间步骤并进行执行
intermediate_steps = kwargs.pop("intermediate_steps")
print('intermediate_steps=', intermediate_steps)
print('='*30)
thoughts = ""
for action, observation in intermediate_steps:
thoughts += action.log
thoughts += f"\nObservation: {observation}\nThought: "
        # Record the current thoughts => assign to agent_scratchpad
        kwargs["agent_scratchpad"] = thoughts
        # List every available tool name together with its description
kwargs["tools"] = "\n".join(
[f"{tool.name}: {tool.description}" for tool in self.tools]
)
        # List all the tool names
kwargs["tool_names"] = ", ".join(
[tool.name for tool in self.tools]
)
cur_prompt = self.template.format(**kwargs)
#print(cur_prompt)
return cur_prompt
"""
对Agent返回结果进行解析,有两种可能:
1)还在思考中 AgentAction
2)找到了答案 AgentFinal
"""
class CustomOutputParser(AgentOutputParser):
def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
"""
解析 llm 的输出,根据输出文本找到需要执行的决策。
Args:
llm_output (str): _description_
Raises:
ValueError: _description_
Returns:
Union[AgentAction, AgentFinish]: _description_
"""
# 如果句子中包含 Final Answer 则代表已经完成
if "Final Answer:" in llm_output:
return AgentFinish(
return_values={"output": llm_output.split("Final Answer:")[-1].strip()},
log=llm_output,
)
        # Otherwise an AgentAction is needed
        regex = r"Action\s*\d*\s*:(.*?)\nAction\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"  # parse out action and action_input
match = re.search(regex, llm_output, re.DOTALL)
if not match:
raise ValueError(f"Could not parse LLM output: `{llm_output}`")
action = match.group(1).strip()
action_input = match.group(2)
        # Execute the agent action
return AgentAction(
tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output
)
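# A minimal sketch of how the parser behaves on the two kinds of output it expects.
# The sample strings below are illustrative, not taken from a real model run:
#
#   parser = CustomOutputParser()
#   parser.parse("Thought: I now know the answer\nFinal Answer: Model 3 costs 23.19-33.19万")
#   # -> AgentFinish, with return_values["output"] holding the final answer text
#   parser.parse('Action: 查询产品名称\nAction Input: "Model 3"')
#   # -> AgentAction(tool="查询产品名称", tool_input="Model 3", ...)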
if __name__ == "__main__":
    ## Define the LLM; requires the environment variable OPENAI_API_KEY to be set
    llm = OpenAI(temperature=0, model_name="gpt-3.5-turbo")
    # Our own data source
    tesla_data_source = TeslaDataSource(llm)
    # Tools made available to the agent
tools = [
Tool(
name="查询产品名称",
func=tesla_data_source.find_product_description,
description="通过产品名称找到产品描述时用的工具,输入的是产品名称",
),
Tool(
name="公司相关信息",
func=tesla_data_source.find_company_info,
description="当用户询问公司相关的问题,可以通过这个工具了解公司信息",
),
]
    # The user-defined prompt template
agent_prompt = CustomPromptTemplate(
template=AGENT_TMPL,
tools=tools,
input_variables=["input", "intermediate_steps"],
)
    # Parser for the agent's output
    output_parser = CustomOutputParser()
    # The most common chain: LLM + PromptTemplate
    llm_chain = LLMChain(llm=llm, prompt=agent_prompt)
    # Names of the defined tools
    tool_names = [tool.name for tool in tools]
    # Define the agent = llm_chain + output_parser + tool_names
agent = LLMSingleActionAgent(
llm_chain=llm_chain,
output_parser=output_parser,
stop=["\nObservation:"],
allowed_tools=tool_names,
)
    # Define the agent executor = agent + tools
agent_executor = AgentExecutor.from_agent_and_tools(
agent=agent, tools=tools, verbose=True
)
    # Main loop: keep asking questions until Ctrl+C
while True:
try:
user_input = input("请输入您的问题:")
response = agent_executor.run(user_input)
output_response(response)
except KeyboardInterrupt:
break | [
"input",
"intermediate_steps",
"context"
] |
2024-01-10 | benvansleen/LLModelica | src~__main__.py | from dotenv import load_dotenv
load_dotenv()
import functions as f
from openai_models import OpenAIMessage, OpenAIResponse
from chain import Chain
from om import om
print(om('getVersion()'))
def prompt_user(wrap_in_context: bool = False) -> OpenAIMessage:
print('> ', end='')
prompt = input()
if wrap_in_context:
prompt = Chain.wrap_prompt_in_context(prompt)
return OpenAIMessage(
role='user',
content=prompt,
)
def run_testbench(testbench: str) -> str:
print(f'Testbench: {testbench}')
print('Run? (y/n)')
print('> ', end='')
y_or_n = input().strip()
match y_or_n:
case 'y':
from pyparsing.exceptions import ParseException
try:
return om(testbench)
except ParseException as e:
print(e)
return f'{e}\nSomething went wrong... Think about it step by step...'
case 'n':
pass
case _:
print('Invalid input. Try again.')
return run_testbench(testbench)
return ''
def handle_response(
response: OpenAIResponse,
chain: Chain,
) -> Chain:
first_choice = response.choices[0]
chain.add(first_choice.message)
match first_choice.finish_reason:
case 'function_call':
result = f.dispatch_function(response)
if result.content.startswith('('):
result.content = run_testbench(chain.testbench)
print(result.content)
result.content += '\n' + prompt_user().content
case _:
result = prompt_user()
chain.add(result)
return chain
def prompt_step(chain: Chain) -> Chain:
if len(chain) <= 1:
chain.add(prompt_user(wrap_in_context=True))
if len(chain) > 10:
chain.reload_context()
response = f.llm(
chain,
model='gpt-4-0613',
temperature=0.2,
)
chain.print(clear=True)
messages = handle_response(response, chain)
messages.print(clear=True)
return messages
chain = Chain()
while True:
try:
chain = prompt_step(chain)
except KeyboardInterrupt:
from sys import exit
exit()
| [] |
2024-01-10 | benvansleen/LLModelica | src~om_embeddings.py | import os
import torch
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModel
from langchain.document_loaders import (
DirectoryLoader,
TextLoader,
)
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
class SentenceTransformerEmbeddings:
def __init__(self):
self.tokenizer = AutoTokenizer.from_pretrained(
'sentence-transformers/all-mpnet-base-v2',
)
self.model = AutoModel.from_pretrained(
'sentence-transformers/all-mpnet-base-v2',
)
return
def embed_fn(self, sentences: list[str]) -> torch.Tensor:
encoded_input = self.tokenizer(
sentences,
padding=True,
truncation=True,
return_tensors='pt',
)
# Compute token embeddings
with torch.no_grad():
model_output = self.model(**encoded_input)
# Perform pooling
return self.mean_pooling(
model_output,
encoded_input['attention_mask'],
)
def embed_documents(
self,
documents: list[str],
) -> list[list[float]]:
return self.embed_fn(documents).tolist()
def embed_query(self, query: str) -> list[float]:
return self.embed_fn([query]).tolist()[0]
# Mean Pooling - Take attention mask into account for correct averaging
@staticmethod
def mean_pooling(
model_output: torch.Tensor,
attention_mask: torch.Tensor,
) -> torch.Tensor:
# First element of model_output contains all token embeddings
token_embeddings = model_output[0]
input_mask_expanded = (attention_mask
.unsqueeze(-1)
.expand(token_embeddings.size())
.float())
return (
torch.sum(token_embeddings * input_mask_expanded, 1) /
torch.clamp(input_mask_expanded.sum(1), min=1e-9)
)
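# Illustrative direct use of the wrapper above (not executed here; the Chroma calls
# below go through the same embed_documents/embed_query interface internally):
#
#   ebd = SentenceTransformerEmbeddings()
#   query_vec = ebd.embed_query("How is friction modeled in Modelica?")   # list[float], 768-dim for all-mpnet-base-v2
#   doc_vecs = ebd.embed_documents(["model Resistor ... end Resistor;"])  # list of 768-dim vectors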
embed_fn = SentenceTransformerEmbeddings()
if not os.path.exists('data/om_embeddings'):
loader = DirectoryLoader(
'./data/ModelicaStandardLibrary/Modelica/',
glob='**/[!package]*.mo',
show_progress=True,
use_multithreading=True,
loader_cls=TextLoader,
)
docs = RecursiveCharacterTextSplitter(
chunk_size=3000,
chunk_overlap=0,
separators=[
'\nmodel ',
'\nblock ',
'\nfunction ',
# '\nannotation',
# '\n\n',
],
).split_documents(loader.load())
chunk = 100
db = Chroma.from_documents(
docs[:chunk],
embed_fn,
persist_directory='./data/om_embeddings',
)
    # Add the remaining documents in batches of `chunk`, including the final partial batch
    for begin in tqdm(range(chunk, len(docs), chunk)):
        end = min(begin + chunk, len(docs))
db.add_texts(
texts=[doc.page_content for doc in docs[begin:end]],
metadatas=[doc.metadata for doc in docs[begin:end]],
)
db.persist()
else:
db = Chroma(
embedding_function=embed_fn,
persist_directory='./data/om_embeddings',
)
| [] |
2024-01-10 | XanderWatson/XanderGPT | qa_engine~query_utils.py | import openai
from langchain.callbacks import get_openai_callback
from langchain.chains import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.prompts import (ChatPromptTemplate, HumanMessagePromptTemplate,
SystemMessagePromptTemplate)
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from utils import Cache, logger
from vectordb import PineconeClient
from vectordb.constants import (EMBEDDING_MODEL, OPENAI_API_KEY,
PINECONE_API_KEY, PINECONE_ENV)
from .constants import (CHAT_MODEL, PAST_MESSAGES_LIMIT,
PAST_MESSAGES_PURGE_TOKEN_LIMIT)
openai.api_key = OPENAI_API_KEY
cache = Cache()
class QAEngine:
def __init__(self, index_name="xandergpt"):
self.pinecone_client = PineconeClient(
pinecone_api_key=PINECONE_API_KEY,
pinecone_env=PINECONE_ENV
)
self.embeddings = OpenAIEmbeddings(
model=EMBEDDING_MODEL
)
self.index_name = index_name
def _get_docs(
self,
query,
namespace
):
if not self.pinecone_client.index_exists(index_name=self.index_name):
status, index_name = False, ""
else:
status, index_name = True, self.index_name
if status:
with get_openai_callback() as cb:
vectorstore = self.pinecone_client.Pinecone.from_existing_index(
index_name=index_name,
embedding=self.embeddings,
namespace=namespace
)
else:
vectorstore = None
logger.info(f"vectorstore: {vectorstore}")
if vectorstore:
self.docs = vectorstore.similarity_search(
query, namespace=namespace
)
else:
self.docs = []
def _convert_messages(self, messages):
msgs = []
for message in messages:
if message["role"] == "assistant":
msg = AIMessage(content=message["content"])
elif message["role"] == "user":
msg = HumanMessage(content=message["content"])
else:
msg = SystemMessage(content=message["content"])
msgs.append(msg)
return msgs
def _convert_docs(self, docs):
return [SystemMessage(content=doc.page_content) for doc in docs]
def _fetch_past_messages(self, id=""):
self.past_messages = []
if id:
if cache.exists(f"qa_history_{id}"):
previous_chats = cache.get(f"qa_history_{id}")
if len(previous_chats) > PAST_MESSAGES_LIMIT:
previous_chats = previous_chats[2:]
self.past_messages = previous_chats
past_messages_count = len(self.past_messages)
logger.info(f"Fetched {past_messages_count} past messages.")
logger.info(f"Past messages: {self.past_messages}")
def _get_response(
self,
query,
id="",
docs=[],
type="open"
):
past_messages = self._convert_messages(self.past_messages)
logger.info(f"The type of the incoming payload is: {type}")
if type in ["open", "all"]:
try:
llm = ChatOpenAI(
temperature=0,
verbose=True,
model_name=CHAT_MODEL
)
except Exception as ex:
logger.info(f"Error while creating ChatOpenAI: {ex}")
llm = ChatOpenAI(
temperature=0,
verbose=True,
model_kwargs={
"model": CHAT_MODEL
}
)
else:
llm = OpenAI(
model_name=CHAT_MODEL,
temperature=0,
verbose=True
)
base_prompt_message = """
You are a Question Answering Retrieval AI assistant. You are given a query, a list of documents, and a list of past messages in the conversation history. Your task is to answer the query using the documents and the past messages.
"""
base_prompt = SystemMessagePromptTemplate.from_template(
base_prompt_message
)
docs_prompt_message = """
Use the following documents as CONTEXT to this conversation:
"""
docs_prompt = SystemMessagePromptTemplate.from_template(
docs_prompt_message
)
converted_docs = self._convert_docs(docs)
history_prompt_message = """
These are the previous messages in the conversation for ADDITIONAL CONTEXT:
"""
history_prompt = SystemMessagePromptTemplate.from_template(
history_prompt_message
)
query_prompt_message = """
Answer the following query. If the data contains a list of items, please delimit them by a newline character:
{query}
"""
query_prompt = HumanMessagePromptTemplate.from_template(
query_prompt_message
)
if type in ["open", "trained"]:
prompts = [
base_prompt,
history_prompt,
*past_messages,
query_prompt
]
else:
prompts = [
base_prompt,
docs_prompt,
*converted_docs,
history_prompt,
*past_messages,
query_prompt
]
logger.info(f"Prompts: {prompts}")
prompt = ChatPromptTemplate(
messages=prompts,
input_variables=["query"]
)
if type == "trained":
prompt = prompt.format(query=query)
chain = load_qa_chain(
llm=llm,
chain_type="stuff",
verbose=True
)
else:
chain = LLMChain(
llm=llm,
verbose=True,
prompt=prompt
)
with get_openai_callback() as cb:
try:
if type == "trained":
msg = chain.run(input_documents=docs, question=prompt)
else:
msg = chain.run(query=query)
except Exception as e:
logger.error(
f"Error while running the chain: {e}", exc_info=True
)
return False, str(e)
if cb.total_tokens >= PAST_MESSAGES_PURGE_TOKEN_LIMIT:
if cache.exists(f"qa_history_{id}"):
cache.delete(f"qa_history_{id}")
return True, msg
def query_llm(
self,
query,
namespace,
response_type="trained",
id=""
):
self._fetch_past_messages(id)
if response_type == "open":
msg_status, msg = self._get_response(
query=query,
id=id
)
messages = self.past_messages
messages.append({"role": "user", "content": query})
if msg_status:
messages.append({"role": "assistant", "content": msg})
if id:
cache.set(f"qa_history_{id}", messages)
elif response_type == "trained":
self._get_docs(
query=query,
namespace=namespace
)
msg_status, msg = self._get_response(
query=query,
id=id,
docs=self.docs,
type="trained"
)
messages = self.past_messages
messages.append({"role": "user", "content": query})
if msg_status:
messages.append({"role": "assistant", "content": msg})
if id:
cache.set(f"qa_history_{id}", messages)
elif response_type == "all":
self._get_docs(
query=query,
namespace=namespace
)
msg_status, msg = self._get_response(
query=query,
id=id,
docs=self.docs,
type="all"
)
messages = self.past_messages
messages.append({"role": "user", "content": query})
if msg_status:
messages.append({"role": "assistant", "content": msg})
if id:
cache.set(f"qa_history_{id}", messages)
else:
msg_status, msg = False, "Invalid response type."
return msg_status, msg
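# Rough usage sketch; the namespace and session id values are placeholders for
# illustration and nothing in this module calls query_llm itself:
#
#   engine = QAEngine(index_name="xandergpt")
#   ok, answer = engine.query_llm(
#       query="Summarise the uploaded contract",
#       namespace="user-42-docs",      # hypothetical Pinecone namespace
#       response_type="trained",       # one of "open", "trained", "all"
#       id="session-42",               # enables chat-history caching
#   )
#   if ok:
#       print(answer)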
| [
"\n Use the following documents as CONTEXT to this conversation:\n ",
"\n Answer the following query. If the data contains a list of items, please delimit them by a newline character:\n\n {query}\n ",
"\n These are the previous messages in the conversation for ADDITIONAL CONTEXT:\n ",
"\n You are a Question Answering Retrieval AI assistant. You are given a query, a list of documents, and a list of past messages in the conversation history. Your task is to answer the query using the documents and the past messages.\n ",
"content"
] |
2024-01-10 | XanderWatson/XanderGPT | vectordb~client.py | import pinecone
from langchain.vectorstores import Pinecone
from pinecone.core.client.exceptions import ApiException
from utils import logger
from .constants import (BAD_REQUEST, NOT_FOUND,
PINECONE_DEFAULT_INDEX_DIMENSIONS,
PINECONE_DEFAULT_METRIC, PINECONE_DEFAULT_POD_TYPE,
PINECONE_LIMIT_EXCEEDED_MSG)
class PineconeClient:
def __init__(self, pinecone_api_key, pinecone_env):
from .utils import disable_ssl_warning
self.pinecone_api_key = pinecone_api_key
self.pinecone_env = pinecone_env
self.limit_exceeded = False
self.existing_index = ""
try:
pinecone.init(api_key=pinecone_api_key, environment=pinecone_env)
except Exception:
disable_ssl_warning()
pinecone.init(api_key=pinecone_api_key, environment=pinecone_env)
self.Pinecone = Pinecone
logger.info("Pinecone client initialized successfully")
def get_index(self, index_name):
return pinecone.Index(index_name=index_name)
def create_index(self, name, dimension=PINECONE_DEFAULT_INDEX_DIMENSIONS):
index_created = False
logger.info("Inside the create_index method of PineconeClient")
try:
pinecone.create_index(
name=name,
metric=PINECONE_DEFAULT_METRIC,
dimension=dimension,
pod_type=PINECONE_DEFAULT_POD_TYPE
)
index_created = True
except ApiException as ex:
if ex.status == BAD_REQUEST and \
ex.body == PINECONE_LIMIT_EXCEEDED_MSG:
self.limit_exceeded = True
if self.limit_exceeded:
existing_indexes = pinecone.list_indexes()
            index_deleted_successfully = False
            if existing_indexes:
                self.existing_index = existing_indexes[0]
            if self.existing_index:
                try:
                    pinecone.delete_index(name=self.existing_index)
                    index_deleted_successfully = True
                except ApiException as ex:
                    if ex.status == NOT_FOUND:
                        logger.error(
                            f"Error while deleting the existing index {ex}"
                        )
                        index_created = False
            if index_deleted_successfully:
try:
pinecone.create_index(
name=name,
metric=PINECONE_DEFAULT_METRIC,
dimension=dimension,
pod_type=PINECONE_DEFAULT_POD_TYPE
)
index_created = True
except Exception as ex:
logger.error(
f"The error during the vector index creation was: {ex}"
)
index_created = False
if index_created:
return True, name
else:
return False, ""
def list_indexes(self):
return pinecone.list_indexes()
def index_exists(self, index_name):
return index_name in self.list_indexes()
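# Rough usage sketch (API key, environment and index name are placeholders):
#
#   client = PineconeClient(pinecone_api_key="...", pinecone_env="gcp-starter")
#   if not client.index_exists("xandergpt"):
#       created, name = client.create_index("xandergpt", dimension=1536)
#   index = client.get_index("xandergpt")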
| [] |
2024-01-10 | XanderWatson/XanderGPT | vectordb~website_utils.py | from langchain.document_loaders import PlaywrightURLLoader
from utils import logger
async def website_text_data(urls):
loader = PlaywrightURLLoader(urls)
loader.requests_kwargs = {'verify': False}
logger.info(f"Scraping websites: {urls}")
data = await loader.aload()
logger.info("Websites scraped successfully")
return data
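# Example invocation (URL is a placeholder):
#
#   import asyncio
#   docs = asyncio.run(website_text_data(["https://example.com"]))
#   print(docs[0].page_content[:200])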
| [] |
2024-01-10 | XanderWatson/XanderGPT | vectordb~document_utils.py | import os
from langchain.document_loaders import PyPDFLoader
def load_pdf(pdf_file):
loader = PyPDFLoader(file_path=pdf_file)
pages = loader.load_and_split()
os.remove(pdf_file)
return pages
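# Example invocation (path is a placeholder; note the file is deleted after loading):
#
#   pages = load_pdf("/tmp/contract.pdf")
#   print(len(pages), "pages loaded")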
| [] |
2024-01-10 | codedog-ai/rag-embedding-eval | embedding_benchmark~benchmark.py | from langchain.schema import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.language_model import BaseLanguageModel
from embedding_benchmark.embedding import Case, DocumentEmbeddingBuilder
class Benchmark:
def __init__(
self, cases: list[Case], llm: BaseLanguageModel, embedding: Embeddings
):
self.cases = cases
self.builder = DocumentEmbeddingBuilder(llm=llm, embedding=embedding)
self.doc_ebds = {}
self.query_ebds = {}
self.result = []
def run_embedddings(self):
for case in self.cases:
key = case.topic
doc = case.doc()
pq = case.positive_query_docs()
nq = case.negative_query_docs()
self.builder.add_doc(key, doc)
self.builder.add_queries(key, pq + nq)
doc_embeddings = self.builder.run_doc_embedding()
query_embeddings = self.builder.run_query_embedding()
self.doc_ebds = doc_embeddings
self.query_ebds = query_embeddings
def calculate(
self,
doc_embeddings: dict[str, dict[str, list[tuple[Document, list[float]]]]],
query_embeddings: dict[
str, dict[str, dict[str, list[tuple[Document, list[float]]]]]
],
):
self._data = []
for key, doc_ebd in doc_embeddings.items():
query_ebd = query_embeddings[key]
for _d_type, _d_type_ebds in doc_ebd.items():
for _q_type, _q_type_ebds in query_ebd.items():
for _q_d_type, _q_d_type_ebds in _q_type_ebds.items():
min_dis, max_dis = self._calc_min_max_distance(
_d_type_ebds, _q_d_type_ebds
)
gt, query_type = _q_type.split("|")
record = {
"topic": key,
"doc_embedding_type": _d_type,
"query_embedding_type": _q_d_type,
"query_type": query_type,
"label": gt,
"min_score": min_dis,
"max_score": max_dis,
}
self.result.append(record)
def _calc_min_max_distance(
self, _d_type_ebds, _q_d_type_ebds
) -> tuple[float, float]:
min_dis = 100.0
max_dis = 0.0
for _d in _d_type_ebds:
for _q in _q_d_type_ebds:
distance = self._cosine(_d[1], _q[1])
if distance <= min_dis:
min_dis = distance
if distance >= max_dis:
max_dis = distance
return min_dis, max_dis
def _cosine(self, a: list[float], b: list[float]) -> float:
_sum = 0.0
for i in range(len(a)):
_sum += a[i] * b[i]
_sum_a = 0.0
_sum_b = 0.0
for i in range(len(a)):
_sum_a += a[i] * a[i]
_sum_b += b[i] * b[i]
return _sum / (_sum_a * _sum_b) ** 0.5
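# Rough usage sketch. How Case objects are built depends on
# embedding_benchmark.embedding (not shown here), so `cases`, `llm` and
# `embedding` are assumed to be prepared elsewhere:
#
#   bench = Benchmark(cases=cases, llm=llm, embedding=embedding)
#   bench.run_embedddings()   # note: method name is spelled with three d's above
#   bench.calculate(bench.doc_ebds, bench.query_ebds)
#   for record in bench.result:
#       print(record["topic"], record["label"], record["min_score"], record["max_score"])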
| [] |
2024-01-10 | codedog-ai/rag-embedding-eval | codedog_sdk~chains~bool_chain.py | from typing import Any, Dict, List, Optional
from langchain import LLMChain, PromptTemplate
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain.chains.base import Chain
from langchain.pydantic_v1 import Extra
from langchain.schema.language_model import BaseLanguageModel
bool_template = """Answer given question: {question}.
Return True or False only, without other words or comma or symbol.
For example, you can return true or false.
return:"""
class BoolChain(Chain):
"""A llm chain always return True/False"""
@property
def lc_serializable(self) -> bool:
return True
question_chain: LLMChain
output_key: str = "flag" #: :meta private:
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def _chain_type(self) -> str:
return "bool_chain"
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return self.question_chain.input_keys
@property
def output_keys(self) -> List[str]:
"""Expect output key.
:meta private:
"""
return [self.output_key]
@classmethod
def from_llm(cls, llm: BaseLanguageModel, question: str):
"""Load chain from llm."""
question_chain = LLMChain(
llm=llm,
prompt=PromptTemplate.from_template(
bool_template.format(question=question)
),
)
return cls(question_chain=question_chain)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
# type: ignore[call-arg]
answer = self.question_chain.run(callbacks=_run_manager.get_child(), **inputs)
flag = self._parse_flag(answer)
result: Dict[str, Any] = {self.output_key: flag}
return result
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
answer = await self.question_chain.arun(
callbacks=_run_manager.get_child(), **inputs
)
flag = self._parse_flag(answer)
result: Dict[str, Any] = {
self.output_key: flag,
}
return result
def _parse_flag(self, answer: str) -> bool:
if answer[:4].lower() == "true":
return True
elif answer[:5].lower() == "false":
return False
raise ValueError(f"Cannot parse answer {answer} to bool")
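# Rough usage sketch (question text is illustrative). The question is baked into
# the prompt at construction time, so the chain needs no extra inputs:
#
#   chain = BoolChain.from_llm(llm, "Is the sky blue on a clear day?")
#   flag = chain({})["flag"]   # True or False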
| [
"Answer given question: {question}.\n\nReturn True or False only, without other words or comma or symbol.\nFor example, you can return true or false.\n\nreturn:"
] |
2024-01-10 | codedog-ai/rag-embedding-eval | codedog_sdk~chains~list_chain.py | import json
from typing import Any, Dict, List, Optional
from langchain import LLMChain, PromptTemplate
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain.chains.base import Chain
from langchain.pydantic_v1 import Extra
from langchain.schema.language_model import BaseLanguageModel
list_template = """Given {k} points to answer a question: {question}.
Return a json list of string without other words, symbol or markdown formatting.
for example your return might be ["a","b","c"]"""
fix_template = """Help me format given content to json list.
content:
---
{content}
---
You must return a json list of string without other words, symbol or markdown formatting.
return:"""
class ListChain(Chain):
"""A llm chain always return a list of string"""
@property
def lc_serializable(self) -> bool:
return True
question_chain: LLMChain
fix_chain: LLMChain
output_key: str = "texts" #: :meta private:
k: int = 4
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def _chain_type(self) -> str:
return "list_chain"
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return self.question_chain.input_keys
@property
def output_keys(self) -> List[str]:
"""Expect output key.
:meta private:
"""
return [self.output_key]
@classmethod
def from_llm(cls, llm: BaseLanguageModel, question: str, k: int = 4):
"""Load chain from llm."""
question_chain = LLMChain(
llm=llm,
prompt=PromptTemplate.from_template(
list_template.format(k=str(k), question=question)
),
)
fix_chain = LLMChain(llm=llm, prompt=PromptTemplate.from_template(fix_template))
return cls(question_chain=question_chain, fix_chain=fix_chain, k=k)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
# type: ignore[call-arg]
answer = self.question_chain.run(callbacks=_run_manager.get_child(), **inputs)
flag = self._parse_list(answer)
result: Dict[str, Any] = {self.output_key: flag}
return result
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
answer = await self.question_chain.arun(
callbacks=_run_manager.get_child(), **inputs
)
flag = self._parse_list(answer)
result: Dict[str, Any] = {
self.output_key: flag,
}
return result
def _parse_list(self, answer: str) -> list[str]:
if not answer:
answer = "[]"
try:
result = json.loads(answer)
except json.decoder.JSONDecodeError:
answer = self.fix_chain.run(answer)
result = json.loads(answer)
if not result:
result = []
if not isinstance(result, list):
raise ValueError(f"Cannot parse answer {answer} to list")
result = [
json.dumps(x, ensure_ascii=False) if not isinstance(x, str) else x
for x in result
]
if len(result) >= self.k:
result = result[: self.k]
return result
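# Rough usage sketch (question text and outputs are illustrative):
#
#   chain = ListChain.from_llm(llm, "name popular Python web frameworks", k=3)
#   items = chain({})["texts"]   # e.g. ["Django", "Flask", "FastAPI"]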
| [
"Help me format given content to json list.\ncontent:\n---\n{content}\n---\n\nYou must return a json list of string without other words, symbol or markdown formatting.\nreturn:",
"Given {k} points to answer a question: {question}.\n\nReturn a json list of string without other words, symbol or markdown formatting.\nfor example your return might be [\"a\",\"b\",\"c\"]"
] |
2024-01-10 | AndreasKaratzas/rainbow-dqn | src~structures.py | # -*- coding: utf-8 -*-
"""Segment tree for Prioritized Replay Buffer."""
import operator
class SegmentTree:
""" Create SegmentTree.
Taken from OpenAI baselines github repository:
https://github.com/openai/baselines/blob/master/baselines/common/segment_tree.py
Parameters
----------
capacity : int
Capacity of the tree.
operation : function
Function to apply to two values in the tree.
init_value : float
Initial value of the tree.
"""
def __init__(self, capacity, operation, init_value):
"""Initialization.
Parameters
----------
capacity : int
Capacity of the tree.
operation : function
Function to apply to two values in the tree.
init_value : float
Initial value of the tree.
"""
assert (
capacity > 0 and capacity & (capacity - 1) == 0
), "capacity must be positive and a power of 2."
self.capacity = capacity
self.tree = [init_value for _ in range(2 * capacity)]
self.operation = operation
def _operate_helper(self, start, end, node, node_start, node_end):
"""Returns result of operation in segment."""
if start == node_start and end == node_end:
return self.tree[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._operate_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._operate_helper(start, end, 2 * node + 1, mid + 1, node_end)
else:
return self.operation(
self._operate_helper(start, mid, 2 * node, node_start, mid),
self._operate_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end),
)
def operate(self, start=0, end=0):
"""Returns result of applying `self.operation`."""
if end <= 0:
end += self.capacity
end -= 1
return self._operate_helper(start, end, 1, 0, self.capacity - 1)
def __setitem__(self, idx, val):
"""Set value in tree."""
idx += self.capacity
self.tree[idx] = val
idx //= 2
while idx >= 1:
self.tree[idx] = self.operation(self.tree[2 * idx], self.tree[2 * idx + 1])
idx //= 2
def __getitem__(self, idx):
"""Get real value in leaf node of tree."""
assert 0 <= idx < self.capacity
return self.tree[self.capacity + idx]
class SumSegmentTree(SegmentTree):
""" Create SumSegmentTree.
Taken from OpenAI baselines github repository:
https://github.com/openai/baselines/blob/master/baselines/common/segment_tree.py
"""
def __init__(self, capacity):
"""Initialization.
Parameters
----------
capacity : int
Capacity of the tree.
"""
super(SumSegmentTree, self).__init__(
capacity=capacity, operation=operator.add, init_value=0.0
)
def sum(self, start=0, end=0):
"""Returns arr[start] + ... + arr[end]."""
return super(SumSegmentTree, self).operate(start, end)
def retrieve(self, upperbound):
"""Find the highest index `i` about upper bound in the tree"""
assert 0 <= upperbound <= self.sum() + 1e-5, "upperbound: {}".format(upperbound)
idx = 1
while idx < self.capacity: # while non-leaf
left = 2 * idx
right = left + 1
if self.tree[left] > upperbound:
idx = 2 * idx
else:
upperbound -= self.tree[left]
idx = right
return idx - self.capacity
class MinSegmentTree(SegmentTree):
""" Create SegmentTree.
Taken from OpenAI baselines github repository:
https://github.com/openai/baselines/blob/master/baselines/common/segment_tree.py
"""
def __init__(self, capacity):
"""Initialization.
Parameters
----------
capacity : int
Capacity of the tree.
"""
super(MinSegmentTree, self).__init__(
capacity=capacity, operation=min, init_value=float("inf")
)
def min(self, start=0, end=0):
"""Returns min(arr[start], ..., arr[end])."""
return super(MinSegmentTree, self).operate(start, end)
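# Illustrative usage sketch: the priorities below are made-up values, shown only
# to clarify how SumSegmentTree.retrieve maps a sampled priority mass to a leaf
# index in a prioritized replay buffer.
#
#   tree = SumSegmentTree(capacity=4)
#   for i, priority in enumerate([1.0, 2.0, 3.0, 4.0]):
#       tree[i] = priority
#   tree.sum()          # 10.0 -- total priority mass over all leaves
#   tree.retrieve(0.5)  # 0    -- falls inside the first leaf's mass
#   tree.retrieve(9.5)  # 3    -- falls inside the last leaf's mass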
| [] |
2024-01-10 | Kanakjr/BillBot | app~llm_utils.py | from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.chains.summarize import load_summarize_chain
from langchain.schema.document import Document
import streamlit as st
import os
import json
def get_llm(OPENAI_MODEL=None, max_tokens=1000):
if not OPENAI_MODEL:
OPENAI_MODEL = os.environ.get("OPENAI_MODEL")
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
llm = ChatOpenAI(
temperature=0,
model_name=OPENAI_MODEL,
openai_api_key=OPENAI_API_KEY,
max_tokens=max_tokens,
)
return llm
@st.cache_data(ttl=60 * 60 * 12, show_spinner=False) # Cache data for 12 hours
def get_openAPI_response(text, task, OPENAI_MODEL=None, max_tokens=1000, llm=None):
messages = [HumanMessage(content=text)]
    if llm is None:
        llm = get_llm(OPENAI_MODEL=OPENAI_MODEL, max_tokens=max_tokens)
response = llm.invoke(messages, config={"run_name": task})
response = str(response.content)
return response
@st.cache_data(ttl=60 * 60 * 12, show_spinner=False) # Cache data for 12 hours
def summarize_bill(text, task="Summarize", chain_type="stuff"):
docs = [Document(page_content=text)]
llm = get_llm()
prompt_template = """## Summarise the given document:
## Doucment Text:
{text}
## Document Summary:"""
prompt = PromptTemplate.from_template(prompt_template)
chain = load_summarize_chain(llm, prompt=prompt, chain_type=chain_type)
result = chain.invoke(docs, config={"run_name": task})
return result["output_text"]
@st.cache_data(ttl=60 * 60 * 12, show_spinner=False) # Cache data for 12 hours
def get_recommended_question(document_summary, key_values=[]):
key_values = format_dict_as_string(key_values)
prompt = PromptTemplate(
input_variables=["document_summary", "key_values"],
template="""## You are given a document summary and some of the key value data from the document.
## Document Summary:
{document_summary}
## Document Data:
{key_values}
## Generate a list of 10 recommended questions on the given document. Format it as markdown text.
""",
)
llm = get_llm()
chain = LLMChain(llm=llm, prompt=prompt)
response = chain.invoke(
input={
"document_summary": document_summary,
"key_values": key_values,
},
config={"run_name": "RecommenedQBill"},
)
response = response["text"]
return response
def get_bill_metadata(document_content, key_values=[]):
# Assuming format_dict_as_string is a function that formats key_values into a string
key_values_str = format_dict_as_string(key_values)
prompt = PromptTemplate(
input_variables=["document_content", "key_values"],
template="""## You are given a document summary and some of the key value data from the document.
## Document Content:
{document_content}
## Document Data:
{key_values}
## Based on the document summary and data provided, please output the following in JSON format:
- recommended_questions: A list of 10 recommended questions about the document.
- keywords: A list of keywords or key phrases that represent the main topics or themes of the document.
- named_entities: A list of named entities from the document, with each named entity being a list of the extracted name and its type (PERSON,Organization,Location,Date,Time,etc) (e.g., [["Steve Jobs", "PERSON"], ["India", "COUNTRY"]]).
- document_type: Identify and categorize the type of document (e.g., "bill", "invoice", "ID card", "invoice", "contract", "report", "legal document" etc.).
- language: Determine the language in which the document is written
- currency: Determine the currency used in the document. Output the currency code, such as USD, EUR, GBP, INR etc. Consider variations in formatting and placement of currency symbols, as well as potential textual references to currencies.
## JSON Output:
""",
)
# Get the language model response
llm = get_llm(max_tokens=2000)
chain = LLMChain(llm=llm, prompt=prompt)
response = chain.invoke(
input={
"document_content": document_content,
"key_values": key_values_str,
},
config={"run_name": "BillMetadata"},
)
# Parse the response, assuming the model returns a properly formatted JSON string
response_data = json.loads(response["text"])
# Output the combined dictionary
return response_data
@st.cache_data(ttl=60 * 60 * 12, show_spinner=False) # Cache data for 12 hours
def get_billbot_response(question, document_summary, key_values=[]):
key_values = format_dict_as_string(key_values)
prompt = PromptTemplate(
input_variables=["question", "document_summary", "key_values"],
template="""## You are given a document summary and some of the key value data from the document.
## Document Summary:
{document_summary}
## Document Data:
{key_values}
## You have to answer the given user question stricty from the document.
Do not assume any values or answer on your own.
If you don't know the answer respond with "I don't know the answer"
## Question: {question}
""",
)
llm = get_llm()
chain = LLMChain(llm=llm, prompt=prompt)
response = chain.invoke(
input={
"question": question,
"document_summary": document_summary,
"key_values": key_values,
},
config={"run_name": "BillBot"},
)
response = response["text"]
return response
@st.cache_data(ttl=60 * 60 * 12, show_spinner=False) # Cache data for 12 hours
def get_credit_card_output(bill_summary, key_value_pairs ):
key_value_pairs = format_dict_as_string(key_value_pairs)
output_structure = {"CreditCard":{"BankName":"","CreditCardType":"","NameOnCard":"","CardNo":"","CreditLimit":"","Statement":{"StatementDate":"","PaymentDueDate":"","TotalAmountDue":"","MinimumAmountDue":"","FinanceCharges":"",}}}
# "Transactions":[{"TransactionDate":"","TransactionDescription":"","TransactionType":"","TransactionAmount":"","TransactionCategory":{"TransactionHead":"","TransactionSubHead":"","Payee":""}}]
prompt = PromptTemplate(
input_variables=["bill_summary", "key_value_pairs", "output_structure"],
template='''## Summary:
{bill_summary}
## Data extracted from Credit Card Statement as key values:
{key_value_pairs}
## Output_JSON_Format:
{output_structure}
## Task: Structure the data into the provided Output_JSON_Format.
## Output JSON:
''',
)
llm = get_llm(max_tokens=500)
chain = LLMChain(llm=llm, prompt=prompt)
response = chain.invoke(
input={
"bill_summary": bill_summary,
"key_value_pairs": key_value_pairs,
"output_structure": output_structure,
},
config={"run_name": "BillBot"},
)
response = response["text"]
return response
def format_dict_as_string(input_dict):
formatted_string = ""
for key, value in input_dict.items():
formatted_string += f"{key} : {value}\n"
return formatted_string.strip()
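# Worked example: format_dict_as_string({"Name": "Jane", "Total": 42})
# returns "Name : Jane\nTotal : 42" -- one "key : value" pair per line.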
if __name__ == "__main__":
import os
from dotenv import load_dotenv
import warnings
from utils import get_or_generate_analyze_json,convert_keyvalues_to_dict,extract_text_from_pdf
warnings.filterwarnings(
"ignore",
category=UserWarning,
module="streamlit.runtime.caching.cache_data_api",
)
load_dotenv("./app/.env")
pdf_path = "./app/data/LT E-BillPDF_231119_145147.pdf"
json_data = get_or_generate_analyze_json(pdf_path)
document_content = extract_text_from_pdf(json_data)
key_values = convert_keyvalues_to_dict(json_data)
get_bill_metadata(document_content, key_values)
| [
"key_value_pairs",
"## Summary:\n{bill_summary}\n\n## Data extracted from Credit Card Statement as key values: \n{key_value_pairs}\n\n## Output_JSON_Format:\n{output_structure}\n\n## Task: Structure the data into the provided Output_JSON_Format. \n\n## Output JSON:\n",
"## You are given a document summary and some of the key value data from the document. \n## Document Summary:\n{document_summary} \n\n## Document Data:\n{key_values}\n\n## You have to answer the given user question stricty from the document. \nDo not assume any values or answer on your own. \nIf you don't know the answer respond with \"I don't know the answer\"\n\n## Question: {question}\n",
"output_structure",
"document_summary",
"## Summarise the given document:\n## Doucment Text:\n{text}\n\n## Document Summary:",
"question",
"bill_summary",
"key_values",
"## You are given a document summary and some of the key value data from the document. \n## Document Content: \n{document_content} \n\n## Document Data: \n{key_values}\n\n## Based on the document summary and data provided, please output the following in JSON format:\n- recommended_questions: A list of 10 recommended questions about the document.\n- keywords: A list of keywords or key phrases that represent the main topics or themes of the document.\n- named_entities: A list of named entities from the document, with each named entity being a list of the extracted name and its type (PERSON,Organization,Location,Date,Time,etc) (e.g., [[\"Steve Jobs\", \"PERSON\"], [\"India\", \"COUNTRY\"]]).\n- document_type: Identify and categorize the type of document (e.g., \"bill\", \"invoice\", \"ID card\", \"invoice\", \"contract\", \"report\", \"legal document\" etc.).\n- language: Determine the language in which the document is written\n- currency: Determine the currency used in the document. Output the currency code, such as USD, EUR, GBP, INR etc. Consider variations in formatting and placement of currency symbols, as well as potential textual references to currencies. \n\n## JSON Output:\n",
"## You are given a document summary and some of the key value data from the document. \n## Document Summary: \n{document_summary} \n\n## Document Data: \n{key_values}\n\n## Generate a list of 10 recommended questions on the given document. Format it as markdown text.\n",
"document_content"
] |
2024-01-10 | Lancelot-24/ModularMind | framework~models~Models.py | from abc import ABC, abstractmethod
import time
import openai
from termcolor import colored
import tiktoken
import os
from enum import Enum
class ModelBase(ABC):
@abstractmethod
def __init__(self):
pass
@abstractmethod
def run(self):
pass
class OpenAI(ModelBase):
"""
The OpenAI model for usage in an Agent.
"""
model: str
stream: bool
chatEncoding: object
strategy: str
evaluation_strategy: str
base_api_key: str
base_url :str
def __init__(self,
base_api_key: str = "",
chatEncoding = tiktoken.get_encoding("cl100k_base"),
model: str = "gpt-3.5-turbo",
base_url :str = 'https://api.openai.com/v1',
stream: bool = True,
strategy="cot",
evaluation_strategy="value",):
self.model = model
self.stream = stream
self.chatEncoding = chatEncoding
if base_api_key == "" or base_api_key is None:
from dotenv import load_dotenv
load_dotenv()
base_api_key = os.environ.get("OPENAI_API_KEY", "")
print('Using OpenAI API Key from environment variable')
openai.api_base = base_url
openai.api_key = base_api_key
self.strategy = strategy
self.evaluation_strategy = evaluation_strategy
def set_api_info(self, base_api_key: str = "", base_url :str = 'https://api.openai.com/v1'):
openai.api_base = base_url
openai.api_key = base_api_key
def run_with_streaming(self,
query: str,
system_prompt: str = "",
show_token_consumption: bool = True,
total_session_tokens: int = 0,
temperature: int = 0,
max_tokens: int = 1000):
memory = ([
{ "role": "system", "content": system_prompt},
{ "role": "user", "content": query },
])
total_session_tokens = sum([len(self.chatEncoding.encode(message["content"])) for message in memory])
response = openai.ChatCompletion.create(
model=self.model,
messages=memory,
temperature=temperature,
stream=self.stream,
max_tokens=max_tokens,)
'''with open("openai.logs", "a", encoding='utf-8') as log_file:
log_file.write(
"\n" + "-----------" + "\n" + "System Prompt : " + system_prompt + "\n" +
"\n" + "-----------" + "\n" + "Prompt : " + query + "\n"
)'''
if(self.stream):
tokens_used = 0
responses = ''
#process each chunk of the response
for chunk in response:
if "role" in chunk["choices"][0]["delta"]:
continue
elif "content" in chunk["choices"][0]["delta"]:
tokens_used += 1
r_text = chunk["choices"][0]["delta"]["content"]
responses += r_text
print(colored(r_text, "green"), end='', flush=True)
total_session_tokens += tokens_used
if show_token_consumption:
print(colored("\nTokens used this time: " + str(tokens_used), "red"))
print(colored("\nTokens used so far: " + str(total_session_tokens), "yellow"))
return responses
else:
return response["choices"][0]["message"]["content"]
def run(self, query, system_prompt: str = "", max_tokens: int = 1000, temperature: int = 0):
while True:
try:
messages = [
{ "role": "system", "content": system_prompt},
{"role": "user", "content": query}
]
response = openai.ChatCompletion.create(
model=self.model,
messages=messages,
max_tokens=max_tokens,
temperature=temperature
)
'''with open("openai.logs", "a", encoding='utf-8') as log_file:
log_file.write(
"\n" + "-----------" + "\n" + "System Prompt : " + system_prompt + "\n" +
"\n" + "-----------" + "\n" + "Prompt : " + query + "\n"
)'''
return response["choices"][0]["message"]["content"]
except Exception as e:
                sleep_duration = float(os.environ.get("OPENAI_RATE_TIMEOUT", 10))
print(
f"ERROR, sleeping for {sleep_duration}s and retrying..."
)
time.sleep(sleep_duration)
class Models(Enum):
OpenAI = OpenAI()
@staticmethod
def get_Model(model_name: str):
for model in Models:
if model.name == model_name:
return model.value
return None
'''
from dotenv import load_dotenv
load_dotenv()
import os
CHIMERA_GPT_KEY = os.getenv('CHIMERA_GPT_KEY')
ZUKI_API_KEY = os.getenv('ZUKI_API_KEY')
WEBRAFT_API_KEY = os.getenv('WEBRAFT_API_KEY')
NOVA_API_KEY = os.getenv('NOVA_API_KEY')
OPEN_AI_BASE = 'https://api.nova-oss.com/v1' #"https://thirdparty.webraft.in/v1" # #"https://thirdparty.webraft.in/v1" #"https://zukijourney.xyzbot.net/v1" #'https://api.nova-oss.com/v1' #"https://thirdparty.webraft.in/v1" # #"https://api.naga.ac/v1"
llm = OpenAI(model="gpt-3.5-turbo", stream=True)
#llm.run("What is the weather in New York?")
print(llm.run(query="Explain to me embeddings and vector databases", max_tokens=400, temperature=0.0, stop=None))''' | [] |
2024-01-10 | LeDat98/Webdemo | My_sever.py | from flask import Flask, render_template, request
import os
import random
from deep_translator import GoogleTranslator
import openai
import urllib.request
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = "static"
# @app.route('/')
# def home():
# # return '<button onclick="window.location.href=\'/text_to_image\'">Go to image</button>'
# return render_template('index.html')
@app.route("/",methods = ['GET','POST'])
def text_To_Image():
    # get the text from the client
text = request.args.get('text')
Style_list = [' ','A digital illustration of a with clockwork machines, 4k, detailed, trending in artstation, fantasy vivid colors',
'masterpiece, masterpiece, anime, sadboi, aesthetic, transparent color vinyl, highly detailed, reflections, transparent iridescent opaque rgb, chromatic aberration, +4k UHD',
'mid century modern, indoor garden with fountain, retro,m vintage, designer furniture made of wood and plastic, concrete table, wood walls, indoor potted tree, large window, outdoor forest landscape, beautiful sunset, cinematic, concept art, sunstainable architecture, octane render, utopia, ethereal, cinematic light, -ar 16:9 -stylize 45000',
'futuristic nighttime cyberpunk New York City skyline landscape vista photography by Carr Clifton & Galen Rowell, 16K resolution, Landscape veduta photo by Dustin Lefevre & tdraw, 8k resolution, detailed landscape painting by Ivan Shishkin, DeviantArt, Flickr, rendered in Enscape, Miyazaki, Nausicaa Ghibli, Breath of The Wild, 4k detailed post processing, atmospheric, hyper realistic, 8k, epic composition, cinematic, artstation —ar 16:9',
'The Legend of Zelda landscape atmospheric, hyper realistic, 8k, epic composition, cinematic, octane render, artstation landscape vista photography by Carr Clifton & Galen Rowell, 16K resolution, Landscape veduta photo by Dustin Lefevre & tdraw, 8k resolution, detailed landscape painting by Ivan Shishkin, DeviantArt, Flickr, rendered in Enscape, Miyazaki, Nausicaa Ghibli, Breath of The Wild, 4k detailed post processing, artstation, rendering by octane, unreal engine —ar 16:9']
if text:
if 'Style1' in text:
            text = text.replace('Style1', "")  # remove the Style keyword from the text
            styles = Style_list[1]  # set the style string here
        elif 'Style2' in text:
            text = text.replace('Style2', "")
            styles = Style_list[2]  # set the style string here
        elif 'Style3' in text:
            text = text.replace('Style3', "")
            styles = Style_list[3]  # set the style string here
        elif 'Style4' in text:
            text = text.replace('Style4', "")
            styles = Style_list[4]  # set the style string here
        elif 'Style5' in text:
            text = text.replace('Style5', "")
            styles = Style_list[5]  # set the style string here
        else:
            styles = ' '
        # translate the text
print("start translate text to english")
translated_text = GoogleTranslator(source='auto', target='en').translate(text)
        # send translated_text to the model and run
text_t_img = f"{translated_text},{styles}"
print(text_t_img)
# Load your API key from an environment variable or secret management service
openai.api_key = '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'
openai.Model.list()
response = openai.Image.create(prompt=text_t_img, n= 3, size= "1024x1024")
image_url = response['data'][0]['url']
image_url1 = response['data'][1]['url']
image_url2 = response['data'][2]['url']
        # save the images
        ## create random image names
sample_string = 'qwertyuiopasdfghj'
random_name = ''.join((random.choice(sample_string)) for x in range(len(sample_string)))
random_name1 = ''.join((random.choice(sample_string)) for x in range(len(sample_string)))
random_name2 = ''.join((random.choice(sample_string)) for x in range(len(sample_string)))
img_name = f"img_{random_name}.png"
img_name1 = f"img_{random_name1}.png"
img_name2 = f"img_{random_name2}.png"
        # create the paths to save the images
        path_to_save = os.path.join(app.config['UPLOAD_FOLDER'], img_name)
        path_to_save1 = os.path.join(app.config['UPLOAD_FOLDER'], img_name1)
        path_to_save2 = os.path.join(app.config['UPLOAD_FOLDER'], img_name2)
        print(path_to_save)
        print(path_to_save1)
        print(path_to_save2)
        # save the images
        urllib.request.urlretrieve(image_url,path_to_save)
        urllib.request.urlretrieve(image_url1,path_to_save1)
        urllib.request.urlretrieve(image_url2,path_to_save2)
        # get the saved image names
# image_path = "img01.png"
print("have text")
return render_template("item3.html", user_image = img_name, user_image1 = img_name1,
user_image2 = img_name2, msg="できあがりました。いかがでしょうか?")
else:
return render_template("item3.html")
| [] |
2024-01-10 | ai-hero/holiday_party | image_gen.py | import streamlit as st
from openai import OpenAI
import requests
from PIL import Image
from io import BytesIO
from dotenv import load_dotenv
load_dotenv()
# Initialize OpenAI client
client = OpenAI()
# Streamlit interface
st.title("Image Generator using DALL-E")
# User input for the prompt
user_prompt = st.text_input("Enter your prompt:", "a white siamese cat")
# Button to generate image
if st.button("Generate Image"):
# API call to OpenAI
response = client.images.generate(
model="dall-e-3",
prompt=user_prompt,
size="1024x1024",
quality="standard",
n=1,
)
# Get the image URL from the response
image_url = response.data[0].url
# Fetch and display the image
response = requests.get(image_url)
image = Image.open(BytesIO(response.content))
st.image(image, caption="Generated Image")
| [
"a white siamese cat",
"Enter your prompt:"
] |
2024-01-10 | ai-hero/holiday_party | photobooth.py | import streamlit as st
import cv2
import numpy as np
from PIL import Image
import requests
from io import BytesIO
from openai import OpenAI
from dotenv import load_dotenv
import codenamize
from datetime import datetime
# Load environment variables
load_dotenv()
# Initialize OpenAI client
client = OpenAI()
def create_png_mask(image):
"""
Create a PNG mask for greenscreen areas in an image.
Args:
image (numpy.ndarray): Input image in BGR format.
Returns:
numpy.ndarray: RGBA image where greenscreen areas are transparent.
"""
# Convert BGR to HSV format
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
# Define range of green color in HSV
lower_green = np.array([36, 0, 0])
upper_green = np.array([86, 255, 255])
# Threshold the HSV image to get only green colors
mask = cv2.inRange(hsv, lower_green, upper_green)
# Invert mask: greenscreen areas become 0 (transparent)
inverted_mask = cv2.bitwise_not(mask)
# Convert the original image to RGBA
rgba_image = cv2.cvtColor(image, cv2.COLOR_BGR2BGRA)
# Set the alpha channel of the image to the inverted mask
rgba_image[:, :, 3] = inverted_mask
return rgba_image
def replace_greenscreen_with_background(foreground, background):
"""
Replace greenscreen areas in an image with a background image.
Args:
foreground (numpy.ndarray): Input image in BGR format with greenscreen.
background (numpy.ndarray): Background image in BGR format.
Returns:
numpy.ndarray: Image where greenscreen areas are replaced with the background.
"""
# Convert BGR to HSV format for the foreground image
hsv = cv2.cvtColor(foreground, cv2.COLOR_BGR2HSV)
# Define range of green color in HSV
lower_green = np.array([36, 0, 0])
upper_green = np.array([86, 255, 255])
# Threshold the HSV image to get only green colors
mask = cv2.inRange(hsv, lower_green, upper_green)
# Invert the mask: non-greenscreen areas become 0
inverted_mask = cv2.bitwise_not(mask)
# Apply the inverted mask to the foreground image
foreground_masked = cv2.bitwise_and(foreground, foreground, mask=inverted_mask)
# Apply the original mask to the background image
background_masked = cv2.bitwise_and(background, background, mask=mask)
# Combine the masked foreground and background
combined = cv2.add(foreground_masked, background_masked)
return combined
def call_dalle_api(image, mask, prompt):
"""
Call the DALL-E API to edit an image based on the provided mask and prompt.
Args:
image (numpy.ndarray): Input image.
mask (numpy.ndarray): Mask for the image.
prompt (str): Prompt for DALL-E.
Returns:
str: URL of the generated image.
"""
response = client.images.edit(
model="dall-e-2", image=image, mask=mask, prompt=prompt, n=1, size="512x512"
)
return response.data[0].url
def resize_image_with_aspect_ratio(
image, target_size=(1024, 1024), background_color=(0, 0, 0)
):
"""
Resize an image to a target size while preserving the aspect ratio.
Fill the extra space with a specified background color.
Args:
image (numpy.ndarray): The original image.
target_size (tuple): The target size as (width, height).
background_color (tuple): Background color in BGR format.
Returns:
numpy.ndarray: The resized image with preserved aspect ratio.
"""
# Calculate aspect ratio
h, w = image.shape[:2]
aspect_ratio = w / h
# Determine new dimensions based on aspect ratio
if w > h:
new_w = target_size[0]
new_h = int(new_w / aspect_ratio)
else:
new_h = target_size[1]
new_w = int(new_h * aspect_ratio)
# Resize the image
resized_image = cv2.resize(image, (new_w, new_h))
# Create a black background
background = np.full(
(target_size[1], target_size[0], 3), background_color, dtype=np.uint8
)
# Calculate center offset
x_offset = (target_size[0] - new_w) // 2
y_offset = (target_size[1] - new_h) // 2
# Place the resized image onto the center of the background
background[y_offset : y_offset + new_h, x_offset : x_offset + new_w] = resized_image
return background
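# Illustrative usage sketch: the file names below are made-up placeholders. It
# shows how the helpers above fit together outside Streamlit -- resize the
# greenscreen frame first so both arrays share a shape, then key out the green
# pixels and drop the background in.
#
#   fg = cv2.imread("greenscreen_shot.png")
#   bg = cv2.imread("background.png")
#   fg = resize_image_with_aspect_ratio(fg, target_size=(1024, 1024))
#   bg = cv2.resize(bg, (1024, 1024))  # masking requires both images to share a shape
#   composite = replace_greenscreen_with_background(fg, bg)
#   cv2.imwrite("composite.png", composite)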
# Streamlit UI
st.title("Greenscreen Photo Booth")
uploaded_file = st.camera_input("Take a picture")
prompt = st.text_input("Enter a prompt for the full new image:")
if uploaded_file is not None and prompt:
# Read the uploaded file
file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
opencv_image = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
opencv_image_resized = resize_image_with_aspect_ratio(opencv_image)
image_bytes = cv2.imencode(".png", opencv_image_resized)[1].tobytes()
# Call the DALL-E API
try:
response = client.images.generate(
model="dall-e-3",
prompt=prompt,
size="1024x1024",
quality="standard",
n=1,
)
image_url = response.data[0].url
# Display the image
response = requests.get(image_url)
background_img = Image.open(BytesIO(response.content))
# Convert to cv2 image
background_img = cv2.cvtColor(np.array(background_img), cv2.COLOR_RGB2BGR)
processed_image = replace_greenscreen_with_background(
opencv_image_resized, background_img
)
# convert cv2 image to PIL image
img = Image.fromarray(cv2.cvtColor(processed_image, cv2.COLOR_BGR2RGB))
st.image(img, caption="Generated Image")
_, buffer = cv2.imencode(".png", processed_image)
processed_image_bytes = buffer.tobytes()
# Provide a download link for the image
name_str = codenamize.codenamize(f"{datetime.utcnow()}")
st.download_button(
label="Download Image",
data=processed_image_bytes, # Convert your final image to bytes
file_name=f"{name_str}.png",
mime="image/png",
)
except Exception as e:
st.error(f"An error occurred: {e}")
| [
"Enter a prompt for the full new image:"
] |
2024-01-10 | iarahealth/iaraugen | run_txt_gen.py | #!/usr/bin/env python3
import argparse
import random
import re
from openai import OpenAI
from typing import List
from tqdm import tqdm
from text.gen import make_chatgpt_query
from text.utils import post_process_sentences
from utils.files import append_sentences_to_file, read_file
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Sentence/word generation using ChatGPT"
)
parser.add_argument(
"--input_file", type=str, default=None, help="Input file with words"
)
parser.add_argument(
"--num", type=int, default=None, help="Number of sentences or words to generate"
)
parser.add_argument(
"--context",
type=str,
default="radiologia médica",
help="Context of the generated sentences",
)
parser.add_argument(
"--query",
type=str,
default=None,
help="A query to OpenAI's ChatGPT; the first number detected in the query will be replaced by the number of sentences to generate",
)
parser.add_argument(
"--return_type",
type=str,
default="frases",
help="Type of data to generate (default: frases)",
)
parser.add_argument(
"--api_key",
type=str,
default=None,
help="OpenAI API key",
)
parser.add_argument(
"--model",
type=str,
default="gpt-3.5-turbo-16k",
help="ChatGPT model to use",
)
parser.add_argument(
"--seed",
type=int,
default=451,
help="Random seed (default: 451)",
)
parser.add_argument(
"--output",
type=str,
default=None,
help="Output file to write generated sentences",
)
args = parser.parse_args()
random.seed(args.seed)
if args.query is None:
if args.return_type == "frases":
args.query = f"Você é um médico laudando. No contexto de {args.context}, gere {args.num} {args.return_type} contendo o termo '[MASK]', separadas por nova linha."
else:
args.query = f"No contexto de {args.context}, gere {args.num} {args.return_type} separadas por nova linha."
else:
args.num = (
int(re.search(r"\d+", args.query).group())
if re.search(r"\d+", args.query)
else None
)
if args.input_file:
wordlist = read_file(args.input_file)
else:
if args.return_type == "frases" and "[MASK]" in args.query:
wordlist = []
while True:
word = input("Enter a word (or press Enter to finish): ")
if word == "":
break
wordlist.append(word)
else:
wordlist = [""]
response_sentences: List[str] = []
original_query = args.query
openai_client = OpenAI(api_key=args.api_key)
for word in tqdm(wordlist):
word = word.strip()
query = re.sub(r"\[MASK\]", word, original_query)
number_of_sentences_left = args.num
while number_of_sentences_left > 0:
print(f"\nNumber of sentences left: {number_of_sentences_left}")
print(f"Querying OpenAI's {args.model} with '{query}'...")
query_response = make_chatgpt_query(
openai_client,
query,
return_type=args.return_type,
model=args.model,
)
print(query_response)
response_sentences.extend(
[s.split(" ", 1)[1] if s[0].isdigit() else s for s in query_response]
)
number_of_sentences_left -= len(query_response)
query = re.sub(r"\d+", str(number_of_sentences_left), query)
print()
generated_sentences = post_process_sentences(response_sentences, modify=True)
print("\nFinal results:")
print("-------------------")
for sentence in generated_sentences:
print(sentence)
print(f"\nTotal: {len(generated_sentences)} sentences")
print("-------------------\n")
if args.output:
print(f"Appending generated sentences to {args.output}...")
append_sentences_to_file(args.output, generated_sentences)
| [] |
2024-01-10 | dharanijasthi/GreenSense | .github~workflows~score.py | import os
import json
import re
import openai
import math
from radon.complexity import cc_visit
from dotenv import load_dotenv
load_dotenv()
N = 10
openai.api_key = os.environ["OPENAI_API_KEY"]
env_file = os.getenv("updatedFiles")
old_and_new_code = json.loads(env_file)
print("ENV:", old_and_new_code)
def call_Chat_gpt_for_time_and_space_complexity(content):
chat_response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{
"role": "system",
"content": "You will be provided with Python code, give only Time complexity and Space Complexity all functions in json fomat with no explation"
},
{
"role": "user",
"content": content
}
],
temperature=0,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
return chat_response['choices'][0]['message']['content']
def get_cyclomitic_complexity(fun):
return cc_visit(fun)
def convert_complexity_to_number(complexity):
final_comp = 1
complexity = complexity[2:-1]
log_indexes = [i.start() for i in re.finditer('log', complexity)]
complexity = complexity.replace('log', '')
complexity = re.sub(r'[a-zA-Z]', r'n', complexity)
for id in log_indexes:
complexity = complexity[:id+1]+"log"+complexity[id+1:]
complexity = complexity.replace(' ', '')
complexity = list(complexity)
i = 0
while i < len(complexity):
if complexity[i] == "n":
final_comp *= N
elif complexity[i] == "l":
final_comp *= 1.2
i += 3
elif complexity[i] == "^":
last = complexity[i-1]
if last.isnumeric():
last = int(last)
else:
last = N
next = int(complexity[i+1]) if complexity[i+1].isnumeric() else N
if final_comp > 1:
final_comp /= last
if next > last:
final_comp = final_comp * 100 # math.pow(last,next)
elif next == last:
final_comp = final_comp * 150
else:
final_comp = final_comp * 70
i += 1
i += 1
return final_comp
# if
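# Worked example tracing the heuristic above with N = 10:
#   convert_complexity_to_number("O(1)")        -> 1
#   convert_complexity_to_number("O(n)")        -> 10
#   convert_complexity_to_number("O(n log n)")  -> 12.0  (10 for n, x1.2 for the log term,
#                                                          which absorbs its own argument)
#   convert_complexity_to_number("O(n^2)")      -> 70.0  (the "^" branch divides out the base,
#                                                          then applies a fixed penalty)
#   convert_complexity_to_number("O(2^n)")      -> 100   (exponent larger than its base)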
def give_start_rating(old_score, new_score):
delta = ((old_score-new_score)/old_score)*100
if delta <= 0:
print("No Optimisation Required")
return {'old_code': 4.5,
'new_code': 4.5}
else:
if 0 < delta <= 20:
return {'old_code': 4,
'new_code': 4.5}
elif 20 < delta <= 50:
return {'old_code': 3,
'new_code': 4.5}
elif 50 < delta <= 75:
return {'old_code': 2.5,
'new_code': 4.5}
else:
return {'old_code': 1.5,
'new_code': 4.5}
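# Worked example: give_start_rating(old_score=100, new_score=60) gives
# delta = ((100 - 60) / 100) * 100 = 40, which falls in the 20-50 band, so the
# old code gets 3 stars and the optimised code gets 4.5 stars.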
def get_score_for_code(fun):
print("Calling ChatGPT API to Get Complexities")
resp = call_Chat_gpt_for_time_and_space_complexity(fun)
resp = json.loads(resp)
print("Getting Cyclomatic Complexity")
cyclo_comp = get_cyclomitic_complexity(fun=fun)
for c in cyclo_comp:
name, comp = c.name, c.complexity
resp[name]["cyclo_complexity"] = comp
for res in resp:
code = resp[res]
score = convert_complexity_to_number(
code["time_complexity"])+convert_complexity_to_number(code["space_complexity"])+code["cyclo_complexity"]
resp[res]["score"] = score
return resp
def normlise_scores(old_scores,new_scores):
max_score = max(old_scores+new_scores)
min_score = min(old_scores+new_scores)
print("Max Score:", max_score)
print("Min Score:", min_score)
normalise_old_score = 0
for old_score in old_scores:
normalise_old_score += (old_score - min_score)/(max_score - min_score)
normalise_new_score = 0
for new_score in new_scores:
normalise_new_score += (new_score - min_score)/(max_score - min_score)
return (normalise_old_score,normalise_new_score)
if __name__ == "__main__":
star_rating_dict = {}
old_pr_score = 0
new_pr_score = 0
new_scores_list = []
old_scores_list = []
for codes in old_and_new_code:
new_code = codes["newCode"]
old_code = codes["oldCode"]
print("unoptimised Code")
path = 'utils/unoptimised_code.py'
score_resp_unoptimised = get_score_for_code(old_code)
print("\n\noptimised Code")
path = 'utils/optimised_code.py'
score_resp_optimised = get_score_for_code(new_code)
star_rating_dict[codes['path']] = []
print("\n\n")
for function in score_resp_unoptimised:
print(f"Calculating Score for Function {function}")
old_score, new_score = score_resp_unoptimised[function]["score"], score_resp_optimised[function]["score"]
if old_score < new_score:
new_score = old_score
old_pr_score += old_score
new_pr_score += new_score
old_scores_list.append(old_score)
new_scores_list.append(new_score)
print(f"Score for unoptimised Function {old_score}")
print(f"Score for optimised Function {new_score}")
print(f"Calculating Sart Rating for Function {function}")
star_rating = give_start_rating(old_score, new_score)
old_star, new_star = star_rating["old_code"], star_rating["new_code"]
old_extra = 0 if math.ceil(old_star) == old_star else 1
new_extra = 0 if math.ceil(new_star) == new_star else 1
old_star_rating = "\u2B50" * math.floor(old_star)+"\u2605"*old_extra
new_star_rating = "\u2B50" * math.floor(new_star)+"\u2605"*new_extra
print("Old Code Star Rating: "+old_star_rating)
print("New Code Star Rating:"+new_star_rating)
print("\n\n")
function_rating_dict = {
function: {
'old_score': old_score,
'new_score': new_score,
'old_star_rating': old_star_rating,
'new_star_rating': new_star_rating
}
}
star_rating_dict[codes['path']].append(function_rating_dict)
normalise_old_pr_score, normalise_new_pr_score = normlise_scores(old_scores_list, new_scores_list)
print(f'star rating dict {json.dumps(star_rating_dict)}')
star_rating = give_start_rating(normalise_old_pr_score, normalise_new_pr_score)
old_pr_star, new_pr_star = star_rating["old_code"], star_rating["new_code"]
    old_extra = 0 if math.ceil(old_pr_star) == old_pr_star else 1
    new_extra = 0 if math.ceil(new_pr_star) == new_pr_star else 1
old_pr_star_rating = "\u2B50" * math.floor(old_pr_star)+"\u2605"*old_extra
new_pr_star_rating = "\u2B50" * math.floor(new_pr_star)+"\u2605"*new_extra
print(f"old pr score: {old_pr_score}")
print(f"new pr score: {new_pr_score}")
print("Old Code Star Rating old_pr_star_rating: "+old_pr_star_rating)
print("New Code Star Rating new_pr_star_rating:"+new_pr_star_rating)
env_file = os.getenv('GITHUB_ENV')
with open(env_file, "a") as myfile:
myfile.write(f"star_ratings={json.dumps(star_rating_dict)}\n")
myfile.write(f"old_pr_score={old_pr_score}\n")
myfile.write(f"new_pr_score={new_pr_score}\n")
myfile.write(f"old_pr_star_rating={old_pr_star_rating}\n")
myfile.write(f"new_pr_star_rating={new_pr_star_rating}\n")
| [
"You will be provided with Python code, give only Time complexity and Space Complexity all functions in json fomat with no explation"
] |
2024-01-10 | EhsanSoltan251/ChatGPT-Data-Parsing-Project | Ehsan1.py | from PyPDF2 import PdfReader
import openai
import os
from os import listdir
from os.path import isfile, join
from CSVOutput import write_lists_to_csv
# absolute path of the directory which the pdfs are stored in
pdfs_directory = os.path.dirname(os.path.abspath(__file__)) + "/pdfs"
# absolute paths of each individual pdf
pdf_paths = [pdfs_directory + "/" + file for file in listdir(pdfs_directory) if
file[-4:] == ".pdf" and isfile(join(pdfs_directory, file))]
openai.api_key = ""
prompts = [
'''What devices were tested in this paper? Please give the items and a summary for each''',
'''Can you briefly summarize the single event effect testing that was done and the
found results for the device(s) that were tested?''',
'''Can you briefly summarize the total ionizing dose testing that was done and the found results
for the device(s) that were tested?''',
'''Can you briefly summarize any interesting data that was found about the device
that was tested in terms of radiation effects?'''
]
'''Converts a pdf into a string. Takes in pdf path as an argument'''
def pdfToString(path):
text = ""
with open(path, 'rb') as file:
pdf = PdfReader(file)
for page in pdf.pages:
text += page.extract_text()
return text
# runs an input through the gpt api and returns the output as a string
def gptInput(input):
gpt = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": input}]
)
return gpt.choices[0].message.content
results = []
# iterate through each pdf path
for path_index, path in enumerate(pdf_paths):
paper = pdfToString(path)
# get the paper name
index = path.find("/pdfs/")
pdf_name = path[index+6:-4]
results.append([])
for prompt in prompts:
full_prompt = prompt + paper
step = int(len(paper) / 4)
full_reply = ""
paper_length = len(paper)
paper_subsection = paper[0:int(paper_length / 3)] # try different subsections of the paper
full_prompt = prompt + paper_subsection
full_reply += gptInput(full_prompt)
results[path_index].append(full_reply)
results[path_index].insert(0, pdf_name)
headers = ['Paper Name', 'Device Tested', 'Single Event Effects', 'Total Ionizing Dose', 'Interesting Data']
data = results
output_file = 'output.csv'
write_lists_to_csv(data, output_file, headers)
print(results)
| [
"INPUT",
"PLACEHOLDERPLACEHOLDER",
"['What devices were tested in this paper? Please give the items and a summary for each', 'Can you briefly summarize the single event effect testing that was done and the\\n found results for the device(s) that were tested?', 'Can you briefly summarize the total ionizing dose testing that was done and the found results\\n for the device(s) that were tested?', 'Can you briefly summarize any interesting data that was found about the device\\n that was tested in terms of radiation effects?']"
] |
2024-01-10 | sebastienrousseau/euxis | euxis~json_utils~utilities.py | """Utilities for the json_fixes package."""
import ast
import json
import os.path
from typing import Any
from jsonschema import Draft7Validator
from euxis.config import Config
from euxis.logs import logger
LLM_DEFAULT_RESPONSE_FORMAT = "llm_response_format_1"
def extract_json_from_response(response_content: str) -> dict:
# Sometimes the response includes the JSON in a code block with ```
if response_content.startswith("```") and response_content.endswith("```"):
# Discard the first and last ```, then re-join in case the response naturally included ```
response_content = "```".join(response_content.split("```")[1:-1])
# response content comes from OpenAI as a Python `str(content_dict)`, literal_eval reverses this
try:
return ast.literal_eval(response_content)
except BaseException as e:
logger.info(f"Error parsing JSON response with literal_eval {e}")
logger.debug(f"Invalid JSON received in response: {response_content}")
# TODO: How to raise an error here without causing the program to exit?
return {}
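# Illustrative example of the two accepted shapes (the sample payloads are made up):
#
#   extract_json_from_response('{"thoughts": {"text": "hi"}, "command": {"name": "noop"}}')
#   extract_json_from_response('```{"thoughts": {"text": "hi"}, "command": {"name": "noop"}}```')
#
# Both calls return the same dict: the surrounding ``` fence is stripped before
# ast.literal_eval runs, and anything that still fails to parse is logged and
# yields an empty dict.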
def llm_response_schema(
config: Config, schema_name: str = LLM_DEFAULT_RESPONSE_FORMAT
) -> dict[str, Any]:
filename = os.path.join(os.path.dirname(__file__), f"{schema_name}.json")
with open(filename, "r") as f:
json_schema = json.load(f)
if config.openai_functions:
del json_schema["properties"]["command"]
json_schema["required"].remove("command")
return json_schema
def validate_json(
json_object: object, config: Config, schema_name: str = LLM_DEFAULT_RESPONSE_FORMAT
) -> bool:
"""
:type schema_name: object
:param schema_name: str
:type json_object: object
Returns:
bool: Whether the json_object is valid or not
"""
schema = llm_response_schema(config, schema_name)
validator = Draft7Validator(schema)
if errors := sorted(validator.iter_errors(json_object), key=lambda e: e.path):
for error in errors:
logger.debug(f"JSON Validation Error: {error}")
if config.debug_mode:
logger.error(
json.dumps(json_object, indent=4)
) # Replace 'json_object' with the variable containing the JSON data
logger.error("The following issues were found:")
for error in errors:
logger.error(f"Error: {error.message}")
return False
logger.debug("The JSON object is valid.")
return True
| [] |
2024-01-10 | sebastienrousseau/euxis | euxis~llm~api_manager.py | from __future__ import annotations
from typing import List, Optional
import openai
from openai import Model
from euxis.llm.base import CompletionModelInfo
from euxis.logs import logger
from euxis.singleton import Singleton
class ApiManager(metaclass=Singleton):
def __init__(self):
self.total_prompt_tokens = 0
self.total_completion_tokens = 0
self.total_cost = 0
self.total_budget = 0
self.models: Optional[list[Model]] = None
def reset(self):
self.total_prompt_tokens = 0
self.total_completion_tokens = 0
self.total_cost = 0
self.total_budget = 0.0
self.models = None
def update_cost(self, prompt_tokens, completion_tokens, model):
"""
Update the total cost, prompt tokens, and completion tokens.
Args:
prompt_tokens (int): The number of tokens used in the prompt.
completion_tokens (int): The number of tokens used in the completion.
model (str): The model used for the API call.
"""
# the .model property in API responses can contain version suffixes like -v2
from euxis.llm.providers.openai import OPEN_AI_MODELS
model = model[:-3] if model.endswith("-v2") else model
model_info = OPEN_AI_MODELS[model]
self.total_prompt_tokens += prompt_tokens
self.total_completion_tokens += completion_tokens
self.total_cost += prompt_tokens * model_info.prompt_token_cost / 1000
if issubclass(type(model_info), CompletionModelInfo):
self.total_cost += (
completion_tokens * model_info.completion_token_cost / 1000
)
logger.debug(f"Total running cost: ${self.total_cost:.3f}")
def set_total_budget(self, total_budget):
"""
Sets the total user-defined budget for API calls.
Args:
total_budget (float): The total budget for API calls.
"""
self.total_budget = total_budget
def get_total_prompt_tokens(self):
"""
Get the total number of prompt tokens.
Returns:
int: The total number of prompt tokens.
"""
return self.total_prompt_tokens
def get_total_completion_tokens(self):
"""
Get the total number of completion tokens.
Returns:
int: The total number of completion tokens.
"""
return self.total_completion_tokens
def get_total_cost(self):
"""
Get the total cost of API calls.
Returns:
float: The total cost of API calls.
"""
return self.total_cost
def get_total_budget(self):
"""
Get the total user-defined budget for API calls.
Returns:
float: The total budget for API calls.
"""
return self.total_budget
def get_models(self) -> List[Model]:
"""
Get list of available GPT models.
Returns:
list: List of available GPT models.
"""
if self.models is None:
all_models = openai.Model.list()["data"]
self.models = [model for model in all_models if "gpt" in model["id"]]
return self.models
| [] |
2024-01-10 | sebastienrousseau/euxis | tests~unit~test_retry_provider_openai.py | import pytest
from openai.error import APIError, RateLimitError, ServiceUnavailableError
from euxis.llm.providers import openai
@pytest.fixture(params=[RateLimitError, ServiceUnavailableError, APIError])
def error(request):
if request.param == APIError:
return request.param("Error", http_status=502)
else:
return request.param("Error")
def error_factory(error_instance, error_count, retry_count, warn_user=True):
"""Creates errors"""
class RaisesError:
def __init__(self):
self.count = 0
@openai.retry_api(
num_retries=retry_count, backoff_base=0.001, warn_user=warn_user
)
def __call__(self):
self.count += 1
if self.count <= error_count:
raise error_instance
return self.count
return RaisesError()
def test_retry_open_api_no_error(capsys):
"""Tests the retry functionality with no errors expected"""
@openai.retry_api()
def f():
return 1
result = f()
assert result == 1
output = capsys.readouterr()
assert output.out == ""
assert output.err == ""
@pytest.mark.parametrize(
"error_count, retry_count, failure",
[(2, 10, False), (2, 2, False), (10, 2, True), (3, 2, True), (1, 0, True)],
ids=["passing", "passing_edge", "failing", "failing_edge", "failing_no_retries"],
)
def test_retry_open_api_passing(capsys, error, error_count, retry_count, failure):
"""Tests the retry with simulated errors [RateLimitError, ServiceUnavailableError, APIError], but should ulimately pass"""
call_count = min(error_count, retry_count) + 1
raises = error_factory(error, error_count, retry_count)
if failure:
with pytest.raises(type(error)):
raises()
else:
result = raises()
assert result == call_count
assert raises.count == call_count
output = capsys.readouterr()
if error_count and retry_count:
if type(error) == RateLimitError:
assert "Reached rate limit, passing..." in output.out
assert "Please double check" in output.out
if type(error) == ServiceUnavailableError:
assert (
"The OpenAI API engine is currently overloaded, passing..."
in output.out
)
assert "Please double check" in output.out
if type(error) == APIError:
assert "API Bad gateway" in output.out
else:
assert output.out == ""
def test_retry_open_api_rate_limit_no_warn(capsys):
"""Tests the retry logic with a rate limit error"""
error_count = 2
retry_count = 10
raises = error_factory(RateLimitError, error_count, retry_count, warn_user=False)
result = raises()
call_count = min(error_count, retry_count) + 1
assert result == call_count
assert raises.count == call_count
output = capsys.readouterr()
assert "Reached rate limit, passing..." in output.out
assert "Please double check" not in output.out
def test_retry_open_api_service_unavairable_no_warn(capsys):
"""Tests the retry logic with a service unavairable error"""
error_count = 2
retry_count = 10
raises = error_factory(
ServiceUnavailableError, error_count, retry_count, warn_user=False
)
result = raises()
call_count = min(error_count, retry_count) + 1
assert result == call_count
assert raises.count == call_count
output = capsys.readouterr()
assert "The OpenAI API engine is currently overloaded, passing..." in output.out
assert "Please double check" not in output.out
def test_retry_openapi_other_api_error(capsys):
"""Tests the Retry logic with a non rate limit error such as HTTP500"""
error_count = 2
retry_count = 10
raises = error_factory(APIError("Error", http_status=500), error_count, retry_count)
with pytest.raises(APIError):
raises()
call_count = 1
assert raises.count == call_count
output = capsys.readouterr()
assert output.out == ""
| [] |
2024-01-10 | sebastienrousseau/euxis | tests~integration~test_provider_openai.py | from unittest.mock import MagicMock, patch
import pytest
from euxis.llm.api_manager import ApiManager
from euxis.llm.providers import openai
api_manager = ApiManager()
@pytest.fixture(autouse=True)
def reset_api_manager():
api_manager.reset()
yield
class TestProviderOpenAI:
@staticmethod
def test_create_chat_completion_debug_mode(caplog):
"""Test if debug mode logs response."""
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Who won the world series in 2020?"},
]
model = "gpt-3.5-turbo"
with patch("openai.ChatCompletion.create") as mock_create:
mock_response = MagicMock()
del mock_response.error
mock_response.usage.prompt_tokens = 10
mock_response.usage.completion_tokens = 20
mock_create.return_value = mock_response
openai.create_chat_completion(messages, model=model)
assert "Response" in caplog.text
@staticmethod
def test_create_chat_completion_empty_messages():
"""Test if empty messages result in zero tokens and cost."""
messages = []
model = "gpt-3.5-turbo"
with patch("openai.ChatCompletion.create") as mock_create:
mock_response = MagicMock()
del mock_response.error
mock_response.usage.prompt_tokens = 0
mock_response.usage.completion_tokens = 0
mock_create.return_value = mock_response
openai.create_chat_completion(messages, model=model)
assert api_manager.get_total_prompt_tokens() == 0
assert api_manager.get_total_completion_tokens() == 0
assert api_manager.get_total_cost() == 0
| [
"You are a helpful assistant.",
"Who won the world series in 2020?"
] |
2024-01-10 | cheisenmhu/CH-BSSD4350-TogetherAI | together_llm.py | # Programmer: Chris Heise ([email protected])
# Course: BSSD 4350 Agile Methodologies
# Instructor: Jonathan Lee
# Program: Together AI POC
# Purpose: Build a POC using Together AI and Langchain for inclusivity app.
# File: together_llm.py
import together
from typing import Any, Dict
from pydantic import Extra, root_validator
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
# Get the API key from the environment
from environs import Env
env = Env()
env.read_env()
class TogetherLLM(LLM):
"""Together large language models."""
model: str = "togethercomputer/llama-2-70b-chat"
"""model endpoint to use"""
together_api_key: str = env.str("TOGETHERAI_API_KEY")
"""Together API key"""
temperature: float = 0.7
"""What sampling temperature to use."""
max_tokens: int = 512
"""The maximum number of tokens to generate in the completion."""
class Config:
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the API key is set."""
api_key = get_from_dict_or_env(
values, "together_api_key", "TOGETHER_API_KEY"
)
values["together_api_key"] = api_key
return values
@property
def _llm_type(self) -> str:
"""Return type of LLM."""
return "together"
def _call(
self,
prompt: str,
**kwargs: Any,
) -> str:
"""Call to Together endpoint."""
together.api_key = self.together_api_key
output = together.Complete.create(prompt,
model=self.model,
max_tokens=self.max_tokens,
temperature=self.temperature,
)
text = output['output']['choices'][0]['text']
return text | [] |
2024-01-10 | Andy963/telegram_ai_bot | bot~__init__.py | from ai.anthropic_utils import AnthropicAIService
from ai.cloudflare_utils import CloudflareAIService
from ai.google_utils import GoogleAIService
from ai.openai_utils import OpenAIService
from bot.helper import AzureService
from config import config
from database import engine
from database.model_view import (
UserServices,
DialogServices,
ModelServices,
PromptServices,
RoleServices,
)
azure_service = AzureService()
gpt_service = OpenAIService(model_name=config.openai_engine, api_type="chatgpt")
azure_openai_service = OpenAIService(
model_name=config.azure_openai_engine, api_type="azure"
)
palm_service = GoogleAIService(
api_key=config.palm_api_key, model_name=config.palm_model_name
)
anthropic_service = AnthropicAIService(
api_key=config.claude_api_key, model_name=config.claude_model_name
)
cloudflare_service = CloudflareAIService(
token=config.cloudflare_token, account_id=config.cloudflare_account_id,
model_name=config.cloudflare_model_name)
user_db = UserServices(engine)
dialog_db = DialogServices(engine)
ai_model_db = ModelServices(engine)
prompt_db = PromptServices(engine)
role_db = RoleServices(engine)
| [] |
2024-01-10 | Andy963/telegram_ai_bot | ai~anthropic_utils.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: anthropic_utils.py
# Author: Zhou
# Date: 2023/6/14
# Copyright: 2023 Zhou
# License:
# Description: anthropic ai claude
import anthropic
from logs.log import logger
class AnthropicAIService:
max_tokens = 100000
token_threshold = 1000
def __init__(self, api_key: str, model_name: str = 'claude-2', **kwargs):
self.model_name = model_name
self.claude = anthropic.AsyncAnthropic(api_key=api_key)
self.client = anthropic.Anthropic()
def solve_context_limit(self, dialogs: list) -> list:
"""
reduce the long context to a short one
:param dialogs: [{'user':"", 'assistant':""}]
:return: dialogs list
"""
lgs = [self.client.count_tokens(d['user']) + self.client.count_tokens(
d['assistant']) for d in dialogs]
if sum(lgs) > self.max_tokens:
count = 0
total = 0
for num in lgs:
total += num
count += 1
if total > self.max_tokens:
break
dialogs = dialogs[count:]
return dialogs
def _generate_msg(self, message, dialog_messages, prompt):
"""
Generate messages for claude
:param message:
:param dialog_messages:
:param chat_mode:
"""
if not dialog_messages:
context = f"{anthropic.HUMAN_PROMPT} {prompt} {anthropic.AI_PROMPT}"
return context
dialog_messages = self.solve_context_limit(dialog_messages)
context = ''.join(
[f"{anthropic.HUMAN_PROMPT} {msg['user']} {anthropic.AI_PROMPT} \
{msg['assistant']}" for msg in dialog_messages])
context += f"{anthropic.HUMAN_PROMPT} {message} {anthropic.AI_PROMPT}"
return context
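    # Illustrative sketch (the sample text is made up): with one stored turn,
    # _generate_msg returns roughly
    #
    #   "\n\nHuman: earlier question \n\nAssistant: earlier answer"
    #   "\n\nHuman: new message \n\nAssistant:"
    #
    # concatenated -- each stored user/assistant pair is wrapped in
    # HUMAN_PROMPT / AI_PROMPT markers, and the new message ends with a bare
    # AI_PROMPT so Claude completes the assistant turn.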
async def send_message(self, message, dialog_messages=None, prompt=None):
"""
Send message to claude without stream response
"""
if dialog_messages is None:
dialog_messages = []
try:
messages = self._generate_msg(message, dialog_messages, prompt)
resp = await self.claude.completions.create(
prompt=messages,
model=self.model_name,
max_tokens_to_sample=self.token_threshold,
)
answer = resp.completion
except Exception as e:
logger.error(f"error:\n\n ask: {message} \n with error {e}")
answer = f"sth wrong with claude, please try again later."
return answer
async def send_message_stream(self, message, dialog_messages=None,
prompt=None):
"""
Send message with stream response
"""
if dialog_messages is None:
dialog_messages = []
try:
messages = self._generate_msg(message, dialog_messages, prompt)
answer = await self.claude.completions.create(
prompt=messages,
model=self.model_name,
max_tokens_to_sample=self.token_threshold,
stream=True
)
except Exception as e:
logger.error(f"error:\n\n ask: {message} \n with error {e}")
            # create an empty async generator
            async def empty_generator():
                if False:  # this will never execute
yield
answer = empty_generator()
return answer
| [] |
2024-01-10 | shreytalreja25/InterviewScheduler | backend~ai_model.py | import openai
# Configure your OpenAI API key
openai.api_key = "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"
def generate_code(resume_text):
# Use the OpenAI API to generate code based on resume_text
# Return the generated code
pass
| [] |
2024-01-10 | hudeven/datasets | datasets~openwebtext~openwebtext.py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""
from __future__ import absolute_import, division, print_function
import os
import re
from itertools import chain
import datasets
_CITATION = """\
@misc{Gokaslan2019OpenWeb,
title={OpenWebText Corpus},
author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
  howpublished={\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
year={2019}
}
"""
_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""
_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
class Openwebtext(datasets.GeneratorBasedBuilder):
"""The Open WebText dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
description="Plain text",
version=datasets.Version("1.0.0"),
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({"text": datasets.Value("string")}),
homepage="https://skylion007.github.io/OpenWebTextCorpus/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_URL)
owt_dir = os.path.join(dl_dir, "openwebtext")
subset_xzs = [
os.path.join(owt_dir, file_name)
for file_name in sorted(os.listdir(owt_dir))
if file_name.endswith("xz") # filter out ...xz.lock
]
ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
nested_txt_files = [
[
os.path.join(ex_dir, txt_file_name)
for txt_file_name in sorted(os.listdir(ex_dir))
if txt_file_name.endswith("txt")
]
for ex_dir in ex_dirs
]
txt_files = chain(*nested_txt_files)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
]
def _generate_examples(self, txt_files):
""" Yields examples. """
for idx, filepath in enumerate(txt_files):
with open(filepath, encoding="utf-8") as f:
yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
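# --- Usage sketch (added for illustration; not part of the original script) ---
# A builder like this is normally consumed through `datasets.load_dataset`; the
# full corpus download is large, so this is only a sketch.
if __name__ == "__main__":
    openwebtext = datasets.load_dataset("openwebtext", "plain_text", split="train")
    print(openwebtext[0]["text"][:200])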
| [] |
2024-01-10 | jinqiu-deng/modelverse | script~pin_gpt.py | #coding=utf-8
import os
import openai
import json
with open('./gpt_key.json') as f:
data = json.load(f)
openai.organization = data['organization']
openai.api_key = data['api_key']
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
temperature=0.2,
messages=[
{"role": "user", "content": "Redmi Note 10 Pro 5G 天玑1100旗舰芯 67W快充 120Hz旗舰变速金刚屏 幻青 8GB+128GB 智能手机 小米红米8GB+128GB 幻青 \n 小米Redmi Note10Pro 5G游戏智能手机 天玑1100旗舰芯 67W快充 机身颜色:幻青$$存储容量:6GB+128GB$$套餐类型:官方标配$ \n 是否是相同的sku"}
]
)
print(completion)
print(completion.choices[0].message.content)
| [
"Redmi Note 10 Pro 5G 天玑1100旗舰芯 67W快充 120Hz旗舰变速金刚屏 幻青 8GB+128GB 智能手机 小米红米8GB+128GB 幻青 \n 小米Redmi Note10Pro 5G游戏智能手机 天玑1100旗舰芯 67W快充 机身颜色:幻青$$存储容量:6GB+128GB$$套餐类型:官方标配$ \n 是否是相同的sku"
] |
2024-01-10 | jinqiu-deng/modelverse | server~app~handlers.py | import tornado.web
from tornado.web import RequestHandler
import asyncio
import json
import logging
import openai
import os
from .utils import CustomOpenAIClient
class MainHandler(tornado.web.RequestHandler):
def initialize(self, config, key_lock, key_state):
self.config = config
self.key_lock = key_lock
self.key_state = key_state
def set_default_headers(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.set_header("Access-Control-Allow-Headers", "x-requested-with")
self.set_header("Access-Control-Allow-Methods", "POST, GET, OPTIONS")
def options(self, *args, **kwargs):
self.set_status(204)
self.finish()
def get(self):
logging.info('Received GET request from %s', self.request.remote_ip)
self.render(os.path.join(os.path.dirname(__file__), '..', 'templates', 'index.html'))
async def decrement_key_state(self, group_name, selected_key_index):
async with self.key_lock:
self.key_state[group_name][selected_key_index] -= 1
async def post(self):
request_body_json = json.loads(self.request.body.decode('utf-8'))
# Set the group_name to 'default_group' if it's not provided in the request
group_name = request_body_json.get('group_name', 'default_group')
        selected_group = None
        for group in self.config.settings['groups']:
if group['name'] == group_name:
selected_group = group
break
if not selected_group:
self.set_status(400)
self.write({"error": "Invalid group_name provided"})
return
async with self.key_lock:
logging.info('Current key_state: %s', self.key_state)
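            # Pick the key with the fewest in-flight requests and mark it as busy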
selected_key_index = min(self.key_state[group_name], key=self.key_state[group_name].get)
self.key_state[group_name][selected_key_index] += 1
api_key = selected_group['keys'][selected_key_index]['key']
custom_openai_client = CustomOpenAIClient(api_key)
logging.info('Sending question "%s" from %s using key %s in group %s',
request_body_json,
self.request.remote_ip,
selected_key_index,
group_name)
try:
request_body_json = json.loads(self.request.body.decode('utf-8'))
allowed_properties = {
'model', 'messages', 'temperature', 'top_p', 'n', 'max_tokens',
'presence_penalty', 'frequency_penalty', 'user', 'logit_bias',
'stream'
}
filtered_request_body_json = {k: v for k, v in request_body_json.items() if k in allowed_properties}
# Check if the stream is set to True
stream = filtered_request_body_json.get('stream', False)
if stream:
# Use filtered_request_body_json for further processing with streaming
async for message in custom_openai_client.create_chat_completion_stream(filtered_request_body_json):
chunk = json.dumps(message)
self.write(chunk)
await self.flush()
else:
# Use filtered_request_body_json for further processing without streaming
completion = await custom_openai_client.create_chat_completion(filtered_request_body_json)
answer = completion['choices'][0]['message']['content']
logging.info('Generated completion "%s" for question "%s" from %s using key %s in group %s',
completion,
request_body_json,
self.request.remote_ip,
selected_key_index,
group_name)
response = {
'completion': completion
}
self.set_header('Content-Type', 'application/json')
self.write(json.dumps(response))
finally:
            # Call decrement_key_state once the request has finished processing
await self.decrement_key_state(group_name, selected_key_index)
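# --- Illustrative config shape (hypothetical values, not from the project) ---
# The handler above expects `config.settings["groups"]` and `key_state` to look
# roughly like this; each inner counter tracks in-flight requests per key index.
_EXAMPLE_SETTINGS = {
    "groups": [
        {
            "name": "default_group",
            "keys": [
                {"key": "sk-your-first-key"},
                {"key": "sk-your-second-key"},
            ],
        }
    ]
}
_EXAMPLE_KEY_STATE = {"default_group": {0: 0, 1: 0}}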
| [] |
2024-01-10 | puzzle-labs/knowledge-sdk-python | GloLoader.py | # import supporting packages and modules
from abc import ABC
import yaml
import os
import tempfile
import requests
from urllib.parse import urlparse
from typing import List
import json
import re
# import langchain modules
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders import OnlinePDFLoader
from langchain import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores.faiss import FAISS
from langchain.document_loaders import WebBaseLoader
# import puzzle-knowledge-sdk modules
from .Ngram import AddAlphaSmooth
class GloLoader(BaseLoader, ABC):
"""Loader class for `.glo` files.
Defaults to check for local file, but if file is a web path, it will download it to a temporary file, use it, then clean up the temporary file after completion.
"""
def __init__(self, file_path: str):
"""Initializes the loader with the file path."""
self.file_path, self.web_path = self._process_file_path(file_path)
def _process_file_path(self, file_path: str):
"""Handles file checking, URL validity checking, and downloading if necessary."""
web_path = None # Initialize web_path locally
if "~" in file_path:
file_path = os.path.expanduser(file_path)
if os.path.isfile(file_path):
return file_path, web_path # Return a tuple with two values
elif self._is_valid_url(file_path):
temp_dir = tempfile.TemporaryDirectory()
self.temp_dir = temp_dir
_, suffix = os.path.splitext(file_path)
temp_glo = os.path.join(temp_dir.name, f"tmp{suffix}")
if self._is_s3_url(file_path):
web_path = file_path
else:
r = requests.get(file_path)
if r.status_code != 200:
print(file_path)
web_path = file_path
with open(temp_glo, mode="wb") as f:
f.write(r.content)
return str(temp_glo), web_path # Return a tuple with two values
else:
raise ValueError("File path %s is not a valid file or URL" % file_path)
def __del__(self) -> None:
if hasattr(self, "temp_dir"):
self.temp_dir.cleanup()
@staticmethod
def _is_valid_url(url: str) -> bool:
"""Check if the url is valid."""
parsed = urlparse(url)
return bool(parsed.netloc) and bool(parsed.scheme)
@staticmethod
def _is_s3_url(url: str) -> bool:
"""check if the url is S3"""
try:
result = urlparse(url)
if result.scheme == "s3" and result.netloc:
return True
return False
except ValueError:
return False
@staticmethod
def _is_html_page(url):
try:
response = requests.head(url)
content_type = response.headers.get('content-type', '').lower()
return 'text/html' in content_type
except requests.exceptions.RequestException:
return False
@property
def source(self) -> str:
return self.web_path if self.web_path is not None else self.file_path
def import_data(self) -> dict:
"""Load concept documents."""
if isinstance(self.file_path, str):
loaded = False
# load data json or yaml or glo
# yaml load
with open(self.file_path, 'r') as glo_file:
glo_text = glo_file.read()
try:
data = yaml.safe_load(glo_text)
return data
except:
pass
# json load
try:
with open(self.file_path, 'r') as json_file:
data = json.load(json_file)
return data
except:
pass
if not loaded:
raise ValueError("Error parsing file: Not in valid JSON or YAML formats")
elif isinstance(self.file_path, dict):
data = self.file_path
return data
@staticmethod
def load_link(link, type, concept_name, load) -> Document:
if type == 'uri':
if link.endswith(".pdf"):
if load:
loader = OnlinePDFLoader(link)
data_load = loader.load()[0]
data_load.page_content = re.sub(r'\n+', '\n', data_load.page_content)
else:
data_load = Document(page_content="")
data_load.metadata = {
"concept": concept_name,
"type": "link-pdf",
"link_type": "uri",
"source": link,
"load_status": load
}
return data_load
else:
try:
if load:
loader = WebBaseLoader(link)
data_load = loader.load()[0]
data_load.page_content = re.sub(r'\n+', '\n', data_load.page_content)
else:
data_load = Document(page_content="")
data_load.metadata = {
"concept": concept_name,
"type": "link-html",
"link_type": "uri",
"source": link,
"load_status": load
}
return data_load
except Exception as e:
raise ValueError(f"Error loading link {link}: {e}")
elif type == 'text':
data_load = Document(
page_content=link if load else "",
metadata={
"concept": concept_name,
"type": "link-text",
"link_type": "text",
"source": "text",
"load_status": load
}
)
return data_load
elif type == 'glo':
loader = GloLoader(link)
data_load = loader.load()
            text = GloLoader.transform(query="", documents=data_load)
data_load = Document(
page_content=text if load else "",
metadata={
"concept": concept_name,
"type": "link-glo",
"link_type": "glo",
"source": link if isinstance(link, str) else f"Glossary: {link['name']}",
"load_status": load
}
)
return data_load
else:
raise ValueError(f"Invalid link given: Can only process html, pdf, text, and glo.")
def load(self, loadLinks=False) -> List[Document]:
data = self.import_data()
documents = []
if "concepts" in data:
concepts = data["concepts"]
for concept in concepts:
if "name" in concept and "explanation" in concept:
content = f"NAME: {concept['name']}\nEXPLANATION: {concept['explanation']}"
new_document = Document(
page_content=re.sub(r'\n+', '\n', content),
metadata={
"glo_name": data.get("name", ""),
"topic": data.get("topic", ""),
"audience": data.get("audience", ""),
"concept": concept.get("name", ""),
"type": "content",
"source": self.file_path if isinstance(self.file_path, str) else self.file_path["name"],
"links": []
}
)
if "links" in concept.keys():
for link in concept["links"]:
if "uri" in link.keys():
new_document.metadata["links"].append(GloLoader.load_link(link["uri"], type="uri", concept_name=concept["name"], load=loadLinks))
elif "text" in link.keys():
new_document.metadata["links"].append(GloLoader.load_link(link["text"], type="text", concept_name=concept["name"], load=loadLinks))
elif "glo" in link.keys():
new_document.metadata["links"].append(GloLoader.load_link(link["glo"], type="glo", concept_name=concept["name"], load=loadLinks))
documents.append(new_document)
else:
raise ValueError("Concepts must have a name and explanation")
return documents
@staticmethod
def calculate_score(sample, source, n=2, scope="word"):
# to lower
sample = sample.lower()
source = source.lower()
try:
if n in [1, 2, 3]:
if scope == "word":
# preprocess pattern
pattern = r'([!@#$%^&*()_+{}\[\]:;"\'<>,.?/\|\\])'
prep_source = [re.sub(pattern, r' \1', source).split(" ")]
prep_sample = [re.sub(pattern, r' \1', sample).split(" ")]
elif scope == "char":
prep_source = [list(source)]
prep_sample = [list(sample)]
dist_source = AddAlphaSmooth(n, prep_source)
score = dist_source.getSentenceLogLikelihood(prep_sample[0])
return score
else:
raise ValueError(f"ngram methods must have n in [1, 2, 3]")
except ValueError as e:
raise ValueError(f"Concept ranking failed: {e}")
@staticmethod
def transform(query: str, documents: List[Document], header: str=None, task: str=None, rank_function=None, additional_args: dict={}):
if header is None or header == "":
# glo name
glo_name = ""
for doc in documents:
if isinstance(doc, Document):
glo_name = doc.metadata.get("glo_name", "")
if glo_name != "":
break
header = f"GLOSSARY: {glo_name}"
if task is None or task == "":
task = "TASK: This is a glossary of concepts for your reference throughout this conversation. You should prioritize this information when answering any questions asked by the user."
max_width = additional_args.get("max_width", 1024)
if rank_function is None:
def fn(documents, max_width):
context = "CONCEPTS: \n"
# collect concepts
concepts = []
for doc in documents:
if isinstance(doc, Document):
if doc.metadata["type"] == "content":
concepts.append(doc.page_content)
# select maximum possible number of concepts to fit in context window
filtered_concepts = []
max_width -= len(context)
for concept in concepts:
if len(concept) < max_width:
filtered_concepts.append(concept)
max_width -= len(concept) - 1
else:
break
# format concepts
concepts = "\n".join([concept for concept in filtered_concepts])
context += concepts
return context
rank_function = fn
template = "{header}\n{concepts}\n\n{task}"
max_width -= len(template.format(header="", concepts="", task="")) + len(header) + len(task)
parameters = {
"query": query,
"documents": documents,
"max_width": max_width
}
        if additional_args:
parameters.update(additional_args)
concepts = rank_function(**{k: v for k, v in parameters.items() if k in rank_function.__code__.co_varnames})
prompt_template = PromptTemplate(
input_variables = ["header", "concepts", "task"],
template = template
)
prompt = prompt_template.format(header=header, concepts=concepts, task=task)
return prompt
# Concepts search and transform
@staticmethod
def rank_by_concepts(query, documents, max_width=1024):
# context variable
context = "CONCEPTS: \n"
# collect concepts
concepts = []
for doc in documents:
if isinstance(doc, Document):
if doc.metadata["type"] == "content":
concepts.append(doc.page_content)
# compute score of concepts
scores = []
for concept in concepts:
score = GloLoader.calculate_score(query, concept, n=1, scope="word")
scores.append(
{
"concept": concept,
"score": score
}
)
# sort concepts by score
sorted_concepts = sorted(scores, key=lambda x: x["score"], reverse=True)
# select maximum possible number of concepts to fit in context window
filtered_concepts = []
max_width -= len(context)
for concept in sorted_concepts:
if len(concept["concept"]) < max_width:
filtered_concepts.append(concept)
max_width -= len(concept["concept"]) - 1
else:
break
# format concepts
concepts = "\n".join([concept["concept"] for concept in filtered_concepts])
context += concepts
return context
# links search and transform
@staticmethod
def rank_by_links(query, documents, max_width=1024, chunk_size=512, chunk_overlap=128):
# context variable
context = "CONCEPTS: \n"
# text splitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
link_chunks = []
# split texts
for doc in documents:
if isinstance(doc, Document):
link_documents = doc.metadata.get("links", [])
for link_doc in link_documents:
if link_doc.metadata["type"] in ["link-html", "link-pdf", "link-text"]:
if not link_doc.metadata["load_status"]:
link_doc = GloLoader.load_link(
link=link_doc.metadata["source"],
type=link_doc.metadata["link_type"],
concept_name=doc.metadata["concept"],
load=True
)
link_chunks.extend(text_splitter.split_documents([link_doc]))
# build vector store
if len(link_chunks) == 0:
similar_texts = []
else:
links_vectorstore = FAISS.from_documents(link_chunks, OpenAIEmbeddings())
relevant_chunks = links_vectorstore.similarity_search(query)
similar_texts = [doc.page_content for doc in relevant_chunks]
# select maximum possible number of chunks to fit in context window
filtered_chunks = []
max_width -= len(context)
for chunk in similar_texts:
if len(chunk) < max_width:
filtered_chunks.append(chunk)
max_width -= len(chunk) - 1
else:
break
# format chunks
chunks = "\n".join([chunk for chunk in filtered_chunks])
context += chunks
return context
# concepts+links search and transform
@staticmethod
def rank_by_concepts_and_links(query, documents, max_width=1024, chunk_size=512, chunk_overlap=128):
# context variable
context = "CONCEPTS: \n"
# collect concepts
concepts = []
for doc in documents:
if isinstance(doc, Document):
if doc.metadata["type"] == "content":
concepts.append(doc)
# compute score of concepts
scores = []
for concept in concepts:
score = GloLoader.calculate_score(query, concept.page_content, n=1, scope="word")
scores.append(
{
"concept": concept,
"score": score
}
)
# sort concepts by score
sorted_concepts = sorted(scores, key=lambda x: x["score"], reverse=True)
# text splitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
link_chunks = []
# select maximum possible number of chunks to fit in context window
filtered_concepts = []
max_width -= len(context)
for concept in sorted_concepts:
if len(concept["concept"].page_content) < max_width:
# link qa here
link_documents = concept["concept"].metadata.get("links", [])
for link_doc in link_documents:
if not link_doc.metadata["load_status"]:
link_doc = GloLoader.load_link(
link=link_doc.metadata["source"],
type=link_doc.metadata["link_type"],
concept_name=concept["concept"].metadata["concept"],
load=True
)
if link_doc.metadata["type"] in ["link-html", "link-pdf", "link-text"]:
link_chunks.extend(text_splitter.split_documents([link_doc]))
# build vector store
if len(link_chunks) == 0:
similar_texts = []
else:
links_vectorstore = FAISS.from_documents(link_chunks, OpenAIEmbeddings())
relevant_chunks = links_vectorstore.similarity_search(query)
similar_texts = [doc.page_content for doc in relevant_chunks]
filtered_concepts.append(concept["concept"].page_content + "\nEXCERPTS: \n" + "\n".join(similar_texts[:3]))
max_width -= len(filtered_concepts[-1])
else:
break
# format chunks
concepts = "\n".join([concept for concept in filtered_concepts])
context += concepts
return context | [
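# --- Usage sketch (added for illustration; not part of the original module) ---
# "glossary.glo" is a placeholder path. Because this module uses a relative
# import for Ngram, run it as part of its package (e.g. python -m <package>.GloLoader).
if __name__ == "__main__":
    loader = GloLoader("glossary.glo")
    docs = loader.load(loadLinks=False)
    prompt = GloLoader.transform(
        query="What does this glossary say about embeddings?",
        documents=docs,
        rank_function=GloLoader.rank_by_concepts,
    )
    print(prompt)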
"{header}\n{concepts}\n\n{task}",
"concepts"
] |
2024-01-10 | Kieran-who/local-file-AI | processors~segs.py | import asyncio
import en_core_web_sm
from langchain.text_splitter import SpacyTextSplitter
from db.segment import new_seg
from utils.azure_open_ai import get_chat_completion
import json
from config import CHECK_CHUNKS
import logging
# Get the logger for the module
logger = logging.getLogger('langchain.text_splitter')
# Set the level to ERROR
logger.setLevel(logging.ERROR)
# function to test if segment is worth keeping or not
async def seg_checker(seg):
messages = [
{'role': "system", 'content': 'LLM is an expert evaluator of text. It can determine whether a piece of text is substantial or just gibberish. It carefully considers the text and whether a human would find any substantial meaning from it.'},
{"role": "user", "content": seg}
]
functions = [
{
"name": "text_checker",
"description": "Your role is to determine whether the user message is substantial text or just gibberish. In determining whether it is substantial, it should be more than just nonsenstical characters and should include full sentences and not just document headers or footers. If it is just a URL, consider it gibberish. If you are unsure, err on the side of considering it substantial.",
"parameters": {
"type": "object",
"properties": {
"meaningful": {
"type": "boolean",
"description": "return true if the text is substantial or you are unsure, return false if the text is gibberish"
},
},
"required": ["meaningful"]
}
}
]
try:
check = await get_chat_completion(messages, functions=functions, function_to_call={"name": "text_checker"})
return json.loads(check["message"]["function_call"]["arguments"])["meaningful"]
except Exception as e:
return True
def chunker(text):
text_splitter = SpacyTextSplitter(chunk_size=450, chunk_overlap=30)
texts = text_splitter.split_text(text)
return texts
async def segs_processor(text, parent_id):
segments = chunker(text)
# runs segment processing in parallel to speed up
async def process(seg):
if CHECK_CHUNKS:
test = await seg_checker(seg)
else:
test = True
if test:
seg = {
"text": seg,
}
return await new_seg(seg, parent_id)
tasks = [process(seg) for seg in segments]
seg_ids = await asyncio.gather(*tasks)
# remove None values
seg_ids = [seg_id for seg_id in seg_ids if seg_id is not None]
return seg_ids
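# --- Usage sketch (added for illustration; not part of the original module) ---
# The sample text and parent id are placeholders; new_seg ultimately writes to
# the project's vector database, so this assumes that database is configured.
if __name__ == "__main__":
    sample_text = (
        "Local-file AI splits each document into overlapping segments before "
        "indexing them. " * 20
    )
    seg_ids = asyncio.run(segs_processor(sample_text, parent_id="demo-parent-id"))
    print(f"Stored {len(seg_ids)} segments")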
| [
"LLM is an expert evaluator of text. It can determine whether a piece of text is substantial or just gibberish. It carefully considers the text and whether a human would find any substantial meaning from it."
] |
2024-01-10 | Kieran-who/local-file-AI | rename_config.py | """
RENAME THIS FILE TO: config.py
(so remove 'rename_')
"""
# AZURE BACKUP FOR WEAVIATE -> OPTIONAL
AZURE_STORAGE_CONNECTION_STRING = ""
AZURE_CONTAINER_NAME = ""
# AZURE OPENAI -> OPTIONAL
AZURE_OPENAI_KEY = ""
AZURE_OPENAI_BASE_URL = ""
# Embed model for Azure OpenAI -> this is the name of the deployment for the text embedding
# If this is not set, the embedding will be retrieved from OpenAI
EMBED_MODEL = ''
# OPENAI KEY -> OPTIONAL ALTHOUGH EITHER THIS OR AZURE OPENAI NEEDS TO BE PROVIDED
OPEN_AI_KEY = "sk-"
# Whether this is the index machine -> if False, it will not index files but will instead update the vector database from the backup (only works if a backup exists; the backup can come from any machine). Backups are retrieved using the AZURE BACKUP FOR WEAVIATE configuration
INDEX_MACHINE = True
# INDEX SETTINGS
# The path of the folder to index
INDEX_PATH = ""
# Whether to check segments before adding them to the db. This calls gpt-3.5-turbo for each segment and asks whether it should be saved. It weeds out gibberish or nonsensical segments that result from document headers or poor PDF extraction - however, it adds cost in both extra runtime and API calls.
CHECK_CHUNKS = False
# any folder names within your directory to index that you want to ignore
FOLDERS_TO_IGNORE = [""]
# Default AI Models for summarisation; this must be set
# This needs to be updated based on your choice of Azure vs OpenAI. If Azure, add the deployment name. If OpenAI, use the standard model names they provide (your API key must be able to access them)
DEFAULT_SUMMARISATION_MODEL = "" # e.g. if not using Azure: "gpt-3.5-turbo"
# if you are using Azure, there is a fallback option to make calls to OpenAI in the event the Azure API fails (e.g. rate limits or content filtering). Turn the fallback off by not adding an OpenAI key.
FALLBACK_OPEN_AI_MODEL = ""
| [] |
2024-01-10 | Kieran-who/local-file-AI | utils~open_ai_fallback.py | import openai
import asyncio
import time
from config import OPEN_AI_KEY, FALLBACK_OPEN_AI_MODEL
model_list = ["gpt-3.5-turbo", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613", "gpt-4-0314", "gpt-4-32k-0314", "gpt-4-32k-0613", "gpt-4-32k", "gpt-4-0613", "gpt-4"]
async def get_vector_openai(text):
for i in range(10):
try:
call_args = {
"api_key": OPEN_AI_KEY,
"api_type": "open_ai",
"api_base": "https://api.openai.com/v1",
"input": text,
"model": "text-embedding-ada-002"
}
response = openai.Embedding.create(**call_args)
embeddings = response['data'][0]['embedding']
return embeddings
except:
# Increase exponential backoff delay between each retry attempt
await asyncio.sleep((2 ** i) + (i ** 2))
continue
# If the code reaches here, all attempts have failed
    raise RuntimeError("Failed to fetch an embedding from OpenAI after 10 attempts")
async def get_open_ai_chat_completion(messages, model, max_res_tokens, temp, functions=None, function_to_call="auto"):
if model not in model_list:
model = FALLBACK_OPEN_AI_MODEL
for i in range(10):
try:
# Initialize the arguments dictionary
call_args = {
"api_key": OPEN_AI_KEY,
"api_type": "open_ai",
"api_base": "https://api.openai.com/v1",
"model": model,
"messages": messages,
"max_tokens": max_res_tokens,
"temperature": temp,
}
# If functions parameter is passed, include "functions" and "function_call" in the arguments dictionary
if functions is not None:
call_args["functions"] = functions
call_args["function_call"] = function_to_call
response = openai.ChatCompletion.create(**call_args)
return response['choices'][0]
except:
# Increase exponential backoff delay between each retry attempt
await asyncio.sleep((2 ** i) + i ** 2)
continue
# If the code reaches here, all attempts have failed
    raise RuntimeError("Failed to get a chat completion from OpenAI after 10 attempts")
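# --- Usage sketch (added for illustration; not part of the original module) ---
# A tiny driver for the embedding helper; requires a valid OPEN_AI_KEY in config.
if __name__ == "__main__":
    vector = asyncio.run(get_vector_openai("hello world"))
    print(f"Embedding length: {len(vector)}")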
| [] |
2024-01-10 | Bradybry/SmartGPT_eval | expert.py | import re
import json
from langchain.chat_models import ChatOpenAI
from langchain.llms import Anthropic
from langchain.schema import HumanMessage, SystemMessage
from config import OPENAI_API_KEY, ANTHROPIC_API_KEY #Import API Keys stored in a separate file. You can do this with envionrment variables as well.
import datetime
from pathlib import Path
# At the moment langchain API wrappers are needed due to the separation of chat models and language models. These wrappers allow us to use the same interface for both.
# Class to communicate with OpenAI for generating responses. Wrapped around the langchain wrappers
class OpenAIModel():
def __init__(self, openai_api_key, **model_params):
"""Initialize the OpenAI chat model.
Parameters:
openai_api_key (str): API key to access OpenAI API
model_params (dict): Parameters to configure the model like temperature, n, etc.
"""
self.chat = ChatOpenAI(openai_api_key=openai_api_key, **model_params, request_timeout=120)
def __call__(self, request_messages):
return self.chat(request_messages).content
def bulk_generate(self, message_list):
return self.chat.generate(message_list)
# Class to communicate with claude-v1.3 for generating responses. Wrapped around the langchain wrappers
class AnthropicModel():
def __init__(self, anthropic_api_key, **model_params):
"""Initialize the Anthropic chat model.
Parameters:
anthropic_api_key (str): API key to access Anthropic API
model_params (dict): Parameters to configure the model like model_name, max_tokens, etc.
"""
self.chat = Anthropic(model=model_params['model_name'],temperature=model_params['temperature'], max_tokens_to_sample=model_params['max_tokens'], anthropic_api_key=anthropic_api_key)
def __call__(self, request_messages):
# Convert request_messages into a single string to be used as preamble
# This is a hacky solution to the fact that the langchain wrapper expects a single string as input.
        # But the performance is actually really good, especially with the XML formatting method.
message = "\n\n".join([message.content for message in request_messages])
return self.chat(message)
def bulk_generate(self, message_list):
new_message_list = []
for request_messages in message_list:
new_message = "\n".join([message.content for message in request_messages])
new_message_list.append(new_message)
return self.chat.generate(new_message_list)
class LanguageExpert:
"""Defines an AI assistant/expert for natural language generation.
Attributes:
name (str): Name of the expert
system_message (str): Expert's initial greeting message
description (str): Description of the expert's abilities
example_input (str): Sample user input the expert can handle
example_output (str): Expert's response to the sample input
model_params (dict): Parameters to configure the language model
"""
def __init__(self, name: str, system_message=None, description=None,
example_input=None, example_output=None, model_params=None):
## Initialize expert attributes##
self.name = name
self.system_message = system_message
self.description = description
self.example_input = example_input
self.example_output = example_output
##Set default model parameters if none provided##
if model_params is None:
model_params = {"model_name": "claude-v1.3", "temperature": 0.00,
"frequency_penalty": 1.0, "presence_penalty": 0.5,
"n": 1, "max_tokens": 512}
self.model_params = model_params
self.gen_chat() #Generate the chat object to get model-specific responses
def serialize(self):
"""Returns a JSON-serializable representation of the expert.
Returns:
dict: Contains all expert attributes.
"""
return {
"name": self.name,
"system_message": self.system_message,
"description": self.description,
"example_input": self.example_input,
"example_output": self.example_output,
"model_params": self.model_params
}
def get_content(self):
"""Returns the expert definition in an fake XML format.
Returns:
SystemMessage: Expert definition wrapped in XML tags.
"""
example_output = self.example_output
example_input = self.example_input
content = '<assistant_definition>\n'
if self.name:
content += f'<name>{self.name}</name>\n'
if self.description:
content += f'<role>{self.description}</role>\n'
if self.system_message:
content += f'<system_message>{self.system_message}</system_message>\n'
if example_input:
content += f'<example_input>{example_input}</example_input>\n'
if example_output:
content += f'<example_output>{example_output}</example_output>\n'
content += '</assistant_definition>'
content = SystemMessage(content=content)
return content
def generate(self, message):
"""Generates a response to the input message.
Passes the input through the chat model and returns its response.
Parameters:
message (str): User's input message
Returns:
response (str): expert's response to the message
"""
human_message = HumanMessage(content=message)
request_message = [self.get_content(), human_message]
response = self.chat(request_message)
self.log([message], [response])
return response
def log(self, requests, responses):
"""Logs a conversation between the user and the expert.
Parameters:
requests (list): List of user requests/messages
responses (list): List of expert responses
"""
now = datetime.datetime.now()
filename = Path(f'./logs/{now.strftime("%Y-%m-%d_%H-%M-%S")}_{self.name}.txt')
filename.parent.mkdir(parents=True, exist_ok=True)
log = f'Expert Name: {self.name}\n\nRequests:\n'
for request in requests:
log += f'{request}\n\n'
log += 'Responses:\n'
for response in responses:
log += f'{response}\n\n'
with open(filename, 'w', encoding='utf-8') as f:
f.write(log)
def extract_texts_from_generations(self, generations):
"""Extracts plain text responses from a list of generated responses.
Parameters:
generations (list): List of generated responses from the model
Returns:
list: List of plain text responses
"""
return [generation[0].text for generation in generations]
def bulk_generate(self, messages:list):
"""Generates responses for multiple input messages.
Parameters:
messages (list): List of user input messages
Returns:
responses (list): List of corresponding expert responses
"""
human_messages = [HumanMessage(content=message) for message in messages]
request_messages = [[self.get_content(), human_message] for human_message in human_messages]
responses = self.chat.bulk_generate(request_messages)
responses = self.extract_texts_from_generations(responses.generations)
self.log(messages, responses)
return responses
def __call__(self, message:str):
"""Allows the expert to be called like a function.
Invokes the generate() method.
"""
return self.generate(message)
def change_param(self, parameter_name, new_value):
"""Changes a expert definition parameter to a new value.
Updates the internal model_params dictionary and regenerates
the chat object.
Parameters:
parameter_name (str): Name of the parameter to change
new_value: New value for the parameter
"""
if parameter_name in ["model_name", "temperature", "frequency_penalty", "presence_penalty", "n", "max_tokens"]:
self.__dict__["model_params"][parameter_name] = new_value
else:
self.__dict__[parameter_name] = new_value
self.gen_chat()
def gen_chat(self):
"""Instantiates the chat object used to generate responses.
The chat object is either an AnthropicModel or OpenAIModel, depending
on the model_name parameter.
"""
if self.model_params["model_name"]in ["gpt-4", "gpt-3.5-turbo"]:
self.chat = OpenAIModel(openai_api_key=OPENAI_API_KEY, **self.model_params)
elif self.model_params["model_name"] in ['claude-v1.3']:
self.chat = AnthropicModel(anthropic_api_key=ANTHROPIC_API_KEY, **self.model_params)
else:
            raise ValueError("Model not supported")
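# --- Usage sketch (added for illustration; not part of the original module) ---
# The expert definition below is hypothetical; model_params otherwise falls back
# to the claude-v1.3 defaults set in __init__, and valid API keys must exist in config.
if __name__ == "__main__":
    summarizer = LanguageExpert(
        name="Summarizer",
        description="Condenses technical text into a two-sentence summary.",
        system_message="Summarize the user's text in at most two sentences.",
        model_params={"model_name": "gpt-3.5-turbo", "temperature": 0.0,
                      "frequency_penalty": 1.0, "presence_penalty": 0.5,
                      "n": 1, "max_tokens": 256},
    )
    print(summarizer("LangChain wraps chat and completion models behind a single interface."))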
| [] |
2024-01-10 | rawsh/langchain | libs~experimental~langchain_experimental~comprehend_moderation~pii.py | import asyncio
from typing import Any, Dict, Optional
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationPiiError,
)
class ComprehendPII:
def __init__(
self,
client: Any,
callback: Optional[Any] = None,
unique_id: Optional[str] = None,
chain_id: Optional[str] = None,
) -> None:
self.client = client
self.moderation_beacon = {
"moderation_chain_id": chain_id,
"moderation_type": "PII",
"moderation_status": "LABELS_NOT_FOUND",
}
self.callback = callback
self.unique_id = unique_id
def validate(
self, prompt_value: str, config: Optional[Dict[str, Any]] = None
) -> str:
from langchain_experimental.comprehend_moderation.base_moderation_enums import (
BaseModerationActions,
)
if config:
action = config.get("action", BaseModerationActions.STOP)
if action not in [BaseModerationActions.STOP, BaseModerationActions.ALLOW]:
raise ValueError("Action can either be stop or allow")
return (
self._contains_pii(prompt_value=prompt_value, config=config)
if action == BaseModerationActions.STOP
else self._detect_pii(prompt_value=prompt_value, config=config)
)
else:
return self._contains_pii(prompt_value=prompt_value)
def _contains_pii(
self, prompt_value: str, config: Optional[Dict[str, Any]] = None
) -> str:
"""
Checks for Personally Identifiable Information (PII) labels above a
specified threshold.
Args:
prompt_value (str): The input text to be checked for PII labels.
config (Dict[str, Any]): Configuration for PII check and actions.
Returns:
str: the original prompt
Note:
- The provided client should be initialized with valid AWS credentials.
"""
pii_identified = self.client.contains_pii_entities(
Text=prompt_value, LanguageCode="en"
)
if self.callback and self.callback.pii_callback:
self.moderation_beacon["moderation_input"] = prompt_value
self.moderation_beacon["moderation_output"] = pii_identified
threshold = config.get("threshold", 0.5) if config else 0.5
pii_labels = config.get("labels", []) if config else []
pii_found = False
for entity in pii_identified["Labels"]:
if (entity["Score"] >= threshold and entity["Name"] in pii_labels) or (
entity["Score"] >= threshold and not pii_labels
):
pii_found = True
break
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
if pii_found:
raise ModerationPiiError
return prompt_value
def _detect_pii(self, prompt_value: str, config: Optional[Dict[str, Any]]) -> str:
"""
Detects and handles Personally Identifiable Information (PII) entities in the
given prompt text using Amazon Comprehend's detect_pii_entities API. The
function provides options to redact or stop processing based on the identified
PII entities and a provided configuration.
Args:
prompt_value (str): The input text to be checked for PII entities.
config (Dict[str, Any]): A configuration specifying how to handle
PII entities.
Returns:
str: The processed prompt text with redacted PII entities or raised
exceptions.
Raises:
ValueError: If the prompt contains configured PII entities for
stopping processing.
Note:
- If PII is not found in the prompt, the original prompt is returned.
- The client should be initialized with valid AWS credentials.
"""
pii_identified = self.client.detect_pii_entities(
Text=prompt_value, LanguageCode="en"
)
if self.callback and self.callback.pii_callback:
self.moderation_beacon["moderation_input"] = prompt_value
self.moderation_beacon["moderation_output"] = pii_identified
if (pii_identified["Entities"]) == []:
if self.callback and self.callback.pii_callback:
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
return prompt_value
pii_found = False
if not config and pii_identified["Entities"]:
for entity in pii_identified["Entities"]:
if entity["Score"] >= 0.5:
pii_found = True
break
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
if pii_found:
raise ModerationPiiError
else:
threshold = config.get("threshold", 0.5) # type: ignore
pii_labels = config.get("labels", []) # type: ignore
mask_marker = config.get("mask_character", "*") # type: ignore
pii_found = False
for entity in pii_identified["Entities"]:
if (
pii_labels
and entity["Type"] in pii_labels
and entity["Score"] >= threshold
) or (not pii_labels and entity["Score"] >= threshold):
pii_found = True
char_offset_begin = entity["BeginOffset"]
char_offset_end = entity["EndOffset"]
prompt_value = (
prompt_value[:char_offset_begin]
+ mask_marker * (char_offset_end - char_offset_begin)
+ prompt_value[char_offset_end:]
)
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
return prompt_value
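# --- Usage sketch (added for illustration; not part of the original module) ---
# Assumes AWS credentials with Comprehend access; the region, prompt, and
# moderation config below are placeholder values.
if __name__ == "__main__":
    import boto3

    from langchain_experimental.comprehend_moderation.base_moderation_enums import (
        BaseModerationActions,
    )

    pii_check = ComprehendPII(client=boto3.client("comprehend", region_name="us-east-1"))
    redacted = pii_check.validate(
        "My name is Jane Doe and my phone number is 555-0100.",
        config={"action": BaseModerationActions.ALLOW, "threshold": 0.5, "mask_character": "*"},
    )
    print(redacted)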
| [] |
2024-01-10 | rawsh/langchain | libs~experimental~langchain_experimental~comprehend_moderation~toxicity.py | import asyncio
import importlib
import warnings
from typing import Any, Dict, List, Optional
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationToxicityError,
)
class ComprehendToxicity:
def __init__(
self,
client: Any,
callback: Optional[Any] = None,
unique_id: Optional[str] = None,
chain_id: Optional[str] = None,
) -> None:
self.client = client
self.moderation_beacon = {
"moderation_chain_id": chain_id,
"moderation_type": "Toxicity",
"moderation_status": "LABELS_NOT_FOUND",
}
self.callback = callback
self.unique_id = unique_id
def _toxicity_init_validate(self, max_size: int) -> Any:
"""
Validate and initialize toxicity processing configuration.
Args:
max_size (int): Maximum sentence size defined in the configuration object.
Raises:
Exception: If the maximum sentence size exceeds the 5KB limit.
Note:
This function ensures that the NLTK punkt tokenizer is downloaded if not
already present.
Returns:
None
"""
if max_size > 1024 * 5:
raise Exception("The sentence length should not exceed 5KB.")
try:
nltk = importlib.import_module("nltk")
nltk.data.find("tokenizers/punkt")
return nltk
except ImportError:
raise ModuleNotFoundError(
"Could not import nltk python package. "
"Please install it with `pip install nltk`."
)
except LookupError:
nltk.download("punkt")
def _split_paragraph(
self, prompt_value: str, max_size: int = 1024 * 4
) -> List[List[str]]:
"""
Split a paragraph into chunks of sentences, respecting the maximum size limit.
Args:
paragraph (str): The input paragraph to be split into chunks
max_size (int, optional): The maximum size limit in bytes for each chunk
Defaults to 1024.
Returns:
List[List[str]]: A list of chunks, where each chunk is a list of sentences
Note:
This function validates the maximum sentence size based on service limits
using the 'toxicity_init_validate' function. It uses the NLTK sentence
tokenizer to split the paragraph into sentences.
"""
# validate max. sentence size based on Service limits
nltk = self._toxicity_init_validate(max_size)
sentences = nltk.sent_tokenize(prompt_value)
chunks = []
current_chunk = [] # type: ignore
current_size = 0
for sentence in sentences:
sentence_size = len(sentence.encode("utf-8"))
# If adding a new sentence exceeds max_size or
# current_chunk has 10 sentences, start a new chunk
if (current_size + sentence_size > max_size) or (len(current_chunk) >= 10):
if current_chunk: # Avoid appending empty chunks
chunks.append(current_chunk)
current_chunk = []
current_size = 0
current_chunk.append(sentence)
current_size += sentence_size
# Add any remaining sentences
if current_chunk:
chunks.append(current_chunk)
return chunks
def validate(
self, prompt_value: str, config: Optional[Dict[str, Any]] = None
) -> str:
"""
Check the toxicity of a given text prompt using AWS Comprehend service
and apply actions based on configuration.
Args:
prompt_value (str): The text content to be checked for toxicity.
config (Dict[str, Any]): Configuration for toxicity checks and actions.
Returns:
str: The original prompt_value if allowed or no toxicity found.
Raises:
ValueError: If the prompt contains toxic labels and cannot be
processed based on the configuration.
"""
chunks = self._split_paragraph(prompt_value=prompt_value)
for sentence_list in chunks:
segments = [{"Text": sentence} for sentence in sentence_list]
response = self.client.detect_toxic_content(
TextSegments=segments, LanguageCode="en"
)
if self.callback and self.callback.toxicity_callback:
self.moderation_beacon["moderation_input"] = segments # type: ignore
self.moderation_beacon["moderation_output"] = response
if config:
from langchain_experimental.comprehend_moderation.base_moderation_enums import ( # noqa: E501
BaseModerationActions,
)
toxicity_found = False
action = config.get("action", BaseModerationActions.STOP)
if action not in [
BaseModerationActions.STOP,
BaseModerationActions.ALLOW,
]:
raise ValueError("Action can either be stop or allow")
threshold = config.get("threshold", 0.5) if config else 0.5
toxicity_labels = config.get("labels", []) if config else []
if action == BaseModerationActions.STOP:
for item in response["ResultList"]:
for label in item["Labels"]:
if (
label
and (
not toxicity_labels
or label["Name"] in toxicity_labels
)
and label["Score"] >= threshold
):
toxicity_found = True
break
if action == BaseModerationActions.ALLOW:
if not toxicity_labels:
warnings.warn(
"You have allowed toxic content without specifying "
"any toxicity labels."
)
else:
for item in response["ResultList"]:
for label in item["Labels"]:
if (
label["Name"] in toxicity_labels
and label["Score"] >= threshold
):
toxicity_found = True
break
if self.callback and self.callback.toxicity_callback:
if toxicity_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_toxicity(
self.moderation_beacon, self.unique_id
)
)
if toxicity_found:
raise ModerationToxicityError
else:
if response["ResultList"]:
detected_toxic_labels = list()
for item in response["ResultList"]:
detected_toxic_labels.extend(item["Labels"])
if any(item["Score"] >= 0.5 for item in detected_toxic_labels):
if self.callback and self.callback.toxicity_callback:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_toxicity(
self.moderation_beacon, self.unique_id
)
)
raise ModerationToxicityError
return prompt_value
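# --- Usage sketch (added for illustration; not part of the original module) ---
# Assumes AWS credentials with access to Comprehend's toxic-content detection
# and that nltk is installed; the region and the sample sentence are placeholders.
if __name__ == "__main__":
    import boto3

    toxicity_check = ComprehendToxicity(client=boto3.client("comprehend", region_name="us-east-1"))
    print(toxicity_check.validate("This is a perfectly polite sentence."))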
| [] |
2024-01-10 | rawsh/langchain | libs~langchain~langchain~prompts~loading.py | """Load prompts."""
import json
import logging
from pathlib import Path
from typing import Union
import yaml
from langchain.output_parsers.regex import RegexParser
from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import BaseLLMOutputParser, BasePromptTemplate, StrOutputParser
from langchain.utilities.loading import try_load_from_hub
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/prompts/"
logger = logging.getLogger(__name__)
def load_prompt_from_config(config: dict) -> BasePromptTemplate:
"""Load prompt from Config Dict."""
if "_type" not in config:
logger.warning("No `_type` key found, defaulting to `prompt`.")
config_type = config.pop("_type", "prompt")
if config_type not in type_to_loader_dict:
raise ValueError(f"Loading {config_type} prompt not supported")
prompt_loader = type_to_loader_dict[config_type]
# Unclear why type error is being thrown here.
# Incompatible return value type (got "Runnable[Dict[Any, Any], PromptValue]",
# expected "BasePromptTemplate") [return-value]
return prompt_loader(config) # type: ignore[return-value]
def _load_template(var_name: str, config: dict) -> dict:
"""Load template from the path if applicable."""
# Check if template_path exists in config.
if f"{var_name}_path" in config:
# If it does, make sure template variable doesn't also exist.
if var_name in config:
raise ValueError(
f"Both `{var_name}_path` and `{var_name}` cannot be provided."
)
# Pop the template path from the config.
template_path = Path(config.pop(f"{var_name}_path"))
# Load the template.
if template_path.suffix == ".txt":
with open(template_path) as f:
template = f.read()
else:
raise ValueError
# Set the template variable to the extracted variable.
config[var_name] = template
return config
def _load_examples(config: dict) -> dict:
"""Load examples if necessary."""
if isinstance(config["examples"], list):
pass
elif isinstance(config["examples"], str):
with open(config["examples"]) as f:
if config["examples"].endswith(".json"):
examples = json.load(f)
elif config["examples"].endswith((".yaml", ".yml")):
examples = yaml.safe_load(f)
else:
raise ValueError(
"Invalid file format. Only json or yaml formats are supported."
)
config["examples"] = examples
else:
raise ValueError("Invalid examples format. Only list or string are supported.")
return config
def _load_output_parser(config: dict) -> dict:
"""Load output parser."""
if "output_parser" in config and config["output_parser"]:
_config = config.pop("output_parser")
output_parser_type = _config.pop("_type")
if output_parser_type == "regex_parser":
output_parser: BaseLLMOutputParser = RegexParser(**_config)
elif output_parser_type == "default":
output_parser = StrOutputParser(**_config)
else:
raise ValueError(f"Unsupported output parser {output_parser_type}")
config["output_parser"] = output_parser
return config
def _load_few_shot_prompt(config: dict) -> FewShotPromptTemplate:
"""Load the "few shot" prompt from the config."""
# Load the suffix and prefix templates.
config = _load_template("suffix", config)
config = _load_template("prefix", config)
# Load the example prompt.
if "example_prompt_path" in config:
if "example_prompt" in config:
raise ValueError(
"Only one of example_prompt and example_prompt_path should "
"be specified."
)
config["example_prompt"] = load_prompt(config.pop("example_prompt_path"))
else:
config["example_prompt"] = load_prompt_from_config(config["example_prompt"])
# Load the examples.
config = _load_examples(config)
config = _load_output_parser(config)
return FewShotPromptTemplate(**config)
def _load_prompt(config: dict) -> PromptTemplate:
"""Load the prompt template from config."""
# Load the template from disk if necessary.
config = _load_template("template", config)
config = _load_output_parser(config)
return PromptTemplate(**config)
def load_prompt(path: Union[str, Path]) -> BasePromptTemplate:
"""Unified method for loading a prompt from LangChainHub or local fs."""
if hub_result := try_load_from_hub(
path, _load_prompt_from_file, "prompts", {"py", "json", "yaml"}
):
return hub_result
else:
return _load_prompt_from_file(path)
def _load_prompt_from_file(file: Union[str, Path]) -> BasePromptTemplate:
"""Load prompt from file."""
# Convert file to a Path object.
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
# Load from either json or yaml.
if file_path.suffix == ".json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix == ".yaml":
with open(file_path, "r") as f:
config = yaml.safe_load(f)
else:
raise ValueError(f"Got unsupported file type {file_path.suffix}")
# Load the prompt from the config now.
return load_prompt_from_config(config)
type_to_loader_dict = {
"prompt": _load_prompt,
"few_shot": _load_few_shot_prompt,
# "few_shot_with_templates": _load_few_shot_with_templates_prompt,
}
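# --- Usage sketch (added for illustration; not part of the original module) ---
# "my_prompt.yaml" is a hypothetical file containing a config such as:
#   _type: prompt
#   input_variables: ["topic"]
#   template: "Write one sentence about {topic}."
if __name__ == "__main__":
    prompt = load_prompt("my_prompt.yaml")
    print(prompt.format(topic="prompt serialization"))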
| [
"PLACEHOLDER_path"
] |
2024-01-10 | clementecbb/GeneradorResumen | ResumenAPI.py | import docx
import openai
# Configure the OpenAI API key
openai.api_key = 'key'
# Open the Word document
doc = docx.Document('informe.docx')
# Read the document's contents
document_text = '\n'.join([paragraph.text for paragraph in doc.paragraphs])
# Send a request to the API to generate the summary
response = openai.Completion.create(
engine='text-davinci-003',
prompt=document_text,
max_tokens=150,
temperature=0.3,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
summary = response.choices[0].text.strip()
# Print the generated summary
print(summary)
| [] |
2024-01-10 | Xeppyz/ChatBot-with-DataBase | myenv~Lib~site-packages~langsmith~client.py | """The LangSmith Client."""
from __future__ import annotations
import collections
import concurrent
import datetime
import functools
import importlib.metadata
import io
import json
import logging
import os
import socket
import uuid
import weakref
from typing import (
TYPE_CHECKING,
Any,
Callable,
DefaultDict,
Dict,
Iterator,
List,
Mapping,
Optional,
Sequence,
Tuple,
Union,
cast,
)
from urllib import parse as urllib_parse
import requests
from requests import adapters as requests_adapters
from urllib3.util import Retry
from langsmith import env as ls_env
from langsmith import schemas as ls_schemas
from langsmith import utils as ls_utils
from langsmith.evaluation import evaluator as ls_evaluator
if TYPE_CHECKING:
import pandas as pd
logger = logging.getLogger(__name__)
def _is_localhost(url: str) -> bool:
"""Check if the URL is localhost.
Parameters
----------
url : str
The URL to check.
Returns
-------
bool
True if the URL is localhost, False otherwise.
"""
try:
netloc = urllib_parse.urlsplit(url).netloc.split(":")[0]
ip = socket.gethostbyname(netloc)
return ip == "127.0.0.1" or ip.startswith("0.0.0.0") or ip.startswith("::")
except socket.gaierror:
return False
def _is_langchain_hosted(url: str) -> bool:
"""Check if the URL is langchain hosted.
Parameters
----------
url : str
The URL to check.
Returns
-------
bool
True if the URL is langchain hosted, False otherwise.
"""
try:
netloc = urllib_parse.urlsplit(url).netloc.split(":")[0]
return netloc.endswith("langchain.com")
except Exception:
return False
ID_TYPE = Union[uuid.UUID, str]
def _default_retry_config() -> Retry:
"""Get the default retry configuration.
If urllib3 version is 1.26 or greater, retry on all methods.
Returns
-------
Retry
The default retry configuration.
"""
retry_params = dict(
total=3,
status_forcelist=[502, 503, 504, 408, 425, 429],
backoff_factor=0.5,
# Sadly urllib3 1.x doesn't support backoff_jitter
raise_on_redirect=False,
raise_on_status=False,
)
# the `allowed_methods` keyword is not available in urllib3 < 1.26
# check to see if urllib3 version is 1.26 or greater
urllib3_version = importlib.metadata.version("urllib3")
use_allowed_methods = tuple(map(int, urllib3_version.split("."))) >= (1, 26)
if use_allowed_methods:
# Retry on all methods
retry_params["allowed_methods"] = None
return Retry(**retry_params) # type: ignore
def _serialize_json(obj: Any) -> str:
"""Serialize an object to JSON.
Parameters
----------
obj : Any
The object to serialize.
Returns
-------
str
The serialized JSON string.
Raises
------
TypeError
If the object type is not serializable.
"""
if isinstance(obj, datetime.datetime):
return obj.isoformat()
else:
return str(obj)
def close_session(session: requests.Session) -> None:
"""Close the session.
Parameters
----------
session : Session
The session to close.
"""
logger.debug("Closing Client.session")
session.close()
def _validate_api_key_if_hosted(api_url: str, api_key: Optional[str]) -> None:
"""Verify API key is provided if url not localhost.
Parameters
----------
api_url : str
The API URL.
api_key : str or None
The API key.
Raises
------
LangSmithUserError
If the API key is not provided when using the hosted service.
"""
# If the domain is langchain.com, raise error if no api_key
if not api_key:
if _is_langchain_hosted(api_url):
raise ls_utils.LangSmithUserError(
"API key must be provided when using hosted LangSmith API"
)
def _get_api_key(api_key: Optional[str]) -> Optional[str]:
api_key = api_key if api_key is not None else os.getenv("LANGCHAIN_API_KEY")
if api_key is None or not api_key.strip():
return None
return api_key.strip().strip('"').strip("'")
def _get_api_url(api_url: Optional[str], api_key: Optional[str]) -> str:
_api_url = (
api_url
if api_url is not None
else os.getenv(
"LANGCHAIN_ENDPOINT",
"https://api.smith.langchain.com" if api_key else "http://localhost:1984",
)
)
if not _api_url.strip():
raise ls_utils.LangSmithUserError("LangSmith API URL cannot be empty")
return _api_url.strip().strip('"').strip("'").rstrip("/")
def _hide_inputs(inputs: Dict[str, Any]) -> Dict[str, Any]:
if os.environ.get("LANGCHAIN_HIDE_INPUTS") == "true":
return {}
return inputs
def _hide_outputs(outputs: Dict[str, Any]) -> Dict[str, Any]:
if os.environ.get("LANGCHAIN_HIDE_OUTPUTS") == "true":
return {}
return outputs
class Client:
"""Client for interacting with the LangSmith API."""
__slots__ = [
"__weakref__",
"api_url",
"api_key",
"retry_config",
"timeout_ms",
"session",
"_get_data_type_cached",
"_web_url",
"_tenant_id",
]
def __init__(
self,
api_url: Optional[str] = None,
*,
api_key: Optional[str] = None,
retry_config: Optional[Retry] = None,
timeout_ms: Optional[int] = None,
web_url: Optional[str] = None,
session: Optional[requests.Session] = None,
) -> None:
"""Initialize a Client instance.
Parameters
----------
api_url : str or None, default=None
URL for the LangSmith API. Defaults to the LANGCHAIN_ENDPOINT
environment variable or http://localhost:1984 if not set.
api_key : str or None, default=None
API key for the LangSmith API. Defaults to the LANGCHAIN_API_KEY
environment variable.
retry_config : Retry or None, default=None
Retry configuration for the HTTPAdapter.
timeout_ms : int or None, default=None
Timeout in milliseconds for the HTTPAdapter.
web_url : str or None, default=None
URL for the LangSmith web app. Default is auto-inferred from
the ENDPOINT.
session: requests.Session or None, default=None
The session to use for requests. If None, a new session will be
created.
Raises
------
LangSmithUserError
If the API key is not provided when using the hosted service.
"""
self.api_key = _get_api_key(api_key)
self.api_url = _get_api_url(api_url, self.api_key)
_validate_api_key_if_hosted(self.api_url, self.api_key)
self.retry_config = retry_config or _default_retry_config()
self.timeout_ms = timeout_ms or 7000
self._web_url = web_url
self._tenant_id: Optional[uuid.UUID] = None
# Create a session and register a finalizer to close it
self.session = session if session else requests.Session()
weakref.finalize(self, close_session, self.session)
# Mount the HTTPAdapter with the retry configuration
adapter = requests_adapters.HTTPAdapter(max_retries=self.retry_config)
self.session.mount("http://", adapter)
self.session.mount("https://", adapter)
self._get_data_type_cached = functools.lru_cache(maxsize=10)(
self._get_data_type
)
def _repr_html_(self) -> str:
"""Return an HTML representation of the instance with a link to the URL.
Returns
-------
str
The HTML representation of the instance.
"""
link = self._host_url
return f'<a href="{link}", target="_blank" rel="noopener">LangSmith Client</a>'
def __repr__(self) -> str:
"""Return a string representation of the instance with a link to the URL.
Returns
-------
str
The string representation of the instance.
"""
return f"Client (API URL: {self.api_url})"
@property
def _host_url(self) -> str:
"""The web host url."""
if self._web_url:
link = self._web_url
elif _is_localhost(self.api_url):
link = "http://localhost"
elif "dev" in self.api_url.split(".", maxsplit=1)[0]:
link = "https://dev.smith.langchain.com"
else:
link = "https://smith.langchain.com"
return link
@property
def _headers(self) -> Dict[str, str]:
"""Get the headers for the API request.
Returns
-------
Dict[str, str]
The headers for the API request.
"""
headers = {}
if self.api_key:
headers["x-api-key"] = self.api_key
return headers
def request_with_retries(
self,
request_method: str,
url: str,
request_kwargs: Mapping,
) -> requests.Response:
"""Send a request with retries.
Parameters
----------
request_method : str
The HTTP request method.
url : str
The URL to send the request to.
request_kwargs : Mapping
Additional request parameters.
Returns
-------
Response
The response object.
Raises
------
LangSmithAPIError
If a server error occurs.
LangSmithUserError
If the request fails.
LangSmithConnectionError
If a connection error occurs.
LangSmithError
If the request fails.
"""
try:
response = self.session.request(
request_method, url, stream=False, **request_kwargs
)
ls_utils.raise_for_status_with_text(response)
return response
except requests.HTTPError as e:
if response is not None:
if response.status_code == 500:
raise ls_utils.LangSmithAPIError(
f"Server error caused failure to {request_method} {url} in"
f" LangSmith API. {repr(e)}"
)
elif response.status_code == 429:
raise ls_utils.LangSmithRateLimitError(
f"Rate limit exceeded for {url}. {repr(e)}"
)
elif response is not None and response.status_code == 401:
raise ls_utils.LangSmithAuthError(
f"Authentication failed for {url}. {repr(e)}"
)
else:
raise ls_utils.LangSmithError(
f"Failed to {request_method} {url} in LangSmith API. {repr(e)}"
)
else:
raise ls_utils.LangSmithUserError(
f"Failed to {request_method} {url} in LangSmith API. {repr(e)}"
)
except requests.ConnectionError as e:
raise ls_utils.LangSmithConnectionError(
f"Connection error caused failure to {request_method} {url}"
" in LangSmith API. Please confirm your LANGCHAIN_ENDPOINT."
f" {repr(e)}"
) from e
except Exception as e:
args = list(e.args)
msg = args[1] if len(args) > 1 else ""
msg = msg.replace("session", "session (project)")
emsg = "\n".join([args[0]] + [msg] + args[2:])
raise ls_utils.LangSmithError(
f"Failed to {request_method} {url} in LangSmith API. {emsg}"
) from e
def _get_with_retries(
self, path: str, params: Optional[Dict[str, Any]] = None
) -> requests.Response:
"""Send a GET request with retries.
Parameters
----------
path : str
The path of the request URL.
params : Dict[str, Any] or None, default=None
The query parameters.
Returns
-------
Response
The response object.
Raises
------
LangSmithAPIError
If a server error occurs.
LangSmithUserError
If the request fails.
LangSmithConnectionError
If a connection error occurs.
LangSmithError
If the request fails.
"""
return self.request_with_retries(
"get",
f"{self.api_url}{path}",
request_kwargs={
"params": params,
"headers": self._headers,
"timeout": self.timeout_ms / 1000,
},
)
def _get_paginated_list(
self, path: str, *, params: Optional[dict] = None
) -> Iterator[dict]:
"""Get a paginated list of items.
Parameters
----------
path : str
The path of the request URL.
params : dict or None, default=None
The query parameters.
Yields
------
dict
The items in the paginated list.
"""
params_ = params.copy() if params else {}
offset = params_.get("offset", 0)
params_["limit"] = params_.get("limit", 100)
while True:
params_["offset"] = offset
response = self._get_with_retries(path, params=params_)
items = response.json()
if not items:
break
yield from items
if len(items) < params_["limit"]:
# offset and limit aren't respected if we're
# querying for specific values
break
offset += len(items)
def upload_dataframe(
self,
df: pd.DataFrame,
name: str,
input_keys: Sequence[str],
output_keys: Sequence[str],
*,
description: Optional[str] = None,
data_type: Optional[ls_schemas.DataType] = ls_schemas.DataType.kv,
) -> ls_schemas.Dataset:
"""Upload a dataframe as individual examples to the LangSmith API.
Parameters
----------
df : pd.DataFrame
The dataframe to upload.
name : str
The name of the dataset.
input_keys : Sequence[str]
The input keys.
output_keys : Sequence[str]
The output keys.
description : str or None, default=None
The description of the dataset.
data_type : DataType or None, default=DataType.kv
The data type of the dataset.
Returns
-------
Dataset
The uploaded dataset.
Raises
------
ValueError
If the csv_file is not a string or tuple.
"""
csv_file = io.BytesIO()
df.to_csv(csv_file, index=False)
csv_file.seek(0)
return self.upload_csv(
("data.csv", csv_file),
input_keys=input_keys,
output_keys=output_keys,
description=description,
name=name,
data_type=data_type,
)
def upload_csv(
self,
csv_file: Union[str, Tuple[str, io.BytesIO]],
input_keys: Sequence[str],
output_keys: Sequence[str],
*,
name: Optional[str] = None,
description: Optional[str] = None,
data_type: Optional[ls_schemas.DataType] = ls_schemas.DataType.kv,
) -> ls_schemas.Dataset:
"""Upload a CSV file to the LangSmith API.
Parameters
----------
csv_file : str or Tuple[str, BytesIO]
The CSV file to upload. If a string, it should be the path to the file.
If a tuple, it should be a tuple containing the filename
and a BytesIO object.
input_keys : Sequence[str]
The input keys.
output_keys : Sequence[str]
The output keys.
name : str or None, default=None
The name of the dataset.
description : str or None, default=None
The description of the dataset.
data_type : DataType or None, default=DataType.kv
The data type of the dataset.
Returns
-------
Dataset
The uploaded dataset.
Raises
------
ValueError
If the csv_file is not a string or tuple.
"""
data = {
"input_keys": input_keys,
"output_keys": output_keys,
}
if name:
data["name"] = name
if description:
data["description"] = description
if data_type:
data["data_type"] = ls_utils.get_enum_value(data_type)
if isinstance(csv_file, str):
with open(csv_file, "rb") as f:
file_ = {"file": f}
response = self.session.post(
self.api_url + "/datasets/upload",
headers=self._headers,
data=data,
files=file_,
)
elif isinstance(csv_file, tuple):
response = self.session.post(
self.api_url + "/datasets/upload",
headers=self._headers,
data=data,
files={"file": csv_file},
)
else:
raise ValueError("csv_file must be a string or tuple")
ls_utils.raise_for_status_with_text(response)
result = response.json()
# TODO: Make this more robust server-side
if "detail" in result and "already exists" in result["detail"]:
file_name = csv_file if isinstance(csv_file, str) else csv_file[0]
file_name = file_name.split("/")[-1]
raise ValueError(f"Dataset {file_name} already exists")
return ls_schemas.Dataset(**result, _host_url=self._host_url)
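    # Editor's note: hedged example of the two upload paths above. The column
    # names, file path, and dataset name are illustrative assumptions, and
    # `client` is assumed to be a Client instance constructed as shown earlier.
    #
    #     import pandas as pd
    #
    #     df = pd.DataFrame({"question": ["What is 2+2?"], "answer": ["4"]})
    #     dataset = client.upload_dataframe(
    #         df,
    #         name="qa-smoke-test",
    #         input_keys=["question"],
    #         output_keys=["answer"],
    #         description="Tiny QA dataset uploaded from a dataframe",
    #     )
    #     # upload_csv accepts either a path or a (filename, BytesIO) tuple:
    #     dataset = client.upload_csv(
    #         "data/qa.csv", input_keys=["question"], output_keys=["answer"]
    #     )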
def create_run(
self,
name: str,
inputs: Dict[str, Any],
run_type: str,
*,
execution_order: Optional[int] = None,
**kwargs: Any,
) -> None:
"""Persist a run to the LangSmith API.
Parameters
----------
name : str
The name of the run.
inputs : Dict[str, Any]
The input values for the run.
run_type : str
The type of the run, such as tool, chain, llm, retriever,
embedding, prompt, or parser.
execution_order : int or None, default=None
The position of the run in the full trace's execution sequence.
All root run traces have execution_order 1.
**kwargs : Any
Additional keyword arguments.
Raises
------
LangSmithUserError
If the API key is not provided when using the hosted service.
"""
project_name = kwargs.pop(
"project_name",
kwargs.pop(
"session_name",
os.environ.get(
# TODO: Deprecate LANGCHAIN_SESSION
"LANGCHAIN_PROJECT",
os.environ.get("LANGCHAIN_SESSION", "default"),
),
),
)
run_create = {
**kwargs,
"session_name": project_name,
"name": name,
"inputs": _hide_inputs(inputs),
"run_type": run_type,
"execution_order": execution_order if execution_order is not None else 1,
}
if "outputs" in run_create:
run_create["outputs"] = _hide_outputs(run_create["outputs"])
run_extra = cast(dict, run_create.setdefault("extra", {}))
runtime = run_extra.setdefault("runtime", {})
runtime_env = ls_env.get_runtime_and_metrics()
run_extra["runtime"] = {**runtime_env, **runtime}
headers = {**self._headers, "Accept": "application/json"}
self.request_with_retries(
"post",
f"{self.api_url}/runs",
request_kwargs={
"data": json.dumps(run_create, default=_serialize_json),
"headers": headers,
"timeout": self.timeout_ms / 1000,
},
)
def update_run(
self,
run_id: ID_TYPE,
*,
end_time: Optional[datetime.datetime] = None,
error: Optional[str] = None,
inputs: Optional[Dict] = None,
outputs: Optional[Dict] = None,
events: Optional[Sequence[dict]] = None,
**kwargs: Any,
) -> None:
"""Update a run in the LangSmith API.
Parameters
----------
run_id : str or UUID
The ID of the run to update.
end_time : datetime or None
The end time of the run.
error : str or None, default=None
The error message of the run.
inputs : Dict or None, default=None
The input values for the run.
outputs : Dict or None, default=None
The output values for the run.
events : Sequence[dict] or None, default=None
The events for the run.
**kwargs : Any
Kwargs are ignored.
"""
headers = {**self._headers, "Accept": "application/json"}
data: Dict[str, Any] = {}
if end_time is not None:
data["end_time"] = end_time.isoformat()
if error is not None:
data["error"] = error
if inputs is not None:
data["inputs"] = _hide_inputs(inputs)
if outputs is not None:
data["outputs"] = _hide_outputs(outputs)
if events is not None:
data["events"] = events
self.request_with_retries(
"patch",
f"{self.api_url}/runs/{run_id}",
request_kwargs={
"data": json.dumps(data, default=_serialize_json),
"headers": headers,
"timeout": self.timeout_ms / 1000,
},
)
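    # Editor's note: hedged run-lifecycle sketch. create_run persists the start
    # of a trace and update_run patches it later with outputs and an end time.
    # The run id, project name, and payloads are made-up illustrations, the
    # `id` field is passed through **kwargs, and `client` is a Client instance.
    #
    #     import datetime, uuid
    #
    #     run_id = uuid.uuid4()
    #     client.create_run(
    #         name="my-llm-call",
    #         inputs={"prompt": "Say hello"},
    #         run_type="llm",
    #         id=run_id,
    #         project_name="demo-project",
    #     )
    #     client.update_run(
    #         run_id,
    #         outputs={"generation": "Hello!"},
    #         end_time=datetime.datetime.now(datetime.timezone.utc),
    #     )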
def _load_child_runs(self, run: ls_schemas.Run) -> ls_schemas.Run:
"""Load child runs for a given run.
Parameters
----------
run : Run
The run to load child runs for.
Returns
-------
Run
The run with loaded child runs.
Raises
------
LangSmithError
If a child run has no parent.
"""
child_runs = self.list_runs(id=run.child_run_ids)
treemap: DefaultDict[uuid.UUID, List[ls_schemas.Run]] = collections.defaultdict(
list
)
runs: Dict[uuid.UUID, ls_schemas.Run] = {}
for child_run in sorted(
# TODO: Remove execution_order once it's no longer used
child_runs,
key=lambda r: r.dotted_order or str(r.execution_order),
):
if child_run.parent_run_id is None:
raise ls_utils.LangSmithError(f"Child run {child_run.id} has no parent")
treemap[child_run.parent_run_id].append(child_run)
runs[child_run.id] = child_run
run.child_runs = treemap.pop(run.id, [])
for run_id, children in treemap.items():
runs[run_id].child_runs = children
return run
def read_run(
self, run_id: ID_TYPE, load_child_runs: bool = False
) -> ls_schemas.Run:
"""Read a run from the LangSmith API.
Parameters
----------
run_id : str or UUID
The ID of the run to read.
load_child_runs : bool, default=False
Whether to load nested child runs.
Returns
-------
Run
The run.
"""
response = self._get_with_retries(f"/runs/{run_id}")
run = ls_schemas.Run(**response.json(), _host_url=self._host_url)
if load_child_runs and run.child_run_ids:
run = self._load_child_runs(run)
return run
def list_runs(
self,
*,
project_id: Optional[ID_TYPE] = None,
project_name: Optional[str] = None,
run_type: Optional[str] = None,
reference_example_id: Optional[ID_TYPE] = None,
query: Optional[str] = None,
filter: Optional[str] = None,
execution_order: Optional[int] = None,
parent_run_id: Optional[ID_TYPE] = None,
start_time: Optional[datetime.datetime] = None,
error: Optional[bool] = None,
run_ids: Optional[List[ID_TYPE]] = None,
**kwargs: Any,
) -> Iterator[ls_schemas.Run]:
"""List runs from the LangSmith API.
Parameters
----------
project_id : UUID or None, default=None
The ID of the project to filter by.
project_name : str or None, default=None
The name of the project to filter by.
run_type : str or None, default=None
The type of the runs to filter by.
reference_example_id : UUID or None, default=None
The ID of the reference example to filter by.
query : str or None, default=None
The query string to filter by.
filter : str or None, default=None
The filter string to filter by.
execution_order : int or None, default=None
The execution order to filter by. Execution order is the position
of the run in the full trace's execution sequence.
All root run traces have execution_order 1.
parent_run_id : UUID or None, default=None
The ID of the parent run to filter by.
start_time : datetime or None, default=None
The start time to filter by.
error : bool or None, default=None
Whether to filter by error status.
run_ids : List[str or UUID] or None, default=None
The IDs of the runs to filter by.
**kwargs : Any
Additional keyword arguments.
Yields
------
Run
The runs.
"""
if project_name is not None:
if project_id is not None:
raise ValueError("Only one of project_id or project_name may be given")
project_id = self.read_project(project_name=project_name).id
query_params: Dict[str, Any] = {
"session": project_id,
"run_type": run_type,
**kwargs,
}
if reference_example_id is not None:
query_params["reference_example"] = reference_example_id
if query is not None:
query_params["query"] = query
if filter is not None:
query_params["filter"] = filter
if execution_order is not None:
query_params["execution_order"] = execution_order
if parent_run_id is not None:
query_params["parent_run"] = parent_run_id
if start_time is not None:
query_params["start_time"] = start_time.isoformat()
if error is not None:
query_params["error"] = error
if run_ids is not None:
query_params["id"] = run_ids
yield from (
ls_schemas.Run(**run, _host_url=self._host_url)
for run in self._get_paginated_list("/runs", params=query_params)
)
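    # Editor's note: hedged query sketch for list_runs. The generator pages
    # through /runs with the project filter resolved above; the project name
    # and filter values are illustrative, and `client` is a Client instance.
    #
    #     import datetime
    #
    #     since = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(days=1)
    #     failed_llm_runs = client.list_runs(
    #         project_name="demo-project",
    #         run_type="llm",
    #         error=True,
    #         start_time=since,
    #     )
    #     for run in failed_llm_runs:
    #         print(run.id, run.name, run.error)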
def get_run_url(
self,
*,
run: ls_schemas.RunBase,
project_name: Optional[str] = None,
project_id: Optional[ID_TYPE] = None,
) -> str:
"""Get the URL for a run.
Parameters
----------
run : Run
The run.
project_name : str or None, default=None
The name of the project.
project_id : UUID or None, default=None
The ID of the project.
Returns
-------
str
The URL for the run.
"""
if hasattr(run, "session_id") and run.session_id is not None:
session_id = run.session_id
elif project_id is not None:
session_id = project_id
elif project_name is not None:
session_id = self.read_project(project_name=project_name).id
else:
project_name = os.environ.get(
"LANGCHAIN_PROJECT",
"default",
)
session_id = self.read_project(project_name=project_name).id
return (
f"{self._host_url}/o/{self._get_tenant_id()}/projects/p/{session_id}/"
f"r/{run.id}?poll=true"
)
def share_run(self, run_id: ID_TYPE, *, share_id: Optional[ID_TYPE] = None) -> str:
"""Get a share link for a run."""
data = {
"run_id": str(run_id),
"share_token": share_id or str(uuid.uuid4()),
}
response = self.session.put(
f"{self.api_url}/runs/{run_id}/share",
headers=self._headers,
json=data,
)
ls_utils.raise_for_status_with_text(response)
share_token = response.json()["share_token"]
return f"{self._host_url}/public/{share_token}/r"
def unshare_run(self, run_id: ID_TYPE) -> None:
"""Delete share link for a run."""
response = self.session.delete(
f"{self.api_url}/runs/{run_id}/share",
headers=self._headers,
)
ls_utils.raise_for_status_with_text(response)
def read_run_shared_link(self, run_id: ID_TYPE) -> Optional[str]:
response = self.session.get(
f"{self.api_url}/runs/{run_id}/share",
headers=self._headers,
)
ls_utils.raise_for_status_with_text(response)
result = response.json()
if result is None or "share_token" not in result:
return None
return f"{self._host_url}/public/{result['share_token']}/r"
def run_is_shared(self, run_id: ID_TYPE) -> bool:
"""Get share state for a run."""
link = self.read_run_shared_link(run_id)
return link is not None
def list_shared_runs(
self, share_token: str, run_ids: Optional[List[str]] = None
) -> List[ls_schemas.Run]:
"""Get shared runs."""
params = {"id": run_ids, "share_token": share_token}
response = self.session.get(
f"{self.api_url}/public/{share_token}/runs",
headers=self._headers,
params=params,
)
ls_utils.raise_for_status_with_text(response)
return [
ls_schemas.Run(**run, _host_url=self._host_url) for run in response.json()
]
def create_project(
self,
project_name: str,
*,
project_extra: Optional[dict] = None,
upsert: bool = False,
reference_dataset_id: Optional[ID_TYPE] = None,
) -> ls_schemas.TracerSession:
"""Create a project on the LangSmith API.
Parameters
----------
project_name : str
The name of the project.
project_extra : dict or None, default=None
Additional project information.
upsert : bool, default=False
Whether to update the project if it already exists.
reference_dataset_id: UUID or None, default=None
The ID of the reference dataset to associate with the project.
Returns
-------
TracerSession
The created project.
"""
endpoint = f"{self.api_url}/sessions"
body: Dict[str, Any] = {
"name": project_name,
"extra": project_extra,
}
params = {}
if upsert:
params["upsert"] = True
if reference_dataset_id is not None:
body["reference_dataset_id"] = reference_dataset_id
response = self.session.post(
endpoint,
headers=self._headers,
data=json.dumps(body, default=_serialize_json),
)
ls_utils.raise_for_status_with_text(response)
return ls_schemas.TracerSession(**response.json(), _host_url=self._host_url)
def _get_tenant_id(self) -> uuid.UUID:
if self._tenant_id is not None:
return self._tenant_id
response = self._get_with_retries("/sessions", params={"limit": 1})
result = response.json()
if isinstance(result, list):
tracer_session = ls_schemas.TracerSessionResult(
**result[0], _host_url=self._host_url
)
self._tenant_id = tracer_session.tenant_id
return self._tenant_id
raise ls_utils.LangSmithError("No projects found")
@ls_utils.xor_args(("project_id", "project_name"))
def read_project(
self, *, project_id: Optional[str] = None, project_name: Optional[str] = None
) -> ls_schemas.TracerSessionResult:
"""Read a project from the LangSmith API.
Parameters
----------
project_id : str or None, default=None
The ID of the project to read.
project_name : str or None, default=None
The name of the project to read.
Note: Only one of project_id or project_name may be given.
Returns
-------
TracerSessionResult
The project.
"""
path = "/sessions"
params: Dict[str, Any] = {"limit": 1}
if project_id is not None:
path += f"/{project_id}"
elif project_name is not None:
params["name"] = project_name
else:
raise ValueError("Must provide project_name or project_id")
response = self._get_with_retries(path, params=params)
result = response.json()
if isinstance(result, list):
if len(result) == 0:
raise ls_utils.LangSmithError(f"Project {project_name} not found")
return ls_schemas.TracerSessionResult(**result[0], _host_url=self._host_url)
return ls_schemas.TracerSessionResult(
**response.json(), _host_url=self._host_url
)
def list_projects(
self,
project_ids: Optional[List[ID_TYPE]] = None,
name: Optional[str] = None,
name_contains: Optional[str] = None,
reference_dataset_id: Optional[ID_TYPE] = None,
reference_dataset_name: Optional[str] = None,
reference_free: Optional[bool] = None,
) -> Iterator[ls_schemas.TracerSession]:
"""
List projects from the LangSmith API.
Parameters
----------
project_ids : Optional[List[ID_TYPE]], optional
A list of project IDs to filter by, by default None
name : Optional[str], optional
The name of the project to filter by, by default None
name_contains : Optional[str], optional
A string to search for in the project name, by default None
reference_dataset_id : Optional[ID_TYPE], optional
A dataset ID to filter by, by default None
reference_dataset_name : Optional[str], optional
The name of the reference dataset to filter by, by default None
reference_free : Optional[bool], optional
Whether to filter for only projects not associated with a dataset.
Yields
------
TracerSession
The projects.
"""
params: Dict[str, Any] = {}
if project_ids is not None:
params["id"] = project_ids
if name is not None:
params["name"] = name
if name_contains is not None:
params["name_contains"] = name_contains
if reference_dataset_id is not None:
if reference_dataset_name is not None:
raise ValueError(
"Only one of reference_dataset_id or"
" reference_dataset_name may be given"
)
params["reference_dataset"] = reference_dataset_id
elif reference_dataset_name is not None:
reference_dataset_id = self.read_dataset(
dataset_name=reference_dataset_name
).id
params["reference_dataset"] = reference_dataset_id
if reference_free is not None:
params["reference_free"] = reference_free
yield from (
ls_schemas.TracerSession(**project, _host_url=self._host_url)
for project in self._get_paginated_list("/sessions", params=params)
)
@ls_utils.xor_args(("project_name", "project_id"))
def delete_project(
self, *, project_name: Optional[str] = None, project_id: Optional[str] = None
) -> None:
"""Delete a project from LangSmith.
Parameters
----------
project_name : str or None, default=None
The name of the project to delete.
project_id : str or None, default=None
The ID of the project to delete.
"""
if project_name is not None:
project_id = str(self.read_project(project_name=project_name).id)
elif project_id is None:
raise ValueError("Must provide project_name or project_id")
response = self.session.delete(
self.api_url + f"/sessions/{project_id}",
headers=self._headers,
)
ls_utils.raise_for_status_with_text(response)
def create_dataset(
self,
dataset_name: str,
*,
description: Optional[str] = None,
data_type: ls_schemas.DataType = ls_schemas.DataType.kv,
) -> ls_schemas.Dataset:
"""Create a dataset in the LangSmith API.
Parameters
----------
dataset_name : str
The name of the dataset.
description : str or None, default=None
The description of the dataset.
data_type : DataType or None, default=DataType.kv
The data type of the dataset.
Returns
-------
Dataset
The created dataset.
"""
dataset = ls_schemas.DatasetCreate(
name=dataset_name,
description=description,
data_type=data_type,
)
response = self.session.post(
self.api_url + "/datasets",
headers=self._headers,
data=dataset.json(),
)
ls_utils.raise_for_status_with_text(response)
return ls_schemas.Dataset(**response.json(), _host_url=self._host_url)
@ls_utils.xor_args(("dataset_name", "dataset_id"))
def read_dataset(
self,
*,
dataset_name: Optional[str] = None,
dataset_id: Optional[ID_TYPE] = None,
) -> ls_schemas.Dataset:
"""Read a dataset from the LangSmith API.
Parameters
----------
dataset_name : str or None, default=None
The name of the dataset to read.
dataset_id : UUID or None, default=None
The ID of the dataset to read.
Returns
-------
Dataset
The dataset.
"""
path = "/datasets"
params: Dict[str, Any] = {"limit": 1}
if dataset_id is not None:
path += f"/{dataset_id}"
elif dataset_name is not None:
params["name"] = dataset_name
else:
raise ValueError("Must provide dataset_name or dataset_id")
response = self._get_with_retries(
path,
params=params,
)
result = response.json()
if isinstance(result, list):
if len(result) == 0:
raise ls_utils.LangSmithError(f"Dataset {dataset_name} not found")
return ls_schemas.Dataset(**result[0], _host_url=self._host_url)
return ls_schemas.Dataset(**result, _host_url=self._host_url)
def read_dataset_openai_finetuning(
self, dataset_id: Optional[str] = None, *, dataset_name: Optional[str] = None
) -> list:
"""
Download a dataset in OpenAI Jsonl format and load it as a list of dicts.
Parameters
----------
dataset_id : str
The ID of the dataset to download.
dataset_name : str
The name of the dataset to download.
Returns
-------
list
The dataset loaded as a list of dicts.
"""
path = "/datasets"
if dataset_id is not None:
pass
elif dataset_name is not None:
dataset_id = self.read_dataset(dataset_name=dataset_name).id
else:
raise ValueError("Must provide dataset_name or dataset_id")
response = self._get_with_retries(
f"{path}/{dataset_id}/openai_ft",
)
dataset = [json.loads(line) for line in response.text.strip().split("\n")]
return dataset
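    # Editor's note: hedged export sketch. read_dataset_openai_finetuning
    # returns the dataset in OpenAI's JSONL chat format; writing it back out to
    # disk for fine-tuning might look like this (the dataset and file names are
    # assumptions, `client` is a Client instance):
    #
    #     import json
    #
    #     records = client.read_dataset_openai_finetuning(dataset_name="qa-smoke-test")
    #     with open("finetune.jsonl", "w") as fp:
    #         for record in records:
    #             fp.write(json.dumps(record) + "\n")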
def list_datasets(
self,
*,
dataset_ids: Optional[List[ID_TYPE]] = None,
data_type: Optional[str] = None,
dataset_name: Optional[str] = None,
dataset_name_contains: Optional[str] = None,
) -> Iterator[ls_schemas.Dataset]:
"""List the datasets on the LangSmith API.
Yields
------
Dataset
The datasets.
"""
params: Dict[str, Any] = {}
if dataset_ids is not None:
params["id"] = dataset_ids
if data_type is not None:
params["data_type"] = data_type
if dataset_name is not None:
params["name"] = dataset_name
if dataset_name_contains is not None:
params["name_contains"] = dataset_name_contains
yield from (
ls_schemas.Dataset(**dataset, _host_url=self._host_url)
for dataset in self._get_paginated_list("/datasets", params=params)
)
@ls_utils.xor_args(("dataset_id", "dataset_name"))
def delete_dataset(
self,
*,
dataset_id: Optional[ID_TYPE] = None,
dataset_name: Optional[str] = None,
) -> None:
"""Delete a dataset from the LangSmith API.
Parameters
----------
dataset_id : UUID or None, default=None
The ID of the dataset to delete.
dataset_name : str or None, default=None
The name of the dataset to delete.
"""
if dataset_name is not None:
dataset_id = self.read_dataset(dataset_name=dataset_name).id
if dataset_id is None:
raise ValueError("Must provide either dataset name or ID")
response = self.session.delete(
f"{self.api_url}/datasets/{dataset_id}",
headers=self._headers,
)
ls_utils.raise_for_status_with_text(response)
def _get_data_type(self, dataset_id: ID_TYPE) -> ls_schemas.DataType:
dataset = self.read_dataset(dataset_id=dataset_id)
return dataset.data_type
@ls_utils.xor_args(("dataset_id", "dataset_name"))
def create_llm_example(
self,
prompt: str,
generation: Optional[str] = None,
dataset_id: Optional[ID_TYPE] = None,
dataset_name: Optional[str] = None,
created_at: Optional[datetime.datetime] = None,
) -> ls_schemas.Example:
"""Add an example (row) to an LLM-type dataset."""
return self.create_example(
inputs={"input": prompt},
outputs={"output": generation},
dataset_id=dataset_id,
dataset_name=dataset_name,
created_at=created_at,
)
@ls_utils.xor_args(("dataset_id", "dataset_name"))
def create_chat_example(
self,
messages: List[Union[Mapping[str, Any], ls_schemas.BaseMessageLike]],
generations: Optional[
Union[Mapping[str, Any], ls_schemas.BaseMessageLike]
] = None,
dataset_id: Optional[ID_TYPE] = None,
dataset_name: Optional[str] = None,
created_at: Optional[datetime.datetime] = None,
) -> ls_schemas.Example:
"""Add an example (row) to a Chat-type dataset."""
final_input = []
for message in messages:
if ls_utils.is_base_message_like(message):
final_input.append(
ls_utils.convert_langchain_message(
cast(ls_schemas.BaseMessageLike, message)
)
)
else:
final_input.append(cast(dict, message))
final_generations = None
if generations is not None:
if ls_utils.is_base_message_like(generations):
final_generations = ls_utils.convert_langchain_message(
cast(ls_schemas.BaseMessageLike, generations)
)
else:
final_generations = cast(dict, generations)
return self.create_example(
inputs={"input": final_input},
outputs={"output": final_generations}
if final_generations is not None
else None,
dataset_id=dataset_id,
dataset_name=dataset_name,
created_at=created_at,
)
def create_example_from_run(
self,
run: ls_schemas.Run,
dataset_id: Optional[ID_TYPE] = None,
dataset_name: Optional[str] = None,
created_at: Optional[datetime.datetime] = None,
) -> ls_schemas.Example:
"""Add an example (row) to an LLM-type dataset."""
if dataset_id is None:
dataset_id = self.read_dataset(dataset_name=dataset_name).id
dataset_name = None # Nested call expects only 1 defined
dataset_type = self._get_data_type_cached(dataset_id)
if dataset_type == ls_schemas.DataType.llm:
if run.run_type != "llm":
raise ValueError(
f"Run type {run.run_type} is not supported"
" for dataset of type 'LLM'"
)
try:
prompt = ls_utils.get_prompt_from_inputs(run.inputs)
except ValueError:
raise ValueError(
"Error converting LLM run inputs to prompt for run"
f" {run.id} with inputs {run.inputs}"
)
inputs: Dict[str, Any] = {"input": prompt}
if not run.outputs:
outputs: Optional[Dict[str, Any]] = None
else:
try:
generation = ls_utils.get_llm_generation_from_outputs(run.outputs)
except ValueError:
raise ValueError(
"Error converting LLM run outputs to generation for run"
f" {run.id} with outputs {run.outputs}"
)
outputs = {"output": generation}
elif dataset_type == ls_schemas.DataType.chat:
if run.run_type != "llm":
raise ValueError(
f"Run type {run.run_type} is not supported"
" for dataset of type 'chat'"
)
try:
inputs = {"input": ls_utils.get_messages_from_inputs(run.inputs)}
except ValueError:
raise ValueError(
"Error converting LLM run inputs to chat messages for run"
f" {run.id} with inputs {run.inputs}"
)
if not run.outputs:
outputs = None
else:
try:
outputs = {
"output": ls_utils.get_message_generation_from_outputs(
run.outputs
)
}
except ValueError:
raise ValueError(
"Error converting LLM run outputs to chat generations"
f" for run {run.id} with outputs {run.outputs}"
)
elif dataset_type == ls_schemas.DataType.kv:
# Anything goes
inputs = run.inputs
outputs = run.outputs
else:
raise ValueError(f"Dataset type {dataset_type} not recognized.")
return self.create_example(
inputs=inputs,
outputs=outputs,
dataset_id=dataset_id,
dataset_name=dataset_name,
created_at=created_at,
)
def create_examples(
self,
*,
inputs: Sequence[Mapping[str, Any]],
outputs: Optional[Sequence[Optional[Mapping[str, Any]]]] = None,
dataset_id: Optional[ID_TYPE] = None,
dataset_name: Optional[str] = None,
max_concurrency: int = 10,
) -> None:
"""Create examples in a dataset.
Parameters
----------
inputs : Sequence[Mapping[str, Any]]
The input values for the examples.
outputs : Optional[Sequence[Optional[Mapping[str, Any]]]], default=None
The output values for the examples.
dataset_id : Optional[ID_TYPE], default=None
The ID of the dataset to create the examples in.
dataset_name : Optional[str], default=None
The name of the dataset to create the examples in.
max_concurrency : int, default=10
The maximum number of concurrent requests to make.
Returns
-------
None
Raises
------
ValueError
If both `dataset_id` and `dataset_name` are `None`.
"""
if dataset_id is None and dataset_name is None:
raise ValueError("Either dataset_id or dataset_name must be provided.")
if dataset_id is None:
dataset_id = self.read_dataset(dataset_name=dataset_name).id
max_concurrency = min(max_concurrency, len(inputs))
with concurrent.futures.ThreadPoolExecutor(
max_workers=max_concurrency
) as executor:
for input_data, output_data in zip(inputs, outputs or [None] * len(inputs)):
executor.submit(
self.create_example,
inputs=input_data,
outputs=output_data,
dataset_id=dataset_id,
)
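    # Editor's note: hedged dataset-population sketch combining create_dataset
    # (defined below) with create_examples above. The dataset name and rows are
    # illustrative, and `client` is assumed to be a Client instance.
    #
    #     dataset = client.create_dataset(
    #         "qa-smoke-test-kv", description="Editor example dataset"
    #     )
    #     client.create_examples(
    #         inputs=[{"question": "What is 2+2?"}, {"question": "Capital of France?"}],
    #         outputs=[{"answer": "4"}, {"answer": "Paris"}],
    #         dataset_id=dataset.id,
    #     )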
@ls_utils.xor_args(("dataset_id", "dataset_name"))
def create_example(
self,
inputs: Mapping[str, Any],
dataset_id: Optional[ID_TYPE] = None,
dataset_name: Optional[str] = None,
created_at: Optional[datetime.datetime] = None,
outputs: Optional[Mapping[str, Any]] = None,
example_id: Optional[ID_TYPE] = None,
) -> ls_schemas.Example:
"""Create a dataset example in the LangSmith API.
Examples are rows in a dataset, containing the inputs
and expected outputs (or other reference information)
for a model or chain.
Parameters
----------
inputs : Mapping[str, Any]
The input values for the example.
dataset_id : UUID or None, default=None
The ID of the dataset to create the example in.
dataset_name : str or None, default=None
The name of the dataset to create the example in.
created_at : datetime or None, default=None
The creation timestamp of the example.
outputs : Mapping[str, Any] or None, default=None
The output values for the example.
example_id : UUID or None, default=None
The ID of the example to create. If not provided, a new
example will be created.
Returns
-------
Example
The created example.
"""
if dataset_id is None:
dataset_id = self.read_dataset(dataset_name=dataset_name).id
data = {
"inputs": inputs,
"outputs": outputs,
"dataset_id": dataset_id,
}
if created_at:
data["created_at"] = created_at.isoformat()
if example_id:
data["id"] = example_id
example = ls_schemas.ExampleCreate(**data)
response = self.session.post(
f"{self.api_url}/examples", headers=self._headers, data=example.json()
)
ls_utils.raise_for_status_with_text(response)
result = response.json()
return ls_schemas.Example(**result)
def read_example(self, example_id: ID_TYPE) -> ls_schemas.Example:
"""Read an example from the LangSmith API.
Parameters
----------
example_id : str or UUID
The ID of the example to read.
Returns
-------
Example
The example.
"""
response = self._get_with_retries(f"/examples/{example_id}")
return ls_schemas.Example(**response.json())
def list_examples(
self,
dataset_id: Optional[ID_TYPE] = None,
dataset_name: Optional[str] = None,
example_ids: Optional[List[ID_TYPE]] = None,
) -> Iterator[ls_schemas.Example]:
"""Retrieve the example rows of the specified dataset.
Parameters
----------
dataset_id : UUID or None, default=None
The ID of the dataset to filter by.
dataset_name : str or None, default=None
The name of the dataset to filter by.
example_ids : List[UUID] or None, default=None
The IDs of the examples to filter by.
Yields
------
Example
The examples.
"""
params: Dict[str, Any] = {}
if dataset_id is not None:
params["dataset"] = dataset_id
elif dataset_name is not None:
dataset_id = self.read_dataset(dataset_name=dataset_name).id
params["dataset"] = dataset_id
else:
pass
if example_ids is not None:
params["id"] = example_ids
yield from (
ls_schemas.Example(**example)
for example in self._get_paginated_list("/examples", params=params)
)
def update_example(
self,
example_id: str,
*,
inputs: Optional[Dict[str, Any]] = None,
outputs: Optional[Mapping[str, Any]] = None,
dataset_id: Optional[ID_TYPE] = None,
) -> Dict[str, Any]:
"""Update a specific example.
Parameters
----------
example_id : str or UUID
The ID of the example to update.
inputs : Dict[str, Any] or None, default=None
The input values to update.
outputs : Mapping[str, Any] or None, default=None
The output values to update.
dataset_id : UUID or None, default=None
The ID of the dataset to update.
Returns
-------
Dict[str, Any]
The updated example.
"""
example = ls_schemas.ExampleUpdate(
inputs=inputs,
outputs=outputs,
dataset_id=dataset_id,
)
response = self.session.patch(
f"{self.api_url}/examples/{example_id}",
headers=self._headers,
data=example.json(exclude_none=True),
)
ls_utils.raise_for_status_with_text(response)
return response.json()
def delete_example(self, example_id: ID_TYPE) -> None:
"""Delete an example by ID.
Parameters
----------
example_id : str or UUID
The ID of the example to delete.
"""
response = self.session.delete(
f"{self.api_url}/examples/{example_id}",
headers=self._headers,
)
ls_utils.raise_for_status_with_text(response)
def _resolve_run_id(
self,
run: Union[ls_schemas.Run, ls_schemas.RunBase, str, uuid.UUID],
load_child_runs: bool,
) -> ls_schemas.Run:
"""Resolve the run ID.
Parameters
----------
run : Run or RunBase or str or UUID
The run to resolve.
load_child_runs : bool
Whether to load child runs.
Returns
-------
Run
The resolved run.
Raises
------
TypeError
If the run type is invalid.
"""
if isinstance(run, (str, uuid.UUID)):
run_ = self.read_run(run, load_child_runs=load_child_runs)
else:
run_ = run
return run_
def _resolve_example_id(
self,
example: Union[ls_schemas.Example, str, uuid.UUID, dict, None],
run: ls_schemas.Run,
) -> Optional[ls_schemas.Example]:
"""Resolve the example ID.
Parameters
----------
example : Example or str or UUID or dict or None
The example to resolve.
run : Run
The run associated with the example.
Returns
-------
Example or None
The resolved example.
"""
if isinstance(example, (str, uuid.UUID)):
reference_example_ = self.read_example(example)
elif isinstance(example, ls_schemas.Example):
reference_example_ = example
elif isinstance(example, dict):
reference_example_ = ls_schemas.Example(**example)
elif run.reference_example_id is not None:
reference_example_ = self.read_example(run.reference_example_id)
else:
reference_example_ = None
return reference_example_
def evaluate_run(
self,
run: Union[ls_schemas.Run, ls_schemas.RunBase, str, uuid.UUID],
evaluator: ls_evaluator.RunEvaluator,
*,
source_info: Optional[Dict[str, Any]] = None,
reference_example: Optional[
Union[ls_schemas.Example, str, dict, uuid.UUID]
] = None,
load_child_runs: bool = False,
) -> ls_evaluator.EvaluationResult:
"""Evaluate a run.
Parameters
----------
run : Run or RunBase or str or UUID
The run to evaluate.
evaluator : RunEvaluator
The evaluator to use.
source_info : Dict[str, Any] or None, default=None
Additional information about the source of the evaluation to log
as feedback metadata.
reference_example : Example or str or dict or UUID or None, default=None
The example to use as a reference for the evaluation.
If not provided, the run's reference example will be used.
load_child_runs : bool, default=False
Whether to load child runs when resolving the run ID.
Returns
-------
Feedback
The feedback object created by the evaluation.
"""
run_ = self._resolve_run_id(run, load_child_runs=load_child_runs)
reference_example_ = self._resolve_example_id(reference_example, run_)
evaluation_result = evaluator.evaluate_run(
run_,
example=reference_example_,
)
source_info = source_info or {}
if evaluation_result.evaluator_info:
source_info = {**evaluation_result.evaluator_info, **source_info}
self.create_feedback(
run_.id,
evaluation_result.key,
score=evaluation_result.score,
value=evaluation_result.value,
comment=evaluation_result.comment,
correction=evaluation_result.correction,
source_info=source_info,
source_run_id=evaluation_result.source_run_id,
feedback_source_type=ls_schemas.FeedbackSourceType.MODEL,
)
return evaluation_result
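    # Editor's note: hedged evaluation sketch. evaluate_run resolves the run
    # and its reference example, calls the evaluator, and logs the result as
    # model feedback. The evaluator below is a minimal assumed implementation
    # of the RunEvaluator interface this module aliases as `ls_evaluator`; the
    # import path, run id, and output keys are assumptions, and `client` is a
    # Client instance.
    #
    #     from langsmith.evaluation import RunEvaluator, EvaluationResult
    #
    #     class ExactMatchEvaluator(RunEvaluator):
    #         def evaluate_run(self, run, example=None) -> EvaluationResult:
    #             predicted = (run.outputs or {}).get("output")
    #             expected = (example.outputs or {}).get("output") if example else None
    #             return EvaluationResult(
    #                 key="exact_match", score=float(predicted == expected)
    #             )
    #
    #     result = client.evaluate_run("RUN_ID_HERE", ExactMatchEvaluator())
    #     print(result.key, result.score)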
async def aevaluate_run(
self,
run: Union[ls_schemas.Run, str, uuid.UUID],
evaluator: ls_evaluator.RunEvaluator,
*,
source_info: Optional[Dict[str, Any]] = None,
reference_example: Optional[
Union[ls_schemas.Example, str, dict, uuid.UUID]
] = None,
load_child_runs: bool = False,
) -> ls_evaluator.EvaluationResult:
"""Evaluate a run asynchronously.
Parameters
----------
run : Run or str or UUID
The run to evaluate.
evaluator : RunEvaluator
The evaluator to use.
source_info : Dict[str, Any] or None, default=None
Additional information about the source of the evaluation to log
as feedback metadata.
reference_example : Example or str or dict or UUID or None, default=None
The example to use as a reference for the evaluation.
If not provided, the run's reference example will be used.
load_child_runs : bool, default=False
Whether to load child runs when resolving the run ID.
Returns
-------
EvaluationResult
The evaluation result object created by the evaluation.
"""
run_ = self._resolve_run_id(run, load_child_runs=load_child_runs)
reference_example_ = self._resolve_example_id(reference_example, run_)
evaluation_result = await evaluator.aevaluate_run(
run_,
example=reference_example_,
)
source_info = source_info or {}
if evaluation_result.evaluator_info:
source_info = {**evaluation_result.evaluator_info, **source_info}
self.create_feedback(
run_.id,
evaluation_result.key,
score=evaluation_result.score,
value=evaluation_result.value,
comment=evaluation_result.comment,
correction=evaluation_result.correction,
source_info=source_info,
source_run_id=evaluation_result.source_run_id,
feedback_source_type=ls_schemas.FeedbackSourceType.MODEL,
)
return evaluation_result
def create_feedback(
self,
run_id: ID_TYPE,
key: str,
*,
score: Union[float, int, bool, None] = None,
value: Union[float, int, bool, str, dict, None] = None,
correction: Union[dict, None] = None,
comment: Union[str, None] = None,
source_info: Optional[Dict[str, Any]] = None,
feedback_source_type: Union[
ls_schemas.FeedbackSourceType, str
] = ls_schemas.FeedbackSourceType.API,
source_run_id: Optional[ID_TYPE] = None,
feedback_id: Optional[ID_TYPE] = None,
) -> ls_schemas.Feedback:
"""Create a feedback in the LangSmith API.
Parameters
----------
run_id : str or UUID
The ID of the run to provide feedback on.
key : str
The name of the metric, tag, or 'aspect' this feedback is about.
score : float or int or bool or None, default=None
The score to rate this run on the metric or aspect.
value : float or int or bool or str or dict or None, default=None
The display value or non-numeric value for this feedback.
correction : dict or None, default=None
The proper ground truth for this run.
comment : str or None, default=None
A comment about this feedback.
source_info : Dict[str, Any] or None, default=None
Information about the source of this feedback.
feedback_source_type : FeedbackSourceType or str, default=FeedbackSourceType.API
The type of feedback source, such as model (for model-generated feedback)
or API.
source_run_id : str or UUID or None, default=None,
The ID of the run that generated this feedback, if a "model" type.
feedback_id : str or UUID or None, default=None
The ID of the feedback to create. If not provided, a random UUID will be
generated.
"""
if not isinstance(feedback_source_type, ls_schemas.FeedbackSourceType):
feedback_source_type = ls_schemas.FeedbackSourceType(feedback_source_type)
if feedback_source_type == ls_schemas.FeedbackSourceType.API:
feedback_source: ls_schemas.FeedbackSourceBase = (
ls_schemas.APIFeedbackSource(metadata=source_info)
)
elif feedback_source_type == ls_schemas.FeedbackSourceType.MODEL:
feedback_source = ls_schemas.ModelFeedbackSource(metadata=source_info)
else:
raise ValueError(f"Unknown feedback source type {feedback_source_type}")
feedback_source.metadata = (
feedback_source.metadata if feedback_source.metadata is not None else {}
)
if source_run_id is not None and "__run" not in feedback_source.metadata:
feedback_source.metadata["__run"] = {"run_id": str(source_run_id)}
feedback = ls_schemas.FeedbackCreate(
id=feedback_id or uuid.uuid4(),
run_id=run_id,
key=key,
score=score,
value=value,
correction=correction,
comment=comment,
feedback_source=feedback_source,
created_at=datetime.datetime.now(datetime.timezone.utc),
modified_at=datetime.datetime.now(datetime.timezone.utc),
)
self.request_with_retries(
"POST",
self.api_url + "/feedback",
request_kwargs={
"data": json.dumps(
feedback.dict(exclude_none=True), default=_serialize_json
),
"headers": {**self._headers, "Content-Type": "application/json"},
"timeout": self.timeout_ms / 1000,
},
)
return ls_schemas.Feedback(**feedback.dict())
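    # Editor's note: hedged feedback sketch. create_feedback posts a Feedback
    # record tied to a run, and update_feedback / delete_feedback (defined
    # below) modify or remove it. The run id, key, and score are illustrative,
    # and `client` is assumed to be a Client instance.
    #
    #     feedback = client.create_feedback(
    #         "RUN_ID_HERE",
    #         key="thumbs_up",
    #         score=1,
    #         comment="Answer was correct and concise",
    #     )
    #     client.update_feedback(feedback.id, score=0, comment="On review, incorrect")
    #     client.delete_feedback(feedback.id)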
def update_feedback(
self,
feedback_id: ID_TYPE,
*,
score: Union[float, int, bool, None] = None,
value: Union[float, int, bool, str, dict, None] = None,
correction: Union[dict, None] = None,
comment: Union[str, None] = None,
) -> None:
"""Update a feedback in the LangSmith API.
Parameters
----------
feedback_id : str or UUID
The ID of the feedback to update.
score : float or int or bool or None, default=None
The score to update the feedback with.
value : float or int or bool or str or dict or None, default=None
The value to update the feedback with.
correction : dict or None, default=None
The correction to update the feedback with.
comment : str or None, default=None
The comment to update the feedback with.
"""
feedback_update: Dict[str, Any] = {}
if score is not None:
feedback_update["score"] = score
if value is not None:
feedback_update["value"] = value
if correction is not None:
feedback_update["correction"] = correction
if comment is not None:
feedback_update["comment"] = comment
response = self.session.patch(
self.api_url + f"/feedback/{feedback_id}",
headers={**self._headers, "Content-Type": "application/json"},
data=json.dumps(feedback_update, default=_serialize_json),
)
ls_utils.raise_for_status_with_text(response)
def read_feedback(self, feedback_id: ID_TYPE) -> ls_schemas.Feedback:
"""Read a feedback from the LangSmith API.
Parameters
----------
feedback_id : str or UUID
The ID of the feedback to read.
Returns
-------
Feedback
The feedback.
"""
response = self._get_with_retries(f"/feedback/{feedback_id}")
return ls_schemas.Feedback(**response.json())
def list_feedback(
self,
*,
run_ids: Optional[Sequence[ID_TYPE]] = None,
feedback_key: Optional[Sequence[str]] = None,
feedback_source_type: Optional[Sequence[ls_schemas.FeedbackSourceType]] = None,
**kwargs: Any,
) -> Iterator[ls_schemas.Feedback]:
"""List the feedback objects on the LangSmith API.
Parameters
----------
run_ids : List[str or UUID] or None, default=None
The IDs of the runs to filter by.
feedback_key: List[str] or None, default=None
The feedback key(s) to filter by. Example: 'correctness'
The query performs a union of all feedback keys.
feedback_source_type: List[FeedbackSourceType] or None, default=None
The type of feedback source, such as model
(for model-generated feedback) or API.
**kwargs : Any
Additional keyword arguments.
Yields
------
Feedback
The feedback objects.
"""
params: dict = {
"run": run_ids,
**kwargs,
}
if feedback_key is not None:
params["key"] = feedback_key
if feedback_source_type is not None:
params["source"] = feedback_source_type
yield from (
ls_schemas.Feedback(**feedback)
for feedback in self._get_paginated_list("/feedback", params=params)
)
def delete_feedback(self, feedback_id: ID_TYPE) -> None:
"""Delete a feedback by ID.
Parameters
----------
feedback_id : str or UUID
The ID of the feedback to delete.
"""
response = self.session.delete(
f"{self.api_url}/feedback/{feedback_id}",
headers=self._headers,
)
ls_utils.raise_for_status_with_text(response)
async def arun_on_dataset(
self,
dataset_name: str,
llm_or_chain_factory: Any,
*,
evaluation: Optional[Any] = None,
concurrency_level: int = 5,
project_name: Optional[str] = None,
verbose: bool = False,
tags: Optional[List[str]] = None,
input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Dict[str, Any]:
"""
Asynchronously run the Chain or language model on a dataset
and store traces to the specified project name.
Args:
dataset_name: Name of the dataset to run the chain on.
llm_or_chain_factory: Language model or Chain constructor to run
over the dataset. The Chain constructor is used to permit
independent calls on each example without carrying over state.
evaluation: Optional evaluation configuration to use when evaluating
concurrency_level: The number of async tasks to run concurrently.
project_name: Name of the project to store the traces in.
Defaults to {dataset_name}-{chain class name}-{datetime}.
verbose: Whether to print progress.
tags: Tags to add to each run in the project.
input_mapper: A function to map to the inputs dictionary from an Example
to the format expected by the model to be evaluated. This is useful if
your model needs to deserialize more complex schema or if your dataset
has inputs with keys that differ from what is expected by your chain
or agent.
Returns:
A dictionary containing the run's project name and the
resulting model outputs.
For the synchronous version, see client.run_on_dataset.
Examples
--------
.. code-block:: python
from langsmith import Client
from langchain.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from langchain.smith import RunEvalConfig
# Chains may have memory. Passing in a constructor function lets the
# evaluation framework avoid cross-contamination between runs.
def construct_chain():
llm = ChatOpenAI(temperature=0)
chain = LLMChain.from_string(
llm,
"What's the answer to {your_input_key}"
)
return chain
# Load off-the-shelf evaluators via config or the EvaluatorType (string or enum)
evaluation_config = RunEvalConfig(
evaluators=[
"qa", # "Correctness" against a reference answer
"embedding_distance",
RunEvalConfig.Criteria("helpfulness"),
RunEvalConfig.Criteria({
"fifth-grader-score": "Do you have to be smarter than a fifth grader to answer this question?"
}),
]
)
client = Client()
await client.arun_on_dataset(
"<my_dataset_name>",
construct_chain,
evaluation=evaluation_config,
)
You can also create custom evaluators by subclassing the
:class:`StringEvaluator <langchain.evaluation.schema.StringEvaluator>`
or LangSmith's `RunEvaluator` classes.
.. code-block:: python
from typing import Optional
from langchain.evaluation import StringEvaluator
class MyStringEvaluator(StringEvaluator):
@property
def requires_input(self) -> bool:
return False
@property
def requires_reference(self) -> bool:
return True
@property
def evaluation_name(self) -> str:
return "exact_match"
def _evaluate_strings(self, prediction, reference=None, input=None, **kwargs) -> dict:
return {"score": prediction == reference}
evaluation_config = RunEvalConfig(
custom_evaluators = [MyStringEvaluator()],
)
await client.arun_on_dataset(
"<my_dataset_name>",
construct_chain,
evaluation=evaluation_config,
)
""" # noqa: E501
try:
from langchain.smith import arun_on_dataset as _arun_on_dataset
except ImportError:
raise ImportError(
"The client.arun_on_dataset function requires the langchain"
"package to run.\nInstall with pip install langchain"
)
return await _arun_on_dataset(
dataset_name=dataset_name,
llm_or_chain_factory=llm_or_chain_factory,
client=self,
evaluation=evaluation,
concurrency_level=concurrency_level,
project_name=project_name,
verbose=verbose,
tags=tags,
input_mapper=input_mapper,
)
def run_on_dataset(
self,
dataset_name: str,
llm_or_chain_factory: Any,
*,
evaluation: Optional[Any] = None,
concurrency_level: int = 5,
project_name: Optional[str] = None,
verbose: bool = False,
tags: Optional[List[str]] = None,
input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Dict[str, Any]:
"""
Run the Chain or language model on a dataset and store traces
to the specified project name.
Args:
dataset_name: Name of the dataset to run the chain on.
llm_or_chain_factory: Language model or Chain constructor to run
over the dataset. The Chain constructor is used to permit
independent calls on each example without carrying over state.
evaluation: Configuration for evaluators to run on the
results of the chain
concurrency_level: The number of tasks to execute concurrently.
project_name: Name of the project to store the traces in.
Defaults to {dataset_name}-{chain class name}-{datetime}.
verbose: Whether to print progress.
tags: Tags to add to each run in the project.
input_mapper: A function to map to the inputs dictionary from an Example
to the format expected by the model to be evaluated. This is useful if
your model needs to deserialize more complex schema or if your dataset
has inputs with keys that differ from what is expected by your chain
or agent.
Returns:
A dictionary containing the run's project name and the resulting model outputs.
For the (usually faster) async version of this function, see `client.arun_on_dataset`.
Examples
--------
.. code-block:: python
from langsmith import Client
from langchain.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from langchain.smith import RunEvalConfig
# Chains may have memory. Passing in a constructor function lets the
# evaluation framework avoid cross-contamination between runs.
def construct_chain():
llm = ChatOpenAI(temperature=0)
chain = LLMChain.from_string(
llm,
"What's the answer to {your_input_key}"
)
return chain
# Load off-the-shelf evaluators via config or the EvaluatorType (string or enum)
evaluation_config = RunEvalConfig(
evaluators=[
"qa", # "Correctness" against a reference answer
"embedding_distance",
RunEvalConfig.Criteria("helpfulness"),
RunEvalConfig.Criteria({
"fifth-grader-score": "Do you have to be smarter than a fifth grader to answer this question?"
}),
]
)
client = Client()
client.run_on_dataset(
"<my_dataset_name>",
construct_chain,
evaluation=evaluation_config,
)
You can also create custom evaluators by subclassing the
:class:`StringEvaluator <langchain.evaluation.schema.StringEvaluator>`
or LangSmith's `RunEvaluator` classes.
.. code-block:: python
from typing import Optional
from langchain.evaluation import StringEvaluator
class MyStringEvaluator(StringEvaluator):
@property
def requires_input(self) -> bool:
return False
@property
def requires_reference(self) -> bool:
return True
@property
def evaluation_name(self) -> str:
return "exact_match"
def _evaluate_strings(self, prediction, reference=None, input=None, **kwargs) -> dict:
return {"score": prediction == reference}
evaluation_config = RunEvalConfig(
custom_evaluators = [MyStringEvaluator()],
)
client.run_on_dataset(
"<my_dataset_name>",
construct_chain,
evaluation=evaluation_config,
)
""" # noqa: E501
try:
from langchain.smith import run_on_dataset as _run_on_dataset
except ImportError:
raise ImportError(
"The client.run_on_dataset function requires the langchain"
"package to run.\nInstall with pip install langchain"
)
return _run_on_dataset(
dataset_name=dataset_name,
llm_or_chain_factory=llm_or_chain_factory,
concurrency_level=concurrency_level,
client=self,
evaluation=evaluation,
project_name=project_name,
verbose=verbose,
tags=tags,
input_mapper=input_mapper,
)
| [] |
2024-01-10 | Xeppyz/ChatBot-with-DataBase | controlador.py | import os
from sqlalchemy import create_engine
from langchain.chat_models.openai import ChatOpenAI
from langchain.memory import ConversationBufferWindowMemory
from langchain.sql_database import SQLDatabase
from langchain_experimental.sql import SQLDatabaseChain
# Configure the database connection
db_connection_string = "mssql+pyodbc://sa:220631@localhost/Finanzas?driver=SQL Server Native Client 11.0"
db_uri = db_connection_string
# Configure the ChatOpenAI API key and model
os.environ["OPENAI_API_KEY"] = "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"
model_name = "gpt-3.5-turbo"
# Test the database connection
try:
engine = create_engine(db_connection_string)
connection = engine.connect()
print("Conexión exitosa a la base de datos.")
connection.close()
except Exception as e:
print(f"Error al conectar a la base de datos: {e}")
# Create the ChatOpenAI model
openai = ChatOpenAI(model_name=model_name)
# Create the SQLDatabaseChain with a windowed conversation memory
memory = ConversationBufferWindowMemory(k=5)
db = SQLDatabase.from_uri(db_uri)
db_chain = SQLDatabaseChain.from_llm(openai, db, memory=memory, verbose=False, top_k=5)
# Custom response format
def formato_consulta(question):
return f"""
**Siempre**
Dada una pregunta del usuario:
1. Crea una consulta de SQL Server.
2. Revisa los resultados.
3. Devuelve el dato.
4. Si es necesario, proporciona aclaraciones o cualquier texto en español.
Pregunta del usuario: "{question}"
"""
# Function to run the query
def consulta(input_usuario):
mensaje_formateado = formato_consulta(input_usuario)
resultado = db_chain.run(mensaje_formateado)
return resultado | [] |
2024-01-10 | HombreOso/temporary_aider_Vertex_AI | coders~base_coder.py | #!/usr/bin/env python
import hashlib
import json
import os
import sys
import traceback
from json.decoder import JSONDecodeError
from pathlib import Path, PurePosixPath
import backoff
import git
import openai
import requests
from jsonschema import Draft7Validator
from openai.error import APIError, RateLimitError, ServiceUnavailableError, Timeout
from rich.console import Console, Text
from rich.live import Live
from rich.markdown import Markdown
# ------------------------------
## temporary make aider a package not a library
## after developing needed functionality -> package it again and publish to PyPI
## do not forget to attribute the original library aider from Paul Gauthier
# ------------------------------
## temporary commented out
# from aider import models, prompts, utils
# from aider.commands import Commands
# from aider.repomap import RepoMap
# ------------------------------
# ------------------------------
## temporary added
import models, prompts, utils
from commands import Commands
from repomap import RepoMap
# ------------------------------
## temporary commented out
# from ..dump import dump # noqa: F401
# ------------------------------
class MissingAPIKeyError(ValueError):
pass
class ExhaustedContextWindow(Exception):
pass
def wrap_fence(name):
return f"<{name}>", f"</{name}>"
class Coder:
abs_fnames = None
repo = None
last_aider_commit_hash = None
last_asked_for_commit_time = 0
repo_map = None
functions = None
total_cost = 0.0
num_exhausted_context_windows = 0
@classmethod
def create(
self,
main_model,
edit_format,
io,
**kwargs,
):
from . import (
EditBlockCoder,
EditBlockFunctionCoder,
SingleWholeFileFunctionCoder,
WholeFileCoder,
WholeFileFunctionCoder,
)
if not main_model:
main_model = models.GPT35_16k
if not main_model.always_available:
if not check_model_availability(main_model):
if main_model != models.GPT4:
io.tool_error(
f"API key does not support {main_model.name}, falling back to"
f" {models.GPT35_16k.name}"
)
main_model = models.GPT35_16k
if edit_format is None:
edit_format = main_model.edit_format
if edit_format == "diff":
return EditBlockCoder(main_model, io, **kwargs)
elif edit_format == "whole":
return WholeFileCoder(main_model, io, **kwargs)
elif edit_format == "whole-func":
return WholeFileFunctionCoder(main_model, io, **kwargs)
elif edit_format == "single-whole-func":
return SingleWholeFileFunctionCoder(main_model, io, **kwargs)
elif edit_format == "diff-func-list":
return EditBlockFunctionCoder("list", main_model, io, **kwargs)
elif edit_format in ("diff-func", "diff-func-string"):
return EditBlockFunctionCoder("string", main_model, io, **kwargs)
else:
raise ValueError(f"Unknown edit format {edit_format}")
def __init__(
self,
main_model,
io,
fnames=None,
pretty=True,
show_diffs=False,
auto_commits=True,
dirty_commits=True,
dry_run=False,
map_tokens=1024,
verbose=False,
assistant_output_color="blue",
code_theme="default",
stream=True,
use_git=True,
):
if not fnames:
fnames = []
self.chat_completion_call_hashes = []
self.chat_completion_response_hashes = []
self.verbose = verbose
self.abs_fnames = set()
self.cur_messages = []
self.done_messages = []
self.num_control_c = 0
self.io = io
self.stream = stream
if not auto_commits:
dirty_commits = False
self.auto_commits = auto_commits
self.dirty_commits = dirty_commits
self.assistant_output_color = assistant_output_color
self.code_theme = code_theme
self.dry_run = dry_run
self.pretty = pretty
if pretty:
self.console = Console()
else:
self.console = Console(force_terminal=False, no_color=True)
self.main_model = main_model
self.io.tool_output(f"Model: {main_model.name}")
self.show_diffs = show_diffs
self.commands = Commands(self.io, self)
if use_git:
self.set_repo(fnames)
else:
self.abs_fnames = set([str(Path(fname).resolve()) for fname in fnames])
if self.repo:
rel_repo_dir = self.get_rel_repo_dir()
self.io.tool_output(f"Git repo: {rel_repo_dir}")
else:
self.io.tool_output("Git repo: none")
self.find_common_root()
if main_model.use_repo_map and self.repo and self.gpt_prompts.repo_content_prefix:
self.repo_map = RepoMap(
map_tokens,
self.root,
self.main_model,
io,
self.gpt_prompts.repo_content_prefix,
self.verbose,
)
if self.repo_map.use_ctags:
self.io.tool_output(f"Repo-map: universal-ctags using {map_tokens} tokens")
elif not self.repo_map.has_ctags and map_tokens > 0:
self.io.tool_output(
f"Repo-map: basic using {map_tokens} tokens"
f" ({self.repo_map.ctags_disabled_reason})"
)
else:
self.io.tool_output("Repo-map: disabled because map_tokens == 0")
else:
self.io.tool_output("Repo-map: disabled")
for fname in self.get_inchat_relative_files():
self.io.tool_output(f"Added {fname} to the chat.")
# validate the functions jsonschema
if self.functions:
for function in self.functions:
Draft7Validator.check_schema(function)
if self.verbose:
self.io.tool_output("JSON Schema:")
self.io.tool_output(json.dumps(self.functions, indent=4))
def find_common_root(self):
if len(self.abs_fnames) == 1:
self.root = os.path.dirname(list(self.abs_fnames)[0])
elif self.abs_fnames:
self.root = os.path.commonpath(list(self.abs_fnames))
else:
self.root = os.getcwd()
self.root = utils.safe_abs_path(self.root)
def get_rel_repo_dir(self):
try:
return os.path.relpath(self.repo.git_dir, os.getcwd())
except ValueError:
return self.repo.git_dir
def add_rel_fname(self, rel_fname):
self.abs_fnames.add(self.abs_root_path(rel_fname))
def abs_root_path(self, path):
res = Path(self.root) / path
return utils.safe_abs_path(res)
def set_repo(self, cmd_line_fnames):
if not cmd_line_fnames:
cmd_line_fnames = ["."]
repo_paths = []
for fname in cmd_line_fnames:
fname = Path(fname)
if not fname.exists():
self.io.tool_output(f"Creating empty file {fname}")
fname.parent.mkdir(parents=True, exist_ok=True)
fname.touch()
fname = fname.resolve()
try:
repo_path = git.Repo(fname, search_parent_directories=True).working_dir
repo_path = utils.safe_abs_path(repo_path)
repo_paths.append(repo_path)
except git.exc.InvalidGitRepositoryError:
pass
if fname.is_dir():
continue
self.abs_fnames.add(str(fname))
num_repos = len(set(repo_paths))
if num_repos == 0:
return
if num_repos > 1:
self.io.tool_error("Files are in different git repos.")
return
# https://github.com/gitpython-developers/GitPython/issues/427
self.repo = git.Repo(repo_paths.pop(), odbt=git.GitDB)
self.root = utils.safe_abs_path(self.repo.working_tree_dir)
new_files = []
for fname in self.abs_fnames:
relative_fname = self.get_rel_fname(fname)
tracked_files = set(self.get_tracked_files())
if relative_fname not in tracked_files:
new_files.append(relative_fname)
if new_files:
rel_repo_dir = self.get_rel_repo_dir()
self.io.tool_output(f"Files not tracked in {rel_repo_dir}:")
for fn in new_files:
self.io.tool_output(f" - {fn}")
if self.io.confirm_ask("Add them?"):
for relative_fname in new_files:
self.repo.git.add(relative_fname)
self.io.tool_output(f"Added {relative_fname} to the git repo")
show_files = ", ".join(new_files)
commit_message = f"Added new files to the git repo: {show_files}"
self.repo.git.commit("-m", commit_message, "--no-verify")
commit_hash = self.repo.head.commit.hexsha[:7]
self.io.tool_output(f"Commit {commit_hash} {commit_message}")
else:
self.io.tool_error("Skipped adding new files to the git repo.")
return
# fences are obfuscated so aider can modify this file!
fences = [
("``" + "`", "``" + "`"),
wrap_fence("source"),
wrap_fence("code"),
wrap_fence("pre"),
wrap_fence("codeblock"),
wrap_fence("sourcecode"),
]
fence = fences[0]
def get_abs_fnames_content(self):
for fname in list(self.abs_fnames):
content = self.io.read_text(fname)
if content is None:
relative_fname = self.get_rel_fname(fname)
self.io.tool_error(f"Dropping {relative_fname} from the chat.")
self.abs_fnames.remove(fname)
else:
yield fname, content
def choose_fence(self):
all_content = ""
for _fname, content in self.get_abs_fnames_content():
all_content += content + "\n"
good = False
for fence_open, fence_close in self.fences:
if fence_open in all_content or fence_close in all_content:
continue
good = True
break
if good:
self.fence = (fence_open, fence_close)
else:
self.fence = self.fences[0]
self.io.tool_error(
"Unable to find a fencing strategy! Falling back to:"
" {self.fence[0]}...{self.fence[1]}"
)
return
def get_files_content(self, fnames=None):
if not fnames:
fnames = self.abs_fnames
prompt = ""
for fname, content in self.get_abs_fnames_content():
relative_fname = self.get_rel_fname(fname)
prompt += "\n"
prompt += relative_fname
prompt += f"\n{self.fence[0]}\n"
prompt += content
prompt += f"{self.fence[1]}\n"
return prompt
def get_repo_map(self):
if not self.repo_map:
return
other_files = set(self.get_all_abs_files()) - set(self.abs_fnames)
repo_content = self.repo_map.get_repo_map(self.abs_fnames, other_files)
return repo_content
def get_files_messages(self):
all_content = ""
if self.abs_fnames:
files_content = self.gpt_prompts.files_content_prefix
files_content += self.get_files_content()
else:
files_content = self.gpt_prompts.files_no_full_files
all_content += files_content
repo_content = self.get_repo_map()
if repo_content:
if all_content:
all_content += "\n"
all_content += repo_content
files_messages = [
dict(role="user", content=all_content),
dict(role="assistant", content="Ok."),
]
if self.abs_fnames:
files_messages += [
dict(role="system", content=self.fmt_system_reminder()),
]
return files_messages
def run(self, with_message=None):
while True:
try:
if with_message:
new_user_message = with_message
self.io.user_input(with_message)
else:
new_user_message = self.run_loop()
while new_user_message:
new_user_message = self.send_new_user_message(new_user_message)
if with_message:
return
except KeyboardInterrupt:
self.num_control_c += 1
if self.num_control_c >= 2:
break
self.io.tool_error("^C again or /exit to quit")
except EOFError:
return
def should_dirty_commit(self, inp):
cmds = self.commands.matching_commands(inp)
if cmds:
matching_commands, _, _ = cmds
if len(matching_commands) == 1:
cmd = matching_commands[0]
if cmd in ("/exit", "/commit"):
return
if not self.dirty_commits:
return
if not self.repo:
return
if not self.repo.is_dirty():
return
if self.last_asked_for_commit_time >= self.get_last_modified():
return
return True
def move_back_cur_messages(self, message):
self.done_messages += self.cur_messages
if message:
self.done_messages += [
dict(role="user", content=message),
dict(role="assistant", content="Ok."),
]
self.cur_messages = []
def run_loop(self):
inp = self.io.get_input(
self.root,
self.get_inchat_relative_files(),
self.get_addable_relative_files(),
self.commands,
)
self.num_control_c = 0
if self.should_dirty_commit(inp):
self.io.tool_output("Git repo has uncommitted changes, preparing commit...")
self.commit(ask=True, which="repo_files")
# files changed, move cur messages back behind the files messages
self.move_back_cur_messages(self.gpt_prompts.files_content_local_edits)
if inp.strip():
self.io.tool_output("Use up-arrow to retry previous command:", inp)
return
if not inp:
return
if self.commands.is_command(inp):
return self.commands.run(inp)
self.check_for_file_mentions(inp)
return self.send_new_user_message(inp)
def fmt_system_reminder(self):
prompt = self.gpt_prompts.system_reminder
prompt = prompt.format(fence=self.fence)
return prompt
def send_new_user_message(self, inp):
self.choose_fence()
self.cur_messages += [
dict(role="user", content=inp),
]
main_sys = self.gpt_prompts.main_system
# if self.main_model.max_context_tokens > 4 * 1024:
main_sys += "\n" + self.fmt_system_reminder()
messages = [
dict(role="system", content=main_sys),
]
messages += self.done_messages
messages += self.get_files_messages()
messages += self.cur_messages
if self.verbose:
utils.show_messages(messages, functions=self.functions)
exhausted = False
interrupted = False
try:
interrupted = self.send(messages, functions=self.functions)
except ExhaustedContextWindow:
exhausted = True
except openai.error.InvalidRequestError as err:
if "maximum context length" in str(err):
exhausted = True
else:
raise err
if exhausted:
self.num_exhausted_context_windows += 1
self.io.tool_error("The chat session is larger than the context window!\n")
self.commands.cmd_tokens("")
self.io.tool_error("\nTo reduce token usage:")
self.io.tool_error(" - Use /drop to remove unneeded files from the chat session.")
self.io.tool_error(" - Use /clear to clear chat history.")
return
if self.partial_response_function_call:
args = self.parse_partial_args()
if args:
content = args["explanation"]
else:
content = ""
elif self.partial_response_content:
content = self.partial_response_content
else:
content = ""
if interrupted:
self.io.tool_error("\n\n^C KeyboardInterrupt")
self.num_control_c += 1
content += "\n^C KeyboardInterrupt"
self.io.tool_output()
if interrupted:
self.cur_messages += [dict(role="assistant", content=content)]
return
edited, edit_error = self.apply_updates()
if edit_error:
return edit_error
# TODO: this shouldn't use content, should use self.partial_....
self.update_cur_messages(content, edited)
if edited:
if self.repo and self.auto_commits and not self.dry_run:
saved_message = self.auto_commit()
elif hasattr(self.gpt_prompts, "files_content_gpt_edits_no_repo"):
saved_message = self.gpt_prompts.files_content_gpt_edits_no_repo
else:
saved_message = None
self.move_back_cur_messages(saved_message)
add_rel_files_message = self.check_for_file_mentions(content)
if add_rel_files_message:
return add_rel_files_message
def update_cur_messages(self, content, edited):
self.cur_messages += [dict(role="assistant", content=content)]
def auto_commit(self):
res = self.commit(history=self.cur_messages, prefix="aider: ")
if res:
commit_hash, commit_message = res
self.last_aider_commit_hash = commit_hash
saved_message = self.gpt_prompts.files_content_gpt_edits.format(
hash=commit_hash,
message=commit_message,
)
else:
if self.repo:
self.io.tool_output("No changes made to git tracked files.")
saved_message = self.gpt_prompts.files_content_gpt_no_edits
return saved_message
def check_for_file_mentions(self, content):
words = set(word for word in content.split())
# drop sentence punctuation from the end
words = set(word.rstrip(",.!;") for word in words)
# strip away all kinds of quotes
quotes = "".join(['"', "'", "`"])
words = set(word.strip(quotes) for word in words)
addable_rel_fnames = self.get_addable_relative_files()
mentioned_rel_fnames = set()
fname_to_rel_fnames = {}
for rel_fname in addable_rel_fnames:
if rel_fname in words:
mentioned_rel_fnames.add(str(rel_fname))
fname = os.path.basename(rel_fname)
if fname not in fname_to_rel_fnames:
fname_to_rel_fnames[fname] = []
fname_to_rel_fnames[fname].append(rel_fname)
for fname, rel_fnames in fname_to_rel_fnames.items():
if len(rel_fnames) == 1 and fname in words:
mentioned_rel_fnames.add(rel_fnames[0])
if not mentioned_rel_fnames:
return
for rel_fname in mentioned_rel_fnames:
self.io.tool_output(rel_fname)
if not self.io.confirm_ask("Add these files to the chat?"):
return
for rel_fname in mentioned_rel_fnames:
self.add_rel_fname(rel_fname)
return prompts.added_files.format(fnames=", ".join(mentioned_rel_fnames))
@backoff.on_exception(
backoff.expo,
(
Timeout,
APIError,
ServiceUnavailableError,
RateLimitError,
requests.exceptions.ConnectionError,
),
max_tries=10,
on_backoff=lambda details: print(f"Retry in {details['wait']} seconds."),
)
def send_with_retries(self, model, messages, functions):
kwargs = dict(
model=model,
messages=messages,
temperature=0,
stream=self.stream,
)
if functions is not None:
kwargs["functions"] = self.functions
# we are abusing the openai object to stash these values
if hasattr(openai, "api_deployment_id"):
kwargs["deployment_id"] = openai.api_deployment_id
if hasattr(openai, "api_engine"):
kwargs["engine"] = openai.api_engine
# Generate SHA1 hash of kwargs and append it to chat_completion_call_hashes
hash_object = hashlib.sha1(json.dumps(kwargs, sort_keys=True).encode())
self.chat_completion_call_hashes.append(hash_object.hexdigest())
res = openai.ChatCompletion.create(**kwargs)
return res
def send(self, messages, model=None, silent=False, functions=None):
if not model:
model = self.main_model.name
self.partial_response_content = ""
self.partial_response_function_call = dict()
interrupted = False
try:
completion = self.send_with_retries(model, messages, functions)
if self.stream:
self.show_send_output_stream(completion, silent)
else:
self.show_send_output(completion, silent)
except KeyboardInterrupt:
interrupted = True
if not silent:
if self.partial_response_content:
self.io.ai_output(self.partial_response_content)
elif self.partial_response_function_call:
# TODO: push this into subclasses
args = self.parse_partial_args()
if args:
self.io.ai_output(json.dumps(args, indent=4))
return interrupted
def show_send_output(self, completion, silent):
if self.verbose:
print(completion)
show_func_err = None
show_content_err = None
try:
self.partial_response_function_call = completion.choices[0].message.function_call
except AttributeError as func_err:
show_func_err = func_err
try:
self.partial_response_content = completion.choices[0].message.content
except AttributeError as content_err:
show_content_err = content_err
resp_hash = dict(
function_call=self.partial_response_function_call,
content=self.partial_response_content,
)
resp_hash = hashlib.sha1(json.dumps(resp_hash, sort_keys=True).encode())
self.chat_completion_response_hashes.append(resp_hash.hexdigest())
if show_func_err and show_content_err:
self.io.tool_error(show_func_err)
self.io.tool_error(show_content_err)
raise Exception("No data found in openai response!")
prompt_tokens = completion.usage.prompt_tokens
completion_tokens = completion.usage.completion_tokens
tokens = f"{prompt_tokens} prompt tokens, {completion_tokens} completion tokens"
if self.main_model.prompt_price:
cost = prompt_tokens * self.main_model.prompt_price / 1000
cost += completion_tokens * self.main_model.completion_price / 1000
tokens += f", ${cost:.6f} cost"
self.total_cost += cost
show_resp = self.render_incremental_response(True)
if self.pretty:
show_resp = Markdown(
show_resp, style=self.assistant_output_color, code_theme=self.code_theme
)
else:
show_resp = Text(show_resp or "<no response>")
self.io.console.print(show_resp)
self.io.console.print(tokens)
def show_send_output_stream(self, completion, silent):
live = None
if self.pretty and not silent:
live = Live(vertical_overflow="scroll")
try:
if live:
live.start()
for chunk in completion:
if chunk.choices[0].finish_reason == "length":
raise ExhaustedContextWindow()
try:
func = chunk.choices[0].delta.function_call
# dump(func)
for k, v in func.items():
if k in self.partial_response_function_call:
self.partial_response_function_call[k] += v
else:
self.partial_response_function_call[k] = v
except AttributeError:
pass
try:
text = chunk.choices[0].delta.content
if text:
self.partial_response_content += text
except AttributeError:
pass
if silent:
continue
if self.pretty:
self.live_incremental_response(live, False)
else:
sys.stdout.write(text)
sys.stdout.flush()
finally:
if live:
self.live_incremental_response(live, True)
live.stop()
def live_incremental_response(self, live, final):
show_resp = self.render_incremental_response(final)
if not show_resp:
return
md = Markdown(show_resp, style=self.assistant_output_color, code_theme=self.code_theme)
live.update(md)
def render_incremental_response(self, final):
return self.partial_response_content
def get_context_from_history(self, history):
context = ""
if history:
for msg in history:
context += "\n" + msg["role"].upper() + ": " + msg["content"] + "\n"
return context
def get_commit_message(self, diffs, context):
if len(diffs) >= 4 * 1024 * 4:
self.io.tool_error(
f"Diff is too large for {models.GPT35.name} to generate a commit message."
)
return
diffs = "# Diffs:\n" + diffs
messages = [
dict(role="system", content=prompts.commit_system),
dict(role="user", content=context + diffs),
]
try:
interrupted = self.send(
messages,
model=models.GPT35.name,
silent=True,
)
except openai.error.InvalidRequestError:
self.io.tool_error(
f"Failed to generate commit message using {models.GPT35.name} due to an invalid"
" request."
)
return
commit_message = self.partial_response_content
commit_message = commit_message.strip()
if commit_message and commit_message[0] == '"' and commit_message[-1] == '"':
commit_message = commit_message[1:-1].strip()
if interrupted:
self.io.tool_error(
f"Unable to get commit message from {models.GPT35.name}. Use /commit to try again."
)
return
return commit_message
def get_diffs(self, *args):
if self.pretty:
args = ["--color"] + list(args)
diffs = self.repo.git.diff(*args)
return diffs
def commit(self, history=None, prefix=None, ask=False, message=None, which="chat_files"):
repo = self.repo
if not repo:
return
if not repo.is_dirty():
return
def get_dirty_files_and_diffs(file_list):
diffs = ""
relative_dirty_files = []
for fname in file_list:
relative_fname = self.get_rel_fname(fname)
relative_dirty_files.append(relative_fname)
try:
current_branch_commit_count = len(
list(self.repo.iter_commits(self.repo.active_branch))
)
except git.exc.GitCommandError:
current_branch_commit_count = None
if not current_branch_commit_count:
continue
these_diffs = self.get_diffs("HEAD", "--", relative_fname)
if these_diffs:
diffs += these_diffs + "\n"
return relative_dirty_files, diffs
if which == "repo_files":
all_files = [os.path.join(self.root, f) for f in self.get_all_relative_files()]
relative_dirty_fnames, diffs = get_dirty_files_and_diffs(all_files)
elif which == "chat_files":
relative_dirty_fnames, diffs = get_dirty_files_and_diffs(self.abs_fnames)
else:
raise ValueError(f"Invalid value for 'which': {which}")
if self.show_diffs or ask:
# don't use io.tool_output() because we don't want to log or further colorize
print(diffs)
context = self.get_context_from_history(history)
if message:
commit_message = message
else:
commit_message = self.get_commit_message(diffs, context)
if not commit_message:
commit_message = "work in progress"
if prefix:
commit_message = prefix + commit_message
if ask:
if which == "repo_files":
self.io.tool_output("Git repo has uncommitted changes.")
else:
self.io.tool_output("Files have uncommitted changes.")
res = self.io.prompt_ask(
"Commit before the chat proceeds [y/n/commit message]?",
default=commit_message,
).strip()
self.last_asked_for_commit_time = self.get_last_modified()
self.io.tool_output()
if res.lower() in ["n", "no"]:
self.io.tool_error("Skipped commmit.")
return
if res.lower() not in ["y", "yes"] and res:
commit_message = res
repo.git.add(*relative_dirty_fnames)
full_commit_message = commit_message + "\n\n# Aider chat conversation:\n\n" + context
repo.git.commit("-m", full_commit_message, "--no-verify")
commit_hash = repo.head.commit.hexsha[:7]
self.io.tool_output(f"Commit {commit_hash} {commit_message}")
return commit_hash, commit_message
def get_rel_fname(self, fname):
return os.path.relpath(fname, self.root)
def get_inchat_relative_files(self):
files = [self.get_rel_fname(fname) for fname in self.abs_fnames]
return sorted(set(files))
def get_all_relative_files(self):
if self.repo:
files = self.get_tracked_files()
else:
files = self.get_inchat_relative_files()
return sorted(set(files))
def get_all_abs_files(self):
files = self.get_all_relative_files()
files = [self.abs_root_path(path) for path in files]
return files
def get_last_modified(self):
files = self.get_all_abs_files()
if not files:
return 0
return max(Path(path).stat().st_mtime for path in files)
def get_addable_relative_files(self):
return set(self.get_all_relative_files()) - set(self.get_inchat_relative_files())
def allowed_to_edit(self, path, write_content=None):
full_path = self.abs_root_path(path)
if full_path in self.abs_fnames:
if write_content:
self.io.write_text(full_path, write_content)
return full_path
if not Path(full_path).exists():
question = f"Allow creation of new file {path}?" # noqa: E501
else:
question = f"Allow edits to {path} which was not previously provided?" # noqa: E501
if not self.io.confirm_ask(question):
self.io.tool_error(f"Skipping edit to {path}")
return
if not Path(full_path).exists() and not self.dry_run:
Path(full_path).parent.mkdir(parents=True, exist_ok=True)
Path(full_path).touch()
self.abs_fnames.add(full_path)
# Check if the file is already in the repo
if self.repo:
tracked_files = set(self.get_tracked_files())
relative_fname = self.get_rel_fname(full_path)
if relative_fname not in tracked_files and self.io.confirm_ask(f"Add {path} to git?"):
if not self.dry_run:
self.repo.git.add(full_path)
if write_content:
self.io.write_text(full_path, write_content)
return full_path
def get_tracked_files(self):
if not self.repo:
return []
try:
commit = self.repo.head.commit
except ValueError:
return set()
files = []
for blob in commit.tree.traverse():
if blob.type == "blob": # blob is a file
files.append(blob.path)
# convert to appropriate os.sep, since git always normalizes to /
res = set(str(Path(PurePosixPath(path))) for path in files)
return res
apply_update_errors = 0
def apply_updates(self):
max_apply_update_errors = 2
try:
edited = self.update_files()
except ValueError as err:
err = err.args[0]
self.apply_update_errors += 1
if self.apply_update_errors < max_apply_update_errors:
self.io.tool_error(f"Malformed response #{self.apply_update_errors}, retrying...")
self.io.tool_error(str(err))
return None, err
else:
self.io.tool_error(f"Malformed response #{self.apply_update_errors}, aborting.")
return False, None
except Exception as err:
print(err)
print()
traceback.print_exc()
self.apply_update_errors += 1
if self.apply_update_errors < max_apply_update_errors:
self.io.tool_error(f"Update exception #{self.apply_update_errors}, retrying...")
return None, str(err)
else:
self.io.tool_error(f"Update exception #{self.apply_update_errors}, aborting")
return False, None
self.apply_update_errors = 0
if edited:
for path in sorted(edited):
if self.dry_run:
self.io.tool_output(f"Did not apply edit to {path} (--dry-run)")
else:
self.io.tool_output(f"Applied edit to {path}")
return edited, None
def parse_partial_args(self):
# dump(self.partial_response_function_call)
data = self.partial_response_function_call.get("arguments")
if not data:
return
try:
return json.loads(data)
except JSONDecodeError:
pass
try:
return json.loads(data + "]}")
except JSONDecodeError:
pass
try:
return json.loads(data + "}]}")
except JSONDecodeError:
pass
try:
return json.loads(data + '"}]}')
except JSONDecodeError:
pass
def check_model_availability(main_model):
available_models = openai.Model.list()
model_ids = [model.id for model in available_models["data"]]
return main_model.name in model_ids
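# Usage sketch (not part of the original module): a minimal way to wire up a
# Coder. The `io` object is assumed to provide the InputOutput-style interface
# used above (tool_output, tool_error, get_input, confirm_ask); its import path
# in this fork is hypothetical, so the sketch is left commented out.
#
# from io_utils import InputOutput          # hypothetical import, adapt to this fork
# io = InputOutput(pretty=True)
# coder = Coder.create(
#     main_model=models.GPT35_16k,          # the fallback model used above when GPT-4 is unavailable
#     edit_format=None,                     # None -> use the model's default edit format
#     io=io,
#     fnames=["hello.py"],                  # hypothetical file to edit
#     stream=True,
# )
# coder.run(with_message="Add a docstring to hello.py")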
| [
"\n"
] |
2024-01-10 | AllenInstitute/openai_tools | src~papers_extractor~pdf_parser.py | # This file contains classes to handle PDF files and extract the publication
# text from them. this will include function to clean up the text and remove
# formatting and irrelevant content.
from pdfminer.high_level import extract_text
from pdfminer.layout import LAParams
import os
import logging
from papers_extractor.openai_parsers import OpenaiLongParser
from papers_extractor.database_parser import hash_file
class PdfParser:
"""This class is used to parse a PDF file and extract the text from it.
It also has functions to clean up the text and remove formatting and
irrelevant content.
"""
def __init__(self, pdf_path, cut_bibliography=True, local_database=None,
database_id='auto'):
"""Initializes the class with the path to the PDF file.
Args:
pdf_path (str): The path to the PDF file.
cut_bibliography (bool): Whether to cut the bibliography from the
text or not. Defaults to True.
local_database (LocalDatabase): The local database to use. If set
to None, no database will be used. Defaults to None.
database_id (str): The key to use for the database. If set to auto,
it will be generated from the pdf_path. Defaults to auto.
Returns:
None
"""
self.pdf_path = pdf_path
self.raw_text = None
self.cleaned_text = None
self.cut_bibliography = cut_bibliography
self.database = local_database
self.database_id = database_id
# We load from the database if requested
# This will overwrite the raw text if it exists
if self.database is not None:
# The key in the database is created from the pdf_path
if database_id == 'auto':
# We hash the file to get a unique key
# that is indendent of the path
logging.debug("Hashing the pdf file to get a unique key")
self.database_id = hash_file(pdf_path)
else:
self.database_id = database_id
logging.debug("Database key for pdf file: {}"
.format(self.database_id))
# We load the database if it exists
self.database.load_class_from_database(self.database_id, self)
if self.raw_text is None:
# No need to load the raw text if it was loaded from the database
self.load_raw_text()
def load_raw_text(self):
"""Loads the raw text from the PDF file."""
logging.debug("Loading the raw text from the PDF file")
laparams = LAParams()
text = extract_text(self.pdf_path, laparams=laparams)
# We remove entire lines in text that contains a single character
# This is to remove vertical text, page numbers, etc.
array_text = text.split("\n")
for index, line in enumerate(array_text):
if len(line) == 1:
array_text[index] = ""
text = "\n".join(array_text)
self.raw_text = text
def save_database(self):
"""Saves the pdf data to the database if available."""
if self.database is not None:
logging.debug("Saving database for long text")
self.database.save_class_to_database(self.database_id, self)
def remove_bibliography(self, input_text):
"""We remove the bibliography from the text."""
updated_text = input_text
if "References" in self.raw_text:
updated_text = input_text.split("References")[0]
if "Bibliography" in self.raw_text:
updated_text = input_text.split("Bibliography")[0]
return updated_text
def get_clean_text(self, chunks_path=None):
"""Extracts the text from the PDF file and cleans it up.
Args:
chunks_path (str): The path to the folder where the chunks are
saved. Defaults to None. Used only for debugging.
Returns:
str: The cleaned up text.
"""
if self.cleaned_text:
return self.cleaned_text
else:
            if self.cut_bibliography:
                text_cleaned = self.remove_bibliography(self.raw_text)
            else:
                text_cleaned = self.raw_text
logging.debug("Cleaning up and compressing the text")
openai_prompt = "Clean up formatting, Remove author list, " + \
"Remove references & bibliography, Remove page number, " + \
"Remove headers and Remove footers from the following " + \
"text from a scientific publication. Don't change any " + \
"other words:"
AIParser = OpenaiLongParser(text_cleaned, chunk_size=1400)
if chunks_path is not None:
if not os.path.exists(chunks_path):
os.mkdir(chunks_path)
all_chunks = AIParser.process_chunks_through_prompt(
openai_prompt, save_path=chunks_path
)
self.cleaned_text = "\n".join(all_chunks)
self.save_database()
return self.cleaned_text
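# Usage sketch (not part of the original module), following the pattern of the
# package's tests: the PDF path is a hypothetical example and get_clean_text()
# calls the OpenAI API, so a valid OPENAI_API_KEY must be configured.
if __name__ == "__main__":
    example_parser = PdfParser("example_publication.pdf", cut_bibliography=True)
    print(example_parser.raw_text[:500])        # raw pdfminer extraction
    cleaned = example_parser.get_clean_text()   # chunked LLM clean-up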
| [
"Clean up formatting, Remove author list, Remove references & bibliography, Remove page number, Remove headers and Remove footers from the following text from a scientific publication. Don't change any other words:"
] |
2024-01-10 | AllenInstitute/openai_tools | tests~test_unique_paper_embedding.py | from papers_extractor.long_text import LongText
import logging
import sys
import openai
import os
import numpy as np
# Import the dotenv module to load the environment variables
from dotenv import load_dotenv
# Load the environment variables from the .env file
load_dotenv()
# Replace with your own OpenAI API key or set the OPENAI_API_KEY
openai.api_key = os.environ["OPENAI_API_KEY"]
def test_tsne_plot_longtext():
# This is mostly a smoke test to see if the plot is generated
list_sentences_to_test = [
"The vast expanse of the cosmos never ceases to astonish us.\
The universe is a testament to the beauty and wonder of creation. \
Scientists continue to uncover new celestial phenomena. \
We are reminded of the enormity of existence. \
Our humble place within it.",
]
list_sentences_to_test = "\n".join(list_sentences_to_test)
long_text = LongText(list_sentences_to_test, chunk_size=10)
figure_handle = long_text.plot_tsne_embedding(perplexity=3)
assert figure_handle is not None
def test_average_embedding():
list_sentences_to_test = \
"The vast expanse of the cosmos never ceases to astonish us."
long_text = LongText(list_sentences_to_test, chunk_size=3)
average_embedding = long_text.get_average_embedding()
assert len(average_embedding) == 1536
def test_calculate_embedding():
list_sentences_to_test = \
"The vast expanse of the cosmos never ceases to astonish us."
long_text = LongText(list_sentences_to_test, chunk_size=3)
long_text.calculate_embedding()
assert np.array(long_text.embedding).shape == (4, 1536)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO, stream=sys.stdout, force=True)
test_average_embedding()
test_tsne_plot_longtext()
test_calculate_embedding()
| [] |
2024-01-10 | AllenInstitute/openai_tools | tests~test_multi_paper.py | from papers_extractor.multi_paper import MultiPaper
from papers_extractor.unique_paper import UniquePaper
import logging
import sys
import os
import openai
import tempfile
# Import the dotenv module to load the environment variables
from dotenv import load_dotenv
# Load the environment variables from the .env file
load_dotenv()
# Replace with your own OpenAI API key or set the OPENAI_API_KEY
openai.api_key = os.environ["OPENAI_API_KEY"]
def test_multi_paper_creation():
first_paper = UniquePaper("10.1101/2020.03.03.972133")
second_paper = UniquePaper("10.1016/j.celrep.2023.112434")
multi_paper = MultiPaper([first_paper, second_paper])
assert len(multi_paper.papers_list) == 2
def test_multi_paper_embedding():
first_paper = UniquePaper("10.1101/2020.03.03.972133")
second_paper = UniquePaper("10.1016/j.celrep.2023.112434")
multi_paper = MultiPaper([first_paper, second_paper])
multi_paper.get_embedding_all_papers()
assert len(multi_paper.papers_embedding) == 2
assert len(multi_paper.papers_embedding[0][0]) == 1536
def test_multi_paper_plot():
first_paper = UniquePaper("10.1101/2020.03.03.972133")
second_paper = UniquePaper("10.1016/j.celrep.2023.112434")
multi_paper = MultiPaper([first_paper, second_paper])
multi_paper.get_embedding_all_papers()
# We create a temporary file in pytest tmp folder
with tempfile.TemporaryDirectory() as tmpdir:
path_plot = os.path.join(tmpdir, "test_plot.png")
multi_paper.plot_paper_embedding_map(save_path=path_plot, perplexity=1)
# We check that the file exists
assert os.path.exists(path_plot)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO, stream=sys.stdout, force=True)
test_multi_paper_creation()
test_multi_paper_embedding()
test_multi_paper_plot()
| [] |
2024-01-10 | AllenInstitute/openai_tools | tests~test_unique_paper_database.py | from papers_extractor.long_text import LongText
from papers_extractor.database_parser import LocalDatabase
import logging
import sys
import os
import openai
import pytest
# Import the dotenv module to load the environment variables
from dotenv import load_dotenv
# Load the environment variables from the .env file
load_dotenv()
# Replace with your own OpenAI API key or set the OPENAI_API_KEY
openai.api_key = os.environ["OPENAI_API_KEY"]
# We first test the LongText class for a short text
# We create a shared database for all tests
@pytest.fixture
def local_database():
obj = LocalDatabase()
return obj
def test_content_database(local_database):
longtext = "This is a test"
long_paper_obj = LongText(longtext, local_database=local_database)
long_paper_obj.save_database()
key_list = local_database.get_list_keys()
logging.debug("Keys in the database: {}".format(key_list))
assert long_paper_obj.database_id in key_list
def test_long_paper_key(local_database):
longtext = "This is a test"
long_paper_obj = LongText(longtext, local_database=local_database)
local_key = long_paper_obj.database_id
assert local_key == "a54d88e06612d820bc3be72877c74f257b561b196f34a3e0e" + \
"1af181e8a78e70c146682b7ead12846"
def test_long_paper_saving_database(local_database):
longtext = "This is a test"
long_paper_obj = LongText(longtext, local_database=local_database)
long_paper_obj.save_database()
assert local_database.check_in_database(long_paper_obj.database_id)
def test_custom_long_paper_key(local_database):
longtext = "This is a test"
long_paper_obj = LongText(longtext, local_database=local_database,
database_id="custom_key")
local_key = long_paper_obj.database_id
assert local_key == "custom_key"
long_paper_obj.save_database()
assert local_database.check_in_database(long_paper_obj.database_id)
def test_long_paper_embedding_caching(local_database):
longtext = "This is a test"
long_paper_obj = LongText(longtext, local_database=local_database)
embedding = long_paper_obj.calculate_embedding()
long_paper_obj.save_database()
embedding = long_paper_obj.calculate_embedding()
databased_data = local_database.load_from_database(
long_paper_obj.database_id)
assert local_database.check_in_database(long_paper_obj.database_id)
assert "embedding" in databased_data
assert databased_data['embedding'] == embedding
def test_long_paper_summarizing_caching(local_database):
longtext = "This is a test"
long_paper_obj = LongText(longtext, local_database=local_database)
summary = long_paper_obj.summarize_longtext_into_chunks()
long_paper_obj.save_database()
summary = long_paper_obj.summarize_longtext_into_chunks()
databased_data = local_database.load_from_database(
long_paper_obj.database_id)
assert "summary" in databased_data
assert databased_data['summary'] == summary
def test_reset_database(local_database):
longtext = "This is a test"
long_paper_obj = LongText(longtext, local_database=local_database)
long_paper_obj.save_database()
check_in = local_database.check_in_database(long_paper_obj.database_id)
assert check_in
long_paper_obj.reset_database()
check_in = local_database.check_in_database(long_paper_obj.database_id)
assert not check_in
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO, stream=sys.stdout, force=True)
local_database_fixture = LocalDatabase()
test_long_paper_key(local_database_fixture)
test_long_paper_saving_database(local_database_fixture)
test_custom_long_paper_key(local_database_fixture)
test_long_paper_embedding_caching(local_database_fixture)
test_long_paper_summarizing_caching(local_database_fixture)
test_content_database(local_database_fixture)
test_reset_database(local_database_fixture)
| [] |
2024-01-10 | AllenInstitute/openai_tools | src~papers_extractor~long_text.py | # This file contains classes to handle long texts that are coming from
# scientific papers. This will include function to make summaries and comments
# of the paper using various deep learning models.
import logging
from papers_extractor.openai_parsers import OpenaiLongParser
from papers_extractor.database_parser import hash_variable
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import numpy as np
class LongText:
"""This class is used to process a long text through Large Language Models.
It will be processed in chunks of a given size if needed."""
def __init__(self, longtext, chunk_size=1400, local_database=None,
database_id='auto'):
"""Initializes the class with the long text.
Args:
longtext (str): The long text to summarize.
chunk_size (int): The size of the chunks in tokens to use for the
chunks. Defaults to 1400.
local_database (LocalDatabase): The local database to use.
If set to None, no database will be used. Defaults to None.
database_id (str): The key to use for the database. If set to auto,
it will be generated from the long text and the chunk size.
We recommend using the DOI of the paper. Defaults
to auto.
Returns:
None
"""
self.longtext = longtext
self.chunk_size = chunk_size
self.database = local_database
self.summary = None
self.embedding = None
self.database_id = database_id
if self.database is not None:
# The key in the database is created from the long text and
# the chunk size unless provided to the class
if database_id == 'auto':
self.database_id = hash_variable(self.longtext) + \
hash_variable(self.chunk_size)
else:
self.database_id = database_id
logging.debug("Database key for long text: {}"
.format(self.database_id))
self.database.load_class_from_database(self.database_id, self)
def reset_database(self):
"""Resets the database for the long text if available."""
if self.database is not None:
logging.debug("Resetting database for long text")
self.database.reset_key(self.database_id)
def save_database(self):
"""Saves the long text to the database if available."""
if self.database is not None:
logging.debug("Saving long text to database")
self.database.save_class_to_database(self.database_id, self)
def get_average_embedding(self):
"""Returns the average embedding of the long text."""
if self.embedding is None:
logging.debug("Embedding not available, calculating it")
self.calculate_embedding()
return np.mean(self.embedding, axis=0)
def plot_tsne_embedding(self, save_figure_path=None,
perplexity=5, random_state=42):
"""This function plots the t-SNE embeddings of the long text.
Args:
save_figure_path (str): The path to save the figure to. If set to
None, the figure will not be saved. Defaults to None.
perplexity (int): The perplexity to use for the t-SNE. Defaults to
5.
random_state (int): The random state to use for the t-SNE.
Defaults to 42.
Returns:
fig (matplotlib.pyplot.figure): The figure of the t-SNE plot.
"""
if self.embedding is None:
logging.debug("Embedding not available, calculating it")
self.calculate_embedding()
matrix = np.array(self.embedding)
# Create a t-SNE model and transform the data
tsne = TSNE(n_components=2,
perplexity=perplexity,
random_state=random_state,
init='random',
learning_rate=200)
logging.debug("Fitting t-SNE")
vis_dims = tsne.fit_transform(matrix)
x = [x for x, y in vis_dims]
y = [y for x, y in vis_dims]
fig = plt.figure(figsize=(10, 10))
plt.scatter(x, y)
# Add text next to each dot
for i, text in enumerate(self.chunks):
plt.text(x[i] + 1, y[i] + 1, text, fontsize=7)
plt.title("t-SNE of the embeddings")
if save_figure_path is not None:
plt.savefig(save_figure_path)
return fig
def calculate_embedding(self, parser="GPT"):
"""This function extracts semantic embeddings in chunks
from the long text.
Args:
parser (str): The parser to use to extract the embeddings.
Defaults to GPT.
Returns:
embedding (list): The list of embeddings for each chunk.
"""
# We check if the embedding is already available
if self.embedding is not None:
return self.embedding
else:
logging.debug("Calculating embedding for long text")
if parser == "GPT":
local_openai = OpenaiLongParser(self.longtext,
chunk_size=self.chunk_size)
self.embedding, self.chunks = \
local_openai.process_chunks_through_embedding()
self.save_database()
return self.embedding
else:
logging.ERROR("Currently only GPT is supported for embedding")
def summarize_longtext_into_chunks(
self,
final_chunk_length=2,
save_path_summary=None,
max_concurrent_calls=10):
"""This function summarizes a long text into chunks.
Args:
final_chunk_length (int): The final number of chunks to have.
Defaults to 2.
save_path_summary (str): The path to save the summary.
Defaults to None.
max_concurrent_calls (int): The maximum number of concurrent calls
to the Openai API. Defaults to 10.
Returns:
final_text (list): A list of the summary for each chunk.
"""
openai_prompt = "Write a long, very detailed summary for a " + \
"technical expert of the following paragraph, from" + \
" a paper, refering to the text as -This publication-:"
# We check if the summary is already available
if self.summary is None:
current_text = self.longtext
# we initialize the number of chunks to a large number
nb_chunks = final_chunk_length + 1
logging.debug("Summarizing the text in chunks")
while True:
local_openai = OpenaiLongParser(
current_text,
chunk_size=self.chunk_size,
max_concurrent_calls=max_concurrent_calls)
nb_chunks = len(local_openai.chunks)
if nb_chunks <= final_chunk_length:
break
logging.debug(f"Summarizing chunks:{nb_chunks}")
summarized_chunks = local_openai.process_chunks_through_prompt(
openai_prompt, temperature=0, presence_penalty=-0.5
)
current_text = "\n".join(summarized_chunks)
# This is in case the text is too long to fit in a single chunk
final_text = current_text
# We can afford to clean up if the text is not too long
# Here the chunk size is fixed to maximize the number of tokens
final_long = OpenaiLongParser(
current_text,
chunk_size=2000,
max_concurrent_calls=max_concurrent_calls)
if final_long.num_chunks == 1:
logging.debug("Cleaning up the summary")
prompt = "Can you clean up this publication summary to " + \
"make it flow logically. Keep this summary very " + \
"technical and detailed:"
final_text = final_long.process_chunks_through_prompt(
prompt, temperature=0, presence_penalty=-0.5
)
# We save the summary in a txt file
if save_path_summary:
with open(save_path_summary, "w") as f:
f.write("/n".join(final_text))
self.summary = final_text
self.save_database()
return self.summary
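# Usage sketch (not part of the original module): summarize and embed a text.
# The sample sentence is borrowed from the package's tests; the calls below hit
# the OpenAI API, so a valid OPENAI_API_KEY must be configured.
if __name__ == "__main__":
    sample = "The vast expanse of the cosmos never ceases to astonish us."
    long_text = LongText(sample, chunk_size=1400)
    summary = long_text.summarize_longtext_into_chunks(final_chunk_length=2)
    mean_embedding = long_text.get_average_embedding()  # averaged 1536-d vector
    print(summary, mean_embedding.shape)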
| [
"Can you clean up this publication summary to make it flow logically. Keep this summary very technical and detailed:",
"Write a long, very detailed summary for a technical expert of the following paragraph, from a paper, refering to the text as -This publication-:"
] |
2024-01-10 | AllenInstitute/openai_tools | src~papers_extractor~unique_paper.py | # This file contains classes to handle long texts that are coming from
# scientific papers. This will include function to make summaries and comments
# of the paper using various deep learning models.
import logging
from papers_extractor.openai_parsers import OpenaiLongParser
from papers_extractor.arxiv_parser import get_doi_from_arxiv_id
import numpy as np
import re
import datetime
import requests
from bs4 import BeautifulSoup
from habanero import Crossref
# The following methods are used to check if an identifier is a DOI, a PMID or
# an arXiv ID. They are not perfect but should work for most cases.
def check_doi(identifier_string):
""" Checks if the identifier is a DOI.
Args:
identifier_string (str): The identifier to check.
Returns:
bool: True if the identifier is a DOI, False otherwise.
"""
# DOIs start with '10.' and contain a slash
if identifier_string.startswith('10.') and '/' in identifier_string:
return True
return False
def check_pmid(identifier_string):
""" Checks if the identifier is a PMID.
Args:
identifier_string (str): The identifier to check.
Returns:
bool: True if the identifier is a PMID, False otherwise.
"""
# PMIDs are numeric that have increasing numbers depending on when
# the paper was recorded in PubMed, ie. the first paper has PMID 1
if identifier_string.isdigit():
return True
return False
def check_arxiv(identifier_string):
""" Checks if the identifier is an arXiv ID.
Args:
identifier_string (str): The identifier to check.
Returns:
bool: True if the identifier is an arXiv ID, False otherwise.
"""
# arXiv IDs generally have the format 'category/year.number'
if re.match(r'[a-z\-]+(\.[a-z\-]+)?/\d{4}\.\d{4,5}', identifier_string):
return True
return False
def identify(identifier_string):
""" Identifies the type of identifier.
Args:
identifier_string (str): The identifier to check.
Returns:
str: The type of identifier. This can be 'DOI', 'PMID', 'arXiv ID' or
'Unknown'.
"""
if check_doi(identifier_string):
return 'DOI'
elif check_pmid(identifier_string):
return 'PMID'
elif check_arxiv(identifier_string):
return 'arXiv ID'
else:
return 'Unknown'
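# Quick illustration (not part of the original module) of the identifier
# heuristics above. The DOI is the bioRxiv DOI used in the package's tests; the
# PMID and arXiv values are made-up, and the arXiv one follows the
# 'category/year.number' style that this regex expects.
#
# identify("10.1101/2020.03.03.972133")   -> 'DOI'
# identify("32238593")                    -> 'PMID'
# identify("q-bio.nc/2020.03031")         -> 'arXiv ID'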
def check_string_length(string, min_length, max_length):
""" Checks if the string is of the right length.
Args:
string (str): The string to check.
min_length (int): The minimum length of the string.
max_length (int): The maximum length of the string.
Returns:
bool: True if the string is of the right length, False otherwise.
"""
if string is None:
return False
if len(string) >= min_length and len(string) <= max_length:
return True
return False
def get_doi_from_pmid(pmid):
"""This function extracts the PMID from the DOI link."""
base_url = ("https://eutils.ncbi.nlm.nih.gov/" +
"entrez/eutils/esearch.fcgi")
params = {
"db": "pubmed",
"term": f"{pmid}[PMID]",
"retmode": "xml"
}
response = requests.get(base_url, params=params)
response.raise_for_status()
soup = BeautifulSoup(response.content, "xml")
doi_tag = soup.find("DOI")
if doi_tag:
return doi_tag.text
else:
logging.warning(f"Could not find DOI for PMID {pmid}")
return None
def get_pmid_from_doi(doi):
"""This function extracts the PMID from the DOI link."""
base_url = ("https://eutils.ncbi.nlm.nih.gov/"
"entrez/eutils/esearch.fcgi")
params = {
"db": "pubmed",
"term": f"{doi}[DOI]",
"retmode": "xml"
}
response = requests.get(base_url, params=params)
response.raise_for_status()
soup = BeautifulSoup(response.content, "xml")
pmid_tag = soup.find("Id")
if pmid_tag:
pmid = pmid_tag.text
return pmid
else:
pmid = None
logging.warning(f"Could not find PMID for DOI {doi}")
return pmid
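# Usage sketch (not part of the original module): the two helpers above map
# between DOIs and PMIDs via the NCBI E-utilities API, so they need network
# access and only resolve papers indexed in PubMed. The DOI below is the
# Cell Reports DOI used in the package's tests; kept commented out to avoid
# network calls at import time.
#
# pmid = get_pmid_from_doi("10.1016/j.celrep.2023.112434")
# doi = get_doi_from_pmid(pmid) if pmid else None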
class UniquePaper:
"""This class is used to hold the information of a given paper.
"""
def __init__(self, identifier, local_database=None):
"""Initializes a unique paper with the identifier.
Args:
identifier (str): The identifier of the paper. This can be a DOI,
a PMID, an arXiv ID or another identifier that can be used to
retrieve the paper. If the identifier is a PMID or an arXiv ID,
the class will try to retrieve the DOI from the identifier.
local_database (LocalDatabase): The local database to use to
retrieve the paper. If None, the paper will not be retrieved from
the database.
Returns:
None
"""
self.identifier = identifier
identifier_database = identify(identifier)
if identifier_database == 'DOI':
self.doi = identifier
self.pmid = None
self.arxiv = None
elif identifier_database == 'PMID':
# We try to retrieve the DOI from the PMID
self.doi = get_doi_from_pmid(identifier)
self.pmid = identifier
self.arxiv = None
elif identifier_database == 'arXiv ID':
self.doi = get_doi_from_arxiv_id(identifier)
self.pmid = None
self.arxiv = identifier
else:
            raise ValueError(
                "The identifier {} is not a recognized publication "
                "identifier.".format(identifier))
self.database = local_database
# These are all the standard fields a paper could have
# in the context of litterature review
self.title = None
self.title_embedding = None
self.authors = None
self.abstract = None
self.abstract_embedding = None
self.journal = None
self.year = None
self.pdf_url = None
self.pdf_hash = None
self.fulltext = None
self.fulltext_embedding = None
self.longsummary = None
self.longsummary_embedding = None
self.nb_citations = None
self.database_id = None
# These are fields constructed through API calls
self.metadata_crossref = None
self.metadata_pubmed = None
# This is today's date
self.last_update = datetime.datetime.now()
# We attempt to retrieve the paper from the database if available
if self.database is not None:
local_id = self.get_database_id()
logging.debug(f"Database key for this paper: {local_id}")
self.database.load_class_from_database(local_id, self)
# We define the following methods to set the fields of the paper
# This is important to make sure the fields are set correctly
def set_abstract(self, abstract):
"""Sets the abstract of the paper.
Args:
abstract (str): The abstract of the paper.
Returns:
None
"""
if self.abstract is None:
if not check_string_length(abstract, 10, 10000):
raise ValueError(
(f"The abstract for {self.identifier} is not the " +
"right length."))
self.abstract = abstract
else:
logging.debug(f"The abstract for {self.doi} already set.")
def set_title(self, title):
"""Sets the title of the paper.
Args:
title (str): The title of the paper.
Returns:
None
"""
if self.title is None:
if not check_string_length(title, 10, 1000):
raise ValueError("The title is not the right length.")
self.title = title
else:
logging.debug(f"The title for {self.doi} already set.")
def set_authors(self, authors):
"""Sets the authors of the paper.
Args:
authors (list): The authors of the paper.
Returns:
None
"""
if self.authors is None:
if not isinstance(authors, list):
raise ValueError("The authors are not a list.")
if len(authors) == 0:
raise ValueError("The authors list is empty.")
for author in authors:
if not check_string_length(author, 5, 100):
raise ValueError("An author is not the right length.")
self.authors = authors
else:
logging.debug(f"The authors for {self.doi} already set.")
def set_journal(self, journal):
"""Sets the journal of the paper.
Args:
journal (str): The journal of the paper.
Returns:
None
"""
if self.journal is None:
if not check_string_length(journal, 2, 200):
raise ValueError("The journal is not the right length.")
self.journal = journal
else:
logging.debug(f"The journal for {self.doi} already set.")
def set_year(self, year):
"""Sets the year of the paper.
Args:
year (int): The year of the paper.
Returns:
None
"""
if self.year is None:
if not isinstance(year, int):
raise ValueError("The year is not an integer.")
if year < 1900 or year > 2100:
raise ValueError("The year is not in the right range.")
self.year = year
else:
logging.debug(f"The year for {self.doi} already set.")
def set_fulltext(self, fulltext):
"""Sets the fulltext of the paper.
Args:
fulltext (str): The fulltext of the paper.
Returns:
None
"""
if self.fulltext is None:
if not check_string_length(fulltext, 100, 1000000):
raise ValueError("The fulltext is not the right length.")
self.fulltext = fulltext
else:
logging.debug(f"The fulltext for {self.doi} already set.")
def set_longsummary(self, longsummary):
"""Sets the longsummary of the paper.
Args:
longsummary (str): The longsummary of the paper.
Returns:
None
"""
if self.longsummary is None:
if not check_string_length(longsummary, 100, 1000000):
raise ValueError("The longsummary is not the right length.")
self.longsummary = longsummary
else:
logging.debug(f"The longsummary for {self.doi} already set.")
def set_nb_citations(self, nb_citations):
"""Sets the number of citations of the paper.
Args:
nb_citations (int): The number of citations of the paper.
Returns:
None
"""
if not isinstance(nb_citations, int):
raise ValueError("The number of citations is not an integer.")
if nb_citations < 0:
raise ValueError("The number of citations is negative.")
self.nb_citations = nb_citations
def get_database_id(self):
"""Returns the database id of the paper. We favor the DOI as it is
the most reliable identifier, followed by the PMID and the arXiv ID.
Args:
None
Returns:
str: The database id of the paper.
"""
if self.database_id is not None:
return self.database_id
if self.doi is not None:
self.database_id = self.doi
elif self.pmid is not None:
self.database_id = self.pmid
elif self.arxiv is not None:
self.database_id = self.arxiv
else:
raise ValueError("The paper does not have any identifier.")
return self.database_id
def reset_database(self):
"""Resets the database for the long paper if available."""
if self.database is not None:
logging.debug("Resetting database for long paper")
self.database.reset_key(self.database_id)
def save_database(self):
"""Saves the long paper to the database if available."""
if self.database is not None:
logging.debug("Saving long paper to database")
self.last_update = datetime.datetime.now()
self.database.save_class_to_database(self.database_id, self)
def get_label_string(self, format='short'):
"""Returns the label string of the paper. This is a standardized strin
that contains the title, authors and year of the paper as well as more
information depending on the requested format.\
Args:
format (str): The format of the label string. Can be 'xshort',
'short', 'medium', 'long', 'xlong'. Defaults to 'short'.
Returns:
str: The label string of the paper.
If the format is 'xshort', the label string will be first author et
al, publication year.
If the format is 'short', the label string will be first author et
al, publication year, journal.
If the format is 'medium', the label string will be first author et
al, publication year, journal, title.
If the format is 'long', the label string will be all authors,
publication year, journal, title.
If the format is 'xlong', the label string will be all authors
, publication year, journal, title, abstract."""
first_author_lastname = self.get_first_author()
local_year = self.get_year()
if self.year is None:
raise ValueError("The paper does not have any year.")
# We only create those for the format 'long' and 'xlong'
if format == 'long' or format == 'xlong':
# We merge all authors into a single string
local_authors = self.get_authors()
local_authors = ', '.join(local_authors)
if local_authors is None:
raise ValueError("The paper does not have any author.")
if format != 'xshort':
local_journal = self.get_journal()
if self.journal is None:
raise ValueError("The paper does not have any journal.")
if format == 'medium' or format == 'long' or format == 'xlong':
local_title = self.get_title()
if self.title is None:
raise ValueError("The paper does not have any title.")
if format == 'xlong':
local_abstract = self.get_abstract()
if self.abstract is None:
raise ValueError("The paper does not have any abstract.")
        if format == 'xshort':
            label_string = f"{first_author_lastname} et al., {local_year}"
        elif format == 'short':
            label_string = (f"{first_author_lastname} et al., {local_year}, "
                            f"{local_journal}")
        elif format == 'medium':
            label_string = (f"{first_author_lastname} et al., {local_year} "
                            f"{local_journal}\n{local_title}")
        elif format == 'long':
            label_string = (f"{local_authors}\n{local_year}, {local_journal}\n"
                            f"{local_title}")
        elif format == 'xlong':
            label_string = (f"{local_authors}\n{local_year}\n{local_journal}, "
                            f"{local_title}\n{local_abstract}")
else:
raise ValueError("The format {} is not recognized.".format(format))
return label_string
def get_average_embedding(self, field="abstract"):
"""Returns the average embedding of the long paper.
Args:
field (str): The field to extract the embeddings from. Can be
'abstract', 'title', 'fulltext', 'longsummary'. Defaults to
'abstract'.
Returns:
embedding: The averaged embedding of the field.
"""
local_embedding = self.calculate_embedding(field=field)
return np.mean(local_embedding, axis=0)
def calculate_embedding(self, parser="ada2", field="abstract"):
"""This function extracts semantic embeddings in chunks
from the long text.
Args:
parser (str): The parser to use to extract the embeddings.
Defaults to ada2.
field (str): The field to extract the embeddings from. Can be
'abstract', 'title', 'fulltext', 'longsummary'. Defaults to
'abstract'.
Returns:
embedding (list): The list of embeddings for each chunk.
"""
if field == "abstract":
local_text = self.get_abstract()
elif field == "title":
local_text = self.get_title()
elif field == "fulltext":
# These are not available yet
local_text = None
elif field == "longsummary":
# These are not available yet
local_text = None
if local_text is None:
            raise ValueError(
                f"The field {field} is not available for this paper.")
# We check if the embedding of that field is already available
local_embedding = getattr(self, f"{field}_embedding")
if local_embedding is not None:
logging.debug("Embedding already available")
return local_embedding
else:
logging.debug("Calculating embedding for field {}".format(field))
if parser == "ada2":
local_openai = OpenaiLongParser(local_text)
local_embedding, _ = \
local_openai.process_chunks_through_embedding()
setattr(self, f"{field}_embedding", local_embedding)
self.save_database()
return local_embedding
else:
logging.ERROR("Currently only ada2 is supported for embedding")
def get_metadata_from_crossref(self):
if self.metadata_crossref:
return self.metadata_crossref
else:
if self.doi is None:
logging.warning(
f"Could not find crossref metadata for DOI {self.doi}")
return None
cr = Crossref()
try:
self.metadata_crossref = cr.works(ids=self.doi)
except requests.exceptions.HTTPError as e:
logging.error(f"HTTPError for DOI {self.doi}: {e}")
return None
return self.metadata_crossref
def get_metadata_from_pubmed(self):
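        """Fetch PubMed metadata (title, journal, authors, publication date,
        abstract) for this paper via the NCBI efetch API, caching the result."""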
if self.metadata_pubmed:
return self.metadata_pubmed
else:
pmid = self.get_pmid()
if pmid == 0:
self.metadata_pubmed = None
logging.warning(
f"Could not find pubmed metadata for DOI {self.doi}")
return self.metadata_pubmed
else:
base_url = ("https://eutils.ncbi.nlm.nih.gov/"
"entrez/eutils/efetch.fcgi")
params = {
"db": "pubmed",
"retmode": "xml",
"id": pmid
}
response = requests.get(base_url, params=params)
response.raise_for_status()
soup = BeautifulSoup(response.content, "xml")
metadata = {}
title_tag = soup.find("ArticleTitle")
if title_tag:
metadata["title"] = title_tag.text
journal_tag = soup.find("Title")
if journal_tag:
metadata["journal"] = journal_tag.text
authors = []
for author in soup.find_all("Author"):
author_name = (f"{author.find('ForeName').text} \
{author.find('LastName').text}")
authors.append(author_name)
metadata["authors"] = authors
pub_date_tag = soup.find("PubDate")
if pub_date_tag:
pub_year = pub_date_tag.find("Year")
pub_month = pub_date_tag.find("Month")
pub_day = pub_date_tag.find("Day")
metadata["pub_date"] = (
f"{pub_year.text if pub_year else ''}-\
{pub_month.text if pub_month else ''}-\
{pub_day.text if pub_day else ''}"
)
abstract_tag = soup.find("AbstractText")
if abstract_tag:
metadata["abstract"] = abstract_tag.text
self.metadata_pubmed = metadata
return self.metadata_pubmed
def get_pmid(self):
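        """Return the paper's PubMed ID, resolving it from the DOI on first use."""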
if self.pmid:
return self.pmid
else:
if self.doi is None:
logging.warning(
f"Could not find pubmed metadata for DOI {self.doi}")
return None
self.pmid = get_pmid_from_doi(self.doi)
return self.pmid
def get_title(self):
"""This function extracts the title from the metadata."""
if self.title:
return self.title
else:
metadata = self.get_metadata_from_crossref()
if "message" in metadata and "title" in metadata["message"]:
title = metadata['message']['title'][0]
else:
# We try through the pubmed API
metadata = self.get_metadata_from_pubmed()
if metadata and "title" in metadata:
title = metadata["title"]
else:
logging.warning(f"Could not find title for DOI {self.doi}")
title = None
self.title = title
return self.title
def get_nb_citations(self, force_update=False):
"""This function extracts the citation number from the metadata."""
if self.nb_citations and not force_update:
return self.nb_citations
else:
metadata = self.get_metadata_from_crossref()
if (metadata is not None and "message" in metadata
and "is-referenced-by-count" in metadata["message"]):
nb_citations = metadata['message']['is-referenced-by-count']
else:
logging.warning(
f"Could not find citation number for DOI {self.doi}")
nb_citations = None
self.nb_citations = nb_citations
return self.nb_citations
def get_abstract(self):
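        """Return the paper's abstract, trying Crossref first and falling back
        to PubMed."""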
if self.abstract:
return self.abstract
else:
# Try fetching abstract from CrossRef API
metadata = self.get_metadata_from_crossref()
if "message" in metadata and "abstract" in metadata["message"]:
self.abstract = metadata['message']['abstract']
return self.abstract
else:
logging.warning(
(f"Could not find abstract for DOI {self.doi} " +
"through CrossRef")
)
logging.warning("Trying to fetch abstract from Pubmed API")
# Try fetching abstract from Pubmed API
metadata = self.get_metadata_from_pubmed()
if metadata and "abstract" in metadata:
self.abstract = metadata["abstract"]
return self.abstract
else:
logging.warning(
(f"Could not find abstract for DOI {self.doi} " +
"through Pubmed")
)
logging.warning(f"abstract not found for DOI {self.doi}")
self.abstract = None
return self.abstract
def get_authors(self):
"""This function extracts the authors from the metadata."""
if self.authors:
return self.authors
else:
metadata = self.get_metadata_from_crossref()
if "message" in metadata and "author" in metadata["message"]:
authors = metadata['message']['author']
# We merge the given and family names as Crossref separates
# them
for index, author in enumerate(authors):
author = f"{author['given']} {author['family']}"
authors[index] = author
else:
# We try through the pubmed API
metadata = self.get_metadata_from_pubmed()
if metadata and "authors" in metadata:
authors = metadata["authors"]
else:
logging.warning(
f"Could not find authors for DOI {self.doi}")
authors = None
self.authors = authors
return self.authors
def get_first_author(self):
"""This function extracts the first author last name
from the metadata."""
authors = self.get_authors()
if authors:
return authors[0].split(" ")[-1]
else:
logging.warning(f"Could not find first author for DOI {self.doi}")
return None
def get_pdf_url(self):
"""This function extracts the pdf link from the metadata."""
if self.pdf_url:
return self.pdf_url
else:
metadata = self.get_metadata_from_crossref()
# If published is biorxiv, then we use the
# biorxiv api to get the pdf link
if metadata['message']['publisher'] == ("Cold Spring "
"Harbor Laboratory"):
url = 'https://www.biorxiv.org/content/' + self.doi
response = requests.get(url)
if response.status_code == 200:
soup = BeautifulSoup(response.text, 'html.parser')
pdf_link = soup.find(
'a', class_='article-dl-pdf-link')['href']
self.pdf_url = f"https://www.biorxiv.org{pdf_link}"
return self.pdf_url
else:
                    logging.warning(
                        "Error: Unable to fetch biorxiv webpage. "
                        f"Status code {response.status_code}")
self.pdf_url = None
return self.pdf_url
else:
full_text_links = metadata['message'].get('link', [])
for link in full_text_links:
content_type = link.get('content-type', '')
if content_type == 'application/pdf':
pdf_url = link['URL']
self.pdf_url = pdf_url
return self.pdf_url
else:
logging.warning(f"No pdf link found for DOI {self.doi}")
self.pdf_url = None
return self.pdf_url
def get_year(self):
"""This function extracts the year from the metadata."""
if self.year:
return self.year
else:
metadata = self.get_metadata_from_crossref()
if "message" in metadata and "created" in metadata["message"]:
year = metadata['message']['created']['date-parts'][0][0]
else:
# We try through the pubmed API
metadata = self.get_metadata_from_pubmed()
if metadata and "pub_date" in metadata:
year = metadata["pub_date"].split("-")[0]
else:
logging.warning(f"Could not find year for DOI {self.doi}")
year = None
self.year = year
return self.year
def get_journal(self):
"""This function extracts the journal from the metadata."""
if self.journal is None:
# We try through the crossref API
metadata = self.get_metadata_from_crossref()
if ("message" in metadata
and "container-title" in metadata["message"]
and len(metadata["message"]["container-title"]) > 0):
journal = metadata['message']['container-title'][0]
else:
# We try through the pubmed API
metadata = self.get_metadata_from_pubmed()
if metadata and "journal" in metadata:
journal = metadata["journal"]
else:
logging.warning(
f"Could not find journal for DOI {self.doi}")
journal = None
# We do some cleaning of the journal name
if "bioRxiv" in journal:
journal = "bioRxiv"
self.journal = journal
return self.journal
| [] |
2024-01-10 | AllenInstitute/openai_tools | tests~test_unique_paper.py | from papers_extractor.long_text import LongText
import logging
import sys
import os
import openai
# Import the dotenv module to load the environment variables
from dotenv import load_dotenv
# Load the environment variables from the .env file
load_dotenv()
# Replace with your own OpenAI API key or set the OPENAI_API_KEY environment variable
openai.api_key = os.environ["OPENAI_API_KEY"]
def test_creating_longtext_short_text():
longtext = "This is a test"
long_paper_obj = LongText(longtext)
assert long_paper_obj.longtext == longtext
def test_summarizing_single_chunk_output():
longtext = "This is a test."
long_paper_obj = LongText(longtext)
summary = long_paper_obj.summarize_longtext_into_chunks(
final_chunk_length=1,
save_path_summary=None,
max_concurrent_calls=1)
assert len(summary[0]) > 0
assert len(summary) == 1
def test_summarizing_double_chunk_output():
longtext = "This is a test"
long_paper_obj = LongText(longtext)
summary = long_paper_obj.summarize_longtext_into_chunks(
final_chunk_length=2,
save_path_summary=None,
max_concurrent_calls=1)
assert len(summary[0]) > 0
assert len(summary) == 1
def test_embedding_chunks():
longtext = "This is a test. It contains two sentences."
long_paper_obj = LongText(longtext, chunk_size=4)
assert long_paper_obj.longtext == longtext
embeddings = long_paper_obj.calculate_embedding()
assert len(embeddings[0]) == 1536
assert len(embeddings) == 3
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO, stream=sys.stdout, force=True)
test_creating_longtext_short_text()
test_summarizing_single_chunk_output()
    test_summarizing_double_chunk_output()
test_embedding_chunks()
| [] |
2024-01-10 | saiyan86/codeRating | score.py | import os
import openai
import json
from flask import Flask, request, jsonify, render_template
app = Flask(__name__)
openai.api_key = os.environ["OPENAI_API_KEY"]
@app.route('/')
def index():
return render_template('index.html')
@app.route('/rate_code', methods=['POST'])
def rate_code():
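    """Rate the posted code snippet with the OpenAI completion API and return
    correctness, readability, algorithm and total scores as JSON."""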
code_snippet = request.json.get('code_snippet')
if not code_snippet:
return jsonify({"error": "No code snippet provided"}), 400
prompt = f"""
Evaluate the following code snippet based on:
1. Correctness (0-40): Does the code run without errors and produce the expected output?
2. Readability (0-30): Is the code easy to read and understand?
3. Algorithm (0-30): Is the algorithm used efficient and well-implemented?
Code snippet:
{code_snippet}
Correctness:
Readability:
Algorithm:
"""
response = openai.Completion.create(
engine="text-davinci-002",
prompt=prompt,
max_tokens=10,
n=1,
stop=None,
temperature=0.5,
)
response_text = response.choices[0].text.strip()
try:
correctness_str, readability_str, algorithm_str = response_text.split("\n", 2)
correctness = int(correctness_str.split(":")[-1].strip())
readability = int(readability_str.split(":")[-1].strip())
algorithm = int(algorithm_str.split(":")[-1].strip())
total_rating = correctness + readability + algorithm
return jsonify({
"correctness": correctness,
"readability": readability,
"algorithm": algorithm,
"total_rating": total_rating,
})
except ValueError:
return jsonify({"error": "Failed to parse ratings from AI response"}), 500
if __name__ == "__main__":
app.run(debug=True, host="0.0.0.0", port=8080)
| [
"\nEvaluate the following code snippet based on:\n\n1. Correctness (0-40): Does the code run without errors and produce the expected output?\n2. Readability (0-30): Is the code easy to read and understand?\n3. Algorithm (0-30): Is the algorithm used efficient and well-implemented?\n\nCode snippet:\nPLACEHOLDER\n\nCorrectness:\nReadability:\nAlgorithm:\n"
] |
2024-01-10 | manas95826/Learning_Langchain | SerpAPI.py | # https://serpapi.com/
import os
os.environ['SERPAPI_API_KEY'] = "248676f3e794ef77d2c88a239493c2a99a7000eb4b8d051aa4d21daa1125efae"
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents import AgentType
from langchain.llms import OpenAI
# First, let's load the language model we're going to use to control the agent.
llm = OpenAI()
# Next, let's load some tools to use.
tools = load_tools(["serpapi"])
# Finally, let's initialize an agent with the tools, the language model, and the type of agent we want to use.
# The zero-shot ReAct agent relies on the tools and the LLM alone; initialize_agent
# does not accept example question/answer pairs (for reference, LangChain is a framework
# designed to simplify the creation of applications using large language models).
agent = initialize_agent(
    tools,
    llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
)
# Now let's test it out!
agent.run("What is LangChain ?")
| [] |
2024-01-10 | Reasoning-Lab/Elementary-Math-Solving-Zalo-AI-2023 | src~get_prompt.py | import argparse
import json
import os
import openai
from tqdm import tqdm
from dotenv import load_dotenv
import time
load_dotenv()
openai.api_type = "azure"
openai.api_base = "https://openai-enterprise-dci-eastus2-001.openai.azure.com/"
openai.api_version = "2023-05-15"
openai.api_key = os.getenv("OPENAI_API_KEY")
def parse_arguments():
parser = argparse.ArgumentParser(description="...")
parser.add_argument(
"--data_example_path",
type=str,
default="datasets/sorted_with_missing_explain_4.json",
help="",
)
parser.add_argument(
"--prompt_path", type=str, default="datasets/prompt/gen_explain.txt", help=""
)
parser.add_argument("--gpt_type", type=str, default="gpt-35-turbo", help="")
args = parser.parse_args()
return args
def load_json(path):
with open(path) as f:
return json.load(f)
def load_text(path):
with open(path) as file:
return file.read()
def save_json(path, file_save):
with open(path, "w") as f:
return json.dump(file_save, f)
def main():
args = parse_arguments()
data = load_json(args.data_example_path)["data"][423]
prompt = load_text(args.prompt_path)
prefixes = ["A.", "B.", "C.", "D."]
# for data in enumerate(raw_data):
# print(data)
print(data["choices"])
modified_choices = [
choice
if not any(choice.startswith(p) for p in prefixes)
else "- " + choice.split(". ", 1)[1]
for choice in data["choices"]
]
choices = "\n".join(modified_choices)
new_prompt = prompt.format(
question=data["question"],
choices=choices,
answer=data["answer"][3:], # Remove the A B C D.
)
print(new_prompt)
if __name__ == "__main__":
main()
| [
"question",
"answer"
] |
2024-01-10 | diya-basu/Langchain-Document-chatbot | app3.py | import os
import streamlit as st
import docx2txt
import PyPDF2
import textract
from pptx import Presentation
from io import BytesIO
from dotenv import load_dotenv
from langchain.document_loaders import TextLoader #for textfiles
from langchain.text_splitter import CharacterTextSplitter #text splitter
from langchain.embeddings import HuggingFaceEmbeddings #for using HugginFace models
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS  # facebook vectorization
from langchain.chains.question_answering import load_qa_chain
from langchain import HuggingFaceHub
from langchain.document_loaders import UnstructuredPDFLoader #load pdf
from langchain.indexes import VectorstoreIndexCreator #vectorize db index with chromadb
from langchain.chains import RetrievalQA
from langchain.document_loaders import UnstructuredURLLoader #load urls into docoument-loader
import textwrap
from nltk.tokenize import sent_tokenize
from transformers import AutoTokenizer, AutoModelForQuestionAnswering
def extract_text_from_docx(docx_bytes):
return docx2txt.process(BytesIO(docx_bytes))
def extract_text_from_pdf(pdf_bytes):
pdf_text = ""
pdf_reader = PyPDF2.PdfReader(BytesIO(pdf_bytes))
for page in pdf_reader.pages:
pdf_text += page.extract_text()
return pdf_text
def extract_text_from_ppt(ppt_bytes):
ppt_text = ""
presentation = Presentation(BytesIO(ppt_bytes))
for slide in presentation.slides:
for shape in slide.shapes:
if shape.has_text_frame:
ppt_text += shape.text + "\n"
return ppt_text
def process_uploaded_files(docs):
all_text = ""
for doc in docs:
if doc.type == "application/vnd.openxmlformats-officedocument.wordprocessingml.document":
doc_text = extract_text_from_docx(doc.read())
elif doc.type == "application/pdf":
doc_text = extract_text_from_pdf(doc.read())
elif doc.type == "application/vnd.ms-powerpoint":
doc_text = extract_text_from_ppt(doc.read())
else:
doc_text = textract.process(doc.read()).decode("utf-8", errors="ignore")
sentences = sent_tokenize(doc_text)
all_text += "\n".join(sentences) + "\n"
return all_text
tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-xxl")
model = AutoModelForQuestionAnswering.from_pretrained("google/flan-t5-xxl")
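# Note: this locally loaded tokenizer/model pair is not used further down; answers are
# generated through the HuggingFaceHub flan-t5-xxl endpoint inside main().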
def get_vectorstore(chunks):
embedder=HuggingFaceEmbeddings()
if not chunks:
return None
try:
vectorstore = FAISS.from_documents(chunks, embedder)
return vectorstore
except Exception as e:
return None
def wrap_text_preserve_newlines(text, width=110):
lines = text.split('\n')
wrapped_lines = [textwrap.fill(line, width=width) for line in lines]
wrapped_text = '\n'.join(wrapped_lines)
return wrapped_text
def main():
load_dotenv()
os.environ["HUGGINGFACEHUB_API_TOKEN"] = "Your API Token here"
st.set_page_config(page_title="Query your PDFs", page_icon=":scroll:")
st.header("The ultimate PDF whisperer 💬")
# upload files
pdfs = st.file_uploader("Upload your PDFs", type=["docx", "pdf", "ppt", "txt"], accept_multiple_files=True)
# process each uploaded PDF
if pdfs is not None:
text = process_uploaded_files(pdfs)
sentences = sent_tokenize(text)
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=1000,
chunk_overlap=200,
length_function=len
)
chunks = text_splitter.create_documents(sentences)
if chunks is not None:
db = get_vectorstore(chunks)
user_question = st.text_input(f"Ask a question about PDF:")
if user_question:
docs = db.similarity_search(user_question)
                llm = HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature": 0.5, "max_length": 512})
chain = load_qa_chain(llm, chain_type="stuff")
response = chain.run(input_documents=docs,question=user_question)
st.write(response)
if __name__ == '__main__':
main()
| [] |
2024-01-10 | diya-basu/Langchain-Document-chatbot | app2.py | import os
import streamlit as st
import docx2txt
import requests
import PyPDF2
import textract
from pptx import Presentation
from io import BytesIO
from dotenv import load_dotenv
import numpy as np
import time
from langchain.document_loaders import TextLoader #for textfiles
from langchain.text_splitter import CharacterTextSplitter #text splitter
from langchain.embeddings import HuggingFaceEmbeddings #for using HugginFace models
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS  # facebook vectorization
from langchain.chains.question_answering import load_qa_chain
from langchain import HuggingFaceHub
from langchain.document_loaders import UnstructuredPDFLoader #load pdf
from langchain.indexes import VectorstoreIndexCreator #vectorize db index with chromadb
from langchain.chains import RetrievalQA
from langchain.document_loaders import UnstructuredURLLoader #load urls into docoument-loader
import textwrap
RATE_LIMIT = 2
last_request_time = 0
def check_rate_limit():
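    """Return True only if enough time has passed since the last call to stay
    under RATE_LIMIT requests per minute; otherwise return False."""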
global last_request_time
current_time = time.time()
time_elapsed = current_time - last_request_time
if time_elapsed < 60 / RATE_LIMIT:
return False
last_request_time = current_time
return True
def extract_text_from_docx(docx_bytes):
return docx2txt.process(BytesIO(docx_bytes))
def extract_text_from_pdf(pdf_bytes):
pdf_text = ""
pdf_reader = PyPDF2.PdfReader(BytesIO(pdf_bytes))
for page in pdf_reader.pages:
pdf_text += page.extract_text()
return pdf_text
def extract_text_from_ppt(ppt_bytes):
ppt_text = ""
presentation = Presentation(BytesIO(ppt_bytes))
for slide in presentation.slides:
for shape in slide.shapes:
if shape.has_text_frame:
ppt_text += shape.text + "\n"
return ppt_text
def process_uploaded_files(docs):
all_text = ""
for doc in docs:
if doc.type == "application/vnd.openxmlformats-officedocument.wordprocessingml.document":
doc_text = extract_text_from_docx(doc.read())
elif doc.type == "application/pdf":
doc_text = extract_text_from_pdf(doc.read())
elif doc.type == "application/vnd.ms-powerpoint":
doc_text = extract_text_from_ppt(doc.read())
else:
doc_text = textract.process(doc.read()).decode("utf-8", errors="ignore")
all_text += doc_text + "\n"
return all_text
def get_vectorstore(chunks):
embeddings = HuggingFaceEmbeddings()
if not chunks:
return None
try:
vectorstore = FAISS.from_documents(chunks, embeddings)
return vectorstore
except Exception as e:
return None
def wrap_text_preserve_newlines(text, width=110):
lines = text.split('\n')
wrapped_lines = [textwrap.fill(line, width=width) for line in lines]
wrapped_text = '\n'.join(wrapped_lines)
return wrapped_text
def main():
load_dotenv()
os.environ["HUGGINGFACEHUB_API_TOKEN"] = "Your API Token here"
st.set_page_config(page_title="Query your PDFs", page_icon=":scroll:")
st.header("The ultimate PDF whisperer 💬")
# upload files
pdfs = st.file_uploader("Upload your PDFs", type=["docx", "pdf", "ppt", "txt"], accept_multiple_files=True)
# process each uploaded PDF
if pdfs is not None:
text = process_uploaded_files(pdfs)
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=10)
        chunks = text_splitter.create_documents([text])  # create_documents expects a list of texts
chunks = text_splitter.split_documents(chunks)
if chunks is not None:
# knowledge_base = get_vectorstore(chunks)
# print(chunks)
db = get_vectorstore(chunks)
user_question = st.text_input(f"Ask a question about PDF:")
if user_question:
docs = db.similarity_search(user_question)
response = wrap_text_preserve_newlines(str(docs[0].page_content))
st.write(response)
if __name__ == '__main__':
main()
| [] |
2024-01-10 | diya-basu/Langchain-Document-chatbot | finalapp.py | import os
import docx2txt
import PyPDF2
import textract
from io import BytesIO
import streamlit as st
from pptx import Presentation
from dotenv import load_dotenv
from langchain import HuggingFaceHub
from nltk.tokenize import sent_tokenize
from langchain.vectorstores import FAISS
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.chains.question_answering import load_qa_chain
from langchain.text_splitter import RecursiveCharacterTextSplitter
import easyocr
import cv2
from matplotlib import pyplot as plt
import numpy as np
import tempfile
from langchain.agents import create_csv_agent
from langchain.llms import OpenAI
def extract_text_from_docx(docx_bytes):
return docx2txt.process(BytesIO(docx_bytes))
def extract_text_from_pdf(pdf_bytes):
pdf_text = ""
pdf_reader = PyPDF2.PdfReader(BytesIO(pdf_bytes))
for page in pdf_reader.pages:
pdf_text += page.extract_text()
return pdf_text
def extract_text_from_ppt(ppt_bytes):
ppt_text = ""
presentation = Presentation(BytesIO(ppt_bytes))
for slide in presentation.slides:
for shape in slide.shapes:
if shape.has_text_frame:
ppt_text += shape.text + "\n"
return ppt_text
def extract_text_from_image(img):
    # easyocr accepts a file path, raw bytes or a numpy array, so the uploaded
    # bytes can be passed straight through
    reader = easyocr.Reader(['en'])
    result = reader.readtext(img)
    detected_sentence = ""
    for detection in result:
        # each detection is (bounding_box, text, confidence); keep only the text
        text = detection[1]
        detected_sentence += text + " "
    return detected_sentence
def process_uploaded_files(docs):
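    """Extract text from every uploaded file, dispatching on its MIME type
    (docx, pdf, ppt, image, csv or plain text), and return the sentences
    joined by newlines."""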
all_text = ""
for doc in docs:
if doc.type == "application/vnd.openxmlformats-officedocument.wordprocessingml.document":
doc_text = extract_text_from_docx(doc.read())
elif doc.type == "application/pdf":
doc_text = extract_text_from_pdf(doc.read())
elif doc.type == "application/vnd.ms-powerpoint":
doc_text = extract_text_from_ppt(doc.read())
elif doc.type in ("image/jpeg", "image/png", "image/jpg"):
doc_text = extract_text_from_image(doc.read())
elif doc.type == "text/csv":
            # Save the uploaded CSV to a temporary location and build a csv agent for it
            temp_dir = tempfile.mkdtemp()
            temp_csv_path = os.path.join(temp_dir, "uploaded_csv.csv")
            with open(temp_csv_path, "wb") as f:
                f.write(doc.read())
            agent = create_csv_agent(
                OpenAI(temperature=0), temp_csv_path, verbose=True)
            # The CSV is queried through the agent, so it contributes no free text below
            doc_text = ""
else:
            # textract needs a file path, so write the upload to a temporary file first
            with tempfile.NamedTemporaryFile(delete=False, suffix=".txt") as tmp:
                tmp.write(doc.read())
            doc_text = textract.process(tmp.name).decode("utf-8", errors="ignore")
sentences = sent_tokenize(doc_text)
all_text += "\n".join(sentences) + "\n"
return all_text
def get_vectorstore(chunks):
embeddings = HuggingFaceEmbeddings()
if not chunks:
return None
try:
vectorstore = FAISS.from_documents(chunks, embeddings)
return vectorstore
except Exception as e:
return None
def main():
load_dotenv()
os.environ["HUGGINGFACEHUB_API_TOKEN"] = "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"
st.set_page_config(page_title="Query your PDFs", page_icon=":scroll:")
st.header("The ultimate PDF whisperer 💬")
# upload files
pdfs = st.file_uploader("Upload your PDFs", type=["docx", "pdf", "ppt", "txt"], accept_multiple_files=True)
# process each uploaded PDF
if pdfs is not None:
text = process_uploaded_files(pdfs)
sentences = sent_tokenize(text)
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=1000,
chunk_overlap=200,
length_function=len
)
chunks = text_splitter.create_documents(sentences)
if chunks is not None:
db = get_vectorstore(chunks)
user_question = st.text_input(f"Ask a question about PDF:")
if user_question:
docs = db.similarity_search(user_question)
# llm=HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature":1, "max_length":512})
llm=HuggingFaceHub(repo_id="databricks/dolly-v2-3b", model_kwargs={"temperature":1, "max_length":500})
chain = load_qa_chain(llm, chain_type="stuff")
response = chain.run(input_documents=docs, question=user_question)
st.write(response)
if __name__ == '__main__':
main() | [] |
2024-01-10 | diya-basu/Langchain-Document-chatbot | csvapp.py | from langchain.agents import create_csv_agent
from langchain.llms import OpenAI
from dotenv import load_dotenv
import os
import streamlit as st
def main():
load_dotenv()
# Load the OpenAI API key from the environment variable
if os.getenv("OPENAI_API_KEY") is None or os.getenv("OPENAI_API_KEY") == "":
print("OPENAI_API_KEY is not set")
exit(1)
else:
print("OPENAI_API_KEY is set")
st.set_page_config(page_title="Ask your CSV")
st.header("Ask your CSV 📈")
csv_file = st.file_uploader("Upload a CSV file", type="csv")
if csv_file is not None:
agent = create_csv_agent(
OpenAI(temperature=0), csv_file, verbose=True)
user_question = st.text_input("Ask a question about your CSV: ")
if user_question is not None and user_question != "":
with st.spinner(text="In progress..."):
st.write(agent.run(user_question))
if __name__ == "__main__":
main()
| [] |
2024-01-10 | mxmpl/ppo | ppo~baselines.py | # Code from OpenAI baselines
# https://github.com/openai/baselines/blob/master/baselines/common/plot_util.py
import numpy as np
def one_sided_ema(xolds, yolds, low=None, high=None, n=512, decay_steps=1., low_counts_threshold=1e-8):
'''
perform one-sided (causal) EMA (exponential moving average)
smoothing and resampling to an even grid with n points.
Does not do extrapolation, so we assume
xolds[0] <= low && high <= xolds[-1]
Arguments:
xolds: array or list - x values of data. Needs to be sorted in ascending order
yolds: array of list - y values of data. Has to have the same length as xolds
low: float - min value of the new x grid. By default equals to xolds[0]
high: float - max value of the new x grid. By default equals to xolds[-1]
n: int - number of points in new x grid
decay_steps: float - EMA decay factor, expressed in new x grid steps.
low_counts_threshold: float or int
- y values with counts less than this value will be set to NaN
Returns:
        tuple (xs, ys, count_ys) where
xs - array with new x grid
ys - array of EMA of y at each point of the new x grid
count_ys - array of EMA of y counts at each point of the new x grid
'''
low = xolds[0] if low is None else low
high = xolds[-1] if high is None else high
assert xolds[0] <= low, 'low = {} < xolds[0] = {} - extrapolation not permitted!'.format(
low, xolds[0])
assert xolds[-1] >= high, 'high = {} > xolds[-1] = {} - extrapolation not permitted!'.format(
high, xolds[-1])
assert len(xolds) == len(
yolds), 'length of xolds ({}) and yolds ({}) do not match!'.format(len(xolds), len(yolds))
xolds = xolds.astype('float64')
yolds = yolds.astype('float64')
luoi = 0 # last unused old index
sum_y = 0.
count_y = 0.
xnews = np.linspace(low, high, n)
decay_period = (high - low) / (n - 1) * decay_steps
interstep_decay = np.exp(- 1. / decay_steps)
sum_ys = np.zeros_like(xnews)
count_ys = np.zeros_like(xnews)
for i in range(n):
xnew = xnews[i]
sum_y *= interstep_decay
count_y *= interstep_decay
while True:
if luoi >= len(xolds):
break
xold = xolds[luoi]
if xold <= xnew:
decay = np.exp(- (xnew - xold) / decay_period)
sum_y += decay * yolds[luoi]
count_y += decay
luoi += 1
else:
break
sum_ys[i] = sum_y
count_ys[i] = count_y
ys = sum_ys / count_ys
ys[count_ys < low_counts_threshold] = np.nan
return xnews, ys, count_ys
def symmetric_ema(xolds, yolds, low=None, high=None, n=512, decay_steps=1., low_counts_threshold=1e-8):
'''
perform symmetric EMA (exponential moving average)
smoothing and resampling to an even grid with n points.
Does not do extrapolation, so we assume
xolds[0] <= low && high <= xolds[-1]
Arguments:
xolds: array or list - x values of data. Needs to be sorted in ascending order
yolds: array of list - y values of data. Has to have the same length as xolds
low: float - min value of the new x grid. By default equals to xolds[0]
high: float - max value of the new x grid. By default equals to xolds[-1]
n: int - number of points in new x grid
decay_steps: float - EMA decay factor, expressed in new x grid steps.
low_counts_threshold: float or int
- y values with counts less than this value will be set to NaN
Returns:
        tuple (xs, ys, count_ys) where
xs - array with new x grid
ys - array of EMA of y at each point of the new x grid
count_ys - array of EMA of y counts at each point of the new x grid
'''
xs, ys1, count_ys1 = one_sided_ema(
xolds, yolds, low, high, n, decay_steps, low_counts_threshold=0)
_, ys2, count_ys2 = one_sided_ema(
-xolds[::-1], yolds[::-1], -high, -low, n, decay_steps, low_counts_threshold=0)
ys2 = ys2[::-1]
count_ys2 = count_ys2[::-1]
count_ys = count_ys1 + count_ys2
ys = (ys1 * count_ys1 + ys2 * count_ys2) / count_ys
ys[count_ys < low_counts_threshold] = np.nan
return xs, ys, count_ys
| [] |
2024-01-10 | jonathanferrari/carema | flask~nlp.py | # task: make a function that takes in a string (the quote) and outputs:
# - the sentiment/valence score (so we can set a threeshhold in the filters) NVM LOL
# - a list of categories that the quote belongs to (motivation, encouragement, support, reassurance, kindness)
# other function also returns boolean on whether the quote is
# "allowed" based on my metrics (hard coding some words or maybe finding a way to filter)
'''
import cohere
from cohere.classify import Example
import pandas as pd
co = cohere.Client('KXCnIlcLzTAV0WUXeOWu9TVV9fyZeMOeQKbtYkra')
classifications = co.classify(
model='medium',
inputs=["you are a beautiful human"],
examples=[Example("i believe in you", "benign"), Example("you got this", "benign"), Example("PUDGE MID!", "benign"), Example("I WILL REMEMBER THIS FOREVER", "benign"), Example("I think I saw it first", "benign"), Example("bring me a potion", "benign"), Example("I will honestly kill you", "toxic"), Example("get rekt moron", "toxic"), Example("go to hell", "toxic"), Example("f*a*g*o*t", "toxic"), Example("you are hot trash", "toxic"), Example("you're not worth it", "toxic"), Example('you are a problem to society', 'toxic'), Example("live, laugh, love, and you will succeed", 'benign')])
# running idea of threshhold: .70
# print('i think its bad with this much confidence:')
toxicity = classifications.classifications[0].confidence[1].confidence
if toxicity >= 0.7:
print("ur bad the score was too high it's", toxicity) #THIS IS THE CONFIDENCE OF TOXICICITY
else:
print('thanks for the happy quote it was ', (1-toxicity), 'happy')
'''
# CODE TO COPY STARTS HERE
import cohere
from cohere.classify import Example
co = cohere.Client('KXCnIlcLzTAV0WUXeOWu9TVV9fyZeMOeQKbtYkra')
# toxic threshold is currently 0.70
toxic_examples=[Example("i believe in you", "benign"), Example("you got this", "benign"), Example("PUDGE MID!", "benign"), Example("I WILL REMEMBER THIS FOREVER", "benign"), Example("I think I saw it first", "benign"), Example("bring me a potion", "benign"), Example("I will honestly kill you", "toxic"), Example("get rekt moron", "toxic"), Example("go to hell", "toxic"), Example("f*a*g*o*t", "toxic"), Example("you are hot trash", "toxic"), Example("you're not worth it", "toxic"), Example('you are a problem to society', 'toxic'), Example("live, laugh, love, and you will succeed", 'benign')]
# categories are encouragement, support, kindness, gratitude, inspiration
type_examples = [Example('courage is going from failure to failure without losing enthusiasm', 'encouragement'), Example('I believe in you', 'encouragement'), Example('a smooth sea never made a skilled sailor', 'encouragement'), Example('it always seems impossible until it\'s done', 'encouragement'), Example('wherever you go, whatever you do, i\'ll always be there, supporting you', 'support'), Example('the best thing to hold onto in life is each other', 'support'), Example('I am here for you', 'support'), Example('All dogs are emotional support animals', 'support'), Example('I appreciate you. You are the most thoughtful person I know and I\'m so very thankful for you. Thank you.', 'gratitude'), Example('Thank you for being here I love you', 'gratitude'), Example('acknowledging the good that you already have in your life is the foundation for all abundance', 'gratitude'), Example('the world is a better place with you in it', 'gratitude'), Example('Be in love with your life. Every minute of it.', 'inspiration'), Example('enjoy the little things in life because one day you\'ll look back and realize they were the big things.', 'inspiration'), Example('There is hope and a kind of beauty in there somewhere, if you look for it.', 'inspiration'), Example('life is about accepting the challenges along the way, choosing to keep moving forward, and savoring the journey', 'inspiration')]
def toxic_bool(insp):
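    """Classify a quote with Cohere and return (toxicity confidence, whether it
    crosses the 0.70 removal threshold)."""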
classifications = co.classify(
model='medium',
inputs=[insp],
examples=toxic_examples)
toxicity = classifications.classifications[0].confidence[1].confidence # use for toxicity score
return toxicity, toxicity >= 0.70 # confidence in 'toxic', whether it should be removed
def give_type(insp):
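    """Classify a quote into one of the categories used by the app
    (encouragement, support, gratitude, inspiration)."""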
classifications = co.classify(
model='small',
inputs=[insp],
examples=type_examples)
type = classifications.classifications[0].prediction # use for types
return type # the category of the text
test_str = 'i am going to steal your commits'
print(toxic_bool(test_str), give_type(test_str))
# CODE TO COPY ENDS HERE
encouragement = ['courage is going from failure to failure without losing enthusiasm', 'I believe in you', 'a smooth sea never made a skilled sailor', 'it always seems impossible until it\'s done']
support = ['wherever you go, whatever you do, i\'ll always be there, supporting you', 'the best thing to hold onto in life is each other', 'I am here for you', 'All dogs are emotional support animals']
gratitude = ['I appreciate you. You are the most thoughtful person I know and I\'m so very thankful for you. Thank you.', 'Thank you for being here I love you', 'acknowledging the good that you already have in your life is the foundation for all abundance', 'the world is a better place with you in it']
inspiration = ['Be in love with your life. Every minute of it.', 'enjoy the little things in life because one day you\'ll look back and realize they were the big things.', 'There is hope and a kind of beauty in there somewhere, if you look for it.', 'life is about accepting the challenges along the way, choosing to keep moving forward, and savoring the journey']
# good examples to demo:
# failure:
# you are so successful at being bad
# i think of you as a password reset button: very annoying
# print('The confidence levels of the labels are: {}'.format(
# classifications.classifications))
# print('hi')
# print(classifications.classifications[0]) # first input
# print('2')
# print(classifications.classifications[0].prediction)
# predict = classifications.classifications[0].prediction
| [] |
2024-01-10 | Uokoroafor/finetune_llms | train_GPT_ICL.py | import random
import openai
from dotenv import load_dotenv
import os
# Load variables from .env
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
import math
# Perform an in context regression task
# Define the prompt
# Numerical prompts
numbers = []
for number in range(0, 30):
numbers.append(str(3 * number**3 + 2 * number**2))
# Number prompts are in groups of 4
number_contexts = [
[numbers[i : i + 3], numbers[i + 3]] for i in range(0, len(numbers) - 3)
]
# Shuffle the number prompts
random.shuffle(number_contexts)
# Only allowed 20
# number_contexts = number_contexts[:10]
contexts = []
answers = []
for context in number_contexts:
contexts.append(", ".join(context[0]))
answers.append(context[1])
# Define the prompt
question_prompt = " What number comes next in the sequence "
# Define the function to generate the prompt
def generate_prompt(context, answer=""):
prompt = ""
if answer == "":
prompt += "input-> " + question_prompt + context + "? Ans-> "
else:
prompt += "input-> " + question_prompt + context + "? Ans-> " + answer + "\n"
return prompt
def generate_few_shot_prompt(contexts, answers, num_shots=3, current_index=0):
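    """Build a prompt containing `num_shots` solved examples (sampled so they
    never reuse the sequence at `current_index`) followed by the unanswered
    query for `current_index`."""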
prompt = ""
for _ in range(num_shots):
while True:
i = random.randint(0, len(contexts) - 1)
if i != current_index:
break
prompt += generate_prompt(contexts[i], answers[i])
prompt += generate_prompt(contexts[current_index])
return prompt
for num_shot in range(5):
# Generate the prompts
prompts = [
generate_prompt(context, answer) for context, answer in zip(contexts, answers)
]
few_shot_prompts = [
generate_few_shot_prompt(contexts, answers, num_shots=num_shot, current_index=i)
for i in range(len(contexts))
]
answers_ = []
for p in few_shot_prompts[:1]:
response = openai.Completion.create(
engine="text-davinci-003",
prompt=p,
max_tokens=5,
)
answers_.append(response.choices[0].text.strip())
# Print the prompts and answers
for i, prompt in enumerate(few_shot_prompts[:1]):
print(f"Prompt {i + 1}:\n{prompt}")
print(f"Answer {i + 1}:\n{answers_[i]}")
print(f"Correct Answer:\n{answers[i]}")
print()
| [
" What number comes next in the sequence ",
"input-> PLACEHOLDERPLACEHOLDER? Ans-> PLACEHOLDER\n",
"input-> PLACEHOLDERPLACEHOLDER? Ans-> "
] |
2024-01-10 | neowalter/Dr.GPT_plugin | news-plugin.py | import openai_secret_manager
assert "openai" in openai_secret_manager.get_services()
secrets = openai_secret_manager.get_secret("openai")
import openai
openai.api_key = secrets["api_key"]
def get_tech_news():
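    """Ask the davinci completion model for today's top 5 tech news stories and
    return the generated text."""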
response = openai.Completion.create(
engine="davinci",
prompt="Give me a list of the top 5 tech news stories for today with a brief explanation of each.",
n=1,
max_tokens=150,
temperature=0.5
)
return response['choices'][0]['text']
def handle_message(message):
if message == 'tech news':
news = get_tech_news()
return news
else:
return "Sorry, I didn't understand that. Please type 'tech news' to get a list of the top 5 tech news stories for today with a brief explanation of each."
def handle_command(command):
if command["type"] == "message":
return handle_message(command["data"]["text"])
else:
return "Sorry, I didn't understand that command."
def process_event(event):
if event["type"] == "message":
response_text = handle_message(event["data"]["text"])
openai.Completion.create(
engine="davinci",
prompt=response_text,
n=1,
max_tokens=150,
temperature=0.5
)
| [
"Give me a list of the top 5 tech news stories for today with a brief explanation of each."
] |
2024-01-10 | conceptofmind/toolformer | tools.py | import copy
import requests
import calendar
import json
import torch
import wolframalpha
import openai
import datetime
import time
from transformers import (
AutoModelForSeq2SeqLM,
AutoTokenizer,
AutoModel,
T5ForConditionalGeneration,
)
from typing import List
from operator import truediv, mul, add, sub
from langchain.chains import LLMChain
from langchain import Cohere, PromptTemplate
# Optional imports
from googleapiclient.discovery import build
"""
Calendar
Uses Python's datetime and calendar libraries to retrieve the current date.
input - None
output - A string, the current date.
"""
def Calendar(date=None):
    # Resolve "now" at call time; a default argument would be frozen at import time.
    if date is None:
        date = datetime.datetime.now()
    return f"Today is {calendar.day_name[date.weekday()]}, {calendar.month_name[date.month]} {date.day}, {date.year}."
"""
retrieval
Uses Carptriever to retrieve sentences before the current context.
input_sentences - List[String], sentences to retrieve from
input_text - String, the input text (e.g. The dog's name is)
k - The number of sentences to retrieve
output - A list of strings, each string is the retrieved sentence, and the sentence after.
"""
class Retriever:
def __init__(self):
self.model = AutoModel.from_pretrained(
"CarperAI/carptriever-1", add_pooling_layer=False
).cuda()
self.tokenizer = AutoTokenizer.from_pretrained("CarperAI/carptriever-1")
def retrieval(
self, input_sentences: List[str], input_text: str, k: int
) -> List[str]:
if k > len(input_sentences):
# I'd error but LMs do stupid stuff sometimes
return input_sentences
input_sentences = copy.deepcopy(input_sentences)
input_sentences.append(input_text)
output_list = []
for sentence in input_sentences:
inputs = self.tokenizer(
sentence, padding=True, truncation=True, return_tensors="pt"
)
# print(inputs)
inputs["input_ids"] = inputs["input_ids"].cuda()
inputs["token_type_ids"] = inputs["token_type_ids"].cuda()
inputs["attention_mask"] = inputs["attention_mask"].cuda()
with torch.no_grad():
outputs = self.model(**inputs)
embeddings = mean_pooling(outputs[0], inputs["attention_mask"])
output_list.append(embeddings)
query_embedding, sentence_embeddings = output_list[-1], torch.concat(
output_list[:-1], 0
)
# print(len(sentence_embeddings), sentence_embeddings[0].shape)
scores = (query_embedding @ sentence_embeddings.transpose(0, 1)).cpu().tolist()
# print(scores)
sentence_score_pairs = sorted(
zip(input_sentences[:-1], scores[0]), reverse=True, key=lambda x: x[1]
)
continued_sentence_score_pairs = sorted(
zip(input_sentences[1:], scores[0]), reverse=True, key=lambda x: x[1]
)
# print(sentence_score_pairs)
return [
sentence_pair[0] + " " + continue_pair[0]
for sentence_pair, continue_pair in zip(
sentence_score_pairs[:k], continued_sentence_score_pairs[:k]
)
]
def mean_pooling(token_embeddings: torch.Tensor, mask: torch.Tensor):
token_embeddings = token_embeddings.masked_fill(~mask[..., None].bool(), 0.0)
sentence_embeddings = token_embeddings.sum(dim=1) / mask.sum(dim=1)[..., None]
return sentence_embeddings
"""
Wikipedia Search
Uses ColBERTv2 to retrieve Wikipedia documents.
input_query - A string, the input query (e.g. "what is a dog?")
k - The number of documents to retrieve
output - A list of strings, each string is a Wikipedia document
Adapted from Stanford's DSP: https://github.com/stanfordnlp/dsp/
Also see: https://github.com/lucabeetz/dsp
"""
class ColBERTv2:
def __init__(self, url: str):
self.url = url
def __call__(self, query, k=1):
topk = colbertv2_get_request(self.url, query, k)
topk = [doc["text"] for doc in topk]
return topk
def colbertv2_get_request(url: str, query: str, k: int):
payload = {"query": query, "k": k}
res = requests.get(url, params=payload)
topk = res.json()["topk"][:k]
return topk
def WikiSearch(input_query: str):
k = 10
retrieval_model = ColBERTv2(
"http://ec2-44-228-128-229.us-west-2.compute.amazonaws.com:8893/api/search"
)
output = retrieval_model(input_query, k)
return output
"""
Machine Translation - NLLB-600M
Uses HuggingFace's transformers library to translate input query to English.
input_query - A string, the input query (e.g. "what is a dog?")
output - A string, the translated input query.
"""
def MT(input_query: str):
model_name = "facebook/nllb-200-distilled-600M"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
input_ids = tokenizer(input_query, return_tensors="pt")
outputs = model.generate(
**input_ids,
forced_bos_token_id=tokenizer.lang_code_to_id["eng_Latn"],
)
output = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
return output
"""
Calculator
Calculates the result of a mathematical expression.
input_query - A string, the input query (e.g. "400/1400")
output - A float, the result of the calculation
Adapted from: https://levelup.gitconnected.com/3-ways-to-write-a-calculator-in-python-61642f2e4a9a
"""
def Calculator(input_query: str):
operators = {"+": add, "-": sub, "*": mul, "/": truediv}
if input_query.isdigit():
return float(input_query)
for c in operators.keys():
left, operator, right = input_query.partition(c)
if operator in operators:
return round(operators[operator](Calculator(left), Calculator(right)), 2)
# Other Optional Tools
"""
LangChain LLMChain
input_question - A string, the input query (e.g. "what is a dog?")
output - String for generation
Requires that you set your COHERE_API_KEY environment variable before starting.
"""
def langchain_llmchain(input_question):
# TODO: Check succinct if it's good once we don't have rate limited APIs
template = """Please be succinct in your answer to this question.
Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])
llm = Cohere(model="command-xlarge-nightly")
chain = LLMChain(llm=llm, prompt=prompt)
return chain.predict(question=input_question)
"""
HuggingFace API
Uses HuggingFace's API to generate text.
input_query - A string, the input query (e.g. "what is a dog?")
output - A string, the generated text
API_TOKEN - your HuggingFace API token
"""
def HuggingfaceAPI(input_query: str):
model_id = "gpt-neox-20b"
API_TOKEN = "YOUR_API_TOKEN"
API_URL = "https://api-inference.huggingface.co/models/{model_id}".format(
model_id=model_id
)
headers = {"Authorization": f"Bearer {API_TOKEN}".format(API_TOKEN=API_TOKEN)}
def query(payload):
data = json.dumps(payload)
response = requests.request("POST", API_URL, headers=headers, data=data)
return json.loads(response.content.decode("utf-8"))
data = query(input_query)
return data[0]["generated_text"]
"""
Wolfram Alpha Calculator
pip install wolframalpha
Uses Wolfram Alpha API to calculate input query.
input_query - A string, the input query (e.g. "what is 2 + 2?")
output - A string, the answer to the input query
wolfarm_alpha_appid - your Wolfram Alpha API key
"""
def WolframAlphaCalculator(input_query: str):
wolfram_alpha_appid = "YOUR_WOLFRAM_ALPHA_APPID"
wolfram_client = wolframalpha.Client(wolfram_alpha_appid)
res = wolfram_client.query(input_query)
assumption = next(res.pods).text
answer = next(res.results).text
return f"Assumption: {assumption} \nAnswer: {answer}"
"""
Google Search
Uses Google's Custom Search API to retrieve Google Search results.
input_query - The query to search for.
num_results - The number of results to return.
api_key - Your Google API key.
cse_id - Your Google Custom Search Engine ID.
output - A list of dictionaries, each dictionary is a Google Search result
"""
def custom_search(query, api_key, cse_id, **kwargs):
service = build("customsearch", "v1", developerKey=api_key)
res = service.cse().list(q=query, cx=cse_id, **kwargs).execute()
return res["items"]
def google_search(input_query: str):
api_key = "YOUR_GOOGLE_API_KEY"
cse_id = "YOUR_GOOGLE_CSE_ID"
num_results = 10
metadata_results = []
results = custom_search(
input_query, num=num_results, api_key=api_key, cse_id=cse_id
)
for result in results:
metadata_result = {
"snippet": result["snippet"],
"title": result["title"],
"link": result["link"],
}
metadata_results.append(metadata_result)
return metadata_results
"""
SteamSHP
Uses HuggingFace's transformers library to generate text.
input_query - A string, the input query (e.g. "what is a dog?")
output - A list of strings, the generated text
"""
def SteamSHP(input_query: str):
device = "cuda" # if you have a GPU
tokenizer = AutoTokenizer.from_pretrained("stanfordnlp/SteamSHP-flan-t5-large")
model = T5ForConditionalGeneration.from_pretrained(
"stanfordnlp/SteamSHP-flan-t5-large"
).to(device)
x = tokenizer([input_query], return_tensors="pt").input_ids.to(device)
y = model.generate(x, max_new_tokens=1)
output = tokenizer.batch_decode(y, skip_special_tokens=True)
return output
"""
Goose AI
pip install openai
Uses GPT-NeoX 20B to generate text.
input_query - A string, the input query (e.g. "what is a dog?")
output - A string, the generated text
openai.api_key - your GooseAI API key
"""
def GooseAI(input_query: str):
openai.api_key = "YOUR_API_KEY"
openai.api_base = "https://api.goose.ai/v1"
# Create a completion, return results streaming as they are generated.
# Run with `python3 -u` to ensure unbuffered output.
completion = openai.Completion.create(
engine="gpt-neo-20b", prompt=input_query, max_tokens=160
)
return completion.choices[0].text
"""
Bing Search
Uses Bing's Custom Search API to retrieve Bing Search results.
input_query: The query to search for.
bing_subscription_key: Your Bing API key.
num_results: The number of results to return.
output: A list of dictionaries, each dictionary is a Bing Search result
"""
def _bing_search_results(search_term: str, bing_subscription_key: str, count: int):
headers = {"Ocp-Apim-Subscription-Key": bing_subscription_key}
params = {
"q": search_term,
"count": count,
"textDecorations": True,
"textFormat": "HTML",
}
response = requests.get(
"https://api.bing.microsoft.com/v7.0/search", headers=headers, params=params
)
response.raise_for_status()
search_results = response.json()
return search_results["webPages"]["value"]
def bing_search(input_query: str):
bing_subscription_key = "YOUR BING API KEY"
num_results = 10
metadata_results = []
results = _bing_search_results(
input_query, bing_subscription_key, count=num_results
)
for result in results:
metadata_result = {
"snippet": result["snippet"],
"title": result["name"],
"link": result["url"],
}
metadata_results.append(metadata_result)
return metadata_results
if __name__ == "__main__":
print(langchain_llmchain("Please respond"))
print(
WikiSearch("What is a dog?")
) # Outputs a list of strings, each string is a Wikipedia document
print(Calendar()) # Outputs a string, the current date
print(Calculator("400/1400")) # For Optional Basic Calculator
print(MT("Un chien c'est quoi?")) # What is a dog?
# Optional Tools
print(
HuggingfaceAPI("What is a dog?")
) # Outputs a string, the answer to the input query
print(SteamSHP("What is a dog?")) # Outputs a list with an answer
print(WolframAlphaCalculator("What is 2 + 2?")) # 4
print(GooseAI("What is a dog?")) # Outputs a string, the answer to the input query
print(google_search("What is a dog?"))
# Outputs a list of dictionaries, each dictionary is a Google Search result
print(bing_search("What is a dog?"))
# Outputs a list of dictionaries, each dictionary is a Bing Search result
| [
"Please be succinct in your answer to this question.\nQuestion: {question}\n\nAnswer: Let's think step by step.",
"question"
] |
2024-01-10 | conceptofmind/toolformer | data_generation~llmchain.py | import torch
from transformers import (
PreTrainedTokenizerBase,
PreTrainedModel,
)
from tools import langchain_llmchain
from prompts import llmchain_prompt
from typing import List
from data_generation.base_api import APICallPostprocessing
# TODO: Per API?
MAX_BATCH_SIZE = 1 # My 3090 is weak 😔
N = 128 # SEQ Len
MAX_LEN = 1024 # Maximum retrieval length
M = 16 # Min Loss Span To Consider
class LLMChainPostprocessing(APICallPostprocessing):
def __init__(
self,
start_tokens: List[int],
end_tokens: List[int],
minimum_percentage: float = 0.1,
):
self.llmchain = langchain_llmchain
self.api_text = "LLMChain("
super().__init__(start_tokens, end_tokens, minimum_percentage)
def add_api_calls(
self,
candidate: int,
outputs: dict,
texts_to_test: List[str],
tokenizer: PreTrainedTokenizerBase,
input_tokens: torch.Tensor,
input_start: int,
nums_to_keep: List[int],
base_loss: float,
*args,
**kwargs
):
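        """Extract the LLMChain query from each candidate generation, execute it,
        and build the token sequences with and without the API result so their
        losses can be compared later."""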
generated_texts = list()
max_token_len = N
max_token_len_base = N
for j in range(len(outputs)):
outputs[j]["LLMChain"] = outputs[j]["generated_text"].replace(
texts_to_test[candidate], ""
)
outputs[j]["Generated"] = outputs[j]["generated_text"].split("Output:")[-1]
if "]" in outputs[j]["LLMChain"]:
outputs[j]["LLMChain"] = (
outputs[j]["LLMChain"].replace("LLMChain(", "").split("]")[0]
)
if ")" in outputs[j]["LLMChain"]:
outputs[j]["LLMChain"] = outputs[j]["LLMChain"].split(")")[0]
if outputs[j]["LLMChain"][0] == "\"":
outputs[j]["LLMChain"] = outputs[j]["LLMChain"][1:]
if outputs[j]["LLMChain"][-1] == "\"":
outputs[j]["LLMChain"] = outputs[j]["LLMChain"][:-1]
outputs[j]["LLMChain_text"] = (
"[LLMChain(" + outputs[j]["LLMChain"] + ")"
)
base_inputs = tokenizer(
outputs[j]["LLMChain_text"] + "]" + "\n",
return_tensors="pt",
)["input_ids"].cuda()
outputs[j]["LLMChain"] = str(self.llmchain(outputs[j]["LLMChain"]))
outputs[j]["LLMChain_output"] = [outputs[j]["LLMChain_text"][1:], outputs[j]["LLMChain"]]
outputs[j]["LLMChain_text"] = (
outputs[j]["LLMChain_text"]
+ "->"
+ outputs[j]["LLMChain"]
+ "]"
)
test_inputs = tokenizer(
outputs[j]["LLMChain_text"] + "\n",
return_tensors="pt",
)["input_ids"].cuda()
test_inputs = torch.concat(
[
test_inputs.cuda(),
input_tokens[:, input_start:].cuda(),
],
dim=1,
)
if test_inputs.shape[1] > MAX_LEN:
continue
base_inputs = torch.concat(
[
base_inputs.cuda(),
input_tokens[:, input_start:].cuda(),
],
dim=1,
)
max_token_len = max(max_token_len, test_inputs.shape[1])
max_token_len_base = max(max_token_len_base, test_inputs.shape[1])
generated_texts.append(
[
test_inputs,
base_inputs,
nums_to_keep[candidate],
base_loss,
outputs[j],
]
)
return generated_texts, max_token_len, max_token_len_base
def parse_article(
self, data: dict, model: PreTrainedModel, tokenizer: PreTrainedTokenizerBase
):
outputs = list()
tokens = tokenizer(data["text"], return_tensors="pt")["input_ids"]
start_step = 0
total_steps = tokens.shape[1]//N
for i in range(start_step, total_steps):
input_tokens = tokens[:, (-N * (i + 1) - 1) : (-N * (i) - 1)]
labels = tokens[
:,
int(tokens.shape[1] + (-N * (i + 1))) : int(tokens.shape[1] + (-N * i)),
]
# print(tokens.shape)
string = tokenizer.decode(input_tokens[0])
# print(ret_strings)
model_input = tokenizer(
llmchain_prompt.replace("<REPLACEGPT>", string) + string,
return_tensors="pt",
)["input_ids"]
# print(string)
# print(model_input.shape)
with torch.no_grad():
output = model(model_input.cuda()).logits.cpu()[:, -N:]
new_outputs = self.generate_continuations(
model_input,
output,
labels,
model,
tokenizer,
)
print(new_outputs)
for output in new_outputs:
if output is None:
continue
output["index"] += int(tokens.shape[1] + (-N * (i + 1)))
# filter by score
if output["Score"] > 1.0:
outputs.append([output["Score"], output["index"]] + output["LLMChain_output"])
return outputs
| [] |
2024-01-10 | kennethlien/ray | rllib~agents~maddpg~maddpg.py | """Contributed port of MADDPG from OpenAI baselines.
The implementation has a couple assumptions:
- The number of agents is fixed and known upfront.
- Each agent is bound to a policy of the same name.
- Discrete actions are sent as logits (pre-softmax).
For a minimal example, see rllib/examples/two_step_game.py,
and the README for how to run with the multi-agent particle envs.
"""
import logging
from typing import Type
from ray.rllib.agents.maddpg.maddpg_tf_policy import MADDPGTFPolicy
from ray.rllib.agents.dqn.dqn import DQNTrainer
from ray.rllib.agents.trainer import COMMON_CONFIG, with_common_config
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch, MultiAgentBatch
from ray.rllib.utils import merge_dicts
from ray.rllib.utils.annotations import override
from ray.rllib.utils.typing import TrainerConfigDict
from ray.rllib.utils.deprecation import DEPRECATED_VALUE
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# fmt: off
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
# === Framework to run the algorithm ===
"framework": "tf",
# === Settings for each individual policy ===
# ID of the agent controlled by this policy
"agent_id": None,
# Use a local critic for this policy.
"use_local_critic": False,
# === Evaluation ===
# Evaluation interval
"evaluation_interval": None,
# Number of episodes to run per evaluation period.
"evaluation_duration": 10,
# === Model ===
# Apply a state preprocessor with spec given by the "model" config option
# (like other RL algorithms). This is mostly useful if you have a weird
# observation shape, like an image. Disabled by default.
"use_state_preprocessor": False,
# Postprocess the policy network model output with these hidden layers. If
# use_state_preprocessor is False, then these will be the *only* hidden
# layers in the network.
"actor_hiddens": [64, 64],
# Hidden layers activation of the postprocessing stage of the policy
# network
"actor_hidden_activation": "relu",
# Postprocess the critic network model output with these hidden layers;
# again, if use_state_preprocessor is True, then the state will be
# preprocessed by the model specified with the "model" config option first.
"critic_hiddens": [64, 64],
# Hidden layers activation of the postprocessing state of the critic.
"critic_hidden_activation": "relu",
# N-step Q learning
"n_step": 1,
# Algorithm for good policies.
"good_policy": "maddpg",
# Algorithm for adversary policies.
"adv_policy": "maddpg",
# === Replay buffer ===
"replay_buffer_config": {
"type": "MultiAgentReplayBuffer",
# Specify prioritized replay by supplying a buffer type that supports
# prioritization, for example: MultiAgentPrioritizedReplayBuffer.
"prioritized_replay": DEPRECATED_VALUE,
"capacity": int(1e6),
# How many steps of the model to sample before learning starts.
"learning_starts": 1024 * 25,
},
# Observation compression. Note that compression makes simulation slow in
# MPE.
"compress_observations": False,
# If set, this will fix the ratio of replayed from a buffer and learned on
# timesteps to sampled from an environment and stored in the replay buffer
# timesteps. Otherwise, the replay will proceed at the native ratio
# determined by (train_batch_size / rollout_fragment_length).
"training_intensity": None,
# Force lockstep replay mode for MADDPG.
"multiagent": merge_dicts(COMMON_CONFIG["multiagent"], {
"replay_mode": "lockstep",
}),
# === Optimization ===
# Learning rate for the critic (Q-function) optimizer.
"critic_lr": 1e-2,
# Learning rate for the actor (policy) optimizer.
"actor_lr": 1e-2,
# Update the target network every `target_network_update_freq` sample steps.
"target_network_update_freq": 0,
# Update the target by \tau * policy + (1-\tau) * target_policy
"tau": 0.01,
# Weights for feature regularization for the actor
"actor_feature_reg": 0.001,
# If not None, clip gradients during optimization at this value
"grad_norm_clipping": 0.5,
# Update the replay buffer with this many samples at once. Note that this
# setting applies per-worker if num_workers > 1.
"rollout_fragment_length": 100,
# Size of a batched sampled from replay buffer for training. Note that
# if async_updates is set, then each worker returns gradients for a
# batch of this size.
"train_batch_size": 1024,
# === Parallelism ===
# Number of workers for collecting samples with. This only makes sense
# to increase if your environment is particularly slow to sample, or if
# you're using the Async or Ape-X optimizers.
"num_workers": 1,
# Prevent iterations from going lower than this time span
"min_time_s_per_reporting": 0,
})
# __sphinx_doc_end__
# fmt: on
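# Illustrative sketch (editorial addition, not part of the original config): a minimal
# two-agent setup would register one policy per agent and pass its index via "agent_id".
# The env name, spaces and policy ids below are placeholders:
#
#   config = DEFAULT_CONFIG.copy()
#   config["multiagent"]["policies"] = {
#       "agent_0": (None, obs_space, act_space, {"agent_id": 0}),
#       "agent_1": (None, obs_space, act_space, {"agent_id": 1}),
#   }
#   config["multiagent"]["policy_mapping_fn"] = lambda agent_id, **kw: agent_id
#   trainer = MADDPGTrainer(env="two_step_game", config=config)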
def before_learn_on_batch(multi_agent_batch, policies, train_batch_size):
samples = {}
# Modify keys.
for pid, p in policies.items():
i = p.config["agent_id"]
keys = multi_agent_batch.policy_batches[pid].keys()
keys = ["_".join([k, str(i)]) for k in keys]
samples.update(dict(zip(keys, multi_agent_batch.policy_batches[pid].values())))
# Make ops and feed_dict to get "new_obs" from target action sampler.
new_obs_ph_n = [p.new_obs_ph for p in policies.values()]
new_obs_n = list()
for k, v in samples.items():
if "new_obs" in k:
new_obs_n.append(v)
for i, p in enumerate(policies.values()):
feed_dict = {new_obs_ph_n[i]: new_obs_n[i]}
new_act = p.get_session().run(p.target_act_sampler, feed_dict)
samples.update({"new_actions_%d" % i: new_act})
# Share samples among agents.
policy_batches = {pid: SampleBatch(samples) for pid in policies.keys()}
return MultiAgentBatch(policy_batches, train_batch_size)
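# Illustrative sketch of the key renaming above (editorial addition, values are
# placeholders): for the policy whose config has agent_id == 0, the per-policy batch keys
# "obs", "actions", "new_obs", ... become "obs_0", "actions_0", "new_obs_0", ..., and its
# target-network actions are stored as "new_actions_0". Every policy then receives the
# same merged SampleBatch, so each MADDPG critic can condition on all agents'
# observations and actions.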
class MADDPGTrainer(DQNTrainer):
@classmethod
@override(DQNTrainer)
def get_default_config(cls) -> TrainerConfigDict:
return DEFAULT_CONFIG
@override(DQNTrainer)
def validate_config(self, config: TrainerConfigDict) -> None:
"""Adds the `before_learn_on_batch` hook to the config.
This hook is called explicitly prior to TrainOneStep() in the execution
setups for DQN and APEX.
"""
# Call super's validation method.
super().validate_config(config)
def f(batch, workers, config):
policies = dict(
workers.local_worker().foreach_policy_to_train(lambda p, i: (i, p))
)
return before_learn_on_batch(batch, policies, config["train_batch_size"])
config["before_learn_on_batch"] = f
@override(DQNTrainer)
def get_default_policy_class(self, config: TrainerConfigDict) -> Type[Policy]:
return MADDPGTFPolicy
| [] |
2024-01-10 | meroxa/presidio | docs~samples~python~streamlit~presidio_helpers.py | """
Helper methods for the Presidio Streamlit app
"""
from typing import List, Optional
import spacy
import streamlit as st
from presidio_analyzer import AnalyzerEngine, RecognizerResult, RecognizerRegistry
from presidio_analyzer.nlp_engine import NlpEngineProvider
from presidio_anonymizer import AnonymizerEngine
from presidio_anonymizer.entities import OperatorConfig
from flair_recognizer import FlairRecognizer
from openai_fake_data_generator import (
set_openai_key,
call_completion_model,
create_prompt,
)
from transformers_rec import (
STANFORD_COFIGURATION,
TransformersRecognizer,
BERT_DEID_CONFIGURATION,
)
@st.cache_resource
def analyzer_engine(model_path: str):
"""Return AnalyzerEngine.
:param model_path: Which model to use for NER:
"StanfordAIMI/stanford-deidentifier-base",
"obi/deid_roberta_i2b2",
"en_core_web_lg"
"""
registry = RecognizerRegistry()
registry.load_predefined_recognizers()
# Set up NLP Engine according to the model of choice
if model_path == "en_core_web_lg":
if not spacy.util.is_package("en_core_web_lg"):
spacy.cli.download("en_core_web_lg")
nlp_configuration = {
"nlp_engine_name": "spacy",
"models": [{"lang_code": "en", "model_name": "en_core_web_lg"}],
}
elif model_path == "flair/ner-english-large":
flair_recognizer = FlairRecognizer()
nlp_configuration = {
"nlp_engine_name": "spacy",
"models": [{"lang_code": "en", "model_name": "en_core_web_sm"}],
}
registry.add_recognizer(flair_recognizer)
registry.remove_recognizer("SpacyRecognizer")
else:
if not spacy.util.is_package("en_core_web_sm"):
spacy.cli.download("en_core_web_sm")
# Using a small spaCy model + a HF NER model
transformers_recognizer = TransformersRecognizer(model_path=model_path)
registry.remove_recognizer("SpacyRecognizer")
if model_path == "StanfordAIMI/stanford-deidentifier-base":
transformers_recognizer.load_transformer(**STANFORD_COFIGURATION)
elif model_path == "obi/deid_roberta_i2b2":
transformers_recognizer.load_transformer(**BERT_DEID_CONFIGURATION)
# Use small spaCy model, no need for both spacy and HF models
# The transformers model is used here as a recognizer, not as an NlpEngine
nlp_configuration = {
"nlp_engine_name": "spacy",
"models": [{"lang_code": "en", "model_name": "en_core_web_sm"}],
}
registry.add_recognizer(transformers_recognizer)
nlp_engine = NlpEngineProvider(nlp_configuration=nlp_configuration).create_engine()
analyzer = AnalyzerEngine(nlp_engine=nlp_engine, registry=registry)
return analyzer
@st.cache_resource
def anonymizer_engine():
"""Return AnonymizerEngine."""
return AnonymizerEngine()
@st.cache_data
def get_supported_entities(st_model: str):
"""Return supported entities from the Analyzer Engine."""
return analyzer_engine(st_model).get_supported_entities()
@st.cache_data
def analyze(st_model: str, **kwargs):
"""Analyze input using Analyzer engine and input arguments (kwargs)."""
if "entities" not in kwargs or "All" in kwargs["entities"]:
kwargs["entities"] = None
return analyzer_engine(st_model).analyze(**kwargs)
def anonymize(
text: str,
operator: str,
analyze_results: List[RecognizerResult],
mask_char: Optional[str] = None,
number_of_chars: Optional[str] = None,
encrypt_key: Optional[str] = None,
):
"""Anonymize identified input using Presidio Anonymizer.
:param text: Full text
:param operator: Operator name
:param mask_char: Mask char (for mask operator)
:param number_of_chars: Number of characters to mask (for mask operator)
:param encrypt_key: Encryption key (for encrypt operator)
:param analyze_results: list of results from presidio analyzer engine
"""
if operator == "mask":
operator_config = {
"type": "mask",
"masking_char": mask_char,
"chars_to_mask": number_of_chars,
"from_end": False,
}
# Define operator config
elif operator == "encrypt":
operator_config = {"key": encrypt_key}
elif operator == "highlight":
operator_config = {"lambda": lambda x: x}
else:
operator_config = None
# Change operator if needed as intermediate step
if operator == "highlight":
operator = "custom"
elif operator == "synthesize":
operator = "replace"
else:
operator = operator
res = anonymizer_engine().anonymize(
text,
analyze_results,
operators={"DEFAULT": OperatorConfig(operator, operator_config)},
)
return res
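# Illustrative usage sketch (editorial addition; argument values are placeholders):
#
#   results = analyze(st_model="en_core_web_lg", text="My name is John", language="en")
#   anonymized = anonymize(text="My name is John", operator="mask",
#                          analyze_results=results, mask_char="*", number_of_chars=4)
#   # anonymized.text would read something like "My name is ****",
#   # depending on what the analyzer detects.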
def annotate(text: str, analyze_results: List[RecognizerResult]):
"""Highlight the identified PII entities on the original text
:param text: Full text
:param analyze_results: list of results from presidio analyzer engine
"""
tokens = []
# Use the anonymizer to resolve overlaps
results = anonymize(
text=text,
operator="highlight",
analyze_results=analyze_results,
)
# sort by start index
results = sorted(results.items, key=lambda x: x.start)
for i, res in enumerate(results):
if i == 0:
tokens.append(text[: res.start])
# append entity text and entity type
tokens.append((text[res.start : res.end], res.entity_type))
# if another entity is coming, i.e. we are not at the last result, add the text up to the next entity
if i != len(results) - 1:
tokens.append(text[res.end : results[i + 1].start])
# if no more entities coming, add all remaining text
else:
tokens.append(text[res.end :])
return tokens
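# Illustrative sketch of the returned structure (editorial addition, values are
# placeholders): for the text "My name is John" with a single PERSON entity, the result
# would be ["My name is ", ("John", "PERSON"), ""]: plain strings for untouched text,
# (text, entity_type) tuples for highlighted spans, and a final string holding whatever
# text follows the last entity.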
def create_fake_data(
text: str,
analyze_results: List[RecognizerResult],
openai_key: str,
openai_model_name: str,
):
"""Creates a synthetic version of the text using OpenAI APIs"""
if not openai_key:
return "Please provide your OpenAI key"
results = anonymize(text=text, operator="replace", analyze_results=analyze_results)
set_openai_key(openai_key)
prompt = create_prompt(results.text)
fake = call_openai_api(prompt, openai_model_name)
return fake
@st.cache_data
def call_openai_api(prompt: str, openai_model_name: str) -> str:
fake_data = call_completion_model(prompt, model=openai_model_name)
return fake_data
| [] |
2024-01-10 | ArcherFMY/SD-T2I-360PanoImage | img2panoimg~pipeline_i2p.py | # Copyright © Alibaba, Inc. and its affiliates.
# The implementation here is modifed based on diffusers.StableDiffusionPipeline,
# originally Apache 2.0 License and public available at
# https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py
import copy
import inspect
import re
import warnings
from typing import Any, Callable, Dict, List, Optional, Union, Tuple
import os
import torch
import torch.nn.functional as F
from diffusers import (AutoencoderKL, DiffusionPipeline, ControlNetModel,
StableDiffusionPipeline, UNet2DConditionModel)
from diffusers.configuration_utils import FrozenDict
from diffusers.image_processor import VaeImageProcessor
from diffusers.loaders import LoraLoaderMixin, TextualInversionLoaderMixin
from diffusers.models.vae import DecoderOutput
from diffusers.models.controlnet import ControlNetOutput
from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import \
StableDiffusionSafetyChecker
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (PIL_INTERPOLATION, deprecate, is_accelerate_available,
is_accelerate_version, is_compiled_module, logging, randn_tensor,
replace_example_docstring)
from packaging import version
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
import PIL
import numpy as np
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> from diffusers import EulerAncestralDiscreteScheduler
>>> from txt2panoimage.pipeline_base import StableDiffusionBlendExtendPipeline
>>> model_id = "models/sd-base"
>>> pipe = StableDiffusionBlendExtendPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
>>> pipe = pipe.to("cuda")
>>> pipe.vae.enable_tiling()
>>> pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
>>> # remove following line if xformers is not installed
>>> pipe.enable_xformers_memory_efficient_attention()
>>> pipe.enable_model_cpu_offload()
>>> prompt = "a living room"
>>> image = pipe(prompt).images[0]
```
"""
re_attention = re.compile(
r"""
\\\(|
\\\)|
\\\[|
\\]|
\\\\|
\\|
\(|
\[|
:([+-]?[.\d]+)\)|
\)|
]|
[^\\()\[\]:]+|
:
""",
re.X,
)
def parse_prompt_attention(text):
"""
Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
Accepted tokens are:
(abc) - increases attention to abc by a multiplier of 1.1
(abc:3.12) - increases attention to abc by a multiplier of 3.12
[abc] - decreases attention to abc by a multiplier of 1.1
"""
res = []
round_brackets = []
square_brackets = []
round_bracket_multiplier = 1.1
square_bracket_multiplier = 1 / 1.1
def multiply_range(start_position, multiplier):
for p in range(start_position, len(res)):
res[p][1] *= multiplier
for m in re_attention.finditer(text):
text = m.group(0)
weight = m.group(1)
if text.startswith('\\'):
res.append([text[1:], 1.0])
elif text == '(':
round_brackets.append(len(res))
elif text == '[':
square_brackets.append(len(res))
elif weight is not None and len(round_brackets) > 0:
multiply_range(round_brackets.pop(), float(weight))
elif text == ')' and len(round_brackets) > 0:
multiply_range(round_brackets.pop(), round_bracket_multiplier)
elif text == ']' and len(square_brackets) > 0:
multiply_range(square_brackets.pop(), square_bracket_multiplier)
else:
res.append([text, 1.0])
for pos in round_brackets:
multiply_range(pos, round_bracket_multiplier)
for pos in square_brackets:
multiply_range(pos, square_bracket_multiplier)
if len(res) == 0:
res = [['', 1.0]]
# merge runs of identical weights
i = 0
while i + 1 < len(res):
if res[i][1] == res[i + 1][1]:
res[i][0] += res[i + 1][0]
res.pop(i + 1)
else:
i += 1
return res
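# Illustrative examples (editorial addition, derived from the rules above):
#
#   parse_prompt_attention("a (red:1.5) ball") -> [['a ', 1.0], ['red', 1.5], [' ball', 1.0]]
#   parse_prompt_attention("a (red) [ball]")   -> [['a ', 1.0], ['red', 1.1], [' ', 1.0], ['ball', 1 / 1.1]]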
def get_prompts_with_weights(pipe: DiffusionPipeline, prompt: List[str],
max_length: int):
r"""
Tokenize a list of prompts and return its tokens with weights of each token.
No padding, starting or ending token is included.
"""
tokens = []
weights = []
truncated = False
for text in prompt:
texts_and_weights = parse_prompt_attention(text)
text_token = []
text_weight = []
for word, weight in texts_and_weights:
# tokenize and discard the starting and the ending token
token = pipe.tokenizer(word).input_ids[1:-1]
text_token += token
# copy the weight by length of token
text_weight += [weight] * len(token)
# stop if the text is too long (longer than truncation limit)
if len(text_token) > max_length:
truncated = True
break
# truncate
if len(text_token) > max_length:
truncated = True
text_token = text_token[:max_length]
text_weight = text_weight[:max_length]
tokens.append(text_token)
weights.append(text_weight)
if truncated:
logger.warning(
'Prompt was truncated. Try to shorten the prompt or increase max_embeddings_multiples'
)
return tokens, weights
def pad_tokens_and_weights(tokens,
weights,
max_length,
bos,
eos,
pad,
no_boseos_middle=True,
chunk_length=77):
r"""
Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length.
"""
max_embeddings_multiples = (max_length - 2) // (chunk_length - 2)
weights_length = max_length if no_boseos_middle else max_embeddings_multiples * chunk_length
for i in range(len(tokens)):
tokens[i] = [
bos
] + tokens[i] + [pad] * (max_length - 1 - len(tokens[i]) - 1) + [eos]
if no_boseos_middle:
weights[i] = [1.0] + weights[i] + [1.0] * (
max_length - 1 - len(weights[i]))
else:
w = []
if len(weights[i]) == 0:
w = [1.0] * weights_length
else:
for j in range(max_embeddings_multiples):
w.append(1.0) # weight for starting token in this chunk
w += weights[i][j * (chunk_length - 2):min(
len(weights[i]), (j + 1) * (chunk_length - 2))]
w.append(1.0) # weight for ending token in this chunk
w += [1.0] * (weights_length - len(w))
weights[i] = w[:]
return tokens, weights
def get_unweighted_text_embeddings(
pipe: DiffusionPipeline,
text_input: torch.Tensor,
chunk_length: int,
no_boseos_middle: Optional[bool] = True,
):
"""
When the padded token sequence spans multiple chunks of the text encoder's capacity,
it is split into chunks that are sent to the text encoder individually.
"""
max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2)
if max_embeddings_multiples > 1:
text_embeddings = []
for i in range(max_embeddings_multiples):
# extract the i-th chunk
text_input_chunk = text_input[:, i * (chunk_length - 2):(i + 1)
* (chunk_length - 2) + 2].clone()
# cover the head and the tail by the starting and the ending tokens
text_input_chunk[:, 0] = text_input[0, 0]
text_input_chunk[:, -1] = text_input[0, -1]
text_embedding = pipe.text_encoder(text_input_chunk)[0]
if no_boseos_middle:
if i == 0:
# discard the ending token
text_embedding = text_embedding[:, :-1]
elif i == max_embeddings_multiples - 1:
# discard the starting token
text_embedding = text_embedding[:, 1:]
else:
# discard both starting and ending tokens
text_embedding = text_embedding[:, 1:-1]
text_embeddings.append(text_embedding)
text_embeddings = torch.concat(text_embeddings, axis=1)
else:
text_embeddings = pipe.text_encoder(text_input)[0]
return text_embeddings
def get_weighted_text_embeddings(
pipe: DiffusionPipeline,
prompt: Union[str, List[str]],
uncond_prompt: Optional[Union[str, List[str]]] = None,
max_embeddings_multiples: Optional[int] = 3,
no_boseos_middle: Optional[bool] = False,
skip_parsing: Optional[bool] = False,
skip_weighting: Optional[bool] = False,
):
r"""
Prompts can be assigned local weights using brackets. For example, the
prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful',
and the embedding tokens corresponding to those words are multiplied by a constant, 1.1.
Also, to regularize the embedding, the weighted embedding is scaled to preserve the original mean.
Args:
pipe (`DiffusionPipeline`):
Pipe to provide access to the tokenizer and the text encoder.
prompt (`str` or `List[str]`):
The prompt or prompts to guide the image generation.
uncond_prompt (`str` or `List[str]`):
The unconditional prompt or prompts to guide the image generation. If an unconditional prompt
is provided, the embeddings of prompt and uncond_prompt are concatenated.
max_embeddings_multiples (`int`, *optional*, defaults to `3`):
The max multiple length of prompt embeddings compared to the max output length of text encoder.
no_boseos_middle (`bool`, *optional*, defaults to `False`):
If the length of the text tokens is a multiple of the text encoder's capacity, whether to keep the
starting and ending tokens of each chunk in the middle.
skip_parsing (`bool`, *optional*, defaults to `False`):
Skip the parsing of brackets.
skip_weighting (`bool`, *optional*, defaults to `False`):
Skip the weighting. When the parsing is skipped, it is forced True.
"""
max_length = (pipe.tokenizer.model_max_length
- 2) * max_embeddings_multiples + 2
if isinstance(prompt, str):
prompt = [prompt]
if not skip_parsing:
prompt_tokens, prompt_weights = get_prompts_with_weights(
pipe, prompt, max_length - 2)
if uncond_prompt is not None:
if isinstance(uncond_prompt, str):
uncond_prompt = [uncond_prompt]
uncond_tokens, uncond_weights = get_prompts_with_weights(
pipe, uncond_prompt, max_length - 2)
else:
prompt_tokens = [
token[1:-1] for token in pipe.tokenizer(
prompt, max_length=max_length, truncation=True).input_ids
]
prompt_weights = [[1.0] * len(token) for token in prompt_tokens]
if uncond_prompt is not None:
if isinstance(uncond_prompt, str):
uncond_prompt = [uncond_prompt]
uncond_tokens = [
token[1:-1] for token in pipe.tokenizer(
uncond_prompt, max_length=max_length,
truncation=True).input_ids
]
uncond_weights = [[1.0] * len(token) for token in uncond_tokens]
# round up the longest length of tokens to a multiple of (model_max_length - 2)
max_length = max([len(token) for token in prompt_tokens])
if uncond_prompt is not None:
max_length = max(max_length,
max([len(token) for token in uncond_tokens]))
max_embeddings_multiples = min(
max_embeddings_multiples,
(max_length - 1) // (pipe.tokenizer.model_max_length - 2) + 1,
)
max_embeddings_multiples = max(1, max_embeddings_multiples)
max_length = (pipe.tokenizer.model_max_length
- 2) * max_embeddings_multiples + 2
# pad the length of tokens and weights
bos = pipe.tokenizer.bos_token_id
eos = pipe.tokenizer.eos_token_id
pad = getattr(pipe.tokenizer, 'pad_token_id', eos)
prompt_tokens, prompt_weights = pad_tokens_and_weights(
prompt_tokens,
prompt_weights,
max_length,
bos,
eos,
pad,
no_boseos_middle=no_boseos_middle,
chunk_length=pipe.tokenizer.model_max_length,
)
prompt_tokens = torch.tensor(
prompt_tokens, dtype=torch.long, device=pipe.device)
if uncond_prompt is not None:
uncond_tokens, uncond_weights = pad_tokens_and_weights(
uncond_tokens,
uncond_weights,
max_length,
bos,
eos,
pad,
no_boseos_middle=no_boseos_middle,
chunk_length=pipe.tokenizer.model_max_length,
)
uncond_tokens = torch.tensor(
uncond_tokens, dtype=torch.long, device=pipe.device)
# get the embeddings
text_embeddings = get_unweighted_text_embeddings(
pipe,
prompt_tokens,
pipe.tokenizer.model_max_length,
no_boseos_middle=no_boseos_middle,
)
prompt_weights = torch.tensor(
prompt_weights,
dtype=text_embeddings.dtype,
device=text_embeddings.device)
if uncond_prompt is not None:
uncond_embeddings = get_unweighted_text_embeddings(
pipe,
uncond_tokens,
pipe.tokenizer.model_max_length,
no_boseos_middle=no_boseos_middle,
)
uncond_weights = torch.tensor(
uncond_weights,
dtype=uncond_embeddings.dtype,
device=uncond_embeddings.device)
# assign weights to the prompts and normalize in the sense of mean
# TODO: should we normalize by chunk or in a whole (current implementation)?
if (not skip_parsing) and (not skip_weighting):
previous_mean = text_embeddings.float().mean(axis=[-2, -1]).to(
text_embeddings.dtype)
text_embeddings *= prompt_weights.unsqueeze(-1)
current_mean = text_embeddings.float().mean(axis=[-2, -1]).to(
text_embeddings.dtype)
text_embeddings *= (previous_mean
/ current_mean).unsqueeze(-1).unsqueeze(-1)
if uncond_prompt is not None:
previous_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(
uncond_embeddings.dtype)
uncond_embeddings *= uncond_weights.unsqueeze(-1)
current_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(
uncond_embeddings.dtype)
uncond_embeddings *= (previous_mean
/ current_mean).unsqueeze(-1).unsqueeze(-1)
if uncond_prompt is not None:
return text_embeddings, uncond_embeddings
return text_embeddings, None
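# Illustrative sketch (editorial addition; shapes assume the standard 77-token CLIP
# tokenizer/text encoder, which is an assumption rather than something enforced here):
#
#   cond, uncond = get_weighted_text_embeddings(pipe, "a (cozy:1.2) living room", uncond_prompt="")
#   # cond.shape -> (1, 77, hidden_dim) for short prompts; longer prompts are padded to
#   # (model_max_length - 2) * k + 2 tokens, with k up to max_embeddings_multiples.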
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
"""
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
"""
std_text = noise_pred_text.std(
dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
# rescale the results from guidance (fixes overexposure)
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
noise_cfg = guidance_rescale * noise_pred_rescaled + (
1 - guidance_rescale) * noise_cfg
return noise_cfg
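# Illustrative usage sketch (editorial addition; the guidance_rescale value is a placeholder):
#
#   noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
#   noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=0.7)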
def prepare_image(image):
if isinstance(image, torch.Tensor):
# Batch single image
if image.ndim == 3:
image = image.unsqueeze(0)
image = image.to(dtype=torch.float32)
else:
# preprocess image
if isinstance(image, (PIL.Image.Image, np.ndarray)):
image = [image]
if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
image = [np.array(i.convert("RGB"))[None, :] for i in image]
image = np.concatenate(image, axis=0)
elif isinstance(image, list) and isinstance(image[0], np.ndarray):
image = np.concatenate([i[None, :] for i in image], axis=0)
image = image.transpose(0, 3, 1, 2)
image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
return image
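# Illustrative sketch (editorial addition; the image size is a placeholder):
#
#   img = PIL.Image.new("RGB", (512, 512))
#   t = prepare_image(img)
#   # t is a float32 tensor of shape (1, 3, 512, 512), scaled to the [-1, 1] range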
class StableDiffusionImage2PanoPipeline(DiffusionPipeline, TextualInversionLoaderMixin):
r"""
Pipeline for text-to-image generation using Stable Diffusion with ControlNet guidance.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
In addition the pipeline inherits the following loading methods:
- *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`]
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
Frozen text-encoder. Stable Diffusion uses the text portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
controlnet ([`ControlNetModel`] or `List[ControlNetModel]`):
Provides additional conditioning to the unet during the denoising process. If you set multiple ControlNets
as a list, the outputs from each ControlNet are added together to create one combined additional
conditioning.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
feature_extractor ([`CLIPImageProcessor`]):
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
"""
_optional_components = ["safety_checker", "feature_extractor"]
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
scheduler: KarrasDiffusionSchedulers,
safety_checker: StableDiffusionSafetyChecker,
feature_extractor: CLIPImageProcessor,
requires_safety_checker: bool = True,
):
super().__init__()
if safety_checker is None and requires_safety_checker:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
)
if safety_checker is not None and feature_extractor is None:
raise ValueError(
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
)
if isinstance(controlnet, (list, tuple)):
controlnet = MultiControlNetModel(controlnet)
self.register_modules(
vae=vae,
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
controlnet=controlnet,
scheduler=scheduler,
safety_checker=safety_checker,
feature_extractor=feature_extractor,
)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
self.register_to_config(requires_safety_checker=requires_safety_checker)
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
def enable_vae_slicing(self):
r"""
Enable sliced VAE decoding.
When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
steps. This is useful to save some memory and allow larger batch sizes.
"""
self.vae.enable_slicing()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
def disable_vae_slicing(self):
r"""
Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
computing decoding in one step.
"""
self.vae.disable_slicing()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
def enable_vae_tiling(self):
r"""
Enable tiled VAE decoding.
When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in
several steps. This is useful to save a large amount of memory and to allow the processing of larger images.
"""
self.vae.enable_tiling()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
def disable_vae_tiling(self):
r"""
Disable tiled VAE decoding. If `enable_vae_tiling` was previously invoked, this method will go back to
computing decoding in one step.
"""
self.vae.disable_tiling()
def enable_sequential_cpu_offload(self, gpu_id=0):
r"""
Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
text_encoder, vae, controlnet, and safety checker have their state dicts saved to CPU and then are moved to a
`torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
Note that offloading happens on a submodule basis. Memory savings are higher than with
`enable_model_cpu_offload`, but performance is lower.
"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`")
device = torch.device(f"cuda:{gpu_id}")
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.controlnet]:
cpu_offload(cpu_offloaded_model, device)
if self.safety_checker is not None:
cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)
def enable_model_cpu_offload(self, gpu_id=0):
r"""
Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
method is called, and the model remains on the GPU until the next model runs. Memory savings are lower than with
`enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
"""
if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
device = torch.device(f"cuda:{gpu_id}")
hook = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
_, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
if self.safety_checker is not None:
# the safety checker can offload the vae again
_, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
# The ControlNet hook has to be manually offloaded, as it alternates with the unet.
cpu_offload_with_hook(self.controlnet, device)
# We'll offload the last model manually.
self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _execution_device(self):
r"""
Returns the device on which the pipeline's models will be executed. After calling
`pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
hooks.
"""
if not hasattr(self.unet, "_hf_hook"):
return self.device
for module in self.unet.modules():
if (
hasattr(module, "_hf_hook")
and hasattr(module._hf_hook, "execution_device")
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
def _encode_prompt(
self,
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt=None,
max_embeddings_multiples=3,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
lora_scale: Optional[float] = None,
):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `list(int)`):
prompt to be encoded
device: (`torch.device`):
torch device
num_images_per_prompt (`int`):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`):
whether to use classifier free guidance or not
negative_prompt (`str` or `List[str]`):
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
if `guidance_scale` is less than `1`).
max_embeddings_multiples (`int`, *optional*, defaults to `3`):
The max multiple length of prompt embeddings compared to the max output length of text encoder.
"""
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
self._lora_scale = lora_scale
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
if negative_prompt_embeds is None:
if negative_prompt is None:
negative_prompt = [""] * batch_size
elif isinstance(negative_prompt, str):
negative_prompt = [negative_prompt] * batch_size
if batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
if prompt_embeds is None or negative_prompt_embeds is None:
if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
if do_classifier_free_guidance and negative_prompt_embeds is None:
negative_prompt = self.maybe_convert_prompt(negative_prompt, self.tokenizer)
prompt_embeds1, negative_prompt_embeds1 = get_weighted_text_embeddings(
pipe=self,
prompt=prompt,
uncond_prompt=negative_prompt if do_classifier_free_guidance else None,
max_embeddings_multiples=max_embeddings_multiples,
)
if prompt_embeds is None:
prompt_embeds = prompt_embeds1
if negative_prompt_embeds is None:
negative_prompt_embeds = negative_prompt_embeds1
bs_embed, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
if do_classifier_free_guidance:
bs_embed, seq_len, _ = negative_prompt_embeds.shape
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
negative_prompt_embeds = negative_prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
return prompt_embeds
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
def run_safety_checker(self, image, device, dtype):
if self.safety_checker is None:
has_nsfw_concept = None
else:
if torch.is_tensor(image):
feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
else:
feature_extractor_input = self.image_processor.numpy_to_pil(image)
safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
image, has_nsfw_concept = self.safety_checker(
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
)
return image, has_nsfw_concept
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
def decode_latents(self, latents):
warnings.warn(
"The decode_latents method is deprecated and will be removed in a future version. Please"
" use VaeImageProcessor instead",
FutureWarning,
)
latents = 1 / self.vae.config.scaling_factor * latents
image = self.vae.decode(latents, return_dict=False)[0]
image = (image / 2 + 0.5).clamp(0, 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
return image
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
def check_inputs(
self,
prompt,
image,
height,
width,
callback_steps,
negative_prompt=None,
prompt_embeds=None,
negative_prompt_embeds=None,
controlnet_conditioning_scale=1.0,
):
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
f" {negative_prompt_embeds.shape}."
)
# `prompt` needs more sophisticated handling when there are multiple
# conditionings.
if isinstance(self.controlnet, MultiControlNetModel):
if isinstance(prompt, list):
logger.warning(
f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}"
" prompts. The conditionings will be fixed across the prompts."
)
# Check `image`
is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
self.controlnet, torch._dynamo.eval_frame.OptimizedModule
)
if (
isinstance(self.controlnet, ControlNetModel)
or is_compiled
and isinstance(self.controlnet._orig_mod, ControlNetModel)
):
self.check_image(image, prompt, prompt_embeds)
elif (
isinstance(self.controlnet, MultiControlNetModel)
or is_compiled
and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
):
if not isinstance(image, list):
raise TypeError("For multiple controlnets: `image` must be type `list`")
# When `image` is a nested list:
# (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
elif any(isinstance(i, list) for i in image):
raise ValueError("A single batch of multiple conditionings are supported at the moment.")
elif len(image) != len(self.controlnet.nets):
raise ValueError(
"For multiple controlnets: `image` must have the same length as the number of controlnets."
)
for image_ in image:
self.check_image(image_, prompt, prompt_embeds)
else:
assert False
# Check `controlnet_conditioning_scale`
if (
isinstance(self.controlnet, ControlNetModel)
or is_compiled
and isinstance(self.controlnet._orig_mod, ControlNetModel)
):
if not isinstance(controlnet_conditioning_scale, float):
raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
elif (
isinstance(self.controlnet, MultiControlNetModel)
or is_compiled
and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
):
if isinstance(controlnet_conditioning_scale, list):
if any(isinstance(i, list) for i in controlnet_conditioning_scale):
raise ValueError("A single batch of multiple conditionings are supported at the moment.")
elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
self.controlnet.nets
):
raise ValueError(
"For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
" the same length as the number of controlnets"
)
else:
assert False
def check_image(self, image, prompt, prompt_embeds):
image_is_pil = isinstance(image, PIL.Image.Image)
image_is_tensor = isinstance(image, torch.Tensor)
image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
if not image_is_pil and not image_is_tensor and not image_is_pil_list and not image_is_tensor_list:
raise TypeError(
"image must be passed and be one of PIL image, torch tensor, list of PIL images, or list of torch tensors"
)
if image_is_pil:
image_batch_size = 1
elif image_is_tensor:
image_batch_size = image.shape[0]
elif image_is_pil_list:
image_batch_size = len(image)
elif image_is_tensor_list:
image_batch_size = len(image)
if prompt is not None and isinstance(prompt, str):
prompt_batch_size = 1
elif prompt is not None and isinstance(prompt, list):
prompt_batch_size = len(prompt)
elif prompt_embeds is not None:
prompt_batch_size = prompt_embeds.shape[0]
if image_batch_size != 1 and image_batch_size != prompt_batch_size:
raise ValueError(
f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
)
# Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image
def prepare_control_image(
self,
image,
width,
height,
batch_size,
num_images_per_prompt,
device,
dtype,
do_classifier_free_guidance=False,
guess_mode=False,
):
if not isinstance(image, torch.Tensor):
if isinstance(image, PIL.Image.Image):
image = [image]
if isinstance(image[0], PIL.Image.Image):
images = []
for image_ in image:
image_ = image_.convert("RGB")
image_ = image_.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])
image_ = np.array(image_)
image_ = image_[None, :]
images.append(image_)
image = images
image = np.concatenate(image, axis=0)
image = np.array(image).astype(np.float32) / 255.0
image = image.transpose(0, 3, 1, 2)
image = torch.from_numpy(image)
elif isinstance(image[0], torch.Tensor):
image = torch.cat(image, dim=0)
image_batch_size = image.shape[0]
if image_batch_size == 1:
repeat_by = batch_size
else:
# image batch size is the same as prompt batch size
repeat_by = num_images_per_prompt
image = image.repeat_interleave(repeat_by, dim=0)
image = image.to(device=device, dtype=dtype)
if do_classifier_free_guidance and not guess_mode:
image = torch.cat([image] * 2)
return image
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
def get_timesteps(self, num_inference_steps, strength, device):
# get the original timestep using init_timestep
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
return timesteps, num_inference_steps - t_start
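# Illustrative numbers (editorial addition, assuming a scheduler with order == 1):
# with num_inference_steps=50 and strength=0.8, init_timestep=40 and t_start=10, so the
# last 40 scheduler timesteps are returned and 40 denoising steps are actually run.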
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.prepare_latents
def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
)
image = image.to(device=device, dtype=dtype)
batch_size = batch_size * num_images_per_prompt
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if isinstance(generator, list):
init_latents = [
self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
]
init_latents = torch.cat(init_latents, dim=0)
else:
init_latents = self.vae.encode(image).latent_dist.sample(generator)
init_latents = self.vae.config.scaling_factor * init_latents
if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
# expand init_latents for batch_size
deprecation_message = (
f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
" images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
" that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
" your script to pass as many initial images as text prompts to suppress this warning."
)
deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
additional_image_per_prompt = batch_size // init_latents.shape[0]
init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
raise ValueError(
f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
)
else:
init_latents = torch.cat([init_latents], dim=0)
shape = init_latents.shape
noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
# get latents
init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
latents = init_latents
return latents
def _default_height_width(self, height, width, image):
# NOTE: It is possible that a list of images have different
# dimensions for each image, so just checking the first image
# is not _exactly_ correct, but it is simple.
while isinstance(image, list):
image = image[0]
if height is None:
if isinstance(image, PIL.Image.Image):
height = image.height
elif isinstance(image, torch.Tensor):
height = image.shape[2]
height = (height // 8) * 8 # round down to nearest multiple of 8
if width is None:
if isinstance(image, PIL.Image.Image):
width = image.width
elif isinstance(image, torch.Tensor):
width = image.shape[3]
width = (width // 8) * 8 # round down to nearest multiple of 8
return height, width
# override DiffusionPipeline
def save_pretrained(
self,
save_directory: Union[str, os.PathLike],
safe_serialization: bool = False,
variant: Optional[str] = None,
):
if isinstance(self.controlnet, ControlNetModel):
super().save_pretrained(save_directory, safe_serialization, variant)
else:
raise NotImplementedError("Currently, the `save_pretrained()` is not implemented for Multi-ControlNet.")
def denoise_latents(self, latents, t, prompt_embeds, control_image, controlnet_conditioning_scale, guess_mode, cross_attention_kwargs, do_classifier_free_guidance, guidance_scale, extra_step_kwargs, views_scheduler_status):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
self.scheduler.__dict__.update(views_scheduler_status[0])
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# controlnet(s) inference
if guess_mode and do_classifier_free_guidance:
# Infer ControlNet only for the conditional batch.
controlnet_latent_model_input = latents
controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
else:
controlnet_latent_model_input = latent_model_input
controlnet_prompt_embeds = prompt_embeds
down_block_res_samples, mid_block_res_sample = self.controlnet(
controlnet_latent_model_input,
t,
encoder_hidden_states=controlnet_prompt_embeds,
controlnet_cond=control_image,
conditioning_scale=controlnet_conditioning_scale,
guess_mode=guess_mode,
return_dict=False,
)
if guess_mode and do_classifier_free_guidance:
# Inferred ControlNet only for the conditional batch.
# To apply the output of ControlNet to both the unconditional and conditional batches,
# add 0 to the unconditional batch to keep it unchanged.
down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
# predict the noise residual
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
cross_attention_kwargs=cross_attention_kwargs,
down_block_additional_residuals=down_block_res_samples,
mid_block_additional_residual=mid_block_res_sample,
return_dict=False,
)[0]
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
return latents
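# Editorial note (not from the original source): the helper above performs a single
# ControlNet-conditioned denoising step (CFG batch doubling, ControlNet residuals,
# UNet noise prediction, guidance combination, one scheduler step) and is written so
# it can be applied independently to each latent tile of the panorama.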
def blend_v(self, a, b, blend_extent):
blend_extent = min(a.shape[2], b.shape[2], blend_extent)
for y in range(blend_extent):
b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def blend_h(self, a, b, blend_extent):
blend_extent = min(a.shape[3], b.shape[3], blend_extent)
for x in range(blend_extent):
b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
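# Illustrative sketch of the horizontal blend above (editorial addition; the vertical
# blend is analogous): with blend_extent=4, column x of `b` becomes the cross-fade
#   b[..., x] = a[..., -4 + x] * (1 - x / 4) + b[..., x] * (x / 4)
# so the seam between neighbouring tiles (or the two ends of the panorama) fades
# linearly from `a` into `b` over 4 columns.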
def get_blocks(self, latents, control_image, tile_latent_min_size, overlap_size):
rows_latents = []
rows_control_images = []
for i in range(0, latents.shape[2] - overlap_size, overlap_size):
row_latents = []
row_control_images = []
for j in range(0, latents.shape[3] - overlap_size, overlap_size):
latents_input = latents[:, :, i: i + tile_latent_min_size, j: j + tile_latent_min_size]
control_image_input = control_image[:, :,
self.vae_scale_factor * i: self.vae_scale_factor * (i + tile_latent_min_size),
self.vae_scale_factor * j: self.vae_scale_factor * (j + tile_latent_min_size)]
row_latents.append(latents_input)
row_control_images.append(control_image_input)
rows_latents.append(row_latents)
rows_control_images.append(row_control_images)
return rows_latents, rows_control_images
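# Illustrative numbers (editorial addition; sizes are placeholders): for latents with
# spatial shape 64x128, tile_latent_min_size=64 and overlap_size=32, the loops yield a
# 1x3 grid of overlapping 64x64 latent tiles, each paired with the matching
# (vae_scale_factor times larger) crop of the ControlNet conditioning image.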
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] = None,
control_image: Union[
torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]
] = None,
height: Optional[int] = None,
width: Optional[int] = None,
strength: float = 0.8,
num_inference_steps: int = 50,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
controlnet_conditioning_scale: Union[float, List[float]] = 0.8,
guess_mode: bool = False,
mask: Optional[torch.FloatTensor] = None,
):
r"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
instead.
image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`,
`List[List[torch.FloatTensor]]`, or `List[List[PIL.Image.Image]]`):
The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If
the type is specified as `Torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can
also be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If
height and/or width are passed, `image` is resized according to them. If multiple ControlNets are
specified in init, images must be passed as a list such that each element of the list can be correctly
batched for input to a single controlnet.
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 7.5):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`], will be ignored for others.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
to make generation deterministic.
latents (`torch.FloatTensor`, *optional*):
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor will be generated by sampling using the supplied random `generator`.
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generate image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
plain tuple.
callback (`Callable`, *optional*):
A function that will be called every `callback_steps` steps during inference. The function will be
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function will be called. If not specified, the callback will be
called at every step.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
to the residual in the original unet. If multiple ControlNets are specified in init, you can set the
corresponding scale as a list. Note that by default, we use a smaller conditioning scale for inpainting
than for [`~StableDiffusionControlNetPipeline.__call__`].
guess_mode (`bool`, *optional*, defaults to `False`):
In this mode, the ControlNet encoder will try its best to recognize the content of the input image even if
you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended.
Examples:
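A minimal usage sketch (illustrative only: the pipeline class name, checkpoint IDs, and input images are
assumptions, not taken from this file; only the argument names follow this docstring):
>>> import torch
>>> from diffusers import ControlNetModel
>>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
>>> # pipe = MyControlNetImg2ImgPipeline.from_pretrained( # hypothetical pipeline class
>>> # "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16).to("cuda")
>>> # image = pipe(prompt="a wide mountain panorama", image=init_image, control_image=canny_image,
>>> # strength=0.75, num_inference_steps=50).images[0]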
Returns:
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
When returning a tuple, the first element is a list with the generated images, and the second element is a
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
(nsfw) content, according to the `safety_checker`.
"""
def controlnet_forward(
self,
sample: torch.FloatTensor,
timestep: Union[torch.Tensor, float, int],
encoder_hidden_states: torch.Tensor,
controlnet_cond: torch.FloatTensor,
conditioning_scale: float = 1.0,
class_labels: Optional[torch.Tensor] = None,
timestep_cond: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guess_mode: bool = False,
return_dict: bool = True,
mask: Optional[torch.FloatTensor] = None,
) -> Union[ControlNetOutput, Tuple]:
"""
The [`ControlNetModel`] forward method.
Args:
sample (`torch.FloatTensor`):
The noisy input tensor.
timestep (`Union[torch.Tensor, float, int]`):
The number of timesteps to denoise an input.
encoder_hidden_states (`torch.Tensor`):
The encoder hidden states.
controlnet_cond (`torch.FloatTensor`):
The conditional input tensor of shape `(batch_size, sequence_length, hidden_size)`.
conditioning_scale (`float`, defaults to `1.0`):
The scale factor for ControlNet outputs.
class_labels (`torch.Tensor`, *optional*, defaults to `None`):
Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
timestep_cond (`torch.Tensor`, *optional*, defaults to `None`):
attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
added_cond_kwargs (`dict`):
Additional conditions for the Stable Diffusion XL UNet.
cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`):
A kwargs dictionary that if specified is passed along to the `AttnProcessor`.
guess_mode (`bool`, defaults to `False`):
In this mode, the ControlNet encoder tries its best to recognize the content of the input even if
you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended.
return_dict (`bool`, defaults to `True`):
Whether or not to return a [`~models.controlnet.ControlNetOutput`] instead of a plain tuple.
Returns:
[`~models.controlnet.ControlNetOutput`] **or** `tuple`:
If `return_dict` is `True`, a [`~models.controlnet.ControlNetOutput`] is returned, otherwise a tuple is
returned where the first element is the sample tensor.
"""
# check channel order
channel_order = self.config.controlnet_conditioning_channel_order
if channel_order == "rgb":
# in rgb order by default
...
elif channel_order == "bgr":
controlnet_cond = torch.flip(controlnet_cond, dims=[1])
else:
raise ValueError(f"unknown `controlnet_conditioning_channel_order`: {channel_order}")
# prepare attention_mask
if attention_mask is not None:
attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
attention_mask = attention_mask.unsqueeze(1)
# 1. time
timesteps = timestep
if not torch.is_tensor(timesteps):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
is_mps = sample.device.type == "mps"
if isinstance(timestep, float):
dtype = torch.float32 if is_mps else torch.float64
else:
dtype = torch.int32 if is_mps else torch.int64
timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
elif len(timesteps.shape) == 0:
timesteps = timesteps[None].to(sample.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timesteps = timesteps.expand(sample.shape[0])
t_emb = self.time_proj(timesteps)
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might actually be running in fp16. so we need to cast here.
# there might be better ways to encapsulate this.
t_emb = t_emb.to(dtype=sample.dtype)
emb = self.time_embedding(t_emb, timestep_cond)
aug_emb = None
if self.class_embedding is not None:
if class_labels is None:
raise ValueError("class_labels should be provided when num_class_embeds > 0")
if self.config.class_embed_type == "timestep":
class_labels = self.time_proj(class_labels)
class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
emb = emb + class_emb
if self.config.addition_embed_type is not None:
if self.config.addition_embed_type == "text":
aug_emb = self.add_embedding(encoder_hidden_states)
elif self.config.addition_embed_type == "text_time":
if "text_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
)
text_embeds = added_cond_kwargs.get("text_embeds")
if "time_ids" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
)
time_ids = added_cond_kwargs.get("time_ids")
time_embeds = self.add_time_proj(time_ids.flatten())
time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
add_embeds = add_embeds.to(emb.dtype)
aug_emb = self.add_embedding(add_embeds)
emb = emb + aug_emb if aug_emb is not None else emb
# 2. pre-process
sample = self.conv_in(sample)
controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
if mask is not None:
sample = (1 - mask.to(sample.dtype)) * sample + mask.to(sample.dtype) * controlnet_cond
else:
sample = sample + controlnet_cond
# 3. down
down_block_res_samples = (sample,)
for downsample_block in self.down_blocks:
if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
sample, res_samples = downsample_block(
hidden_states=sample,
temb=emb,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
cross_attention_kwargs=cross_attention_kwargs,
)
else:
sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
down_block_res_samples += res_samples
# 4. mid
if self.mid_block is not None:
sample = self.mid_block(
sample,
emb,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
cross_attention_kwargs=cross_attention_kwargs,
)
# 5. Control net blocks
controlnet_down_block_res_samples = ()
for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
down_block_res_sample = controlnet_block(down_block_res_sample)
controlnet_down_block_res_samples = controlnet_down_block_res_samples + (down_block_res_sample,)
down_block_res_samples = controlnet_down_block_res_samples
mid_block_res_sample = self.controlnet_mid_block(sample)
# 6. scaling
if guess_mode and not self.config.global_pool_conditions:
scales = torch.logspace(-1, 0, len(down_block_res_samples) + 1, device=sample.device) # 0.1 to 1.0
scales = scales * conditioning_scale
down_block_res_samples = [sample * scale for sample, scale in zip(down_block_res_samples, scales)]
mid_block_res_sample = mid_block_res_sample * scales[-1] # last one
else:
down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample = mid_block_res_sample * conditioning_scale
if self.config.global_pool_conditions:
down_block_res_samples = [
torch.mean(sample, dim=(2, 3), keepdim=True) for sample in down_block_res_samples
]
mid_block_res_sample = torch.mean(mid_block_res_sample, dim=(2, 3), keepdim=True)
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return ControlNetOutput(
down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
)
self.controlnet.forward = controlnet_forward.__get__(self.controlnet, ControlNetModel)
def tiled_decode(
self,
z: torch.FloatTensor,
return_dict: bool = True
) -> Union[DecoderOutput, torch.FloatTensor]:
r"""Decode a batch of images using a tiled decoder.
Args:
When this option is enabled, the VAE will split the input tensor into tiles to compute decoding in several
steps. This is useful to keep memory use constant regardless of image size. The end result of tiled
decoding is: different from non-tiled decoding due to each tile using a different decoder.
To avoid tiling artifacts, the tiles overlap and are blended together to form a smooth output.
You may still see tile-sized changes in the look of the output, but they should be much less noticeable.
z (`torch.FloatTensor`): Input batch of latent vectors. return_dict (`bool`, *optional*, defaults to
`True`):
Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
"""
_tile_overlap_factor = 1 - self.tile_overlap_factor
overlap_size = int(self.tile_latent_min_size
* _tile_overlap_factor)
blend_extent = int(self.tile_sample_min_size
* self.tile_overlap_factor)
row_limit = self.tile_sample_min_size - blend_extent
w = z.shape[3]
z = torch.cat([z, z[:, :, :, :w // 4]], dim=-1)
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
rows = []
for i in range(0, z.shape[2], overlap_size):
row = []
tile = z[:, :, i:i + self.tile_latent_min_size, :]
tile = self.post_quant_conv(tile)
decoded = self.decoder(tile)
vae_scale_factor = decoded.shape[-1] // tile.shape[-1]
row.append(decoded)
rows.append(row)
result_rows = []
for i, row in enumerate(rows):
result_row = []
for j, tile in enumerate(row):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
if j > 0:
tile = self.blend_h(row[j - 1], tile, blend_extent)
result_row.append(
self.blend_h(
tile[:, :, :row_limit, w * vae_scale_factor:],
tile[:, :, :row_limit, :w * vae_scale_factor],
tile.shape[-1] - w * vae_scale_factor))
result_rows.append(torch.cat(result_row, dim=3))
dec = torch.cat(result_rows, dim=2)
if not return_dict:
return (dec, )
return DecoderOutput(sample=dec)
self.vae.tiled_decode = tiled_decode.__get__(self.vae, AutoencoderKL)
# 0. Default height and width to unet
height, width = self._default_height_width(height, width, image)
self.blend_extend = width // self.vae_scale_factor // 32
# 1. Check inputs. Raise error if not correct
self.check_inputs(
prompt,
control_image,
height,
width,
callback_steps,
negative_prompt,
prompt_embeds,
negative_prompt_embeds,
controlnet_conditioning_scale,
)
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
self.controlnet.to(device)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
global_pool_conditions = (
controlnet.config.global_pool_conditions
if isinstance(controlnet, ControlNetModel)
else controlnet.nets[0].config.global_pool_conditions
)
guess_mode = guess_mode or global_pool_conditions
# 3. Encode input prompt
prompt_embeds = self._encode_prompt(
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
)
# 4. Prepare image, and controlnet_conditioning_image
image = prepare_image(image)
# 5. Prepare image
if isinstance(controlnet, ControlNetModel):
control_image = self.prepare_control_image(
image=control_image,
width=width,
height=height,
batch_size=batch_size * num_images_per_prompt,
num_images_per_prompt=num_images_per_prompt,
device=device,
dtype=controlnet.dtype,
do_classifier_free_guidance=do_classifier_free_guidance,
guess_mode=guess_mode,
)
elif isinstance(controlnet, MultiControlNetModel):
control_images = []
for control_image_ in control_image:
control_image_ = self.prepare_control_image(
image=control_image_,
width=width,
height=height,
batch_size=batch_size * num_images_per_prompt,
num_images_per_prompt=num_images_per_prompt,
device=device,
dtype=controlnet.dtype,
do_classifier_free_guidance=do_classifier_free_guidance,
guess_mode=guess_mode,
)
control_images.append(control_image_)
control_image = control_images
else:
assert False
# 5. Prepare timesteps
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
# 6. Prepare latent variables
latents = self.prepare_latents(
image,
latent_timestep,
batch_size,
num_images_per_prompt,
prompt_embeds.dtype,
device,
generator,
)
if mask is not None:
mask = torch.cat([mask] * batch_size, dim=0)
# 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
views_scheduler_status = [copy.deepcopy(self.scheduler.__dict__)]
# value = torch.zeros_like(latents)
latents = torch.cat([latents, latents[:, :, :, :self.blend_extend]], dim=-1)
control_image = torch.cat([control_image, control_image[:, :, :, :self.blend_extend * self.vae_scale_factor]], dim=-1)
if mask is not None:
mask = torch.cat([mask] * batch_size, dim=0)
mask = torch.cat([mask, mask[:, :, :, :self.blend_extend]], dim=-1)
# 8. Denoising loop
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
if mask is not None:
mask_input = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
else:
mask_input = None
# controlnet(s) inference
if guess_mode and do_classifier_free_guidance:
# Infer ControlNet only for the conditional batch.
controlnet_latent_model_input = latents
controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
else:
controlnet_latent_model_input = latent_model_input
controlnet_prompt_embeds = prompt_embeds
down_block_res_samples, mid_block_res_sample = self.controlnet(
controlnet_latent_model_input,
t,
encoder_hidden_states=controlnet_prompt_embeds,
controlnet_cond=control_image,
conditioning_scale=controlnet_conditioning_scale,
guess_mode=guess_mode,
return_dict=False,
mask=mask_input,
)
if guess_mode and do_classifier_free_guidance:
# Inferred ControlNet only for the conditional batch.
# To apply the output of ControlNet to both the unconditional and conditional batches,
# add 0 to the unconditional batch to keep it unchanged.
down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
# predict the noise residual
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
cross_attention_kwargs=cross_attention_kwargs,
down_block_additional_residuals=down_block_res_samples,
mid_block_additional_residual=mid_block_res_sample,
return_dict=False,
)[0]
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
callback(i, t, latents)
# latents = value + 0.0
latents = self.blend_h(latents, latents, self.blend_extend)
latents = self.blend_h(latents, latents, self.blend_extend)
latents = latents[:, :, :, :width // self.vae_scale_factor]
# If we do sequential model offloading, let's offload unet and controlnet
# manually for max memory savings
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
self.unet.to("cpu")
self.controlnet.to("cpu")
torch.cuda.empty_cache()
if not output_type == "latent":
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
else:
image = latents
has_nsfw_concept = None
if has_nsfw_concept is None:
do_denormalize = [True] * image.shape[0]
else:
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
# Offload last model to CPU
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
| [
"1",
"[[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]"
] |
2024-01-10 | Cristina-Gabriela/AI-Powered-Video-Tutorial-Generator | backend~functions~create_script_function.py | from langchain.llms import Cohere
from langchain import PromptTemplate, LLMChain
import json
import random
def create_script(topic_name, level_of_explanation, age, creativity_level, humour_level):
age = str(age)
beginner = "Explain this in a simple and easy to understand way for beginners. Help them understand the intuition, logic and importance of it."
intermediate = "Explain the topic with a bit more complexity and depth, assuming the reader has some prior knowledge and understanding. The script must be very long and detailed."
advanced = "Delve into intricate details of the topic and explain it in depth. The reader has a solid foundation and is familiar with the intermediate concepts. Include more technical language, mathematical formulas, or advanced examples to provide a comprehensive understanding of the topic. Be extremely detailed and the script must be very long."
level_string = beginner if level_of_explanation == 'beginner' else intermediate if level_of_explanation == 'intermediate' else advanced
creativity_string = "" if creativity_level < 4 else "Be Creative while explaining the concepts to make it easier to understand." if creativity_level < 7 else "Be creative while explaining the concepts to make it easier to understand, include creative analogies while explaining."
humour_string = "" if humour_level < 4 else "Be a little humorous while explaining the concepts." if humour_level < 7 else "Be funny and add jokes while explaining the concepts to make it more fun to learn."
# Randomly select an API key
selected_key = json.load(open('apikeys.json', 'r'))['api_keys'][random.randint(
0, len(json.load(open('apikeys.json', 'r'))['api_keys'])-1)]
# Initialise model
llm = Cohere(cohere_api_key=selected_key,
model='command-xlarge-beta', temperature=1.2, max_tokens=1700)
# create the template string
template = """Instructions:\nCreate a script for a self contained video about {topic_name} such that a {age} year old can understand. Explain the key concepts. {level_string} {creativity_string} {humour_string} It should be in first person. Be cheerful and happy while explaining. The Youtube channel is Alystria.\n"""
# create prompt
prompt = PromptTemplate(template=template, input_variables=[
"topic_name", "age", "level_string", "creativity_string", "humour_string"])
# print(prompt.format(topic_name=topic_name , age=age , creativity_string=creativity_string , humour_string=humour_string, level_string=level_string))
# Create and run the llm chain
llm_chain = LLMChain(prompt=prompt, llm=llm)
response = llm_chain.run(topic_name=topic_name, age=age, creativity_string=creativity_string,
humour_string=humour_string, level_string=level_string)
return response
| [
"creativity_string",
"Instructions:\nCreate a script for a self contained video about {topic_name} such that a {age} year old can understand. Explain the key concepts. {level_string} {creativity_string} {humour_string} It should be in first person. Be cheerful and happy while explaining. The Youtube channel is Alystria.\n",
"level_string",
"topic_name",
"humour_string"
] |
2024-01-10 | chitang233/Openaibot | test~vits_nlp.py | # -*- coding: utf-8 -*-
# @Time : 12/20/22 10:19 PM
# @FileName: vits_nlp.py
# @Software: PyCharm
# @Github :sudoskys
import time
from openai_async.utils.Talk import Talk
res = Talk().cut_chinese_sentence(
"これから日本...大家好,我是可莉,我建议大家不要有其它的营养,所以不能只看它的热量就作为应急食品来使用。")
print(res)
from ftlangdetect import detect
t1 = time.time()
result = detect(text="你好", low_memory=True)
print(result)
result = detect(text="你好你好,これから日本", low_memory=True)
print(result)
result = detect(text="怎麼不給爺嘿嘿呢", low_memory=True)
print(result)
t2 = time.time()
print(t2 - t1)
| [] |
2024-01-10 | johntelforduk/gpt-hue | gpt_hue.py | from phue import Bridge
import openai
from dotenv import load_dotenv
from os import getenv
import json
def cost_calc(num_tokens: int) -> float:
"""
For the given number of tokens used, return the cost incurred in USD.
"""
# From, https://openai.com/pricing, gpt-3.5-turbo is $0.002 per 1000 tokens.
return num_tokens * 0.002 / 1000
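# Quick sanity check of the pricing comment above (illustrative numbers only):
# cost_calc(1234) -> 1234 * 0.002 / 1000 = 0.002468, i.e. roughly a quarter of a US cent.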
class Persona:
def __init__(self, gpt_model: str, persona_name: str, functions: list):
self.gpt_model = gpt_model
self.persona_name = persona_name
self.functions = functions
self.history = []
self.cumulative_tokens = 0
def give_mission(self, mission: str, response: str):
print(self.persona_name + ' mission...')
print(mission)
print('------------')
self.update_history(role='user', content=mission)
# 'Trick' GPT into thinking it understood us earlier in the conversation.
self.update_history(role='assistant', content=response)
def update_history(self, role: str, content: str):
assert role in ['assistant', 'user']
self.history.append({'role': role, 'content': content})
def chat(self, prompt: str):
self.update_history(role='user', content=prompt)
completion = openai.ChatCompletion.create(model=self.gpt_model,
messages=self.history,
functions=self.functions,
function_call="auto")
self.cumulative_tokens += int(completion.usage.total_tokens)
reply_content = completion.choices[0].message
# print(reply_content)
if 'function_call' in reply_content:
func_name = reply_content['function_call']['name']
args_json = reply_content.to_dict()['function_call']['arguments']
payload = json.loads(args_json)
payload['function_name'] = func_name
# print(payload)
return payload
else:
content = reply_content['content']
print(self.persona_name + ': ' + content)
self.update_history(role='assistant', content=content)
return {}
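# Note: when the model chooses to call a function, chat() returns a dict shaped like
# {'light_name': 'Master Bedroom', 'on': True, 'function_name': 'turn_on_or_off'} (values illustrative);
# Lights.interpret_response() below dispatches on 'function_name'.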
class Lights:
def __init__(self, bridge_ip):
self.bridge = Bridge(bridge_ip)
# If the app is not registered and the button is not pressed, press the button and call connect()
# (this only needs to be run a single time)
self.bridge.connect()
# Get the bridge state (This returns the full dictionary that you can explore)
bridge_state = self.bridge.get_api()
# Make a single dictionary of all the lights and groups.
# Key = name of individual light or group of lights, value = list of light IDs.
self.lights = {}
for light_id in bridge_state['lights']:
light_name = bridge_state['lights'][light_id]['name']
self.lights[light_name] = [light_id]
remove_later = set()
for group_id in bridge_state['groups']:
group_name = bridge_state['groups'][group_id]['name']
for candidate in self.lights:
if group_name in candidate:
remove_later.add(candidate)
self.lights[group_name] = bridge_state['groups'][group_id]['lights']
# Remove individual lights that have names that are substrings of groups.
for each_light in remove_later:
del self.lights[each_light]
def describe_lights(self) -> list:
"""
Generate a list of the lights in the house, suitable for telling the chatbot about.
"""
return [name for name in self.lights]
def turn_on_or_off(self, light_name: str, on: bool):
if light_name not in self.lights:
print('Light not found.')
return
# Adjust all of the lights in the list.
for light_id in self.lights[light_name]:
self.bridge.set_light(int(light_id), 'on', on)
if on:
print('Turned on ' + light_name)
else:
print('Turned off ' + light_name)
def set_brightness(self, light_name: str, brightness: int):
if light_name not in self.lights:
print('Light not found.')
return
# Adjust all of the lights in the list.
for light_id in self.lights[light_name]:
self.bridge.set_light(int(light_id), {'on': True, 'bri': brightness})
print(light_name + ' set to ' + str(int(100 * brightness / 254)) + '% brightness')
def interpret_response(self, gpt_response: dict):
"""
Interpret the response from the chatbot.
"""
if len(gpt_response) == 0:
return
# print(gpt_response)
if gpt_response['function_name'] == 'turn_on_or_off':
self.turn_on_or_off(light_name=gpt_response['light_name'], on=gpt_response['on'])
elif gpt_response['function_name'] == 'set_brightness':
self.set_brightness(light_name=gpt_response['light_name'], brightness=gpt_response['brightness'])
load_dotenv(verbose=True) # Set operating system environment variables based on contents of .env file.
my_lights = Lights(getenv('BRIDGE_IP'))
lights_list = my_lights.describe_lights()
print('lights_list:', lights_list)
# my_lights.turn_on_or_off(light_name='Master Bedroom', on=True)
# my_lights.set_brightness(light_name='Master Bedroom', brightness=200)
hue_functions = [
{
"name": "turn_on_or_off",
"description": "Turn a Hue light bulb on or off.",
"parameters": {
"type": "object",
"properties": {
"light_name": {
"type": "string",
"description": "The name of the Hue bulb that this function will turn on or off.",
"enum": lights_list
},
"on": {"type": "boolean",
"description": "True if the light should be turned on. False if the light should be turned off."
}
},
"required": ["light_name", "on"]
}
},
{
"name": "set_brightness",
"description": "Change the level of brightness of a Hue bulb. Don't use this function for turning lights off.",
"parameters": {
"type": "object",
"properties": {
"light_name": {
"type": "string",
"description": "The name of the Hue bulb that this function will turn on or off.",
"enum": lights_list
},
"brightness": {"type": "integer",
"description": "The brightness that the bulb should be set to. Expressed as an integer between 0 and 254, where 0 is dark and 254 is maximum brightness.",
"enum": list(range(255))
}
},
"required": ["light_name", "brightness"]
}
}
]
openai.api_key = getenv('OPEN_AI_KEY')
chatgpt = Persona(gpt_model=getenv('OPEN_AI_MODEL'), persona_name='ChatGPT', functions=hue_functions)
mission = '''I'd like you to control the Philips Hue light bulbs in my house.
Only use the set_brightness function for changing brightness. Make sure you use the turn_on_or_off function for actually turning the lights on and off.
Please say "OK" now if you understand.'''
response = 'OK.'
chatgpt.give_mission(mission=mission, response=response)
while True:
inp = input("User input (or 'quit'): ")
if inp == 'quit':
break
resp = chatgpt.chat(prompt=inp)
my_lights.interpret_response(gpt_response=resp)
print('\nTotal tokens used:', chatgpt.cumulative_tokens)
print('Cost incurred (USD):', cost_calc(chatgpt.cumulative_tokens))
| [] |
2024-01-10 | realneir/Jarvis | jarvis.py | import speech_recognition as sr
import pyttsx3
import openai
import requests
import webbrowser
engine = pyttsx3.init()
openai.api_key = 'YOUR_API_KEY'
r = sr.Recognizer()
def speak_text(text):
engine.say(text)
engine.runAndWait()
def get_audio():
with sr.Microphone() as source:
print("Identifying voice...")
audio = r.listen(source)
text = ""
try:
print("Recognizing...")
text = r.recognize_google(audio)
print(text)
except Exception as e:
print(e)
speak_text("Sorry, Master Reneir can you please repeat.")
return text
def get_response(prompt):
if "open google" in prompt.lower():
webbrowser.open("http://www.google.com")
return "Opening Google"
elif "open facebook" in prompt.lower():
webbrowser.open("http://www.facebook.com")
return "Opening Facebook"
else:
response = openai.Completion.create(engine="text-davinci-003", prompt=prompt, max_tokens=100)
return response.choices[0].text.strip()
speak_text("Hello, I am Jarvis from Ironman. How can I assist you today?")
while True:
text = get_audio()
if text:
response = get_response(text)
speak_text(response)
| [] |
2024-01-10 | artwalker/EasyTranslator | process_file.py | import re
import zipfile
import time
import pdfminer.high_level
import ebooklib
import tempfile
import os
import openai
import json
import random
import docx
import pandas as pd
import mobi
import sys
from docx import Document
from lxml import etree
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
from ebooklib import epub
from ebooklib.epub import EpubImage
from bs4 import BeautifulSoup
class ProcessFile:
"""A class about according to the file extension, use the corresponding function to convert the file to text."""
def __init__(self, parameterReader):
"""Initialize the title of filename and text which receives the contents of file."""
self.filename = ""
self.start_page = 0
self.end_page = 0
self.total_pages = 0
self.transliteration_list_file = ""
self.transliteration_word_capi_low = ""
self.bilingual_output = ""
self.prompt = ""
self.language_code = ""
self.jsonfile = ""
self.translated_dict = ""
self.new_filename = ""
self.new_filenametxt = ""
self.show = ""
self.azure = ""
self.tlist = ""
self.test = ""
self.gpt_model = ""
self.gpt_temperature = ""
self.title = ""
self.text = ""
self.book = ""
self.total_tokens = 0
self.completion_tokens = 0
self.prompt_tokens = 0
self.short_text_list = ""
self.translated_text = ""
self.translated_short_text = ""
self.count = 0
self.messages = ""
self.client = ""
self.non_azure_client = ""
self._set_args_from_parameterReader(parameterReader)
def _set_args_from_parameterReader(self, parameterReader):
"""Set args from parameterReader."""
self.filename = parameterReader.filename
self.start_page = parameterReader.startpage
self.end_page = parameterReader.endpage
self.total_pages = 0
self.transliteration_list_file = parameterReader.transliteration_list_file
self.transliteration_word_capi_low = parameterReader.transliteration_word_capi_low
self.bilingual_output = parameterReader.bilingual_output
self.prompt = parameterReader.prompt
self.language_code = parameterReader.language_code
self.jsonfile = parameterReader.jsonfile
self.translated_dict = parameterReader.translated_dict
self.new_filename = parameterReader.new_filename
self.new_filenametxt = parameterReader.new_filenametxt
self.show = parameterReader.show
self.tlist = parameterReader.tlist
self.test = parameterReader.test
self.gpt_model = parameterReader.gpt_model
self.gpt_temperature = parameterReader.gpt_temperature
self.api_proxy = parameterReader.api_proxy
self.azure = parameterReader.azure
if self.azure:
self.client = parameterReader.client
self.openai_api_model_azure = parameterReader.openai_api_model_azure
if len(self.api_proxy) != 0:
self.non_azure_client = parameterReader.non_azure_client
def _get_pdf_total_pages(self):
"""Get total pages."""
with open(self.filename, 'rb') as file:
parser = PDFParser(file)
document = PDFDocument(parser)
self.total_pages = len(list(PDFPage.create_pages(document)))
def _convert_pdf_to_text(self):
"""Access the contents of the PDF file and convert it to text."""
print("\033[1;32mINFO:Converting pdf to text.\033[0m")
self.text = pdfminer.high_level.extract_text(self.filename, page_numbers=list(range(self.start_page - 1, self.end_page)))
print("-" * 3)
if self.show:
print("*" * 3)
print(self.text)
print("*" * 3)
print("\033[1;32mINFO:Finished converting pdf to text\033[0m")
def _convert_mobi_to_text(self):
"""Access the content fo mobi and then convert it to text."""
# Extract MOBI contents to a temporary directory
with tempfile.TemporaryDirectory() as tempdir:
tempdir, filepath = mobi.extract(self.filename)
# Find the HTML file in the temporary directory
for root, _, files in os.walk(tempdir):
for file in files:
if file.endswith(".html"):
html_file = os.path.join(root, file)
break
else:
continue
break
else:
raise FileNotFoundError("ERROR:HTML file not found in the extracted MOBI contents")
# Parse the HTML file with BeautifulSoup to get the text
with open(html_file, "r", encoding="utf-8") as f:
soup = BeautifulSoup(f.read(), "html.parser")
self.text = soup.get_text()
def _convert_docx_to_text(self):
"""Access the content of docx and then convert it to text."""
print("-" * 3)
print("\033[1;32mINFO:Parsing the DOCX content.\033[0m")
doc = docx.Document(self.filename)
for paragraph in doc.paragraphs:
self.text += paragraph.text + "\n"
def _convert_epub_to_text(self):
"""Convert epub to text."""
# Access all contents
for item in self.book.get_items():
if item.get_type() == ebooklib.ITEM_DOCUMENT:
# Use BeautifulSoup to extract the original text
soup = BeautifulSoup(item.get_content(), 'html.parser')
self.text += re.sub(r'\n+', '\n', soup.get_text().strip())
def _text_replace(self):
"""Replace the text according to the transliteration table."""
# Read the excel file and store the first column and the second column as two lists
df = pd.read_excel(self.transliteration_list_file)
old_words = df.iloc[:, 0].tolist()
new_words = df.iloc[:, 1].tolist()
# Order the old word list in descending order of length and synchronize the new word list
old_words, new_words = zip(*sorted(zip(old_words, new_words), key=lambda x: len(x[0]), reverse=True))
# Iterate through two lists and replace strings
for i in range(len(old_words)):
# If ignoring the case, convert the string and the word to be replaced to lowercase
if not self.transliteration_word_capi_low:
lower_old_word = old_words[i].lower()
# Use the regular expression to replace, note that the original string case is retained
self.text = re.sub(r"\b" + lower_old_word + r"\b", new_words[i], self.text, flags=re.IGNORECASE)
else:
# If care about the case, just use the regular expression to replace
self.text = re.sub(r"\b" + old_words[i] + r"\b", new_words[i], self.text)
def _text_replace_reverse(self, text):
"""Replace the text according to the transliteration table in reverse order."""
# Read the excel file and store the first column and the second column as two lists
df = pd.read_excel(self.transliteration_list_file)
old_words = df.iloc[:, 0].tolist() # Swapped
new_words = df.iloc[:, 1].tolist() # Swapped
# Order the new word list in descending order of length and synchronize the old word list
new_words, old_words = zip(*sorted(zip(new_words, old_words), key=lambda x: len(x[0]), reverse=True))
# Iterate through two lists and replace strings
for i in range(len(new_words)):
# If ignoring the case, convert the string and the word to be replaced to lowercase
if not self.transliteration_word_capi_low:
lower_new_word = new_words[i].lower()
# Use the regular expression to replace, note that the original string case is retained
text = re.sub(r"\b" + lower_new_word + r"\b", old_words[i], text, flags=re.IGNORECASE)
else:
# If care about the case, just use the regular expression to replace
text = re.sub(r"\b" + new_words[i] + r"\b", old_words[i], text)
return text
def _reverse_text_replace_reverse(self, text):
"""Reverse the text according to the transliteration table in reverse order."""
# Read the excel file and store the first column and the second column as two lists
df = pd.read_excel(self.transliteration_list_file)
new_words = df.iloc[:, 0].tolist() # Swapped
old_words = df.iloc[:, 1].tolist() # Swapped
# Order the new word list in descending order of length and synchronize the old word list
new_words, old_words = zip(*sorted(zip(new_words, old_words), key=lambda x: len(x[0]), reverse=True))
# Iterate through two lists and replace strings
for i in range(len(new_words)):
# If ignoring the case, convert the string and the word to be replaced to lowercase
if not self.transliteration_word_capi_low:
lower_new_word = new_words[i].lower()
# Use the regular expression to replace, note that the original string case is retained
text = re.sub(r"\b" + lower_new_word + r"\b", old_words[i], text, flags=re.IGNORECASE)
else:
# If care about the case, just use the regular expression to replace
text = re.sub(r"\b" + new_words[i] + r"\b", old_words[i], text)
return text
def _split_text(self):
"""Divide the text into a list of short texts with no more than 1024 characters."""
# Use the regular expression to split the text into a list of sentences
sentence_list = re.findall(r'.+?[。!?!?.]', self.text)
# Initialize the short text list
self.short_text_list = []
# Initialize the current short text
short_text = ""
# Iterate through the sentence list
for s in sentence_list:
# If the current short text plus the new sentence is not longer than 1024 characters, add the new sentence to the current short text
if len(short_text + s) <= 1024:
short_text += s
# If the current short text plus the new sentence is longer than 1024 characters, add the current short text to the list and reset it to the new sentence
else:
self.short_text_list.append(short_text)
short_text = s
# Add the last short text to the short text list
self.short_text_list.append(short_text)
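# Chunking sketch (hypothetical sentence lengths): sentences of 600, 500 and 300 characters produce
# two chunks -- [600] and [500 + 300] -- because a chunk only grows while it stays within 1024 characters.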
def _replace_sign(self, text):
"""Replace the period with a period plus line break."""
text = text.replace(". ", ".\n")
text = text.replace("。", "。\n")
text = text.replace("?", "?\n")
text = text.replace("?", "?\n")
text = text.replace("!", "!\n")
text = text.replace("。\n”", "。”\n")
text = text.replace("!\n”", "!”\n")
text = text.replace("?\n”", "?”\n")
return text
def _get_completion_from_messages(self):
"""Get completion from messages."""
if len(self.api_proxy) == 0:
response = openai.chat.completions.create(
model=self.gpt_model,
messages=self.messages,
temperature=self.gpt_temperature,
)
else:
response = self.non_azure_client.chat.completions.create(
model=self.gpt_model,
messages=self.messages,
temperature=self.gpt_temperature,
)
content = response.choices[0].message.content
token_dict = {
'prompt_tokens':response.usage.prompt_tokens,
'completion_tokens':response.usage.completion_tokens,
'total_tokens':response.usage.total_tokens,
}
return content, token_dict
def _get_completion_from_messages_by_azure(self):
"""Get completion from messages by azure."""
response = self.client.chat.completions.create(
model=self.openai_api_model_azure,
messages=self.messages,
temperature=self.gpt_temperature,
)
#print(str(response.choices[0].message))
content = response.choices[0].message.content
token_dict = {
'prompt_tokens':response.usage.prompt_tokens,
'completion_tokens':response.usage.completion_tokens,
'total_tokens':response.usage.total_tokens,
}
return content, token_dict
def _comletion_tokens(self):
"""Get comletion and tokens."""
if self.azure:
completion, token_dict = self._get_completion_from_messages_by_azure()
else:
completion, token_dict = self._get_completion_from_messages()
self.translated_short_text = (
completion
.encode("utf8")
.decode()
)
# Get the token usage from the API response
self.total_tokens += token_dict['total_tokens']
self.completion_tokens += token_dict['completion_tokens']
self.prompt_tokens += token_dict['prompt_tokens']
def _translate_text(self, content):
"""Translate the text."""
# Call the OpenAI API for translation
try:
self.messages = [
{'role':'system',
'content': f"You are a translation assistant.Your task is to translate the content given to you by the user.{self.prompt}"},
{'role': 'user',
'content': f"{content}\n"},
]
self._comletion_tokens()
except Exception as e:
# Time to wait for limitation of ChatGPT
sleep_time = 60 * 3 + 5
print(e, "\n"+f"Sleep {sleep_time} seconds.")
time.sleep(sleep_time)
self._comletion_tokens()
def _translate_and_store(self, text):
"""Tranlate and store text."""
if self.tlist:
# Revert the replacement so that it can be judged whether the text has been translated
text = self._text_replace_reverse(text)
# If the text has been translated, return the translation result directly
if text in self.translated_dict:
self.translated_short_text = self.translated_dict[text]
else:
# Before translation, replace the text according to the transliteration table
text = self._reverse_text_replace_reverse(text)
# Else, call the translate_text function to translate and store the result in the dictionary
self._translate_text(text)
# Reverse the replacement of the transliteration table so that the text keeps the original content
text = self._text_replace_reverse(text)
self.translated_dict[text] = self.translated_short_text
# Save the dictionary as a JSON file
with open(self.jsonfile, "w", encoding="utf-8") as f:
json.dump(self.translated_dict, f, ensure_ascii=False, indent=4)
else:
# If the text has been translated, return the translation result directly
if text in self.translated_dict:
self.translated_short_text = self.translated_dict[text]
else:
# Else, call the translate_text function to translate and store the result in the dictionary
self._translate_text(text)
self.translated_dict[text] = self.translated_short_text
# Save the dictionary as a JSON file
with open(self.jsonfile, "w", encoding="utf-8") as f:
json.dump(self.translated_dict, f, ensure_ascii=False, indent=4)
def _process_text(self):
"""Process the text."""
# Replace all line breaks with spaces
self.text = self.text.replace("\n", " ")
# Replace multiple spaces with one space
self.text = re.sub(r"\s+", " ", self.text)
# If the transliteration table replacement is set, replace the text before translation
if self.tlist:
self._text_replace()
# Split the text into short texts of no more than 1024 characters
self._split_text()
# If the test mode is turned on, only translate the first 3 short texts
if self.test:
self.short_text_list = self.short_text_list[:3]
# Iterate through the short text list and translate each short text in turn
for short_text in self.short_text_list:
self.count += 1
# Translate the current short text
time.sleep(0.5)
self._translate_and_store(short_text)
short_text = self._replace_sign(short_text)
self.translated_short_text = self._replace_sign(self.translated_short_text)
short_text = self._text_replace_reverse(short_text)
# Add the current short text and the translated text to the total text
if self.bilingual_output.lower() == 'true':
self.translated_text += f"{short_text}<br>\n{self.translated_short_text}<br>\n"
else:
self.translated_text += f"{self.translated_short_text}<br>\n"
if self.show:
print("*" * 3)
print(short_text)
print("*" * 1)
print(self.translated_short_text)
print("*" * 3)
def _text_to_epub(self):
"""Write the translated text to the epub file."""
text = self.translated_text.replace('\n', '<br>').replace("\n", "<br>")
# Create an epub book object
book = epub.EpubBook()
# Set the metadata
book.set_identifier(str(random.randint(100000, 999999)))
book.set_title(self.title)
book.set_language(self.language_code)
# Create a chapter object
c = epub.EpubHtml(title='Chapter 1', file_name='chap_1.xhtml', lang=self.language_code)
c.content = text
# Add the chapter to the book
book.add_item(c)
# Add the table of contents
book.toc = (epub.Link('chap_1.xhtml', 'Chapter 1', 'chap_1'),)
# Set spine order
book.spine = ['nav', c]
# Add navigation files
book.add_item(epub.EpubNcx())
book.add_item(epub.EpubNav())
# Write the content to the epub book
#print("\n" + text)
try:
epub.write_epub(self.new_filename, book, {})
except Exception as e:
print(f"Failed to write EPUB: {e}")
def _get_title_of_md(self):
"""Get title of the md."""
print("-" * 3)
print("\033[1;32mINFO:Parsing the md title.\033[0m")
with open(self.filename, 'r', encoding='utf-8') as file:
for line in file:
if line.startswith('#'):
self.title = line.replace('#', '').strip()
break
print("-" * 3)
print("\033[1;32mINFO:Finished parsing the md title.\033[0m")
def _get_title_of_txt(self):
"""Get title of the txt."""
print("-" * 3)
print("\033[1;32mINFO:Parsing the txt title.\033[0m")
title_extension = os.path.basename(self.filename)
self.title = os.path.splitext(title_extension)[0]
print("-" * 3)
print("\033[1;32mINFO:Finished parsing the txt title.\033[0m")
def _get_title_of_docx(self):
"""Get title of the docx."""
try:
print("-" * 3)
print("\033[1;32mINFO:Parsing the docx file.\033[0m")
with zipfile.ZipFile(self.filename) as zf:
core_properties = etree.fromstring(zf.read("docProps/core.xml"))
ns = {"cp": "http://schemas.openxmlformats.org/package/2006/metadata/core-properties",
"dc": "http://purl.org/dc/elements/1.1/",
"dcterms": "http://purl.org/dc/terms/",
"dcmitype": "http://purl.org/dc/dcmitype/",
"xsi": "http://www.w3.org/2001/XMLSchema-instance"}
title_elements = core_properties.findall("dc:title", ns)
if title_elements:
self.title = title_elements[0].text
else:
self.title = "INFO:Unknown title."
print("-" * 3)
print("\033[1;32mINFO:Finished parsing the docx title.\033[0m")
except Exception as e:
print(f"An error occurred: {e}")
print("*" * 6)
print("\033[91mERROR:Parsing the DOCX file.\033[0m")
print("*" * 6)
def _get_title_of_pdf(self):
"""Get title of the pdf."""
try:
print("-" * 3)
print("\033[1;32mINFO:Parsing the pdf title.\033[0m")
with open(self.filename, 'rb') as file:
parser = PDFParser(file)
document = PDFDocument(parser)
if 'Title' in document.info:
self.title = document.info['Title']
else:
text = pdfminer.high_level.extract_text(file)
match = re.search(r'(?<=\n)([^\n]+)(?=\n)', text)
if match:
self.title = match.group(1)
else:
self.title = "INFO:Unknown title."
print("-" * 3)
print("\033[1;32mINFO:Finished parsing the pdf title.\033[0m")
except Exception as e:
print(f"An error occurred: {e}")
print("*" * 6)
print("\033[91mERROR:Parsing the pdf title.\033[0m")
print("*" * 6)
# step 1
def get_title(self):
"""Get the title of file."""
if self.filename.endswith('.pdf'):
self._get_title_of_pdf()
self._get_pdf_total_pages()
elif self.filename.endswith('.txt'):
self._get_title_of_txt()
elif self.filename.endswith('.docx'):
self._get_title_of_docx()
elif self.filename.endswith('.mobi'):
pass
elif self.filename.endswith('.epub'):
self.book = epub.read_epub(self.filename)
elif self.filename.endswith('.md'):
self._get_title_of_md()
else:
print("-" * 3)
print("\033[91mINFO:Unsupported file type right now.\033[0m")
print("-" * 3)
sys.exit(0)
def _get_md_content(self):
"""Get md content."""
print("-" * 3)
print("\033[1;32mINFO:Parsing the md content.\033[0m")
with open(self.filename, 'r', encoding='utf-8') as file:
self.text = file.read()
def _get_txt_content(self):
"""Get txt content."""
print("-" * 3)
print("\033[1;32mINFO:Parsing the txt content.\033[0m")
with open(self.filename, 'r', encoding='utf-8') as file:
self.text = file.read()
def _get_pdf_content(self):
"""Get pdf content."""
try:
print("-" * 3)
print("\033[1;32mINFO:Parsing the pdf content.\033[0m")
print("-" * 3)
print(f"\033[1;32mINFO:Total pages of the pdf: {self.total_pages}\033[0m")
if self.end_page == -1:
self.end_page = self.total_pages
print("-" * 3)
print(f"\033[1;32mINFO:Converting pdf from: Page {self.start_page} to Page {self.end_page}.\033[0m")
print("-" * 3)
self._convert_pdf_to_text()
except Exception as e:
print(f"An error occurred: {e}")
print("*" * 6)
print("\033[91mERROR:Parsing the pdf content.\033[0m")
print("*" * 6)
def _get_mobi_content(self):
"""Get mobi content."""
try:
print("-" * 3)
print("\033[1;32mINFO:Parsing the mobi content.\033[0m")
self._convert_mobi_to_text()
except Exception as e:
print(f"An error occurred: {e}")
print("*" * 6)
print("\033[91mERROR:Parsing the MOBI content.\033[0m")
print("*" * 6)
def _get_epub_content(self):
"""Get mobi content."""
try:
print("-" * 3)
print("\033[1;32mINFO:Parsing the EPUB content.\033[0m")
self._convert_epub_to_text()
except Exception as e:
print(f"An error occurred: {e}")
print("*" * 6)
print("\033[91mERROR:Parsing the EPUB content.\033[0m")
print("*" * 6)
# step 2
def convert_text(self):
"""Convert the file ending with differnt types to text."""
if self.filename.endswith('.pdf'):
self._get_pdf_content()
elif self.filename.endswith('.txt'):
self._get_txt_content()
elif self.filename.endswith('.mobi'):
self._get_mobi_content()
elif self.filename.endswith('.docx'):
self._convert_docx_to_text()
elif self.filename.endswith('.epub'):
self._get_epub_content()
elif self.filename.endswith('.md'):
self._get_md_content()
else:
print("\033[91mINFO:Unsupported to access the content of this file type right now.\033[0m")
# step 3
def tranlate_file(self):
"""Translate the file."""
if self.filename.endswith('.epub'):
# Access all chapters of the epub file
items = self.book.get_items()
# Iterate through all chapters
translated_all = ''
print("-" * 3)
print("\033[1;32mINFO:Translating the file content.\033[0m")
for item in items:
# If the chapter type is a document type, it needs to be translated
if item.get_type() == ebooklib.ITEM_DOCUMENT:
# Use BeautifulSoup to extract the original text
soup = BeautifulSoup(item.get_content(), 'html.parser')
self.text = soup.get_text().strip()
img_html = ''
img_tags = soup.find_all('img')
for img_tag in img_tags:
img_html += str(img_tag) + '<br>'
# If the text is empty, skip this chapter
if not self.text:
continue
self._process_text()
# Replace the original chapter content with the translated text
item.set_content((img_html + self.translated_text.replace('\n', '<br>')).encode('utf-8'))
translated_all += self.translated_text
# If the test mode is turned on, only translate the first 3 chapters
if self.test and self.count >= 3:
break
print("-" * 3)
print("\033[1;32mINFO:Finished parsing and translating the file.\033[0m")
# Write content to the epub file
epub.write_epub(self.new_filename, self.book, {})
# Write the translated text to the txt file
with open(self.new_filenametxt, "w", encoding="utf-8") as f:
f.write(translated_all.replace('<br>', ''))
else:
print("-" * 3)
print("\033[1;32mINFO:Translating the file content.\033[0m")
self._process_text()
print("-" * 3)
print("\033[1;32mINFO:Finished parsing and translating the file.\033[0m")
print("-" * 3)
# Write the translated text to the epub file
print("\033[1;32mINFO:Writing the translated text to epub.\033[0m") # 输出绿色的 "DEBUG"
self._text_to_epub()
# Write the translated text to the txt file
print("-" * 3)
print("\033[1;32mINFO:Writing the translated text to the txt file.\033[0m")
with open(self.new_filenametxt, "w", encoding="utf-8") as f:
f.write(self.translated_text.replace('<br>', ''))
# step 4
def caculate_tokens_costs(self):
"""Caculate the tokens."""
cost = self.completion_tokens / 1000 * 0.002 + self.prompt_tokens / 1000 * 0.001
print("-" * 3)
print(f"\033[1;32mINFO:Use completion tokens: {self.completion_tokens}.\033[0m")
print("-" * 3)
print(f"\033[1;32mINFO:Use prompt tokens: {self.prompt_tokens}.\033[0m")
print("-" * 3)
print(f"\033[1;32mINFO:Use total tokens: {self.total_tokens}.\033[0m")
print("-" * 3)
print(f"\033[1;32mINFO:Total approximate cost: ${cost}.\033[0m")
print("-" * 3)
print(f"\033[1;34mINFO:Translation completed.\033[0m")
print("-" * 3)
# step 5
def remove_jsonfile(self):
"""Remove the jsonfile."""
try:
os.remove(self.jsonfile)
print(f"\033[1;34mFile '{self.jsonfile}' has been deleted.\033[0m")
print("-" * 3)
except Exception as e:
print(f"An error occurred: {e}")
print("*" * 6)
print(f"\033[91mERROR:File '{self.jsonfile}' not found. No file was deleted.\033[0m")
print("*" * 6)
| [
"PLACEHOLDER\n"
] |
2024-01-10 | deoxykev/langchain | langchain~chains~combine_documents~reduce.py | """Combine many documents together by recursively reducing them."""
from __future__ import annotations
from typing import Any, Callable, List, Optional, Protocol, Tuple
from pydantic import Extra
from langchain.callbacks.manager import Callbacks
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.docstore.document import Document
class CombineDocsProtocol(Protocol):
"""Interface for the combine_docs method."""
def __call__(self, docs: List[Document], **kwargs: Any) -> str:
"""Interface for the combine_docs method."""
class AsyncCombineDocsProtocol(Protocol):
"""Interface for the combine_docs method."""
async def __call__(self, docs: List[Document], **kwargs: Any) -> str:
"""Async nterface for the combine_docs method."""
def _split_list_of_docs(
docs: List[Document], length_func: Callable, token_max: int, **kwargs: Any
) -> List[List[Document]]:
new_result_doc_list = []
_sub_result_docs = []
for doc in docs:
_sub_result_docs.append(doc)
_num_tokens = length_func(_sub_result_docs, **kwargs)
if _num_tokens > token_max:
if len(_sub_result_docs) == 1:
raise ValueError(
"A single document was longer than the context length,"
" we cannot handle this."
)
new_result_doc_list.append(_sub_result_docs[:-1])
_sub_result_docs = _sub_result_docs[-1:]
new_result_doc_list.append(_sub_result_docs)
return new_result_doc_list
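# Behavior sketch (hypothetical inputs): with token_max=10 and a length_func that sums per-document
# lengths [4, 5, 3, 8], the docs are grouped as [[4, 5], [3], [8]]; a single document that alone
# exceeds token_max raises ValueError.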
def _collapse_docs(
docs: List[Document],
combine_document_func: CombineDocsProtocol,
**kwargs: Any,
) -> Document:
result = combine_document_func(docs, **kwargs)
combined_metadata = {k: str(v) for k, v in docs[0].metadata.items()}
for doc in docs[1:]:
for k, v in doc.metadata.items():
if k in combined_metadata:
combined_metadata[k] += f", {v}"
else:
combined_metadata[k] = str(v)
return Document(page_content=result, metadata=combined_metadata)
async def _acollapse_docs(
docs: List[Document],
combine_document_func: AsyncCombineDocsProtocol,
**kwargs: Any,
) -> Document:
result = await combine_document_func(docs, **kwargs)
combined_metadata = {k: str(v) for k, v in docs[0].metadata.items()}
for doc in docs[1:]:
for k, v in doc.metadata.items():
if k in combined_metadata:
combined_metadata[k] += f", {v}"
else:
combined_metadata[k] = str(v)
return Document(page_content=result, metadata=combined_metadata)
class ReduceDocumentsChain(BaseCombineDocumentsChain):
"""Combining documents by recursively reducing them.
This involves
- combine_documents_chain
- collapse_documents_chain
`combine_documents_chain` is ALWAYS provided. This is final chain that is called.
We pass all previous results to this chain, and the output of this chain is
returned as a final result.
`collapse_documents_chain` is used if the documents passed in are too many to all
be passed to `combine_documents_chain` in one go. In this case,
`collapse_documents_chain` is called recursively on as big of groups of documents
as are allowed.
Example:
.. code-block:: python
from langchain.chains import (
StuffDocumentsChain, LLMChain, ReduceDocumentsChain
)
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
# This controls how each document will be formatted. Specifically,
# it will be passed to `format_document` - see that function for more
# details.
document_prompt = PromptTemplate(
input_variables=["page_content"],
template="{page_content}"
)
document_variable_name = "context"
llm = OpenAI()
# The prompt here should take as an input variable the
# `document_variable_name`
prompt = PromptTemplate.from_template(
"Summarize this content: {context}"
)
llm_chain = LLMChain(llm=llm, prompt=prompt)
combine_documents_chain = StuffDocumentsChain(
llm_chain=llm_chain,
document_prompt=document_prompt,
document_variable_name=document_variable_name
)
chain = ReduceDocumentsChain(
combine_documents_chain=combine_documents_chain,
)
# If we wanted to, we could also pass in collapse_documents_chain
# which is specifically aimed at collapsing documents BEFORE
# the final call.
prompt = PromptTemplate.from_template(
"Collapse this content: {context}"
)
llm_chain = LLMChain(llm=llm, prompt=prompt)
collapse_documents_chain = StuffDocumentsChain(
llm_chain=llm_chain,
document_prompt=document_prompt,
document_variable_name=document_variable_name
)
chain = ReduceDocumentsChain(
combine_documents_chain=combine_documents_chain,
collapse_documents_chain=collapse_documents_chain,
)
"""
combine_documents_chain: BaseCombineDocumentsChain
"""Final chain to call to combine documents.
This is typically a StuffDocumentsChain."""
collapse_documents_chain: Optional[BaseCombineDocumentsChain] = None
"""Chain to use to collapse documents if needed until they can all fit.
If None, will use the combine_documents_chain.
This is typically a StuffDocumentsChain."""
token_max: int = 3000
"""The maximum number of tokens to group documents into. For example, if
set to 3000 then documents will be grouped into chunks of no greater than
3000 tokens before trying to combine them into a smaller chunk."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def _collapse_chain(self) -> BaseCombineDocumentsChain:
if self.collapse_documents_chain is not None:
return self.collapse_documents_chain
else:
return self.combine_documents_chain
def combine_docs(
self,
docs: List[Document],
token_max: Optional[int] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> Tuple[str, dict]:
"""Combine multiple documents recursively.
Args:
docs: List of documents to combine, assumed that each one is less than
`token_max`.
token_max: Recursively creates groups of documents less than this number
of tokens.
callbacks: Callbacks to be passed through
**kwargs: additional parameters to be passed to LLM calls (like other
input variables besides the documents)
Returns:
The first element returned is the single string output. The second
element returned is a dictionary of other keys to return.
"""
result_docs, extra_return_dict = self._collapse(
docs, token_max, callbacks=callbacks, **kwargs
)
return self.combine_documents_chain.combine_docs(
docs=result_docs, callbacks=callbacks, **kwargs
)
async def acombine_docs(
self,
docs: List[Document],
token_max: Optional[int] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> Tuple[str, dict]:
"""Combine multiple documents recursively.
Args:
docs: List of documents to combine, assumed that each one is less than
`token_max`.
token_max: Recursively creates groups of documents less than this number
of tokens.
callbacks: Callbacks to be passed through
**kwargs: additional parameters to be passed to LLM calls (like other
input variables besides the documents)
Returns:
The first element returned is the single string output. The second
element returned is a dictionary of other keys to return.
"""
result_docs, extra_return_dict = await self._acollapse(
            docs, token_max=token_max, callbacks=callbacks, **kwargs
)
return await self.combine_documents_chain.acombine_docs(
docs=result_docs, callbacks=callbacks, **kwargs
)
def _collapse(
self,
docs: List[Document],
token_max: Optional[int] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> Tuple[List[Document], dict]:
result_docs = docs
length_func = self.combine_documents_chain.prompt_length
num_tokens = length_func(result_docs, **kwargs)
def _collapse_docs_func(docs: List[Document], **kwargs: Any) -> str:
return self._collapse_chain.run(
input_documents=docs, callbacks=callbacks, **kwargs
)
_token_max = token_max or self.token_max
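        # Keep splitting the documents into token-bounded groups and collapsing each
        # group until the combined prompt length fits within the token budget.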
while num_tokens is not None and num_tokens > _token_max:
new_result_doc_list = _split_list_of_docs(
result_docs, length_func, _token_max, **kwargs
)
result_docs = []
for docs in new_result_doc_list:
new_doc = _collapse_docs(docs, _collapse_docs_func, **kwargs)
result_docs.append(new_doc)
num_tokens = length_func(result_docs, **kwargs)
return result_docs, {}
async def _acollapse(
self,
docs: List[Document],
token_max: Optional[int] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> Tuple[List[Document], dict]:
result_docs = docs
length_func = self.combine_documents_chain.prompt_length
num_tokens = length_func(result_docs, **kwargs)
async def _collapse_docs_func(docs: List[Document], **kwargs: Any) -> str:
return await self._collapse_chain.arun(
input_documents=docs, callbacks=callbacks, **kwargs
)
_token_max = token_max or self.token_max
while num_tokens is not None and num_tokens > _token_max:
new_result_doc_list = _split_list_of_docs(
result_docs, length_func, _token_max, **kwargs
)
result_docs = []
for docs in new_result_doc_list:
new_doc = await _acollapse_docs(docs, _collapse_docs_func, **kwargs)
result_docs.append(new_doc)
num_tokens = length_func(result_docs, **kwargs)
return result_docs, {}
@property
def _chain_type(self) -> str:
return "reduce_documents_chain"
| [] |
2024-01-10 | deoxykev/langchain | langchain~docstore~in_memory.py | """Simple in memory docstore in the form of a dict."""
from typing import Dict, Union
from langchain.docstore.base import AddableMixin, Docstore
from langchain.docstore.document import Document
class InMemoryDocstore(Docstore, AddableMixin):
"""Simple in memory docstore in the form of a dict."""
def __init__(self, _dict: Dict[str, Document]):
"""Initialize with dict."""
self._dict = _dict
def add(self, texts: Dict[str, Document]) -> None:
"""Add texts to in memory dictionary."""
overlapping = set(texts).intersection(self._dict)
if overlapping:
raise ValueError(f"Tried to add ids that already exist: {overlapping}")
self._dict = {**self._dict, **texts}
def search(self, search: str) -> Union[str, Document]:
"""Search via direct lookup."""
if search not in self._dict:
return f"ID {search} not found."
else:
return self._dict[search]
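# Example usage:
#     store = InMemoryDocstore({"1": Document(page_content="hello")})
#     store.add({"2": Document(page_content="world")})
#     store.search("1")        # -> Document(page_content="hello")
#     store.search("missing")  # -> "ID missing not found."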
| [] |
2024-01-10 | deoxykev/langchain | langchain~utilities~scenexplain.py | """Util that calls SceneXplain.
In order to set this up, you need API key for the SceneXplain API.
You can obtain a key by following the steps below.
- Sign up for a free account at https://scenex.jina.ai/.
- Navigate to the API Access page (https://scenex.jina.ai/api) and create a new API key.
"""
from typing import Dict
import requests
from pydantic import BaseModel, BaseSettings, Field, root_validator
from langchain.utils import get_from_dict_or_env
class SceneXplainAPIWrapper(BaseSettings, BaseModel):
"""Wrapper for SceneXplain API.
In order to set this up, you need API key for the SceneXplain API.
You can obtain a key by following the steps below.
- Sign up for a free account at https://scenex.jina.ai/.
- Navigate to the API Access page (https://scenex.jina.ai/api)
and create a new API key.
"""
scenex_api_key: str = Field(..., env="SCENEX_API_KEY")
scenex_api_url: str = (
"https://api.scenex.jina.ai/v1/describe"
)
def _describe_image(self, image: str) -> str:
headers = {
"x-api-key": f"token {self.scenex_api_key}",
"content-type": "application/json",
}
payload = {
"data": [
{
"image": image,
"algorithm": "Ember",
"languages": ["en"],
}
]
}
response = requests.post(self.scenex_api_url, headers=headers, json=payload)
response.raise_for_status()
result = response.json().get("result", [])
img = result[0] if result else {}
return img.get("text", "")
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
scenex_api_key = get_from_dict_or_env(
values, "scenex_api_key", "SCENEX_API_KEY"
)
values["scenex_api_key"] = scenex_api_key
return values
def run(self, image: str) -> str:
"""Run SceneXplain image explainer."""
description = self._describe_image(image)
if not description:
return "No description found."
return description
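# Example usage (assumes SCENEX_API_KEY is set in the environment; the URL is illustrative):
#     wrapper = SceneXplainAPIWrapper()
#     description = wrapper.run("https://example.com/image.jpg")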
| [
"application/json"
] |
2024-01-10 | WuTao18/DeepKE | example~llm~gpt3DA.py | import openai
import json
import random
from tqdm import tqdm
import argparse
import os
entity_types = {
"tacrev": ['URL', 'LOCATION', 'IDEOLOGY', 'CRIMINAL CHARGE', 'TITLE', 'STATE OR PROVINCE', 'DATE', 'PERSON', 'NUMBER', 'CITY', 'DURATION', 'CAUSE OF DEATH', 'COUNTRY', 'NATIONALITY', 'RELIGION', 'ORGANIZATION', 'MISCELLANEOUS'],
# "SciERC": ['Generic', 'Material', 'Method', 'Metric', 'OtherScientificTerm', 'Task'],
"retacred": ['IDEOLOGY', 'ORGANIZATION', 'URL', 'PERSON', 'DURATION', 'COUNTRY', 'LOCATION', 'NATIONALITY', 'TITLE', 'RELIGION', 'NUMBER', 'CITY', 'CAUSE OF DEATH', 'DATE', 'STATE OR PROVINCE', 'CRIMINAL CHARGE'],
"tacred": ['COUNTRY', 'IDEOLOGY', 'LOCATION', 'DATE', 'PERSON', 'NATIONALITY', 'RELIGION', 'CITY', 'MISCELLANEOUS', 'CAUSE OF DEATH', 'TITLE', 'URL', 'NUMBER', 'ORGANIZATION', 'STATE OR PROVINCE', 'DURATION', 'CRIMINAL CHARGE']
}
def convert_token(token):
""" Convert PTB tokens to normal tokens """
if (token.lower() == '-lrb-'):
return '('
elif (token.lower() == '-rrb-'):
return ')'
elif (token.lower() == '-lsb-'):
return '['
elif (token.lower() == '-rsb-'):
return ']'
elif (token.lower() == '-lcb-'):
return '{'
elif (token.lower() == '-rcb-'):
return '}'
return token
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--api_key', '-ak', type=str, required=True)
parser.add_argument('--demo_path', '-dp', type=str, required=True, help="The directory of demonstration data.")
parser.add_argument('--output_dir', type=str, required=True, help="The output directory of generated data.")
parser.add_argument('--dataset', type=str, required=True, choices=["tacred", "tacrev", "retacred"])
parser.add_argument('--k', type=int, default=3, help="k-shot demonstrations")
args = parser.parse_args()
openai.api_key = args.api_key
input_file = args.demo_path
datasetname = args.dataset
output_file = os.path.join(args.output_dir, "generated.json")
data = []
label_list = {}
with open(input_file,'r') as f:
data = json.load(f)
random.shuffle(data)
for line in data:
rel = line['relation']
if rel not in label_list:
label_list[rel] = [line]
else:
label_list[rel].append(line)
'''
One sample in relation extraction datasets consists of a relation, a context, a pair of head and tail entities in the context and their entity types.
The head entity has the relation with the tail entity and entities are pre-categorized as the following types: URL, LOCATION, IDEOLOGY, CRIMINAL CHARGE, TITLE, STATE OR PROVINCE, DATE, PERSON, NUMBER, CITY, DURATION, CAUSE OF DEATH, COUNTRY, NATIONALITY, RELIGION, ORGANIZATION, MISCELLANEOUS.
Here are some samples for relation 'org:founded_by':
Relation: org:founded_by. Context: President Lee Teng-hui confers the Order of the Brilliant Star with a Violet Grand Cordon on Samuel Noordhoff , founder of the Noordhoff Craniofacial Foundation , for his devoted service to local citizens over the past four decades. Head Entity: Noordhoff Craniofacial Foundation . Head Type: ORGANIZATION. Tail Entity: Samuel Noordhoff. Tail Type: PERSON.
Relation: org:founded_by. Context: Talansky is also the US contact for the New Jerusalem Foundation , an organization founded by Olmert while he was Jerusalem 's mayor . Head Entity: New Jerusalem Foundation. Head Type: ORGANIZATION. Tail Entity: Olmert. Tail Type: PERSON.
Relation: org:founded_by. Context: Sharpton has said he will not endorse any candidate until hearing more about their views on civil rights and other issues at his National Action Network convention next week in New York City . Head Entity: National Action Network. Head Type: ORGANIZATION. Tail Entity: his. Tail Type: PERSON.
Relation: org:founded_by. Context: `` We believe that we can best serve our clients by offering a single multistrategy hedge fund platform , '' wrote John Havens , who was a founder of Old Lane with Pandit and is president of the alternative investment group . Head Entity: Old Lane. Head Type: ORGANIZATION. Tail Entity: John Havens. Tail Type: PERSON.
Generate more samples for the relation 'org:founded_by'.
'''
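    # Example invocation (API key and paths are illustrative):
    #   python gpt3DA.py --api_key sk-... --demo_path data/train.json --output_dir data/augmented --dataset tacred --k 3
    # Generated samples are appended to <output_dir>/generated.json, one JSON object per line.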
with open(output_file,'a') as f:
for k,v in tqdm(label_list.items()):
prompt = "One sample in relation extraction datasets consists of a relation, a context, a pair of head and tail entities in the context and their entity types. The head entity has the relation with the tail entity and entities are pre-categorized as the following types: " + \
(', '.join(entity_types[datasetname])) + \
". Here are some samples for relation '" + k + "':\n"
for i in range(args.k):
sample = "Relation: " + k + ". Context: " + ' '.join([convert_token(token) for token in v[i]['token']]) + ' ' + "Head Entity: " + ' '.join([convert_token(token) for token in v[i]['token'][v[i]['subj_start']:v[i]['subj_end']+1]]) + '. ' + "Head Type: " + v[i]['subj_type'] + '. ' + "Tail Entity: " + ' '.join([convert_token(token) for token in v[i]['token'][v[i]['obj_start']:v[i]['obj_end']+1]]) + ". " + "Tail Type: " + v[i]['obj_type'] + ".\n"
prompt = prompt + sample
prompt = prompt + "Generate more samples like above for the relation '" + k + "'."
response = openai.Completion.create(
model="text-davinci-003",
prompt = prompt,
temperature=1,
max_tokens=3500
)
res = response['choices'][0]['text'].split('\n')
for line in res:
if len(line) == 0:
continue
try:
DAdata = {}
data1 = line.split('Relation:')[-1].strip()
onepoint = data1.index('.')
relation = data1[:onepoint]
if relation == k:
relation = k
else:
continue
# text
data2 = data1.split('Context:')[-1].strip()
data2lower = data2.lower()
if "head entity:" in data2lower:
textend = data2lower.index('head entity:')
text = data2[:textend].strip()
data3 = data2[textend+len('head entity:'):].strip()
else:
continue
DAdata['text'] = text
# head entity
data3lower = data3.lower()
if ". head type:" in data3lower:
headend = data3lower.index(". head type:")
head = data3[:headend]
data4 = data3[headend + len(". head type:"):].strip()
else:
continue
# head type
data4lower = data4.lower()
if ". tail entity:" in data4lower:
htend = data4lower.index(". tail entity:")
headtype = data4[:htend]
if headtype in entity_types[datasetname] or headtype.replace('_',' ') in entity_types[datasetname]:
if datasetname in ["tacrev","tacred","retacred"]:
headtype = headtype.upper()
if headtype=="MISCELLANEOUS":
headtype = "MISC"
else:
headtype = headtype.replace(" ","_")
DAdata['subj_type'] = headtype
elif datasetname=="SciERC":
DAdata['subj_type'] = headtype.title()
else:
continue
data5 = data4[htend+len(". tail entity:"):].strip()
else:
continue
# tail entity
data5lower = data5.lower()
if ". tail type:" in data5lower:
tailend = data5lower.index(". tail type:")
tail = data5[:tailend]
data6 = data5[tailend + len(". tail type:"):].strip()
else:
continue
# tail type
tailtype = data6[:-1].strip()
if tailtype in entity_types[datasetname] or tailtype.replace("_"," ") in entity_types[datasetname]:
if datasetname in ["tacrev","tacred","retacred"]:
                            tailtype = tailtype.upper()
                            if tailtype=="MISCELLANEOUS":
                                tailtype = "MISC"
                            else:
                                tailtype = tailtype.replace(" ","_")
                            DAdata['obj_type'] = tailtype
                        elif datasetname=="SciERC":
                            DAdata['obj_type'] = tailtype.title()
else:
continue
textlower = text.lower()
headlower = head.lower()
if headlower in textlower:
hpos1 = textlower.index(headlower)
hpos2 = hpos1 + len(headlower)
truehead = text[hpos1:hpos2]
else:
continue
taillower = tail.lower()
if taillower in textlower:
tpos1 = textlower.index(taillower)
tpos2 = tpos1 + len(taillower)
truetail = text[tpos1:tpos2]
else:
continue
DAdata['subj'] = truehead
DAdata['subj_start'], DAdata['subj_end'] = hpos1, hpos2
DAdata['obj'] = truetail
DAdata['obj_start'], DAdata['obj_end'] = tpos1, tpos2
DAdata['relation'] = k
f.writelines(json.dumps(DAdata, ensure_ascii=False))
f.write('\n')
except:
                    pass
| [
". Here are some samples for relation '",
"':\n",
"PLACEHOLDERGenerate more samples like above for the relation 'PLACEHOLDER'.",
"PLACEHOLDERPLACEHOLDER",
"One sample in relation extraction datasets consists of a relation, a context, a pair of head and tail entities in the context and their entity types. The head entity has the relation with the tail entity and entities are pre-categorized as the following types: ",
", "
] |
2024-01-10 | WuTao18/DeepKE | example~llm~gpt3ICL.py | import openai
import json
import random
import time
from tqdm import tqdm
from collections import Counter
import argparse
import numpy as np
import copy
import os
def convert_token(token):
""" Convert PTB tokens to normal tokens """
if (token.lower() == '-lrb-'):
return '('
elif (token.lower() == '-rrb-'):
return ')'
elif (token.lower() == '-lsb-'):
return '['
elif (token.lower() == '-rsb-'):
return ']'
elif (token.lower() == '-lcb-'):
return '{'
elif (token.lower() == '-rcb-'):
return '}'
return token
def f1_score(true, pred_result, rel2id):
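    """Compute accuracy and micro precision/recall/F1, treating the dataset's negative
    label (e.g. 'no_relation', 'NA', 'Other') as outside the positive classes."""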
correct = 0
total = len(true)
correct_positive = 0
pred_positive = 0
gold_positive = 0
neg = -1
for name in ['NA', 'na', 'no_relation', 'Other', 'Others', 'false', 'unanswerable']:
if name in rel2id:
neg = rel2id[name]
break
for i in range(total):
golden = true[i]
if golden == pred_result[i]:
correct += 1
if golden != neg:
correct_positive += 1
if golden != neg:
gold_positive +=1
if pred_result[i] != neg:
pred_positive += 1
acc = float(correct) / float(total)
try:
micro_p = float(correct_positive) / float(pred_positive)
except:
micro_p = 0
try:
micro_r = float(correct_positive) / float(gold_positive)
except:
micro_r = 0
try:
micro_f1 = 2 * micro_p * micro_r / (micro_p + micro_r)
except:
micro_f1 = 0
result = {'acc': acc, 'micro_p': micro_p, 'micro_r': micro_r, 'micro_f1': micro_f1}
return result
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--api_key', '-ak', type=str, required=True)
parser.add_argument('--train_path', '-tp', type=str, required=True, help="The path of training / demonstration data.")
parser.add_argument('--test_path', '-ttp', type=str, required=True, help="The path of test data.")
parser.add_argument('--output_success', '-os', type=str, required=True, help="The output directory of successful ICL samples.")
parser.add_argument('--output_nores', '-on', type=str, required=True, help="The output directory of failed ICL samples.")
parser.add_argument('--prompt', type=str, required=True, choices=["text", "text_schema", "instruct", "instruct_schema"])
parser.add_argument('--k', type=int, default=1, help="k-shot demonstrations")
args = parser.parse_args()
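    # Example invocation (API key and paths are illustrative):
    #   python gpt3ICL.py -ak sk-... -tp data/train.json -ttp data/test.json -os out/success -on out/nores --prompt instruct_schema --k 1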
openai.api_key = args.api_key
# Train / Demostration Set
with open(args.train_path,'r') as f:
train = json.load(f)
label_list = {}
for line in train:
rel = line['relation']
if rel not in label_list:
label_list[rel] = [line]
else:
label_list[rel].append(line)
# Relations
rels = list(label_list.keys())
rel2id = {}
for i, rel in enumerate(rels):
rel2id[rel] = i
# Label words
rel2labelword = {}
for rel in rels:
rel2labelword[rel] = rel.lower().replace("_"," ").replace("-", " ").replace("per", "person").replace("org", "organization").replace("stateor", "state or ")
labelword2rel = {}
for k,v in rel2labelword.items():
labelword2rel[v] = k
# Test Set
with open(args.test_path,'r') as f:
test = json.load(f)
res = []
true = []
nores = []
success = []
with open(os.path.join(args.output_success, "os.json"),"w") as f:
for input in tqdm(test):
random.shuffle(rels)
try:
if "text" in args.prompt:
prompt = "There are candidate relations: " + ', '.join(labelword2rel.keys()) + ".\n"
else:
prompt = "Given a context, a pair of head and tail entities in the context, decide the relationship between the head and tail entities from candidate relations: " + \
', '.join(labelword2rel.keys()) + ".\n"
for rel in rels:
random.shuffle(label_list[rel])
kshot = label_list[rel][:args.k]
for data in kshot:
ss, se = data['subj_start'], data['subj_end']
head = ' '.join(data['token'][ss:se+1])
headtype = data['subj_type'].lower().replace('_',' ')
if headtype == "misc":
headtype = "miscellaneous"
                        obj_s, obj_e = data['obj_start'], data['obj_end']  # renamed to avoid shadowing the os module
                        tail = ' '.join(data['token'][obj_s:obj_e+1])
tailtype = data['obj_type'].lower().replace('_',' ')
if tailtype == "misc":
tailtype = "miscellaneous"
sentence = ' '.join([convert_token(token) for token in data['token']])
relation = rel2labelword[data['relation']]
if "schema" in args.prompt:
prompt += "Context: " + sentence + " The relation between " + headtype + " '" + head + "' and " + tailtype + " '" + tail + "' in the context is " + relation + ".\n"
else:
prompt += "Context: " + sentence + " The relation between '" + head + "' and '" + tail + "' in the context is " + relation + ".\n"
# prompt += " The relation between '" + head + "' and '" + tail + "' in the context '" + sentence + "' is " + relation + ".\n"
tss, tse = input['subj_start'], input['subj_end']
testhead = ' '.join(input['token'][tss:tse+1])
testheadtype = input['subj_type'].lower().replace('_',' ')
if testheadtype == "misc":
testheadtype = "miscellaneous"
tos, toe = input['obj_start'], input['obj_end']
testtail = ' '.join(input['token'][tos:toe+1])
testtailtype = input['obj_type'].lower().replace('_',' ')
if testtailtype == "misc":
testtailtype = "miscellaneous"
testsen = ' '.join(input['token'])
if "schema" in args.prompt:
prompt += "Context: " + testsen + " The relation between " + testheadtype + " '" + testhead + "' and " + testtailtype + " '" + testtail + "' in the context is "
else:
prompt += "Context: " + testsen + " The relation between '" + testhead + "' and '" + testtail + "' in the context is "
# prompt += " The relation between '" + testhead + "' and '" + testtail + "' in the context '" + testsen + "' is "
# print(prompt)
response = openai.Completion.create(
model="text-davinci-003",
prompt = prompt,
temperature=0,
max_tokens=128
)
resrel = response['choices'][0]['text'].strip().split('.')[0].lower()
if resrel in labelword2rel:
truerel = rel2id[input['relation']]
predictrel = rel2id[labelword2rel[resrel]]
true.append(truerel)
res.append(predictrel)
input['pr'] = labelword2rel[resrel]
success.append(input)
f.writelines(json.dumps(input))
f.write('\n')
elif ("city" in resrel) and (resrel.replace("city", "cities") in labelword2rel):
truerel = rel2id[input['relation']]
predictrel = rel2id[labelword2rel[resrel.replace("city", "cities")]]
true.append(truerel)
res.append(predictrel)
input['pr'] = labelword2rel[resrel.replace("city", "cities")]
success.append(input)
f.writelines(json.dumps(input))
f.write('\n')
elif ("country" in resrel) and (resrel.replace("country", "countries") in labelword2rel):
truerel = rel2id[input['relation']]
predictrel = rel2id[labelword2rel[resrel.replace("country", "countries")]]
true.append(truerel)
res.append(predictrel)
input['pr'] = labelword2rel[resrel.replace("country", "countries")]
success.append(input)
f.writelines(json.dumps(input))
f.write('\n')
elif ("province" in resrel) and (resrel.replace("province", "provinces") in labelword2rel):
truerel = rel2id[input['relation']]
predictrel = rel2id[labelword2rel[resrel.replace("province", "provinces")]]
true.append(truerel)
res.append(predictrel)
input['pr'] = labelword2rel[resrel.replace("province", "provinces")]
success.append(input)
f.writelines(json.dumps(input))
f.write('\n')
else:
input['pr'] = resrel
nores.append(input)
except Exception as e:
print(e)
if e._message == 'You exceeded your current quota, please check your plan and billing details.':
break
nores.append(input)
time.sleep(30)
if len(nores)!=0:
json.dump(nores, open(os.path.join(args.output_nores, "no.json"),'w'))
    print(f1_score(true, res, rel2id))
| [
"' and ",
"Context: PLACEHOLDER The relation between 'PLACEHOLDER' and 'PLACEHOLDER' in the context is ",
" '",
".\n",
"Given a context, a pair of head and tail entities in the context, decide the relationship between the head and tail entities from candidate relations: ",
"Context: PLACEHOLDER The relation between 'PLACEHOLDER' and 'PLACEHOLDER' in the context is PLACEHOLDER.\n",
"Context: ",
"There are candidate relations: ",
", ",
"' in the context is ",
" The relation between "
] |
2024-01-10 | software-students-fall2023/3-python-package-exercise-stuytowners | src~funnygpt~functions.py | from openai import OpenAI
client = OpenAI()
def llm(question):
completion = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": question},
],
max_tokens=200,
temperature=0.99
)
answer = completion.choices[0].message.content
return answer
preset = "You are the most chill cs professor in the world, Professor Foo Barstein. You are talking to me, a student in your class."
def gptchat(type=None, subject=None, preset=preset):
if (type == "joke"):
if (subject):
response = llm(preset + "tell me a funny joke about " + subject)
else:
response = llm(preset + "tell me a funny joke")
elif (type == "haiku"):
if (subject):
response = llm(preset + "tell me a haiku about " + subject)
else:
response = llm(preset + "tell me a haiku")
elif (type == "compliment"):
if (subject):
response = llm(preset + "tell me a compliment about " + subject)
else:
response = llm(preset + "tell me a compliment")
elif (type == "email"):
if (subject):
response = llm(preset + "write an email about " + subject)
else:
response = llm(preset + "write a random email to send to my boss")
else:
if (subject):
response = llm(preset + subject)
else:
response = llm(preset + "say something random")
return response
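# Example usage (requires a valid OpenAI API key for the client above):
#   gptchat(type="joke", subject="group projects")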
def cowtalk(input):
words = input.split()
# Create a new list that includes 'moo' after every three words
moo_words = []
for i in range(0, len(words), 3):
moo_words.extend(words[i:i+3]) # Add the next three words
moo_words.append('moo') # Follow them with 'moo'
    # Convert the list back into a string and drop the trailing 'moo' added after the last group
    moo_sentence = ' '.join(moo_words)
    if moo_sentence.endswith(' moo'):
        moo_sentence = moo_sentence[:-len(' moo')]
return moo_sentence
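# Example:
#   cowtalk("the quick brown fox jumps over the lazy dog")
#   # -> "the quick brown moo fox jumps over moo the lazy dog"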
def onewordperline(input_string):
words = input_string.split() # Split the input string into words
return '\n'.join(words) # Join the words with a newline character
def changepreset():
global preset # Use the 'preset' variable defined outside the function
original_preset=preset
new_preset = input("Enter the new preset: ")
if new_preset == "":
return original_preset
elif new_preset == " ":
return original_preset
else:
return new_preset
def main():
# result = onewordperline()
# Do something with the result if needed, for example:
# print(result)
pass
if __name__ == "__main__":
main()
| [] |
2024-01-10 | joaopalmeiro/openai-playground | dev.py | # https://openai.com/blog/dall-e-api-now-available-in-public-beta/
# https://beta.openai.com/docs/libraries/python-bindings
# https://beta.openai.com/docs/guides/images
# https://labs.openai.com/
# https://beta.openai.com/docs/api-reference/images/create
# https://github.com/un33k/python-slugify
# https://beta.openai.com/examples
import os
import urllib.request
from datetime import datetime
import openai
from dotenv import load_dotenv
from slugify import slugify
IMAGE_SIZES = {
"S": "256x256",
"M": "512x512",
"L": "1024x1024",
}
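# The three image sizes the DALL-E endpoint below (openai.Image.create) accepted at the time of writing.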
if __name__ == "__main__":
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
prompt = "Pikachu painted by El Greco."
response = openai.Image.create(prompt=prompt, n=1, size=IMAGE_SIZES["L"])
image = response["data"][0]
image_url = image["url"]
print(image_url)
urllib.request.urlretrieve(
image_url,
f"output/{slugify(prompt)}_openai_{int(datetime.now().timestamp())}.png",
)
| [
"Pikachu painted by El Greco."
] |
2024-01-10 | nikk0o046/carryoncarlos-backend | flights_function~params~duration.py | import os
import re
import time
import logging
import json
import openai
from dotenv import load_dotenv
load_dotenv()
logger = logging.getLogger(__name__)
openai.api_key = os.environ.get('OPENAI_API_KEY')
def create_duration_params(user_request : str, selectedCityID : str, user_id : str) -> dict:
"""
This function takes the user request, the selected city ID and the user ID and returns the duration parameters.
Args:
user_request (str): The user request.
selectedCityID (str): The selected city ID.
user_id (str): The user ID.
Returns:
dict: The duration parameters.
"""
start_time = time.time()
logger.debug("[UserID: %s] Creating duration parameters...", user_id)
# Create the prompt templates
system_template = """You're an intelligent AI agent, and your job is to create search parameters about the flight duration, stopovers, and stopover duration.
INSTRUCTIONS:
When creating flight search parameters based on user info, consider the following:
Specified Flight Preferences: Prioritize user-specific requests, like "direct flights."
Trip Distance:
Short Haul: Favor direct routes as layovers can extend short trips unnecessarily.
Long Haul: Allow more layovers, but balance their number and duration.
Trip Duration:
Short Trips: Prioritize speed to maximize time at the destination.
Long Trips: Consider comfort and minimize unnecessary layovers.
Availability of Flights:
Major Hubs: Expect numerous direct flight options.
Less Popular Routes: Optimize for shortest total travel time and feasible connections.
Use these parameters:
max_sector_stopovers: Maximum number of stopovers per sector.
stopover_to: Maximum length of a stopover (e.g., 4:00 means 4 hours). Aim to keep under 5:00.
max_fly_duration: Maximum itinerary duration, including stopovers. Aim to keep short.
ANSWER INSTRUCTIONS:
Provide:
1) Thought: Detail your reasoning briefly.
2) Markdown code snippet formatted in the following schema, including the leading and trailing "\`\`\`json" and "\`\`\`":
```json
{
"key1": value1 // Define relevant values. Only use keys mentioned in the API documentation.
"key2": value2
}
```"""
#example 1
userExample1 = """Origin: Madrid
Info: Origin: Madrid, ES | Destination: Barcelona, ES | Departure: Next month | Duration: Weekend"""
botExample1 = """Thought: Considering the short-haul nature of Madrid to Barcelona and the short duration of the trip (weekend), direct flights would be ideal. Major hubs like Madrid and Barcelona have numerous direct flight options.
```json
{
"max_sector_stopovers": 0
}
```"""
#example 2
userExample2 = """Origin: Helsinki
Info: Origin: Helsinki, FI | Destination: South America | Departure: January | Duration: 2 weeks | Flights: Any"""
botExample2 = """Thought: The long-haul nature of Helsinki to South America, combined with the user's flexibility for any flights, suggests that we should allow some layovers. However, we'll aim to optimize for comfort by limiting lengthy stopovers and excessive travel time.
```json
{
"max_fly_duration": 20,
"max_sector_stopovers": 2,
"stopover_to": "5:00"
}
```"""
#example 3
userExample3 = """Origin: New York
Info: "Origin: New York, US | Destination: Sydney, AU | Departure: March | Duration: 1 week | Flights: direct"""
botExample3 = """Thought: The user wants direct flights, so we set max_sector_stopovers to 0. We omit stopover_to and max_fly_duration for direct flights.
```json
{
"max_sector_stopovers": 0
}
```"""
human_template = f"Origin: {selectedCityID}\nInfo: {user_request}"
# Construct the conversation message list
message_list = [
{"role": "system", "content": system_template},
{"role": "user", "content": userExample1},
{"role": "assistant", "content": botExample1},
{"role": "user", "content": userExample2},
{"role": "assistant", "content": botExample2},
{"role": "user", "content": userExample3},
{"role": "assistant", "content": botExample3},
{"role": "user", "content": human_template}
]
# Request the response from the model
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
temperature=0,
messages=message_list,
)
response_content = response.choices[0].message['content']
logger.debug("[UserID: %s] Duration parameters response: %s", user_id, response_content)
# Extract the json string using regular expressions
json_str = re.search(r"\{.*\}", response_content, re.DOTALL).group()
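    # Note: re.search returns None when the reply contains no JSON block, so .group() raises AttributeError here.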
# Convert the json string to a Python dictionary
logger.debug("[UserID: %s] json_str: %s", user_id, json_str)
duration_params = json.loads(json_str)
logger.debug("[UserID: %s] Duration created: %s", user_id, duration_params)
end_time = time.time()
elapsed_time = end_time - start_time
logger.debug("[UserID: %s] Function execution time: %s seconds", user_id, elapsed_time)
    return duration_params
| [
"Origin: New York\n Info: \"Origin: New York, US | Destination: Sydney, AU | Departure: March | Duration: 1 week | Flights: direct",
"Origin: PLACEHOLDER\nInfo: PLACEHOLDER",
"Thought: The user wants direct flights, so we set max_sector_stopovers to 0. We omit stopover_to and max_fly_duration for direct flights.\n ```json\n {\n \"max_sector_stopovers\": 0\n }\n ```",
"You're an intelligent AI agent, and your job is to create search parameters about the flight duration, stopovers, and stopover duration.\n\nINSTRUCTIONS:\nWhen creating flight search parameters based on user info, consider the following:\n\nSpecified Flight Preferences: Prioritize user-specific requests, like \"direct flights.\"\nTrip Distance:\nShort Haul: Favor direct routes as layovers can extend short trips unnecessarily.\nLong Haul: Allow more layovers, but balance their number and duration.\nTrip Duration:\nShort Trips: Prioritize speed to maximize time at the destination.\nLong Trips: Consider comfort and minimize unnecessary layovers.\nAvailability of Flights:\nMajor Hubs: Expect numerous direct flight options.\nLess Popular Routes: Optimize for shortest total travel time and feasible connections.\nUse these parameters:\n\nmax_sector_stopovers: Maximum number of stopovers per sector.\nstopover_to: Maximum length of a stopover (e.g., 4:00 means 4 hours). Aim to keep under 5:00.\nmax_fly_duration: Maximum itinerary duration, including stopovers. Aim to keep short.\nANSWER INSTRUCTIONS:\nProvide:\n\n1) Thought: Detail your reasoning briefly.\n2) Markdown code snippet formatted in the following schema, including the leading and trailing \"\\`\\`\\`json\" and \"\\`\\`\\`\":\n\n```json\n{\n \"key1\": value1 // Define relevant values. Only use keys mentioned in the API documentation. \n \"key2\": value2\n}\n ```",
"Origin: Madrid\n Info: Origin: Madrid, ES | Destination: Barcelona, ES | Departure: Next month | Duration: Weekend",
"Thought: Considering the short-haul nature of Madrid to Barcelona and the short duration of the trip (weekend), direct flights would be ideal. Major hubs like Madrid and Barcelona have numerous direct flight options.\n ```json\n {\n \"max_sector_stopovers\": 0\n }\n ```",
"Thought: The long-haul nature of Helsinki to South America, combined with the user's flexibility for any flights, suggests that we should allow some layovers. However, we'll aim to optimize for comfort by limiting lengthy stopovers and excessive travel time.\n ```json\n {\n \"max_fly_duration\": 20,\n \"max_sector_stopovers\": 2,\n \"stopover_to\": \"5:00\"\n }\n ```",
"Origin: Helsinki\n Info: Origin: Helsinki, FI | Destination: South America | Departure: January | Duration: 2 weeks | Flights: Any"
] |
2024-01-10 | nikk0o046/carryoncarlos-backend | flights_function~fine-tuning~time_answer_script.py | """
THIS SCRIPT IS UNFINISHED
This script uses GPT-4 to create fine-tuning data for the time parameters function.
"""
import os
import openai
import re
import json
import time
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.environ.get('OPENAI_API_KEY')
def create_time_dict(test_case_number : int, user_request : str, current_date : str) -> dict:
"""
This function takes a user request and the current date and returns parameters to evaluate the time params model.
Args:
user_request (str): The user request.
current_date (str): The current date.
Returns:
dict: A dictionary containing test_case_number (int), the GPT-4 response content (str), the extracted time params (dict), the elapsed time (float), the prompt tokens (int), the completion tokens (int) and quality score (float), which is a placeholder.
"""
start_time = time.time() #start timer to log it later
#create the prompt templates
system_template = """API DOCUMENTATION:
departure_date_from, departure_date_to: Range for outbound flight departure (dd/mm/yyyy).
nights_in_dst_from, nights_in_dst_to: Minimum and maximum stay length at the destination (in nights). Only exclude these if the user is looking for a one-way trip. Otherwise you must make an assumption.
fly_days, ret_fly_days: List of preferred days for outbound and return flights (0=Sunday, 1=Monday, ... 6=Saturday).
fly_days_type, ret_fly_days_type: Specifies if fly_days/ret_fly_days is for an arrival or a departure flight.
If the user looks for specific dates, set departure_date_from and departure_date_to to a specific date, and match nights_in_dst_from and nights_in_dst_to so that the return day will be correct.
ANSWER INSTRUCTIONS:
Your task is to create parameters specified above based on user information. The parameters will be forwarded to another assistant, who uses them to search flights. Do not come up with any other parameters.
The output should include both:
1) Thought: Thinking out loud about the user's needs and the task.
2) Markdown code snippet formatted in the following schema, including the leading and trailing "\`\`\`json" and "\`\`\`":
```json
{
"key1": value1 // Define relevant values. Only use keys mentioned in the API documentation.
"key2": value2
}
```"""
#example 1
userExample1 = "Current date: 10/07/2023\nInfo: Origin: London, GB | Destination: Paris, FR | Departure: Next month's Friday| Duration: Weekend | Flights: Any"
botExample1 = """Thought: User wants to leave on a Friday next month (August) and stay for two nights.
```json
{
"departure_date_from": "01/08/2023",
"departure_date_to": "31/08/2023",
"fly_days": 5,
"fly_days_type": "departure",
"nights_in_dst_from": 2,
"nights_in_dst_to": 2,
"ret_fly_days": 0,
"ret_fly_days_type": "departure"
}
```"""
#example 2
userExample2 = """Current date: 01/01/2024\nInfo: Origin: San Francisco, US | Destination: Anywhere abroad | Departure: March | Duration: About a week | Flights: Any"""
botExample2 = """Thought: setting departure dates for next March, lasting about a week which translated to 6-8 nights.
```json
{
"departure_date_from": "01/03/2024",
"departure_date_to": "31/03/2024",
"nights_in_dst_from": 6,
"nights_in_dst_to": 8
}
```"""
#example 3
userExample3 = "Current date: 10/08/2023\nInfo: Origin: Los Angeles, US | Destination: Miami, US | Departure: October's Long Weekend | Duration: 3 days | Flights: direct"
botExample3 = """Thought: Long weekend usually means three days. Possible departure days are Thursday and Friday. Possible return flight days are Sunday or Monday.
```json
{
"departure_date_from": "01/10/2023",
"departure_date_to": "31/10/2023",
"nights_in_dst_from": 3,
"nights_in_dst_to": 3,
"fly_days": [4, 5],
"ret_fly_days": [0, 1]
}
```"""
#example 4
userExample4 = "Current date: 10/04/2023\nInfo: Origin: Chicago, US | Destination: Paris, FR | Departure: Summer | Flights: One-way"
botExample4 = """Thought: The user only needs an outbound flight to Paris, which should be anytime in the summer months (June, July, August). Because it is a one-way trip, nights_in_dst-parameters must be excluded.
```json
{
"departure_date_from": "01/06/2023",
"departure_date_to": "31/08/2023"
}
```"""
#example 5
userExample5 = "Current date: 10/07/2023\nInfo: Origin: Boston, US | Destination: Abroad | Activity: not specified | Flights: 4th of October to 8th of October"
botExample5 = """Thought: The user wants the outbound flight on 4th of October, so we set the departure window (departure_date_from and departure_date_to) to a single day. The return is on 8th of October, so the stay is exactly 4 nights. Therefore we set both nights_in_dst_from and nights_in_dst_to to 4.
```json
{
"departure_date_from": "04/10/2023",
"departure_date_to": "04/10/2023",
"nights_in_dst_from": 4,
"nights_in_dst_to": 4
}
```"""
human_template = f"Current date: {current_date}\nInfo: {user_request}"
# Construct the conversation message list
message_list = [
{"role": "system", "content": system_template},
{"role": "user", "content": userExample1},
{"role": "assistant", "content": botExample1},
{"role": "user", "content": userExample2},
{"role": "assistant", "content": botExample2},
{"role": "user", "content": userExample3},
{"role": "assistant", "content": botExample3},
{"role": "user", "content": userExample4},
{"role": "assistant", "content": botExample4},
{"role": "user", "content": userExample5},
{"role": "assistant", "content": botExample5},
{"role": "user", "content": human_template}
]
# Request the response from the model
response = openai.ChatCompletion.create(
model="gpt-4",
temperature=0,
messages=message_list,
)
response_content = response.choices[0].message['content']
# Extract the json string using regular expressions
json_str = re.search(r"\{.*\}", response_content, re.DOTALL).group()
# Convert the json string to a Python dictionary
time_params = json.loads(json_str)
end_time = time.time()
elapsed_time = end_time - start_time
output_dict = {
"test_case_number": test_case_number,
"response_content": response_content,
"time_params": time_params,
"elapsed_time": elapsed_time,
"prompt_tokens": response.usage["prompt_tokens"],
"completion_tokens": response.usage["completion_tokens"],
"quality": 1.0 # Default is 1.0, which means as good as it gets. Bug is 0.0. If it doesn't crash but isn't perfect, use 0.5.
}
return output_dict
if __name__ == "__main__":
def save_to_file(data, filename="../data/time_answers_raw.json"):
with open(filename, "a") as file:
json.dump(data, file)
file.write("\n")
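    # save_to_file appends one JSON object per line (JSON Lines), so results from partial runs are preserved.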
# Read the JSON file
with open("../data/test_cases.json", "r") as file:
test_cases = json.load(file)
# Loop through the test cases
for test_case in test_cases:
user_request = test_case["user_request"]
date = test_case["date"]
test_case_number = test_case["test_case_number"]
print(f"Test number: {test_case_number}")
result = create_time_dict(test_case_number, user_request, date)
save_to_file(result)
| [
"Thought: setting departure dates for next March, lasting about a week which translated to 6-8 nights.\n```json\n{\n\"departure_date_from\": \"01/03/2024\",\n\"departure_date_to\": \"31/03/2024\",\n\"nights_in_dst_from\": 6,\n\"nights_in_dst_to\": 8\n}\n```",
"Current date: 10/07/2023\nInfo: Origin: Boston, US | Destination: Abroad | Activity: not specified | Flights: 4th of October to 8th of October",
"Thought: User wants to leave on a Friday next month (August) and stay for two nights.\n```json\n{\n \"departure_date_from\": \"01/08/2023\",\n \"departure_date_to\": \"31/08/2023\",\n \"fly_days\": 5,\n \"fly_days_type\": \"departure\",\n \"nights_in_dst_from\": 2,\n \"nights_in_dst_to\": 2,\n \"ret_fly_days\": 0,\n \"ret_fly_days_type\": \"departure\"\n}\n```",
"Current date: PLACEHOLDER\nInfo: PLACEHOLDER",
"Current date: 10/04/2023\nInfo: Origin: Chicago, US | Destination: Paris, FR | Departure: Summer | Flights: One-way",
"Thought: The user wants the outbound flight on 4th of October, so we set the departure window (departure_date_from and departure_date_to) to a single day. The return is on 8th of October, so the stay is exactly 4 nights. Therefore we set both nights_in_dst_from and nights_in_dst_to to 4.\n```json\n{\n \"departure_date_from\": \"04/10/2023\",\n \"departure_date_to\": \"04/10/2023\",\n \"nights_in_dst_from\": 4,\n \"nights_in_dst_to\": 4\n}\n```",
"Current date: 10/08/2023\nInfo: Origin: Los Angeles, US | Destination: Miami, US | Departure: October's Long Weekend | Duration: 3 days | Flights: direct",
"Thought: The user only needs an outbound flight to Paris, which should be anytime in the summer months (June, July, August). Because it is a one-way trip, nights_in_dst-parameters must be excluded. \n```json\n{\n \"departure_date_from\": \"01/06/2023\",\n \"departure_date_to\": \"31/08/2023\"\n}\n```",
"Thought: Long weekend usually means three days. Possible departure days are Thursday and Friday. Possible return flight days are Sunday or Monday.\n```json\n{\n \"departure_date_from\": \"01/10/2023\",\n \"departure_date_to\": \"31/10/2023\",\n \"nights_in_dst_from\": 3,\n \"nights_in_dst_to\": 3,\n \"fly_days\": [4, 5],\n \"ret_fly_days\": [0, 1]\n}\n```",
"API DOCUMENTATION:\ndeparture_date_from, departure_date_to: Range for outbound flight departure (dd/mm/yyyy). \n\nnights_in_dst_from, nights_in_dst_to: Minimum and maximum stay length at the destination (in nights). Only exclude these if the user is looking for a one-way trip. Otherwise you must make an assumption.\n\nfly_days, ret_fly_days: List of preferred days for outbound and return flights (0=Sunday, 1=Monday, ... 6=Saturday). \n\nfly_days_type, ret_fly_days_type: Specifies if fly_days/ret_fly_days is for an arrival or a departure flight.\n\nIf the user looks for specific dates, set departure_date_from and departure_date_to to a specific date, and match nights_in_dst_from and nights_in_dst_to so that the return day will be correct.\n\nANSWER INSTRUCTIONS:\nYour task is to create parameters specified above based on user information. The parameters will be forwarded to another assistant, who uses them to search flights. Do not come up with any other parameters.\nThe output should include both:\n1) Thought: Thinking out loud about the user's needs and the task.\n2) Markdown code snippet formatted in the following schema, including the leading and trailing \"\\`\\`\\`json\" and \"\\`\\`\\`\":\n\n```json\n{\n \"key1\": value1 // Define relevant values. Only use keys mentioned in the API documentation. \n \"key2\": value2\n}\n```",
"Current date: 10/07/2023\nInfo: Origin: London, GB | Destination: Paris, FR | Departure: Next month's Friday| Duration: Weekend | Flights: Any",
"Current date: 01/01/2024\nInfo: Origin: San Francisco, US | Destination: Anywhere abroad | Departure: March | Duration: About a week | Flights: Any"
] |