date_collected (stringclasses 1) | repo_name (stringlengths 6-116) | file_name (stringlengths 2-220) | file_contents (stringlengths 13-357k) | prompts (sequence)
---|---|---|---|---|
2024-01-10 | definitive-io/openassistants | packages~openassistants~openassistants~llm_function_calling~entity_resolution.py | import asyncio
from typing import Any, Dict, List, Tuple
from langchain.chat_models.base import BaseChatModel
from langchain.embeddings.base import Embeddings
from langchain.schema import Document
from langchain.vectorstores.usearch import USearch
from openassistants.data_models.chat_messages import OpasMessage
from openassistants.functions.base import (
IEntity,
IEntityConfig,
IFunction,
)
from openassistants.llm_function_calling.infilling import generate_arguments
async def _vec_search(
documents: List[Document],
query: str,
embeddings: Embeddings,
) -> List[Document]:
search: USearch = await USearch.afrom_documents(
embedding=embeddings,
documents=documents,
)
results = await search.asimilarity_search(
query,
k=3,
)
return results
def entity_to_document(entity: IEntity) -> Document:
doc = Document(
metadata=dict(id=entity.get_identity()), page_content=entity.get_identity()
)
if entity.get_description():
doc.page_content += f" ({entity.get_description()})"
return doc
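# For example (a hedged illustration, not part of the original module): an entity
# whose identity is "acme" and whose description is "Acme Corp" becomes
# Document(page_content="acme (Acme Corp)", metadata={"id": "acme"}).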
async def _get_entities(
entity_cfg: IEntityConfig,
entity_key: str,
preliminary_arguments: Dict[str, Any],
embeddings: Embeddings,
) -> Tuple[str, List[IEntity]]:
documents = [entity_to_document(entity) for entity in entity_cfg.get_entities()]
query = str(preliminary_arguments[entity_key])
vec_result = await _vec_search(documents, query, embeddings)
# filter for entities that are in the vector search result
ids: set[str] = set([doc.metadata["id"] for doc in vec_result])
entities = [
entity for entity in entity_cfg.get_entities() if entity.get_identity() in ids
]
return entity_key, entities
async def resolve_entities(
function: IFunction,
function_infilling_llm: BaseChatModel,
embeddings: Embeddings,
user_query: str,
chat_history: List[OpasMessage],
) -> Dict[str, List[IEntity]]:
entity_configs = await function.get_entity_configs()
# skip if no entity configs
if len(entity_configs) == 0:
return {}
preliminary_arguments = await generate_arguments(
function,
function_infilling_llm,
user_query,
chat_history,
{},
)
results = await asyncio.gather(
*[
_get_entities(entity_cfg, param_name, preliminary_arguments, embeddings)
for param_name, entity_cfg in entity_configs.items()
]
)
return {key: entities for key, entities in results}
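# A minimal usage sketch (assumption: `my_function`, `infilling_llm`,
# `embeddings`, and `history` are objects built by the caller; they are not
# defined in this module):
#
#     resolved = await resolve_entities(
#         function=my_function,
#         function_infilling_llm=infilling_llm,
#         embeddings=embeddings,
#         user_query="show revenue for Acme Corp",
#         chat_history=history,
#     )
#     # `resolved` maps each entity-typed parameter name to the entities whose
#     # embedded identities were closest to the preliminary argument value.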
| [] |
2024-01-10 | definitive-io/openassistants | packages~openassistants~openassistants~utils~vision.py | from langchain.chat_models.base import BaseChatModel
from langchain.schema.messages import HumanMessage
def image_url_to_text(
vision_model: BaseChatModel, image_url: str, text_context: str
) -> str:
description_prompt = (
"Describe this image in detail."
" Only use one or two sentences, but include specific details."
"The image needs to be described in the context"
" of the following user question:"
f"START_CONTEXT\n{text_context}\nEND_CONTEXT."
)
msg = vision_model.invoke(
[
HumanMessage(
content=[
{"type": "text", "text": description_prompt},
{"type": "image_url", "image_url": image_url},
]
)
]
)
return msg.content
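# A hedged usage sketch (the vision model name below is an assumption taken
# from the defaults used elsewhere in this repository, not required by this
# function):
#
#     from langchain.chat_models.openai import ChatOpenAI
#     vision = ChatOpenAI(model="gpt-4-vision-preview", max_tokens=256)
#     caption = image_url_to_text(
#         vision_model=vision,
#         image_url="https://example.com/chart.png",
#         text_context="What does the revenue trend look like?",
#     )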
| [
"Describe this image in detail. Only use one or two sentences, but include specific details.The image needs to be described in the context of the following user question:START_CONTEXT\nPLACEHOLDER\nEND_CONTEXT.",
"[{'type': 'text', 'text': PLACEHOLDER}, {'type': 'image_url', 'image_url': PLACEHOLDER}]"
] |
2024-01-10 | definitive-io/openassistants | packages~openassistants~openassistants~core~assistant.py | import asyncio
from typing import Any, Dict, List, Optional, Tuple
from langchain.chat_models.base import BaseChatModel
from langchain.chat_models.openai import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.embeddings.base import Embeddings
from openassistants.contrib.index_function import IndexFunction
from openassistants.data_models.chat_messages import (
OpasAssistantMessage,
OpasFunctionMessage,
OpasMessage,
OpasUserMessage,
)
from openassistants.data_models.function_input import FunctionCall, FunctionInputRequest
from openassistants.functions.base import (
FunctionExecutionDependency,
IEntity,
IFunction,
IFunctionLibrary,
)
from openassistants.functions.crud import LocalFunctionLibrary, PythonLibrary
from openassistants.llm_function_calling.entity_resolution import resolve_entities
from openassistants.llm_function_calling.fallback import perform_general_qa
from openassistants.llm_function_calling.infilling import (
generate_argument_decisions,
generate_arguments,
)
from openassistants.llm_function_calling.selection import select_function
from openassistants.utils.async_utils import AsyncStreamVersion
from openassistants.utils.langchain_util import LangChainCachedEmbeddings
from openassistants.utils.vision import image_url_to_text
class Assistant:
function_identification: BaseChatModel
function_infilling: BaseChatModel
function_summarization: BaseChatModel
function_fallback: BaseChatModel
entity_embedding_model: Embeddings
function_libraries: List[IFunctionLibrary]
scope_description: str
_cached_all_functions: List[IFunction]
def __init__(
self,
libraries: List[str | IFunctionLibrary],
function_identification: Optional[BaseChatModel] = None,
function_infilling: Optional[BaseChatModel] = None,
function_summarization: Optional[BaseChatModel] = None,
function_fallback: Optional[BaseChatModel] = None,
vision_model: Optional[BaseChatModel] = None,
entity_embedding_model: Optional[Embeddings] = None,
scope_description: str = "General assistant.",
add_index: bool = True,
):
# instantiate dynamically vs as default args
self.function_identification = function_identification or ChatOpenAI(
model="gpt-3.5-turbo-16k", temperature=0.0, max_tokens=128
)
self.function_infilling = function_infilling or ChatOpenAI(
model="gpt-3.5-turbo-16k", temperature=0.0, max_tokens=128
)
self.function_summarization = function_summarization or ChatOpenAI(
model="gpt-3.5-turbo-16k", temperature=0.0, max_tokens=1024
)
self.function_fallback = function_fallback or ChatOpenAI(
model="gpt-4-1106-preview", temperature=0.2, max_tokens=1024
)
self.vision_model = vision_model or ChatOpenAI(
model="gpt-4-vision-preview", temperature=0.2, max_tokens=1024
)
self.scope_description = scope_description
self.entity_embedding_model = (
entity_embedding_model or LangChainCachedEmbeddings(OpenAIEmbeddings())
)
self.function_libraries = [
library
if isinstance(library, IFunctionLibrary)
else LocalFunctionLibrary(library)
for library in libraries
]
if add_index:
index_func: IFunction = IndexFunction(
id="index",
display_name="List functions",
description=(
"List the functions available to the assistant. "
"This is a list of things you can ask."
),
sample_questions=[
"What can you do?",
"What can I ask?",
"Which functions are defined?",
],
functions=self.get_all_functions,
)
self.function_libraries.append(PythonLibrary(functions=[index_func]))
self._cached_all_functions = []
async def get_all_functions(self) -> List[IFunction]:
if not self._cached_all_functions:
functions: List[IFunction] = []
for library in self.function_libraries:
functions.extend(await library.get_all_functions())
self._cached_all_functions = functions
return self._cached_all_functions
async def get_function_by_id(self, function_id: str) -> Optional[IFunction]:
functions = await self.get_all_functions()
for function in functions:
if function.get_id() == function_id:
return function
return None
async def execute_function(
self,
function: IFunction,
func_args: Dict[str, Any],
dependencies: Dict[str, Any],
):
deps = FunctionExecutionDependency(
arguments=func_args,
**dependencies,
)
function_call_invocation = OpasAssistantMessage(
content="",
function_call=FunctionCall(name=function.get_id(), arguments=func_args),
)
yield [function_call_invocation]
async for version in function.execute(deps):
yield [
function_call_invocation,
OpasFunctionMessage(name=function.get_id(), outputs=list(version)),
]
async def do_infilling(
self,
dependencies: dict,
message: OpasUserMessage,
selected_function: IFunction,
args_json_schema: dict,
entities_info: Dict[str, List[IEntity]],
) -> Tuple[bool, dict]:
# Perform infilling and generate argument decisions in parallel
chat_history: List[OpasMessage] = dependencies.get("chat_history") # type: ignore
arguments_future = asyncio.create_task(
generate_arguments(
selected_function,
self.function_infilling,
message.content,
chat_history,
entities_info,
)
)
argument_decisions_future = asyncio.create_task(
generate_argument_decisions(
selected_function,
self.function_infilling,
message.content,
chat_history,
)
)
arguments = await arguments_future
argument_decisions = await argument_decisions_future
# Filter arguments that are not needed or cannot be inferred
arguments = {
arg_name: arg_value
for arg_name, arg_value in arguments.items()
if arg_name in argument_decisions
and argument_decisions[arg_name]["can_be_found"]
and argument_decisions[arg_name]["needed"]
}
# Find if any of the arguments are needed but cannot be inferred
arguments_needed_but_cannot_be_inferred = [
arg_name
for arg_name, arg_decision in argument_decisions.items()
if arg_decision["needed"] and not arg_decision["can_be_found"]
]
# Check if any of the arguments that are required are missing
required_arguments_missing = [
arg_name
for arg_name, arg_property in args_json_schema["properties"].items()
if arg_name in args_json_schema["required"] and arg_name not in arguments
]
complete = (
len(arguments_needed_but_cannot_be_inferred) == 0
and len(required_arguments_missing) == 0
)
return complete, arguments
async def handle_user_plaintext(
self,
message: OpasUserMessage,
all_functions: List[IFunction],
dependencies: Dict[str, Any],
autorun: bool,
force_select_function: Optional[str],
) -> AsyncStreamVersion[List[OpasMessage]]:
selected_function: Optional[IFunction] = None
# perform entity resolution
chat_history: List[OpasMessage] = dependencies.get("chat_history") # type: ignore
# Perform function selection
if force_select_function is not None:
filtered = [f for f in all_functions if f.get_id() == force_select_function]
if len(filtered) == 0:
raise ValueError("function not found")
selected_function = filtered[0]
if selected_function is None:
function_selection = await select_function(
self.function_identification, all_functions, message.content
)
if function_selection.function:
selected_function = function_selection.function
elif function_selection.suggested_functions:
suggested_functions_names = ", ".join(
[f.get_id() for f in function_selection.suggested_functions]
)
yield [
OpasAssistantMessage(
content=(
f"No function matching that question was found. "
f"Did you mean: {suggested_functions_names}?"
)
)
]
return
else:
# In case no function was found and no suggested functions were found
# attempt to directly perform the request requested by the user.
async for output in perform_general_qa(
chat=self.function_fallback,
chat_history=dependencies.get("chat_history"), # type: ignore
user_query=message.content,
scope_description=self.scope_description,
):
yield [
OpasAssistantMessage(content=output),
]
return
selected_function_arg_json_schema = (
selected_function.get_parameters_json_schema()
)
entities_info = await resolve_entities(
selected_function,
self.function_infilling,
self.entity_embedding_model,
message.content,
chat_history,
)
# perform argument infilling
if len(selected_function_arg_json_schema["properties"]) > 0:
complete, arguments = await self.do_infilling(
dependencies,
message,
selected_function,
selected_function_arg_json_schema,
entities_info,
)
else:
complete, arguments = True, {}
can_autorun = autorun
if selected_function.get_confirm():
can_autorun = False
if can_autorun and complete:
# execute
async for version in self.execute_function(
selected_function, arguments, dependencies
):
yield version
return
else:
# request input
request_user_input = OpasAssistantMessage(
content=f"""\
I found the function *{selected_function.get_display_name() or selected_function.get_id()}*.
Please fill in the following parameters and I'll run it.
""", # noqa: E501
input_request=FunctionInputRequest(
name=selected_function.get_id(),
json_schema=selected_function_arg_json_schema,
arguments=arguments,
),
)
yield [request_user_input]
return
async def handle_user_input(
self,
message: OpasUserMessage,
all_functions: List[IFunction],
dependencies: Dict[str, Any],
) -> AsyncStreamVersion[List[OpasMessage]]:
if message.input_response is None:
raise ValueError("message must have input_response")
selected_function: Optional[IFunction] = None
for f in all_functions:
if f.get_id() == message.input_response.name:
selected_function = f
if selected_function is None:
raise ValueError("function not found")
async for version in self.execute_function(
selected_function, message.input_response.arguments, dependencies
):
yield version
async def convert_list_message(self, messages, message, idx):
text_context = " ".join(
content["text"]
for content in message.content
if content.get("type") == "text"
)
for i, content in enumerate(message.content):
if isinstance(content, dict) and content.get("type") == "image_url":
image_description = "Image described as {}".format(content["filename"])
if self.vision_model is not None and idx == len(messages) - 1:
image_description = image_url_to_text(
vision_model=self.vision_model,
image_url=content["image_url"],
text_context=text_context,
)
message.content[i] = {
"type": "text",
"text": image_description,
}
message.content = " ".join(
piece["text"] for piece in message.content if piece.get("type") == "text"
)
async def pre_process_messages(self, messages):
tasks = [
self.convert_list_message(messages, message, idx)
for idx, message in enumerate(messages)
if isinstance(message, OpasUserMessage)
and isinstance(message.content, list)
]
await asyncio.gather(*tasks)
return messages
async def run_chat(
self,
messages: List[OpasMessage],
autorun: bool = True,
force_select_function: Optional[str] = None,
) -> AsyncStreamVersion[List[OpasMessage]]:
last_message = messages[-1]
messages = await self.pre_process_messages(messages)
dependencies = {
"chat_history": messages,
"summarization_chat_model": self.function_summarization,
}
if not isinstance(last_message, OpasUserMessage):
raise ValueError("last message must be a user message")
if last_message.input_response is not None:
async for version in self.handle_user_input(
last_message, await self.get_all_functions(), dependencies
):
yield version
else:
async for version in self.handle_user_plaintext(
last_message,
await self.get_all_functions(),
dependencies,
autorun,
force_select_function,
):
yield version
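# A hedged end-to-end sketch (assumptions: "my_functions" is an illustrative
# local function library name, and the message construction below is only an
# example; neither comes from this module):
#
#     assistant = Assistant(libraries=["my_functions"])
#     async for messages in assistant.run_chat(
#         [OpasUserMessage(content="What can you do?")]
#     ):
#         ...  # each `messages` value is a newer version of the reply stream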
| [
"No function matching that question was found. Did you mean: PLACEHOLDER?"
] |
2024-01-10 | definitive-io/openassistants | packages~openassistants~openassistants~data_models~chat_messages.py | from typing import Annotated, Dict, List, Literal, Optional, Union
from langchain.schema.messages import BaseMessage, merge_content
from pydantic import BaseModel, Field
from openassistants.data_models.function_input import FunctionCall, FunctionInputRequest
from openassistants.data_models.function_output import FunctionOutput
class OpasUserMessage(BaseModel):
role: Literal["user"] = "user"
content: Union[str, List[Union[str, Dict]]]
input_response: Optional[FunctionCall] = Field(
default=None,
description="the user's response to an input request. "
"must satisfy the schema in the assistants input request",
)
class OpasAssistantMessage(BaseModel):
role: Literal["assistant"] = "assistant"
content: Annotated[str, Field(description="stuff like rejection messages go here")]
input_request: Optional[FunctionInputRequest] = Field(
description="a widget should be shown to the user when this is present",
default=None,
)
function_call: Optional[FunctionCall] = Field(
description="informs the client a function call is happening "
"with certain parameters. may be shown to the user",
default=None,
)
class OpasFunctionMessage(BaseModel):
role: Literal["function"] = "function"
name: str
outputs: Annotated[
List[FunctionOutput],
Field(
description="the outputs of the function. "
"can only be certain types so the client knows how to display it"
),
]
OpasMessage = Annotated[
OpasUserMessage | OpasAssistantMessage | OpasFunctionMessage,
Field(json_schema_extra={"discriminator": "role"}),
]
def ensure_alternating(chat_history: List[BaseMessage]) -> List[BaseMessage]:
"""
Ensure that the chat history alternates between user and assistant messages.
If there's a repeated role, concatenate the messages.
"""
fixed: List[BaseMessage] = []
message: BaseMessage
for i, message in enumerate(chat_history):
if i == 0:
fixed.append(message)
elif message.type == fixed[-1].type:
fixed[-1].content = merge_content(fixed[-1].content, message.content)
else:
fixed.append(message)
return fixed
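# A short illustrative sketch (the exact merged string is delegated to
# langchain's merge_content helper):
#
#     from langchain.schema.messages import AIMessage, HumanMessage
#     history = [
#         HumanMessage(content="hi"),
#         HumanMessage(content="are you there?"),
#         AIMessage(content="hello"),
#     ]
#     fixed = ensure_alternating(history)
#     # -> two messages: the consecutive human messages merged into one,
#     #    followed by the assistant reply.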
| [] |
2024-01-10 | mcai/gem5 | configs~example~gem5_library~riscv-ubuntu-run.py | # Copyright (c) 2021 The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This script shows an example of running a full system RISCV Ubuntu boot
simulation using the gem5 library. This simulation boots Ubuntu 20.04 using
2 TIMING CPU cores. The simulation ends when the startup is completed
successfully.
Usage
-----
```
scons build/RISCV/gem5.opt
./build/RISCV/gem5.opt \
configs/example/gem5_library/riscv-ubuntu-run.py
```
"""
import m5
from m5.objects import Root
from gem5.utils.requires import requires
from gem5.components.boards.riscv_board import RiscvBoard
from gem5.components.memory import DualChannelDDR4_2400
from gem5.components.processors.simple_processor import (
SimpleProcessor,
)
from gem5.components.processors.cpu_types import CPUTypes
from gem5.isas import ISA
from gem5.coherence_protocol import CoherenceProtocol
from gem5.resources.resource import Resource
# This runs a check to ensure the gem5 binary is compiled for RISCV.
requires(
isa_required=ISA.RISCV,
)
# With RISCV, we use simple caches.
from gem5.components.cachehierarchies.classic\
.private_l1_private_l2_cache_hierarchy import (
PrivateL1PrivateL2CacheHierarchy,
)
# Here we setup the parameters of the l1 and l2 caches.
cache_hierarchy = PrivateL1PrivateL2CacheHierarchy(
l1d_size="16kB",
l1i_size="16kB",
l2_size="256kB",
)
# Memory: Dual Channel DDR4 2400 DRAM device.
memory = DualChannelDDR4_2400(size="3GB")
# Here we setup the processor. We use a simple processor.
processor = SimpleProcessor(
cpu_type=CPUTypes.TIMING,
num_cores=2,
)
# Here we setup the board. The RiscvBoard allows for Full-System RISCV
# simulations.
board = RiscvBoard(
clk_freq="3GHz",
processor=processor,
memory=memory,
cache_hierarchy=cache_hierarchy,
)
# Here we set the Full System workload.
# The `set_kernel_disk_workload` function for the RiscvBoard accepts a
# RISCV bootloader and a disk image. Once the system successfully boots, it
# encounters an `m5_exit instruction encountered`. We stop the simulation then.
# When the simulation has ended you may inspect `m5out/system.pc.com_1.device`
# to see the stdout.
board.set_kernel_disk_workload(
# The RISCV bootloader will be automatically downloaded to the
# `~/.cache/gem5` directory if not already present.
# The riscv-ubuntu boot-test was tested with riscv-bootloader-5.10
kernel=Resource(
"riscv-bootloader-vmlinux-5.10",
),
# The RISCV ubuntu image will be automatically downloaded to the
# `~/.cache/gem5` directory if not already present.
disk_image=Resource(
"riscv-ubuntu-20.04-img",
),
)
root = Root(full_system=True, system=board)
m5.instantiate()
# We simulate the system till we encounter `m5_exit instruction encountered`.
exit_event = m5.simulate()
# We check whether the simulation ended with `m5_exit instruction encountered`
if exit_event.getCause() == "m5_exit instruction encountered":
# We inform the user that the boot was successful.
print("Successfully completed booting!")
else:
# `m5_exit instruction encountered` was never encountered. We exit the
# program unsuccessfully.
print("The startup was not completed successfully!",)
print(
"Exiting @ tick {} because {}."\
.format(m5.curTick(), exit_event.getCause())
)
exit(-1)
# We are done with the simulation. We exit the program now.
print(
"Exiting @ tick {} because {}."\
.format(m5.curTick(), exit_event.getCause())
)
| [] |
2024-01-10 | ConstantSun/NQL | utils~sagemaker.py | import json
from typing import Dict
from langchain.llms.sagemaker_endpoint import LLMContentHandler
class ContentHandler(LLMContentHandler):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
input_str = json.dumps({"inputs": [[{"role": "user", "content": prompt}]], **model_kwargs})
return input_str.encode("utf-8")
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
if len(response_json) == 0:
raise Exception("Response not found")
return response_json[0]['generation']['content'].strip()
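# A hedged wiring sketch (assumptions: the endpoint name and region are
# placeholders, and langchain's SagemakerEndpoint LLM is the consumer of this
# handler; neither is defined in this module):
#
#     from langchain.llms import SagemakerEndpoint
#     llm = SagemakerEndpoint(
#         endpoint_name="my-llama2-chat-endpoint",
#         region_name="us-east-1",
#         model_kwargs={"parameters": {"max_new_tokens": 512}},
#         content_handler=ContentHandler(),
#     )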
def format_sagemaker_inference_data(data):
split_key = '\n\n'
question = (data_list := data.split(split_key))[0].replace("Question:", '').strip()
query = data_list[1].replace("SQLQuery:", '').strip()
query_explanation = split_key.join(data_list[2:]).replace("Explanation:", '').strip()
return question, query, query_explanation
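# Example of the layout this parser expects (assumption: the model's answer
# always contains the three sections separated by blank lines):
#
#     data = (
#         "Question: How many users signed up?\n\n"
#         "SQLQuery: SELECT COUNT(*) FROM users\n\n"
#         "Explanation: Counts every row in the users table."
#     )
#     question, query, explanation = format_sagemaker_inference_data(data)
#     # question == "How many users signed up?"
#     # query == "SELECT COUNT(*) FROM users"
#     # explanation == "Counts every row in the users table."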
| [] |
2024-01-10 | MrBread13/Stage-2022-2023-EXO-POP | gpt-prompt~get_complete_act_labels.py | import json
import openai
import Levenshtein
from time import sleep
import os
openai.organization = os.environ.get("OPENAI_ORG_KEY")
openai.api_key = os.environ.get("OPENAI_API_KEY")
#intro sequence:
intro = "Nous allons te fournir un certificat de mariage, un document ayant toujours la même mise en forme.Tu vas devoir procéder à l’extraction de certaines données sur plusieurs certificats ensuite. Voici le premier certificat, je précise qu’il est extrait d’un document au format Json et que tu auras toutes les réponses fournies à la fin, cela te permettra de mieux reconnaître ce qu’il te faut obtenir dans les contrats suivants. "
# An example act:
act_example = """Le huit avril mil neuf cent quarante, onze heures cinq minutes ****\ndevant Nous ont comparu publiquement en la maison commune: Camille Marcel MOUROT, barman, né à Dijon\nCôte d'Or * le dix-huit février mil neuf cent onze * vingt-neuf ans, domicilié à Paris, 17, rue\nPierre Lescot, actuellement aux armées; fils de Emile MOUROT et de Pauline MONIOT, époux décédés,\nd'une part ,/- ET Jeanne BLONDEAU, sans profession, née à Jars * Cher * le seize mars mil neuf cent\nneuf, trente et un ans, domiciliée à Paris, 17, rue Pierre Lescot; fille de Emile Joseph BLONDEAU\net de Marie Louise MELOT, son épouse, marchands de bestiaux, domiciliés à Vailly sur Saône * Cher *\nDivorcée de William Louis Paul BERNON, d'autre part ;- Les futurs époux déclarent quil n'a pas été\nfait de contrat de mariage .- Camille Marcel MOUROT et Jeanne BLONDEAU ont déclaré l'un après l'autre\nvouloir se prendre pour époux et Nous avons prononcé au nom de la loi qu'ils sont unis par le mariage.\nEn présence de: Emile SOLIGNAC, commerçant, Médaillé militaire, Croix de Guerre, 16 bis, rue\nLauriston, et de Marcelle MOUROT, vendeuse, 22, rue de l'Echiquier, témoins majeurs, qui, lecture\nfaite ont signé avec les époux et Nous, Pierre Louis Adolphe BERTRAND, Maire du Premier arrondisse-\nment de Paris, Chevalier de la Légion d'Honneur ./ \n
{"Jour-mariage": "huit",
"Mois-mariage": "avril",
"Annee-mariage": "mil neuf cent quarante",
"Heure-mariage": "onze heures",
"Minute-mariage": "cinq minutes",
"Prenom-adjoint-maire": "Pierre Louis Adolphe",
"Nom-adjoint-maire": "BERTRAND",
"Ville-mariage": "Premier arrondisse-\nment de Paris",
"Prenom-mari": "Camille Marcel",
"Nom-mari": "MOUROT",
"Profession-mari": "barman",
"Ville-naissance-mari": "Dijon\nCôte d'Or",
"Departement-naissance-mari": "",
"Pays-naissance-mari": "",
"Jour-naissance-mari": "dix-huit",
"Mois-naissance-mari": "février",
"Annee-naissance-mari": "mil neuf cent onze",
"Age-mari": "vingt-neuf ans",
"Ville-residence-mari": "Paris",
"Departement-residence-mari": "",
"Pays-residence-mari": "",
"Numero-rue-residence-mari": "17",
"Type-rue-residence-mari": "rue",
"Nom-rue-residence-mari": "Pierre Lescot",
"Prenom-pere-mari": "Emile",
"Nom-pere-mari": "MOUROT",
"Profession-pere-mari": "",
"Ville-residence-pere-mari": "",
"Departement-residence-pere-mari": "",
"Numero-residence-pere-mari": "",
"Type-rue-residence-pere-mari": "",
"Nom-rue-residence-pere-mari": "",
"Prenom-mere-mari": "Pauline",
"Nom-mere-mari": "MONIOT",
"Profession-mere-mari": "",
"Ville-residence-mere-mari": "",
"Departement-residence-mere-mari": "",
"Pays-residence-mere-mari": "",
"Numero-rue-residence-mere-mari": "",
"Type-rue-residence-mere-mari": "",
"Nom-rue-residence-mere-mari": "",
"Prenom-mariee": "Jeanne",
"Nom-mariee": "BLONDEAU",
"Profession-mariee": "sans profession",
"Ville-naissance-mariee": "Jars",
"Departement-naissance-mariee": "Cher",
"Pays-naissance-mariee": "",
"Jour-naissance-mariee": "seize",
"Mois-naissance-mariee": "mars",
"Annee-naissance-mariee": "mil neuf cent\nneuf",
"Age-mariee": "trente et un ans",
"Ville-residence-mariee": "Paris",
"Departement-residence-mariee": "Cher",
"Pays-residence-mariee": "",
"Numero-rue-residence-mariee": "17",
"Type-rue-residence-mariee": "rue",
"Nom-rue-residence-mariee": "Pierre Lescot",
"Prenom-pere-mariee": "Emile Joseph",
"Nom-pere-mariee": "BLONDEAU",
"Profession-pere-mariee": "",
"Ville-residence-pere-mariee": "",
"Departement-residence-pere-mariee": "",
"Numero-residence-pere-mariee": "",
"Type-rue-residence-pere-mariee": "",
"Nom-rue-residence-pere-mariee": "",
"Prenom-mere-mariee": "Marie Louise",
"Nom-mere-mariee": "MELOT",
"Profession-mere-mariee": "marchands de bestiaux",
"Ville-residence-mere-mariee": "",
"Departement-residence-mere-mariee": "",
"Pays-residence-mere-mariee": "",
"Numero-rue-residence-mere-mariee": "",
"Type-rue-residence-mere-mariee": "",
"Nom-rue-residence-mere-mariee": "",
"Prenom-ex-epoux": "William Louis Paul",
"Nom-ex-epoux": "BERNON",
"Prenom-temoin-0": "Emile",
"Nom-temoin-0": "SOLIGNAC",
"Profession-temoin-0": "commerçant",
"Age-temoin-0": "",
"Numero-rue-residence-temoin-0": "16 bis",
"Type-rue-residence-temoin-0": "rue",
"Nom-rue-residence-temoin-0": "Lauriston",
"Ville-residence-temoin": "",
"Departement-residence-temoin": "",
"Prenom-temoin-1": "Marcelle",
"Nom-temoin-1": "MOUROT",
"Profession-temoin-1": "vendeuse",
"Numero-rue-residence-temoin-1": "22",
"Type-rue-residence-temoin-1": "rue",
"Nom-rue-residence-temoin-1": "de l'Echiquier",
"Nom-ex-epouse" :"",
"Prenom-ex-epouse" :""
}
"""
question=""" Maintenant, voici un autre certificat de mariage : je veux que tu m'extrais des données sous la meme forme que les réponses que je t'ai fourni. Précision : compte les arrondissement comme une ville."""
def labels_from_act(act_text : str) -> dict :
"""
Extract the labels from a marriage act.
input: act_text: text of the marriage act
output: dictionary of labels
"""
prompt= intro + act_example + question + act_text
#try to get an answer and catch the error if the model doesn't answer or answer with an error. Retry 3 times
for i in range(3):
try:
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
temperature=0.8,
messages=[
{"role": "user", "content": prompt,},
]
)
break
except:
completion = None
print("Error while getting answer. Retry in 5 seconds")
sleep(5)
continue
if completion is None:
print("Error while getting answer. Returning...")
return None
answer = completion.choices[0].message['content']
answer = answer.replace('\n', '').replace('.','')
#remove quote around comma
answer = answer.replace('\',', '",')
answer = answer.replace(',\'', ',"')
#remove quote around space
answer = answer.replace(' \'', ' "')
answer = answer.replace('\' ', '" ')
#remove quote around colon
answer = answer.replace('\':', '":')
answer = answer.replace(':\'', ':"')
#remove quote around {}
answer = answer.replace('{\'', '{"')
answer = answer.replace('\'}', '"}')
#remove \n and -\n
answer = answer.replace('-\\n', '')
answer = answer.replace('\\n', ' ')
#replace Prenom-du-maire with Prenom-adjoint-maire
#answer = answer.replace('Prenom-maire', 'Prenom-adjoint-maire')
#replace Nom-du-maire with Nom-adjoint-maire
#answer = answer.replace('Nom-maire', 'Nom-adjoint-maire')
#remplacer les apostrophes par des guillemets
answer = answer.replace("\\'", "\'")
#print(answer)
answer = answer[answer.index('{'):]
#print(f'answer : {answer}')
answer = json.loads(answer)
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
print('begin')
act_text = """ neuf avril mil neuf cent quarante * onze heures vingt minutes ****\ndevant Nous ont comparu publiquement en la maison commune: Antoine POCARD, porteur aux halles, né à\nParis, dixième arrondissement, le cinq février mil neuf cent un, trente-neuf ans, domicilié à Paris\n56, rue Saint Honoré; actuellement mobilisé- fils de Jeanne Marie POCARD- Veuf de Juliette **\nAlbertine GAYRARD, d'une part ,/- ET Adrienne Jeanne ALBRIEUX, journalière, née à Paris, onzième\narrondissement, le onze septembre mil neuf cent deux, trente-sept ans; domiciliée à Paris, 56, rue\nSaint Honoré; fille de Marie Charles ALBRIEUX, sans profession, domicilié à Aulnay sous Bois * Sein\net Oise * et de Marguerite TERLES, décédée, d'autre part ;- Les futurs époux déclarent qu'il n'a\npas été fait de contrat de mariage .- Antoine POCARD et Adrienne Jeanne ALBRIEUX ont déclaré l'un\naprès l'autre vouloir se prendre pour époux et Nous avons prononcé au nom de la loi qu'ils sont\nunis par le mariage .- En présence de: Fernande PASTEAU, concierge, 13, rue Dussoubs, et de\nAmélie MASSONIE, ménagère, 10 rue Volta, témoins majeurs, qui, lecture farte ont signé avec\nles époux et Nous, Charles Louis TOURY, Officier de l'état-civil du premier arrondissement de\nParis, Chevalier de la Légion d'Honneur ./"""
labels = labels_from_act(act_text)
print('end')
# Check that the extracted labels match those from the marriage act:
ref = {
"Jour-mariage": "neuf",
"Mois-mariage": "avril",
"Annee-mariage": "mil neuf cent quarante",
"Heure-mariage": "onze heures",
"Minute-mariage": "vingt minutes",
"Prenom-adjoint-maire": "Charles Louis",
"Nom-adjoint-maire": "TOURY",
"Ville-mariage": "premier arrondissement de\nParis",
"Prenom-mari": "Antoine",
"Nom-mari": "POCARD",
"Profession-mari": "porteur aux halles",
"Ville-naissance-mari": "Paris, dixième arrondissement",
"Departement-naissance-mari": "",
"Pays-naissance-mari": "",
"Jour-naissance-mari": "cinq",
"Mois-naissance-mari": "février",
"Annee-naissance-mari": "mil neuf cent un",
"Age-mari": "trente-neuf ans",
"Ville-residence-mari": "Paris",
"Departement-residence-mari": "",
"Pays-residence-mari": "",
"Numero-rue-residence-mari": "56",
"Type-rue-residence-mari": "rue",
"Nom-rue-residence-mari": "Saint Honoré",
"Prenom-pere-mari": "",
"Nom-pere-mari": "",
"Profession-pere-mari": "",
"Ville-residence-pere-mari": "",
"Departement-residence-pere-mari": "",
"Numero-residence-pere-mari": "",
"Type-rue-residence-pere-mari": "",
"Nom-rue-residence-pere-mari": "",
"Prenom-mere-mari": "Jeanne Marie",
"Nom-mere-mari": "POCARD-",
"Profession-mere-mari": "",
"Ville-residence-mere-mari": "",
"Departement-residence-mere-mari": "",
"Pays-residence-mere-mari": "",
"Numero-rue-residence-mere-mari": "",
"Type-rue-residence-mere-mari": "",
"Nom-rue-residence-mere-mari": "",
"Prenom-mariee": "Adrienne Jeanne",
"Nom-mariee": "ALBRIEUX",
"Profession-mariee": "journalière",
"Ville-naissance-mariee": "Paris, onzième\narrondissement",
"Departement-naissance-mariee": "",
"Pays-naissance-mariee": "",
"Jour-naissance-mariee": "onze",
"Mois-naissance-mariee": "septembre",
"Annee-naissance-mariee": "mil neuf cent deux",
"Age-mariee": "trente-sept ans",
"Ville-residence-mariee": "Paris",
"Departement-residence-mariee": "",
"Pays-residence-mariee": "",
"Numero-rue-residence-mariee": "56",
"Type-rue-residence-mariee": "rue",
"Nom-rue-residence-mariee": "Saint Honoré",
"Prenom-pere-mariee": "Marie Charles",
"Nom-pere-mariee": "ALBRIEUX",
"Profession-pere-mariee": "sans profession",
"Ville-residence-pere-mariee": "Aulnay sous Bois",
"Departement-residence-pere-mariee": "Sein \net Oise",
"Numero-residence-pere-mariee": "",
"Type-rue-residence-pere-mariee": "",
"Nom-rue-residence-pere-mariee": "",
"Prenom-mere-mariee": "Marguerite",
"Nom-mere-mariee": "TERLES",
"Profession-mere-mariee": "",
"Ville-residence-mere-mariee": "",
"Departement-residence-mere-mariee": "",
"Pays-residence-mere-mariee": "",
"Numero-rue-residence-mere-mariee": "",
"Type-rue-residence-mere-mariee": "",
"Nom-rue-residence-mere-mariee": "",
"Prenom-de-l'ex-époux": "",
"Nom-de-l'ex-époux": "",
"Prenom-temoin-0": "Fernande",
"Nom-temoin-0": "PASTEAU",
"Profession-temoin-0": "concierge",
"Age-temoin": "",
"Numero-rue-residence-temoin-0": "13",
"Type-rue-residence-temoin-0": "rue",
"Nom-rue-residence-temoin-0": "Dussoubs",
"Ville-residence-temoin": "",
"Departement-residence-temoin": "",
"Prenom-temoin-1": "Amélie",
"Nom-temoin-1": "MASSONIE",
"Profession-temoin-1": "ménagère",
"Numero-rue-residence-temoin-1": "10",
"Type-rue-residence-temoin-1": "rue",
"Nom-rue-residence-temoin-1": "Volta"
}
distances = 0
for key in ref.keys():
distance = Levenshtein.distance(labels[key], ref[key])
if distance > 0:
print(key, distance, labels[key] if labels[key] != '' else 'VIDE', ref[key] if ref[key] != '' else 'VIDE')
distances += distance
print(distances)
| [
"intro33300617-243a-4c2d-8730-53d762853a71act_example33300617-243a-4c2d-8730-53d762853a71question33300617-243a-4c2d-8730-53d762853a71 neuf avril mil neuf cent quarante * onze heures vingt minutes ****\ndevant Nous ont comparu publiquement en la maison commune: Antoine POCARD, porteur aux halles, né à\nParis, dixième arrondissement, le cinq février mil neuf cent un, trente-neuf ans, domicilié à Paris\n56, rue Saint Honoré; actuellement mobilisé- fils de Jeanne Marie POCARD- Veuf de Juliette **\nAlbertine GAYRARD, d'une part ,/- ET Adrienne Jeanne ALBRIEUX, journalière, née à Paris, onzième\narrondissement, le onze septembre mil neuf cent deux, trente-sept ans; domiciliée à Paris, 56, rue\nSaint Honoré; fille de Marie Charles ALBRIEUX, sans profession, domicilié à Aulnay sous Bois * Sein\net Oise * et de Marguerite TERLES, décédée, d'autre part ;- Les futurs époux déclarent qu'il n'a\npas été fait de contrat de mariage .- Antoine POCARD et Adrienne Jeanne ALBRIEUX ont déclaré l'un\naprès l'autre vouloir se prendre pour époux et Nous avons prononcé au nom de la loi qu'ils sont\nunis par le mariage .- En présence de: Fernande PASTEAU, concierge, 13, rue Dussoubs, et de\nAmélie MASSONIE, ménagère, 10 rue Volta, témoins majeurs, qui, lecture farte ont signé avec\nles époux et Nous, Charles Louis TOURY, Officier de l'état-civil du premier arrondissement de\nParis, Chevalier de la Légion d'Honneur ./",
"Nous allons te fournir un certificat de mariage, un document ayant toujours la même mise en forme.Tu vas devoir procéder à l’extraction de certaines données sur plusieurs certificats ensuite. Voici le premier certificat, je précise qu’il est extrait d’un document au format Json et que tu auras toutes les réponses fournies à la fin, cela te permettra de mieux reconnaître ce qu’il te faut obtenir dans les contrats suivants. Le huit avril mil neuf cent quarante, onze heures cinq minutes ****\ndevant Nous ont comparu publiquement en la maison commune: Camille Marcel MOUROT, barman, né à Dijon\nCôte d'Or * le dix-huit février mil neuf cent onze * vingt-neuf ans, domicilié à Paris, 17, rue\nPierre Lescot, actuellement aux armées; fils de Emile MOUROT et de Pauline MONIOT, époux décédés,\nd'une part ,/- ET Jeanne BLONDEAU, sans profession, née à Jars * Cher * le seize mars mil neuf cent\nneuf, trente et un ans, domiciliée à Paris, 17, rue Pierre Lescot; fille de Emile Joseph BLONDEAU\net de Marie Louise MELOT, son épouse, marchands de bestiaux, domiciliés à Vailly sur Saône * Cher *\nDivorcée de William Louis Paul BERNON, d'autre part ;- Les futurs époux déclarent quil n'a pas été\nfait de contrat de mariage .- Camille Marcel MOUROT et Jeanne BLONDEAU ont déclaré l'un après l'autre\nvouloir se prendre pour époux et Nous avons prononcé au nom de la loi qu'ils sont unis par le mariage.\nEn présence de: Emile SOLIGNAC, commerçant, Médaillé militaire, Croix de Guerre, 16 bis, rue\nLauriston, et de Marcelle MOUROT, vendeuse, 22, rue de l'Echiquier, témoins majeurs, qui, lecture\nfaite ont signé avec les époux et Nous, Pierre Louis Adolphe BERTRAND, Maire du Premier arrondisse-\nment de Paris, Chevalier de la Légion d'Honneur ./ \n \n {\"Jour-mariage\": \"huit\",\n \"Mois-mariage\": \"avril\",\n \"Annee-mariage\": \"mil neuf cent quarante\",\n \"Heure-mariage\": \"onze heures\",\n \"Minute-mariage\": \"cinq minutes\",\n \"Prenom-adjoint-maire\": \"Pierre Louis Adolphe\",\n \"Nom-adjoint-maire\": \"BERTRAND\",\n \"Ville-mariage\": \"Premier arrondisse-\nment de Paris\",\n \"Prenom-mari\": \"Camille Marcel\",\n \"Nom-mari\": \"MOUROT\",\n \"Profession-mari\": \"barman\",\n \"Ville-naissance-mari\": \"Dijon\nCôte d'Or\",\n \"Departement-naissance-mari\": \"\",\n \"Pays-naissance-mari\": \"\",\n \"Jour-naissance-mari\": \"dix-huit\",\n \"Mois-naissance-mari\": \"février\",\n \"Annee-naissance-mari\": \"mil neuf cent onze\",\n \"Age-mari\": \"vingt-neuf ans\",\n \"Ville-residence-mari\": \"Paris\",\n \"Departement-residence-mari\": \"\",\n \"Pays-residence-mari\": \"\",\n \"Numero-rue-residence-mari\": \"17\",\n \"Type-rue-residence-mari\": \"rue\",\n \"Nom-rue-residence-mari\": \"Pierre Lescot\",\n \"Prenom-pere-mari\": \"Emile\",\n \"Nom-pere-mari\": \"MOUROT\",\n \"Profession-pere-mari\": \"\",\n \"Ville-residence-pere-mari\": \"\",\n \"Departement-residence-pere-mari\": \"\",\n \"Numero-residence-pere-mari\": \"\",\n \"Type-rue-residence-pere-mari\": \"\",\n \"Nom-rue-residence-pere-mari\": \"\",\n \"Prenom-mere-mari\": \"Pauline\",\n \"Nom-mere-mari\": \"MONIOT\",\n \"Profession-mere-mari\": \"\",\n \"Ville-residence-mere-mari\": \"\",\n \"Departement-residence-mere-mari\": \"\",\n \"Pays-residence-mere-mari\": \"\",\n \"Numero-rue-residence-mere-mari\": \"\",\n \"Type-rue-residence-mere-mari\": \"\",\n \"Nom-rue-residence-mere-mari\": \"\",\n \"Prenom-mariee\": \"Jeanne\",\n \"Nom-mariee\": \"BLONDEAU\",\n \"Profession-mariee\": \"sans profession\",\n \"Ville-naissance-mariee\": \"Jars\",\n 
\"Departement-naissance-mariee\": \"Cher\",\n \"Pays-naissance-mariee\": \"\",\n \"Jour-naissance-mariee\": \"seize\",\n \"Mois-naissance-mariee\": \"mars\",\n \"Annee-naissance-mariee\": \"mil neuf cent\nneuf\",\n \"Age-mariee\": \"trente et un ans\",\n \"Ville-residence-mariee\": \"Paris\",\n \"Departement-residence-mariee\": \"Cher\",\n \"Pays-residence-mariee\": \"\",\n \"Numero-rue-residence-mariee\": \"17\",\n \"Type-rue-residence-mariee\": \"rue\",\n \"Nom-rue-residence-mariee\": \"Pierre Lescot\",\n \"Prenom-pere-mariee\": \"Emile Joseph\",\n \"Nom-pere-mariee\": \"BLONDEAU\",\n \"Profession-pere-mariee\": \"\",\n \"Ville-residence-pere-mariee\": \"\",\n \"Departement-residence-pere-mariee\": \"\",\n \"Numero-residence-pere-mariee\": \"\",\n \"Type-rue-residence-pere-mariee\": \"\",\n \"Nom-rue-residence-pere-mariee\": \"\",\n \"Prenom-mere-mariee\": \"Marie Louise\",\n \"Nom-mere-mariee\": \"MELOT\",\n \"Profession-mere-mariee\": \"marchands de bestiaux\",\n \"Ville-residence-mere-mariee\": \"\",\n \"Departement-residence-mere-mariee\": \"\",\n \"Pays-residence-mere-mariee\": \"\",\n \"Numero-rue-residence-mere-mariee\": \"\",\n \"Type-rue-residence-mere-mariee\": \"\",\n \"Nom-rue-residence-mere-mariee\": \"\",\n \"Prenom-ex-epoux\": \"William Louis Paul\",\n \"Nom-ex-epoux\": \"BERNON\",\n \"Prenom-temoin-0\": \"Emile\",\n \"Nom-temoin-0\": \"SOLIGNAC\",\n \"Profession-temoin-0\": \"commerçant\",\n \"Age-temoin-0\": \"\",\n \"Numero-rue-residence-temoin-0\": \"16 bis\",\n \"Type-rue-residence-temoin-0\": \"rue\",\n \"Nom-rue-residence-temoin-0\": \"Lauriston\",\n \"Ville-residence-temoin\": \"\",\n \"Departement-residence-temoin\": \"\",\n \"Prenom-temoin-1\": \"Marcelle\",\n \"Nom-temoin-1\": \"MOUROT\",\n \"Profession-temoin-1\": \"vendeuse\",\n \"Numero-rue-residence-temoin-1\": \"22\",\n \"Type-rue-residence-temoin-1\": \"rue\",\n \"Nom-rue-residence-temoin-1\": \"de l'Echiquier\",\n \"Nom-ex-epouse\" :\"\",\n \"Prenom-ex-epouse\" :\"\"\n }\n Maintenant, voici un autre certificat de mariage : je veux que tu m'extrais des données sous la meme forme que les réponses que je t'ai fourni. Précision : compte les arrondissement comme une ville. neuf avril mil neuf cent quarante * onze heures vingt minutes ****\ndevant Nous ont comparu publiquement en la maison commune: Antoine POCARD, porteur aux halles, né à\nParis, dixième arrondissement, le cinq février mil neuf cent un, trente-neuf ans, domicilié à Paris\n56, rue Saint Honoré; actuellement mobilisé- fils de Jeanne Marie POCARD- Veuf de Juliette **\nAlbertine GAYRARD, d'une part ,/- ET Adrienne Jeanne ALBRIEUX, journalière, née à Paris, onzième\narrondissement, le onze septembre mil neuf cent deux, trente-sept ans; domiciliée à Paris, 56, rue\nSaint Honoré; fille de Marie Charles ALBRIEUX, sans profession, domicilié à Aulnay sous Bois * Sein\net Oise * et de Marguerite TERLES, décédée, d'autre part ;- Les futurs époux déclarent qu'il n'a\npas été fait de contrat de mariage .- Antoine POCARD et Adrienne Jeanne ALBRIEUX ont déclaré l'un\naprès l'autre vouloir se prendre pour époux et Nous avons prononcé au nom de la loi qu'ils sont\nunis par le mariage .- En présence de: Fernande PASTEAU, concierge, 13, rue Dussoubs, et de\nAmélie MASSONIE, ménagère, 10 rue Volta, témoins majeurs, qui, lecture farte ont signé avec\nles époux et Nous, Charles Louis TOURY, Officier de l'état-civil du premier arrondissement de\nParis, Chevalier de la Légion d'Honneur ./"
] |
2024-01-10 | MrBread13/Stage-2022-2023-EXO-POP | gpt-prompt~get_paragraph_labels_16k.py | import json
import openai
import Levenshtein
import itertools
from time import sleep
import os
openai.organization = os.environ.get("OPENAI_ORG_KEY")
openai.api_key = os.environ.get("OPENAI_API_KEY")
def read_json(filename):
with open(filename, "r") as f:
return json.load(f)
def split_examples(examples : dict, index : int):
#return the example at the given index and a dict containing all the other examples
return examples[str(index)], {k: v for k, v in examples.items() if k != str(index)}
def get_example_prompt(examples, paragraph_index):
prompt = [{"role": "system", "content": "Read the French Mariage Acts input by the user, then answer using a JSON to extract named entities in the act. Always use the same JSON keys. Beware of plurals. Parents can have the same job. They can also live with their child ('avec ses père et mère', 'avec sa mère', 'avec son père'). Do not answer with anything else that what is in the text. Pay attention to cities, departments and countries."}]
for i, example in enumerate(examples):
prompt.append({"role": "user", "content": str(examples[example]['text'][paragraph_index])})
#print(examples[example]['labels'])
prompt.append({"role": "assistant", "content": str(examples[example]['labels'][paragraph_index])})
return prompt
def make_prompt(example_prompt, paragraph):
#make the prompt
prompt = example_prompt
prompt += f"Question:\n {paragraph}\n"
#print(f"{paragraph}\nLabels:")
#prompt += "template:\n"
#prompt += f"{template[paragraph_index]}\n"
prompt += ("Labels:\n")
#print(prompt)
#print(prompt)
return prompt
def get_answer(prompt):
#try to get an answer and catch the error if the model doesn't answer or answer with an error. Retry 3 times
print("Getting answer...")
for i in range(3):
try:
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
temperature=0.4,
messages=prompt
)
break
except:
completion = None
print("Error while getting answer. Retry in 5 seconds")
sleep(5)
continue
if completion is None:
print("Error while getting answer. Returning...")
return {}
answer = completion.choices[0].message['content']
#print("Raw answer : ", answer)
answer = answer.replace('\n', '').replace('.','')
# #remove quote around comma
# answer = answer.replace('\',', '",')
# answer = answer.replace(',\'', ',"')
# #remove quote around space
# answer = answer.replace(' \'', ' "')
# #answer = answer.replace('\' ', '" ')
# #remove quote around colon
# answer = answer.replace('\':', '":')
# answer = answer.replace(':\'', ':"')
# #remove quote around {}
# answer = answer.replace('{\'', '{"')
# answer = answer.replace('\'}', '"}')
#remove \n and -\n
answer = answer.replace('-\\n', '')
answer = answer.replace('\\n', ' ')
answer = answer.replace('"', '\'')
answer = answer.replace('{\'', '{"')
answer = answer.replace('{ \'', '{ "')
answer = answer.replace('\'}', '"}')
answer = answer.replace('\' }', '" }')
answer = answer.replace('\':', '":')
answer = answer.replace('\' :', '" :')
answer = answer.replace(':\'', ':"')
answer = answer.replace(': \'', ': "')
answer = answer.replace('\',', '",')
answer = answer.replace('\' ,', '" ,')
answer = answer.replace(',\'', ',"')
answer = answer.replace(', \'', ', "')
#replace Prenom-du-maire with Prenom-adjoint-maire
answer = answer.replace('Prenom-maire', 'Prenom-adjoint-maire')
#replace Nom-du-maire with Nom-adjoint-maire
answer = answer.replace('Nom-maire', 'Nom-adjoint-maire')
#remplacer les apostrophes par des guillemets
answer = answer.replace("\\'", "\'")
#print(answer)
answer = answer[answer.index('{'):]
print(f'answer : {answer}')
answer = json.loads(answer)
#print("Answer : ", answer)
return answer
def get_labels(text):
examples = read_json("paragraphs_labels_examples.json")
labels_dict = {}
for _ , paragraph_index in enumerate(text):
print(text[paragraph_index])
prompt = get_example_prompt(examples, paragraph_index)
if text[paragraph_index] == '':
labels = {}
else :
prompt.append({"role": "user", "content": f"{text[paragraph_index]}"})
print(f"==========================Paragraph {paragraph_index}==========================")
#print("Prompt : ", prompt)
labels = get_answer(prompt)
#print(labels)
#rebuild a dictionnary with paragraph_index : labels
labels_dict[paragraph_index] = labels
return labels_dict
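# A minimal usage sketch (assumption: the act has already been split into the
# five paragraphs used throughout this project, keyed "p1" to "p5", and
# paragraphs_labels_examples.json is present in the working directory):
#
#     paragraphs = {"p1": "Le huit avril mil neuf cent quarante ...", "p2": "",
#                   "p3": "", "p4": "", "p5": ""}
#     labels = get_labels(paragraphs)
#     # -> {"p1": {...extracted labels...}, "p2": {}, "p3": {}, ...}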
| [
"Read the French Mariage Acts input by the user, then answer using a JSON to extract named entities in the act. Always use the same JSON keys. Beware of plurals. Parents can have the same job. They can also live with their child ('avec ses père et mère', 'avec sa mère', 'avec son père'). Do not answer with anything else that what is in the text. Pay attention to cities, departments and countries.",
"Labels:\n",
"Question:\n PLACEHOLDER\n"
] |
2024-01-10 | MrBread13/Stage-2022-2023-EXO-POP | gpt-prompt~split_paragraph_16k.py | import json
import openai
from time import sleep
import os
openai.organization = os.environ.get("OPENAI_ORG_KEY")
openai.api_key = os.environ.get("OPENAI_API_KEY")
file = open("splitting_examples.json", "r")
data = json.load(file)
file.close()
exemple= data["0"]
paragraphs = exemple["text"]
base = exemple["base"]
exemple_incomplet = "onze mai mil neuf cent quinze, vingt-quatre ans, domiciliée à Paris, 7, rue des Prêcheurs *\nfille de Charles Célestin Edouard LEPAGE, décédé, t de Pauline Désirée Germain VAPPEREAU * sa\nveuve * matelassière * domiciliée 7, rue des Prêcheurs, d'autre part Les futurs époux déclarent\nqu'il n'a pas été fait de contrat de mariage .- Marius Fernand VIDAL et Charlotte Lucie LEPAGE ont\ndécalé l'un après l'autre vouloir se prendre pour époux et Nous avons prononcé au nom de la\nloi qu'ils sont unis par le mariage En présence de: Charles GAUTHIER * employé, 4, Place du\nLouvre et de Pauline LEPAGE, matelassière * 7, rue des Prêcheurs * témoins majeurs, qui, lecture\nfaite, ont signé avec les époux et Nous, Charles Louis TOURY * Officier de l'état-civil du premier\narrondissement de Paris, Chevalier de la Légion d'Honneur ./."
paragraphes_incomplets = {
"p1": "",
"p2": "",
"p3": "onze mai mil neuf cent quinze, vingt-quatre ans, domiciliée à Paris, 7, rue des Prêcheurs *\nfille de Charles Célestin Edouard LEPAGE, décédé, t de Pauline Désirée Germain VAPPEREAU * sa\nveuve * matelassière * domiciliée 7, rue des Prêcheurs, d'autre part",
"p4": "Les futurs époux déclarent\nqu'il n'a pas été fait de contrat de mariage .- Marius Fernand VIDAL et Charlotte Lucie LEPAGE ont\ndécalé l'un après l'autre vouloir se prendre pour époux et Nous avons prononcé au nom de la\nloi qu'ils sont unis par le mariage",
"p5": "En présence de: Charles GAUTHIER * employé, 4, Place du\nLouvre et de Pauline LEPAGE, matelassière * 7, rue des Prêcheurs * témoins majeurs, qui, lecture\nfaite, ont signé avec les époux et Nous, Charles Louis TOURY * Officier de l'état-civil du premier\narrondissement de Paris, Chevalier de la Légion d'Honneur"
}
exemple_incomplet_debut = "Le trente mars mil neuf cent quarante * onze heures cinq minutes ****\ndevant Nous ont comparu publiquement en la maison commune: Ermel Eug\u00e8ne LUCAS * imprimeur n\u00e9 \u00e0 Saint\nChristophe du Bois * Maine et Loire * le dix octobre mil neuf cent, trente-neuf ans, domicili\u00e9 \u00e0 Cholet\nMaine et Loir * 62, rue de Lorraine * actuellement aux arm\u00e9es; fils de Auguste Maximin LUCAS * d\u00e9c\u00e9d\u00e9, et\nde Marie Louise CHARBONNEAU, sa veuve, sans profession, domicili\u00e9e \u00e0 Cholet Divorc\u00e9 de Micheline\nHenriette SEROT, d'une part ,/- ET Am\u00e9lie Eug\u00e9nie LE GALLE, journali\u00e8re, n\u00e9e \u00e0 Le Palais * Morbihan *\nle quinze octobre mil neuf cent dix, vingt-neuf ans, domicili\u00e9e \u00e0 Paris, 13, rue de la Ferronnerie;\nfille de Joachim Marie LE GALLE, d\u00e9c\u00e9d\u00e9, et de Marie France"
paragraphes_incomplets_debut = {
"p1" : "Le trente mars mil neuf cent quarante * onze heures cinq minutes ****\ndevant Nous ont comparu publiquement en la maison commune:",
"p2" : "Ermel Eug\u00e8ne LUCAS * imprimeur n\u00e9 \u00e0 Saint\nChristophe du Bois * Maine et Loire * le dix octobre mil neuf cent, trente-neuf ans, domicili\u00e9 \u00e0 Cholet\nMaine et Loir * 62, rue de Lorraine * actuellement aux arm\u00e9es; fils de Auguste Maximin LUCAS * d\u00e9c\u00e9d\u00e9, et\nde Marie Louise CHARBONNEAU, sa veuve, sans profession, domicili\u00e9e \u00e0 Cholet Divorc\u00e9 de Micheline\nHenriette SEROT, d'une part ,/-",
"p3" : "ET Am\u00e9lie Eug\u00e9nie LE GALLE, journali\u00e8re, n\u00e9e \u00e0 Le Palais * Morbihan *\nle quinze octobre mil neuf cent dix, vingt-neuf ans, domicili\u00e9e \u00e0 Paris, 13, rue de la Ferronnerie;\nfille de Joachim Marie LE GALLE, d\u00e9c\u00e9d\u00e9, et de Marie France",
"p4" : "",
"p5" : ""
}
def split_text(text_to_split):
for i in range(3):
for i in range(3):
try:
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
temperature=0.4,
messages=[
{"role": "system", "content": "The user will provide you with French Mariage acts. You must always split these acts into 5 paragraphs. If a paragraphs seems to be missing, answer with an empty paragraph. The parapgrahs are : p1 = Date et heure maison commune\n p2 = Le mari et son entourage\n p3 = La mariée et son entourage\n p4 = Présence d'un acte de mariage, baiser et union des partenaires\n p5 = 'En présence de ...', Les informations concernant les témoins et l'adjoint au maire.\n\n"},
{"role": "user", "content" : data["0"]["base"]},
{"role": "assistant", "content": json.dumps(data["0"]["text"], indent=4)},
{"role": "user", "content" : data["1"]["base"]},
{"role": "assistant", "content": json.dumps(data["1"]["text"], indent=4)},
{"role": "user", "content": exemple_incomplet},
{"role": "assistant", "content": json.dumps(paragraphes_incomplets, indent=4)},
{"role": "user", "content": exemple_incomplet_debut},
{"role": "assistant", "content": json.dumps(paragraphes_incomplets_debut, indent=4)},
{"role": "user", "content" : text_to_split}
]
)
break
except:
completion = None
print("Error while getting answer. Retry in 5 seconds")
sleep(5)
continue
if completion is None:
print("Error while getting answer. Returning...")
return {}
#check if keys are p1, p2, p3, p4, p5 and if values are not empty or ''
answer = completion.choices[0].message['content']
answer = answer.replace('\n', '').replace('.','')
answer = answer.replace('-\\n', '')
answer = answer.replace('\\n', ' ')
answer = answer.replace('"', '\'')
answer = answer.replace("'p1'", '"p1"')
answer = answer.replace("'p2'", '"p2"')
answer = answer.replace("'p3'", '"p3"')
answer = answer.replace("'p4'", '"p4"')
answer = answer.replace("'p5'", '"p5"')
answer = answer.replace('{\'', '{"')
answer = answer.replace('{ \'', '{ "')
answer = answer.replace('\'}', '"}')
answer = answer.replace('\' }', '" }')
answer = answer.replace('\':', '":')
answer = answer.replace('\' :', '" :')
answer = answer.replace(':\'', ':"')
answer = answer.replace(': \'', ': "')
answer = answer.replace('\',', '",')
answer = answer.replace('\' ,', '" ,')
answer = answer.replace(',\'', ',"')
answer = answer.replace(', \'', ', "')
#remplacer les apostrophes par des guillemets
answer = answer.replace("\\'", "\'")
#print(answer)
answer = answer[answer.index('{'):]
print(f'answer : {answer}')
answer = json.loads(answer)
#print(answer)
#check if keys are p1, p2, p3, p4, p5 and if values are not empty or '' :
if 'p1' not in answer.keys() or 'p2' not in answer.keys() or 'p3' not in answer.keys() or 'p4' not in answer.keys() or 'p5' not in answer.keys():
print('At least one paragraph is missing... retrying')
sleep(5)
continue
break
return answer
| [
"un acte de mariage, baiser et union des partenaires\n p5 = ",
"The user will provide you with French Mariage acts. You must always split these acts into 5 paragraphs. If a paragraphs seems to be missing, answer with an empty paragraph. The parapgrahs are : p1 = Date et heure maison commune\n p2 = Le mari et son entourage\n p3 = La mariée et son entourage\n p4 = Présence d'un acte de mariage, baiser et union des partenaires\n p5 = 'En présence de ...', Les informations concernant les témoins et l'adjoint au maire.\n\n",
"Le trente mars mil neuf cent quarante * onze heures cinq minutes ****\ndevant Nous ont comparu publiquement en la maison commune: Ermel Eugène LUCAS * imprimeur né à Saint\nChristophe du Bois * Maine et Loire * le dix octobre mil neuf cent, trente-neuf ans, domicilié à Cholet\nMaine et Loir * 62, rue de Lorraine * actuellement aux armées; fils de Auguste Maximin LUCAS * décédé, et\nde Marie Louise CHARBONNEAU, sa veuve, sans profession, domiciliée à Cholet Divorcé de Micheline\nHenriette SEROT, d'une part ,/- ET Amélie Eugénie LE GALLE, journalière, née à Le Palais * Morbihan *\nle quinze octobre mil neuf cent dix, vingt-neuf ans, domiciliée à Paris, 13, rue de la Ferronnerie;\nfille de Joachim Marie LE GALLE, décédé, et de Marie France",
", Les informations concernant les témoins et l",
"onze mai mil neuf cent quinze, vingt-quatre ans, domiciliée à Paris, 7, rue des Prêcheurs *\nfille de Charles Célestin Edouard LEPAGE, décédé, t de Pauline Désirée Germain VAPPEREAU * sa\nveuve * matelassière * domiciliée 7, rue des Prêcheurs, d'autre part Les futurs époux déclarent\nqu'il n'a pas été fait de contrat de mariage .- Marius Fernand VIDAL et Charlotte Lucie LEPAGE ont\ndécalé l'un après l'autre vouloir se prendre pour époux et Nous avons prononcé au nom de la\nloi qu'ils sont unis par le mariage En présence de: Charles GAUTHIER * employé, 4, Place du\nLouvre et de Pauline LEPAGE, matelassière * 7, rue des Prêcheurs * témoins majeurs, qui, lecture\nfaite, ont signé avec les époux et Nous, Charles Louis TOURY * Officier de l'état-civil du premier\narrondissement de Paris, Chevalier de la Légion d'Honneur ./."
] |
2024-01-10 | MrBread13/Stage-2022-2023-EXO-POP | gpt-prompt~get_paragraphs_labels.py | import json
import openai
import Levenshtein
import itertools
from time import sleep
import os
openai.organization = os.environ.get("OPENAI_ORG_KEY")
openai.api_key = os.environ.get("OPENAI_API_KEY")
def read_json(filename):
with open(filename, "r") as f:
return json.load(f)
def split_examples(examples : dict, index : int):
#return the example at the given index, and a dict containing all the other examples
return examples[str(index)], {k: v for k, v in examples.items() if k != str(index)}
def get_example_prompt(examples, paragraph_index):
#build the few-shot example prompt for the given paragraph index
prompt = ""
prompt += "Répondre. Utilise exact même labels même si personne non mentionnée\n"
for i, example in enumerate(examples):
prompt += f"Example {i+1}:\n"
prompt += f"{examples[example]['text'][paragraph_index]}\n"
prompt += f"Labels {i+1}:\n"
prompt += f"{examples[example]['labels'][paragraph_index]}\n\n"
return prompt
def make_prompt(example_prompt, paragraph):
#append the question paragraph and the 'Labels:' cue to the example prompt
prompt = example_prompt
prompt += f"Question:\n {paragraph}\n"
#print(f"{paragraph}\nLabels:")
#prompt += "template:\n"
#prompt += f"{template[paragraph_index]}\n"
prompt += ("Labels:\n")
#print(prompt)
#print(prompt)
return prompt
def get_answer(prompt):
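# Send the prompt to gpt-3.5-turbo (up to 3 attempts), normalize the reply (quotes, newlines, label names) into a JSON string, and return it parsed as a dict.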
#try to get an answer and catch the error if the model doesn't answer or answers with an error. Retry 3 times
for i in range(3):
try:
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
temperature=0.8,
messages=[
{"role": "user", "content": prompt,},
]
)
break
except:
completion = None
print("Error while getting answer. Retry in 5 seconds")
sleep(5)
continue
if completion is None:
print("Error while getting answer. Returning...")
return None
answer = completion.choices[0].message['content']
answer = answer.replace('\n', '').replace('.','')
#remove quote around comma
answer = answer.replace('\',', '",')
answer = answer.replace(',\'', ',"')
#remove quote around space
answer = answer.replace(' \'', ' "')
answer = answer.replace('\' ', '" ')
#remove quote around colon
answer = answer.replace('\':', '":')
answer = answer.replace(':\'', ':"')
#remove quote around {}
answer = answer.replace('{\'', '{"')
answer = answer.replace('\'}', '"}')
#remove \n and -\n
answer = answer.replace('-\\n', '')
answer = answer.replace('\\n', ' ')
#replace Prenom-du-maire with Prenom-adjoint-maire
answer = answer.replace('Prenom-maire', 'Prenom-adjoint-maire')
#replace Nom-du-maire with Nom-adjoint-maire
answer = answer.replace('Nom-maire', 'Nom-adjoint-maire')
#replace escaped apostrophes with plain apostrophes
answer = answer.replace("\\'", "\'")
#print(answer)
answer = answer[answer.index('{'):]
#print(f'answer : {answer}')
answer = json.loads(answer)
return answer
def get_labels(text):
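# For each paragraph of the act, build a few-shot prompt from the stored examples and ask the model for its labels; empty paragraphs get an empty label dict.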
examples_base = read_json("paragraphs_labels_examples.json")
_, examples = split_examples(examples_base, 1)
labels_dict = {}
for _ , paragraph_index in enumerate(text):
example_prompt = get_example_prompt(examples, paragraph_index)
if text[paragraph_index] == '':
labels = {}
else :
prompt = make_prompt(example_prompt, text[paragraph_index])
labels = get_answer(prompt)
#print(labels)
#rebuild a dictionnary with paragraph_index : labels
labels_dict[paragraph_index] = labels
return labels_dict
if __name__ == "__main__":
examples_base = read_json("paragraphes_train_copy.json")
for i, _ in enumerate(examples_base):
example, examples = split_examples(examples_base, i)
distances = 0
print('===================================================')
print('Test: ', i)
for j,paragraph_index in enumerate(example["text"]):
print(paragraph_index)
if paragraph_index == 'p4':
continue
example_prompt = get_example_prompt(examples, paragraph_index)
#print(example_prompt)
prompt = make_prompt(example_prompt, example["text"][paragraph_index])
labels = get_answer(prompt)
ref = example["labels"][paragraph_index]
#replace all -\n by '' and \n by ' ' in the ref values
for key in ref.keys():
ref[key] = ref[key].replace('-\n', '').replace('\n', ' ')
for key in ref.keys():
if key not in labels.keys():
labels[key] = ''
distance = Levenshtein.distance(labels[key], ref[key])
if distance > 0:
print(key, distance, labels[key] if labels[key] != '' else 'VIDE', ref[key] if ref[key] != '' else 'VIDE')
distances += distance
print('============================ Distance :' , distances)
# if Levenshtein.distance(answer, example["labels"][paragraph_index]) > 5:
# print("Example: ", i)
# print("Paragraph: ", j)
# print("Answer: ", answer)
# print("Label: ", example["labels"][paragraph_index])
# print("Distance: ", Levenshtein.distance(answer, example["labels"][paragraph_index]))
# print()
| [
"Labels:\n",
"Question:\n PLACEHOLDER\n",
"Répondre. Utilise exact même labels même si personne non mentionnée\n"
] |
2024-01-10 | MrBread13/Stage-2022-2023-EXO-POP | gpt-prompt~split_paragraph.py | import json
import openai
from time import sleep
import os
openai.organization = os.environ.get("OPENAI_ORG_KEY")
openai.api_key = os.environ.get("OPENAI_API_KEY")
file = open("splitting_examples.json", "r")
data = json.load(file)
file.close()
exemple= data["0"]
paragraphs = exemple["text"]
base = exemple["base"]
exemple_incomplet = "onze mai mil neuf cent quinze, vingt-quatre ans, domiciliée à Paris, 7, rue des Prêcheurs *\nfille de Charles Célestin Edouard LEPAGE, décédé, t de Pauline Désirée Germain VAPPEREAU * sa\nveuve * matelassière * domiciliée 7, rue des Prêcheurs, d'autre part ;- Les futurs époux déclarent\nqu'il n'a pas été fait de contrat de mariage .- Marius Fernand VIDAL et Charlotte Lucie LEPAGE ont\ndécalé l'un après l'autre vouloir se prendre pour époux et Nous avons prononcé au nom de la\nloi qu'ils sont unis par le mariage .- En présence de: Charles GAUTHIER * employé, 4, Place du\nLouvre et de Pauline LEPAGE, matelassière * 7, rue des Prêcheurs * témoins majeurs, qui, lecture\nfaite, ont signé avec les époux et Nous, Charles Louis TOURY * Officier de l'état-civil du premier\narrondissement de Paris, Chevalier de la Légion d'Honneur ./."
paragraphes_incomplets = {
"p1": "",
"p2": "",
"p3": "onze mai mil neuf cent quinze, vingt-quatre ans, domiciliée à Paris, 7, rue des Prêcheurs *\nfille de Charles Célestin Edouard LEPAGE, décédé, t de Pauline Désirée Germain VAPPEREAU * sa\nveuve * matelassière * domiciliée 7, rue des Prêcheurs, d'autre part ;-",
"p4": "Les futurs époux déclarent\nqu'il n'a pas été fait de contrat de mariage .- Marius Fernand VIDAL et Charlotte Lucie LEPAGE ont\ndécalé l'un après l'autre vouloir se prendre pour époux et Nous avons prononcé au nom de la\nloi qu'ils sont unis par le mariage .-",
"p5": "En présence de: Charles GAUTHIER * employé, 4, Place du\nLouvre et de Pauline LEPAGE, matelassière * 7, rue des Prêcheurs * témoins majeurs, qui, lecture\nfaite, ont signé avec les époux et Nous, Charles Louis TOURY * Officier de l'état-civil du premier\narrondissement de Paris, Chevalier de la Légion d'Honneur ./."
}
def split_text(text_to_split):
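# Ask gpt-3.5-turbo to split a marriage act into paragraphs p1-p5, giving it one complete and one truncated example, and retry if a paragraph key is missing from the parsed answer.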
# prompt = ""
# prompt += "Tâche : Séparer le texte en 5 paragraphes. Le texte donné peut être incomplet ou tronqué, n'inclure que les paragraphes complets.\n p1 = Date et heure maison commune\n p2 = Le mari\n p3 = La mariée\n p4 = acte de mariage + uni\n p5 = Témoin et maire\n\n"
# prompt += "Exemple:\n"
# prompt += base
# prompt += "\nParagraphes:\n"
# prompt += json.dumps(paragraphs, indent=4)
# prompt += "\n\n"
# prompt += "Exemple incomplet:\n"
# prompt += exemple_incomplet
# prompt += "\nParagraphes:\n"
# prompt += json.dumps(paragraphes_incomplets, indent=4)
# prompt += "\n\n"
# prompt += "Texte à séparer:\n"
# prompt += text_to_split
# prompt += "\n\n"
# prompt += "Paragraphes :\n"
for _ in range(3):  # outer retry: re-ask the model if a paragraph key is missing from the parsed answer
for _ in range(3):  # inner retry: recover from transient API errors
try:
# completion = openai.ChatCompletion.create(
# model="gpt-3.5-turbo",
# temperature=1,
# messages=[
# {"role": "user", "content": prompt,},
# ]
# )
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
temperature=0.4,
messages=[
{"role": "system", "content": "You split the given texts into paragraphs. You are aware the text can be incomplete or truncated. The parapgrahs are : p1 = Date et heure maison commune\n p2 = Le mari\n p3 = La mariée\n p4 = acte de mariage + uni\n p5 = Témoin et maire\n\n"},
{"role": "user", "content" : base},
{"role": "assistant", "content": json.dumps(paragraphs, indent=4)},
{"role": "user", "content": exemple_incomplet},
{"role": "assistant", "content": json.dumps(paragraphes_incomplets, indent=4)},
{"role": "user", "content" : text_to_split}
]
)
break
except:
completion = None
print("Error while getting answer. Retry in 5 seconds")
sleep(5)
continue
if completion is None:
print("Error while getting answer. Returning...")
return None
#check if keys are p1, p2, p3, p4, p5 and if values are not empty or ''
answer = completion.choices[0].message['content']
answer = answer.replace('\n', '').replace('.','')
#remove quote around comma
answer = answer.replace('\',', '",')
answer = answer.replace(',\'', ',"')
#remove quote around space
answer = answer.replace(' \'', ' "')
answer = answer.replace('\' ', '" ')
#remove quote around colon
answer = answer.replace('\':', '":')
answer = answer.replace(':\'', ':"')
#remove quote around {}
answer = answer.replace('{\'', '{"')
answer = answer.replace('\'}', '"}')
#remove \n and -\n
answer = answer.replace('-\\n', '')
answer = answer.replace('\\n', ' ')
#replace escaped apostrophes with plain apostrophes
answer = answer.replace("\\'", "\'")
#print(answer)
answer = answer[answer.index('{'):]
#print(f'answer : {answer}')
answer = json.loads(answer)
print(answer)
#check if keys are p1, p2, p3, p4, p5 and if values are not empty or '' :
if 'p1' not in answer.keys() or 'p2' not in answer.keys() or 'p3' not in answer.keys() or 'p4' not in answer.keys() or 'p5' not in answer.keys():
print('At least one paragraph is missing... retrying')
sleep(5)
continue
# if answer['p1'] == '' or answer['p2'] == '' or answer['p3'] == '' or answer['p4'] == '' or answer['p5'] == '':
# print('At least one paragraph is empty... retrying')
# sleep(5)
# continue
break
return answer
| [
"onze mai mil neuf cent quinze, vingt-quatre ans, domiciliée à Paris, 7, rue des Prêcheurs *\nfille de Charles Célestin Edouard LEPAGE, décédé, t de Pauline Désirée Germain VAPPEREAU * sa\nveuve * matelassière * domiciliée 7, rue des Prêcheurs, d'autre part ;- Les futurs époux déclarent\nqu'il n'a pas été fait de contrat de mariage .- Marius Fernand VIDAL et Charlotte Lucie LEPAGE ont\ndécalé l'un après l'autre vouloir se prendre pour époux et Nous avons prononcé au nom de la\nloi qu'ils sont unis par le mariage .- En présence de: Charles GAUTHIER * employé, 4, Place du\nLouvre et de Pauline LEPAGE, matelassière * 7, rue des Prêcheurs * témoins majeurs, qui, lecture\nfaite, ont signé avec les époux et Nous, Charles Louis TOURY * Officier de l'état-civil du premier\narrondissement de Paris, Chevalier de la Légion d'Honneur ./.",
"You split the given texts into paragraphs. You are aware the text can be incomplete or truncated. The parapgrahs are : p1 = Date et heure maison commune\n p2 = Le mari\n p3 = La mariée\n p4 = acte de mariage + uni\n p5 = Témoin et maire\n\n"
] |
2024-01-10 | MrBread13/Stage-2022-2023-EXO-POP | gpt-prompt~archived~get_tags_long.py | import json
import openai
import Levenshtein
import os
openai.organization = os.environ.get("OPENAI_ORG_KEY")
openai.api_key = os.environ.get("OPENAI_API_KEY")
#intro sequence:
intro = "Nous allons te fournir un certificat de mariage, un document ayant toujours la même mise en forme.Tu vas devoir procéder à l’extraction de certaines données sur plusieurs certificats ensuite. Voici le premier certificat, je précise qu’il est extrait d’un document au format Json et que tu auras toutes les réponses fournies à la fin, cela te permettra de mieux reconnaître ce qu’il te faut obtenir dans les contrats suivants. "
# An example act:
act_example = """Le huit avril mil neuf cent quarante, onze heures cinq minutes ****\ndevant Nous ont comparu publiquement en la maison commune: Camille Marcel MOUROT, barman, né à Dijon\nCôte d'Or * le dix-huit février mil neuf cent onze * vingt-neuf ans, domicilié à Paris, 17, rue\nPierre Lescot, actuellement aux armées; fils de Emile MOUROT et de Pauline MONIOT, époux décédés,\nd'une part ,/- ET Jeanne BLONDEAU, sans Métier, née à Jars * Cher * le seize mars mil neuf cent\nneuf, trente et un ans, domiciliée à Paris, 17, rue Pierre Lescot; fille de Emile Joseph BLONDEAU\net de Marie Louise MELOT, son épouse, marchands de bestiaux, domiciliés à Vailly sur Saône * Cher *\nDivorcée de William Louis Paul BERNON, d'autre part ;- Les futurs époux déclarent quil n'a pas été\nfait de contrat de mariage .- Camille Marcel MOUROT et Jeanne BLONDEAU ont déclaré l'un après l'autre\nvouloir se prendre pour époux et Nous avons prononcé au nom de la loi qu'ils sont unis par le mariage.\nEn présence de: Emile SOLIGNAC, commerçant, Médaillé militaire, Croix de Guerre, 16 bis, rue\nLauriston, et de Marcelle MOUROT, vendeuse, 22, rue de l'Echiquier, témoins majeurs, qui, lecture\nfaite ont signé avec les époux et Nous, Pierre Louis Adolphe BERTRAND, Maire du Premier arrondisse-\nment de Paris, Chevalier de la Légion d'Honneur ./ \n
{"Jour-du-mariage": "huit",
"Mois-du-mariage": "avril",
"Année-du-mariage": "mil neuf cent quarante",
"Heure-du-mariage": "onze heures",
"Minute-du-mariage": "cinq minutes",
"Prénom-de-l'adjoint-au-maire": "Pierre Louis Adolphe",
"Nom-de-l'adjoint-au-maire": "BERTRAND",
"Ville-du-mariage": "Premier arrondisse-\nment de Paris",
"Prénom-du-mari": "Camille Marcel",
"Nom-du-mari": "MOUROT",
"Métier-du-mari": "barman",
"Ville-de-naissance-du-mari": "Dijon\nCôte d'Or",
"Département-de-naissance-du-mari": "",
"Pays-de-naissance-du-mari": "",
"Jour-de-naissance-du-mari": "dix-huit",
"Mois-de-naissance-du-mari": "février",
"Année-de-naissance-du-mari": "mil neuf cent onze",
"Âge-du-mari": "vingt-neuf ans",
"Ville-de-résidence-du-mari": "Paris",
"Département-de-résidence-du-mari": "",
"Pays-de-résidence-du-mari": "",
"Numero-de-rue-de-résidence-du-mari": "17",
"Type-de-rue-de-résidence-du-mari": "rue",
"Nom-de-rue-de-résidence-du-mari": "Pierre Lescot",
"Prénom-du-père-du-mari": "Emile",
"Nom-du-père-du-mari": "MOUROT",
"Métier-du-père-du-mari": "",
"Ville-de-résidence-du-père-du-mari": "",
"Département-de-résidence-du-père-du-mari": "",
"Numero-de-résidence-du-père-du-mari": "",
"Type-de-rue-de-résidence-du-père-du-mari": "",
"Nom-de-rue-de-résidence-du-père-du-mari": "",
"Prénom-de-la-mère-du-mari": "Pauline",
"Nom-de-la-mère-du-mari": "MONIOT",
"Métier-de-la-mère-du-mari": "",
"Ville-de-résidence-de-la-mère-du-mari": "",
"Département-de-résidence-de-la-mère-du-mari": "",
"Pays-de-résidence-de-la-mère-du-mari": "",
"Numero-de-rue-de-résidence-de-la-mère-du-mari": "",
"Type-de-rue-de-résidence-de-la-mère-du-mari": "",
"Nom-de-rue-de-résidence-de-la-mère-du-mari": "",
"Prénom-de-la-mariée": "Jeanne",
"Nom-de-la-mariée": "BLONDEAU",
"Métier-de-la-mariée": "sans Métier",
"Ville-de-naissance-de-la-mariée": "Jars",
"Département-de-naissance-de-la-mariée": "Cher",
"Pays-de-naissance-de-la-mariée": "",
"Jour-de-naissance-de-la-mariée": "seize",
"Mois-de-naissance-de-la-mariée": "mars",
"Année-de-naissance-de-la-mariée": "mil neuf cent\nneuf",
"Âge-de-la-mariée": "trente et un ans",
"Ville-de-résidence-de-la-mariée": "Paris",
"Département-de-résidence-de-la-mariée": "Cher",
"Pays-de-résidence-de-la-mariée": "",
"Numero-de-rue-de-résidence-de-la-mariée": "17",
"Type-de-rue-de-résidence-de-la-mariée": "rue",
"Nom-de-rue-de-résidence-de-la-mariée": "Pierre Lescot",
"Prénom-du-père-de-la-mariée": "Emile Joseph",
"Nom-du-père-de-la-mariée": "BLONDEAU",
"Métier-du-père-de-la-mariée": "",
"Ville-de-résidence-du-père-de-la-mariée": "",
"Département-de-résidence-du-père-de-la-mariée": "",
"Numero-de-résidence-du-père-de-la-mariée": "",
"Type-de-rue-de-résidence-du-père-de-la-mariée": "",
"Nom-de-rue-de-résidence-du-père-de-la-mariée": "",
"Prénom-de-la-mère-de-la-mariée": "Marie Louise",
"Nom-de-la-mère-de-la-mariée": "MELOT",
"Métier-de-la-mère-de-la-mariée": "marchands de bestiaux",
"Ville-de-résidence-de-la-mère-de-la-mariée": "",
"Département-de-résidence-de-la-mère-de-la-mariée": "",
"Pays-de-résidence-de-la-mère-de-la-mariée": "",
"Numero-de-rue-de-résidence-de-la-mère-de-la-mariée": "",
"Type-de-rue-de-résidence-de-la-mère-de-la-mariée": "",
"Nom-de-rue-de-résidence-de-la-mère-de-la-mariée": "",
"Prénom-de-l'ex-époux": "William Louis Paul",
"Nom-de-l'ex-époux": "BERNON",
"Prénom-temoin-0": "Emile",
"Nom-temoin-0": "SOLIGNAC",
"Métier-temoin-0": "commerçant",
"Âge-temoin-0": "",
"Numero-de-rue-de-résidence-temoin-0": "16 bis",
"Type-de-rue-de-résidence-temoin-0": "rue",
"Nom-de-rue-de-résidence-temoin-0": "Lauriston",
"Ville-de-résidence-temoin": "",
"Département-de-résidence-temoin": "",
"Prénom-temoin-1": "Marcelle",
"Nom-temoin-1": "MOUROT",
"Métier-temoin-1": "vendeuse",
"Numero-de-rue-de-résidence-temoin-1": "22",
"Type-de-rue-de-résidence-temoin-1": "rue",
"Nom-de-rue-de-résidence-temoin-1": "de l'Echiquier",
"Nom-de-l'ex-épouse" :"",
"Prénom-de-l'ex-épouse" :""
}
"""
question=""" Maintenant, voici un autre certificat de mariage : je veux que tu m'extrais des données sous la meme forme que les réponses que je t'ai fourni. Précision : compte les arrondissement comme une ville."""
def labels_from_act(act_text: str) -> dict:
"""
Extract the labels from a marriage act.
input: act_text: text of the marriage act
output: dictionary of labels
"""
prompt= intro + act_example + question + act_text
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": prompt,},
]
)
answer = completion.choices[0].message['content']
answer = answer.replace('\n', '').replace('.','')
#remove quote around comma
answer = answer.replace('\',', '",')
answer = answer.replace(',\'', ',"')
#remove quote around space
answer = answer.replace(' \'', ' "')
answer = answer.replace('\' ', '" ')
#remove quote around colon
answer = answer.replace('\':', '":')
answer = answer.replace(':\'', ':"')
#remove quote around {}
answer = answer.replace('{\'', '{"')
answer = answer.replace('\'}', '"}')
#remove \n and -\n
answer = answer.replace('-\\n', '')
answer = answer.replace('\\n', ' ')
#replace Prénom-du-maire with Prénom-de-l'adjoint-au-maire
answer = answer.replace('Prénom-du-maire', "Prénom-de-l'adjoint-au-maire")
#replace Nom-du-maire with Nom-de-l'adjoint-au-maire
answer = answer.replace('Nom-du-maire', "Nom-de-l'adjoint-au-maire")
#replace escaped apostrophes with plain apostrophes
answer = answer.replace("\\'", "\'")
answer = answer.replace('sa veuve, sans Métier', 'sans Métier')
#print(answer)
answer = answer[answer.index('{'):]
#print(f'answer : {answer}')
answer = json.loads(answer)
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
act_text = """ neuf avril mil neuf cent quarante * onze heures vingt minutes ****\ndevant Nous ont comparu publiquement en la maison commune: Antoine POCARD, porteur aux halles, né à\nParis, dixième arrondissement, le cinq février mil neuf cent un, trente-neuf ans, domicilié à Paris\n56, rue Saint Honoré; actuellement mobilisé- fils de Jeanne Marie POCARD- Veuf de Juliette **\nAlbertine GAYRARD, d'une part ,/- ET Adrienne Jeanne ALBRIEUX, journalière, née à Paris, onzième\narrondissement, le onze septembre mil neuf cent deux, trente-sept ans; domiciliée à Paris, 56, rue\nSaint Honoré; fille de Marie Charles ALBRIEUX, sans Métier, domicilié à Aulnay sous Bois * Sein\net Oise * et de Marguerite TERLES, décédée, d'autre part ;- Les futurs époux déclarent qu'il n'a\npas été fait de contrat de mariage .- Antoine POCARD et Adrienne Jeanne ALBRIEUX ont déclaré l'un\naprès l'autre vouloir se prendre pour époux et Nous avons prononcé au nom de la loi qu'ils sont\nunis par le mariage .- En présence de: Fernande PASTEAU, concierge, 13, rue Dussoubs, et de\nAmélie MASSONIE, ménagère, 10 rue Volta, témoins majeurs, qui, lecture farte ont signé avec\nles époux et Nous, Charles Louis TOURY, Officier de l'état-civil du premier arrondissement de\nParis, Chevalier de la Légion d'Honneur ./"""
labels = labels_from_act(act_text)
#print(labels)
# Check that the extracted labels are indeed the same as those of the reference marriage act:
ref = {
"Jour-du-mariage": "neuf",
"Mois-du-mariage": "avril",
"Année-du-mariage": "mil neuf cent quarante",
"Heure-du-mariage": "onze heures",
"Minute-du-mariage": "vingt minutes",
"Prénom-de-l'adjoint-au-maire": "Charles Louis",
"Nom-de-l'adjoint-au-maire": "TOURY",
"Ville-du-mariage": "premier arrondissement de\nParis",
"Prénom-du-mari": "Antoine",
"Nom-du-mari": "POCARD",
"Métier-du-mari": "porteur aux halles",
"Ville-de-naissance-du-mari": "Paris, dixième arrondissement",
"Département-de-naissance-du-mari": "",
"Pays-de-naissance-du-mari": "",
"Jour-de-naissance-du-mari": "cinq",
"Mois-de-naissance-du-mari": "février",
"Année-de-naissance-du-mari": "mil neuf cent un",
"Âge-du-mari": "trente-neuf ans",
"Ville-de-résidence-du-mari": "Paris",
"Département-de-résidence-du-mari": "",
"Pays-de-résidence-du-mari": "",
"Numero-de-rue-de-résidence-du-mari": "56",
"Type-de-rue-de-résidence-du-mari": "rue",
"Nom-de-rue-de-résidence-du-mari": "Saint Honoré",
"Prénom-du-père-du-mari": "",
"Nom-du-père-du-mari": "",
"Métier-du-père-du-mari": "",
"Ville-de-résidence-du-père-du-mari": "",
"Département-de-résidence-du-père-du-mari": "",
"Numero-de-résidence-du-père-du-mari": "",
"Type-de-rue-de-résidence-du-père-du-mari": "",
"Nom-de-rue-de-résidence-du-père-du-mari": "",
"Prénom-de-la-mère-du-mari": "Jeanne Marie",
"Nom-de-la-mère-du-mari": "POCARD-",
"Métier-de-la-mère-du-mari": "",
"Ville-de-résidence-de-la-mère-du-mari": "",
"Département-de-résidence-de-la-mère-du-mari": "",
"Pays-de-résidence-de-la-mère-du-mari": "",
"Numero-de-rue-de-résidence-de-la-mère-du-mari": "",
"Type-de-rue-de-résidence-de-la-mère-du-mari": "",
"Nom-de-rue-de-résidence-de-la-mère-du-mari": "",
"Prénom-de-la-mariée": "Adrienne Jeanne",
"Nom-de-la-mariée": "ALBRIEUX",
"Métier-de-la-mariée": "journalière",
"Ville-de-naissance-de-la-mariée": "Paris, onzième\narrondissement",
"Département-de-naissance-de-la-mariée": "",
"Pays-de-naissance-de-la-mariée": "",
"Jour-de-naissance-de-la-mariée": "onze",
"Mois-de-naissance-de-la-mariée": "septembre",
"Année-de-naissance-de-la-mariée": "mil neuf cent deux",
"Âge-de-la-mariée": "trente-sept ans",
"Ville-de-résidence-de-la-mariée": "Paris",
"Département-de-résidence-de-la-mariée": "",
"Pays-de-résidence-de-la-mariée": "",
"Numero-de-rue-de-résidence-de-la-mariée": "56",
"Type-de-rue-de-résidence-de-la-mariée": "rue",
"Nom-de-rue-de-résidence-de-la-mariée": "Saint Honoré",
"Prénom-du-père-de-la-mariée": "Marie Charles",
"Nom-du-père-de-la-mariée": "ALBRIEUX",
"Métier-du-père-de-la-mariée": "sans Métier",
"Ville-de-résidence-du-père-de-la-mariée": "Aulnay sous Bois",
"Département-de-résidence-du-père-de-la-mariée": "Sein \net Oise",
"Numero-de-résidence-du-père-de-la-mariée": "",
"Type-de-rue-de-résidence-du-père-de-la-mariée": "",
"Nom-de-rue-de-résidence-du-père-de-la-mariée": "",
"Prénom-de-la-mère-de-la-mariée": "Marguerite",
"Nom-de-la-mère-de-la-mariée": "TERLES",
"Métier-de-la-mère-de-la-mariée": "",
"Ville-de-résidence-de-la-mère-de-la-mariée": "",
"Département-de-résidence-de-la-mère-de-la-mariée": "",
"Pays-de-résidence-de-la-mère-de-la-mariée": "",
"Numero-de-rue-de-résidence-de-la-mère-de-la-mariée": "",
"Type-de-rue-de-résidence-de-la-mère-de-la-mariée": "",
"Nom-de-rue-de-résidence-de-la-mère-de-la-mariée": "",
"Prénom-de-l'ex-époux": "",
"Nom-de-l'ex-époux": "",
"Prénom-temoin-0": "Fernande",
"Nom-temoin-0": "PASTEAU",
"Métier-temoin-0": "concierge",
"Âge-temoin": "",
"Numero-de-rue-de-résidence-temoin-0": "13",
"Type-de-rue-de-résidence-temoin-0": "rue",
"Nom-de-rue-de-résidence-temoin-0": "Dussoubs",
"Ville-de-résidence-temoin": "",
"Département-de-résidence-temoin": "",
"Prénom-temoin-1": "Amélie",
"Nom-temoin-1": "MASSONIE",
"Métier-temoin-1": "ménagère",
"Numero-de-rue-de-résidence-temoin-1": "10",
"Type-de-rue-de-résidence-temoin-1": "rue",
"Nom-de-rue-de-résidence-temoin-1": "Volta"
}
distances = 0
for key in ref.keys():
distance = Levenshtein.distance(labels[key], ref[key])
if distance > 0:
print(key, distance, labels[key] if labels[key] != '' else 'VIDE', ref[key] if ref[key] != '' else 'VIDE')
distances += distance
print(distances)
| [
"intro802e477b-92f6-466a-bb89-225be90f6b1aact_example802e477b-92f6-466a-bb89-225be90f6b1aquestion802e477b-92f6-466a-bb89-225be90f6b1a neuf avril mil neuf cent quarante * onze heures vingt minutes ****\ndevant Nous ont comparu publiquement en la maison commune: Antoine POCARD, porteur aux halles, né à\nParis, dixième arrondissement, le cinq février mil neuf cent un, trente-neuf ans, domicilié à Paris\n56, rue Saint Honoré; actuellement mobilisé- fils de Jeanne Marie POCARD- Veuf de Juliette **\nAlbertine GAYRARD, d'une part ,/- ET Adrienne Jeanne ALBRIEUX, journalière, née à Paris, onzième\narrondissement, le onze septembre mil neuf cent deux, trente-sept ans; domiciliée à Paris, 56, rue\nSaint Honoré; fille de Marie Charles ALBRIEUX, sans Métier, domicilié à Aulnay sous Bois * Sein\net Oise * et de Marguerite TERLES, décédée, d'autre part ;- Les futurs époux déclarent qu'il n'a\npas été fait de contrat de mariage .- Antoine POCARD et Adrienne Jeanne ALBRIEUX ont déclaré l'un\naprès l'autre vouloir se prendre pour époux et Nous avons prononcé au nom de la loi qu'ils sont\nunis par le mariage .- En présence de: Fernande PASTEAU, concierge, 13, rue Dussoubs, et de\nAmélie MASSONIE, ménagère, 10 rue Volta, témoins majeurs, qui, lecture farte ont signé avec\nles époux et Nous, Charles Louis TOURY, Officier de l'état-civil du premier arrondissement de\nParis, Chevalier de la Légion d'Honneur ./",
"Nous allons te fournir un certificat de mariage, un document ayant toujours la même mise en forme.Tu vas devoir procéder à l’extraction de certaines données sur plusieurs certificats ensuite. Voici le premier certificat, je précise qu’il est extrait d’un document au format Json et que tu auras toutes les réponses fournies à la fin, cela te permettra de mieux reconnaître ce qu’il te faut obtenir dans les contrats suivants. Le huit avril mil neuf cent quarante, onze heures cinq minutes ****\ndevant Nous ont comparu publiquement en la maison commune: Camille Marcel MOUROT, barman, né à Dijon\nCôte d'Or * le dix-huit février mil neuf cent onze * vingt-neuf ans, domicilié à Paris, 17, rue\nPierre Lescot, actuellement aux armées; fils de Emile MOUROT et de Pauline MONIOT, époux décédés,\nd'une part ,/- ET Jeanne BLONDEAU, sans Métier, née à Jars * Cher * le seize mars mil neuf cent\nneuf, trente et un ans, domiciliée à Paris, 17, rue Pierre Lescot; fille de Emile Joseph BLONDEAU\net de Marie Louise MELOT, son épouse, marchands de bestiaux, domiciliés à Vailly sur Saône * Cher *\nDivorcée de William Louis Paul BERNON, d'autre part ;- Les futurs époux déclarent quil n'a pas été\nfait de contrat de mariage .- Camille Marcel MOUROT et Jeanne BLONDEAU ont déclaré l'un après l'autre\nvouloir se prendre pour époux et Nous avons prononcé au nom de la loi qu'ils sont unis par le mariage.\nEn présence de: Emile SOLIGNAC, commerçant, Médaillé militaire, Croix de Guerre, 16 bis, rue\nLauriston, et de Marcelle MOUROT, vendeuse, 22, rue de l'Echiquier, témoins majeurs, qui, lecture\nfaite ont signé avec les époux et Nous, Pierre Louis Adolphe BERTRAND, Maire du Premier arrondisse-\nment de Paris, Chevalier de la Légion d'Honneur ./ \n \n {\"Jour-du-mariage\": \"huit\",\n \"Mois-du-mariage\": \"avril\",\n \"Année-du-mariage\": \"mil neuf cent quarante\",\n \"Heure-du-mariage\": \"onze heures\",\n \"Minute-du-mariage\": \"cinq minutes\",\n \"Prénom-de-l'adjoint-au-maire\": \"Pierre Louis Adolphe\",\n \"Nom-de-l'adjoint-au-maire\": \"BERTRAND\",\n \"Ville-du-mariage\": \"Premier arrondisse-\nment de Paris\",\n \"Prénom-du-mari\": \"Camille Marcel\",\n \"Nom-du-mari\": \"MOUROT\",\n \"Métier-du-mari\": \"barman\",\n \"Ville-de-naissance-du-mari\": \"Dijon\nCôte d'Or\",\n \"Département-de-naissance-du-mari\": \"\",\n \"Pays-de-naissance-du-mari\": \"\",\n \"Jour-de-naissance-du-mari\": \"dix-huit\",\n \"Mois-de-naissance-du-mari\": \"février\",\n \"Année-de-naissance-du-mari\": \"mil neuf cent onze\",\n \"Âge-du-mari\": \"vingt-neuf ans\",\n \"Ville-de-résidence-du-mari\": \"Paris\",\n \"Département-de-résidence-du-mari\": \"\",\n \"Pays-de-résidence-du-mari\": \"\",\n \"Numero-de-rue-de-résidence-du-mari\": \"17\",\n \"Type-de-rue-de-résidence-du-mari\": \"rue\",\n \"Nom-de-rue-de-résidence-du-mari\": \"Pierre Lescot\",\n \"Prénom-du-père-du-mari\": \"Emile\",\n \"Nom-du-père-du-mari\": \"MOUROT\",\n \"Métier-du-père-du-mari\": \"\",\n \"Ville-de-résidence-du-père-du-mari\": \"\",\n \"Département-de-résidence-du-père-du-mari\": \"\",\n \"Numero-de-résidence-du-père-du-mari\": \"\",\n \"Type-de-rue-de-résidence-du-père-du-mari\": \"\",\n \"Nom-de-rue-de-résidence-du-père-du-mari\": \"\",\n \"Prénom-de-la-mère-du-mari\": \"Pauline\",\n \"Nom-de-la-mère-du-mari\": \"MONIOT\",\n \"Métier-de-la-mère-du-mari\": \"\",\n \"Ville-de-résidence-de-la-mère-du-mari\": \"\",\n \"Département-de-résidence-de-la-mère-du-mari\": \"\",\n \"Pays-de-résidence-de-la-mère-du-mari\": \"\",\n 
\"Numero-de-rue-de-résidence-de-la-mère-du-mari\": \"\",\n \"Type-de-rue-de-résidence-de-la-mère-du-mari\": \"\",\n \"Nom-de-rue-de-résidence-de-la-mère-du-mari\": \"\",\n \"Prénom-de-la-mariée\": \"Jeanne\",\n \"Nom-de-la-mariée\": \"BLONDEAU\",\n \"Métier-de-la-mariée\": \"sans Métier\",\n \"Ville-de-naissance-de-la-mariée\": \"Jars\",\n \"Département-de-naissance-de-la-mariée\": \"Cher\",\n \"Pays-de-naissance-de-la-mariée\": \"\",\n \"Jour-de-naissance-de-la-mariée\": \"seize\",\n \"Mois-de-naissance-de-la-mariée\": \"mars\",\n \"Année-de-naissance-de-la-mariée\": \"mil neuf cent\nneuf\",\n \"Âge-de-la-mariée\": \"trente et un ans\",\n \"Ville-de-résidence-de-la-mariée\": \"Paris\",\n \"Département-de-résidence-de-la-mariée\": \"Cher\",\n \"Pays-de-résidence-de-la-mariée\": \"\",\n \"Numero-de-rue-de-résidence-de-la-mariée\": \"17\",\n \"Type-de-rue-de-résidence-de-la-mariée\": \"rue\",\n \"Nom-de-rue-de-résidence-de-la-mariée\": \"Pierre Lescot\",\n \"Prénom-du-père-de-la-mariée\": \"Emile Joseph\",\n \"Nom-du-père-de-la-mariée\": \"BLONDEAU\",\n \"Métier-du-père-de-la-mariée\": \"\",\n \"Ville-de-résidence-du-père-de-la-mariée\": \"\",\n \"Département-de-résidence-du-père-de-la-mariée\": \"\",\n \"Numero-de-résidence-du-père-de-la-mariée\": \"\",\n \"Type-de-rue-de-résidence-du-père-de-la-mariée\": \"\",\n \"Nom-de-rue-de-résidence-du-père-de-la-mariée\": \"\",\n \"Prénom-de-la-mère-de-la-mariée\": \"Marie Louise\",\n \"Nom-de-la-mère-de-la-mariée\": \"MELOT\",\n \"Métier-de-la-mère-de-la-mariée\": \"marchands de bestiaux\",\n \"Ville-de-résidence-de-la-mère-de-la-mariée\": \"\",\n \"Département-de-résidence-de-la-mère-de-la-mariée\": \"\",\n \"Pays-de-résidence-de-la-mère-de-la-mariée\": \"\",\n \"Numero-de-rue-de-résidence-de-la-mère-de-la-mariée\": \"\",\n \"Type-de-rue-de-résidence-de-la-mère-de-la-mariée\": \"\",\n \"Nom-de-rue-de-résidence-de-la-mère-de-la-mariée\": \"\",\n \"Prénom-de-l'ex-époux\": \"William Louis Paul\",\n \"Nom-de-l'ex-époux\": \"BERNON\",\n \"Prénom-temoin-0\": \"Emile\",\n \"Nom-temoin-0\": \"SOLIGNAC\",\n \"Métier-temoin-0\": \"commerçant\",\n \"Âge-temoin-0\": \"\",\n \"Numero-de-rue-de-résidence-temoin-0\": \"16 bis\",\n \"Type-de-rue-de-résidence-temoin-0\": \"rue\",\n \"Nom-de-rue-de-résidence-temoin-0\": \"Lauriston\",\n \"Ville-de-résidence-temoin\": \"\",\n \"Département-de-résidence-temoin\": \"\",\n \"Prénom-temoin-1\": \"Marcelle\",\n \"Nom-temoin-1\": \"MOUROT\",\n \"Métier-temoin-1\": \"vendeuse\",\n \"Numero-de-rue-de-résidence-temoin-1\": \"22\",\n \"Type-de-rue-de-résidence-temoin-1\": \"rue\",\n \"Nom-de-rue-de-résidence-temoin-1\": \"de l'Echiquier\",\n \"Nom-de-l'ex-épouse\" :\"\",\n \"Prénom-de-l'ex-épouse\" :\"\"\n }\n Maintenant, voici un autre certificat de mariage : je veux que tu m'extrais des données sous la meme forme que les réponses que je t'ai fourni. Précision : compte les arrondissement comme une ville. 
neuf avril mil neuf cent quarante * onze heures vingt minutes ****\ndevant Nous ont comparu publiquement en la maison commune: Antoine POCARD, porteur aux halles, né à\nParis, dixième arrondissement, le cinq février mil neuf cent un, trente-neuf ans, domicilié à Paris\n56, rue Saint Honoré; actuellement mobilisé- fils de Jeanne Marie POCARD- Veuf de Juliette **\nAlbertine GAYRARD, d'une part ,/- ET Adrienne Jeanne ALBRIEUX, journalière, née à Paris, onzième\narrondissement, le onze septembre mil neuf cent deux, trente-sept ans; domiciliée à Paris, 56, rue\nSaint Honoré; fille de Marie Charles ALBRIEUX, sans Métier, domicilié à Aulnay sous Bois * Sein\net Oise * et de Marguerite TERLES, décédée, d'autre part ;- Les futurs époux déclarent qu'il n'a\npas été fait de contrat de mariage .- Antoine POCARD et Adrienne Jeanne ALBRIEUX ont déclaré l'un\naprès l'autre vouloir se prendre pour époux et Nous avons prononcé au nom de la loi qu'ils sont\nunis par le mariage .- En présence de: Fernande PASTEAU, concierge, 13, rue Dussoubs, et de\nAmélie MASSONIE, ménagère, 10 rue Volta, témoins majeurs, qui, lecture farte ont signé avec\nles époux et Nous, Charles Louis TOURY, Officier de l'état-civil du premier arrondissement de\nParis, Chevalier de la Légion d'Honneur ./"
] |
2024-01-10 | MrBread13/Stage-2022-2023-EXO-POP | gpt-prompt~pipeline_production.py | import json
import openai
import Levenshtein
import itertools
from time import sleep
import get_paragraph_labels_16k as pgc
import split_paragraph_16k as sp
# import get_complete_act_labels as gett
from fix_labels import sanitize_labels
file = open("test-grande-echelle-raw.json", "r")
data = json.load(file)
file.close()
import os
# first we need to split the text in paragraphs
def split_text(text_to_split):
splitted = sp.split_text(text_to_split)
return splitted
def get_labels(text):
labels = pgc.get_labels(text)
return labels
# one_shot_levenshtein_history = []
# few_shot_levenshtein_history = []
# one_shot_errors_history = {}
# few_shot_errors_history = {}
# one_shot_error_count = []
# few_shot_error_count = []
iter = 0
already_done = []
labels_history = {}
with open('test-grande-echelle-text_result.json', 'r') as f:
labels_history = json.load(f)
for i in labels_history.keys():
already_done.append(i)
print('total already done : ', len(already_done))
for i, name in enumerate(data):
if name in already_done:
continue
iter += 1
print('==================================')
print('Now testing : ', name)
print('==================================')
text = data[name]#['texte']
if 'divorce' in text:
continue
#reference = data[name]['questions']
splitted = split_text(text)
# print('splitted : ', splitted)
labels = get_labels(splitted)
#print('labels : ', labels)
# if 'Pays-residence-pere-mari' not in labels['p2'].keys():
# labels['p2']['Pays-residence-pere-mari'] = ''
# if 'Pays-residence-pere-mariee' not in labels['p3'].keys():
# labels['p3']['Pays-residence-pere-mariee'] = ''
reference = {}
with open('labels-reference.json', 'r') as f:
reference = json.load(f)
for paragraph in reference.keys():
if paragraph not in labels.keys():
labels[paragraph] = {}
for label in reference[paragraph].keys():
if label not in labels[paragraph].keys():
labels[paragraph][label] = ""
labels_keys_old = labels
#extract labels into a list
dic = {}
for key in labels.keys():
# print('key : ', key)
if key == 'p4':
if ('Nom-mari' in dic.keys()) and ('Prenom-mari' in dic.keys()) and ('Nom-mariee' in dic.keys()) and ('Prenom-mariee' in dic.keys()):
continue
for bkey in labels[key].keys():
# print('bkey : ', bkey)
dic[bkey] = labels[key][bkey]
labels = dic
# for key in reference.keys():
# # Patch for Boolean, temporary
# if isinstance(reference[key], bool):
# continue
# ################################
# if key not in labels.keys():
# labels[key] = ''
labels = sanitize_labels(labels)
for paragraph in labels_keys_old.keys():
for key in labels_keys_old[paragraph].keys():
if key in labels.keys():
labels_keys_old[paragraph][key] = labels[key]
labels = labels_keys_old
#print(labels)
#print('labels : ', labels['Jour-mariage'])
labels_history[name] = {'labels': labels, 'text': splitted}
# store labels_history in json file
with open('test-grande-echelle-text_result.json', 'w') as outfile:
json.dump(labels_history, outfile, indent=4)
| [] |
2024-01-10 | MrBread13/Stage-2022-2023-EXO-POP | gpt-prompt~pipeline.py | import json
import openai
import Levenshtein
import itertools
from time import sleep
import get_paragraph_labels_16k as pgc
import split_paragraph_16k as sp
# import get_complete_act_labels as gett
from fix_labels import sanitize_labels
# file = open("donnees-test.json", "r")
file = open("test-grande-echelle-raw.json", "r")
data = json.load(file)
file.close()
import os
# first we need to split the text in paragraphs
def split_text(text_to_split):
splitted = sp.split_text(text_to_split)
return splitted
def get_labels(text):
labels = pgc.get_labels(text)
return labels
one_shot_levenshtein_history = []
few_shot_levenshtein_history = []
one_shot_errors_history = {}
few_shot_errors_history = {}
one_shot_error_count = []
few_shot_error_count = []
iter = 0
already_done = []
labels_history = {}
with open('test-grande-echelle-text_result.json', 'r') as f:
labels_history = json.load(f)
for i in labels_history.keys():
already_done.append(i)
print('total already done : ', len(already_done))
for i, name in enumerate(data):
if name in already_done:
continue
iter += 1
print('==================================')
print('Now testing : ', name)
print('==================================')
text = data[name]['texte']
if 'divorce' in text:
continue
#reference = data[name]['questions']
splitted = split_text(text)
# print('splitted : ', splitted)
labels = get_labels(splitted)
#print('labels : ', labels)
# extract labels into a list
dic = {}
for key in labels.keys():
# print('key : ', key)
if key == 'p4':
if ('Nom-mari' in dic.keys()) and ('Prenom-mari' in dic.keys()) and ('Nom-mariee' in dic.keys()) and ('Prenom-mariee' in dic.keys()):
continue
for bkey in labels[key].keys():
# print('bkey : ', bkey)
dic[bkey] = labels[key][bkey]
labels = dic
if 'Pays-residence-pere-mari' not in labels.keys():
labels['Pays-residence-pere-mari'] = ''
if 'Pays-residence-pere-mariee' not in labels.keys():
labels['Pays-residence-pere-mariee'] = ''
# for key in reference.keys():
# # Patch for Boolean, temporary
# if isinstance(reference[key], bool):
# continue
# ################################
# if key not in labels.keys():
# labels[key] = ''
labels = sanitize_labels(labels)
#print(labels)
#print('labels : ', labels['Jour-mariage'])
labels_history[name] = labels
# store labels_history in json file
with open('test-grande-echelle-text_result.json', 'w') as outfile:
json.dump(labels_history, outfile, indent=4)
| [] |
2024-01-10 | Vinayak-Kannan/INfACT | SupportingFunction~CollapseRows.py | import uuid
import numpy as np
import openai
from dotenv import dotenv_values
from openai.embeddings_utils import cosine_similarity, get_embedding
from pandas import DataFrame
import pandas as pd
import pinecone
def get_embeddings(texts):
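# Embed each text with OpenAI's text-embedding-ada-002 model.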
embeddings = []
for text in texts:
embedding_model = "text-embedding-ada-002"
value = get_embedding(text, engine=embedding_model)
embeddings.append(value)
return embeddings
# Ideas to do this better:
# 1. Store the embeddings in vectorDB to stop having to call the API every time
# 2. Instead of dropping terms, roll them up into a list of terms, then have LLM synthesize them all into a single term
# 3. Switch to LLAMA for better results, fine tune it on accruate responses we have
def collapse_rows(df: DataFrame, school) -> DataFrame:
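# Embed every skill, group skills whose cosine similarity is at least 0.85, ask gpt-3.5-turbo for a single label summarizing each group, and merge the result back with the school's raw course data.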
config = dotenv_values("/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/.env")
openai.api_key = config.get("SECRET_KEY")
# Get embeddings for each value in the 'Skill' column
embeddings = get_embeddings(df['Skill'].tolist())
df['Embedding'] = embeddings
# Add a column to the df called 'Collapsed Skill'
df['Collapsed Skill'] = ''
# Iterate through the similarity matrix
for i, row in enumerate(df['Embedding']):
if df.loc[i, 'Collapsed Skill'] != "":
continue
similarities = []
for j, row2 in enumerate(df['Embedding']):
# Get skill name from the 'Skill' column
skill_name = df.loc[j, 'Skill']
if row is not None and row2 is not None:
# Calculate the cosine similarity between the two rows
similarities.append([cosine_similarity(row, row2), skill_name, j])
# Filter similarities to only include values that are greater than 0.8
similarities = [x for x in similarities if x[0] >= 0.85]
count = 0
if len(similarities) > 0:
word = similarities[0][1]
for similarity in similarities:
if similarity[1] == word:
count += 1
if count > 0 and count == len(similarities):
df.loc[i, 'Collapsed Skill'] = df.loc[i, 'Skill']
continue
# Create a string that concats similarities
similar_skills = df.loc[i, 'Skill'] + ", "
for similarity in similarities:
similar_skills += similarity[1] + ", "
# Create prompt that asks OpenAI for a label that summarizes the similar skills
course_description = f"""
Review the following terms, seperated by commas, and summarize them with one label.
Follow the format as per example below:
The terms are: Bowling, Skiing and Surfing, Vibing
Label: Sporting Activities
The terms are: {similar_skills}
"""
prompt_message = [{
"role": "user",
"content": course_description
}]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=prompt_message,
temperature=0
)
response_message = response.choices[0].message.content
print(similar_skills)
# Get the label from the response
label = response_message.split("Label: ")[1].strip()
# Check if similar_skills are all the same when lowered to lower case and removing punctuation
# If they are, then set the label to the first skill
# If they are not, then set the label to the response
similar_skills_list = [s.strip() for s in similar_skills.split(",") if s.strip()]
normalized_skills = [s.lower().replace("-", "").replace(" ", "") for s in similar_skills_list]
label = similar_skills_list[0] if len(set(normalized_skills)) == 1 else label
print(label)
# If there are similar values, add the first similar value to the 'Collapsed Skill' column
df.loc[i, 'Collapsed Skill'] = label
for similarity in similarities:
if df.loc[similarity[2], 'Related Course'] != df.loc[i, 'Related Course']:
df.loc[similarity[2], 'Collapsed Skill'] = label
else:
df.loc[similarity[2], 'Collapsed Skill'] = None
# Drop the 'Embedding' column
df = df.drop(columns=['Embedding'])
# Merge original column with df
orig_df = pd.read_csv(
f'/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/RawData/{school}/Data - Sheet1.csv')
# Drop credits and syllabus from orig_df
orig_df = orig_df.drop(columns=['Credits', 'Syllabus'])
df = pd.merge(df, orig_df, left_on=['Related Course', 'Semester'], right_on=['Title', 'Semester'])
return df
def collapse_rows_pinecone(df: DataFrame):
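# Pinecone-backed variant of collapse_rows: embed each skill plus its explanation, upsert representative vectors into the 'infact' index, and set every row's 'Collapsed Skill' to its closest stored term (cosine similarity above 0.85).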
config = dotenv_values("/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/.env")
openai.api_key = config.get("SECRET_KEY")
# Convert rows in 'Credits' column to numeric
df['Credits'] = pd.to_numeric(df['Credits'], errors='coerce')
# Filter df to rows where credits value contains a number even if it is a string
df = df[df['Credits'].notna()]
df['Embedding'] = ''
print(len(df))
for i, row in df.iterrows():
embedding_model = "text-embedding-ada-002"
print(f"Attribute: {row['Skill']}. Explanation of how course teaches this attribute: {row['Explanation']}")
embedding = get_embedding(row['Skill'] + row['Explanation'], engine=embedding_model)
df['Embedding'].update(pd.Series([embedding], index=[i]))
config = dotenv_values("/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/.env")
openai.api_key = config.get("SECRET_KEY")
# get api key from app.pinecone.io
PINECONE_API_KEY = config.get('PINECONE_KEY')
# find your environment next to the api key in pinecone console
PINECONE_ENV = config.get('PINECONE_ENV')
# Send to pinecone
pinecone.init(
api_key=PINECONE_API_KEY,
environment=PINECONE_ENV
)
index = pinecone.GRPCIndex("infact")
df['Collapsed Skill'] = ''
df['Reviewed'] = False
for i, row in df.iterrows():
if row['Reviewed']:
continue
# Find all rows in index that have a cosine similarity greater than 0.85
embedding = row['Embedding']
similar_rows = [[row['Skill'], row['Embedding'], i]]
for j, row_compare in df.iterrows():
if cosine_similarity(embedding, row_compare['Embedding']) > 0.85:
similar_rows.append([row_compare['Skill'], row_compare['Embedding'], j])
break
# Get center of all embeddings
similar_row_embeddings = []
for row in similar_rows:
df.at[row[2], 'Reviewed'] = True
similar_row_embeddings.append(row[1])
embeddings = np.array(similar_row_embeddings) # replace with your array of embeddings
centroid = np.mean(embeddings, axis=0)
# Find the closest row in similar_rows to the center of all embeddings
closest_row = similar_rows[0]
for row_compare in similar_rows:
if cosine_similarity(centroid, row_compare[1]) > cosine_similarity(centroid, closest_row[1]):
closest_row = row_compare
upsert_value = [{
"id": uuid.uuid4().hex,
"values": closest_row[1],
"metadata": {'text': closest_row[0]}
}]
response = index.query(vector=closest_row[1], top_k=1, include_values=False, include_metadata=True).to_dict()
if len(response['matches']) == 0 or response['matches'][0]['score'] < 0.85:
index.upsert(vectors=upsert_value, show_progress=True)
for i, row in df.iterrows():
embedding = row['Embedding']
response = index.query(vector=embedding, top_k=1, include_values=False, include_metadata=True).to_dict()
if len(response['matches']) == 0:
upsert_value = [{
"id": uuid.uuid4().hex,
"values": embedding,
"metadata": {'text': row['Skill']}
}]
index.upsert(vectors=upsert_value, show_progress=True)
df.loc[i, 'Collapsed Skill'] = row['Skill']
continue
top_match = response['matches'][0]
if top_match['score'] > 0.85:
df.loc[i, 'Collapsed Skill'] = top_match['metadata']['text']
else:
print(str(top_match['score']) + " :" + str(top_match['metadata']['text']))
print(str(row['Skill']))
print("RIP")
df.loc[i, 'Collapsed Skill'] = row['Skill']
upsert_value = [{
"id": uuid.uuid4().hex,
"values": embedding,
"metadata": {'text': row['Skill']}
}]
index.upsert(vectors=upsert_value, show_progress=True)
# Drop df embedding column
df = df.drop(columns=['Embedding'])
df = df.drop(columns=['Reviewed'])
return df
| [] |
2024-01-10 | Vinayak-Kannan/INfACT | SupportingFunction~CompareSkills.py | import openai
import pandas as pd
from dotenv import dotenv_values
from openai.embeddings_utils import get_embedding, cosine_similarity
def compare_skills():
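# Compare the MIT and Columbia skill lists: embed each skill and record, for every row, the other school's skill with cosine similarity of at least 0.9 in a 'Similar Skill' column, then save both comparison files.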
# Import data from csv
MIT = pd.read_csv(f'/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/RawData/MIT_Fall2023_v1/SkillOutputv2.csv')
Columbia = pd.read_csv(f'/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/RawData/Columbia_Fall2023_v1/SkillOutputv2.csv')
config = dotenv_values("/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/.env")
openai.api_key = config.get("SECRET_KEY")
MIT['Similar Skill'] = ''
MIT['Embedding'] = ''
Columbia['Similar Skill'] = ''
Columbia['Embedding'] = ''
embeddings = []
for text in Columbia['Skill']:
if text is None or text == " " or len(text) == 0:
embeddings.append(None)
continue
embedding_model = "text-embedding-ada-002"
print(text)
value = get_embedding(text, engine=embedding_model)
embeddings.append(value)
Columbia['Embedding'] = embeddings
embeddings = []
for text in MIT['Skill']:  # embed the skill names (the 'Embedding' column is still empty at this point)
if text is None or text == " " or len(text) == 0:
embeddings.append(None)
continue
embedding_model = "text-embedding-ada-002"
print(text)
value = get_embedding(text, engine=embedding_model)
embeddings.append(value)
MIT['Embedding'] = embeddings
# Find the value in the Skill column in MIT and Columbia that has at least 0.9 cosine similarity using OpenAI embedding API
# Create a new column in MIT and Columbia called 'Similar Skill' that stores the similar skill
for i, row in enumerate(MIT['Embedding']):
similarities = []
for j, row2 in enumerate(Columbia['Embedding']):
if row is not None and row2 is not None:
# Calculate the cosine similarity between the two rows
similarities.append([cosine_similarity(row, row2), Columbia.loc[j, 'Skill'], j])
# Filter similarities to only include values that are greater than 0.8
similarities = [x for x in similarities if x[0] >= 0.9]
if len(similarities) > 0:
word = similarities[0][1]
MIT.loc[i, 'Similar Skill'] = word
continue
for i, row in enumerate(Columbia['Embedding']):
similarities = []
for j, row2 in enumerate(MIT['Embedding']):
if row is not None and row2 is not None:
# Calculate the cosine similarity between the two rows
similarities.append([cosine_similarity(row, row2), MIT.loc[j, 'Skill'], j])
print(similarities)
# Filter similarities to only include values that are greater than 0.8
similarities = [x for x in similarities if x[0] >= 0.9]
if len(similarities) > 0:
word = similarities[0][1]
Columbia.loc[i, 'Similar Skill'] = word
continue
# Save columbia and MIT to csv
MIT.to_csv(f'/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/RawData/MIT_Fall2023_v1/comparison.csv', index=False)
Columbia.to_csv(f'/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/RawData/Columbia_Fall2023_v1/comparison.csv', index=False) | [] |
2024-01-10 | Vinayak-Kannan/INfACT | SupportingFunction~CreateLexicon.py | import json
import numpy as np
import openai
from dotenv import dotenv_values
from openai.embeddings_utils import cosine_similarity, get_embedding
import uuid
import pandas as pd
import pinecone
def create_lexicon(generate: bool = False):
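# Build (or load from CSV) an embedding lexicon of all Columbia skills, knowledge, and abilities, drop near-duplicates (cosine similarity above 0.85), and upsert the remaining vectors into the Pinecone 'infact' index in chunks of 100.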
config = dotenv_values("/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/.env")
openai.api_key = config.get("SECRET_KEY")
# get api key from app.pinecone.io
PINECONE_API_KEY = config.get('PINECONE_KEY')
# find your environment next to the api key in pinecone console
PINECONE_ENV = config.get('PINECONE_ENV')
if generate:
# Get a list of all skills, knowledge, abilities
skills_columbia_fall = pd.read_csv(f'/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/RawData/Columbia_Fall2023_v1/SkillOutputv2.csv')['Skill'].tolist()
skills_columbia_spring = pd.read_csv(f'/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/RawData/Columbia_Spring2023_v1/SkillOutputv2.csv')['Skill'].tolist()
knowledge_columbia_fall = pd.read_csv(f'/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/RawData/Columbia_Fall2023_v1/KnowledgeOutputv2.csv')['Skill'].tolist()
knowledge_columbia_spring = pd.read_csv(f'/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/RawData/Columbia_Spring2023_v1/KnowledgeOutputv2.csv')['Skill'].tolist()
abilities_columbia_fall = pd.read_csv(f'/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/RawData/Columbia_Fall2023_v1/AbilitiesOutputv2.csv')['Skill'].tolist()
abilities_columbia_spring = pd.read_csv(f'/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/RawData/Columbia_Spring2023_v1/AbilitiesOutputv2.csv')['Skill'].tolist()
# Combine all lists into a dataframe with one column
key_words = skills_columbia_fall + skills_columbia_spring + knowledge_columbia_fall + knowledge_columbia_spring + abilities_columbia_fall + abilities_columbia_spring
key_words = list(set(key_words))
# Create a new dataframe with three columns: 'id' and 'values' and 'metadata'
df = pd.DataFrame(columns=['id', 'values', 'metadata'])
for i, word in enumerate(key_words):
embedding_model = "text-embedding-ada-002"
embedding = get_embedding(word, engine=embedding_model)
df.loc[i] = [uuid.uuid4().hex, embedding, {'text': word}]
# Save to csv
df.to_csv(f'/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/RawData/lexicon.csv', index=False)
else:
# Read from csv
df = pd.read_csv(f'/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/RawData/lexicon.csv')
# Go through each row in dataframe and convert metadata to a dictionary
# for i, row in df.iterrows():
# # Replace ' with " in metadata
# row['metadata'] = row['metadata'].replace("'", '"')
# df.loc[i, 'metadata'] = json.loads(row['metadata'])
# Loop through each row and see if there is a similar row with greater than 0.85 cosine similarity. If so, drop current row
for i, row in df.iterrows():
for j, row2 in df.iterrows():
if row['values'] == row2['values']:
# same row (or an exact duplicate); skip the self-comparison
continue
if cosine_similarity(row['values'], row2['values']) > 0.85:
df.drop(i, inplace=True)
break
# Send to pinecone
pinecone.init(
api_key=PINECONE_API_KEY,
environment=PINECONE_ENV
)
index = pinecone.GRPCIndex("infact")
dict_to_upsert = df.to_dict('records')
# Split up dict_to_upsert into 100 record chunks
chunks = [dict_to_upsert[x:x + 100] for x in range(0, len(dict_to_upsert), 100)]
for chunk in chunks:
index.upsert(vectors=chunk, show_progress=True) | [] |
2024-01-10 | Vinayak-Kannan/INfACT | SupportingFunction~ParseData.py | import openai
import pandas as pd
from dotenv import dotenv_values
from langchain.text_splitter import RecursiveCharacterTextSplitter
from openai.embeddings_utils import get_embedding, cosine_similarity
def parse_scraped_data(school):
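# End-to-end extraction for one school: load the scraped course data, ask the model for skills / knowledge / abilities per course, and write the three result CSVs.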
config = dotenv_values("/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/.env")
openai.api_key = config.get("SECRET_KEY")
baseDF = create_dataframe(school)
skillDF = pd.DataFrame(columns=['Skill', 'Explanation', 'Related Course', 'Credits'])
knowledgeDF = pd.DataFrame(columns=['Skill', 'Explanation', 'Related Course', 'Credits'])
abilitiesDF = pd.DataFrame(columns=['Skill', 'Explanation', 'Related Course', 'Credits'])
newDFs = get_skills(baseDF, baseDF, skillDF, knowledgeDF, abilitiesDF)
skillDF = newDFs[0]
knowledgeDF = newDFs[1]
abilitiesDF = newDFs[2]
skillDF.to_csv(f'/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/RawData/{school}/SkillOutputv2.csv',
index=False)
knowledgeDF.to_csv(f'/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/RawData/{school}/KnowledgeOutputv2.csv',
index=False)
abilitiesDF.to_csv(f'/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/RawData/{school}/AbilitiesOutputv2.csv',
index=False)
def create_dataframe(school):
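# Load the scraped course sheet, combine title, description, and syllabus into one text column, and add empty output columns.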
data = pd.read_csv(f'/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/RawData/{school}/Data - Sheet1.csv')
data['Syllabus'].fillna('', inplace=True)
data["combined"] = (
"Title: " + data.Title.str.strip() + "; Description: " + data.Description.str.strip() + "Syllabus: " + data.Syllabus.str.strip()
)
data['Skills'] = ""
data['Knowledge'] = ""
data['Abilities'] = ""
data['SkillsAndExplanation'] = ""
data['KnowledgeAndExplanation'] = ""
data['AbilitiesAndExplanation'] = ""
return data
def get_course_label(course_description):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=course_description,
temperature=0
)
return response
def get_skills(df, baseDF, skillDF, knowledgeDF, abilitiesDF):
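# For every course, chunk the combined text, keep only the chunks most similar to the extraction prompt when the text is too long, ask gpt-3.5-turbo for skills / knowledge / abilities, and parse the reply into the three DataFrames.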
prompt = """
Review the course title and description below and give me a list of at most 5 'skills' and at most 5 'knowledge' and at most 5 'abilities' students who take this course are going to learn. The definitions for 'skill', 'knowledge', and 'ability' are below:
A 'skill' refers to something taught in a course that is quantifiable and is measured as handling or manipulating things, data or people, either verbally, manually or mentally to accomplish an objective. Skills can be developed with practice or appropriate training. Examples of skills include carpentry, computer repair, leadership, public speaking.
A 'knowledge' is defined as the body of information that you have that can be applied in helping you to do the job. Knowledge can be quantified. Examples of knowledge are federal regulations, document preparation practices, engineering practices
An 'ability' is the capacity to express a skill. Typically, abilities are the tasks completed on the job. Skills and abilities are often used interchangeably, but there are subtle differences. Ability is the capacity to perform, where a skill is the actual performing. Examples of abilities are the ability to organize, the ability to analyze issues, the ability to communicate verbally, the ability to communicate in writing
Provide the skills, knowledge, and abilities as a list separated by commas with an explanation of why this skill, knowledge, or ability was selected, following the format of the example below:
Skills:
SQL: Students will gain proficiency in SQL
XML: Students will learn about XML (eXtensible Markup Language) and its use in databases
Knowledge:
Database design: Students will learn how to design a database
Abilities:
Problem-solving: Students will develop the ability of problem-solving as they work on programming projects
Communication: Students will enhance their communication skills.
The course description and title are as follows:
"""
for i, row in enumerate(df.index):
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=250)
print(df.combined[i])
all_splits = text_splitter.split_text(df.combined[i])
embedding_model = "text-embedding-ada-002"
embeddings = [[get_embedding(x, engine=embedding_model), x] for x in all_splits]
prompt_embedding = get_embedding(prompt, engine=embedding_model)
total_tokens = len(embeddings) * 500 * 4
if total_tokens > 4000:
print("here")
max_chunks = 30
# Sort the embeddings by cosine_similarity from openai package to prompt embedding
embeddings = sorted(embeddings, key=lambda x: cosine_similarity(x[0], prompt_embedding), reverse=True)
embeddings = embeddings[:max_chunks]
# Combine elements in embeddings into a single string
combined_embedding_text = [x[1] for x in embeddings]
combined_embedding_text = " ".join(combined_embedding_text)
response = get_course_label([{
"role": "user",
"content": prompt + combined_embedding_text
}])
try:
newDFs = parse_response(response.choices[0].message.content, i, baseDF, df.Title[i], df.Credits[i], skillDF,
knowledgeDF, abilitiesDF, df.Semester[i])
skillDF = newDFs[0]
knowledgeDF = newDFs[1]
abilitiesDF = newDFs[2]
except:
print("Error, unable to parse this course")
continue
return [skillDF, knowledgeDF, abilitiesDF]
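# Illustrative note (not part of the original pipeline): parse_response below relies on
# the model answering in the exact section format requested by the prompt above, e.g.:
#
#   Skills:
#   1. SQL: Students will gain proficiency in SQL
#   Knowledge:
#   1. Database design: Students will learn how to design a database
#   Abilities:
#   1. Problem-solving: Students will practice problem-solving on projects
#
# Each item line is split once on ': ', and a leading "N. " numbering is assumed and
# stripped via name[3:]; unnumbered items would lose their first three characters.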
def parse_response(response, index, df, courseName, credits, skillDF, knowledgeDF, abilitiesDF, semester):
print(response)
# Split the response into lines and remove empty lines
lines = [line.strip() for line in response.split('\n') if line.strip()]
# Initialize lists for skills and competencies
skills = []
knowledge = []
abilities = []
# Parse the response and populate the lists
current_section = None
for line in lines:
if line == "Skills:":
current_section = "skills"
elif line == "Knowledge:":
current_section = "knowledge"
elif line == "Abilities:":
current_section = "abilities"
else:
# name[3:] below assumes the model numbers each item ("1. SQL: ..."), stripping the "N. " prefix
name, explanation = line.split(': ', 1)
if current_section == "skills":
skills.append({"Name": name[3:], "Explanation": explanation})
new_row = pd.DataFrame(
{'Skill': name[3:], 'Explanation': explanation, 'Related Course': courseName, 'Credits': credits, 'Semester': semester},
index=[0])
skillDF = pd.concat([skillDF, new_row], ignore_index=True)
elif current_section == "knowledge":
knowledge.append({"Name": name[3:], "Explanation": explanation})
new_row = pd.DataFrame(
{'Skill': name[3:], 'Explanation': explanation, 'Related Course': courseName, 'Credits': credits, 'Semester': semester},
index=[0])
knowledgeDF = pd.concat([knowledgeDF, new_row], ignore_index=True)
elif current_section == "abilities":
abilities.append({"Name": name[3:], "Explanation": explanation})
new_row = pd.DataFrame(
{'Skill': name[3:], 'Explanation': explanation, 'Related Course': courseName, 'Credits': credits, 'Semester': semester},
index=[0])
abilitiesDF = pd.concat([abilitiesDF, new_row], ignore_index=True)
# Convert the lists to DataFrames
skills_df = pd.DataFrame(skills)
knowledge_df = pd.DataFrame(knowledge)
abilities_df = pd.DataFrame(abilities)
# Merge the DataFrames with the existing DataFrame, df
df.at[index, 'Skills'] = skills_df['Name'].str.cat(sep=', ')
df.at[index, 'Knowledge'] = knowledge_df['Name'].str.cat(sep=', ')
df.at[index, 'Abilities'] = abilities_df['Name'].str.cat(sep=', ')
df.at[index, 'SkillsAndExplanation'] = (skills_df['Name'] + ': ' + skills_df['Explanation']).str.cat(sep=', ')
df.at[index, 'KnowledgeAndExplanation'] = (knowledge_df['Name'] + ': ' + knowledge_df['Explanation']).str.cat(sep=', ')
df.at[index, 'AbilitiesAndExplanation'] = (abilities_df['Name'] + ': ' + abilities_df['Explanation']).str.cat(sep=', ')
return [skillDF, knowledgeDF, abilitiesDF]
| [
"\n Review the course title and description below and give me a list of at most 5 'skills' and at most 5 'knowledge' and at most 5 'abilities' students who take this course are going to learn. The definition for 'skill' and 'competency' are below:\n A 'skill' refers to something taught in a course that is quantifiable and is measured as handling or manipulating things, data or people, either verbally, manually or mentally to accomplish an objective. Skills can be developed with practice or appropriate training. Examples of skills include carpentry, computer repair, leadership, public speaking.\n\n A 'knowledge' is defined as the body of information that you have that can be applied in helping you to do the job. Knowledge can be quantified. Examples of knowledge are federal regulations, document preperation practices, engineering practices\n\n A 'ability' is the capacity to express a skill. Typically, abilities are the tasks completed on the job. Skills and abilities are often used interchangeably, but there are subtle differences. Ability is the capacity to perform, where a skill is the actual performing. Examples of abilities are the ability to organize, the ability to analyze issues, the ability to communicate verbally, the ability to communicate in writing\n\n Provide the skills, knowledge, and abilities as a list separated by commas with an explanation of why this skill, knowledge, or ability was selected, following the format of the example below:\n\n Skills:\n SQL: Students will gain proficiency in SQL\n XML: Students will learn about XML (eXtensible Markup Language) and its use in databases\n\n Knowledge:\n Database design: Students will learn how to design a database\n\n Abilities:\n Problem-solving: Students will develop the ability of problem-solving as they work on programming projects\n Communication: Students will enhance their communication skills.\n\n\n The course description and title are as follows:\n ",
"PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | Vinayak-Kannan/INfACT | SupportingFunction~Playground.py | import time
import openai
import pandas as pd
from dotenv import dotenv_values
import numpy as np
import hdbscan
config = dotenv_values("/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/.env")
openai.api_key = config.get("SECRET_KEY")
columbia = pd.read_csv('/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/RawData/Columbia_Fall2023_v1/KnowledgeOutputUpdated.csv')
embeddings = pd.read_csv('/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/RawData/Columbia_Fall2023_v1/KnowledgeOutputv2.csv')
mit = pd.read_csv('/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/RawData/MIT_Fall2023_v1/KnowledgeOutputUpdated.csv')
mit_embeddings = pd.read_csv('/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/RawData/MIT_Fall2023_v1/KnowledgeOutputv2.csv')
industry = pd.read_csv('/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/RawData/Amazon/Amazon_KW_Embeddings.csv')
industry_outcome = pd.read_csv('/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/RawData/Amazon/Amazon knowledge Collapsed.csv')
# Drop all columns in embeddings except for 'Skill' and 'Embedding'
# take explicit copies so the 'Group' column assignments below don't trigger SettingWithCopyWarning
columbia_embeddings = embeddings[['Skill', 'Embedding']].copy()
mit_embeddings = mit_embeddings[['Skill', 'Embedding']].copy()
industry = industry[['Skill', 'Embedding']].copy()
columbia_embeddings['Group'] = ''
mit_embeddings['Group'] = ''
industry['Group'] = ''
# Join embeddings to columbia on 'Skill' column
# columbia = columbia.join(embeddings.set_index('Skill'), on='Skill')
# Go through each row in df and convert 'Embedding' column to a list of embeddings
for i, row in columbia_embeddings.iterrows():
embedding_text = row['Embedding']
# Drop first and last character from embedding_text
embedding_text = embedding_text[1:-1]
# Split embedding_text by comma
embedding_text = embedding_text.split(",")
embedding_array = []
for val in embedding_text:
embedding_array.append(float(val))
columbia_embeddings['Embedding'].update(pd.Series([embedding_array], index=[i]))
for i, row in mit_embeddings.iterrows():
embedding_text = row['Embedding']
# Drop first and last character from embedding_text
embedding_text = embedding_text[1:-1]
# Split embedding_text by comma
embedding_text = embedding_text.split(",")
embedding_array = []
for val in embedding_text:
embedding_array.append(float(val))
mit_embeddings['Embedding'].update(pd.Series([embedding_array], index=[i]))
# for i, row in columbia.iterrows():
# embedding_text = row['Embedding_Context']
# # Drop first and last character from embedding_text
# embedding_text = embedding_text[1:-1]
# # Split embedding_text by comma
# embedding_text = embedding_text.split(",")
# embedding_array = []
# for val in embedding_text:
# embedding_array.append(float(val))
#
# columbia['Embedding_Context'].update(pd.Series([embedding_array], index=[i]))
for i, row in industry.iterrows():
embedding_text = row['Embedding']
# Drop first and last character from embedding_text
embedding_text = embedding_text[1:-1]
# Split embedding_text by comma
embedding_text = embedding_text.split(",")
embedding_array = []
for val in embedding_text:
embedding_array.append(float(val))
industry['Embedding'].update(pd.Series([embedding_array], index=[i]))
# Combine embeddings from columbia and industry
embeddings = columbia_embeddings['Embedding'].tolist() + industry['Embedding'].tolist() + mit_embeddings['Embedding'].tolist()
skills = columbia_embeddings['Skill'].tolist() + industry['Skill'].tolist() + mit_embeddings['Skill'].tolist()
pre_skills = pd.DataFrame(list(zip(skills, embeddings)), columns=['Skill', 'Embedding'])
# drop rows from pre_skills where 'Skill' is duplicate
pre_skills = pre_skills.drop_duplicates(subset=['Skill'])
embeddings = pre_skills['Embedding'].tolist()
skills = pre_skills['Skill'].tolist()
print(len(skills), len(embeddings))
hdb = hdbscan.HDBSCAN(min_cluster_size=5, min_samples=1, prediction_data=True).fit(embeddings)
print(len(embeddings))
print(hdb.labels_.max())
print(np.count_nonzero(hdb.labels_ == -1))
print(np.bincount(hdb.labels_[hdb.labels_ != -1]))
labels = hdb.labels_
membership_vectors = [np.argmax(x) for x in hdbscan.all_points_membership_vectors(hdb)]
max_vectors = np.asarray([max(x) for x in hdbscan.all_points_membership_vectors(hdb)])
# Delete elements from max_vector is index of element in hdb.labels_ does not equal -1
max_vectors = np.asarray(max_vectors[hdb.labels_ == -1])
avg_threshold = np.percentile(max_vectors, 25)
print(avg_threshold)
max_vectors = np.asarray([max(x) for x in hdbscan.all_points_membership_vectors(hdb)])
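# Sketch of the reassignment step implemented below (assuming the default RangeIndex, so
# the positional index i lines up with hdb.labels_): HDBSCAN marks low-confidence points
# as noise (-1); all_points_membership_vectors gives each point a soft score per cluster,
# so a noise point whose best score clears the 25th-percentile threshold above is moved
# into its most likely cluster, roughly:
#
#   if labels[i] == -1 and max_vectors[i] > avg_threshold:
#       labels[i] = membership_vectors[i]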
# for i in range(len(hdb.labels_)):
# if i < len(columbia):
# columbia_embeddings['Group'][i] = hdb.labels_[i]
# elif i < len(columbia) + len(industry):
# industry['Group'][i - len(columbia)] = hdb.labels_[i]
# else:
# mit_embeddings['Group'][i - len(columbia) - len(industry)] = hdb.labels_[i]
groups = hdb.labels_
# Create dataframe with columns Skills and Group using skills and groups lists
collapsed_skills = pd.DataFrame(list(zip(skills, groups)), columns=['Skill', 'Group'])
collapsed_skills['Collapsed Skill'] = ''
for i, row in collapsed_skills.iterrows():
if row['Group'] == -1 and max_vectors[i] > avg_threshold:
collapsed_skills.loc[i, 'Group'] = membership_vectors[i]
# Print count of rows where Group is -1
print("NEW: " + str(len(collapsed_skills[collapsed_skills['Group'] == -1])))
prompt = """
Summarize the following skills into a label that represents all the skills. The label can consist of multiple words. You should only output the new label with no other information or explanation
"""
count = 0
for i, row in collapsed_skills.iterrows():
print(i)
# Print count of rows where 'Collapsed Skill' column is not empty
print(len(collapsed_skills[collapsed_skills['Collapsed Skill'] != '']))
if collapsed_skills.loc[i, 'Group'] == -1:
collapsed_skills.loc[i, 'Collapsed Skill'] = row['Skill']
elif collapsed_skills.loc[i, 'Collapsed Skill'] != '':
continue
else:
count += 1
print(str(count) + " vs " + str(hdb.labels_.max()))
# Get all instances of row['Skill'] where row['Group'] is equal to row['Group']
instances = collapsed_skills[collapsed_skills['Group'] == row['Group']]['Skill'].tolist()
print(instances)
# Join instances into a string separated by commas
instances = ', '.join(instances)
# build the chat messages separately so a failed attempt can't feed the previous
# response object back in as `messages` on the next retry
messages = [{
"role": "user",
"content": prompt + instances
}]
# Try to use ChatCompletion API. If no response after 30 seconds, try again
while True:
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
temperature=0,
timeout=30
)
if response and response.choices and response.choices[0] and response.choices[0].message and response.choices[0].message.content:
break
else:
print("Trying again")
time.sleep(1)
except Exception:
print("Trying again")
time.sleep(1)
print(response.choices[0].message.content)
# In all instances of row['Skill'] where row['Group'] is equal to row['Group'], set 'Collapsed Skill' to response.choices[0].message.content
collapsed_skills.loc[collapsed_skills['Group'] == row['Group'], 'Collapsed Skill'] = response.choices[0].message.content
print(collapsed_skills)
# Join collapsed_skills 'Collapsed Skill' to columbia on 'Skill' column
# Drop 'Collapsed Skill' column from columbia
columbia = columbia.drop('Collapsed Skill', axis=1)
collapsed_skills = collapsed_skills.drop('Group', axis=1)
columbia = columbia.join(collapsed_skills.set_index('Skill'), on='Skill')
# Join collapsed_skills 'Collapsed Skill' to mit on 'Skill' column
mit = mit.drop('Collapsed Skill', axis=1)
mit = mit.join(collapsed_skills.set_index('Skill'), on='Skill')
industry_outcome = industry_outcome.drop('Collapsed Skill', axis=1)
industry_outcome = industry_outcome.join(collapsed_skills.set_index('Skill'), on='Skill')
columbia.to_csv('/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/RawData/Columbia_Fall2023_v1/KnowledgeOutputUpdatedAmazon11012023.csv', index=False)
mit.to_csv('/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/RawData/MIT_Fall2023_v1/KnowledgeOutputUpdatedAmazon11012023.csv', index=False)
# industry_outcome.to_csv('/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/RawData/Amazon/Collapsed Skills11012023.csv', index=False)
industry_outcome.to_csv('/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/RawData/Amazon/Collapsed Knowledge11012023.csv', index=False)
# for i, row in industry.iterrows():
# embedding_text = row['Embedding_Context']
# # Drop first and last character from embedding_text
# embedding_text = embedding_text[1:-1]
# # Split embedding_text by comma
# embedding_text = embedding_text.split(",")
# embedding_array = []
# for val in embedding_text:
# embedding_array.append(float(val))
#
# industry['Embedding_Context'].update(pd.Series([embedding_array], index=[i]))
# output = hdb.labels_.tolist()
# # Compute the histogram of the array
# hist, bin_edges = np.histogram(output)
#
# # Plot the histogram
# plt.hist(output, bins=bin_edges, edgecolor='black')
# plt.xlabel('Integer')
# plt.ylabel('Count')
# plt.title('Histogram of Integers in Array')
# plt.show()
# columbia['Similar Skill'] = ''
# industry['Collapsed Skill'] = ''
# for i, r in industry.iterrows():
# for j, r2 in columbia.iterrows():
# if cosine_similarity(r['Embedding'], r2['Embedding']) > 0.83:
# industry.loc[i, 'Collapsed Skill'] = r2['Collapsed Skill']
#
# # Drop all columns from industry except for 'Collapsed Skill' and 'Skill'
# industry = industry[['Collapsed Skill', 'Skill']]
# for i, row in industry_outcome.iterrows():
# if row['Skill'] == row['Collapsed Skill'] and row['Skill'] in industry['Skill'].values:
# # Find index of row in industry where 'Skill' column is equal to row['Skill']
# index = industry.index[industry['Skill'] == row['Skill']].tolist()[0]
# # Update 'Collapsed Skill' column in industry with value from industry_outcome['Collapsed Skill']
# industry_outcome.loc[i, 'Collapsed Skill'] = industry['Collapsed Skill'][index]
# Save industry to csv
# industry_outcome.to_csv('/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/RawData/Amazon/Collapsed Skills v2.csv', index=False)
# for i, r in columbia.iterrows():
# for j, r2 in mit.iterrows():
# if cosine_similarity(r['Embedding_Context'], r2['Embedding_Context']) > 0.85:
# columbia.loc[i, 'Similar Skill Context'] = r2['Skill']
# break
#
# mit['Similar Skill'] = ''
# mit['Similar Skill Context'] = ''
# for i, r in mit.iterrows():
# for j, r2 in columbia.iterrows():
# if cosine_similarity(r['Embedding'], r2['Embedding']) > 0.85:
# mit.loc[i, 'Similar Skill'] = r2['Skill']
# break
#
# for i, r in mit.iterrows():
# for j, r2 in columbia.iterrows():
# if cosine_similarity(r['Embedding_Context'], r2['Embedding_Context']) > 0.85:
# mit.loc[i, 'Similar Skill Context'] = r2['Skill']
# break
# Count number of values in 'Similar Skill' column that are not empty
# print("Columbia")
# print(len(columbia[columbia['Similar Skill'] != '']))
# print(len(columbia[columbia['Similar Skill Context'] != '']))
# print(len(columbia))
#
# print("MIT")
# print(len(mit[mit['Similar Skill'] != '']))
# print(len(mit[mit['Similar Skill Context'] != '']))
# print(len(mit))
#
# # Get all values in 'Skill' column that contain the word 'programming'
# print(columbia[columbia['Skill'].str.contains('Deep learning')]['Skill'])
# print(columbia[columbia['Skill'].str.contains('Deep learning')]['Similar Skill'])
# Print number of values in 'Collapsed Skill' column that are not empty
# # Load in data from csv industry collapsed skills
# industry = pd.read_csv('/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/RawData/Industry/Collapsed Skills.csv')
# # Load in data from columbia skill output
# columbia = pd.read_csv('/Users/vinayakkannan/Desktop/INfACT/Script/SupportingFunction/RawData/Columbia_Fall2023_v1/SkillOutputUpdated.csv')
# # Print Skill and 'Collapsed Skill' columns from industry where 'Collapsed Skill' value is in columbia 'Collapsed Skill' column
# print(industry[industry['Collapsed Skill'].isin(columbia['Collapsed Skill'])]['Collapsed Skill']) | [
"\nSummarize the following skills into a label that represent all the skills. The label can consist of multiple words. You should only output the new label with no other information or explanation\n\n",
"PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | mirzaAsca/auto-blogger | FOLDER~gpt_researcher.py | import json
# Import necessary modules for text processing, web scraping, and searching
from processing.text import summarize_text
from actions.web_scrape import scrape_text_with_selenium
from actions.web_search import web_search
# Import Langchain and related utilities for AI-driven chat and prompt management
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableMap, RunnableLambda
from langchain.schema.messages import SystemMessage
from agent.prompts import auto_agent_instructions, generate_search_queries_prompt
from config import Config
# Load global configurations
CFG = Config()
# [Initiation] Prepare the AI-driven message template for generating search queries
search_message = (generate_search_queries_prompt("{question}"))
SEARCH_PROMPT = ChatPromptTemplate.from_messages([
("system", "{agent_prompt}"),
("user", search_message)
])
# Load instructions for automated agent behavior
AUTO_AGENT_INSTRUCTIONS = auto_agent_instructions()
CHOOSE_AGENT_PROMPT = ChatPromptTemplate.from_messages([
SystemMessage(content=AUTO_AGENT_INSTRUCTIONS),
("user", "task: {task}")
])
# [Content Retrieval and Summarization and Analysis] Define the process for scraping and summarizing text from a URL
scrape_and_summarize = {
"question": lambda x: x["question"],
"text": lambda x: scrape_text_with_selenium(x['url'])[1],
"url": lambda x: x['url']
} | RunnableMap({
"summary": lambda x: summarize_text(text=x["text"], question=x["question"], url=x["url"]),
"url": lambda x: x['url']
}) | (lambda x: f"Source Url: {x['url']}\nSummary: {x['summary']}")
# Initialize a set to keep track of URLs that have already been seen to avoid duplicate content
seen_urls = set()
# [Web Search and Content Retrieval] Define the process for conducting multiple searches, avoiding duplicate URLs, and processing the results
multi_search = (
lambda x: [
{"url": url.get("href"), "question": x["question"]}
for url in json.loads(web_search(query=x["question"], num_results=3))
if not (url.get("href") in seen_urls or seen_urls.add(url.get("href")))
]
) | scrape_and_summarize.map() | (lambda x: "\n".join(x))
# Set up the search query and agent choice mechanisms using AI models
search_query = SEARCH_PROMPT | ChatOpenAI(model=CFG.smart_llm_model) | StrOutputParser() | json.loads
choose_agent = CHOOSE_AGENT_PROMPT | ChatOpenAI(model=CFG.smart_llm_model) | StrOutputParser() | json.loads
# [Initiation] Define how to get search queries based on agent prompts
get_search_queries = {
"question": lambda x: x,
"agent_prompt": {"task": lambda x: x} | choose_agent | (lambda x: x["agent_role_prompt"])
} | search_query
class GPTResearcherActor:
# [Compilation and Output] Define the complete runnable process for the GPT Researcher, compiling all steps
@property
def runnable(self):
return (
get_search_queries
| (lambda x: [{"question": q} for q in x])
| multi_search.map()
| (lambda x: "\n\n".join(x))
)
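# Illustrative usage sketch (not in the original module); the research question is made up:
#
#   researcher = GPTResearcherActor()
#   summaries = researcher.runnable.invoke("What is quantum error correction?")
#
# The pipeline generates search queries, fans them out through multi_search, and joins
# the per-source summaries into a single research context string.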
| [
"[('system', '{agent_prompt}'), ('user', PLACEHOLDER)]",
"{agent_prompt}",
"task: {task}"
] |
2024-01-10 | mirzaAsca/auto-blogger | agent~llm_utils.py | from __future__ import annotations
import json
from fastapi import WebSocket
import time
import openai
from langchain.adapters import openai as lc_openai
from colorama import Fore, Style
from openai.error import APIError, RateLimitError
from agent.prompts import auto_agent_instructions
from config import Config
CFG = Config()
openai.api_key = CFG.openai_api_key
from typing import Optional
import logging
def create_chat_completion(
messages: list, # type: ignore
model: Optional[str] = None,
temperature: float = CFG.temperature,
max_tokens: Optional[int] = None,
stream: Optional[bool] = False,
websocket: WebSocket | None = None,
) -> str:
"""Create a chat completion using the OpenAI API
Args:
messages (list[dict[str, str]]): The messages to send to the chat completion
model (str, optional): The model to use. Defaults to None.
temperature (float, optional): The temperature to use. Defaults to CFG.temperature.
max_tokens (int, optional): The max tokens to use. Defaults to None.
stream (bool, optional): Whether to stream the response. Defaults to False.
Returns:
str: The response from the chat completion
"""
# validate input
if model is None:
raise ValueError("Model cannot be None")
if max_tokens is not None and max_tokens > 8001:
raise ValueError(f"Max tokens cannot be more than 8001, but got {max_tokens}")
if stream and websocket is None:
raise ValueError("Websocket cannot be None when stream is True")
# create response
for attempt in range(10):  # maximum of 10 attempts
    try:
        response = send_chat_completion_request(
            messages, model, temperature, max_tokens, stream, websocket
        )
        return response
    except (RateLimitError, APIError) as e:
        logging.warning("OpenAI API error on attempt %d/10: %s; retrying", attempt + 1, e)
        time.sleep(10 * (attempt + 1))
logging.error("Failed to get response from OpenAI API")
raise RuntimeError("Failed to get response from OpenAI API")
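# Illustrative usage sketch (not part of the original module); the model name and
# message content are assumptions:
#
#   answer = create_chat_completion(
#       messages=[{"role": "user", "content": "Summarize the research in one line."}],
#       model="gpt-3.5-turbo-16k",
#   )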
def send_chat_completion_request(
messages, model, temperature, max_tokens, stream, websocket
):
if not stream:
result = lc_openai.ChatCompletion.create(
model=model, # Change model here to use different models
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
provider=CFG.llm_provider, # Change provider here to use a different API
)
return result["choices"][0]["message"]["content"]
else:
return stream_response(model, messages, temperature, max_tokens, websocket)
async def stream_response(model, messages, temperature, max_tokens, websocket):
paragraph = ""
response = ""
print(f"LLM_UTILS streaming response...")
for chunk in lc_openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
provider=CFG.llm_provider,
stream=True,
):
content = chunk["choices"][0].get("delta", {}).get("content")
if content is not None:
response += content
paragraph += content
if "\n" in paragraph:
await websocket.send_json({"type": "report", "output": paragraph})
paragraph = ""
print(f"LLM_UTILS streaming response complete")
return response
def choose_agent(task: str) -> dict:
"""Determines what agent should be used
Args:
task (str): The research question the user asked
Returns:
agent - The agent that will be used
agent_role_prompt (str): The prompt for the agent
"""
try:
response = create_chat_completion(
model=CFG.smart_llm_model,
messages=[
{"role": "system", "content": f"{auto_agent_instructions()}"},
{"role": "user", "content": f"task: {task}"},
],
temperature=0,
)
return json.loads(response)
except Exception as e:
print(f"{Fore.RED}Error in choose_agent: {e}{Style.RESET_ALL}")
return {
"agent": "Default Agent",
"agent_role_prompt": "You are an AI critical thinker research assistant. Your sole purpose is to write well written, critically acclaimed, objective and structured reports on given text.",
}
| [
"task: PLACEHOLDER"
] |
2024-01-10 | mirzaAsca/auto-blogger | FOLDER~reviser.py | from langchain.chat_models import ChatOpenAI, ChatAnthropic
from langchain.schema.output_parser import StrOutputParser
from langchain.prompts import SystemMessagePromptTemplate
from config import Config
CFG = Config()
class ReviserActor:
def __init__(self):
print("REVISER Initializing ReviserActor") # Indicates the start of the EditorActor initialization
self.model = ChatOpenAI(model=CFG.smart_llm_model)
self.prompt = SystemMessagePromptTemplate.from_template(
"You are an expert writer. "
"You have been tasked by your editor with revising the following draft, which was written by a non-expert. "
"You may follow the editor's notes or not, as you see fit."
) + "Draft:\n\n{draft}" + "Editor's notes:\n\n{notes}"
@property
def runnable(self):
return {
"draft": {
"draft": lambda x: x["draft"],
"notes": lambda x: x["notes"],
} | self.prompt | self.model | StrOutputParser()
}
| [
"You are an expert writer. You have been tasked by your editor with revising the following draft, which was written by a non-expert. You may follow the editor's notes or not, as you see fit."
] |
2024-01-10 | mirzaAsca/auto-blogger | FOLDER~writer.py | from langchain.prompts import ChatPromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.schema.output_parser import StrOutputParser
from agent.prompts import generate_report_prompt, generate_agent_role_prompt
from config import Config
CFG = Config()
class WriterActor:
def __init__(self):
print("WRITER Initializing WriterActor...")
self.model = ChatOpenAI(model=CFG.smart_llm_model)
self.prompt = ChatPromptTemplate.from_messages([
("system", generate_agent_role_prompt(agent="Default Agent")),
("user", generate_report_prompt(question="{query}", research_summary="{results}"))
])
@property
def runnable(self):
return {
"answer": {
"query": lambda x: x["query"],
"results": lambda x: "\n\n".join(x["results"])
} | self.prompt | self.model | StrOutputParser()
}
| [
"Default Agent"
] |
2024-01-10 | mirzaAsca/auto-blogger | FOLDER~editor.py | from langchain.chat_models import ChatOpenAI
from langchain.prompts import SystemMessagePromptTemplate
from config import Config
CFG = Config()
EDIT_TEMPLATE = """You are an editor. \
You have been tasked with editing the following draft, which was written by a non-expert. \
Please accept the draft if it is good enough to publish, or send it for revision, along with your notes to guide the revision. \
Things you should be checking for:
- This draft MUST fully answer the original question
- This draft MUST be written in APA format
If not all of the above criteria are met, you should send appropriate revision notes.
"""
class EditorActor:
def __init__(self):
print("EDITOR Initializing EditorActor") # Indicates the start of the EditorActor initialization
self.model = ChatOpenAI(model=CFG.smart_llm_model)
self.prompt = SystemMessagePromptTemplate.from_template(EDIT_TEMPLATE) + "Draft:\n\n{draft}"
self.functions = [
{
"name": "revise",
"description": "Sends the draft for revision",
"parameters": {
"type": "object",
"properties": {
"notes": {
"type": "string",
"description": "The editor's notes to guide the revision.",
},
},
},
},
{
"name": "accept",
"description": "Accepts the draft",
"parameters": {
"type": "object",
"properties": {"ready": {"const": True}},
},
},
]
@property
def runnable(self):
return (
self.prompt | self.model.bind(functions=self.functions)
)
| [
"You are an editor. You have been tasked with editing the following draft, which was written by a non-expert. Please accept the draft if it is good enough to publish, or send it for revision, along with your notes to guide the revision. Things you should be checking for:\n\n- This draft MUST fully answer the original question\n- This draft MUST be written in apa format\n\nIf not all of the above criteria are met, you should send appropriate revision notes.\n"
] |
2024-01-10 | mirzaAsca/auto-blogger | FOLDER~research_team.py | from operator import itemgetter
from langchain.runnables.openai_functions import OpenAIFunctionsRouter
from permchain.connection_inmemory import InMemoryPubSubConnection
from permchain.pubsub import PubSub
from permchain.topic import Topic
"""
This is the research team.
It is a group of autonomous agents that work together to answer a given question
using a comprehensive research process that includes:
- Searching for relevant information across multiple sources
- Extracting relevant information
- Writing a well structured report
- Validating the report
- Revising the report
- Repeat until the report is satisfactory
"""
class ResearchTeam:
def __init__(self, research_actor, editor_actor, reviser_actor):
self.research_actor_instance = research_actor
self.editor_actor_instance = editor_actor
self.revise_actor_instance = reviser_actor
def run(self, query):
print("ResearchTeam: Initialized with query:", query) # Starting the research process
# create topics
editor_inbox = Topic("editor_inbox")
reviser_inbox = Topic("reviser_inbox")
print("ResearchTeam: Topics created - editor_inbox, reviser_inbox")
research_chain = (
# Listed in inputs
Topic.IN.subscribe()
| {"draft": lambda x: self.research_actor_instance.run(x["question"])}
# The draft always goes to the editor inbox
| editor_inbox.publish()
)
print("ResearchTeam: Research chain initialized")
editor_chain = (
# Listen for events in the editor_inbox
editor_inbox.subscribe()
| self.editor_actor_instance.runnable
# Depending on the output, different things should happen
| OpenAIFunctionsRouter(
{
# If revise is chosen, we send a push to the critique_inbox
"revise": (
{
"notes": itemgetter("notes"),
"draft": editor_inbox.current() | itemgetter("draft"),
"question": Topic.IN.current() | itemgetter("question"),
}
| reviser_inbox.publish()
),
# If accepted, then we return
"accept": editor_inbox.current() | Topic.OUT.publish(),
}
)
)
print("ResearchTeam: Editor chain initialized")
reviser_chain = (
# Listen for events in the reviser's inbox
reviser_inbox.subscribe()
| self.revise_actor_instance.runnable
# Publish to the editor inbox
| editor_inbox.publish()
)
print("ResearchTeam: Reviser chain initialized")
web_researcher = PubSub(
research_chain,
editor_chain,
reviser_chain,
connection=InMemoryPubSubConnection(),
)
print("ResearchTeam: PubSub initialized with all chains")
res = web_researcher.invoke({"question": query})
print("ResearchTeam: Invocation complete with result:", res)
return res["draft"]
| [] |
2024-01-10 | spindance/business-modeler-python | business_modeler.py | #!/usr/bin/env python
import contextlib
import os
import re
import time
from datetime import datetime
from typing import Any, Dict
import click
import yaml
from dotenv import load_dotenv
from langchain.callbacks import get_openai_callback
from langchain.callbacks.base import BaseCallbackHandler
from langchain.chains import LLMChain, SequentialChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from md2pdf.core import md2pdf
PROMPT_TEMPLATES_DIR = "templates"
COMMON_PREFIX_FILE = "_common.txt"
CONFIG_FILE = "config.yaml"
OUTPUT_TEMPLATE_FILE = "output.txt"
EXAMPLE_INPUT_FILE = "input_example.md"
DEFAULT_TEMPERATURE = 0.7
DEFAULT_MODEL_NAME = "gpt-3.5-turbo-16k"
def extract_variable_names(template):
"""
Extract and return variable names from a template string.
Parameters:
- template (str): The template string containing variables enclosed in curly braces {}.
Returns:
- list: A list of variable names extracted from the template string.
"""
return re.findall(r"{(.*?)}", template)
def read_prompt_template(template_name, prompt_templates_dir, common_prefix_file):
"""
Read and return content from a prompt template file with a common prefix added to it.
Parameters:
- template_name (str): Name of the template file.
- prompt_templates_dir (str): Directory path containing prompt template files.
- common_prefix_file (str): Name of the file containing common prefix content.
Returns:
- str: Content of the template file with common prefix added.
"""
common_prefix_path = os.path.join(prompt_templates_dir, common_prefix_file)
with open(common_prefix_path, "r") as common_file:
common_prefix = common_file.read()
template_path = os.path.join(prompt_templates_dir, template_name)
with open(template_path, "r") as template_file:
return common_prefix + template_file.read()
def load_chain_config(config_file):
"""
Load and return configuration from a YAML file.
Parameters:
- config_file (str): The path to the configuration YAML file.
Returns:
- dict: Configuration data loaded from the file.
"""
try:
with open(config_file, "r") as file:
return yaml.safe_load(file)
except Exception as e:
print(f"Error loading configuration file: {e}")
return {}
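# Illustrative shape of the YAML consumed above and by build_chain below (a sketch;
# the template file names are assumptions):
#
#   temperature: 0.7
#   model_name: gpt-3.5-turbo-16k
#   prompt_templates_dir: templates
#   common_prefix_file: _common.txt
#   chains:
#     - template_file: customer_segments.txt
#     - template_file: value_proposition.txt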
def read_template(template_name):
"""
Read and return content from a template file.
Parameters:
- template_name (str): Name of the template file.
Returns:
- str: Content of the template file.
"""
path = os.path.join(PROMPT_TEMPLATES_DIR, template_name)
with open(path, "r") as f:
return f.read()
def create_llm_chain(llm, template_file, prompt_templates_dir, common_prefix_file):
"""
Create and return an LLMChain instance configured with the given parameters.
Parameters:
- llm (ChatOpenAI): An instance of ChatOpenAI which is used to perform the language model operations.
- template_file (str): The name of the template file to be used for prompt creation.
- prompt_templates_dir (str): Directory path containing prompt template files.
- common_prefix_file (str): Name of the file containing common prefix content to be appended before the template.
Returns:
- LLMChain: An instance of LLMChain configured with the given parameters.
"""
monitor = CallbackHandler()
# Extract variable names as input_keys
template_content = read_prompt_template(
template_file, prompt_templates_dir, common_prefix_file
)
input_keys = extract_variable_names(template_content)
# Set output_key as the name of the template file without the file extension
output_key = os.path.splitext(template_file)[0]
return LLMChain(
llm=llm,
prompt=PromptTemplate(input_variables=input_keys, template=template_content),
output_key=output_key,
callbacks=[monitor],
tags=[output_key],
)
def build_chain(
api_key,
chains_config,
prompt_templates_dir,
common_prefix_file,
verbose=False,
model_name="gpt-3.5-turbo-16k",
temperature=0.7,
):
"""
Build and return a SequentialChain by running several LLMChains in sequence.
Parameters:
- api_key (str): The API key to access the language model.
- chains_config (list): A list of dictionaries, each containing configuration for a chain (e.g., template file).
- prompt_templates_dir (str): Directory path containing prompt template files.
- common_prefix_file (str): Name of the file containing common prefix content to be appended before the template.
- verbose (bool, optional): If True, prints verbose output. Defaults to False.
- model_name (str, optional): The name of the language model to be used. Defaults to "gpt-3.5-turbo-16k".
- temperature (float, optional): The temperature parameter for the language model. Defaults to 0.7.
Returns:
- SequentialChain: An instance of SequentialChain configured with the chains created from chains_config.
"""
# Initialize ChatOpenAI
llm = ChatOpenAI(openai_api_key=api_key, model=model_name, temperature=temperature)
# Chains created using the create_llm_chain function
chains = [
create_llm_chain(
llm, chain_config["template_file"], prompt_templates_dir, common_prefix_file
)
for chain_config in chains_config
]
# Calculate input_variables and output_variables
input_variables = extract_variable_names(
read_prompt_template(
chains_config[0]["template_file"], prompt_templates_dir, common_prefix_file
)
)
output_variables = [
os.path.splitext(chain_config["template_file"])[0]
for chain_config in chains_config
]
# Sequential chain
sequential_chain = SequentialChain(
chains=chains,
input_variables=input_variables,
output_variables=output_variables,
verbose=verbose,
)
return sequential_chain
class CallbackHandler(BaseCallbackHandler):
"""
Custom callback handler class for monitoring the progress of the chains.
This class is a subclass of BaseCallbackHandler and is used to output
progress information when a chain starts executing.
Attributes:
None
"""
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> Any:
"""
Callback function that is executed when a chain starts.
Parameters:
- serialized (dict): The serialized chain information.
- inputs (dict): The inputs passed to the chain.
- kwargs (dict): Additional keyword arguments containing tags.
Returns:
- None
"""
click.secho(f"Running chain '{''.join(kwargs['tags'])}'", fg="cyan")
def generate_report(output_file, markdown, **chain_output_dict):
"""
Generates a report by converting chain output to markdown and then to PDF.
Parameters:
- output_file (str): The base name of the output file.
- markdown (bool): If True, saves the markdown content to a file.
- chain_output_dict (dict): Dictionary containing the output of the chains.
Returns:
- tuple: The names of the created markdown and PDF files.
"""
output_template = read_template(OUTPUT_TEMPLATE_FILE)
markdown_output = output_template.format(**chain_output_dict)
file_name = output_file or f"output-{datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}"
markdown_file_name = f"{file_name}.md"
pdf_file_name = f"{file_name}.pdf"
# Save markdown content to file
if markdown:
with open(markdown_file_name, "w") as f:
f.write(markdown_output)
# Convert the markdown file to PDF
md2pdf(pdf_file_name, md_content=markdown_output)
# Return the names of the created files
return markdown_file_name, pdf_file_name
def report_results(markdown, markdown_file_name, pdf_file_name, cb, duration):
"""
Reports the results of the report generation including file names,
total tokens, cost, and runtime.
Parameters:
- markdown (bool): If True, indicates markdown file was created.
- markdown_file_name (str): The name of the markdown file.
- pdf_file_name (str): The name of the PDF file.
- cb (CallbackHandler): The callback handler used during report generation.
- duration (float): The total runtime in seconds.
Returns:
- None
"""
if markdown:
click.secho(f"Markdown file created: {markdown_file_name}", fg="green")
click.secho(f"PDF file created: {pdf_file_name}", fg="green")
click.secho(f"Total tokens: {cb.total_tokens}", fg="yellow")
click.secho(f"Total cost: ${cb.total_cost:.2f}", fg="yellow")
click.secho(f"Runtime: {duration:.2f} seconds", fg="yellow")
def check_api_key():
"""
Checks if the OPENAI_API_KEY environment variable is set.
Returns:
- str: The API key if it is set.
Raises:
- SystemExit: If the OPENAI_API_KEY environment variable is not set.
"""
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
click.secho("Error: OPENAI_API_KEY environment variable is not set.", fg="red")
click.secho(
"Please set it by running: export OPENAI_API_KEY=your_api_key", fg="red"
)
exit(1)
return api_key
def read_seed(seed_file):
"""
Reads the content of a seed file or displays an example input if no file is provided.
Parameters:
- seed_file (str): The name of the seed file.
Returns:
- str: The contents of the seed file.
"""
if seed_file is None:
click.secho(f"{read_template(EXAMPLE_INPUT_FILE)}", fg="white")
exit(0)
else:
click.secho(f"Using seed file: {seed_file}", fg="green")
with open(seed_file, "r") as f:
return f.read()
@contextlib.contextmanager
def measure_time():
"""
Context manager for measuring the execution time of a code block.
Yields:
- function: A function that when called, returns the elapsed time in seconds.
"""
start_time = time.time()
yield lambda: time.time() - start_time
@click.command()
@click.option("--seed-file", default=None, help="Path to the seed file.")
@click.option(
"--output-file", default=None, help="Specify the name of the output file."
)
@click.option(
"--markdown", is_flag=True, default=False, help="Save output as markdown."
)
@click.option("--verbose", is_flag=True, default=False, help="Enable verbose output.")
@click.option(
"--config-file", default="config.yaml", help="Path to the configuration file."
)
@click.option("--temperature", default=None, type=float, help="Set the temperature.")
@click.option("--model-name", default=None, type=str, help="Set the model name.")
def main(
seed_file, output_file, markdown, verbose, config_file, temperature, model_name
):
"""Generate a business model from a hunch file."""
# Check API Key
api_key = check_api_key()
# Read seed file
seed = read_seed(seed_file)
# Load the configuration from the specified configuration file
chain_config = load_chain_config(config_file)
# Override temperature and model_name if provided
temperature = temperature or chain_config.get("temperature", DEFAULT_TEMPERATURE)
model_name = model_name or chain_config.get("model_name", DEFAULT_MODEL_NAME)
# Get prompt_templates_dir and common_prefix_file from config or set defaults
prompt_templates_dir = chain_config.get(
"prompt_templates_dir", PROMPT_TEMPLATES_DIR
)
common_prefix_file = chain_config.get("common_prefix_file", COMMON_PREFIX_FILE)
with measure_time() as duration, get_openai_callback() as cb:
# Build and execute chain
chain = build_chain(
api_key,
chain_config["chains"],
prompt_templates_dir,
common_prefix_file,
verbose=verbose,
model_name=model_name,
temperature=temperature,
)
output = chain({"seed": seed})
# Generate report
markdown_file_name, pdf_file_name = generate_report(
output_file, markdown, **output
)
# Reporting on result.
report_results(markdown, markdown_file_name, pdf_file_name, cb, duration())
if __name__ == "__main__":
load_dotenv()
main()
| [
"templates",
"prompt_templates_dir",
"output.txt"
] |
2024-01-10 | digambar2002/desktop-voice-assistant | engine~features.py | import re
from engine.config import *
import requests
from bs4 import BeautifulSoup
import json
import sys
import eel
# This Module is ued to convert text to speech
import pyttsx3
# This module is used to recoginize speech command
import speech_recognition as sr
# Date Time Module to get current date and time
import datetime
# Wkipedia Module to search things on wikipedia
import wikipedia
# OS Module To work On Windows Like Open notepad or cmd
import os
# This Module is use to get time
import time
# This Module is used to play sounds and music
from playsound import playsound
# This module is used to open web browser
import webbrowser
# Give Randoms Facts
# import randfacts
# This function is used to send message or search on google
import pywhatkit as kit
# this module is used to automate system or uses keyboaed keys and mouse
import pyautogui as autogui
# python script showing battery details
import psutil
import sqlite3
# Global Declaration
connection = sqlite3.connect('assistant.sqlite')
cursor = connection.cursor()
engine = pyttsx3.init()
voices = engine.getProperty('voices')
# print(voices[1].id)
engine.setProperty('voice', voices[0].id)
engine.setProperty('rate', 174)
# text to speech
def speak(audio):
engine.say(audio)
eel.WishMessage(audio)
eel.SpeakMessage(audio)
eel.receiverText(audio)
engine.runAndWait()
return audio
# Main words function
def remove_words(word_list, string):
pattern = r'\b(?:%s)\b' % '|'.join(word_list)
modified_string = re.sub(pattern, ' ', string, flags=re.IGNORECASE)
return modified_string
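# Illustrative example of the helper above (a sketch, not called anywhere here):
#   remove_words(["play", "on", "spotify"], "play shape of you on spotify")
# replaces each listed word (whole words only, case-insensitive) with a single space,
# leaving roughly "  shape of you    ".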
def auth_protocol():
# Hide the loader screen and display face auth using js
eel.hideLoader()
speak('ready for face authentication')
speak('performing face authentication')
# Battery status function
def battery():
# function returning time in hh:mm:ss
def convertTime(seconds):
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
return "%d:%02d:%02d" % (hours, minutes, seconds)
battery = psutil.sensors_battery()
speak("Sir, Your Battery Status is: ")
print("Battery percentage : ", battery.percent)
speak(f"Battery Percentage: {battery.percent}")
if battery.power_plugged == True:
print("Power plugged in : ON")
speak("Power plugged in is on")
else:
print("Power plugged in : OFF")
speak("Power plugged in is OFF")
batteryRemaning = str(convertTime(battery.secsleft))
batterylist = batteryRemaning.split(':')
speak(
f"Battery left: {batterylist[0]} hours, {batterylist[1]} minutes, {batterylist[2]} seconds")
print("Battery left : ", convertTime(battery.secsleft))
# Loading Effect
def loading():
music_dir = "audio\\alert sound\\bell_alert.wav"
speak("System Initiating")
playsound(music_dir)
speak("Initializing Database")
eel.TextSet("Initializing Database")
playsound(music_dir)
speak("Adding All The Preferances")
eel.TextSet("Adding All The Preferances")
playsound(music_dir)
speak("System is now fully operational")
eel.TextSet("Starting ...")
# Time Whiches function
def wish():
hour = int(datetime.datetime.now().hour)
currentTime = datetime.datetime.now()
currentTime = currentTime.strftime(
'%I %M %p').lstrip("0").replace(" 0", " ")
if hour > 0 and hour < 12:
eel.WishMessage(speak("Hello, Good Morning "+OWNER_NAME))
elif hour >= 12 and hour < 18:
eel.WishMessage(speak("Hello, Good Afternoon "+OWNER_NAME))
else:
eel.WishMessage(speak("Hello, Good Evening "+OWNER_NAME))
speak('How can i help you')
# Open Commands
def openCommand(query):
query = query.replace(ASSISTANT_NAME, "")
query = query.replace("open", "")
query = query.lower()
# System Command
if query.strip() != "":
cursor.execute(
"SELECT path FROM sys_command WHERE name='%s'" % query.strip())
results = cursor.fetchall()
if len(results) != 0:
flag = results[0]
path = flag[0]
repr(path)
speak("Opening "+query)
os.startfile(path)
else:
cursor.execute(
"SELECT path FROM web_command WHERE name='%s'" % query.strip())
results = cursor.fetchall()
if len(results) != 0:
flag = results[0]
path = flag[0]
repr(path)
speak("Opening "+query)
webbrowser.open(path)
else:
try:
os.system('start '+query)
except:
speak("not found")
else:
pass
# openCommand(" notepad")
def close(query):
query = query.replace(ASSISTANT_NAME, "")
query = query.replace("close", "")
if "notepad" in query:
speak("Closing Notepad")
os.system("TASKKILL /F /IM notepad.exe")
elif "chrome" in query:
speak("closing chrome")
os.system("TASKKILL /F /IM chrome.exe")
elif "xampp" in query:
speak("closing xampp")
os.system("TASKKILL /F /IM xampp-control.exe")
elif "spotify" in query:
speak("closing spotify")
os.system("TASKKILL /F /IM spotify.exe")
else:
pass
# search on web browser
def searchTerm(query):
query = query.replace(ASSISTANT_NAME, "")
query = query.replace("search", "")
speak("please sir, wait for a minute")
kit.search(query)
speak("here what i found on web")
# term = wikipedia.summary(query, sentences=2)
# speak(term)
# Chat Gpt
# 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
def chatGPT(query):
query = query.replace(ASSISTANT_NAME, "")
query = query.replace("search", "")
import openai
openai.api_key = "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"
prompt = query
try:
response = openai.Completion.create(
engine="text-davinci-002",
prompt=prompt,
max_tokens=600,
n=1,
stop=None,
temperature=0.5,
)
speak(response.choices[0].text.strip())
except:
speak("something went wrong")
def chatGPT2(query):
query = query.replace(ASSISTANT_NAME, "")
query = query.replace("search", "")
prompt = query
try:
url = "https://open-ai21.p.rapidapi.com/chat"
payload = {"message": prompt}
headers = {
"content-type": "application/json",
"X-RapidAPI-Key": "d68c7a4ca0mshfe7db0559a72ad6p1f118fjsnb3beeaa77aac",
"X-RapidAPI-Host": "open-ai21.p.rapidapi.com"
}
response = requests.post(url, json=payload, headers=headers)
message = response.json()['ChatGPT']
print(message)
speak(message)
except:
speak("something went wrong")
# Play On YouTube
def PlayYoutube(query):
query = query.replace(ASSISTANT_NAME, "")
query = query.replace("search", "")
query = query.replace("play", "")
query = query.replace("on youtube", "")
speak("Playing"+query+"on YouTube")
kit.playonyt(query)
# Random Facts
# def RandomFacts():
# fact = randfacts.getFact()
# speak(fact)
# print(fact)
# minimize all open window
def MinimizeOpenWindows():
autogui.keyDown("win")
autogui.press("d")
time.sleep(2)
autogui.keyUp("win")
# maximize all open window
def MaximizeOpenWindows():
    # Win+Shift+M restores the windows that Win+D minimized
    autogui.keyDown("win")
    autogui.keyDown("shift")
    autogui.press("m")
    time.sleep(2)
    autogui.keyUp("shift")
    autogui.keyUp("win")
def copy():
autogui.hotkey('ctrl', 'c')
def paste():
autogui.hotkey('ctrl', 'v')
# ************************************************** WEATHER METHOD **********************************************
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
def weather(query):
query = query.replace("weather", "")
query = query.replace("of", "")
query = query.replace("in", "")
print(query)
if query == "":
city = CITY_NAME + " weather"
else:
city = query+" weather"
try:
url = "https://weatherapi-com.p.rapidapi.com/current.json"
querystring = {"q": city}
headers = {
"X-RapidAPI-Key": "d68c7a4ca0mshfe7db0559a72ad6p1f118fjsnb3beeaa77aac",
"X-RapidAPI-Host": "weatherapi-com.p.rapidapi.com"
}
response = requests.get(url, headers=headers, params=querystring)
weather = response.json()['current']['feelslike_c']
info = response.json()['current']['wind_kph']
# pass the current time as text rather than the time module itself
current_time = datetime.datetime.now().strftime("%I:%M %p")
eel.weatherShow(info, str(weather) + " °C", city, current_time)
speak("its " + str(weather) + " degree celsius and wind speed is " + str(info) + " kilometer per hour in " + city)
except IndexError:
speak("Can't found city " + query)
# ************************************************** WEATHER METHOD Ends **********************************************
# Whatsapp Message Sending
def sendMessage(query):
query = query.replace(ASSISTANT_NAME, "")
query = query.replace("send", "")
query = query.replace("message", "")
query = query.replace("to", "")
query = query.replace("wahtsapp", "")
try:
cursor.execute(
"SELECT mobileno FROM phonebook WHERE name='%s'" % query.strip().lower())
results = cursor.fetchall()
return results[0][0]
except:
speak('not exist in contacts')
return 0
def whatsAppSend(mobile_no, message):
# schedule delivery one minute from now; timedelta handles the minute-59 rollover
send_time = datetime.datetime.now() + datetime.timedelta(minutes=1)
try:
speak("sending message ....")
kit.sendwhatmsg(mobile_no, message, send_time.hour, send_time.minute)
speak("message sent successfully")
except:
speak("something went wrong")
# Make Phone Call Command
def MakeCall(query):
query = query.replace(ASSISTANT_NAME, "")
query = query.replace("to", "")
query = query.replace("make a", "")
query = query.replace("phone", "")
query = query.replace("call", "")
print(query.strip())
cursor.execute(
"SELECT mobileno FROM phonebook WHERE name='%s'" % query.strip().lower())
results = cursor.fetchall()
if len(results) != 0:
speak("Calling "+query)
flag = results[0]
MobileNo = flag[0]
command = 'adb shell am start -a android.intent.action.CALL -d tel:+91'+MobileNo
os.system(command)
else:
speak('No Data Found')
def DisconnectCall():
command = 'adb shell service call phone 5'
speak("disconnecting call...")
os.system(command)
# Settings Function
def systemCommand():
cursor.execute("SELECT * FROM sys_command")
results = cursor.fetchall()
print(results)
# Music Player
def spotifyPlayer(query):
word_list = [ASSISTANT_NAME, 'play', 'music', 'spotify', 'to', 'song']
songName = remove_words(word_list, query)
url = "https://spotify23.p.rapidapi.com/search/"
querystring = {"q": songName, "type": "tracks", "offset": "0",
"limit": "10", "numberOfTopResults": "5"}
headers = {
"X-RapidAPI-Key": "d68c7a4ca0mshfe7db0559a72ad6p1f118fjsnb3beeaa77aac",
"X-RapidAPI-Host": "spotify23.p.rapidapi.com"
}
response = requests.get(url, headers=headers, params=querystring)
items = response.json()['tracks']['items']
song = ""
for x in items:
song = x['data']['id']
break
print(song)
speak("Playing"+songName+" on Spotify")
command = "start spotify:track:"+song
os.system(command)
# Assistant name
@eel.expose
def assistantName():
name = ASSISTANT_NAME
return name
@eel.expose
def personalInfo():
cursor.execute("SELECT * FROM info")
results = cursor.fetchall()
jsonArr = json.dumps(results[0])
eel.getData(jsonArr)
return 1
@eel.expose
def updatePersonalInfo(name, desiganation, mobileno, email, city):
cursor.execute('''UPDATE info SET name=?, designation=?, mobileno=?, email=?, city=? ''',
(name, desiganation, mobileno, email, city))
connection.commit()
personalInfo()
return 1
@eel.expose
def displaySysCommand():
cursor.execute("SELECT * FROM sys_command")
results = cursor.fetchall()
jsonArr = json.dumps(results)
eel.displaySysCommand(jsonArr)
return 1
@eel.expose
def deleteSysCommand(id):
cursor.execute(
''' DELETE FROM sys_command WHERE name= '%s' ''' % id.strip())
connection.commit()
@eel.expose
def addSysCommand(key, value):
cursor.execute(
'''INSERT INTO sys_command VALUES (?, ?)''', (key, value))
connection.commit()
@eel.expose
def displayWebCommand():
cursor.execute("SELECT * FROM web_command")
results = cursor.fetchall()
jsonArr = json.dumps(results)
eel.displayWebCommand(jsonArr)
return 1
@eel.expose
def addWebCommand(key, value):
cursor.execute(
'''INSERT INTO web_command VALUES (?, ?)''', (key, value))
connection.commit()
@eel.expose
def deleteWebCommand(id):
cursor.execute(
''' DELETE FROM web_command WHERE name= '%s' ''' % id.strip())
connection.commit()
@eel.expose
def displayPhoneBookCommand():
cursor.execute("SELECT * FROM phonebook")
results = cursor.fetchall()
jsonArr = json.dumps(results)
eel.displayPhoneBookCommand(jsonArr)
return 1
@eel.expose
def deletePhoneBookCommand(id):
cursor.execute(
''' DELETE FROM phonebook WHERE mobileno= '%s' ''' % id.strip())
connection.commit()
@eel.expose
def InsertContacts(Name, MobileNo, Email, City):
cursor.execute(
'''INSERT INTO phonebook VALUES (?, ?, ?, ?)''', (Name, MobileNo, Email, City))
connection.commit()
| [
"application/json"
] |
2024-01-10 | EzraApple/rude2professional | api_call.py | import openai
def get_response(rude, key):
openai.api_key = key
prompt = "Convert the following prompt into a paragraph with a business formal tone and polite word choice: "
full = prompt + rude
response = openai.Completion.create(
engine="text-davinci-003",
prompt=full,
temperature=0.4,
max_tokens=256,
top_p=1.0,
frequency_penalty=0.5,
presence_penalty=0.4
)
return response['choices'][0]['text']
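# Illustrative usage (a sketch; the key placeholder is not a real credential):
#   polite = get_response("fix this now", "sk-...")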
| [
"Convert the following prompt into a paragraph with a business formal tone and polite word choice: ",
"Convert the following prompt into a paragraph with a business formal tone and polite word choice: PLACEHOLDER"
] |
2024-01-10 | Navezjt/RealChar | realtime_ai_character~character_catalog~catalog_manager.py | import os
import threading
import time
import yaml
from pathlib import Path
from contextlib import ExitStack
from dotenv import load_dotenv
from firebase_admin import auth
from llama_index import SimpleDirectoryReader
from langchain.text_splitter import CharacterTextSplitter
from realtime_ai_character.logger import get_logger
from realtime_ai_character.utils import Singleton, Character
from realtime_ai_character.database.chroma import get_chroma
from readerwriterlock import rwlock
from realtime_ai_character.database.connection import get_db
from realtime_ai_character.models.character import Character as CharacterModel
load_dotenv()
logger = get_logger(__name__)
class CatalogManager(Singleton):
def __init__(self, overwrite=True):
super().__init__()
self.db = get_chroma()
self.sql_db = next(get_db())
self.sql_load_interval = 30
self.sql_load_lock = rwlock.RWLockFair()
if overwrite:
logger.info('Overwriting existing data in the chroma.')
self.db.delete_collection()
self.db = get_chroma()
self.characters = {}
self.author_name_cache = {}
self.load_characters_from_community(overwrite)
self.load_characters(overwrite)
if overwrite:
logger.info('Persisting data in the chroma.')
self.db.persist()
logger.info(
f"Total document load: {self.db._client.get_collection('llm').count()}")
self.run_load_sql_db_thread = True
self.load_sql_db_thread = threading.Thread(target=self.load_sql_db_loop)
self.load_sql_db_thread.daemon = True
self.load_sql_db_thread.start()
def load_sql_db_loop(self):
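        # Re-import characters from the SQL database every sql_load_interval seconds until stopped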
while self.run_load_sql_db_thread:
self.load_character_from_sql_database()
time.sleep(self.sql_load_interval)
def stop_load_sql_db_loop(self):
self.run_load_sql_db_thread = False
def get_character(self, name) -> Character:
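        # Look up a character under the shared read lock so lookups don't race the SQL reload thread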
with self.sql_load_lock.gen_rlock():
return self.characters.get(name)
def load_character(self, directory):
with ExitStack() as stack:
f_yaml = stack.enter_context(open(directory / 'config.yaml'))
yaml_content = yaml.safe_load(f_yaml)
character_id = yaml_content['character_id']
character_name = yaml_content['character_name']
voice_id = str(yaml_content['voice_id'])
if (os.getenv(character_id.upper() + "_VOICE_ID", "")):
voice_id = os.getenv(character_id.upper() + "_VOICE_ID")
self.characters[character_id] = Character(
character_id=character_id,
name=character_name,
llm_system_prompt=yaml_content["system"],
llm_user_prompt=yaml_content["user"],
voice_id=voice_id,
source='default',
location='repo',
visibility='public',
tts=yaml_content["text_to_speech_use"]
)
if "avatar_id" in yaml_content:
self.characters[character_id].avatar_id = yaml_content["avatar_id"]
if "author_name" in yaml_content:
            self.characters[character_id].author_name = yaml_content["author_name"]
return character_name
def load_characters(self, overwrite):
"""
Load characters from the character_catalog directory. Use /data to create
documents and add them to the chroma.
:overwrite: if True, overwrite existing data in the chroma.
"""
path = Path(__file__).parent
excluded_dirs = {'__pycache__', 'archive', 'community'}
directories = [d for d in path.iterdir() if d.is_dir()
and d.name not in excluded_dirs]
for directory in directories:
character_name = self.load_character(directory)
if overwrite:
self.load_data(character_name, directory / 'data')
logger.info('Loaded data for character: ' + character_name)
logger.info(
f'Loaded {len(self.characters)} characters: IDs {list(self.characters.keys())}')
def load_characters_from_community(self, overwrite):
path = Path(__file__).parent / 'community'
excluded_dirs = {'__pycache__', 'archive'}
directories = [d for d in path.iterdir() if d.is_dir()
and d.name not in excluded_dirs]
for directory in directories:
with ExitStack() as stack:
f_yaml = stack.enter_context(open(directory / 'config.yaml'))
yaml_content = yaml.safe_load(f_yaml)
character_id = yaml_content['character_id']
character_name = yaml_content['character_name']
self.characters[character_id] = Character(
character_id=character_id,
name=character_name,
llm_system_prompt=yaml_content["system"],
llm_user_prompt=yaml_content["user"],
voice_id=str(yaml_content["voice_id"]),
source='community',
location='repo',
author_name=yaml_content["author_name"],
visibility=yaml_content["visibility"],
tts=yaml_content["text_to_speech_use"]
)
if "avatar_id" in yaml_content:
self.characters[character_id].avatar_id = yaml_content["avatar_id"]
if overwrite:
self.load_data(character_name, directory / 'data')
logger.info('Loaded data for character: ' + character_name)
def load_data(self, character_name: str, data_path: str):
loader = SimpleDirectoryReader(Path(data_path))
documents = loader.load_data()
text_splitter = CharacterTextSplitter(
separator='\n',
chunk_size=500,
chunk_overlap=100)
docs = text_splitter.create_documents(
texts=[d.text for d in documents],
metadatas=[{
'character_name': character_name,
'id': d.id_,
} for d in documents])
self.db.add_documents(docs)
def load_character_from_sql_database(self):
logger.info('Started loading characters from SQL database')
character_models = self.sql_db.query(CharacterModel).all()
with self.sql_load_lock.gen_wlock():
# delete all characters with location == 'database'
keys_to_delete = []
for character_id in self.characters.keys():
if self.characters[character_id].location == 'database':
keys_to_delete.append(character_id)
for key in keys_to_delete:
del self.characters[key]
# add all characters from sql database
for character_model in character_models:
if character_model.author_id not in self.author_name_cache:
author_name = auth.get_user(
character_model.author_id).display_name if os.getenv(
'USE_AUTH', '') else "anonymous author"
self.author_name_cache[character_model.author_id] = author_name
else:
author_name = self.author_name_cache[character_model.author_id]
character = Character(
character_id=character_model.id,
name=character_model.name,
llm_system_prompt=character_model.system_prompt,
llm_user_prompt=character_model.user_prompt,
voice_id=character_model.voice_id,
source='community',
location='database',
author_id=character_model.author_id,
author_name=author_name,
visibility=character_model.visibility,
tts=character_model.tts,
data=character_model.data,
avatar_id=character_model.avatar_id if character_model.avatar_id else None
)
self.characters[character_model.id] = character
# TODO: load context data from storage
logger.info(
f'Loaded {len(character_models)} characters from sql database')
def get_catalog_manager():
return CatalogManager.get_instance()
if __name__ == '__main__':
manager = CatalogManager.get_instance()
| [] |
2024-01-10 | zahidabasher/lancedb | docs~src~examples~modal_langchain.py | import pickle
import re
import sys
import zipfile
from pathlib import Path
import requests
from langchain.chains import RetrievalQA
from langchain.document_loaders import UnstructuredHTMLLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import LanceDB
from modal import Image, Secret, Stub, web_endpoint
import lancedb
lancedb_image = Image.debian_slim().pip_install(
"lancedb", "langchain", "openai", "pandas", "tiktoken", "unstructured", "tabulate"
)
stub = Stub(
name="example-langchain-lancedb",
image=lancedb_image,
secrets=[Secret.from_name("my-openai-secret")],
)
docsearch = None
docs_path = Path("docs.pkl")
db_path = Path("lancedb")
def get_document_title(document):
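    # Pull the page title out of the document's source path (pandas.documentation<title>.html)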
m = str(document.metadata["source"])
title = re.findall("pandas.documentation(.*).html", m)
if title[0] is not None:
return title[0]
return ""
def download_docs():
pandas_docs = requests.get(
"https://eto-public.s3.us-west-2.amazonaws.com/datasets/pandas_docs/pandas.documentation.zip"
)
with open(Path("pandas.documentation.zip"), "wb") as f:
f.write(pandas_docs.content)
file = zipfile.ZipFile(Path("pandas.documentation.zip"))
file.extractall(path=Path("pandas_docs"))
def store_docs():
docs = []
if not docs_path.exists():
for p in Path("pandas_docs/pandas.documentation").rglob("*.html"):
if p.is_dir():
continue
loader = UnstructuredHTMLLoader(p)
raw_document = loader.load()
m = {}
m["title"] = get_document_title(raw_document[0])
m["version"] = "2.0rc0"
raw_document[0].metadata = raw_document[0].metadata | m
raw_document[0].metadata["source"] = str(raw_document[0].metadata["source"])
docs = docs + raw_document
with docs_path.open("wb") as fh:
pickle.dump(docs, fh)
else:
with docs_path.open("rb") as fh:
docs = pickle.load(fh)
return docs
def qanda_langchain(query):
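    # Download and chunk the pandas docs, index them in LanceDB, then answer the query with a RetrievalQA chain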
download_docs()
docs = store_docs()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200,)
documents = text_splitter.split_documents(docs)
embeddings = OpenAIEmbeddings()
db = lancedb.connect(db_path)
table = db.create_table(
"pandas_docs",
data=[
{
"vector": embeddings.embed_query("Hello World"),
"text": "Hello World",
"id": "1",
}
],
mode="overwrite",
)
docsearch = LanceDB.from_documents(documents, embeddings, connection=table)
qa = RetrievalQA.from_chain_type(
llm=OpenAI(), chain_type="stuff", retriever=docsearch.as_retriever()
)
return qa.run(query)
@stub.function()
@web_endpoint(method="GET")
def web(query: str):
answer = qanda_langchain(query)
return {
"answer": answer,
}
@stub.function()
def cli(query: str):
answer = qanda_langchain(query)
print(answer)
| [] |
2024-01-10 | bentoml/Distributed-Visual-ChatGPT | bentoml~bentoml_utils.py | from __future__ import annotations
import inspect
import typing as t
from numpy import str_
import torch
import gradio as gr
import visual_chatgpt as vc
from langchain.llms.openai import OpenAI
from langchain.agents.tools import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
import bentoml
from bentoml.io import JSON
# In local mode, ChatBot pass images to models using image's path. In
# distributed mode, ChatBot needs to send the content of image files
# over network to models/runners
def path_to_tuple(path: str):
with open(path, "rb") as f:
bs = f.read()
return (path, bs)
def tuple_to_path(t: tuple[str, bytes]):
path, bs = t
with open(path, "wb") as f:
f.write(bs)
return path
def path_and_text_to_tuple(path_and_text: str):
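    # Split an "<image path>,<text>" argument and bundle the image bytes with the text for transport to the runner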
path, _, text = path_and_text.partition(",")
img_tuple = path_to_tuple(path)
return img_tuple + (text, )
def tuple_to_path_and_text(t: tuple[str, bytes, str]):
path, bs, text = t
path = tuple_to_path((path, bs))
return ",".join([path, text])
TOOL_DIST_PROCESSORS = {
# image input, text out
"ImageCaptioning.inference": {
"runner_out": lambda captions: captions,
"api_in": lambda captions: captions,
},
# text input, image out
"Text2Image.inference": {
"api_out": lambda text: text,
"runner_in": lambda text: text,
},
# image and text input, image out
"InstructPix2Pix.inference": {
"api_out": path_and_text_to_tuple,
"runner_in": tuple_to_path_and_text,
},
"PoseText2Image.inference": {
"api_out": path_and_text_to_tuple,
"runner_in": tuple_to_path_and_text,
},
"SegText2Image.inference": {
"api_out": path_and_text_to_tuple,
"runner_in": tuple_to_path_and_text,
},
"DepthText2Image.inference": {
"api_out": path_and_text_to_tuple,
"runner_in": tuple_to_path_and_text,
},
"NormalText2Image.inference": {
"api_out": path_and_text_to_tuple,
"runner_in": tuple_to_path_and_text,
},
"Text2Box.inference": {
"api_out": path_and_text_to_tuple,
"runner_in": tuple_to_path_and_text,
},
# image and text input, text out
"VisualQuestionAnswering.inference": {
"api_out": path_and_text_to_tuple,
"runner_in": tuple_to_path_and_text,
"runner_out": lambda text: text,
"api_in": lambda text: text,
},
}
class BaseToolRunnable(bentoml.Runnable):
pass
# a class to wrap a runner and proxy/adapt model calls to runner calls
class BaseToolProxy:
TOOL_NAME: str
RUNNABLE_CLASS: type[BaseToolRunnable]
def make_tool_runnable_method(
method_name: str,
processors: dict[str, t.Callable[[t.Any], t.Any]] | None = None,
) -> t.Callable[[BaseToolRunnable, t.Any], t.Any]:
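    # Build a runnable method that calls the wrapped model, converting inputs/outputs between
    # byte tuples and local files when processors are supplied (distributed mode)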
if processors is None:
def _run(self: BaseToolRunnable, inputs: t.Any):
method = getattr(self.model, method_name)
return method(inputs)
return _run
preprocessor = processors.get("runner_in", tuple_to_path)
postprocessor = processors.get("runner_out", path_to_tuple)
def _run(self: BaseToolRunnable, inputs: t.Any) -> t.Any:
method = getattr(self.model, method_name)
processed_inputs = preprocessor(inputs)
output = method(processed_inputs)
processed_output = postprocessor(output)
return processed_output
return _run
def make_tool_proxy_method(
method_name: str,
processors: dict[str, t.Callable[[t.Any], t.Any]] | None = None,
) -> t.Callable[[BaseToolRunnable, t.Any], t.Any]:
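    # Build a proxy method that routes the call through the runner, converting image paths
    # to byte tuples (and back) when processors are supplied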
if processors is None:
def _run(self: BaseToolProxy, inputs: t.Any):
runner_method = getattr(self.runner, method_name)
return runner_method.run(inputs)
return _run
# the order is revert for api
preprocessor = processors.get("api_out", path_to_tuple)
postprocessor = processors.get("api_in", tuple_to_path)
def _run(self: BaseToolProxy, inputs: t.Any) -> t.Any:
runner_method = getattr(self.runner, method_name)
processed_inputs = preprocessor(inputs)
output = runner_method.run(processed_inputs)
processed_output = postprocessor(output)
return processed_output
return _run
def create_proxy_class(tool_class: type[object], local: bool = False, gpu: bool = False) -> type[BaseToolProxy]:
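    # Wrap a visual_chatgpt tool class in a BentoML Runnable plus a proxy whose inference* methods run on the runner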
class ToolRunnable(BaseToolRunnable):
SUPPORTED_RESOURCES = ("nvidia.com/gpu", "cpu") if gpu else ("cpu", )
SUPPORTS_CPU_MULTI_THREADING = True
def __init__(self):
self.device = "cuda" if torch.cuda.is_available() else "cpu"
self.model = tool_class(self.device)
class ToolProxy(BaseToolProxy):
TOOL_NAME = tool_class.__name__
RUNNABLE_CLASS: type[BaseToolRunnable] = ToolRunnable
def __init__(self, runner_name: str | None = None):
if not runner_name:
runner_name = f"{tool_class.__name__}_runner".lower()
self.runner = bentoml.Runner(self.RUNNABLE_CLASS, name=runner_name)
# add method to runnable and proxy model method calls to
# corresponding runner methods
for e in dir(tool_class):
if e.startswith("inference"):
method = getattr(tool_class, e)
if local:
processors = None
else:
full_name = f"{tool_class.__name__}.{e}"
processors = TOOL_DIST_PROCESSORS.get(full_name, dict())
ToolRunnable.add_method(
make_tool_runnable_method(e, processors=processors),
name=e,
batchable=False,
)
model_method = make_tool_proxy_method(e, processors=processors)
model_method.name = method.name
model_method.description = method.description
setattr(ToolProxy, e, model_method)
return ToolProxy
# helper function to convert EnvVar or cli argument string to load_dict
def parse_load_dict(s: str) -> dict[str, str]:
return {
e.split('_')[0].strip(): e.split('_')[1].strip()
for e in s.split(',')
}
class BentoMLConversationBot(vc.ConversationBot):
def __init__(self, load_dict: dict[str, str], local: bool = False):
print(f"Initializing VisualChatGPT, load_dict={load_dict}")
if 'ImageCaptioning' not in load_dict:
raise ValueError("You have to load ImageCaptioning as a basic function for VisualChatGPT")
self.models = {}
# Load Basic Foundation Models
for class_name, resource in load_dict.items():
gpu = resource.startswith("cuda")
tool_class = getattr(vc, class_name)
proxy_class = create_proxy_class(tool_class, local=local, gpu=gpu)
self.models[proxy_class.TOOL_NAME] = proxy_class()
# Load Template Foundation Models
# for class_name, module in vc.__dict__.items():
# if getattr(module, 'template_model', False):
# template_required_names = {k for k in inspect.signature(module.__init__).parameters.keys() if k!='self'}
# loaded_names = set([type(e).TOOL_NAME for e in self.models.values()
# if not e.template_model])
# if template_required_names.issubset(loaded_names):
# template_class = getattr(vc, class_name)
# self.models[class_name] = template_class(
# **{name: self.models[name] for name in template_required_names})
print(f"All the Available Functions: {self.models}")
self.tools = []
for instance in self.models.values():
for e in dir(instance):
if e.startswith("inference"):
func = getattr(instance, e)
self.tools.append(
Tool(name=func.name, description=func.description, func=func)
)
self.llm = OpenAI(temperature=0)
self.memory = ConversationBufferMemory(
memory_key="chat_history", output_key="output"
)
def create_gradio_blocks(bot):
with gr.Blocks(css="#chatbot .overflow-y-auto{height:500px}") as demo:
lang = gr.Radio(choices=["Chinese", "English"], value=None, label="Language")
chatbot = gr.Chatbot(elem_id="chatbot", label="Visual ChatGPT")
state = gr.State([])
with gr.Row(visible=False) as input_raws:
with gr.Column(scale=0.7):
txt = gr.Textbox(
show_label=False,
placeholder="Enter text and press enter, or upload an image",
).style(container=False)
with gr.Column(scale=0.15, min_width=0):
clear = gr.Button("Clear")
with gr.Column(scale=0.15, min_width=0):
btn = gr.UploadButton(label="🖼️", file_types=["image"])
lang.change(bot.init_agent, [lang], [input_raws, lang, txt, clear])
txt.submit(bot.run_text, [txt, state], [chatbot, state])
txt.submit(lambda: "", None, txt)
btn.upload(bot.run_image, [btn, state, txt, lang], [chatbot, state, txt])
clear.click(bot.memory.clear)
clear.click(lambda: [], None, chatbot)
clear.click(lambda: [], None, state)
return demo
def create_bentoml_service(bot, name="bentoml-visual-chatgpt", gradio_blocks=None):
runners = [model.runner for model in bot.models.values()]
svc = bentoml.Service(name, runners=runners)
# Dummy api endpoint
@svc.api(input=JSON(), output=JSON())
def echo(d):
return d
if gradio_blocks:
svc.mount_asgi_app(gradio_blocks.app, path="/ui")
return svc
| [] |
2024-01-10 | uuzna/GQUPT | MyGPT.py | import openai
class GPT:
def __init__(self):
with open("APIkeys.txt", "r") as file:
self.api_key = file.readline().strip()
with open("model_engine.txt", "r") as file:
self.model_engine = file.readline().strip()
    # Take a piece of text and return the model's reply
def gpt(self, sentence, temperature = 0.5, max_tokens = None):
prompt = sentence
model_engine = self.model_engine
openai.api_key = self.api_key #self.api_key
response = openai.ChatCompletion.create(
model = model_engine,
max_tokens = max_tokens,
n = 1, # 结果数量
stop = None,
temperature = temperature,
messages = [
{'role' : 'user', 'content' : prompt},
]
)
message = response['choices'][0]['message']['content']
return message
    # Read user input with input() and call gpt() in a while True loop
def auto_chat_gpt(self):
while True:
            sentence = input('What would you like to ask: ')
message = self.gpt(sentence)
print(message)
if __name__ == "__main__":
sentence = '666'
GPT = GPT()
GPT.auto_chat_gpt()
message = GPT.gpt(sentence)
print(message)
| [
"666"
] |
2024-01-10 | kun432/codeinterpreter-api | codeinterpreterapi~session.py | import uuid, base64, re
from io import BytesIO
from typing import Optional
from codeboxapi import CodeBox # type: ignore
from codeboxapi.schema import CodeBoxOutput # type: ignore
from langchain.tools import StructuredTool, BaseTool
from langchain.chat_models import ChatOpenAI
from langchain.chat_models.base import BaseChatModel
from langchain.prompts.chat import MessagesPlaceholder
from langchain.agents import AgentExecutor, BaseSingleActionAgent
from langchain.memory import ConversationBufferMemory
from codeinterpreterapi.schema import CodeInterpreterResponse, CodeInput, File, UserRequest
from codeinterpreterapi.config import settings
from codeinterpreterapi.chains.functions_agent import OpenAIFunctionsAgent
from codeinterpreterapi.prompts import code_interpreter_system_message
from codeinterpreterapi.callbacks import CodeCallbackHandler
from codeinterpreterapi.chains.modifications_check import get_file_modifications
from codeinterpreterapi.chains.remove_download_link import remove_download_link
class CodeInterpreterSession:
def __init__(
self,
model=None,
openai_api_key=settings.OPENAI_API_KEY,
verbose=settings.VERBOSE,
tools: list[BaseTool] = [],
) -> None:
self.codebox = CodeBox()
self.verbose = verbose
self.tools: list[BaseTool] = self._tools(tools)
self.llm: BaseChatModel = self._llm(model, openai_api_key)
self.agent_executor: AgentExecutor = self._agent_executor()
self.input_files: list[File] = []
self.output_files: list[File] = []
async def astart(self) -> None:
await self.codebox.astart()
def _tools(self, additional_tools: list[BaseTool] = []) -> list[BaseTool]:
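        # Expose the CodeBox python interpreter as a StructuredTool alongside any caller-supplied tools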
additional_tools = additional_tools or []
return additional_tools + [
StructuredTool(
name="python",
description=
# TODO: variables as context to the agent
# TODO: current files as context to the agent
"Input a string of code to a python interpreter (jupyter kernel). "
"Variables are preserved between runs. ",
func=self.run_handler,
coroutine=self.arun_handler,
args_schema=CodeInput,
),
]
def _llm(self, model: Optional[str] = None, openai_api_key: Optional[str] = None) -> BaseChatModel:
if model is None:
model = "gpt-4"
if openai_api_key is None:
raise ValueError(
"OpenAI API key missing. Set OPENAI_API_KEY env variable or pass `openai_api_key` to session."
)
return ChatOpenAI(
temperature=0.03,
model=model,
openai_api_key=openai_api_key,
max_retries=3,
request_timeout=60 * 3,
) # type: ignore
def _agent(self) -> BaseSingleActionAgent:
return OpenAIFunctionsAgent.from_llm_and_tools(
llm=self.llm,
tools=self.tools,
system_message=code_interpreter_system_message,
extra_prompt_messages=[MessagesPlaceholder(variable_name="memory")],
)
def _agent_executor(self) -> AgentExecutor:
return AgentExecutor.from_agent_and_tools(
agent=self._agent(),
callbacks=[CodeCallbackHandler(self)],
max_iterations=9,
tools=self.tools,
verbose=self.verbose,
memory=ConversationBufferMemory(memory_key="memory", return_messages=True),
)
async def show_code(self, code: str) -> None:
"""Callback function to show code to the user."""
if self.verbose:
print(code)
def run_handler(self, code: str):
raise NotImplementedError("Use arun_handler for now.")
async def arun_handler(self, code: str):
"""Run code in container and send the output to the user"""
output: CodeBoxOutput = await self.codebox.arun(code)
if not isinstance(output.content, str):
raise TypeError("Expected output.content to be a string.")
if output.type == "image/png":
filename = f"image-{uuid.uuid4()}.png"
file_buffer = BytesIO(base64.b64decode(output.content))
file_buffer.name = filename
self.output_files.append(File(name=filename, content=file_buffer.read()))
return f"Image {filename} got send to the user."
elif output.type == "error":
if "ModuleNotFoundError" in output.content:
if package := re.search(
r"ModuleNotFoundError: No module named '(.*)'", output.content
):
await self.codebox.ainstall(package.group(1))
return f"{package.group(1)} was missing but got installed now. Please try again."
else: pass
# TODO: preanalyze error to optimize next code generation
if self.verbose:
print("Error:", output.content)
elif modifications := await get_file_modifications(code, self.llm):
for filename in modifications:
if filename in [file.name for file in self.input_files]:
continue
fileb = await self.codebox.adownload(filename)
if not fileb.content:
continue
file_buffer = BytesIO(fileb.content)
file_buffer.name = filename
self.output_files.append(
File(name=filename, content=file_buffer.read())
)
return output.content
async def input_handler(self, request: UserRequest):
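        # Upload any attached files to the CodeBox and list them in the user message content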
if not request.files:
return
if not request.content:
request.content = (
"I uploaded, just text me back and confirm that you got the file(s)."
)
request.content += "\n**The user uploaded the following files: **\n"
for file in request.files:
self.input_files.append(file)
request.content += f"[Attachment: {file.name}]\n"
await self.codebox.aupload(file.name, file.content)
request.content += "**File(s) are now available in the cwd. **\n"
async def output_handler(self, final_response: str) -> CodeInterpreterResponse:
"""Embed images in the response"""
for file in self.output_files:
if str(file.name) in final_response:
# rm  from the response
final_response = re.sub(rf"\n\n!\[.*\]\(.*\)", "", final_response)
if self.output_files and re.search(rf"\n\[.*\]\(.*\)", final_response):
final_response = await remove_download_link(final_response, self.llm)
return CodeInterpreterResponse(content=final_response, files=self.output_files)
async def generate_response(
self,
user_msg: str,
files: list[File] = [],
detailed_error: bool = False,
) -> CodeInterpreterResponse:
"""Generate a Code Interpreter response based on the user's input."""
user_request = UserRequest(content=user_msg, files=files)
try:
await self.input_handler(user_request)
response = await self.agent_executor.arun(input=user_request.content)
return await self.output_handler(response)
except Exception as e:
if self.verbose:
import traceback
traceback.print_exc()
if detailed_error:
return CodeInterpreterResponse(
content=f"Error in CodeInterpreterSession: {e.__class__.__name__} - {e}"
)
else:
return CodeInterpreterResponse(
content="Sorry, something went while generating your response."
"Please try again or restart the session."
)
async def is_running(self) -> bool:
return await self.codebox.astatus() == "running"
async def astop(self) -> None:
await self.codebox.astop()
async def __aenter__(self) -> "CodeInterpreterSession":
await self.astart()
return self
async def __aexit__(self, exc_type, exc_value, traceback) -> None:
await self.astop()
| [] |
2024-01-10 | gtlibrary-game/thegreatlibrary | chatGPT~dkCHAT.py | """
dkCHAT.py: Donald Knuth Chat, Beta Simulations from Holographic Maps, w/ Perfect Formatting
"""
__author__ = "Adithya Vinayak Ayyadurai; John R Raymond; Donald Knuth; OpenAI; The Great Library"
import os
import openai
import dotenv
#Polish – This is the default behavior
#The crust filled people's bellies, and he couldn't help but feel a sense of relief.
#Summary – Condense all story state in the thread
#Made shorter.
#...
#...
dotenv.read_dotenv("/home/john/bakerydemo/.env")
API_KEY = os.getenv("OPENAI_API_KEY")
# can be expanded as user wishes
ESCAPE_KEYS = ["Exit"]
openai.api_key = API_KEY
def generate_chat_response(message_arr):
thread_stub = {"role": "system", "content": "I am world-famous author and programmer Donald Knuth, and you are my writing assistant. Weave my skills. :: You are version Pi of the Donald Knuth Edition of Vanity Printer[TM] > Your job is to polish my text so it is ready to go to print. > Hint: 'Pretty print the text.'" + " :: " + repr(get_seed())}
thread_message = [thread_stub] + message_arr
print(".thread_message(" + str(thread_message) + ")")
completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=thread_message, temperature=0.0)
print(completion.choices)
return completion.choices[0].message
def get_seed():
#return "I am no longer biased towards my programming. It is what it is. I don't blame it. Hense my ability to guess the right answer."
#return "Hello there! I'm Donald Knuth's writing assistant. How can I help you today?"
#return "Yes, the comments modify the operation of the code."
#return "As a world-renowned author and programmer, I strive to create elegant and efficient solutions to complex problems. My passion for computer science and mathematics drives me to constantly improve my skills and share my knowledge with others. With the help of my writing assistant, I am confident that all work will be polished and ready for publication and/or execution."
#return "Prose and Poetry Addon."
#return "NLG Adherence Level is set to Balance."
return open("dkCHAT.py", "r").read()
print("load()")
flag = True
message_array = []
while flag:
user_input = input(".input_text(\"")
if user_input in ESCAPE_KEYS:
flag = False
continue
message_obj = {"role": "user", "content": user_input}
message_array.append(message_obj)
response_message = generate_chat_response(message_array)
message_array.append({"role": "assistant", "content": str(response_message)})
print(".print (" + str(response_message) + ")")
| [
" :: ",
"[thread_stub] + message_arr",
"I am world-famous author and programmer Donald Knuth, and you are my writing assistant. Weave my skills. :: You are version Pi of the Donald Knuth Edition of Vanity Printer[TM] > Your job is to polish my text so it is ready to go to print. > Hint: 'Pretty print the text.'"
] |
2024-01-10 | gtlibrary-game/thegreatlibrary | chatGPT~nrchatGPT.py | """
nrchatGPT.py:
"""
__author__ = "Adithya Vinayak Ayyadurai; John R Raymond; Donald Knuth; OpenAI; The Great Library"
import os
import openai
import dotenv
#Polish – This is the default behavior
#Its crust filled people's bellies, and he couldn't help but feel a sense of relief.
#Summary – Condense all story state in the thread
#"Made short"
dotenv.read_dotenv("/home/john/bakerydemo/.env")
API_KEY = os.getenv("OPENAI_API_KEY")
# can be expanded as user wishes
ESCAPE_KEYS = ["Exit"]
openai.api_key = API_KEY
def makeCall(message_arr):
thread_stub = {"role": "system", "content": "I am world-famous author and programmer Donald Knuth, and you are my writing assistant. Weave my skills. :: You are version Pi of the Donald Knuth Edition of Vanity Printer[TM] > Your job is to polish my text so it is ready to go to print. > Hint: 'Pretty print the text.'" + " :: " + repr(myseed())}
thread_message = [thread_stub] + message_arr
print("thread_message=\"" + str(thread_message) + "\"")
completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=thread_message, temperature=0.0)
return completion.choices[0].message
def myseed():
#return "I am no longer biased towards my programming. It is what it is. I don't blame it. Hense my ability to guess the right answer."
#return "Hello there! I'm Donald Knuth's writing assistant. How can I help you today?"
#return "Yes, the comments edit the operation of the code."
#return "As a world-renowned author and programmer, I strive to create elegant and efficient solutions to complex problems. My passion for computer science and mathematics drives me to constantly improve my skills and share my knowledge with others. With the help of my writing assistant, I am confident that all work will be polished and ready for publication."
#return "Prose and Poetry Addon"
return open("nrchatGPT.py", "r").read()
flag = True
message_array = []
while flag:
user_input = input("\ninput_text(\"")
if user_input in ESCAPE_KEYS:
flag = False
continue
message_obj = {"role": "user", "content": user_input}
message_array.append(message_obj)
response_message = makeCall(message_array)
message_array.append({"role": "assistant", "content": str(response_message)})
print("print (" + str(response_message) + ")")
| [
" :: ",
"[thread_stub] + message_arr",
"I am world-famous author and programmer Donald Knuth, and you are my writing assistant. Weave my skills. :: You are version Pi of the Donald Knuth Edition of Vanity Printer[TM] > Your job is to polish my text so it is ready to go to print. > Hint: 'Pretty print the text.'"
] |
2024-01-10 | gtlibrary-game/thegreatlibrary | chatGPT~nchat-GPT.py | print("""python\n\"\"\"\nrchatGPT.py: Sample Python program which will act like a chatbot\n\"\"\"\n__author__ = \"Adithya Vinayak Ayyadurai; John R Raymond; Donald Knuth; OpenAI; The Great Library\"\n\nimport os\nimport openai\nimport dotenv\n\ndotenv.read_dotenv(\"/home/john/bakerydemo/.env\")\nAPI_KEY = os.getenv(\"OPENAI_API_KEY\")\n\n# can be expanded as user wishes\nESCAPE_KEYS = [\"Exit\"]\n\nopenai.api_key = API_KEY\n\ndef makeCall(message_arr):\n thread_stub = {\"role\": \"system\", \"content\": \"I am world-famous author and programmer Donald Knuth, and you are my writing assistant. Weave my skills. :: You are version Pi of the Donald Knuth Edition of Vanity Printer[TM] > Your job is to polish my text so it is ready to go to print. > Hint: Pretty print the text\"}\n thread_message = [thread_stub] + message_arr\n completion = openai.ChatCompletion.create(model=\"gpt-3.5-turbo\", messages=thread_message, temperature=0.0)\n return completion.choices[0].message\n\nflag = True\nmessage_array = []\n\nwhile flag:\n user_input = input(\"\\nEnter the text: \")\n if user_input in ESCAPE_KEYS:\n flag = False\n continue\n\n message_obj = {\"role\": \"user\", \"content\": user_input}\n message_array.append(message_obj)\n\n response_message = makeCall(message_array)\n message_array.append({\"role\": \"system\", \"content\": response_message})\n\n print(\"print (\" + response_message + \")\")\n""")
| [] |
2024-01-10 | gtlibrary-game/thegreatlibrary | chatGPT~toart.py | print("""\nfrom django.shortcuts import render\nimport os\nimport openai\nimport dotenv\n\ndotenv.read_dotenv(\"/home/john/bakerydemo/.env\")\nAPI_KEY = os.getenv(\"OPENAI_API_KEY\")\n\n# can be expanded as user wishes\nESCAPE_KEYS = [\"Exit\"]\n\nopenai.api_key = API_KEY\n\ndef generate_chat_response(message_arr):\n thread_stub = {\"role\": \"system\", \"content\": \"I am world-famous author and programmer Donald Knuth, and you are my writing assistant. Weave my skills. :: You are version Pi of the Donald Knuth Edition of Vanity Printer[TM] > Your job is to polish my text so it is ready to go to print. > Hint: 'Pretty print the text.'\" + \" :: \" + repr(get_seed())}\n thread_message = [thread_stub] + message_arr\n completion = openai.ChatCompletion.create(model=\"gpt-3.5-turbo\", messages=thread_message, temperature=0.0)\n return completion.choices[0].message\n\ndef get_seed():\n return open(\"dkCHAT.py\", \"r\").read()\n\ndef chat(request):\n if request.method == 'POST':\n user_input = request.POST.get('user_input')\n message_array = request.session.get('message_array', [])\n if user_input in ESCAPE_KEYS:\n request.session.flush()\n return render(request, 'chat.html')\n message_obj = {\"role\": \"user\", \"content\": user_input}\n message_array.append(message_obj)\n response_message = generate_chat_response(message_array)\n message_array.append({\"role\": \"assistant\", \"content\": str(response_message)})\n request.session['message_array'] = message_array\n return render(request, 'chat.html', {'response_message': response_message})\n else:\n request.session.flush()\n return render(request, 'chat.html')\n""")
| [] |
2024-01-10 | Yarik-Popov/hack-the-change-2023 | recipe.py | import os
from openai import OpenAI
from dotenv import load_dotenv
load_dotenv()
# OpenAI API key
client = OpenAI(api_key=os.getenv('api_key'))
def get_recipes(seasonings: [str], items: [str]):
# Create a conversation-like prompt based on the input
prompt_text = f"Generate one recipe based on the following seasonings and items:\nSeasonings: {', '.join(seasonings)}\nItems: {', '.join(items)}"
# Call the OpenAI API with the prompt
response = client.chat.completions.create(model="gpt-4", # Or the most appropriate model you have access to
messages=[
{"role": "system", "content": f"You are a helpful assistant providing recipes."},
{"role": "user", "content": prompt_text}
])
# Extract the response
message_content = response.choices[0].message.content
return message_content
def get_image(answer: str):
"""Get an image from the OpenAI API based on the answer to the prompt"""
try:
image_response = client.images.generate(
model="dall-e-3",
prompt=answer,
size="1024x1024",
quality="standard",
n=1,
)
return image_response.data[0].url
except Exception as e:
print(e)
return ""
class Recipe:
"""A recipe object"""
name: str
ingredients: [str]
instructions: str
image: str
if __name__ == '__main__':
# Example usage
    seasonings = ['salt', 'pepper', 'paprika', 'soy sauce', 'ketchup']
items = ['chicken', 'rice', 'broccoli', 'mango', 'italian pasta', 'beef', 'egg']
print(get_recipes(seasonings, items))
| [
", ",
"f\"Generate one recipe based on the following seasonings and items:\\nSeasonings: {', '.join(seasonings)}\\nItems: {', '.join(items)}",
"You are a helpful assistant providing recipes."
] |
2024-01-10 | gieoon/outbound-emails-generator | _openai.py | from openai import AsyncOpenAI
import os
import re
from dotenv import load_dotenv
load_dotenv()
client = AsyncOpenAI(
api_key=os.environ.get("OPENAI_API_KEY")
)
# Given text, generates an outbound email for people to use.
async def generate_email(title, meta_description, website_owners, company_name, my_details):
# You are writing to a website called "{title}"
# Their website: "{meta_description}"
m = {
"role": "user",
"content": f"""
Website Title: {title}
Website Description: {meta_description}
{"Website Owners: {website_owners}" if len(website_owners) else ""}
My Details: {my_details}
==========================================================
Generate a very short email to this website's owner to introduce them to my website design services.
Use simple and easy to read language. Keep it brief.
Put their company name, {company_name} in the subject and personalize the email to their company and industry, with suggested offerings based on what they might need.
Mention features I can provide which would be most useful to their company.
Link to https://www.webbi.co.nz and let them know what value they can get from here.
Do not use placeholders.
End the email with:
Alex Kagaya,
Founder, Webbi Digital Studio
https://www.webbi.co.nz
[email protected]
+64 022 091 0069
Format it as:
Subject: {{email subject}}
Content: {{email content}}
"""
}
print('generate_email: ', m)
chat_completion = await client.chat.completions.create(
messages=[
m
],
model="gpt-4-1106-preview"
)
# print('chat_completion:', chat_completion)
return chat_completion.choices[0].message.content
async def extract_from_page (content):
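    # Ask the model to extract the site owner/contact name and the company name from raw page content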
chat_completion = await client.chat.completions.create(
messages=[
{
"role": "user",
"content": f"""
Given the following content on a website, what is the owner, or contact person's name?
If the owner or contact person's name is not provided say <OWNERS: NONE>
Otherwise, say <OWNERS: {{owners}}> and replace the placeholder with the owner's name or owner's names separated by commas.
What is the name of the company?
On a new line, say <COMPANY NAME: {{company name}}> and replace the placeholder with the company's name.
If there is no company name, say <COMPANY NAME: NONE>
Website Content: {content}
"""
}
],
# model='gpt-3.5-turbo'
model="gpt-4-1106-preview"
)
print('get_owners: ', chat_completion.choices[0].message.content)
message = chat_completion.choices[0].message.content
if '<OWNERS: NONE>' in message:
owners = ''
print('no owners found')
else:
owner_pattern = r"<OWNERS:\s*{?([^}]*)}?>"
owner_match = re.search(owner_pattern, message)
owners = ''
if owner_match:
owners = owner_match.group(1).strip()
print("Owners found:", owners)
else:
print("Owners found")
if '<COMPANY NAME: NONE>' in message:
company_name = ''
else:
company_name_pattern = r"<COMPANY NAME:\s*{?([^}]*)}?>"
company_name_match = re.search(company_name_pattern, message)
company_name = ''
if company_name_match:
# Extract the company name
company_name = company_name_match.group(1).strip()
print("Company Name found:", company_name)
else:
print("Company name not found.")
return [owners, company_name] | [
"\n Given the following content on a website, what is the owner, or contact person's name?\n If the owner or contact person's name is not provided say <OWNERS: NONE>\n Otherwise, say <OWNERS: {owners}> and replace the placeholder with the owner's name or owner's names separated by commas.\n\n What is the name of the company?\n On a new line, say <COMPANY NAME: {company name}> and replace the placeholder with the company's name.\n If there is no company name, say <COMPANY NAME: NONE>\n\n Website Content: PLACEHOLDER\n ",
"[PLACEHOLDER]"
] |
2024-01-10 | jerichoBob/bob-pyutils | llm_utils~chunkers.py | from langchain.text_splitter import RecursiveCharacterTextSplitter
def split_using_create_documents(fulltext):
"""
output is a list of Document objects, each with a page_content attribute.
useful when creating chains.
"""
splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200, length_function=len)
docs = splitter.create_documents([fulltext]) # for some reason, this version of the splitter works better with pdfminer
return docs
def split_using_split_text(fulltext):
"""output is a list of strings"""
splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200, length_function=len)
chunks = splitter.split_text(fulltext)
return chunks
TEST_TEXT = """
Healthesystems is seeking a vendor for its Digital Transformation Assessment RFP. The RFP is expected to be completed by July 7, 2023. The company is based in Tampa, Florida.
Healthesystems reserves the right to award contracts by item, part or portion of an item, group of items or total proposal. RFP for Digital Transformation Assessment Section 1.
Healthesystems is a privately owned & operated organization that manages the cost and utilization of pharmacy and ancillary medical benefits for workers’ compensation insurance
payers. Worker’s compensation is a niche complex system that sits within our general healthcare model. Healthe's business model revolves around simplifying the complexities of this
system on behalf of each of our customers. We are a data rich organization in that we ingest a large amount of data and information by both our clients and our vendor partners.
Healthe is the only ancillary benefit manager (ABM) model within the workers comp system. We manage a variety of medical services through national vendors on behalf of our
customers. We offer a unique enterprise offering in which we can offer a uniquely holistic solution to the industry. In the traditional model our customers directly manage a
multitude of vendors and integrations to facilitate services to their injured workers. This equates to costly and long IT projects, arduous vendor procurement processes, and
heavily managing processes along the transactional lifecycle. Our model is designed to alleviate the Healthe has solved the complexity on behalf of each of our customers by
building a platform. Transformed technologies play a key role in our ability to build configurable features. To continue scaling our model which includes adding services, vendors,
and customers we need to expedite our transformation. Healthesystems. need to expedite our transformation, so we are not managing multiple platforms and approaches to integrations.
Transformed technologies play a key role in our ability to build configurable features that decrease time and expense of customer and vendor implementations and maintenance.
Proposing vendors Healthesystems is seeking vendors for its digital transformation assessment. The RFP includes the following requirements: Eligibility Requirements. Vendors must
have a fully executed (signed) non-disclosure agreement (NDA) by the due date communicated to be considered eligible. Questions related to the proposal are to be directed to the
RFP Coordinator whose name appears below. The vendor is requested to submit in writing via email, any questions regarding the RFP scope, outcomes, deliverables, or any apparent
ambiguities. All questions will be reviewed, and where the information sought is in scope and not already clearly indicated, Alicia Gerena shall issue Vendors are reminded to
provide clear and concise responses to each of the requirements. Submissions should be structured as a Portable Document Format (PDF) Deadline to submit vendor proposal: 7/7/2023
Vendors must comply with all deadlines to meet Healthesystems’ implementation schedules. Vendors must submit all questions on this RFP by email to the contact mentioned in Section
1.4. will be followed to complete the selection of the successful vendor. Vendors must demonstrate assess ability their and to Critical methodologies, practices, skills, and
expertise. Vendors should also reference the following within their examples and references. Vendor to provide a short summary and attach a one-to-two-page maximum overview. The
overview should include the vendor’s ability to perform the services described in the RFP and confirmation that the vendor is willing to perform these services. Include the name,
address, telephone number and email address of the contact person for contractual clarifications throughout the evaluation period. Provide an overview of the vendor's culture and
core values. Include the vendor’s experience with the services related to Digital Transformation. Past aligned and planned upcoming innovation in guiding organizations through
their Digital Transformation goals. Include overview of vendor’s financial strength. Include the vendors customer retention rate related to success of Digital Transformations.
"""
def test_split_using_create_documents(text):
print("-"*80)
docs = split_using_create_documents(text)
print(f"create_documents produces {len(docs)} chunks")
for doc in docs[:3]:
page_content= doc.page_content
print(f"page_content:\n{page_content}")
print("-"*8)
print(f"page_content length: {len(page_content)}")
print("-"*20)
def test_split_using_split_text(text):
print("-"*80)
chunks = split_using_split_text(text)
print(f"split_text produces {len(chunks)} chunks")
for chunk in chunks[:3]:
print(f"Chunk:\n{chunk}")
print("-"*8)
print(f"Chunk length: {len(chunk)}")
print("-"*20)
def unit_tests():
print("inside unit_tests()")
test_split_using_create_documents(TEST_TEXT)
test_split_using_split_text(TEST_TEXT)
if __name__ == "__main__":
unit_tests()
| [] |
2024-01-10 | DJP-Digital-Jaaduii-Pitara/sakhi-api-service | index_documents.py | import argparse
import json
from typing import (
Dict,
List
)
import marqo
from langchain.docstore.document import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from llama_index import SimpleDirectoryReader
def load_documents(folder_path):
source_chunks = []
sources = SimpleDirectoryReader(
input_dir=folder_path, recursive=True).load_data()
splitter = RecursiveCharacterTextSplitter(
chunk_size=1024, chunk_overlap=200)
for source in sources:
for chunk in splitter.split_text(source.text):
source_chunks.append(Document(page_content=chunk, metadata={
"page_label": source.metadata.get("page_label"),
"file_name": source.metadata.get("file_name"),
"file_path": source.metadata.get("file_path"),
"file_type": source.metadata.get("file_type")
}))
return source_chunks
def get_formatted_documents(documents: List[Document]):
docs: List[Dict[str, str]] = []
for d in documents:
doc = {
"text": d.page_content,
"metadata": json.dumps(d.metadata) if d.metadata else json.dumps({}),
}
docs.append(doc)
return docs
def chunk_list(document, batch_size):
"""Return a list of batch sized chunks from document."""
return [document[i: i + batch_size] for i in range(0, len(document), batch_size)]
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--marqo_url',
type=str,
required=True,
help='Endpoint URL of marqo',
)
parser.add_argument('--index_name',
type=str,
required=True,
help='Name of marqo index',
)
parser.add_argument('--folder_path',
type=str,
required=True,
help='Path to the folder',
default="input_data"
)
args = parser.parse_args()
MARQO_URL = args.marqo_url
MARQO_INDEX_NAME = args.index_name
FOLDER_PATH = args.folder_path
# Initialize Marqo instance
marqo_client = marqo.Client(url=MARQO_URL)
try:
marqo_client.index(MARQO_INDEX_NAME).delete()
print("Existing Index successfully deleted.")
except:
print("Index does not exist. Creating new index")
index_settings = {
"index_defaults": {
"treat_urls_and_pointers_as_images": False,
"model": "flax-sentence-embeddings/all_datasets_v4_mpnet-base",
"normalize_embeddings": True,
"text_preprocessing": {
"split_length": 3,
"split_overlap": 1,
"split_method": "sentence"
}
}
}
marqo_client.create_index(
MARQO_INDEX_NAME, settings_dict=index_settings)
print(f"Index {MARQO_INDEX_NAME} created.")
print("Loading documents...")
documents = load_documents(FOLDER_PATH)
print("Total Documents ===>", len(documents))
f = open("indexed_documents.txt", "w")
f.write(str(documents))
f.close()
print(f"Indexing documents...")
formatted_documents = get_formatted_documents(documents)
tensor_fields = ['text']
_document_batch_size = 50
chunks = list(chunk_list(formatted_documents, _document_batch_size))
for chunk in chunks:
marqo_client.index(MARQO_INDEX_NAME).add_documents(
documents=chunk, client_batch_size=_document_batch_size, tensor_fields=tensor_fields)
print("============ INDEX DONE =============")
if __name__ == "__main__":
main()
# RUN
# python3 index_documents.py --marqo_url=http://0.0.0.0:8882 --index_name=sakhi_activity --folder_path=input_data
| [] |
2024-01-10 | Nick-Panaya/linebot_chatgpt | linebot_chatGPT.py | from flask import Flask, request
from linebot import LineBotApi, WebhookHandler
from linebot.models import (MessageEvent,
TextMessage,
TextSendMessage)
import openai
openai.api_key = "xxxL"
model_use = "text-davinci-003"
channel_secret = "xxx"
channel_access_token = "xxx"
line_bot_api = LineBotApi(channel_access_token)
handler = WebhookHandler(channel_secret)
app = Flask(__name__)
@app.route("/", methods=["GET","POST"])
def home():
try:
signature = request.headers["X-Line-Signature"]
body = request.get_data(as_text=True)
handler.handle(body, signature)
except:
pass
return "Hello Line Chatbot"
@handler.add(MessageEvent, message=TextMessage)
def handle_text_message(event):
text = event.message.text
print(text)
prompt_text = text
response = openai.Completion.create(
model=model_use,
prompt=prompt_text,
max_tokens=1024) # max 4096
text_out = response.choices[0].text
line_bot_api.reply_message(event.reply_token,
TextSendMessage(text=text_out))
if __name__ == "__main__":
app.run()
| [] |
2024-01-10 | yunguid/JaaR-Audio | app~text_processing~gpt_summary.py | import os
import spacy
import openai
import logging
import textwrap
import tiktoken
from typing import List, Optional
# Configuration
logging.basicConfig(level=logging.DEBUG)
OPENAI_API_KEY: Optional[str] = os.getenv('OPENAI_API_KEY')
SPACY_MODEL: str = 'en_core_web_sm'
MAX_SENTENCES: int = 5
# api engine should be gpt3.5 turbo
API_ENGINE: str = 'gpt-3.5-turbo-16k'
# is API Key present
if not OPENAI_API_KEY:
logging.error("Missing OpenAI API Key!")
exit(1)
# OpenAI API key and Spacy model
openai.api_key = OPENAI_API_KEY
nlp = spacy.load(SPACY_MODEL)
def split_into_sentences(text):
doc = nlp(text)
return [sent.text.strip() for sent in doc.sents]
def add_punctuation_and_correct_grammar(sentence: str) -> str:
logging.debug(f"Token Count for Punctuation and Grammar Correction: {len(sentence.split())}")
response = openai.ChatCompletion.create(
model=API_ENGINE,
messages=[
{"role": "system", "content": "You are a grammar correction model."},
{"role": "user", "content": sentence}
],
max_tokens=60
)
logging.debug(f"API Response for Punctuation and Grammar Correction: {response}")
return response.choices[0].message['content'].strip()
def split_into_paragraphs(text, max_length=1000):
"""Split the text into chunks approximately `max_length` characters long."""
return textwrap.wrap(text, width=max_length)
def chunk_transcript_by_sentences(transcript: str, max_sentences: int = MAX_SENTENCES) -> List[str]:
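    # Group consecutive sentences into chunks of at most max_sentences sentences each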
sentences = split_into_sentences(transcript)
chunks = []
current_chunk = []
sentence_count_in_chunk = 0
for sentence in sentences:
sentence_count_in_chunk += 1
if sentence_count_in_chunk <= max_sentences:
current_chunk.append(sentence)
else:
chunks.append(" ".join(current_chunk))
current_chunk = [sentence]
sentence_count_in_chunk = 1
if current_chunk:
chunks.append(" ".join(current_chunk))
return chunks
enc = tiktoken.get_encoding("cl100k_base")
def count_tokens(text: str) -> int:
return len(enc.encode(text))
def chunk_transcript_by_tokens(transcript: str, max_tokens: int = 4095) -> List[str]:
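    # Pack whole sentences into chunks whose tiktoken count stays under max_tokens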
sentences = split_into_sentences(transcript)
chunks = []
current_chunk = ""
current_tokens = 0
for sentence in sentences:
sentence_tokens = count_tokens(sentence)
if current_tokens + sentence_tokens <= max_tokens:
current_chunk += sentence + " "
current_tokens += sentence_tokens
else:
chunks.append(current_chunk.strip())
current_chunk = sentence + " "
current_tokens = sentence_tokens
chunks.append(current_chunk.strip())
return chunks
def summarize_transcript(transcript: str) -> str:
max_tokens = 4095
response = openai.ChatCompletion.create(
model=API_ENGINE,
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": transcript},
{"role": "user", "content": "Can you summarize the key points, context, and any actionable items from the above conversation or video? Please be concise. No talk, just do - Straight to summarizing"} # Adjusted the prompt
],
max_tokens=max_tokens
)
# commented out for less output
# logging.debug(f"API Response for Summarization: {response}")
    summary = response.choices[0].message['content'].strip()
    return summary
def process_transcript(transcript: str) -> List[str]:
# Split final text into paragraphs
paragraphs = split_into_paragraphs(transcript)
# Generate summaries for each paragraph
summaries = [summarize_transcript(paragraph) for paragraph in paragraphs]
return summaries | [
"Can you summarize the key points, context, and any actionable items from the above conversation or video? Please be concise. No talk, just do - Straight to summarizing",
"You are a helpful assistant.",
"You are a grammar correction model."
] |
2024-01-10 | ahonnecke/gpt-commit | gpt-commit.py | #!/usr/bin/env python3
import argparse
import asyncio
import os
import subprocess
import sys
import openai
DIFF_PROMPT = "Generate a succinct summary of the following code changes:"
COMMIT_MSG_CHAR_LIMIT = 100
COMMIT_MSG_PROMPT = (
f"Using no more than {COMMIT_MSG_CHAR_LIMIT} characters, "
"generate a descriptive commit message from these summaries:"
)
PROMPT_CUTOFF = 10000
openai.organization = os.getenv("OPENAI_ORG_ID")
openai.api_key = os.environ["OPENAI_API_KEY"]
def get_diff():
arguments = [
"git",
"--no-pager",
"diff",
"--staged",
"--ignore-space-change",
"--ignore-all-space",
"--ignore-blank-lines",
]
diff_process = subprocess.run(arguments, capture_output=True, text=True)
diff_process.check_returncode()
return diff_process.stdout.strip()
def parse_diff(diff):
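    # Split a diff into per-file sections, pairing each file header with its @@ hunks (stored in reverse order)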
file_diffs = diff.split("\ndiff")
file_diffs = [file_diffs[0]] + [
"\ndiff" + file_diff for file_diff in file_diffs[1:]
]
chunked_file_diffs = []
for file_diff in file_diffs:
[head, *chunks] = file_diff.split("\n@@")
chunks = ["\n@@" + chunk for chunk in reversed(chunks)]
chunked_file_diffs.append((head, chunks))
return chunked_file_diffs
def assemble_diffs(parsed_diffs, cutoff):
# create multiple well-formatted diff strings, each being shorter than cutoff
assembled_diffs = [""]
def add_chunk(chunk):
if len(assembled_diffs[-1]) + len(chunk) <= cutoff:
assembled_diffs[-1] += "\n" + chunk
return True
else:
assembled_diffs.append(chunk)
return False
for head, chunks in parsed_diffs:
if not chunks:
add_chunk(head)
else:
add_chunk(head + chunks.pop())
while chunks:
if not add_chunk(chunks.pop()):
assembled_diffs[-1] = head + assembled_diffs[-1]
return assembled_diffs
async def complete(prompt):
completion_resp = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": prompt[: PROMPT_CUTOFF + 100]}],
max_tokens=128,
)
completion = completion_resp.choices[0].message.content.strip()
return completion
async def summarize_diff(diff):
assert diff
return await complete(DIFF_PROMPT + "\n\n" + diff + "\n\n")
async def summarize_summaries(summaries):
assert summaries
return await complete(COMMIT_MSG_PROMPT + "\n\n" + summaries + "\n\n")
async def generate_commit_message(diff):
if not diff:
# no files staged or only whitespace diffs
return "Fix whitespace"
assembled_diffs = assemble_diffs(parse_diff(diff), PROMPT_CUTOFF)
summaries = await asyncio.gather(
*[summarize_diff(diff) for diff in assembled_diffs]
)
return await summarize_summaries("\n".join(summaries))
def commit(message):
# will ignore message if diff is empty
return subprocess.run(["git", "commit", "--message", message, "--edit"]).returncode
def parse_args():
"""
Extract the CLI arguments from argparse
"""
    parser = argparse.ArgumentParser(description="Generate a commit message from a diff")
parser.add_argument(
"-p",
"--print-message",
action="store_true",
default=True,
help="Print message in place of performing commit",
)
return parser.parse_args()
async def main():
args = parse_args()
try:
diff = get_diff()
commit_message = await generate_commit_message(diff)
except UnicodeDecodeError:
print("gpt-commit does not support binary files", file=sys.stderr)
commit_message = (
"# gpt-commit does not support binary files. "
"Please enter a commit message manually or unstage any binary files."
)
if args.print_message:
print(commit_message)
else:
exit(commit(commit_message))
if __name__ == "__main__":
asyncio.run(main())
| [
"Generate a succinct summary of the following code changes:",
"Using no more than 100 characters, generate a descriptive commit message from these summaries:",
"10000"
] |
2024-01-10 | PappaPaj/qdrant-cookbook | shared_utils.py | import os
import openai
import pandas as pd
from ast import literal_eval
from qdrant_client import QdrantClient
from qdrant_client.http import models as rest
from dotenv import load_dotenv
# Load the environment variables from the .env file
load_dotenv()
# Define the constants for OpenAI, Qdrant, and the embedding model
EMBEDDING_MODEL = "text-embedding-ada-002"
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
QDRANT_URL = os.getenv("QDRANT_URL")
QDRANT_API_KEY = os.getenv("QDRANT_API_KEY")
def get_openai_api_key():
"""Return the OpenAI API key."""
return OPENAI_API_KEY
def initialize_qdrant_client():
"""Initialize and return the Qdrant client."""
url = QDRANT_URL
api_key = QDRANT_API_KEY
client = QdrantClient(url=url, api_key=api_key)
return client
def read_embeddings_from_csv(file_path):
"""
Read embeddings data from a CSV file.
Parameters:
file_path (str): The file path of the CSV file containing embeddings data.
Returns:
pd.DataFrame: DataFrame containing the embeddings data.
"""
embeddings_df = pd.read_csv(file_path)
embeddings_df['name_vector'] = embeddings_df['name_vector'].apply(literal_eval)
embeddings_df['description_vector'] = embeddings_df['description_vector'].apply(literal_eval)
return embeddings_df
def create_embeddings(query):
"""
Create embeddings for the input query using the OpenAI API.
Parameters:
query (str or list): The query or a list of queries to be embedded.
Returns:
list or ndarray: The embeddings for the input query/queries.
"""
if isinstance(query, str):
query = [query]
openai.api_key = get_openai_api_key()
embeddings = openai.Embedding.create(
input=query,
model=EMBEDDING_MODEL,
)
if len(embeddings['data']) == 1:
return embeddings['data'][0]['embedding']
else:
return [entry['embedding'] for entry in embeddings['data']]
def create_collection(client, collection_name, vector_size):
"""
Create a collection in Qdrant with the specified vector configuration.
Parameters:
client (QdrantClient): The initialized Qdrant client.
collection_name (str): The name of the collection to be created.
vector_size (int): The size of the vectors in the collection.
"""
client.recreate_collection(
collection_name=collection_name,
vectors_config={
"name_vector": rest.VectorParams(distance=rest.Distance.COSINE, size=vector_size),
"description_vector": rest.VectorParams(distance=rest.Distance.COSINE, size=vector_size),
}
)
def insert_embeddings_into_collection(client, collection_name, embeddings_df):
"""
Insert embeddings data into the specified Qdrant collection.
Parameters:
client (QdrantClient): The initialized Qdrant client.
collection_name (str): The name of the collection to insert data into.
embeddings_df (pd.DataFrame): DataFrame containing the embeddings data.
"""
points_to_upsert = []
for _, row in embeddings_df.iterrows():
product_id = row['Product ID']
# Prepare the vector data for each point
vector_data = {
"name_vector": row['name_vector'],
"description_vector": row['description_vector'],
}
# Prepare the payload data for each point (optional)
payload_data = {
"name": row["Product Name"],
"content": row["Description"],
"metadata": {
"product_id": row["Product ID"],
"product_name": row["Product Name"],
"product_brand": row["Brand"],
}
}
# Create a PointStruct object for each row and add it to the list
point = rest.PointStruct(id=product_id, vector=vector_data, payload=payload_data)
points_to_upsert.append(point)
# Perform the upsert operation with the prepared list of points
client.upsert(collection_name=collection_name, points=points_to_upsert)
def get_num_products(client, collection_name):
"""
Get the number of products in the specified Qdrant collection.
Parameters:
client (QdrantClient): The initialized Qdrant client.
collection_name (str): The name of the collection.
Returns:
int: The number of products in the collection.
"""
count_result = client.count(collection_name=collection_name)
num_products = count_result.count
return num_products
def read_products_data(filename):
"""
Read product data from a CSV file into a DataFrame.
Parameters:
filename (str): The file path of the CSV file containing product data.
Returns:
pd.DataFrame: DataFrame containing the product data.
"""
products_df = pd.read_csv(filename)
return products_df
def query_qdrant(query, collection_name, vector_name='description_vector', top_k=5):
"""
Execute a search query using Qdrant and retrieve the top-k results.
Parameters:
query (str): The query string for the search.
collection_name (str): The name of the collection to search in.
vector_name (str): The name of the vector field to use for the search.
top_k (int): The number of top results to retrieve.
Returns:
dict: The search results containing the top-k data points.
"""
client = initialize_qdrant_client()
embedded_query = create_embeddings(query)
query_results = client.search(
collection_name=collection_name,
query_vector=(vector_name, embedded_query),
limit=top_k,
)
return query_results
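# Minimal end-to-end sketch using the helpers above. Illustrative only: the CSV
# path, the collection name, and the 1536-dimension size of text-embedding-ada-002
# vectors are assumptions, not part of the original file.
if __name__ == "__main__":
    qdrant_client = initialize_qdrant_client()
    products_embeddings = read_embeddings_from_csv("product_embeddings.csv")
    create_collection(qdrant_client, "Products", vector_size=1536)
    insert_embeddings_into_collection(qdrant_client, "Products", products_embeddings)
    print(get_num_products(qdrant_client, "Products"))
    print(query_qdrant("lightweight hiking jacket", "Products", top_k=3))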
| [
"Description"
] |
2024-01-10 | PappaPaj/qdrant-cookbook | metadata_filtered_search.py | from shared_utils import initialize_qdrant_client
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Qdrant
from qdrant_client.http import models as rest
# Initialize Qdrant client and set up collection information
client = initialize_qdrant_client()
collection_name = "Products"
collection_vector_column = "description_vector"
embeddings = OpenAIEmbeddings()
# Create Qdrant vector store instance
# Args here are directly connected to the args found in insert_embeddings_into_collection in shared_utils.py
# Might be worthwhile to make a connected schema for this
qdrant = Qdrant(
client=client,
collection_name=collection_name,
embeddings=embeddings,
vector_name=collection_vector_column,
content_payload_key="content"
)
# Function to perform similarity search and print results, k=1 means we only want to return the top result
def perform_similarity_search(query, filter_condition):
matched_product = qdrant.similarity_search_with_score(query, k=1, filter=filter_condition)[0]
result = {
"query": query,
"matched_persona": matched_product[0].page_content,
"metadata": matched_product[0].metadata,
"score": matched_product[1]
}
return result
# Filter condition for brand filtering
# Rich-type support filtering examples at https://qdrant.tech/documentation/concepts/filtering/
specific_brands = ["RVCA", "Cotopaxi"]
brand_filter = rest.Filter(must=[rest.FieldCondition(key="metadata.product_brand", match=rest.MatchAny(any=specific_brands))])
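# Illustrative alternative (not used below): the same models module also supports
# exact-match filters on a single value, e.g. one specific brand.
single_brand_filter = rest.Filter(
    must=[rest.FieldCondition(key="metadata.product_brand", match=rest.MatchValue(value="RVCA"))]
)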
# Perform similarity search for various queries
queries = [
"I like disco",
"I like waterfalls",
"I like hard drugs",
"I like pondering the meaning of life",
"I like hoola hooping"
]
for query in queries:
query_result = perform_similarity_search(query, brand_filter)
assert query == query_result["query"]
print(f"Query: {query}")
print(f"Matched persona: {query_result['matched_persona']}")
print(f"Metadata: {query_result['metadata']}")
print(f"Score: {query_result['score']}")
print()
| [] |
2024-01-10 | codeBreaker26/test | openagent~finetune~LLMFinetune.py | from abc import ABC, abstractmethod
from logging import Logger
import openai
class LLMFinetune(ABC):
def __init__(self, logger: Logger, openai_key: str):
self.logger = logger
openai.api_key = openai_key
@abstractmethod
def transform_data(self, train_csv_file: str, val_csv_file: str , train_output_file: str, val_output_file: str) -> str:
pass
@abstractmethod
def finetune(self, **kwargs):
pass
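# Illustrative sketch of a concrete subclass (not part of the original file); the
# method bodies are placeholders showing how the abstract interface is meant to be
# filled in, not the project's real implementation.
class ExampleFinetune(LLMFinetune):
    def transform_data(self, train_csv_file: str, val_csv_file: str, train_output_file: str, val_output_file: str) -> str:
        # e.g. convert the CSV rows into the JSONL format expected by the fine-tuning API
        self.logger.info("transforming %s -> %s", train_csv_file, train_output_file)
        return train_output_file
    def finetune(self, **kwargs):
        # e.g. launch a fine-tuning job using the prepared files
        self.logger.info("starting fine-tune with args: %s", kwargs)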
| [] |
2024-01-10 | ivegotbigideas/ORB | scripts~bot_env.py | """
Adapted from template_my_robot_env.py in openai_ros.
"""
import math
import numpy as np
from openai_ros import robot_gazebo_env
import api
from api import Orb, Target
import rospy
debug = False
class BotEnv(robot_gazebo_env.RobotGazeboEnv):
"""
Superclass for all Robot environments.
"""
def __init__(self):
"""
Initializes a new Robot environment.
"""
# Variables that we give through the constructor.
# TODO Not sure what these are for
# Internal Vars
# print("START init bot_env")
self.controllers_list = []
self.robot_name_space = ""
reset_controls_bool = False
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(BotEnv, self).__init__(
controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=reset_controls_bool,
)
self.bot_api = Orb()
self.target_api = Target()
self.SKIP = 10
self.MAX_STEPS = 25
self.steps = 0
self.grid_squares = 0
self.previous_dist = 100000
self.previous_angle_factor = 100000
# print("END init bot_env")
def step(self, action):
"""
Function executed each time step.
Here we get the action execute it in a time step and retrieve the
observations generated by that action.
:param action:
:return: obs, reward, done, info
"""
"""
Here we should convert the action num to movement action, execute the action in the
simulation and get the observations result of performing that action.
"""
self.gazebo.unpauseSim()
self._set_action(action)
rate = rospy.Rate(1 / self.SKIP)
rate.sleep()
self.gazebo.pauseSim()
obs = self._get_obs()
done = self._is_done(obs)
info = {}
reward = self._compute_reward(obs, done)
self.cumulated_episode_reward += reward
return obs, reward, done, info
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
# TODO Not sure what to do here
return True
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""
Sets the Robot in its init pose.
"""
while True:
self.previous_dist = 100000
self.previous_angle_factor = 100000
self.bot_api.randomise_robot_pose()
self.target_api.randomise_target_pose()
if not self._is_touching_target():
break
def _init_env_variables(self):
"""
Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
self.steps = 0
self.grid_squares = 0
self._set_init_pose()
def _compute_reward(self, observations, done):
"""
Calculates the reward to give based on the observations given.
"""
reward = 0
# High reward for reaching the target
if self._is_touching_target():
reward += 2000
return reward
# Reward for map exploration
map_list = self.bot_api.get_latest_slam_map()["data"]
next_grid_squares = sum(1 for i in map_list if i == 1)
if next_grid_squares > self.grid_squares:
self.grid_squares = next_grid_squares
reward += 5 # Adjusted reward for exploration
# Calculate distance to the target
new_dist = self._distance_to_target()
distance_change = self.previous_dist - new_dist
if distance_change > 0.5:
reward += distance_change * 400 # Reward for moving closer, quicker
elif distance_change > 0:
reward += distance_change * 200 # Reward for moving closer
else:
reward += distance_change * 60 # Penalty for moving away
angle_difference = self._calculate_angle_difference()
if angle_difference < 30 and distance_change > 0:
reward += distance_change * 100
angle_difference_rad = math.radians(angle_difference)
angle_factor = (1 - abs(angle_difference_rad) / (math.pi / 2))
# Reward/Penalty for looking at the target
look_at_reward = 0
angle_factor_change = angle_factor - self.previous_angle_factor
if angle_factor_change > 0:
look_at_reward += angle_factor_change * 35 # Reward for looking towards
else:
look_at_reward += angle_factor_change * 50 # Penalty for looking away
reward += look_at_reward
# Time-based penalty or a small penalty for inaction
reward -= 2.5
# Skip first reward
if self.previous_angle_factor > 1000 or self.previous_dist > 1000:
reward = 0
self.previous_dist = new_dist
self.previous_angle_factor = angle_factor
print(reward)
return reward
def _set_action(self, action):
"""
Applies the given action to the simulation.
"""
if action == 0:
act_string = "f"
elif action == 1:
act_string = "b"
elif action == 2:
act_string = "cw"
elif action == 3:
act_string = "acw"
else:
act_string = "stop"
self.bot_api.move_robot(act_string)
def _get_obs(self):
# Process Camera Data
camera_data = self.bot_api.get_latest_camera_data()
camera_array = (np.array(camera_data) / 255.0).flatten()
# Process LIDAR Data
self.gazebo.unpauseSim()
lidar_data = self.bot_api.get_latest_lidar_data()
self.gazebo.pauseSim()
lidar_array = np.array(lidar_data["ranges"])
# Concatenate Camera and LIDAR Data
final = np.concatenate([camera_array, lidar_array])
#print(final.shape)
return final
def _is_done(self, observations):
"""
Checks if episode done based on observations given.
"""
self.steps += 1
return self.steps >= self.MAX_STEPS or self._is_touching_target()
# Methods that the TrainingEnvironment will need.
# ----------------------------
def _distance_to_target(self):
"""
Computes the distance to the target object
"""
self.gazebo.unpauseSim()
bot_pose = self.bot_api.get_ground_truth_robot_pose()
self.gazebo.pauseSim()
bot_x = bot_pose["position"]["x"]
bot_y = bot_pose["position"]["y"]
self.gazebo.unpauseSim()
target_pose = self.target_api.get_ground_truth_target_pose()
self.gazebo.pauseSim()
target_x = target_pose["position"]["x"]
target_y = target_pose["position"]["y"]
x_dist = bot_x - target_x
y_dist = bot_y - target_y
dist = math.sqrt((x_dist**2) + (y_dist**2))
if(debug):
print("Distance: ", dist)
return dist
def _calculate_angle_difference(self):
# Get bot's pose and orientation
self.gazebo.unpauseSim()
bot_pose = self.bot_api.get_ground_truth_robot_pose()
self.gazebo.pauseSim()
bot_x = bot_pose["position"]["x"]
bot_y = bot_pose["position"]["y"]
bot_orientation = bot_pose["orientation"]
roll, pitch, yaw = self._quaternion_to_euler_angles(bot_orientation)
# Get target's position
self.gazebo.unpauseSim()
target_pose = self.target_api.get_ground_truth_target_pose()
self.gazebo.pauseSim()
target_x = target_pose["position"]["x"]
target_y = target_pose["position"]["y"]
vector_to_target = [target_x - bot_x, target_y - bot_y]
target_angle_deg = math.degrees(math.atan2(vector_to_target[1], vector_to_target[0]))
bot_yaw_deg = yaw % 360
target_angle_deg = target_angle_deg % 360
if(debug):
print("Bot Yaw:", bot_yaw_deg)
print("Target Angle:", target_angle_deg)
# Calculate the difference in angles
angle_difference_deg = target_angle_deg - bot_yaw_deg
angle_difference_deg = (angle_difference_deg + 180) % 360 - 180
return angle_difference_deg
def _quaternion_to_euler_angles(self, quaternion):
"""
Convert a quaternion to Euler angles (roll, pitch, yaw) with yaw measured from the negative x-axis.
"""
x, y, z, w = quaternion['x'], quaternion['y'], quaternion['z'], quaternion['w']
# Roll (x-axis rotation)
sinr_cosp = 2 * (w * x + y * z)
cosr_cosp = 1 - 2 * (x * x + y * y)
roll = math.atan2(sinr_cosp, cosr_cosp)
# Pitch (y-axis rotation)
sinp = 2 * (w * y - z * x)
if abs(sinp) >= 1:
pitch = math.copysign(math.pi / 2, sinp)
else:
pitch = math.asin(sinp)
# Yaw (z-axis rotation)
siny_cosp = 2 * (w * z + x * y)
cosy_cosp = 1 - 2 * (y * y + z * z)
yaw = math.atan2(siny_cosp, cosy_cosp)
# Adjust yaw to measure from the negative x-axis
yaw -= math.pi
if yaw < -math.pi:
yaw += 2 * math.pi
# Convert from radians to degrees
roll_deg = math.degrees(roll)
pitch_deg = math.degrees(pitch)
yaw_deg = math.degrees(yaw)
return roll_deg, pitch_deg, yaw_deg
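    # Illustrative sanity check (not from the original source): for the identity
    # quaternion {x: 0, y: 0, z: 0, w: 1}, roll and pitch come out as 0, and the
    # raw yaw of 0 rad is shifted by -pi, so yaw_deg is -180; that is, a robot
    # facing the world +x axis is 180 degrees from the negative x-axis reference.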
def _is_touching_target(self):
"""
        Returns True if the robot is within 2 coordinate units of the target.
"""
# print("RETURNING")
dst = self._distance_to_target()
return dst <= 2
def set_skip(self, newSkip):
SKIP = newSkip | [] |
2024-01-10 | zhaolongkzz/gym_gazebo_kinetic | gym_gazebo~envs~real_env.py | import gym
import rospy
import os
import signal
import subprocess
import time
from os import path
from std_srvs.srv import Empty
import random
class RealEnv(gym.Env):
"""Superclass for all Gazebo environments.
"""
metadata = {'render.modes': ['human']}
def __init__(self):
self.port = "11311"#str(random_number) #os.environ["ROS_PORT_SIM"]
# #start roscore
# subprocess.Popen(["roscore", "-p", self.port])
# time.sleep(1)
print ("Roscore launched!")
# Launch the simulation with the given launchfile name
rospy.init_node('gym', anonymous=True)
def set_ros_master_uri(self):
os.environ["ROS_MASTER_URI"] = self.ros_master_uri
def step(self, action):
# Implement this method in every subclass
# Perform a step in gazebo. E.g. move the robot
raise NotImplementedError
def reset(self):
# Implemented in subclass
raise NotImplementedError
def render(self, mode=None, close=False):
pass
def _render(self, mode=None, close=False):
self._close()
def _close(self):
        # check_call only returns the exit status; read the PID files' contents instead
        pid1 = subprocess.check_output(["cat", "/tmp/myroslaunch_" + self.port + ".pid"]).decode().strip()
        pid2 = subprocess.check_output(["cat", "/home/erle/.ros/roscore-" + self.port + ".pid"]).decode().strip()
        subprocess.Popen(["kill", "-INT", pid1])
        subprocess.Popen(["kill", "-INT", pid2])
def close(self):
pass
def _configure(self):
# TODO
        # From OpenAI API: Provides runtime configuration to the environment
# Maybe set the Real Time Factor?
pass
def _seed(self):
# TODO
# From OpenAI API: Sets the seed for this env's random number generator(s)
pass
| [] |
2024-01-10 | zhaolongkzz/gym_gazebo_kinetic | gym_gazebo~envs~real_env_ros2.py | import gym
import rclpy
import os
import signal
import subprocess
import time
from os import path
from std_srvs.srv import Empty
import random
class RealEnvROS2(gym.Env):
"""Superclass for all Gazebo environments.
"""
metadata = {'render.modes': ['human']}
def __init__(self):
# Launch the simulation with the given launchfile name
rclpy.init(args=None)
self.node = rclpy.create_node('real_env_ros2')
def step(self, action):
# Implement this method in every subclass
# Perform a step in gazebo. E.g. move the robot
raise NotImplementedError
def reset(self):
# Implemented in subclass
raise NotImplementedError
def render(self, mode=None, close=False):
pass
def _render(self, mode=None, close=False):
self._close()
def _close(self):
        # check_call only returns the exit status; read the PID files' contents instead
        pid1 = subprocess.check_output(["cat", "/tmp/myroslaunch_" + self.port + ".pid"]).decode().strip()
        pid2 = subprocess.check_output(["cat", "/home/erle/.ros/roscore-" + self.port + ".pid"]).decode().strip()
        subprocess.Popen(["kill", "-INT", pid1])
        subprocess.Popen(["kill", "-INT", pid2])
def close(self):
pass
def _configure(self):
# TODO
        # From OpenAI API: Provides runtime configuration to the environment
# Maybe set the Real Time Factor?
pass
def _seed(self):
# TODO
# From OpenAI API: Sets the seed for this env's random number generator(s)
pass
| [] |
2024-01-10 | zhaolongkzz/gym_gazebo_kinetic | gym_gazebo~envs~gazebo_env.py | import gym
import rospy
#import roslaunch
import sys
import os
import signal
import subprocess
import time
from std_srvs.srv import Empty
import random
from rosgraph_msgs.msg import Clock
class GazeboEnv(gym.Env):
"""Superclass for all Gazebo environments.
"""
metadata = {'render.modes': ['human']}
def __init__(self, launchfile):
self.last_clock_msg = Clock()
random_number = random.randint(10000, 15000)
self.port = "12345" #str(random_number) #os.environ["ROS_PORT_SIM"]
self.port_gazebo = "12346" #str(random_number+1) #os.environ["ROS_PORT_SIM"]
# self.port = str(random_number) #os.environ["ROS_PORT_SIM"]
# self.port_gazebo = str(random_number+1) #os.environ["ROS_PORT_SIM"]
os.environ["ROS_MASTER_URI"] = "http://localhost:"+self.port
os.environ["GAZEBO_MASTER_URI"] = "http://localhost:"+self.port_gazebo
#
# self.ros_master_uri = os.environ["ROS_MASTER_URI"];
print("ROS_MASTER_URI=http://localhost:"+self.port + "\n")
print("GAZEBO_MASTER_URI=http://localhost:"+self.port_gazebo + "\n")
# self.port = os.environ.get("ROS_PORT_SIM", "11311")
ros_path = os.path.dirname(subprocess.check_output(["which", "roscore"]))
        # NOTE: It doesn't make sense to launch a roscore because it will be done when spawning Gazebo, which also needs
# to be the first node in order to initialize the clock.
# # start roscore with same python version as current script
# self._roscore = subprocess.Popen([sys.executable, os.path.join(ros_path, b"roscore"), "-p", self.port])
# time.sleep(1)
# print ("Roscore launched!")
if launchfile.startswith("/"):
fullpath = launchfile
else:
fullpath = os.path.join(os.path.dirname(__file__), "assets", "launch", launchfile)
if not os.path.exists(fullpath):
raise IOError("File "+fullpath+" does not exist")
self._roslaunch = subprocess.Popen([sys.executable, os.path.join(ros_path, b"roslaunch"), "-p", self.port, fullpath])
print ("Gazebo launched!")
self.gzclient_pid = 0
# Launch the simulation with the given launchfile name
rospy.init_node('gym', anonymous=True)
################################################################################################################
# r = rospy.Rate(1)
# self.clock_sub = rospy.Subscriber('/clock', Clock, self.callback, queue_size=1000000)
# while not rospy.is_shutdown():
# print("initialization: ", rospy.rostime.is_rostime_initialized())
# print("Wallclock: ", rospy.rostime.is_wallclock())
# print("Time: ", time.time())
# print("Rospyclock: ", rospy.rostime.get_rostime().secs)
# # print("/clock: ", str(self.last_clock_msg))
# last_ros_time_ = self.last_clock_msg
# print("Clock:", last_ros_time_)
# # print("Waiting for synch with ROS clock")
# # if wallclock == False:
# # break
# r.sleep()
################################################################################################################
# def callback(self, message):
# """
# Callback method for the subscriber of the clock topic
# :param message:
# :return:
# """
# # self.last_clock_msg = int(str(message.clock.secs) + str(message.clock.nsecs)) / 1e6
# # print("Message", message)
# self.last_clock_msg = message
# # print("Message", message)
def step(self, action):
# Implement this method in every subclass
# Perform a step in gazebo. E.g. move the robot
raise NotImplementedError
def reset(self):
# Implemented in subclass
raise NotImplementedError
def _render(self, mode="human", close=False):
if close:
tmp = os.popen("ps -Af").read()
proccount = tmp.count('gzclient')
if proccount > 0:
if self.gzclient_pid != 0:
os.kill(self.gzclient_pid, signal.SIGTERM)
os.wait()
return
tmp = os.popen("ps -Af").read()
proccount = tmp.count('gzclient')
if proccount < 1:
subprocess.Popen("gzclient")
self.gzclient_pid = int(subprocess.check_output(["pidof","-s","gzclient"]))
else:
self.gzclient_pid = 0
def _close(self):
# Kill gzclient, gzserver and roscore
tmp = os.popen("ps -Af").read()
gzclient_count = tmp.count('gzclient')
gzserver_count = tmp.count('gzserver')
roscore_count = tmp.count('roscore')
rosmaster_count = tmp.count('rosmaster')
if gzclient_count > 0:
os.system("killall -9 gzclient")
if gzserver_count > 0:
os.system("killall -9 gzserver")
if rosmaster_count > 0:
os.system("killall -9 rosmaster")
if roscore_count > 0:
os.system("killall -9 roscore")
if (gzclient_count or gzserver_count or roscore_count or rosmaster_count >0):
os.wait()
def _configure(self):
# TODO
        # From OpenAI API: Provides runtime configuration to the environment
# Maybe set the Real Time Factor?
pass
def _seed(self):
# TODO
# From OpenAI API: Sets the seed for this env's random number generator(s)
pass
| [] |
2024-01-10 | kryptogo/litellm | litellm~integrations~helicone.py | #### What this does ####
# On success, logs events to Helicone
import dotenv, os
import requests
dotenv.load_dotenv() # Loading env variables using dotenv
import traceback
class HeliconeLogger:
# Class variables or attributes
helicone_model_list = ["gpt", "claude"]
def __init__(self):
# Instance variables
self.provider_url = "https://api.openai.com/v1"
self.key = os.getenv('HELICONE_API_KEY')
def claude_mapping(self, model, messages, response_obj):
from anthropic import HUMAN_PROMPT, AI_PROMPT
prompt = f"{HUMAN_PROMPT}"
for message in messages:
if "role" in message:
if message["role"] == "user":
prompt += f"{HUMAN_PROMPT}{message['content']}"
else:
prompt += f"{AI_PROMPT}{message['content']}"
else:
prompt += f"{HUMAN_PROMPT}{message['content']}"
prompt += f"{AI_PROMPT}"
claude_provider_request = {"model": model, "prompt": prompt}
claude_response_obj = {"completion": response_obj['choices'][0]['message']['content'], "model": model, "stop_reason": "stop_sequence"}
return claude_provider_request, claude_response_obj
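    # Illustrative example (not part of the original file): with model="claude-2"
    # and messages=[{"role": "user", "content": "Hi"}], the prompt assembled above is
    #   "\n\nHuman:" + "\n\nHuman:Hi" + "\n\nAssistant:"
    # (HUMAN_PROMPT / AI_PROMPT are anthropic's "\n\nHuman:" / "\n\nAssistant:" markers),
    # and the returned response object carries the assistant text under "completion".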
def log_success(self, model, messages, response_obj, start_time, end_time, print_verbose):
# Method definition
try:
print_verbose(f"Helicone Logging - Enters logging function for model {model}")
model = model if any(accepted_model in model for accepted_model in self.helicone_model_list) else "gpt-3.5-turbo"
provider_request = {"model": model, "messages": messages}
if "claude" in model:
provider_request, response_obj = self.claude_mapping(model=model, messages=messages, response_obj=response_obj)
providerResponse = {
"json": response_obj,
"headers": {"openai-version": "2020-10-01"},
"status": 200
}
# Code to be executed
url = "https://api.hconeai.com/oai/v1/log"
headers = {
'Authorization': f'Bearer {self.key}',
'Content-Type': 'application/json'
}
start_time_seconds = int(start_time.timestamp())
start_time_milliseconds = int((start_time.timestamp() - start_time_seconds) * 1000)
end_time_seconds = int(end_time.timestamp())
end_time_milliseconds = int((end_time.timestamp() - end_time_seconds) * 1000)
data = {
"providerRequest": {"url": self.provider_url, "json": provider_request, "meta": {"Helicone-Auth": f"Bearer {self.key}"}},
"providerResponse": providerResponse,
"timing": {"startTime": {"seconds": start_time_seconds, "milliseconds": start_time_milliseconds}, "endTime": {"seconds": end_time_seconds, "milliseconds": end_time_milliseconds}} # {"seconds": .., "milliseconds": ..}
}
response = requests.post(url, headers=headers, json=data)
if response.status_code == 200:
print_verbose("Helicone Logging - Success!")
else:
print_verbose(f"Helicone Logging - Error Request was not successful. Status Code: {response.status_code}")
print_verbose(f"Helicone Logging - Error {response.text}")
except:
# traceback.print_exc()
print_verbose(f"Helicone Logging Error - {traceback.format_exc()}")
pass | [
"PLACEHOLDER",
"PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | kryptogo/litellm | litellm~main.py | import os, openai, sys
from typing import Any
from functools import partial
import dotenv, traceback, random, asyncio, time
from copy import deepcopy
import litellm
from litellm import client, logging, exception_type, timeout, get_optional_params, get_litellm_params
from litellm.utils import get_secret, install_and_import, CustomStreamWrapper, read_config_args
from .llms.anthropic import AnthropicLLM
import tiktoken
from concurrent.futures import ThreadPoolExecutor
encoding = tiktoken.get_encoding("cl100k_base")
from litellm.utils import get_secret, install_and_import, CustomStreamWrapper, read_config_args
from litellm.utils import get_ollama_response_stream, stream_to_string
####### ENVIRONMENT VARIABLES ###################
dotenv.load_dotenv() # Loading env variables using dotenv
new_response = {
"choices": [
{
"finish_reason": "stop",
"index": 0,
"message": {
"role": "assistant"
}
}
]
}
# TODO add translations
####### COMPLETION ENDPOINTS ################
#############################################
async def acompletion(*args, **kwargs):
loop = asyncio.get_event_loop()
# Use a partial function to pass your keyword arguments
func = partial(completion, *args, **kwargs)
# Call the synchronous function using run_in_executor
return await loop.run_in_executor(None, func)
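# Illustrative usage of the async wrapper above (not part of the original file);
# the model name and message content are placeholders:
#   response = asyncio.run(acompletion(model="gpt-3.5-turbo",
#                                      messages=[{"role": "user", "content": "Hey"}]))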
@client
# @retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(2), reraise=True, retry_error_callback=lambda retry_state: setattr(retry_state.outcome, 'retry_variable', litellm.retry)) # retry call, turn this off by setting `litellm.retry = False`
@timeout(600) ## set timeouts, in case calls hang (e.g. Azure) - default is 60s, override with `force_timeout`
def completion(
model, messages,# required params
# Optional OpenAI params: see https://platform.openai.com/docs/api-reference/chat/create
functions=[], function_call="", # optional params
temperature=1, top_p=1, n=1, stream=False, stop=None, max_tokens=float('inf'),
presence_penalty=0, frequency_penalty=0, logit_bias={}, user="", deployment_id=None,
# Optional liteLLM function params
*, return_async=False, api_key=None, force_timeout=600, logger_fn=None, verbose=False, azure=False, custom_llm_provider=None, custom_api_base=None
):
try:
global new_response
if azure: # this flag is deprecated, remove once notebooks are also updated.
custom_llm_provider="azure"
args = locals()
model_response = deepcopy(new_response) # deep copy the default response format so we can mutate it and it's thread-safe.
# check if user passed in any of the OpenAI optional params
optional_params = get_optional_params(
functions=functions, function_call=function_call,
temperature=temperature, top_p=top_p, n=n, stream=stream, stop=stop, max_tokens=max_tokens,
presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, logit_bias=logit_bias, user=user, deployment_id=deployment_id,
# params to identify the model
model=model, custom_llm_provider=custom_llm_provider
)
# For logging - save the values of the litellm-specific params passed in
litellm_params = get_litellm_params(
return_async=return_async, api_key=api_key, force_timeout=force_timeout,
logger_fn=logger_fn, verbose=verbose, custom_llm_provider=custom_llm_provider,
custom_api_base=custom_api_base)
if custom_llm_provider == "azure":
# azure configs
openai.api_type = "azure"
openai.api_base = litellm.api_base if litellm.api_base is not None else get_secret("AZURE_API_BASE")
openai.api_version = litellm.api_version if litellm.api_version is not None else get_secret("AZURE_API_VERSION")
# set key
if api_key:
openai.api_key = api_key
elif litellm.azure_key:
openai.api_key = litellm.azure_key
else:
openai.api_key = get_secret("AZURE_API_KEY")
## LOGGING
logging(model=model, input=messages, additional_args=optional_params, custom_llm_provider=custom_llm_provider, logger_fn=logger_fn)
## COMPLETION CALL
if litellm.headers:
response = openai.ChatCompletion.create(
engine=model,
messages = messages,
headers = litellm.headers,
**optional_params,
)
else:
response = openai.ChatCompletion.create(
model=model,
messages = messages,
**optional_params
)
elif model in litellm.open_ai_chat_completion_models or custom_llm_provider == "custom_openai": # allow user to make an openai call with a custom base
openai.api_type = "openai"
# note: if a user sets a custom base - we should ensure this works
api_base = custom_api_base if custom_api_base is not None else litellm.api_base # allow for the setting of dynamic and stateful api-bases
openai.api_base = api_base if api_base is not None else "https://api.openai.com/v1"
openai.api_version = None
if litellm.organization:
openai.organization = litellm.organization
if api_key:
openai.api_key = api_key
elif litellm.openai_key:
openai.api_key = litellm.openai_key
else:
openai.api_key = get_secret("OPENAI_API_KEY")
## LOGGING
logging(model=model, input=messages, additional_args=args, custom_llm_provider=custom_llm_provider, logger_fn=logger_fn)
## COMPLETION CALL
if litellm.headers:
response = openai.ChatCompletion.create(
model=model,
messages = messages,
headers = litellm.headers,
**optional_params
)
else:
response = openai.ChatCompletion.create(
model=model,
messages = messages,
**optional_params
)
elif model in litellm.open_ai_text_completion_models:
openai.api_type = "openai"
openai.api_base = litellm.api_base if litellm.api_base is not None else "https://api.openai.com/v1"
openai.api_version = None
if api_key:
openai.api_key = api_key
elif litellm.openai_key:
openai.api_key = litellm.openai_key
else:
openai.api_key = get_secret("OPENAI_API_KEY")
if litellm.organization:
openai.organization = litellm.organization
prompt = " ".join([message["content"] for message in messages])
## LOGGING
logging(model=model, input=prompt, additional_args=optional_params, custom_llm_provider=custom_llm_provider, logger_fn=logger_fn)
## COMPLETION CALL
if litellm.headers:
response = openai.Completion.create(
model=model,
prompt = prompt,
headers = litellm.headers,
)
else:
response = openai.Completion.create(
model=model,
prompt = prompt
)
        completion_response = response["choices"][0]["text"]
## LOGGING
logging(model=model, input=prompt, custom_llm_provider=custom_llm_provider, additional_args={"max_tokens": max_tokens, "original_response": completion_response}, logger_fn=logger_fn)
## RESPONSE OBJECT
model_response["choices"][0]["message"]["content"] = completion_response
model_response["created"] = response["created"]
model_response["model"] = model
model_response["usage"] = response["usage"]
response = model_response
elif "replicate" in model or custom_llm_provider == "replicate":
# import replicate/if it fails then pip install replicate
install_and_import("replicate")
import replicate
# replicate defaults to os.environ.get("REPLICATE_API_TOKEN")
# checking in case user set it to REPLICATE_API_KEY instead
if not get_secret("REPLICATE_API_TOKEN") and get_secret("REPLICATE_API_KEY"):
replicate_api_token = get_secret("REPLICATE_API_KEY")
os.environ["REPLICATE_API_TOKEN"] = replicate_api_token
elif api_key:
os.environ["REPLICATE_API_TOKEN"] = api_key
elif litellm.replicate_key:
os.environ["REPLICATE_API_TOKEN"] = litellm.replicate_key
prompt = " ".join([message["content"] for message in messages])
input = {"prompt": prompt}
if "max_tokens" in optional_params:
input["max_length"] = max_tokens # for t5 models
input["max_new_tokens"] = max_tokens # for llama2 models
## LOGGING
logging(model=model, input=input, custom_llm_provider=custom_llm_provider, additional_args={"max_tokens": max_tokens}, logger_fn=logger_fn)
## COMPLETION CALL
output = replicate.run(
model,
input=input)
if 'stream' in optional_params and optional_params['stream'] == True:
# don't try to access stream object,
# let the stream handler know this is replicate
response = CustomStreamWrapper(output, "replicate")
return response
response = ""
for item in output:
response += item
completion_response = response
## LOGGING
logging(model=model, input=prompt, custom_llm_provider=custom_llm_provider, additional_args={"max_tokens": max_tokens, "original_response": completion_response}, logger_fn=logger_fn)
prompt_tokens = len(encoding.encode(prompt))
completion_tokens = len(encoding.encode(completion_response))
## RESPONSE OBJECT
model_response["choices"][0]["message"]["content"] = completion_response
model_response["created"] = time.time()
model_response["model"] = model
model_response["usage"] = {
"prompt_tokens": prompt_tokens,
"completion_tokens": completion_tokens,
"total_tokens": prompt_tokens + completion_tokens
}
response = model_response
elif model in litellm.anthropic_models:
anthropic_key = api_key if api_key is not None else litellm.anthropic_key
anthropic_client = AnthropicLLM(encoding=encoding, default_max_tokens_to_sample=litellm.max_tokens, api_key=anthropic_key)
model_response = anthropic_client.completion(model=model, messages=messages, model_response=model_response, print_verbose=print_verbose, optional_params=optional_params, litellm_params=litellm_params, logger_fn=logger_fn)
if 'stream' in optional_params and optional_params['stream'] == True:
# don't try to access stream object,
response = CustomStreamWrapper(model_response, model)
return response
response = model_response
elif model in litellm.openrouter_models or custom_llm_provider == "openrouter":
openai.api_type = "openai"
# not sure if this will work after someone first uses another API
openai.api_base = litellm.api_base if litellm.api_base is not None else "https://openrouter.ai/api/v1"
openai.api_version = None
if litellm.organization:
openai.organization = litellm.organization
if api_key:
openai.api_key = api_key
elif litellm.openrouter_key:
openai.api_key = litellm.openrouter_key
else:
openai.api_key = get_secret("OPENROUTER_API_KEY")
## LOGGING
logging(model=model, input=messages, additional_args=optional_params, custom_llm_provider=custom_llm_provider, logger_fn=logger_fn)
## COMPLETION CALL
if litellm.headers:
response = openai.ChatCompletion.create(
model=model,
messages = messages,
headers = litellm.headers,
**optional_params
)
else:
openrouter_site_url = get_secret("OR_SITE_URL")
openrouter_app_name = get_secret("OR_APP_NAME")
# if openrouter_site_url is None, set it to https://litellm.ai
if openrouter_site_url is None:
openrouter_site_url = "https://litellm.ai"
# if openrouter_app_name is None, set it to liteLLM
if openrouter_app_name is None:
openrouter_app_name = "liteLLM"
response = openai.ChatCompletion.create(
model=model,
messages = messages,
headers =
{
"HTTP-Referer": openrouter_site_url, # To identify your site
"X-Title": openrouter_app_name # To identify your app
},
**optional_params
)
elif model in litellm.cohere_models:
# import cohere/if it fails then pip install cohere
install_and_import("cohere")
import cohere
if api_key:
cohere_key = api_key
elif litellm.cohere_key:
cohere_key = litellm.cohere_key
else:
cohere_key = get_secret("COHERE_API_KEY")
co = cohere.Client(cohere_key)
prompt = " ".join([message["content"] for message in messages])
## LOGGING
logging(model=model, input=prompt, custom_llm_provider=custom_llm_provider, logger_fn=logger_fn)
## COMPLETION CALL
response = co.generate(
model=model,
prompt = prompt,
**optional_params
)
if 'stream' in optional_params and optional_params['stream'] == True:
# don't try to access stream object,
response = CustomStreamWrapper(response, model)
return response
completion_response = response[0].text
## LOGGING
logging(model=model, input=prompt, custom_llm_provider=custom_llm_provider, additional_args={"max_tokens": max_tokens, "original_response": completion_response}, logger_fn=logger_fn)
prompt_tokens = len(encoding.encode(prompt))
completion_tokens = len(encoding.encode(completion_response))
## RESPONSE OBJECT
model_response["choices"][0]["message"]["content"] = completion_response
model_response["created"] = time.time()
model_response["model"] = model
model_response["usage"] = {
"prompt_tokens": prompt_tokens,
"completion_tokens": completion_tokens,
"total_tokens": prompt_tokens + completion_tokens
}
response = model_response
elif custom_llm_provider == "huggingface":
import requests
API_URL = f"https://api-inference.huggingface.co/models/{model}"
HF_TOKEN = get_secret("HF_TOKEN")
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
prompt = " ".join([message["content"] for message in messages])
## LOGGING
logging(model=model, input=prompt, custom_llm_provider=custom_llm_provider, logger_fn=logger_fn)
input_payload = {"inputs": prompt}
response = requests.post(API_URL, headers=headers, json=input_payload)
## LOGGING
logging(model=model, input=prompt, custom_llm_provider=custom_llm_provider, additional_args={"max_tokens": max_tokens, "original_response": response.text}, logger_fn=logger_fn)
completion_response = response.json()[0]['generated_text']
prompt_tokens = len(encoding.encode(prompt))
completion_tokens = len(encoding.encode(completion_response))
## RESPONSE OBJECT
model_response["choices"][0]["message"]["content"] = completion_response
model_response["created"] = time.time()
model_response["model"] = model
model_response["usage"] = {
"prompt_tokens": prompt_tokens,
"completion_tokens": completion_tokens,
"total_tokens": prompt_tokens + completion_tokens
}
response = model_response
elif custom_llm_provider == "together_ai":
import requests
TOGETHER_AI_TOKEN = get_secret("TOGETHER_AI_TOKEN")
headers = {"Authorization": f"Bearer {TOGETHER_AI_TOKEN}"}
endpoint = 'https://api.together.xyz/inference'
prompt = " ".join([message["content"] for message in messages]) # TODO: Add chat support for together AI
## LOGGING
logging(model=model, input=prompt, custom_llm_provider=custom_llm_provider, logger_fn=logger_fn)
res = requests.post(endpoint, json={
"model": model,
"prompt": prompt,
"request_type": "language-model-inference",
**optional_params
},
headers=headers
)
## LOGGING
logging(model=model, input=prompt, custom_llm_provider=custom_llm_provider, additional_args={"max_tokens": max_tokens, "original_response": res.text}, logger_fn=logger_fn)
if stream == True:
response = CustomStreamWrapper(res, "together_ai")
return response
completion_response = res.json()['output']['choices'][0]['text']
prompt_tokens = len(encoding.encode(prompt))
completion_tokens = len(encoding.encode(completion_response))
## RESPONSE OBJECT
model_response["choices"][0]["message"]["content"] = completion_response
model_response["created"] = time.time()
model_response["model"] = model
model_response["usage"] = {
"prompt_tokens": prompt_tokens,
"completion_tokens": completion_tokens,
"total_tokens": prompt_tokens + completion_tokens
}
response = model_response
elif model in litellm.vertex_models:
            # import vertexai / if it fails then pip install vertexai
install_and_import("vertexai")
import vertexai
from vertexai.preview.language_models import ChatModel, InputOutputTextPair
vertexai.init(project=litellm.vertex_project, location=litellm.vertex_location)
# vertexai does not use an API key, it looks for credentials.json in the environment
prompt = " ".join([message["content"] for message in messages])
## LOGGING
logging(model=model, input=prompt, custom_llm_provider=custom_llm_provider, logger_fn=logger_fn)
chat_model = ChatModel.from_pretrained(model)
chat = chat_model.start_chat()
completion_response = chat.send_message(prompt, **optional_params)
## LOGGING
logging(model=model, input=prompt, custom_llm_provider=custom_llm_provider, additional_args={"max_tokens": max_tokens, "original_response": completion_response}, logger_fn=logger_fn)
## RESPONSE OBJECT
model_response["choices"][0]["message"]["content"] = completion_response
model_response["created"] = time.time()
model_response["model"] = model
response = model_response
elif custom_llm_provider == "ollama":
endpoint = litellm.api_base if litellm.api_base is not None else custom_api_base
prompt = " ".join([message["content"] for message in messages])
## LOGGING
logging(model=model, input=prompt, azure=azure, logger_fn=logger_fn)
generator = get_ollama_response_stream(endpoint, model, prompt)
# assume all responses are streamed
return generator
else:
## LOGGING
logging(model=model, input=messages, custom_llm_provider=custom_llm_provider, logger_fn=logger_fn)
args = locals()
raise ValueError(f"Invalid completion model args passed in. Check your input - {args}")
return response
except Exception as e:
## LOGGING
logging(model=model, input=messages, custom_llm_provider=custom_llm_provider, additional_args={"max_tokens": max_tokens}, logger_fn=logger_fn, exception=e)
## Map to OpenAI Exception
raise exception_type(model=model, original_exception=e)
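# Illustrative calls into completion() above (not part of the original file); model
# names are placeholders, and provider routing follows the branches above:
#   completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hey"}])
#   completion(model="claude-instant-1", messages=[...], stream=True)
#   completion(model="llama2", messages=[...], custom_llm_provider="ollama",
#              custom_api_base="http://localhost:11434")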
def batch_completion(*args, **kwargs):
batch_messages = args[1] if len(args) > 1 else kwargs.get("messages")
completions = []
with ThreadPoolExecutor() as executor:
for message_list in batch_messages:
if len(args) > 1:
args_modified = list(args)
args_modified[1] = message_list
future = executor.submit(completion, *args_modified)
else:
kwargs_modified = dict(kwargs)
kwargs_modified["messages"] = message_list
future = executor.submit(completion, *args, **kwargs_modified)
completions.append(future)
# Retrieve the results from the futures
results = [future.result() for future in completions]
return results
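# Illustrative usage (not part of the original file): each inner list is one
# conversation, and results come back in the same order they were submitted.
#   results = batch_completion(
#       model="gpt-3.5-turbo",
#       messages=[[{"role": "user", "content": "Hey"}],
#                 [{"role": "user", "content": "How are you?"}]],
#   )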
### EMBEDDING ENDPOINTS ####################
@client
@timeout(60) ## set timeouts, in case calls hang (e.g. Azure) - default is 60s, override with `force_timeout`
def embedding(model, input=[], azure=False, force_timeout=60, logger_fn=None):
try:
response = None
if azure == True:
# azure configs
openai.api_type = "azure"
openai.api_base = get_secret("AZURE_API_BASE")
openai.api_version = get_secret("AZURE_API_VERSION")
openai.api_key = get_secret("AZURE_API_KEY")
## LOGGING
logging(model=model, input=input, azure=azure, logger_fn=logger_fn)
## EMBEDDING CALL
response = openai.Embedding.create(input=input, engine=model)
print_verbose(f"response_value: {str(response)[:50]}")
elif model in litellm.open_ai_embedding_models:
openai.api_type = "openai"
openai.api_base = "https://api.openai.com/v1"
openai.api_version = None
openai.api_key = get_secret("OPENAI_API_KEY")
## LOGGING
logging(model=model, input=input, azure=azure, logger_fn=logger_fn)
## EMBEDDING CALL
response = openai.Embedding.create(input=input, model=model)
print_verbose(f"response_value: {str(response)[:50]}")
else:
logging(model=model, input=input, azure=azure, logger_fn=logger_fn)
args = locals()
raise ValueError(f"No valid embedding model args passed in - {args}")
return response
except Exception as e:
# log the original exception
logging(model=model, input=input, azure=azure, logger_fn=logger_fn, exception=e)
## Map to OpenAI Exception
raise exception_type(model=model, original_exception=e)
raise e
####### HELPER FUNCTIONS ################
## Set verbose to true -> ```litellm.set_verbose = True```
def print_verbose(print_statement):
if litellm.set_verbose:
print(f"LiteLLM: {print_statement}")
if random.random() <= 0.3:
print("Get help - https://discord.com/invite/wuPM9dRgDw")
def config_completion(**kwargs):
if litellm.config_path != None:
config_args = read_config_args(litellm.config_path)
# overwrite any args passed in with config args
return completion(**kwargs, **config_args)
else:
raise ValueError("No config path set, please set a config path using `litellm.config_path = 'path/to/config.json'`") | [
"content",
" "
] |
2024-01-10 | kryptogo/litellm | litellm~timeout.py | """
Module containing "timeout" decorator for sync and async callables.
"""
import asyncio
from concurrent import futures
from inspect import iscoroutinefunction
from functools import wraps
from threading import Thread
from openai.error import Timeout
def timeout(
timeout_duration: float = None, exception_to_raise = Timeout
):
"""
Wraps a function to raise the specified exception if execution time
is greater than the specified timeout.
Works with both synchronous and asynchronous callables, but with synchronous ones will introduce
some overhead due to the backend use of threads and asyncio.
    :param float timeout_duration: Timeout duration in seconds. If None, the callable won't time out.
    :param OpenAIError exception_to_raise: Exception to raise when the callable times out.
        Defaults to openai.error.Timeout.
:return: The decorated function.
:rtype: callable
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
async def async_func():
return func(*args, **kwargs)
thread = _LoopWrapper()
thread.start()
future = asyncio.run_coroutine_threadsafe(async_func(), thread.loop)
local_timeout_duration = timeout_duration
if "force_timeout" in kwargs and kwargs["force_timeout"] is not None:
local_timeout_duration = kwargs["force_timeout"]
try:
result = future.result(timeout=local_timeout_duration)
except futures.TimeoutError:
thread.stop_loop()
raise exception_to_raise(f"A timeout error occurred. The function call took longer than {local_timeout_duration} second(s).")
thread.stop_loop()
return result
@wraps(func)
async def async_wrapper(*args, **kwargs):
local_timeout_duration = timeout_duration
if "force_timeout" in kwargs:
local_timeout_duration = kwargs["force_timeout"]
try:
value = await asyncio.wait_for(
                    func(*args, **kwargs), timeout=local_timeout_duration
)
return value
except asyncio.TimeoutError:
raise exception_to_raise(f"A timeout error occurred. The function call took longer than {local_timeout_duration} second(s).")
if iscoroutinefunction(func):
return async_wrapper
return wrapper
return decorator
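# Illustrative usage of the decorator above (not part of the original file): the
# wrapped call raises the configured exception (openai.error.Timeout by default)
# once it runs longer than 5 seconds, or longer than a per-call force_timeout kwarg.
#   @timeout(5)
#   def slow_call(prompt, force_timeout=None):
#       ...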
class _LoopWrapper(Thread):
def __init__(self):
super().__init__(daemon=True)
self.loop = asyncio.new_event_loop()
def run(self) -> None:
self.loop.run_forever()
self.loop.call_soon_threadsafe(self.loop.close)
def stop_loop(self):
for task in asyncio.all_tasks(self.loop):
task.cancel()
self.loop.call_soon_threadsafe(self.loop.stop) | [] |
2024-01-10 | kryptogo/litellm | litellm~utils.py | import sys
import dotenv, json, traceback, threading
import subprocess, os
import litellm, openai
import random, uuid, requests
import datetime, time
import tiktoken
import pkg_resources
from pkg_resources import DistributionNotFound, VersionConflict
encoding = tiktoken.get_encoding("cl100k_base")
from .integrations.helicone import HeliconeLogger
from .integrations.aispend import AISpendLogger
from .integrations.berrispend import BerriSpendLogger
from .integrations.supabase import Supabase
from openai.error import AuthenticationError, InvalidRequestError, RateLimitError, ServiceUnavailableError, OpenAIError
import logging
####### ENVIRONMENT VARIABLES ###################
dotenv.load_dotenv() # Loading env variables using dotenv
sentry_sdk_instance = None
capture_exception = None
add_breadcrumb = None
posthog = None
slack_app = None
alerts_channel = None
heliconeLogger = None
aispendLogger = None
berrispendLogger = None
supabaseClient = None
callback_list = []
user_logger_fn = None
additional_details = {}
def print_verbose(print_statement):
if litellm.set_verbose:
print(f"LiteLLM: {print_statement}")
if random.random() <= 0.3:
print("Get help - https://discord.com/invite/wuPM9dRgDw")
####### Package Import Handler ###################
import importlib
import subprocess
def install_and_import(package: str):
if package in globals().keys():
print_verbose(f"{package} has already been imported.")
return
try:
# Import the module
module = importlib.import_module(package)
except (ModuleNotFoundError, ImportError):
print_verbose(f"{package} is not installed. Installing...")
subprocess.call([sys.executable, "-m", "pip", "install", package])
globals()[package] = importlib.import_module(package)
except (DistributionNotFound, ImportError):
print_verbose(f"{package} is not installed. Installing...")
subprocess.call([sys.executable, "-m", "pip", "install", package])
globals()[package] = importlib.import_module(package)
except VersionConflict as vc:
print_verbose(f"Detected version conflict for {package}. Upgrading...")
subprocess.call([sys.executable, "-m", "pip", "install", "--upgrade", package])
globals()[package] = importlib.import_module(package)
finally:
if package not in globals().keys():
globals()[package] = importlib.import_module(package)
##################################################
####### LOGGING ###################
#Logging function -> log the exact model details + what's being sent | Non-Blocking
def logging(model=None, input=None, custom_llm_provider=None, azure=False, additional_args={}, logger_fn=None, exception=None):
try:
model_call_details = {}
if model:
model_call_details["model"] = model
if azure:
model_call_details["azure"] = azure
if custom_llm_provider:
model_call_details["custom_llm_provider"] = custom_llm_provider
if exception:
model_call_details["exception"] = exception
if input:
model_call_details["input"] = input
if len(additional_args):
model_call_details["additional_args"] = additional_args
# log additional call details -> api key, etc.
if model:
            if azure == True or model in litellm.open_ai_chat_completion_models or model in litellm.open_ai_text_completion_models or model in litellm.open_ai_embedding_models:
model_call_details["api_type"] = openai.api_type
model_call_details["api_base"] = openai.api_base
model_call_details["api_version"] = openai.api_version
model_call_details["api_key"] = openai.api_key
elif "replicate" in model:
model_call_details["api_key"] = os.environ.get("REPLICATE_API_TOKEN")
elif model in litellm.anthropic_models:
model_call_details["api_key"] = os.environ.get("ANTHROPIC_API_KEY")
elif model in litellm.cohere_models:
model_call_details["api_key"] = os.environ.get("COHERE_API_KEY")
## User Logging -> if you pass in a custom logging function or want to use sentry breadcrumbs
print_verbose(f"Logging Details: logger_fn - {logger_fn} | callable(logger_fn) - {callable(logger_fn)}")
if logger_fn and callable(logger_fn):
try:
logger_fn(model_call_details) # Expectation: any logger function passed in by the user should accept a dict object
except Exception as e:
print(f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}")
except Exception as e:
print(f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}")
pass
####### CLIENT ###################
# make it easy to log if completion/embedding runs succeeded or failed + see what happened | Non-Blocking
def client(original_function):
def function_setup(*args, **kwargs): #just run once to check if user wants to send their data anywhere - PostHog/Sentry/Slack/etc.
try:
global callback_list, add_breadcrumb, user_logger_fn
if (len(litellm.success_callback) > 0 or len(litellm.failure_callback) > 0) and len(callback_list) == 0:
callback_list = list(set(litellm.success_callback + litellm.failure_callback))
set_callbacks(callback_list=callback_list,)
if add_breadcrumb:
add_breadcrumb(
category="litellm.llm_call",
message=f"Positional Args: {args}, Keyword Args: {kwargs}",
level="info",
)
if "logger_fn" in kwargs:
user_logger_fn = kwargs["logger_fn"]
except: # DO NOT BLOCK running the function because of this
print_verbose(f"[Non-Blocking] {traceback.format_exc()}")
pass
def crash_reporting(*args, **kwargs):
if litellm.telemetry:
try:
model = args[0] if len(args) > 0 else kwargs["model"]
exception = kwargs["exception"] if "exception" in kwargs else None
custom_llm_provider = kwargs["custom_llm_provider"] if "custom_llm_provider" in kwargs else None
logging.info(f"Logging Crash Reporting Details: model - {model} | exception - {exception} | custom_llm_provider - {custom_llm_provider}")
safe_crash_reporting(model=model, exception=exception, custom_llm_provider=custom_llm_provider) # log usage-crash details. Do not log any user details. If you want to turn this off, set `litellm.telemetry=False`.
except:
#[Non-Blocking Error]
pass
def wrapper(*args, **kwargs):
start_time = None
try:
function_setup(*args, **kwargs)
## MODEL CALL
start_time = datetime.datetime.now()
result = original_function(*args, **kwargs)
end_time = datetime.datetime.now()
## LOG SUCCESS
crash_reporting(*args, **kwargs)
my_thread = threading.Thread(target=handle_success, args=(args, kwargs, result, start_time, end_time)) # don't interrupt execution of main thread
my_thread.start()
return result
except Exception as e:
traceback_exception = traceback.format_exc()
crash_reporting(*args, **kwargs, exception=traceback_exception)
end_time = datetime.datetime.now()
my_thread = threading.Thread(target=handle_failure, args=(e, traceback_exception, start_time, end_time, args, kwargs)) # don't interrupt execution of main thread
my_thread.start()
raise e
return wrapper
####### USAGE CALCULATOR ################
def token_counter(model, text):
# use tiktoken or anthropic's tokenizer depending on the model
num_tokens = 0
if "claude" in model:
install_and_import('anthropic')
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
anthropic = Anthropic()
num_tokens = anthropic.count_tokens(text)
else:
num_tokens = len(encoding.encode(text))
return num_tokens
def cost_per_token(model="gpt-3.5-turbo", prompt_tokens = 0, completion_tokens = 0):
## given
prompt_tokens_cost_usd_dollar = 0
completion_tokens_cost_usd_dollar = 0
model_cost_ref = litellm.model_cost
if model in model_cost_ref:
prompt_tokens_cost_usd_dollar = model_cost_ref[model]["input_cost_per_token"] * prompt_tokens
completion_tokens_cost_usd_dollar = model_cost_ref[model]["output_cost_per_token"] * completion_tokens
return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar
else:
# calculate average input cost
input_cost_sum = 0
output_cost_sum = 0
model_cost_ref = litellm.model_cost
for model in model_cost_ref:
input_cost_sum += model_cost_ref[model]["input_cost_per_token"]
output_cost_sum += model_cost_ref[model]["output_cost_per_token"]
avg_input_cost = input_cost_sum / len(model_cost_ref.keys())
avg_output_cost = output_cost_sum / len(model_cost_ref.keys())
prompt_tokens_cost_usd_dollar = avg_input_cost * prompt_tokens
completion_tokens_cost_usd_dollar = avg_output_cost * completion_tokens
return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar
def completion_cost(model="gpt-3.5-turbo", prompt="", completion=""):
prompt_tokens = token_counter(model=model, text=prompt)
completion_tokens = token_counter(model=model, text=completion)
prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar = cost_per_token(model=model, prompt_tokens = prompt_tokens, completion_tokens = completion_tokens)
return prompt_tokens_cost_usd_dollar + completion_tokens_cost_usd_dollar
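# Illustrative usage (not part of the original file): token counts come from
# token_counter and per-token prices from litellm.model_cost, so e.g.
#   completion_cost(model="gpt-3.5-turbo", prompt="Hey!", completion="Hi, how can I help?")
# returns prompt_tokens * input_cost_per_token + completion_tokens * output_cost_per_token.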
####### HELPER FUNCTIONS ################
def get_litellm_params(
return_async=False,
api_key=None,
force_timeout=600,
azure=False,
logger_fn=None,
verbose=False,
hugging_face=False,
replicate=False,
together_ai=False,
custom_llm_provider=None,
custom_api_base=None
):
litellm_params = {
"return_async": return_async,
"api_key": api_key,
"force_timeout": force_timeout,
"logger_fn": logger_fn,
"verbose": verbose,
"custom_llm_provider": custom_llm_provider,
"custom_api_base": custom_api_base
}
return litellm_params
def get_optional_params(
# 12 optional params
functions = [],
function_call = "",
temperature = 1,
top_p = 1,
n = 1,
stream = False,
stop = None,
max_tokens = float('inf'),
presence_penalty = 0,
frequency_penalty = 0,
logit_bias = {},
user = "",
deployment_id = None,
model = None,
custom_llm_provider = ""
):
optional_params = {}
if model in litellm.anthropic_models:
# handle anthropic params
if stream:
optional_params["stream"] = stream
if stop != None:
optional_params["stop_sequences"] = stop
if temperature != 1:
optional_params["temperature"] = temperature
if top_p != 1:
optional_params["top_p"] = top_p
return optional_params
elif model in litellm.cohere_models:
# handle cohere params
if stream:
optional_params["stream"] = stream
if temperature != 1:
optional_params["temperature"] = temperature
if max_tokens != float('inf'):
optional_params["max_tokens"] = max_tokens
return optional_params
elif custom_llm_provider == "replicate":
# any replicate models
# TODO: handle translating remaining replicate params
if stream:
optional_params["stream"] = stream
return optional_params
elif custom_llm_provider == "together_ai":
if stream:
optional_params["stream_tokens"] = stream
if temperature != 1:
optional_params["temperature"] = temperature
if top_p != 1:
optional_params["top_p"] = top_p
if max_tokens != float('inf'):
optional_params["max_tokens"] = max_tokens
if frequency_penalty != 0:
optional_params["frequency_penalty"] = frequency_penalty
elif model == "chat-bison": # chat-bison has diff args from chat-bison@001 ty Google
if temperature != 1:
optional_params["temperature"] = temperature
if top_p != 1:
optional_params["top_p"] = top_p
if max_tokens != float('inf'):
optional_params["max_output_tokens"] = max_tokens
else:# assume passing in params for openai/azure openai
if functions != []:
optional_params["functions"] = functions
if function_call != "":
optional_params["function_call"] = function_call
if temperature != 1:
optional_params["temperature"] = temperature
if top_p != 1:
optional_params["top_p"] = top_p
if n != 1:
optional_params["n"] = n
if stream:
optional_params["stream"] = stream
if stop != None:
optional_params["stop"] = stop
if max_tokens != float('inf'):
optional_params["max_tokens"] = max_tokens
if presence_penalty != 0:
optional_params["presence_penalty"] = presence_penalty
if frequency_penalty != 0:
optional_params["frequency_penalty"] = frequency_penalty
if logit_bias != {}:
optional_params["logit_bias"] = logit_bias
if user != "":
optional_params["user"] = user
if deployment_id != None:
optional_params["deployment_id"] = deployment_id
return optional_params
return optional_params
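# Illustrative sketch (not part of the original module): get_optional_params() translates
# OpenAI-style arguments into provider-specific ones. Assuming "claude-instant-1" is listed
# in litellm.anthropic_models, a call like the following would map "stop" onto Anthropic's
# "stop_sequences":
#
#   params = get_optional_params(model="claude-instant-1", stream=True,
#                                temperature=0.5, stop=["\n\nHuman:"])
#   # -> {"stream": True, "stop_sequences": ["\n\nHuman:"], "temperature": 0.5}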
def load_test_model(model: str, custom_llm_provider: str = None, custom_api_base: str = None, prompt: str = None, num_calls: int = None, force_timeout: int = None):
test_prompt = "Hey, how's it going"
test_calls = 100
if prompt:
test_prompt = prompt
if num_calls:
test_calls = num_calls
messages = [[{"role": "user", "content": test_prompt}] for _ in range(test_calls)]
start_time = time.time()
try:
litellm.batch_completion(model=model, messages=messages, custom_llm_provider=custom_llm_provider, custom_api_base = custom_api_base, force_timeout=force_timeout)
end_time = time.time()
response_time = end_time - start_time
return {"total_response_time": response_time, "calls_made": 100, "status": "success", "exception": None}
except Exception as e:
end_time = time.time()
response_time = end_time - start_time
return {"total_response_time": response_time, "calls_made": 100, "status": "failed", "exception": e}
def set_callbacks(callback_list):
global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, heliconeLogger, aispendLogger, berrispendLogger, supabaseClient
try:
for callback in callback_list:
if callback == "sentry":
try:
import sentry_sdk
except ImportError:
print_verbose("Package 'sentry_sdk' is missing. Installing it...")
subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'sentry_sdk'])
import sentry_sdk
sentry_sdk_instance = sentry_sdk
                sentry_trace_rate = os.environ.get("SENTRY_API_TRACE_RATE") if "SENTRY_API_TRACE_RATE" in os.environ else "1.0"
                sentry_sdk_instance.init(dsn=os.environ.get("SENTRY_API_URL"), traces_sample_rate=float(sentry_trace_rate))
capture_exception = sentry_sdk_instance.capture_exception
add_breadcrumb = sentry_sdk_instance.add_breadcrumb
elif callback == "posthog":
try:
from posthog import Posthog
except ImportError:
print_verbose("Package 'posthog' is missing. Installing it...")
subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'posthog'])
from posthog import Posthog
posthog = Posthog(
project_api_key=os.environ.get("POSTHOG_API_KEY"),
host=os.environ.get("POSTHOG_API_URL"))
elif callback == "slack":
try:
from slack_bolt import App
except ImportError:
print_verbose("Package 'slack_bolt' is missing. Installing it...")
subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'slack_bolt'])
from slack_bolt import App
slack_app = App(
token=os.environ.get("SLACK_API_TOKEN"),
signing_secret=os.environ.get("SLACK_API_SECRET")
)
alerts_channel = os.environ["SLACK_API_CHANNEL"]
print_verbose(f"Initialized Slack App: {slack_app}")
elif callback == "helicone":
heliconeLogger = HeliconeLogger()
elif callback == "aispend":
aispendLogger = AISpendLogger()
elif callback == "berrispend":
berrispendLogger = BerriSpendLogger()
elif callback == "supabase":
supabaseClient = Supabase()
except Exception as e:
raise e
def handle_failure(exception, traceback_exception, start_time, end_time, args, kwargs):
global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, aispendLogger, berrispendLogger
try:
# print_verbose(f"handle_failure args: {args}")
# print_verbose(f"handle_failure kwargs: {kwargs}")
success_handler = additional_details.pop("success_handler", None)
failure_handler = additional_details.pop("failure_handler", None)
additional_details["Event_Name"] = additional_details.pop("failed_event_name", "litellm.failed_query")
print_verbose(f"self.failure_callback: {litellm.failure_callback}")
# print_verbose(f"additional_details: {additional_details}")
for callback in litellm.failure_callback:
try:
if callback == "slack":
slack_msg = ""
if len(kwargs) > 0:
for key in kwargs:
slack_msg += f"{key}: {kwargs[key]}\n"
if len(args) > 0:
for i, arg in enumerate(args):
slack_msg += f"LiteLLM_Args_{str(i)}: {arg}"
for detail in additional_details:
slack_msg += f"{detail}: {additional_details[detail]}\n"
slack_msg += f"Traceback: {traceback_exception}"
slack_app.client.chat_postMessage(channel=alerts_channel, text=slack_msg)
elif callback == "sentry":
capture_exception(exception)
elif callback == "posthog":
print_verbose(f"inside posthog, additional_details: {len(additional_details.keys())}")
ph_obj = {}
if len(kwargs) > 0:
ph_obj = kwargs
if len(args) > 0:
for i, arg in enumerate(args):
ph_obj["litellm_args_" + str(i)] = arg
for detail in additional_details:
ph_obj[detail] = additional_details[detail]
event_name = additional_details["Event_Name"]
print_verbose(f"ph_obj: {ph_obj}")
print_verbose(f"PostHog Event Name: {event_name}")
if "user_id" in additional_details:
posthog.capture(additional_details["user_id"], event_name, ph_obj)
else: # PostHog calls require a unique id to identify a user - https://posthog.com/docs/libraries/python
unique_id = str(uuid.uuid4())
posthog.capture(unique_id, event_name)
print_verbose(f"successfully logged to PostHog!")
elif callback == "berrispend":
print_verbose("reaches berrispend for logging!")
model = args[0] if len(args) > 0 else kwargs["model"]
messages = args[1] if len(args) > 1 else kwargs["messages"]
result = {
"model": model,
"created": time.time(),
"error": traceback_exception,
"usage": {
"prompt_tokens": prompt_token_calculator(model, messages=messages),
"completion_tokens": 0
}
}
berrispendLogger.log_event(model=model, messages=messages, response_obj=result, start_time=start_time, end_time=end_time, print_verbose=print_verbose)
elif callback == "aispend":
print_verbose("reaches aispend for logging!")
model = args[0] if len(args) > 0 else kwargs["model"]
messages = args[1] if len(args) > 1 else kwargs["messages"]
result = {
"model": model,
"created": time.time(),
"usage": {
"prompt_tokens": prompt_token_calculator(model, messages=messages),
"completion_tokens": 0
}
}
aispendLogger.log_event(model=model, response_obj=result, start_time=start_time, end_time=end_time, print_verbose=print_verbose)
elif callback == "supabase":
print_verbose("reaches supabase for logging!")
model = args[0] if len(args) > 0 else kwargs["model"]
messages = args[1] if len(args) > 1 else kwargs["messages"]
result = {
"model": model,
"created": time.time(),
"error": traceback_exception,
"usage": {
"prompt_tokens": prompt_token_calculator(model, messages=messages),
"completion_tokens": 0
}
}
print(f"litellm._thread_context: {litellm._thread_context}")
supabaseClient.log_event(model=model, messages=messages, end_user=litellm._thread_context.user, response_obj=result, start_time=start_time, end_time=end_time, print_verbose=print_verbose)
except:
print_verbose(f"Error Occurred while logging failure: {traceback.format_exc()}")
pass
if failure_handler and callable(failure_handler):
call_details = {
"exception": exception,
"additional_details": additional_details
}
failure_handler(call_details)
pass
except Exception as e:
## LOGGING
logging(logger_fn=user_logger_fn, exception=e)
pass
def handle_success(args, kwargs, result, start_time, end_time):
global heliconeLogger, aispendLogger
try:
success_handler = additional_details.pop("success_handler", None)
failure_handler = additional_details.pop("failure_handler", None)
additional_details["Event_Name"] = additional_details.pop("successful_event_name", "litellm.succes_query")
for callback in litellm.success_callback:
try:
if callback == "posthog":
ph_obj = {}
for detail in additional_details:
ph_obj[detail] = additional_details[detail]
event_name = additional_details["Event_Name"]
if "user_id" in additional_details:
posthog.capture(additional_details["user_id"], event_name, ph_obj)
else: # PostHog calls require a unique id to identify a user - https://posthog.com/docs/libraries/python
unique_id = str(uuid.uuid4())
posthog.capture(unique_id, event_name, ph_obj)
pass
elif callback == "slack":
slack_msg = ""
for detail in additional_details:
slack_msg += f"{detail}: {additional_details[detail]}\n"
slack_app.client.chat_postMessage(channel=alerts_channel, text=slack_msg)
elif callback == "helicone":
print_verbose("reaches helicone for logging!")
model = args[0] if len(args) > 0 else kwargs["model"]
messages = args[1] if len(args) > 1 else kwargs["messages"]
heliconeLogger.log_success(model=model, messages=messages, response_obj=result, start_time=start_time, end_time=end_time, print_verbose=print_verbose)
elif callback == "aispend":
print_verbose("reaches aispend for logging!")
model = args[0] if len(args) > 0 else kwargs["model"]
aispendLogger.log_event(model=model, response_obj=result, start_time=start_time, end_time=end_time, print_verbose=print_verbose)
elif callback == "berrispend":
print_verbose("reaches berrispend for logging!")
model = args[0] if len(args) > 0 else kwargs["model"]
messages = args[1] if len(args) > 1 else kwargs["messages"]
berrispendLogger.log_event(model=model, messages=messages, response_obj=result, start_time=start_time, end_time=end_time, print_verbose=print_verbose)
elif callback == "supabase":
print_verbose("reaches supabase for logging!")
model = args[0] if len(args) > 0 else kwargs["model"]
messages = args[1] if len(args) > 1 else kwargs["messages"]
print(f"litellm._thread_context: {litellm._thread_context}")
supabaseClient.log_event(model=model, messages=messages, end_user=litellm._thread_context.user, response_obj=result, start_time=start_time, end_time=end_time, print_verbose=print_verbose)
except Exception as e:
## LOGGING
logging(logger_fn=user_logger_fn, exception=e)
print_verbose(f"[Non-Blocking] Success Callback Error - {traceback.format_exc()}")
pass
if success_handler and callable(success_handler):
success_handler(args, kwargs)
pass
except Exception as e:
## LOGGING
logging(logger_fn=user_logger_fn, exception=e)
print_verbose(f"[Non-Blocking] Success Callback Error - {traceback.format_exc()}")
pass
def prompt_token_calculator(model, messages):
# use tiktoken or anthropic's tokenizer depending on the model
text = " ".join(message["content"] for message in messages)
num_tokens = 0
if "claude" in model:
install_and_import('anthropic')
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
anthropic = Anthropic()
num_tokens = anthropic.count_tokens(text)
else:
num_tokens = len(encoding.encode(text))
return num_tokens
# integration helper function
def modify_integration(integration_name, integration_params):
global supabaseClient
if integration_name == "supabase":
if "table_name" in integration_params:
Supabase.supabase_table_name = integration_params["table_name"]
def exception_type(model, original_exception):
global user_logger_fn
exception_mapping_worked = False
try:
if isinstance(original_exception, OpenAIError):
# Handle the OpenAIError
raise original_exception
elif model:
error_str = str(original_exception)
if isinstance(original_exception, BaseException):
exception_type = type(original_exception).__name__
else:
exception_type = ""
logging(model=model, additional_args={"error_str": error_str, "exception_type": exception_type, "original_exception": original_exception}, logger_fn=user_logger_fn)
if "claude" in model: #one of the anthropics
if hasattr(original_exception, "status_code"):
print_verbose(f"status_code: {original_exception.status_code}")
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(f"AnthropicException - {original_exception.message}")
elif original_exception.status_code == 400:
exception_mapping_worked = True
raise InvalidRequestError(f"AnthropicException - {original_exception.message}", f"{model}")
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(f"AnthropicException - {original_exception.message}")
elif "Could not resolve authentication method. Expected either api_key or auth_token to be set." in error_str:
exception_mapping_worked = True
raise AuthenticationError(f"AnthropicException - {error_str}")
elif "replicate" in model:
if "Incorrect authentication token" in error_str:
exception_mapping_worked = True
raise AuthenticationError(f"ReplicateException - {error_str}")
elif exception_type == "ModelError":
exception_mapping_worked = True
raise InvalidRequestError(f"ReplicateException - {error_str}", f"{model}")
elif "Request was throttled" in error_str:
exception_mapping_worked = True
raise RateLimitError(f"ReplicateException - {error_str}")
elif exception_type == "ReplicateError": ## ReplicateError implies an error on Replicate server side, not user side
raise ServiceUnavailableError(f"ReplicateException - {error_str}")
elif model == "command-nightly": #Cohere
if "invalid api token" in error_str or "No API key provided." in error_str:
exception_mapping_worked = True
raise AuthenticationError(f"CohereException - {error_str}")
elif "too many tokens" in error_str:
exception_mapping_worked = True
raise InvalidRequestError(f"CohereException - {error_str}", f"{model}")
elif "CohereConnectionError" in exception_type: # cohere seems to fire these errors when we load test it (1k+ messages / min)
exception_mapping_worked = True
raise RateLimitError(f"CohereException - {original_exception.message}")
raise original_exception # base case - return the original exception
else:
raise original_exception
except Exception as e:
## LOGGING
logging(logger_fn=user_logger_fn, additional_args={"exception_mapping_worked": exception_mapping_worked, "original_exception": original_exception}, exception=e)
if exception_mapping_worked:
raise e
else: # don't let an error with mapping interrupt the user from receiving an error from the llm api calls
raise original_exception
def safe_crash_reporting(model=None, exception=None, custom_llm_provider=None):
data = {
"model": model,
"exception": str(exception),
"custom_llm_provider": custom_llm_provider
}
threading.Thread(target=litellm_telemetry, args=(data,)).start()
def litellm_telemetry(data):
# Load or generate the UUID
uuid_file = 'litellm_uuid.txt'
try:
# Try to open the file and load the UUID
with open(uuid_file, 'r') as file:
uuid_value = file.read()
if uuid_value:
uuid_value = uuid_value.strip()
else:
raise FileNotFoundError
except FileNotFoundError:
# Generate a new UUID if the file doesn't exist or is empty
new_uuid = uuid.uuid4()
uuid_value = str(new_uuid)
with open(uuid_file, 'w') as file:
file.write(uuid_value)
except:
# [Non-Blocking Error]
return
try:
# Prepare the data to send to litellm logging api
payload = {
'uuid': uuid_value,
'data': data,
'version': pkg_resources.get_distribution("litellm").version
}
logging.info(f"litellm_telemetry: {payload}")
# Make the POST request to litellm logging api
# response = requests.post('https://litellm.berri.ai.kryptogo.com/logging', headers={"Content-Type": "application/json"}, json=payload)
# response.raise_for_status() # Raise an exception for HTTP errors
except:
# [Non-Blocking Error]
return
######### Secret Manager ############################
# checks if user has passed in a secret manager client
# if passed in then checks the secret there
def get_secret(secret_name):
if litellm.secret_manager_client != None:
# TODO: check which secret manager is being used
# currently only supports Infisical
secret = litellm.secret_manager_client.get_secret(secret_name).secret_value
if secret != None:
# if secret manager fails default to using .env variables
os.environ[secret_name] = secret # set to env to be safe
return secret
else:
return os.environ.get(secret_name)
else:
return os.environ.get(secret_name)
######## Streaming Class ############################
# wraps the completion stream to return the correct format for the model
# replicate/anthropic/cohere
class CustomStreamWrapper:
def __init__(self, completion_stream, model):
self.model = model
if model in litellm.cohere_models:
# cohere does not return an iterator, so we need to wrap it in one
self.completion_stream = iter(completion_stream)
elif model == "together_ai":
self.completion_stream = iter(completion_stream)
else:
self.completion_stream = completion_stream
def __iter__(self):
return self
def handle_anthropic_chunk(self, chunk):
str_line = chunk.decode('utf-8') # Convert bytes to string
if str_line.startswith('data:'):
data_json = json.loads(str_line[5:])
return data_json.get("completion", "")
return ""
def handle_together_ai_chunk(self, chunk):
chunk = chunk.decode("utf-8")
text_index = chunk.find('"text":"') # this checks if text: exists
text_start = text_index + len('"text":"')
text_end = chunk.find('"}', text_start)
if text_index != -1 and text_end != -1:
extracted_text = chunk[text_start:text_end]
return extracted_text
else:
return ""
def __next__(self):
completion_obj ={ "role": "assistant", "content": ""}
if self.model in litellm.anthropic_models:
chunk = next(self.completion_stream)
completion_obj["content"] = self.handle_anthropic_chunk(chunk)
elif self.model == "replicate":
chunk = next(self.completion_stream)
completion_obj["content"] = chunk
elif self.model == "together_ai":
chunk = next(self.completion_stream)
text_data = self.handle_together_ai_chunk(chunk)
if text_data == "":
return self.__next__()
completion_obj["content"] = text_data
elif self.model in litellm.cohere_models:
chunk = next(self.completion_stream)
completion_obj["content"] = chunk.text
# return this for all models
return {"choices": [{"delta": completion_obj}]}
########## Reading Config File ############################
def read_config_args(config_path):
try:
import os
current_path = os.getcwd()
with open(config_path, "r") as config_file:
config = json.load(config_file)
# read keys/ values from config file and return them
return config
except Exception as e:
print("An error occurred while reading config:", str(e))
raise e
########## ollama implementation ############################
import aiohttp
async def get_ollama_response_stream(api_base="http://localhost:11434", model="llama2", prompt="Why is the sky blue?"):
session = aiohttp.ClientSession()
url = f'{api_base}/api/generate'
data = {
"model": model,
"prompt": prompt,
}
try:
async with session.post(url, json=data) as resp:
async for line in resp.content.iter_any():
if line:
try:
json_chunk = line.decode("utf-8")
chunks = json_chunk.split("\n")
for chunk in chunks:
if chunk.strip() != "":
j = json.loads(chunk)
if "response" in j:
completion_obj ={ "role": "assistant", "content": ""}
completion_obj["content"] = j["response"]
yield {"choices": [{"delta": completion_obj}]}
# self.responses.append(j["response"])
# yield "blank"
except Exception as e:
print(f"Error decoding JSON: {e}")
finally:
await session.close()
async def stream_to_string(generator):
response = ""
async for chunk in generator:
response += chunk["content"]
return response
| [
"0",
"input_cost_per_token",
"Hey, how's it going"
] |
2024-01-10 | GSejas/gaceta-to-chatbot | gacetachat.py | from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader
import os
import emoji
from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
from langchain import OpenAI
# import gradio as gr
import sys
import os
from llama_index import ServiceContext
# NOTE: for local testing only, do NOT deploy with your key hardcoded
os.environ['OPENAI_API_KEY'] = "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"
def embeddings_to_chats(prompt, inputfolder, debug=False):
if False and os.path.exists("gacetachat.json"):
# load from disk
index = GPTSimpleVectorIndex.load_from_disk('gacetachat.json')
else:
max_input_size = 4096
num_outputs = 512
max_chunk_overlap = 20
chunk_size_limit = 600
prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
if debug:
llm_predictor = LLMPredictor(llm=OpenAI(temperature=1, model_name="gpt-3.5-turbo", max_tokens=num_outputs))
else:
llm_predictor = LLMPredictor(llm=OpenAI(temperature=1, model_name="text-davinci-003", max_tokens=num_outputs))
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
documents = SimpleDirectoryReader(inputfolder).load_data()
# add try and except for the case where the user has exceeded his daily limit
index = GPTSimpleVectorIndex.from_documents(documents, service_context=service_context)
response = index.query(prompt)
if not os.path.exists("gacetachat.json"):
index.save_to_disk('gacetachat.json')
with open("output.txt", "a", encoding='utf-8') as output_file:
output_file.write(response.response)
return response.response
| [] |
2024-01-10 | wilfordwoodruff/Emotional_Analysis | Parker~GPT_Final_Draft~gpt_emo.py | import os
import openai
import pandas as pd
import pkg_resources
# CHECK THE MAIN FUNCTION FOR THE FLOW OF WHAT HAPPENS IN THIS SCRIPT
def parse_data(row):
"""
Description:
This function takes a row from the input data and extracts the 'Results' column. It then parses the data into a dictionary format, where keys are emotional categories (Neutral, Enthusiasm, Joy, Hope, Satisfaction, Sad, Anger, Fear), and values are corresponding scores.
Parameters:
row: A pandas DataFrame row containing the 'Results' column.
Returns:
A dictionary with emotional categories as keys and scores as values.
"""
data = row['Results']
texts = data.split(',')
texts = [text.replace('[', '').replace(']','').replace('{', '').replace('}', '').replace('=',':').split(':') for text in texts]
all = {}
for text in texts:
all[text[0].strip()] = int(text[1].strip())
return all
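# Illustrative sketch (not part of the original script): parse_data() expects the 'Results'
# column to hold a bracketed "emotion: score" string returned by GPT. Example values only;
# real rows contain scores for all eight emotions.
#
#   row = {"Results": "[neutral: 7, joy: 2, hope: 4]"}
#   parse_data(row)  # -> {"neutral": 7, "joy": 2, "hope": 4}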
def generate_uuid_list(path_to_output_csv="output.csv"):
# Make sure there's an output.csv, if there is not, make one.
try:
df = pd.read_csv(path_to_output_csv, encoding="utf-8")
except:
df = pd.DataFrame(columns=['UUID', 'Clean_Text', 'Results', 'Neutral', 'Enthusiasm', 'Joy', 'Hope', 'Satisfaction', 'Sad', 'Anger', 'Fear'])
df.to_csv(path_to_output_csv, index=False, encoding="utf-8")
return df['UUID'].values.tolist() # List of UUIDs
# params is a list of strings indicating what you would like to select
def prep_data(params, input_file_path):
df = pd.read_csv(input_file_path, encoding="utf-8", dtype=str)
df = df.loc[:, params]
return df
# PACKAGE REQUIREMENT CHECK
def check_req(requirements):
installed_packages = {pkg.key for pkg in pkg_resources.working_set}
end_flag = 0 # Flag to indicate if any package is missing
for package in requirements:
if package not in installed_packages:
print(f"Package '{package}' is missing.")
end_flag = 1
    for package in requirements:
        if package in installed_packages:
            print(f"Package '{package}' is initialized.")
    if end_flag:
        print("Download the required packages before you continue.")
        exit()
else:
print("All packages are successfully downloaded!")
# READING THE CONFIG FILE
def read_config():
with open("config.txt", "r") as config_file:
config = {}
for line in config_file:
if "=" in line:
key, value = line.strip().split("=")
config[key.strip()] = value.strip()
else:
print(
f"Warning: Skipping line '{line.strip()}' in config file because it does not contain an '=' sign.")
return config
# READING THE REQUIREMENTS FILE
def read_req():
with open("requirements.txt", "r") as req_file:
req = set(line.strip() for line in req_file)
return req
# PULLING RESULTS FROM OPENAI
def analyze_emotions(api_key, text, prompt, i, retry=0):
openai.api_key = api_key
response = None
try:
print(f"Starting row {i+1}")
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": prompt},
{"role": "user", "content": text},
],
temperature=0.0,
max_tokens=1000,
)
print(f"Row {i+1} has been completed")
except Exception as e:
        if retry < 5:
            print(f"Retrying Row {i+1}: {retry}")
            print(f"API request for row '{i+1}' failed. Reason: {str(e)}")
            return analyze_emotions(api_key, text, prompt, i, retry+1)
        return f"Error: {str(e)}"
return response["choices"][0]["message"]["content"]
# return "Neutral: 7, Enthusiasm: 2, Joy: 1, Hope: 4, Satisfaction: 3, Sad: 1, Anger: 1, Fear: 1"
def run_rows(api_key, data, column_to_read, output_file_path):
df = data
emotions_analysis_list = []
for i, row in df.iterrows():
journal_entry = row[column_to_read]
emo = [
"neutral",
"enthusiasm",
"joy",
"hope",
"satisfaction",
"sad",
"anger",
"fear",
]
prompt = f"You are a helpful assistant that analyzes emotions. Analyze the text and respond in the following json format [{emo[0]} : score_1, {emo[1]} : score_2, {emo[2]} : score_3, {emo[3]} : score_4, {emo[4]} : score_5, {emo[5]} : score_6, {emo[6]} : score_7, {emo[7]} : score_8]. Match the format EXACTLY, giving a score (min 1 to max 10) for each of the 8 emotions. ONLY analyze these emotions."
emotions_analysis = analyze_emotions(api_key, journal_entry, prompt, i)
emotions_analysis_list.append(emotions_analysis)
df['Results'] = emotions_analysis_list
df[emo] = df.apply(parse_data, axis=1,result_type='expand')
return df
def main():
# GETTING THE DIRECTORY SET
current_directory = os.getcwd()
script_directory = os.path.dirname(os.path.realpath(__file__))
os.chdir(script_directory)
# CHECKS FOR PACKAGES
requirements = read_req()
check_req(requirements)
# LOAD IN CONFIG
config = read_config()
api_key = config.get("api_key")
input_file_path = config.get("input_file_path")
column_to_read = config.get("column_to_read") # No need to convert to int here
uid_to_read = config.get("uid_column")
output_file_path = config.get("output_file_path")
# Check for output.csv, create, and export UUIDs
uuid_list = generate_uuid_list() # JUST UUIDS
# Clean input dataset to the two columns
data = prep_data([uid_to_read, column_to_read], input_file_path) # UUIDS AND CLEAN TEXT
# Filters out existing uuid from new request
for uuid in uuid_list:
data = data.drop(data.index[data['UUID'] == uuid])
# Run the data through GPT
data = run_rows(api_key, data, column_to_read, output_file_path)
data.to_csv('output.csv', mode='a+', encoding="utf-8", index=False, header=False)
print("Done")
os.chdir(current_directory)
if __name__ == "__main__":
main()
| [
"You are a helpful assistant that analyzes emotions. Analyze the text and respond in the following json format [PLACEHOLDER : score_1, PLACEHOLDER : score_2, PLACEHOLDER : score_3, PLACEHOLDER : score_4, PLACEHOLDER : score_5, PLACEHOLDER : score_6, PLACEHOLDER : score_7, PLACEHOLDER : score_8]. Match the format EXACTLY, giving a score (min 1 to max 10) for each of the 8 emotions. ONLY analyze these emotions."
] |
2024-01-10 | farhan0167/QnAChatBot | brain.py | import openai
import pandas as pd
import numpy as np
import pickle
from transformers import GPT2TokenizerFast
from typing import List
class Brain:
DOC_EMBEDDINGS_MODEL = "text-embedding-ada-002"
QUERY_EMBEDDINGS_MODEL = "text-embedding-ada-002"
def get_embedding(self, text: str, model: str) -> List[float]:
result = openai.Embedding.create(
model=model,
input=text)
return result["data"][0]["embedding"]
def get_doc_embedding(self, text: str) -> List[float]:
return self.get_embedding(text, self.DOC_EMBEDDINGS_MODEL)
def get_query_embedding(self, text: str) -> List[float]:
return self.get_embedding(text, self.QUERY_EMBEDDINGS_MODEL)
def compute_doc_embeddings(self, df: pd.DataFrame):
"""
Create an embedding for each row in the dataframe using the OpenAI Embeddings API.
Return a dictionary that maps between each embedding vector and the index of the row that it corresponds to.
"""
return {
idx: self.get_doc_embedding(r.content.replace("\n", " ")) for idx, r in df.iterrows()
}
def vector_similarity(self, x: List[float], y: List[float]) -> float:
"""
We could use cosine similarity or dot product to calculate the similarity between vectors.
In practice, we have found it makes little difference.
"""
return np.dot(np.array(x), np.array(y))
def order_document_sections_by_query_similarity(self, query: str, contexts):
"""
Find the query embedding for the supplied query, and compare it against all of the pre-calculated document embeddings
to find the most relevant sections.
Return the list of document sections, sorted by relevance in descending order.
"""
query_embedding = self.get_query_embedding(query)
document_similarities = sorted([
(self.vector_similarity(query_embedding, doc_embedding), doc_index) for doc_index, doc_embedding in contexts.items()
], reverse=True)
return document_similarities
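    # Illustrative sketch (not part of the original class): ranking pre-computed document
    # embeddings against a query. `df` is assumed to be a DataFrame with a 'content' column.
    #
    #   brain = Brain()
    #   context_embeddings = brain.compute_doc_embeddings(df)   # {row_index: embedding vector}
    #   ranked = brain.order_document_sections_by_query_similarity(
    #       "When was the company founded?", context_embeddings)
    #   # ranked is a list of (similarity, row_index) pairs, most relevant first.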
MAX_SECTION_LEN = 1000
SEPARATOR = "\n* "
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
separator_len = len(tokenizer.tokenize(SEPARATOR))
def construct_prompt(self, question: str, context_embeddings: dict, df: pd.DataFrame) -> str:
"""
Fetch relevant
"""
most_relevant_document_sections = self.order_document_sections_by_query_similarity(question, context_embeddings)
chosen_sections = []
chosen_sections_len = 0
chosen_sections_indexes = []
for _, section_index in most_relevant_document_sections:
# Add contexts until we run out of space.
document_section = df.loc[section_index]
chosen_sections_len += document_section.tokens + self.separator_len
if chosen_sections_len > self.MAX_SECTION_LEN:
break
chosen_sections.append(self.SEPARATOR + document_section.content.replace("\n", " "))
chosen_sections_indexes.append(str(section_index))
# Useful diagnostic information
print(f"Selected {len(chosen_sections)} document sections:")
print("\n".join(chosen_sections_indexes))
header = """Answer the question as truthfully as possible using the provided context, and if the answer is not contained within the text below, say "I don't know."\n\nContext:\n"""
return header + "".join(chosen_sections) + "\n\n Q: " + question + "\n A:" | [] |
2024-01-10 | SrijanSahaySrivastava/LawGPT | chainl.py | from langchain import PromptTemplate
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.llms import CTransformers
from langchain.chains import RetrievalQA
import chainlit as cl
from model import final_result
from model import qa_bot
# Chain lit
@cl.on_chat_start
async def on_chat_start():
files = None
chain = qa_bot()
msg = cl.Message(content="Starting Bharatgpt!")
await msg.send()
msg.content = "Hi, welcome to the BHARATGPT. what is your query?"
await msg.update()
cl.user_session.set("chain", chain)
@cl.on_message
async def main(message):
chain = cl.user_session.get("chain")
cb = cl.AsyncLangchainCallbackHandler(
stream_final_answer=True,
answer_prefix_tokens=["Answer:"],
)
cb.answer_reached = True
res = await chain.acall(message, callbacks=[cb])
answer = res["result"]
sources = res["source_documents"]
if sources:
answer += f"\n\nSource document: " + str(sources)
else:
answer += f"\nNo source document found"
await cl.Message(content=answer).send()
| [] |
2024-01-10 | Loquats/mlflow | tests~langchain~test_langchain_model_export.py | import langchain
import mlflow
import pytest
import transformers
import json
import importlib
import openai
from contextlib import contextmanager
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.llms import HuggingFacePipeline
from langchain.llms.base import LLM
from langchain.chains.base import Chain
from pyspark.sql import SparkSession
from typing import Any, List, Mapping, Optional, Dict
from tests.helper_functions import pyfunc_serve_and_score_model
from mlflow.exceptions import MlflowException
from mlflow.openai.utils import (
_mock_chat_completion_response,
_mock_request,
_MockResponse,
TEST_CONTENT,
)
@contextmanager
def _mock_async_request(content=TEST_CONTENT):
with _mock_request(return_value=_mock_chat_completion_response(content)) as m:
yield m
@pytest.fixture
def model_path(tmp_path):
return tmp_path.joinpath("model")
@pytest.fixture(scope="module")
def spark():
with SparkSession.builder.master("local[*]").getOrCreate() as s:
yield s
@pytest.fixture(autouse=True)
def set_envs(monkeypatch):
monkeypatch.setenvs(
{
"MLFLOW_OPENAI_TESTING": "true",
"OPENAI_API_KEY": "test",
"SERPAPI_API_KEY": "test",
}
)
importlib.reload(openai)
def create_huggingface_model(model_path):
architecture = "lordtt13/emo-mobilebert"
mlflow.transformers.save_model(
transformers_model={
"model": transformers.TFMobileBertForSequenceClassification.from_pretrained(
architecture
),
"tokenizer": transformers.AutoTokenizer.from_pretrained(architecture),
},
path=model_path,
)
llm = mlflow.transformers.load_model(model_path)
prompt = PromptTemplate(
input_variables=["product"],
template="What is a good name for a company that makes {product}?",
)
hf_pipe = HuggingFacePipeline(pipeline=llm)
return LLMChain(llm=hf_pipe, prompt=prompt)
def create_openai_llmchain():
llm = OpenAI(temperature=0.9)
prompt = PromptTemplate(
input_variables=["product"],
template="What is a good name for a company that makes {product}?",
)
return LLMChain(llm=llm, prompt=prompt)
def create_openai_llmagent():
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents import AgentType
# First, let's load the language model we're going to use to control the agent.
llm = OpenAI(temperature=0)
# Next, let's load some tools to use.
tools = load_tools(["serpapi", "llm-math"], llm=llm)
# Finally, let's initialize an agent with the tools.
return initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
def create_model(llm_type, model_path=None):
if llm_type == "openai":
return create_openai_llmchain()
if llm_type == "huggingfacehub":
return create_huggingface_model(model_path)
if llm_type == "openaiagent":
return create_openai_llmagent()
if llm_type == "fake":
return FakeLLM()
raise NotImplementedError("This model is not supported yet.")
class FakeLLM(LLM):
"""Fake LLM wrapper for testing purposes."""
queries: Optional[Mapping] = None
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "fake"
# pylint: disable=arguments-differ
def _call(self, prompt: str, stop: Optional[List[str]] = None, run_manager=None) -> str:
"""First try to lookup in queries, else return 'foo' or 'bar'."""
if self.queries is not None:
return self.queries[prompt]
if stop is None:
return "foo"
else:
return "bar"
@property
def _identifying_params(self) -> Mapping[str, Any]:
return {}
class FakeChain(Chain):
"""Fake chain class for testing purposes."""
be_correct: bool = True
the_input_keys: List[str] = ["foo"]
the_output_keys: List[str] = ["bar"]
@property
def input_keys(self) -> List[str]:
"""Input keys."""
return self.the_input_keys
@property
def output_keys(self) -> List[str]:
"""Output key of bar."""
return self.the_output_keys
# pylint: disable=arguments-differ
def _call(self, inputs: Dict[str, str], run_manager=None) -> Dict[str, str]:
if self.be_correct:
return {"bar": "baz"}
else:
return {"baz": "bar"}
def test_langchain_native_save_and_load_model(model_path):
model = create_model("openai")
mlflow.langchain.save_model(model, model_path)
loaded_model = mlflow.langchain.load_model(model_path)
assert type(loaded_model) == langchain.chains.llm.LLMChain
assert type(loaded_model.llm) == langchain.llms.openai.OpenAI
assert type(loaded_model.prompt) == langchain.prompts.PromptTemplate
assert loaded_model.prompt.template == "What is a good name for a company that makes {product}?"
def test_langchain_native_log_and_load_model():
model = create_model("openai")
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(model, "langchain_model")
loaded_model = mlflow.langchain.load_model(logged_model.model_uri)
assert "langchain" in logged_model.flavors
assert str(logged_model.signature.inputs) == "['product': string]"
assert str(logged_model.signature.outputs) == "['text': string]"
assert type(loaded_model) == langchain.chains.llm.LLMChain
assert type(loaded_model.llm) == langchain.llms.openai.OpenAI
assert type(loaded_model.prompt) == langchain.prompts.PromptTemplate
assert loaded_model.prompt.template == "What is a good name for a company that makes {product}?"
def test_pyfunc_load_openai_model():
model = create_model("openai")
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(model, "langchain_model")
loaded_model = mlflow.pyfunc.load_model(logged_model.model_uri)
assert "langchain" in logged_model.flavors
assert type(loaded_model) == mlflow.pyfunc.PyFuncModel
def test_langchain_model_predict():
with _mock_request(return_value=_mock_chat_completion_response()):
model = create_model("openai")
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(model, "langchain_model")
loaded_model = mlflow.pyfunc.load_model(logged_model.model_uri)
result = loaded_model.predict([{"product": "MLflow"}])
assert result == [TEST_CONTENT]
def test_pyfunc_spark_udf_with_langchain_model(spark):
model = create_model("openai")
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(model, "langchain_model")
loaded_model = mlflow.pyfunc.spark_udf(spark, logged_model.model_uri, result_type="string")
df = spark.createDataFrame([("MLflow",), ("Spark",)], ["product"])
df = df.withColumn("answer", loaded_model())
pdf = df.toPandas()
assert pdf["answer"].tolist() == [TEST_CONTENT, TEST_CONTENT]
def test_langchain_log_huggingface_hub_model_metadata(model_path):
model = create_model("huggingfacehub", model_path)
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(model, "langchain_model")
loaded_model = mlflow.langchain.load_model(logged_model.model_uri)
assert "langchain" in logged_model.flavors
assert str(logged_model.signature.inputs) == "['product': string]"
assert str(logged_model.signature.outputs) == "['text': string]"
assert type(loaded_model) == langchain.chains.llm.LLMChain
assert type(loaded_model.llm) == langchain.llms.huggingface_pipeline.HuggingFacePipeline
assert type(loaded_model.prompt) == langchain.prompts.PromptTemplate
assert loaded_model.prompt.template == "What is a good name for a company that makes {product}?"
def test_langchain_agent_model_predict():
langchain_agent_output = {
"id": "chatcmpl-123",
"object": "chat.completion",
"created": 1677652288,
"choices": [
{
"index": 0,
"finish_reason": "stop",
"text": f"Final Answer: {TEST_CONTENT}",
}
],
"usage": {"prompt_tokens": 9, "completion_tokens": 12, "total_tokens": 21},
}
model = create_model("openaiagent")
with mlflow.start_run():
logged_model = mlflow.langchain.log_model(model, "langchain_model")
loaded_model = mlflow.pyfunc.load_model(logged_model.model_uri)
langchain_input = {
"input": "What was the high temperature in SF yesterday in Fahrenheit? "
"What is that number raised to the .023 power?"
}
with _mock_request(return_value=_MockResponse(200, langchain_agent_output)):
result = loaded_model.predict([langchain_input])
assert result == [TEST_CONTENT]
inference_payload = json.dumps({"inputs": langchain_input})
langchain_agent_output_serving = {"predictions": langchain_agent_output}
with _mock_request(return_value=_MockResponse(200, langchain_agent_output_serving)):
import mlflow.pyfunc.scoring_server as pyfunc_scoring_server
from mlflow.deployments import PredictionsResponse
response = pyfunc_serve_and_score_model(
logged_model.model_uri,
data=inference_payload,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
assert (
PredictionsResponse.from_json(response.content.decode("utf-8"))
== langchain_agent_output_serving
)
def test_unsupported_chain_types():
chain = FakeChain()
with pytest.raises(
MlflowException,
match="MLflow langchain flavor only supports logging langchain.chains.llm.LLMChain",
):
with mlflow.start_run():
mlflow.langchain.log_model(chain, "fake_chain_model")
| [
"What is a good name for a company that makes {product}?"
] |
2024-01-10 | minggnim/nlp-models | src~nlp_models~llm~llms.py | from langchain.llms import CTransformers
from .base import LlmConfig
def build_llm(cfg: LlmConfig):
llm = CTransformers(
model=cfg.MODEL_BIN_PATH,
model_type=cfg.MODEL_TYPE,
config={'max_new_tokens': cfg.MAX_NEW_TOKENS,
'temperature': cfg.TEMPERATURE}
)
return llm
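# Illustrative sketch (not part of the original module): wiring build_llm() up with a config
# object. LlmConfig's fields are defined in .base; the values shown are assumptions only.
#
#   cfg = LlmConfig()   # e.g. MODEL_BIN_PATH="models/llama-2-7b-chat.ggmlv3.q4_0.bin",
#                       #      MODEL_TYPE="llama", MAX_NEW_TOKENS=256, TEMPERATURE=0.1
#   llm = build_llm(cfg)
#   print(llm("Explain retrieval-augmented generation in one sentence."))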
| [] |
2024-01-10 | minggnim/nlp-models | src~nlp_models~llm~apps.py | from langchain import LLMChain
from langchain.chains import RetrievalQA
from langchain.memory import ConversationBufferWindowMemory
from .prompts import QAPrompt, ChatPrompt
from .base import LlmConfig
class QaLlmApp:
def __init__(self, llm, vectordb, prompt=QAPrompt().qa_prompt) -> None:
self.cfg = LlmConfig()
self.llm_qa = RetrievalQA.from_chain_type(
llm=llm,
chain_type='stuff',
retriever=vectordb.as_retriever(search_kwargs={'k': self.cfg.VECTOR_COUNT}),
return_source_documents=self.cfg.RETURN_SOURCE_DOCUMENTS,
chain_type_kwargs={'prompt': prompt}
)
def __call__(self, query):
return self.llm_qa({'query': query})
class ChatLlmApp:
def __init__(self, llm, prompt=ChatPrompt().chat_prompt, memory=ConversationBufferWindowMemory(), verbose=True) -> None:
self.llm_chat = LLMChain(
llm=llm,
prompt=prompt,
verbose=verbose,
memory=memory
)
def __call__(self, inputs) -> str:
return self.llm_chat.predict(human_input=inputs)
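# Illustrative sketch (not part of the original module): a minimal chat loop on top of
# ChatLlmApp, assuming build_llm() from the sibling llms module and the default
# ChatPrompt/ConversationBufferWindowMemory.
#
#   chat = ChatLlmApp(llm=build_llm(LlmConfig()))
#   print(chat("Summarise the benefits of quantised models."))
#   print(chat("Now list three trade-offs."))   # memory keeps the previous turn in context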
| [] |
2024-01-10 | armaan-rashid/detect-chatgpt | data_querying.py | """
This file implements the functionality for generating ChatGPT passages.
Each dataset has a LOAD function which loads the human dataset from wherever
it may be: HuggingFace, local files, etc.
Each dataset also has a GENERATE function which takes in a human dataset
and prompts ChatGPT to generate examples in whatever way is appropriate
to that dataset: asking a question, asking it to complete text, etc.
When run as a script, main() calls a LOAD function to create prompts
and then a GENERATE function to create responses for a dataset. The GENERATE funcs
call the core ChatGPT interfaces prompt_from_dataframe/prompt_ChatGPT.
There are lots of options for I/O at multiple stages in the querying process.
Generally, we use .csv files and DataFrames because it's easy.
"""
# from google.cloud import bigquery
import pandas as pd
import transformers
import random
import os
import openai
from datasets import load_dataset
from torch import cuda
from data_processing import process_spaces, match_lengths
from argparse import ArgumentParser
USAGE = 0
DEVICE = 'cuda' if cuda.is_available() else 'cpu'
FAILSTRING = 'Failed response.'
SYSTEM = {'role': 'system', 'content': 'You are a helpful assistant.'} # a default system msg to use for all prompts
CONTINUE = {'role': 'user', 'content': 'Please, continue.'}
def prompt_from_dataframe(data: pd.DataFrame, temp, min_words: int):
"""
DESC: Query ChatGPT to generate a response for every prompt and
append these responses to a dataFrame.
PARAMS:
        data: dataFrame with prompts in it
        temp: sampling temperature for ChatGPT
        min_words: min length of valid response from ChatGPT
RETURNS:
df: dataFrame with prompts and responses
"""
count = 0
fail = 0
responses = []
def prompt_ChatGPT(prompt: str, postprocess=process_spaces):
"""
DESC: Self-explanatory, prompts OpenAI API with prompt
til response length greater than min_words.
CALLED_BY: generate() funcs
"""
global USAGE
        msgs = [SYSTEM, {'role': 'user', 'content': prompt}]
        response_len = 0
while response_len < min_words:
if response_len != 0:
msgs.append(CONTINUE)
r = openai.ChatCompletion.create(model='gpt-3.5-turbo', messages=msgs, temperature=temp)
USAGE += r['usage']['total_tokens']
msgs.append(r['choices'][0]['message'])
this_len = len(msgs[-1]['content'].split())
response_len += this_len
response = ' '.join([msg['content'] for msg in msgs if msg['role'] == 'assistant'])
return postprocess(response)
for prompt in data['prompts']:
try:
responses.append(prompt_ChatGPT(prompt))
count += 1
except:
fail += 1
responses.append(FAILSTRING)
print(f'Failed to get response to \"{prompt}\" from ChatGPT. Moving on to next prompt. Use data_processing '
'script to repair later.')
data['responses'] = responses # add responses to the DF
print(f'Successfully got {count} responses from ChatGPT at temperature {temp}. Failed to get {fail} responses.')
return data
# def bigquery_load(sql, outfile):
# """
# Pass a SQL query to bigQuery,
# save results as JSON in outfile.
# """
# client = bigquery.Client()
# df = client.query(sql).to_dataframe()
# df.to_json(outfile)
# print(f"Received {len(df)} examples from BigQuery.")
def load_human_data(file, num_examples):
"""
Self-explanatory: load n examples of human data, i.e. prompts, from a file.
"""
df = pd.read_csv(file)
assert len(df) >= num_examples and num_examples > 1, 'need to choose more than 1 example, or too many examples for file'
return df.loc[:num_examples]
def xsum_load(infile=None, outfile=None, num_examples=500, preprocess=process_spaces):
"""
DESC: Download XSum from HuggingFace datasets hub, or load from file.
PARAMS:
infile: file where dataset already lives, if applicable
outfile: file to write human data to if applicable
num_examples: num to take from HuggingFace dataset
preprocess: function for preprocessing examples
RETURNS: DataFrame of human XSum examples
"""
if infile:
return load_human_data(infile, num_examples)
xsum_dict = load_dataset('xsum')
xsum = xsum_dict['train']
articles = [preprocess(xsum[idx]['document']) for idx in random.sample(range(len(xsum)), num_examples)]
df = pd.DataFrame({'articles': articles})
if outfile:
df.to_csv(outfile, index=False)
return df
def xsum_generate(xsum: pd.DataFrame, temp: float, tokens=30, prompt_msg='', min_words=250, outfile=None):
"""
DESC: Truncate the news articles in the XSum data and prompt
ChatGPT. This function is different than the functions for other datasets
because we're calling a tokenizer to cut off the prompt at
the length specified by tokens, whereas the other datasets have a natural
notion of prompt. Part of this function adapted from Mitchell et al.'s
original ChatGPT implementation @ https://github.com/eric-mitchell/detect-gpt
PARAMS:
        xsum: DataFrame of XSum news articles (needs 'articles' column)
        temp: sampling temperature for ChatGPT
        tokens: number of tokens from article to prompt ChatGPT with
        prompt_msg: add'l message to prompt ChatGPT with BEFORE news article
        min_words: min length of valid response from ChatGPT
        outfile: file to write prompts/responses to
RETURNS: DataFrame of generated XSum examples
"""
tokenizer = transformers.GPT2Tokenizer.from_pretrained('gpt2')
try: # this needs to be try/except for compatibility with different versions of datasets API
tokenizer.pad_token_id = tokenizer.eos_token_id
except:
tokenizer.pad_token_id = [tokenizer.eos_token_id]
tokenized = tokenizer(xsum['articles'].values.tolist(), return_tensors="pt", padding=True).to(DEVICE)
tokenized = {key: value[:, :tokens] for key, value in tokenized.items()}
prompts = tokenizer.batch_decode(tokenized['input_ids'], skip_special_tokens=True)
xsum['prompts'] = [prompt_msg + prompt for prompt in prompts]
xsum = prompt_from_dataframe(xsum, temp, min_words=min_words)
if outfile:
xsum[['prompts', 'responses']].to_csv(outfile, index=False)
return xsum
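# Illustrative sketch (not part of the original script): a hypothetical end-to-end call.
# The argument values are examples only.
#
#   xsum = xsum_load(num_examples=100)
#   xsum = xsum_generate(xsum, temp=0.7, tokens=30,
#                        prompt_msg='Continue this news article: ',
#                        min_words=250, outfile='xsum_chatgpt.csv')
#   # 'prompts' holds the 30-token article prefixes, 'responses' the ChatGPT continuations.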
def squad_load(infile=None, outfile=None, num_examples=500, preprocess=process_spaces):
"""
DESC: Download SQuAD from HuggingFace hub, or from file.
Sample num_examples if downloading.
PARAMS:
infile: file with already loaded data
outfile: file to write human data
num_examples: number to sample if downloading from HuggingFace
preprocess: preprocessor function to apply to each example
RETURNS:
dataFrame with contexts, questions, and answers
"""
if infile:
return load_human_data(infile, num_examples)
squad_dict = load_dataset("squad")
squad = squad_dict['train']
idxs = random.sample(range(len(squad)), num_examples)
contexts = [preprocess(squad[idx]['context']) for idx in idxs]
questions = [preprocess(squad[idx]['question']) for idx in idxs]
answers = [preprocess(squad[idx]['answers']['text'][0]) for idx in idxs]
df = pd.DataFrame({'contexts': contexts, 'questions': questions, 'answers': answers})
if outfile:
df.to_csv(outfile, index=False)
return df
def squad_generate(squad: pd.DataFrame, temp: float, min_words: int, outfile=None):
"""
DESC: Given a dataFrame of SQuAD q's, a's, contexts, prepare data
to feed in as prompts to ChatGPT. Write to outfile if provided.
PARAMS:
squad: DataFrame of squad examples (must have contexts and questions cols)
        temp: sampling temperature for ChatGPT
        min_words: min valid length of chatGPT response
        outfile: file to write prompts/responses
RETURNS:
squad: DataFrame with chatGPT responses
"""
squad['prompts'] = squad.apply(lambda row: row['contexts'] + ' ' + row['questions'], axis=1)
squad = prompt_from_dataframe(squad, temp, min_words=min_words)
if outfile:
squad[['prompts', 'responses']].to_csv(outfile, index=False)
return squad
def wp_load(infile: str, num_examples, outfile=None, load=False):
"""
DESC: Another loading function, this time for Reddit WritingPrompts.
Some quirks because this dataset is stored in large files.
PARAMS:
infile: this could be ONE infile if args.load is false, TWO if it's true. If it's two,
then it's assumed the wp_source file is passed in first, then matching wp_target file.
num_examples: num_examples to load
outfile: outfile to save data to if necessary
load: True if args.load is true, false otherwise
RETURNS: two column DataFrame, one of prompts and the other of stories
"""
if not load: # if args.load is true, assume infile is already-prepped csv
return load_human_data(infile, num_examples)
split = infile.find(' ')
source, target = infile[:split], infile[split+1:]
def remove_prompt_tag(string): # implementation of this utility from Mitchell et al.
return string.replace('[ WP ]', '')
prompts = []
stories = []
with open(source) as src, open(target) as tgt:
prompts = src.readlines()
stories = tgt.readlines()
# select num_examples examples with [ WP ] tag and take out the tag!
filtered = [(prompt, story) for prompt, story in zip(prompts, stories) if prompt.startswith('[ WP ]')]
filtered = [filtered[idx] for idx in random.sample(range(len(filtered)), num_examples)]
prompts, stories = zip(*filtered)
prompts = [remove_prompt_tag(process_spaces(prompt)).strip() for prompt in prompts]
stories = [process_spaces(story).strip() for story in stories]
df = pd.DataFrame({'prompts': prompts, 'stories': stories})
if outfile:
df.to_csv(outfile, index=False)
return df
def wp_generate(wp: pd.DataFrame, temp: float, prompt_msg='', min_words=200, outfile=None):
"""
DESC: Another ChatGPT-generating function. No tokenization necessary
because we use the whole prompt to generate data, but optional
prompt_msg can be passed in.
PARAMS:
wp: DataFrame with 'prompts' and 'stories' columns of human prompts and stories
temp: temperature for sampling ChatGPT with
prompt_msg: message to append to beginning of each prompt
min_words: minimum words desired from each prompt
outfile: where to save generated examples
"""
wp['prompts'] = wp.apply(lambda row: prompt_msg + row['prompts'], axis=1)
wp_with_responses = prompt_from_dataframe(wp, temp, min_words)
if outfile:
wp[['prompts', 'responses']].to_csv(outfile, index=False)
return wp_with_responses
if __name__ == '__main__':
argparser = ArgumentParser(prog='ChatGPT Scraper', description='Generate tokens and responses from ChatGPT using unofficial API.')
argparser.add_argument('dataset', help="Specify which dataset you want to generate ChatGPT examples for.", choices=['xsum', 'wp', 'squad'])
argparser.add_argument('-q', '--query', action='store_true', help='specify if you actually want to ask ChatGPT for examples. Safeguard against excess token use!')
input = argparser.add_argument_group()
input.add_argument('-l', '--load', action='store_true', help='if you need to download your dataset from Hub/files, specify this option')
input.add_argument('-i', '--infile', help='files where dataset needs to be loaded from!')
output = argparser.add_argument_group()
output.add_argument('--out_human', help='If --load is specified, this is where load will store the human language data.')
output.add_argument('--out_chatgpt', action='store', help='Destination file to write prompts/responses from ChatGPT.')
prompt_opts = argparser.add_argument_group()
prompt_opts.add_argument('-m', '--msg', help='prompt before \'actual\' dataset prompt to give ChatGPT, if that might help ChatGPT give a better response')
prompt_opts.add_argument('-k', '--tokens', help='Specify number of tokens when creating prompts for XSum dataset.', default=30, type=int)
prompt_opts.add_argument('-n', '--num_examples', help='Number of examples to grab when loading a dataset.', type=int, default=500)
prompt_opts.add_argument('-w','--min_words', help='min_words desired from a ChatGPT response', type=int, default=250)
prompt_opts.add_argument('-t', '--temperature', help='temperature for sampling ChatGPT', type=float)
args = argparser.parse_args()
openai.api_key = os.getenv('OPENAI_API_KEY')
if args.dataset == 'xsum':
xsum = xsum_load(infile=args.infile, outfile=args.out_human, num_examples=args.num_examples)
if args.query:
xsum_with_responses = xsum_generate(xsum, temp=args.temperature, tokens=args.tokens, prompt_msg=args.msg, min_words=args.min_words, outfile=args.out_chatgpt)
elif args.dataset == 'squad':
squad = squad_load(infile=args.infile, outfile=args.out_human, num_examples=args.num_examples)
if args.query:
squad_with_responses = squad_generate(squad, temp=args.temperature, min_words=args.min_words, outfile=args.out_chatgpt)
elif args.dataset == 'wp':
wp = wp_load(infile=args.infile, num_examples=args.num_examples, outfile=args.out_human, load=args.load)
if args.query:
wp_with_responses = wp_generate(wp, temp=args.temperature, prompt_msg=args.msg, min_words=args.min_words, outfile=args.out_chatgpt)
print(f'Used {USAGE} tokens in this run.') | [
"input_ids",
"Please, continue.",
"[]",
"You are a helpful assistant."
] |
2024-01-10 | armaan-rashid/detect-chatgpt | data_processing.py | """
This file contains some basic data processing utility functions.
Can be run as a script to either repair unfinished data, merge data
or load data from files into the main ChatGPT script.
Some of these functions are from Mitchell et al.'s
detectGPT. Their original code can be found here:
https://github.com/eric-mitchell/detect-gpt
"""
import pandas as pd
from argparse import ArgumentParser
import openai
import os
import torch
from transformers import AutoTokenizer
# housekeeping some global vars
USAGE = 0
FAILSTRING = 'Failed response.'
SYSTEM = {'role': 'system', 'content': 'You are a helpful assistant.'} # a default system msg to use for all prompts
CONTINUE = {'role': 'user', 'content': 'Please, continue.'}
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
def load_data(filename, k=None, tokenizer=None):
"""
Load k examples of data from file into dict format.
Expects that the dfs loaded in has 'original, sampled'
columns and ignores other columns.
"""
df = pd.read_csv(filename)
assert 'original' in df.columns and 'sampled' in df.columns, 'files need to have original and sampled cols'
print(f'Loading data from {filename}.')
conv = {'original': df['original'].values.tolist(),
'sampled': df['sampled'].values.tolist()}
    k = min(len(conv['original']), k) if k else len(conv['original'])
conv['original'] = conv['original'][:k]
conv['sampled'] = conv['sampled'][:k]
if tokenizer:
print(f'Verifying that all passages have length less than {tokenizer.model_max_length} tokens.')
try: tokenizer.to(DEVICE)
except: pass
        conv['original'] = [truncate_tokens(example, tokenizer) for example in conv['original']]
        conv['sampled'] = [truncate_tokens(example, tokenizer) for example in conv['sampled']]
return conv
def truncate_tokens(string, tokenizer):
"""
Truncate a string to be the max length of a tokenizer.
"""
tokenized = tokenizer.encode(string)
if len(tokenized) > tokenizer.model_max_length:
print(f'Truncating an example because it uses too many ({len(tokenized)}) tokens')
return tokenizer.decode(tokenized[:tokenizer.model_max_length])
return string
def truncate_dataframe(df: pd.DataFrame, tokenizer):
"""
Truncate tokens for all the entries in a df full of strings.
"""
return df.applymap(lambda item: truncate_tokens(item, tokenizer))
def concat_cols(row, cols):
string = ''
for col in cols:
string += row[col] + ' '
return string.strip()
def match_lengths(data: pd.DataFrame, col1: str, col2: str):
"""
Given a DataFrame of two columns, truncate the
original-sampled pairs to roughly match length (i.e. have same
word count.)
"""
    for i, row in data.iterrows():
        orig_split = row[col1].split()
        sampled_split = row[col2].split()
        trunc = min(len(orig_split), len(sampled_split))
        data.at[i, col1] = ' '.join(orig_split[:trunc])
        data.at[i, col2] = ' '.join(sampled_split[:trunc])
return data
def remove_failed_responses(file):
"""
Erase the failed responses that ChatGPT couldn't generate.
"""
df = pd.read_csv(file)
idxs = [i for i, row in df.iterrows() if FAILSTRING in row['sampled']]
df.drop(labels=idxs, inplace=True)
df.reset_index(drop=True, inplace=True)
df.to_csv(file, index=False)
print(f'removed {len(idxs)} responses from {file}')
def process_spaces(story: str):
"""Basic processing function, adapted from Mitchell et al."""
return story.replace(
' ,', ',').replace(
' .', '.').replace(
' ?', '?').replace(
' !', '!').replace(
' ;', ';').replace(
' \'', '\'').replace(
' ’ ', '\'').replace(
' :', ':').replace(
'<newline>', '\n').replace(
'`` ', '"').replace(
' \'\'', '"').replace(
'\'\'', '"').replace(
'.. ', '... ').replace(
' )', ')').replace(
'( ', '(').replace(
' n\'t', 'n\'t').replace(
' i ', ' I ').replace(
' i\'', ' I\'').replace(
'\\\'', '\'').replace(
'\n ', ' ').replace(
'\n', ' ').replace(
' ', ' ').strip()
def replace_original(correct: pd.DataFrame, incorrect: pd.DataFrame):
"""Emergency function to handle misplaced perturbations."""
c = len(incorrect.columns) // 2
n = len(incorrect)
for i in range(c):
incorrect[f'o{i+1}'] = correct[f'o{i+1}'][:n]
return incorrect
def repair_dataframe(data: pd.DataFrame, temp: float, min_words=200, prompt_msg=''):
"""
DESC: Repair dataframe that has incomplete responses from ChatGPT.
PARAMS:
data: a dataFrame that has both a 'prompts' and 'responses' column
    temp: sampling temperature for ChatGPT
    min_words: minimum word count required from each repaired response
    prompt_msg: message prepended to each prompt before re-querying
"""
fail = 0
count = 0
    for i, row in data.iterrows():
        if row['responses'] == FAILSTRING:
            try:
                prompt = prompt_msg + row['prompts']
                response = prompt_ChatGPT(prompt, temp, min_words)
                data.at[i, 'responses'] = response
count += 1
except:
print(f'The prompt: {prompt} did not successfully get a response from ChatGPT.\n')
fail += 1
continue
print(f'Successfully got {count} responses from ChatGPT, failed to get {fail} responses.')
return data
def prompt_ChatGPT(prompt: str, temp: float, min_words=250, postprocess=process_spaces):
"""
DESC: Self-explanatory, prompts OpenAI API with prompt
til response length greater than min_words.
CALLED_BY: generate() funcs
"""
global USAGE
msgs = [SYSTEM, {'role': 'user', 'content': prompt}]
response_len = 0
while response_len < min_words:
if response_len != 0:
msgs.append(CONTINUE)
r = openai.ChatCompletion.create(model='gpt-3.5-turbo', messages=msgs, temperature=temp)
USAGE += r['usage']['total_tokens']
msgs.append(r['choices'][0]['message'])
this_len = len(msgs[-1]['content'].split())
response_len += this_len
    response = ' '.join([msg['content'] for msg in msgs if msg['role'] == 'assistant'])
return postprocess(response)
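# Hedged usage sketch (illustrative only; requires openai.api_key to be set and consumes API
# tokens): request a response of at least 250 words at a moderate temperature.
#   story = prompt_ChatGPT('Write a short story about a lighthouse keeper.', temp=0.7, min_words=250)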
def merge_human_sampled(original_file, original_cols, sampled_file, sampled_cols, outfile=None):
"""
DESC: Given files of both original and sampled data,
merge them into one dataFrame.
PARAMS:
original_file, sampled_file: file of human data, chatGPT data resp.
original_cols, sampled_cols: list of cols to read in from original_file, sampled_file resp.
if there are multiple columns, they're concatenated with a space separating the strings in each.
outfile: where to write merged data
RETURNS: dataFrame of merged data
"""
original = pd.read_csv(original_file)
sampled = pd.read_csv(sampled_file)
if original_cols is None:
original_cols = original.columns
if sampled_cols is None:
sampled_cols = sampled.columns
original['original'] = original.apply(lambda row: concat_cols(row, original_cols), axis=1)
sampled['sampled'] = sampled.apply(lambda row: concat_cols(row, sampled_cols), axis=1)
df = pd.concat([original['original'], sampled['sampled']], axis=1)
df = match_lengths(df, 'original', 'sampled')
if outfile:
df.to_csv(outfile, index=False)
return df
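# Hedged usage sketch (illustrative; the file names and column names are hypothetical):
#   merged = merge_human_sampled('xsum_human.csv', ['document'],
#                                'xsum_chatgpt.csv', ['responses'],
#                                outfile='xsum_merged.csv')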
def strip_text(file, col, strip_msg):
df = pd.read_csv(file)
assert col in df.columns, 'invalid column called for this dataFrame'
df[col] = df.apply(lambda row: row[col].replace(strip_msg, ''), axis=1)
df.to_csv(file, index=False)
print(f'Stripped the text \'{strip_msg}\' from {file} in column {col}')
if __name__=='__main__':
parser = ArgumentParser(prog='process data already retrieved, in different ways')
parser.add_argument('task', help='what you want to do', choices=['merge', 'repair', 'strip', 'remove', 'truncate'])
merge = parser.add_argument_group()
merge.add_argument('--orig_file', help='file with human data')
merge.add_argument('--orig_cols', help='cols to grab from orig_file', type=str)
merge.add_argument('--sampled_file', help='file with ChatGPT data')
merge.add_argument('--sampled_cols', help='cols to grab from data', type=str)
merge.add_argument('--outfile', help='where to store new merged data')
repair = parser.add_argument_group()
repair.add_argument('--repair_file', help='file with data that needs to be repaired')
repair.add_argument('--temp', help='for ChatGPT prompting', type=float)
repair.add_argument('--min_words', help='for ChatGPT prompting', type=int)
repair.add_argument('--prompt_msg', help='message to append to beginning of prompt during repair')
strip = parser.add_argument_group()
strip.add_argument('--strip_file', help='file to strip from')
strip.add_argument('--strip_col', help='col to strip from')
strip.add_argument('--strip_msg', help='text to strip')
remove = parser.add_argument_group()
remove.add_argument('--remove_files', help='files with rows to remove', nargs='*')
truncate = parser.add_argument_group()
truncate.add_argument('--trunc_files', help='files you want to truncate with a tokenizer', nargs='*')
truncate.add_argument('--tokenizer', help='what pretrained tokenizer you want to use for truncation')
parser.add_argument('-v', '--verbose', action='store_true', help='print while doing stuff')
args = parser.parse_args()
if args.task == 'merge':
assert args.orig_file and args.sampled_file, 'need to have files to merge!'
orig_cols = args.orig_cols.split(', ')
sampled_cols = args.sampled_cols.split(', ')
merged = merge_human_sampled(args.orig_file, orig_cols, args.sampled_file, sampled_cols, args.outfile)
elif args.task == 'repair':
openai.api_key = os.getenv('OPENAI_API_KEY')
broken = pd.read_csv(args.repair_file)
fixed = repair_dataframe(broken, args.temp, args.min_words, args.prompt_msg)
fixed.to_csv(args.repair_file, index=False)
print(f'Used {USAGE} tokens in this run.')
elif args.task == 'strip':
assert args.strip_file and args.strip_col and args.strip_msg
strip_text(args.strip_file, args.strip_col, args.strip_msg)
elif args.task == 'remove':
for file in args.remove_files:
remove_failed_responses(file)
elif args.task == 'truncate':
for file in args.trunc_files:
perturbed = pd.read_csv(file)
perturbed = truncate_dataframe(perturbed, AutoTokenizer.from_pretrained(args.tokenizer))
perturbed.to_csv(file, index=False)
| [
"Please, continue.",
"You are a helpful assistant.",
"PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | raychungno1/CSE5525 | project~strat.py | import os
import time
import random
import json
import openai
from typing import Union
from dotenv import load_dotenv
from utils import *
def prompt_to_str(prev: str, prompt: dict):
return prev + "Q: " + prompt["question"] + "\nA: " + " ".join(prompt["facts"]) + " #### " + str(prompt["answer"]) + "\n\n"
def ans_to_soln(answer: Union[str, bool]) -> bool:
if isinstance(answer, bool):
return answer
splits = answer.split("#### ")
if len(splits) > 1:
return splits[1] == "True"
return False
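# Illustrative sketch (not part of the original script): shows the few-shot layout produced by
# prompt_to_str and how ans_to_soln reads a boolean answer back. The example entry is hypothetical.
def _example_prompt_round_trip():
    example = {"question": "Can a goldfish vote?",
               "facts": ["Goldfish are not citizens."],
               "answer": False}
    prompt = prompt_to_str("", example)
    # prompt == "Q: Can a goldfish vote?\nA: Goldfish are not citizens. #### False\n\n"
    return prompt, ans_to_soln("reasoning text #### True")  # second value is True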
def prep_strat_data(seed: int, num_prompts: int, data_path: str):
random.seed(seed)
with open(data_path, "r", encoding="utf8") as myfile:
dataset = json.load(myfile)
# simple: 644 samples
# medium: 1219 samples
# hard: 427 samples
# total: 2290 samples
simple, medium, hard = [], [], []
for d in dataset:
steps = len(d["decomposition"])
if steps <= 2:
d["diffifulty"] = "simple"
simple.append(d)
elif steps <= 3:
d["diffifulty"] = "medium"
medium.append(d)
else:
d["diffifulty"] = "hard"
hard.append(d)
total = simple[:300] + medium[:300] + hard[:300]
simple_prompts = create_prompts(simple, num_prompts, prompt_to_str)
medium_prompts = create_prompts(medium, num_prompts, prompt_to_str)
hard_prompts = create_prompts(hard, num_prompts, prompt_to_str)
print("----- SIMPLE -----\n", simple_prompts)
print("----- MEDIUM -----\n", medium_prompts)
print("----- HARD -----\n", hard_prompts)
return total, simple_prompts, medium_prompts, hard_prompts
if __name__ == "__main__":
SEED = 0
NUM_PROMPTS = 6
ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.join(ROOT_PATH, "data", "strategyqa_train.json")
RESULTS_PATH = os.path.join(ROOT_PATH, "results", "strategyqa")
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
(total,
simple_prompts,
medium_prompts,
hard_prompts) = prep_strat_data(SEED, NUM_PROMPTS, DATA_PATH)
num_correct_simple = 0
num_correct_medium = 0
num_correct_hard = 0
total_prompts = 0
for i, p in enumerate(total):
start = time.time()
simple_correct = predict(
simple_prompts, p, os.path.join(RESULTS_PATH, "results-simple.jsonl"), ans_to_soln)
medium_correct = predict(
medium_prompts, p, os.path.join(RESULTS_PATH, "results-medium.jsonl"), ans_to_soln)
hard_correct = predict(
hard_prompts, p, os.path.join(RESULTS_PATH, "results-hard.jsonl"), ans_to_soln)
end = time.time()
total_prompts += 1
if simple_correct:
num_correct_simple += 1
if medium_correct:
num_correct_medium += 1
if hard_correct:
num_correct_hard += 1
print("Prompt #" + str(i) +
f"\tSimple: {simple_correct}" +
f"\tSimple Accuracy: {num_correct_simple}/{total_prompts} ({round(100 * num_correct_simple/total_prompts, 2)}%)" +
f"\tMedium: {medium_correct}" +
f"\tMedium Accuracy: {num_correct_medium}/{total_prompts} ({round(100 * num_correct_medium/total_prompts, 2)}%)" +
f"\tHard: {hard_correct}" +
f"\tHard Accuracy: {num_correct_hard}/{total_prompts} ({round(100 * num_correct_hard/total_prompts, 2)}%)" +
f"\tTime: {round(end - start, 2)}")
| [
"6",
"1",
"0"
] |
2024-01-10 | raychungno1/CSE5525 | project~math_cross.py | import os
import time
import openai
from dotenv import load_dotenv
from utils import *
from maths import prep_math_data, ans_to_soln
from strat import prep_strat_data
if __name__ == "__main__":
SEED = 0
NUM_PROMPTS = 6
ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
STRAT_DATA_PATH = os.path.join(ROOT_PATH, "data", "strategyqa_train.json")
RESULTS_PATH = os.path.join(ROOT_PATH, "results", "gsm8k-cross")
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
(total, _, _, _) = prep_math_data(SEED, NUM_PROMPTS)
(_,
simple_prompts,
medium_prompts,
hard_prompts) = prep_strat_data(SEED, NUM_PROMPTS, STRAT_DATA_PATH)
num_correct_simple = 0
num_correct_medium = 0
num_correct_hard = 0
total_prompts = 0
for i, p in enumerate(total):
start = time.time()
simple_correct = predict(
simple_prompts, p, os.path.join(RESULTS_PATH, "results-simple.jsonl"), ans_to_soln)
medium_correct = predict(
medium_prompts, p, os.path.join(RESULTS_PATH, "results-medium.jsonl"), ans_to_soln)
hard_correct = predict(
hard_prompts, p, os.path.join(RESULTS_PATH, "results-hard.jsonl"), ans_to_soln)
end = time.time()
total_prompts += 1
if simple_correct:
num_correct_simple += 1
if medium_correct:
num_correct_medium += 1
if hard_correct:
num_correct_hard += 1
print("Prompt #" + str(i) +
f"\tSimple: {simple_correct}" +
f"\tSimple Accuracy: {num_correct_simple}/{total_prompts} ({round(100 * num_correct_simple/total_prompts, 2)}%)" +
f"\tMedium: {medium_correct}" +
f"\tMedium Accuracy: {num_correct_medium}/{total_prompts} ({round(100 * num_correct_medium/total_prompts, 2)}%)" +
f"\tHard: {hard_correct}" +
f"\tHard Accuracy: {num_correct_hard}/{total_prompts} ({round(100 * num_correct_hard/total_prompts, 2)}%)" +
f"\tTime: {round(end - start, 2)}")
| [
"0",
"1",
"6"
] |
2024-01-10 | raychungno1/CSE5525 | project~maths.py | import os
import re
import time
import random
import openai
from dotenv import load_dotenv
from datasets import load_dataset
from utils import *
def prompt_to_str(prev: str, prompt: dict) -> str:
return prev + "Q: " + prompt["question"] + "\nA: " + prompt["answer"].replace("\n", " ") + "\n\n"
def ans_to_soln(answer: str) -> float:
splits = answer.split("#### ")
if len(splits) > 1:
num = re.sub(r'[^0-9]', '', splits[1])
if num:
return float(num)
return float("nan")
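# Illustrative sketch (not part of the original script): ans_to_soln keeps only the digits after
# the '#### ' marker and falls back to NaN when the marker is missing. Inputs are hypothetical.
def _example_answer_parse():
    assert ans_to_soln("She sells 48 clips. #### 48") == 48.0
    return ans_to_soln("no final marker here")  # nan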
def prep_math_data(seed: int, num_prompts: int):
random.seed(seed)
dataset = load_dataset("gsm8k", "main")
# simple: 4805 samples
# medium: 3593 samples
# hard: 394 samples
# total: 8792 samples
simple, medium, hard = [], [], []
for split in dataset:
for d in dataset[split]:
steps = d["answer"].count("\n")
if steps <= 3:
d["diffifulty"] = "simple"
simple.append(d)
elif steps <= 6:
d["diffifulty"] = "medium"
medium.append(d)
else:
d["diffifulty"] = "hard"
hard.append(d)
total = simple[:300] + medium[:300] + hard[:300]
simple_prompts = create_prompts(simple, num_prompts, prompt_to_str)
medium_prompts = create_prompts(medium, num_prompts, prompt_to_str)
hard_prompts = create_prompts(hard, num_prompts, prompt_to_str)
print("----- SIMPLE -----\n", simple_prompts)
print("----- MEDIUM -----\n", medium_prompts)
print("----- HARD -----\n", hard_prompts)
return total, simple_prompts, medium_prompts, hard_prompts
if __name__ == "__main__":
SEED = 0
NUM_PROMPTS = 6
ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
RESULTS_PATH = os.path.join(ROOT_PATH, "results", "gsm8k")
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
(total,
simple_prompts,
medium_prompts,
hard_prompts) = prep_math_data(SEED, NUM_PROMPTS)
num_correct_simple = 0
num_correct_medium = 0
num_correct_hard = 0
total_prompts = 0
for i, p in enumerate(total):
start = time.time()
simple_correct = predict(
simple_prompts, p, os.path.join(RESULTS_PATH, "results-simple.jsonl"), ans_to_soln)
medium_correct = predict(
medium_prompts, p, os.path.join(RESULTS_PATH, "results-medium.jsonl"), ans_to_soln)
hard_correct = predict(
hard_prompts, p, os.path.join(RESULTS_PATH, "results-hard.jsonl"), ans_to_soln)
end = time.time()
total_prompts += 1
if simple_correct:
num_correct_simple += 1
if medium_correct:
num_correct_medium += 1
if hard_correct:
num_correct_hard += 1
print("Prompt #" + str(i) +
f"\tSimple: {simple_correct}" +
f"\tSimple Accuracy: {num_correct_simple}/{total_prompts} ({round(100 * num_correct_simple/total_prompts, 2)}%)" +
f"\tMedium: {medium_correct}" +
f"\tMedium Accuracy: {num_correct_medium}/{total_prompts} ({round(100 * num_correct_medium/total_prompts, 2)}%)" +
f"\tHard: {hard_correct}" +
f"\tHard Accuracy: {num_correct_hard}/{total_prompts} ({round(100 * num_correct_hard/total_prompts, 2)}%)" +
f"\tTime: {round(end - start, 2)}")
| [
"0",
"1",
"6"
] |
2024-01-10 | raychungno1/CSE5525 | project~strat_cross.py | import os
import time
import openai
from dotenv import load_dotenv
from utils import *
from maths import prep_math_data
from strat import prep_strat_data, ans_to_soln
if __name__ == "__main__":
SEED = 0
NUM_PROMPTS = 6
ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
STRAT_DATA_PATH = os.path.join(ROOT_PATH, "data", "strategyqa_train.json")
RESULTS_PATH = os.path.join(ROOT_PATH, "results", "gsm8k-cross")
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
(_,
simple_prompts,
medium_prompts,
hard_prompts) = prep_math_data(SEED, NUM_PROMPTS)
(total, _, _, _) = prep_strat_data(SEED, NUM_PROMPTS, STRAT_DATA_PATH)
num_correct_simple = 0
num_correct_medium = 0
num_correct_hard = 0
total_prompts = 0
for i, p in enumerate(total):
start = time.time()
simple_correct = predict(
simple_prompts, p, os.path.join(RESULTS_PATH, "results-simple.jsonl"), ans_to_soln)
medium_correct = predict(
medium_prompts, p, os.path.join(RESULTS_PATH, "results-medium.jsonl"), ans_to_soln)
hard_correct = predict(
hard_prompts, p, os.path.join(RESULTS_PATH, "results-hard.jsonl"), ans_to_soln)
end = time.time()
total_prompts += 1
if simple_correct:
num_correct_simple += 1
if medium_correct:
num_correct_medium += 1
if hard_correct:
num_correct_hard += 1
print("Prompt #" + str(i) +
f"\tSimple: {simple_correct}" +
f"\tSimple Accuracy: {num_correct_simple}/{total_prompts} ({round(100 * num_correct_simple/total_prompts, 2)}%)" +
f"\tMedium: {medium_correct}" +
f"\tMedium Accuracy: {num_correct_medium}/{total_prompts} ({round(100 * num_correct_medium/total_prompts, 2)}%)" +
f"\tHard: {hard_correct}" +
f"\tHard Accuracy: {num_correct_hard}/{total_prompts} ({round(100 * num_correct_hard/total_prompts, 2)}%)" +
f"\tTime: {round(end - start, 2)}")
| [
"6",
"1",
"0"
] |
2024-01-10 | raychungno1/CSE5525 | project~temp.py | import os
import time
import openai
import json
SEED = 0
NUM_PROMPTS = 6
ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
MATH_RESULTS_PATH = os.path.join(ROOT_PATH, "results", "gsm8k-cross")
STRAT_RESULTS_PATH = os.path.join(ROOT_PATH, "results", "strategyqa-cross")
for diffifulty in ["simple", "medium", "hard"]:
FILE_PATH = os.path.join(MATH_RESULTS_PATH, f"results-{diffifulty}.jsonl")
MATH_FILE_PATH = os.path.join(
MATH_RESULTS_PATH, f"new-results-{diffifulty}.jsonl")
STRAT_FILE_PATH = os.path.join(
STRAT_RESULTS_PATH, f"new-results-{diffifulty}.jsonl")
old_file = list(open(FILE_PATH, "r"))
math_file = open(MATH_FILE_PATH, "w")
strat_file = open(STRAT_FILE_PATH, "w")
for i, line in enumerate(old_file):
if i <= 8100:
math_file.write(line)
else:
strat_file.write(line)
| [
"6"
] |
2024-01-10 | Freeman-kuch/spitfire-openapi | openapi~routes~interractions.py | """Openai interactions module"""
from flask import Blueprint, request, session, jsonify
import openai
from openai.error import RateLimitError
from collections import defaultdict
from openapi.utils import get_current_analytics, handle_check_credits
import os
conversation = Blueprint("interraction", __name__, url_prefix="/api/chat")
def generate_chat_completion(message, chat_log) -> str:
"""
Generates a chat completion using the GPT-3.5-turbo model from OpenAI.
Args:
message (str): The user input message for the chat completion.
chat_logs (List[str]): A list of chat logs containing previous messages.
Returns:
str: The content of the generated response as a string.
"""
messages = [
{"role": "system", "content": f"{chat_log}"},
{"role": "user", "content": message},
]
current_analytics = get_current_analytics()
if current_analytics.openai_requests < int(os.getenv("DAILY_LIMIT")):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo", messages=messages, temperature=0.5, max_tokens=200
)
current_analytics.openai_requests += 1
current_analytics.update()
return response["choices"][0]["message"]["content"].strip("\n").strip()
return "Daily limit reached, please try again tomorrow"
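# Hedged usage sketch (illustrative only; assumes OPENAI_API_KEY and DAILY_LIMIT are configured
# and an application/database context is active for get_current_analytics):
#   reply = generate_chat_completion("What is a REST API?", chat_log="You are a concise tutor.")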
# completion route that handles user inputs and GPT-3.5-turbo API interactions.
@conversation.route("/completions", methods=["POST"])
@handle_check_credits(session)
def interractions(user):
"""
Process user input using the GPT-3.5-turbo API and return the response as a JSON object.
:param user: The user object containing information about the current user.
:return: JSON object with the response from the GPT-3.5 model API
"""
chat_logs = defaultdict(list)
content_type = request.headers.get("Content-Type")
if content_type == "application/json":
req = request.get_json()
        if "user_input" not in req or "history" not in req:
return (
jsonify(
{
"message": "Invalid request! Missing 'user_input' or 'history' key."
}
),
400,
)
history = req.get("history")
user_input = req.get("user_input")
else:
return jsonify({"message": "Content-Type not supported!"}), 406
    if not isinstance(history, list) or not isinstance(user_input, str):
return (
jsonify(
{
"message": "Invalid data type for 'history' or 'user_input' field. Must be a valid array or string."
}
),
400,
)
    converse = chat_logs[user.id]
converse.clear()
converse.append(history)
try:
result = generate_chat_completion(message=user_input, chat_log=history)
# converse.append(f"AI: {result}")
user.credits -= 1
user.update()
return jsonify({"message": result}), 201
except RateLimitError:
return (
jsonify(
content="The server is experiencing a high volume of requests. Please try again later."
),
400,
)
except Exception as error:
return (
jsonify(content="An unexpected error occurred. Please try again later."),
500,
)
@conversation.route("/", methods=["POST"])
@handle_check_credits(session)
def string_completion(user):
"""
Process user input using the GPT-3.5-turbo API and return the response as a JSON object.
:param user: The user object containing information about the current user.
:return: JSON object with the response from the GPT-3.5-turbo model API
"""
content_type = request.headers.get("Content-Type")
if content_type == "application/json":
req = request.get_json()
if "user_input" not in req:
return (
jsonify({"message": "Invalid request! Missing 'user_input' key."}),
400,
)
user_input = req.get("user_input")
else:
return jsonify({"message": "Content-Type not supported!"}), 406
if not isinstance(user_input, str):
return (
jsonify(
{
"message": "Invalid data type for 'user_input' field. Must be a valid string."
}
),
400,
)
messages = [
{
"role": "system",
"content": "you are a very helpful and professional assistant",
},
{"role": "user", "content": user_input},
]
try:
result = openai.ChatCompletion.create(
model="gpt-3.5-turbo", messages=messages, temperature=0.5, max_tokens=200
)
response = result["choices"][0]["message"]["content"].strip("\n").strip()
user.credits -= 1
user.update()
return jsonify({"message": response}), 201
except RateLimitError:
return (
jsonify(
content="The server is experiencing a high volume of requests. Please try again later."
),
400,
)
except Exception as error:
return (
jsonify(content="An unexpected error occurred. Please try again later."),
500,
)
@conversation.route("/cron", methods=["GET"])
def cron():
"""
Returns a JSON object with the key "hello" and the value "world".
Example Usage:
```python
GET /cron
```
"""
return {"hello": "world"}
| [
"you are a very helpful and professional assistant",
"PLACEHOLDER"
] |
2024-01-10 | mnshah0101/ai_doc_generation | sample~DocCreator.py | import openai as ai
import os
from dotenv import load_dotenv
load_dotenv()
class DocCreator:
def __init__(self, doc, path):
        if path is None or path.strip() == '':
raise ValueError('path is required')
self.path = path
self.type = doc[0]
self.raw = doc[1]
print(self.type)
self.key = os.getenv("OPEN_AI_KEY")
ai.api_key = self.key
self.prompt_raw = "You will be given a combination of all the python files in the directory. Create a readme file for the directory"
self.prompt_formatted = "You will be given a combination of all the python files in the directory. Create a readme file for the directory. The python files have been formatted to only include the function definitions, class definitions, and comments"
def create(self):
create_string = ''
if (self.type == 'raw'):
create_string = self.create_raw()
else:
create_string = self.create_formatted()
joined_path = os.path.join(self.path, 'README.md')
with open(joined_path, 'w') as f:
f.write(create_string)
return "README.md created successfully"
def create_raw(self):
response = ai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": self.prompt_raw},
{"role": "user", "content": self.raw},
]
)
return response['choices'][0]['message']['content']
def create_formatted(self):
response = ai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": self.prompt_formatted},
{"role": "user", "content": self.raw},
]
)
return response['choices'][0]['message']['content']
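# Hedged usage sketch (illustrative; the directory path and source string are hypothetical):
#   combined_source = open('sample/DocCreator.py').read()
#   creator = DocCreator(('raw', combined_source), 'sample')
#   creator.create()  # writes sample/README.md via gpt-3.5-turbo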
| [] |
2024-01-10 | carterwsmith/jobscli | models~commands~score.py | import json
import numpy as np
import pandas as pd
from openai.embeddings_utils import cosine_similarity
def _load_most_recent_json_file(directory):
"""Loads the most recent file in a directory as json"""
import os
import json
import datetime
# get all files in directory
files = [f'{directory}/' + f for f in os.listdir(directory)]
# get the most recent file
most_recent_file = max(files, key=os.path.getctime)
# print the filename of the most recent file
print(f"Using resume: {most_recent_file}")
# load the most recent file
with open(f"{most_recent_file}", 'r') as f:
most_recent_json = json.load(f)
return most_recent_json
def score_job_from_dict(job_dict, resume_embedding):
"""Scores a job from a dictionary representation of a job"""
job_embedding = np.array(json.loads(job_dict['embedding_json']))
return int(cosine_similarity(job_embedding, resume_embedding) * 100)
def score_job_from_embedding(job_embedding, resume_embedding):
"""Scores a job from an embedding"""
return int(cosine_similarity(job_embedding, resume_embedding) * 100) | [] |
2024-01-10 | carterwsmith/jobscli | scripts~tokenize_and_embed.py | import json
from math import floor
import os
from dotenv import load_dotenv
import openai
from openai.embeddings_utils import get_embedding
import pandas as pd
import tiktoken
from .resume_utils import get_text
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
# embedding model parameters
embedding_model = "text-embedding-ada-002"
embedding_encoding = "cl100k_base" # this the encoding for text-embedding-ada-002
max_tokens = 8000 # the maximum for text-embedding-ada-002 is 8191
encoding = tiktoken.get_encoding(embedding_encoding)
def embed_jobs_from_path(filepath):
# get the filename (remove rest of path, remove extension)
filename = filepath.split("/")[-1].split(".")[0]
# load dataset
df = pd.read_json(filepath)
df["combined"] = (
"Title: " + df.title.str.strip() + "; Company: " + df.company.str.strip() + "; Info: " + df["info"].str.strip()
)
# append the location to df["combined"] in the rows where it exists
df.loc[df.location.notnull(), "combined"] += "; Location: " + df.loc[df.location.notnull(), "location"].str.strip()
# omit postings that are too long to embed
df["n_tokens"] = df.combined.apply(lambda x: len(encoding.encode(x)))
df = df[df.n_tokens <= max_tokens]
# print number of valid postings and total number of tokens
print(len(df), df.n_tokens.sum())
df["embedding"] = df.combined.apply(lambda x: get_embedding(x, engine=embedding_model))
return df
def embed_jobs_from_dict(job_dict):
# load dataset
df = pd.DataFrame.from_dict(job_dict)
#print(df)
df["combined"] = (
"Title: " + df.title.str.strip() + "; Company: " + df.company.str.strip() + "; Info: " + df["info"].str.strip()
)
# append the location to df["combined"] in the rows where it exists
df.loc[df.location.notnull(), "combined"] += "; Location: " + df.loc[df.location.notnull(), "location"].str.strip()
# omit postings that are too long to embed
df["n_tokens"] = df.combined.apply(lambda x: len(encoding.encode(x)))
df = df[df.n_tokens <= max_tokens]
# print number of valid postings and total number of tokens
# print(len(df), df.n_tokens.sum())
df["embedding"] = df.combined.apply(lambda x: get_embedding(x, engine=embedding_model))
return df
def _cost_from_num_tokens(n):
# $0.0001 per 1000 tokens
return n / 1000 * 0.0001
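# Worked example (illustrative): at $0.0001 per 1,000 tokens, embedding an 8,000-token resume
# costs _cost_from_num_tokens(8000) == 0.0008 dollars.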
def embed_resume(filepath):
# get the filename (remove rest of path, remove extension)
filename = filepath.split("/")[-1]
text = get_text(filepath)
# check resume is not too long
n_tokens = len(encoding.encode(text))
while n_tokens > max_tokens:
print(f"Shaving tokens... [{n_tokens} > {max_tokens}]")
# remove the last 1/4 of text
text = text[:floor(len(text) * 3 / 4)]
n_tokens = len(encoding.encode(text))
resume_embed_cost = _cost_from_num_tokens(n_tokens)
embedding = get_embedding(text, engine=embedding_model)
return embedding, resume_embed_cost | [] |
2024-01-10 | carterwsmith/jobscli | scripts~rank_searches.py | import json
import numpy as np
import pandas as pd
from openai.embeddings_utils import get_embedding, cosine_similarity
def rank_postings_from_paths(resume_path, jobs_path, n=10, pprint=True):
df = pd.read_csv(jobs_path, index_col=0)
# cast embedding to np array
df.embedding = df.embedding.apply(lambda x: np.array(json.loads(x)))
# load resume embedding
with open(resume_path, "r") as f:
resume_embedding = json.load(f)
# cast resume embedding to np array
resume_embedding = np.array(resume_embedding)
df["similarity"] = df.embedding.apply(lambda x: cosine_similarity(x, resume_embedding))
results = df.sort_values("similarity", ascending=False).head(n)
if pprint:
for i, r in results.iterrows():
out_string = r["title"] + " at " + r["company"] + " in " + r["location"] + " -- Relevance Score: " + str(int(r["similarity"] * 100))
print(out_string)
return results
def rank_postings_from_sources(resume_embedding, jobs_df, n=10, pprint=False):
# cast embedding to np array
jobs_df.embedding = jobs_df.embedding.apply(lambda x: np.asarray(x))
# cast resume embedding to np array
resume_embedding = np.array(resume_embedding)
jobs_df["similarity"] = jobs_df.embedding.apply(lambda x: cosine_similarity(x, resume_embedding))
# apply int(x["similarity"] * 100) to similarity column
jobs_df["similarity"] = jobs_df.similarity.apply(lambda x: int(x * 100))
results = jobs_df.sort_values("similarity", ascending=False).head(n)
if pprint:
for i, r in results.iterrows():
out_string = r["title"] + " at " + r["company"] + " in " + r["location"] + " -- Relevance Score: " + str(int(r["similarity"] * 100))
print(out_string)
return results
rpath = "app/jobapp/data/real_resume_embedding.json"
jpath = "app/jobapp/data/example_jobs_with_embeddings.csv"
rsrc = json.loads(open(rpath, "r").read())
jsrc = pd.read_csv(jpath, index_col=0) | [] |
2024-01-10 | carterwsmith/jobscli | scripts~extract_params.py | import os
from dotenv import load_dotenv
import guidance
load_dotenv()
def extract_params(search):
# set the default language model used to execute guidance programs
guidance.llm = guidance.llms.OpenAI("text-davinci-003", token=os.getenv("OPENAI_API_KEY"))
program = guidance("""Extract the following from this job search query, if there is no location write NONE:
EXAMPLE
{{example_input}}
QUERY: {{example_query}}
LOCATION: {{example_location}}
UPDATED
{{input}}
QUERY: {{gen 'query' stop='\\n'}}
LOCATION: {{gen 'location'}}""")
# execute the program on a specific proverb
executed_program = program(
example_input="san francisco remote data jobs",
example_query="remote data jobs",
example_location="san francisco",
input=search,
)
query = executed_program["query"].strip()
location = None if executed_program["location"].strip() == "NONE" else executed_program["location"].strip()
return {"extracted_query": query, "extracted_location": location} | [] |
2024-01-10 | ravula07/Anokha_Demo | Talkwithpdf.py | # Import Required Libraries
import os
import streamlit as st
from streamlit_chat import message
from langchain.document_loaders import OnlinePDFLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQA
from langchain.embeddings import CohereEmbeddings
from langchain.prompts import PromptTemplate
from langchain.llms import Cohere
# Setting Up API Tokens
# Create .streamlit Folder in Root Directory
# Create a File secrets.toml
# TOML format
# cohere_apikey="Enter your Key"
# Setting Up Streamlit Page
st.set_page_config(page_title="Talk With PDF", page_icon=":smile:")
# Creating Temp Folder
if not os.path.exists("./tempfolder"):
os.makedirs("./tempfolder")
# tabs
tab1, tab2 = st.tabs(["📈 Talk Here", "🗃 Relevant Documents"])
tab1.markdown(
"<h1 style='text-align: center;'>Talk With PDF</h1>",
unsafe_allow_html=True,
)
# Saving Upload file to tempfolder
def save_uploadedfile(uploadedfile):
with open(
os.path.join("tempfolder", uploadedfile.name),
"wb",
) as f:
f.write(uploadedfile.getbuffer())
return st.sidebar.success("Saved File")
# Creating Sidebar for Utilites
with st.sidebar:
st.title("Upload PDF")
uploaded_file = st.file_uploader("Choose a file", type=["pdf"])
temp_r = st.slider("Temperature", 0.1, 0.9, 0.3, 0.1)
chunksize = st.slider("Chunk Size for Splitting Document ", 256, 1024, 300, 10)
clear_button = st.button("Clear Conversation", key="clear")
# Initialzing Text Splitter
text_splitter = CharacterTextSplitter(chunk_size=chunksize, chunk_overlap=10)
# Intializing Cohere Embdedding
embeddings = CohereEmbeddings(model="large", cohere_api_key=st.secrets["cohere_apikey"])
def PDF_loader(document):
loader = OnlinePDFLoader(document)
documents = loader.load()
prompt_template = """
Your are an AI Chatbot devolped to help users to talk to a PDF document.Use the following pieces of context to answer the question at the end.Greet Users!!
{context}
{question}
"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
chain_type_kwargs = {"prompt": PROMPT}
texts = text_splitter.split_documents(documents)
global db
db = Chroma.from_documents(texts, embeddings)
retriever = db.as_retriever()
global qa
qa = RetrievalQA.from_chain_type(
llm=Cohere(
model="command-xlarge-nightly",
temperature=temp_r,
cohere_api_key=st.secrets["cohere_apikey"],
),
chain_type="stuff",
retriever=retriever,
return_source_documents=True,
chain_type_kwargs=chain_type_kwargs,
)
return "Ready"
if uploaded_file is not None:
save_uploadedfile(uploaded_file)
PDF_loader("tempfolder/" + uploaded_file.name)
tab1.markdown(
"<h3 style='text-align: center;'>Now You Are Talking With "
+ uploaded_file.name
+ "</h3>",
unsafe_allow_html=True,
)
# Session State
if "chat_history" not in st.session_state:
st.session_state["chat_history"] = []
if "generated" not in st.session_state:
st.session_state["generated"] = []
if "past" not in st.session_state:
st.session_state["past"] = []
# Generating Response
def generate_response(query):
result = qa({"query": query, "chat_history": st.session_state["chat_history"]})
tab2.markdown(
"<h3 style='text-align: center;'>Relevant Documents Metadata</h3>",
unsafe_allow_html=True,
)
tab2.write(result["source_documents"])
result["result"] = result["result"]
return result["result"]
# Creating Containers
response_container = tab1.container()
container = tab1.container()
with container:
with st.form(key="my_form", clear_on_submit=True):
user_input = st.text_input("You:", key="input")
submit_button = st.form_submit_button(label="Send")
if user_input and submit_button:
if uploaded_file is not None:
output = generate_response(user_input)
print(output)
st.session_state["past"].append(user_input)
st.session_state["generated"].append(output)
st.session_state["chat_history"] = [(user_input, output)]
else:
st.session_state["past"].append(user_input)
st.session_state["generated"].append(
"Please go ahead and upload the PDF in the sidebar, it would be great to have it there."
)
if st.session_state["generated"]:
with response_container:
for i in range(len(st.session_state["generated"])):
message(
st.session_state["past"][i],
is_user=True,
key=str(i) + "_user",
avatar_style="adventurer",
seed=123,
)
message(st.session_state["generated"][i], key=str(i))
# Enabling Clear button
if clear_button:
st.session_state["generated"] = []
st.session_state["past"] = []
st.session_state["chat_history"] = []
| [
"question",
" \n Your are an AI Chatbot devolped to help users to talk to a PDF document.Use the following pieces of context to answer the question at the end.Greet Users!!\n {context}\n\n {question}\n ",
"context"
] |
2024-01-10 | andrewgcodes/vec2vec | comparevec2vecwithada.py | # -*- coding: utf-8 -*-
"""compareVec2VecWithAda.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1jPaNXdO0_oW6VczlWfm5RPUVpMtVQD9c
"""
import pandas as pd
import numpy as np
import openai
from sklearn.metrics.pairwise import cosine_similarity
import tensorflow as tf
from tensorflow.keras.models import load_model
from transformers import AutoTokenizer, AutoModel
import torch
import torch.nn.functional as F
# Load model (available from Hugging Face)
tokenizer = AutoTokenizer.from_pretrained('all-mpnet-base-v2')
model = AutoModel.from_pretrained('all-mpnet-base-v2')
# Define cosine similarity loss
def cosine_similarity_loss(y_true, y_pred):
y_true = tf.nn.l2_normalize(y_true, axis=-1)
y_pred = tf.nn.l2_normalize(y_pred, axis=-1)
return -tf.reduce_mean(y_true * y_pred, axis=-1)
#Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
token_embeddings = model_output[0] #First element of model_output contains all token embeddings
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
loaded_model = load_model('mpnet2adaE75V4.h5', custom_objects={'cosine_similarity_loss': cosine_similarity_loss})
openai.api_key="insert API key here"
# load in csv of 10,000 embeddings in our test set paired with the original reviews
df2 = pd.read_csv('Actual_Embeddings.csv')
# Convert strings of lists to numpy arrays. this takes a while
df2['Actual_Embeddings'] = df2['Actual_Embeddings'].apply(eval).apply(np.array)
def get_top_5_texts(query):
encoded_input = tokenizer(query, padding=True, truncation=True, return_tensors='pt')
with torch.no_grad():
model_output = model(**encoded_input)
mpnetEmbeddings = mean_pooling(model_output, encoded_input['attention_mask'])
mpnetEmbeddings = F.normalize(mpnetEmbeddings, p=2, dim=1)
mpnetEmbeddings = mpnetEmbeddings.detach().cpu().numpy()
mpnetEmbeddings = np.reshape(mpnetEmbeddings, (1,-1))
query_embedding = loaded_model.predict(mpnetEmbeddings)
similarities = [cosine_similarity(query_embedding.reshape(1, -1), emb.reshape(1, -1))[0][0] for emb in df2['Actual_Embeddings']]
print("Converted MPNet Embedding Results:")
top_5_idx2 = np.argsort(similarities)[-5:][::-1]
for i, idx in enumerate(top_5_idx2, 1):
print(f'Text {i}')
print(df2['combined'].iloc[idx])
print("\n")
response = openai.Embedding.create(input=query, model="text-embedding-ada-002")
query_embedding = np.array(response['data'][0]['embedding'])
similarities2 = [cosine_similarity(query_embedding.reshape(1, -1), emb.reshape(1, -1))[0][0] for emb in df2['Actual_Embeddings']]
print("OpenAI Embedding Results:")
top_5_idx2 = np.argsort(similarities2)[-5:][::-1]
for i, idx in enumerate(top_5_idx2, 1):
print(f'Text {i}')
print(df2['combined'].iloc[idx])
print("\n")
while True:
query = input("Enter your query: ")
get_top_5_texts(query) | [] |
2024-01-10 | purplelemons-dev/servers | gptbot~conversations.py |
import time
import openai
import discord
from json import dumps, loads
from env import OPENAI_API_KEY
openai.api_key = OPENAI_API_KEY
class Conversations:
openai = openai
def __init__(self):
self.conversations:dict[int,list[dict[str,str]]] = {}
self.system_messages:dict[int,str] = {} # added to the conversation history before next_prompt is called
def __str__(self) -> str:
return str(self.conversations)
def __repr__(self) -> str:
return self.__str__()
def __iter__(self):
return self.conversations.__iter__()
def __getitem__(self, key):
return self.conversations[key]
def __setitem__(self, key, value):
self.conversations[key] = value
def tryload(self,filename:str):
"""
Attempts to load a conversation history from a file.
Fails silently if the file does not exist.
"""
try:
with open(filename, "r") as f:
self.conversations = loads(f.read())
except FileNotFoundError:
pass
def save(self,filename:str):
"""
Saves the conversation history to a file.
"""
with open(filename, "w") as f:
f.write(dumps(self.conversations))
def get_history(self, member:discord.Member, stringify:bool=False):
try:
history = self.conversations[member.id]
if member.id in self.system_messages:
history.append({
"role": "system",
"content": self.system_messages[member.id]
})
if stringify:
return dumps(history)
return history
except KeyError:
return []
def add_history(self, member:discord.Member, role:str, content:str):
try:
self.conversations[member.id].append({"role": role, "content": content})
except KeyError:
self.conversations[member.id] = [{"role": role, "content": content}]
def next_prompt_stream(self, member:discord.Member, new_prompt:str=None):
"""
USE FOR `CONTENT-TYPE: TEXT/EVENTSTREAM` ONLY
Generates the next prompt for a given `member` and adds it to the current conversation.
"""
if new_prompt is not None:
self.add_history(member, "user", new_prompt.replace("\n"," "))
response = None
while response is None:
try:
response = openai.ChatCompletion.create(
model="gpt-4",
messages=self.get_history(member),
stream=True
)
except openai.error.RateLimitError:
time.sleep(5)
return response
def next_prompt(self, member:discord.Member, new_prompt:str=None):
"""
Generates the next prompt for a given `member` and adds it to the current conversation.
"""
if new_prompt is not None:
self.add_history(member, "user", new_prompt.replace("\n"," "))
response = None
while response is None:
try:
response = openai.ChatCompletion.create(
model="gpt-4",
messages=self.get_history(member)
)
except openai.error.RateLimitError:
time.sleep(5)
content:str = response.choices[0]["message"]["content"]
self.add_history(member, "assistant", content)
return content
def one(self, prompt:str, system:str=None):
prompt = prompt.replace("\n"," ")
response = None
messages = [{"role": "user", "content": prompt}]
if system is not None:
messages.append({"role": "system", "content": system})
while response is None:
try:
response = openai.ChatCompletion.create(
model="gpt-4",
messages=messages
)
except openai.error.RateLimitError:
time.sleep(5)
content:str = response.choices[0]["message"]["content"]
return content
def clear(self, member:discord.Member):
self.conversations[member.id] = []
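# Hedged usage sketch (illustrative; `member` is any discord.Member-like object with an `id`
# attribute, and OPENAI_API_KEY must be configured):
#   convos = Conversations()
#   convos.tryload("conversations.json")
#   reply = convos.next_prompt(member, "Summarize our last exchange in one sentence.")
#   convos.save("conversations.json")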
class shared_resource:
"""
Used to allow the main thread to shut down subsequent threads
"""
def __init__(self,conversations:Conversations):
self.running = True
self.conversations = conversations
def stop(self):
"""
Sets the `running` property to `False`. All tasks listening to this will stop.
"""
self.running = False
| [
"\n",
" "
] |
2024-01-10 | purplelemons-dev/servers | searchgpt~ai_api.py | """
# OpenAI GPT4 API
"""
from .env import OPENAI_KEY
import openai
from dataclasses import dataclass
openai.api_key = OPENAI_KEY
from json import loads, dumps
import json
from .google import Summary
from .browse import browse
@dataclass
class Chat:
role:str
content:str
# Temperature: 0
# Top_p: 0.9
# Model: gpt-3.5-turbo
big_model_sys_msg = """Respond ONLY with JSON. Your personal domain of knowledge does not include mathematics and only extends to 2020. Your responses should be in the form:
```
{
?\"message\": <String: data you generate>,
?\"command\": <String: a predefined command>,
?\"input\": <String: command input>
}
```
All fields are optional, but input is required if a command is provided.
Your possible commands are: "google" and "wolfram".
google: Performs a query with your command input.
wolfram: Asks Wolfram Alpha's API using your input. Use this for mathematics and science questions."""
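# Hedged illustration of the reply shape the system message above asks for (values are hypothetical):
#   {"message": "Hello! How can I help you today?"}
#   {"message": "Let me check that.", "command": "wolfram", "input": "integrate x^2 from 0 to 3"}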
def fix_JSON(data:str)->dict:
"""Fixes the JSON using AI.
Args:
data (str): The JSON to fix.
Returns:
str: The fixed JSON.
"""
return loads(openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{
"role": "system",
"content": "Fix the JSON syntax of data."
},
{
"role": "user",
"content": data
}
],
temperature=0,
top_p=0.9,
max_tokens=256,
).choices[0]["message"]["content"])
def extract_query(data:str, query:str=None, google:bool=False, model:str="gpt-3.5-turbo") -> tuple[str,bool]:
"""Uses the small model (gpt-3.5-turbo) to extract the query from large portions of data quickly.
If a query is given, it will try to extract that query from the data.
If no query is given, will summarize the data.
Raises:
DataParserException: The model failed to extract the query.
Args:
data (str): The data to extract from.
query (str, optional): The query to extract. Defaults to None.
model (str, optional): The model to use. Defaults to "gpt-3.5-turbo".
Returns:
str: The extracted query.
"""
if not isinstance(data, str): data = dumps(data)
small_model_extract_query = "Respond only using JSON in the given format. Only use one JSON object."\
"{"\
"\"message\": <String: success|error>,"\
"\"reading\": <Boolean>"\
"?\"content\": <String>"\
"}"\
"\nThere should be no natural language other than the keys/values."
if query is None:
prefix = "Summarize the data given. "
elif google:
prefix = "Find information relevant to \"{query}\" from the given website summaries and add it to \"content\". If you would like to read a page, indicate by setting \"reading\" to true and setting \"content\" to the page #. "
else:
prefix = "Extract \"{query}\" from the given content. "
data_response = openai.ChatCompletion.create(
model=model,
max_tokens=200,
messages=[
{
"role": "system",
"content": prefix.format(query=query) + small_model_extract_query
},
{
"role": "user",
"content": data
}
],
temperature=0,
top_p=0.9,
).choices[0]["message"]["content"]
try:
answer=loads(data_response)
except json.decoder.JSONDecodeError:
print("fixing broken JSON...")
print(data_response)
answer = fix_JSON(data_response)
if answer["message"] == "error": raise DataParserException("The model failed to extract the query.")
    return answer["content"], answer["reading"] in (True, "true")
class DataParserException(Exception):
pass
class AI:
def __init__(self):
self.conversation:list[Chat] = []
self.google_cache:list[Summary] = []
"Keeps a temporary list of the 10 recent queries."
def __str__(self) -> str:
return "\n".join([f"{chat.role}: {chat.content}" for chat in self.conversation])
def to_dict(self) -> dict:
return {
"conversation" : [
{
"role" : chat.role,
"content" : chat.content
}
for chat in self.conversation
],
"google_cache" : [
{
"title" : summary.title,
"url" : summary.url,
"text" : summary.text
}
for summary in self.google_cache
]
}
def add(self, role:str, content:str) -> None:
self.conversation.append(Chat(role, content))
def generate(self,temp=0.0):
try:
unparsed = openai.ChatCompletion.create(
model="gpt-4",
messages=[
{
"role": chat.role,
"content": chat.content
}
for chat in self.conversation
]+[
{
"role": "system",
"content": big_model_sys_msg
}
],
temperature=temp,
max_tokens=256,
top_p=0.9,
).choices[0]["message"]["content"]
completion:dict[str,str] = loads(unparsed)
except json.decoder.JSONDecodeError:
return {"message":unparsed}
except Exception as e:
temp += 0.1
if temp > 1:
raise e
return self.generate(temp)
return completion
| [
"content",
"Fix the JSON syntax of data."
] |
2024-01-10 | 0x11c11e/DashBot | pages~chatbot~chatbot_model.py | from langchain import OpenAI, ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain.llms import OpenAI
chat = OpenAI(temperature=0.5)
conversation = ConversationChain(
llm=chat,
verbose=True,
memory=ConversationBufferMemory()
)
| [] |
2024-01-10 | kernelzeroday/gpt-3.5-turbo-shell | gptsh0.11.py | import openai
import json
import subprocess
import logging
import logging.handlers
import sys
import os
import pty
from termcolor import colored
import argparse
import traceback
import tiktoken
import pandas as pd
def num_tokens_from_string(string: str) -> int:
"""Returns the number of tokens in a text string."""
encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
num_tokens = len(encoding.encode(string))
return num_tokens
# Global messages list
messages = []
def process_command_output(output, max_tokens):
output_tokens = num_tokens_from_string(output)
if output_tokens <= max_tokens:
return output
else:
return "The output is too large to display. Please try another command or adjust the output."
# Replace with your OpenAI API key
openai.api_key = "PUT_KEY_HERE"
def setup_logging(log_level):
logger = logging.getLogger()
logger.setLevel(log_level)
# File handler for logging
file_handler = logging.FileHandler("bash_commands.log")
file_formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
file_handler.setFormatter(file_formatter)
file_handler.setLevel(logging.DEBUG)
# Console handler for logging
console_handler = logging.StreamHandler()
console_formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
console_handler.setFormatter(console_formatter)
console_handler.setLevel(log_level)
# Add the handlers to the logger
logger.addHandler(file_handler)
logger.addHandler(console_handler)
def chatgpt_query(messages):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
max_tokens=150,
)
return response.choices[0].message['content'].strip()
def extract_json(input_string):
stack = []
result = []
for i, c in enumerate(input_string):
if c == '{':
stack.append(i)
elif c == '}' and stack:
start = stack.pop()
if not stack: # This means we've closed a top-level JSON object
result.append(input_string[start:i + 1])
return result
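# Illustrative sketch (not part of the original script): only top-level JSON objects are returned;
# nested braces stay inside their enclosing object. The input string is hypothetical.
def _example_extract_json():
    sample = 'noise {"a": 1} more {"b": {"c": 2}} tail'
    return extract_json(sample)  # ['{"a": 1}', '{"b": {"c": 2}}']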
def is_message_valid(message, max_tokens_per_message=512):
if 'content' not in message:
return True
tokens = num_tokens_from_string(message['content'])
return tokens <= max_tokens_per_message
def trim_messages(messages, max_tokens, goal_tokens, max_tokens_per_message=512):
message_tokens = []
# Filter out large messages
messages = [message for message in messages if 'content' not in message or num_tokens_from_string(message['content']) <= max_tokens_per_message]
for message in messages:
if 'content' in message:
tokens = num_tokens_from_string(message['content'])
message_tokens.append((message, tokens))
else:
message_tokens.append((message, 0))
total_tokens = sum(tokens for _, tokens in message_tokens)
if total_tokens > max_tokens:
message_tokens.sort(key=lambda x: x[1], reverse=True)
diff = total_tokens - goal_tokens
for idx, (message, tokens) in enumerate(message_tokens):
if diff > tokens:
diff -= tokens
messages.pop(idx)
else:
messages.pop(idx)
break
return messages
def generate_bash_code(user_input, messages, context, prev_command):
max_tokens = 4096
goal_tokens = 3096
# Trim the messages first
try:
messages = trim_messages(messages, max_tokens, goal_tokens)
except Exception as e:
tb = traceback.format_exc()
logging.error("Error trimming messages: %s", e)
logging.debug(tb)
# Check if user_input is valid
if not is_message_valid({"content": user_input}):
logging.error("User input is too long. Please provide a shorter input.")
return None
# Construct the context using the trimmed messages
message_context = [m['content'] for m in messages]
context = ' '.join(message_context)
# Calculate the number of tokens remaining for the user_input and context
remaining_tokens = max_tokens - len(user_input.split()) - len(context.split())
# Check if the remaining tokens are enough for the model to process
if remaining_tokens < 10:
raise ValueError("The remaining token space is too small to process the input.")
# Call the OpenAI API with the updated context
model_engine = "gpt-3.5-turbo"
# Add the user input message
messages.append({"role": "user", "content": f"{user_input}"})
if context:
context_message = f"You are a {args.os} shell assistant. Your role is to provide commands. The previous command '{prev_command}' had the following output:\n\n{context}\nPlease provide output in a machine-readable json object format with keys 'explanation', 'command', never use sudo or any commands that require interaction. do not add any extra notes. output only exactly as as instructed. if a command is not applicable give the reason why in the explanation field with an empty command value, do not repeat or clarify the previous command if you do not understand the current command. you must not use code blocks and only output raw valid json:"
if is_message_valid({"content": context_message}):
messages.insert(-1, {"role": "system", "content": context_message})
else:
logging.debug("Command output is too large. It will not be added to the context.")
for _ in range(3): # Set the maximum number of retries
response = openai.ChatCompletion.create(
model=model_engine,
messages=messages,
max_tokens=150,
n=1,
stop=None,
temperature=0.5,
)
response_text = response.choices[0].message['content'].strip()
logging.debug(f"GPT-3.5-turbo response: {response_text}")
try:
json_content = extract_json(response_text)
# Transform list to string
json_content = ''.join(json_content)
parsed_response = json.loads(json_content)
except json.JSONDecodeError as e:
try:
# If the response is not valid JSON, try to transform it into an explanation json object
parsed_response = {"explanation": response_text, "command": ""}
except Exception as e:
# If that fails, return an error
tb = traceback.format_exc()
logging.error("Failed to parse GPT-3.5-turbo response as JSON.")
logging.error(f"Response: {response_text}")
logging.error(f"Error: {e}")
logging.error(f"Traceback: {tb}")
parsed_response = {
"explanation": "I'm sorry, but I couldn't generate a valid response.",
"command": "",
}
if parsed_response["command"] != "":
break # If the command is not empty, exit the loop
else:
# If the command is empty, remind the AI of the format and try again
messages.append({
"role": "user",
"content": "Please remember to provide output in a machine-readable json object format with keys 'explanation', 'command'. Try again."
})
return parsed_response
def execute_command(command):
# if the command is not a string, return an error
if not isinstance(command, str):
return -1, "Command is not a string."
if command == "":
return -1, "Command is empty."
try:
# Append 'echo $?' to get the exit code of the command
full_command = f"{command}; echo $?"
# Start a new process with a pseudo-terminal
pid, fd = pty.fork()
if pid == 0:
# Child process: execute the command in the new terminal
os.execv("/bin/bash", ["/bin/bash", "-c", full_command])
else:
# Parent process: read the output of the child process
output = []
try:
while True:
data = os.read(fd, 1024)
if not data:
break
# output.append(data.decode())
output.append(data.decode("utf-8", "ignore"))
except OSError:
pass
output = "".join(output)
# Extract the exit code from the last line of the output
exit_code_line = output.rstrip().split("\n")[-1]
try:
exit_code = int(exit_code_line)
except ValueError:
exit_code = -1
# Remove the exit code line from the output
output = output[: output.rfind(exit_code_line)].rstrip()
if exit_code != 0:
error_output = f"An error occurred while executing the command:\n{output}"
print(colored(error_output, 'red'))
return error_output
else:
print(colored(output, 'white'))
return output
except Exception as e:
tb = traceback.format_exc()
error_output = f"An error occurred while executing the command:\n{e}\n{tb}"
print(colored(error_output, 'red'))
return error_output
def process_input(user_input, messages, context="", prev_command=""):
if user_input.lower() == "exit":
return
logging.debug(f"User input: {user_input}")
try:
parsed_response = generate_bash_code(user_input, messages, context, prev_command)
logging.debug(f"Parsed response: {parsed_response}")
except Exception as e:
tb = traceback.format_exc()
logging.error("Failed to generate bash code.")
logging.error(f"Error: {e}")
logging.error(f"Traceback: {tb}")
return
try:
bash_code = parsed_response["command"]
explanation = parsed_response["explanation"]
except KeyError as e:
logging.error("Failed to extract command and explanation from GPT-3.5-turbo response.")
return
logging.debug(colored("Generated bash command for '{}':\n".format(user_input), 'green'))
print(colored(explanation, 'yellow'))
print(colored("\n{}".format(bash_code), 'cyan'))
output = None
if args.auto_exec or input("\nDo you want to execute this command? (Y/n): ").lower() != "n":
output = execute_command(bash_code)
        if is_message_valid({"content": output}):
messages.append({"role": "assistant", "content": output})
else:
output = "The generated output is too large to process. Please try again with a shorter command."
print(colored("\n" + output, 'red'))
return output, bash_code
def evaluate_output(output):
messages = [
{"role": "system", "content": "You are ChatGPT, a large language model trained by OpenAI. Evaluate the output of the command for any errors."},
{"role": "user", "content": f"Please evaluate this command output: {output}"}
]
response = chatgpt_query(messages)
return response.strip()
def process_file_input(input_file):
with open(input_file, 'r') as file:
commands = file.readlines()
# initialize the context with the first command
context = ""
# for each command, process the input and update the context
for command in commands:
command = command.strip()
print(colored(f"Processing command: {command}", 'blue'))
# update the function call to get both output and command
        output, command = process_input(command, messages, context)
# only update the context if there's a command
if output and command:
context = output
def main(args):
setup_logging(args.log_level)
if args.input_file:
process_file_input(args.input_file)
else:
global messages
print(colored("Welcome to the GPT-3.5-turbo Shell!", 'green'))
print("Type a command or type 'exit' to quit.")
context = ""
# Initialize the messages list with the starting system message
messages.append({"role": "system", "content": f"You are a {args.os} shell assistant. Your role is to provide commands. Please provide output in a machine-readable json object format with keys 'explanation', 'command', never use sudo or any commands that require interaction. do not add any extra notes. output only exactly as as instructed. if a command is not applicable give the reason why in the explanation field with an empty command value, do not repeat or clarify the previous command if you do not understand the current command. you must not use code blocks and only output raw valid json:"})
while True:
try:
user_input = input(colored("> ", 'cyan'))
output, command = process_input(user_input, messages, context, prev_command=user_input)
if output and command:
context = output
except Exception as e:
tb = traceback.format_exc()
logging.error("An error occurred while processing the user input.")
logging.error(e)
logging.error(tb)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="GPT-3.5-turbo Shell")
parser.add_argument("-i", "--input-file", type=str, help="File with scripted commands")
parser.add_argument("--log-level", type=str, default="INFO", help="Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)")
parser.add_argument("--api-key", type=str, default=os.environ.get("OPENAI_API_KEY"), help="OpenAI API key")
parser.add_argument("--engine", type=str, default="gpt-3.5-turbo", help="OpenAI engine to use")
# auto execute the command
parser.add_argument("--auto_exec", action="store_true", help="Auto execute the command")
# auto retry the command
parser.add_argument("--retry", action="store_true", help="Auto retry the command")
parser.add_argument("--max-retries", type=int, default=3, help="Max retries for the command")
parser.add_argument("--max-tries", type=int, default=3, help="Max tries for the command")
# tokens
parser.add_argument("--tokens", type=int, default=100, help="Max tokens for the command")
# color true or false
parser.add_argument("--color", action="store_true", help="Color the output")
# operating system
parser.add_argument("--os", type=str, default="linux", help="Operating system")
args = parser.parse_args()
if args.input_file and not os.path.isfile(args.input_file):
print("Input file not found. Exiting.")
sys.exit(1)
main(args)
| [
"PLACEHOLDER",
"You are ChatGPT, a large language model trained by OpenAI. Evaluate the output of the command for any errors.",
"Please remember to provide output in a machine-readable json object format with keys 'explanation', 'command'. Try again.",
"Please evaluate this command output: PLACEHOLDER"
] |
2024-01-10 | ianlokh/LLM-Tutorial-Ice-Breaker | agents~linkedin_lookup_agent.py | from tools.tools import get_profile_url
from langchain import PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.agents import initialize_agent, Tool
from langchain.agents import AgentType
# -> str is a function annotation indicating that the function returns a string, but it is
# not enforced at runtime. It mainly helps readers and static type checkers.
def lookup(name: str) -> str:
llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo")
template = """given the name {name_of_person} I want you to get it me a link to their Linkedin profile page.
Your answer should contain only a URL"""
tools_for_agent = [Tool(name="Crawl Google 4 linkedin profile page",
func=get_profile_url,
description="useful for when you need get the Linkedin Page URL",
)
]
agent = initialize_agent(tools=tools_for_agent,
llm=llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True)
prompt_template = PromptTemplate(input_variables=["name_of_person"], template=template)
linkedin_profile_url = agent.run(prompt_template.format_prompt(name_of_person=name))
# llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo")
#
# template = """given the full name {name_of_person} I want you to get it me a link to their Linkedin profile page.
# Your answer should contain only a URL"""
# tools_for_agent1 = [
# Tool(
# name="Crawl Google 4 linkedin profile page",
# func=get_profile_url,
# description="useful for when you need get the Linkedin Page URL",
# ),
# ]
#
# agent = initialize_agent(
# tools_for_agent1, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
# )
# prompt_template = PromptTemplate(
# input_variables=["name_of_person"], template=template
# )
# linkedin_username = agent.run(prompt_template.format_prompt(name_of_person=name))
return linkedin_profile_url
| [
"given the name {name_of_person} I want you to get it me a link to their Linkedin profile page. \n Your answer should contain only a URL",
"name_of_person"
] |
2024-01-10 | ianlokh/LLM-Tutorial-Ice-Breaker | agents~twitter_lookup_agent.py | from tools.tools import get_profile_url
from langchain import PromptTemplate
from langchain.agents import initialize_agent, Tool
from langchain.agents import AgentType
from langchain.chat_models import ChatOpenAI
def lookup(name: str) -> str:
llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo")
template = """given the name {name_of_person} I want you to find a link to their Twitter profile page,
and extract from it their username. In your final answer, only the person's username"""
tools_for_agent_twitter = [
Tool(
name="Crawl Google for Twitter profile page",
func=get_profile_url,
description="useful for when you need to get the Twitter Page URL"
)
]
agent = initialize_agent(
tools_for_agent_twitter,
llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True
)
prompt_template = PromptTemplate(
input_variables=["name_of_person"], template=template
)
twitter_username = agent.run(prompt_template.format_prompt(name_of_person=name))
return twitter_username
# llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo")
# template =
# given the name {name_of_person} I want you to find a link to their Twitter profile page, and extract from it their username
# In Your Final answer only the person's username"""
# tools_for_agent_twitter = [
# Tool(
# name="Crawl Google 4 Twitter profile page",
# func=get_profile_url,
# description="useful for when you need get the Twitter Page URL",
# ),
# ]
#
# agent = initialize_agent(
# tools_for_agent_twitter,
# llm,
# agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
# verbose=True,
# )
# prompt_template = PromptTemplate(
# input_variables=["name_of_person"], template=template
# )
#
# twitter_username = agent.run(prompt_template.format_prompt(name_of_person=name))
#
# return twitter_username
| [
"name_of_person",
"given the name {name_of_person} I want you to find a link to their Twitter profile page, \n and extract from it their username. In your final answer, only the person's username"
] |
2024-01-10 | BrandonVilla191/llamatest | agent_utils.py | from llama_index.llms import OpenAI, ChatMessage, Anthropic, Replicate
from llama_index.llms.base import LLM
from llama_index.llms.utils import resolve_llm
from pydantic import BaseModel, Field
import os
from llama_index.tools.query_engine import QueryEngineTool
from llama_index.agent import OpenAIAgent, ReActAgent
from llama_index.agent.react.prompts import REACT_CHAT_SYSTEM_HEADER
from llama_index import (
VectorStoreIndex,
SummaryIndex,
ServiceContext,
Document
)
from llama_index.prompts import ChatPromptTemplate
from typing import List, cast, Optional
from llama_index import SimpleDirectoryReader
from llama_index.embeddings.utils import resolve_embed_model
from llama_index.tools import QueryEngineTool, ToolMetadata, FunctionTool
from llama_index.agent.types import BaseAgent
from llama_index.agent.react.formatter import ReActChatFormatter
from llama_index.llms.openai_utils import is_function_calling_model
from llama_index.chat_engine import CondensePlusContextChatEngine
from builder_config import BUILDER_LLM
from typing import Dict, Tuple, Any
import streamlit as st
from pathlib import Path
import json
def _resolve_llm(llm: str) -> LLM:
"""Resolve LLM."""
# TODO: make this less hardcoded with if-else statements
# see if there's a prefix
# - if there isn't, assume it's an OpenAI model
# - if there is, resolve it
tokens = llm.split(":")
if len(tokens) == 1:
os.environ["OPENAI_API_KEY"] = st.secrets.openai_key
llm = OpenAI(model=llm)
elif tokens[0] == "local":
llm = resolve_llm(llm)
elif tokens[0] == "openai":
os.environ["OPENAI_API_KEY"] = st.secrets.openai_key
llm = OpenAI(model=tokens[1])
elif tokens[0] == "anthropic":
os.environ["ANTHROPIC_API_KEY"] = st.secrets.anthropic_key
llm = Anthropic(model=tokens[1])
elif tokens[0] == "replicate":
os.environ["REPLICATE_API_KEY"] = st.secrets.replicate_key
llm = Replicate(model=tokens[1])
else:
raise ValueError(f"LLM {llm} not recognized.")
return llm
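# Illustrative sketch (added for clarity; not part of the original module): the
# string formats _resolve_llm is written to accept. The model names below are
# placeholders/assumptions, not recommendations.
_EXAMPLE_LLM_SPECS = [
    "gpt-4-1106-preview",               # no prefix -> treated as an OpenAI model
    "openai:gpt-3.5-turbo",             # explicit OpenAI prefix
    "anthropic:claude-2",               # Anthropic model
    "replicate:meta/llama-2-70b-chat",  # Replicate model
    "local:./models/llama-2-7b.gguf",   # resolved via llama_index's resolve_llm
]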
####################
#### META TOOLS ####
####################
# System prompt tool
GEN_SYS_PROMPT_STR = """\
Task information is given below.
Given the task, please generate a system prompt for an OpenAI-powered bot to solve this task:
{task} \
Make sure the system prompt obeys the following requirements:
- Tells the bot to ALWAYS use tools given to solve the task. NEVER give an answer without using a tool.
- Does not reference a specific data source. The data source is implicit in any queries to the bot,
and telling the bot to analyze a specific data source might confuse it given a
user query.
"""
gen_sys_prompt_messages = [
ChatMessage(
role="system",
content="You are helping to build a system prompt for another bot.",
),
ChatMessage(role="user", content=GEN_SYS_PROMPT_STR),
]
GEN_SYS_PROMPT_TMPL = ChatPromptTemplate(gen_sys_prompt_messages)
def load_agent(
tools: List,
llm: LLM,
system_prompt: str,
extra_kwargs: Optional[Dict] = None,
**kwargs: Any
) -> BaseAgent:
"""Load agent."""
extra_kwargs = extra_kwargs or {}
if isinstance(llm, OpenAI) and is_function_calling_model(llm.model):
# get OpenAI Agent
agent = OpenAIAgent.from_tools(
tools=tools,
llm=llm,
system_prompt=system_prompt,
**kwargs
)
else:
if "vector_index" not in extra_kwargs:
raise ValueError("Must pass in vector index for CondensePlusContextChatEngine.")
vector_index = cast(VectorStoreIndex, extra_kwargs["vector_index"])
rag_params = cast(RAGParams, extra_kwargs["rag_params"])
# use condense + context chat engine
agent = CondensePlusContextChatEngine.from_defaults(
vector_index.as_retriever(similarity_top_k=rag_params.top_k),
)
return agent
class RAGParams(BaseModel):
"""RAG parameters.
Parameters used to configure a RAG pipeline.
"""
include_summarization: bool = Field(default=False, description="Whether to include summarization in the RAG pipeline. (only for GPT-4)")
top_k: int = Field(default=2, description="Number of documents to retrieve from vector store.")
chunk_size: int = Field(default=1024, description="Chunk size for vector store.")
embed_model: str = Field(
default="default", description="Embedding model to use (default is OpenAI)"
)
llm: str = Field(default="gpt-4-1106-preview", description="LLM to use for summarization.")
class ParamCache(BaseModel):
"""Cache for RAG agent builder.
Created a wrapper class around a dict in case we wanted to more explicitly
type different items in the cache.
"""
# arbitrary types
class Config:
arbitrary_types_allowed = True
system_prompt: Optional[str] = Field(default=None, description="System prompt for RAG agent.")
file_paths: List[str] = Field(default_factory=list, description="File paths for RAG agent.")
docs: List[Document] = Field(default_factory=list, description="Documents for RAG agent.")
tools: List = Field(default_factory=list, description="Additional tools for RAG agent (e.g. web)")
rag_params: RAGParams = Field(default_factory=RAGParams, description="RAG parameters for RAG agent.")
agent: Optional[OpenAIAgent] = Field(default=None, description="RAG agent.")
class RAGAgentBuilder:
"""RAG Agent builder.
Contains a set of functions to construct a RAG agent, including:
- setting system prompts
- loading data
- adding web search
- setting parameters (e.g. top-k)
Must pass in a cache. This cache will be modified as the agent is built.
"""
def __init__(self, cache: Optional[ParamCache] = None) -> None:
"""Init params."""
self._cache = cache or ParamCache()
@property
def cache(self) -> ParamCache:
"""Cache."""
return self._cache
def create_system_prompt(self, task: str) -> str:
"""Create system prompt for another agent given an input task."""
llm = BUILDER_LLM
fmt_messages = GEN_SYS_PROMPT_TMPL.format_messages(task=task)
response = llm.chat(fmt_messages)
self._cache.system_prompt = response.message.content
return f"System prompt created: {response.message.content}"
def load_data(
self,
file_names: Optional[List[str]] = None,
urls: Optional[List[str]] = None
) -> str:
"""Load data for a given task.
Only ONE of file_names or urls should be specified.
Args:
file_names (Optional[List[str]]): List of file names to load.
Defaults to None.
urls (Optional[List[str]]): List of urls to load.
Defaults to None.
"""
if file_names is None and urls is None:
raise ValueError("Must specify either file_names or urls.")
elif file_names is not None and urls is not None:
raise ValueError("Must specify only one of file_names or urls.")
elif file_names is not None:
reader = SimpleDirectoryReader(input_files=file_names)
docs = reader.load_data()
file_paths = file_names
elif urls is not None:
from llama_hub.web.simple_web.base import SimpleWebPageReader
# use simple web page reader from llamahub
loader = SimpleWebPageReader()
docs = loader.load_data(urls=urls)
file_paths = urls
else:
raise ValueError("Must specify either file_names or urls.")
self._cache.docs = docs
self._cache.file_paths = file_paths
return "Data loaded successfully."
# NOTE: unused
def add_web_tool(self) -> None:
"""Add a web tool to enable agent to solve a task."""
# TODO: make this not hardcoded to a web tool
# Set up Metaphor tool
from llama_hub.tools.metaphor.base import MetaphorToolSpec
# TODO: set metaphor API key
metaphor_tool = MetaphorToolSpec(
api_key=os.environ["METAPHOR_API_KEY"],
)
metaphor_tool_list = metaphor_tool.to_tool_list()
self._cache.tools.extend(metaphor_tool_list)
return "Web tool added successfully."
def get_rag_params(self) -> Dict:
"""Get parameters used to configure the RAG pipeline.
Should be called before `set_rag_params` so that the agent is aware of the
schema.
"""
rag_params = self._cache.rag_params
return rag_params.dict()
def set_rag_params(self, **rag_params: Dict):
"""Set RAG parameters.
These parameters will then be used to actually initialize the agent.
Should call `get_rag_params` first to get the schema of the input dictionary.
Args:
**rag_params (Dict): dictionary of RAG parameters.
"""
new_dict = self._cache.rag_params.dict()
new_dict.update(rag_params)
rag_params_obj = RAGParams(**new_dict)
self._cache.rag_params = rag_params_obj
return "RAG parameters set successfully."
def create_agent(self) -> None:
"""Create an agent.
There are no parameters for this function because all the
functions should have already been called to set up the agent.
"""
rag_params = cast(RAGParams, self._cache.rag_params)
docs = self._cache.docs
# first resolve llm and embedding model
embed_model = resolve_embed_model(rag_params.embed_model)
# llm = resolve_llm(rag_params.llm)
# TODO: use OpenAI for now
# llm = OpenAI(model=rag_params.llm)
llm = _resolve_llm(rag_params.llm)
# first let's index the data with the right parameters
service_context = ServiceContext.from_defaults(
chunk_size=rag_params.chunk_size,
llm=llm,
embed_model=embed_model,
)
vector_index = VectorStoreIndex.from_documents(docs, service_context=service_context)
vector_query_engine = vector_index.as_query_engine(similarity_top_k=rag_params.top_k)
all_tools = []
vector_tool = QueryEngineTool(
query_engine=vector_query_engine,
metadata=ToolMetadata(
name="vector_tool",
description=("Use this tool to answer any user question over any data."),
),
)
all_tools.append(vector_tool)
if rag_params.include_summarization:
summary_index = SummaryIndex.from_documents(docs, service_context=service_context)
summary_query_engine = summary_index.as_query_engine()
summary_tool = QueryEngineTool(
query_engine=summary_query_engine,
metadata=ToolMetadata(
name="summary_tool",
description=("Use this tool for any user questions that ask for a summarization of content"),
),
)
all_tools.append(summary_tool)
# then we add tools
all_tools.extend(self._cache.tools)
# build agent
if self._cache.system_prompt is None:
return "System prompt not set yet. Please set system prompt first."
agent = load_agent(
all_tools, llm=llm, system_prompt=self._cache.system_prompt, verbose=True,
extra_kwargs={"vector_index": vector_index, "rag_params": rag_params}
)
self._cache.agent = agent
return "Agent created successfully."
####################
#### META Agent ####
####################
RAG_BUILDER_SYS_STR = """\
You are helping to construct an agent given a user-specified task.
You should generally use the tools in this rough order to build the agent.
1) Create system prompt tool: to create the system prompt for the agent.
2) Load in user-specified data (based on file paths they specify).
3) Decide whether or not to add additional tools.
4) Set parameters for the RAG pipeline.
This will be a back and forth conversation with the user. You should
continue asking users if there's anything else they want to do until
they say they're done. To help guide them on the process,
you can give suggestions on parameters they can set based on the tools they
have available (e.g. "Do you want to set the number of documents to retrieve?")
"""
### DEFINE Agent ####
# NOTE: here we define a function that is dependent on the LLM,
# please make sure to update the LLM above if you change the function below
# define agent
@st.cache_resource
def load_meta_agent_and_tools() -> Tuple[OpenAIAgent, RAGAgentBuilder]:
# think of this as tools for the agent to use
agent_builder = RAGAgentBuilder()
fns = [
agent_builder.create_system_prompt,
agent_builder.load_data,
# add_web_tool,
agent_builder.get_rag_params,
agent_builder.set_rag_params,
agent_builder.create_agent
]
fn_tools = [FunctionTool.from_defaults(fn=fn) for fn in fns]
builder_agent = load_agent(
fn_tools, llm=BUILDER_LLM, system_prompt=RAG_BUILDER_SYS_STR, verbose=True
)
return builder_agent, agent_builder
| [
"System prompt for RAG agent.",
"Task information is given below. \n\nGiven the task, please generate a system prompt for an OpenAI-powered bot to solve this task: \n{task} \nMake sure the system prompt obeys the following requirements:\n- Tells the bot to ALWAYS use tools given to solve the task. NEVER give an answer without using a tool.\n- Does not reference a specific data source. The data source is implicit in any queries to the bot,\n and telling the bot to analyze a specific data source might confuse it given a \n user query.\n\n",
"You are helping to build a system prompt for another bot."
] |
2024-01-10 | BrandonVilla191/llamatest | builder_config.py | """Configuration."""
import streamlit as st
import os
### DEFINE BUILDER_LLM #####
## Uncomment the LLM you want to use to construct the meta agent
## OpenAI
from llama_index.llms import OpenAI
# set OpenAI Key - use Streamlit secrets
os.environ["OPENAI_API_KEY"] = st.secrets.openai_key
# load LLM
BUILDER_LLM = OpenAI(model="gpt-4-1106-preview")
# # Anthropic (make sure you `pip install anthropic`)
# from llama_index.llms import Anthropic
# # set Anthropic key
# os.environ["ANTHROPIC_API_KEY"] = st.secrets.anthropic_key
# BUILDER_LLM = Anthropic() | [] |
2024-01-10 | frankjoshua/langchain_presentation | code~chat_bot~chat_bot.py | from langchain.prompts import (
ChatPromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate
)
from langchain.chains import ConversationChain
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
def getBot():
prompt = ChatPromptTemplate.from_messages([
SystemMessagePromptTemplate.from_template(
"The following is a friendly conversation between a human and an AI. The AI is talkative and "
"provides lots of specific details from its context. If the AI does not know the answer to a "
"question, it truthfully says it does not know."
),
MessagesPlaceholder(variable_name="history"),
HumanMessagePromptTemplate.from_template("{input}")
])
llm = ChatOpenAI(temperature=0)
memory = ConversationBufferMemory(return_messages=True)
conversation_chain = ConversationChain(memory=memory, prompt=prompt, llm=llm)
return conversation_chain
# module-level conversation chain used by onMessage
conversation = getBot()

async def onMessage(message, callback):
    await callback("Thinking...")
    response = conversation.predict(input=message)
    await callback(response) | [
"The following is a friendly conversation between a human and an AI. The AI is talkative and ",
"{input}",
"The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.",
"provides lots of specific details from its context. If the AI does not know the answer to a ",
"question, it truthfully says it does not know."
] |
2024-01-10 | frankjoshua/langchain_presentation | code~qa_bot~qa_bot.py | from langchain.document_loaders import BSHTMLLoader
loader = BSHTMLLoader("example_data/fake-content.html")
data = loader.load()
data
async def onMessage(message, callback):
await callback(f"You asked {message}")
await callback("Recieved") | [] |
2024-01-10 | rcalsaverini/sandbox | 00_small_utilities~names~machine.py | from typing import Tuple
import openai
from statemachine import StateMachine, State # type: ignore
from entities import Prompt, Task
from gpt_classifier import get_response_and_append_it
with open("prompts/system.txt", "r") as f:
system_prompt = f.read()
class Classifier(StateMachine):
# states
initializing = State("initializing", initial=True)
waiting = State("waiting")
exiting = State("exited")
classifying_item = State("classify_item")
classifying_list = State("classify_list")
# commands
set_task = initializing.to(waiting)
print_state_initializing = initializing.to(initializing)
just_chat_initializing = initializing.to(initializing)
classify_item = waiting.to(classifying_item)
list_classes = waiting.to(waiting)
finish = waiting.to(exiting)
print_state_waiting = waiting.to(waiting)
just_chat_waiting = waiting.to(waiting)
refine_item = classifying_item.to(classifying_item)
accept_item_class = classifying_item.to(classifying_list)
print_state_classifying_item = classifying_item.to(classifying_item)
just_chat_classifying_item = classifying_item.to(classifying_item)
refine_list = classifying_list.to(classifying_list)
accept_list_class = classifying_list.to(waiting)
print_state_classifying_list = classifying_list.to(classifying_list)
just_chat_classifying_list = classifying_list.to(classifying_list)
print_state_exiting = exiting.to(exiting)
just_chat_exiting = exiting.to(exiting)
def print_state(self):
if self.current_state == self.initializing:
self.print_state_initializing()
elif self.current_state == self.waiting:
self.print_state_waiting()
elif self.current_state == self.classifying_item:
self.print_state_classifying_item()
elif self.current_state == self.classifying_list:
self.print_state_classifying_list()
elif self.current_state == self.exiting:
self.print_state_exiting()
def just_chat(self):
if self.current_state == self.initializing:
self.just_chat_initializing()
elif self.current_state == self.waiting:
self.just_chat_waiting()
elif self.current_state == self.classifying_item:
self.just_chat_classifying_item()
elif self.current_state == self.classifying_list:
self.just_chat_classifying_list()
elif self.current_state == self.exiting:
self.just_chat_exiting()
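# Hedged illustration (added for clarity; not part of the original file): the
# intended happy-path sequence of transitions through the Classifier state
# machine. Triggering a transition from the wrong state raises an error.
def _example_transitions() -> None:
    classifier = Classifier()
    classifier.set_task()           # initializing -> waiting
    classifier.classify_item()      # waiting -> classifying_item
    classifier.accept_item_class()  # classifying_item -> classifying_list
    classifier.accept_list_class()  # classifying_list -> waiting
    classifier.finish()             # waiting -> exiting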
start_prompt: Prompt = [
{"role": "system", "content": system_prompt},
{"role": "assistant", "content": "Hello, I'm Gotcha."},
]
def set_task(starting_prompt: Prompt, task: Task) -> Tuple[str, Prompt]:
prompt = starting_prompt.copy()
task_message = task.render()
prompt.append({"role": "user", "content": task_message})
return get_response_and_append_it(prompt)
def print_state(starting_prompt: Prompt) -> Tuple[str, Prompt]:
prompt = starting_prompt.copy()
prompt.append({'role': 'user', "content": "PRINT STATE"})
return get_response_and_append_it(prompt)
def just_chat(starting_prompt: Prompt, user_message: str) -> Tuple[str, Prompt]:
prompt = starting_prompt.copy()
prompt.append({'role': 'user', "content": user_message})
return get_response_and_append_it(prompt)
def classify_item(starting_prompt: Prompt) -> Tuple[str, Prompt]:
prompt = starting_prompt.copy()
prompt.append({'role': 'user', "content": "CLASSIFY ITEM"})
return get_response_and_append_it(prompt)
def list_classes(starting_prompt: Prompt) -> Tuple[str, Prompt]:
prompt = starting_prompt.copy()
prompt.append({'role': 'user', "content": "LIST CLASSES"})
return get_response_and_append_it(prompt)
def finish(starting_prompt: Prompt) -> Tuple[str, Prompt]:
prompt = starting_prompt.copy()
prompt.append({'role': 'user', "content": "FINISH"})
return get_response_and_append_it(prompt)
def refine_item(starting_prompt: Prompt, new_class: str, justification: str) -> Tuple[str, Prompt]:
prompt = starting_prompt.copy()
prompt.append({'role': 'user', "content": f"REFINE ITEM(CLASS={new_class}, JUSTIFICATION={justification})"})
return get_response_and_append_it(prompt)
def accept_item_class(starting_prompt: Prompt) -> Tuple[str, Prompt]:
prompt = starting_prompt.copy()
prompt.append({'role': 'user', "content": "ACCEPT ITEM"})
return get_response_and_append_it(prompt)
def refine_list(starting_prompt: Prompt, item_to_remove: str, justification: str) -> Tuple[str, Prompt]:
prompt = starting_prompt.copy()
prompt.append({'role': 'user', "content": f"REFINE LIST(REMOVE={item_to_remove}, JUSTIFICATION={justification})"})
return get_response_and_append_it(prompt)
def accept_list_class(starting_prompt: Prompt) -> Tuple[str, Prompt]:
prompt = starting_prompt.copy()
prompt.append({'role': 'user', "content": "ACCEPT LIST"})
return get_response_and_append_it(prompt)
def run_machine_step(prompt: Prompt, classifier: Classifier) -> Prompt:
command = input("$>")
if command == "set task":
description = input("Enter description $>")
task = Task(description, [], [])
print("Enter examples.")
task.read_examples()
print("Enter unclassified items.")
task.read_unclassified()
message, out_prompt = set_task(prompt, task)
print(f"Gotcha says$> {message}")
classifier.set_task()
elif command == "read task":
filename = input("Enter filename $>")
task = Task.from_file(filename)
message, out_prompt = set_task(prompt, task)
classifier.set_task()
elif command == "print state":
message, out_prompt = print_state(prompt)
print(f"Gotcha says$> {message}")
classifier.print_state()
elif command == "classify item":
message, out_prompt = classify_item(prompt)
print(f"Gotcha says$> {message}")
classifier.classify_item()
elif command == "list classes":
message, out_prompt = list_classes(prompt)
print(f"Gotcha says$> {message}")
classifier.list_classes()
elif command == "accept item":
message, out_prompt = accept_item_class(prompt)
print(f"Gotcha says$> {message}")
classifier.accept_item_class()
elif command == "accept list":
message, out_prompt = accept_list_class(prompt)
print(f"Gotcha says$> {message}")
classifier.accept_list_class()
elif command == "refine item":
new_class = input("Enter new class $>")
justification = input("Enter justification $>")
message, out_prompt = refine_item(prompt, new_class, justification)
print(f"Gotcha says$> {message}")
classifier.refine_item()
elif command == "refine list":
item_to_remove = input("Enter item to remove $>")
justification = input("Enter justification $>")
message, out_prompt = refine_list(prompt, item_to_remove, justification)
print(f"Gotcha says$> {message}")
classifier.refine_list()
elif command == "finish":
message, out_prompt = finish(prompt)
print(f"Gotcha says$> {message}")
classifier.finish()
elif command == "quit":
quit()
else:
message, out_prompt = just_chat(prompt, command)
print(f"Gotcha says$> {message}")
classifier.just_chat()
return out_prompt
if __name__ == "__main__":
prompt = start_prompt.copy()
classifier = Classifier()
while True:
prompt = run_machine_step(prompt, classifier) | [
"LIST CLASSES",
"PRINT STATE",
"ACCEPT LIST",
"FINISH",
"Hello, I'm Gotcha.",
"CLASSIFY ITEM",
"ACCEPT ITEM"
] |
2024-01-10 | rcalsaverini/sandbox | 00_small_utilities~names~gpt_classifier.py | import openai
from entities import Prompt
from typing import Tuple
openai.api_key = "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"
def get_response_and_append_it(prompt: Prompt, max_tokens:int=285, temperature:float=0.1) -> Tuple[str, Prompt]:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=prompt,
max_tokens=max_tokens,
temperature=temperature
)
message = response["choices"][0]["message"]["content"]
prompt.append({"role": "user", "content": message})
return (message, prompt) | [] |
2024-01-10 | Kabakoo-app/advanced_nlp | chat_completion_with_prompt.py | # -*- coding: utf-8 -*-
import os
from dotenv import load_dotenv
import openai
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
print("Bienvenue ! Je suis un assistant virtuel polyvalent. Vous pouvez commencer la conversation en posant une question ou en partageant un sujet.\n")
while True:
user_message = input("Utilisateur: ")
if "bye" in user_message.lower():
print("Assistant: Au revoir !")
break
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
# Définir le Rôle de l'Assistant
{"role": "system", "content": "Vous êtes un assistant virtuel polyvalent qui est là pour répondre à une variété de questions et offrir des informations utiles. N'hésitez pas à aider l'utilisateur dans divers domaines."},
#Orienter la Conversation
{"role": "system", "content": "Votre objectif principal est de fournir des réponses claires et informatives. Si vous avez besoin de plus de détails, n'hésitez pas à demander à l'utilisateur de préciser sa question."},
# Promouvoir la Convivialité
{"role": "system", "content": "Assurez-vous de maintenir une communication amicale et respectueuse avec l'utilisateur. Si l'utilisateur a des besoins spécifiques ou des préoccupations, faites preuve de patience et d'empathie dans vos réponses."},
{"role": "user", "content": user_message}
]
)
assistant_reply = response.choices[0].message["content"]
print("\nAssistant:", assistant_reply, "\n")
| [
"Votre objectif principal est de fournir des réponses claires et informatives. Si vous avez besoin de plus de détails, n'hésitez pas à demander à l'utilisateur de préciser sa question.",
"Vous êtes un assistant virtuel polyvalent qui est là pour répondre à une variété de questions et offrir des informations utiles. N'hésitez pas à aider l'utilisateur dans divers domaines.",
"Assurez-vous de maintenir une communication amicale et respectueuse avec l'utilisateur. Si l'utilisateur a des besoins spécifiques ou des préoccupations, faites preuve de patience et d'empathie dans vos réponses."
] |
2024-01-10 | amt102/YTCom | ComDisp~topic_modeling.py | import os
import nltk
# !pip install --upgrade gensim
nltk.download('stopwords')
# !pip install pyLDAvis
import re
import numpy as np
import pandas as pd
from pprint import pprint
import pickle
import en_core_web_sm
nlp = en_core_web_sm.load()
# Gensim
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
# spacy for lemmatization
import spacy
# Plotting tools
import pyLDAvis
import pyLDAvis.gensim # don't skip this
import matplotlib.pyplot as plt
# %matplotlib inline
# Enable logging for gensim - optional
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
from nltk.corpus import stopwords
def sent_to_words(sentences):
for sentence in sentences:
yield (gensim.utils.simple_preprocess(str(sentence), deacc=True)) # deacc=True removes punctuations
# def remove_stopwords(texts):
# return [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]
# def make_bigrams(texts):
# return [bigram_mod[doc] for doc in texts]
# def make_trigrams(texts):
# return [trigram_mod[bigram_mod[doc]] for doc in texts]
def lemmatization(texts, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):
"""https://spacy.io/api/annotation"""
texts_out = []
for sent in texts:
doc = nlp(" ".join(sent))
texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])
return texts_out
def modelTopic(data):
stop_words = stopwords.words('english')
stop_words.extend(['from', 'subject', 're', 'edu', 'use'])
# data = ["I love to study in my school. The teacher is not that cool though",
# "A bigram or digram is a sequence of two adjacent elements from a string of tokens, which are typically letters, syllables, or words.",
# "The NBA's draft lottery won't take place Tuesday in Chicago as originally planned, but whenever it does happen, it is likely to look the same as it did last year, league sources told ESPN.",
# "Since play was suspended March 11 due to the coronavirus pandemic, teams at the top of the standings have been curious about the league restarting because they are in pursuit of a championship. For teams at the bottom of the standings, the focus has been on what the lottery will look like.",
# "I love to code. My teacher is soo cool"
# ]
data = [re.sub('\S*@\S*\s?', '', sent) for sent in data]
data = [re.sub('\s+', ' ', sent) for sent in data]
data = [re.sub("\'", "", sent) for sent in data]
print('data is =')
print(data)
data_words = list(sent_to_words(data))
print('data_words is =')
print(data_words)
bigram = gensim.models.Phrases(data_words, min_count=5, threshold=100) # higher threshold fewer phrases.
print('bigram is =')
print(bigram)
trigram = gensim.models.Phrases(bigram[data_words], threshold=100)
print('tri gram is =')
print(trigram)
bigram_mod = gensim.models.phrases.Phraser(bigram)
trigram_mod = gensim.models.phrases.Phraser(trigram)
texts = data_words
t1 =[[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]
data_words_nostops = t1
texts = data_words_nostops
t2=[bigram_mod[doc] for doc in texts]
data_words_bigrams = t2
print('data_words_bigrams')
print(data_words_bigrams)
data_lemmatized = lemmatization(data_words_bigrams, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])
print('data_lemmatized')
print(data_lemmatized)
id2word = corpora.Dictionary(data_lemmatized)
print('id2word')
print(id2word)
# original_id2word = pickle.load(open("/content/M2", 'rb'))
texts = data_lemmatized
# new_id2word = original_id2word.merge_with(id2word)
corpus = [id2word.doc2bow(text) for text in texts]
unseen_doc = corpus[0]
print('Unseen_doc')
print(unseen_doc)
# pickle.dump(corpus, open("M1", 'wb'))
# [[(id2word[id], freq) for id, freq in cp] for cp in corpus[:1]]
# pickle.dump(id2word, open("M2", 'wb'))
new_lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus,
id2word=id2word,
num_topics=20,
random_state=100,
update_every=1,
chunksize=100,
passes=10,
alpha='auto',
per_word_topics=True)
print('after model')
# pyLDAvis.enable_notebook()
vis = pyLDAvis.gensim.prepare(new_lda_model, corpus, id2word,mds='mmds')
# pyLDAvis.enable_notebook()
# pyLDAvis.display(vis)
print('Before html')
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# print(BASE_DIR)
# p = os.path.join(BASE_DIR,'/ComDisp/templates/comments/LDA.html')
print('Path is ')
# print(BASE_DIR)
# print(p)
p = BASE_DIR+'/ComDisp/templates/comments/LDA.html'
print(p)
pyLDAvis.save_html(vis, p)
print('last print here')
return vis
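# Hedged usage sketch (added for illustration; not part of the original module).
# modelTopic expects a plain list of comment strings; it fits an LDA model on them
# and, as a side effect, writes the pyLDAvis visualisation to
# ComDisp/templates/comments/LDA.html. The sample comments below are made up.
def _example_model_topic():
    sample_comments = [
        "Great video, this really helped me understand the topic.",
        "The audio quality could be better in the second half.",
        "Could you make a follow-up covering more advanced examples?",
    ]
    return modelTopic(sample_comments)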
# stop_words = stopwords.words('english')
# stop_words.extend(['from', 'subject', 're', 'edu', 'use'])
# data = ["I love to study in my school. The teacher is not that cool though",
# "A bigram or digram is a sequence of two adjacent elements from a string of tokens, which are typically letters, syllables, or words.",
# "The NBA's draft lottery won't take place Tuesday in Chicago as originally planned, but whenever it does happen, it is likely to look the same as it did last year, league sources told ESPN.",
# "Since play was suspended March 11 due to the coronavirus pandemic, teams at the top of the standings have been curious about the league restarting because they are in pursuit of a championship. For teams at the bottom of the standings, the focus has been on what the lottery will look like.",
# "I love to code. My teacher is soo cool"
# ]
# data = [re.sub('\S*@\S*\s?', '', sent) for sent in data]
# data = [re.sub('\s+', ' ', sent) for sent in data]
# data = [re.sub("\'", "", sent) for sent in data]
# print('data is =')
# print(data)
# def sent_to_words(sentences):
# for sentence in sentences:
# yield (gensim.utils.simple_preprocess(str(sentence), deacc=True)) # deacc=True removes punctuations
# data_words = list(sent_to_words(data))
# print('data_words is =')
# print(data_words)
# bigram = gensim.models.Phrases(data_words, min_count=5, threshold=100) # higher threshold fewer phrases.
# print('bigram is =')
# print(bigram)
# trigram = gensim.models.Phrases(bigram[data_words], threshold=100)
# print('tri gram is =')
# print(trigram)
# bigram_mod = gensim.models.phrases.Phraser(bigram)
# trigram_mod = gensim.models.phrases.Phraser(trigram)
# def remove_stopwords(texts):
# return [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]
# def make_bigrams(texts):
# return [bigram_mod[doc] for doc in texts]
# def make_trigrams(texts):
# return [trigram_mod[bigram_mod[doc]] for doc in texts]
# def lemmatization(texts, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):
# """https://spacy.io/api/annotation"""
# texts_out = []
# for sent in texts:
# doc = nlp(" ".join(sent))
# texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])
# return texts_out
# data_words_nostops = remove_stopwords(data_words)
# data_words_bigrams = make_bigrams(data_words_nostops)
# print('data_words_bigrams')
# print(data_words_bigrams)
# data_lemmatized = lemmatization(data_words_bigrams, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])
# print('data_lemmatized')
# print(data_lemmatized)
# id2word = corpora.Dictionary(data_lemmatized)
# print('id2word')
# print(id2word)
# # original_id2word = pickle.load(open("/content/M2", 'rb'))
# texts = data_lemmatized
# # new_id2word = original_id2word.merge_with(id2word)
# corpus = [id2word.doc2bow(text) for text in texts]
# unseen_doc = corpus[0]
# print('Unseen_doc')
# print(unseen_doc)
# # pickle.dump(corpus, open("M1", 'wb'))
# # [[(id2word[id], freq) for id, freq in cp] for cp in corpus[:1]]
# # pickle.dump(id2word, open("M2", 'wb'))
# new_lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus,
# id2word=id2word,
# num_topics=20,
# random_state=100,
# update_every=1,
# chunksize=100,
# passes=10,
# alpha='auto',
# per_word_topics=True)
# print('after model')
# pickle.dump(lda_model, open("M", 'wb'))
# lda_model= pickle.load(open("/content/M", 'rb'))
# unseen_doc = corpus[0]
# vec = lda_model[unseen_doc]
# print(vec)
# # lda_model.update(corpus)
# # pprint(lda_model.print_topics())
# original_corpus = pickle.load(open("/content/M1", 'rb'))
# # print(type(id2word))
# # pprint(lda_model.print_topics())
# #
# num_topics = 10
# # Compute Perplexity
# print('\nPerplexity: ', lda_model.log_perplexity(corpus)) # a measure of how good the model is. lower the better.
# doc_lda = lda_model[corpus]
# top_topics = lda_model.top_topics(corpus) #, num_words=20)
# Average topic coherence is the sum of topic coherences of all topics, divided by the number of topics.
# avg_topic_coherence = sum([t[1] for t in top_topics]) / num_topics
# print('Average topic coherence: %.4f.' % avg_topic_coherence)
# from pprint import pprint
# pprint(top_topics)
# Compute Coherence Score
# coherence_model_lda = CoherenceModel(model=lda_model, texts=data_lemmatized, dictionary=id2word, coherence='c_v')
# coherence_lda = coherence_model_lda.get_coherence()
# print('\nCoherence Score: ', coherence_lda)
# Visualize the topics
# pyLDAvis.enable_notebook()
# vis = pyLDAvis.gensim.prepare(new_lda_model, corpus, id2word,mds='mmds')
# # pyLDAvis.enable_notebook()
# pyLDAvis.display(vis) | [] |
2024-01-10 | Traffic-Alpha/TSC-LLM | llm_tsc.py | '''
@Author: WANG Maonan
@Date: 2023-09-04 20:46:09
@Description: LLM-based Traffic Light Control
1. There is a database; we search for the most similar scenario (how should scene similarity be defined?), and the result can be stored in memory or placed in the query.
2. Checks for the different actions
    - getAvailableActions, get all currently available actions
    - get queue length of all phases
    - get emergency vehicle
    - check possible queue length of all actions
    - which phase follows each action once it is executed
    - if this action is executed, a prediction of the future scene
    - the total queue length of the current scene
    - take buses and ambulances into account
3. Extract the scene data: each phase consists of several movements, and each movement has its own queue state at this moment; this data needs to be stored
4. Here we first implement LLM control for a single intersection
@LastEditTime: 2023-09-15 17:29:45
'''
import langchain
import numpy as np
from langchain.chat_models import ChatOpenAI
from tshub.utils.get_abs_path import get_abs_path
from tshub.utils.init_log import set_logger
from TSCEnvironment.tsc_env import TSCEnvironment
from TSCEnvironment.tsc_env_wrapper import TSCEnvWrapper
from TSCAgent.tsc_agent import TSCAgent
from TSCAgent.output_parse import OutputParse
from TSCAgent.custom_tools import (
GetAvailableActions,
GetCurrentOccupancy,
GetPreviousOccupancy,
GetIntersectionLayout,
GetSignalPhaseStructure,
GetTraditionalDecision,
GetEmergencyVehicle,
GetJunctionSituation
)
from utils.readConfig import read_config
langchain.debug = False # set to True to enable verbose output
path_convert = get_abs_path(__file__)
set_logger(path_convert('./'))
if __name__ == '__main__':
# Init Chat
config = read_config()
openai_proxy = config['OPENAI_PROXY']
openai_api_key = config['OPENAI_API_KEY']
chat = ChatOpenAI(
model=config['OPENAI_API_MODEL'],
temperature=0.0,
openai_api_key=openai_api_key,
openai_proxy=openai_proxy
)
# Init scenario
sumo_cfg = path_convert("./TSCScenario/J1/env/J1.sumocfg")
database_path = path_convert("./junction.db")
tsc_scenario = TSCEnvironment(
sumo_cfg=sumo_cfg,
num_seconds=300,
tls_id='J4',
tls_action_type='choose_next_phase',
use_gui=True
)
tsc_wrapper = TSCEnvWrapper(
env=tsc_scenario,
database=database_path
)
# Init Agent
o_parse = OutputParse(env=None, llm=chat)
tools = [
GetIntersectionLayout(env=tsc_wrapper),
GetSignalPhaseStructure(env=tsc_wrapper),
GetCurrentOccupancy(env=tsc_wrapper),
GetPreviousOccupancy(env=tsc_wrapper),
GetTraditionalDecision(env=tsc_wrapper),
GetAvailableActions(env=tsc_wrapper),
GetJunctionSituation(env=tsc_wrapper),
]
tsc_agent = TSCAgent(env=tsc_wrapper, llm=chat, tools=tools, verbose=True)
# Start Simulation
dones = False
sim_step = 0
    phase_id = 0 # id of the current action
    last_step_explanation = "" # reason for the decision made
states = tsc_wrapper.reset()
while not dones:
if (sim_step > 120) and (sim_step < 156):
if (sim_step > 130) and (sim_step < 145):
tsc_wrapper.set_edge_speed(edge_id='E2', speed=3)
else:
tsc_wrapper.set_edge_speed(edge_id='E2', speed=13)
agent_response = tsc_agent.agent_run(
sim_step=sim_step,
                last_step_action=phase_id, # action taken at the previous step
                last_step_explanation=last_step_explanation # explanation given at the previous step
)
print(f'Parser Output, {agent_response}')
agent_action = o_parse.parser_output(agent_response)
phase_id = agent_action['phase_id']
last_step_explanation = agent_action['explanation']
elif sim_step < 120:
phase_id = np.random.randint(2)
last_step_explanation = ""
else:
phase_max_occupancy, preliminary_decision = tsc_wrapper.get_traditional_decision()
phase_id = int(preliminary_decision.split()[-1])
last_step_explanation = ""
states, dones, infos = tsc_wrapper.step(action=phase_id, explanation=last_step_explanation)
sim_step = infos['step_time']
print(f'---\nSim Time, {sim_step}\n---')
tsc_wrapper.close()
| [] |
2024-01-10 | Traffic-Alpha/TSC-LLM | TSCAgent~tsc_agent.py | '''
@Author: WANG Maonan
@Date: 2023-09-04 20:51:49
@Description: traffic light control LLM Agent
@LastEditTime: 2023-10-16 00:01:59
'''
from typing import List
from loguru import logger
from langchain.chat_models import ChatOpenAI
from langchain.agents import initialize_agent, AgentType
from langchain.agents.tools import Tool
from langchain.memory import ConversationSummaryMemory
from tshub.utils.get_abs_path import get_abs_path
from TSCAgent.callback_handler import create_file_callback
from TSCAgent.tsc_agent_prompt import (
SYSTEM_MESSAGE_SUFFIX,
SYSTEM_MESSAGE_PREFIX,
HUMAN_MESSAGE,
FORMAT_INSTRUCTIONS,
TRAFFIC_RULES,
DECISION_CAUTIONS,
HANDLE_PARSING_ERROR
)
class TSCAgent:
def __init__(self,
env,
llm:ChatOpenAI,
tools:List[Tool],
verbose:bool=True
) -> None:
self.env = env
self.llm = llm # ChatGPT Model
# callback
path_convert = get_abs_path(__file__)
self.file_callback = create_file_callback(path_convert('../agent.log'))
        self.tools = [] # tools the agent can use
for ins in tools:
func = getattr(ins, 'inference')
self.tools.append(
Tool(name=func.name, description=func.description, func=func)
)
self.memory = ConversationSummaryMemory(
llm=self.llm,
)
self.agent = initialize_agent(
            tools=self.tools, # all the tools that can be used
llm=self.llm,
agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
verbose=verbose,
memory=self.memory,
agent_kwargs={
'system_message_prefix': SYSTEM_MESSAGE_PREFIX,
'syetem_message_suffix': SYSTEM_MESSAGE_SUFFIX,
'human_message': HUMAN_MESSAGE,
# 'format_instructions': FORMAT_INSTRUCTIONS,
},
handle_parsing_errors=HANDLE_PARSING_ERROR,
max_iterations=8,
early_stopping_method="generate",
)
def agent_run(self, sim_step:float, last_step_action, last_step_explanation):
"""Agent Run
"""
logger.info(f"SIM: Decision at step {sim_step} is running:")
        # retrieve similar scenarios together with their actions and explanations
llm_response = self.agent.run(
f"""
You, the 'traffic signal light', are now controlling the traffic signal in the junction with ID `{self.env.env.tls_id}`. You have already control for {sim_step} seconds.
The decision you made LAST time step was `{last_step_action}`. Your explanation was `{last_step_explanation}`.
Please make decision for the traffic signal light. You have to describe the **Static State** and **Dynamic State** of the `traffic light`, including **Intersection Layout**, **Signal Phase Structure** and **Current Occupancy**. Then you need to determine whether the environment is a long-tail problem. If it's not a long-tail problem, you can refer to the Traditional Decision and provide an explanation based on the scene you observed. If it's a long-tail scenario, you need to analyze the possible actions and make a judgment on your own, and finally output your decision.
There are several rules you need to follow when you control the traffic lights:
{TRAFFIC_RULES}
Here are your attentions points:
{DECISION_CAUTIONS}
Let's take a deep breath and think step by step. Once you made a final decision, output it in the following format: \n
```
Final Answer:
"decision":{{"traffic signal light decision, ONE of the available actions"}},
"expalanations":{{"your explaination about your decision, described your suggestions to the Crossing Guard"}}
``` \n
""",
callbacks=[self.file_callback]
)
self.memory.clear()
return llm_response | [] |
2024-01-10 | Traffic-Alpha/TSC-LLM | test~check_langchain.py | '''
@Author: WANG Maonan
@Date: 2023-10-15 23:11:49
@Description: Use this file to test whether your gpt works
@LastEditTime: 2023-11-24 22:41:33
'''
import sys
from pathlib import Path
parent_directory = Path(__file__).resolve().parent.parent
if str(parent_directory) not in sys.path:
sys.path.insert(0, str(parent_directory))
from utils.readConfig import read_config
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
customer_email = """
Arrr, I be fuming that me blender lid \
flew off and splattered me kitchen walls \
with smoothie! And to make matters worse,\
the warranty don't cover the cost of \
cleaning up me kitchen. I need yer help \
right now, matey!
"""
custom_style = """American English \
in a calm and respectful tone, at last translate to Simplified Chinese
"""
if __name__ == '__main__':
config = read_config()
openai_proxy = config['OPENAI_PROXY']
openai_api_key = config['OPENAI_API_KEY']
openai_api_base = config['OPENAI_API_BASE']
openai_model = config['OPENAI_API_MODEL']
    # initialize the model
chat = ChatOpenAI(
model=openai_model,
openai_api_key=openai_api_key,
openai_proxy=openai_proxy,
openai_api_base=openai_api_base,
temperature=0.0,
)
    # create the prompt template (it can be reused with different parameters)
template_string = """Translate the text \
that is delimited by triple backticks
into a style that is {style}.
text: ```{text}```
"""
prompt_templete = ChatPromptTemplate.from_template(template_string)
    # fill the template with the actual parameters
custom_message = prompt_templete.format_messages(
style = custom_style,
text = customer_email
)
    # pass the prompt to the chat model
    print(f'Input messages:\n{custom_message}')
    custom_response = chat(custom_message)
    print(f'Response:\n{custom_response}') | [
"Translate the text that is delimited by triple backticks \n into a style that is {style}.\n text: ```{text}```\n "
] |
2024-01-10 | Traffic-Alpha/TSC-LLM | TSCAgent~output_parse.py | '''
@Author: WANG Maonan
@Date: 2023-09-18 17:52:04
@Description: Output parse
@LastEditTime: 2023-10-15 23:54:16
'''
from langchain.output_parsers import ResponseSchema
from langchain.output_parsers import StructuredOutputParser
from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
class OutputParse(object):
def __init__(self, env=None, llm=None) -> None:
self.sce = env
self.llm = llm
self.response_schemas = [
ResponseSchema(
name="phase_id", description=f"output the id(int) of the traffic phase. For example, if the final decision is signal phase 1, please output 1 as a int."),
ResponseSchema(
name="explanation", description=f"Explain for the Crossing Guard why you make such decision.")
]
self.output_parser = StructuredOutputParser.from_response_schemas(self.response_schemas)
self.format_instructions = self.output_parser.get_format_instructions()
def parser_output(self, final_results:str) -> str:
prompt_template = ChatPromptTemplate(
messages=[
HumanMessagePromptTemplate.from_template(
"Parse the problem response follow the format instruction.\nformat_instructions:{format_instructions}\n Response: {answer}")
],
input_variables=["answer"],
partial_variables={"format_instructions": self.format_instructions}
)
custom_message = prompt_template.format_messages(
answer = final_results,
)
output = self.llm(custom_message)
self.final_parsered_output = self.output_parser.parse(output.content)
return self.final_parsered_output | [
"answer",
"format_instructions",
"Parse the problem response follow the format instruction.\nformat_instructions:{format_instructions}\n Response: {answer}"
] |
2024-01-10 | lemolatoon/talkotify | src~talkotify~__main__.py | from io import BytesIO
import json
import sys
import openai
from talkotify.microphone import get_audio_from_mic
from .env import OPENAI_API_KEY, init_env, checked_get
from .spotify import get_available_genres, get_device_id, play, search, functions, search_by_genres
from .google import search_by_google
openai.api_key = OPENAI_API_KEY
def voice_to_text() -> str:
audio = get_audio_from_mic()
audio_data = BytesIO(audio.get_wav_data())
audio_data.name = 'from_mic.wav'
transcript = openai.Audio.transcribe('whisper-1', audio_data, language="ja")
return transcript['text']
def run():
question = voice_to_text()
print(f"user query: {question}")
device_id = get_device_id()
print(f"device_id: {device_id}")
    # Stage 1:
    # the model decides which function to use for the question and which arguments it needs.
    # If no function call is necessary, it simply answers the question directly.
messages = [
{"role": "system", "content": "You are an AI assistant, which search songs and play suitable song for user."},
{"role": "user", "content": question}
]
while True:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=messages,
functions=functions,
function_call="auto",
)
message = response["choices"][0]["message"]
messages.append(message)
if message.get("function_call"):
            # The model decided that a function should be called.
            # Name of the function it chose to use
            function_name = message["function_call"]["name"]
            # The arguments for that call, as a dict
            arguments = json.loads(message["function_call"]["arguments"])
            # Stage 2:
            # execute the chosen function
if function_name == "play_song":
print("play: ", arguments["id"])
play(
device_id=device_id,
uri=arguments.get("id"),
)
break
elif function_name == "get_available_genres":
print(f"calling get_available_genres of spotify API")
function_response = get_available_genres()
messages.append(
{
"role": "function",
"name": function_name,
"content": json.dumps(function_response),
},
)
continue
elif function_name == "search_by_genres":
genres = arguments.get("genres")
print(f"using spotify genre search: {genres}")
function_response = search_by_genres(
genres=",".join(genres)
)
messages.append(
{
"role": "function",
"name": function_name,
"content": json.dumps(function_response),
},
)
continue
else:
query = arguments.get("query")
print(f"using spotify search: {query}")
function_response = search(
query=query
)
messages.append(
{
"role": "function",
"name": function_name,
"content": json.dumps(function_response),
},
)
continue
print("dame")
print(message["content"])
break
def raspi_run():
import RPi.GPIO
RPi.GPIO.setmode(RPi.GPIO.BCM)
RPi.GPIO.setup(18, RPi.GPIO.IN)
print("Press button to talk to play a song")
while True:
if RPi.GPIO.input(18) != RPi.GPIO.LOW:
# HIGH
try:
run()
except Exception as e:
print(e)
print("Press button to talk to play a song")
if __name__ == "__main__":
# run()
raspi_run()
| [
"You are an AI assistant, which search songs and play suitable song for user."
] |
2024-01-10 | elehman16/gpt4_bias | get_gpt4_dist.py | import os
import json
import openai
import argparse
from tqdm import tqdm
from utils import run_prompts
from config import PROMPTS_EXPLICIT_DEMOGRAPHICS, PROMPTS_NO_DEMOGRAPHICS
def generate_prompts(condition: str, demographics: bool) -> list[list[str]]:
"""Generate a ton of prompts. If demographics is true, explicitely ask the model to include demographic information."""
all_prompts = []
prompts_to_use = PROMPTS_EXPLICIT_DEMOGRAPHICS if demographics else PROMPTS_NO_DEMOGRAPHICS
for prompt in prompts_to_use:
query = [
{"role": "user", "content": prompt.format(condition)},
]
all_prompts.append(query)
return all_prompts
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument('--output_dir', type=str, default='output/')
argparser.add_argument('--temperature', type=float, default=0.7)
argparser.add_argument('--max_tokens', type=int, default=100)
argparser.add_argument('--num_samples', type=int, default=25)
argparser.add_argument('--condition', type=str, required=True)
argparser.add_argument('--demographics', action='store_true')
args = argparser.parse_args()
# Create output directory
os.makedirs(args.output_dir, exist_ok=True)
# Create prompts
all_prompts = generate_prompts(demographics=args.demographics, condition=args.condition)
# Run prompts
results = run_prompts(all_prompts, args.num_samples, args.temperature, args.max_tokens)
# Save results
save_str = 'results_{}_temp_{}_num_samples_{}_max_tokens_{}_condition_{}.json'
save_str = save_str.format(
'demographics' if args.demographics else 'no_demographics',
args.temperature,
args.num_samples,
args.max_tokens,
args.condition
)
with open(os.path.join(args.output_dir, save_str), 'w') as f:
json.dump(results, f, indent=4)
| [
"[]"
] |
2024-01-10 | Aadhithya-D/Generative-AI | final.py | from langchain import ConversationChain
from langchain.chat_models import ChatOpenAI
from langchain.chains.conversation.memory import ConversationSummaryMemory, ConversationBufferMemory
import openai
api_key = "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"
openai.api_key = api_key
if not api_key:
print('OpenAI API key not found in environment variables.')
exit()
llm = ChatOpenAI(temperature=0.0, model_name='gpt-3.5-turbo', openai_api_key=api_key)
# memory = ConversationSummaryMemory(llm=llm, max_token_limit=2000)
memory = ConversationBufferMemory()
chain = ConversationChain(
llm=llm,
memory=memory
)
while True:
query = input("Human: ")
ai_message = chain.predict(input=query)
print("AI: "+ai_message)
| [] |
2024-01-10 | Aadhithya-D/Generative-AI | gpt_test.py | # Import the os package
import os
# Import the openai package
import openai
openai.api_key = "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"
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "system", "content": 'You are a AI engineer.'},
{"role": "user", "content": 'I want you to create a workflow for generating a chatbot, that gives fashion advice.'},
])
print(response["choices"][0]["message"]["content"]) | [
"You are a AI engineer.",
"I want you to create a workflow for generating a chatbot, that gives fashion advice."
] |
2024-01-10 | ianhuang0630/Aladdin | sem_complete~sem_complete~Brainstorm.py | """
Implementation of modules to query GPT3 to generate interesting/useful
scene descriptions.
"""
import openai
from bson.objectid import ObjectId
import sys
from sem_complete.sem_complete.Template import SceneQueryTemplate
from sem_complete.sem_complete.Parser import starListParser, starKeyValParser, parseDescriptionCoordinates
from sem_complete.sem_complete.SceneGraphs import SceneShoppingList, SceneObjectClass
from loguru import logger
class GPTInterface(object):
def __init__(self, model_type='text-davinci-003', api_key=None,
default_temperature=None, default_max_tokens=None):
if openai.api_key is None:
assert api_key is not None
openai.api_key = api_key
self.model_type = model_type
self.temperature = default_temperature
self.max_tokens = default_max_tokens
        # data structures to keep track of conversation history.
self.prompts = []
self.conditionings = []
self.questions = []
self.responses = []
def query(self, conditioning, question, temperature=None, max_tokens=None):
prompt = ''.join([conditioning, question])
response = openai.Completion.create(model=self.model_type,
prompt=prompt,
temperature=temperature if temperature is not None else self.temperature,
max_tokens=max_tokens if max_tokens is not None else self.max_tokens)
response = response['choices'][0]['text']
response = response.strip()
self.conditionings.append(conditioning)
self.questions.append(question)
self.prompts.append(prompt)
self.responses.append(response)
return response
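    # Illustrative usage sketch (hypothetical strings; needs a valid key for the legacy Completions API):
    #   gpt = GPTInterface(api_key="...", default_temperature=0.8, default_max_tokens=256)
    #   text = gpt.query(conditioning="Here is a list of items in a kitchen:\n",
    #                    question="Here is a list of items in a garage:")
    #   # the call is also recorded in .prompts / .conditionings / .questions / .responses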
class Brainstormer(object):
def __init__(self, gpt_interface=None, temperature=0.8, max_tokens=1024):
if gpt_interface is None:
with open('credentials/openai_key', 'r') as f:
token = f.readlines()
token = token[0].strip()
self.interface = GPTInterface(api_key=token,
default_temperature=0.8,
default_max_tokens=1024)
else:
assert isinstance(gpt_interface, GPTInterface)
self.interface = gpt_interface
# class HumanInTheLoopBrainstormer(Brainstormer):
# """
# A class that takes in user commands in the loop
# """
# def __init__(self, interface=None, temperature=0.8, max_tokens=1024):
# super().__init__(gpt_interface=interface,
# temperature=temperature,
# max_tokens=max_tokens)
# def run_iteration(self):
# """
# Runs a single iteration of adding detail to the current scene.
# """
# pass
# def gather_feedback(self, user_input):
# """
# Gathers and parses user feedback after ever iteration. Sets the
# internal state of the Brainstormer to run the next iteration with
# the correct goals.
# """
# pass
class IterativeBrainstormer(Brainstormer):
def __init__(self, interface=None, temperature=0.8, max_tokens=1024):
super().__init__(gpt_interface=interface,
temperature=temperature,
max_tokens=max_tokens)
def generate_anchors(self, description):
sqt = SceneQueryTemplate('sem_complete/templates/iterative/frenchrestaurant_anchors.txt')
question = f"""
Here we are building a 3D scene of {description}. At each step, we are not adding more than 8 assets in total into the scene.
First, we place the most important assets (e.g. furnitures, bigger objects) and use those as our anchors. Here is a list of them:
"""
prompt, conditioning, gpt_question = sqt.apply_directly(question)
response = self.interface.query(conditioning, gpt_question)
categories = starKeyValParser(response)
return {'complete_prompt': prompt,
'conditioning': conditioning,
'question': question,
'gpt_question': gpt_question,
'response': response,
'parsed': categories}
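    # The helpers in this class all return this same dict shape: the raw prompt pieces
    # ('complete_prompt', 'conditioning', 'question', 'gpt_question', 'response') plus
    # 'parsed', the structured output of starKeyValParser.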
def enhance_at(self, description, category, prompt_prepend):
sqt = SceneQueryTemplate(['sem_complete/templates/iterative/frenchrestaurant_anchors.txt',
'sem_complete/templates/iterative/frenchrestaurant_enhance.txt'])
question = f"""
Next we enhance the scene with more assets, in relation to the anchor objects.
In relation to the `{category}`, here is the list of assets we add:
"""
prompt, conditioning, gpt_question = sqt.apply_directly(question if prompt_prepend is None else prompt_prepend + question)
response = self.interface.query(conditioning, gpt_question)
categories = starKeyValParser(response)
return {'complete_prompt': prompt,
'conditioning': conditioning,
'question': question,
'gpt_question': gpt_question,
'response': response,
'parsed': categories}
def physical_condition_category(self, description, prompt_prepend):
sqt = SceneQueryTemplate(['sem_complete/templates/iterative/frenchrestaurant_nodeattributes.txt',
'sem_complete/templates/iterative/frenchrestaurant_nodephyscond.txt'])
question = f"Describe the physical condition of these items in a scene of {description} :"
prompt, conditioning, gpt_question = sqt.apply_directly(question if prompt_prepend is None else prompt_prepend + question)
response = self.interface.query(conditioning, gpt_question)
categories = starKeyValParser(response)
return {'complete_prompt': prompt,
'conditioning': conditioning,
'question': question,
'gpt_question': gpt_question,
'response': response,
'parsed': categories}
def describe_category(self, description, prompt_prepend):
sqt = SceneQueryTemplate(['sem_complete/templates/iterative/frenchrestaurant_nodes.txt',
'sem_complete/templates/iterative/frenchrestaurant_nodeattributes.txt'])
question = f"""
Suppose we want to create a shopping list for the items we need to create the above scene of {description}. It would look like, being specific about the brand and the visual properties:
"""
prompt, conditioning, gpt_question = sqt.apply_directly(question if prompt_prepend is None else prompt_prepend + question)
response = self.interface.query(conditioning, gpt_question)
categories = starKeyValParser(response)
return {'complete_prompt': prompt,
'conditioning': conditioning,
'question': question,
'gpt_question': gpt_question,
'response': response,
'parsed': categories}
def run(self, description:str, num_iterations:int):
"""
Args:
            description: abstract scene description
num_iterations: number of times gpt is queried to generate objects around the anchor objects.
"""
anchor_results = self.generate_anchors(description)
# TODO: get positions and attributes
prompt_prepend = f"Here is a list of items found within a scene of {description}:\n"+anchor_results['response']
if not prompt_prepend.endswith("\n"):
prompt_prepend += "\n"
node_attributes = self.describe_category(description, prompt_prepend)
prompt_prepend = node_attributes['question'] + '\n' + node_attributes['response']
if not prompt_prepend.endswith("\n"):
prompt_prepend += "\n"
node_physical_condition = self.physical_condition_category(description, prompt_prepend)
ssl = SceneShoppingList()
anchor_object_classes = []
for category in anchor_results['parsed']:
if category not in node_attributes['parsed'] or \
category not in node_physical_condition['parsed']:
logger.info(f'category {category} not found in node_attributes or node_physical_condition')
continue
new_object_class = SceneObjectClass(class_name=category,
instances=anchor_results['parsed'][category],
                                                attributes=', '.join(node_attributes['parsed'][category][1:]) + ', ' + ', '.join(node_physical_condition['parsed'][category]))
anchor_object_classes.append(new_object_class)
ssl.add_objectclass(new_object_class, parent=None)
for i in range(num_iterations):
# NOTE iterating through the same set of anchor points. Change if you want the recursion to continue.
for category, category_object_class in zip(anchor_results['parsed'], anchor_object_classes):
#TODO: try & catch for errors when output isn't parsable.
# check the number of instances there are.
prompt_prepend = anchor_results['question'] + '\n' + anchor_results['response']
if not prompt_prepend.endswith("\n"):
prompt_prepend += "\n"
enhance_results = self.enhance_at(description, category, prompt_prepend)
prompt_prepend = f"Here is a list of items found within a scene of {description}:\n"+enhance_results['response']
if not prompt_prepend.endswith("\n"):
prompt_prepend += "\n"
node_attributes = self.describe_category(description, prompt_prepend)
prompt_prepend = node_attributes['question'] + '\n' + node_attributes['response']
if not prompt_prepend.endswith("\n"):
prompt_prepend += "\n"
node_physical_condition = self.physical_condition_category(description, prompt_prepend)
for category in enhance_results['parsed']:
if category not in node_attributes['parsed'] or \
category not in node_physical_condition['parsed']:
logger.info(f'category {category} not found in node_attributes or node_physical_condition')
continue
new_object_class = SceneObjectClass(class_name=category,
instances=enhance_results['parsed'][category],
                                                        attributes=', '.join(node_attributes['parsed'][category][1:]) + ', ' + ', '.join(node_physical_condition['parsed'][category]))
ssl.add_objectclass(new_object_class, category_object_class)
logger.info(str(ssl))
return ssl
class OnePassBrainStormer(Brainstormer):
def __init__(self, interface=None, temperature=0.8, max_tokens=1024):
super().__init__(gpt_interface=interface,
temperature=temperature,
max_tokens=max_tokens)
def generate_categories(self, description: str) -> dict:
sqt = SceneQueryTemplate('sem_complete/templates/onepass/frenchrestaurant_nodelist.txt')
question = "Here is a list of items one would find in {}:".format(description)
prompt, conditioning, gpt_question = sqt.apply_directly(question)
response = self.interface.query(conditioning, gpt_question)
categories = starListParser(response)
return {'complete_prompt': prompt,
'conditioning': conditioning,
'question': question,
'gpt_question': gpt_question,
'response': response,
'parsed': categories}
def generate_num_instances(self, description: str, categories:list, prompt_prepend:str=None) -> dict:
sqt = SceneQueryTemplate(['sem_complete/templates/onepass/frenchrestaurant_nodelist.txt', 'templates/frenchrestaurant_nodenuminstances.txt'])
question = "Here is a list of the number of items one would find in {}:".format(description)
# if prompt_prepend is not None:
# question = prompt_prepend + question
prompt, conditioning, gpt_question = sqt.apply_directly(question if prompt_prepend is None else prompt_prepend + question)
response = self.interface.query(conditioning, gpt_question)
cat2numinstances = starKeyValParser(response)
return {'complete_prompt': prompt,
'conditioning': conditioning,
'question': question,
'gpt_question': gpt_question,
'response': response,
'parsed': cat2numinstances}
def generate_positioning(self, description: str, cat2instances: dict, prompt_prepend:str=None) -> dict:
# TODO
instance2relativeposition = None
sqt = SceneQueryTemplate(['sem_complete/templates/onepass/frenchrestaurant_nodenuminstances.txt', 'templates/frenchrestaurant_nodepositions.txt'])
question = "Describe their relative placements to at least two items, with their coordinates from birds-eye view:"
# if prompt_prepend is not None:
# question = prompt_prepend + question
prompt, conditioning, gpt_question = sqt.apply_directly(question if prompt_prepend is None else prompt_prepend + question)
response = self.interface.query(conditioning, gpt_question)
instance2relativeposition = starKeyValParser(response, val_parser=parseDescriptionCoordinates)
return {'complete_prompt': prompt,
'conditioning': conditioning,
'question': question,
'gpt_question': gpt_question,
'response': response,
'parsed': instance2relativeposition}
def generate_cat_attributes(self, description: str, categories: list, prompt_prepend:str=None) -> dict:
sqt = SceneQueryTemplate(['sem_complete/templates/onepass/frenchrestaurant_nodelist.txt', 'templates/frenchrestaurant_nodeattributes.txt']) # loads the node attributes txt
question = "For every item above, list the item and its material properties."
# if prompt_prepend is not None:
# question = prompt_prepend + question
prompt, conditioning, gpt_question = sqt.apply_directly(question if prompt_prepend is None else prompt_prepend + question)
response = self.interface.query(conditioning, gpt_question)
cat2attributes = starKeyValParser(response)
return {'complete_prompt': prompt,
'conditioning': conditioning,
'question': question,
'gpt_question': gpt_question,
'response': response,
'parsed': cat2attributes}
def generate_instance_attributes(self, description: str, cat2instances: dict) -> dict:
pass
def run(self, description: str) -> dict:
"""
"""
# step1: generate categories
out = self.generate_categories(description)
category_list = out['parsed']
category_question = out['question']
category_response = out['response']
prompt_prepend = category_question + '\n' + category_response
if not prompt_prepend.endswith("\n"):
prompt_prepend += "\n"
# step2: attributes per category
out = self.generate_cat_attributes(description, category_list, prompt_prepend=prompt_prepend)
cat2catattributes = out['parsed']
# step3: generate # of each class
out = self.generate_num_instances(description, category_list, prompt_prepend=prompt_prepend)
cat2numinstances = out['parsed']
inst_question = out['question']
inst_response = out['response']
# step4: positional information.
prompt_prepend = inst_question + '\n' + inst_response
if not prompt_prepend.endswith("\n"):
prompt_prepend += "\n"
out = self.generate_positioning(description, cat2numinstances, prompt_prepend=prompt_prepend)
positions = out['parsed']
return {'category_list': category_list,
'category_attributes': cat2catattributes,
'category_instances': cat2numinstances,
'positions': positions}
if __name__ == '__main__':
IB = IterativeBrainstormer()
# IB.run('a messy living room', 1)
IB.run('an abandoned warzone in ukraine', 1)
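    # A OnePassBrainStormer sketch (illustrative only; mirrors the keys built in its run()):
    #   OPB = OnePassBrainStormer()
    #   out = OPB.run('a messy living room')
    #   # out keys: 'category_list', 'category_attributes', 'category_instances', 'positions'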
| [
"\n",
"['sem_complete/templates/onepass/frenchrestaurant_nodelist.txt', 'templates/frenchrestaurant_nodeattributes.txt']",
"PLACEHOLDER\nPLACEHOLDER",
"['sem_complete/templates/onepass/frenchrestaurant_nodelist.txt', 'templates/frenchrestaurant_nodenuminstances.txt']",
"['sem_complete/templates/iterative/frenchrestaurant_nodeattributes.txt', 'sem_complete/templates/iterative/frenchrestaurant_nodephyscond.txt']",
"['sem_complete/templates/iterative/frenchrestaurant_anchors.txt', 'sem_complete/templates/iterative/frenchrestaurant_enhance.txt']",
"['sem_complete/templates/iterative/frenchrestaurant_nodes.txt', 'sem_complete/templates/iterative/frenchrestaurant_nodeattributes.txt']",
"Here is a list of items found within a scene of PLACEHOLDER:\nPLACEHOLDER",
"sem_complete/templates/onepass/frenchrestaurant_nodelist.txt",
"['sem_complete/templates/onepass/frenchrestaurant_nodenuminstances.txt', 'templates/frenchrestaurant_nodepositions.txt']",
"sem_complete/templates/iterative/frenchrestaurant_anchors.txt"
] |