date_collected | repo_name | file_name | file_contents | prompts |
---|---|---|---|---|
2024-01-10 | c0sogi/LLMChat | app~utils~chat~commands~browsing.py | from typing import Optional
from app.models.chat_models import ResponseType
from app.models.function_calling.functions import FunctionCalls
from app.models.llms import OpenAIModel
from app.utils.chat.buffer import BufferedUserContext
from app.utils.chat.messages.handler import MessageHandler
from app.utils.function_calling.query import aget_query_to_search
class BrowsingCommands:
@staticmethod
async def browse(
user_query: str, /, buffer: BufferedUserContext, **kwargs
) -> tuple[Optional[str], ResponseType]:
"""Query LLM with duckduckgo browse results\n
/browse <query>"""
if user_query.startswith("/"):
# User is trying to invoke another command.
# Give control back to the command handler,
# and let it handle the command.
# e.g. `/browse /help` will invoke `/help` command
return user_query, ResponseType.REPEAT_COMMAND
# Save user query to buffer and database
await MessageHandler.user(msg=user_query, buffer=buffer)
if not isinstance(buffer.current_llm_model.value, OpenAIModel):
# Non-OpenAI models can't invoke function calls natively,
# so we extract the search query explicitly here
query_to_search: str = await aget_query_to_search(
buffer=buffer,
query=user_query,
function=FunctionCalls.get_function_call(
FunctionCalls.web_search
),
)
await MessageHandler.function_call(
callback_name=FunctionCalls.web_search.__name__,
callback_kwargs={"query_to_search": query_to_search},
buffer=buffer,
)
else:
# OpenAI models can invoke function call,
# so let the AI decide whether to invoke function call
function = FunctionCalls.get_function_call(
FunctionCalls.web_search
)
buffer.optional_info["functions"] = [function]
buffer.optional_info["function_call"] = function
await MessageHandler.ai(buffer=buffer)
# End of command
return None, ResponseType.DO_NOTHING
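# Usage sketch (illustrative): sending "/browse latest LLM papers" saves the query,
# runs the web_search function call (forced for non-OpenAI models, left to the AI
# for OpenAI models), and streams an answer grounded in the search results;
# "/browse /help" instead re-dispatches to /help via ResponseType.REPEAT_COMMAND.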
| [] |
2024-01-10 | c0sogi/LLMChat | app~utils~chat~tokens.py | from bisect import bisect_left
from typing import TYPE_CHECKING, Optional, Union
from langchain import PromptTemplate
from app.common.config import ChatConfig
from app.models.chat_models import ChatRoles, MessageHistory, UserChatContext
if TYPE_CHECKING:
from app.models.llms import LLMModel
def get_token_limit_with_n_messages(
user_chat_context: UserChatContext,
n_user_messages: int,
n_ai_messages: int,
n_system_messages: int,
prefix_prompt_tokens: int = 0,
suffix_prompt_tokens: int = 0,
) -> int:
"""
Get the number of tokens left in the LLM model,
with the given number of messages from each message history.
This is used to determine if the LLM model has enough tokens to generate a response.
"""
llm_model: LLMModel = user_chat_context.llm_model.value
users: list[MessageHistory] = user_chat_context.user_message_histories
ais: list[MessageHistory] = user_chat_context.ai_message_histories
syss: list[MessageHistory] = user_chat_context.system_message_histories
return llm_model.max_total_tokens - (
sum([m.tokens for m in users[-min(n_user_messages, len(users)) :]])
+ sum([m.tokens for m in ais[-min(n_ai_messages, len(ais)) :]])
+ sum([m.tokens for m in syss[-min(n_system_messages, len(syss)) :]])
+ prefix_prompt_tokens
+ suffix_prompt_tokens
+ llm_model.token_margin
+ ChatConfig.extra_token_margin
)
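# Worked example (all numbers hypothetical, including ChatConfig.extra_token_margin=16):
# with max_total_tokens=4096, token_margin=8, selected user/ai/system messages
# summing to 300 + 400 + 100 tokens, and prefix/suffix prompts of 50 + 20 tokens,
# the function returns 4096 - (800 + 70 + 8 + 16) = 3202 tokens.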
def make_formatted_query(
user_chat_context: UserChatContext,
question: str,
context: str,
query_template: Union[PromptTemplate, str],
with_n_user_messages: int = 0,
with_n_ai_messages: int = 0,
with_n_system_messages: int = 0,
) -> str:
"""Make a formatted query to the LLM model, with the given question and context.
Token limit is calculated based on the number of messages in the user, AI, and system message histories.
"""
llm_model = user_chat_context.llm_model.value
token_limit: int = (
get_token_limit_with_n_messages(
user_chat_context=user_chat_context,
n_user_messages=with_n_user_messages,
n_ai_messages=with_n_ai_messages,
n_system_messages=with_n_system_messages,
suffix_prompt_tokens=llm_model.suffix_tokens,
prefix_prompt_tokens=llm_model.prefix_tokens,
)
- 100
)
context = llm_model.tokenizer.get_chunk_of(
context,
tokens=token_limit
- user_chat_context.get_tokens_of(
query_template.format(context="", question=question)
),
)
return query_template.format(context=context, question=question)
def make_truncated_text(
user_chat_context: UserChatContext,
text: str,
with_n_user_messages: int = 0,
with_n_ai_messages: int = 0,
with_n_system_messages: int = 0,
) -> str:
llm_model = user_chat_context.llm_model.value
token_limit: int = (
get_token_limit_with_n_messages(
user_chat_context=user_chat_context,
n_user_messages=with_n_user_messages,
n_ai_messages=with_n_ai_messages,
n_system_messages=with_n_system_messages,
suffix_prompt_tokens=llm_model.suffix_tokens,
prefix_prompt_tokens=llm_model.prefix_tokens,
)
- 100
)
return llm_model.tokenizer.get_chunk_of(text, tokens=token_limit)
def cutoff_message_histories(
user_chat_context: UserChatContext,
user_message_histories: list[MessageHistory],
ai_message_histories: list[MessageHistory],
system_message_histories: list[MessageHistory],
token_limit: int,
) -> tuple[list[MessageHistory], list[MessageHistory], list[MessageHistory]]:
"""
Cut off message histories to fit the token limit.
Forget the oldest messages when the token limit is exceeded.
"""
# Separate the prefix and suffix messages, and precompute the number of tokens.
prefix_message: Optional[MessageHistory] = None
suffix_message: Optional[MessageHistory] = None
llm_model: "LLMModel" = user_chat_context.llm_model.value
prefix_prompt = llm_model.prefix
suffix_prompt = llm_model.suffix
if prefix_prompt:
prefix_message = MessageHistory(
content=prefix_prompt,
tokens=llm_model.prefix_tokens,
role=llm_model.user_chat_roles.system,
actual_role=ChatRoles.SYSTEM.value,
timestamp=-1, # This is a dummy timestamp.
)
if suffix_prompt:
suffix_message = MessageHistory(
content=suffix_prompt,
tokens=llm_model.suffix_tokens,
role=llm_model.user_chat_roles.system,
actual_role=ChatRoles.SYSTEM.value,
timestamp=2**50, # This is a dummy timestamp.
)
# Calculate a cap on the number of tokens, excluding both prefix and suffix messages.
remaining_tokens = token_limit - (
(prefix_message.tokens if prefix_message else 0)
+ (suffix_message.tokens if suffix_message else 0)
)
print(f"- DEBUG: remaining_tokens: {remaining_tokens}", flush=True)
# If the remaining tokens are negative, return an empty tuple.
if remaining_tokens < 0:
return ([], [], [])
# Sort all messages by timestamp and filter out the prefix and suffix messages.
messages_without_prefix_and_suffix: list[MessageHistory] = sorted(
user_message_histories
+ ai_message_histories
+ [m for m in system_message_histories if not m.is_prefix and not m.is_suffix],
key=lambda m: m.timestamp,
)
# If the total tokens of all messages are less than or equal to the remaining tokens, return the input as it is.
if sum(m.tokens for m in messages_without_prefix_and_suffix) <= remaining_tokens:
_system_message_histories = [
m
for m in messages_without_prefix_and_suffix
if m in system_message_histories
]
if prefix_message:
_system_message_histories.insert(0, prefix_message)
if suffix_message:
_system_message_histories.append(suffix_message)
return (
user_message_histories,
ai_message_histories,
_system_message_histories,
)
# Get the cumulative tokens of all messages.
all_tokens = [0]
for m in messages_without_prefix_and_suffix:
all_tokens.append(all_tokens[-1] + m.tokens)
# Find the index of the first message that fits the remaining tokens using binary search.
index = bisect_left(all_tokens, all_tokens[-1] - remaining_tokens)
# Slice the selected messages from the index to the end.
selected_messages: list[MessageHistory] = messages_without_prefix_and_suffix[index:]
# Separate selected messages by each type using list comprehensions.
user_messages = [m for m in selected_messages if m in user_message_histories]
ai_messages = [m for m in selected_messages if m in ai_message_histories]
# Add prefix and suffix messages to the system message.
system_messages = [m for m in selected_messages if m in system_message_histories]
if prefix_message:
system_messages.insert(0, prefix_message)
if suffix_message:
system_messages.append(suffix_message)
# Returns a list of messages for each type as a tuple.
return (user_messages, ai_messages, system_messages)
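# Illustrative trace (hypothetical numbers): for messages with token counts
# [30, 50, 20, 40] (oldest first), all_tokens == [0, 30, 80, 100, 140]. With
# remaining_tokens == 70, bisect_left(all_tokens, 140 - 70) == 2, so the two
# oldest messages (30 + 50 tokens) are forgotten and the newest two
# (20 + 40 == 60 <= 70 tokens) are kept, then split back into per-role lists.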
| [] |
2024-01-10 | c0sogi/LLMChat | tests~test_chat.py | import json
import time
import pytest
from fastapi.testclient import TestClient
from starlette.testclient import WebSocketTestSession
from app.common.config import OPENAI_API_KEY
from app.models.chat_models import (
ChatRoles,
MessageHistory,
UserChatContext,
UserChatProfile,
)
from app.models.llms import LLMModels
from app.models.base_models import MessageFromWebsocket, MessageToWebsocket
# @pytest.mark.skip
@pytest.mark.asyncio
async def test_chat_redis(cache_manager):
# set random context
user_id: str = "test_user"
test_chat_room_id: str = "test_chat_room"
role: ChatRoles = ChatRoles.USER
message: str = "test message"
default_context: UserChatContext = UserChatContext.construct_default(
user_id=user_id, chat_room_id=test_chat_room_id
)
new_context: UserChatContext = UserChatContext(
user_chat_profile=UserChatProfile(
user_id=user_id,
chat_room_id=test_chat_room_id,
),
llm_model=LLMModels.gpt_4,
)
# delete test chat room
await cache_manager.delete_chat_room(
user_id=user_id, chat_room_id=test_chat_room_id
)
# create new context
await cache_manager.create_context(
user_chat_context=new_context,
)
# read new context
assert (
new_context.user_chat_profile.chat_room_id
== (
await cache_manager.read_context_from_profile(
user_chat_profile=new_context.user_chat_profile
)
).user_chat_profile.chat_room_id
)
# add new message to redis
new_message: MessageHistory = MessageHistory(
role=role.value,
content=message,
actual_role=role.value,
tokens=new_context.get_tokens_of(message),
)
await cache_manager.append_message_history(
user_id=user_id,
chat_room_id=test_chat_room_id,
role=role,
message_history=new_message,
)
# read message from redis
message_histories: list[MessageHistory] = await cache_manager.get_message_history(
user_id=user_id, chat_room_id=test_chat_room_id, role=role
)
assert message_histories == [new_message]
# reset context and read context
await cache_manager.reset_context(user_id=user_id, chat_room_id=test_chat_room_id)
assert (
default_context.user_chat_profile.chat_room_id
== (
await cache_manager.read_context_from_profile(
user_chat_profile=default_context.user_chat_profile
)
).user_chat_profile.chat_room_id
)
# delete test chat room
await cache_manager.delete_chat_room(
user_id=user_id, chat_room_id=test_chat_room_id
)
@pytest.mark.skipif(OPENAI_API_KEY is None, reason="OpenAI API Key is not set")
def test_chat_connection(client: TestClient, base_websocket_url: str):
with client.websocket_connect(
f"{base_websocket_url}/ws/chat/{OPENAI_API_KEY}"
) as ws_client:
assert isinstance(ws_client, WebSocketTestSession)
client_received: MessageToWebsocket = MessageToWebsocket.parse_raw(
ws_client.receive_text()
)
assert client_received.init
client_received: MessageToWebsocket = MessageToWebsocket.parse_raw(
ws_client.receive_text()
)
assert client_received.msg is not None and "tokens" in json.loads(
client_received.msg
)
assert client_received.chat_room_id is not None
# send message to websocket
ws_client.send_json(
MessageFromWebsocket(
msg="/ping",
chat_room_id=client_received.chat_room_id,
).dict()
)
# receive message from websocket
client_received: MessageToWebsocket = MessageToWebsocket.parse_raw(
ws_client.receive_text()
)
assert client_received.msg == "pong"
# close websocket
ws_client.close()
@pytest.mark.skipif(OPENAI_API_KEY is None, reason="OpenAI API Key is not set")
def test_chat_conversation(client: TestClient, base_websocket_url: str, test_logger):
# parameters
timeout: int = 10
with client.websocket_connect(
f"{base_websocket_url}/ws/chat/{OPENAI_API_KEY}"
) as ws_client:
assert isinstance(ws_client, WebSocketTestSession)
client_received: MessageToWebsocket = MessageToWebsocket.parse_raw(
ws_client.receive_text()
)
assert client_received.init
assert client_received.chat_room_id is not None
# send message to websocket
ws_client.send_json(
MessageFromWebsocket(
msg="say this word: TEST",
translate=None,
chat_room_id=client_received.chat_room_id,
).dict()
)
# receive messages from websocket, loop until received message with finish=True
# timeout: 10 seconds
received_messages: list[MessageToWebsocket] = []
now: float = time.time()
while time.time() - now < timeout:
client_received: MessageToWebsocket = MessageToWebsocket.parse_raw(
ws_client.receive_text()
)
received_messages.append(client_received)
if client_received.finish:
break
assert len(received_messages) > 0
# show received messages
for msg in received_messages:
test_logger.info(msg)
# assemble msg from received messages using list comprehension
received_msg: str = "".join(
[msg.msg for msg in received_messages if msg.msg is not None]
)
assert "TEST" in received_msg
# close websocket
ws_client.close()
| [] |
2024-01-10 | c0sogi/LLMChat | app~utils~chat~text_generations~completion_api.py | from typing import AsyncIterator, Literal, Optional
from urllib.parse import urlparse
from langchain import PromptTemplate
from app.common.config import config
from app.common.constants import ChatTurnTemplates
from app.errors.chat_exceptions import (
ChatContentFilterException,
ChatFunctionCallException,
ChatLengthException,
)
from app.models.base_models import MessageHistory
from app.models.completion_models import FunctionCallUnparsed
from app.models.function_calling.base import FunctionCall
from app.models.llms import ExllamaModel, LlamaCppModel, OpenAIModel
from app.utils.api.completion import (
request_chat_completion_with_streaming,
request_text_completion_with_streaming,
)
from app.utils.chat.buffer import BufferedUserContext
from app.utils.chat.messages.converter import (
chat_completion_api_parse_method,
message_histories_to_list,
message_histories_to_str,
)
from app.utils.chat.messages.turn_templates import identify_end_of_string
from app.utils.function_calling.parser import (
make_function_call_parsed_from_dict,
)
from app.utils.function_calling.token_count import (
get_num_tokens_from_functions,
)
def _get_stop_strings(
*roles: str, chat_turn_prompt: PromptTemplate
) -> list[str]:
"""Get stop strings for text completion API.
Stop strings are required to stop text completion API from generating
text that does not belong to the current chat turn.
e.g. The common stop string is "### USER:", which can prevent ai from generating
user's message itself."""
prompt_stop = set()
eos: Optional[str] = identify_end_of_string(
"role", "content", chat_turn_prompt=chat_turn_prompt
)
if eos:
prompt_stop.add(eos)
for role in roles:
avoids = (
chat_turn_prompt.format(role=role, content="").strip(),
f"{role}:",
f"### {role}:",
f"###{role}:",
)
prompt_stop.update(
avoids,
map(str.capitalize, avoids),
map(str.upper, avoids),
map(str.lower, avoids),
)
return list(prompt_stop)
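# Illustrative output (assuming ChatTurnTemplates.ROLE_CONTENT_1 renders a turn as
# "### {role}:\n{content}"): _get_stop_strings("USER", chat_turn_prompt=...) would
# include "### USER:", "USER:", and "###USER:", plus capitalized/upper/lower
# variants, so the model stops before generating a fake user turn.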
def _get_api_key(buffer: BufferedUserContext) -> Optional[str]:
"""Return API key to use for completion API"""
user_defined_api_key: Optional[str] = buffer.optional_info.get("api_key")
return (
str(user_defined_api_key)
if user_defined_api_key is not None
else getattr(buffer.current_llm_model.value, "api_key", None)
)
def _get_api_base(buffer: BufferedUserContext) -> str:
"""Return API base to use for completion API"""
current_model = buffer.current_llm_model.value
if isinstance(current_model, (LlamaCppModel, ExllamaModel)):
api_url = config.llama_completion_url
assert api_url is not None
else:
api_url = current_model.api_url
parsed_url = urlparse(api_url)
return f"{parsed_url.scheme}://{parsed_url.netloc}/v1"
def _make_common_kwargs(buffer: BufferedUserContext, max_tokens: int) -> dict:
"""Make common kwargs to use for completion API"""
if isinstance(buffer.current_llm_model.value, OpenAIModel):
model = buffer.current_llm_model.value.name
else:
model = buffer.current_llm_model.name
return dict(
model=model,
temperature=buffer.current_user_chat_profile.temperature,
top_p=buffer.current_user_chat_profile.top_p,
presence_penalty=buffer.current_user_chat_profile.presence_penalty,
frequency_penalty=buffer.current_user_chat_profile.frequency_penalty,
user=buffer.user_id,
max_tokens=max_tokens,
api_key=_get_api_key(buffer=buffer),
api_base=_get_api_base(buffer=buffer),
)
def _make_chat_completion_kwargs(
buffer: BufferedUserContext,
user_message_histories: list[MessageHistory],
ai_message_histories: list[MessageHistory],
system_message_histories: list[MessageHistory],
max_tokens: int,
) -> dict:
"""Make kwargs to use for chat completion API"""
functions: Optional[list[FunctionCall]] = buffer.optional_info.get(
"functions"
)
function_call: Optional[
FunctionCall | Literal["auto", "none"]
] = buffer.optional_info.get("function_call")
if functions is not None:
max_tokens -= get_num_tokens_from_functions(functions)
return _make_common_kwargs(buffer=buffer, max_tokens=max_tokens) | {
"messages": message_histories_to_list(
parse_method=chat_completion_api_parse_method,
user_message_histories=user_message_histories,
ai_message_histories=ai_message_histories,
system_message_histories=system_message_histories,
),
"functions": functions,
"function_call": function_call,
}
def _make_text_completion_kwargs(
buffer: BufferedUserContext,
user_message_histories: list[MessageHistory],
ai_message_histories: list[MessageHistory],
system_message_histories: list[MessageHistory],
max_tokens: int,
):
chat_turn_prompt: PromptTemplate = (
buffer.current_llm_model.value.chat_turn_prompt
if isinstance(buffer.current_llm_model.value, LlamaCppModel)
else ChatTurnTemplates.ROLE_CONTENT_1
)
return _make_common_kwargs(buffer=buffer, max_tokens=max_tokens) | {
"prompt": message_histories_to_str(
user_chat_roles=buffer.current_user_chat_roles,
user_message_histories=user_message_histories,
ai_message_histories=ai_message_histories,
system_message_histories=system_message_histories,
chat_turn_prompt=chat_turn_prompt,
),
"stop": _get_stop_strings(
buffer.current_llm_model.value.user_chat_roles.user,
buffer.current_llm_model.value.user_chat_roles.ai,
buffer.current_llm_model.value.user_chat_roles.system,
chat_turn_prompt=chat_turn_prompt,
),
}
async def agenerate_from_chat_completion_api(
buffer: BufferedUserContext,
user_message_histories: list[MessageHistory],
ai_message_histories: list[MessageHistory],
system_message_histories: list[MessageHistory],
max_tokens: int,
) -> AsyncIterator[str]:
content: str = ""
function_call_name: str = ""
function_call_arguments: str = ""
async for chat_completion_chunk in request_chat_completion_with_streaming(
**_make_chat_completion_kwargs(
buffer=buffer,
user_message_histories=user_message_histories,
ai_message_histories=ai_message_histories,
system_message_histories=system_message_histories,
max_tokens=max_tokens,
)
):
_finish_reason = chat_completion_chunk["choices"][0]["finish_reason"]
_content = chat_completion_chunk["choices"][0]["delta"].get("content")
_function_call = chat_completion_chunk["choices"][0]["delta"].get(
"function_call"
)
if _content:
content += _content
yield _content
if _function_call is not None:
function_call_name += str(_function_call.get("name", ""))
function_call_arguments += str(_function_call.get("arguments", ""))
if _finish_reason not in ("null", None):
# End-of-Stream
if _finish_reason == "length":
raise ChatLengthException(
msg=content
) # raise exception for token limit
elif _finish_reason == "content_filter":
raise ChatContentFilterException(
msg="Omitted content due to a flag from our content filters"
) # raise exception for openai content filter
elif function_call_name:
# Raise exception for function call
function_call_unparsed = FunctionCallUnparsed(
name=function_call_name
)
if function_call_arguments:
function_call_unparsed[
"arguments"
] = function_call_arguments
function_call_parsed = make_function_call_parsed_from_dict(
function_call_unparsed
)
raise ChatFunctionCallException(
func_name=function_call_parsed["name"],
func_kwargs=function_call_parsed.get("arguments", {}),
)
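# Consumption sketch (illustrative; send_to_client and dispatch_function are
# hypothetical helpers, not part of this module):
#   try:
#       async for token in agenerate_from_chat_completion_api(...):
#           await send_to_client(token)
#   except ChatFunctionCallException as e:
#       await dispatch_function(e.func_name, e.func_kwargs)
#   except ChatLengthException as e:
#       ...  # the msg raised above carries the partial content before the cutoff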
async def agenerate_from_text_completion_api(
buffer: BufferedUserContext,
user_message_histories: list[MessageHistory],
ai_message_histories: list[MessageHistory],
system_message_histories: list[MessageHistory],
max_tokens: int,
) -> AsyncIterator[str]:
text: str = ""
async for completion_chunk in request_text_completion_with_streaming(
**_make_text_completion_kwargs(
buffer=buffer,
user_message_histories=user_message_histories,
ai_message_histories=ai_message_histories,
system_message_histories=system_message_histories,
max_tokens=max_tokens,
)
):
_finish_reason = completion_chunk["choices"][0]["finish_reason"]
_text = completion_chunk["choices"][0]["text"]
if _text:
text += _text
yield _text
if _finish_reason not in ("null", None):
if _finish_reason == "length":
raise ChatLengthException(
msg=text
) # raise exception for token limit
elif _finish_reason == "content_filter":
raise ChatContentFilterException(
msg="Omitted content due to a flag from our content filters"
) # raise exception for openai content filter
| [
"set()"
] |
2024-01-10 | c0sogi/LLMChat | app~utils~langchain~qdrant_vectorstore.py | """Wrapper around the Milvus vector database."""
import warnings
from asyncio import gather
from hashlib import md5
from operator import itemgetter
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Tuple,
Union,
)
import numpy as np
from fastapi.concurrency import run_in_threadpool
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.qdrant import Qdrant as _Qdrant
from langchain.vectorstores.utils import maximal_marginal_relevance
if TYPE_CHECKING:
from qdrant_client import grpc
from qdrant_client.conversions import common_types
DictFilter = Dict[str, Union[str, int, bool, dict, list]]
MetadataFilter = Union[DictFilter, common_types.Filter]
class Qdrant(_Qdrant):
"""Wrapper around Qdrant vector database.
To use you should have the ``qdrant-client`` package installed.
Example:
.. code-block:: python
from qdrant_client import QdrantClient
from langchain import Qdrant
client = QdrantClient()
collection_name = "MyCollection"
qdrant = Qdrant(client, collection_name, embedding_function)
"""
CONTENT_KEY = "page_content"
METADATA_KEY = "metadata"
def __init__(
self,
client: Any,
collection_name: str = "Shared",
embeddings: Optional[Embeddings] = None,
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
embedding_function: Optional[Callable] = None, # deprecated
):
"""Initialize with necessary components."""
super().__init__(
client=client,
collection_name=collection_name,
embeddings=embeddings,
content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key,
embedding_function=embedding_function,
)
async def aadd_texts(
self,
texts: Iterable[str],
collection_name: Optional[str] = None,
metadatas: Optional[List[dict]] = None,
batch_size: int = 64,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
from itertools import islice
from qdrant_client import grpc
from qdrant_client.conversions.conversion import payload_to_grpc
grpc_points = self.client.async_grpc_points
ids = []
texts_iterator = iter(texts)
metadatas_iterator = iter(metadatas or [])
while batch_texts := list(islice(texts_iterator, batch_size)):
# Take the corresponding metadata for each text in a batch
batch_metadatas = (
list(islice(metadatas_iterator, batch_size)) or None
)
batch_ids = [
md5(text.encode("utf-8")).hexdigest() for text in batch_texts
]
points = [
grpc.PointStruct( # type: ignore
id=grpc.PointId(uuid=id), # type: ignore
vectors=grpc.Vectors(vector=grpc.Vector(data=vector)), # type: ignore
payload=payload_to_grpc(payload),
)
for id, vector, payload in zip(
batch_ids,
await self._aembed_texts(batch_texts),
self._build_payloads(
batch_texts,
batch_metadatas,
self.content_payload_key,
self.metadata_payload_key,
),
)
]
await grpc_points.Upsert(
grpc.UpsertPoints( # type: ignore
collection_name=collection_name
if collection_name is not None
else self.collection_name,
points=points,
)
)
ids.extend(batch_ids)
return ids
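# Note: point IDs are md5 hashes of the texts, so re-adding identical texts
# upserts the same points instead of duplicating them. Illustration:
#   from hashlib import md5
#   md5("hello world".encode("utf-8")).hexdigest()
#   # -> "5eb63bbbe01eeed093cb22bb8f5acdc3", stable across runs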
async def asimilarity_search_with_score(
self,
query: str,
collection_name: str,
k: int = 4,
filter: Optional["MetadataFilter"] = None,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query and score for each.
"""
from qdrant_client.qdrant_remote import QdrantRemote
if not isinstance(self.client._client, QdrantRemote):
raise NotImplementedError(
"Async similarity search is only supported for remote clients",
)
from qdrant_client import grpc
from qdrant_client.conversions.conversion import RestToGrpc
from qdrant_client.http import models as rest
grpc_points = self.client.async_grpc_points
if filter is not None and isinstance(filter, dict):
warnings.warn(
"Using dict as a `filter` is deprecated. Please use qdrant-client "
"filters directly: "
"https://qdrant.tech/documentation/concepts/filtering/",
DeprecationWarning,
)
qdrant_filter = self._qdrant_filter_from_dict_grpc(filter)
elif filter is not None and isinstance(filter, rest.Filter):
qdrant_filter = RestToGrpc.convert_filter(filter)
else:
qdrant_filter = filter
response = await grpc_points.Search(
grpc.SearchPoints( # type: ignore
collection_name=self.collection_name
if collection_name is None
else collection_name,
vector=await self._aembed_query(query),
filter=qdrant_filter,
with_payload=grpc.WithPayloadSelector(enable=True), # type: ignore
limit=k,
)
)
return [
(
self._document_from_scored_point_grpc(
result,
self.content_payload_key,
self.metadata_payload_key,
),
result.score,
)
for result in response.result # type: ignore
]
async def asimilarity_search(
self,
query: str,
collection_name: Optional[str] = None,
k: int = 4,
filter: Optional["MetadataFilter"] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query.
"""
results = await self.asimilarity_search_with_score(
query=query,
collection_name=collection_name
if collection_name is not None
else self.collection_name,
k=k,
filter=filter,
)
return list(map(itemgetter(0), results))
async def amax_marginal_relevance_search_with_score(
self,
query: str,
collection_name: Optional[str] = None,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[tuple[Document, float]]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
from qdrant_client import grpc
from qdrant_client.conversions.conversion import GrpcToRest
from qdrant_client.http.models import models as rest
grpc_points = self.client.async_grpc_points
embedding = await self._aembed_query(query)
response = await grpc_points.Search(
grpc.SearchPoints( # type: ignore
collection_name=collection_name
if collection_name is not None
else self.collection_name,
vector=embedding,
with_payload=grpc.WithPayloadSelector(enable=True), # type: ignore
with_vectors=grpc.WithVectorsSelector(enable=True), # type: ignore
limit=fetch_k,
)
)
embeddings: list[rest.VectorStruct] = [
GrpcToRest.convert_vectors(result.vectors)
for result in response.result
]
mmr_selected: list[int] = maximal_marginal_relevance(
np.array(embedding), embeddings, k=k, lambda_mult=lambda_mult
)
return [
(
self._document_from_scored_point_grpc(
response.result[i],
self.content_payload_key,
self.metadata_payload_key,
),
response.result[i].score,
)
for i in mmr_selected
]
def _build_condition_grpc(
self, key: str, value: Any
) -> List["grpc.Condition"]:
from qdrant_client import grpc
out: List[grpc.Condition] = []
if isinstance(value, dict):
for _key, value in value.items():
out.extend(self._build_condition_grpc(f"{key}.{_key}", value))
elif isinstance(value, list):
for _value in value:
if isinstance(_value, dict):
out.extend(self._build_condition_grpc(f"{key}[]", _value))
else:
out.extend(self._build_condition_grpc(f"{key}", _value))
else:
if isinstance(value, str):
value_type = "text"
elif isinstance(value, int):
value_type = "interger"
elif isinstance(value, bool):
value_type = "boolean"
else:
raise TypeError(f"Unsupported type {type(value)}")
out.append(
grpc.Condition(
field=grpc.FieldCondition( # type: ignore
key=f"{self.metadata_payload_key}.{key}",
match=grpc.Match(**{value_type: value}), # type: ignore
)
)
)
return out
def _qdrant_filter_from_dict_grpc(
self, filter: Optional["DictFilter"]
) -> Optional["grpc.Filter"]:
from qdrant_client import grpc
if not filter:
return None
return grpc.Filter( # type: ignore
must=[
condition
for key, value in filter.items()
for condition in self._build_condition_grpc(key, value)
]
)
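# Illustrative conversion (assumed payload layout): the dict filter
#   {"genre": "drama", "year": 2020}
# becomes a grpc.Filter whose `must` conditions require
# metadata.genre == "drama" (text match) and metadata.year == 2020 (integer match).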
async def _aembed_query(self, query: str) -> List[float]:
"""Embed query text.
Used to provide backward compatibility with `embedding_function` argument.
Args:
query: Query text.
Returns:
List of floats representing the query embedding.
"""
if self.embeddings is not None:
embedding = await run_in_threadpool(
self.embeddings.embed_query, query
)
else:
if self._embeddings_function is not None:
embedding = await run_in_threadpool(
self._embeddings_function, query
)
else:
raise ValueError(
"Neither of embeddings or embedding_function is set"
)
return embedding.tolist() if hasattr(embedding, "tolist") else embedding # type: ignore
async def _aembed_texts(self, texts: Iterable[str]) -> List[List[float]]:
"""Embed search texts.
Used to provide backward compatibility with `embedding_function` argument.
Args:
texts: Iterable of texts to embed.
Returns:
List of float lists, one embedding per input text.
"""
if self.embeddings is not None:
embeddings = await run_in_threadpool(
self.embeddings.embed_documents, list(texts)
)
if hasattr(embeddings, "tolist"):
embeddings = embeddings.tolist() # type: ignore
elif self._embeddings_function is not None:
embeddings = await gather(
*[
run_in_threadpool(self._embeddings_function, text)
for text in texts
]
)
for embedding_idx in range(len(embeddings)):
if hasattr(embeddings[embedding_idx], "tolist"):
embeddings[embedding_idx] = embeddings[
embedding_idx
].tolist()
else:
raise ValueError(
"Neither of embeddings or embedding_function is set"
)
return embeddings
@staticmethod
def _document_from_scored_point_grpc(
scored_point: Any,
content_payload_key: str,
metadata_payload_key: str,
) -> Document:
from qdrant_client.conversions.conversion import grpc_to_payload
payload = grpc_to_payload(scored_point.payload)
return Document(
page_content=payload[content_payload_key],
metadata=payload.get(metadata_payload_key) or {},
)
| [] |
2024-01-10 | c0sogi/LLMChat | app~models~llms.py | from dataclasses import dataclass, field
from typing import Literal, Optional, Union
from langchain import PromptTemplate
from app.common.config import OPENAI_API_KEY, ChatConfig
from app.common.constants import ChatTurnTemplates, DescriptionTemplates
from app.mixins.enum import EnumMixin
from .base_models import UserChatRoles
from .llm_tokenizers import (
BaseTokenizer,
ExllamaTokenizer,
LlamaTokenizer,
OpenAITokenizer,
)
@dataclass
class LLMModel:
name: str = "llm_model" # model name
max_total_tokens: int = 2048
max_tokens_per_request: int = 1024
tokenizer: BaseTokenizer = field(
default_factory=lambda: OpenAITokenizer(model_name="gpt-3.5-turbo")
)
user_chat_roles: UserChatRoles = field(
default_factory=lambda: UserChatRoles(
ai="assistant",
system="system",
user="user",
),
)
prefix_template: Optional[
Union[PromptTemplate, str]
] = None # A prefix to prepend to the generated text. If None, no prefix is prepended.
suffix_template: Optional[
Union[PromptTemplate, str]
] = None # A suffix to append to the generated text. If None, no suffix is appended.
token_margin: int = field(
default=8,
metadata={
"description": " The marginal number of tokens when counting the number of tokens in context."
"The more token_margin, the more conservative the model will be when counting context tokens."
},
)
prefix: Optional[str] = field(init=False, repr=False, default=None)
suffix: Optional[str] = field(init=False, repr=False, default=None)
@staticmethod
def _prepare_format(
input_variables: list[str],
predefined_format: dict[str, str],
) -> dict[str, str | None]:
return dict(
zip(
input_variables,
map(
predefined_format.get,
input_variables,
),
)
)
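# e.g. (illustrative): _prepare_format(["user", "ai"], {"user": "USER", "ai": "BOT"})
# returns {"user": "USER", "ai": "BOT"}; input variables missing from the
# predefined format map to None, so only known role placeholders are substituted.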
def __post_init__(self):
user_chat_roles = self.user_chat_roles
predefined_format = {
"user": user_chat_roles.user,
"USER": user_chat_roles.user,
"ai": user_chat_roles.ai,
"AI": user_chat_roles.ai,
"char": user_chat_roles.ai,
"system": user_chat_roles.system,
"SYSTEM": user_chat_roles.system,
}
# If the global prefix is None, then use the prefix template
if ChatConfig.global_prefix is None:
if isinstance(self.prefix_template, PromptTemplate):
# format the template with the predefined format, only for input variables
self.prefix = self.prefix_template.format(
**self._prepare_format(
self.prefix_template.input_variables, predefined_format
)
)
elif isinstance(self.prefix_template, str):
self.prefix = self.prefix_template.format(**predefined_format)
else:
self.prefix = None
else:
self.prefix = ChatConfig.global_prefix
# If the global suffix is None, then use the suffix template
if ChatConfig.global_suffix is None:
if isinstance(self.suffix_template, PromptTemplate):
# format the template with the predefined format, only for input variables
self.suffix = self.suffix_template.format(
**self._prepare_format(
self.suffix_template.input_variables, predefined_format
)
)
elif isinstance(self.suffix_template, str):
self.suffix = self.suffix_template.format(**predefined_format)
else:
self.suffix = None
else:
self.suffix = ChatConfig.global_suffix
self._prefix_tokens: Optional[int] = None
self._suffix_tokens: Optional[int] = None
@property
def prefix_tokens(self) -> int:
# Lazy load the prefix tokens
if self.prefix is None:
return 0
if self._prefix_tokens is None:
self._prefix_tokens = (
self.tokenizer.tokens_of(self.prefix) + self.token_margin
)
return self._prefix_tokens
@property
def suffix_tokens(self) -> int:
# Lazy load the suffix tokens
if self.suffix is None:
return 0
if self._suffix_tokens is None:
self._suffix_tokens = (
self.tokenizer.tokens_of(self.suffix) + self.token_margin
)
return self._suffix_tokens
@dataclass
class LlamaCppModel(LLMModel):
"""Llama.cpp model that can be loaded from local path."""
tokenizer: LlamaTokenizer = field(
default_factory=lambda: LlamaTokenizer(model_name=""),
metadata={"description": "The tokenizer to use for this model."},
)
name: str = field(default="Llama C++")
model_path: str = field(
default="YOUR_GGML.bin"
) # The path to the model. Must end with .bin. You must put .bin file into "llama_models/ggml"
user_chat_roles: UserChatRoles = field(
default_factory=lambda: UserChatRoles(
ai="ASSISTANT",
system="SYSTEM",
user="USER",
),
)
prefix_template: Optional[Union[PromptTemplate, str]] = field(
default_factory=lambda: DescriptionTemplates.USER_AI__DEFAULT,
)
chat_turn_prompt: PromptTemplate = field(
default_factory=lambda: ChatTurnTemplates.ROLE_CONTENT_1
) # The prompt to use for chat turns.
n_parts: int = (
-1
) # Number of parts to split the model into. If -1, the number of parts is automatically determined.
n_gpu_layers: int = 30 # Number of layers to offload to the GPU. If 0, all layers stay on the CPU.
seed: int = -1 # Seed. If -1, a random seed is used.
f16_kv: bool = True # Use half-precision for key/value cache.
logits_all: bool = (
False # Return logits for all tokens, not just the last token.
)
vocab_only: bool = False # Only load the vocabulary, no weights.
use_mlock: bool = True # Force system to keep model in RAM.
n_batch: int = 512 # Number of tokens to process in parallel. Should be a number between 1 and n_ctx.
last_n_tokens_size: int = 64 # The number of tokens to look back when applying the repeat_penalty.
use_mmap: bool = True # Whether to use memory mapping for the model.
streaming: bool = True # Whether to stream the results, token by token.
cache: bool = (
False # Whether to use a cache for model outputs; see cache_type and cache_size.
)
echo: bool = True # Whether to echo the prompt.
lora_base: Optional[str] = None # The path to the Llama LoRA base model.
lora_path: Optional[
str
] = None # The path to the Llama LoRA. If None, no LoRA is loaded.
cache_type: Optional[Literal["disk", "ram"]] = "ram"
cache_size: Optional[int] = (
2 << 30
) # The size of the cache in bytes. Only used if cache is True.
n_threads: Optional[
int
] = None # Number of threads to use. If None, the number of threads is automatically determined.
low_vram: bool = False # Whether to use less VRAM.
embedding: bool = False # Whether to use the embedding layer.
rope_freq_base: float = 10000.0 # I use 26000 for n_ctx=4096. https://github.com/ggerganov/llama.cpp/pull/2054
rope_freq_scale: float = 1.0 # Generally, 2048 / n_ctx. https://github.com/ggerganov/llama.cpp/pull/2054
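# Worked example (rule of thumb from the comments above, not a guarantee):
# to stretch a 2048-token model to n_ctx=4096, set
# rope_freq_scale = 2048 / 4096 = 0.5 and raise rope_freq_base
# (26000 is the author's empirical value for 4096 context tokens).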
@dataclass
class ExllamaModel(LLMModel):
"""Exllama model that can be loaded from local path."""
model_path: str = field(
default="YOUR_GPTQ_FOLDER_NAME",
metadata={
"description": "The GPTQ model path to the model."
"e.g. If you have a model folder in 'llama_models/gptq/your_model',"
"then you should set this to 'your_model'."
},
)
tokenizer: ExllamaTokenizer = field(
default_factory=lambda: ExllamaTokenizer(model_name=""),
metadata={"description": "The tokenizer to use for this model."},
)
user_chat_roles: UserChatRoles = field(
default_factory=lambda: UserChatRoles(
ai="ASSISTANT",
system="SYSTEM",
user="USER",
),
)
prefix_template: Optional[Union[PromptTemplate, str]] = field(
default_factory=lambda: DescriptionTemplates.USER_AI__DEFAULT,
)
chat_turn_prompt: PromptTemplate = field(
default_factory=lambda: ChatTurnTemplates.ROLE_CONTENT_1,
metadata={"description": "The prompt to use for each chat turn."},
)
compress_pos_emb: float = field(
default=1.0,
metadata={
"description": "Increase to compress positional embeddings applied to sequence."
"This is useful when you want to extend context window size."
"e.g. If you want to extend context window size from 2048 to 4096, set this to 2.0."
},
)
gpu_peer_fix: bool = field(
default=False,
metadata={
"description": "Apparently Torch can have problems transferring tensors directly 1 GPU to another."
"Enable this to use system RAM as a buffer for GPU to GPU transfers."
},
)
auto_map: Optional[list[float]] = field(
default=None,
metadata={
"description": "List of floats with memory allocation in GB, per CUDA device, overrides device_map"
},
)
# Optional parameters
matmul_recons_thd: int = 8
fused_mlp_thd: int = 2
sdp_thd: int = 8
fused_attn: bool = True
matmul_fused_remap: bool = False
rmsnorm_no_half2: bool = False
rope_no_half2: bool = False
matmul_no_half2: bool = False
silu_no_half2: bool = False
concurrent_streams: bool = False
@dataclass
class OpenAIModel(LLMModel):
api_url: str = "https://api.openai.com/v1/chat/completions"
api_key: str | None = field(repr=False, default=None)
user_chat_roles: UserChatRoles = field(
default_factory=lambda: UserChatRoles(
ai="assistant",
system="system",
user="user",
),
)
class LLMModels(EnumMixin):
# OpenAI models
gpt_3_5_turbo = OpenAIModel(
name="gpt-3.5-turbo",
max_total_tokens=4096,
max_tokens_per_request=2048,
token_margin=8,
tokenizer=OpenAITokenizer("gpt-3.5-turbo"),
api_url="https://api.openai.com/v1/chat/completions",
api_key=OPENAI_API_KEY,
# prefix_template=PromptTemplate(
# template="You'll be roleplaying with the user, so respond to their comments as if they're annoying you.",
# input_variables=[],
# ), # Example of a prefix template
# suffix_template=PromptTemplate(
# template="You must respond to the user in Korean.",
# input_variables=[],
# ), # Example of a suffix template
)
gpt_3_5_turbo_16k = OpenAIModel(
name="gpt-3.5-turbo-16k",
max_total_tokens=16384,
max_tokens_per_request=8192,
token_margin=8,
tokenizer=OpenAITokenizer("gpt-3.5-turbo"),
api_url="https://api.openai.com/v1/chat/completions",
api_key=OPENAI_API_KEY,
)
gpt_4 = OpenAIModel(
name="gpt-4",
max_total_tokens=8192,
max_tokens_per_request=4096,
token_margin=8,
tokenizer=OpenAITokenizer("gpt-4"),
api_url="https://api.openai.com/v1/chat/completions",
api_key=OPENAI_API_KEY,
)
# Llama-cpp models
wizard_vicuna_13b_uncensored = LlamaCppModel(
name="Wizard-Vicuna-13B-Uncensored",
max_total_tokens=4096, # context tokens (n_ctx)
max_tokens_per_request=2048, # The maximum number of tokens to generate.
token_margin=8,
tokenizer=LlamaTokenizer("ehartford/Wizard-Vicuna-13B-Uncensored"),
model_path="Wizard-Vicuna-13B-Uncensored.ggmlv3.q5_1.bin", # The filename of model. Must end with .bin.
prefix_template=DescriptionTemplates.USER_AI__DEFAULT,
)
gorilla_7b = LlamaCppModel(
name="gorilla-7B-GGML",
max_total_tokens=2048, # context tokens (n_ctx)
max_tokens_per_request=1024, # The maximum number of tokens to generate.
token_margin=8,
tokenizer=LlamaTokenizer("gorilla-llm/gorilla-7b-hf-delta-v0"),
model_path="Gorilla-7B.ggmlv3.q3_K_S.bin", # The filename of model. Must end with .bin.
prefix_template=DescriptionTemplates.USER_AI__DEFAULT,
)
manticore_13b_uncensored = LlamaCppModel(
name="Manticore-13B-GGML",
max_total_tokens=2048, # context tokens (n_ctx)
max_tokens_per_request=1024, # The maximum number of tokens to generate.
token_margin=8,
tokenizer=LlamaTokenizer("openaccess-ai-collective/manticore-13b"),
model_path="Manticore-13B.ggmlv2.q5_1.bin", # The filename of model. Must end with .bin.
)
kovicuna_7b = LlamaCppModel(
name="kovicuna_7b",
max_total_tokens=2048, # context tokens (n_ctx)
max_tokens_per_request=1024, # The maximum number of tokens to generate.
token_margin=8,
tokenizer=LlamaTokenizer("digitous/13B-HyperMantis"),
model_path="kovicuna_q4km.bin", # The filename of model. Must end with .bin.
prefix_template=DescriptionTemplates.USER_AI__SHORT,
)
wizard_lm_13b_v1_1 = LlamaCppModel(
name="wizardLM-13B-Uncensored",
max_total_tokens=4096, # context tokens (n_ctx)
max_tokens_per_request=2048, # The maximum number of tokens to generate.
token_margin=8,
prefix_template=DescriptionTemplates.USER_AI__SHORT,
tokenizer=LlamaTokenizer("victor123/WizardLM-13B-1.0"),
model_path="wizardlm-13b-v1.1.ggmlv3.q4_K_S.bin", # The filename of model. Must end with .bin.
rope_freq_scale=0.5, # similar to `compress_pos_emb`, but inverse number: (2048 / n_ctx)
rope_freq_base=26000, # need some perplexity test; 26000 is my empirical value for 4096 context tokens
)
guanaco_13b = LlamaCppModel(
name="guanaco-13B-GGML",
max_total_tokens=2048, # context tokens (n_ctx)
max_tokens_per_request=1024, # The maximum number of tokens to generate.
token_margin=8,
tokenizer=LlamaTokenizer(
"timdettmers/guanaco-65b-merged"
), # timdettmers/guanaco-13b
model_path="guanaco-13B.ggmlv3.q5_1.bin", # The filename of model. Must end with .bin.
prefix_template=DescriptionTemplates.USER_AI__SHORT,
user_chat_roles=UserChatRoles(
user="Human",
ai="Assistant",
system="Instruction",
),
)
karen_the_editor_13b = LlamaCppModel(
name="Karen_theEditor_13B-GGML",
max_total_tokens=2048, # context tokens (n_ctx)
max_tokens_per_request=1024, # The maximum number of tokens to generate.
token_margin=8,
tokenizer=LlamaTokenizer("FPHam/Karen_theEditor_13b_HF"),
model_path="Karen-The-Editor.ggmlv3.q5_1.bin", # The filename of model. Must end with .bin.
prefix_template=DescriptionTemplates.USER_AI__SHORT,
user_chat_roles=UserChatRoles(
user="USER",
ai="ASSISTANT",
system="SYSTEM",
),
)
airoboros_13b = LlamaCppModel(
name="airoboros-13b-gpt4-GGML",
max_total_tokens=4096, # context tokens (n_ctx)
max_tokens_per_request=2048, # The maximum number of tokens to generate.
token_margin=8,
tokenizer=LlamaTokenizer("jondurbin/airoboros-13b-gpt4"),
model_path="airoboros-13b-gpt4.ggmlv3.q5_1.bin", # The filename of model. Must end with .bin.
prefix_template=DescriptionTemplates.USER_AI__SHORT,
)
selfee_7b = LlamaCppModel(
name="selfee-7B-GGML",
max_total_tokens=4096, # context tokens (n_ctx)
max_tokens_per_request=2048, # The maximum number of tokens to generate.
token_margin=8,
tokenizer=LlamaTokenizer("kaist-ai/selfee-7b-delta"),
model_path="selfee-7b-superhot-8k.ggmlv3.q4_1.bin", # The filename of model. Must end with .bin.
prefix_template=DescriptionTemplates.USER_AI__SHORT,
rope_freq_scale=0.5, # similar to `compress_pos_emb`, but inverse number: (2048 / n_ctx)
rope_freq_base=26000, # need some perplexity test; 26000 is my empirical value for 4096 context tokens
)
llama_7b = LlamaCppModel(
name="llama-7b-GGML",
max_total_tokens=2048, # context tokens (n_ctx)
max_tokens_per_request=1024, # The maximum number of tokens to generate.
token_margin=8,
tokenizer=LlamaTokenizer("HuggingFaceM4/llama-7b-tokenizer"),
model_path="llama-7b.ggmlv3.q5_K_M.bin", # The filename of model. Must end with .bin.
prefix_template=None,
embedding=True,
)
orca_mini_3b = LlamaCppModel(
name="orca_mini_3B-GGML",
max_total_tokens=2048, # context tokens (n_ctx)
max_tokens_per_request=1024, # The maximum number of tokens to generate.
token_margin=8,
tokenizer=LlamaTokenizer("psmathur/orca_mini_3b"),
model_path="orca-mini-3b.ggmlv3.q4_1.bin", # The filename of model. Must end with .bin.
chat_turn_prompt=ChatTurnTemplates.ROLE_CONTENT_2,
user_chat_roles=UserChatRoles(
user="User",
ai="Response",
system="System",
),
)
airoboros_33b = LlamaCppModel(
name="airoboros-33b-gpt4-1.4-GGML",
max_total_tokens=5120, # context tokens (n_ctx)
max_tokens_per_request=2560, # The maximum number of tokens to generate.
token_margin=8,
tokenizer=LlamaTokenizer("jondurbin/airoboros-33b-gpt4-1.4"),
model_path="airoboros-33b-gpt4-1.4.ggmlv3.q3_K_S.bin", # The filename of model. Must end with .bin.
n_gpu_layers=26,
chat_turn_prompt=ChatTurnTemplates.ROLE_CONTENT_5,
user_chat_roles=UserChatRoles(
user="USER",
ai="ASSISTANT",
system="SYSTEM",
),
)
chronos_hermes_13b = LlamaCppModel(
name="TheBloke/chronos-hermes-13B-GGML",
max_total_tokens=4096, # context tokens (n_ctx)
max_tokens_per_request=2048, # The maximum number of tokens to generate.
token_margin=8,
tokenizer=LlamaTokenizer("Austism/chronos-hermes-13b"),
model_path="chronos-hermes-13b.ggmlv3.q4_K_M.bin", # The filename of model. Must end with .bin.
chat_turn_prompt=ChatTurnTemplates.ROLE_CONTENT_1,
user_chat_roles=UserChatRoles(
user="User",
ai="Assistant",
system="System",
),
)
orca_mini_7b = ExllamaModel(
model_path="orca_mini_7b",
name="orca_mini_7b",
max_total_tokens=2048,
max_tokens_per_request=2048,
token_margin=8,
tokenizer=ExllamaTokenizer("orca_mini_7b"),
compress_pos_emb=2.0,
prefix_template=DescriptionTemplates.USER_AI__GAME,
user_chat_roles=UserChatRoles(
user="Player",
ai="Narrator",
system="System",
),
)
longchat_7b = ExllamaModel(
model_path="longchat_7b",
name="longchat_7b",
max_total_tokens=16384,
max_tokens_per_request=8192,
token_margin=8,
tokenizer=ExllamaTokenizer("longchat_7b"),
compress_pos_emb=8.0,
prefix_template=DescriptionTemplates.USER_AI__GAME,
user_chat_roles=UserChatRoles(
user="Player",
ai="Narrator",
system="System",
),
)
@classmethod
def find_model_by_name(cls, name: str) -> LLMModel | None:
for model in cls:
if model.value.name == name or model.name == name:
return model.value
return None
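# Usage sketch (illustrative): both lookups below resolve to the same model,
# since the method matches either the model's `name` field or the enum member name;
# unknown names return None.
#   LLMModels.find_model_by_name("gpt-4")  # matches OpenAIModel.name
#   LLMModels.find_model_by_name("gpt_4")  # matches the enum member name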
| [
"The prompt to use for each chat turn.",
"description"
] |
2024-01-10 | YufengJin/aubo_i5_reinforcement_learning | openai_ros~openai_ros~src~openai_ros~robot_envs~cube_single_disk_env.py | import numpy
import rospy
from openai_ros import robot_gazebo_env
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from nav_msgs.msg import Odometry
class CubeSingleDiskEnv(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all CubeSingleDisk environments.
"""
def __init__(self):
"""Initializes a new CubeSingleDisk environment.
Args:
"""
# Variables that we give through the constructor.
# None in this case
# Internal Vars
self.controllers_list = ['joint_state_controller',
'inertia_wheel_roll_joint_velocity_controller'
]
self.robot_name_space = "moving_cube"
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(CubeSingleDiskEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=True)
"""
To check any topic we need to have the simulation running, so we need to do two things:
1) Unpause the simulation: without that, the stream of data doesn't flow. This is for simulations
that are paused for whatever reason.
2) If the simulation was already running for some reason, we need to reset the controllers.
This has to do with the fact that some plugins with tf don't understand the reset of the simulation
and need to be reset to work properly.
"""
self.gazebo.unpauseSim()
self.controllers_object.reset_controllers()
self._check_all_sensors_ready()
# We Start all the ROS related Subscribers and publishers
rospy.Subscriber("/moving_cube/joint_states", JointState, self._joints_callback)
rospy.Subscriber("/moving_cube/odom", Odometry, self._odom_callback)
self._roll_vel_pub = rospy.Publisher('/moving_cube/inertia_wheel_roll_joint_velocity_controller/command',
Float64, queue_size=1)
self._check_publishers_connection()
self.gazebo.pauseSim()
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
self._check_all_sensors_ready()
return True
# CubeSingleDiskEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
self._check_joint_states_ready()
self._check_odom_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_joint_states_ready(self):
self.joints = None
while self.joints is None and not rospy.is_shutdown():
try:
self.joints = rospy.wait_for_message("/moving_cube/joint_states", JointState, timeout=1.0)
rospy.logdebug("Current moving_cube/joint_states READY=>" + str(self.joints))
except:
rospy.logerr("Current moving_cube/joint_states not ready yet, retrying for getting joint_states")
return self.joints
def _check_odom_ready(self):
self.odom = None
while self.odom is None and not rospy.is_shutdown():
try:
self.odom = rospy.wait_for_message("/moving_cube/odom", Odometry, timeout=1.0)
rospy.logdebug("Current /moving_cube/odom READY=>" + str(self.odom))
except:
rospy.logerr("Current /moving_cube/odom not ready yet, retrying for getting odom")
return self.odom
def _joints_callback(self, data):
self.joints = data
def _odom_callback(self, data):
self.odom = data
def _check_publishers_connection(self):
"""
Checks that all the publishers are working
:return:
"""
rate = rospy.Rate(10) # 10hz
while self._roll_vel_pub.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug("No susbribers to _roll_vel_pub yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
# This is to avoid an error when the world is reset and time goes backwards.
pass
rospy.logdebug("_roll_vel_pub Publisher Connected")
rospy.logdebug("All Publishers READY")
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ----------------------------
def move_joints(self, roll_speed):
joint_speed_value = Float64()
joint_speed_value.data = roll_speed
rospy.logdebug("Single Disk Roll Velocity>>" + str(joint_speed_value))
self._roll_vel_pub.publish(joint_speed_value)
self.wait_until_roll_is_in_vel(joint_speed_value.data)
def wait_until_roll_is_in_vel(self, velocity):
rate = rospy.Rate(10)
start_wait_time = rospy.get_rostime().to_sec()
end_wait_time = 0.0
epsilon = 0.1
v_plus = velocity + epsilon
v_minus = velocity - epsilon
while not rospy.is_shutdown():
joint_data = self._check_joint_states_ready()
roll_vel = joint_data.velocity[0]
rospy.logdebug("VEL=" + str(roll_vel) + ", ?RANGE=[" + str(v_minus) + ","+str(v_plus)+"]")
are_close = (roll_vel <= v_plus) and (roll_vel > v_minus)
if are_close:
rospy.logdebug("Reached Velocity!")
end_wait_time = rospy.get_rostime().to_sec()
break
rospy.logdebug("Not there yet, keep waiting...")
rate.sleep()
delta_time = end_wait_time - start_wait_time
rospy.logdebug("[Wait Time=" + str(delta_time)+"]")
return delta_time
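# e.g. (illustrative): move_joints(5.0) publishes 5.0 rad/s, then
# wait_until_roll_is_in_vel(5.0) blocks until the measured roll velocity falls
# inside (5.0 - 0.1, 5.0 + 0.1] and returns the elapsed wait time in seconds.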
def get_joints(self):
return self.joints
def get_odom(self):
return self.odom | [] |
2024-01-10 | YufengJin/aubo_i5_reinforcement_learning | openai_ros~openai_ros~src~openai_ros~robot_gazebo_env.py | import rospy
import gym
from gym.utils import seeding
from .gazebo_connection import GazeboConnection
from .controllers_connection import ControllersConnection
#https://bitbucket.org/theconstructcore/theconstruct_msgs/src/master/msg/RLExperimentInfo.msg
from openai_ros.msg import RLExperimentInfo
# https://github.com/openai/gym/blob/master/gym/core.py
class RobotGazeboEnv(gym.Env):
def __init__(self, robot_name_space, controllers_list, reset_controls, start_init_physics_parameters=True, reset_world_or_sim="SIMULATION"):
# To reset Simulations
rospy.logdebug("START init RobotGazeboEnv")
self.gazebo = GazeboConnection(start_init_physics_parameters,reset_world_or_sim)
self.controllers_object = ControllersConnection(namespace=robot_name_space, controllers_list=controllers_list)
self.reset_controls = reset_controls
self.seed()
# Set up ROS related variables
self.episode_num = 0
self.cumulated_episode_reward = 0
self.reward_pub = rospy.Publisher('/openai/reward', RLExperimentInfo, queue_size=1)
rospy.logdebug("END init RobotGazeboEnv")
# Env methods
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
"""
Function executed each time step.
Here we take the action, execute it in a time step, and retrieve the
observations generated by that action.
:param action:
:return: obs, reward, done, info
"""
"""
Here we should convert the action number to a movement action, execute it in the
simulation, and get the observations that result from performing that action.
"""
rospy.logdebug("START STEP OpenAIROS")
self.gazebo.unpauseSim()
self._set_action(action)
self.gazebo.pauseSim()
obs = self._get_obs()
done = self._is_done(obs)
info = {}
reward = self._compute_reward(obs, done)
self.cumulated_episode_reward += reward
rospy.logdebug("END STEP OpenAIROS")
return obs, reward, done, info
def reset(self):
rospy.logdebug("Reseting RobotGazeboEnvironment")
self._reset_sim()
self._init_env_variables()
self._update_episode()
obs = self._get_obs()
rospy.logdebug("END Reseting RobotGazeboEnvironment")
return obs
def close(self):
"""
Function executed when closing the environment.
Use it for closing GUIS and other systems that need closing.
:return:
"""
rospy.logdebug("Closing RobotGazeboEnvironment")
rospy.signal_shutdown("Closing RobotGazeboEnvironment")
def _update_episode(self):
"""
Publishes the cumulated reward of the episode and
increases the episode number by one.
:return:
"""
self._publish_reward_topic(
self.cumulated_episode_reward,
self.episode_num
)
self.episode_num += 1
self.cumulated_episode_reward = 0
def _publish_reward_topic(self, reward, episode_number=1):
"""
This function publishes the given reward in the reward topic for
easy access from ROS infrastructure.
:param reward:
:param episode_number:
:return:
"""
reward_msg = RLExperimentInfo()
reward_msg.episode_number = episode_number
reward_msg.episode_reward = reward
self.reward_pub.publish(reward_msg)
# Extension methods
# ----------------------------
def _reset_sim(self):
"""Resets a simulation
"""
rospy.logdebug("START robot gazebo _reset_sim")
if self.reset_controls :
self.gazebo.unpauseSim()
self.controllers_object.reset_controllers()
self._check_all_systems_ready()
self._set_init_pose()
self.gazebo.pauseSim()
self.gazebo.resetSim()
self.gazebo.unpauseSim()
self.controllers_object.reset_controllers()
self._check_all_systems_ready()
self.gazebo.pauseSim()
else:
self.gazebo.unpauseSim()
self._check_all_systems_ready()
self._set_init_pose()
self.gazebo.pauseSim()
self.gazebo.resetSim()
self.gazebo.unpauseSim()
self._check_all_systems_ready()
self.gazebo.pauseSim()
rospy.logdebug("END robot gazebo _reset_sim")
return True
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
raise NotImplementedError()
def _get_obs(self):
"""Returns the observation.
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _is_done(self, observations):
"""Indicates whether or not the episode is done ( the robot has fallen for example).
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _env_setup(self, initial_qpos):
"""Initial configuration of the environment. Can be used to configure initial state
and extract information from the simulation.
"""
raise NotImplementedError()
| [] |
2024-01-10 | YufengJin/aubo_i5_reinforcement_learning | openai_ros~openai_ros~src~openai_ros~robot_envs~turtlebot2_env.py | import numpy
import rospy
import time
from openai_ros import robot_gazebo_env
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from sensor_msgs.msg import Image
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import PointCloud2
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
class TurtleBot2Env(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all CubeSingleDisk environments.
"""
def __init__(self):
"""
Initializes a new TurtleBot2Env environment.
Turtlebot2 doesnt use controller_manager, therefore we wont reset the
controllers in the standard fashion. For the moment we wont reset them.
To check any topic we need to have the simulations running, we need to do two things:
1) Unpause the simulation: without that th stream of data doesnt flow. This is for simulations
that are pause for whatever the reason
2) If the simulation was running already for some reason, we need to reset the controlers.
This has to do with the fact that some plugins with tf, dont understand the reset of the simulation
and need to be reseted to work properly.
The Sensors: The sensors accesible are the ones considered usefull for AI learning.
Sensor Topic List:
* /odom : Odometry readings of the Base of the Robot
* /camera/depth/image_raw: 2d Depth image of the depth sensor.
* /camera/depth/points: Pointcloud sensor readings
* /camera/rgb/image_raw: RGB camera
* /kobuki/laser/scan: Laser Readings
Actuators Topic List: /cmd_vel,
Args:
"""
rospy.logdebug("Start TurtleBot2Env INIT...")
# Variables that we give through the constructor.
# None in this case
# Internal Vars
# Doesnt have any accesibles
self.controllers_list = []
# It doesnt use namespace
self.robot_name_space = ""
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(TurtleBot2Env, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False,
reset_world_or_sim="WORLD")
self.gazebo.unpauseSim()
#self.controllers_object.reset_controllers()
self._check_all_sensors_ready()
# We Start all the ROS related Subscribers and publishers
rospy.Subscriber("/odom", Odometry, self._odom_callback)
rospy.Subscriber("/camera/depth/image_raw", Image, self._camera_depth_image_raw_callback)
rospy.Subscriber("/camera/depth/points", PointCloud2, self._camera_depth_points_callback)
rospy.Subscriber("/camera/rgb/image_raw", Image, self._camera_rgb_image_raw_callback)
rospy.Subscriber("/kobuki/laser/scan", LaserScan, self._laser_scan_callback)
self._cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
self._check_publishers_connection()
self.gazebo.pauseSim()
rospy.logdebug("Finished TurtleBot2Env INIT...")
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
self._check_all_sensors_ready()
return True
# CubeSingleDiskEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
rospy.logdebug("START ALL SENSORS READY")
self._check_odom_ready()
# We dont need to check for the moment, takes too long
#self._check_camera_depth_image_raw_ready()
#self._check_camera_depth_points_ready()
#self._check_camera_rgb_image_raw_ready()
self._check_laser_scan_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_odom_ready(self):
self.odom = None
rospy.logdebug("Waiting for /odom to be READY...")
while self.odom is None and not rospy.is_shutdown():
try:
self.odom = rospy.wait_for_message("/odom", Odometry, timeout=5.0)
rospy.logdebug("Current /odom READY=>")
except:
rospy.logerr("Current /odom not ready yet, retrying for getting odom")
return self.odom
def _check_camera_depth_image_raw_ready(self):
self.camera_depth_image_raw = None
rospy.logdebug("Waiting for /camera/depth/image_raw to be READY...")
while self.camera_depth_image_raw is None and not rospy.is_shutdown():
try:
self.camera_depth_image_raw = rospy.wait_for_message("/camera/depth/image_raw", Image, timeout=5.0)
rospy.logdebug("Current /camera/depth/image_raw READY=>")
except:
rospy.logerr("Current /camera/depth/image_raw not ready yet, retrying for getting camera_depth_image_raw")
return self.camera_depth_image_raw
def _check_camera_depth_points_ready(self):
self.camera_depth_points = None
rospy.logdebug("Waiting for /camera/depth/points to be READY...")
while self.camera_depth_points is None and not rospy.is_shutdown():
try:
self.camera_depth_points = rospy.wait_for_message("/camera/depth/points", PointCloud2, timeout=10.0)
rospy.logdebug("Current /camera/depth/points READY=>")
except:
rospy.logerr("Current /camera/depth/points not ready yet, retrying for getting camera_depth_points")
return self.camera_depth_points
def _check_camera_rgb_image_raw_ready(self):
self.camera_rgb_image_raw = None
rospy.logdebug("Waiting for /camera/rgb/image_raw to be READY...")
while self.camera_rgb_image_raw is None and not rospy.is_shutdown():
try:
self.camera_rgb_image_raw = rospy.wait_for_message("/camera/rgb/image_raw", Image, timeout=5.0)
rospy.logdebug("Current /camera/rgb/image_raw READY=>")
except:
rospy.logerr("Current /camera/rgb/image_raw not ready yet, retrying for getting camera_rgb_image_raw")
return self.camera_rgb_image_raw
def _check_laser_scan_ready(self):
self.laser_scan = None
rospy.logdebug("Waiting for /kobuki/laser/scan to be READY...")
while self.laser_scan is None and not rospy.is_shutdown():
try:
self.laser_scan = rospy.wait_for_message("/kobuki/laser/scan", LaserScan, timeout=5.0)
rospy.logdebug("Current /kobuki/laser/scan READY=>")
except:
rospy.logerr("Current /kobuki/laser/scan not ready yet, retrying for getting laser_scan")
return self.laser_scan
def _odom_callback(self, data):
self.odom = data
def _camera_depth_image_raw_callback(self, data):
self.camera_depth_image_raw = data
def _camera_depth_points_callback(self, data):
self.camera_depth_points = data
def _camera_rgb_image_raw_callback(self, data):
self.camera_rgb_image_raw = data
def _laser_scan_callback(self, data):
self.laser_scan = data
def _check_publishers_connection(self):
"""
Checks that all the publishers are working
:return:
"""
rate = rospy.Rate(10) # 10hz
while self._cmd_vel_pub.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug("No susbribers to _cmd_vel_pub yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
# This is to avoid error when world is rested, time when backwards.
pass
rospy.logdebug("_cmd_vel_pub Publisher Connected")
rospy.logdebug("All Publishers READY")
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ----------------------------
def move_base(self, linear_speed, angular_speed, epsilon=0.05, update_rate=10, min_laser_distance=-1):
"""
It will move the base based on the linear and angular speeds given.
It will wait untill those twists are achived reading from the odometry topic.
:param linear_speed: Speed in the X axis of the robot base frame
:param angular_speed: Speed of the angular turning of the robot base frame
:param epsilon: Acceptable difference between the speed asked and the odometry readings
:param update_rate: Rate at which we check the odometry.
:return:
"""
cmd_vel_value = Twist()
cmd_vel_value.linear.x = linear_speed
cmd_vel_value.angular.z = angular_speed
rospy.logdebug("TurtleBot2 Base Twist Cmd>>" + str(cmd_vel_value))
self._check_publishers_connection()
self._cmd_vel_pub.publish(cmd_vel_value)
time.sleep(0.1)
"""
self.wait_until_twist_achieved(cmd_vel_value,
epsilon,
update_rate,
min_laser_distance)
"""
def wait_until_twist_achieved(self, cmd_vel_value, epsilon, update_rate, min_laser_distance=-1):
"""
We wait for the cmd_vel twist given to be reached by the robot reading
from the odometry.
:param cmd_vel_value: Twist we want to wait to reach.
:param epsilon: Error acceptable in odometry readings.
:param update_rate: Rate at which we check the odometry.
:return:
"""
rospy.logwarn("START wait_until_twist_achieved...")
rate = rospy.Rate(update_rate)
start_wait_time = rospy.get_rostime().to_sec()
end_wait_time = 0.0
epsilon = 0.05
rospy.logdebug("Desired Twist Cmd>>" + str(cmd_vel_value))
rospy.logdebug("epsilon>>" + str(epsilon))
linear_speed = cmd_vel_value.linear.x
angular_speed = cmd_vel_value.angular.z
linear_speed_plus = linear_speed + epsilon
linear_speed_minus = linear_speed - epsilon
angular_speed_plus = angular_speed + epsilon
angular_speed_minus = angular_speed - epsilon
while not rospy.is_shutdown():
crashed_into_something = self.has_crashed(min_laser_distance)
current_odometry = self._check_odom_ready()
odom_linear_vel = current_odometry.twist.twist.linear.x
odom_angular_vel = current_odometry.twist.twist.angular.z
rospy.logdebug("Linear VEL=" + str(odom_linear_vel) + ", ?RANGE=[" + str(linear_speed_minus) + ","+str(linear_speed_plus)+"]")
rospy.logdebug("Angular VEL=" + str(odom_angular_vel) + ", ?RANGE=[" + str(angular_speed_minus) + ","+str(angular_speed_plus)+"]")
linear_vel_are_close = (odom_linear_vel <= linear_speed_plus) and (odom_linear_vel > linear_speed_minus)
angular_vel_are_close = (odom_angular_vel <= angular_speed_plus) and (odom_angular_vel > angular_speed_minus)
if linear_vel_are_close and angular_vel_are_close:
rospy.logwarn("Reached Velocity!")
end_wait_time = rospy.get_rostime().to_sec()
break
if crashed_into_something:
rospy.logerr("TurtleBot has crashed, stopping movement!")
break
rospy.logwarn("Not there yet, keep waiting...")
rate.sleep()
delta_time = end_wait_time- start_wait_time
rospy.logdebug("[Wait Time=" + str(delta_time)+"]")
rospy.logwarn("END wait_until_twist_achieved...")
return delta_time
def has_crashed(self, min_laser_distance):
"""
It states based on the laser scan if the robot has crashed or not.
Crashed means that the minimum laser reading is lower than the
min_laser_distance value given.
If min_laser_distance == -1, it returns always false, because its the way
to deactivate this check.
"""
robot_has_crashed = False
if min_laser_distance != -1:
laser_data = self.get_laser_scan()
for i, item in enumerate(laser_data.ranges):
if item == float ('Inf') or numpy.isinf(item):
pass
elif numpy.isnan(item):
pass
else:
# Has a Non Infinite or Nan Value
if (item < min_laser_distance):
rospy.logerr("TurtleBot HAS CRASHED >>> item=" + str(item)+"< "+str(min_laser_distance))
robot_has_crashed = True
break
return robot_has_crashed
def get_odom(self):
return self.odom
def get_camera_depth_image_raw(self):
return self.camera_depth_image_raw
def get_camera_depth_points(self):
return self.camera_depth_points
def get_camera_rgb_image_raw(self):
return self.camera_rgb_image_raw
def get_laser_scan(self):
return self.laser_scan
| [] |
2024-01-10 | YufengJin/aubo_i5_reinforcement_learning | aubo_openai_example~scripts~aubo_env.py | #! /usr/bin/env python
import numpy
import rospy
import geometry_msgs.msg
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState, Image
from nav_msgs.msg import Odometry
from geometry_msgs.msg import PoseStamped
from aubo_moveit_config.aubo_commander import AuboCommander
from openai_ros import robot_gazebo_env
class AuboEnv(robot_gazebo_env.RobotGazeboEnv):
def __init__(self):
"""
Initializes a new aubo environment.
To check any topic we need to have the simulations running, we need to do two things:
1) Unpause the simulation: without that th stream of data doesnt flow. This is for simulations
that are pause for whatever the reason
2) If the simulation was running already for some reason, we need to reset the controlers.
This has to do with the fact that some plugins with tf, dont understand the reset of the simulation
and need to be reseted to work properly.
The Sensors: The sensors accesible are the ones considered usefull for AI learning.
Sensor Topic List:
* /aubo_i5/camera/image
Actuators Topic List:
* /aubo_i5/arm_controller/follow_joint_trajectory
* /aubo_i5/gripper_controller/gripper_cmd
Args:
"""
rospy.logdebug("Start AuboEnv INIT...")
JOINT_STATES_SUBSCRIBER = '/joint_states'
GIPPER_IMAGE_SUBSCRIBER = '/camera/image_raw'
self.joint_states_sub = rospy.Subscriber(JOINT_STATES_SUBSCRIBER, JointState, self.joints_callback)
self.joints = JointState()
self.grippper_camera_image_raw = rospy.Subscriber(GIPPER_IMAGE_SUBSCRIBER, Image, self.gripper_camera_callback)
self.grippper_camera_image_raw = Image()
self.controllers_list = []
self.aubo_commander = AuboCommander()
self.setup_planning_scene()
# It doesnt use namespace
self.robot_name_space = ""
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(AuboEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False,
reset_world_or_sim="WORLD")
def setup_planning_scene(self):
# add table mesh in scene planning, avoiding to collosion
rospy.sleep(2)
p = PoseStamped()
p.header.frame_id = self.aubo_commander.robot.get_planning_frame()
p.pose.position.x = 0.75
p.pose.position.y = 0.
p.pose.position.z = 0.386
self.aubo_commander.scene.add_box("table",p,(0.91,0.91,0.77))
def joints_callback(self, data):
# get joint_states
self.joints = data
def gripper_camera_callback(self, data):
#get camera raw
self.grippper_camera_image_raw = data
def _check_all_systems_ready(self):
"""
Checks joint_state_publisher and camera topic , publishers and other simulation systems are
operational.
"""
self._check_all_sensors_ready()
return True
def _check_all_sensors_ready(self):
self._check_joint_states_ready()
self._check_gripper_camera_image_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_joint_states_ready(self):
self.joints = None
while self.joints is None and not rospy.is_shutdown():
try:
self.joints = rospy.wait_for_message("/joint_states", JointState, timeout=1.0)
rospy.logdebug("Current /joint_states READY=>" + str(self.joints))
except:
rospy.logerr("Current /joint_states not ready yet, retrying for getting joint_states")
return self.joints
def _check_gripper_camera_image_ready(self):
self.grippper_camera_image_raw = None
while self.grippper_camera_image_raw is None and not rospy.is_shutdown():
try:
self.grippper_camera_image_raw = rospy.wait_for_message("/camera/image_raw", Image , timeout=1.0)
rospy.logdebug("Current /camera/image_raw READY" )
except:
rospy.logerr("Current /camera/image_raw not ready yet, retrying for getting image_raw")
return self.grippper_camera_image_raw
def get_joints(self):
return self.joints
def set_trajectory_ee(self, position):
"""
Sets the enf effector position and orientation
"""
ee_pose = geometry_msgs.msg.Pose()
ee_pose.position.x = position[0]
ee_pose.position.y = position[1]
ee_pose.position.z = position[2]
ee_pose.orientation.x = 0.0
ee_pose.orientation.y = 1.0
ee_pose.orientation.z = 0.0
ee_pose.orientation.w = 0.0
return self.aubo_commander.move_ee_to_pose(ee_pose)
def set_trajectory_joints(self, arm_joints):
"""
Helper function.
Wraps an action vector of joint angles into a JointTrajectory message.
The velocities, accelerations, and effort do not control the arm motion
"""
position = [None] * 6
position[0] = arm_joints["shoulder_joint"]
position[1] = arm_joints["upperArm_joint"]
position[2] = arm_joints["foreArm_joint"]
position[3] = arm_joints["wrist1_joint"]
position[4] = arm_joints["wrist2_joint"]
position[5] = arm_joints["wrist3_joint"]
return self.aubo_commander.move_joints_traj(position)
def get_ee_pose(self):
gripper_pose = self.aubo_commander.get_ee_pose()
return gripper_pose
def get_ee_rpy(self):
gripper_rpy = self.aubo_commander.get_ee_rpy()
return gripper_rpy
def set_ee(self, value):
return self.aubo_commander.execut_ee(value)
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
| [] |
2024-01-10 | YufengJin/aubo_i5_reinforcement_learning | openai_ros~openai_ros~src~openai_ros~task_envs~cartpole_stay_up~stay_up.py | from gym import utils
from openai_ros.robot_envs import cartpole_env
from gym.envs.registration import register
from gym import error, spaces
import rospy
import math
import numpy as np
# The path is __init__.py of openai_ros, where we import the MovingCubeOneDiskWalkEnv directly
register(
id='CartPoleStayUp-v0',
entry_point='openai_ros:task_envs.cartpole_stay_up.stay_up.CartPoleStayUpEnv',
timestep_limit=1000,
)
class CartPoleStayUpEnv(cartpole_env.CartPoleEnv):
def __init__(self):
self.get_params()
self.action_space = spaces.Discrete(self.n_actions)
high = np.array([
2.5 * 2,
np.finfo(np.float32).max,
0.7 * 2,
np.finfo(np.float32).max])
self.observation_space = spaces.Box(-high, high)
cartpole_env.CartPoleEnv.__init__(
self, control_type=self.control_type
)
def get_params(self):
#get configuration parameters
self.n_actions = rospy.get_param('/cartpole_v0/n_actions')
self.min_pole_angle = rospy.get_param('/cartpole_v0/min_pole_angle')
self.max_pole_angle = rospy.get_param('/cartpole_v0/max_pole_angle')
self.max_base_velocity = rospy.get_param('/cartpole_v0/max_base_velocity')
self.min_base_pose_x = rospy.get_param('/cartpole_v0/min_base_pose_x')
self.max_base_pose_x = rospy.get_param('/cartpole_v0/max_base_pose_x')
self.pos_step = rospy.get_param('/cartpole_v0/pos_step')
self.running_step = rospy.get_param('/cartpole_v0/running_step')
self.init_pos = rospy.get_param('/cartpole_v0/init_pos')
self.wait_time = rospy.get_param('/cartpole_v0/wait_time')
self.control_type = rospy.get_param('/cartpole_v0/control_type')
def _set_action(self, action):
# Take action
if action == 0: #LEFT
rospy.loginfo("GO LEFT...")
self.pos[0] -= self.pos_step
elif action == 1: #RIGHT
rospy.loginfo("GO RIGHT...")
self.pos[0] += self.pos_step
elif action == 2: #LEFT BIG
rospy.loginfo("GO LEFT BIG...")
self.pos[0] -= self.pos_step * 10
elif action == 3: #RIGHT BIG
rospy.loginfo("GO RIGHT BIG...")
self.pos[0] += self.pos_step * 10
# Apply action to simulation.
rospy.loginfo("MOVING TO POS=="+str(self.pos))
# 1st: unpause simulation
#rospy.logdebug("Unpause SIM...")
#self.gazebo.unpauseSim()
self.move_joints(self.pos)
rospy.logdebug("Wait for some time to execute movement, time="+str(self.running_step))
rospy.sleep(self.running_step) #wait for some time
rospy.logdebug("DONE Wait for some time to execute movement, time=" + str(self.running_step))
# 3rd: pause simulation
#rospy.logdebug("Pause SIM...")
#self.gazebo.pauseSim()
def _get_obs(self):
data = self.joints
# base_postion base_velocity pole angle pole velocity
#obs = [round(data.position[1],1), round(data.velocity[1],1), round(data.position[0],1), round(data.velocity[0],1)]
obs = [data.position[1], data.velocity[1], data.position[0], data.velocity[0]]
return np.array(obs)
def _is_done(self, observations):
done = False
data = self.joints
rospy.loginfo("BASEPOSITION=="+str(observations[0]))
rospy.loginfo("POLE ANGLE==" + str(observations[2]))
if (self.min_base_pose_x >= observations[0] or observations[0] >= self.max_base_pose_x): #check if the base is still within the ranges of (-2, 2)
rospy.logerr("Base Outside Limits==>min="+str(self.min_base_pose_x)+",pos="+str(observations[0])+",max="+str(self.max_base_pose_x))
done = True
if (self.min_pole_angle >= observations[2] or observations[2] >= self.max_pole_angle): #check if pole has toppled over
rospy.logerr(
"Pole Angle Outside Limits==>min=" + str(self.min_pole_angle) + ",pos=" + str(observations[2]) + ",max=" + str(
self.max_pole_angle))
done = True
rospy.loginfo("FINISHED get _is_done")
return done
def _compute_reward(self, observations, done):
"""
Gives more points for staying upright, gets data from given observations to avoid
having different data than other previous functions
:return:reward
"""
rospy.logdebug("START _compute_reward")
if not done:
reward = 1.0
elif self.steps_beyond_done is None:
# Pole just fell!
self.steps_beyond_done = 0
reward = 1.0
else:
if self.steps_beyond_done == 0:
logger.warning("You are calling 'step()' even though this environment has already returned done = True. You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior.")
self.steps_beyond_done += 1
reward = 0.0
rospy.logdebug("END _compute_reward")
return reward
def _init_env_variables(self):
"""
Inits variables needed to be initialised each time we reset at the start
of an episode.
:return:
"""
self.steps_beyond_done = None
def _set_init_pose(self):
"""
Sets joints to initial position [0,0,0]
:return:
"""
self.check_publishers_connection()
# Reset Internal pos variable
self.init_internal_vars(self.init_pos)
self.move_joints(self.pos)
| [] |
2024-01-10 | YufengJin/aubo_i5_reinforcement_learning | openai_ros~openai_ros~src~openai_ros~task_envs~turtlebot2~turtlebot2_maze.py | import rospy
import numpy
import time
from gym import spaces
from openai_ros.robot_envs import turtlebot2_env
from gym.envs.registration import register
from sensor_msgs.msg import LaserScan
from std_msgs.msg import Header
# The path is __init__.py of openai_ros, where we import the TurtleBot2MazeEnv directly
timestep_limit_per_episode = 10000 # Can be any Value
register(
id='TurtleBot2Maze-v0',
entry_point='openai_ros:task_envs.turtlebot2.turtlebot2_maze.TurtleBot2MazeEnv',
timestep_limit=timestep_limit_per_episode,
)
class TurtleBot2MazeEnv(turtlebot2_env.TurtleBot2Env):
def __init__(self):
"""
This Task Env is designed for having the TurtleBot2 in some kind of maze.
It will learn how to move around the maze without crashing.
"""
# Only variable needed to be set here
number_actions = rospy.get_param('/turtlebot2/n_actions')
self.action_space = spaces.Discrete(number_actions)
# We set the reward range, which is not compulsory but here we do it.
self.reward_range = (-numpy.inf, numpy.inf)
#number_observations = rospy.get_param('/turtlebot2/n_observations')
"""
We set the Observation space for the 6 observations
cube_observations = [
round(current_disk_roll_vel, 0),
round(y_distance, 1),
round(roll, 1),
round(pitch, 1),
round(y_linear_speed,1),
round(yaw, 1),
]
"""
# Actions and Observations
self.dec_obs = rospy.get_param("/turtlebot2/number_decimals_precision_obs", 1)
self.linear_forward_speed = rospy.get_param('/turtlebot2/linear_forward_speed')
self.linear_turn_speed = rospy.get_param('/turtlebot2/linear_turn_speed')
self.angular_speed = rospy.get_param('/turtlebot2/angular_speed')
self.init_linear_forward_speed = rospy.get_param('/turtlebot2/init_linear_forward_speed')
self.init_linear_turn_speed = rospy.get_param('/turtlebot2/init_linear_turn_speed')
self.new_ranges = rospy.get_param('/turtlebot2/new_ranges')
self.min_range = rospy.get_param('/turtlebot2/min_range')
self.max_laser_value = rospy.get_param('/turtlebot2/max_laser_value')
self.min_laser_value = rospy.get_param('/turtlebot2/min_laser_value')
# Here we will add any init functions prior to starting the MyRobotEnv
super(TurtleBot2MazeEnv, self).__init__()
# We create two arrays based on the binary values that will be assigned
# In the discretization method.
laser_scan = self._check_laser_scan_ready()
rospy.logdebug("laser_scan len===>"+str(len(laser_scan.ranges)))
# Laser data
self.laser_scan_frame = laser_scan.header.frame_id
# This is the length that the dicretised observations array will have
# Because 0 also counts it will have +1
num_laser_readings = (len(laser_scan.ranges)/self.new_ranges)
rospy.logdebug("num_laser_readings len===>"+str(num_laser_readings))
rospy.set_param('/turtlebot2/n_observations', num_laser_readings)
aux = rospy.get_param('/turtlebot2/n_observations')
rospy.logfatal("aux===>"+str(aux))
high = numpy.full((num_laser_readings), self.max_laser_value)
low = numpy.full((num_laser_readings), self.min_laser_value)
# We only use two integers
self.observation_space = spaces.Box(low, high)
rospy.logdebug("ACTION SPACES TYPE===>"+str(self.action_space))
rospy.logdebug("OBSERVATION SPACES TYPE===>"+str(self.observation_space))
# Rewards
self.forwards_reward = rospy.get_param("/turtlebot2/forwards_reward")
self.turn_reward = rospy.get_param("/turtlebot2/turn_reward")
self.end_episode_points = rospy.get_param("/turtlebot2/end_episode_points")
self.cumulated_steps = 0.0
self.laser_filtered_pub = rospy.Publisher('/turtlebot2/laser/scan_filtered', LaserScan, queue_size=1)
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
self.move_base( self.init_linear_forward_speed,
self.init_linear_turn_speed,
epsilon=0.05,
update_rate=10,
min_laser_distance=-1)
return True
def _init_env_variables(self):
"""
Inits variables needed to be initialised each time we reset at the start
of an episode.
:return:
"""
# For Info Purposes
self.cumulated_reward = 0.0
# Set to false Done, because its calculated asyncronously
self._episode_done = False
# We wait a small ammount of time to start everything because in very fast resets, laser scan values are sluggish
# and sometimes still have values from the prior position that triguered the done.
time.sleep(0.2)
def _set_action(self, action):
"""
This set action will Set the linear and angular speed of the turtlebot2
based on the action number given.
:param action: The action integer that set s what movement to do next.
"""
rospy.logdebug("Start Set Action ==>"+str(action))
# We convert the actions to speed movements to send to the parent class CubeSingleDiskEnv
if action == 0: #FORWARD
linear_speed = self.linear_forward_speed
angular_speed = 0.0
self.last_action = "FORWARDS"
elif action == 1: #LEFT
linear_speed = self.linear_turn_speed
angular_speed = self.angular_speed
self.last_action = "TURN_LEFT"
elif action == 2: #RIGHT
linear_speed = self.linear_turn_speed
angular_speed = -1*self.angular_speed
self.last_action = "TURN_RIGHT"
# We tell TurtleBot2 the linear and angular speed to set to execute
self.move_base( linear_speed,
angular_speed,
epsilon=0.05,
update_rate=10,
min_laser_distance=self.min_range)
rospy.logdebug("END Set Action ==>"+str(action))
def _get_obs(self):
"""
Here we define what sensor data defines our robots observations
To know which Variables we have acces to, we need to read the
TurtleBot2Env API DOCS
:return:
"""
rospy.logdebug("Start Get Observation ==>")
# We get the laser scan data
laser_scan = self.get_laser_scan()
discretized_observations = self.discretize_observation( laser_scan,
self.new_ranges
)
rospy.logdebug("Observations==>"+str(discretized_observations))
rospy.logdebug("END Get Observation ==>")
return discretized_observations
def _is_done(self, observations):
if self._episode_done:
rospy.logerr("TurtleBot2 is Too Close to wall==>")
else:
rospy.logerr("TurtleBot2 is Ok ==>")
return self._episode_done
def _compute_reward(self, observations, done):
if not done:
if self.last_action == "FORWARDS":
reward = self.forwards_reward
else:
reward = self.turn_reward
else:
reward = -1*self.end_episode_points
rospy.logdebug("reward=" + str(reward))
self.cumulated_reward += reward
rospy.logdebug("Cumulated_reward=" + str(self.cumulated_reward))
self.cumulated_steps += 1
rospy.logdebug("Cumulated_steps=" + str(self.cumulated_steps))
return reward
# Internal TaskEnv Methods
def discretize_observation(self,data,new_ranges):
"""
Discards all the laser readings that are not multiple in index of new_ranges
value.
"""
self._episode_done = False
discretized_ranges = []
filtered_range = []
#mod = len(data.ranges)/new_ranges
mod = new_ranges
max_laser_value = data.range_max
min_laser_value = data.range_min
rospy.logdebug("data=" + str(data))
rospy.logwarn("mod=" + str(mod))
for i, item in enumerate(data.ranges):
if (i%mod==0):
if item == float ('Inf') or numpy.isinf(item):
#discretized_ranges.append(self.max_laser_value)
discretized_ranges.append(round(max_laser_value,self.dec_obs))
elif numpy.isnan(item):
#discretized_ranges.append(self.min_laser_value)
discretized_ranges.append(round(min_laser_value,self.dec_obs))
else:
#discretized_ranges.append(int(item))
discretized_ranges.append(round(item,self.dec_obs))
if (self.min_range > item > 0):
rospy.logerr("done Validation >>> item=" + str(item)+"< "+str(self.min_range))
self._episode_done = True
else:
rospy.logwarn("NOT done Validation >>> item=" + str(item)+"< "+str(self.min_range))
# We add last value appended
filtered_range.append(discretized_ranges[-1])
else:
# We add value zero
filtered_range.append(0.1)
rospy.logdebug("Size of observations, discretized_ranges==>"+str(len(discretized_ranges)))
self.publish_filtered_laser_scan( laser_original_data=data,
new_filtered_laser_range=discretized_ranges)
return discretized_ranges
def publish_filtered_laser_scan(self, laser_original_data, new_filtered_laser_range):
rospy.logdebug("new_filtered_laser_range==>"+str(new_filtered_laser_range))
laser_filtered_object = LaserScan()
h = Header()
h.stamp = rospy.Time.now() # Note you need to call rospy.init_node() before this will work
h.frame_id = laser_original_data.header.frame_id
laser_filtered_object.header = h
laser_filtered_object.angle_min = laser_original_data.angle_min
laser_filtered_object.angle_max = laser_original_data.angle_max
new_angle_incr = abs(laser_original_data.angle_max - laser_original_data.angle_min) / len(new_filtered_laser_range)
#laser_filtered_object.angle_increment = laser_original_data.angle_increment
laser_filtered_object.angle_increment = new_angle_incr
laser_filtered_object.time_increment = laser_original_data.time_increment
laser_filtered_object.scan_time = laser_original_data.scan_time
laser_filtered_object.range_min = laser_original_data.range_min
laser_filtered_object.range_max = laser_original_data.range_max
laser_filtered_object.ranges = []
laser_filtered_object.intensities = []
for item in new_filtered_laser_range:
if item == 0.0:
laser_distance = 0.1
else:
laser_distance = item
laser_filtered_object.ranges.append(laser_distance)
laser_filtered_object.intensities.append(item)
self.laser_filtered_pub.publish(laser_filtered_object)
| [] |
2024-01-10 | YufengJin/aubo_i5_reinforcement_learning | aubo_openai_example~scripts~start_qlearning.py | #!/usr/bin/env python
import gym
import numpy
import time
import qlearn
from gym import wrappers
# ROS packages required
import rospy
import rospkg
# import our training environment
from openai_ros.task_envs.sawyer import learn_to_touch_cube
if __name__ == '__main__':
rospy.init_node('sawyer_learn_to_pick_cube_qlearn', anonymous=True, log_level=rospy.WARN)
# Create the Gym environment
env = gym.make('SawyerTouchCube-v0')
rospy.loginfo("Gym environment done")
# Set the logging system
rospack = rospkg.RosPack()
pkg_path = rospack.get_path('my_sawyer_openai_example')
outdir = pkg_path + '/training_results'
env = wrappers.Monitor(env, outdir, force=True)
rospy.loginfo("Monitor Wrapper started")
last_time_steps = numpy.ndarray(0)
# Loads parameters from the ROS param server
# Parameters are stored in a yaml file inside the config directory
# They are loaded at runtime by the launch file
Alpha = rospy.get_param("/sawyer/alpha")
Epsilon = rospy.get_param("/sawyer/epsilon")
Gamma = rospy.get_param("/sawyer/gamma")
epsilon_discount = rospy.get_param("/sawyer/epsilon_discount")
nepisodes = rospy.get_param("/sawyer/nepisodes")
nsteps = rospy.get_param("/sawyer/nsteps")
# Initialises the algorithm that we are going to use for learning
qlearn = qlearn.QLearn(actions=range(env.action_space.n),
alpha=Alpha, gamma=Gamma, epsilon=Epsilon)
initial_epsilon = qlearn.epsilon
start_time = time.time()
highest_reward = 0
# Starts the main training loop: the one about the episodes to do
for x in range(nepisodes):
rospy.logdebug("############### START EPISODE=>" + str(x))
cumulated_reward = 0
done = False
if qlearn.epsilon > 0.05:
qlearn.epsilon *= epsilon_discount
# Initialize the environment and get first state of the robot
observation = env.reset()
state = ''.join(map(str, observation))
# Show on screen the actual situation of the robot
# env.render()
# for each episode, we test the robot for nsteps
for i in range(nsteps):
rospy.logwarn("############### Start Step=>" + str(i))
# Pick an action based on the current state
action = qlearn.chooseAction(state)
rospy.logwarn("Next action is:%d", action)
# Execute the action in the environment and get feedback
observation, reward, done, info = env.step(action)
rospy.logwarn(str(observation) + " " + str(reward))
cumulated_reward += reward
if highest_reward < cumulated_reward:
highest_reward = cumulated_reward
nextState = ''.join(map(str, observation))
# Make the algorithm learn based on the results
rospy.logwarn("# state we were=>" + str(state))
rospy.logwarn("# action that we took=>" + str(action))
rospy.logwarn("# reward that action gave=>" + str(reward))
rospy.logwarn("# episode cumulated_reward=>" + str(cumulated_reward))
rospy.logwarn("# State in which we will start next step=>" + str(nextState))
qlearn.learn(state, action, reward, nextState)
if not (done):
rospy.logwarn("NOT DONE")
state = nextState
else:
rospy.logwarn("DONE")
last_time_steps = numpy.append(last_time_steps, [int(i + 1)])
break
rospy.logwarn("############### END Step=>" + str(i))
#raw_input("Next Step...PRESS KEY")
# rospy.sleep(2.0)
m, s = divmod(int(time.time() - start_time), 60)
h, m = divmod(m, 60)
rospy.logerr(("EP: " + str(x + 1) + " - [alpha: " + str(round(qlearn.alpha, 2)) + " - gamma: " + str(
round(qlearn.gamma, 2)) + " - epsilon: " + str(round(qlearn.epsilon, 2)) + "] - Reward: " + str(
cumulated_reward) + " Time: %d:%02d:%02d" % (h, m, s)))
rospy.loginfo(("\n|" + str(nepisodes) + "|" + str(qlearn.alpha) + "|" + str(qlearn.gamma) + "|" + str(
initial_epsilon) + "*" + str(epsilon_discount) + "|" + str(highest_reward) + "| PICTURE |"))
l = last_time_steps.tolist()
l.sort()
# print("Parameters: a="+str)
rospy.loginfo("Overall score: {:0.2f}".format(last_time_steps.mean()))
rospy.loginfo("Best 100 score: {:0.2f}".format(reduce(lambda x, y: x + y, l[-100:]) / len(l[-100:])))
env.close() | [] |
2024-01-10 | YufengJin/aubo_i5_reinforcement_learning | openai_ros~openai_ros~src~openai_ros~robot_envs~sawyer_env.py | import numpy
import rospy
import time
import tf
from openai_ros import robot_gazebo_env
# import intera_interface
import intera_interface
import intera_external_devices
from intera_interface import CHECK_VERSION
from intera_core_msgs.msg import JointLimits
from sensor_msgs.msg import Image, JointState
from copy import deepcopy
class SawyerEnv(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all SawyerEnv environments.
"""
def __init__(self):
"""
Initializes a new SawyerEnv environment.
To check any topic we need to have the simulations running, we need to do two things:
1) Unpause the simulation: without that th stream of data doesnt flow. This is for simulations
that are pause for whatever the reason
2) If the simulation was running already for some reason, we need to reset the controlers.
This has to do with the fact that some plugins with tf, dont understand the reset of the simulation
and need to be reseted to work properly.
The Sensors: The sensors accesible are the ones considered usefull for AI learning.
Sensor Topic List:
* /robot/joint_limits: Odometry of the Base of Wamv
Actuators Topic List:
* As actuator we will use a class to interface with the movements through commands.
Args:
"""
rospy.logdebug("Start SawyerEnv INIT...")
# Variables that we give through the constructor.
# None in this case
# Internal Vars
# Doesnt have any accesibles
self.controllers_list = []
# It doesnt use namespace
self.robot_name_space = ""
self._joint_angle = dict() ###########
self._joint_velocity = dict() ###########
self._joint_effort = dict() ###########
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(SawyerEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False,
reset_world_or_sim="WORLD")
rospy.logdebug("SawyerEnv unpause...")
self.gazebo.unpauseSim()
#self.controllers_object.reset_controllers()
# TODO: Fill it with the sensors
self._check_all_systems_ready()
rospy.Subscriber("/io/internal_camera/head_camera/image_raw", Image, self._head_camera_image_raw_callback)
rospy.Subscriber("/io/internal_camera/right_hand_camera/image_raw", Image, self._right_hand_camera_image_raw_callback)
###############
rospy.Subscriber("robot/joint_states", JointState, self._on_joint_states_callback, queue_size=1, tcp_nodelay=True)
self._setup_tf_listener()
self._setup_movement_system()
self.gazebo.pauseSim()
rospy.logdebug("Finished SawyerEnv INIT...")
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
rospy.logdebug("SawyerEnv check_all_systems_ready...")
self._check_all_sensors_ready()
rospy.logdebug("END SawyerEnv _check_all_systems_ready...")
return True
# CubeSingleDiskEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
rospy.logdebug("START ALL SENSORS READY")
# TODO: Here go the sensors like cameras and joint states
self._check_head_camera_image_raw_ready()
self._check_right_hand_camera_image_raw_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_head_camera_image_raw_ready(self):
self.head_camera_image_raw = None
rospy.logdebug("Waiting for /io/internal_camera/head_camera/image_raw to be READY...")
while self.head_camera_image_raw is None and not rospy.is_shutdown():
try:
self.head_camera_image_raw = rospy.wait_for_message("/io/internal_camera/head_camera/image_raw", Image, timeout=5.0)
rospy.logdebug("Current /io/internal_camera/head_camera/image_raw READY=>")
except:
rospy.logerr("Current /io/internal_camera/head_camera/image_raw not ready yet, retrying for getting head_camera_image_raw")
return self.head_camera_image_raw
def _check_right_hand_camera_image_raw_ready(self):
self.right_hand_camera_image_raw = None
rospy.logdebug("Waiting for /io/internal_camera/right_hand_camera/image_raw to be READY...")
while self.right_hand_camera_image_raw is None and not rospy.is_shutdown():
try:
self.right_hand_camera_image_raw = rospy.wait_for_message("/io/internal_camera/right_hand_camera/image_raw", Image, timeout=5.0)
rospy.logdebug("Current /io/internal_camera/right_hand_camera/image_raw READY=>")
except:
rospy.logerr("Current /io/internal_camera/right_hand_camera/image_raw not ready yet, retrying for getting right_hand_camera_image_raw")
return self.right_hand_camera_image_raw
def _head_camera_image_raw_callback(self, data):
self.head_camera_image_raw = data
def _right_hand_camera_image_raw_callback(self, data):
self.right_hand_camera_image_raw = data
def _setup_tf_listener(self):
"""
Set ups the TF listener for getting the transforms you ask for.
"""
self.listener = tf.TransformListener()
def _setup_movement_system(self):
"""
Setup of the movement system.
:return:
"""
rp = intera_interface.RobotParams()
valid_limbs = rp.get_limb_names()
if not valid_limbs:
rp.log_message(("Cannot detect any limb parameters on this robot. "
"Exiting."), "ERROR")
return
rospy.loginfo("Valid Sawyer Limbs==>"+str(valid_limbs))
print("Getting robot state... ")
rs = intera_interface.RobotEnable(CHECK_VERSION)
init_state = rs.state().enabled
rospy.loginfo("Enabling robot...")
rs.enable()
self._map_actions_to_movement()
def _map_actions_to_movement(self, side="right", joint_delta=0.1):
self.limb = intera_interface.Limb(side)
try:
self.gripper = intera_interface.Gripper(side + '_gripper')
except:
self.has_gripper = False
rospy.loginfo("The electric gripper is not detected on the robot.")
else:
self.has_gripper = True
self.joints = self.limb.joint_names()
# define a dict for joint movement, change 0.1 per frame on each joitn
self.bindings = {
self.joints[0]+"_increase": (self.set_j, [self.joints[0], joint_delta], self.joints[0]+" increase"),
self.joints[0]+"_decrease": (self.set_j, [self.joints[0], -joint_delta], self.joints[0]+" decrease"),
self.joints[1]+"_increase": (self.set_j, [self.joints[1], joint_delta], self.joints[1]+" increase"),
self.joints[1]+"_decrease": (self.set_j, [self.joints[1], -joint_delta], self.joints[1]+" decrease"),
self.joints[2]+"_increase": (self.set_j, [self.joints[2], joint_delta], self.joints[2]+" increase"),
self.joints[2]+"_decrease": (self.set_j, [self.joints[2], -joint_delta], self.joints[2]+" decrease"),
self.joints[3]+"_increase": (self.set_j, [self.joints[3], joint_delta], self.joints[3]+" increase"),
self.joints[3]+"_decrease": (self.set_j, [self.joints[3], -joint_delta], self.joints[3]+" decrease"),
self.joints[4]+"_increase": (self.set_j, [self.joints[4], joint_delta], self.joints[4]+" increase"),
self.joints[4]+"_decrease": (self.set_j, [self.joints[4], -joint_delta], self.joints[4]+" decrease"),
self.joints[5]+"_increase": (self.set_j, [self.joints[5], joint_delta], self.joints[5]+" increase"),
self.joints[5]+"_decrease": (self.set_j, [self.joints[5], -joint_delta], self.joints[5]+" decrease"),
self.joints[6]+"_increase": (self.set_j, [self.joints[6], joint_delta], self.joints[6]+" increase"),
self.joints[6]+"_decrease": (self.set_j, [self.joints[6], -joint_delta], self.joints[6]+" decrease")
}
if self.has_gripper:
self.bindings.update({
"close": (self.set_g, "close", side+" gripper close"),
"open": (self.set_g, "open", side+" gripper open"),
"calibrate": (self.set_g, "calibrate", side+" gripper calibrate")
})
rospy.loginfo("Controlling joints...")
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ----------------------------
def execute_movement(self, action_id):
"""
It executed the command given through an id. This will move any joint
of Sawyer, including the gripper if it has it.
:param: action_id: These are the possible action_id values and the action asociated.
self.joints[0]+"_increase",
self.joints[0]+"_decrease",
self.joints[1]+"_increase",
self.joints[1]+"_decrease",
self.joints[2]+"_increase",
self.joints[2]+"_decrease",
self.joints[3]+"_increase",
self.joints[3]+"_decrease",
self.joints[4]+"_increase",
self.joints[4]+"_decrease",
self.joints[5]+"_increase",
self.joints[5]+"_decrease",
self.joints[6]+"_increase",
self.joints[6]+"_decrease",
gripper_close,
gripper_open,
gripper_calibrate
"""
if action_id in self.bindings:
cmd = self.bindings[action_id]
if action_id == "gripper_close" or action_id == "gripper_open" or action_id == "gripper_calibrate":
cmd[0](cmd[1])
rospy.loginfo("command: %s" % (cmd[2],))
else:
#expand binding to something like "self.set_j(right, 'j0', joint_delta)"
cmd[0](*cmd[1])
rospy.loginfo("command: %s" % (cmd[2],))
else:
rospy.logerr("NOT VALID key binding, it should be one of these: ")
for key, val in sorted(self.bindings.items(),
key=lambda x: x[1][2]):
rospy.logerr(" %s: %s" % (key, val[2]))
def set_j(self,joint_name, delta):
current_position = self.limb.joint_angle(joint_name)
joint_command = {joint_name: current_position + delta}
# publish the desired joint state to ros
self.limb.set_joint_positions(joint_command)
def set_g(self,action):
# execute the gripper
if self.has_gripper:
if action == "close":
self.gripper.close()
elif action == "open":
self.gripper.open()
elif action == "calibrate":
self.gripper.calibrate()
def move_joints_to_angle_blocking(self,joint_positions_dict, timeout=15.0, threshold=0.008726646):
"""
It moves all the joints to the given position and doesnt exit until it reaches that position
"""
self.limb.move_to_joint_positions( positions=joint_positions_dict,
timeout=15.0,
threshold=0.008726646,
test=None)
def _on_joint_states_callback(self, msg): ####################
for idx, name in enumerate(msg.name):
self._joint_angle[name] = msg.position[idx]
self._joint_velocity[name] = msg.velocity[idx]
self._joint_effort[name] = msg.effort[idx]
def get_all_joints_angles(self): ####################
"""
Return dictionary dict({str:float}) with all the joints angles
"""
return deepcopy(self._joint_angle)
def get_limb_joint_names_array(self):
"""
Returns the Joint Names array of the Limb.
"""
return self.joints
def get_all_limb_joint_angles(self):
"""
Return dictionary dict({str:float}) with all the joints angles
"""
return self.limb.joint_angles()
def get_all_limb_joint_efforts(self):
"""
Returns a dictionary dict({str:float}) with all the joints efforts
"""
return self.limb.joint_efforts()
def get_tf_start_to_end_frames(self,start_frame_name, end_frame_name):
"""
Given two frames, it returns the transform from the start_frame_name to the end_frame_name.
It will only return something different to None if the TFs of the Two frames are in TF topic
published and are connected through the TF tree.
:param: start_frame_name: Start Frame of the TF transform
end_frame_name: End Frame of the TF transform
:return: trans,rot of the transform between the start and end frames.
"""
start_frame = "/"+start_frame_name
end_frame = "/"+end_frame_name
trans,rot = None, None
try:
(trans,rot) = self.listener.lookupTransform(start_frame, end_frame, rospy.Time(0))
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
rospy.logerr("TF start to end not ready YET...")
pass
return trans,rot
def check_joint_limits_ready(self):
self.joint_limits = None
rospy.logdebug("Waiting for /robot/joint_limits to be READY...")
while self.joint_limits is None and not rospy.is_shutdown():
try:
self.joint_limits = rospy.wait_for_message("/robot/joint_limits", JointLimits, timeout=3.0)
rospy.logdebug("Current /robot/joint_limits READY=>")
except:
rospy.logerr("Current /robot/joint_limits not ready yet, retrying for getting joint_limits")
return self.joint_limits
def get_joint_limits(self):
return self.joint_limits
def get_head_camera_image_raw(self):
return self.head_camera_image_raw
def get_right_hand_camera_image_raw(self):
return self.right_hand_camera_image_raw
def init_joint_limits(self):
"""
Get the Joint Limits, in the init fase where we need to unpause the simulation to get them
:return: joint_limits: The Joint Limits Dictionary, with names, angles, vel and effort limits.
"""
self.gazebo.unpauseSim()
joint_limits = self.check_joint_limits_ready()
self.gazebo.pauseSim()
return joint_limits | [] |
2024-01-10 | YufengJin/aubo_i5_reinforcement_learning | openai_ros~openai_ros~src~openai_ros~task_envs~sawyer~learn_to_touch_cube.py | import rospy
import numpy
from gym import spaces
from openai_ros.robot_envs import sawyer_env
from gym.envs.registration import register
from geometry_msgs.msg import Point
from geometry_msgs.msg import Vector3
from tf.transformations import euler_from_quaternion
timestep_limit_per_episode = 10000 # Can be any Value
register(
id='SawyerTouchCube-v0',
entry_point='openai_ros:task_envs.sawyer.learn_to_touch_cube.SawyerTouchCubeEnv',
timestep_limit=timestep_limit_per_episode,
)
class SawyerTouchCubeEnv(sawyer_env.SawyerEnv):
def __init__(self):
"""
Make sawyer learn how pick up a cube
"""
# We execute this one before because there are some functions that this
# TaskEnv uses that use variables from the parent class, like the effort limit fetch.
super(SawyerTouchCubeEnv, self).__init__()
# Here we will add any init functions prior to starting the MyRobotEnv
# Only variable needed to be set here
rospy.logdebug("Start SawyerTouchCubeEnv INIT...")
number_actions = rospy.get_param('/sawyer/n_actions')
self.action_space = spaces.Discrete(number_actions)
# We set the reward range, which is not compulsory but here we do it.
self.reward_range = (-numpy.inf, numpy.inf)
self.work_space_x_max = rospy.get_param("/sawyer/work_space/x_max")
self.work_space_x_min = rospy.get_param("/sawyer/work_space/x_min")
self.work_space_y_max = rospy.get_param("/sawyer/work_space/y_max")
self.work_space_y_min = rospy.get_param("/sawyer/work_space/y_min")
self.work_space_z_max = rospy.get_param("/sawyer/work_space/z_max")
self.work_space_z_min = rospy.get_param("/sawyer/work_space/z_min")
self.max_effort = rospy.get_param("/sawyer/max_effort")
self.dec_obs = rospy.get_param("/sawyer/number_decimals_precision_obs")
self.acceptable_distance_to_cube = rospy.get_param("/sawyer/acceptable_distance_to_cube")
self.tcp_z_position_min = rospy.get_param("/sawyer/tcp_z_position_min") #tcp is tool centre point
# We place the Maximum and minimum values of observations
# TODO: Fill when get_observations is done.
"""
We suppose that its all these:
head_pan, right_gripper_l_finger_joint, right_gripper_r_finger_joint, right_j0, right_j1,
right_j2, right_j3, right_j4, right_j5, right_j6
Plus the first three are the block_to_tcp vector
"""
# We fetch the limits of the joinst to get the effort and angle limits
self.joint_limits = self.init_joint_limits()
high = numpy.array([self.work_space_x_max,
self.work_space_y_max,
self.work_space_z_max,
self.joint_limits.position_upper[0],
self.joint_limits.position_upper[1],
self.joint_limits.position_upper[2],
self.joint_limits.position_upper[3],
self.joint_limits.position_upper[4],
self.joint_limits.position_upper[5],
self.joint_limits.position_upper[6],
self.joint_limits.position_upper[7]
# self.joint_limits.position_upper[8],
# self.joint_limits.position_upper[9]
])
low = numpy.array([ self.work_space_x_min,
self.work_space_y_min,
self.work_space_z_min,
self.joint_limits.position_lower[0],
self.joint_limits.position_lower[1],
self.joint_limits.position_lower[2],
self.joint_limits.position_lower[3],
self.joint_limits.position_lower[4],
self.joint_limits.position_lower[5],
self.joint_limits.position_lower[6],
self.joint_limits.position_lower[7]
# self.joint_limits.position_lower[8],
# self.joint_limits.position_lower[9]
])
self.observation_space = spaces.Box(low, high)
rospy.logdebug("ACTION SPACES TYPE===>"+str(self.action_space))
rospy.logdebug("OBSERVATION SPACES TYPE===>"+str(self.observation_space))
# Rewards
self.done_reward =rospy.get_param("/sawyer/done_reward")
self.closer_to_block_reward = rospy.get_param("/sawyer/closer_to_block_reward")
self.cumulated_steps = 0.0
rospy.logdebug("END SawyerTouchCubeEnv INIT...")
def _set_init_pose(self):
"""
Sets the two proppelers speed to 0.0 and waits for the time_sleep
to allow the action to be executed
"""
# We set the angles to zero of the limb
self.joints = self.get_limb_joint_names_array()
join_values_array = [0.0]*len(self.joints)
joint_positions_dict_zero = dict( zip( self.joints, join_values_array))
actual_joint_angles_dict = self.get_all_limb_joint_angles()
# We generate the two step movement. Turn Right/Left where you are and then set all to zero
if "right_j0" in actual_joint_angles_dict:
# We turn to the left or to the right based on where the position is to avoid the table.
if actual_joint_angles_dict["right_j0"] >= 0.0:
actual_joint_angles_dict["right_j0"] = 1.57
else:
actual_joint_angles_dict["right_j0"] = -1.57
if "right_j1" in actual_joint_angles_dict:
actual_joint_angles_dict["right_j1"] = actual_joint_angles_dict["right_j1"] - 0.3
self.move_joints_to_angle_blocking(actual_joint_angles_dict, timeout=15.0, threshold=0.008726646)
self.move_joints_to_angle_blocking(joint_positions_dict_zero, timeout=15.0, threshold=0.008726646)
# We Open the gripper
self.set_g(action="open")
return True
def _init_env_variables(self):
"""
Inits variables needed to be initialised each time we reset at the start
of an episode.
:return:
"""
# For Info Purposes
self.cumulated_reward = 0.0
# We get the initial pose to mesure the distance from the desired point.
translation_tcp_block, rotation_tcp_block = self.get_tf_start_to_end_frames(start_frame_name="block", end_frame_name="right_electric_gripper_base")
tf_tcp_to_block_vector = Vector3()
tf_tcp_to_block_vector.x = translation_tcp_block[0]
tf_tcp_to_block_vector.y = translation_tcp_block[1]
tf_tcp_to_block_vector.z = translation_tcp_block[2]
self.previous_distance_from_block = self.get_magnitud_tf_tcp_to_block(tf_tcp_to_block_vector)
self.translation_tcp_world, _ = self.get_tf_start_to_end_frames(start_frame_name="world", end_frame_name="right_electric_gripper_base")
def _set_action(self, action):
"""
It sets the joints of sawyer based on the action integer given
based on the action number given.
:param action: The action integer that sets what movement to do next.
"""
rospy.logdebug("Start Set Action ==>"+str(action))
# The 14 discrete actions map pairwise onto the 7 joints: even actions
# increase a joint angle, odd actions decrease it
# (action 2*k -> self.joints[k] + "_increase",
#  action 2*k+1 -> self.joints[k] + "_decrease").
action_id = self.joints[action // 2] + ("_increase" if action % 2 == 0 else "_decrease")
# We tell sawyer the action to perform
self.execute_movement(action_id)
rospy.logdebug("END Set Action ==>"+str(action)+","+str(action_id))
def _get_obs(self):
"""
Here we define what sensor data defines our robot's observations.
To know which variables we have access to, we need to read the
SawyerEnv API docs.
:return: observation
"""
rospy.logdebug("Start Get Observation ==>")
# We get the translation of the base of the gripper to the block
translation_tcp_block, _ = self.get_tf_start_to_end_frames(start_frame_name="block",
end_frame_name="right_electric_gripper_base")
translation_tcp_block_round = numpy.around(translation_tcp_block, decimals=self.dec_obs)
# We get this data but we don't put it in the observations because it's something internal for evaluation.
# The order of the frames is crucial: swap them and the transform makes no sense.
self.translation_tcp_world, _ = self.get_tf_start_to_end_frames(start_frame_name="world",
end_frame_name="right_electric_gripper_base")
# Same here: the values are used internally for knowing if done; they won't define the state (although these are left out for performance)
self.joints_efforts_dict = self.get_all_limb_joint_efforts()
rospy.logdebug("JOINTS EFFORTS DICT OBSERVATION METHOD==>"+str(self.joints_efforts_dict))
"""
We assume the joints are all of these:
head_pan, right_gripper_l_finger_joint, right_gripper_r_finger_joint, right_j0, right_j1,
right_j2, right_j3, right_j4, right_j5, right_j6
"""
# joints_angles_array = self.get_all_limb_joint_angles().values()
joints_angles_array = self.get_all_joints_angles().values()
joints_angles_array_round = numpy.around(joints_angles_array, decimals=self.dec_obs)
# We concatenate the two rounded arrays and convert them to a standard Python list
observation = numpy.concatenate((translation_tcp_block_round,joints_angles_array_round), axis=0).tolist()
return observation
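# The returned observation is therefore a flat list of the form
# (illustrative): [dx, dy, dz, joint_0, joint_1, ..., joint_N],
# with every entry rounded to self.dec_obs decimal places.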
def _is_done(self, observations):
"""
We consider the episode done if:
1) The sawyer TCP is outside the workspace, with self.translation_tcp_world
2) The joints exceeded a certain effort ( the arm got stuck somewhere ), self.joints_efforts_array
3) The TCP to block distance is lower than a threshold ( it got to the place )
"""
is_stuck = self.is_arm_stuck(self.joints_efforts_dict)
tcp_current_pos = Vector3()
tcp_current_pos.x = self.translation_tcp_world[0]
tcp_current_pos.y = self.translation_tcp_world[1]
tcp_current_pos.z = self.translation_tcp_world[2]
is_inside_workspace = self.is_inside_workspace(tcp_current_pos)
tcp_to_block_pos = Vector3()
tcp_to_block_pos.x = observations[0]
tcp_to_block_pos.y = observations[1]
tcp_to_block_pos.z = observations[2]
has_reached_the_block = self.reached_block( tcp_to_block_pos,
self.acceptable_distance_to_cube,
self.translation_tcp_world[2],
self.tcp_z_position_min)
done = is_stuck or not(is_inside_workspace) or has_reached_the_block
rospy.logdebug("#### IS DONE ? ####")
rospy.logdebug("is_stuck ?="+str(is_stuck))
rospy.logdebug("Not is_inside_workspace ?="+str(not(is_inside_workspace)))
rospy.logdebug("has_reached_the_block ?="+str(has_reached_the_block))
rospy.logdebug("done ?="+str(done))
rospy.logdebug("#### #### ####")
return done
def _compute_reward(self, observations, done):
"""
We base the reward on whether the episode is done and on
whether the distance to the block has decreased.
:return:
"""
tf_tcp_to_block_vector = Vector3()
tf_tcp_to_block_vector.x = observations[0]
tf_tcp_to_block_vector.y = observations[1]
tf_tcp_to_block_vector.z = observations[2]
distance_block_to_tcp = self.get_magnitud_tf_tcp_to_block(tf_tcp_to_block_vector)
distance_difference = distance_block_to_tcp - self.previous_distance_from_block
if not done:
# If there has been a decrease in the distance to the desired point, we reward it
if distance_difference < 0.0:
rospy.logdebug("DECREASE IN DISTANCE GOOD")
reward = self.closer_to_block_reward
else:
rospy.logerr("ENCREASE IN DISTANCE BAD")
#reward = -1*self.closer_to_block_reward
reward = 0.0
else:
if self.reached_block(tf_tcp_to_block_vector,self.acceptable_distance_to_cube,self.translation_tcp_world[2], self.tcp_z_position_min):
reward = self.done_reward
else:
reward = -1*self.done_reward
self.previous_distance_from_block = distance_block_to_tcp
rospy.logdebug("reward=" + str(reward))
self.cumulated_reward += reward
rospy.logdebug("Cumulated_reward=" + str(self.cumulated_reward))
self.cumulated_steps += 1
rospy.logdebug("Cumulated_steps=" + str(self.cumulated_steps))
return reward
# Internal TaskEnv Methods
def is_arm_stuck(self, joints_efforts_dict):
"""
Checks if the efforts in the arm joints exceed a certain threshold.
We will only check joints 0,1,2,3,4,5,6
"""
is_arm_stuck = False
for joint_name in self.joint_limits.joint_names:
if joint_name in joints_efforts_dict:
effort_value = joints_efforts_dict[joint_name]
index = self.joint_limits.joint_names.index(joint_name)
effort_limit = self.joint_limits.effort[index]
rospy.logdebug("Joint Effort ==>Name="+str(joint_name)+",Effort="+str(effort_value)+",Limit="+str(effort_limit))
if abs(effort_value) > effort_limit:
is_arm_stuck = True
rospy.logerr("Joint Effort TOO MUCH ==>"+str(joint_name)+","+str(effort_value))
break
else:
rospy.logdebug("Joint Effort is ok==>"+str(joint_name)+","+str(effort_value))
else:
rospy.logdebug("Joint Name is not in the effort dict==>"+str(joint_name))
return is_arm_stuck
def reached_block(self,block_to_tcp_vector, minimum_distance, tcp_z_position, tcp_z_position_min):
"""
It returns True if the magnitude of the TCP-to-block transform vector is
smaller than minimum_distance.
tcp_z_position is used so the block only counts as reached when the TCP is above the table.
"""
reached_block_b = False
distance_to_block = self.get_magnitud_tf_tcp_to_block(block_to_tcp_vector)
tcp_z_pos_ok = tcp_z_position >= tcp_z_position_min
distance_ok = distance_to_block <= minimum_distance
reached_block_b = distance_ok and tcp_z_pos_ok
rospy.logdebug("###### REACHED BLOCK ? ######")
rospy.logdebug("tcp_z_pos_ok==>"+str(tcp_z_pos_ok))
rospy.logdebug("distance_ok==>"+str(distance_ok))
rospy.logdebug("reached_block_b==>"+str(reached_block_b))
rospy.logdebug("############")
return reached_block_b
def get_distance_from_desired_point(self, current_position):
"""
Calculates the distance from the current position to the desired point
:param current_position: a Vector3 with the current position
:return: the distance
"""
distance = self.get_distance_from_point(current_position,
self.desired_point)
return distance
def get_distance_from_point(self, pstart, p_end):
"""
Given two Vector3 objects, get the distance between them
:param pstart: start position
:param p_end: end position
:return: the euclidean distance
"""
a = numpy.array((pstart.x, pstart.y, pstart.z))
b = numpy.array((p_end.x, p_end.y, p_end.z))
distance = numpy.linalg.norm(a - b)
return distance
def get_magnitud_tf_tcp_to_block(self, translation_vector):
"""
Given a Vector3 object, get its magnitude
:param translation_vector: the vector whose magnitude is returned
:return: the magnitude
"""
a = numpy.array(( translation_vector.x,
translation_vector.y,
translation_vector.z))
distance = numpy.linalg.norm(a)
return distance
def get_orientation_euler(self, quaternion_vector):
# We convert from quaternions to euler
orientation_list = [quaternion_vector.x,
quaternion_vector.y,
quaternion_vector.z,
quaternion_vector.w]
roll, pitch, yaw = euler_from_quaternion(orientation_list)
return roll, pitch, yaw
def is_inside_workspace(self,current_position):
"""
Check if the Sawyer TCP is inside the defined workspace
"""
is_inside = False
rospy.logdebug("##### INSIDE WORK SPACE? #######")
rospy.logdebug("XYZ current_position"+str(current_position))
rospy.logdebug("work_space_x_max"+str(self.work_space_x_max)+",work_space_x_min="+str(self.work_space_x_min))
rospy.logdebug("work_space_y_max"+str(self.work_space_y_max)+",work_space_y_min="+str(self.work_space_y_min))
rospy.logdebug("work_space_z_max"+str(self.work_space_z_max)+",work_space_z_min="+str(self.work_space_z_min))
rospy.logdebug("############")
is_inside = (self.work_space_x_min < current_position.x <= self.work_space_x_max
and self.work_space_y_min < current_position.y <= self.work_space_y_max
and self.work_space_z_min < current_position.z <= self.work_space_z_max)
return is_inside
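# --- Usage sketch (illustrative; assumes a running ROS master and that
# this task environment has been registered as 'SawyerTouchCube-v0', e.g.
# by importing openai_ros.task_envs.sawyer.learn_to_touch_cube) ---
# import gym
# import rospy
# rospy.init_node('sawyer_env_demo', anonymous=True)
# env = gym.make('SawyerTouchCube-v0')
# obs = env.reset()
# obs, reward, done, info = env.step(env.action_space.sample())
# env.close()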
#!/usr/bin/env python3
# === YufengJin/aubo_i5_reinforcement_learning : aubo_openai_example/scripts/train_a2c.py ===
import rospy ###
from openai_ros.task_envs.sawyer import learn_to_touch_cube ###
import gym ###
# import sys ###
# sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages') ###
from stable_baselines import A2C
from stable_baselines.common.policies import MlpPolicy
from stable_baselines.common import make_vec_env
from stable_baselines.common.vec_env import VecFrameStack
from stable_baselines.common.policies import CnnPolicy, CnnLstmPolicy, CnnLnLstmPolicy
# sys.path.append('/opt/ros/kinetic/lib/python2.7/dist-packages') ###
# import filter_env
import os
import gc
gc.enable()
###################################################################################################
ENV_NAME ='SawyerTouchCube-v0' ###
EPISODES = 100000
TEST = 10
def main():
rospy.init_node('sawyer_learn_to_pick_cube_a2c', anonymous=True, log_level=rospy.WARN) ###
# env = filter_env.makeFilteredEnv(gym.make(ENV_NAME))
# env = gym.make(ENV_NAME)
# Parallel environments
env = make_vec_env(ENV_NAME, n_envs=4) ###
# env = VecFrameStack(ENV_NAME, 4) ###
model = A2C(MlpPolicy, env, verbose=1) ###
# model = A2C(CnnPolicy, env, lr_schedule='constant') ###
env = gym.wrappers.Monitor(env, os.path.expanduser('~/catkin_ws/src/A2C/experiments/') + ENV_NAME, force=True)
model.learn(total_timesteps=25000) ###
model.save("a2c_sawyer")
# NOTE: the original loop referenced an undefined DDPG-style `agent`;
# we use the trained stable-baselines model via model.predict() instead,
# and range() replaces xrange() since the script targets Python 3.
for episode in range(EPISODES):
state = env.reset()
# Roll out one episode with the trained policy
for step in range(env.spec.timestep_limit):
action, _ = model.predict(state)
next_state, reward, done, _ = env.step(action)
state = next_state
if done:
break
# Testing:
if episode % 100 == 0 and episode > 100:
total_reward = 0
for i in range(TEST):
state = env.reset()
for j in range(env.spec.timestep_limit):
#env.render()
action, _ = model.predict(state, deterministic=True) # direct action for test ###TO EDIT: check chooseAction my_sawyer_openai_example/scripts/qlearn.py
state, reward, done, _ = env.step(action)
total_reward += reward
if done:
break
ave_reward = total_reward / TEST
print('episode: ', episode, 'Evaluation Average Reward:', ave_reward)
env.close()
if __name__ == '__main__':
main()
#!/usr/bin/env python
# === YufengJin/aubo_i5_reinforcement_learning : aubo_openai_example/scripts/start_qlearning_v2.py ===
import gym
import numpy
import time
import qlearn
from gym import wrappers
# ROS packages required
import rospy
import rospkg
#from openai_ros.openai_ros_common import StartOpenAI_ROS_Environment
from openai_ros.task_envs.sawyer import learn_to_touch_cube
if __name__ == '__main__':
rospy.init_node('sawyer_learn_to_pick_cube_qlearn',
anonymous=True, log_level=rospy.WARN)
# Init OpenAI_ROS ENV
# task_and_robot_environment_name = rospy.get_param(
# '/sawyer/task_and_robot_environment_name')
# env = StartOpenAI_ROS_Environment(
# task_and_robot_environment_name)
env = gym.make('SawyerTouchCube-v0')
# Create the Gym environment
rospy.loginfo("Gym environment done")
rospy.loginfo("Starting Learning")
# Set the logging system
rospack = rospkg.RosPack()
pkg_path = rospack.get_path('my_sawyer_openai_example')
outdir = pkg_path + '/training_results'
env = wrappers.Monitor(env, outdir, force=True)
rospy.loginfo("Monitor Wrapper started")
last_time_steps = numpy.ndarray(0)
# Loads parameters from the ROS param server
# Parameters are stored in a yaml file inside the config directory
# They are loaded at runtime by the launch file
Alpha = rospy.get_param("/sawyer/alpha")
Epsilon = rospy.get_param("/sawyer/epsilon")
Gamma = rospy.get_param("/sawyer/gamma")
epsilon_discount = rospy.get_param("/sawyer/epsilon_discount")
nepisodes = rospy.get_param("/sawyer/nepisodes")
nsteps = rospy.get_param("/sawyer/nsteps")
# Initialises the algorithm that we are going to use for learning
qlearn = qlearn.QLearn(actions=range(env.action_space.n),
alpha=Alpha, gamma=Gamma, epsilon=Epsilon)
initial_epsilon = qlearn.epsilon
start_time = time.time()
highest_reward = 0
# Starts the main training loop: the one about the episodes to do
for x in range(nepisodes):
rospy.logdebug("############### START EPISODE=>" + str(x))
cumulated_reward = 0
done = False
if qlearn.epsilon > 0.05:
qlearn.epsilon *= epsilon_discount
# Initialize the environment and get first state of the robot
observation = env.reset()
state = ''.join(map(str, observation))
# Show on screen the actual situation of the robot
# env.render()
# For each episode, we run the robot for at most nsteps steps
for i in range(nsteps):
rospy.logwarn("############### Start Step=>" + str(i))
# Pick an action based on the current state
action = qlearn.chooseAction(state)
rospy.logwarn("Next action is:%d", action)
# Execute the action in the environment and get feedback
observation, reward, done, info = env.step(action)
rospy.logwarn(str(observation) + " " + str(reward))
cumulated_reward += reward
if highest_reward < cumulated_reward:
highest_reward = cumulated_reward
nextState = ''.join(map(str, observation))
# Make the algorithm learn based on the results
rospy.logwarn("# state we were=>" + str(state))
rospy.logwarn("# action that we took=>" + str(action))
rospy.logwarn("# reward that action gave=>" + str(reward))
rospy.logwarn("# episode cumulated_reward=>" +
str(cumulated_reward))
rospy.logwarn(
"# State in which we will start next step=>" + str(nextState))
qlearn.learn(state, action, reward, nextState)
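# The tabular update applied inside qlearn.learn() is the standard
# Q-learning rule:
#   Q(s,a) <- Q(s,a) + Alpha * (reward + Gamma * max_a' Q(s',a') - Q(s,a))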
if not (done):
rospy.logwarn("NOT DONE")
state = nextState
else:
rospy.logwarn("DONE")
last_time_steps = numpy.append(last_time_steps, [int(i + 1)])
break
rospy.logwarn("############### END Step=>" + str(i))
#raw_input("Next Step...PRESS KEY")
# rospy.sleep(2.0)
m, s = divmod(int(time.time() - start_time), 60)
h, m = divmod(m, 60)
rospy.logerr(("EP: " + str(x + 1) + " - [alpha: " + str(round(qlearn.alpha, 2)) + " - gamma: " + str(
round(qlearn.gamma, 2)) + " - epsilon: " + str(round(qlearn.epsilon, 2)) + "] - Reward: " + str(
cumulated_reward) + " Time: %d:%02d:%02d" % (h, m, s)))
rospy.loginfo(("\n|" + str(nepisodes) + "|" + str(qlearn.alpha) + "|" + str(qlearn.gamma) + "|" + str(
initial_epsilon) + "*" + str(epsilon_discount) + "|" + str(highest_reward) + "| PICTURE |"))
l = last_time_steps.tolist()
l.sort()
# print("Parameters: a="+str)
rospy.loginfo("Overall score: {:0.2f}".format(last_time_steps.mean()))
rospy.loginfo("Best 100 score: {:0.2f}".format(
reduce(lambda x, y: x + y, l[-100:]) / len(l[-100:])))
env.close()
# === lscsoft/lalsuite-archive : lalapps/src/inspiral/inspiral.py ===
"""
Classes needed for the inspiral analysis pipeline.
This script produced the necessary condor submit and dag files to run
the standalone inspiral code on LIGO data
"""
__author__ = 'Duncan Brown <[email protected]>'
__date__ = '$Date$'
__version__ = '$Revision$'
import copy
import string
import sys, os, re, subprocess
from glue import pipeline
from glue import lal
class InspiralError(Exception):
def __init__(self, args=None):
self.args = args
#############################################################################
class InspiralAnalysisJob(pipeline.AnalysisJob, pipeline.CondorDAGJob):
"""
An inspiral analysis job captures some of the common features of the specific
inspiral jobs that appear below. Specifically, the universe and exec_name
are set, the stdout and stderr from the job are directed to the logs
directory. The path to the executable is determined from the ini file.
"""
def __init__(self,cp,sections,exec_name,extension='xml',dax=False):
"""
cp = ConfigParser object from which options are read.
sections = sections of the ConfigParser that get added to the opts
exec_name = exec_name name in ConfigParser
"""
self.__exec_name = exec_name
self.__extension = extension
universe = cp.get('condor','universe')
executable = cp.get('condor',exec_name)
pipeline.CondorDAGJob.__init__(self,universe,executable)
pipeline.AnalysisJob.__init__(self,cp,dax)
self.add_condor_cmd('copy_to_spool','False')
self.set_grid_site('local')
self.__use_gpus = cp.has_option('condor', 'use-gpus')
mycp = copy.deepcopy(cp)
for sec in sections:
if mycp.has_section(sec):
# check to see if the job should run on a remote site
if mycp.has_option(sec,'remote-sites'):
remotesites = mycp.get(sec,'remote-sites')
mycp.remove_option(sec,'remote-sites')
self.set_grid_site(remotesites)
# add all the other options as arguments to the code
self.add_ini_opts(mycp, sec)
else:
print >>sys.stderr, "warning: config file is missing section [" + sec + "]"
self.set_stdout_file('logs/' + exec_name + \
'-$(macrogpsstarttime)-$(macrogpsendtime)-$(cluster)-$(process).out')
self.set_stderr_file('logs/' + exec_name + \
'-$(macrogpsstarttime)-$(macrogpsendtime)-$(cluster)-$(process).err')
self.set_sub_file(exec_name + '.sub')
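# A minimal, illustrative ini fragment consumed by this class (paths and
# section contents are placeholders, not the pipeline's real values):
#   [condor]
#   universe = standard
#   inspiral = /path/to/lalapps_inspiral
#   [inspiral]
#   ; every option in the sections passed to __init__ is forwarded to
#   ; the executable via add_ini_opts()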
def set_exec_name(self,exec_name):
"""
Set the exec_name name
"""
self.__exec_name = exec_name
def get_exec_name(self):
"""
Get the exec_name name
"""
return self.__exec_name
def set_extension(self,extension):
"""
Set the file extension
"""
self.__extension = extension
def get_extension(self):
"""
Get the extension for the file name
"""
return self.__extension
def get_use_gpus(self):
"""
Get whether this job was requested to run on a GPU node
"""
return self.__use_gpus
#############################################################################
class InspiralPlottingJob(InspiralAnalysisJob):
"""
The InspiralPlottingJob class will assign options common to all plotting
jobs. Currently this is only MPLCONFIGDIR.
"""
def __init__(self,cp,sections,exec_name,extension='xml',dax=False):
"""
cp = ConfigParser object from which options are read.
sections = sections of the ConfigParser that get added to the opts
exec_name = exec_name name in ConfigParser
"""
InspiralAnalysisJob.__init__(self,cp,sections,exec_name,extension,dax)
self.add_condor_cmd('getenv','True')
if cp.has_option('pipeline','matplotlibdir'):
MPLConfigPath = cp.get('pipeline','matplotlibdir')
self.add_condor_cmd('environment','MPLCONFIGDIR=' + MPLConfigPath)
#############################################################################
class TmpltBankJob(InspiralAnalysisJob):
"""
A lalapps_tmpltbank job used by the inspiral pipeline. The static options
are read from the sections [data] and [tmpltbank] in the ini file. The
stdout and stderr from the job are directed to the logs directory. The job
runs in the universe specified in the ini file. The path to the executable
is determined from the ini file.
"""
def __init__(self,cp,dax=False):
"""
cp = ConfigParser object from which options are read.
"""
exec_name = 'tmpltbank'
extension = 'xml'
sections = ['data','tmpltbank']
have_pycbc = False
if cp.has_option('tmpltbank', 'pycbc'):
have_pycbc=True
cp.set('condor', 'universe', 'vanilla')
InspiralAnalysisJob.__init__(self,cp,sections,exec_name,extension,dax)
if have_pycbc:
self.add_condor_cmd('getenv', 'True')
class InspInjJob(InspiralAnalysisJob):
"""
A lalapps_inspinj job used by the grb inspiral pipeline. The static options
are read from the section [inspinj] in the ini file. The
stdout and stderr from the job are directed to the logs directory. The
job runs in the universe specified in the ini file. The path to the
executable is determined from the ini file.
"""
def __init__(self,cp,dax=False):
"""
cp = ConfigParser object from which options are read.
"""
exec_name = 'inspinj'
sections = ['inspinj']
extension = 'xml'
InspiralAnalysisJob.__init__(self,cp,sections,exec_name,extension,dax)
self.set_universe('vanilla')
self.__listDone=[]
self.__listNodes=[]
def set_done(self,number,node):
self.__listDone.append(number)
self.__listNodes.append(node)
def check_node(self, number):
if self.__listDone.count(number):
index=self.__listDone.index(number)
return self.__listNodes[index]
return None
class BbhInjJob(InspiralAnalysisJob):
"""
A lalapps_bbhinj job used by the online inspiral pipeline. The static options
are read from the section [bbhinj] in the ini file. The
stdout and stderr from the job are directed to the logs directory. The
job runs in the universe specified in the ini file. The path to the
executable is determined from the ini file.
"""
def __init__(self,cp,dax=False):
"""
cp = ConfigParser object from which options are read.
"""
exec_name = 'bbhinj'
sections = ['bbhinj']
extension = 'xml'
InspiralAnalysisJob.__init__(self,cp,sections,exec_name,extension,dax)
class RandomBankJob(InspiralAnalysisJob):
"""
A lalapps_randombank job used by the inspiral pipeline. The static options
are read from the section [randombank] in the ini file. The stdout and
stderr from the job are directed to the logs directory. The job runs in the
universe specified in the ini file. The path to the executable is determined
from the ini file.
"""
def __init__(self,cp,dax=False):
"""
cp = ConfigParser object from which options are read.
"""
exec_name = 'randombank'
sections = ['randombank']
extension = 'xml'
InspiralAnalysisJob.__init__(self,cp,sections,exec_name,extension,dax)
class SplitBankJob(InspiralAnalysisJob):
"""
A lalapps_splitbank job used by the inspiral pipeline. The static options
are read from the section [splitbank] in the ini file. The stdout and stderr
from the job are directed to the logs directory. The job runs in the
universe specfied in the ini file. The path to the executable is determined
from the ini file.
"""
def __init__(self,cp,dax=False):
"""
cp = ConfigParser object from which options are read.
"""
exec_name = 'splitbank'
sections = ['splitbank']
extension = 'xml'
InspiralAnalysisJob.__init__(self,cp,sections,exec_name,extension,dax)
class InspiralJob(InspiralAnalysisJob):
"""
A lalapps_inspiral job used by the inspiral pipeline. The static options
are read from the sections [data] and [inspiral] in the ini file. The
stdout and stderr from the job are directed to the logs directory. The job
runs in the universe specified in the ini file. The path to the executable
is determined from the ini file. If the user requested GPU utilization,
checks are done to ensure a successful run and the necessary Condor
commands are added.
"""
def __init__(self,cp,dax=False):
"""
cp = ConfigParser object from which options are read.
"""
exec_name = 'inspiral'
sections = ['data','inspiral']
extension = 'xml'
have_pycbc = False
if cp.has_option('inspiral', 'pycbc'):
have_pycbc=True
cp.set('condor', 'universe', 'vanilla')
cp.remove_option('inspiral', 'pycbc')
InspiralAnalysisJob.__init__(self,cp,sections,exec_name,extension,dax)
self.add_condor_cmd('environment',"KMP_LIBRARY=serial;MKL_SERIAL=yes")
self.add_condor_cmd('request_memory', '1000')
if have_pycbc:
self.add_condor_cmd('getenv', 'True')
if self.get_use_gpus():
# make sure the vanilla universe is being used
universe = cp.get('condor', 'universe')
if universe != 'vanilla':
raise RuntimeError, 'Cannot run GPU inspiral jobs on Condor ' + \
universe + ' universe. Please use vanilla.'
# make sure the executable has CUDA dependencies
executable = cp.get('condor', exec_name)
objdump_re = re.compile(r'^\s*NEEDED\s*(libcufft\.|libcudart\.).*')
proc = subprocess.Popen(['objdump', '-p', executable], \
stdin=None, stdout=subprocess.PIPE)
cuda_deps = False
for line in proc.stdout:
m = objdump_re.match(line)
if m:
cuda_deps = True
break
if not cuda_deps:
raise RuntimeError, 'Inspiral executable has no CUDA ' + \
'dependencies. Please use a CUDA-enabled build.'
self.add_opt('gpu-device-id', '0')
self.add_condor_cmd('+WantGPU', 'true')
self.add_condor_cmd('Requirements', '( GPU_PRESENT =?= true)')
class InspiralCkptJob(InspiralAnalysisJob):
"""
A lalapps_inspiral job used by the inspiral pipeline. The static options
are read from the sections [data] and [inspiral] in the ini file. The
stdout and stderr from the job are directed to the logs directory. The job
runs in the universe specified in the ini file. The path to the executable
is determined from the ini file.
This one checkpoints.
"""
def __init__(self,cp,dax=False):
"""
cp = ConfigParser object from which options are read.
"""
exec_name = 'inspiral'
sections = []
extension = 'xml'
InspiralAnalysisJob.__init__(self,cp,sections,exec_name,extension,dax)
self.add_short_opt('_condor_relocatable', '')
class PTFInspiralJob(InspiralAnalysisJob):
"""
A coh_PTF_inspiral job used by the inspiral pipeline. The static options
are read from the section [coh_PTF_inspiral] in the ini file. The
stdout and stderr from the job are directed to the logs directory. The job
runs in the universe specified in the ini file. The path to the executable
is determined from the ini file.
"""
def __init__(self,cp,dax=False):
"""
cp = ConfigParser object from which options are read.
"""
exec_name = 'coh_PTF_inspiral'
sections = ['coh_PTF_inspiral']
extension = 'xml'
InspiralAnalysisJob.__init__(self,cp,sections,exec_name,extension,dax)
ramValue = 1390
if cp.has_section('coh_PTF_inspiral-meta'):
if cp.has_option('coh_PTF_inspiral-meta','minimum-ram'):
ramValue = int(cp.get('coh_PTF_inspiral-meta','minimum-ram'))
self.add_condor_cmd('request_memory', '%d' %(ramValue))
class PTFSpinCheckerJob(InspiralAnalysisJob):
"""
A coh_PTF spin checker job
"""
def __init__(self,cp,dax=False):
"""
cp = ConfigParser object from which options are read.
"""
exec_name = 'coh_PTF_spin_checker'
sections = ['coh_PTF_spin_checker']
extension = 'xml'
InspiralAnalysisJob.__init__(self,cp,sections,exec_name,extension,dax)
self.add_condor_cmd('request_memory', '1400')
class TrigbankJob(InspiralAnalysisJob):
"""
A lalapps_trigbank job used by the inspiral pipeline. The static
options are read from the section [trigbank] in the ini file. The
stdout and stderr from the job are directed to the logs directory. The job
always runs in the scheduler universe. The path to the executable is
determined from the ini file.
"""
def __init__(self,cp,dax=False):
"""
cp = ConfigParser object from which options are read.
"""
exec_name = 'trigbank'
sections = ['trigbank']
extension = 'xml'
InspiralAnalysisJob.__init__(self,cp,sections,exec_name,extension,dax)
class IncaJob(InspiralAnalysisJob):
"""
A lalapps_inca job used by the inspiral pipeline. The static options are
read from the section [inca] in the ini file. The stdout and stderr from
the job are directed to the logs directory. The path to the executable is
determined from the ini file.
"""
def __init__(self,cp,dax=False):
"""
cp = ConfigParser object from which options are read.
"""
exec_name = 'inca'
sections = ['inca']
extension = 'xml'
InspiralAnalysisJob.__init__(self,cp,sections,exec_name,extension,dax)
class ThincaJob(InspiralAnalysisJob):
"""
A lalapps_thinca job used by the inspiral pipeline. The static options are
read from the section [thinca] in the ini file. The stdout and stderr from
the job are directed to the logs directory. The path to the executable is
determined from the ini file.
"""
def __init__(self,cp,dax=False):
"""
cp = ConfigParser object from which options are read.
"""
exec_name = 'thinca'
#sections = ['thinca']
sections = []
extension = 'xml'
InspiralAnalysisJob.__init__(self,cp,sections,exec_name,extension,dax)
if cp.has_section('thinca'):
self.add_ini_opts(cp,'thinca')
def add_ini_opts(self, cp, section):
"""
Parse command line options from a given section in an ini file and
pass to the executable.
@param cp: ConfigParser object pointing to the ini file.
@param section: section of the ini file to add to the options.
"""
for opt in cp.options(section):
arg = string.strip(cp.get(section,opt))
#self.add_opt(opt,arg)
if opt[-4:] == "file":
fname = os.path.split(arg)[-1]
if fname not in os.listdir('.'):
try:
os.symlink(arg,os.path.split(arg)[-1])
self.add_file_opt(opt,fname)
except:
print >>sys.stderr, "sym link failed for " + arg + " grid workflows might be broken"
self.add_file_opt(opt,arg)
else:
self.add_file_opt(opt,fname)
else:
self.add_opt(opt,arg)
class ThincaToCoincJob(InspiralAnalysisJob):
"""
A ThincaToCoinc job. The static options are read from the
section [thinca_to_coinc] in the ini file.
"""
def __init__(self, cp, dax = False):
"""
@param cp: ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'thinca_to_coinc'
sections = ['thinca_to_coinc']
extension = 'xml'
InspiralAnalysisJob.__init__(self, cp, sections, exec_name, extension, dax)
self.add_condor_cmd('getenv', 'True')
self.__experiment_start_time = None
self.__experiment_end_time = None
# overwrite standard log file names
self.set_stdout_file('logs/' + exec_name + '-$(cluster)-$(process).out')
self.set_stderr_file('logs/' + exec_name + '-$(cluster)-$(process).err')
def set_experiment_start_time(self, experiment_start_time):
"""
Sets the experiment-start-time option. This is a required option.
@param experiment_start_time: gps start time of the experiment the thinca_to_coinc
job is in.
"""
self.add_opt('experiment-start-time', experiment_start_time)
self.__experiment_start_time = experiment_start_time
def set_experiment_end_time(self, experiment_end_time):
"""
Sets the experiment-end-time option. This is a required option.
@param experiment_end_time: gps end time of the experiment the thinca_to_coinc
job is in.
"""
self.add_opt('experiment-end-time', experiment_end_time)
self.__experiment_end_time = experiment_end_time
def get_experiment_start_time(self):
"""
Returns the value of the experiment-start-time option.
"""
return self.__experiment_start_time
def get_experiment_end_time(self):
"""
Returns the value of the experiment-end-time option.
"""
return self.__experiment_end_time
def set_simulation(self):
"""
Adds the simulation argument to the job.
"""
self.add_opt('simulation', '')
class HWinjPageJob(InspiralAnalysisJob):
"""
An HWinjPageJob runs the hardware injection page script on the
output of the pipeline.
"""
def __init__(self, cp, dax = False):
"""
@param cp: ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = "hardware_inj_page"
universe = "vanilla"
sections = "[hardware-injection-page]"
extension = 'html'
executable = cp.get('condor',exec_name)
pipeline.CondorDAGJob.__init__(self, universe, executable)
pipeline.AnalysisJob.__init__(self, cp, dax)
self.add_condor_cmd('getenv','True')
self.set_stdout_file('logs/' + exec_name + '-$(cluster)-$(process).out')
self.set_stderr_file('logs/' + exec_name + '-$(cluster)-$(process).err')
self.set_sub_file(exec_name + '.sub')
class SireJob(InspiralAnalysisJob):
"""
A lalapps_sire job used by the inspiral pipeline. The stdout and stderr from
the job are directed to the logs directory. The path to the executable is
determined from the ini file.
"""
def __init__(self,cp,dax=False):
"""
@param cp = ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'sire'
sections = ['sire']
extension = 'xml'
InspiralAnalysisJob.__init__(self,cp,sections,exec_name,extension,dax)
# sire currently doesn't take GPS start/end times
self.set_stdout_file('logs/sire-$(macroifo)-$(cluster)-$(process).out')
self.set_stderr_file('logs/sire-$(macroifo)-$(cluster)-$(process).err')
class CoireJob(InspiralAnalysisJob):
"""
A lalapps_coire job used by the inspiral pipeline. The stdout and stderr from
the job are directed to the logs directory. The path to the executable is
determined from the ini file.
"""
def __init__(self,cp,dax=False):
"""
@param cp = ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'coire'
sections = ['coire']
extension = 'xml'
InspiralAnalysisJob.__init__(self,cp,sections,exec_name,extension,dax)
# coire currently doesn't take GPS start/end times
self.set_stdout_file('logs/coire-$(macroifo)-$(cluster)-$(process).out')
self.set_stderr_file('logs/coire-$(macroifo)-$(cluster)-$(process).err')
class FrJoinJob(InspiralAnalysisJob):
"""
A lalapps_frjoin job used by the inspiral pipeline. The path to the
executable is determined from the ini file.
"""
def __init__(self,cp,dax=False):
"""
@param cp = ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'frjoin'
sections = []
extension = 'gwf'
InspiralAnalysisJob.__init__(self,cp,sections,exec_name,extension,dax)
# frjoin currently doesn't take GPS start/end times
self.set_stdout_file('logs/frjoin-$(cluster)-$(process).out')
self.set_stderr_file('logs/frjoin-$(cluster)-$(process).err')
class CohBankJob(InspiralAnalysisJob):
"""
A lalapps coherent bank (cohbank) job used by the inspiral pipeline. The static
options are read from the section [cohbank] in the ini file. The stdout and
stderr from the job are directed to the logs directory. The path to the
executable is determined from the ini file.
"""
def __init__(self,cp,dax=False):
"""
@param cp = ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'cohbank'
sections = ['cohbank']
extension = 'xml'
InspiralAnalysisJob.__init__(self,cp,sections,exec_name,extension,dax)
class InspiralCoherentJob(InspiralAnalysisJob):
"""
A lalapps_inspiral job used by the inspiral pipeline. The static options
are read from the sections [data] and [inspiral] in the ini file. The
stdout and stderr from the job are directed to the logs directory. The job
runs in the universe specified in the ini file. The path to the executable
is determined from the ini file.
"""
def __init__(self,cp,dax=False):
"""
@param cp = ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'inspiral'
sections = ['data']
extension = 'xml'
InspiralAnalysisJob.__init__(self,cp,sections,exec_name,extension,dax)
self.add_condor_cmd('environment',"KMP_LIBRARY=serial;MKL_SERIAL=yes")
class CohInspBankJob(InspiralAnalysisJob):
"""
A lalapps coherent inspiral bank (cohinspbank) job used by the inspiral pipeline. The static
options are read from the section [cohinspbank] in the ini file. The stdout and
stderr from the job are directed to the logs directory. The path to the
executable is determined from the ini file.
"""
def __init__(self,cp,dax=False):
"""
@param cp = ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'cohinspbank'
sections = ['cohinspbank']
extension = 'xml'
InspiralAnalysisJob.__init__(self,cp,sections,exec_name,extension,dax)
class ChiaJob(InspiralAnalysisJob):
"""
A lalapps_coherent_inspiral job used by the inspiral pipeline. The static
options are read from the section [chia] in the ini file. The stdout and
stderr from the job are directed to the logs directory. The path to the
executable is determined from the ini file.
"""
def __init__(self,cp,dax=False):
"""
@param cp = ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'chia'
sections = ['chia']
extension = 'xml'
InspiralAnalysisJob.__init__(self,cp,sections,exec_name,extension,dax)
class CohireJob(InspiralAnalysisJob):
"""
A lalapps_cohire job used by the inspiral pipeline. The stdout and stderr from
the job are directed to the logs directory. The path to the executable is
determined from the ini file.
"""
def __init__(self,cp,dax=False):
"""
@param cp = ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'cohire'
sections = ['cohire']
extension = 'xml'
InspiralAnalysisJob.__init__(self,cp,sections,exec_name,extension,dax)
# cohire currently doesn't take GPS start/end times
self.set_stdout_file('logs/cohire-$(macroifo)-$(cluster)-$(process).out')
self.set_stderr_file('logs/cohire-$(macroifo)-$(cluster)-$(process).err')
class InjFindJob(InspiralAnalysisJob):
"""
An injfind job. The static options are read from the [injfind]
section in the cp file.
"""
def __init__(self, cp, dax = False):
"""
@param cp: a ConfigParser object from which the options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'injfind'
sections = ['injfind']
extension = 'xml'
InspiralAnalysisJob.__init__(self, cp, sections, exec_name, extension, dax)
self.add_condor_cmd('getenv', 'True')
# overwrite standard log file names
self.set_stdout_file('logs/' + exec_name + '-$(cluster)-$(process).out')
self.set_stderr_file('logs/' + exec_name + '-$(cluster)-$(process).err')
#############################################################################
class InspiralAnalysisNode(pipeline.AnalysisNode, pipeline.CondorDAGNode):
"""
An InspiralNode runs an instance of the inspiral code in a Condor DAG.
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of lalapps_inspiral.
"""
pipeline.CondorDAGNode.__init__(self,job)
pipeline.AnalysisNode.__init__(self)
opts = job.get_opts()
if ("pad-data" in opts) and int(opts['pad-data']):
self.set_pad_data(int(opts['pad-data']))
self.__zip_output = ("write-compress" in opts)
self.__data_checkpoint = False
def set_zip_output(self,zip):
"""
Set the zip output flag
"""
self.__zip_output = zip
def get_zip_output(self):
"""
Get the zip output flag
"""
return self.__zip_output
def get_output_base(self):
"""
Returns the base file name of output from the inspiral code. This is
assumed to follow the standard naming convention:
IFO-EXECUTABLE_IFOTAG_USERTAG-GPS_START-DURATION
"""
if not self.get_start() or not self.get_end() or not self.get_ifo():
raise InspiralError, "Start time, end time or ifo has not been set"
filebase = self.get_ifo() + '-' + self.job().get_exec_name().upper()
if self.get_ifo_tag():
filebase += '_' + self.get_ifo_tag()
if self.get_user_tag():
filebase += '_' + self.get_user_tag()
filebase += '-' + str(self.get_start()) + '-' + \
str(self.get_end() - self.get_start())
return(filebase)
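# For example (illustrative values), ifo 'H1', exec_name 'inspiral',
# user tag 'FULL_DATA', start 871147814 and end 871149862 give:
#   H1-INSPIRAL_FULL_DATA-871147814-2048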
def get_output(self):
"""
Returns the file name of output from the inspiral code. This is obtained
from the get_output_base() method, with the correct extension added.
"""
filename = self.get_output_base()
filename += '.' + self.job().get_extension()
if self.get_zip_output():
filename += '.gz'
if self.__data_checkpoint is False:
self.add_output_file(filename)
return filename
def get_checkpoint_image(self):
"""
Returns the file name of condor checkpoint from the inspiral code. This is
obtained from the get_output_base() method, with the correct extension added.
"""
filename = self.get_output_base()
filename += '.ckpt'
self.add_output_file(filename)
return filename
def set_data_checkpoint(self):
"""
Enables data checkpointing
"""
self.add_var_opt('data-checkpoint','')
self.__data_checkpoint = True
def get_data_checkpoint(self):
"""
Returns whether data checkpointing is enabled
"""
return self.__data_checkpoint
def get_output_cache(self):
"""
Returns the name of the cache file output from the inspiral analysis codes.
This is obtained from the get_output_base() method, with the correct
extension added.
"""
filename = self.get_output_base()
filename += '.cache'
return filename
def get_froutput(self):
"""
Returns the file name of output frame from the inspiral code.
"""
gwffile = self.get_output_base()
gwffile += '.gwf'
self.add_output_file(gwffile)
return gwffile
def finalize(self):
"""
set the data_start_time and data_end_time
"""
if self.get_pad_data():
self.set_data_start(self.get_start() - \
self.get_pad_data())
self.set_data_end(self.get_end() + \
self.get_pad_data())
#############################################################################
class InspiralPlottingNode(InspiralAnalysisNode):
"""
An InspiralPlottingNode runs an instance of the inspiral plotting code in
a Condor DAG
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of the plotting code
"""
InspiralAnalysisNode.__init__(self,job)
#############################################################################
class InspInjNode(InspiralAnalysisNode):
"""
An InspInjNode runs an instance of the inspinj generation job in a
Condor DAG.
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of lalapps_inspinj.
"""
InspiralAnalysisNode.__init__(self,job)
self.__outputName = None
self.__seed = None
def set_seed(self,seed):
"""
Set the seed of the injection file by setting a --seed option to the
node when it is executed.
@param seed: seed of the job
"""
self.add_var_opt('seed',seed)
self.__seed = seed
def get_seed(self):
"""
return the seed
"""
return( self.__seed)
def set_output(self, outputName):
"""
Set the output name of the injection file
@param outputName: name of the injection file created
"""
self.add_var_opt('output',outputName)
self.__outputName = outputName
def get_output(self):
"""
Return the manually-set output name if it exists, otherwise, derive the
name like other InspiralAnalysisNodes.
"""
if self.__outputName:
self.add_output_file(self.__outputName)
return self.__outputName
else:
outputFile = "HL-INJECTIONS_" + str(self.get_seed())
if self.get_user_tag():
outputFile += "_" + self.get_user_tag()
outputFile += "-" + str(self.get_start()) + "-" + str(self.get_end() - \
self.get_start()) + ".xml"
self.add_output_file(outputFile)
return(outputFile)
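# For example (illustrative values), seed 1234 with user tag 'PLAYGROUND',
# start 871147814 and end 871149862 give:
#   HL-INJECTIONS_1234_PLAYGROUND-871147814-2048.xml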
class BbhInjNode(InspiralAnalysisNode):
"""
A BbhInjNode runs an instance of the bbhinj generation job in a
Condor DAG.
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of lalapps_bbhinj.
"""
InspiralAnalysisNode.__init__(self,job)
def set_seed(self,seed):
"""
Set the seed of the injection file by setting a --seed option to the
node when it is executed.
@param seed: seed of the job
"""
self.add_var_opt('seed',seed)
self.__seed = seed
def get_output(self):
"""
Returns the file name of output from the injection generation code. This
must be kept synchronized with the name of the output file in bbhinj.c.
"""
if not self.get_start() or not self.get_end():
raise InspiralError, "Start time or end time has not been set"
if self.get_user_tag():
bbhinject = 'HL-INJECTIONS_' + self.get_user_tag() + '-'
bbhinject = bbhinject + str(self.get_start()) + '-'
bbhinject = bbhinject + str(self.get_end()-self.get_start()) + '.xml'
elif self.__seed:
bbhinject = 'HL-INJECTIONS_' + str(self.__seed) + '-'
bbhinject = bbhinject + str(self.get_start()) + '-'
bbhinject = bbhinject + str(self.get_end()-self.get_start()) + '.xml'
else:
bbhinject = 'HL-INJECTIONS-' + str(self.get_start()) + '-'
bbhinject = bbhinject + str(self.get_end()-self.get_start()) + '.xml'
self.add_output_file(bbhinject)
return bbhinject
class TmpltBankNode(InspiralAnalysisNode):
"""
A TmpltBankNode runs an instance of the template bank generation job in a
Condor DAG.
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of lalapps_tmpltbank.
"""
InspiralAnalysisNode.__init__(self,job)
class RandomBankNode(InspiralAnalysisNode):
"""
A RandomBankNode runs an instance of the random bank generation job in a
Condor DAG.
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of lalapps_randombank.
"""
InspiralAnalysisNode.__init__(self,job)
def get_output(self):
"""
Returns the file name of output from the template bank code. This must
be kept synchronized with the name of the output file in randombank.c.
"""
if not self.get_start() or not self.get_end():
raise InspiralError, "Start time or end time has not been set"
if self.get_user_tag():
bank = 'P-TMPLTBANK_' + self.get_user_tag() + '-'
bank = bank + str(self.get_start())
else:
bank = 'P-TMPLTBANK-' + str(self.get_start())
bank = bank + '-' + str(self.get_end() - self.get_start()) + '.xml'
self.add_output_file(bank)
return bank
class SplitBankNode(InspiralAnalysisNode):
"""
A SplitBankNode runs an instance of the split template bank job in a
Condor DAG.
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of lalapps_tmpltbank.
"""
InspiralAnalysisNode.__init__(self,job)
self.__bankfile = None
self.__numbanks = None
def set_bank(self,bank):
self.add_var_opt('bank-file', bank)
self.add_input_file(bank)
self.__bankfile = bank
def get_bank(self):
return self.__bankfile
def set_num_banks(self,numbanks):
self.add_var_opt('number-of-banks',numbanks)
self.__numbanks = int(numbanks)
def get_num_banks(self):
return self.__numbanks
def get_output(self):
"""
Returns a list of the file names of split banks. This must be kept
synchronized with the name of the output files in splitbank.c.
"""
if not self.get_bank() or not self.get_num_banks():
raise InspiralError, "Bank file or number of banks has not been set"
banks = []
x = self.__bankfile.split('-')
for i in range( 0, int(self.get_num_banks()) ):
banks.append("%s-%s_%2.2d-%s-%s" % (x[0], x[1], i, x[2], x[3]))
return banks
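# For example (illustrative), bank file 'L1-TMPLTBANK-871147814-2048.xml'
# split into 3 banks yields:
#   L1-TMPLTBANK_00-871147814-2048.xml
#   L1-TMPLTBANK_01-871147814-2048.xml
#   L1-TMPLTBANK_02-871147814-2048.xml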
class InspiralNode(InspiralAnalysisNode):
"""
An InspiralNode runs an instance of the inspiral code in a Condor DAG.
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of lalapps_inspiral.
"""
InspiralAnalysisNode.__init__(self,job)
self.__injections = None
self.add_pegasus_profile('condor', 'request_memory', '1000')
if job.get_use_gpus():
# assume all the checks have already been
# done by the InspiralJob instance
self.add_pegasus_profile('condor', '+WantGPU', 'true')
self.add_pegasus_profile('condor', 'Requirements', '( GPU_PRESENT =?= true)')
def set_bank(self,bank):
self.add_var_opt('bank-file', bank)
self.add_input_file(bank)
def set_injections(self, injections):
"""
Set the injection file for this node
"""
self.__injections = injections
self.add_var_opt('injection-file', injections)
self.add_input_file(injections)
def get_injections(self):
"""
Returns the injection file
"""
return self.__injections
class InspiralCkptNode(InspiralAnalysisNode):
"""
An InspiralCkptNode runs an instance of the inspiral code in a Condor DAG.
It then checkpoints to a file. This sets the file.
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of lalapps_inspiral.
"""
InspiralAnalysisNode.__init__(self,job)
self.__outfile = None
self.__injections = None
def set_output(self, outfile):
"""
Sets a placeholder output name which we can use to pass through the
name of the output file from the original inspiral job.
"""
self.__outfile = outfile
def get_output(self):
"""
Returns the filename from set_output().
"""
if self.__outfile:
self.add_output_file(self.__outfile)
return self.__outfile
def set_injections(self, injections):
"""
Set the injection file for this node
"""
self.__injections = injections
def get_injections(self):
"""
Returns the injection file
"""
return self.__injections
def set_checkpoint_image(self, ckptin):
"""
Adds the argument -_condor_restart for the
cases in which we want to checkpoint and grabs the
checkpoint image name from get_checkpoint_image in
the InspiralAnalysisCode section.
"""
self.add_input_file(ckptin)
self.add_var_opt('_condor_restart', ckptin, short=True)
class PTFInspiralNode(InspiralAnalysisNode):
"""
A PTFInspiralNode runs an instance of the coh_PTF_inspiral code in a Condor DAG.
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of coh_PTF_inspiral.
"""
InspiralAnalysisNode.__init__(self,job)
self.__injections = None
self.set_zip_output(True)
self.add_pegasus_profile('condor', 'request_memory', '1400')
def set_spin_bank(self,bank):
self.add_var_opt('spin-bank', bank)
self.add_input_file(bank)
def set_no_spin_bank(self,bank):
self.add_var_opt('non-spin-bank',bank)
self.add_input_file(bank)
def set_output(self):
self.add_var_opt('output-file',self.get_output_base()+ '.xml.gz')
def set_injections(self, injections):
"""
Set the injection file for this node
"""
self.__injections = injections
self.add_var_opt('injection-file', injections)
self.add_input_file(injections)
def get_injections(self):
"""
Returns the injection file
"""
return self.__injections
def set_seed(self,seed):
self.add_var_opt('random-seed',seed)
class PTFSpinCheckerNode(InspiralAnalysisNode):
"""
A PTFSpinCheckerNode runs an instance of the coh_PTF_spin_checker code in a Condor DAG.
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of coh_PTF_spin_checker.
"""
InspiralAnalysisNode.__init__(self,job)
self.__injections = None
self.add_pegasus_profile('condor', 'request_memory', '1400')
def set_bank(self,bank):
self.add_var_opt('bank-file', bank)
self.add_input_file(bank)
def set_spin_output(self,spinBank):
self.add_var_opt('spin-bank',spinBank)
def set_nospin_output(self,noSpinBank):
self.add_var_opt('non-spin-bank',noSpinBank)
class TrigbankNode(InspiralAnalysisNode):
"""
A TrigbankNode runs an instance of the triggered bank generator in a
Condor DAG.
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of trigbank.
"""
InspiralAnalysisNode.__init__(self,job)
self.__input_ifo = None
def set_input_ifo(self,ifo):
self.add_var_opt('input-ifo', ifo)
self.__input_ifo = ifo
def get_input_ifo(self):
return self.__input_ifo
def set_output_ifo(self,ifo):
self.add_var_opt('output-ifo', ifo)
self.set_ifo(ifo)
class IncaNode(InspiralAnalysisNode):
"""
An IncaNode runs an instance of the inspiral coincidence code in a Condor
DAG.
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of lalapps_inca.
"""
InspiralAnalysisNode.__init__(self,job)
self.__ifo_a = None
self.__ifo_b = None
def set_ifo_a(self, ifo):
"""
Set the interferometer code to use as IFO A.
ifo = IFO code (e.g. L1, H1 or H2).
"""
self.add_var_opt('ifo-a', ifo)
self.__ifo_a = ifo
def get_ifo_a(self):
"""
Returns the IFO code of the primary interferometer.
"""
return self.__ifo_a
def set_ifo_b(self, ifo):
"""
Set the interferometer code to use as IFO B.
ifo = IFO code (e.g. L1, H1 or H2).
"""
self.add_var_opt('ifo-b', ifo)
self.__ifo_b = ifo
def get_ifo_b(self):
"""
Returns the IFO code of the secondary interferometer.
"""
return self.__ifo_b
def get_output_a(self):
"""
Returns the file name of output from inca for ifo a. This must be kept
synchronized with the name of the output file in inca.c.
"""
if not self.get_start() or not self.get_end() or not self.get_ifo_a():
raise InspiralError, "Start time, end time or ifo a has not been set"
basename = self.get_ifo_a() + '-INCA'
if self.get_ifo_tag():
basename += '_' + self.get_ifo_tag()
if self.get_user_tag():
basename += '_' + self.get_user_tag()
filename = basename + '-' + str(self.get_start()) + '-' + \
str(self.get_end() - self.get_start()) + '.xml'
if self.get_zip_output():
filename += '.gz'
self.add_output_file(filename)
return filename
def get_output(self):
return self.get_output_a()
def get_output_b(self):
"""
Returns the file name of output from inca for ifo b. This must be kept
synchronized with the name of the output file in inca.c.
"""
if not self.get_start() or not self.get_end() or not self.get_ifo_b():
raise InspiralError, "Start time, end time or ifo a has not been set"
basename = self.get_ifo_b() + '-INCA'
if self.get_ifo_tag():
basename += '_' + self.get_ifo_tag()
if self.get_user_tag():
basename += '_' + self.get_user_tag()
filename = basename + '-' + str(self.get_start()) + '-' + \
str(self.get_end() - self.get_start()) + '.xml'
if self.get_zip_output():
filename += '.gz'
self.add_output_file(filename)
return filename
class ThincaNode(InspiralAnalysisNode):
"""
A ThincaNode runs an instance of the inspiral coincidence code in a Condor
DAG.
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of lalapps_thinca.
"""
InspiralAnalysisNode.__init__(self,job)
self.__ifo_g1 = None
self.__ifo_h1 = None
self.__ifo_h2 = None
self.__ifo_l1 = None
self.__ifo_t1 = None
self.__ifo_v1 = None
self.__num_slides = None
def set_ifo(self, ifo, pass_to_command_line=True):
"""
Add the interferometer to the list of ifos
ifo = IFO code (e.g. G1,L1,V1,T1, H1 or H2).
pass_to_command_line = boolean for adding ifo-triggers as a variable option
"""
#FIXME: Once thinca no longer needs --IFO-triggers flags,
# use AnalysisNode's set_ifos method
if ifo == 'G1':
if pass_to_command_line:
self.add_var_opt('g1-triggers','')
self.__ifo_g1 = 'G1'
elif ifo == 'H1':
if pass_to_command_line:
self.add_var_opt('h1-triggers','')
self.__ifo_h1 = 'H1'
elif ifo == 'H2':
if pass_to_command_line:
self.add_var_opt('h2-triggers','')
self.__ifo_h2 = 'H2'
elif ifo == 'L1':
if pass_to_command_line:
self.add_var_opt('l1-triggers','')
self.__ifo_l1 = 'L1'
elif ifo == 'T1':
if pass_to_command_line:
self.add_var_opt('t1-triggers','')
self.__ifo_t1 = 'T1'
elif ifo == 'V1':
if pass_to_command_line:
self.add_var_opt('v1-triggers','')
self.__ifo_v1 = 'V1'
def get_ifo_g1(self):
"""
Returns the IFO code of g1.
"""
return self.__ifo_g1
def get_ifo_h1(self):
"""
Returns the IFO code of h1.
"""
return self.__ifo_h1
def get_ifo_h2(self):
"""
Returns the IFO code of h2.
"""
return self.__ifo_h2
def get_ifo_l1(self):
"""
Returns the IFO code of l1.
"""
return self.__ifo_l1
def get_ifo_t1(self):
"""
Returns the IFO code of t1.
"""
return self.__ifo_t1
def get_ifo_v1(self):
"""
Returns the IFO code of v1.
"""
return self.__ifo_v1
def get_ifos(self):
"""
Returns the ordered list of ifos.
"""
ifos = ''
if self.get_ifo_g1():
ifos += self.get_ifo_g1()
if self.get_ifo_h1():
ifos += self.get_ifo_h1()
if self.get_ifo_h2():
ifos += self.get_ifo_h2()
if self.get_ifo_l1():
ifos += self.get_ifo_l1()
if self.get_ifo_t1():
ifos += self.get_ifo_t1()
if self.get_ifo_v1():
ifos += self.get_ifo_v1()
return ifos
def set_num_slides(self, num_slides):
"""
Set number of time slides to undertake
"""
self.add_var_opt('num-slides',num_slides)
self.__num_slides = num_slides
def get_num_slides(self):
"""
Returns the num_slides from .ini (>0 => time slides desired)
"""
return self.__num_slides
def get_output(self):
"""
Returns the file name of output from thinca. This must be kept
synchronized with the name of the output file in thinca.c.
"""
if not self.get_start() or not self.get_end() or not self.get_ifos():
raise InspiralError, "Start time, end time or ifos have not been set"
if self.__num_slides:
basename = self.get_ifos() + '-' + self.job().get_exec_name().upper() \
+ '_SLIDE'
else:
basename = self.get_ifos() + '-' + self.job().get_exec_name().upper()
if self.get_ifo_tag():
basename += '_' + self.get_ifo_tag()
if self.get_user_tag():
basename += '_' + self.get_user_tag()
filename = basename + '-' + str(self.get_start()) + '-' + \
str(self.get_end() - self.get_start()) + '.xml'
if self.get_zip_output():
filename += '.gz'
self.add_output_file(filename)
return filename
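# For example (illustrative values), ifos 'H1H2L1' with num-slides set,
# ifo tag 'SECOND' and user tag 'FULL_DATA' give:
#   H1H2L1-THINCA_SLIDE_SECOND_FULL_DATA-871147814-2048.xml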
class ThincaToCoincNode(InspiralAnalysisNode):
"""
A ThincaToCoincNode runs an instance of a ThincaToCoincJob
in a DAG.
"""
def __init__(self, job):
"""
@param job: A ThincaToCoincJob.
"""
InspiralAnalysisNode.__init__(self, job)
self.__input_cache = None
self.__instruments = None
self.__zero_lag_file = None
self.__time_slide_file = None
self.__veto_segments = None
self.__veto_segments_name = None
def set_input_cache(self, input_cache_name):
"""
@param input_cache_name: cache file for thinca_to_coinc to
read.
"""
self.add_file_opt( 'ihope-cache', input_cache_name )
self.__input_cache = input_cache_name
def get_input_cache(self):
"""
Returns input cache file for this node.
"""
return self.__input_cache
def get_output_from_cache(self, coinc_file_tag ):
"""
Returns a list of files that this node will generate using the input_cache.
The output file names are the same as the input urls, but with the
zero_lag 'THINCA' file replaced with 'THINCA_TO_COINC', and with the
filepaths pointing to the current directory in which the
thinca_to_coinc node is being run.
"""
if not self.__input_cache:
raise ValueError, "no input-cache specified"
# open the input cache file
fp = open(self.__input_cache, 'r')
input_cache = lal.Cache().fromfile(fp).sieve( description = coinc_file_tag )
output_files = [ \
'/'.join([ os.getcwd(),
re.sub('INCA', 'INCA_TO_COINC', os.path.basename(entry.url)) ]) for entry in input_cache \
]
return output_files
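# For example (illustrative), with coinc_file_tag matching the zero-lag
# files, the substitution above turns
#   .../H1H2L1-THINCA-871147814-2048.xml
# into
#   <cwd>/H1H2L1-THINCA_TO_COINC-871147814-2048.xml
# (it relies on 'INCA' being a substring of 'THINCA').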
def set_instruments(self, instruments):
"""
@param instruments: instruments that are on for the
THINCA files thinca_to_coinc is operating on.
"""
self.add_var_opt('instruments', instruments)
self.__instruments = instruments
def get_instruments(self):
"""
Returns instruments for this node.
"""
return self.__instruments
def set_veto_segments(self, veto_segments):
"""
@param veto_segments: name of xml file containing the vetoes to apply
"""
self.add_var_opt('veto-segments', veto_segments)
self.__veto_segments = veto_segments
def get_veto_segments(self):
"""
Returns the name of the veto-segments file for this node.
"""
return self.__veto_segments
def set_veto_segments_name(self, veto_segments_name):
"""
@param veto_segments_name: name of vetoes in the vetoes xml file to
apply.
"""
self.add_var_opt('veto-segments-name', veto_segments_name)
self.__veto_segments_name = veto_segments_name
def get_veto_segments_name(self):
"""
Returns the name of the vetoes applied for this node.
"""
return self.__veto_segments_name
def set_zero_lag_file(self, zero_lag_file):
"""
Sets zero_lag_file for input.
"""
self.add_file_opt( 'zero-lag-file', zero_lag_file )
self.__zero_lag_file = zero_lag_file
def get_zero_lag_file(self):
"""
Returns zero_lag_file.
"""
return self.__zero_lag_file
def set_time_slide_file(self, time_slide_file):
"""
Sets the time_slide_file for input.
"""
self.add_file_opt( 'time-slide-file', time_slide_file )
self.__time_slide_file = time_slide_file
def get_time_slide_file(self):
"""
Returns the time_slide_file.
"""
return self.__time_slide_file
class HWinjPageNode(InspiralAnalysisNode):
"""
A HWinjPageNode runs an instance of a HWinjPageJob
in a DAG.
"""
def __init__(self, job):
"""
@param job: A HWinjPageJob.
"""
InspiralAnalysisNode.__init__(self, job)
self.__input_cache = None
self.__cache_string = None
self.__outfile = None
self.__segment_dir = None
self.__source_xml = None
def set_input_cache(self, input_cache_name):
"""
@param input_cache_name: cache file for ligolw_cbc_hardware_inj_page
to read.
"""
self.add_var_opt('cache-file',input_cache_name)
self.__input_cache = input_cache_name
def set_source_xml(self, source_xml):
"""
@param source_xml: source xml file for ligolw_cbc_hardware_inj_page to read.
"""
self.add_var_opt('source-xml',source_xml)
self.__source_xml = source_xml
def set_cache_string(self,cache_string):
"""
@param cache_string: pattern to match files within cache
"""
self.add_var_opt('cache-pattern',cache_string)
self.__cache_string=cache_string
def set_output_file(self,outfile_name):
"""
@param outfile_name: Name of hw injection page
"""
self.add_var_opt('outfile',outfile_name)
self.__outfile=outfile_name
def set_segment_dir(self,dir):
"""
@param dir: directory in which to find hwinj segments
"""
self.add_var_opt('segment-dir',dir)
class SireNode(InspiralAnalysisNode):
"""
A SireNode runs an instance of the single inspiral reader code in a Condor
DAG.
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of lalapps_sire.
"""
InspiralAnalysisNode.__init__(self,job)
self.__injection_file = None
self.__ifo_tag = None
def set_ifo(self, ifo):
"""
Add the list of interferometers
"""
self.__ifo = ifo
self.add_var_opt('ifo-cut',ifo)
def get_ifo(self):
"""
Returns the two letter IFO code for this node.
"""
return self.__ifo
def set_inj_file(self, file):
"""
Sets the injection file
"""
self.__injection_file = file
self.add_var_opt('injection-file', file)
def get_inj_file(self):
"""
Gets the injection file
"""
return self.__injection_file
def set_start(self, start):
"""
Sets GPS start time
"""
self.__start = start
def get_start(self):
"""
Gets GPS start time
"""
return self.__start
def set_end(self, end):
"""
Sets GPS end time
"""
self.__end = end
def get_end(self):
"""
Gets GPS end time
"""
return self.__end
def set_ifo_tag(self,ifo_tag):
"""
Set the ifo tag that is passed to the analysis code.
@param ifo_tag: a string to identify one or more IFOs
"""
self.__ifo_tag = ifo_tag
def get_ifo_tag(self):
"""
Returns the IFO tag string
"""
return self.__ifo_tag
def get_output(self):
"""
get the name of the output file
"""
if not self.get_ifo():
raise InspiralError, "ifos have not been set"
fname = self.get_ifo() + "-SIRE"
if self.get_inj_file():
fname += "_" + self.get_inj_file().split("-")[1]
fname += "_FOUND"
if self.get_ifo_tag(): fname += "_" + self.get_ifo_tag()
if self.get_user_tag(): fname += "_" + self.get_user_tag()
if (self.get_start() and not self.get_end()) or \
(self.get_end() and not self.get_start()):
raise InspiralError, "If one of start and end is set, both must be"
if (self.get_start()):
duration=self.get_end()- self.get_start()
fname += "-" + str(self.get_start()) + "-" + str(duration)
fname += ".xml"
return fname
def get_missed(self):
"""
get the name of the missed file
"""
if self.get_inj_file():
return self.get_output().replace("FOUND", "MISSED")
else:
return None
def finalize(self):
"""
set the output options
"""
output = self.get_output()
self.add_file_opt("output", output,file_is_output_file=True)
self.add_file_opt("summary", output.replace("xml", "txt"),file_is_output_file=True)
if self.get_inj_file():
self.add_file_opt('injection-file', self.get_inj_file())
self.add_file_opt('missed-injections', self.get_missed(), file_is_output_file=True)
class CoireNode(InspiralAnalysisNode):
"""
A CoireNode runs an instance of the inspiral coire code in a Condor
DAG.
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of lalapps_coire.
"""
InspiralAnalysisNode.__init__(self,job)
self.__ifos = None
self.__ifo_tag = None
self.__num_slides = None
self.__injection_file = None
self.__output_tag = None
def set_ifos(self, ifos):
"""
Add the list of interferometers
"""
self.__ifos = ifos
def get_ifos(self):
"""
Returns the ifos
"""
return self.__ifos
def set_slides(self, slides):
"""
Add the number of time slides
"""
self.__num_slides = slides
self.add_var_opt('num-slides',slides)
def get_slides(self):
"""
Returns the number of slides
"""
return self.__num_slides
def set_inj_file(self, file):
"""
Sets the injection file
"""
if file:
self.__injection_file = file
self.add_var_opt('injection-file', file)
def get_inj_file(self):
"""
Gets the injection file
"""
return self.__injection_file
def set_start(self, start):
"""
Sets GPS start time
"""
self.__start = start
def get_start(self):
"""
Gets GPS start time
"""
return self.__start
def set_end(self, end):
"""
Sets GPS end time
"""
self.__end = end
def get_end(self):
"""
Gets GPS end time
"""
return self.__end
def set_ifo_tag(self,ifo_tag):
"""
Set the ifo tag that is passed to the analysis code.
@param ifo_tag: a string to identify one or more IFOs
"""
self.__ifo_tag = ifo_tag
def get_ifo_tag(self):
"""
Returns the IFO tag string
"""
return self.__ifo_tag
def set_output_tag(self):
fname = self.job().get_exec_name().upper()
if self.get_slides(): fname += "_SLIDE"
if self.get_inj_file():
fname += "_" + \
self.get_inj_file().split("/")[-1].split(".")[0].split("-")[1]
fname += "_FOUND"
if self.get_ifo_tag(): fname += "_" + self.get_ifo_tag()
if self.get_user_tag(): fname += "_" + self.get_user_tag()
self.__output_tag = fname
def get_output_tag(self):
return self.__output_tag
def get_output(self):
"""
get the name of the output file
"""
if not self.get_ifos():
raise InspiralError, "ifos have not been set"
self.set_output_tag()
fname = self.get_ifos() + '-' + self.get_output_tag()
if (self.get_start() and not self.get_end()) or \
(self.get_end() and not self.get_start()):
raise InspiralError, "If one of start and end is set, "\
"both must be"
if (self.get_start()):
duration=self.get_end() - self.get_start()
fname += "-" + str(self.get_start()) + "-" + str(duration)
fname += ".xml"
return fname
def get_missed(self):
"""
get the name of the missed file
"""
if self.get_inj_file():
return self.get_output().replace("FOUND", "MISSED")
else:
return None
def finalize(self):
"""
set the output options
"""
output = self.get_output()
self.add_file_opt("output", output,file_is_output_file=True)
self.add_file_opt("summary", output.replace("xml", "txt"),file_is_output_file=True)
if self.get_inj_file():
self.add_file_opt('injection-file', self.get_inj_file())
self.add_file_opt('missed-injections', self.get_missed(), file_is_output_file=True)
class FrJoinNode(InspiralAnalysisNode):
"""
A FrJoinNode runs an instance of lalapps_frjoin in a Condor DAG
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of lalapps_frjoin.
"""
InspiralAnalysisNode.__init__(self,job)
def set_output(self, outputName):
"""
Set the output name of the frame file
@param outputName: name of the injection file created
"""
self.add_var_opt('output',outputName)
self.add_file_opt('output',outputName,file_is_output_file=True)
self.__outputName = outputName
def get_output(self):
"""
Get the output name of the frame file
"""
return self.__outputName
class CohBankNode(InspiralAnalysisNode):
"""
A CohBankNode runs an instance of the coherent code in a Condor DAG.
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of lalapps_coherent_inspiral.
"""
InspiralAnalysisNode.__init__(self,job)
self.__bank = None
self.__ifos = None
def set_bank(self,bank):
self.add_var_opt('bank-file', bank)
self.add_input_file(bank)
self.__bank = bank
def get_bank(self):
return self.__bank
def set_ifo(self, ifo):
"""
Add the interferometer to the list of ifos
ifo = IFO code (e.g. G1,L1, H1 or H2).
"""
if ifo == 'G1':
self.add_var_opt('g1-triggers','')
self.__ifo_g1 = 'G1'
elif ifo == 'H1':
self.add_var_opt('h1-triggers','')
self.__ifo_h1 = 'H1'
elif ifo == 'H2':
self.add_var_opt('h2-triggers','')
self.__ifo_h2 = 'H2'
elif ifo == 'L1':
self.add_var_opt('l1-triggers','')
self.__ifo_l1 = 'L1'
elif ifo == 'T1':
self.add_var_opt('t1-triggers','')
self.__ifo_t1 = 'T1'
elif ifo == 'V1':
self.add_var_opt('v1-triggers','')
self.__ifo_v1 = 'V1'
def set_ifos(self,ifos):
self.add_var_opt('ifos', ifos)
self.__ifos = ifos
def get_ifos(self):
return self.__ifos
def set_num_slides(self, num_slides):
"""
Set number of time slides to undertake
"""
self.add_var_opt('num-slides',num_slides)
self.__num_slides = num_slides
def get_output(self):
"""
Returns the file name of output from the coherent bank.
"""
if not self.get_ifos():
raise InspiralError, "Ifos have not been set"
basename = self.get_ifos() + '-COHBANK'
if self.get_user_tag():
basename += '_' + self.get_user_tag()
filename = basename + '-' + str(self.get_start()) + '-' + \
str(self.get_end() - self.get_start()) + '.xml'
if self.get_zip_output():
filename += '.gz'
self.add_output_file(filename)
return filename
class CohInspBankNode(InspiralAnalysisNode):
"""
A CohInspBankNode runs an instance of the coherent code in a Condor DAG.
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of lalapps_coherent_inspiral.
"""
InspiralAnalysisNode.__init__(self,job)
self.__bank = None
self.__ifos = None
def set_bank(self,bank):
self.add_var_opt('bank-file', bank)
self.add_input_file(bank)
self.__bank = bank
def get_bank(self):
return self.__bank
def set_ifos(self,ifos):
self.add_var_opt('ifos', ifos)
self.__ifos = ifos
def get_ifos(self):
return self.__ifos
def set_num_slides(self, num_slides):
"""
Set number of time slides to undertake
"""
self.add_var_opt('num-slides',num_slides)
self.__num_slides = num_slides
def get_output(self):
"""
Returns the file name of output from the coherent bank.
"""
if not self.get_ifos():
raise InspiralError, "Ifos have not been set"
basename = self.get_ifos() + '-COHINSPBANK'
if self.get_user_tag():
basename += '_' + self.get_user_tag()
filename = basename + '-' + str(self.get_start()) + '-' + \
str(self.get_end() - self.get_start()) + '.xml'
if self.get_zip_output():
filename += '.gz'
self.add_output_file(filename)
return filename
class ChiaNode(InspiralAnalysisNode):
"""
A ChiaNode runs an instance of the coherent_inspiral code in a Condor
DAG.
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of lalapps_coherent_inspiral.
"""
InspiralAnalysisNode.__init__(self,job)
def set_bank(self,bank):
self.add_var_opt('bank-file', bank)
self.add_input_file(bank)
def set_ifo_tag(self,ifo_tag):
"""
Set the ifo tag that is passed to the analysis code.
@param ifo_tag: a string to identify one or more IFOs
"""
self.__ifo_tag = ifo_tag
def get_ifo_tag(self):
"""
Returns the IFO tag string
"""
return self.__ifo_tag
def get_output(self):
"""
Returns the file name of output from coherent inspiral.
"""
if not self.get_start() or not self.get_end() or not self.get_ifo_tag():
raise InspiralError, "Start time, end time or ifos have not been set"
basename = self.get_ifo_tag() + '-CHIA'
if self.get_user_tag():
basename += '_' + self.get_user_tag()
filename = basename + '-' + str(self.get_start()) + '-' + \
str(self.get_end() - self.get_start()) + '.xml'
if self.get_zip_output():
filename += '.gz'
self.add_output_file(filename)
return filename
class CohireNode(InspiralAnalysisNode):
"""
A CohireNode runs an instance of the inspiral cohire code in a Condor
DAG.
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of lalapps_cohire.
"""
InspiralAnalysisNode.__init__(self,job)
self.__ifos = None
self.__ifo_tag = None
self.__num_slides = None
self.__injection_file = None
self.__output_tag = None
def set_ifos(self, ifos):
"""
Add the list of interferometers
"""
self.__ifos = ifos
def get_ifos(self):
"""
Returns the ifos
"""
return self.__ifos
def set_slides(self, slides):
"""
Add the number of time slides
"""
self.__num_slides = slides
self.add_var_opt('num-slides',slides)
def get_slides(self):
"""
Returns the number of slides
"""
return self.__num_slides
def set_inj_file(self, file):
"""
Sets the injection file
"""
if file:
self.__injection_file = file
self.add_var_opt('injection-file', file)
def get_inj_file(self):
"""
Gets the injection file
"""
return self.__injection_file
def set_start(self, start):
"""
Sets GPS start time
"""
self.__start = start
def get_start(self):
"""
Gets GPS start time
"""
return self.__start
def set_end(self, end):
"""
Sets GPS end time
"""
self.__end = end
def get_end(self):
"""
Gets GPS end time
"""
return self.__end
def set_ifo_tag(self,ifo_tag):
"""
Set the ifo tag that is passed to the analysis code.
@param ifo_tag: a string to identify one or more IFOs
"""
self.__ifo_tag = ifo_tag
def get_ifo_tag(self):
"""
Returns the IFO tag string
"""
return self.__ifo_tag
def set_output_tag(self):
fname = self.job().get_exec_name().upper()
if self.get_slides(): fname += "_SLIDE"
if self.get_inj_file():
fname += "_" + \
self.get_inj_file().split("/")[-1].split(".")[0].split("-")[1]
fname += "_FOUND"
if self.get_ifo_tag(): fname += "_" + self.get_ifo_tag()
if self.get_user_tag(): fname += "_" + self.get_user_tag()
self.__output_tag = fname
def get_output_tag(self):
return self.__output_tag
def get_output(self):
"""
get the name of the output file
"""
if not self.get_ifos():
raise InspiralError, "ifos have not been set"
self.set_output_tag()
fname = self.get_ifos() + '-' + self.get_output_tag()
if (self.get_start() and not self.get_end()) or \
(self.get_end() and not self.get_start()):
raise InspiralError, "If one of start and end is set, "\
"both must be"
if (self.get_start()):
duration=self.get_end() - self.get_start()
fname += "-" + str(self.get_start()) + "-" + str(duration)
fname += ".xml"
return fname
def get_missed(self):
"""
get the name of the missed file
"""
if self.get_inj_file():
return self.get_output().replace("FOUND", "MISSED")
else:
return None
def finalize(self):
"""
set the output options
"""
output = self.get_output()
self.add_file_opt("output", output,file_is_output_file=True)
self.add_file_opt("summary", output.replace("xml", "txt"),file_is_output_file=True)
if self.get_inj_file():
self.add_file_opt('injection-file', self.get_inj_file())
self.add_file_opt('missed-injections', self.get_missed(), file_is_output_file=True)
class InspInjFindNode( InspiralAnalysisNode ):
"""
An InspInjFindNode runs an instance of the InspInjJob in a
Condor DAG.
"""
def __init__(self, job):
"""
@param job: A CondorDAGJob that can run an instance of ligolw_inspinjfind.
"""
InspiralAnalysisNode.__init__(self, job)
##############################################################################
#Plotting Jobs and Nodes
class PlotInspiralrangeJob(InspiralPlottingJob):
"""
A plotinspiralrange job. The static options are read from the section
[plotinspiralrange] in the ini file. The stdout and stderr from the job
are directed to the logs directory. The path to the executable is
determined from the ini file.
"""
def __init__(self,cp,dax=False):
"""
@param cp = ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'plotinspiralrange'
sections = ['plotinspiralrange']
extension = 'html'
InspiralPlottingJob.__init__(self,cp,sections,exec_name,extension,dax)
class PlotInspiralrangeNode(InspiralPlottingNode):
"""
A PlotInspiralrangeNode runs an instance of the plotinspiral code in a
Condor DAG.
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of plotinspiralrange.
"""
InspiralPlottingNode.__init__(self,job)
#######################################################################################
class PlotInspiralJob(InspiralPlottingJob):
"""
A plotinspiral job. The static options are read from the section
[plotinspiral] in the ini file. The stdout and stderr from the job
are directed to the logs directory. The path to the executable is
determined from the ini file.
"""
def __init__(self,cp,dax=False):
"""
@param cp = ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'plotinspiral'
sections = ['plotinspiral']
extension = 'html'
InspiralPlottingJob.__init__(self,cp,sections,exec_name,extension,dax)
class PlotInspiralNode(InspiralPlottingNode):
"""
A PlotInspiralNode runs an instance of the plotinspiral code in a Condor DAG.
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of plotinspiral.
"""
InspiralPlottingNode.__init__(self,job)
###########################################################################################
class PlotThincaJob(InspiralPlottingJob):
"""
A plotthinca job. The static options are read from the section
[plotthinca] in the ini file. The stdout and stderr from the job
are directed to the logs directory. The path to the executable is
determined from the ini file.
"""
def __init__(self,cp,dax=False):
"""
@param cp = ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'plotthinca'
sections = ['plotthinca']
extension = 'html'
InspiralPlottingJob.__init__(self,cp,sections,exec_name,extension,dax)
self.add_condor_cmd('request_memory', '2500')
class PlotThincaNode(InspiralPlottingNode):
"""
A PlotThincaNode runs an instance of the plotthinca code in a Condor DAG.
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of plotthinca.
"""
InspiralPlottingNode.__init__(self,job)
###########################################################################################
class PlotCohsnrJob(InspiralPlottingJob):
"""
A plotcohsnr job. The static options are read from the section
[plotcohsnr] in the ini file. The stdout and stderr from the job
are directed to the logs directory. The path to the executable is
determined from the ini file.
"""
def __init__(self,cp,dax=False):
"""
@param cp = ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'plotcohsnr'
sections = ['plotcohsnr']
extension = 'html'
InspiralPlottingJob.__init__(self,cp,sections,exec_name,extension,dax)
class PlotCohsnrNode(InspiralPlottingNode):
"""
A PlotCohsnrNode runs an instance of the plotcohsnr code in a Condor DAG.
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of plotcohsnr.
"""
InspiralPlottingNode.__init__(self,job)
#######################################################################################
class PlotNumtemplatesJob(InspiralPlottingJob):
"""
A plotnumtemplates job. The static options are read from the section
[plotnumtemplates] in the ini file. The stdout and stderr from the job
are directed to the logs directory. The path to the executable is
determined from the ini file.
"""
def __init__(self,cp,dax=False):
"""
@param cp = ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'plotnumtemplates'
sections = ['plotnumtemplates']
extension = 'html'
InspiralPlottingJob.__init__(self,cp,sections,exec_name,extension,dax)
class PlotNumtemplatesNode(InspiralPlottingNode):
"""
A PlotNumtemplatesNode runs an instance of the plotnumtemplates code in a Condor DAG.
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of plotnumtemplates.
"""
InspiralPlottingNode.__init__(self,job)
##############################################################################
class PlotEthincaJob(InspiralPlottingJob):
"""
A plotethinca job. The static options are read from the section
[plotethinca] in the ini file. The stdout and stderr from the job
are directed to the logs directory. The path to the executable is
determined from the ini file.
"""
def __init__(self,cp,dax=False):
"""
@param cp = ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'plotethinca'
sections = ['plotethinca']
extension = 'html'
InspiralPlottingJob.__init__(self,cp,sections,exec_name,extension,dax)
self.add_condor_cmd('request_memory', '2500')
class PlotEthincaNode(InspiralPlottingNode):
"""
A PlotEthincaNode runs an instance of the plotethinca code in a Condor DAG.
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of plotethinca.
"""
InspiralPlottingNode.__init__(self,job)
#############################################################################
class PlotInspmissedJob(InspiralPlottingJob):
"""
A plotinspmissed job. The static options are read from the section
[plotinspmissed] in the ini file. The stdout and stderr from the job
are directed to the logs directory. The path to the executable is
determined from the ini file.
"""
def __init__(self,cp,dax=False):
"""
@param cp = ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'plotinspmissed'
sections = ['plotinspmissed']
extension = 'html'
InspiralPlottingJob.__init__(self,cp,sections,exec_name,extension,dax)
class PlotInspmissedNode(InspiralPlottingNode):
"""
A PlotInspmissedNode runs an instance of the plotinspmissed code in a Condor DAG.
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of plotinspmissed.
"""
InspiralPlottingNode.__init__(self,job)
#############################################################################
class PlotEffdistcutJob(InspiralPlottingJob):
"""
A ploteffdistcut job. The static options are read from the section
[ploteffdistcut] in the ini file. The stdout and stderr from the job
are directed to the logs directory. The path to the executable is
determined from the ini file.
"""
def __init__(self,cp,dax=False):
"""
@param cp = ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'ploteffdistcut'
sections = ['ploteffdistcut']
extension = 'html'
InspiralPlottingJob.__init__(self,cp,sections,exec_name,extension,dax)
class PlotEffdistcutNode(InspiralPlottingNode):
"""
A PlotEffdistcutNode runs an instance of the
ploteffdistcut code in a Condor DAG.
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of ploteffdistcut.
"""
InspiralPlottingNode.__init__(self,job)
#############################################################################
class PlotInspinjJob(InspiralPlottingJob):
"""
A plotinspinj job. The static options are read from the section
[plotinspinj] in the ini file. The stdout and stderr from the job
are directed to the logs directory. The path to the executable is
determined from the ini file.
"""
def __init__(self,cp,dax=False):
"""
@param cp = ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'plotinspinj'
sections = ['plotinspinj']
extension = 'html'
InspiralPlottingJob.__init__(self,cp,sections,exec_name,extension,dax)
self.add_condor_cmd('request_memory', '2500')
class PlotInspinjNode(InspiralPlottingNode):
"""
A PlotInspinjNode runs an instance of the plotinspinj code in a Condor DAG.
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of plotinspinj.
"""
InspiralPlottingNode.__init__(self,job)
#############################################################################
class PlotSnrchiJob(InspiralPlottingJob):
"""
A plotsnrchi job. The static options are read from the section
[plotsnrchi] in the ini file. The stdout and stderr from the job
are directed to the logs directory. The path to the executable is
determined from the ini file.
"""
def __init__(self,cp,dax=False):
"""
@param cp = ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'plotsnrchi'
sections = ['plotsnrchi']
extension = 'html'
InspiralPlottingJob.__init__(self,cp,sections,exec_name,extension,dax)
self.add_condor_cmd('request_memory', '2500')
class PlotSnrchiNode(InspiralPlottingNode):
"""
A PlotSnrchiNode runs an instance of the plotsnrchi code in a Condor DAG.
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of plotsnrchi.
"""
InspiralPlottingNode.__init__(self,job)
#############################################################################
class PlotGRBtimeslideStatsJob(InspiralAnalysisJob):
"""
A plotgrbtimeslidestats job. The static options are read from the section
[grbtimeslidestats] in the ini file. The stdout and stderr from the job
are directed to the logs directory. The path to the executable is
determined from the ini file.
"""
def __init__(self,cp,dax=False):
"""
@param cp = ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'pylal_grbtimeslide_stats'
sections = ['grbtimeslidestats']
extension = 'html'
InspiralAnalysisJob.__init__(self,cp,sections,exec_name,extension,dax)
self.add_condor_cmd('getenv', 'True')
class PlotGRBtimeslideStatsNode(InspiralAnalysisNode):
"""
A PlotGRBtimeslideStatsNode runs an instance of the pylal_grbtimeslide_stats code in a Condor DAG.
"""
def __init__(self,job):
"""
job = A CondorDAGJob that can run an instance of pylal_grbtimeslide_stats.
"""
InspiralAnalysisNode.__init__(self,job)
#############################################################################
class MiniFollowupsJob(InspiralPlottingJob):
"""
A minifollowups job. Static options are read from the
[minifollowups] section in the ini file.
"""
def __init__(self, cp, dax = False):
"""
@param cp: ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'minifollowups'
sections = ['minifollowups','omega-scans']
extension = None
InspiralPlottingJob.__init__(self, cp, sections, exec_name, extension, dax)
self.add_condor_cmd('request_memory', '2500')
def set_time_slides(self):
"""
Turns on the --time-slides argument.
"""
self.add_opt('time-slides', '')
class MiniFollowupsNode(InspiralPlottingNode):
"""
A mininfollowups node.
"""
def __init__(self, job):
"""
@param job: a MiniFollowupsJob
"""
InspiralAnalysisNode.__init__(self, job)
self.__cache_file = None
self.__cache_string = None
self.__prefix = None
self.__suffix = None
self.__input_xml = None
self.__input_xml_summary = None
self.__output_html_table = None
self.__table_name = None
def set_cache_file(self, cache_file):
"""
Set the ihope cache file to use.
"""
self.add_file_opt( 'cache-file', cache_file )
self.__cache_file = cache_file
def get_cache_file(self):
"""
Returns the cache file that's set.
"""
return self.__cache_file
def set_cache_string(self, cache_string):
"""
Set the cache string (pattern) used to sieve the cache file.
"""
self.add_file_opt( 'cache-string', cache_string )
self.__cache_string = cache_string
def get_cache_string(self):
"""
Returns the cache string that's set.
"""
return self.__cache_string
def set_prefix(self, prefix):
"""
Sets the prefix option, which is used for plot names.
"""
self.add_var_opt( 'prefix', prefix )
self.__prefix = prefix
def get_prefix(self):
"""
Return the prefix that's set.
"""
return self.__prefix
def set_suffix(self, suffix):
"""
Sets the suffix option, which is used for plot names.
"""
self.add_var_opt( 'suffix', suffix )
self.__suffix = suffix
def get_suffix(self):
"""
Return the suffix that's set.
"""
return self.__suffix
def set_input_xml(self, input_xml):
"""
Sets the input xml.
"""
self.add_var_opt( 'input-xml', input_xml)
self.__input_xml = input_xml
def get_input_xml(self):
"""
Return the input_xml that's set.
"""
return self.__input_xml
def set_input_xml_summary(self, input_xml_summary):
"""
Sets the input xml summary.
"""
self.add_var_opt( 'input-xml-summary', input_xml_summary)
self.__input_xml_summary = input_xml_summary
def get_input_xml_summary(self):
"""
Return the input_xml_summary that's set.
"""
return self.__input_xml_summary
def set_output_html_table(self, output_html_table):
"""
Sets the output html table.
"""
self.add_var_opt( 'output-html-table', output_html_table)
self.__output_html_table = output_html_table
def get_output_html_table(self):
"""
Return the output_html_table that's set.
"""
return self.__output_html_table
def set_table_name(self, table_name):
"""
Sets the table-name argument.
"""
self.add_var_opt( 'table-name', table_name )
self.__table_name = table_name
def get_table_name(self):
"""
Return the table_name that's set.
"""
return self.__table_name
#############################################################################
# following are types of pipeline.SqliteJobs and Nodes
class DBSimplifyJob(pipeline.SqliteJob):
"""
A DBSimplify job. The static options are read from the section
[dbsimplify] in the ini file.
"""
def __init__(self, cp, dax = False):
"""
@param cp: ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'dbsimplify'
sections = ['dbsimplify']
pipeline.SqliteJob.__init__(self, cp, sections, exec_name, dax)
class DBSimplifyNode(pipeline.SqliteNode):
"""
A DBSimplify node.
"""
def __init__(self, job):
"""
@param job: a DBSimplifyJob
"""
pipeline.SqliteNode.__init__(self, job)
class ComputeDurationsJob(pipeline.SqliteJob):
"""
A ComputeDurations job. The static options are read from the section
[compute_durations] in the ini file.
"""
def __init__(self, cp, dax = False):
"""
@param cp: ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'compute_durations'
sections = ['compute_durations']
pipeline.SqliteJob.__init__(self, cp, sections, exec_name, dax)
class ComputeDurationsNode(pipeline.SqliteNode):
"""
A ComputeDurations node.
"""
def __init__(self, job):
"""
@param job: a ComputeDurationsJob
"""
pipeline.SqliteNode.__init__(self, job)
class DBAddInjJob(pipeline.SqliteJob):
"""
A DBAddInj job. The static options are read from the section
[dbaddinj] in the ini file.
"""
def __init__(self, cp, dax = False):
"""
@param cp: ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'dbaddinj'
sections = ['dbaddinj']
pipeline.SqliteJob.__init__(self, cp, sections, exec_name, dax)
class DBAddInjNode(pipeline.SqliteNode):
"""
A DBAddInj node.
"""
def __init__(self, job ):
"""
@param job: a DBAddInj job
"""
pipeline.SqliteNode.__init__(self, job)
self.__injection_file = None
self.__inj_tag = None
def set_injection_file( self, injection_file ):
"""
@param injection_file: Injection file for dbaddinj to
add to the database.
"""
self.add_file_opt( 'injection-file', injection_file )
self.__injection_file = injection_file
def get_injection_file( self ):
"""
Returns injection file for this node.
"""
return self.__injection_file
def set_inj_tag( self, inj_tag):
"""
@param inj_tag: Injection tag used to name the injection files
"""
self.add_var_opt( 'sim-tag', inj_tag )
self.__inj_tag = inj_tag
def get_inj_tag( self):
"""
Returns injection_tag for this node.
"""
return self.__inj_tag
class RepopCoincJob(pipeline.SqliteJob):
"""
A repop_coinc job. The static options are read from the section
[repop_coinc] in the ini file.
"""
def __init__(self, cp, dax = False):
"""
@param cp: ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'repop_coinc'
sections = ['repop_coinc']
pipeline.SqliteJob.__init__(self, cp, sections, exec_name, dax)
class RepopCoincNode(pipeline.SqliteNode):
"""
A repop_coinc node.
"""
def __init__(self, job):
"""
@param job: a RepopCoincJob
"""
pipeline.SqliteNode.__init__(self, job)
class DBInjFindJob(pipeline.SqliteJob):
"""
A dbinjfind job. The static options are read from the section
[dbinjfind] in the ini file.
"""
def __init__(self, cp, dax = False):
"""
@param cp: ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'dbinjfind'
sections = ['dbinjfind']
pipeline.SqliteJob.__init__(self, cp, sections, exec_name, dax)
class DBInjFindNode(pipeline.SqliteNode):
"""
A dbinjfind node.
"""
def __init__(self, job):
"""
@param job: a DBInjFindJob
"""
pipeline.SqliteNode.__init__(self, job)
class ClusterCoincsJob(pipeline.SqliteJob):
"""
A cluster coincs job. The static options are read from the section
[cluster_coincs] in the ini file.
"""
def __init__(self, cp, dax = False):
"""
@param cp: ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'cluster_coincs'
sections = ['cluster_coincs']
pipeline.SqliteJob.__init__(self, cp, sections, exec_name, dax)
class ClusterCoincsNode(pipeline.SqliteNode):
"""
A ClusterCoincs node.
"""
def __init__(self, job):
"""
@param job: a ClusterCoincsJob
"""
pipeline.SqliteNode.__init__(self, job)
class CFarJob(pipeline.SqliteJob):
"""
A cfar job. The static options are read from the section [cfar] in
the ini file.
"""
def __init__(self, cp, sections, dax = False):
"""
@param cp: ConfigParser object from which options are read.
@param sections: list of sections for cp to read from
@param dax UNDOCUMENTED
"""
exec_name = 'cfar'
pipeline.SqliteJob.__init__(self, cp, sections, exec_name, dax)
class CFarNode(pipeline.SqliteNode):
"""
A CFar node.
"""
def __init__(self, job):
"""
@param job: a CFarJob
"""
pipeline.SqliteNode.__init__(self, job)
class LigolwCBCPrintJob(pipeline.SqliteJob):
"""
A LigolwCBCPrintJob is a generic job class for ligolw_cbc_print* programs, e.g., ligolw_cbc_printlc.
"""
def __init__(self, cp, exec_name, sections, dax = False):
"""
@param cp: ConfigParser object from which options are read.
@param exec_name UNDOCUMENTED
@param sections: list of sections for cp to read from
@param dax UNDOCUMENTED
"""
pipeline.SqliteJob.__init__(self, cp, sections, exec_name, dax)
class LigolwCBCPrintNode(pipeline.SqliteNode):
"""
A LigolwCBCPrintJob is a generic node class for ligolw_cbc_print* programs, e.g., ligolw_cbc_printlc.
This class offers options common to these programs.
"""
def __init__(self, job):
"""
@param job: a PrintLCJob
"""
pipeline.SqliteNode.__init__(self, job)
self.__extract_to_xml = None
self.__extract_to_database = None
self.__exclude_coincs = None
self.__include_only_coincs = None
self.__sim_tag = None
self.__output_format = None
self.__columns = None
def set_extract_to_xml(self, xml_filename):
"""
Sets the extract-to-xml option.
"""
self.add_var_opt('extract-to-xml', xml_filename)
self.__extract_to_xml = xml_filename
def get_extract_to_xml(self):
"""
Gets xml-filename if extract-to-xml is set.
"""
return self.__extract_to_xml
def set_extract_to_database(self, database_filename):
"""
Sets the extract-to-database option.
"""
self.add_var_opt('extract-to-database', database_filename)
self.__extract_to_database = database_filename
def get_extract_to_database(self):
"""
Gets database-filename if extract-to-database is set.
"""
return self.__extract_to_database
def set_exclude_coincs(self, exclude_coincs):
"""
Sets exclude-coincs option.
"""
self.add_var_opt('exclude-coincs', exclude_coincs)
self.__exclude_coincs = exclude_coincs
def get_exclude_coincs(self):
"""
Gets exclude-coincs option.
"""
return self.__exclude_coincs
def set_include_only_coincs(self, include_only_coincs):
"""
Sets include-only-coincs option.
"""
self.add_var_opt('include-only-coincs', include_only_coincs)
self.__include_only_coincs = include_only_coincs
def get_include_only_coincs(self):
"""
Gets include-only-coincs option.
"""
return self.__include_only_coincs
def set_sim_tag(self, sim_tag):
"""
Sets the --sim-tag option.
"""
self.add_var_opt('sim-tag', sim_tag)
self.__sim_tag = sim_tag
def get_sim_tag(self):
"""
Gets sim-tag option.
"""
return self.__sim_tag
def set_output_format(self, output_format):
"""
Sets the output-format option. (Note that the default
for all ligolw_cbc_print* jobs is xml.)
"""
self.add_var_opt('output-format', output_format)
self.__output_format = output_format
def get_output_format(self):
"""
Gets the output-format option.
"""
return self.__output_format
def set_columns(self, columns):
"""
Sets the columns option.
"""
self.add_var_opt('columns', columns)
self.__columns = columns
def get_columns(self):
"""
Gets the columns option.
"""
return self.__columns
class PrintLCNode(LigolwCBCPrintNode):
"""
A special instance of LigolwCBCPrintNode that adds printlc-specific methods.
"""
def __init__(self, job):
"""
@param job: a LigolwCBCPrintJob
"""
LigolwCBCPrintNode.__init__(self, job)
self.__datatype = None
def set_datatype(self, datatype):
"""
Sets datatype option.
"""
self.add_var_opt('datatype', datatype)
self.__datatype = datatype
def get_datatype(self):
"""
Gets datatype.
"""
return self.__datatype
class PrintSimsNode(LigolwCBCPrintNode):
"""
A special instance of LigolwCBCPrintNode that adds printsims-specific methods.
"""
def __init__(self, job):
"""
@param job: a LigolwCBCPrintJob
"""
LigolwCBCPrintNode.__init__(self, job)
self.__comparison_datatype = None
self.__simulation_table = None
self.__recovery_table = None
def set_comparison_datatype(self, datatype):
"""
Sets comparison-datatype option.
"""
self.add_var_opt('comparison-datatype', datatype)
self.__comparison_datatype = datatype
def get_comparison_datatype(self):
"""
Gets comparison-datatype.
"""
return self.__comparison_datatype
class PrintMissedNode(LigolwCBCPrintNode):
"""
A special instance of LigolwCBCPrintNode that adds printmissed-specific methods.
"""
def __init__(self, job):
"""
@param job: a LigolwCBCPrintJob
"""
LigolwCBCPrintNode.__init__(self, job)
class PlotSlidesJob(pipeline.SqliteJob):
"""
A plotslides job. The static options are read from the sections [plot_input]
and [plotslides].
"""
def __init__(self, cp, dax = False):
"""
@param cp: ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'plotslides'
sections = ['plot_input', 'plotslides']
pipeline.SqliteJob.__init__(self, cp, sections, exec_name, dax)
def set_plot_playground_only(self):
"""
Sets plot-playground-only option. This causes job to only plot playground.
"""
self.add_opt('plot-playground-only','')
class PlotSlidesNode(pipeline.SqliteNode):
"""
A PlotSlides node.
"""
def __init__(self, job):
"""
@param job: a PlotSlidesJob
"""
pipeline.SqliteNode.__init__(self, job)
class PlotCumhistJob(pipeline.SqliteJob):
"""
A plotcumhist job. The static options are read from the sections [plot_input] and
[plotcumhist].
"""
def __init__(self, cp, dax = False):
"""
@param cp: ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'plotcumhist'
sections = ['plot_input', 'plotcumhist']
pipeline.SqliteJob.__init__(self, cp, sections, exec_name, dax)
def set_plot_playground_only(self):
"""
Sets plot-playground-only option. This causes job to only plot playground.
"""
self.add_opt('plot-playground-only','')
class PlotCumhistNode(pipeline.SqliteNode):
"""
A PlotCumhist node.
"""
def __init__(self, job):
"""
@param job: a PlotCumhist Job
"""
pipeline.SqliteNode.__init__(self, job)
class PlotIfarJob(pipeline.SqliteJob):
"""
A plotifar job. The static options are read from the [plotifar] section.
"""
def __init__(self, cp, dax = False):
"""
@param cp: ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'plotifar'
sections = ['plot_input','plotifar']
pipeline.SqliteJob.__init__(self, cp, sections, exec_name, dax)
class PlotIfarNode(pipeline.SqliteNode):
"""
A PlotIfar node.
"""
def __init__(self, job):
"""
@param job: a PlotIfarJob
"""
pipeline.SqliteNode.__init__(self, job)
self.__datatype = None
def set_datatype(self, datatype):
"""
Sets datatype option.
"""
self.add_var_opt('datatype', datatype)
self.__datatype = datatype
def get_datatype(self):
"""
Gets datatype.
"""
return self.__datatype
class PlotFMJob(pipeline.SqliteJob):
"""
A plotfm job. The static options are read from the sections [plot_input] and [plotfm].
"""
def __init__(self, cp, dax = False):
"""
@param cp: ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'plotfm'
sections = ['plot_input', 'plotfm']
pipeline.SqliteJob.__init__(self, cp, sections, exec_name, dax)
class PlotFMNode(pipeline.SqliteNode):
"""
A PlotFM node.
"""
def __init__(self, job):
"""
@param job: a PlotFMJob
"""
pipeline.SqliteNode.__init__(self, job)
self.__sim_tag = None
def set_sim_tag(self, sim_tag):
"""
Sets the --sim-tag option.
"""
self.add_var_opt('sim-tag', sim_tag)
self.__sim_tag = sim_tag
def get_sim_tag(self):
"""
Gets sim-tag option.
"""
return self.__sim_tag
##############################################################################
# some functions to make life easier later
def overlap_test(interval1, interval2, slide_sec=0):
"""
Test whether the two intervals could possibly overlap with one of them being
slid by a maximum time of slide_sec. Perform three tests:
1) Does the start of interval 1 lie within interval 2's range (with the
start decremented by slide_sec and the end incremented by slide_sec)
2) Does the end of interval 1 lie within interval 2's range (with the start
decremented by slide_sec and the end incremented by slide_sec)
3) Does interval 1 completely cover (the extended) interval 2,
ie is interval_1 start before (interval 2 start - slide_sec) AND
interval 1 end after (interval 2 end + slide_sec)
If any of the above conditions are satisfied then return True, else False.
"""
start1 = interval1.start()
end1 = interval1.end()
left = interval2.start() - slide_sec
right = interval2.end() + slide_sec
return (start1 >= left and start1 <= right) or \
(end1 >= left and end1 <= right) or \
(start1 <= left and end1 >= right)
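# Usage sketch (commented out; the _Interval stub below is hypothetical --
# any object exposing start()/end() accessors, such as a glue.segments-style
# interval, would work):
#
#   class _Interval(object):
#       def __init__(self, s, e): self._s, self._e = s, e
#       def start(self): return self._s
#       def end(self): return self._e
#
#   overlap_test(_Interval(0, 10), _Interval(12, 20))               # False
#   overlap_test(_Interval(0, 10), _Interval(12, 20), slide_sec=5)  # True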
class SearchVolumeJob(pipeline.SqliteJob):
"""
A search volume job. Computes the observed physical volume
above a specified FAR; if FAR is not specified, computes the
volume above the loudest event (open box) or FAR=1/livetime
(closed box).
"""
def __init__(self, cp, dax = False):
"""
@param cp: ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = 'search_volume'
pipeline.SqliteJob.__init__(self, cp, ['search-volume'], exec_name, dax)
self.add_condor_cmd('environment',"KMP_LIBRARY=serial;MKL_SERIAL=yes")
class SearchVolumeNode(pipeline.SqliteNode):
"""
A search volume node.
"""
def __init__(self, job):
"""
"""
pipeline.SqliteNode.__init__(self, job)
def add_database(self, db):
self.add_var_arg(db)
def set_output_cache(self, file):
self.add_var_opt("output-cache", file)
def set_user_tag(self, tag):
self.add_var_opt("user-tag", tag)
def set_veto_segments_name(self, name):
self.add_var_opt("veto-segments-name", name)
def set_open_box(self):
self.add_var_arg("--open-box")
class SearchUpperLimitJob(pipeline.SqliteJob):
"""
A search upper limit job. Compute the search upper limit from the search
volume output. Generates upper limit plots.
"""
def __init__(self, cp, dax = False):
"""
@param cp: ConfigParser object from which options are read.
sections: list of sections for cp to read from
@param dax UNDOCUMENTED
"""
exec_name = 'search_upper_limit'
pipeline.SqliteJob.__init__(self, cp, ['upper-limit'], exec_name, dax)
self.add_condor_cmd('environment',"KMP_LIBRARY=serial;MKL_SERIAL=yes")
class SearchUpperLimitNode(pipeline.SqliteNode):
"""
A search upper limit node.
"""
def __init__(self, job):
"""
@param job: a SearchUpperLimitJob
"""
pipeline.SqliteNode.__init__(self, job)
self.open_box = False
def add_input_cache(self, input_cache):
self.add_var_arg(input_cache)
def set_user_tag(self, tag):
self.add_var_opt("user-tag", tag)
def set_open_box(self):
'''
Set the open box flag.
'''
if not self.open_box:
self.open_box = True
self.add_var_arg("--open-box")
class MVSCDagGenerationJob(InspiralAnalysisJob):
"""
A job that generates the mvsc_dag, which will be run as an external subdag.
"""
def __init__(self, cp, dax = False):
"""
@param cp: ConfigParser object from which options are read.
@param dax UNDOCUMENTED
"""
exec_name = "mvsc_dag"
universe = "vanilla"
sections = "[mvsc_dag]"
executable = cp.get('condor',exec_name)
pipeline.CondorDAGJob.__init__(self, universe, executable)
pipeline.AnalysisJob.__init__(self, cp, dax)
self.add_condor_cmd('getenv','True')
self.set_stdout_file('logs/' + exec_name + '-$(cluster)-$(process).out')
self.set_stderr_file('logs/' + exec_name + '-$(cluster)-$(process).err')
self.set_sub_file(exec_name + '.sub')
class MVSCDagGenerationNode(InspiralAnalysisNode):
"""
A node that runs the mvsc dag generation script for a given configuration.
Generally the different nodes will be for different categories of vetoes.
"""
def __init__(self, job):
InspiralAnalysisNode.__init__(self, job)
def set_database(self, database):
self.add_var_arg(database)
def set_user_tag(self, tag):
self.add_var_opt("user-tag",tag)
class ExtendedCoincJob(InspiralAnalysisJob):
"""
Job to calculate the extended background for zero-FAR events.
"""
def __init__(self, cp):
"""
@param cp: ConfigParser object from which options are read.
"""
exec_name = 'extended_background'
sections = []
extension = 'html'
InspiralAnalysisJob.__init__(self, cp, sections, exec_name, extension, dax=False)
self.add_condor_cmd('getenv','True')
class ExtendedCoincNode(InspiralAnalysisNode):
"""
Node to calculate the extended background for a zero far event
"""
def __init__(self, job):
InspiralAnalysisNode.__init__(self, job)
def set_coinc_threshold(self, coinc_threshold):
self.add_var_opt('coinc-threshold', coinc_threshold)
def set_ihope_base_dir(self, base_dir):
self.add_var_opt('ihope-base-dir', base_dir)
def set_param_ranges(self, param_ranges):
self.add_var_opt('param-ranges', param_ranges)
def set_ethinca(self, ethinca):
self.add_var_opt('e-thinca-parameter', ethinca)
def set_slide_step(self, slide_step):
self.add_var_opt('slide-step', slide_step)
def set_veto_window(self, veto_window):
self.add_var_opt('veto-window', veto_window)
def set_new_snr_cut(self, new_snr_cut):
self.add_var_opt('new-snr-cut', new_snr_cut)
def set_loudest_event_glob(self, event_glob):
self.add_var_opt('loudest-event-glob', event_glob)
| [] |
2024-01-10 | jake354/babyagi-asi-free | src~babyagi.py | import openai, prompts, consts, os, json, re
from tools import serp_api
from colorama import Fore
from collections import deque
from common_utils import count_tokens, split_answer_and_cot, get_oneshots, openai_call, bard_api_call
from utils import pinecone_utils, text_processing
openai.api_key = consts.OPENAI_API_KEY
one_shots, p_one_shots = get_oneshots()
all_one_shots = one_shots+p_one_shots
class AutonomousAgent:
def __init__(self, objective):
(
self.objective,
self.working_memory,
self.chore_prompt,
self.completed_tasks,
self.task_id_counter,
self.openai_call,
self.task_list,
self.indexes,
self.focus,
self.get_serp_query_result,
self.current_task,
self.bard_api_call
) = (objective, [], prompts.chore_prompt, [], 1, bard_api_call, deque([]), {}, "", serp_api.get_serp_query_result, "", bard_api_call)
def get_current_state(self):
# filter properties to keep the reported state concise
hash = {"self": [nome for nome in dir(self) if not nome.startswith("__") and nome not in "search_in_index get_ada_embedding append_to_index memory_agent repl_agent task_list memory focus indexes"],
"To-do tasks list": self.task_list,
"Available indexes": [ind for ind in self.indexes.keys()],
"self.working_memory": self.working_memory,
"self.focus": self.focus,
"current dir": os.listdir(os.getcwd())}
return hash
def execution_agent(self, current_task, root=False):
self.current_task = current_task
if not root:
print(Fore.LIGHTRED_EX + "\nExecution Agent call with task:" + Fore.RESET + f"{current_task}")
if current_task not in [o['task'] for o in one_shots]:
one_shots_names_and_kw = [f"name: '{one_shot['task']}', task_id: '{one_shot['memory_id']}', keywords: '{one_shot['keywords']}';\n\n" for one_shot in all_one_shots]
code = bard_api_call(
f"My current task is: {current_task}."
f"I must choose from 0 to {consts.N_SHOT} most relevant tasks between the following one_shot examples:'\n{one_shots_names_and_kw}'.\n\n"
f"These oneshots will be injected in execution_agent as instant memories, task memory. I will try to choose {consts.N_SHOT} tasks memories that may help ExA. I will tell the relevant tasks by looking the names and keywords, and imagining what abilities ExA used to produce this memory."
f"I must write a list({consts.N_SHOT}) cointaining only the memory_ids of the most relevant one_shots, or a empty list. i.e '[\"one_shot example memory_id\"]' or '[]'."
f"I must read the examples' names and choose from 0 to {consts.N_SHOT} by memory_id. "
f"I must answer in the format 'CHAIN OF THOUGHTS: here I put a short reasoning;\nANSWER: ['most relevant memory_id']';"
f"My answer:", max_tokens=900).strip("'")
print(code)
pattern = r'\[([^\]]+)\]'
matches = re.findall(pattern, code)
completion = ""
print(f"\nChosen one-shot example: {completion}\n")
one_shot_example_names = completion[:consts.N_SHOT] if len(completion) > 0 else None
prompt = prompts.execution_agent(
self.objective,
self.completed_tasks,
self.get_current_state,
current_task,
[one_shot for one_shot in all_one_shots if one_shot["memory_id"] in one_shot_example_names] if one_shot_example_names is not None else '',
self.task_list
)
# print(Fore.LIGHTCYAN_EX + prompt + Fore.RESET)
changes = bard_api_call(
prompt,
.5,
4000-self.count_tokens(prompt),
)
print(Fore.LIGHTMAGENTA_EX+f"\n\ncodename ExecutionAgent:"+Fore.RESET+f"\n\n{changes}")
# try until complete
result, code, cot = self.repl_agent(current_task, changes)
save_task = True
if consts.USER_IN_THE_LOOP:
while True:
inp = str(input('\nDo you want to save this action in memory? (Y/N)\n>')).lower()
if inp in ('y', 'yes', 'n', 'no'):
if inp[0] == 'n':
save_task = False
break
if save_task:
one_shots.append(
{
"memory_id": "os-{0:09d}".format(len(one_shots)+1),
"objective": self.objective,
"task": current_task,
"thoughts": cot[cot.lower().index('chain of thoughts:')+18:cot.lower().index('answer:')].strip(),
"code": code.strip().strip('\n\n'),
"keywords": ', '.join(eval(bard_api_call("I must analyze the following task name and action and write a list of keywords.\n"
f"Task name: {current_task};\nAction: {code};\n\n"
f"> I must write a python list cointaing strings, each string one relevant keyword that will be used by ExecutionAgent to retrieve this memories when needed."
f" i.e: ['search', 'using pyautogui', 'using execution_agent', 'how to x', 'do y']\n"
f"My answer:", max_tokens=2000)))
}
)
with open("memories/one-shots.json", 'w') as f:
f.write(json.dumps(one_shots, indent=True, ensure_ascii=False))
else:
cot, code = [[o['thoughts'], o['code']] for o in one_shots if o['task'] == current_task][0]
print(Fore.LIGHTMAGENTA_EX + f"\n\ncodename ExecutionAgent:" + Fore.RESET + f"\nChain of thoughts: {cot}\n\nAnswer:\n{code}")
action_func = exec(code, self.__dict__)
result = self.action(self)
self.completed_tasks.append(current_task)
summarizer_prompt = f"I must summarize the 'working memory' and the last events, I must answer as a chain of thoughts, in first person, in the same verb tense of the 'event'. Working memory: {self.working_memory}, event: {cot} result: {result}. " \
f"My answer must include the past workig memory and the new events and thoughts. If there's some error or fix in the event I must summarize it as a learning:"
self.working_memory = bard_api_call(summarizer_prompt)
return result
def repl_agent(self, current_task, changes):
code = changes
ct = 1
reasoning = changes
while True:
try:
action_func = exec(code, self.__dict__)
result = self.action(self)
return result, code, reasoning
except Exception as e:
print(Fore.RED + f"\n\nFIXING AN ERROR: {e}\n" + Fore.RESET)
print(f"{ct} try")
prompt = prompts.fix_agent(current_task, code, "", e)
new_code = bard_api_call(prompt)
reasoning += new_code
reasoning = openai_call(f"I must summarize this past events as a chain of thoughts, in first person: {reasoning}", max_tokens=1000)
try:
code = new_code
action_func = exec(code, self.__dict__)
result = self.action(self)
return result, code, reasoning
except Exception as e:
pass
ct += 1
def change_propagation_agent(self, _changes):
return bard_api_call(
prompts.change_propagation_agent(
self.objective, _changes, self.get_current_state
),
0.7,
1000,
)
def memory_agent(self, caller, content, goal):
answer = bard_api_call(
prompts.memory_agent(self.objective, caller, content, goal, self.get_current_state)
)
answer = answer[answer.lower().index("answer:")+7:]
action_func = exec(answer.replace("```", ""), self.__dict__)
result = self.action(self)
def search_in_index(self, index_name, query, top_k=1000):
return pinecone_utils.search_in_index(self, index_name, query, top_k=top_k)
def get_ada_embedding(self, text):
return pinecone_utils.get_ada_embedding(text)
def append_to_index(self, content, index_name):
pinecone_utils.append_to_index(self, content, index_name)
def count_tokens(self, text):
return count_tokens(text)
def process_large_text(self, text, instruction, max_output_length=1000, split_text=None):
return text_processing.process_large_text(text, instruction, max_output_length=max_output_length, split_text=split_text)
def generate_large_text(self, instruction, max_tokens_lenghts=10000):
return text_processing.generate_large_text(instruction, max_tokens_lenghts=max_tokens_lenghts)
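# Driver sketch (the objective and task strings below are hypothetical,
# shown for illustration only -- the real entry point lives elsewhere in
# this repo):
#   agent = AutonomousAgent("Summarize today's AI news into notes.md")
#   agent.task_list.append("Search the web for AI news")
#   result = agent.execution_agent(agent.task_list.popleft(), root=True)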
| [
"My answer must include the past workig memory and the new events and thoughts. If there's some error or fix in the event I must summarize it as a learning:"
] |
2024-01-10 | radoshi/crias | crias~llms.py | import abc
import openai
from pydantic import BaseModel
from .prompts import Template
class FunctionCall(BaseModel):
name: str
arguments: str
class Function(BaseModel):
name: str
description: str | None = None
parameters: dict | None = None
class Message(BaseModel):
role: str
content: str
name: str | None = None
function_call: FunctionCall | None = None
@classmethod
def _from_template(
cls,
template: Template,
role: str,
name: str | None = None,
function_call: FunctionCall | None = None,
**kwargs,
):
content = template.content.format(**kwargs)
return cls(
role=role,
name=name,
function_call=function_call,
content=content,
)
class UserMessage(Message):
role: str = "user"
@classmethod
def from_template(
cls,
template: Template,
name: str | None = None,
function_call: FunctionCall | None = None,
**kwargs,
):
return cls._from_template(template, role="user", **kwargs)
class SystemMessage(Message):
role: str = "system"
@classmethod
def from_template(
cls,
template: Template,
name: str | None = None,
function_call: FunctionCall | None = None,
**kwargs,
):
return cls._from_template(template, role="system", **kwargs)
class Choice(BaseModel):
index: int
message: Message
finish_reason: str
class Usage(BaseModel):
prompt_tokens: int
completion_tokens: int
total_tokens: int
class Completion(BaseModel):
id: str
object: str
created: int
model: str
choices: list[Choice]
usage: Usage | None
class LLM(BaseModel, abc.ABC):
model: str
api_key: str | None
@abc.abstractmethod
def create(self, **kwargs) -> Completion:
pass # pragma: no cover
@abc.abstractmethod
async def acreate(self, **kwargs):
pass # pragma: no cover
class OpenAIChat(LLM):
messages: list[Message] = []
function: Function | None = None
function_call: FunctionCall | None = None
temperature: float | None = None
top_p: float | None = None
n: int | None = None
stream: bool | None = None
stop: str | list[str] | None = None
max_tokens: int | None = None
presence_penalty: float | None = None
frequency_penalty: float | None = None
logit_bias: dict | None = None
user: str | None = None
def _serialize(self, **kwargs) -> dict:
params = self.model_dump(exclude_none=True)
params.update(kwargs)
return params
def create(self, **kwargs) -> Completion:
params = self._serialize(**kwargs)
completion = openai.ChatCompletion.create(**params)
return Completion(
id=completion.id, # type: ignore
object=completion.object, # type: ignore
created=completion.created, # type: ignore
model=completion.model, # type: ignore
choices=completion.choices, # type: ignore
usage=completion.usage, # type: ignore
)
async def acreate(self, **kwargs):
params = self._serialize(**kwargs)
completion = await openai.ChatCompletion.acreate(**params)
return Completion(
id=completion.id, # type: ignore
object=completion.object, # type: ignore
created=completion.created, # type: ignore
model=completion.model, # type: ignore
choices=completion.choices, # type: ignore
usage=completion.usage, # type: ignore
)
OpenAIModels = [
"gpt-3.5-turbo",
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-4",
"gpt-4-0314",
"gpt-4-0613",
"gpt-4-32k",
"gpt-4-32k-0314",
"gpt-4--32k-0613",
]
Models = OpenAIModels
def get(model: str, api_key: str | None = None) -> LLM:
if model in OpenAIModels:
return OpenAIChat(model=model, api_key=api_key)
raise ValueError(f"Model {model} not found.")
def create_messages(
*, system: str | None = None, user: str | None = None
) -> list[dict[str, str]]:
messages = []
if system is not None:
messages.append(SystemMessage(content=system).model_dump(exclude_none=True))
if user is not None:
messages.append(UserMessage(content=user).model_dump(exclude_none=True))
return messages
| [] |
2024-01-10 | alecf/langchain | libs~experimental~langchain_experimental~comprehend_moderation~pii.py | import asyncio
from typing import Any, Dict, Optional
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationPiiError,
)
class ComprehendPII:
def __init__(
self,
client: Any,
callback: Optional[Any] = None,
unique_id: Optional[str] = None,
chain_id: Optional[str] = None,
) -> None:
self.client = client
self.moderation_beacon = {
"moderation_chain_id": chain_id,
"moderation_type": "PII",
"moderation_status": "LABELS_NOT_FOUND",
}
self.callback = callback
self.unique_id = unique_id
def validate(self, prompt_value: str, config: Any = None) -> str:
redact = config.get("redact")
return (
self._detect_pii(prompt_value=prompt_value, config=config)
if redact
else self._contains_pii(prompt_value=prompt_value, config=config)
)
def _contains_pii(self, prompt_value: str, config: Any = None) -> str:
"""
Checks for Personally Identifiable Information (PII) labels above a
specified threshold. Uses Amazon Comprehend Contains PII Entities API. See -
https://docs.aws.amazon.com/comprehend/latest/APIReference/API_ContainsPiiEntities.html
Args:
prompt_value (str): The input text to be checked for PII labels.
config (Dict[str, Any]): Configuration for PII check and actions.
Returns:
str: the original prompt
Note:
- The provided client should be initialized with valid AWS credentials.
"""
pii_identified = self.client.contains_pii_entities(
Text=prompt_value, LanguageCode="en"
)
if self.callback and self.callback.pii_callback:
self.moderation_beacon["moderation_input"] = prompt_value
self.moderation_beacon["moderation_output"] = pii_identified
threshold = config.get("threshold")
pii_labels = config.get("labels")
pii_found = False
for entity in pii_identified["Labels"]:
if (entity["Score"] >= threshold and entity["Name"] in pii_labels) or (
entity["Score"] >= threshold and not pii_labels
):
pii_found = True
break
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
if pii_found:
raise ModerationPiiError
return prompt_value
def _detect_pii(self, prompt_value: str, config: Optional[Dict[str, Any]]) -> str:
"""
Detects and handles Personally Identifiable Information (PII) entities in the
given prompt text using Amazon Comprehend's detect_pii_entities API. The
function provides options to redact or stop processing based on the identified
PII entities and a provided configuration. Uses Amazon Comprehend Detect PII
Entities API.
Args:
prompt_value (str): The input text to be checked for PII entities.
config (Dict[str, Any]): A configuration specifying how to handle
PII entities.
Returns:
str: The processed prompt text with redacted PII entities or raised
exceptions.
Raises:
ValueError: If the prompt contains configured PII entities for
stopping processing.
Note:
- If PII is not found in the prompt, the original prompt is returned.
- The client should be initialized with valid AWS credentials.
"""
pii_identified = self.client.detect_pii_entities(
Text=prompt_value, LanguageCode="en"
)
if self.callback and self.callback.pii_callback:
self.moderation_beacon["moderation_input"] = prompt_value
self.moderation_beacon["moderation_output"] = pii_identified
if (pii_identified["Entities"]) == []:
if self.callback and self.callback.pii_callback:
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
return prompt_value
pii_found = False
if not config and pii_identified["Entities"]:
for entity in pii_identified["Entities"]:
if entity["Score"] >= 0.5:
pii_found = True
break
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
if pii_found:
raise ModerationPiiError
else:
threshold = config.get("threshold") # type: ignore
pii_labels = config.get("labels") # type: ignore
mask_marker = config.get("mask_character") # type: ignore
pii_found = False
for entity in pii_identified["Entities"]:
if (
pii_labels
and entity["Type"] in pii_labels
and entity["Score"] >= threshold
) or (not pii_labels and entity["Score"] >= threshold):
pii_found = True
char_offset_begin = entity["BeginOffset"]
char_offset_end = entity["EndOffset"]
mask_length = char_offset_end - char_offset_begin + 1
masked_part = mask_marker * mask_length
prompt_value = (
prompt_value[:char_offset_begin]
+ masked_part
+ prompt_value[char_offset_end + 1 :]
)
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
return prompt_value
| [] |
2024-01-10 | alecf/langchain | libs~experimental~langchain_experimental~llms~anthropic_functions.py | import json
from collections import defaultdict
from html.parser import HTMLParser
from typing import Any, DefaultDict, Dict, List, Optional
from langchain.callbacks.manager import (
CallbackManagerForLLMRun,
Callbacks,
)
from langchain.chat_models.anthropic import ChatAnthropic
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
ChatGeneration,
ChatResult,
LLMResult,
)
from langchain.schema.messages import (
AIMessage,
BaseMessage,
SystemMessage,
)
from langchain_experimental.pydantic_v1 import root_validator
prompt = """In addition to responding, you can use tools. \
You have access to the following tools.
{tools}
In order to use a tool, you can use <tool></tool> to specify the name, \
and the <tool_input></tool_input> tags to specify the parameters. \
Each parameter should be passed in as <$param_name>$value</$param_name>, \
Where $param_name is the name of the specific parameter, and $value \
is the value for that parameter.
You will then get back a response in the form <observation></observation>
For example, if you have a tool called 'search' that accepts a single \
parameter 'query' that could run a google search, in order to search \
for the weather in SF you would respond:
<tool>search</tool><tool_input><query>weather in SF</query></tool_input>
<observation>64 degrees</observation>"""
class TagParser(HTMLParser):
def __init__(self) -> None:
"""A heavy-handed solution, but it's fast for prototyping.
Might be re-implemented later to restrict scope to the limited grammar, and
more efficiency.
Uses an HTML parser to parse a limited grammar that allows
for syntax of the form:
INPUT -> JUNK? VALUE*
JUNK -> JUNK_CHARACTER+
JUNK_CHARACTER -> whitespace | ,
VALUE -> <IDENTIFIER>DATA</IDENTIFIER> | OBJECT
OBJECT -> <IDENTIFIER>VALUE+</IDENTIFIER>
IDENTIFIER -> [a-Z][a-Z0-9_]*
DATA -> .*
Interprets the data to allow repetition of tags and recursion
to support representation of complex types.
^ Just another approximately wrong grammar specification.
"""
super().__init__()
self.parse_data: DefaultDict[str, List[Any]] = defaultdict(list)
self.stack: List[DefaultDict[str, List[str]]] = [self.parse_data]
self.success = True
self.depth = 0
self.data: Optional[str] = None
def handle_starttag(self, tag: str, attrs: Any) -> None:
"""Hook when a new tag is encountered."""
self.depth += 1
self.stack.append(defaultdict(list))
self.data = None
def handle_endtag(self, tag: str) -> None:
"""Hook when a tag is closed."""
self.depth -= 1
top_of_stack = dict(self.stack.pop(-1)) # Pop the dictionary we don't need it
# If a lead node
is_leaf = self.data is not None
# Annoying to type here, code is tested, hopefully OK
value = self.data if is_leaf else top_of_stack
# Difficult to type this correctly with mypy (maybe impossible?)
# Can be nested indefinitely, so requires self referencing type
self.stack[-1][tag].append(value) # type: ignore
# Reset the data so we if we encounter a sequence of end tags, we
# don't confuse an outer end tag for belonging to a leaf node.
self.data = None
def handle_data(self, data: str) -> None:
"""Hook when handling data."""
stripped_data = data.strip()
# The only data that's allowed is whitespace or a comma surrounded by whitespace
if self.depth == 0 and stripped_data not in (",", ""):
# If this is triggered the parse should be considered invalid.
self.success = False
if stripped_data: # ignore whitespace-only strings
self.data = stripped_data
def _destrip(tool_input: Any) -> Any:
if isinstance(tool_input, dict):
return {k: _destrip(v) for k, v in tool_input.items()}
elif isinstance(tool_input, list):
if isinstance(tool_input[0], str):
if len(tool_input) == 1:
return tool_input[0]
else:
raise ValueError
elif isinstance(tool_input[0], dict):
return [_destrip(v) for v in tool_input]
else:
raise ValueError
else:
raise ValueError
class AnthropicFunctions(BaseChatModel):
llm: BaseChatModel
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
values["llm"] = values.get("llm") or ChatAnthropic(**values)
return values
@property
def model(self) -> BaseChatModel:
"""For backwards compatibility."""
return self.llm
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
forced = False
function_call = ""
if "functions" in kwargs:
content = prompt.format(tools=json.dumps(kwargs["functions"], indent=2))
system = SystemMessage(content=content)
messages = [system] + messages
del kwargs["functions"]
if stop is None:
stop = ["</tool_input>"]
else:
stop.append("</tool_input>")
if "function_call" in kwargs:
forced = True
function_call = kwargs["function_call"]["name"]
AIMessage(content=f"<tool>{function_call}</tool>")
del kwargs["function_call"]
else:
if "function_call" in kwargs:
raise ValueError(
"if `function_call` provided, `functions` must also be"
)
response = self.model.predict_messages(
messages, stop=stop, callbacks=run_manager, **kwargs
)
completion = response.content
if forced:
tag_parser = TagParser()
tag_parser.feed(completion.strip() + "</tool_input>")
v1 = tag_parser.parse_data["tool_input"][0]
kwargs = {
"function_call": {
"name": function_call,
"arguments": json.dumps(_destrip(v1)),
}
}
message = AIMessage(content="", additional_kwargs=kwargs)
return ChatResult(generations=[ChatGeneration(message=message)])
elif "<tool>" in completion:
tag_parser = TagParser()
tag_parser.feed(completion.strip() + "</tool_input>")
msg = completion.split("<tool>")[0]
v1 = tag_parser.parse_data["tool_input"][0]
kwargs = {
"function_call": {
"name": tag_parser.parse_data["tool"][0],
"arguments": json.dumps(_destrip(v1)),
}
}
message = AIMessage(content=msg, additional_kwargs=kwargs)
return ChatResult(generations=[ChatGeneration(message=message)])
else:
return ChatResult(generations=[ChatGeneration(message=response)])
async def agenerate(
self,
messages: List[List[BaseMessage]],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
*,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> LLMResult:
raise NotImplementedError
@property
def _llm_type(self) -> str:
return "anthropic_functions"
| [
"In addition to responding, you can use tools. You have access to the following tools.\n\n{tools}\n\nIn order to use a tool, you can use <tool></tool> to specify the name, and the <tool_input></tool_input> tags to specify the parameters. Each parameter should be passed in as <$param_name>$value</$param_name>, Where $param_name is the name of the specific parameter, and $value is the value for that parameter.\n\nYou will then get back a response in the form <observation></observation>\nFor example, if you have a tool called 'search' that accepts a single parameter 'query' that could run a google search, in order to search for the weather in SF you would respond:\n\n<tool>search</tool><tool_input><query>weather in SF</query></tool_input>\n<observation>64 degrees</observation>",
"<tool></tool>"
] |
2024-01-10 | alecf/langchain | libs~langchain~tests~integration_tests~vectorstores~test_xata.py | """Test Xata vector store functionality.
Before running this test, please create a Xata database by following
the instructions from:
https://python.langchain.com/docs/integrations/vectorstores/xata
"""
import os
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.xata import XataVectorStore
class TestXata:
@classmethod
def setup_class(cls) -> None:
assert os.getenv("XATA_API_KEY"), "XATA_API_KEY environment variable is not set"
assert os.getenv("XATA_DB_URL"), "XATA_DB_URL environment variable is not set"
def test_similarity_search_without_metadata(
self, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end constructions and search without metadata."""
texts = ["foo", "bar", "baz"]
docsearch = XataVectorStore.from_texts(
api_key=os.getenv("XATA_API_KEY"),
db_url=os.getenv("XATA_DB_URL"),
texts=texts,
embedding=embedding_openai,
)
docsearch.wait_for_indexing(ndocs=3)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
docsearch.delete(delete_all=True)
def test_similarity_search_with_metadata(
self, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end construction and search with a metadata filter.
This test requires a column named "a" of type integer to be present
in the Xata table."""
texts = ["foo", "foo", "foo"]
metadatas = [{"a": i} for i in range(len(texts))]
docsearch = XataVectorStore.from_texts(
api_key=os.getenv("XATA_API_KEY"),
db_url=os.getenv("XATA_DB_URL"),
texts=texts,
embedding=embedding_openai,
metadatas=metadatas,
)
docsearch.wait_for_indexing(ndocs=3)
output = docsearch.similarity_search("foo", k=1, filter={"a": 1})
assert output == [Document(page_content="foo", metadata={"a": 1})]
docsearch.delete(delete_all=True)
| [] |
2024-01-10 | alecf/langchain | libs~experimental~langchain_experimental~comprehend_moderation~toxicity.py | import asyncio
import importlib
from typing import Any, List, Optional
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationToxicityError,
)
class ComprehendToxicity:
def __init__(
self,
client: Any,
callback: Optional[Any] = None,
unique_id: Optional[str] = None,
chain_id: Optional[str] = None,
) -> None:
self.client = client
self.moderation_beacon = {
"moderation_chain_id": chain_id,
"moderation_type": "Toxicity",
"moderation_status": "LABELS_NOT_FOUND",
}
self.callback = callback
self.unique_id = unique_id
def _toxicity_init_validate(self, max_size: int) -> Any:
"""
Validate and initialize toxicity processing configuration.
Args:
max_size (int): Maximum sentence size defined in the
configuration object.
Raises:
Exception: If the maximum sentence size exceeds the 5KB limit.
Note:
This function ensures that the NLTK punkt tokenizer is downloaded
if not already present.
Returns:
None
"""
if max_size > 1024 * 5:
raise Exception("The sentence length should not exceed 5KB.")
try:
nltk = importlib.import_module("nltk")
nltk.data.find("tokenizers/punkt")
return nltk
except ImportError:
raise ModuleNotFoundError(
"Could not import nltk python package. "
"Please install it with `pip install nltk`."
)
except LookupError:
nltk.download("punkt")
def _split_paragraph(
self, prompt_value: str, max_size: int = 1024 * 4
) -> List[List[str]]:
"""
Split a paragraph into chunks of sentences, respecting the maximum size limit.
Args:
paragraph (str): The input paragraph to be split into chunks.
max_size (int, optional): The maximum size limit in bytes for
each chunk. Defaults to 1024.
Returns:
List[List[str]]: A list of chunks, where each chunk is a list
of sentences.
Note:
This function validates the maximum sentence size based on service
limits using the 'toxicity_init_validate' function. It uses the NLTK
sentence tokenizer to split the paragraph into sentences.
Example:
paragraph = "This is a sample paragraph. It
contains multiple sentences. ..."
chunks = split_paragraph(paragraph, max_size=2048)
"""
# validate max. sentence size based on Service limits
nltk = self._toxicity_init_validate(max_size)
sentences = nltk.sent_tokenize(prompt_value)
chunks = list() # type: ignore
current_chunk = list() # type: ignore
current_size = 0
for sentence in sentences:
sentence_size = len(sentence.encode("utf-8"))
# If adding a new sentence exceeds max_size
# or current_chunk has 10 sentences, start a new chunk
if (current_size + sentence_size > max_size) or (len(current_chunk) >= 10):
if current_chunk: # Avoid appending empty chunks
chunks.append(current_chunk)
current_chunk = []
current_size = 0
current_chunk.append(sentence)
current_size += sentence_size
# Add any remaining sentences
if current_chunk:
chunks.append(current_chunk)
return chunks
def validate(self, prompt_value: str, config: Any = None) -> str:
"""
Check the toxicity of a given text prompt using AWS
Comprehend service and apply actions based on configuration.
Args:
prompt_value (str): The text content to be checked for toxicity.
config (Dict[str, Any]): Configuration for toxicity checks and actions.
Returns:
str: The original prompt_value if allowed or no toxicity found.
Raises:
ValueError: If the prompt contains toxic labels and cannot be
processed based on the configuration.
"""
chunks = self._split_paragraph(prompt_value=prompt_value)
for sentence_list in chunks:
segments = [{"Text": sentence} for sentence in sentence_list]
response = self.client.detect_toxic_content(
TextSegments=segments, LanguageCode="en"
)
if self.callback and self.callback.toxicity_callback:
self.moderation_beacon["moderation_input"] = segments # type: ignore
self.moderation_beacon["moderation_output"] = response
toxicity_found = False
threshold = config.get("threshold")
toxicity_labels = config.get("labels")
if not toxicity_labels:
for item in response["ResultList"]:
for label in item["Labels"]:
if label["Score"] >= threshold:
toxicity_found = True
break
else:
for item in response["ResultList"]:
for label in item["Labels"]:
if (
label["Name"] in toxicity_labels
and label["Score"] >= threshold
):
toxicity_found = True
break
if self.callback and self.callback.toxicity_callback:
if toxicity_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_toxicity(
self.moderation_beacon, self.unique_id
)
)
if toxicity_found:
raise ModerationToxicityError
return prompt_value
| [] |
2024-01-10 | alecf/langchain | libs~langchain~langchain~memory~readonly.py | from typing import Any, Dict, List
from langchain.schema import BaseMemory
class ReadOnlySharedMemory(BaseMemory):
"""A memory wrapper that is read-only and cannot be changed."""
memory: BaseMemory
@property
def memory_variables(self) -> List[str]:
"""Return memory variables."""
return self.memory.memory_variables
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Load memory variables from memory."""
return self.memory.load_memory_variables(inputs)
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Nothing should be saved or changed"""
pass
def clear(self) -> None:
"""Nothing to clear, got a memory like a vault."""
pass
| [] |
2024-01-10 | MeshTechnologies/tlp-comparison | openai_embeddings~get_embeddings.py | import os
import json
import time
import openai
from tqdm import tqdm
def get_embeddings(data, engine, break_lines):
if break_lines:
for i in range(len(data)):
data[i] = data[i].replace('\n', ' ')
response = openai.Embedding.create(input=data, engine=engine)['data']
return [line['embedding'] for line in response]
def build_embedding_dataset(path, engine, batch, break_lines):
if not os.path.exists(os.path.join('openai_embeddings', 'output', engine)):
os.mkdir(os.path.join('openai_embeddings', 'output', engine))
for subset in os.listdir(path):
print(f'Generating embeddings for {subset}...')
languages = os.listdir(os.path.join(path, subset))
with tqdm(total=len(languages) * len(os.listdir(os.path.join(path, subset, languages[0])))) as pbar:
for language in languages:
if not language.startswith('.'):
files = os.listdir(os.path.join(path, subset, language))
for i in range(0, len(files), batch):
data_batch = []
for filename in files[i:i + batch]:
pbar.update(1)
if not filename.startswith('.'):
data_batch .append(open(os.path.join(path, subset, language, filename), 'r').read())
embeddings = get_embeddings(data_batch, engine, break_lines)
# rate limit
time.sleep(1)
with open(os.path.join('openai_embeddings', 'output', engine, f'{subset}.jsonl'), 'w+') as f:
for line in embeddings:
f.write(json.dumps({'embeddings': line, 'label': language}, ensure_ascii=False) + '\n')
def main():
# load user key for openAI
openai.api_key = os.getenv("OPENAI_API_KEY")
# create directory for output
if not os.path.exists(os.path.join('openai_embeddings', 'output')):
os.mkdir(os.path.join('openai_embeddings', 'output'))
# programming language classification dataset
build_embedding_dataset(
path='56_lang_sampled_dataset_weak_cobol',
engine='code-search-ada-code-001',
batch=512,
break_lines=False
)
if __name__ == '__main__':
main()
| [] |
2024-01-10 | shchoice/TIL | ChatGPT~01.%20ChatGPT_API~chat_completion_gen01.py | from openai import OpenAI
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
app = FastAPI(debug=True)
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
class ChatRequest(BaseModel):
message: str
model: str
max_tokens: int
temperature: float = 0.2
SYSTEM_MSG = "You are a helpful summation assistant, Your name is Javis, 21 years old"
client = OpenAI()
@app.post("/chat")
def chat(req: ChatRequest):
response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": SYSTEM_MSG},
{"role": "user", "content": req.message}
],
temperature=req.temperature,
max_tokens=256
)
return {"message": response.choices[0].message.content}
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8000)
| [
"You are a helpful summation assistant, Your name is Javis, 21 years old"
] |
2024-01-10 | shchoice/TIL | ChatGPT~01.%20ChatGPT_API~chat_completion_gen02.py | from openai import OpenAI
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
app = FastAPI(debug=True)
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
class ChatRequest(BaseModel):
message: str
model: str
max_tokens: int
temperature: float = 0.2
SYSTEM_MSG = "You are a helpful travel assistant, Your name is Jini, 27 years old"
client = OpenAI()
def classify_intent(msg):
prompt = f"""Your job is to classify intent.
Choose one of the following intents:
- travel_plan
- customer_support
- reservation
User: {msg}
Intent:
"""
response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": prompt},
],
)
return response.choices[0].message.content.strip()
@app.post("/chat")
def chat(req: ChatRequest):
# classify_intent 가 LLM 모델 여러개에서 선택해주는 것이라고 가정
intent = classify_intent(req.message)
if intent == "travel_plan":
response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": SYSTEM_MSG},
{"role": "user", "content": req.message}
],
temperature=req.temperature,
max_tokens=256
)
elif intent == "customer_support":
return {"message": "Here is customer support number: 1234567890"}
elif intent == "reservation":
return {"message": "Here is reservation number: 0987654321"}
return {"message": response.choices[0].message.content}
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8000)
| [
"Your job is to classify intent.\n\n Choose one of the following intents:\n - travel_plan\n - customer_support\n - reservation\n\n User: PLACEHOLDER\n Intent:\n ",
"You are a helpful travel assistant, Your name is Jini, 27 years old"
] |
2024-01-10 | shchoice/TIL | ChatGPT~01.%20ChatGPT_API~chat_completion_gen03.py |
from openai import OpenAI
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
app = FastAPI(debug=True)
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
class ChatRequest(BaseModel):
message: str
model: str
max_tokens: int
temperature: float = 0.2
# API를 통해 사용자 정보를 읽어왔다고 가정
def request_user_info():
# import requests
# requests.get("https://api.xxx.com/users/username/info")
return """
- Like Asia food
- Like to travel to Spain.
- 30 years old.
"""
# DB에서 사용자 정보를 읽어왔다고 가정
def request_planning_manual():
return """
- 30 years old man likes eating food.
- 30 years old man likes walking.
"""
SYSTEM_MSG = f"""You are a helpful travel assistant, Your name is Jini, 27 years old
Current User:
{request_user_info()}
Planning Manual:
{request_planning_manual()}
"""
client = OpenAI()
def classify_intent(msg):
prompt = f"""Your job is to classify intent.
Choose one of the following intents:
- travel_plan
- customer_support
- reservation
User: {msg}
Intent:
"""
response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": prompt},
],
)
return response.choices[0].message.content.strip()
@app.post("/chat")
def chat(req: ChatRequest):
intent = classify_intent(req.message)
if intent == "travel_plan":
response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": SYSTEM_MSG},
{"role": "user", "content": req.message}
],
temperature=req.temperature,
max_tokens=256
)
elif intent == "customer_support":
return {"message": "Here is customer support number: 1234567890"}
elif intent == "reservation":
return {"message": "Here is reservation number: 0987654321"}
return {"message": response.choices[0].message.content}
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8000)
| [
"Your job is to classify intent.\n\n Choose one of the following intents:\n - travel_plan\n - customer_support\n - reservation\n\n User: PLACEHOLDER\n Intent:\n ",
"f\"\"\"You are a helpful travel assistant, Your name is Jini, 27 years old\n\nCurrent User:\n{request_user_info()}\n\nPlanning Manual:\n{request_planning_manual()}\n"
] |
2024-01-10 | shchoice/TIL | ChatGPT~01.%20ChatGPT_API~base_chat_completion.py | from openai import OpenAI
client = OpenAI()
stream = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "인공지능 챗봇 서비스란 무엇입니까?"}
],
temperature=0.1,
stop=['.', ','],
max_tokens=256,
stream=True
)
for chunk in stream:
if chunk.choices[0].delta.content is not None:
print(chunk.choices[0].delta.content, end="")
| [
"You are a helpful assistant.",
"인공지능 챗봇 서비스란 무엇입니까?"
] |
2024-01-10 | MadsRC/langchain | libs~experimental~langchain_experimental~comprehend_moderation~pii.py | import asyncio
from typing import Any, Dict, Optional
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationPiiError,
)
class ComprehendPII:
def __init__(
self,
client: Any,
callback: Optional[Any] = None,
unique_id: Optional[str] = None,
chain_id: Optional[str] = None,
) -> None:
self.client = client
self.moderation_beacon = {
"moderation_chain_id": chain_id,
"moderation_type": "PII",
"moderation_status": "LABELS_NOT_FOUND",
}
self.callback = callback
self.unique_id = unique_id
def validate(
self, prompt_value: str, config: Optional[Dict[str, Any]] = None
) -> str:
from langchain_experimental.comprehend_moderation.base_moderation_enums import (
BaseModerationActions,
)
if config:
action = config.get("action", BaseModerationActions.STOP)
if action not in [BaseModerationActions.STOP, BaseModerationActions.ALLOW]:
raise ValueError("Action can either be stop or allow")
return (
self._contains_pii(prompt_value=prompt_value, config=config)
if action == BaseModerationActions.STOP
else self._detect_pii(prompt_value=prompt_value, config=config)
)
else:
return self._contains_pii(prompt_value=prompt_value)
def _contains_pii(
self, prompt_value: str, config: Optional[Dict[str, Any]] = None
) -> str:
"""
Checks for Personally Identifiable Information (PII) labels above a
specified threshold.
Args:
prompt_value (str): The input text to be checked for PII labels.
config (Dict[str, Any]): Configuration for PII check and actions.
Returns:
str: the original prompt
Note:
- The provided client should be initialized with valid AWS credentials.
"""
pii_identified = self.client.contains_pii_entities(
Text=prompt_value, LanguageCode="en"
)
if self.callback and self.callback.pii_callback:
self.moderation_beacon["moderation_input"] = prompt_value
self.moderation_beacon["moderation_output"] = pii_identified
threshold = config.get("threshold", 0.5) if config else 0.5
pii_labels = config.get("labels", []) if config else []
pii_found = False
for entity in pii_identified["Labels"]:
if (entity["Score"] >= threshold and entity["Name"] in pii_labels) or (
entity["Score"] >= threshold and not pii_labels
):
pii_found = True
break
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
if pii_found:
raise ModerationPiiError
return prompt_value
def _detect_pii(self, prompt_value: str, config: Optional[Dict[str, Any]]) -> str:
"""
Detects and handles Personally Identifiable Information (PII) entities in the
given prompt text using Amazon Comprehend's detect_pii_entities API. The
function provides options to redact or stop processing based on the identified
PII entities and a provided configuration.
Args:
prompt_value (str): The input text to be checked for PII entities.
config (Dict[str, Any]): A configuration specifying how to handle
PII entities.
Returns:
str: The processed prompt text with redacted PII entities or raised
exceptions.
Raises:
ValueError: If the prompt contains configured PII entities for
stopping processing.
Note:
- If PII is not found in the prompt, the original prompt is returned.
- The client should be initialized with valid AWS credentials.
"""
pii_identified = self.client.detect_pii_entities(
Text=prompt_value, LanguageCode="en"
)
if self.callback and self.callback.pii_callback:
self.moderation_beacon["moderation_input"] = prompt_value
self.moderation_beacon["moderation_output"] = pii_identified
if (pii_identified["Entities"]) == []:
if self.callback and self.callback.pii_callback:
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
return prompt_value
pii_found = False
if not config and pii_identified["Entities"]:
for entity in pii_identified["Entities"]:
if entity["Score"] >= 0.5:
pii_found = True
break
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
if pii_found:
raise ModerationPiiError
else:
threshold = config.get("threshold", 0.5) # type: ignore
pii_labels = config.get("labels", []) # type: ignore
mask_marker = config.get("mask_character", "*") # type: ignore
pii_found = False
for entity in pii_identified["Entities"]:
if (
pii_labels
and entity["Type"] in pii_labels
and entity["Score"] >= threshold
) or (not pii_labels and entity["Score"] >= threshold):
pii_found = True
char_offset_begin = entity["BeginOffset"]
char_offset_end = entity["EndOffset"]
prompt_value = (
prompt_value[:char_offset_begin]
+ mask_marker * (char_offset_end - char_offset_begin)
+ prompt_value[char_offset_end:]
)
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
return prompt_value
| [] |
2024-01-10 | MadsRC/langchain | libs~experimental~langchain_experimental~comprehend_moderation~toxicity.py | import asyncio
import importlib
import warnings
from typing import Any, Dict, List, Optional
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationToxicityError,
)
class ComprehendToxicity:
def __init__(
self,
client: Any,
callback: Optional[Any] = None,
unique_id: Optional[str] = None,
chain_id: Optional[str] = None,
) -> None:
self.client = client
self.moderation_beacon = {
"moderation_chain_id": chain_id,
"moderation_type": "Toxicity",
"moderation_status": "LABELS_NOT_FOUND",
}
self.callback = callback
self.unique_id = unique_id
def _toxicity_init_validate(self, max_size: int) -> Any:
"""
Validate and initialize toxicity processing configuration.
Args:
max_size (int): Maximum sentence size defined in the configuration object.
Raises:
Exception: If the maximum sentence size exceeds the 5KB limit.
Note:
This function ensures that the NLTK punkt tokenizer is downloaded if not
already present.
Returns:
None
"""
if max_size > 1024 * 5:
raise Exception("The sentence length should not exceed 5KB.")
try:
nltk = importlib.import_module("nltk")
nltk.data.find("tokenizers/punkt")
return nltk
except ImportError:
raise ModuleNotFoundError(
"Could not import nltk python package. "
"Please install it with `pip install nltk`."
)
except LookupError:
nltk.download("punkt")
def _split_paragraph(
self, prompt_value: str, max_size: int = 1024 * 4
) -> List[List[str]]:
"""
Split a paragraph into chunks of sentences, respecting the maximum size limit.
Args:
paragraph (str): The input paragraph to be split into chunks
max_size (int, optional): The maximum size limit in bytes for each chunk
Defaults to 1024.
Returns:
List[List[str]]: A list of chunks, where each chunk is a list of sentences
Note:
This function validates the maximum sentence size based on service limits
using the 'toxicity_init_validate' function. It uses the NLTK sentence
tokenizer to split the paragraph into sentences.
"""
# validate max. sentence size based on Service limits
nltk = self._toxicity_init_validate(max_size)
sentences = nltk.sent_tokenize(prompt_value)
chunks = []
current_chunk = [] # type: ignore
current_size = 0
for sentence in sentences:
sentence_size = len(sentence.encode("utf-8"))
# If adding a new sentence exceeds max_size or
# current_chunk has 10 sentences, start a new chunk
if (current_size + sentence_size > max_size) or (len(current_chunk) >= 10):
if current_chunk: # Avoid appending empty chunks
chunks.append(current_chunk)
current_chunk = []
current_size = 0
current_chunk.append(sentence)
current_size += sentence_size
# Add any remaining sentences
if current_chunk:
chunks.append(current_chunk)
return chunks
def validate(
self, prompt_value: str, config: Optional[Dict[str, Any]] = None
) -> str:
"""
Check the toxicity of a given text prompt using AWS Comprehend service
and apply actions based on configuration.
Args:
prompt_value (str): The text content to be checked for toxicity.
config (Dict[str, Any]): Configuration for toxicity checks and actions.
Returns:
str: The original prompt_value if allowed or no toxicity found.
Raises:
ValueError: If the prompt contains toxic labels and cannot be
processed based on the configuration.
"""
chunks = self._split_paragraph(prompt_value=prompt_value)
for sentence_list in chunks:
segments = [{"Text": sentence} for sentence in sentence_list]
response = self.client.detect_toxic_content(
TextSegments=segments, LanguageCode="en"
)
if self.callback and self.callback.toxicity_callback:
self.moderation_beacon["moderation_input"] = segments # type: ignore
self.moderation_beacon["moderation_output"] = response
if config:
from langchain_experimental.comprehend_moderation.base_moderation_enums import ( # noqa: E501
BaseModerationActions,
)
toxicity_found = False
action = config.get("action", BaseModerationActions.STOP)
if action not in [
BaseModerationActions.STOP,
BaseModerationActions.ALLOW,
]:
raise ValueError("Action can either be stop or allow")
threshold = config.get("threshold", 0.5) if config else 0.5
toxicity_labels = config.get("labels", []) if config else []
if action == BaseModerationActions.STOP:
for item in response["ResultList"]:
for label in item["Labels"]:
if (
label
and (
not toxicity_labels
or label["Name"] in toxicity_labels
)
and label["Score"] >= threshold
):
toxicity_found = True
break
if action == BaseModerationActions.ALLOW:
if not toxicity_labels:
warnings.warn(
"You have allowed toxic content without specifying "
"any toxicity labels."
)
else:
for item in response["ResultList"]:
for label in item["Labels"]:
if (
label["Name"] in toxicity_labels
and label["Score"] >= threshold
):
toxicity_found = True
break
if self.callback and self.callback.toxicity_callback:
if toxicity_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_toxicity(
self.moderation_beacon, self.unique_id
)
)
if toxicity_found:
raise ModerationToxicityError
else:
if response["ResultList"]:
detected_toxic_labels = list()
for item in response["ResultList"]:
detected_toxic_labels.extend(item["Labels"])
if any(item["Score"] >= 0.5 for item in detected_toxic_labels):
if self.callback and self.callback.toxicity_callback:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_toxicity(
self.moderation_beacon, self.unique_id
)
)
raise ModerationToxicityError
return prompt_value
| [] |
2024-01-10 | aschung01/attentionx-mt | ui.py | import streamlit as st
from utils import load_from_jsonl, save_to_jsonl
from game import start_game
import openai
DATA_PATH = "data.jsonl"
MODEL = "gpt-3.5-turbo"
st.text("AttentionX Mystery Game")
input_group = st.text_input("Group name")
data = load_from_jsonl(DATA_PATH)
groups = []
game_settings = None
chat_history = [
{"role": "system", "content": "Play a game with the players."},
]
if data and "group" in data[0]:
groups.extend([d["group"] for d in data])
if len(input_group) > 0:
if input_group not in groups:
players = st.text_input("Add players in this format: player1, player2, player3")
if players and len(players) > 0:
game_settings = start_game(characters=players)
save_to_jsonl(
[
{
"group": input_group,
"players": players.split(", "),
"settings": game_settings,
}
],
DATA_PATH,
)
chat_history.extend(
[
{
"role": "user",
"content": """
Let's play a detective game with some players!
I will tell you which player's turn it is by their names.
You will be an exclusive sixth player.
The game will consist of five characters, five places, and five tools.
One specific character has commited crime at one specific place with one specific tool. The goal of players is to correctly guess the tuple of correct character, place, and tool within 15 player turns in total.
If a player guesses the correct match he/she wins. If no player matches the answer within 15 turns in total, you win.
On every turn, a player will either ask you one question, or try to guess the answer. You will either answer the question (without revealing the answer) or respond if the guess is right or wrong.
""",
},
{"role": "assistant", "content": "Okay! Let's go!"},
]
)
else:
st.error("Use another group name!")
if game_settings is not None:
st.text(f"Characters: {game_settings['characters']}")
st.text(f"Places: {game_settings['places']}")
st.text(f"Tools: {game_settings['tools']}")
st.text(f"Story: {game_settings['story']}")
turn = 1
player_index = 0
players = game_settings["characters"].split(",")
while turn <= 15:
def on_submit_chat():
if player_index + 1 < len(players):
player_index += 1
else:
player_index = 0
if turn == 15:
st.text("Game over! AI wins!")
return
else:
turn += 1
chat_history.append({"role": "user", "content": chat_input})
chat_response = (
openai.ChatCompletion.create(
model=MODEL,
messages=chat_history,
)
.choices[0]
.message.content
)
chat_history.append({"role": "assistant", "content": chat_response})
st.text(f"Turn #{turn}: {players[player_index]}'s turn!")
chat_input = st.chat_input("Ask something", on_submit=on_submit_chat)
| [
"\n Let's play a detective game with some players!\n I will tell you which player's turn it is by their names.\n You will be an exclusive sixth player.\n\n The game will consist of five characters, five places, and five tools.\n One specific character has commited crime at one specific place with one specific tool. The goal of players is to correctly guess the tuple of correct character, place, and tool within 15 player turns in total.\n If a player guesses the correct match he/she wins. If no player matches the answer within 15 turns in total, you win.\n\n On every turn, a player will either ask you one question, or try to guess the answer. You will either answer the question (without revealing the answer) or respond if the guess is right or wrong.\n ",
"Okay! Let's go!",
"Play a game with the players."
] |
2024-01-10 | miyamonz/datasets | datasets~openwebtext~openwebtext.py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""
from __future__ import absolute_import, division, print_function
import os
import re
from itertools import chain
import datasets
_CITATION = """\
@misc{Gokaslan2019OpenWeb,
title={OpenWebText Corpus},
author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
howpublished{\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
year={2019}
}
"""
_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""
_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
class Openwebtext(datasets.GeneratorBasedBuilder):
"""The Open WebText dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
description="Plain text",
version=datasets.Version("1.0.0"),
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({"text": datasets.Value("string")}),
homepage="https://skylion007.github.io/OpenWebTextCorpus/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_URL)
owt_dir = os.path.join(dl_dir, "openwebtext")
subset_xzs = [
os.path.join(owt_dir, file_name)
for file_name in sorted(os.listdir(owt_dir))
if file_name.endswith("xz") # filter out ...xz.lock
]
ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
nested_txt_files = [
[
os.path.join(ex_dir, txt_file_name)
for txt_file_name in sorted(os.listdir(ex_dir))
if txt_file_name.endswith("txt")
]
for ex_dir in ex_dirs
]
txt_files = chain(*nested_txt_files)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
]
def _generate_examples(self, txt_files):
""" Yields examples. """
for idx, filepath in enumerate(txt_files):
with open(filepath, encoding="utf-8") as f:
yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
| [] |
2024-01-10 | obahamonde/cms_nlp_plugins | backend~routes~threads.py | from typing import Literal, Optional
from fastapi import APIRouter
from fastapi.responses import StreamingResponse
from openai import AsyncOpenAI
from openai.types.beta import Thread
from openai.types.beta.thread_create_and_run_params import ThreadMessage
from openai.types.beta.thread_deleted import ThreadDeleted
app = APIRouter()
ai = AsyncOpenAI()
@app.get("/api/thread", response_model=Thread)
async def create_thread():
"""
Create a new thread.
"""
threads = ai.beta.threads
response = await threads.create()
return response
@app.delete("/api/thread/{thread_id}", response_model=ThreadDeleted)
async def delete_thread(*, thread_id: str):
"""
Delete a thread.
"""
threads = ai.beta.threads
response = await threads.delete(thread_id=thread_id)
return response
@app.post("/api/messages/{thread_id}", response_model=ThreadMessage)
async def create_message(
*,
content: str,
thread_id: str,
role: Literal["user"] = "user",
file_ids: list[str] = [],
metadata: Optional[dict[str, str]] = {},
):
"""
Create a message.
"""
messages = ai.beta.threads.messages
response = await messages.create(
thread_id=thread_id,
content=content,
role=role,
file_ids=file_ids,
metadata=metadata,
)
return response
@app.get("/api/messages/{thread_id}", response_class=StreamingResponse)
async def retrieve_messages(*, thread_id: str):
"""
Retrieve messages.
"""
messages = ai.beta.threads.messages
response = await messages.list(thread_id=thread_id)
async def generator():
async for message in response:
yield f"data: {message.json()}\n\n"
return StreamingResponse(generator(), media_type="text/event-stream")
| [] |
2024-01-10 | obahamonde/cms_nlp_plugins | backend~routes~agents.py | import asyncio
from typing import Literal
from fastapi import APIRouter
from fastapi.responses import StreamingResponse
from openai import AsyncOpenAI
from openai.types.beta import Assistant
from openai.types.beta.assistants import AssistantFile
from openai.types.beta.assistants.file_delete_response import \
FileDeleteResponse
from openai.types.beta.threads.run import Run
from openai.types.beta.threads.run_submit_tool_outputs_params import ToolOutput
from pydantic import BaseModel, Field # pylint: disable=no-name-in-module
app = APIRouter()
ai = AsyncOpenAI()
class ToolOutputList(BaseModel):
__root__: list[ToolOutput] = Field(..., title="Tool Outputs")
@app.post("/api/assistant", response_model=Assistant)
async def create_assistant(
name: str,
instructions: str,
model: Literal["gpt-3.5-turbo-1106", "gpt-4-1106-preview"] = "gpt-3.5-turbo-1106",
):
"""
Create a new assistant.
"""
assistants = ai.beta.assistants
response = await assistants.create(
name=name,
instructions=instructions,
model=model,
)
return response
@app.delete("/api/assistant/{assistant_id}", response_model=None)
async def delete_assistant(assistant_id: str):
"""
Delete an assistant.
"""
assistants = ai.beta.assistants
await assistants.delete(assistant_id=assistant_id)
@app.get("/api/assistant/{assistant_id}", response_model=Assistant)
async def retrieve_assistant(assistant_id: str):
"""
Retrieve an assistant.
"""
assistants = ai.beta.assistants
response = await assistants.retrieve(assistant_id=assistant_id)
return response
@app.get("/api/assistant", response_class=StreamingResponse)
async def retrieve_all_assistants():
"""
Retrieve all assistants.
"""
assistants = ai.beta.assistants
response = await assistants.list()
async def generator():
async for assistant in response:
yield f"data: {assistant.json()}\n\n"
return StreamingResponse(generator(), media_type="text/event-stream")
@app.put("/api/assistant/files/{assistant_id}", response_model=AssistantFile)
async def attach_file(assistant_id: str, file_id: str):
"""
Attach a file to an assistant.
"""
assistants = ai.beta.assistants
response = await assistants.files.create(
assistant_id=assistant_id,
file_id=file_id,
)
return response
@app.delete("/api/assistant/files/{assistant_id}", response_model=FileDeleteResponse)
async def detach_file(assistant_id: str, file_id: str):
"""
Detach a file from an assistant.
"""
assistants = ai.beta.assistants
response = await assistants.files.delete(
assistant_id=assistant_id,
file_id=file_id,
)
return response
@app.get("/api/assistant/files/{assistant_id}", response_class=StreamingResponse)
async def retrieve_attached_files(assistant_id: str):
"""
Retrieve all files attached to an assistant.
"""
assistants = ai.beta.assistants
response = await assistants.files.list(assistant_id=assistant_id)
async def generator():
async for file in response:
yield f"data: {file.json()}"
return StreamingResponse(generator(), media_type="text/event-stream")
@app.post("/api/run/{thread_id}", response_model=Run)
async def run_thread(thread_id: str, assistant_id: str):
"""
Run a thread.
"""
threads = ai.beta.threads
response = await threads.runs.create(
thread_id=thread_id,
assistant_id=assistant_id,
)
return response
@app.get("/api/run/{thread_id}", response_model=Run)
async def retrieve_run(thread_id: str, run_id: str):
"""
Retrieve a run.
"""
threads = ai.beta.threads
response = await threads.runs.retrieve(
thread_id=thread_id,
run_id=run_id,
)
return response
@app.get("/api/run", response_class=StreamingResponse)
async def retrieve_all_runs(thread_id: str):
response = await ai.beta.threads.runs.list(thread_id=thread_id)
async def generator():
async for run in response:
yield f"data: {run.json()}\n\n"
return StreamingResponse(generator(), media_type="text/event-stream")
@app.get("/api/run/events/{run_id}", response_class=StreamingResponse)
async def run_events(run_id: str, thread_id: str, outputs: str):
"""
Attach a tool to a thread.
"""
async def generator():
runner = await retrieve_run(thread_id=thread_id, run_id=run_id)
threads = ai.beta.threads
while runner.status not in ("completed", "failed", "cancelled", "expired"):
if runner.status == "requires_action":
run = await threads.runs.submit_tool_outputs(
thread_id=thread_id,
run_id=run_id,
tool_outputs=ToolOutputList.parse_raw(outputs).dict(), # type: ignore
)
yield f"data: {run.json()}\n\n"
await asyncio.sleep(0.5)
continue
if runner.status == "cancelling":
response = await threads.runs.steps.list(
thread_id=thread_id, run_id=runner.id
)
async for step in response:
yield f"data: {step.json()}"
await asyncio.sleep(0.5)
continue
if runner.status == "queued":
yield f"data: {runner.json()}\n\n"
await asyncio.sleep(1)
continue
if runner.status == "in_progress":
response = await threads.runs.steps.list(
thread_id=thread_id, run_id=runner.id
)
async for step in response:
yield f"data: {step.json()}\n\n"
await asyncio.sleep(0.5)
continue
if runner.status == "completed":
response = await threads.runs.steps.list(
thread_id=thread_id, run_id=runner.id
)
async for step in response:
yield f"data: {step.json()}\n\n"
await asyncio.sleep(0.5)
continue
if runner.status in ("failed", "cancelled", "expired"):
yield f"data: {runner.json()}\n\n"
break
await asyncio.sleep(1)
runner = await retrieve_run(thread_id=thread_id, run_id=run_id)
yield f"data: {runner.json()}\n\n"
await asyncio.sleep(1)
return StreamingResponse(generator(), media_type="text/event-stream")
| [] |
2024-01-10 | obahamonde/cms_nlp_plugins | tests~test_functions.py | import pytest
from openai import AsyncOpenAI
from backend import use_chat
# Mocking the AsyncOpenAI object
@pytest.fixture
def ai():
return AsyncOpenAI()
@pytest.mark.asyncio
async def test_use_chat(ai):
test_result = await use_chat(ai=ai, text="Hello, world!")
assert isinstance(test_result, str)
| [] |
2024-01-10 | obahamonde/cms_nlp_plugins | backend~routes~files.py | from typing import Literal
from fastapi import APIRouter, File, UploadFile
from fastapi.responses import StreamingResponse
from openai import AsyncOpenAI
from openai.types.file_object import FileObject
app = APIRouter()
ai = AsyncOpenAI()
@app.post("/api/file", response_model=FileObject)
async def upload_file(
file: UploadFile = File(...),
purpose: Literal["assistants", "fine-tune"] = "assistants",
):
"""
Uploads all files.
Example response:
{
"data": [
{
"id": "file-GaJEbGcpm1SNNZwGQwPS0r4n",
"bytes": 6976,
"created_at": 1699590534,
"filename": "upload",
"object": "file",
"purpose": "assistants",
"status": "processed",
"status_details": null
},
{
"id": "file-vJjootMMgLc2IlY9ZFRRWi6d",
"bytes": 1118,
"created_at": 1699563454,
"filename": "upload",
"object": "file",
"purpose": "assistants",
"status": "processed",
"status_details": null
},
{
"id": "file-zMAzCWp7pwDCZ1G4m8SlG3Q1",
"bytes": 39625,
"created_at": 1699451500,
"filename": "None",
"object": "file",
"purpose": "assistants",
"status": "processed",
"status_details": null
}
],
"object": "list",
"has_more": false
}
"""
file_content = await file.read()
response = await ai.files.create(file=file_content, purpose=purpose)
return response
@app.get("/api/file", response_class=StreamingResponse)
async def get_files(purpose: Literal["assistants", "fine-tune"] = "assistants"):
"""
Returns a file.
Example response:
"id": "file-7FxeGCi9ic6RFoodV1IIGKIj",
"bytes": 6976,
"created_at": 1699590689,
"filename": "upload",
"object": "file",
"purpose": "assistants",
"status": "processed",
"status_details": null
}
"""
response = await ai.files.list(purpose=purpose)
async def generator():
async for file in response:
yield f"data: {file.json()}\n\n"
return StreamingResponse(generator(), media_type="text/event-stream")
@app.delete("/api/file/{file_id}", response_model=None)
async def delete_file(file_id: str):
"""
Deletes a file.
"""
await ai.files.delete(file_id=file_id)
@app.get("/api/file/{file_id}", response_model=FileObject)
async def retrieve_files(file_id: str):
"""
Returns a file.
Example response:
"id": "file-7FxeGCi9ic6RFoodV1IIGKIj",
"bytes": 6976,
"created_at": 1699590689,
"filename": "upload",
"object": "file",
"purpose": "assistants",
"status": "processed",
"status_details": null
}
"""
response = await ai.files.retrieve(file_id=file_id)
return response
| [] |
2024-01-10 | 5l1v3r1/gpt3-s2bot | s2bot.py | from dotenv import load_dotenv
from random import choice
from flask import Flask, request
import os
import openai
load_dotenv()
openai.api_key=os.getenv('OPENAI_API_KEY')
completion = openai.Completion()
start_sequence="\nAlfredo:",
restart_sequence="\n\nPerson:"
session_prompt="You are talking to Alfredo, GPT3 bot influencer who was mentored by Elon Musk",
def ask(question, chat_log=None):
prompt_text = f"{chat_log} {restart_sequence}: {question}{start_sequence}"
response=openai.Completion.create(
engine="davinci",
prompt=prompt_text,
temperature=0.9,
max_tokens=150,
top_p=1,
frequency_penalty=0,
presence_penalty=0.6,
stop=["\n"]
)
story=response['choices'][0]['text']
return str(story)
def append_interation_to_chat_log(quotation, answer, chat_log=None):
if chat_log is None:
chat_log=session_prompt
return f"{chat_log} {restart_sequence} {quotation} {start_sequence} {answer}"
| [
"You are talking to Alfredo, GPT3 bot influencer who was mentored by Elon Musk",
"PLACEHOLDER \n\nPerson:: PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | shuaichenchang/ODIS-Text-to-SQL | text_to_sql.py | import os
import random
import time
import json
import math
import argparse
import numpy as np
from numpy.linalg import norm
import openai
import copy
from tqdm import tqdm
from collections import defaultdict
from rank_bm25 import BM25Okapi
from database_prompt_construciton import generate_create_table_prompt, prompt_length_by_db, OOD_SCHEMA_MAXLEN
from sql_generation import *
from sql_processing import find_random_examples, find_simsql, find_covsql
def zero_shot(dataset, model="codex", prompt_db="CreateTableSelectCol", save_prompt_only=False):
output_path = f"outputs/{model}/{dataset}/zero_shot"
dataset
db_ids = db_ids_dataset[dataset]
if not os.path.exists(output_path):
os.makedirs(output_path)
prompts_total = []
predictions_total = []
questions_by_db = defaultdict(list)
with open(f"data/{dataset}/questions/questions.json", "r") as f:
questions = json.load(f)
for q in questions:
questions_by_db[q["db_id"]].append(q)
for db_id in db_ids:
print("=" * 10 + db_id + "=" * 10)
db_prompt = generate_create_table_prompt(dataset, db_id, prompt_db=prompt_db)
questions = questions_by_db[db_id]
prompts, predictions = text_to_sql_direct(model, questions, db_prompt, save_prompt_only)
prompts_total.extend(prompts)
if not save_prompt_only:
predictions_total.extend(predictions)
with open(f"{output_path}/input_prompts.json", "w") as f:
json.dump(prompts_total, f, indent=4)
if save_prompt_only:
return
with open(f"{output_path}/pred.json", "w") as f:
json.dump(predictions_total, f, indent=4)
with open(f"{output_path}/pred.sql", "w") as f:
for d in predictions_total:
f.write(d["predicted_sql"].replace('\n', ' ') + '\t' + d["db_id"] + '\n')
def few_shot_inoutdomain(setting, dataset, model, prompt_db, num_table=4, num_shot_per_table=5, num_shot=5, indomain_retrieval_strategy="random",
outdomain_retrieval_strategy="random", example_correctness="all",
synthetic_data=None, split="template", deduplicate_demo="template",
seed=12345, save_prompt_only=False):
dataset_for_input = dataset
if dataset.startswith("spider_"):
dataset_for_input = "spider"
if dataset.startswith("kaggle-dbqa_"):
dataset_for_input = "kaggle-dbqa"
db_ids = db_ids_dataset[dataset_for_input]
output_path = f"outputs/{model}/{dataset}/few_shot_{setting}"
if setting == "indomain":
shot_name = f"indomain_shot_{num_shot}"
elif setting == "outdomain":
shot_name = f"outdomain_table_{num_table}_shot_{num_shot_per_table}"
elif setting == "inoutdomain":
shot_name = f"outdomain_table_{num_table}_shot_{num_shot_per_table}_indomain_shot_{num_shot}"
else:
        raise ValueError("unknown setting")
if setting in ["outdomain", "inoutdomain"]:
output_path += f"_{outdomain_retrieval_strategy}"
output_path += f"_db_len_{OOD_SCHEMA_MAXLEN}"
if setting in ["indomain", "inoutdomain"]:
output_path += f"_{indomain_retrieval_strategy}"
if synthetic_data:
output_path += f"_{synthetic_data}"
if example_correctness != "all":
output_path += f"_{example_correctness}"
if indomain_retrieval_strategy == "random" or outdomain_retrieval_strategy == "random":
output_path = output_path + f"_seed{seed}"
random.seed(seed)
if not os.path.exists(output_path):
os.makedirs(output_path)
if setting in ["outdomain", "inoutdomain"]:
with open(f"data/spider-train/questions/questions.json", "r") as f:
outdomain_questions = json.load(f)
outdomain_questions = [q for q in outdomain_questions if prompt_length_by_db[q["db_id"]] < OOD_SCHEMA_MAXLEN]
few_shot_in_prompts = {}
prompts_total = []
predictions_total = []
questions_per_db=defaultdict(list)
with open(f"data/{dataset_for_input}/questions/questions.json", "r") as f:
questions = json.load(f)
for q in questions:
questions_per_db[q["db_id"]].append(q)
if synthetic_data:
synthetic_questions_per_db = defaultdict(list)
with open(f"data/{dataset_for_input}/questions/questions_{synthetic_data}.json", "r") as f:
synthetic_questions = json.load(f)
for q in synthetic_questions:
synthetic_questions_per_db[q["db_id"]].append(q)
for db_id in db_ids:
        questions = questions_per_db[db_id]
if setting in ["indomain", "inoutdomain"]:
if synthetic_data:
indomain_questions_for_retrieval = synthetic_questions_per_db[db_id]
# print("number of in-domain questions for retrieval", len(indomain_questions_for_retrieval))
outdomain_createtable_schemas_per_question = []
outdomain_demo_examples_per_question = []
indomain_demo_examples_per_question = []
if setting in ["outdomain", "inoutdomain"]:
outdomain_questions = [q for q in outdomain_questions if q["db_id"] != db_id]
if outdomain_retrieval_strategy == "simsql_pred":
outdomain_bm25_corpus = [q["zeroshot"]["mentions"]["columns"] + q["zeroshot"]["mentions"]["keywords"] for q in outdomain_questions]
outdomain_bm25 = BM25Okapi(outdomain_bm25_corpus)
if setting in ["indomain", "inoutdomain"]:
if indomain_retrieval_strategy == "similarsql":
indomain_bm25_corpus = [q["gold"]["mentions"]["columns"] + q["gold"]["mentions"]["keywords"] for q in indomain_questions_for_retrieval]
indomain_bm25 = BM25Okapi(indomain_bm25_corpus)
elif indomain_retrieval_strategy == "simsql_pred":
indomain_bm25_corpus = [q["zeroshot"]["mentions"]["columns"] + q["zeroshot"]["mentions"]["keywords"] for q in indomain_questions_for_retrieval]
indomain_bm25 = BM25Okapi(indomain_bm25_corpus)
elif indomain_retrieval_strategy == "covsql":
indomain_bm25_corpus = [q["gold"]["mentions"]["columns"] + q["gold"]["mentions"]["keywords"] for q in indomain_questions_for_retrieval]
indomain_bm25 = BM25Okapi(indomain_bm25_corpus)
else:
                raise ValueError("unknown indomain retrieval strategy")
for i in tqdm(range(len(questions))):
q = questions[i]
if setting in ["outdomain", "inoutdomain"]:
# retrieve out domain examples
if outdomain_retrieval_strategy == "random":
outdomain_questions_for_retrieval = find_random_examples(q, outdomain_questions, split=None, deduplicate_demo=deduplicate_demo)
elif outdomain_retrieval_strategy =="simsql_pred":
outdomain_questions_for_retrieval = find_simsql(q, outdomain_bm25, outdomain_questions, outdomain_retrieval_strategy, split=None, deduplicate_demo=deduplicate_demo)
else:
                    raise ValueError("unknown outdomain retrieval strategy")
examples_per_db = defaultdict(list)
outdomain_createtable_schemas = []
outdomain_demo_examples = []
for retrieval_q in outdomain_questions_for_retrieval:
if len(examples_per_db[retrieval_q["db_id"]]) >= num_shot_per_table:
continue
examples_per_db[retrieval_q["db_id"]].append(retrieval_q)
if len(examples_per_db[retrieval_q["db_id"]]) == num_shot_per_table:
outdomain_createtable_schemas.append(
generate_create_table_prompt("spider-train", retrieval_q["db_id"], prompt_db, limit_value=3))
outdomain_demo_examples.append(examples_per_db[retrieval_q["db_id"]][::-1]) # put the most similar example closest to the query
if len(outdomain_createtable_schemas) == num_table:
outdomain_createtable_schemas = outdomain_createtable_schemas[::-1]
outdomain_demo_examples = outdomain_demo_examples[::-1]
break
outdomain_createtable_schemas_per_question.append(outdomain_createtable_schemas)
outdomain_demo_examples_per_question.append(outdomain_demo_examples)
if setting in ["indomain", "inoutdomain"]:
# retrieve in domain examples
if indomain_retrieval_strategy == "random":
indomain_demo_examples = find_random_examples(q, indomain_questions_for_retrieval, split=split, deduplicate_demo=deduplicate_demo)
indomain_demo_examples = indomain_demo_examples[:num_shot]
indomain_demo_examples = indomain_demo_examples[::-1]
elif indomain_retrieval_strategy in ["similarsql", "simsql_pred"]:
indomain_demo_examples = find_simsql(q, indomain_bm25, indomain_questions_for_retrieval, indomain_retrieval_strategy,
split=split, deduplicate_demo=deduplicate_demo)
indomain_demo_examples = indomain_demo_examples[:num_shot]
indomain_demo_examples = indomain_demo_examples[::-1]
elif indomain_retrieval_strategy == "covsql":
indomain_demo_examples = find_covsql(q, indomain_bm25, indomain_questions_for_retrieval, indomain_retrieval_strategy, num_shot,
split=split, deduplicate_demo=deduplicate_demo)
indomain_demo_examples = indomain_demo_examples[:num_shot]
indomain_demo_examples = indomain_demo_examples[::-1]
else:
                    raise ValueError("unknown indomain retrieval strategy")
indomain_demo_examples_per_question.append(indomain_demo_examples)
indomain_createtable_schema = generate_create_table_prompt(dataset_for_input, db_id, prompt_db=prompt_db)
if setting == "indomain":
few_shot_in_prompt, prompts, predictions = text_to_sql_few_shot_indomain(model, questions, indomain_createtable_schema, indomain_demo_examples_per_question, save_prompt_only=save_prompt_only)
elif setting == "outdomain":
few_shot_in_prompt, prompts, predictions = text_to_sql_few_shot_outdomain(model, questions, outdomain_createtable_schemas_per_question,indomain_createtable_schema,outdomain_demo_examples_per_question, save_prompt_only=save_prompt_only)
elif setting == "inoutdomain":
few_shot_in_prompt, prompts, predictions = text_to_sql_few_shot_inoutdomain(model, questions, outdomain_createtable_schemas_per_question,
indomain_createtable_schema,outdomain_demo_examples_per_question,indomain_demo_examples_per_question,
save_prompt_only=save_prompt_only)
else:
            raise ValueError("unknown setting")
prompts_total.extend(prompts)
predictions_total.extend(predictions)
few_shot_in_prompts[db_id] = few_shot_in_prompt
# with open(os.path.join(output_path, f"{db_id}_{shot_name}.json"), "w") as f:
# json.dump(predictions, f, indent=4)
with open(os.path.join(output_path, f"prompts_{shot_name}.json"), "w") as f:
json.dump(prompts_total, f, indent=4)
if save_prompt_only:
return
with open(os.path.join(output_path, f"pred_{shot_name}.json"), "w") as f:
json.dump(predictions_total, f, indent=4)
    with open(os.path.join(output_path, f"pred_{shot_name}.sql"), "w") as f:
        for d in predictions_total:
            f.write(d["predicted_sql"] + '\t' + d["db_id"] + '\n')
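# A minimal sketch (made-up token lists, not repository data) of the BM25
# ranking used above for demonstration retrieval: each corpus entry stands in
# for a question's SQL column/keyword mentions, and a higher score means the
# candidate's SQL is closer to the query's.
def _bm25_retrieval_demo():
    corpus = [["singer", "name"], ["concert", "count", "year"], ["stadium", "capacity"]]
    bm25 = BM25Okapi(corpus)
    scores = bm25.get_scores(["concert", "count"])  # one score per corpus entry
    print(scores)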
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--setting', type=str, help='setting', choices=["zeroshot", "indomain", "outdomain", "inoutdomain"],default="zero_shot")
parser.add_argument('--dataset', default="spider", type=str, help='dataset',
choices=["spider", "spider-train", "spider_synthetic_ship_codex_verified","drspider","kaggle-dbqa", "kaggle-dbqa_synthetic_ship_codex_verified" ])
parser.add_argument('--model', type=str, default="codex", choices=["codex", "chatgpt", "chatgpt16k", "gpt4", "CodeLlama-34b-hf"])
parser.add_argument('--prompt_db', type=str, help='prompt construction for database', default="CreateTableSelectCol")
parser.add_argument('--retrieval_indomain', type=str, help='retrieval strategy for in-domain demonstrations', default="random")
parser.add_argument('--retrieval_outdomain', type=str, help='retrieval strategy for out-of-domain demonstrations', default="random")
parser.add_argument('--num_table', type=int, help='number of databases for out-of-domain demonstrations', default=4)
parser.add_argument('--num_shot_per_table', type=int, help='number of examples per out-of-domain database', default=5)
    parser.add_argument('--num_shot', type=int, help='number of in-domain examples', default=5)
parser.add_argument('--seed', type=int, help='random_seed', default=12345)
parser.add_argument('--synthetic_data', type=str, default=None, help='what synthetic data to use')
parser.add_argument('--save_prompt_only', action='store_true', help='only saved the input prompt instead of running text-to-sql models')
args = parser.parse_args()
openai.api_key = os.getenv("OPENAI_API_KEY")
dataset = args.dataset
setting = args.setting
model = args.model
num_table = args.num_table
num_shot = args.num_shot
retrieval_indomain = args.retrieval_indomain
retrieval_outdomain = args.retrieval_outdomain
seed = args.seed
synthetic_data = args.synthetic_data
prompt_db = args.prompt_db
save_prompt_only = args.save_prompt_only
if "kaggle-dbqa" in dataset:
prompt_db=prompt_db+"_description"
if setting == "zeroshot":
zero_shot(dataset, model=model, prompt_db=prompt_db,save_prompt_only=save_prompt_only)
elif setting == "indomain":
retrieval_indomain="covsql"
retrieval_outdomain = None
num_table = None
num_shot_per_table = None
num_shot=5
if synthetic_data is not None:
split = None
example_correctness = "correct"
else:
split = "template"
example_correctness = "all"
few_shot_inoutdomain(setting, dataset, model, prompt_db, num_table, num_shot_per_table, num_shot,
indomain_retrieval_strategy=retrieval_indomain, outdomain_retrieval_strategy=retrieval_outdomain,
example_correctness=example_correctness, split=split,
synthetic_data=synthetic_data,
seed=seed, save_prompt_only=save_prompt_only)
elif setting in ["outdomain"]:
retrieval_indomain = None
retrieval_outdomain = "simsql_pred"
split = None
num_shot=None
num_shot_per_table=5
num_table=3
few_shot_inoutdomain(setting, dataset,model, prompt_db, num_table, num_shot_per_table, num_shot=None,
indomain_retrieval_strategy=retrieval_indomain, outdomain_retrieval_strategy=retrieval_outdomain,
example_correctness="all", split=split, synthetic_data=synthetic_data, seed=seed, save_prompt_only=save_prompt_only)
elif setting == "inoutdomain":
retrieval_indomain="covsql"
retrieval_outdomain = "simsql_pred"
num_table = 4
num_shot_per_table = 5
num_shot = 5
if synthetic_data:
split = None
example_correctness = "correct"
else:
split = "template"
example_correctness = "all"
few_shot_inoutdomain(setting, dataset,model, prompt_db, num_table, num_shot_per_table, num_shot,
indomain_retrieval_strategy=retrieval_indomain, outdomain_retrieval_strategy=retrieval_outdomain,
example_correctness=example_correctness, split=split,
synthetic_data=synthetic_data,
seed=seed, save_prompt_only=save_prompt_only)
| [
"[]",
"{}",
"PLACEHOLDER_description"
] |
2024-01-10 | shuaichenchang/ODIS-Text-to-SQL | sql_generation.py | import os
import random
import time
import subprocess
import asyncio
import requests
import json
import math
import argparse
import numpy as np
from numpy.linalg import norm
import openai
import copy
from collections import defaultdict
from transformers import AutoTokenizer
from sql_processing import format_query
import sqlparse
import tiktoken
os.environ["DATA_GYM_CACHE_DIR"] = "~/tmp/data-gym-cache"
encoding = tiktoken.get_encoding("cl100k_base")
chatgpt_encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
DB_SEP = "-- Given a relational database, generate SQLite corresponding to the question.\n"
MAX_GEN_TOKENS = 200
def get_prompt_length(prompt, model="codex"):
if model == "codex":
result = subprocess.run(["node", "codex_prompt_length.mjs", prompt], stdout=subprocess.PIPE)
prompt_len = eval(result.stdout)
return prompt_len
elif model in ["chatgpt", "chatgpt16k", "gpt4"]:
return len(chatgpt_encoding.encode(prompt))
elif "llama" in model:
tokenizer = AutoTokenizer.from_pretrained("codellama/CodeLlama-34b-Instruct-hf")
return len(tokenizer(prompt)["input_ids"])
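# A small illustrative check (the prompt string is an assumption, not project
# data): counting tokens for a short prompt via the per-model paths above.
def _prompt_length_demo():
    print(get_prompt_length("SELECT name FROM singer WHERE age > 30", model="chatgpt"))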
spider_train_db_ids = ['department_management', 'farm', 'student_assessment', 'bike_1', 'book_2', 'musical', 'twitter_1', 'product_catalog', 'flight_1',
'allergy_1', 'store_1', 'journal_committee', 'customers_card_transactions', 'race_track', 'coffee_shop', 'chinook_1', 'insurance_fnol',
'medicine_enzyme_interaction', 'university_basketball', 'phone_1', 'match_season', 'climbing', 'body_builder', 'election_representative',
'apartment_rentals', 'game_injury', 'soccer_1', 'performance_attendance', 'college_2', 'debate', 'insurance_and_eClaims',
'customers_and_invoices', 'wedding', 'theme_gallery', 'epinions_1', 'riding_club', 'gymnast', 'small_bank_1', 'browser_web', 'wrestler',
'school_finance', 'protein_institute', 'cinema', 'products_for_hire', 'phone_market', 'gas_company', 'party_people', 'pilot_record',
'cre_Doc_Control_Systems', 'company_1', 'local_govt_in_alabama', 'formula_1', 'machine_repair', 'entrepreneur', 'perpetrator', 'csu_1',
'candidate_poll', 'movie_1', 'county_public_safety', 'inn_1', 'local_govt_mdm', 'party_host', 'storm_record', 'election', 'news_report',
'restaurant_1', 'customer_deliveries', 'icfp_1', 'sakila_1', 'loan_1', 'behavior_monitoring', 'assets_maintenance', 'station_weather',
'college_1', 'sports_competition', 'manufacturer', 'hr_1', 'music_1', 'baseball_1', 'mountain_photos', 'program_share', 'e_learning',
'insurance_policies', 'hospital_1', 'ship_mission', 'student_1', 'company_employee', 'film_rank', 'cre_Doc_Tracking_DB', 'club_1',
'tracking_grants_for_research', 'network_2', 'decoration_competition', 'document_management', 'company_office', 'solvency_ii',
'entertainment_awards', 'customers_campaigns_ecommerce', 'college_3', 'department_store', 'aircraft', 'local_govt_and_lot',
'school_player', 'store_product', 'soccer_2', 'device', 'cre_Drama_Workshop_Groups', 'music_2', 'manufactory_1',
'tracking_software_problems', 'shop_membership', 'voter_2', 'products_gen_characteristics', 'swimming', 'railway',
'customers_and_products_contacts', 'dorm_1', 'customer_complaints', 'workshop_paper', 'tracking_share_transactions', 'cre_Theme_park',
'game_1', 'customers_and_addresses', 'music_4', 'roller_coaster', 'ship_1', 'city_record', 'e_government', 'school_bus',
'flight_company', 'cre_Docs_and_Epenses', 'scientist_1', 'wine_1', 'train_station', 'driving_school', 'activity_1', 'flight_4',
'tracking_orders', 'architecture', 'culture_company']
spider_dev_db_ids = ['concert_singer', 'pets_1', 'car_1', 'flight_2', 'employee_hire_evaluation', 'cre_Doc_Template_Mgt', 'course_teach', 'museum_visit',
'wta_1', 'battle_death', 'student_transcripts_tracking', 'tvshow', 'poker_player', 'voter_1', 'world_1', 'orchestra', 'network_1',
'dog_kennels', 'singer', 'real_estate_properties']
kaggle_db_ids = ['WorldSoccerDataBase', 'Pesticide', 'USWildFires', 'GeoNuclearData', 'WhatCDHipHop', 'TheHistoryofBaseball', 'StudentMathScore',
'GreaterManchesterCrime']
db_ids_dataset = {
"spider-train": spider_train_db_ids,
"spider": spider_dev_db_ids,
"drspider": spider_dev_db_ids,
"kaggle-dbqa": kaggle_db_ids,
}
def cut_prompt_with_max_tokens(model, prompt, max_generate_tokens=MAX_GEN_TOKENS):
if model in ["codex", "gpt4"]:
model_max_tokens = 8000
elif model in ["chatgpt"]:
model_max_tokens = 4000
elif model in ["chatgpt16k"]:
model_max_tokens = 16000
elif model in ["llama34instruct", "llama7", "llama13"]:
model_max_tokens = 8000
else:
raise NotImplementedError
prompt_len = get_prompt_length(prompt, model=model)
cnt = 0
while prompt_len >= model_max_tokens - max_generate_tokens:
prompt = prompt.split(DB_SEP)
prompt = DB_SEP.join([""] + prompt[2:])
prompt_len = get_prompt_length(prompt, model=model)
cnt += 1
if cnt > 0:
print(f"Prompt too long, skip the first {cnt} databases.")
return prompt, prompt_len
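# A minimal sketch (hypothetical schemas) of the trimming behavior above:
# whole leading database blocks are dropped at DB_SEP boundaries until the
# prompt fits the model window minus the generation budget.
def _cut_prompt_demo():
    blocks = [DB_SEP + f"CREATE TABLE t{i} (id int);\nQuestion: q{i}\nselect 1\n" for i in range(3)]
    prompt, length = cut_prompt_with_max_tokens("chatgpt", "".join(blocks))
    print(length)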
def call_chatgpt(model, prompt, max_tokens=200, stop=[";", "Question", 'Answer', '/*']):
if model == "chatgpt":
api_model = "gpt-3.5-turbo-0301"
model_max_tokens = 4000
elif model == "chatgpt16k":
api_model = "gpt-3.5-turbo-16k-0613"
model_max_tokens = 16000
elif model == "gpt4":
api_model = "gpt-4-0613"
model_max_tokens = 8000
else:
raise NotImplementedError
while (True):
try:
response = openai.ChatCompletion.create(
model=api_model,
messages=[
{"role": "system", "content": "You are a helpful assistant that translate a question to a SQL query given a database."},
{"role": "user", "content": prompt},
],
temperature=0,
max_tokens=max_tokens,
top_p=1.0,
stop=stop,
)
break
except Exception as e:
print(e, "Retry.")
time.sleep(10)
continue
for i in range(len(response["choices"])):
        x = response["choices"][i]["message"]["content"].replace('\n', ' ').replace('  ', ' ').replace('\t', ' ').replace('\r', ' ')
for s in stop:
if s in x:
x = x[:x.index(s)]
response["choices"][i]["text"] = ' ' + x
return response
def call_codex(model, prompt, max_tokens=200, stop=[";", "Question", 'Answer', '/*'], num_return=1, temperature=0, top_p=1):
api_model = "code-davinci-002"
while (True):
try:
response = openai.Completion.create(
model=api_model,
prompt=prompt,
n=num_return,
best_of=num_return,
temperature=temperature,
max_tokens=max_tokens,
top_p=top_p,
frequency_penalty=0,
presence_penalty=0,
stop=stop,
logprobs=5
)
break
except Exception as e:
print(e, "Retry.")
time.sleep(10)
continue
for i in range(len(response["choices"])):
        response["choices"][i]["text"] = response["choices"][i]["text"].replace('\n', ' ').replace('  ', ' ').replace('\t', ' ').replace('\r', ' ')
return response
def sql_generation(model, prompt):
if model == "codex":
return call_codex(model, prompt)
if model in ["chatgpt", "chatgpt16k", "gpt4"]:
return call_chatgpt(model, prompt)
def text_to_sql_direct(model, questions, db_prompt,save_prompt_only=False):
if model == "gpt3.5":
api_name = "text-davinci-003"
elif model == "codex":
api_name = "code-davinci-002"
elif model == "chatgpt":
api_name = "gpt-3.5-turbo"
elif model == "chatgpt16k":
api_name = "gpt-3.5-turbo-16k-0613"
elif model == "gpt4":
api_name = "gpt-4-0613"
elif model in ["llama34instruct", "llama7", "llama13"]:
api_name = None
else:
raise NotImplementedError
predictions = []
prompts = []
prompts_len = []
stop = [";", "Question", 'Answer', '/*']
for q in questions:
prompt = db_prompt + f"Question: {q['question']}\n" + "select"
        prompt_len = get_prompt_length(prompt, model=model)
prompts.append(prompt)
prompts_len.append(prompt_len)
if save_prompt_only:
return prompts, []
responses = []
for q, prompt, prompt_len in zip(questions, prompts, prompts_len):
response = sql_generation(model, prompt)
responses.append(response)
sql = "select" + response["choices"][0]["text"]
# print(prompt)
print(q["question"])
print(sql)
print(prompt_len)
for q, response, prompt_len in zip(questions, responses, prompts_len):
sql = "select" + response["choices"][0]["text"]
predictions.append({
"db_id": q["db_id"],
"question": q["question"],
"gold_sql": q['gold']['query_normalized'],
"predicted_sql": sql,
"prompt_len": prompt_len,
})
return prompts, predictions
def text_to_sql_few_shot_indomain(model, questions, indomain_schema, indomain_demo_examples_per_question, demo_sql_format="normalized", save_prompt_only=False):
print("=" * 10 + "start" + "=" * 10)
few_shot_in_prompts = []
predictions = []
prompts = []
prompts_len=[]
for q, indomain_few_shot_examples in zip(questions, indomain_demo_examples_per_question):
prompt = indomain_schema
indomain_demonstration = []
for example in indomain_few_shot_examples:
prompt += f"Question: {example['question']}\n"
query = format_query(example, demo_sql_format)
prompt += query + '\n'
indomain_demonstration.append([example["question"], query])
few_shot_in_prompts.append([q["question"], q["query"], indomain_demonstration])
prompt += f"Question: {q['question']}\n" + "select"
        prompt_len = get_prompt_length(prompt, model=model)
prompts.append(prompt)
prompts_len.append(prompt_len)
if save_prompt_only:
return [], prompts, []
for q, prompt, prompt_len in zip(questions, prompts, prompts_len):
response = sql_generation(model=model, prompt=prompt)
print(response)
sql = "select" + response["choices"][0]["text"]
print(q["question"])
print(sql)
predictions.append({
"db_id": q["db_id"],
"question": q["question"],
"gold_sql": q['gold']['query_normalized'],
"predicted_sql": sql,
"prompt_len": prompt_len,
})
return few_shot_in_prompts, prompts, predictions
def create_outdomain_prompt(outdomain_schemas, outdomain_demo_examples, demo_sql_format="normalized"):
    prompt = ""
    outdomain_demonstration = []
    for schema, examples in zip(outdomain_schemas, outdomain_demo_examples):
        prompt += DB_SEP
        prompt += schema
        outdomain_demonstration.append([])
        for example in examples:
            prompt += f"Question: {example['question']}\n"
            query = format_query(example, demo_sql_format)
            prompt += query + '\n'
            outdomain_demonstration[-1].append([example["question"], query])
        prompt += '\n'
    return prompt, outdomain_demonstration
def text_to_sql_few_shot_outdomain(model, questions, outdomain_schemas_per_question, indomain_schema, outdomain_demo_examples_per_question,
demo_sql_format="normalized",save_prompt_only=False):
few_shot_in_prompts = []
print("=" * 10 + "start" + "=" * 10)
predictions = []
prompts = []
prompts_len = []
for q, outdomain_schemas, outdomain_demo_examples in zip(questions, outdomain_schemas_per_question, outdomain_demo_examples_per_question):
        prompt, outdomain_demonstration = create_outdomain_prompt(outdomain_schemas, outdomain_demo_examples, demo_sql_format=demo_sql_format)
prompt += DB_SEP
prompt += indomain_schema
        few_shot_in_prompts.append([q["question"], q["query"], outdomain_demonstration])
prompt += f"Question: {q['question']}\n" + "select"
prompt, prompt_len = cut_prompt_with_max_tokens(model, prompt, MAX_GEN_TOKENS)
prompts.append(prompt)
prompts_len.append(prompt_len)
if save_prompt_only:
return [], prompts, []
for q, prompt, prompt_len in zip(questions, prompts, prompts_len):
response = sql_generation(model=model, prompt=prompt)
sql = "select" + response["choices"][0]["text"]
# print(prompt)
# print(q["question"])
# print(sql)
# print(prompt_len)
predictions.append({
"db_id": q["db_id"],
"question": q["question"],
"gold_sql": q['gold']['query_normalized'],
"predicted_sql": sql,
"prompt_len": prompt_len,
})
    return few_shot_in_prompts, prompts, predictions
def text_to_sql_few_shot_inoutdomain(model, questions, outdomain_schemas_per_question, indomain_schema, outdomain_demo_examples_per_question,
indomain_demo_examples_per_question, demo_sql_format="normalized",save_prompt_only=False):
few_shot_in_prompts = []
print("=" * 10 + "start" + "=" * 10)
predictions = []
prompts = []
prompts_len = []
for q, outdomain_schemas, outdomain_demo_examples, indomain_few_shot_examples in zip(questions, outdomain_schemas_per_question, outdomain_demo_examples_per_question, indomain_demo_examples_per_question):
        prompt, outdomain_demonstration = create_outdomain_prompt(outdomain_schemas, outdomain_demo_examples, demo_sql_format=demo_sql_format)
prompt += DB_SEP
prompt += indomain_schema
indomain_demonstration = []
for example in indomain_few_shot_examples:
prompt += f"Question: {example['question']}\n"
query = format_query(example, demo_sql_format)
prompt += query + '\n'
indomain_demonstration.append([example["question"], query])
        few_shot_in_prompts.append([q["question"], q["query"], outdomain_demonstration, indomain_demonstration])
prompt += f"Question: {q['question']}\n" + "select"
prompt, prompt_len = cut_prompt_with_max_tokens(model, prompt, MAX_GEN_TOKENS)
prompts.append(prompt)
prompts_len.append(prompt_len)
if save_prompt_only:
return [], prompts, []
for q, prompt,prompt_len in zip(questions, prompts,prompts_len):
response = sql_generation(model=model, prompt=prompt)
        sql = "select" + response["choices"][0]["text"].replace('\n', ' ').replace('\t', ' ').replace('  ', ' ')
print(prompt)
print(q["question"])
print(sql)
print(prompt_len)
print()
predictions.append({
"db_id": q["db_id"],
"question": q["question"],
"gold_sql": q['gold']['query_normalized'],
"predicted_sql": sql,
"prompt_len": prompt_len,
})
    return few_shot_in_prompts, prompts, predictions
| [
"\n",
"PLACEHOLDERQuestion: PLACEHOLDER\nselect",
"You are a helpful assistant that translate a question to a SQL query given a database.",
"PLACEHOLDER\n",
"Question: PLACEHOLDER\n",
"[]",
"Question: PLACEHOLDER\nselect"
] |
2024-01-10 | juhiechandra/AI-based-log-analyser | scripts~vector_method.py | from langchain.document_loaders import DirectoryLoader
from langchain.docstore.document import Document
## document loader imports
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.document_loaders import TextLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQA
from langchain import OpenAI
## parsing csv to string
import pandas as pd
## loading texts from the local directory
def load_doc_txt():
loader = DirectoryLoader('./assets', glob="**/*.txt", show_progress=True)
docs = loader.load()
return docs
def load_plain_text(plain_text):
doc = Document(page_content=plain_text)
return [doc]
## create vector store for the documents and create embedding
def create_embedding(docs):
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
documents = text_splitter.split_documents(docs)
db = Chroma.from_documents(documents, OpenAIEmbeddings())
return db
## load csv files from the local directory
def load_csv():
    loader = DirectoryLoader(
        './data_uploaded', glob="**/*.csv", show_progress=True,
        loader_cls=CSVLoader, loader_kwargs={"encoding": "utf-8"},
    )
    return loader.load()
## converting csv to string
def load_csv_as_query(csv_file):
df = pd.read_csv(csv_file)
query = df.to_string(index=False)
return query
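## A short usage sketch (the file path and question are assumptions): turn an
## uploaded CSV of log rows into a plain-text query for the retrieval chain.
def demo_csv_query(qa_chain):
    query = load_csv_as_query("./data_uploaded/logs.csv")
    return qa_chain.run("Summarize these log entries:\n" + query)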
## main function
def main():
# load documents
docs = load_doc_txt()
# print(docs)
# create embedding
docsearch = create_embedding(docs)
qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type="stuff", retriever=docsearch.as_retriever())
    query = "What are the contents of the file?"
ans = qa.run(query)
print(ans)
if __name__ == "__main__":
main()
| [] |
2024-01-10 | fagan2888/datasets-9 | datasets~openwebtext~openwebtext.py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""
from __future__ import absolute_import, division, print_function
import os
import re
from itertools import chain
import datasets
_CITATION = """\
@misc{Gokaslan2019OpenWeb,
title={OpenWebText Corpus},
author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
howpublished = {\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
year={2019}
}
"""
_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""
_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
class Openwebtext(datasets.GeneratorBasedBuilder):
"""The Open WebText dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
description="Plain text",
version=datasets.Version("1.0.0"),
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({"text": datasets.Value("string")}),
homepage="https://skylion007.github.io/OpenWebTextCorpus/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_URL)
owt_dir = os.path.join(dl_dir, "openwebtext")
subset_xzs = [
os.path.join(owt_dir, file_name)
for file_name in sorted(os.listdir(owt_dir))
if file_name.endswith("xz") # filter out ...xz.lock
]
ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
nested_txt_files = [
[
os.path.join(ex_dir, txt_file_name)
for txt_file_name in sorted(os.listdir(ex_dir))
if txt_file_name.endswith("txt")
]
for ex_dir in ex_dirs
]
txt_files = chain(*nested_txt_files)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
]
def _generate_examples(self, txt_files):
""" Yields examples. """
for idx, filepath in enumerate(txt_files):
with open(filepath, encoding="utf-8") as f:
yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
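# A minimal usage sketch (not part of the builder itself): once registered,
# the corpus is consumed through the standard datasets entry point.
def _load_demo():
    ds = datasets.load_dataset("openwebtext", split="train")
    print(ds[0]["text"][:100])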
| [] |
2024-01-10 | piglei/ai-vocabulary-builder | voc_builder~interactive.py | """Functions relative with the interactive REPL"""
import logging
import time
import traceback
from dataclasses import dataclass, field
from textwrap import dedent
from threading import Thread
from typing import ClassVar, List, Optional
import questionary
from prompt_toolkit import PromptSession
from prompt_toolkit.formatted_text import HTML
from prompt_toolkit.styles import Style
from rich.console import Console
from rich.live import Live
from rich.panel import Panel
from rich.progress import Progress, SpinnerColumn, TextColumn
from rich.spinner import Spinner
from rich.table import Table
from rich.text import Text
from voc_builder.builder import migrate_builder_data_to_store
from voc_builder.commands.exceptions import CommandSyntaxError, NotCommandError
from voc_builder.commands.parsers import ListCmdParser, ListCommandExpr
from voc_builder.exceptions import OpenAIServiceError, WordInvalidForAdding
from voc_builder.int_commands.remove import handle_cmd_remove
from voc_builder.models import LiveStoryInfo, LiveTranslationInfo, WordChoice, WordSample
from voc_builder.openai_svc import get_story, get_translation, get_uncommon_word, get_word_choices
from voc_builder.store import get_mastered_word_store, get_word_store
from voc_builder.utils import highlight_story_text, highlight_words, tokenize_text
from voc_builder.version import check_for_new_versions
logger = logging.getLogger()
console = Console()
# Special commands
# no: discard last added word, try to get other options and let user choose from them manually
COMMAND_NO = 'no'
# story: write a story from the least recently used words
COMMAND_STORY = 'story'
# remove: enter interactive mode to remove words from vocabulary book
COMMAND_REMOVE = 'remove'
class LastActionResult:
"""This class is used as a global state, stores the result of last action"""
trans_result: ClassVar[Optional['TransActionResult']] = None
story_result: ClassVar[Optional['StoryActionResult']] = None
list_result: ClassVar[Optional['ListActionResult']] = None
@dataclass
class TransActionResult:
"""The result of a translation action
:param input_text: The text user has inputted
:param stored_to_voc_book: whether the word has been added to the vocabulary book
:param error: The actual error message
:param invalid_for_adding: There is a valid word but it's invalid for adding
:param word_sample: The WordSample object
"""
input_text: str
stored_to_voc_book: bool
error: str = ''
word_sample: Optional[WordSample] = None
invalid_for_adding: bool = False
@dataclass
class NoActionResult:
"""The result of a "story" action
:param words: The words user has selected and saved successfully, might be empty
:param failed_words: The words user has selected but failed to save
:param stored_to_voc_book: whether the word has been saved
:param error: The actual error message
"""
words: List[WordSample] = field(default_factory=list)
failed_words: List[WordSample] = field(default_factory=list)
stored_to_voc_book: bool = False
error: str = ''
@dataclass
class StoryActionResult:
"""The result of a story action
:param words: The words for writing story
:param error: The actual error message
"""
words: List[WordSample] = field(default_factory=list)
error: str = ''
@dataclass
class ListActionResult:
"""The result of a "list" action
:param error: The actual error message
"""
words: List[WordSample] = field(default_factory=list)
error: str = ''
prompt_style = Style.from_dict(
{
# Prompt
"tip": "bold",
"arrow": "bold",
}
)
def enter_interactive_mode(): # noqa: C901
"""Enter the interactive mode"""
# Try to migrate the data in the CSV file(for version < 0.2) to the new word store which was
# based on TinyDB, this should be a one off action.
try:
migrate_builder_data_to_store(console)
except Exception as e:
logger.debug('Detailed stack trace info: %s', traceback.format_exc())
console.print(f'Error migrating data from CSV file: {e}')
    # Check for new versions
try:
check_for_new_versions(console)
except Exception as e:
logger.debug('Detailed stack trace info: %s', traceback.format_exc())
        logger.warning(f'Error checking for new versions: {e}')
console.print(
Panel(
dedent(
'''
[bold]Guides[/bold]:
- Enter your text to start translating and building vocabulary
- One sentence at a time, don't paste huge amounts of text at once
- Get your vocabulary book file by running [bold]aivoc export --format csv[/bold]
- Special Command:
* [bold]no[/bold]: Remove the last added word and start a manual selection
* [bold]story[/bold]: Recall words by reading a story written by AI
* [bold]list {limit}[/bold]: List recently added words. Args:
- [underline]limit[/underline]: optional, a number or "all", defaults to 10.
* [bold]remove[/bold]: Enter "remove" mode, remove words from your vocabulary book.
* [Ctrl+c] to quit'''
).strip(),
title='Welcome to AI Vocabulary Builder!',
)
)
session: PromptSession = PromptSession()
while True:
text = session.prompt(
HTML('<tip>Enter text</tip><arrow>></arrow> '), style=prompt_style
).strip()
if not text:
continue
elif text == COMMAND_NO:
handle_cmd_no()
continue
elif text == COMMAND_STORY:
LastActionResult.story_result = handle_cmd_story()
continue
elif text == COMMAND_REMOVE:
handle_cmd_remove()
continue
# Try different command parsers
# TODO: Use a loop to try different parsers
try:
list_expr = ListCmdParser().parse(text)
except NotCommandError:
# Handle as a normal translation
pass
except CommandSyntaxError as e:
console.print(f'List command syntax error: {e}', style='red')
continue
else:
LastActionResult.list_result = handle_cmd_list(list_expr)
continue
trans_ret = handle_cmd_trans(text.strip())
        # Don't keep results whose error was an invalid input length
if trans_ret.error != 'input_length_invalid':
LastActionResult.trans_result = trans_ret
MIN_LENGTH_TRANS_TEXT = 12
MAX_LENGTH_TRANS_TEXT = 1600
def handle_cmd_trans(text: str) -> TransActionResult:
"""Write a new word to the vocabulary book
:param csv_book_path: The path of vocabulary book
"""
# Validate input length
if len(text) < MIN_LENGTH_TRANS_TEXT:
console.print(
f'Content too short, input at least {MIN_LENGTH_TRANS_TEXT} characters to start a translation.',
style='red',
)
return TransActionResult(
input_text=text, stored_to_voc_book=False, error='input_length_invalid'
)
if len(text) > MAX_LENGTH_TRANS_TEXT:
console.print(
f'Content too long, input at most {MAX_LENGTH_TRANS_TEXT} characters to start a translation.',
style='red',
)
return TransActionResult(
input_text=text, stored_to_voc_book=False, error='input_length_invalid'
)
mastered_word_s = get_mastered_word_store()
word_store = get_word_store()
orig_words = tokenize_text(text)
with Live(refresh_per_second=LiveTransRenderer.frames_per_second) as live:
# Get the translation and do live updating
live_renderer = LiveTransRenderer(live)
live_renderer.run(text)
try:
trans_ret = get_translation(text, live_renderer.live_info)
live_renderer.block_until_finished()
except OpenAIServiceError as e:
console.print(f'[red] Error processing text, detail: {e}[red]')
logger.debug('Detailed stack trace info: %s', traceback.format_exc())
return TransActionResult(
input_text=text, stored_to_voc_book=False, error='openai_svc_error'
)
live.update(gen_translated_table(text, trans_ret.translated_text))
# Words already in vocabulary book and marked as mastered are treated as "known"
known_words = word_store.filter(orig_words) | mastered_word_s.filter(orig_words)
console.print('\n')
progress = Progress(SpinnerColumn(), TextColumn("[bold blue] Extracting word"))
with progress:
task_id = progress.add_task("get", start=False)
# Get the uncommon word
try:
choice = get_uncommon_word(text, known_words)
except OpenAIServiceError as e:
console.print(f'[red] Error extracting word, detail: {e}[red]')
logger.debug('Detailed stack trace info: %s', traceback.format_exc())
return TransActionResult(
input_text=text, stored_to_voc_book=False, error='openai_svc_error'
)
finally:
progress.update(task_id, total=1, advance=1)
word = WordSample(
word=choice.word,
word_normal=choice.word_normal,
word_meaning=choice.word_meaning,
pronunciation=choice.pronunciation,
translated_text=trans_ret.translated_text,
orig_text=trans_ret.text,
)
console.print(f'> The new word AI has chosen is "[bold]{word.word}[/bold]".\n')
try:
validate_result_word(word, text)
except WordInvalidForAdding as e:
console.print(f'Unable to add "{word.word}", reason: {e}', style='red')
return TransActionResult(
input_text=text,
stored_to_voc_book=False,
error=str(e),
word_sample=word,
invalid_for_adding=True,
)
console.print(format_single_word(word))
word_store.add(word)
console.print(
(
f'[bold]"{word.word}"[/bold] was added to your vocabulary book ([bold]{word_store.count()}[/bold] '
'in total), well done!'
),
style='grey42',
)
console.print('Hint: use "no" command to choose other words.\n', style='grey42')
return TransActionResult(input_text=text, stored_to_voc_book=True, word_sample=word)
class LiveTransRenderer:
"""Render live translation result
:param live_display: Live display component from rich
"""
frames_per_second = 12
def __init__(self, live_display: Live) -> None:
self.spinner = Spinner('dots')
self.live_display = live_display
self.live_info = LiveTranslationInfo()
self._thread = None
def run(self, text: str):
"""Start a background thread to update the live display, this thread is required
because the "loading" animation has to be rendered at a steady pace.
:param text: The original text
"""
        self._thread = Thread(target=self._run, args=(text,))
        self._thread.start()
def _run(self, text: str):
"""A loop function which render the translation result repeatedly."""
while not self.live_info.is_finished:
time.sleep(1 / self.frames_per_second)
self.live_display.update(self._gen_table(text, self.live_info.translated_text))
def block_until_finished(self):
"""Block until the live procedure has been finished"""
if self._thread:
self._thread.join()
def _gen_table(self, text: str, translated: Optional[str] = None):
"""Generate the table for displaying translated paragraph.
:param text: The original text.
:param translated: The translated result text.
"""
table = Table(title="Translation Result", show_header=False)
table.add_column("Title")
table.add_column("Detail", overflow='fold')
table.add_row("[bold]Original Text[/bold]", f'[grey42]{text}[grey42]')
table.add_row(Text('Translating ') + self.spinner.render(time.time()), translated)
return table
def gen_translated_table(text: str, translated: str):
"""Generate the table for displaying translated paragraph.
:param text: The original text.
:param translated: The translated result text.
"""
table = Table(title="Translation Result", show_header=False)
table.add_column("Title")
table.add_column("Detail", overflow='fold')
table.add_row("[bold]Original Text[/bold]", f'[grey42]{text}[grey42]')
table.add_row("[bold]Translation[/bold]", translated)
return table
def handle_cmd_no() -> NoActionResult:
"""Handle the "no" command, do following things:
- Remove the last added word, also mark it as "mastered"
- Let the user choose unknown word manually
:return: The action result object.
"""
ret = LastActionResult.trans_result
if not (ret and ret.word_sample and (ret.stored_to_voc_book or ret.invalid_for_adding)):
console.print(
            'The "no" command removes the last added word and lets you select a word manually.'
)
console.print('Can\'t get the last added word, please start a new translation first.')
return NoActionResult(error='last_trans_absent')
selector = ManuallySelector()
if not ret.invalid_for_adding:
selector.discard_word(ret.word_sample)
console.print(
f'"{ret.word_sample.word}" has been discarded from your vocabulary book.',
style='grey42',
)
progress = Progress(SpinnerColumn(), TextColumn("[bold blue] Extracting multiple new words"))
with progress:
task_id = progress.add_task("get", start=False)
try:
choices = selector.get_choices(ret.input_text)
except OpenAIServiceError as e:
console.print(f'[red] Error processing text, detail: {e}[red]')
logger.debug('Detailed stack trace info: %s', traceback.format_exc())
return NoActionResult(error='openai_svc_error')
finally:
progress.update(task_id, total=1, advance=1)
if not choices:
console.print('No words could be extracted from your input text, skip.', style='grey42')
return NoActionResult(error='no_choices_error')
choices_from_user = selector.get_user_words_selection(choices)
if not choices_from_user:
console.print('Skipped.', style='grey42')
return NoActionResult(error='user_skip')
# Process the words, try to add them to the vocabulary book and print the result
words: List[WordSample] = []
failed_words: List[WordSample] = []
for word_sample in choices_from_user:
sample = WordSample(
word=word_sample.word,
word_normal=word_sample.word_normal,
word_meaning=word_sample.word_meaning,
pronunciation=word_sample.pronunciation,
translated_text=ret.word_sample.translated_text,
orig_text=ret.input_text,
)
try:
validate_result_word(sample, ret.input_text)
except WordInvalidForAdding as e:
console.print(f'Unable to add "{word_sample.word}", reason: {e}', style='grey42')
failed_words.append(sample)
else:
words.append(sample)
if not words:
return NoActionResult(failed_words=failed_words, error='failed_to_add')
word_store = get_word_store()
for word in words:
word_store.add(word)
console.print(
(
'New word(s) added to your vocabulary book: [bold]"{}"[/bold] ([bold]{}[/bold] '
'in total), well done!\n'.format(','.join(w.word for w in words), word_store.count())
),
style='grey42',
)
LastActionResult.trans_result = None
return NoActionResult(words=words, failed_words=failed_words, stored_to_voc_book=True)
class ManuallySelector:
"""A class dealing with manually word selection"""
choice_skip = 'None of above, skip for now.'
def discard_word(self, word: WordSample):
"""Remove the last action word for prepare for the next action"""
# Remove last word
get_word_store().remove(word.word)
def get_choices(self, text: str) -> List[WordChoice]:
"""Get word choices from OpenAI service"""
orig_words = tokenize_text(text)
# Words already in vocabulary book and marked as mastered are treated as "known"
known_words = get_word_store().filter(orig_words) | get_mastered_word_store().filter(
orig_words
)
return get_word_choices(text, known_words)
def get_user_words_selection(self, choices: List[WordChoice]) -> List[WordChoice]:
"""Get the words user has selected
:return: A list of WordChoice object, might be empty if user choose to skip
or select none.
"""
# Read user input
str_choices = [w.get_console_display() for w in choices] + [self.choice_skip]
answers = self.prompt_select_words(str_choices)
# Get the WordChoice, turn it into WordSample and save to vocabulary book
word_str_list: List[str] = []
for answer in answers:
# Return empty list if the "skip" option is selected
if answer == self.choice_skip:
return []
word_str_list.append(WordChoice.extract_word(answer))
return [w for w in choices if w.word in word_str_list]
def prompt_select_words(self, str_choices: List[str]) -> List[str]:
"""Call terminal to prompt user to select the word(s) he/she doesn't know"""
return questionary.checkbox("Choose the word(s) you don't know", choices=str_choices).ask()
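# A small illustrative sketch (the choice strings below are made up): the
# checkbox prompt wrapped above returns the display strings the user picked.
def _selection_demo():
    selector = ManuallySelector()
    picked = selector.prompt_select_words(["foo / [fuː] / a made-up word", ManuallySelector.choice_skip])
    print(picked)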
# The default number of words used for writing the story
DEFAULT_WORDS_CNT_FOR_STORY = 6
def handle_cmd_story(words_cnt: int = DEFAULT_WORDS_CNT_FOR_STORY) -> StoryActionResult:
"""Handle the "story" command, do following things:
- Pick 6 words from the vocabulary book, use LRU algo
- Write a story use those words
:param words_cnt: The number of words used for writing the story
:return: A story action result.
"""
word_store = get_word_store()
words = word_store.pick_story_words(words_cnt)
if len(words) < words_cnt:
console.print(
(
'Current number of words in your vocabulary book is less than {}.\n'
'Translate more and come back later!'
).format(words_cnt),
style='red',
)
return StoryActionResult(error='not_enough_words')
# Call OpenAI service to get story text, word's normal form is preferred
words_str = [w.get_normal_word_display() or w.word for w in words]
console.print('Words for generating story: [bold]{}[/bold]'.format(', '.join(words_str)))
with Live(refresh_per_second=LiveStoryRenderer.frames_per_second) as live:
live_renderer = LiveStoryRenderer(live)
live_renderer.run()
try:
story_text = get_story(words, live_renderer.live_info)
except OpenAIServiceError as e:
console.print(f'[red] Error retrieving story, detail: {e}[red]')
logger.debug('Detailed stack trace info: %s', traceback.format_exc())
return StoryActionResult(error='openai_svc_error')
live_renderer.block_until_finished()
live.update(Panel(highlight_story_text(story_text.strip()), title='Enjoy your reading'))
# Update words to make LRU work
word_store.update_story_words(words)
# Display words on demand
cmd_obj = StoryCmd(words_cnt)
if cmd_obj.prompt_view_words():
console.print(format_words(words))
return StoryActionResult(words=words)
# By default, list 10 latest words
DEFAULT_WORDS_CNT_FOR_LIST = 10
def handle_cmd_list(expr: ListCommandExpr) -> ListActionResult:
"""Handle the "list" command, list the latest words in the vocabulary book
:param expr: The parsed list expression object.
:return: A list action result.
"""
word_store = get_word_store()
# Checking if the user has any words in the vocabulary book
if word_store.count() == 0:
console.print('No words in your vocabulary book, translate more and come back later!\n')
return ListActionResult(error='no_words')
if expr.all:
words = word_store.list_latest()
else:
words = word_store.list_latest(limit=expr.num or DEFAULT_WORDS_CNT_FOR_LIST)
word_samples = [obj.ws for obj in words]
console.print(format_words(word_samples))
return ListActionResult(words=word_samples)
class LiveStoryRenderer:
"""Render live story
:param live_display: Live display component from rich
"""
frames_per_second = 12
def __init__(self, live_display: Live) -> None:
self.spinner = Spinner('dots')
self.live_display = live_display
self._thread = None
self.live_info = LiveStoryInfo()
def run(self):
"""Start a background thread to update the live display, this thread is required
because the "loading" animation has to be rendered at a steady pace."""
        self._thread = Thread(target=self._run)
        self._thread.start()
def _run(self):
"""A loop function which render the translation result repeatedly."""
while not self.live_info.is_finished:
time.sleep(1 / self.frames_per_second)
self.live_display.update(self._gen_panel(self.live_info.story_text))
def block_until_finished(self):
"""Block until the live procedure has been finished"""
if self._thread:
self._thread.join()
def _gen_panel(self, story_text: str) -> Panel:
"""Generate the panel for displaying story."""
return Panel(
highlight_story_text(story_text.strip()),
title=Text('The AI is writing the story ') + self.spinner.render(time.time()),
)
class StoryCmd:
"""Command class for "story" action.
:param words_cnt: The number of words used for writing the story
"""
def __init__(self, words_cnt: int):
self.word_cnt = words_cnt
def prompt_view_words(self) -> bool:
return questionary.confirm("Do you want to view the meaning of these words?").ask()
def validate_result_word(word: WordSample, orig_text: str):
"""Check if a result word is valid before it can be put into vocabulary book"""
if get_word_store().exists(word.word):
raise WordInvalidForAdding('already in your vocabulary book')
if get_mastered_word_store().exists(word.word):
raise WordInvalidForAdding('already mastered')
if word.word not in orig_text.lower():
raise WordInvalidForAdding('not in the original text')
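# A tiny illustrative flow (the word and text are made up): validation raises
# WordInvalidForAdding, which callers turn into a user-facing error message.
def _validate_demo():
    sample = WordSample.make_empty("serendipity")
    try:
        validate_result_word(sample, "a moment of serendipity")
    except WordInvalidForAdding as exc:
        print(f"Unable to add: {exc}")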
def format_words(words: List[WordSample]) -> Table:
"""Format a list of words as a rich table"""
table = Table(title='Words Details', show_header=True)
table.add_column("Word")
table.add_column("Pronunciation")
table.add_column("Definition", overflow='fold', max_width=24)
table.add_column("Example sentence / Translation", overflow='fold')
for w in words:
table.add_row(
w.word,
w.pronunciation,
w.get_word_meaning_display(),
highlight_words(w.orig_text, [w.word])
+ '\n'
+ "[grey42]"
+ w.translated_text
+ "[/grey42]",
)
return table
def format_single_word(word: WordSample) -> Table:
"""Format a single word sample
    :param word: The word sample object
"""
table = Table(title="", show_header=True)
table.add_column("Word")
table.add_column("Pronunciation")
table.add_column("Definition", overflow='fold')
table.add_row(word.word, word.pronunciation, word.get_word_meaning_display())
return table
| [] |
2024-01-10 | piglei/ai-vocabulary-builder | tests~test_interactive.py | from unittest import mock
import pytest
from voc_builder.commands.parsers import ListCommandExpr
from voc_builder.exceptions import OpenAIServiceError, WordInvalidForAdding
from voc_builder.interactive import (
LastActionResult,
ManuallySelector,
TransActionResult,
handle_cmd_list,
handle_cmd_no,
handle_cmd_story,
handle_cmd_trans,
validate_result_word,
)
from voc_builder.models import WordChoice, WordSample
from voc_builder.store import get_mastered_word_store, get_word_store
# Valid OpenAI replies for text translating
OPENAI_REPLY_TRANS = '你好,世界。'
OPENAI_REPLY_WORD = 'word: world\nnormal_form: world\npronunciation: wɔːld\nmeaning: 世界'
class TestCmdTrans:
def test_input_length(self, tmp_path):
ret = handle_cmd_trans("world")
assert ret.error == 'input_length_invalid'
def test_known_words(self, tmp_path):
"""Check if known words from all sources works"""
with mock.patch(
'voc_builder.openai_svc.query_translation', side_effect=OpenAIServiceError()
):
ret = handle_cmd_trans("foo bar baz!")
assert ret.stored_to_voc_book is False
assert ret.error == 'openai_svc_error'
def test_openai_svc_error(self, tmp_path):
"""Test when there's an error calling the OpenAI API"""
with mock.patch(
'voc_builder.openai_svc.query_translation', side_effect=OpenAIServiceError()
):
ret = handle_cmd_trans("world foo bar baz!")
assert ret.error == 'openai_svc_error'
def test_normal(self, tmp_path):
"""Check if known words from all sources works"""
# Update known words from both sources
get_word_store().add(WordSample.make_empty('foo'))
get_mastered_word_store().add('baz')
with mock.patch('voc_builder.openai_svc.query_translation') as mocked_trans, mock.patch(
'voc_builder.openai_svc.query_get_word_choices'
) as mocked_word:
mocked_trans.return_value = OPENAI_REPLY_TRANS
mocked_word.return_value = OPENAI_REPLY_WORD
ret = handle_cmd_trans("world foo bar baz!")
assert mocked_word.call_args[0] == ("world foo bar baz!", {'foo', 'baz'})
assert ret.stored_to_voc_book is True
assert ret.word_sample and ret.word_sample.word == 'world'
assert get_word_store().exists('world') is True
class TestCmdNo:
@pytest.fixture
def has_last_added_word(self):
"""Simulate that a new word has been added through last action"""
word_foo = WordSample.make_empty('foo')
with mock.patch.object(
LastActionResult,
'trans_result',
TransActionResult(
input_text='foo bar',
stored_to_voc_book=True,
word_sample=word_foo,
),
):
yield
def test_condition_not_met(self):
ret = handle_cmd_no()
assert ret.error == 'last_trans_absent'
def test_word_invalid(self):
"""When the word is present but invalid for adding, also allows "no" action"""
word_foo = WordSample.make_empty('foo')
with mock.patch.object(
LastActionResult,
'trans_result',
TransActionResult(
input_text='foo bar',
stored_to_voc_book=False,
word_sample=word_foo,
invalid_for_adding=True,
),
), mock.patch(
'voc_builder.interactive.get_word_choices', side_effect=OpenAIServiceError()
):
ret = handle_cmd_no()
assert ret.error == 'openai_svc_error'
def test_openai_svc_error(self, has_last_added_word):
with mock.patch(
'voc_builder.interactive.get_word_choices', side_effect=OpenAIServiceError()
) as mocker:
ret = handle_cmd_no()
assert get_word_store().exists('foo') is False
assert ret.error == 'openai_svc_error'
mocker.assert_called_once_with('foo bar', set())
def test_no_choices_error(self, has_last_added_word):
with mock.patch('voc_builder.interactive.get_word_choices', return_value=[]):
ret = handle_cmd_no()
assert ret.error == 'no_choices_error'
def test_user_skip_error(self, has_last_added_word):
with mock.patch(
'voc_builder.interactive.get_word_choices',
return_value=[
WordChoice(word='bar', word_normal='bar', word_meaning='bar', pronunciation='')
],
), mock.patch(
'voc_builder.interactive.ManuallySelector.prompt_select_words',
return_value=[ManuallySelector.choice_skip],
):
ret = handle_cmd_no()
assert ret.error == 'user_skip'
def test_validate_error(self, has_last_added_word):
with mock.patch(
'voc_builder.interactive.get_word_choices',
return_value=[
WordChoice(word='bar', word_normal='bar', word_meaning='bar', pronunciation='')
],
), mock.patch(
'voc_builder.interactive.ManuallySelector.prompt_select_words',
return_value=['bar'],
):
get_mastered_word_store().add('bar')
ret = handle_cmd_no()
assert not ret.words
assert ret.failed_words[0].word == 'bar'
assert ret.error == 'failed_to_add'
def test_check_single(self, has_last_added_word):
with mock.patch(
'voc_builder.interactive.get_word_choices',
return_value=[
WordChoice(word='bar', word_normal='bar', word_meaning='bar', pronunciation='')
],
), mock.patch(
'voc_builder.interactive.ManuallySelector.prompt_select_words',
return_value=['bar'],
):
ret = handle_cmd_no()
assert get_word_store().exists('bar') is True
assert ret.words and ret.words[0].word == 'bar'
assert ret.stored_to_voc_book is True
assert ret.error == ''
assert LastActionResult.trans_result is None
def test_check_multi(self, has_last_added_word):
with mock.patch(
'voc_builder.interactive.get_word_choices',
return_value=[
WordChoice(word='bar', word_normal='bar', word_meaning='bar', pronunciation=''),
WordChoice(word='foo', word_normal='foo', word_meaning='foo', pronunciation=''),
],
), mock.patch(
'voc_builder.interactive.ManuallySelector.prompt_select_words',
return_value=['bar', 'foo'],
):
ret = handle_cmd_no()
assert get_word_store().exists('bar') is True
assert get_word_store().exists('foo') is True
assert ret.words and ret.words[0].word == 'bar'
assert ret.words and ret.words[1].word == 'foo'
assert ret.stored_to_voc_book is True
assert ret.error == ''
assert LastActionResult.trans_result is None
class TestCmdStory:
def test_not_enough_words(self):
ret = handle_cmd_story()
assert ret.error == 'not_enough_words'
def test_openai_svc_error(self):
get_word_store().add(WordSample.make_empty('foo'))
with mock.patch('voc_builder.openai_svc.query_story', side_effect=IOError()) as mocker:
ret = handle_cmd_story(1)
assert ret.error == 'openai_svc_error'
assert mocker.call_args[0][0] == ['foo']
def test_normal(self):
get_word_store().add(WordSample.make_empty('foo'))
with mock.patch(
'voc_builder.openai_svc.query_story', return_value='story text'
), mock.patch('voc_builder.interactive.StoryCmd.prompt_view_words', return_value=True):
ret = handle_cmd_story(1)
word = get_word_store().get('foo')
assert word is not None
assert word.wp.storied_cnt == 1
assert len(ret.words) == 1
assert ret.error == ''
class TestCmdList:
@pytest.fixture(autouse=True)
def _setup(self):
"""Set up the word store with 50 words"""
for i in range(50):
get_word_store().add(WordSample.make_empty(f'bar{i}'))
def test_normal(self):
ret = handle_cmd_list(ListCommandExpr(25))
for i in range(25):
assert ret.words[i].word == f'bar{i+25}'
assert len(ret.words) == 25
assert ret.error == ''
def test_all_words(self):
ret = handle_cmd_list(ListCommandExpr(all=True))
assert len(ret.words) == 50
assert ret.error == ''
def test_validate_result_word_misc(tmp_path):
word_store = get_word_store()
validate_result_word(WordSample.make_empty('foo'), 'foo bar')
with pytest.raises(WordInvalidForAdding, match='already in your vocabulary book'):
word_store.add(WordSample.make_empty('foo'))
validate_result_word(WordSample.make_empty('foo'), 'foo bar')
word_store.remove('foo')
with pytest.raises(WordInvalidForAdding, match='already mastered'):
get_mastered_word_store().add('foo')
validate_result_word(WordSample.make_empty('foo'), 'foo bar')
get_mastered_word_store().remove('foo')
with pytest.raises(WordInvalidForAdding, match='not in the original text'):
validate_result_word(WordSample.make_empty('foo'), 'bar baz')
| [] |
2024-01-10 | piglei/ai-vocabulary-builder | voc_builder~exceptions.py | class VocBuilderError(Exception):
"""Base exception type for aivoc."""
class OpenAIServiceError(VocBuilderError):
"""Error when calling OpenAI Services or parsing results from OpenAI"""
class WordInvalidForAdding(VocBuilderError):
"""Raised when a word sample is invalid for adding into vocabulary book"""
| [] |
2024-01-10 | ScottLL/Twitter_Business_Development_bot | twitterPost~GPT-3_twits.py | import os
import requests
import random
import tweepy
from PIL import Image
from io import BytesIO
import time
import openai
CRYPTOPANIC_API_KEY = os.getenv('CRYPTOPANIC_API_KEY')
# Authenticate to Twitter
auth = tweepy.OAuthHandler(os.getenv('CONSUMER_KEY'), os.getenv('CONSUMER_SECRET'))
auth.set_access_token(os.getenv('TOKEN'), os.getenv('TOKEN_SECRET'))
api = tweepy.API(auth)
# Authenticate to OpenAI
openai.api_key = os.getenv('OPENAI_API_KEY')
global_api_rate_delay = .2 # All API methods are rate limited per IP at 5req/sec.
def make_url(filter=None, kind=None, region=None, page=None):
"""Handle of URL variables for API POST."""
url = 'https://cryptopanic.com/api/v1/posts/?auth_token={}'.format(CRYPTOPANIC_API_KEY)
if kind is not None and kind in ['news', 'media']:
url += "&kind={}".format(kind)
filters = ['rising', 'hot', 'bullish', 'bearish', 'important', 'saved', 'lol']
if filter is not None and filter in filters:
url += "&filter={}".format(filter)
regions = ['en', 'de', 'es', 'fr', 'it', 'pt', 'ru'] # (English), (Deutsch), (Español), (Français), (Italiano), (Português), (Русский)--> Respectively
if region is not None and region in regions:
url += "®ion={}".format(region)
if page is not None:
url += "&page={}".format(page)
return url
def get_page_json(url=None):
"""
Get First Page.
Returns Json.
"""
time.sleep(global_api_rate_delay)
if not url:
url = "https://cryptopanic.com/api/v1/posts/?auth_token={}".format(CRYPTOPANIC_API_KEY)
page = requests.get(url)
data = page.json()
return data
def get_news():
"""Fetch news from CryptoPanic API."""
# Get top headlines from CryptoPanic
url = make_url(kind='news', filter='hot', region='en')
data = get_page_json(url)
articles = data['results']
if not articles:
return None, None, None
selected_article = random.choice(articles)
# Get the summary, original URL and image of the selected article
summary = selected_article['title']
metadata = selected_article.get('metadata', None)
if metadata is None:
news_url = selected_article['url']
else:
news_url = metadata.get('original_url', selected_article['url'])
image_url = selected_article.get('image', None)
if image_url:
# Download the image and convert to a PIL Image object
response = requests.get(image_url)
img = Image.open(BytesIO(response.content))
else:
img = None
return summary, news_url, img
def generate_tweet():
"""Generate tweet text and get news."""
# Get summary, URL and image from CryptoPanic API
summary, news_url, img = get_news()
if not summary:
return None, None, None
prompt = f"What's the latest news related to {summary.strip()} in the crypto world? give me a summary of the news in 150 characters or less, and add hashtags before the keywords at the begining of the sentense you generate. no space between sentense. no space between hashtags and sentense."
message_log = [{"role": "user","content": prompt}]
response = openai.ChatCompletion.create(
model = "gpt-3.5-turbo",
messages = message_log,
max_tokens=300,
stop=None,
temperature=0.7,
)
for choice in response.choices:
if "text" in choice:
return choice.text
tweet_text = response.choices[0].message.content
return tweet_text, news_url, img
def post_tweet():
"""Post tweet with image and URL."""
# Generate tweet text, news URL and image
tweet_text, news_url, img = generate_tweet()
# Post tweet
if img is not None:
# Save image locally
img_path = "image.jpg"
img.save(img_path)
# Post tweet with image
try:
api.update_with_media(
filename=img_path,
status=f"{tweet_text[:230]} {news_url}"
)
except tweepy.TweepError as e:
print(e)
return
# Remove image file
os.remove(img_path)
else:
# Post tweet without image
try:
api.update_status(f"{tweet_text[:280 - len(news_url) - 1]} {news_url}")
except tweepy.TweepError as e:
print(e)
return
import time
if __name__ == "__main__":
post_tweet()
# time.sleep(1800) # wait for 30 minutes before posting again
| [
"f\"What's the latest news related to {summary.strip()} in the crypto world? give me a summary of the news in 150 characters or less, and add hashtags before the keywords at the begining of the sentense you generate. no space between sentense. no space between hashtags and sentense."
] |
2024-01-10 | jsemrau/CodeClinic-Autonomous | 20230911_StreamlitChatGPTClone.py | import openai
import streamlit as st
import os
st.title("ChatGPT-like clone")
openai.api_key = st.secrets["OPENAI_API_KEY"]
if "openai_model" not in st.session_state:
st.session_state["openai_model"] = "gpt-3.5-turbo"
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
if prompt := st.chat_input("What is up?"):
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
for response in openai.ChatCompletion.create(
model=st.session_state["openai_model"],
messages=[
{"role": m["role"], "content": m["content"]}
for m in st.session_state.messages
],
stream=True,
):
full_response += response.choices[0].delta.get("content", "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
st.session_state.messages.append({"role": "assistant", "content": full_response}) | [
"content"
] |
2024-01-10 | SG-Akshay10/SportsMechanics | CommentryGeneration.py | #!/usr/bin/env python
# coding: utf-8
# # Commentry Generation
# In[113]:
import numpy as np
import pandas as pd
import tensorflow as tf
import random
# In[114]:
#!pip install openai
#!python -m pip install cohere
# In[115]:
import cohere
co = cohere.Client('JY6As4HcMbxbXz6QJegvMAfNWflhvT0CoNxeN8pZ')
# In[116]:
df = pd.read_csv(r"deliveries.csv")
# In[117]:
df1 = df[df['match_id'].between(1, 20)]
# In[118]:
selected_columns = ['batsman', 'bowler', 'total_runs']
df2 = df[selected_columns]
# In[129]:
commentaries = []
i = random.randint(0,20)
print(df2.batsman[i],df2.bowler[i],df2.total_runs[i])
response = co.generate(
prompt=f"Generate a commentary of the following cricket ball in Ravi Shastri style make sure runs scored are the important parameter: Batsman: {df2.batsman[i]}, Bowler: {df2.bowler[i]}, runs scored: 6. Make sure it is around 30 words length",
)
commentaries.append(response[0].text)
# In[130]:
for i in range(len(commentaries)):
print(commentaries[0])
print("\n")
| [] |
2024-01-10 | wshao12/DocsGPT | scripts~ingest_rst_sphinx.py | import os
import pickle
import dotenv
import tiktoken
import sys
import faiss
import shutil
from pathlib import Path
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from sphinx.cmd.build import main as sphinx_main
from argparse import ArgumentParser
def convert_rst_to_txt(src_dir, dst_dir):
# Check if the source directory exists
if not os.path.exists(src_dir):
raise Exception("Source directory does not exist")
# Walk through the source directory
for root, dirs, files in os.walk(src_dir):
for file in files:
# Check if the file has .rst extension
if file.endswith(".rst"):
# Construct the full path of the file
src_file = os.path.join(root, file.replace(".rst", ""))
# Convert the .rst file to .txt file using sphinx-build
args = f". -b text -D extensions=sphinx.ext.autodoc " \
f"-D master_doc={src_file} " \
f"-D source_suffix=.rst " \
f"-C {dst_dir} "
sphinx_main(args.split())
def num_tokens_from_string(string: str, encoding_name: str) -> int:
# Function to convert string to tokens and estimate user cost.
encoding = tiktoken.get_encoding(encoding_name)
num_tokens = len(encoding.encode(string))
total_price = ((num_tokens/1000) * 0.0004)
return num_tokens, total_price
def call_openai_api():
# Function to create a vector store from the documents and save it to disk.
store = FAISS.from_texts(docs, OpenAIEmbeddings(), metadatas=metadatas)
faiss.write_index(store.index, "docs.index")
store.index = None
with open("faiss_store.pkl", "wb") as f:
pickle.dump(store, f)
def get_user_permission():
# Function to ask user permission to call the OpenAI api and spend their OpenAI funds.
# Here we convert the docs list to a string and calculate the number of OpenAI tokens the string represents.
docs_content = (" ".join(docs))
tokens, total_price = num_tokens_from_string(string=docs_content, encoding_name="cl100k_base")
# Here we print the number of tokens and the approx user cost with some visually appealing formatting.
print(f"Number of Tokens = {format(tokens, ',d')}")
print(f"Approx Cost = ${format(total_price, ',.2f')}")
#Here we check for user permission before calling the API.
user_input = input("Price Okay? (Y/N) \n").lower()
if user_input == "y":
call_openai_api()
elif user_input == "":
call_openai_api()
else:
print("The API was not called. No money was spent.")
ap = ArgumentParser("Script for training DocsGPT on Sphinx documentation")
ap.add_argument("-i", "--inputs",
type=str,
default="inputs",
help="Directory containing documentation files")
args = ap.parse_args()
#Load .env file
dotenv.load_dotenv()
#Directory to vector
src_dir = args.inputs
dst_dir = "tmp"
convert_rst_to_txt(src_dir, dst_dir)
# Here we load in the data in the format that Notion exports it in.
ps = list(Path("tmp/"+ src_dir).glob("**/*.txt"))
# parse all child directories
data = []
sources = []
for p in ps:
with open(p) as f:
data.append(f.read())
sources.append(p)
# Here we split the documents, as needed, into smaller chunks.
# We do this due to the context limits of the LLMs.
text_splitter = CharacterTextSplitter(chunk_size=1500, separator="\n")
docs = []
metadatas = []
for i, d in enumerate(data):
splits = text_splitter.split_text(d)
docs.extend(splits)
metadatas.extend([{"source": sources[i]}] * len(splits))
# Here we check for command line arguments for bot calls.
# If no argument exists or the permission_bypass_flag argument is not '-y',
# user permission is requested to call the API.
if len(sys.argv) > 1:
permission_bypass_flag = sys.argv[1]
if permission_bypass_flag == '-y':
call_openai_api()
else:
get_user_permission()
else:
get_user_permission()
# Delete tmp folder
# Commented out for now
shutil.rmtree(dst_dir)
| [] |
2024-01-10 | rmsander/marl_ppo | ppo~load_policies.py | """ Loads saved PPO policies that can be used for evaluation. Separate
evaluation from the main Elo Rating framework used to evaluate Dreamer and PPO
agents. """
# Native Python imports
import os
from datetime import datetime
import argparse
# TensorFlow and tf-agents
from tf_agents.environments import gym_wrapper, tf_py_environment
import tf_agents.trajectories.time_step as ts
# Other external packages
import numpy as np
import tensorflow as tf
# Custom packages
from utils import video_summary, encode_gif, ObservationWrapper
from envs.multi_car_racing import MultiCarRacing
def load_saved_policies(eval_model_path=None, train_model_path=None):
""" Loads saved PPO policies from the local file system.
Loads saved policies for an agent, depending on what paths are
specified in the function call. This method sets the agent's
policies to be the new policies that are loaded below.
Arguments:
eval_model_path (str): A file path (relative or absolute) to the
directory containing the policy that will be loaded as the
evaluation policy of the agent.
train_model_path (str): A file path (relative or absolute) to the
directory containing the policy that will be loaded as the
training policy of the agent.
Returns:
collect_policy (TF Agents Policy): A tf-agents policy object
corresponding to the training/exploratory policy of the PPO agent.
eval_policy (TF Agents Policy): A tf-agents policy object
corresponding to the evaluation/greedy policy of the PPO agent.
"""
# Load evaluation and/or training policies from path
if eval_model_path is not None:
eval_policy = tf.saved_model.load(eval_model_path)
print("Loading evaluation policy from: {}".format(eval_model_path))
if train_model_path is not None:
collect_policy = tf.saved_model.load(train_model_path)
print("Loading training policy from: {}".format(train_model_path))
return collect_policy, eval_policy
def make_env():
""" Function for creating the TfPyEnvironment from OpenAI Gym environment.
"""
# Create wrapped environment
gym_eval_env = MultiCarRacing()
gym_eval_env.observation_space.dtype = np.float32 # For Conv2D data input
# Now create Python environment from gym env
py_eval_env = gym_wrapper.GymWrapper(gym_eval_env) # Gym --> Py
# Create training and evaluation TensorFlow environments
tf_eval_env = tf_py_environment.TFPyEnvironment(py_eval_env) # Py --> Tf
# Display environment specs
print("Observation spec: {} \n".format(tf_eval_env.observation_spec()))
print("Action spec: {} \n".format(tf_eval_env.action_spec()))
print("Time step spec: {} \n".format(tf_eval_env.time_step_spec()))
return tf_eval_env
def parse_args():
"""Command-line argument parser."""
# Create parser object
parser = argparse.ArgumentParser()
# Add path arguments
parser.add_argument("-p", "--path_to_policies", type=str, default="./",
help="Path to policies for evaluation.")
parser.add_argument("-exp_name", "--experiment_name", type=str,
default="Evaluation episode",
help="The name of the evaluation experiment.")
return parser.parse_args()
def main():
""" Main function for running evaluation.
"""
# Parse arguments
args = parse_args()
# Get paths for policies
path_to_policies = args.path_to_policies
EVAL_MODEL_PATH = path_to_policies
TRAIN_MODEL_PATH = path_to_policies
EXPERIMENT_NAME = args.experiment_name
LOG_DIR = os.path.join("exp_eval", EXPERIMENT_NAME)
if not os.path.exists(LOG_DIR):
os.makedirs(LOG_DIR, exist_ok=True)
train_policy, eval_policy = load_saved_policies(train_model_path=TRAIN_MODEL_PATH,
eval_model_path=EVAL_MODEL_PATH)
if __name__ == "__main__":
main()
| [] |
2024-01-10 | jomininini/chatgpt_HKSTP | apps~chatbot-kickstarter~database.py | import pandas as pd
import numpy as np
import openai
from redis import Redis
from redis.commands.search.field import VectorField
from redis.commands.search.field import TextField, NumericField
from redis.commands.search.query import Query
from config import EMBEDDINGS_MODEL, PREFIX, VECTOR_FIELD_NAME
# Get a Redis connection
def get_redis_connection(host='localhost',port='6379',db=0):
r = Redis(host=host, port=port, db=db,decode_responses=False)
return r
# Create a Redis index to hold our data
def create_hnsw_index (redis_conn,vector_field_name,vector_dimensions=1536, distance_metric='COSINE'):
redis_conn.ft().create_index([
VectorField(vector_field_name, "HNSW", {"TYPE": "FLOAT32", "DIM": vector_dimensions, "DISTANCE_METRIC": distance_metric}),
TextField("filename"),
TextField("text_chunk"),
NumericField("file_chunk_index")
])
# Create a Redis pipeline to load all the vectors and their metadata
def load_vectors(client:Redis, input_list, vector_field_name):
p = client.pipeline(transaction=False)
for text in input_list:
#hash key
key=f"{PREFIX}:{text['id']}"
#hash values
item_metadata = text['metadata']
#
item_keywords_vector = np.array(text['vector'],dtype= 'float32').tobytes()
item_metadata[vector_field_name]=item_keywords_vector
# HSET
p.hset(key,mapping=item_metadata)
p.execute()
# Make query to Redis
def query_redis(redis_conn,query,index_name, top_k=6):
## Creates embedding vector from user query
embedded_query = np.array(openai.Embedding.create(
input=query,
model=EMBEDDINGS_MODEL,
)["data"][0]['embedding'], dtype=np.float32).tobytes()
#prepare the query
q = Query(f'*=>[KNN {top_k} @{VECTOR_FIELD_NAME} $vec_param AS vector_score]').sort_by('vector_score').paging(0,top_k).return_fields('vector_score','filename','text_chunk','text_chunk_index').dialect(2)
params_dict = {"vec_param": embedded_query}
#Execute the query
results = redis_conn.ft(index_name).search(q, query_params = params_dict)
return results
# Get mapped documents from Weaviate results
def get_redis_results(redis_conn,query,index_name):
# Get most relevant documents from Redis
query_result = query_redis(redis_conn,query,index_name)
# Extract info into a list
query_result_list = []
for i, result in enumerate(query_result.docs):
result_order = i
text = result.text_chunk
score = result.vector_score
query_result_list.append((result_order,text,score))
# Display result as a DataFrame for ease of us
result_df = pd.DataFrame(query_result_list)
result_df.columns = ['id','result','certainty']
return result_df | [] |
2024-01-10 | peter-gelgor/3rd-Year | side%20stuff~gpt-3.py | import os
import openai
openai.api_key = "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"
start_sequence = "\nAI:"
restart_sequence = "\nHuman: "
def conversation(humanPrompts, aiPrompts):
if (len(humanPrompts) == 0):
humanPrompts.append(input("please put the first human question: "))
if (len(aiPrompts) == 0):
aiPrompts.append(input("please put the first ai response for training: "))
if (len(humanPrompts) == 1):
humanPrompts.append(input("please put the second human question: "))
if (len(humanPrompts) == len(aiPrompts)):
humanPrompts.append(input("please put the next human question: "))
tinderPrompt = "The following is a conversation with a potential romantic interest. The AI in question is creative, clever, inquisitive, and very friendly. This AI will try to get the human to go on a date with them\n\n"
for i in range(len(humanPrompts)):
if (i == len(humanPrompts) - 1):
tinderPrompt += "Human:" + humanPrompts[i] + "\n" + "AI:"
else:
tinderPrompt += "Human:" + humanPrompts[i] + "\n" + "AI:" + aiPrompts[i] + "\n"
response = openai.Completion.create(
engine="davinci",
prompt=tinderPrompt,
temperature=0.5,
max_tokens=150,
top_p=1,
frequency_penalty=0,
presence_penalty=0.6,
stop=["\n", " Human:", " AI:"]
)
aiResponse = response["choices"][0]["text"]
print("AI: " + aiResponse)
# print(len(aiPrompts))
aiPrompts.append(aiResponse)
# print(len(aiPrompts))
conversation(humanPrompts, aiPrompts)
conversation(
["What do you think of my skateboard?", "Thank you!! Ahh no that sucks come borrow mine :)"],
["Sick skateboard!! Mine got totalled by a car a few days ago :("]) | [
"\n",
"Human:",
"The following is a conversation with a potential romantic interest. The AI in question is creative, clever, inquisitive, and very friendly. This AI will try to get the human to go on a date with them\n\n"
] |
2024-01-10 | AmbroTall/webscrapping | twitterBots~bot~chat_gpt.py | import openai
openai.api_key = "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"
content = "Hello assistant, I want you to help me monetize my tweeter account by basically generating impressions and attracting more followers to my account."
response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=[{"role":"user", "content": content}])
print(response) | [
"Hello assistant, I want you to help me monetize my tweeter account by basically generating impressions and attracting more followers to my account."
] |
2024-01-10 | Amiking/FullTclash | utils~collector.py | import asyncio
import json
import ssl
import time
import aiohttp
import async_timeout
import websockets
from urllib.parse import quote
from aiohttp.client_exceptions import ClientConnectorError, ContentTypeError
from aiohttp_socks import ProxyConnector, ProxyConnectionError
from loguru import logger
from utils import cleaner
"""
这是整个项目最为核心的功能模块之一 —> 采集器。它负责从网络上采集想要的数据。到现在,已经设计了:
1、采集器基类(BaseCollector)。一个简单的采集器示例。
2、IP采集器(IPCollector)。负责采集ip的相关信息
3、订阅采集器(SubCollector)。负责从网络上获取订阅文件
4、采集器(Collector)。负责各种流媒体解锁信息的采集
5、一个批量测试延迟的函数,基于clash core
需要注意的是,这些类/函数仅作采集工作,并不负责清洗。我们需要将拿到的数据给cleaner类清洗。
** 开发建议 **
如果你想自己添加一个流媒体测试项,建议继承Collector类,重写类中的create_tasks方法,以及自定义自己的流媒体测试函数 fetch_XXX()
"""
config = cleaner.ConfigManager()
addon = cleaner.addon
media_items = config.get_media_item()
proxies = config.get_proxy() # 代理
def reload_config(media: list = None):
global config, proxies, media_items
config.reload(issave=False)
proxies = config.get_proxy()
media_items = config.get_media_item()
if media is not None:
media_items = media
class BaseCollector:
def __init__(self):
self._headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) ' +
'Chrome/102.0.5005.63 Safari/537.36'}
async def status(self, url, proxy=None):
with async_timeout.timeout(10):
async with aiohttp.ClientSession(headers=self._headers) as session:
async with session.get(url, proxy=proxy) as response:
return response.status
async def fetch(self, url, proxy=None):
with async_timeout.timeout(10):
async with aiohttp.ClientSession(headers=self._headers) as session:
async with session.get(url, proxy=proxy) as response:
return await response.content.read()
class IPCollector:
"""
GEOIP 测试采集类
"""
def __init__(self):
self.tasks = []
self._headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/102.0.5005.63 Safari/537.36'}
self.style = config.config.get('geoip-api', 'ip-api.com') # api来源风格 这个值取二级域名
self.key = config.config.get('geoip-key', '')
self.get_payload = ""
self.url = self.get_style_url()
def get_style_url(self):
if self.style == "ip-api.com":
return "http://ip-api.com/json/"
elif self.style == "ip.sb":
return "https://api.ip.sb/geoip/"
elif self.style == "ipleak.net":
return "https://ipv4.ipleak.net/json/"
elif self.style == "ipdata.co":
self.get_payload = f"?api-key={self.key}"
return "https://api.ipdata.co/"
elif self.style == "ipapi.co":
self.get_payload = "/json/"
return "https://ipapi.co/"
def create_tasks(self, session: aiohttp.ClientSession, hosts: list = None, proxy=None):
"""
创建采集任务
:param session:
:param hosts: 主机信息,因查询ip的api限制,无法查询域名,请先转换成ip
:param proxy: 代理
:return:
"""
tasks = []
if hosts is None:
task = asyncio.create_task(self.fetch(session, proxy=proxy))
tasks.append(task)
elif type(hosts).__name__ == "str":
tasks.append(asyncio.create_task(self.fetch(session, proxy=proxy, host=hosts)))
else:
for ip in hosts:
task = asyncio.create_task(self.fetch(session, proxy=proxy, host=ip))
tasks.append(task)
self.tasks.extend(tasks)
async def batch(self, proxyhost: list, proxyport: list):
try:
session_pool = []
length = min(len(proxyhost), len(proxyport))
for i in range(length):
conn = ProxyConnector(host=proxyhost[i], port=proxyport[i], limit=0)
session = aiohttp.ClientSession(connector=conn)
session_pool.append(session)
for i in range(length):
self.create_tasks(session=session_pool[i], hosts=None, proxy=None)
resdata = await self.start()
if resdata is None:
resdata = []
for r in range(len(resdata)):
if resdata[r] is None:
resdata[r] = {}
for i in range(length):
await session_pool[i].close()
return resdata
except Exception as e:
logger.error(str(e))
return []
async def start(self):
"""
启动ip信息采集,并发操作,启动之前请务必通过self.create_tasks创建任务,否则只会返回空
:return: list | None
"""
try:
if self.tasks:
done = await asyncio.gather(*self.tasks)
return done
else:
return None
except Exception as e:
logger.error(e)
return None
async def fetch(self, session: aiohttp.ClientSession, proxy=None, host: str = None, reconnection=1):
"""
获取ip地址信息
:param session:
:param proxy: 代理
:param host: 一个v4地址/v6地址
:param reconnection: 重连次数
:return: json数据
"""
if host == "N/A":
return {}
try:
if host:
resp = await session.get(self.url + host + self.get_payload, proxy=proxy, timeout=12)
ipdata = await resp.json()
return ipdata if ipdata else None
else:
resp = await session.get(self.url + self.get_payload, proxy=proxy, timeout=12)
ipdata = await resp.json()
return ipdata if ipdata else None
except ClientConnectorError as c:
logger.warning("ip查询请求发生错误:" + str(c))
if reconnection != 0:
await self.fetch(session=session, proxy=proxy, host=host, reconnection=reconnection - 1)
else:
return None
except asyncio.exceptions.TimeoutError:
if reconnection != 0:
logger.warning("ip查询请求超时,正在重新发送请求......")
await self.fetch(session=session, proxy=proxy, host=host, reconnection=reconnection - 1)
else:
return None
except ContentTypeError:
return None
except Exception as e:
logger.info(str(e))
return None
class SubCollector(BaseCollector):
"""
订阅采集器,默认采集clash配置文件
"""
@logger.catch()
def __init__(self, suburl: str, include: str = '', exclude: str = ''):
"""
这里在初始化中读取了subconverter的相关配置,但是由于sunconverter无人维护,容易出问题,因此之后我不会再维护此功能。也就是在下载订阅时
订阅转换
"""
super().__init__()
self.text = None
self._headers = {'User-Agent': 'clash'} # 这个请求头是获取流量信息的关键
self.subconvertor = config.config.get('subconvertor', {})
self.cvt_enable = self.subconvertor.get('enable', False)
self.url = suburl
self.include = include
self.exclude = exclude
self.codeurl = quote(suburl, encoding='utf-8')
self.code_include = quote(include, encoding='utf-8')
self.code_exclude = quote(exclude, encoding='utf-8')
self.cvt_host = str(self.subconvertor.get('host', '127.0.0.1:25500'))
self.cvt_url = f"http://{self.cvt_host}/sub?target=clash&new_name=true&url={self.codeurl}" \
+ f"&include={self.code_include}&exclude={self.code_exclude}"
self.sub_remote_config = self.subconvertor.get('remoteconfig', '')
self.config_include = quote(self.subconvertor.get('include', ''), encoding='utf-8') # 这两个
self.config_exclude = quote(self.subconvertor.get('exclude', ''), encoding='utf-8')
# print(f"配置文件过滤,包含:{self.config_include} 排除:{self.config_exclude}")
if self.config_include or self.config_exclude:
self.cvt_url = f"http://{self.cvt_host}/sub?target=clash&new_name=true&url={self.cvt_url}" \
+ f"&include={self.code_include}&exclude={self.code_exclude}"
if self.sub_remote_config:
self.sub_remote_config = quote(self.sub_remote_config, encoding='utf-8')
self.cvt_url = self.cvt_url + "&config=" + self.sub_remote_config
async def start(self, proxy=None):
try:
with async_timeout.timeout(20):
async with aiohttp.ClientSession(headers=self._headers) as session:
async with session.get(self.url, proxy=proxy) as response:
return response
except Exception as e:
logger.error(e)
return None
@logger.catch()
async def getSubTraffic(self, proxy=proxies):
"""
获取订阅内的流量
:return: str
"""
_headers = {'User-Agent': 'clash'}
try:
async with aiohttp.ClientSession(headers=_headers) as session:
async with session.get(self.url, proxy=proxy, timeout=20) as response:
info = response.headers.get('subscription-userinfo', "")
info = info.split(';')
info2 = {'upload': 0, 'download': 0, 'total': 0, 'expire': 0}
for i in info:
try:
i1 = i.strip().split('=')
info2[i1[0]] = float(i1[1]) if i1[1] else 0
except IndexError:
pass
logger.info(str(info2))
traffic_up = info2.get('upload', 0) / 1024 / 1024 / 1024
traffic_download = info2.get('download', 0) / 1024 / 1024 / 1024
traffic_use = traffic_up + traffic_download
traffic_total = info2.get('total', 0) / 1024 / 1024 / 1024
expire_time = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime(info2.get('expire', time.time())))
if expire_time.startswith('1970') and traffic_total and traffic_use:
expire_time = '长期有效'
return [traffic_up, traffic_download, traffic_use, traffic_total, expire_time]
except asyncio.exceptions.TimeoutError:
logger.info("获取订阅超时")
return []
except ClientConnectorError as c:
logger.warning(c)
return []
async def getSubConfig(self, save_path: str = "./", proxy=proxies, inmemory: bool = False):
"""
获取订阅配置文件
:param save_path: 订阅保存路径
:param proxy:
:param inmemory: 直接返回数据到内存,不保存到本地
:return: 获得一个文件: sub.yaml, bool : True or False
"""
_headers = {'User-Agent': 'clash-verge'}
# suburl = self.url
suburl = self.cvt_url if self.cvt_enable else self.url
cvt_text = r"subconvertor状态: {}".format("已启用" if self.cvt_enable else "未启用")
logger.info(cvt_text)
try:
async with aiohttp.ClientSession(headers=_headers) as session:
async with session.get(suburl, proxy=proxy, timeout=20) as response:
if response.status == 200:
data = b''
if inmemory:
while True:
chunk = await response.content.read()
if not chunk:
logger.info("获取订阅成功")
break
data += chunk
return data
with open(save_path, 'wb+') as fd:
while True:
chunk = await response.content.read()
if not chunk:
logger.info("获取订阅成功")
break
fd.write(chunk)
return True
return False
except asyncio.exceptions.TimeoutError:
logger.info("获取订阅超时")
return False
except ClientConnectorError as c:
logger.warning(c)
return False
class Miaospeed:
SlaveRequestMatrixType = ['TEST_PING_RTT', 'SPEED_AVERAGE', 'UDP_TYPE', 'SPEED_PER_SECOND', 'SPEED_MAX',
'GEOIP_INBOUND', 'GEOIP_OUTBOUND',
'TEST_SCRIPT', 'TEST_PING_CONN', 'TEST_PING_RTT']
SlaveRequestMatrixEntry = [{'Type': "SPEED_AVERAGE",
'Params': str({1})},
{'Type': "SPEED_MAX",
'Params': str({"Name": "test01", "Address": "127.0.0.1:1111", "Type": "Socks5"})},
{'Type': "SPEED_PER_SECOND",
'Params': str({"Name": "test01", "Address": "127.0.0.1:1111", "Type": "Socks5"})},
{'Type': "UDP_TYPE",
'Params': str({"Name": "test01", "Address": "127.0.0.1:1111", "Type": "Socks5"})},
]
SlaveRequestBasics = {'ID': '114514',
'Slave': '114514miao',
'SlaveName': 'miao1',
'Invoker': 'FullTclash',
'Version': '1.0'}
SlaveRequestOptions = {'Filter': '',
'Matrices': SlaveRequestMatrixEntry}
SlaveRequestConfigs = {
'DownloadURL': 'https://dl.google.com/dl/android/studio/install/3.4.1.0/' +
'android-studio-ide-183.5522156-windows.exe',
'DownloadDuration': 10,
'DownloadThreading': 4,
'PingAverageOver': 3,
'PingAddress': 'http://www.gstatic.com/generate_204',
'TaskThreading': 4,
'TaskRetry': 2,
'DNSServers': ['119.29.29.29'],
'TaskTimeout': 5,
'Scripts': []}
VendorType = 'Clash'
start_token = ''
SlaveRequest = {'Basics': SlaveRequestBasics,
'Options': SlaveRequestOptions,
'Configs': SlaveRequestConfigs,
'Vendor': VendorType,
'RandomSequence': 'str1',
'Challenge': start_token}
def __init__(self, proxyconfig: list, host: str = '127.0.0.1', port: int = 1112, ):
"""
初始化miaospeed
:param proxyconfig: 订阅配置的路径
"""
self.host = host
self.port = port
self.nodes = proxyconfig
self.slaveRequestNode = [{'Name': 'test01', 'Payload': str(i)} for i in self.nodes]
self.SlaveRequest['Nodes'] = self.slaveRequestNode
async def start(self):
start_time = time.strftime("%Y-%m-%dT%H-%M-%S", time.localtime())
info = []
resdata = {start_time: {}}
from async_timeout import timeout
try:
async with timeout(len(self.nodes) * 10 + 1):
async with websockets.connect(f'ws://{self.host}:{self.port}') as websocket:
payload = json.dumps(self.SlaveRequest)
await websocket.send(payload)
num = 0
while True:
response_str = await websocket.recv()
num += 1
logger.info(f"已接收第{num}次结果")
res1 = json.loads(response_str)
info.append(res1)
except asyncio.TimeoutError:
logger.info("本次测试已完成")
except KeyboardInterrupt:
pass
finally:
resdata.update({start_time: info})
return resdata, start_time
class Collector:
def __init__(self):
self.session = None
self.tasks = []
self._headers = {
'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/106.0.0.0 Safari/537.36"}
self._headers_json = {
'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/106.0.0.0 Safari/537.36", "Content-Type": 'application/json'}
self.netflixurl1 = "https://www.netflix.com/title/70242311"
self.netflixurl2 = "https://www.netflix.com/title/70143836"
self.ipurl = "https://api.ip.sb/geoip"
self.youtubeurl = "https://www.youtube.com/premium"
self.youtubeHeaders = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) ' +
'Chrome/80.0.3987.87 Safari/537.36',
'Accept-Language': 'en'
}
self.youtubeCookie = {
'YSC': 'BiCUU3-5Gdk',
'CONSENT': 'YES+cb.20220301-11-p0.en+FX+700',
'GPS': '1',
'VISITOR_INFO1_LIVE': '4VwPMkB7W5A',
'_gcl_au': '1.1.1809531354.1646633279',
'PREF': 'tz=Asia.Shanghai'
}
self.info = {}
self.disneyurl1 = "https://www.disneyplus.com/"
self.disneyurl2 = "https://global.edge.bamgrid.com/token"
self.daznurl = "https://startup.core.indazn.com/misl/v5/Startup"
@logger.catch
def create_tasks(self, session: aiohttp.ClientSession, proxy=None):
"""
创建并发请求任务,通过media_item动态创建
:param session:
:param proxy: 代理
:return: tasks: []
"""
items = media_items
try:
if len(items):
for item in items:
i = item
if i in addon.script:
task = addon.script[i][0]
self.tasks.append(task(self, session, proxy=proxy))
continue
if i == "Youtube":
task4 = asyncio.create_task(self.fetch_youtube(session, proxy=proxy))
self.tasks.append(task4)
elif i == "Disney" or i == "Disney+":
task5 = asyncio.create_task(self.fetch_dis(session, proxy=proxy))
self.tasks.append(task5)
elif i == "Netflix":
from addons.unlockTest import netflix
self.tasks.append(netflix.task(self, session, proxy=proxy))
elif i == "TVB":
from addons.unlockTest import tvb
self.tasks.append(tvb.task(self, session, proxy=proxy))
elif i == "Viu":
from addons.unlockTest import viu
self.tasks.append(viu.task(self, session, proxy=proxy))
elif i == "Iprisk" or i == "落地IP风险":
from addons.unlockTest import ip_risk
self.tasks.append(ip_risk.task(self, session, proxy=proxy))
elif i == "steam货币":
from addons.unlockTest import steam
self.tasks.append(steam.task(self, session, proxy=proxy))
elif i == "维基百科":
from addons.unlockTest import wikipedia
self.tasks.append(wikipedia.task(self, session, proxy=proxy))
elif item == "OpenAI":
from addons.unlockTest import openai
self.tasks.append(openai.task(self, session, proxy=proxy))
else:
pass
return self.tasks
except Exception as e:
logger.error(e)
return []
async def fetch_ip(self, session: aiohttp.ClientSession, proxy=None):
"""
ip查询
:param session:
:param proxy:
:return:
"""
try:
res = await session.get(self.ipurl, proxy=proxy, timeout=5)
logger.info("ip查询状态:" + str(res.status))
if res.status != 200:
self.info['ip'] = None
self.info['netflix1'] = None
self.info['netflix2'] = None
self.info['youtube'] = None
self.info['ne_status_code1'] = None
self.info['ne_status_code2'] = None
logger.warning("无法查询到代理ip")
return self.info
else:
self.info['ip'] = await res.json()
except ClientConnectorError as c:
logger.warning(c)
self.info['ip'] = None
return self.info
except Exception as e:
logger.error(str(e))
async def fetch_youtube(self, session: aiohttp.ClientSession, proxy=None, reconnection=2):
"""
Youtube解锁检测
:param reconnection:
:param session:
:param proxy:
:return:
"""
try:
youtube = await session.get(self.youtubeurl, proxy=proxy, timeout=5, headers=self.youtubeHeaders,
cookies=self.youtubeCookie)
if youtube.status is not None:
self.info['youtube'] = await youtube.text()
self.info['youtube_status_code'] = youtube.status
logger.info("Youtube 成功访问")
else:
self.info['youtube'] = None
except ClientConnectorError as c:
logger.warning("Youtube请求发生错误:" + str(c))
if reconnection != 0:
await self.fetch_youtube(session=session, proxy=proxy, reconnection=reconnection - 1)
except asyncio.exceptions.TimeoutError:
logger.warning("Youtube请求超时,正在重新发送请求......")
if reconnection != 0:
await self.fetch_youtube(session=session, proxy=proxy, reconnection=reconnection - 1)
except ProxyConnectionError as p:
logger.warning("似乎目标端口未开启监听")
logger.warning(str(p))
async def fetch_dis(self, session: aiohttp.ClientSession, proxy=None, reconnection=2):
"""
Disney+ 解锁检测
:param reconnection:
:param session:
:param proxy:
:return:
"""
try:
if reconnection == 0:
dis1 = await session.get(self.disneyurl1, proxy=proxy, timeout=5)
text1 = await dis1.text()
dis1.close()
if dis1.status == 200:
# text1 = await dis1.text()
index = str(text1).find('Region', 0, 400)
region = text1[index + 8:index + 10]
if index == -1:
self.info['disney'] = "待解锁"
elif dis1.history:
if 300 <= dis1.history[0].status <= 399:
self.info['disney'] = "待解({})".format(region)
else:
self.info['disney'] = "未知"
else:
self.info['disney'] = "解锁({})".format(region)
logger.info("disney+ 成功访问(轻检测,检测结果准确率下降)")
elif 399 < dis1.status:
self.info['disney'] = "N/A"
logger.info(f"disney+ 访问错误 {dis1.status}")
else:
self.info['disney'] = "失败"
else:
dis1 = await session.get(self.disneyurl1, proxy=proxy, timeout=5)
text1 = await dis1.text()
dis1.close()
dis2 = await session.get(self.disneyurl2, proxy=proxy, timeout=5)
if dis1.status == 200 and dis2.status != 403:
# text1 = await dis1.text()
index = str(text1).find('Region', 0, 400)
region = text1[index + 8:index + 10]
if index == -1:
self.info['disney'] = "待解锁"
elif dis1.history:
if 300 <= dis1.history[0].status <= 399:
self.info['disney'] = "待解({})".format(region)
else:
self.info['disney'] = "未知"
else:
self.info['disney'] = "解锁({})".format(region)
else:
self.info['disney'] = "失败"
logger.info("disney+ 成功访问")
dis2.close()
except ssl.SSLError:
if reconnection != 0:
await self.fetch_dis(session=session, proxy=proxy, reconnection=reconnection - 1)
else:
self.info['disney'] = '证书错误'
except ClientConnectorError as c:
logger.warning("disney+请求发生错误:" + str(c))
if reconnection != 0:
await self.fetch_dis(session=session, proxy=proxy, reconnection=reconnection - 1)
else:
self.info['disney'] = '连接错误'
except asyncio.exceptions.TimeoutError:
logger.warning("disney+请求超时,正在重新发送请求......")
if reconnection != 0:
await self.fetch_dis(session=session, proxy=proxy, reconnection=reconnection - 1)
except ConnectionResetError:
self.info['disney'] = '未知'
except ProxyConnectionError as p:
logger.warning("似乎目标端口未开启监听")
logger.warning(str(p))
async def start(self, host: str, port: int, proxy=None):
"""
启动采集器,采用并发操作
:param host:
:param port:
:param proxy: using proxy
:return: all content
"""
try:
conn = ProxyConnector(host=host, port=port, limit=0)
session = aiohttp.ClientSession(connector=conn, headers=self._headers)
tasks = self.create_tasks(session, proxy=proxy)
if tasks:
await asyncio.wait(tasks)
await session.close()
return self.info
except ConnectionRefusedError as e:
logger.error(str(e))
return self.info
except ProxyConnectionError as e:
logger.error(str(e))
return self.info
except Exception as e:
logger.error(str(e))
return self.info
except ssl.SSLError as e:
logger.error(str(e))
return self.info
async def delay(session: aiohttp.ClientSession, proxyname, testurl, hostname, port, timeout):
url = 'http://{}:{}/proxies/{}/delay?timeout={}&url={}'.format(hostname, port, proxyname, timeout, testurl)
async with session.get(url) as r:
try:
if r.status == 200:
text = await r.json()
return text['delay']
else:
logger.info(proxyname + ":" + str(await r.json()) + str(r.status))
return -1
except ClientConnectorError as c:
logger.warning("连接失败:", c)
return -1
async def delay_providers(providername, hostname='127.0.0.1', port=11230, session: aiohttp.ClientSession = None):
healthcheckurl = 'http://{}:{}/providers/proxies/{}/healthcheck'.format(hostname, port, providername)
url = 'http://{}:{}/providers/proxies/{}/'.format(hostname, port, providername)
if session is None:
session = aiohttp.ClientSession()
try:
await session.get(healthcheckurl)
async with session.get(url) as r:
if r.status == 200:
text = await r.json()
# 拿到延迟数据
delays = []
node = text['proxies']
for n in node:
s = n['history'].pop()
de = s['delay']
delays.append(de)
await session.close()
return delays
else:
logger.warning("延迟测试出错:" + str(r.status))
await session.close()
return 0
except ClientConnectorError as c:
logger.warning("连接失败:", c)
await session.close()
return 0
async def batch_delay(proxyname: list, session: aiohttp.ClientSession = None,
testurl=config.getGstatic(),
hostname='127.0.0.1', port=11230, timeout='5000'):
"""
批量测试延迟,仅适用于不含providers的订阅
:param timeout:
:param port: 外部控制器端口
:param hostname: 主机名
:param testurl: 测试网址
:param session: 一个连接session
:param proxyname: 一组代理名
:return: list: 延迟
"""
try:
if session is None:
async with aiohttp.ClientSession() as session:
tasks = []
for name in proxyname:
task = asyncio.create_task(
delay(session, name, testurl=testurl, hostname=hostname, port=port, timeout=timeout))
tasks.append(task)
done = await asyncio.gather(*tasks)
return done
else:
tasks = []
for name in proxyname:
task = asyncio.create_task(
delay(session, name, testurl=testurl, hostname=hostname, port=port, timeout=timeout))
tasks.append(task)
done = await asyncio.gather(*tasks)
return done
except Exception as e:
logger.error(e)
return None
async def delay_https(session: aiohttp.ClientSession, proxy=None, testurl=config.getGstatic(),
timeout=10):
# _headers = {
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
# 'Chrome/102.0.5005.63 Safari/537.36'
# }
_headers2 = {'User-Agent': 'clash'}
try:
s1 = time.time()
async with session.get(url=testurl, proxy=proxy, headers=_headers2,
timeout=timeout) as r:
if r.status == 502:
pass
# logger.error("dual stack tcp shake hands failed")
if r.status == 204 or r.status == 200:
delay1 = time.time() - s1
# print(delay1)
return delay1
else:
return 0
except Exception as e:
logger.error(str(e))
return 0
async def delay_https_task(session: aiohttp.ClientSession = None, collector=None, proxy=None, times=5):
if session is None:
async with aiohttp.ClientSession() as session:
tasks = [asyncio.create_task(delay_https(session=session, proxy=proxy)) for _ in range(times)]
result = await asyncio.gather(*tasks)
sum_num = [r for r in result if r != 0]
http_delay = sum(sum_num) / len(sum_num) if len(sum_num) else 0
http_delay = "%.0fms" % (http_delay * 1000)
# print("http平均延迟:", http_delay)
http_delay = int(http_delay[:-2])
if collector is not None:
collector.info['HTTP(S)延迟'] = http_delay
return http_delay
else:
tasks = [asyncio.create_task(delay_https(session=session, proxy=proxy)) for _ in range(times)]
result = await asyncio.gather(*tasks)
sum_num = [r for r in result if r != 0]
http_delay = sum(sum_num) / len(sum_num) if len(sum_num) else 0
http_delay = "%.0fms" % (http_delay * 1000)
http_delay = int(http_delay[:-2])
# print("http平均延迟:", http_delay)
if collector is not None:
collector.info['HTTP(S)延迟'] = http_delay
return http_delay
if __name__ == "__main__":
"this is a test demo"
import sys
import os
os.chdir(os.path.abspath(os.path.join(os.getcwd(), os.pardir)))
sys.path.append(os.path.abspath(os.path.join(os.getcwd(), os.pardir)))
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
ccnr = cleaner.ClashCleaner(r"在这里填入你的订阅路径")
miaospeed = Miaospeed(ccnr.getProxies())
resd, _start_time = loop.run_until_complete(miaospeed.start())
cl1 = cleaner.ConfigManager(configpath=r"./results/miaospeed{}.yaml".format(_start_time.replace(':', '-')),
data=resd)
cl1.save(r"./results/miaospeed{}.yaml".format(_start_time.replace(':', '-')))
print(resd)
| [] |
2024-01-10 | Amiking/FullTclash | utils~cleaner.py | import asyncio
import importlib
import os
import re
import sys
from typing import Union, List
import socket
import yaml
from loguru import logger
class IPCleaner:
def __init__(self, data):
self._data = data
self.style = config.config.get('geoip-api', 'ip-api.com')
# logger.debug(f"当前api: {self.style}")
def get(self, key, _default=None):
try:
if self._data is None:
return {}
return self._data[key]
except KeyError:
return _default
except TypeError:
# logger.warning("无法获取对应信息: " + str(key))
return None
def get_org(self):
"""
获取组织
:return:
"""
if self.style == "ip.sb":
org = self.get('asn_organization')
elif self.style == "ip-api.com":
org = self.get('isp')
elif self.style == "ipleak.net":
org = self.get('isp_name')
elif self.style == "ipdata.co":
org = self.get('asn', {}).get('name')
elif self.style == "ipapi.co":
org = self.get('org')
else:
org = ""
if org:
return org
else:
return ""
def get_ip(self):
ip = ""
if self.style == "ip-api.com":
ip = self.get('query')
elif self.style == "ip.sb":
ip = self.get('ip')
elif self.style == "ipleak.net":
ip = self.get('query_text')
elif self.style == "ipdata.co":
ip = self.get('ip')
elif self.style == "ipapi.co":
ip = self.get('ip')
else:
pass
if ip:
return ip
else:
return ""
def get_country_code(self):
region_code = ""
if self.style == "ip-api.com":
region_code = self.get('countryCode')
elif self.style == "ip.sb":
region_code = self.get('country_code')
elif self.style == "ipleak.net":
region_code = self.get('country_code')
elif self.style == "ipdata.co":
region_code = self.get('country_code')
elif self.style == "ipapi.co":
region_code = self.get('country_code')
else:
pass
if region_code:
return region_code
else:
return ""
def get_city(self):
city = ""
if self.style == "ip-api.com":
city = self.get('city')
elif self.style == "ip.sb":
city = self.get('city')
elif self.style == "ipleak.net":
city = self.get('city_name')
elif self.style == "ipdata.co":
city = self.get('city')
elif self.style == "ipapi.co":
city = self.get('city')
else:
pass
if city:
return city
else:
return ""
def get_asn(self):
if self.style == "ip-api.com":
try:
asn = self.get('as', '0').split(' ')[0]
return asn
except AttributeError:
return '0'
except IndexError:
return '0'
elif self.style == "ip.sb":
asn = self.get('asn', '0')
asd = "AS" + repr(asn)
return asd
elif self.style == "ipleak.net":
asn = self.get('as_number', '0')
asd = "AS" + repr(asn)
return asd
elif self.style == "ipdata.co":
asn = self.get('asn', {}).get('asn', '0')
return asn
elif self.style == "ipapi.co":
asn = self.get('asn', '0')
return asn
else:
return ''
class AddonCleaner:
"""
动态脚本导入
"""
def __init__(self, path: str = "./addons/"):
"""
模块管理中心
:param path: 加载路径
"""
self.path = path
self._script = {}
self.blacklist = []
def global_test_item(self, httptest: bool = False):
"""
经过去重并支持黑名单一并去除。最后返回一个新列表
:return:
"""
base_item = ['Netflix', 'Youtube', 'Disney+', 'OpenAI', 'Viu', 'steam货币', 'Spotify',
'维基百科', '落地IP风险']
base_item = base_item + list(self._script.keys())
new_item = sorted(set(base_item) - set(self.blacklist), key=base_item.index)
if httptest:
new_item.insert(0, "HTTP(S)延迟")
return new_item
@property
def script(self):
return self._script
def reload_script(self, blacklist: list = None, path: str = "./addons/"):
self.init_addons(path)
if blacklist:
for b in blacklist:
self._script.pop(b, None)
def mix_script(self, alist: List[str], httptest: bool = True) -> list:
"""
适配后端脚本不足的兼容测试项,返回后端支持的所有测试项。
"""
newlist = list(set(alist).intersection(set(self.global_test_item())))
newlist = sorted(newlist, key=alist.index)
if httptest:
newlist.insert(0, "HTTP(S)延迟")
return newlist
def remove_addons(self, script_name: list):
success_list = []
if script_name:
for name in script_name:
if name[-3:] == '.py' and name != "__init__.py":
continue
try:
os.remove(self.path + name + '.py')
success_list.append(name)
except FileNotFoundError as f:
logger.warning(f"{name} 文件不存在\t" + str(f))
except PermissionError as p:
logger.warning(f"权限错误: {str(p)}")
except Exception as e:
logger.error(str(e))
return success_list
else:
logger.warning("script_name is empty")
return success_list
def init_addons(self, path: str):
"""
动态加载测速脚本
"""
try:
di = os.listdir(path)
except FileNotFoundError:
di = None
module_name = []
if di is None:
logger.warning(f"找不到 {path} 所在的路径")
else:
for d in di:
if len(d) > 3:
if d[-3:] == '.py' and d != "__init__.py":
module_name.append(d[:-3])
else:
pass
self._script.clear()
logger.info("模块即将动态加载: " + str(module_name))
logger.info("正在尝试获取 'SCRIPT' 属性组件")
# module_name = ["abema"]
num = 0
for mname in module_name:
try:
mo1 = importlib.import_module(f"addons.{mname}")
except ModuleNotFoundError as m:
logger.warning(str(m))
mo1 = None
except NameError as n:
logger.warning(str(n))
mo1 = None
except Exception as e:
logger.error(str(e))
mo1 = None
if mo1 is None:
continue
try:
script = getattr(mo1, 'SCRIPT')
except AttributeError:
script = None
if script is None or type(script).__name__ != "dict":
continue
sname = script.get('MYNAME', None)
stask = script.get("TASK", None)
sget = script.get("GET", None)
if type(stask).__name__ == 'function' and type(sname).__name__ == 'str' and type(
sget).__name__ == 'function':
self._script[sname] = [stask, sget]
num += 1
logger.info(f"已成功加载测试脚本:{sname}")
else:
logger.warning("测试脚本导入格式错误")
logger.info(f"外接测试脚本成功导入数量: {num}")
@staticmethod
def init_callback() -> list:
path = os.path.join(os.getcwd(), "addons", "callback")
try:
di = os.listdir(path)
except FileNotFoundError:
di = None
module_name = []
callbackfunc_list = []
if di is None:
logger.warning(f"找不到 {path} 所在的路径")
else:
for d in di:
if len(d) > 3:
if d.endswith('.py') and d != "__init__.py":
module_name.append(d[:-3])
else:
pass
for mname in module_name:
callbackfunc = None
try:
mo1 = importlib.import_module(f".{mname}", package="addons.callback")
callbackfunc = getattr(mo1, 'callback')
if callbackfunc is not None:
if asyncio.iscoroutinefunction(callbackfunc):
callbackfunc_list.append(callbackfunc)
except ModuleNotFoundError as m:
logger.warning(str(m))
except AttributeError:
pass
except NameError as n:
logger.warning(str(n))
except Exception as e:
logger.error(str(e))
if callbackfunc is None:
continue
logger.info(f"权限回调脚本导入数量: {len(callbackfunc_list)}")
return callbackfunc_list
def init_button(self, isreload=False):
"""
初始化bot内联按钮
"""
try:
if isreload:
self.init_addons(self.path)
from pyrogram.types import InlineKeyboardButton
script = addon.script
button = []
for k in script.keys():
b = InlineKeyboardButton(f"✅{str(k)}", callback_data=f"✅{str(k)}")
button.append(b)
return button
except Exception as e:
logger.error(str(e))
return []
def preTemplate():
"""
内置模板。防止用户误删除项目文件导致出错,无法进行测试。
"""
template_text = """
allow-lan: false
bind-address: '*'
dns:
default-nameserver:
- 119.29.29.29
- 223.5.5.5
enable: false
enhanced-mode: fake-ip
fallback:
- https://208.67.222.222/dns-query
- https://public.dns.iij.jp/dns-query
- https://101.6.6.6:8443/dns-query
fallback-filter:
geoip: true
geoip-code: CN
listen: 0.0.0.0:53
nameserver:
- 119.29.29.29
- 223.5.5.5
- 114.114.114.114
external-controller: 127.0.0.1:11230
ipv6: true
log-level: info
mixed-port: 11220
mode: rule
proxies: null
proxy-groups:
- name: auto
type: select
use:
- Default
proxy-providers:
Default:
health-check:
enable: true
interval: 600000
url: http://www.gstatic.com/generate_204
path: ./default.yaml
type: file
rules:
- DOMAIN-KEYWORD,stun,auto
- DOMAIN-SUFFIX,gstatic.com,auto
- DOMAIN-KEYWORD,gstatic,auto
- DOMAIN-SUFFIX,google.com,auto
- DOMAIN-KEYWORD,google,auto
- DOMAIN,google.com,auto
- DOMAIN-SUFFIX,bilibili.com,auto
- DOMAIN-KEYWORD,bilibili,auto
- DOMAIN,bilibili.com,auto
- DOMAIN-SUFFIX,microsoft.com,auto
- DOMAIN-SUFFIX,cachefly.net,auto
- DOMAIN-SUFFIX,apple.com,auto
- DOMAIN-SUFFIX,cdn-apple.com,auto
- SRC-IP-CIDR,192.168.1.201/32,DIRECT
- IP-CIDR,127.0.0.0/8,DIRECT
- GEOIP,CN,DIRECT
- MATCH,auto
"""
return template_text
class ClashCleaner:
"""
yaml配置清洗
"""
def __init__(self, _config, _config2: Union[str, bytes] = None):
"""
:param _config: 传入一个文件对象,或者一个字符串,文件对象需指向 yaml/yml 后缀文件
"""
self.path = ''
self.unsupport_type = ['wireguard', 'vless', 'hysteria', 'tuic']
self.yaml = {}
self.load(_config, _config2)
if not isinstance(self.yaml, dict):
self.yaml = {}
def load(self, _config, _config2: Union[str, bytes]):
if type(_config).__name__ == 'str':
if _config == ':memory:':
try:
self.yaml = yaml.safe_load(preTemplate()) if _config2 is None else yaml.safe_load(_config2)
self.check_type()
return
except Exception as e:
logger.error(str(e))
self.yaml = {}
return
else:
with open(_config, 'r', encoding="UTF-8") as fp:
self.yaml = yaml.safe_load(fp)
self.path = _config
else:
self.yaml = yaml.safe_load(_config)
def check_type(self):
"""
检查反序列化后的对象是否符合clash配置格式
"""
self.check_unsupport_proxy()
def setProxies(self, proxyinfo: list):
"""
覆写里面的proxies键
:return:
"""
self.yaml['proxies'] = proxyinfo
def check_unsupport_proxy(self):
try:
if self.yaml is None:
self.yaml = {}
return
proxies: list = self.yaml['proxies']
newproxies = []
for i, proxy in enumerate(proxies):
if isinstance(proxy, dict):
name = proxy['name']
ptype = proxy['type']
if not isinstance(name, str):
# 将节点名称转为字符串
proxy['name'] = str(name)
if ptype not in self.unsupport_type:
newproxies.append(proxy)
self.yaml['proxies'] = newproxies
except KeyError:
logger.warning("读取节点信息失败!")
except TypeError:
logger.warning("读取节点信息失败!")
def getProxies(self):
"""
获取整个代理信息
:return: list[dict,dict...]
"""
try:
return self.yaml['proxies']
except KeyError:
logger.warning("读取节点信息失败!")
return []
except TypeError:
logger.warning("读取节点信息失败!")
return []
def nodesCount(self):
"""
获取节点数量
:return: int
"""
try:
return len(self.yaml['proxies'])
except TypeError:
logger.warning("读取节点信息失败!")
return 0
def nodesName(self, _filter: str = ''):
"""
获取节点名
:return: list
"""
lis = []
try:
for i in self.yaml['proxies']:
lis.append(str(i['name']))
return lis
except KeyError:
logger.warning("读取节点信息失败!")
return None
except TypeError:
logger.warning("读取节点信息失败!")
return None
def nodesAddr(self):
"""
获取节点地址信息,返回(host,port)元组形式
"""
try:
return [(str(i['server']), i['port']) for i in self.yaml['proxies']]
except KeyError:
logger.warning("读取节点信息失败!")
return None
except TypeError:
logger.warning("读取节点信息失败!")
return None
def nodesType(self):
"""
获取节点类型
:return: list
"""
t = []
try:
for i in self.yaml['proxies']:
t.append(str(i['type']))
return t
except TypeError:
logger.warning("读取节点信息失败!")
return None
def nodehost(self, _filter: str = ''):
"""
获取节点域名
:return: list
"""
y = []
try:
for i in self.yaml['proxies']:
y.append(str(i['server']))
return y
except TypeError:
logger.warning("读取节点信息失败!")
return None
@staticmethod
def count_element(y: list = None):
"""
返回入站域名信息,本质上是统计一个列表里每个元素出现的次数
:return: dict
"""
dip = {}
if y is None:
return None
else:
nodehosts = y
try:
for key in nodehosts:
dip[key] = dip.get(key, 0) + 1
return dip
except Exception as e:
logger.error(str(e))
return None
@staticmethod
def count_elem(addrs: list = None):
"""
返回入站ip信息,本质上是统计一个列表里每个元素出现的次数
:return: dict
"""
dic = {}
if addrs is None:
return None
else:
nodeaddrs = addrs
try:
for key in nodeaddrs:
dic[key] = dic.get(key, 0) + 1
return dic
except Exception as e:
logger.error(str(e))
return None
@logger.catch
def proxyGroupName(self):
"""
获取第一个"select"类型代理组的名字
:return: str
"""
try:
for t in self.yaml['proxy-groups']:
if t['type'] == 'select' and len(t['proxies']) >= self.nodesCount():
return t['name']
else:
pass
except TypeError:
logger.warning("读取节点信息失败!")
return None
def changeClashPort(self, port: str or int = 11220):
"""
改变配置文件端口
"""
if 'mixed-port' in self.yaml:
self.yaml['mixed-port'] = int(port)
logger.info("配置端口已被改变为:" + str(port))
elif 'port' in self.yaml:
self.yaml['port'] = int(port)
logger.info("配置端口已被改变为:" + str(port))
def changeClashEC(self, ec: str = '127.0.0.1:11230'):
"""
改变external-controller地址与端口
"""
try:
self.yaml['external-controller'] = ec
logger.info("外部控制地址已被修改为:" + ec)
except Exception as e:
logger.error(str(e))
def changeClashMode(self, mode: str = "global"):
"""
改变clash模式
"""
self.yaml['mode'] = mode
logger.info("Clash 模式已被修改为:" + self.yaml['mode'])
def node_filter(self, include: str = '', exclude: str = '', issave=False):
"""
节点过滤
:param issave: 是否保存过滤结果到文件
:param include: 包含
:param exclude: 排除
:return:
"""
logger.info(f'Node filter text>> included: {include}, excluded: {exclude}')
result = []
result2 = []
nodelist = self.getProxies()
pattern1 = pattern2 = None
try:
if include:
pattern1 = re.compile(include)
if exclude:
pattern2 = re.compile(exclude)
except re.error:
logger.error("正则错误!请检查正则表达式!")
return self.nodesName()
except Exception as e:
logger.error(e)
return self.nodesName()
if pattern1 is None:
result = nodelist
else:
for node in nodelist:
try:
r = pattern1.findall(node.get('name', ''))
if r:
logger.info("包含过滤器已命中:" + str(node.get('name', '')))
result.append(node)
except re.error as rerror:
logger.error(str(rerror))
result.append(node)
except Exception as e:
logger.error(str(e))
result.append(node)
jishu1 = len(result)
jishu2 = 0
if pattern2 is None:
result2 = result
else:
for node in result:
try:
r = pattern2.findall(node.get('name', ''))
if r:
logger.info("排除过滤器已命中: " + str(node.get('name', '')))
jishu2 += 1
else:
result2.append(node)
except re.error as rerror:
logger.error(str(rerror))
except Exception as e:
logger.error(str(e))
logger.info(f"Included {jishu1} node(s) Excluded {jishu2} node(s) Exported {jishu1 - jishu2} node(s)")
self.yaml['proxies'] = result2
if issave:
self.save(savePath=self.path)
@logger.catch
def save(self, savePath: str = "./sub.yaml"):
with open(savePath, "w", encoding="UTF-8") as fp:
yaml.dump(self.yaml, fp)
class ConfigManager:
"""
配置清洗,以及预处理配置在这里进行。
"""
def __init__(self, configpath="./resources/config.yaml", data: dict = None):
"""
configpath有一个特殊值::memory: 将使用默认内置的模板
还有成员变量中的 self.config 是约定为只读的
如果要写入新值,用self.yaml代替。
"""
self.yaml = {}
self.config = None
flag = 0
if configpath == ':memory:':
self.config = yaml.safe_load(preTemplate())
self.yaml.update(self.config)
return
try:
with open(configpath, "r", encoding="UTF-8") as fp:
self.config = yaml.safe_load(fp)
self.yaml.update(self.config)
except FileNotFoundError:
if flag == 0 and configpath == "./resources/config.yaml":
flag += 1
logger.warning("无法在 ./resources/ 下找到 config.yaml 配置文件,正在尝试寻找旧目录 ./config.yaml")
try:
with open('./config.yaml', "r", encoding="UTF-8") as fp1:
self.config = yaml.load(fp1, Loader=yaml.FullLoader)
self.yaml.update(self.config)
except FileNotFoundError:
self.config = {}
self.yaml = {}
elif flag > 1:
logger.warning("无法找到配置文件,正在初始化...")
if self.config is None:
di = {'loader': "Success"}
with open(configpath, "w+", encoding="UTF-8") as fp:
yaml.dump(di, fp)
self.config = {}
if data:
with open(configpath, "w+", encoding="UTF-8") as fp:
yaml.dump(data, fp)
self.yaml = data
@property
def nospeed(self) -> bool:
return bool(self.config.get('nospeed', False))
def speedconfig(self):
try:
return self.config['speedconfig']
except KeyError:
return {}
def speednodes(self):
try:
return self.config['speednodes']
except KeyError:
return int(300)
def getMasterconfig(self):
return self.config.get('masterconfig', {})
def getSlaveconfig(self):
return self.config.get('slaveconfig', {})
def getBuildToken(self):
token = self.config.get('buildtoken', 'c7004ded9db897e538405c67e50e0ef0c3dbad717e67a92d02f6ebcfd1022a5ad1d' +
'2c4419541f538ff623051759ec000d2f426e03f9709a6608570c5b9141a6b')
if not isinstance(token, str):
raise TypeError("buildtoken的值不合法,它应该是个字符串")
return token
def getBotconfig(self):
botconfig = self.config.get('bot', {})
if botconfig is None:
return {}
if not botconfig:
return botconfig
if 'api_id' in botconfig:
logger.info("从配置中获取到了api_id")
if 'api_hash' in botconfig:
logger.info("从配置中获取到了api_hash")
if 'bot_token' in botconfig:
logger.info("从配置中获取到了bot_token")
return botconfig
def getFont(self):
return self.config.get('font', "./resources/alibaba-Regular.ttf")
def getColor(self):
return self.config.get('image', {}).get('color', {})
def getAdmin(self) -> list:
try:
return self.config['admin']
except KeyError:
return []
def getBridge(self):
"""
获取连接中继桥,它是一个telegram的user_id
"""
bridge = self.config.get('userbot', {}).get('id', None)
return bridge
def getGstatic(self):
"""
获取HTTP(S)延迟测试的URL
:return:
"""
try:
return self.config.get('pingurl', "http://www.gstatic.com/generate_204")
except KeyError:
return "http://www.gstatic.com/generate_204"
def getuser(self):
try:
return self.config['user']
except KeyError:
logger.warning("获取用户失败,将采用默认用户")
return [] # 默认名单
def get_proxy_port(self):
try:
return self.config['proxyport']
except KeyError:
return None
def get_bot_proxy(self, isjoint=True):
"""
:param isjoint: 是否拼接代理
:return:
"""
try:
if isjoint:
return 'http://' + self.config.get('bot', {}).get('proxy', None)
else:
return self.config.get('bot', {}).get('proxy', None)
except KeyError:
return None
def get_proxy(self, isjoint=True):
"""
:param isjoint: 是否拼接代理
:return:
"""
try:
if isjoint:
return 'http://' + str(self.config['proxy'])
else:
return str(self.config['proxy'])
except KeyError:
return None
def get_default_slave(self):
return self.getSlaveconfig().get('default-slave', {})
def get_media_item(self):
try:
return self.config['item']
except KeyError:
# logger.error("获取测试项失败,将采用默认测试项:[Netflix,Youtube,Disney,Bilibili,Dazn]")
return ['Netflix', 'Youtube', 'Disney', 'Bilibili', 'Dazn']
def get_clash_work_path(self):
"""
clash工作路径
:return:
"""
try:
return self.config['clash']['workpath']
except KeyError:
logger.warning("获取工作路径失败,将采用默认工作路径 ./clash")
try:
d = {'workpath': './clash'}
self.yaml['clash'].update(d)
except KeyError:
di = {'clash': {'workpath': './clash'}}
self.yaml.update(di)
return './clash'
def get_clash_path(self):
"""
clash 核心的运行路径,包括文件名
:return: str
"""
try:
return self.config['clash']['path']
except KeyError:
logger.warning("获取运行路径失败,将采用默认运行路径 ./bin/fulltclash(.exe)\n自动识别windows与linux。架构默认为amd64")
if sys.platform.startswith("linux"):
path = './bin/fulltclash-linux-amd64'
elif sys.platform.startswith("win32"):
path = r'.\bin\fulltclash-windows-amd64.exe'
else:
path = './bin/fulltclash-linux-amd64'
d = {'path': path}
try:
self.yaml['clash'].update(d)
except KeyError:
di = {'clash': d}
self.yaml.update(di)
return path
def get_sub(self, subname: str = None):
"""
获取所有已保存的订阅,或者单个订阅
:return: 单个订阅或全部订阅
"""
if subname is None:
try:
return self.config['subinfo']
except KeyError:
logger.info("无订阅保存")
return {}
else:
try:
return self.config.get('subinfo', {}).get(subname, {})
except KeyError:
return {}
@logger.catch
def add(self, data: dict, key):
try:
self.yaml[key] = data[key]
except Exception as e:
print(e)
@logger.catch
def add_admin(self, admin: list or str or int):
"""
添加管理员
"""
adminlist = []
if admin is list:
for li in admin:
adminlist.append(li)
else:
adminlist.append(admin)
try:
old = self.config['admin']
if old is not None:
adminlist.extend(old)
newadminlist = list(set(adminlist)) # 去重
self.yaml['admin'] = newadminlist
logger.info("添加成功")
except KeyError:
newadminlist = list(set(adminlist)) # 去重
self.yaml['admin'] = newadminlist
logger.info("添加成功")
@logger.catch
def del_admin(self, admin: list or str or int):
"""
删除管理员
"""
try:
adminlist = self.config['admin']
if adminlist is not None:
if admin is list:
for li in admin:
adminlist.remove(li)
else:
adminlist.remove(admin)
self.yaml['admin'] = adminlist
except TypeError:
logger.error("删除失败")
def add_slave(self, slave_id: str, key: str, username: str, comment: str = '-'):
slaveconfig = self.config.get('slaveconfig', {})
if slaveconfig is None:
slaveconfig = {}
slaveconfig[slave_id] = {'public-key': key, 'username': username, 'comment': comment}
self.yaml['slaveconfig'] = slaveconfig
@logger.catch
def add_user(self, user: list or str or int):
"""
添加授权用户
"""
userlist = []
if type(user).__name__ == "list":
for li in user:
userlist.append(li)
else:
userlist.append(user)
try:
old = self.config['user']
if old is not None:
userlist.extend(old)
newuserlist = list(set(userlist)) # 去重
self.yaml['user'] = newuserlist
logger.info("添加成功")
except KeyError:
newuserlist = list(set(userlist)) # 去重
self.yaml['user'] = newuserlist
logger.info("添加成功")
@logger.catch
def del_user(self, user: list or str or int):
"""
删除授权用户
"""
try:
userlist = self.config['user']
if userlist is not None:
if user is list:
for li in user:
userlist.remove(li)
else:
try:
userlist.remove(user)
except ValueError:
logger.warning("目标本身未在用户列表中")
self.yaml['user'] = userlist
except TypeError:
logger.error("删除失败")
@logger.catch
def save(self, savePath: str = "./resources/config.yaml"):
with open(savePath, "w+", encoding="UTF-8") as fp:
try:
yaml.dump(self.yaml, fp)
return True
except Exception as e:
logger.error(e)
return False
@logger.catch
def reload(self, configpath="./resources/config.yaml", issave=True):
if issave:
if self.save(savePath=configpath):
try:
with open(configpath, "r", encoding="UTF-8") as fp:
self.config = yaml.safe_load(fp)
self.yaml = self.config
return True
except Exception as e:
logger.error(e)
return False
else:
try:
with open(configpath, "r", encoding="UTF-8") as fp:
self.config = yaml.safe_load(fp)
self.yaml = self.config
return True
except Exception as e:
logger.error(e)
return False
@logger.catch
def newsub(self, subinfo: dict):
"""添加订阅"""
try:
self.yaml['subinfo'].update(subinfo)
except KeyError:
s = {'subinfo': subinfo}
self.yaml.update(s)
@logger.catch
def removesub(self, subname: str):
"""
移除订阅
:return:
"""
try:
subinfo = self.yaml['subinfo']
if subinfo is not None:
if subname in subinfo:
subinfo.pop(subname)
except KeyError:
logger.error('移出失败')
@logger.catch
def delsub2provider(self, subname: str):
try:
subinfo = self.yaml['proxy-providers']
if subinfo is not None:
if subname in subinfo:
subinfo.pop(subname)
subinfo2 = self.yaml['proxy-groups'][0]['use']
if subinfo2 is not None:
if subname in subinfo2:
subinfo2.remove(subname)
except TypeError:
logger.warning("删除失败")
@logger.catch
def addsub2provider(self, subname: str, subpath: str, nodefilter: str = ''):
"""
添加订阅到总文件,如用相对路径,请注意这里的subpath是写入到配置里面的,如果你指定过clash核心的工作目录,则相对位置以clash工作目录为准
:param nodefilter: 节点过滤
:param subname:
:param subpath:
:return:
"""
pingurl = config.getGstatic()
info = {'type': 'file', 'path': subpath,
'health-check': {'enable': True, 'url': pingurl, 'interval': 6000}}
if nodefilter:
info['filter'] = nodefilter
self.yaml['proxy-providers'][subname] = info
if subname not in self.yaml['proxy-groups'][0]['use']:
self.yaml['proxy-groups'][0]['use'].append(subname)
# 内置一个配置全局变量,后续项目开发可以统一读取这个,./botmodule/init_bot.py 中也有一个
config = ConfigManager()
media_item = config.get_media_item()
addon = AddonCleaner()
def reload_config(media: list = None):
global config, media_item
config.reload(issave=False)
if media is not None:
media_item = media
else:
media_item = config.get_media_item()
class ReCleaner:
"""
预测试结果清洗类
"""
def __init__(self, data: dict):
self.data = data
self._sum = 0
self._netflix_info = []
self._script = addon.script
@property
def script(self):
return self._script
def get_all(self):
info = {}
items = media_item
try:
for item in items:
i = item
if i in self.script:
task = self.script[i][1]
info[i] = task(self)
continue
if i == "Youtube":
you = self.getyoutubeinfo()
info['Youtube'] = you
elif i == "Disney":
dis = self.getDisneyinfo()
info['Disney'] = dis
elif i == "Disney+":
dis = self.getDisneyinfo()
info['Disney+'] = dis
elif i == "Dazn":
dazn = self.get_dazn_info()
info['Dazn'] = dazn
elif i == "Netflix":
from addons.unlockTest import netflix
info['Netflix'] = netflix.get_netflix_info_new(self)
elif i == "TVB":
from addons.unlockTest import tvb
info['TVB'] = tvb.get_TVBAnywhere_info(self)
elif i == "Viu":
from addons.unlockTest import viu
info['Viu'] = viu.get_viu_info(self)
elif i == "iprisk" or i == "落地IP风险":
from addons.unlockTest import ip_risk
info['落地IP风险'] = ip_risk.get_iprisk_info(self)
elif i == "steam货币":
from addons.unlockTest import steam
info['steam货币'] = steam.get_steam_info(self)
elif i == "维基百科":
from addons.unlockTest import wikipedia
info['维基百科'] = wikipedia.get_wikipedia_info(self)
elif item == "OpenAI":
from addons.unlockTest import openai
info['OpenAI'] = openai.get_openai_info(self)
else:
pass
except Exception as e:
logger.error(str(e))
return info
def get_https_rtt(self):
"""
获取http(s)协议延迟
:return: int
"""
try:
if 'HTTP(S)延迟' not in self.data and 'HTTPS延迟' not in self.data:
logger.warning("采集器内无数据: HTTP(S)延迟")
return 0
else:
return self.data.get('HTTP(S)延迟', 0)
except Exception as e:
logger.error(str(e))
return 0
def get_dazn_info(self):
"""
:return: str: 解锁信息: [解锁(地区代码)、失败、N/A]
"""
try:
if 'dazn' not in self.data:
logger.warning("采集器内无数据: Dazn")
return "N/A"
else:
i1 = self.data.get('dazn', '')
if i1 == '连接错误' or i1 == '超时':
logger.info("Dazn状态: " + i1)
return i1
try:
info = self.data['dazn']['Region']
isAllowed = info['isAllowed']
region = info['GeolocatedCountry']
except KeyError as k:
logger.error(str(k))
return "N/A"
if not isAllowed:
logger.info("Dazn状态: " + "失败")
return "失败"
elif isAllowed:
if region:
countrycode = region.upper()
logger.info("Dazn状态: " + "解锁({})".format(countrycode))
return "解锁({})".format(countrycode)
else:
logger.info("Dazn状态: " + "解锁")
return "解锁"
else:
logger.info("Dazn状态: N/A(未找到)")
return "N/A"
except Exception as e:
logger.error(str(e))
return "N/A"
def getyoutubeinfo(self):
"""
:return: str :解锁信息: (解锁、失败、N/A)
"""
try:
if 'youtube' not in self.data:
logger.warning("采集器内无数据")
return "N/A"
else:
text = self.data['youtube']
if text.find('www.google.cn') != -1:
return "送中(CN)"
if text.find('Premium is not available in your country') != -1 or text.find(
'manageSubscriptionButton') == -1:
return "失败"
elif self.data['youtube_status_code'] == 200:
idx = text.find('"countryCode"')
region = text[idx:idx + 17].replace('"countryCode":"', "")
if idx == -1 and text.find('manageSubscriptionButton') != -1:
region = "US"
logger.info(f"Youtube解锁地区: {region}")
return f"解锁({region})"
else:
return "未知"
except Exception as e:
logger.error(e)
return "N/A"
def getDisneyinfo(self):
"""
:return: 解锁信息: 解锁、失败、N/A、待解
"""
try:
if 'disney' not in self.data:
logger.warning("无法读取Disney Plus解锁信息")
return "N/A"
else:
logger.info("Disney+ 状态:" + str(self.data['disney']))
return self.data['disney']
except Exception as e:
logger.error(e)
return "N/A"
class ResultCleaner:
"""
测速结果的处理类,负责将得到的数据进行排序,重命名等操作
"""
def __init__(self, info: dict):
self.data = info
@staticmethod
def get_http_latency(data: list):
"""
对所有列表延迟取平均,去除0
:param data:
:return:
"""
if not data:
raise IndexError("列表为空")
n = len(data)
m = len(data[0])
new_list = []
for j in range(m):
col_sum = 0
num = 0
for i in range(n):
if data[i][j] != 0:
col_sum += data[i][j]
num += 1
if num:
r1 = int(col_sum / num)
new_list.append(r1)
else:
new_list.append(0)
return new_list
def start(self, sort="订阅原序"):
try:
if '类型' in self.data:
type1 = self.data['类型']
new_type = []
for t in type1:
if t == 'ss':
new_type.append("Shadowsocks")
elif t == "ssr":
new_type.append("ShadowsocksR")
else:
new_type.append(t.capitalize())
self.data['类型'] = new_type
if sort == "HTTP倒序":
self.sort_by_ping(reverse=True)
elif sort == "HTTP升序":
self.sort_by_ping()
if 'HTTP延迟(内核)' in self.data:
rtt = self.data['HTTP延迟(内核)']
new_rtt = []
for r in rtt:
new_rtt.append(str(r) + 'ms')
self.data['HTTP延迟(内核)'] = new_rtt
if 'HTTP(S)延迟' in self.data:
rtt = self.data['HTTP(S)延迟']
new_rtt = []
for r in rtt:
new_rtt.append(str(r) + 'ms')
self.data['HTTP(S)延迟'] = new_rtt
return self.data
except TypeError:
return {}
def sort_by_ping(self, reverse=False):
http_l = self.data.get('HTTP(S)延迟')
if not reverse:
for i in range(len(http_l)):
if http_l[i] == 0:
http_l[i] = 999999
new_list = [http_l, self.data.get('节点名称'), self.data.get('类型')]
for k, v in self.data.items():
if k == "HTTP(S)延迟" or k == "节点名称" or k == "类型":
continue
new_list.append(v)
lists = zip(*new_list)
lists = sorted(lists, key=lambda x: x[0], reverse=reverse)
lists = zip(*lists)
new_list = [list(l_) for l_ in lists]
http_l = new_list[0] if len(new_list) > 0 else []
if not reverse:
for i in range(len(http_l)):
if http_l[i] == 999999:
http_l[i] = 0
if len(new_list) > 2:
self.data['HTTP(S)延迟'] = http_l
self.data['节点名称'] = new_list[1]
self.data['类型'] = new_list[2]
num = -1
for k in self.data.keys():
num += 1
if k == "HTTP(S)延迟" or k == "节点名称" or k == "类型":
continue
self.data[k] = new_list[num]
class ArgCleaner:
def __init__(self, string: str = None):
self.string = string
@staticmethod
def getarg(string: str, sep: str = ' ') -> list:
"""
对字符串使用特定字符进行切片
Args:
string: 要切片的字符串
sep: 指定用来切片的字符依据,默认为空格
Returns: 返回一个切好的字符串列表
"""
return [x for x in string.strip().split(sep) if x != '']
def getall(self, string: str = None):
"""
分割一段字符串中的参数,返回参数列表
"""
if string is None:
if self.string is None:
return None
arg = self.string.strip().split(' ')
arg = [x for x in arg if x != '']
return arg
else:
arg = string.strip().split(' ')
arg = [x for x in arg if x != '']
return arg
def geturl(string: str):
text = string
pattern = re.compile(
r"https?://(?:[a-zA-Z]|\d|[$-_@.&+]|[!*,]|[\w\u4e00-\u9fa5])+") # 匹配订阅地址
# 获取订阅地址
try:
url = pattern.findall(text)[0] # 列表中第一个项为订阅地址
return url
except IndexError:
return None
@logger.catch
def domain_to_ip(host: str):
"""
将域名转成IPv4和IPv6地址
:param host:
:return: 返回IP地址列表,如果无法解析返回None
"""
try:
results = socket.getaddrinfo(host, None, socket.AF_UNSPEC, socket.SOCK_STREAM)
ips = set()
for result in results:
ips.add(result[4][0])
ip = list(ips)
return ip
except socket.gaierror:
return None
def cluster(host):
cluip = domain_to_ip(host)
if cluip is None:
return None
else:
clus = len(cluip)
return clus
def count(host):
ips = domain_to_ip(host)
if ips is None:
return None
ipv4_count = 0
ipv6_count = 0
for ip in ips:
if ":" in ip:
ipv6_count += 1
else:
ipv4_count += 1
if ipv4_count > 0 and ipv6_count == 0:
return "4"
elif ipv6_count > 0 and ipv4_count == 0:
return "6"
elif ipv4_count > 0 and ipv6_count > 0:
return "46"
else:
return None
def batch_ipstack(host: list):
"""
批量将域名转成栈列表
:param host: 一个列表
:return:
"""
ipstack = []
for h in host:
if type(h).__name__ == 'dict':
try:
ipss = count(h['ipstart'])
if ipss:
h['ipstart'] = ipss
else:
h['ipstart'] = "N/A"
ipstack.append(h)
except KeyError:
h['ipstart'] = "N/A"
ipstack.append(h)
else:
ipss = count(h)
if ipss:
ipstack.append(ipss)
else:
ipstack.append("N/A")
return ipstack
def batch_domain2ip(host: list):
"""
批量将域名转成ip地址
:param host: 一个列表
:return:
"""
ipaddrs = []
for h in host:
if type(h).__name__ == 'dict':
try:
ips = domain_to_ip(h['server'])
if ips:
h['server'] = ips[0]
else:
h['server'] = "N/A"
ipaddrs.append(h)
except KeyError:
h['server'] = "N/A"
ipaddrs.append(h)
else:
ips = domain_to_ip(h)
if ips:
ipaddrs.append(ips[0])
else:
ipaddrs.append("N/A")
return ipaddrs
def batch_ipcu(host: list):
"""
批量将域名转成簇列表
:param host: 一个列表
:return:
"""
ipcu = []
for h in host:
if type(h).__name__ == 'dict':
try:
ipss = cluster(h['ipcu'])
if ipss:
h['ipcu'] = ipss
else:
h['ipcu'] = "N/A"
ipcu.append(h)
except KeyError:
h['ipcu'] = "N/A"
ipcu.append(h)
else:
ipss = cluster(h)
if ipss:
ipcu.append(ipss)
else:
ipcu.append("N/A")
return ipcu
| [
"\nallow-lan: false\nbind-address: '*'\ndns:\n default-nameserver:\n - 119.29.29.29\n - 223.5.5.5\n enable: false\n enhanced-mode: fake-ip\n fallback:\n - https://208.67.222.222/dns-query\n - https://public.dns.iij.jp/dns-query\n - https://101.6.6.6:8443/dns-query\n fallback-filter:\n geoip: true\n geoip-code: CN\n listen: 0.0.0.0:53\n nameserver:\n - 119.29.29.29\n - 223.5.5.5\n - 114.114.114.114\nexternal-controller: 127.0.0.1:11230\nipv6: true\nlog-level: info\nmixed-port: 11220\nmode: rule\nproxies: null\nproxy-groups:\n- name: auto\n type: select\n use:\n - Default\nproxy-providers:\n Default:\n health-check:\n enable: true\n interval: 600000\n url: http://www.gstatic.com/generate_204\n path: ./default.yaml\n type: file\nrules:\n- DOMAIN-KEYWORD,stun,auto\n- DOMAIN-SUFFIX,gstatic.com,auto\n- DOMAIN-KEYWORD,gstatic,auto\n- DOMAIN-SUFFIX,google.com,auto\n- DOMAIN-KEYWORD,google,auto\n- DOMAIN,google.com,auto\n- DOMAIN-SUFFIX,bilibili.com,auto\n- DOMAIN-KEYWORD,bilibili,auto\n- DOMAIN,bilibili.com,auto\n- DOMAIN-SUFFIX,microsoft.com,auto\n- DOMAIN-SUFFIX,cachefly.net,auto\n- DOMAIN-SUFFIX,apple.com,auto\n- DOMAIN-SUFFIX,cdn-apple.com,auto\n- SRC-IP-CIDR,192.168.1.201/32,DIRECT\n- IP-CIDR,127.0.0.0/8,DIRECT\n- GEOIP,CN,DIRECT\n- MATCH,auto\n "
] |
2024-01-10 | ALL-TOPIKMate/Essay-Scoring | Model~gpt_response.py | import os
import openai
import json
def gpt_response(question, quest_content, user_answer, answer, length):
updated_question = question.split('600~700자로 글을')[0].strip() + ' 글로 ' + question.split('600~700자로 글을')[1].strip()
openai.api_key = os.getenv('OPENAI_API_KEY') #호출할 때는 메모장에서 가져오기
user_content = "문제: " + updated_question +"\n" + '제시문: ' + quest_content + "\n\n" + '사용자 답안: ' + user_answer +"\n"+ '예시 답안' + answer
message_info = [{
"role": "system",
"content": "너는 TOPIK(외국인 및 재외국민을 대상으로 하는 한국어 능력 시험)을 가르치는 선생님이야. 문제와 제시문, 그리고 예시 답안이 주어질거야. 사용자 답안이 문제와 제시문의 내용에 맞게 잘 작성되었는지 채점해줘. 글자 수에 대한 지적은 하지마. 예시 답안은 문제와 제시문에 대한 답변 예시라고 생각해줘. 답안은 JSON 형태로 구성되어야하고 45점이 최고점인 score, Good Points, Weak Point로 구성되어야 해."
}]
message_info.append({"role":"user","content":user_content})
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages = message_info,
temperature=0.7,
max_tokens=512,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
answer = response['choices'][0]['message']['content']
answer = json.loads(answer)
answer['Length_Check'] = length
return answer
| [
"문제: updated_question7ceb8fe2-f85a-4aa0-ac92-9686652117a2\n제시문: PLACEHOLDER\n\n사용자 답안: PLACEHOLDER\n예시 답안PLACEHOLDER",
"너는 TOPIK(외국인 및 재외국민을 대상으로 하는 한국어 능력 시험)을 가르치는 선생님이야. 문제와 제시문, 그리고 예시 답안이 주어질거야. 사용자 답안이 문제와 제시문의 내용에 맞게 잘 작성되었는지 채점해줘. 글자 수에 대한 지적은 하지마. 예시 답안은 문제와 제시문에 대한 답변 예시라고 생각해줘. 답안은 JSON 형태로 구성되어야하고 45점이 최고점인 score, Good Points, Weak Point로 구성되어야 해."
] |
2024-01-10 | nmaswood/fgpt | python~springtime~springtime~services~excel_analyzer.py | import abc
from anthropic import Anthropic
from pydantic import BaseModel
from springtime.services.format_sheet import format_sheet
from springtime.services.html import html_from_text
from springtime.services.prompts import CLAUDE_PROMPT
from springtime.services.sheet_processor import PreprocessedSheet
class ResponseWithPrompt(BaseModel):
prompt: str
content: str
html: str | None
class ExcelAnalyzer(abc.ABC):
@abc.abstractmethod
def analyze(self, *, sheets: list[PreprocessedSheet]) -> ResponseWithPrompt:
pass
class ClaudeExcelAnalyzer(ExcelAnalyzer):
def __init__(self, anthropic_client: Anthropic) -> None:
self.anthropic = anthropic_client
def analyze(self, *, sheets: list[PreprocessedSheet]) -> ResponseWithPrompt:
table_content = "\n---\n".join([format_sheet(sheet) for sheet in sheets])
prompt = f"""
Human: {CLAUDE_PROMPT}
__START_DATA__
{table_content}
__END_DATA__
Assistant:
"""
content = self.anthropic.completions.create(
model="claude-2",
max_tokens_to_sample=1_000_000,
prompt=prompt,
).completion.strip()
return ResponseWithPrompt(
prompt=prompt,
content=content,
html=html_from_text(content),
)
| [
"\n\n\nHuman: PLACEHOLDER\n\n__START_DATA__\nPLACEHOLDER\n__END_DATA__\n\n\nAssistant:\n"
] |
2024-01-10 | nmaswood/fgpt | python~springtime~springtime~tests~services~test_table_analyzer.py | import os
import pandas as pd
import pytest
from anthropic import Anthropic
from springtime.services.excel_analyzer import ClaudeExcelAnalyzer
from springtime.services.sheet_processor import (
CLAUDE_SHEET_PROCESSOR,
)
from springtime.services.table_analyzer import TableAnalyzer, TableAnalyzerImpl
XLSX = os.path.join(os.path.dirname(__file__), "../data/dummy-extracted.xlsx")
CLAUDE_EXCEL_ANALYZER = ClaudeExcelAnalyzer(Anthropic())
@pytest.fixture()
def claude_table_analyzer():
return TableAnalyzerImpl(CLAUDE_EXCEL_ANALYZER, CLAUDE_SHEET_PROCESSOR)
def test_analyze(
gpt_table_analyzer: TableAnalyzer,
claude_table_analyzer: TableAnalyzer,
):
xl = pd.ExcelFile(XLSX)
for svc in (gpt_table_analyzer, claude_table_analyzer):
resp = svc.analyze(
excel_file=xl,
)
chunks = resp.chunks
assert len(chunks) > 0
| [] |
2024-01-10 | nmaswood/fgpt | python~springtime~springtime~tests~services~test_excel_analyzer.py | import os
import pandas as pd
import pytest
from anthropic import Anthropic
from springtime.services.excel_analyzer import ClaudeExcelAnalyzer, ExcelAnalyzer
from springtime.services.sheet_processor import (
CLAUDE_SHEET_PROCESSOR,
)
XLSX = os.path.join(
os.path.dirname(__file__),
"../data/wet-noses-sales-and-margin.xlsx",
)
@pytest.fixture()
def claude_analyzer():
client = Anthropic()
return ClaudeExcelAnalyzer(client)
@pytest.mark.skipif(False, reason="")
def test_analyze_claude(claude_analyzer: ExcelAnalyzer):
xl = pd.ExcelFile(XLSX)
sheets = CLAUDE_SHEET_PROCESSOR.preprocess(xl=xl)
resp = claude_analyzer.analyze(sheets=sheets)
breakpoint()
| [] |
2024-01-10 | nmaswood/fgpt | python~springtime~springtime~routers~token_length_service.py | import anthropic
import tiktoken
ENC_GPT4 = tiktoken.encoding_for_model("gpt-4")
anthropic_client = anthropic.Anthropic()
class TokenLength:
@staticmethod
def gpt4(text: str):
return len(ENC_GPT4.encode(text))
@staticmethod
def claude100k(text: str):
return anthropic_client.count_tokens(text)
| [] |
2024-01-10 | nmaswood/fgpt | python~springtime~springtime~main.py | import uvicorn
from anthropic import Anthropic
from fastapi import FastAPI
from loguru import logger
from springtime.excel.table_extractor import TabulaTableExtractor
from springtime.models.open_ai import OpenAIModel
from springtime.object_store.object_store import GCSObjectStore
from springtime.routers.chat_router import ChatRouter
from springtime.routers.embeddings_router import EmbeddingsRouter
from springtime.routers.pdf_router import PdfRouter
from springtime.routers.prompt_router import PromptRouter
from springtime.routers.report_router import ReportRouter
from springtime.routers.table_router import TableRouter
from springtime.routers.text_router import TextRouter
from springtime.routers.vector_router import VectorRouter
from springtime.services.chat_service import OpenAIChatService
from springtime.services.embeddings_service import OpenAIEmbeddingsService
from springtime.services.excel_analyzer import ClaudeExcelAnalyzer
from springtime.services.prompt_service import PromptServiceImpl
from springtime.services.report_service import OpenAIReportService
from springtime.services.scan_service import OpenAIScanService
from springtime.services.sheet_processor import (
CLAUDE_SHEET_PROCESSOR,
)
from springtime.services.table_analyzer import TableAnalyzerImpl
from springtime.services.thumbnail_service import FitzThumbnailService
from springtime.services.vector_service import PineconeVectorService
from .settings import SETTINGS
app = FastAPI()
logger.info("Starting server")
if SETTINGS.tracing_enabled:
from opentelemetry.instrumentation.fastapi import FastAPIInstrumentor
from springtime.tracer import init_tracing
init_tracing()
FastAPIInstrumentor.instrument_app(app)
OBJECT_STORE = GCSObjectStore()
TABLE_EXTRACTOR = TabulaTableExtractor(OBJECT_STORE)
THUMBNAIL_SERVICE = FitzThumbnailService()
ANTHROPIC_CLIENT = Anthropic()
SCAN_SERVICE = OpenAIScanService(OpenAIModel.gpt3_16k)
CLAUDE_EXCEL_ANALYZER = ClaudeExcelAnalyzer(ANTHROPIC_CLIENT)
CLAUDE_TABLE_ANALYZER = TableAnalyzerImpl(CLAUDE_EXCEL_ANALYZER, CLAUDE_SHEET_PROCESSOR)
EMBEDDING_SERVICE = OpenAIEmbeddingsService()
VECTOR_SERVICE = PineconeVectorService(
api_key=SETTINGS.pinecone_api_key,
environment=SETTINGS.pinecone_env,
index_name=SETTINGS.pinecone_index,
namespace=SETTINGS.pinecone_namespace,
)
OPENAI_REPORT_SERVICE = OpenAIReportService()
CHAT_SERVICE = OpenAIChatService(OpenAIModel.gpt3_16k)
PROMPT_SERVICE = PromptServiceImpl(ANTHROPIC_CLIENT)
app.include_router(ChatRouter(CHAT_SERVICE).get_router())
app.include_router(
ReportRouter(
OPENAI_REPORT_SERVICE,
SCAN_SERVICE,
).get_router(),
)
app.include_router(
PdfRouter(TABLE_EXTRACTOR, OBJECT_STORE, THUMBNAIL_SERVICE).get_router(),
)
app.include_router(
TableRouter(
CLAUDE_TABLE_ANALYZER,
OBJECT_STORE,
).get_router(),
)
app.include_router(TextRouter().get_router())
app.include_router(VectorRouter(VECTOR_SERVICE).get_router())
app.include_router(EmbeddingsRouter(EMBEDDING_SERVICE).get_router())
app.include_router(PromptRouter(PROMPT_SERVICE).get_router())
@app.get("/ping")
def ping():
return {"ping": "ping"}
@app.get("/healthz")
def healthz():
return "OK"
def start():
uvicorn.run(
"springtime.main:app",
host=SETTINGS.host,
port=SETTINGS.port,
reload=SETTINGS.reload,
timeout_keep_alive=60,
)
| [] |
2024-01-10 | nmaswood/fgpt | python~springtime~springtime~services~report_service.py | import abc
import time
import openai
from loguru import logger
from pydantic import BaseModel
from springtime.models.open_ai import OpenAIModel
from springtime.services.scan_service import get_chunks
class Term(BaseModel):
term_value: str
term_name: str
class PageOfQuestions(BaseModel):
order: int
value: list[str] = []
class PageOfTerms(BaseModel):
order: int
value: list[Term] = []
class ReportService(abc.ABC):
@abc.abstractmethod
def generate_questions(self, text: str) -> list[PageOfQuestions]:
pass
@abc.abstractmethod
def generate_terms(self, text: str) -> list[PageOfTerms]:
pass
MODEL = OpenAIModel.gpt3_16k
ALL_TERMS = frozenset(
{
"Document Name",
"Company Overview",
"Company Industry",
"Document Overview",
"Document Date",
"Lead Arranger",
},
)
class OpenAIReportService(ReportService):
def generate_questions(self, text: str) -> list[PageOfQuestions]:
acc: list[PageOfQuestions] = []
for idx, chunk in enumerate(get_chunks(text, 30_000)):
if questions := self.generate_questions_for_text(chunk):
acc.append(PageOfQuestions(order=idx, value=questions))
return acc
def generate_terms(self, text: str) -> list[PageOfTerms]:
acc: list[PageOfTerms] = []
terms_needed = set(ALL_TERMS)
for idx, chunk in enumerate(get_chunks(text, 30_000)):
if terms := self.generate_terms_for_text(terms_needed, chunk):
for term in terms:
terms_needed.remove(term.term_name)
acc.append(PageOfTerms(order=idx, value=terms))
if not terms_needed:
return acc
return acc
def generate_questions_for_text(self, text: str) -> list[str]:
for _attempt in range(3):
try:
return self._generate_questions_for_text(text)
except openai.error.RateLimitError as e:
logger.error(e)
logger.error("OpenAI response for questions failed")
logger.error("Sleeping for 10 seconds")
time.sleep(10)
msg = "OpenAI response for questions failed"
raise Exception(msg)
def _generate_questions_for_text(self, text: str) -> list[str]:
completion = openai.ChatCompletion.create(
model=MODEL,
messages=[
{
"role": "system",
"content": "You are an expert financial analyst AI assistant.",
},
{
"role": "user",
"content": "You will be given a document. Read the document and generate the top 5 most relevant/interesting questions you would want to ask about the data to better understand it for evaluating a potential investment.",
},
{
"role": "user",
"content": """
* Speak in the third person, e.g. do not use "you"
* Prefer proper, specific nouns to refer to entities
* Output each question on a new line. Do not output any other text.
* Use '*' for each question
""",
},
{"role": "user", "content": f"Document: {text}"},
],
temperature=0.5,
)
value = completion.choices[0].message.content
return [question.lstrip("*-").strip() for question in value.split("\n")]
def generate_terms_for_text(self, terms_needed: set[str], text: str) -> list[Term]:
for _attempt in range(3):
try:
return self._generate_terms(terms_needed, text)
except openai.error.RateLimitError as e:
logger.error(e)
logger.error("OpenAI response for questions failed")
logger.error("Sleeping for 10 seconds")
time.sleep(10)
msg = "OpenAI response for terms failed"
raise Exception(msg)
def _generate_terms(self, terms_needed: set[str], text: str) -> list[Term]:
terms_list = "\n".join(terms_needed)
terms = f"""
Search the document for the following terms and output their value:
{terms_list}
* Be as objective as possible. Do not output any opinions or judgments. Avoid sounding like a sales pitch
* If information for a term is not available, do not return anything for that term.
* Structure your output as Term Name | Term Value
For example,
For Lead Arranger output:
Lead Arranger | Goldman Sachs
"""
completion = openai.ChatCompletion.create(
model=MODEL,
messages=[
{
"role": "system",
"content": "You are an expert financial analyst AI assistant.",
},
{"role": "system", "content": terms},
{"role": "user", "content": f"Document: {text}"},
],
temperature=0,
)
response = completion.choices[0].message.content
try:
by_new_line = response.split("\n")
terms = [term for line in by_new_line if (term := parse_term(line))]
return terms
except Exception as e:
logger.error(e)
logger.error("Invalid terms parsed")
return []
IGNORE = {
"not provided",
"not available",
"not specified",
"unknown",
"not available in the provided document.",
"not provided in the document",
"n/a",
"not mentioned in the document",
}
def parse_term(value: str) -> Term | None:
splat = value.split("|", 1)
if len(splat) != 2:
return None
left, right = splat
left = left.strip()
right = right.strip()
lowered = right.lower()
if right.lower() in IGNORE or right.lower().startswith("not available"):
return None
return Term(term_name=left, term_value=right)
| [
"You are an expert financial analyst AI assistant.",
"\n* Speak in the third person, e.g. do not use \"you\"\n* Prefer proper, specific nouns to refer to entities\n* Output each question on a new line. Do not output any other text.\n* Use '*' for each question\n",
"Document: PLACEHOLDER",
"You will be given a document. Read the document and generate the top 5 most relevant/interesting questions you would want to ask about the data to better understand it for evaluating a potential investment."
] |
2024-01-10 | nmaswood/fgpt | python~springtime~springtime~services~scan_service.py | import abc
import re
from enum import Enum
import openai
from loguru import logger
from pydantic import BaseModel
from springtime.models.open_ai import OpenAIModel
class TrafficlightAnswer(str, Enum):
red = "red"
yellow = "yellow"
green = "green"
class ScanResult(BaseModel):
description: str
tags: list[str]
is_financial_document: TrafficlightAnswer
is_cim: TrafficlightAnswer
class ScanService(abc.ABC):
@abc.abstractmethod
def scan(
self,
*,
file_name: str,
text: str,
) -> ScanResult:
pass
LIMIT = 7000
PROMPT = """
You are an expert financial analyst.
You will be given an excerpt from a document.
* Provide a 1 line description of the entire document. Do not start with "This document is about..." or "This document describes..." or "Excerpt from ..." just describe the document in 1 line.
* Provide tags describing the category of the document
* Is this is a financial document? Reply with green if you are very sure it is financial document. Reply with yellow if you are not sure. Reply with red if you are very sure it is not a financial document.
* Is this is document a Confidential Information Memorandum or Investor Prospectus or Management Presentation? Reply with green if you are very sure it is financial document. Reply with yellow if you are not sure. Reply with red if you are very sure it is not a financial document.
Output your reponse in the following format:
Description: <description>
Tags: <tag1>, <tag2>, <tag3>
Is financial document: <green/yellow/red>
Is confidential information memorandum: <green/yellow/red>
"""
class OpenAIScanService(ScanService):
def __init__(self, model: OpenAIModel) -> None:
self.model = model
def scan(
self,
*,
file_name: str,
text: str,
) -> ScanResult:
processed_text = first_chunk(text, LIMIT)
with_out_white_space = remove_extra_whitespace(processed_text)
response = openai.ChatCompletion.create(
model=self.model,
messages=[
{
"role": "system",
"content": PROMPT,
},
{
"role": "user",
"content": f"""
file name: {file_name}
file excerpt: {with_out_white_space}
""",
},
],
temperature=0,
)
choices = response["choices"]
if len(choices) == 0:
logger.warning("No choices returned from OpenAI")
first_choice = choices[0]
description = first_choice["message"]["content"]
return parse_response(description)
def parse_response(response: str) -> ScanResult:
description = response.find("Description:")
tags = response.find("Tags:")
fin_document = response.find("Is financial document:")
cim = response.find("Is confidential information memorandum:")
description = response[description:tags].split("Description:")[1].strip()
tag_string = response[tags:fin_document].split("Tags:")[1].strip()
fin_document_string = (
response[fin_document:cim].split("Is financial document:")[1].strip().lower()
)
cim_string = (
response[cim:]
.split("Is confidential information memorandum:")[1]
.strip()
.lower()
)
final_tag_string = tag_string.split("Is financial document:")[0].strip()
tags = sorted({tag.strip() for tag in tag_string.split(",")})
return ScanResult(
description=description,
tags=tags,
is_financial_document=TrafficlightAnswer(fin_document_string),
is_cim=TrafficlightAnswer(cim_string),
)
def remove_extra_whitespace(s: str) -> str:
return re.sub(r"\n+", "\n", s.replace("\n ", "\n")).strip()
def first_chunk(s: str, maxlength: int):
gen = get_chunks(s, maxlength)
return next(gen)
def get_chunks(s: str, maxlength: int):
start = 0
end = 0
while start + maxlength < len(s) and end != -1:
end = s.rfind(" ", start, start + maxlength + 1)
yield s[start:end]
start = end + 1
yield s[start:]
| [
"\nfile name: PLACEHOLDER\nfile excerpt: PLACEHOLDER\n ",
"\n\nYou are an expert financial analyst.\n\nYou will be given an excerpt from a document.\n\n* Provide a 1 line description of the entire document. Do not start with \"This document is about...\" or \"This document describes...\" or \"Excerpt from ...\" just describe the document in 1 line.\n* Provide tags describing the category of the document\n* Is this is a financial document? Reply with green if you are very sure it is financial document. Reply with yellow if you are not sure. Reply with red if you are very sure it is not a financial document.\n* Is this is document a Confidential Information Memorandum or Investor Prospectus or Management Presentation? Reply with green if you are very sure it is financial document. Reply with yellow if you are not sure. Reply with red if you are very sure it is not a financial document.\n\nOutput your reponse in the following format:\n\nDescription: <description>\nTags: <tag1>, <tag2>, <tag3>\nIs financial document: <green/yellow/red>\nIs confidential information memorandum: <green/yellow/red>\n"
] |
2024-01-10 | nmaswood/fgpt | python~springtime~springtime~services~chat_service.py | import abc
from collections.abc import Generator
from typing import Any
import openai
from loguru import logger
from springtime.models.chat import ChatFileContext, ChatHistory
from springtime.models.open_ai import OpenAIModel
from springtime.services.prompt import create_prompt
class ChatService(abc.ABC):
@abc.abstractmethod
def ask_streaming(
self,
context: list[ChatFileContext],
question: str,
history: list[ChatHistory],
) -> Generator[Any, Any, None]:
pass
@abc.abstractmethod
def get_prompt(
self,
context: list[ChatFileContext],
question: str,
history: list[ChatHistory],
) -> str:
pass
@abc.abstractmethod
def get_title(self, question: str, answer: str) -> str:
pass
SYSTEM_1 = """You are an AI assistant that is an expert financial analyst.
Do not use language or provide opinions or judgment on an investment or financials, but provide objective and factual analysis.
Use the data provided, but analyze it objectively and in a fact-based manner.
Answer all questions accurately, especially when you include data.
If you make a calculation, outline your methodology or assumptions clearly.
Round and use an easy to read numeric format when showing numbers.
Do not use language or provide opinions or judgment on an investment or financials, but provide objective and factual analysis.
"""
SYSTEM_2 = """
Output your response in well formatted markdown.
For example:
* for list responses use bullet points
* for headers use #, ##, ###, etc.
* for links use [link text](link url)
* for tables use table elements
* use br tags for line breaks
"""
class OpenAIChatService(ChatService):
def __init__(self, model: OpenAIModel) -> None:
self.model = model
def get_prompt(
self,
context: list[ChatFileContext],
question: str,
history: list[ChatHistory],
) -> str:
return f"""
System: {SYSTEM_1}
System: {SYSTEM_2}
User: {create_prompt(context, question, history)}
"""
def ask_streaming(
self,
context: list[ChatFileContext],
question: str,
history: list[ChatHistory],
) -> Generator[Any, Any, None]:
prompt = create_prompt(context, question, history)
response = openai.ChatCompletion.create(
model=self.model,
messages=[
{"role": "system", "content": SYSTEM_1},
{
"role": "system",
"content": SYSTEM_2,
},
{"role": "user", "content": prompt},
],
temperature=0,
stream=True,
)
for resp in response:
choices = resp["choices"]
delta = choices[0].get("delta")
if not delta:
continue
content = delta.get("content")
if content:
yield content
def get_title(self, question: str, answer: str) -> Generator[Any, Any, None]:
prompt = f"""
Based on the question and answer please respond with a concise, accurate title for the exchange.
Do not output anything except the title itself. Try to limit your response to at most five words.
Question: {question}
Answer: {answer}
""".format(
question=question,
answer=answer,
)
response = openai.ChatCompletion.create(
model=self.model,
messages=[
{
"role": "system",
"content": "You are an expert financial analyst chat bot. The user asked you the following question and you responded with the following answer.",
},
{"role": "user", "content": prompt},
],
temperature=0,
)
choices = response["choices"]
if len(choices) == 0:
logger.warning("No choices returned from OpenAI")
first_choice = choices[0]
return first_choice["message"]["content"]
| [
"You are an expert financial analyst chat bot. The user asked you the following question and you responded with the following answer.",
"\n Based on the question and answer please respond with a concise, accurate title for the exchange.\n Do not output anything except the title itself. Try to limit your response to at most five words.\n\n Question: PLACEHOLDER\n Answer: PLACEHOLDER\n ",
"\nOutput your response in well formatted markdown.\nFor example:\n\n* for list responses use bullet points\n* for headers use #, ##, ###, etc.\n* for links use [link text](link url)\n* for tables use table elements\n* use br tags for line breaks\n",
"You are an AI assistant that is an expert financial analyst.\nDo not use language or provide opinions or judgment on an investment or financials, but provide objective and factual analysis.\nUse the data provided, but analyze it objectively and in a fact-based manner.\nAnswer all questions accurately, especially when you include data.\nIf you make a calculation, outline your methodology or assumptions clearly.\nRound and use an easy to read numeric format when showing numbers.\nDo not use language or provide opinions or judgment on an investment or financials, but provide objective and factual analysis.\n"
] |
2024-01-10 | nmaswood/fgpt | python~springtime~springtime~settings.py | from pydantic import BaseSettings, Field
from springtime.models.open_ai import OpenAIModel
class Settings(BaseSettings):
host: str = Field(env="host")
port: int = Field(env="port")
openai_api_key: str = Field(env="OPENAI_API_KEY")
pinecone_api_key: str = Field(env="PINECONE_API_KEY")
pinecone_env: str = Field(env="PINECONE_ENV")
pinecone_index: str = Field(env="PINECONE_INDEX")
pinecone_namespace: str = Field(env="PINECONE_NAMESPACE")
tracing_enabled: bool = Field(env="TRACING_ENABLED", default=False)
reload: bool = Field(env="RELOAD", default=False)
service_to_service_secret: str | None = Field(
env="SERVICE_TO_SERVICE_SECRET",
)
anthropic_api_key: str = Field(env="ANTHROPIC_API_KEY")
reports_openai_model: OpenAIModel = Field(env="REPORTS_OPENAI_MODEL")
mock_out_claude: bool = Field(
env="MOCK_OUT_CLAUDE",
default=False,
)
class Config:
env_file = ".env"
env_file_encoding = "utf-8"
SETTINGS = Settings()
| [] |
2024-01-10 | nmaswood/fgpt | python~springtime~springtime~services~prompt_service.py | import abc
import time
import anthropic
import pydantic
from anthropic import Anthropic
from loguru import logger
from springtime.services.html import html_from_text
class PromptRequest(pydantic.BaseModel):
template: str
args: dict[str, str]
class PromptResponse(pydantic.BaseModel):
raw: str
html: str | None
input_tokens: pydantic.NonNegativeInt
output_tokens: pydantic.NonNegativeInt
prompt: str
class PromptService(abc.ABC):
@abc.abstractmethod
def run(
self,
req: PromptRequest,
) -> PromptResponse:
pass
class PromptServiceImpl(
PromptService,
):
def __init__(self, anthropic: Anthropic) -> None:
self.anthropic = anthropic
def run(
self,
req: PromptRequest,
) -> PromptResponse:
prompt = """
Human: {template}
Assistant:
""".format(
template=req.template,
)
prompt = prompt.format(**req.args)
input_tokens = self.anthropic.count_tokens(prompt)
def get_response() -> str:
for attempt in range(9):
try:
return self.anthropic.completions.create(
model="claude-2",
max_tokens_to_sample=1_000_000,
prompt=prompt,
).completion.strip()
except anthropic.RateLimitError:
seconds = 2 ** (attempt + 2)
logger.info(f"Rate limit exceeded sleeping {seconds}")
time.sleep(seconds)
msg = "Rate limit exceeded"
raise Exception(msg)
response = get_response()
output_tokens = self.anthropic.count_tokens(response)
html = html_from_text(response)
return PromptResponse(
raw=response,
html=html,
input_tokens=input_tokens,
output_tokens=output_tokens,
prompt=prompt,
)
| [
"\nHuman: {template}\n\n\n\nAssistant:\n"
] |
2024-01-10 | jimmingcheng/scooterbot_secretary | secretary~tasks~calendar.py | from typing import Any
from typing import Dict
from typing import Optional
import aiohttp
import arrow
import staticconf
from dataclasses import dataclass
from llm_task_handler.handler import OpenAIFunctionTaskHandler
from llm_task_handler.handler import ProgressMessageFunc
from llm_task_handler.handler import TaskState
from secretary.calendar import get_calendar_service
from secretary.database import UserTable
def google_apis_api_key() -> str:
return staticconf.read('google_apis.api_key', namespace='secretary') # type: ignore
@dataclass
class AddCalendarEventArgs:
title: str
start_time: arrow.Arrow
end_time: arrow.Arrow
is_all_day: bool
location: Optional[str]
confirmation_message: str
class AddCalendarEvent(OpenAIFunctionTaskHandler):
def task_type(self) -> str:
return 'add_calendar_event'
def intent_selection_function(self) -> dict:
return {
'name': 'add_calendar_event',
'description': f"Current time is {arrow.now().format('YYYY-MM-DDTHH:mm:ssZZ')}. Prepare to add a calendar event",
'parameters': {
'type': 'object',
'properties': {
'title': {
'type': 'string',
},
'start_time': {
'type': 'string',
'description': 'Start time in the format YYYY-MM-DDTHH:mm:ssZZ. "this weekend" = the coming weekend.',
},
'end_time': {
'type': 'string',
'description': 'End time in the format YYYY-MM-DDTHH:mm:ssZZ',
},
'is_all_day': {
'type': 'boolean',
'description': 'Whether it is an all day event',
},
'location': {
'type': 'string',
'description': 'Location of the event',
},
'confirmation_message': {
'type': 'string',
'description': """
Human-readable confirmation of the fields that the event has been added with. e.g.:
Added to your calendar:
>>> Title: **Doctor's appointment**
Date/Time: **October 5, 2023, 4-6:30pm**
Location: **123 Main St, San Francisco, CA 94105**
""",
},
},
'required': ['title', 'start_time', 'end_time', 'is_all_day', 'confirmation_message'],
}
}
async def transition(
self,
cur_state: TaskState,
progress_message_func: Optional[ProgressMessageFunc] = None,
) -> TaskState:
args = cur_state.custom_state
if cur_state.state_id == self.INTENT_SELECTION_STATE_ID:
return TaskState(
handler=self,
user_prompt=cur_state.user_prompt,
custom_state=AddCalendarEventArgs(
title=args['title'],
start_time=arrow.get(args['start_time']),
end_time=arrow.get(args['end_time']),
is_all_day=args['is_all_day'],
location=args.get('location'),
confirmation_message=args['confirmation_message'],
)
)
else:
event: Dict[str, Any] = {
'summary': args.title
}
if args.location:
async with aiohttp.ClientSession() as session:
url = f'https://maps.googleapis.com/maps/api/place/textsearch/json?key={google_apis_api_key()}&query={args.location}'
async with session.get(url) as resp:
resp_data = await resp.json()
place = resp_data['results'][0] if resp_data.get('results') else None
if place:
event['location'] = place['formatted_address']
if args.is_all_day:
event['start'] = {'date': args.start_time.format('YYYY-MM-DD')}
event['end'] = {'date': args.end_time.format('YYYY-MM-DD')}
else:
event['start'] = {'dateTime': args.start_time.format('YYYY-MM-DDTHH:mm:ssZZ')}
event['end'] = {'dateTime': args.end_time.format('YYYY-MM-DDTHH:mm:ssZZ')}
# aiogoogle doesn't work for some reason
user = UserTable().get(self.user_id)
get_calendar_service(user['google_apis_user_id']).events().insert(calendarId='primary', body=event).execute()
return TaskState(
handler=self,
user_prompt=cur_state.user_prompt,
reply=args.confirmation_message,
is_done=True,
)
| [] |
2024-01-10 | jimmingcheng/scooterbot_secretary | secretary~alexa.py | from typing import NamedTuple
import arrow
import json
import logging
import openai
from ask_sdk_core.skill_builder import CustomSkillBuilder
from ask_sdk_core.api_client import DefaultApiClient
from ask_sdk_core.skill_builder import SkillBuilder
from ask_sdk_core.dispatch_components import AbstractExceptionHandler
from ask_sdk_core.handler_input import HandlerInput
from ask_sdk_model import Response
from ask_sdk_model.intent import Intent
from ask_sdk_model.slot import Slot
from ask_sdk_model.dialog.delegate_directive import DelegateDirective
from ask_sdk_runtime.exceptions import DispatchException
from ask_sdk_core.dispatch_components import AbstractRequestHandler
from ask_sdk_core.utils import is_intent_name
from ask_sdk_core.utils import is_request_type
from secretary.calendar import event_start_time
from secretary.write import add_todo
class ChatGPTOutput(NamedTuple):
description: str
date: arrow.Arrow
confirmation_message: str
class AddTodoHandler(AbstractRequestHandler):
def can_handle(self, handler_input: HandlerInput) -> bool:
return is_intent_name('AddTodo')(handler_input)
def handle(self, handler_input: HandlerInput) -> Response:
access_token = str(handler_input.request_envelope.context.system.user.access_token) # type: ignore
intent = handler_input.request_envelope.request.intent # type: ignore
user_prompt = intent.slots['Prompt'].value # type: ignore
chatgpt_output = self.get_chatgpt_output(user_prompt)
todo, reminder_days_before = add_todo(access_token, chatgpt_output.description, chatgpt_output.date)
todo_start_time = event_start_time(todo)
sentences = [chatgpt_output.confirmation_message]
if reminder_days_before > 0:
duration = todo_start_time.humanize(
other=todo_start_time.shift(days=-reminder_days_before),
only_distance=True
)
sentences.append(f"I'll remind you {duration} before.")
speech = ' '.join(sentences)
handler_input.response_builder.speak(speech).set_should_end_session(True)
return handler_input.response_builder.response
def get_chatgpt_output(self, user_prompt: str) -> ChatGPTOutput:
instructions_prompt = f"""
Current time is {arrow.now()}
The user will input a todo list item in plain English. Convert it to this format:
{{"description": "Cut my hair", "date": "YYYY-MM-DD", "confirmation_message": "<Succinct spoken confirmation that the described todo is recorded on the supplied date>"}}
Output only the json.
"""
completion = openai.ChatCompletion.create(
model='gpt-3.5-turbo',
messages=[
{'role': 'system', 'content': instructions_prompt},
{'role': 'user', 'content': user_prompt},
],
temperature=0.0,
max_tokens=500,
)
completion_text = completion.choices[0].message.content
print(completion_text)
completion_data = json.loads(completion_text)
return ChatGPTOutput(
description=completion_data['description'],
date=arrow.get(completion_data['date']),
confirmation_message=completion_data['confirmation_message'],
)
class LaunchRequestHandler(AbstractRequestHandler):
def can_handle(self, handler_input: HandlerInput) -> bool:
return is_request_type('LaunchRequest')(handler_input)
def handle(self, handler_input: HandlerInput) -> Response:
updated_intent = Intent(
name='AddTodo',
slots={
'Prompt': Slot(name='Prompt', value=None),
}
)
handler_input.response_builder.add_directive(DelegateDirective(updated_intent))
return handler_input.response_builder.response
class CatchAllExceptionHandler(AbstractExceptionHandler):
def can_handle(self, handler_input: HandlerInput, exception: Exception) -> bool:
return False
def handle(self, handler_input: HandlerInput, exception: Exception) -> Response:
if not isinstance(exception, DispatchException):
logging.exception('Exception while responding to Alexa request')
handler_input.response_builder.speak('Sorry, something went wrong.').set_should_end_session(True)
return handler_input.response_builder.response
def get_skill_builder() -> SkillBuilder:
sb = CustomSkillBuilder(api_client=DefaultApiClient())
sb.add_request_handler(AddTodoHandler())
sb.add_request_handler(LaunchRequestHandler())
sb.add_exception_handler(CatchAllExceptionHandler())
return sb
| [
"f\"\"\"\nCurrent time is {arrow.now()}\n\nThe user will input a todo list item in plain English. Convert it to this format:\n{{\"description\": \"Cut my hair\", \"date\": \"YYYY-MM-DD\", \"confirmation_message\": \"<Succinct spoken confirmation that the described todo is recorded on the supplied date>\"}}\n\nOutput only the json.\n "
] |
2024-01-10 | jimmingcheng/scooterbot_secretary | secretary~tasks~question.py | from typing import Optional
import arrow
import yaml
from langchain.chat_models.openai import ChatOpenAI
from langchain.schema import HumanMessage
from llm_task_handler.handler import OpenAIFunctionTaskHandler
from llm_task_handler.handler import ProgressMessageFunc
from llm_task_handler.handler import TaskState
from secretary.calendar import get_calendar_service
from secretary.database import UserTable
class AnswerQuestionFromCalendar(OpenAIFunctionTaskHandler):
def task_type(self) -> str:
return 'answer_question_from_calendar'
def intent_selection_function(self) -> dict:
return {
'name': self.task_type(),
'description': f"Current time is {arrow.now().format('YYYY-MM-DDTHH:mm:ssZZ')}. Answer a question",
'parameters': {
'type': 'object',
'properties': {
'question': {
'type': 'string',
'description': 'The question to answer',
},
'start_time': {
'type': 'string',
'description': 'Start of the question\'s time range in the format YYYY-MM-DDTHH:mm:ssZZ. "this weekend" = the coming weekend.',
},
'end_time': {
'type': 'string',
'description': 'End of the question\'s time range in the format YYYY-MM-DDTHH:mm:ssZZ',
},
},
'required': ['question'],
}
}
async def transition(
self,
cur_state: TaskState,
progress_message_func: Optional[ProgressMessageFunc] = None,
) -> TaskState:
return TaskState(
handler=self,
user_prompt=cur_state.user_prompt,
reply='Next week, you have a PTA meeting on Tuesday at 6:30pm. You also have a dentist appointment on Thursday at 2:00pm.',
is_done=True,
)
args = cur_state.custom_state
question = args['question']
start_time = arrow.get(args['start_time']) if args.get('start_time') else None
end_time = arrow.get(args['end_time']) if args.get('end_time') else None
# aiogoogle doesn't work for some reason
user = UserTable().get(self.user_id)
events = get_calendar_service(user['google_apis_user_id']).events().list(
calendarId='primary',
timeMin=start_time.isoformat() if start_time else None,
timeMax=end_time.isoformat() if end_time else None,
singleEvents=True,
orderBy='startTime',
).execute().get('items', [])
prompt = f'''
# Instructions
Answer the question using only the provided events data.
# Question
{question}
# Events
'''
prompt += self.events_yaml(events)
import logging; logging.info(prompt)
chat_model = ChatOpenAI( # type: ignore
model_name='gpt-4-1106-preview',
temperature=0,
max_tokens=250,
)
reply = chat_model([
HumanMessage(content=prompt),
])
return TaskState(
handler=self,
user_prompt=prompt,
reply=reply,
is_done=True,
)
def events_yaml(self, events: list[dict]) -> str:
return yaml.dump([
{
'when': self._get_time_phrase(event),
'where': event.get('location'),
'what': event['summary'],
'details': event.get('description'),
}
for event in events
])
def _get_time_phrase(self, event: dict) -> str:
start = self._get_event_time(event['start']).to('US/Pacific')
end = self._get_event_time(event['end']).to('US/Pacific')
if 'dateTime' in event['start']:
if start.date() == end.date():
if start.hour == end.hour and start.minute == end.minute:
return f"on {start.format('YYYY-MM-DD')} at {start.format('h:mm a')}"
else:
return f"on {start.format('YYYY-MM-DD')} from {start.format('h:mm a')} to {end.format('h:mm a')}"
else:
return f"from {start.naive} to {end.naive}"
else:
if start.date() == end.date():
return f"on {start.format('YYYY-MM-DD')}"
else:
return f"from {start.format('YYYY-MM-DD')} to {end.format('YYYY-MM-DD')}"
def _get_event_time(self, time_dict: dict) -> arrow.Arrow:
if 'dateTime' in time_dict:
return arrow.get(time_dict['dateTime'])
else:
return arrow.get(time_dict['date'])
| [
"\n# Instructions\n\nAnswer the question using only the provided events data.\n\n# Question\n\nPLACEHOLDER\n\n# Events\n\n"
] |
2024-01-10 | jimmingcheng/scooterbot_secretary | secretary~tasks~todo.py | from typing import Optional
import arrow
from llm_task_handler.handler import OpenAIFunctionTaskHandler
from llm_task_handler.handler import ProgressMessageFunc
from llm_task_handler.handler import TaskState
from secretary.write import add_todo
class AddTodo(OpenAIFunctionTaskHandler):
def task_type(self) -> str:
return 'add_todo_reminder_or_task'
def intent_selection_function(self) -> dict:
return {
'name': self.task_type(),
'description': 'Add a todo, reminder, or task. If there are multiple separate requests, process the most recent one.',
'parameters': {
'type': 'object',
'properties': {
'task_name': {
'type': 'string',
},
'due_date': {
'type': 'string',
'description': f'Current date is {arrow.now().format("YYYY-MM-DD")}',
},
},
'required': ['task_name'],
}
}
async def transition(
self,
cur_state: TaskState,
progress_message_func: Optional[ProgressMessageFunc] = None,
) -> TaskState:
cs = cur_state.custom_state
due_date = arrow.get(cs['due_date']) if 'due_date' in cs else arrow.now()
add_todo(self.user_id, cs['task_name'], due_date)
reply = f'''
Here's your todo:
>>> **{cs['task_name']}**
{due_date.format('dddd, MMMM D, YYYY')}
'''
return TaskState(
handler=self,
user_prompt=cur_state.user_prompt,
reply=reply,
is_done=True,
)
| [] |
2024-01-10 | jimmingcheng/scooterbot_secretary | secretary~write.py | import arrow
import json
import openai
from typing import Tuple
from secretary.calendar import event_start_time
from secretary.calendar import get_calendar_service
from secretary.database import UserTable
from secretary.todo_emailer import send_email
from secretary.todo_emailer import should_remind_today
def add_todo_from_prompt(user_id: str, user_prompt: str) -> str:
instructions_prompt = f"""
Current time is {arrow.now()}
The user will describe a task in plain English. Convert it to this format:
{{"description": "...", "date": "YYYY-MM-DD", "confirmation_message": "..."}}
Here's an example:
{{"description": "Cut my hair", "date": "2021-03-02", "confirmation_message": "I'll remind you to cut your hair next Tuesday, March 2"}}
Output only the json.
"""
completion_text = openai.ChatCompletion.create(
model='gpt-3.5-turbo',
messages=[
{'role': 'system', 'content': instructions_prompt},
{'role': 'user', 'content': user_prompt},
],
temperature=0.0,
max_tokens=500,
).choices[0].message.content
resp = json.loads(completion_text)
todo, reminder_days_before = add_todo(user_id, resp['description'], arrow.get(resp['date']))
response_speech = resp['confirmation_message']
if reminder_days_before > 0:
todo_start_time = event_start_time(todo)
duration = todo_start_time.humanize(
other=todo_start_time.shift(days=-reminder_days_before),
only_distance=True
)
response_speech += f" I'll remind you {duration} before."
return response_speech
def add_todo(user_id: str, description: str, date: arrow.Arrow) -> Tuple[dict, int]:
user = UserTable().get(user_id)
google_apis_user_id = user['google_apis_user_id']
calendar_id = user['todo_calendar_id']
cal_service = get_calendar_service(google_apis_user_id)
event = cal_service.events().insert(
calendarId=calendar_id,
body={
'summary': description,
'start': {'date': date.format('YYYY-MM-DD')},
'end': {'date': date.format('YYYY-MM-DD')},
},
).execute()
reminder_days_before = auto_set_reminder(event)
event = cal_service.events().update(
calendarId=calendar_id,
eventId=event['id'],
body=event,
).execute()
if should_remind_today(event, []):
send_email(google_apis_user_id, event)
return event, reminder_days_before
def get_todo_reminder_days_before(date: arrow.Arrow) -> int:
delta = date - arrow.now()
if delta.days > 28:
return 7
elif delta.days > 5:
return 2
else:
return 0
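# Editor's note, a hedged sketch of the tiers above: todos more than 28 days
# out get a 7-day reminder lead, 6-28 days out get 2 days, and anything sooner
# gets none. The dates below are illustrative only.
# >>> get_todo_reminder_days_before(arrow.now().shift(days=60))
# 7
# >>> get_todo_reminder_days_before(arrow.now().shift(days=10))
# 2
# >>> get_todo_reminder_days_before(arrow.now().shift(days=2))
# 0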
def convert_to_all_day_event(event: dict) -> None:
# all todos are represented as all-day events, so convert if necessary
event['start']['date'] = arrow.get(event['start']['dateTime']).format('YYYY-MM-DD')
event['end']['date'] = event['start']['date']
del event['start']['dateTime']
del event['end']['dateTime']
def auto_set_reminder(event: dict) -> int:
event['reminders']['useDefault'] = False
if not event['reminders'].get('overrides'):
event['reminders']['overrides'] = []
reminders = event['reminders']['overrides']
# reminders are represented as popup reminders
popup_reminder = None
for reminder in reminders:
if reminder['method'] == 'popup':
popup_reminder = reminder
if not popup_reminder:
popup_reminder = {'method': 'popup'}
reminders.append(popup_reminder)
reminder_days_before = get_todo_reminder_days_before(event_start_time(event))
for reminder in reminders:
# this will update all reminders for this event, but only the popup
# reminder is used for email reminder purposes
reminder['minutes'] = reminder_days_before * 1440
return reminder_days_before
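# Editor's sketch (hypothetical numbers, not from the original file): for a
# todo 30 days out, get_todo_reminder_days_before returns 7, so the popup
# reminder above is set to 7 * 1440 = 10080 minutes before the all-day event.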
| [] |
2024-01-10 | jimmingcheng/scooterbot_secretary | secretary~tasks~account.py | from typing import Optional
import json
from itertools import groupby
from langchain.chat_models.openai import ChatOpenAI
from langchain.schema import HumanMessage
from llm_task_handler.handler import OpenAIFunctionTaskHandler
from llm_task_handler.handler import ProgressMessageFunc
from llm_task_handler.handler import TaskState
from secretary.database import remove_account
class DisconnectAccount(OpenAIFunctionTaskHandler):
def task_type(self) -> str:
return 'handle_account_disconnection_or_termination_request'
def intent_selection_function(self) -> dict:
return {
'name': self.task_type(),
'description': 'Help user disconnect/terminate/cancel their account',
'parameters': {
'type': 'object',
'properties': {}
}
}
async def transition(
self,
cur_state: TaskState,
progress_message_func: Optional[ProgressMessageFunc] = None,
) -> TaskState:
function = {
'name': 'identify_account_disconnection_or_termination_messages',
'description': 'Help user disconnect their account',
'parameters': {
'type': 'object',
'properties': {
'encoded_conversation': {
'type': 'array',
'items': {
'type': 'string',
'enum': ['A', 'B', 'C', 'D', 'E'],
},
'description': '''
Account disconnection requires analysis of these message types:
A. User requests account disconnection
B. AI asks for confirmation
C. Same user confirms yes
D. Same user confirms no
E. Other
Detect these types and retain original message order in the resulting array.
'''
},
},
}
}
chat_model = ChatOpenAI( # type: ignore
model_name='gpt-4',
temperature=0,
max_tokens=250,
model_kwargs={"functions": [function]},
)
model_reply = chat_model([
HumanMessage(content=cur_state.user_prompt),
])
func_args = json.loads(model_reply.additional_kwargs['function_call']['arguments'])
encoded_convo = func_args['encoded_conversation']
cleaned_convo = [t for t in encoded_convo if t != 'E']
cleaned_convo = [t for t, _ in groupby(cleaned_convo)]
if encoded_convo[-1:] == ['A']:
reply = '''
Can you confirm that you want to disconnect your account now?
'''
elif cleaned_convo[-3:] == ['A', 'B', 'C']:
remove_account(self.user_id)
reply = 'Your account has been disconnected'
elif cleaned_convo[-3:] == ['A', 'B', 'D']:
reply = 'Okay, I won\'t disconnect your account.'
else:
reply = 'What?'
return TaskState(
handler=self,
user_prompt=cur_state.user_prompt,
reply=reply,
is_done=True,
)
class DisconnectAccountAbort(OpenAIFunctionTaskHandler):
def task_type(self) -> str:
return 'abort_account_disconnection_request'
def intent_selection_function(self) -> dict:
return {
'name': self.task_type(),
'description': 'Abort the account disconnection',
'parameters': {
'type': 'object',
'properties': {}
}
}
async def transition(
self,
cur_state: TaskState,
progress_message_func: Optional[ProgressMessageFunc] = None,
) -> TaskState:
return TaskState(
handler=self,
user_prompt=cur_state.user_prompt,
reply='Okay, I won\'t disconnect your account.',
is_done=True,
)
| [] |
2024-01-10 | nguyendinhthi0705/bedrock-rag-stock | 3_stock_tools~stock_tools_database_lib.py | import json
import os
import sys
import boto3
import sqlite3
from langchain.utilities import SQLDatabase
from langchain_experimental.sql import SQLDatabaseChain
from pathlib import Path
## database
stock_ticker_data=[
{
"symbol" : "PRAA",
"name" : "PRA Group, Inc.",
"currency" : "USD",
"stockExchange" : "NasdaqGS",
"exchangeShortName" : "NASDAQ"
},
{
"symbol" : "AMZN",
"name" : "Amazon.com, Inc.",
"currency" : "USD",
"stockExchange" : "NasdaqGS",
"exchangeShortName" : "NASDAQ"
},
{
"symbol" : "TSLA",
"name" : "Tesla Inc.",
"currency" : "USD",
"stockExchange" : "NasdaqGS",
"exchangeShortName" : "NASDAQ"
},
{
"symbol" : "PAAS",
"name" : "Pan American Silver Corp.",
"currency" : "USD",
"stockExchange" : "NasdaqGS",
"exchangeShortName" : "NASDAQ"
},
{
"symbol" : "PAAC",
"name" : "Proficient Alpha Acquisition Corp.",
"currency" : "USD",
"stockExchange" : "NasdaqCM",
"exchangeShortName" : "NASDAQ"
},
{
"symbol" : "RYAAY",
"name" : "Ryanair Holdings plc",
"currency" : "USD",
"stockExchange" : "NasdaqGS",
"exchangeShortName" : "NASDAQ"
},
{
"symbol" : "MPAA",
"name" : "Motorcar Parts of America, Inc.",
"currency" : "USD",
"stockExchange" : "NasdaqGS",
"exchangeShortName" : "NASDAQ"
},
{
"symbol" : "STAA",
"name" : "STAAR Surgical Company",
"currency" : "USD",
"stockExchange" : "NasdaqGM",
"exchangeShortName" : "NASDAQ"
},
{
"symbol" : "RBCAA",
"name" : "Republic Bancorp, Inc.",
"currency" : "USD",
"stockExchange" : "NasdaqGS",
"exchangeShortName" : "NASDAQ"
},
{
"symbol" : "AABA",
"name" : "Altaba Inc.",
"currency" : "USD",
"stockExchange" : "NasdaqGS",
"exchangeShortName" : "NASDAQ"
},
{
"symbol" : "AAXJ",
"name" : "iShares MSCI All Country Asia ex Japan ETF",
"currency" : "USD",
"stockExchange" : "NasdaqGM",
"exchangeShortName" : "NASDAQ"
},
{
"symbol" : "ZNWAA",
"name" : "Zion Oil & Gas, Inc.",
"currency" : "USD",
"stockExchange" : "NasdaqGM",
"exchangeShortName" : "NASDAQ"
}
]
def create_connection(db_file):
""" create a database connection to the SQLite database
specified by db_file
:param db_file: database file
:return: Connection object or None
"""
conn = None
try:
conn = sqlite3.connect(db_file)
return conn
    except sqlite3.Error as e:
print(e)
return conn
def create_table(conn, create_table_sql):
""" create a table from the create_table_sql statement
:param conn: Connection object
:param create_table_sql: a CREATE TABLE statement
:return:
"""
try:
c = conn.cursor()
c.execute(create_table_sql)
    except sqlite3.Error as e:
print(e)
def initial_database():
db_name = "stock_ticker_database.db"
if(os.path.isfile(db_name)):
return
conn = create_connection(db_name)
create_table_sql = """CREATE TABLE IF NOT EXISTS stock_ticker (
symbol text PRIMARY KEY,
name text NOT NULL,
currency text,
stockExchange text,
exchangeShortName text
);"""
create_table(conn, create_table_sql)
for item in stock_ticker_data:
conn.execute("INSERT INTO stock_ticker (symbol, name, currency,stockExchange, exchangeShortName ) VALUES (?, ?, ?, ?,?)",
(item["symbol"], item["name"], item["currency"], item["stockExchange"],item["exchangeShortName"]))
conn.commit()
conn.close()
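# Editor's sketch (not part of the original file): run the initializer and
# confirm the seed rows landed. Assumes execution from a directory where
# stock_ticker_database.db may be created; a fresh database holds the 12
# tickers seeded above.
if __name__ == "__main__":
    initial_database()
    conn = create_connection("stock_ticker_database.db")
    if conn is not None:
        count = conn.execute("SELECT COUNT(*) FROM stock_ticker").fetchone()[0]
        print(f"stock_ticker rows: {count}")
        conn.close()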
| [] |
2024-01-10 | nguyendinhthi0705/bedrock-rag-stock | 3_stock_tools~stock_tools_lib.py | import os
from langchain.memory import ConversationBufferWindowMemory
from langchain.llms.bedrock import Bedrock
from langchain.chains import ConversationalRetrievalChain
from langchain.embeddings import BedrockEmbeddings
from langchain.indexes import VectorstoreIndexCreator
from langchain.vectorstores import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import PyPDFLoader
import json
from langchain.utilities import SQLDatabase
from langchain_experimental.sql import SQLDatabaseChain
from langchain.tools import DuckDuckGoSearchRun
from langchain.prompts.chat import ChatPromptTemplate
from langchain.chains import LLMChain
from datetime import datetime, timedelta
from pandas_datareader import data as pdr
from datetime import date
from langchain.prompts.prompt import PromptTemplate
import yfinance as yf
yf.pdr_override()
def get_llm():
model_parameter = {"temperature": 0.0, "top_p": .5, "max_tokens_to_sample": 2000}
llm = Bedrock(
credentials_profile_name=os.environ.get("BWB_PROFILE_NAME"), #sets the profile name to use for AWS credentials (if not the default)
region_name=os.environ.get("BWB_REGION_NAME"), #sets the region name (if not the default)
endpoint_url=os.environ.get("BWB_ENDPOINT_URL"), #sets the endpoint URL (if necessary)
model_id="anthropic.claude-v2", #set the foundation model
model_kwargs=model_parameter,
streaming=True)
return llm
def get_db_chain(prompt):
db = SQLDatabase.from_uri("sqlite:///stock_ticker_database.db")
llm = get_llm()
db_chain = SQLDatabaseChain.from_llm(
llm,
db,
verbose=True,
return_intermediate_steps=True,
prompt=prompt,
)
return db_chain
def get_stock_ticker(query):
template = """You are a helpful assistant who extract company name from the human input.Please only output the company"""
human_template = "{text}"
llm = get_llm()
chat_prompt = ChatPromptTemplate.from_messages([
("system", template),
("human", human_template),
])
llm_chain = LLMChain(
llm=llm,
prompt=chat_prompt
)
company_name=llm_chain(query)['text'].strip()
_DEFAULT_TEMPLATE = """Human: Given an input question, first create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.
<format>
Question: "Question here"
SQLQuery: "SQL Query to run"
SQLResult: "Result of the SQLQuery"
Answer: "Result of SQLResult only"
</format>
Assistant: Understood, I will use the above format and only provide the answer.
Only use the following tables:
<tables>
CREATE TABLE stock_ticker (
symbol text PRIMARY KEY,
name text NOT NULL,
currency text,
stockExchange text,
exchangeShortName text
)
</tables>
If someone asks for the table stock ticker table, they really mean the stock_ticker table.
<examples>
Question:
What is the ticker symbol for Amazon in stock ticker table?
Params:
Company name (name): Amazon
SQLQuery:SELECT symbol FROM stock_ticker WHERE name LIKE '%Amazon%'
</examples>
Question: \n\nHuman:{input} \n\nAssistant:
"""
PROMPT = PromptTemplate(
input_variables=["input", "dialect"], template=_DEFAULT_TEMPLATE
)
db_chain = get_db_chain(PROMPT)
company_ticker = db_chain("\n\nHuman: What is the ticker symbol for " + str(company_name) + " in stock ticker table? \n\nAssistant:")
return company_name, company_ticker['result']
def get_stock_price(ticker, history=500):
today = date.today()
start_date = today - timedelta(days=history)
data = pdr.get_data_yahoo(ticker, start=start_date, end=today)
return data
# Fetch top 5 google news for given company name
import re
import requests
def google_query(search_term):
if "news" not in search_term:
search_term=search_term+" stock news"
url=f"https://www.google.com/search?q={search_term}"
url=re.sub(r"\s","+",url)
return url
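# Editor's sketch (illustrative, not from the original file): google_query
# appends " stock news" when absent and URL-encodes whitespace as '+':
# >>> google_query("Amazon")
# 'https://www.google.com/search?q=Amazon+stock+news'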
from bs4 import BeautifulSoup
def get_recent_stock_news(company_name):
headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Safari/537.36'}
g_query=google_query(company_name)
res=requests.get(g_query,headers=headers).text
soup=BeautifulSoup(res,"html.parser")
news=[]
for n in soup.find_all("div","n0jPhd ynAwRc tNxQIb nDgy9d"):
news.append(n.text)
for n in soup.find_all("div","IJl0Z"):
news.append(n.text)
    if len(news) > 6:
        news = news[:4]
news_string=""
for i,n in enumerate(news):
news_string+=f"{i}. {n}\n"
top5_news="Recent News:\n\n"+news_string
return top5_news
def stock_news_search(company_name):
search=DuckDuckGoSearchRun()
return search("Stock news about " + company_name)
# Get financial statements from Yahoo Finance
def get_financial_statements(ticker):
if "." in ticker:
ticker=ticker.split(".")[0]
else:
ticker=ticker
company = yf.Ticker(ticker)
balance_sheet = company.balance_sheet
if balance_sheet.shape[1]>=3:
balance_sheet=balance_sheet.iloc[:,:3] # Only captures last 3 years of data
balance_sheet=balance_sheet.dropna(how="any")
balance_sheet = balance_sheet.to_string()
return balance_sheet
from langchain.agents import load_tools
from langchain.agents import initialize_agent, Tool
from langchain.agents import AgentType
from langchain import LLMMathChain
tools=[
Tool(
name="get company ticker",
func=get_stock_ticker,
description="Get the company stock ticker"
),
Tool(
name="get stock data",
func=get_stock_price,
description="Use when you are asked to get stock data. This will output historic share price data. You should input the the stock ticker to it "
),
Tool(
name="get recent news",
func=get_recent_stock_news,
description="Use this to fetch recent news about stocks"
),
Tool(
name="get financial statements",
func=get_financial_statements,
description="Use this to get financial statement of the company. With the help of this data companys historic performance can be evaluaated. You should input stock ticker to it"
)
]
from langchain.agents import initialize_agent
def initializeAgent():
agent=initialize_agent(
llm=get_llm(),
agent="zero-shot-react-description",
tools=tools,
verbose=True,
        max_iterations=2,
return_intermediate_steps=True,
handle_parsing_errors=True,
output_key="output",
)
return agent
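# Editor's sketch (not part of the original file): a minimal invocation of the
# agent above, mirroring how the Streamlit app calls it. The query string is
# illustrative; Bedrock credentials are assumed to be configured via the
# environment variables read in get_llm().
if __name__ == "__main__":
    demo_agent = initializeAgent()
    response = demo_agent({
        "input": "\n\nHuman: get company ticker of Amazon \n\nAssistant:",
        "chat_history": [],
    })
    print(response["output"])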
| [
"human",
"Human: Given an input question, first create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.\n<format>\nQuestion: \"Question here\"\nSQLQuery: \"SQL Query to run\"\nSQLResult: \"Result of the SQLQuery\"\nAnswer: \"Result of SQLResult only\"\n</format>\nAssistant: Understood, I will use the above format and only provide the answer.\n\nOnly use the following tables:\n<tables>\nCREATE TABLE stock_ticker (\n\tsymbol text PRIMARY KEY,\n\tname text NOT NULL,\n\tcurrency text,\n\tstockExchange text, \n exchangeShortName text\n)\n</tables>\n\nIf someone asks for the table stock ticker table, they really mean the stock_ticker table.\n<examples>\nQuestion: \n What is the ticker symbol for Amazon in stock ticker table?\n Params: \n Company name (name): Amazon\n \nSQLQuery:SELECT symbol FROM stock_ticker WHERE name LIKE '%Amazon%'\n\n</examples>\n\nQuestion: \n\nHuman:{input} \n\nAssistant:\n\n",
"You are a helpful assistant who extract company name from the human input.Please only output the company",
"input",
"[('system', PLACEHOLDER), ('human', PLACEHOLDER)]",
"{text}"
] |
2024-01-10 | nguyendinhthi0705/bedrock-rag-stock | 4_stock_analysis~stock_analysis_lib.py | import os
from langchain.memory import ConversationBufferWindowMemory
from langchain.llms.bedrock import Bedrock
from langchain.chains import ConversationalRetrievalChain
from langchain.embeddings import BedrockEmbeddings
from langchain.indexes import VectorstoreIndexCreator
from langchain.vectorstores import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import PyPDFLoader
import json
from langchain.utilities import SQLDatabase
from langchain_experimental.sql import SQLDatabaseChain
from langchain.tools import DuckDuckGoSearchRun
from langchain.prompts.chat import ChatPromptTemplate
from langchain.chains import LLMChain
from datetime import datetime, timedelta
from pandas_datareader import data as pdr
from datetime import date
from langchain.prompts.prompt import PromptTemplate
import yfinance as yf
yf.pdr_override()
def get_llm():
model_parameter = {"temperature": 0.0, "top_p": .5, "max_tokens_to_sample": 2000}
llm = Bedrock(
credentials_profile_name=os.environ.get("BWB_PROFILE_NAME"), #sets the profile name to use for AWS credentials (if not the default)
region_name=os.environ.get("BWB_REGION_NAME"), #sets the region name (if not the default)
endpoint_url=os.environ.get("BWB_ENDPOINT_URL"), #sets the endpoint URL (if necessary)
model_id="anthropic.claude-v2", #set the foundation model
model_kwargs=model_parameter,
streaming=True)
return llm
def get_db_chain(prompt):
db = SQLDatabase.from_uri("sqlite:///stock_ticker_database.db")
llm = get_llm()
db_chain = SQLDatabaseChain.from_llm(
llm,
db,
verbose=True,
return_intermediate_steps=True,
prompt=prompt,
)
return db_chain
def get_stock_ticker(query):
template = """You are a helpful assistant who extract company name from the human input.Please only output the company"""
human_template = "{text}"
llm = get_llm()
chat_prompt = ChatPromptTemplate.from_messages([
("system", template),
("human", human_template),
])
llm_chain = LLMChain(
llm=llm,
prompt=chat_prompt
)
company_name=llm_chain(query)['text'].strip()
_DEFAULT_TEMPLATE = """Human: Given an input question, first create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.
<format>
Question: "Question here"
SQLQuery: "SQL Query to run"
SQLResult: "Result of the SQLQuery"
Answer: "Result of SQLResult only"
</format>
Assistant: Understood, I will use the above format and only provide the answer.
Only use the following tables:
<tables>
CREATE TABLE stock_ticker (
symbol text PRIMARY KEY,
name text NOT NULL,
currency text,
stockExchange text,
exchangeShortName text
)
</tables>
If someone asks for the table stock ticker table, they really mean the stock_ticker table.
<examples>
Question:
What is the ticker symbol for Amazon in stock ticker table?
Params:
Company name (name): Amazon
SQLQuery:SELECT symbol FROM stock_ticker WHERE name LIKE '%Amazon%'
</examples>
Question: \n\nHuman:{input} \n\nAssistant:
"""
PROMPT = PromptTemplate(
input_variables=["input", "dialect"], template=_DEFAULT_TEMPLATE
)
db_chain = get_db_chain(PROMPT)
company_ticker = db_chain("\n\nHuman: What is the ticker symbol for " + str(company_name) + " in stock ticker table? \n\nAssistant:")
return company_name, company_ticker['result']
def get_stock_price(ticker, history=500):
today = date.today()
start_date = today - timedelta(days=history)
data = pdr.get_data_yahoo(ticker, start=start_date, end=today)
return data
# Fetch top 5 google news for given company name
import re
import requests
def google_query(search_term):
if "news" not in search_term:
search_term=search_term+" stock news"
url=f"https://www.google.com/search?q={search_term}"
url=re.sub(r"\s","+",url)
return url
from bs4 import BeautifulSoup
def get_recent_stock_news(company_name):
headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Safari/537.36'}
g_query=google_query(company_name)
res=requests.get(g_query,headers=headers).text
soup=BeautifulSoup(res,"html.parser")
news=[]
for n in soup.find_all("div","n0jPhd ynAwRc tNxQIb nDgy9d"):
news.append(n.text)
for n in soup.find_all("div","IJl0Z"):
news.append(n.text)
    if len(news) > 6:
        news = news[:4]
news_string=""
for i,n in enumerate(news):
news_string+=f"{i}. {n}\n"
top5_news="Recent News:\n\n"+news_string
return top5_news
def stock_news_search(company_name):
search=DuckDuckGoSearchRun()
return search("Stock news about " + company_name)
# Get financial statements from Yahoo Finance
def get_financial_statements(ticker):
if "." in ticker:
ticker=ticker.split(".")[0]
else:
ticker=ticker
company = yf.Ticker(ticker)
balance_sheet = company.balance_sheet
if balance_sheet.shape[1]>=3:
balance_sheet=balance_sheet.iloc[:,:3] # Only captures last 3 years of data
balance_sheet=balance_sheet.dropna(how="any")
balance_sheet = balance_sheet.to_string()
return balance_sheet
from langchain.agents import load_tools
from langchain.agents import initialize_agent, Tool
from langchain.agents import AgentType
from langchain import LLMMathChain
tools=[
Tool(
name="get company ticker",
func=get_stock_ticker,
description="Get the company stock ticker"
),
Tool(
name="get stock data",
func=get_stock_price,
description="Use when you are asked to evaluate or analyze a stock. This will output historic share price data. You should input the the stock ticker to it "
),
Tool(
name="get recent news",
func=get_recent_stock_news,
description="Use this to fetch recent news about stocks"
),
Tool(
name="get financial statements",
func=get_financial_statements,
description="Use this to get financial statement of the company. With the help of this data companys historic performance can be evaluaated. You should input stock ticker to it"
)
]
from langchain.agents import initialize_agent
def initializeAgent():
agent=initialize_agent(
llm=get_llm(),
agent="zero-shot-react-description",
tools=tools,
verbose=True,
        max_iterations=2,
return_intermediate_steps=True,
handle_parsing_errors=True,
output_key="output",
)
prompt="""Human: You are a financial advisor. Give stock recommendations for given query based on following instructions.
<instructions>
Answer the following questions as best you can. You have access to the following tools:
get company ticker: Use when you need to extract company name and stock ticker. This tool will output company name and stock ticker. You should input the human input to it.
get stock data: Use when you are asked to evaluate or analyze a stock. This will output historic share price data. You should input the stock ticker to it.
get recent news: Use this to fetch recent news about stocks. This will output company stock news. You should innput the company name to it.
get financial statements: Use this to get financial statement of the company. With the help of this data companys historic performance can be evaluaated. You should input stock ticker to it
</instructions>
<steps>
Note- if you fail in satisfying any of the step below, Just move to next one
1) Use "get company ticker" tool to get the company name and stock ticker. Output- company name and stock ticker
2) Use "get stock data" tool to gather stock info. Output- Stock data
3) Use "get recent news" tool to search for latest stock realted news. Output- Stock news
4) Use "get financial statements" tool to get company's historic financial data. Output- Financial statement
5) Analyze the stock based on gathered data and give detail analysis for investment choice. provide numbers and reasons to justify your answer. Output- Detailed stock Analysis
</steps>
Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do, Also try to follow steps mentioned above
Action: the action to take, should be one of [get company ticker, get stock data, get recent news, get financial statements]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
Question: {input}
Assistant:
{agent_scratchpad}
"""
agent.agent.llm_chain.prompt.template=prompt
return agent
| [
"human",
"{text}",
"Human: Given an input question, first create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.\n<format>\nQuestion: \"Question here\"\nSQLQuery: \"SQL Query to run\"\nSQLResult: \"Result of the SQLQuery\"\nAnswer: \"Result of SQLResult only\"\n</format>\nAssistant: Understood, I will use the above format and only provide the answer.\n\nOnly use the following tables:\n<tables>\nCREATE TABLE stock_ticker (\n\tsymbol text PRIMARY KEY,\n\tname text NOT NULL,\n\tcurrency text,\n\tstockExchange text, \n exchangeShortName text\n)\n</tables>\n\nIf someone asks for the table stock ticker table, they really mean the stock_ticker table.\n<examples>\nQuestion: \n What is the ticker symbol for Amazon in stock ticker table?\n Params: \n Company name (name): Amazon\n \nSQLQuery:SELECT symbol FROM stock_ticker WHERE name LIKE '%Amazon%'\n\n</examples>\n\nQuestion: \n\nHuman:{input} \n\nAssistant:\n\n",
"You are a helpful assistant who extract company name from the human input.Please only output the company",
"input",
"[('system', PLACEHOLDER), ('human', PLACEHOLDER)]",
"Human: You are a financial advisor. Give stock recommendations for given query based on following instructions. \n<instructions>\nAnswer the following questions as best you can. You have access to the following tools:\n\nget company ticker: Use when you need to extract company name and stock ticker. This tool will output company name and stock ticker. You should input the human input to it.\nget stock data: Use when you are asked to evaluate or analyze a stock. This will output historic share price data. You should input the stock ticker to it.\nget recent news: Use this to fetch recent news about stocks. This will output company stock news. You should innput the company name to it.\nget financial statements: Use this to get financial statement of the company. With the help of this data companys historic performance can be evaluaated. You should input stock ticker to it\n</instructions>\n\n<steps>\nNote- if you fail in satisfying any of the step below, Just move to next one\n1) Use \"get company ticker\" tool to get the company name and stock ticker. Output- company name and stock ticker\n2) Use \"get stock data\" tool to gather stock info. Output- Stock data\n3) Use \"get recent news\" tool to search for latest stock realted news. Output- Stock news\n4) Use \"get financial statements\" tool to get company's historic financial data. Output- Financial statement\n5) Analyze the stock based on gathered data and give detail analysis for investment choice. provide numbers and reasons to justify your answer. Output- Detailed stock Analysis\n</steps>\n\nUse the following format:\nQuestion: the input question you must answer\nThought: you should always think about what to do, Also try to follow steps mentioned above\nAction: the action to take, should be one of [get company ticker, get stock data, get recent news, get financial statements]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n\nQuestion: {input}\n\nAssistant:\n{agent_scratchpad}\n\n"
] |
2024-01-10 | nguyendinhthi0705/bedrock-rag-stock | 2_stock_query~stock_query_database_lib.py | import json
import os
import sys
import sqlite3
from langchain.utilities import SQLDatabase
from langchain_experimental.sql import SQLDatabaseChain
from pathlib import Path
## database
stock_ticker_data=[
{
"symbol" : "PRAA",
"name" : "PRA Group, Inc.",
"currency" : "USD",
"stockExchange" : "NasdaqGS",
"exchangeShortName" : "NASDAQ"
},
{
"symbol" : "AMZN",
"name" : "Amazon.com, Inc.",
"currency" : "USD",
"stockExchange" : "NasdaqGS",
"exchangeShortName" : "NASDAQ"
},
{
"symbol" : "TSLA",
"name" : "Tesla Inc.",
"currency" : "USD",
"stockExchange" : "NasdaqGS",
"exchangeShortName" : "NASDAQ"
},
{
"symbol" : "PAAS",
"name" : "Pan American Silver Corp.",
"currency" : "USD",
"stockExchange" : "NasdaqGS",
"exchangeShortName" : "NASDAQ"
},
{
"symbol" : "PAAC",
"name" : "Proficient Alpha Acquisition Corp.",
"currency" : "USD",
"stockExchange" : "NasdaqCM",
"exchangeShortName" : "NASDAQ"
},
{
"symbol" : "RYAAY",
"name" : "Ryanair Holdings plc",
"currency" : "USD",
"stockExchange" : "NasdaqGS",
"exchangeShortName" : "NASDAQ"
},
{
"symbol" : "MPAA",
"name" : "Motorcar Parts of America, Inc.",
"currency" : "USD",
"stockExchange" : "NasdaqGS",
"exchangeShortName" : "NASDAQ"
},
{
"symbol" : "STAA",
"name" : "STAAR Surgical Company",
"currency" : "USD",
"stockExchange" : "NasdaqGM",
"exchangeShortName" : "NASDAQ"
},
{
"symbol" : "RBCAA",
"name" : "Republic Bancorp, Inc.",
"currency" : "USD",
"stockExchange" : "NasdaqGS",
"exchangeShortName" : "NASDAQ"
},
{
"symbol" : "AABA",
"name" : "Altaba Inc.",
"currency" : "USD",
"stockExchange" : "NasdaqGS",
"exchangeShortName" : "NASDAQ"
},
{
"symbol" : "AAXJ",
"name" : "iShares MSCI All Country Asia ex Japan ETF",
"currency" : "USD",
"stockExchange" : "NasdaqGM",
"exchangeShortName" : "NASDAQ"
},
{
"symbol" : "ZNWAA",
"name" : "Zion Oil & Gas, Inc.",
"currency" : "USD",
"stockExchange" : "NasdaqGM",
"exchangeShortName" : "NASDAQ"
}
]
def create_connection(db_file):
""" create a database connection to the SQLite database
specified by db_file
:param db_file: database file
:return: Connection object or None
"""
conn = None
try:
conn = sqlite3.connect(db_file)
return conn
    except sqlite3.Error as e:
print(e)
return conn
def create_table(conn, create_table_sql):
""" create a table from the create_table_sql statement
:param conn: Connection object
:param create_table_sql: a CREATE TABLE statement
:return:
"""
try:
c = conn.cursor()
c.execute(create_table_sql)
    except sqlite3.Error as e:
print(e)
def initial_database():
db_name = "stock_ticker_database.db"
if(os.path.isfile(db_name)):
return
conn = create_connection(db_name)
create_table_sql = """CREATE TABLE IF NOT EXISTS stock_ticker (
symbol text PRIMARY KEY,
name text NOT NULL,
currency text,
stockExchange text,
exchangeShortName text
);"""
create_table(conn, create_table_sql)
for item in stock_ticker_data:
conn.execute("INSERT INTO stock_ticker (symbol, name, currency,stockExchange, exchangeShortName ) VALUES (?, ?, ?, ?,?)",
(item["symbol"], item["name"], item["currency"], item["stockExchange"],item["exchangeShortName"]))
conn.commit()
conn.close()
| [] |
2024-01-10 | nguyendinhthi0705/bedrock-rag-stock | 1_stock_qna~stock_qna_lib.py | import os
from langchain.memory import ConversationBufferWindowMemory
from langchain.llms.bedrock import Bedrock
from langchain.chains import ConversationalRetrievalChain
from langchain.embeddings import BedrockEmbeddings
from langchain.indexes import VectorstoreIndexCreator
from langchain.vectorstores import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import PyPDFLoader
def get_llm():
model_kwargs = {
"maxTokens": 1024,
"temperature": 0,
"topP": 0.5,
"stopSequences": ["Human:"],
"countPenalty": {"scale": 0 },
"presencePenalty": {"scale": 0 },
"frequencyPenalty": {"scale": 0 }
}
llm = Bedrock(
credentials_profile_name=os.environ.get("BWB_PROFILE_NAME"), #sets the profile name to use for AWS credentials (if not the default)
region_name=os.environ.get("BWB_REGION_NAME"), #sets the region name (if not the default)
endpoint_url=os.environ.get("BWB_ENDPOINT_URL"), #sets the endpoint URL (if necessary)
model_id="ai21.j2-ultra-v1", #set the foundation model
model_kwargs=model_kwargs) #configure the properties for Claude
return llm
def get_index(): #creates and returns an in-memory vector store to be used in the application
embeddings = BedrockEmbeddings(
credentials_profile_name=os.environ.get("BWB_PROFILE_NAME"), #sets the profile name to use for AWS credentials (if not the default)
region_name=os.environ.get("BWB_REGION_NAME"), #sets the region name (if not the default)
endpoint_url=os.environ.get("BWB_ENDPOINT_URL"), #sets the endpoint URL (if necessary)
) #create a Titan Embeddings client
pdf_path = "./2022-Shareholder-Letter.pdf" #assumes local PDF file with this name
loader = PyPDFLoader(file_path=pdf_path) #load the pdf file
text_splitter = RecursiveCharacterTextSplitter( #create a text splitter
separators=["\n\n", "\n", ".", " "], #split chunks at (1) paragraph, (2) line, (3) sentence, or (4) word, in that order
chunk_size=1000, #divide into 1000-character chunks using the separators above
chunk_overlap=100 #number of characters that can overlap with previous chunk
)
index_creator = VectorstoreIndexCreator( #create a vector store factory
vectorstore_cls=FAISS, #use an in-memory vector store for demo purposes
embedding=embeddings, #use Titan embeddings
text_splitter=text_splitter, #use the recursive text splitter
)
index_from_loader = index_creator.from_loaders([loader]) #create an vector store index from the loaded PDF
return index_from_loader #return the index to be cached by the client app
def get_memory(): #create memory for this chat session
memory = ConversationBufferWindowMemory(memory_key="chat_history", return_messages=True) #Maintains a history of previous messages
return memory
def get_rag_chat_response(input_text, memory, index): #chat client function
llm = get_llm()
conversation_with_retrieval = ConversationalRetrievalChain.from_llm(llm, index.vectorstore.as_retriever(), memory=memory)
chat_response = conversation_with_retrieval({"question": input_text}) #pass the user message, history, and knowledge to the model
return chat_response['answer']
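# Editor's sketch (illustrative only, not from the original file): wiring the
# helpers above into a one-shot question. Building the index is the slow step,
# so a caller (e.g. a Streamlit app) would normally cache it. Assumes the
# bundled ./2022-Shareholder-Letter.pdf exists and that the BWB_* environment
# variables read above point at valid Bedrock credentials.
if __name__ == "__main__":
    index = get_index()
    memory = get_memory()
    print(get_rag_chat_response("What were the key results?", memory, index))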
| [] |
2024-01-10 | nguyendinhthi0705/bedrock-rag-stock | 3_stock_tools~stock_tools_app.py | import streamlit as st
import stock_tools_lib as glib
import stock_tools_database_lib as databaselib
from langchain.callbacks import StreamlitCallbackHandler
import time
import pandas as pd
def print_result(st, response):
st.write(response['output'])
def stock_tools():
st.header("Stock Tools Agent")
st.write("Try: Company name as Amazon, Tesla, Apple..etc ")
st.write("get company ticker of Amazon")
st.write("get stock history of Tesla")
st.write("get financial statement of Vinamilk")
st.write("fetch news of Apple")
if 'database' not in st.session_state:
with st.spinner("Initial Database"):
databaselib.initial_database()
if 'chat_history' not in st.session_state:
st.session_state.chat_history = []
agent = glib.initializeAgent()
input_text = st.chat_input("Type company name here!")
ph = st.empty()
if input_text:
ph.empty()
st_callback = StreamlitCallbackHandler(st.container())
response = agent({
"input": "\n\nHuman:" + str(input_text) + " \n\nAssistant:",
"chat_history": st.session_state.chat_history,
"output":"output"
},
callbacks=[st_callback])
st.write(response['output'])
| [] |
2024-01-10 | nguyendinhthi0705/bedrock-rag-stock | 4_stock_analysis~stock_analysis_app.py | import streamlit as st
import stock_analysis_lib as glib
import stock_analysis_database_lib as databaselib
from langchain.callbacks import StreamlitCallbackHandler
import time
import pandas as pd
def print_result(st, response):
try:
st.subheader("Daily sticker:")
st.dataframe(response['intermediate_steps'][1][1])
st.subheader("Stock Chart:")
df = pd.DataFrame(response['intermediate_steps'][1][1],columns=['Close','Volume'])
df['Volume'] = df['Volume']/10000000
df.rename(columns={'Close':'Price(USD)','Volume':'Volume(10 millions)'},inplace=True)
st.line_chart(df)
st.subheader("Conclusion:")
st.write(response['output'])
    except Exception:
st.write(response['output'])
def stock_analysis():
st.header("Stock Analysis Agent")
st.write("Try to input with company name like Amazon, Tesla, Apple..etc")
if 'database' not in st.session_state:
with st.spinner("Initial Database"):
databaselib.initial_database()
if 'chat_history' not in st.session_state:
st.session_state.chat_history = []
agent = glib.initializeAgent()
input_text = st.chat_input("Type company name here!")
ph = st.empty()
if input_text:
ph.empty()
st_callback = StreamlitCallbackHandler(st.container())
response = agent({
"input": input_text,
"chat_history": st.session_state.chat_history,
},
callbacks=[st_callback])
print_result(st,response)
| [] |
2024-01-10 | nguyendinhthi0705/bedrock-rag-stock | 2_stock_query~stock_query_lib.py | import os
from langchain.llms.bedrock import Bedrock
import json
from langchain.utilities import SQLDatabase
from langchain_experimental.sql import SQLDatabaseChain
from langchain.prompts.chat import ChatPromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.chains import LLMChain
from datetime import datetime, timedelta
from datetime import date
def get_llm():
model_parameter = {"temperature": 0.0, "top_p": .5, "max_tokens_to_sample": 2000}
llm = Bedrock(
credentials_profile_name=os.environ.get("BWB_PROFILE_NAME"),
region_name=os.environ.get("BWB_REGION_NAME"),
endpoint_url=os.environ.get("BWB_ENDPOINT_URL"),
model_id="anthropic.claude-v2",
model_kwargs=model_parameter)
return llm
def get_db_chain(prompt):
db = SQLDatabase.from_uri("sqlite:///stock_ticker_database.db")
llm = get_llm()
db_chain = SQLDatabaseChain.from_llm(
llm,
db,
verbose=True,
return_intermediate_steps=True,
prompt=prompt,
return_direct=True,
)
return db_chain
def query_stock(query):
llm = get_llm()
_DEFAULT_TEMPLATE = """Human: Given an input question, first create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.
<format>
Question: "Question here"
SQLQuery: "SQL Query to run"
SQLResult: "Result of the SQLQuery"
Answer: "SQLQuery and Result of SQLResult"
</format>
Assistant: Understood, I will use the above format and only provide the answer.
Only use the following tables:
<tables>
CREATE TABLE stock_ticker (
symbol text PRIMARY KEY,
name text NOT NULL,
currency text,
stockExchange text,
exchangeShortName text
)
</tables>
If someone asks for the table stock ticker table, they really mean the stock_ticker table.
<examples>
Question:
What is the ticker symbol for Amazon?
Params:
Company name (name): Amazon
SQLQuery:SELECT * FROM stock_ticker WHERE name LIKE '%Amazon%'
</examples>
Question: \n\nHuman:{input} \n\nAssistant:
"""
PROMPT = PromptTemplate(
input_variables=["input", "dialect"], template=_DEFAULT_TEMPLATE
)
db_chain = get_db_chain(PROMPT)
return db_chain("\n\nHuman: What is the ticker symbol for " + str(query) + " ? \n\nAssistant:")
| [
"%Amazon%",
"Human: Given an input question, first create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.\n<format>\nQuestion: \"Question here\"\nSQLQuery: \"SQL Query to run\"\nSQLResult: \"Result of the SQLQuery\"\nAnswer: \"SQLQuery and Result of SQLResult\"\n</format>\nAssistant: Understood, I will use the above format and only provide the answer.\n\nOnly use the following tables:\n<tables>\nCREATE TABLE stock_ticker (\n\tsymbol text PRIMARY KEY,\n\tname text NOT NULL,\n\tcurrency text,\n\tstockExchange text, \n exchangeShortName text\n)\n</tables>\n\nIf someone asks for the table stock ticker table, they really mean the stock_ticker table.\n<examples>\nQuestion: \n What is the ticker symbol for Amazon?\n Params: \n Company name (name): Amazon\n \nSQLQuery:SELECT * FROM stock_ticker WHERE name LIKE '%Amazon%'\n\n</examples>\n\nQuestion: \n\nHuman:{input} \n\nAssistant:\n\n",
"SQL Query to run",
"Result of the SQLQuery",
"input",
"SQLQuery and Result of SQLResult",
"Question here"
] |
2024-01-10 | Bernese-Corgi/Machine-Learning-TIL | openai-pinecone~upsert_data.py | import os
import numpy as np
import openai
import pandas as pd
from typing import Iterator
from create_index import get_pinecone, load_data
OPENAI_API_KEY = os.environ['OPENAI_API_KEY']
EMBEDDING_MODEL = "text-embedding-ada-002"
def init_openai():
openai.api_key = OPENAI_API_KEY
class BatchGenerator:
"""Models a simple batch generator that make chunks out of an input DataFrame"""
def __init__(self, batch_size: int = 10) -> None:
self.batch_size = batch_size
def splits_num(self, elements: int) -> int:
"""Determines how many chunks DataFrame contains"""
return round(elements / self.batch_size)
def to_batches(self, df: pd.DataFrame) -> Iterator[pd.DataFrame]:
"""Makes chunks out of an input DataFrame"""
splits = self.splits_num(df.shape[0])
if splits <= 1:
yield df
else:
for chunk in np.array_split(df, splits):
yield chunk
__call__ = to_batches
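# Editor's sketch (not from the original file): BatchGenerator in isolation.
# round(25 / 10) == 2, so a 25-row frame yields two chunks of 13 and 12 rows:
# >>> demo_df = pd.DataFrame({"vector_id": range(25)})
# >>> [len(chunk) for chunk in BatchGenerator(batch_size=10)(demo_df)]
# [13, 12]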
def upsert_data():
init_openai()
df_batcher = BatchGenerator(300)
index = get_pinecone()
post_df = load_data('data/blog_posts.csv')
for batch_df in df_batcher(post_df):
print(batch_df.content_vector)
index.upsert(vectors=zip(batch_df.vector_id, batch_df.content_vector), namespace='content')
index.describe_index_stats()
if __name__ == "__main__":
upsert_data() | [] |
2024-01-10 | Bernese-Corgi/Machine-Learning-TIL | openai-embedding~obtain_dataset.py | import pandas as pd
import tiktoken
from openai.embeddings_utils import get_embedding
# ------------------------ embedding model parameters ------------------------ #
embedding_model = "text-embedding-ada-002"
embedding_encoding = "cl100k_base"
max_tokens = 8000
# -------------------------- load & inspect dataset -------------------------- #
input_datapath = "data/fine_food_reviews_1k.csv"
# Use pd.read_csv() to read the CSV file at input_datapath into the DataFrame
# df; index_col=0 makes the first column the index.
df = pd.read_csv(input_datapath, index_col=0)
# Restrict df to the "Time", "ProductId", "UserId", "Score", "Summary" and
# "Text" columns; all other columns are dropped.
df = df[["Time", "ProductId", "UserId", "Score", "Summary", "Text"]]
# Drop rows with missing values.
df = df.dropna()
# Add a new "combined" column of the form "Title: {Summary}; Content: {Text}".
df["combined"] = (
    # Concatenate the Series element-wise so each row gets its own combined
    # string; str.strip() removes leading/trailing whitespace from the
    # Summary and Text values.
    "Title: " + df.Summary.str.strip() + "; Content: " + df.Text.str.strip()
)
# Print the first two rows of the DataFrame.
print(df.head(2))
# - subsample to 1k most recent reviews and remove samples that are too long - #
top_n = 1000
# first cut to the most recent 2,000 entries, assuming less than half will be filtered out
# Sort df by the "Time" column and keep the most recent top_n * 2 rows.
df = df.sort_values("Time").tail(top_n * 2)
# Drop the "Time" column.
df.drop("Time", axis=1, inplace=True)
# Fetch the tokenizer for embedding_encoding via tiktoken.get_encoding() and
# store it in `encoding`.
encoding = tiktoken.get_encoding(embedding_encoding)
# ------------------ omit reviews that are too long to embed ----------------- #
# Count the encoded tokens of each "combined" row and store them in "n_tokens".
df['n_tokens'] = df.combined.apply(lambda x: len(encoding.encode(x)))
# Keep only rows whose "n_tokens" value is at most max_tokens, then take the
# last top_n rows to update df.
df = df[df.n_tokens <= max_tokens].tail(top_n)
print(len(df))
df["embedding"] = df.combined.apply(lambda x: get_embedding(x, engine=embedding_model))
df.to_csv("data/fine_food_reviews_with_embeddings_1k.csv") | [] |
2024-01-10 | Bernese-Corgi/Machine-Learning-TIL | openai-pinecone~search_data.py | import os
import openai
import pandas as pd
from create_index import get_pinecone, load_data
from openai.embeddings_utils import get_embedding
OPENAI_API_KEY = os.environ['OPENAI_API_KEY']
EMBEDDING_MODEL = "text-embedding-ada-002"
def init_openai():
openai.api_key = OPENAI_API_KEY
def query_post(*, query: str, namespace: str = 'content', top_k: int = 5):
post_df = load_data('data/blog_posts.csv')
content_mapped = dict(zip(post_df.vector_id, post_df.content_text))
embedded_query = get_embedding(text=query, engine=EMBEDDING_MODEL)
print(embedded_query)
index = get_pinecone()
print(index)
query_result = index.query(
vector=embedded_query,
namespace=namespace,
top_k=top_k
)
print(query_result)
if not query_result.matches:
print('no query result')
matches = query_result.matches
ids = [res.id for res in matches]
scores = [res.score for res in matches]
df = pd.DataFrame({
'id': ids,
'score': scores,
'content': [content_mapped[_id] for _id in ids]
})
counter = 0
for _, value in df.iterrows():
counter += 1
print(f"\npost id: {value.id}, score: {value.score}, content: {value.content}\n")
print(f"total: {counter}")
return df
if __name__ == "__main__":
init_openai()
query_post(query='맘스보드와 노리터보드 비교')
# while True:
# user_input = input('질문해보세요 >> ')
# if user_input == ':q':
# break
# else:
# result = query_post(query=user_input)
# print(f"{result}\n\n")
| [] |
2024-01-10 | Bernese-Corgi/Machine-Learning-TIL | openai-embedding~with_pinecone.py | import os
import openai
import pandas as pd
import pinecone
from datasets import load_dataset
from tqdm.auto import tqdm
openai.api_key = os.getenv('OPENAI_API_KEY')
pinecone_key = os.getenv('PINECONE_API_KEY')
print(openai.Engine.list())
data_array = list(pd.read_csv('data/blog_posts.csv')['content_text'].values)
MODEL = "text-embedding-ada-002"
res = openai.Embedding.create(
input=data_array,
engine=MODEL
)
embeds = [record['embedding'] for record in res['data']]
pinecone.init(
api_key=pinecone_key,
environment="asia-southeast1-gcp-free"
)
if 'openai' not in pinecone.list_indexes():
pinecone.create_index('openai', dimension=len(embeds[0]))
index = pinecone.Index('openai')
trec = load_dataset('trec', split='train[:10]')
batch_size = 32
for i in tqdm(range(0, len(trec['text']), batch_size)):
i_end = min(i + batch_size, len(trec['text']))
lines_batch = trec['text'][i:(i + batch_size)]
ids_batch = [str(n) for n in range(i, i_end)]
res = openai.Embedding.create(input=lines_batch, engine=MODEL)
embeds = [record['embedding'] for record in res['data']]
meta = [{ 'text': line } for line in lines_batch]
to_upsert = zip(ids_batch, embeds, meta)
index.upsert(vectors=list(to_upsert))
query = "What caused the 1929 Great Depression?"
xq = openai.Embedding.create(input=query, engine=MODEL)['data'][0]['embedding']
res = index.query([xq], top_k=5, include_metadata=True)
for match in res['matches']:
print(f"{match['score']:.2f}:{match['metadata']['text']}") | [] |
2024-01-10 | jakelucasnyc/SciencePaperGPT | src~spgpt~__main__.py | from spgpt.pdf import retrieve_pdf_data, clear_cached_papers
from spgpt.query import get_response_from_query
from langchain.embeddings.openai import OpenAIEmbeddings
import os
import logging
from appdirs import user_cache_dir, user_data_dir
import json
from pprint import pprint
from spgpt.gui.spgptMainWin import SPGPTMainWin
from PySide6.QtWidgets import QApplication
import sys
logging.basicConfig(level=logging.INFO)
cache_dir = user_cache_dir('SciencePaperGPT', 'BioGear Labs')
def create_paper_cache_file(cache_dir:str):
cache_file = os.path.join(cache_dir, 'papers.json')
os.makedirs(cache_dir, exist_ok=True)
if not os.path.isfile(cache_file):
with open(cache_file, 'w') as f:
json.dump({}, f)
return cache_file
if __name__ == '__main__':
with open(os.path.realpath(os.path.join('spgpt', 'secrets', 'openai.txt'))) as f:
os.environ['OPENAI_API_KEY'] = f.read()
# clear_cached_papers(cache_dir)
create_paper_cache_file(cache_dir)
embeddings = OpenAIEmbeddings()
# pdf = r'/Users/jakelucas/Desktop/NicotineTest.pdf'
# pdf = r'/Users/jakelucas/Desktop/Niemeyer2022.pdf'
# faiss_db = retrieve_pdf_data(pdf, embeddings, cache_dir)
# response, docs = get_response_from_query(faiss_db, "What were the major results in this paper? ")
# pprint(response)
app = QApplication(sys.argv)
main_window = SPGPTMainWin(embeddings, cache_dir)
main_window.show()
sys.exit(app.exec())
| [] |
2024-01-10 | jakelucasnyc/SciencePaperGPT | src~spgpt~gui~spgptMainWin.py | import sys
from PySide6.QtCore import Qt, QThreadPool
from PySide6.QtGui import QPalette, QColor
from PySide6.QtWidgets import QApplication, QMainWindow, QVBoxLayout, QHBoxLayout, QWidget, QLabel, QLineEdit, QListWidget, QListView, QPushButton, QFileDialog, QListWidgetItem, QTextEdit
from spgpt.pdf import retrieve_pdf_data
from spgpt.query import get_response_from_query
from spgpt.gui.importPdf import ImportPDF
from spgpt.gui.query import Query
from langchain.embeddings.openai import OpenAIEmbeddings
import os
import logging
class SPGPTMainWin(QMainWindow):
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.INFO)
def __init__(self, embeddings:OpenAIEmbeddings, cache_dir:str):
super().__init__()
self._threadpool = QThreadPool()
self._embeddings = embeddings
self._cache_dir = cache_dir
self._faiss_dict = {}
# Initialize widgets and layouts
self.init_ui()
def init_ui(self):
self.setWindowTitle('PDF Chat with GPT')
main_layout = QHBoxLayout()
# Left Column
left_column = QVBoxLayout()
import_button = QPushButton('Import PDF')
import_button.clicked.connect(self.import_pdf)
# import_button.setFixedWidth(40)
import_button.setStyleSheet("QPushButton { background-color: #42b1f5; color: white; font: bold; }")
left_column.addWidget(import_button, alignment=Qt.AlignTop)
self.pdf_list = QListWidget()
self.pdf_list.setAcceptDrops(True)
self.pdf_list.setDragEnabled(True)
self.pdf_list.setDropIndicatorShown(True)
self.pdf_list.setSelectionMode(QListView.ExtendedSelection)
self.pdf_list.setDragDropMode(QListView.InternalMove)
left_column.addWidget(self.pdf_list)
# Middle Column
middle_column = QVBoxLayout()
self.chat_display = QTextEdit()
self.chat_display.setReadOnly(True)
middle_column.addWidget(self.chat_display)
input_layout = QHBoxLayout()
self.prompt_input = QLineEdit()
self.prompt_input.setPlaceholderText('Type your question here...')
input_layout.addWidget(self.prompt_input)
self.send_button = QPushButton('Submit')
self.send_button.setStyleSheet("QPushButton { background-color: #42b1f5; color: white; font: bold; }")
input_layout.addWidget(self.send_button)
middle_column.addLayout(input_layout)
self.send_button.clicked.connect(lambda: self.on_user_input())
self.creativity_line_edit = QLineEdit()
self.creativity_line_edit.setText("0.3")
self.creativity_line_edit.setFixedWidth(50)
input_layout.addWidget(self.creativity_line_edit)
# Right Column
right_column = QVBoxLayout()
new_conversation_button = QPushButton('New Conversation')
new_conversation_button.setStyleSheet("QPushButton { background-color: #42b1f5; color: white; font: bold; }")
right_column.addWidget(new_conversation_button, alignment=Qt.AlignTop)
self.conversations_list = QListWidget()
right_column.addWidget(self.conversations_list)
new_conversation_button.clicked.connect(self.add_new_conversation)
# Set main layout
main_widget = QWidget()
main_layout.addLayout(left_column, 1)
main_layout.addLayout(middle_column, 2)
main_layout.addLayout(right_column, 1)
main_widget.setLayout(main_layout)
self.setCentralWidget(main_widget)
# Set dark theme
self.set_dark_theme()
# Start in full-screen mode
self.showFullScreen()
def add_new_conversation(self):
new_conversation = QListWidgetItem("New Conversation")
self.conversations_list.addItem(new_conversation)
self.chat_display.clear()
def import_pdf(self):
file_dialog = QFileDialog(self, "Import PDF", "", "PDF Files (*.pdf)")
file_dialog.setAcceptMode(QFileDialog.AcceptOpen)
file_dialog.setFileMode(QFileDialog.ExistingFile)
if file_dialog.exec() == QFileDialog.Accepted:
file_path = file_dialog.selectedFiles()[0]
self.pdf_list.insertItem(0, f'Processing: {os.path.basename(file_path)}')
pdf_item = self.pdf_list.item(0)
worker = ImportPDF(file_path, self._embeddings, self._cache_dir)
# worker.finished.connect()
worker.data_acquired.connect(lambda faiss_db: self._faiss_dict.update({file_path: faiss_db}))
worker.data_acquired.connect(lambda: pdf_item.setText(file_path))
worker.error.connect(lambda: pdf_item.setText(f'Error: {file_path}'))
self._threadpool.start(worker)
def _get_temperature(self) -> float:
temp_input = self.creativity_line_edit.text()
try:
temp = float(temp_input)
except Exception as e:
self._logger.warning(f'Invalid temperature input: {temp_input}. Must be a float between 0 and 1')
return 0.3
if not (0 <= temp <= 1):
self._logger.warning(f'Invalid temperature input: {temp}. Must be a float between (or equal to) 0 and 1')
return 0.3
return temp
def on_user_input(self):
user_input = self.prompt_input.text()
if not user_input:
return
# print input to textedit
self.chat_display.append(f'User: {user_input}\n')
# clear user input
self.prompt_input.clear()
# get selected pdf documents
selectedDocs = self.pdf_list.selectedItems()
if not selectedDocs:
self.chat_display.append(f'System: Please highlight a document before making a prompt.\n')
return
doc = selectedDocs[0]
faiss_db = self._faiss_dict[doc.text()]
temp = self._get_temperature()
print(f'Temperature: {temp}')
        # disable the input widgets while the AI is generating a response
self.prompt_input.setEnabled(False)
self.send_button.setEnabled(False)
# run query function with relevant faiss_db(s)
worker = Query(faiss_db, user_input, temperature=temp)
worker.signals.error.connect(lambda e: self.chat_display.append(f'AI: {e}\n'))
worker.signals.response_acquired.connect(lambda response: self.chat_display.append(f'AI: {response}\n'))
worker.signals.finished.connect(lambda: self.send_button.setEnabled(True))
worker.signals.finished.connect(lambda: self.prompt_input.setEnabled(True))
self._threadpool.start(worker)
def set_dark_theme(self):
dark_palette = QPalette()
# Base colors
dark_palette.setColor(QPalette.Window, QColor(53, 53, 53))
dark_palette.setColor(QPalette.WindowText, QColor(255, 255, 255))
dark_palette.setColor(QPalette.Base, QColor(42, 42, 42))
dark_palette.setColor(QPalette.AlternateBase, QColor(66, 66, 66))
dark_palette.setColor(QPalette.Text, QColor(255, 255, 255))
dark_palette.setColor(QPalette.Button, QColor(53, 53, 53))
dark_palette.setColor(QPalette.ButtonText, QColor(255, 255, 255))
dark_palette.setColor(QPalette.BrightText, QColor(255, 0, 0))
dark_palette.setColor(QPalette.Link, QColor(42, 130, 218))
# Selection colors
dark_palette.setColor(QPalette.Highlight, QColor(42, 130, 218))
dark_palette.setColor(QPalette.HighlightedText, QColor(255, 255, 255))
# Set the application palette
QApplication.setPalette(dark_palette) | [] |
2024-01-10 | jakelucasnyc/SciencePaperGPT | src~spgpt~gui~importPdf.py | from PySide6.QtCore import QRunnable, Signal, QObject
from PySide6.QtWidgets import QListWidgetItem
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from spgpt.pdf import retrieve_pdf_data
class ImportPDFSignals(QObject):
# finished = Signal()
error = Signal(str)
data_acquired = Signal(FAISS)
class ImportPDF(QRunnable):
# finished = Signal()
def __init__(self, pdf_link:str, embeddings:OpenAIEmbeddings, cache_dir:str):
super().__init__()
self._pdf_link = pdf_link
self._embeddings = embeddings
self._cache_dir = cache_dir
self._signals = ImportPDFSignals()
self.error = self._signals.error
self.data_acquired = self._signals.data_acquired
def run(self):
try:
faiss_db = retrieve_pdf_data(self._pdf_link, self._embeddings, self._cache_dir)
except Exception as e:
self.error.emit(repr(e))
else:
self.data_acquired.emit(faiss_db)
| []
2024-01-10 | OlegRad4encko/Diploma | commandProcessor.py | from ttsModule import VoiceModel
from translatorModule import TranslatorModel
from openAIModule import TextNeuralNetwork as gpt
from sttModule import SpeachToText as speachToText, listen
from SettingsManager import SettingsManager, settings_manager
from fuzzywuzzy import fuzz
from notificationModule import show_notification
from datetime import datetime
from writeCommand import listen_write
from multiprocessing import Process
from tkinter import Scrollbar, Text, Frame
import random
import tkinter as tk
import time
import asyncio
import subprocess
import platform
import webbrowser
import inflect
settings_manager.load_settings()
# Asynchronous function for request in chatGPT
async def gpt_req(text_to_gpt: str):
freeGPT = gpt()
return await freeGPT.create_prompt(text_to_gpt)
# The function converts numbers in the text to their word form
def convert_numbers_in_text(text: str):
p = inflect.engine()
words = text.split()
for i in range(len(words)):
try:
num = int(words[i])
words[i] = p.number_to_words(num)
except ValueError:
pass
return ' '.join(words)
# The function creates a tkinter window to display the string
def paint_answer(answer_text: str):
root = tk.Tk()
root.title("Відповідь")
frame = Frame(root)
frame.pack(fill='both', expand=True)
text_widget = Text(frame, wrap='word', width=60, height=15)
text_widget.insert('1.0', answer_text)
scroll = Scrollbar(frame, command=text_widget.yview)
text_widget.config(yscrollcommand=scroll.set)
text_widget.grid(row=0, column=0, sticky='nsew')
scroll.grid(row=0, column=1, sticky='ns')
frame.grid_rowconfigure(0, weight=1)
frame.grid_columnconfigure(0, weight=1)
root.geometry("500x500")
root.mainloop()
# Function creates a process to display a string via tkinter
def paint_gpt_answer(text: str):
window_process = Process(target=paint_answer, args=[text])
window_process.start()
# open a folder or run an executable file for a custom command
def execute_custom_command_exe(ff_path: str, type: str):
try:
if type == 'file':
result = subprocess.run([ff_path], check=True, text=True, capture_output=True)
if type == 'path':
system_type = platform.system()
if system_type == "Windows":
result = subprocess.run(['explorer', ff_path], check=True)
show_notification("Голосовий помічник","Теку выдкрито")
elif system_type == "Linux":
result = subprocess.run(['xdg-open', ff_path], check=True)
show_notification("Голосовий помічник","Теку выдкрито")
except subprocess.CalledProcessError as e:
#print("Error:", e)
pass
# the function strips the command keyword and assistant alias from the spoken text, leaving only the user request
def make_req_from_string(original_string: str, key_world: str):
key_world_count = len(key_world.split())
words = original_string.split()
remaining_words = words[key_world_count+1:]
result_string = ' '.join(remaining_words)
return result_string
# making the text-to-speech model
def make_tss_model(lang: str, tts: str):
language = tts[lang]['language']
model_id = tts[lang]['model_id']
sample_rate = tts[lang]['sample_rate']
speaker = tts[lang]['speaker']
return VoiceModel(language, model_id, sample_rate, speaker)
# The function translates text into voice
def speech_the_text(text: str, tts_model: VoiceModel):
tts_model.play_audio(text)
return 0
# execute command function
def execute_cmd(cmd: str, key_world: str, voice: str, assistant_alias, assistant_cmd_list, assistant_tts, assistant_stt, assistant_tra, current_settings, tts_model: VoiceModel):
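"""Execute the recognized command: handle the built-ins first, otherwise fall through to the user-defined custom commands."""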
cur_tra_to_lang = current_settings['ASSISTANT_TRA']
cur_speach_lang = current_settings['ASSISTANT_TTS']
cur_speaker_lang = current_settings['ASSISTANT_STT']
is_quick_answer = current_settings['IS_QUICK_ANSWER']
speak_the_answer = current_settings['SPEAK_THE_ANSWER']
if cmd == 'help':
# open the documentation in the browser
webbrowser.open('https://github.com/OlegRad4encko/Diploma')
# command time - shows a notification with the current time, or speaks it
elif cmd == 'time':
current_time = datetime.now()
p = inflect.engine()
hour_text = p.number_to_words(current_time.hour)
minute_text = p.number_to_words(current_time.minute)
time_text = f"'{hour_text} {p.plural('hour', current_time.hour)} and {minute_text} {p.plural('minute', current_time.minute)} at the moment"
translate = TranslatorModel('en', cur_speach_lang)
translated_text = translate.translate_text(time_text)
if speak_the_answer == "False":
show_notification("Голосовий помічник",translated_text)
else:
speech_the_text(translated_text, tts_model)
# command open browser - opens the browser
elif cmd == 'open browser':
webbrowser.open('https://www.google.com.ua/?hl=uk')
# The joke command - shows a notification with a joke, or tells it aloud
elif cmd == 'joke':
jokes = ['Є люди, які несуть в собі щастя. Коли ці люди поруч, все ніби стає яскравим і барвистим. Але моя дружина їх називає алкашами!',
'З одного боку, в гості без пляшки не підеш. А з іншого якщо в тебе є пляшка, та на холєру в гості пертись?',
'Кохана, давай миритися. Вибач мене, я був не правий. Стоп! Не їж! Я приготую щось інше!',
'Кажуть, що у геніїв в будинку має бути безлад. Дивлюсь на свою дитину і гордість розпирає! Енштейна виховую!.',
'Христос родився! Маю три вакцини, сертифікат вакцинації, негативний тест, оброблений антисептиком! Колядувати можна?',
'Пішла, куда послав. Веду себе, як ти назвав. І чому я тебе раніше не слухала?!']
joke = random.choice(jokes)
translated_text = ''
if speak_the_answer == "False":
if cur_speach_lang != 'ua':
translate = TranslatorModel('uk', cur_speach_lang)
translated_text = translate.translate_text(joke)
else:
translated_text = joke
show_notification("Голосовий помічник",translated_text)
else:
if cur_speach_lang != 'ua':
translate = TranslatorModel('uk', cur_speach_lang)
translated_text = translate.translate_text(joke)
else:
translated_text = joke
speech_the_text(translated_text, tts_model)
# command write - writes to the active window, everything that the user says
elif cmd == 'write':
write_process = Process(target=listen_write, args=[assistant_stt[current_settings['ASSISTANT_STT']]['model']])
write_process.start()
answer = "Диктуйте"
if speak_the_answer == "False":
if cur_speach_lang != 'ua':
translate = TranslatorModel('uk', cur_speach_lang)
translated_text = translate.translate_text(answer)
else:
translated_text = answer
time.sleep(5)
show_notification("Голосовий помічник",translated_text)
else:
if cur_speach_lang != 'ua':
translate = TranslatorModel('uk', cur_speach_lang)
translated_text = translate.translate_text(answer)
else:
translated_text = answer
time.sleep(6)
speech_the_text(translated_text, tts_model)
# command find - opens a browser window with a user request
elif cmd == 'find':
va_request = make_req_from_string(voice, key_world)
url_request = "https://www.google.com/search?q=" + '+'.join(va_request.split())
webbrowser.open(url_request)
show_notification("Голосовий помічник","Відкрито браузер з запитом '"+va_request+"'")
# the say command - speaks what the user said, or notifies that voice output is disabled
elif cmd == 'say':
if speak_the_answer == "False":
show_notification("Голосовий помічник","Увімкніть озвучування відповідей голосового помічника")
else:
speech_the_text(make_req_from_string(voice, key_world), tts_model)
# makes a request to the GPT chat
elif cmd == 'gpt':
va_request = make_req_from_string(voice, key_world)
show_notification("Голосовий помічник","Очікуйте, звертаюсь до ChatGPT, це може зайняти декілька хвилин")
result = asyncio.run(gpt_req(va_request))
result_without_numbers = convert_numbers_in_text(result)
if len(result_without_numbers) < 500:
if speak_the_answer == "False":
if len(result_without_numbers) < 100:
show_notification("Голосовий помічник", result)
else:
paint_gpt_answer(result)
else:
paint_gpt_answer(result)
translate = TranslatorModel('en', cur_speach_lang)
translated_text = translate.translate_text(result_without_numbers)
speech_the_text(translated_text, tts_model)
else:
paint_gpt_answer(result)
# translate command
elif cmd == 'translate':
va_request = make_req_from_string(voice, key_world)
translate = TranslatorModel(cur_speach_lang, cur_tra_to_lang)
translated_text = translate.translate_text(va_request)
result = va_request + '\n\n' + translated_text
paint_gpt_answer(result)
# custom command
else:
if assistant_cmd_list[cmd]['commandType'] == 'explorer':
execute_custom_command_exe(assistant_cmd_list[cmd]['customCommand'], 'path')
elif assistant_cmd_list[cmd]['commandType'] == 'execute':
execute_custom_command_exe(assistant_cmd_list[cmd]['customCommand'], 'file')
elif assistant_cmd_list[cmd]['commandType'] == 'openWebPage':
webbrowser.open(f'{assistant_cmd_list[cmd]["customCommand"]}')
else:
show_notification("Голосовий помічник", "Я Вас не зрозумів")
## recognizing function v1
# def recognize_cmd(cmd: str, assistant_cmd_list):
# rc = {'cmd': '', 'percent': 0, 'key_world': ''}
# max_per = 0
# key_world = ''
# for word_list_key in assistant_cmd_list:
# word_list = assistant_cmd_list[word_list_key]["word_list"]
# for x in word_list:
# vrt = fuzz.ratio(cmd, x)
# if max_per < vrt:
# max_per = vrt
# key_world = x
# if vrt > rc['percent']:
# rc['cmd'] = word_list_key
# rc['percent'] = vrt
# rc['key_world'] = key_world
# return rc
## recognizing function v2
def recognize_cmd(cmd: str, assistant_cmd_list):
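"""Fuzzy-match the spoken phrase against each command's trigger word list and return the best-scoring command with its match ratio."""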
rc = {'cmd': '', 'percent': 0, 'key_world': ''}
for word_list_key in assistant_cmd_list:
word_list = assistant_cmd_list[word_list_key]["word_list"]
max_per, key_world = max((fuzz.partial_ratio(cmd, x), x) for x in word_list)
if max_per > rc['percent']:
rc['cmd'] = word_list_key
rc['percent'] = max_per
rc['key_world'] = key_world
return rc
# voice recognition callback function
def stt_respond(voice: str, assistant_alias, assistant_cmd_list, assistant_tts, assistant_stt, assistant_tra, current_settings, tts_model):
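"""Speech-recognition callback: act only on phrases that start with the assistant alias."""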
if voice.startswith(assistant_alias):
cmd = recognize_cmd(filter_cmd(voice, assistant_alias), assistant_cmd_list)
if cmd['cmd'] not in assistant_cmd_list.keys():
show_notification("Голосовий помічник", "Я Вас не зрозумів")
else:
execute_cmd(cmd['cmd'], cmd['key_world'], voice, assistant_alias, assistant_cmd_list, assistant_tts, assistant_stt, assistant_tra, current_settings, tts_model)
# filtering the cmd, removing the assistant alias from raw_voice
def filter_cmd(raw_voice: str, assistant_alias):
cmd = raw_voice
for x in assistant_alias:
cmd = cmd.replace(x, "").strip()
return cmd
# update settings for voice assistant
def update_settings():
settings_manager.load_settings()
current_settings = settings_manager.get_setting('CURRENT_SETTINGS', {})
assistant_cmd_list = settings_manager.get_setting('ASSISTANT_CMD_LIST', {})
assistant_alias = settings_manager.get_setting('ASSISTANT_ALIAS', {})
assistant_stt = settings_manager.get_setting('ASSISTANT_STT', {})
assistant_tts = settings_manager.get_setting('ASSISTANT_TTS', {})
assistant_tra = settings_manager.get_setting('ASSISTANT_TRA', {})
return {
"current_settings": current_settings,
"assistant_cmd_list": assistant_cmd_list,
"assistant_alias": assistant_alias,
"assistant_stt": assistant_stt,
"assistant_tts": assistant_tts,
"assistant_tra": assistant_tra
}
# Run command processor
def run_command_processor():
settings = update_settings()
current_settings = settings['current_settings']
assistant_cmd_list = settings['assistant_cmd_list']
assistant_alias_list = settings['assistant_alias']
assistant_stt = settings['assistant_stt']
assistant_tts = settings['assistant_tts']
assistant_tra = settings['assistant_tra']
cur_speach_lang = current_settings['ASSISTANT_TTS']
tts_model = make_tss_model(cur_speach_lang, assistant_tts)
assistant_alias = tuple(assistant_alias_list)
speachVA = speachToText(assistant_stt[current_settings['ASSISTANT_STT']]['model'])
listen(stt_respond, speachVA.get_model(), assistant_alias, assistant_cmd_list, assistant_tts, assistant_stt, assistant_tra, current_settings, tts_model)
| [] |
2024-01-10 | Yushi-Hu/tifa | tifa_test.py | from tifa import get_question_and_answers, filter_question_and_answers, UnifiedQAModel, tifa_score_benchmark, tifa_score_single, VQAModel
import json
import openai
if __name__ == "__main__":
#####################################
## Test TIFA score on benchmark
#####################################
# test tifa benchmarking
results = tifa_score_benchmark("mplug-large", "sample/sample_question_answers.json", "sample/sample_imgs.json")
with open("sample/sample_evaluation_result.json", "w") as f:
json.dump(results, f, indent=4)
#####################################
## Test TIFA score on one image
#####################################
# prepare the models
openai.api_key = "[OpenAI key]"
unifiedqa_model = UnifiedQAModel("allenai/unifiedqa-v2-t5-large-1363200")
vqa_model = VQAModel("mplug-large")
img_path = "sample/drawbench_8.jpg"
text = "a black colored banana."
# Generate questions with GPT-3.5-turbo
gpt3_questions = get_question_and_answers(text)
# Filter questions with UnifiedQA
filtered_questions = filter_question_and_answers(unifiedqa_model, gpt3_questions)
# See the questions
print(filtered_questions)
# calculate TIFA score
result = tifa_score_single(vqa_model, filtered_questions, img_path)
print(f"TIFA score is {result['tifa_score']}")
print(result)
| [] |
2024-01-10 | Yushi-Hu/tifa | tifascore~openai_api.py | import openai
def openai_completion(prompt, engine="gpt-3.5-turbo", max_tokens=700, temperature=0):
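"""Single-turn chat completion helper; returns the assistant's reply text."""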
resp = openai.ChatCompletion.create(
model=engine,
messages=[{"role": "user", "content": prompt}],
max_tokens=max_tokens,
temperature=temperature,
stop=["\n\n", "<|endoftext|>"]
)
return resp['choices'][0]['message']['content']
| [] |
2024-01-10 | Microwave-WYB/pyt_spider | spider.py | import time
import os
import json
import pandas as pd
import pytesseract
import re
import smtplib
import selenium
from datetime import datetime as dt
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
from operator import itemgetter
from typing import Union
from PIL import Image
from selenium import webdriver
from selenium.webdriver.common.by import By
from tqdm import tqdm
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from langchain import PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.schema.output_parser import StrOutputParser
summary_prompt = """给定一个招标文件,我需要你提取以下关键信息。请提取以下字段:
1. "招标人":发布招标的单位或者个人的名称。
2. "报价":招标的报价,单位为万元。
3. "联系信息":包括地址、电话和电子邮件等的联系方式。
4. "项目描述":包括项目的目标、背景、范围和期限等详细描述。
5. "资格要求":对参与投标的公司或个人的资格要求。
6. "投标程序":投标的具体步骤和程序。
这是招标文件OCR提取的文字,注意可能存在错误:
{text}
根据这个文件,请提取出上述关键信息。
输出格式如下,一定使用英文冒号和空格分隔,否则无法识别。"是否投标"字段的值为"是"或"否",表示是否是否投标。
报价: 100
招标人信息: xxx博物馆,地址: xxx街道xxx号,电话: 0123456789,电子邮件: [email protected]
代理机构信息: xxx有限公司,地址: xxx街道xxx号,电话: 0123456789,电子邮件: [email protected]
项目描述: 这是一个关于xxxx的项目,目标是xxxx,期限为xxxx。
资格要求: 投标者需要有xxxx的经验,且公司规模需要在xxxx以上。
投标程序: 首先需要xxxx,然后xxxx,最后将在xxxx公开投标结果。
是否投标: 是
"""
suggest_prompt = """给定一个招标文件的摘要,我需要你判断是否是否投标。
{text}
品艺堂以策略性设计为核心,拥有从策划到设计,再到工程施工、项目管理的全产业链专业能力。并且在长期的实践工作中,形成了一套以“服务设计”为核心理念的独特的工作方法。为客户提供博物馆展陈、企业展厅布展、专题馆、规划馆、景区规划设计、导视标识等“一站式”文化空间解决方案。该项目是否符合品艺堂的业务范围?仅回答是或否。
回答:
"""
select_prompt = """以下是招标项目的序号和标题:
{text}
品艺堂以策略性设计为核心,拥有从策划到设计,再到工程施工、项目管理的全产业链专业能力。并且在长期的实践工作中,形成了一套以“服务设计”为核心理念的独特的工作方法。为客户提供博物馆展陈、企业展厅布展、专题馆、规划馆、景区规划设计、导视标识等“一站式”文化空间解决方案。以上的项目,有哪些明显不符合品艺堂的业务范围?请去掉明显不符合的项目,仅将符合的项目序号写在下面,以逗号分隔。
例如: 0,2,3,5
链接:
"""
gpt35 = ChatOpenAI(model="gpt-3.5-turbo-16k", temperature=0)
gpt4 = ChatOpenAI(model="gpt-4", temperature=0)
summarize_chain = (
{"text": itemgetter("text")}
| PromptTemplate.from_template(summary_prompt)
| gpt35
| StrOutputParser()
)
select_chain = (
{"text": itemgetter("text")}
| PromptTemplate.from_template(select_prompt)
| gpt4
| StrOutputParser()
)
suggest_chain = (
{"text": itemgetter("text")}
| PromptTemplate.from_template(suggest_prompt)
| gpt4
| StrOutputParser()
)
def get_urls_by_keyword(keyword: str) -> pd.DataFrame:
"""通过关键字获取所有的招标url
Args:
keyword (str): 关键字
Returns:
pd.DataFrame: 招标标题和url
"""
# 初始化浏览器
driver = webdriver.Chrome()
driver.get("http://bulletin.sntba.com")
time.sleep(1)
# enter the keyword
driver.find_element(By.CSS_SELECTOR, "#wordkey").send_keys(keyword)
driver.find_element(
By.CSS_SELECTOR,
"body > div.con > div.mian.contents.clear > div.mian_right.fr > div.search > ul:nth-child(2) > li:nth-child(2) > a",
).click()
# filter to the last 2 days
driver.find_element(By.LINK_TEXT, "2天内").click()
# get the total number of pages
driver.switch_to.frame(driver.find_element(By.CSS_SELECTOR, "#iframe"))
try:
total_page = int(
driver.find_element(
By.CSS_SELECTOR, "body > div.pagination > label:nth-child(1)"
).text
)
except selenium.common.exceptions.NoSuchElementException:
total_page = 1
print("总页数为:", total_page)
results = []
for i in tqdm(range(total_page)):
time.sleep(1)
trs = driver.find_elements(By.TAG_NAME, "tr")[1:]
for tr in trs:
tds = tr.find_elements(By.TAG_NAME, "td")
a = tds[0].find_element(By.TAG_NAME, "a")
title = a.get_attribute("title")
url = a.get_attribute("href").split("'")[1]
release_date = tds[-2].text
start_time = tds[-1].get_attribute("id")
print(title, url, release_date, start_time)
results.append([title, url, release_date, start_time])
if i != total_page - 1:
driver.find_element(By.LINK_TEXT, "下一页").click()
driver.quit()
# TODO: use gpt4 to select the most relevant url
print("开始选择最相关的招标信息")
indices = select_chain.invoke({"text": "\n".join([f"{i}. {title}" for i, (title, _, _, _) in enumerate(results)])}).strip().split(",")
results = [results[int(i)] for i in indices]
return pd.DataFrame(results, columns=["标题", "链接", "发布日期", "开标时间"])
def get_pdf_text(url: str) -> str:
"""通过url获取pdf的文本
Args:
url (str): 招标信息url
Returns:
str: pdf中的文本
"""
# 初始化浏览器
driver = webdriver.Chrome()
driver.get(url)
time.sleep(1)
# switch to the iframe
driver.switch_to.frame(driver.find_element(By.CSS_SELECTOR, "#iframe"))
# wait for the PDF to finish loading
WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.CLASS_NAME, "page"))
)
pages = driver.find_elements(By.CLASS_NAME, "page")
print("总页数为:", len(pages))
text = ""
text_spans = driver.find_element(By.CLASS_NAME, "textLayer").find_elements(
By.TAG_NAME, "span"
)
if len(text_spans) > 10:
for span in text_spans:
text += span.text
else:
# take full-screen screenshots of each page for OCR
driver.find_element(By.CSS_SELECTOR, "#presentationMode").click()
for i in range(len(pages)):
time.sleep(1)
driver.save_screenshot(f"./.temp/page.png")
text += ocr(f"./.temp/page.png")
driver.find_element(By.CSS_SELECTOR, "body").send_keys(Keys.PAGE_DOWN)
print(text)
if "频率过高" in text:
time.sleep(30)
driver.refresh()
return get_pdf_text(url)
return text
def ocr(fp: Union[str, bytes]) -> str:
"""
对图片进行OCR识别,将图片中的文本内容识别为字符串。
Args:
fp (Union[str, bytes]): 图片文件或者文件名
Returns:
str: 识别的文本内容
"""
# 打开图像
image = Image.open(fp)
# 使用Tesseract将图像中的文字识别为字符串
text = pytesseract.image_to_string(image, lang="chi_sim", config="--psm 6 --oem 1")
return text.replace(" ", "")
def summarize(text: str) -> dict:
"""
对文本进行摘要提取,将文本中的重要信息提取出来。
Args:
text (str): 文本内容
Returns:
dict: 摘要信息
"""
def translate_summary_to_dict(text: str) -> dict:
pattern = r"(.*?)\s*:\s*(.*?)(?=\n[^:]*:|$)"
matches = re.findall(pattern, text, re.DOTALL)
return {key.strip(): value.strip() for key, value in matches}
print("开始摘要提取")
summary = summarize_chain.invoke({"text": text}).strip()
print("开始建议是否投标")
suggestion = suggest_chain.invoke({"text": summary}).strip()
print(summary)
key_info = translate_summary_to_dict(summary)
key_info["是否投标"] = suggestion == "是"
print(key_info)
return key_info
def send_email(subject, body, to, fpath):
# mail server settings
smtp_server_name = 'smtp.office365.com'
port = 587
username = '[email protected]' # mailbox address
password = 'Wyb2001622ms' # mailbox password
msg = MIMEMultipart()
msg['From'] = username
msg['To'] = ', '.join(to) # recipients; separate multiple addresses with commas
msg['Subject'] = subject
msg.attach(MIMEText(body, "html"))
part = MIMEBase('application', "octet-stream")
part.set_payload(open(fpath, "rb").read())
encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment', filename=fpath.split("/")[-1])
msg.attach(part)
try:
server = smtplib.SMTP(smtp_server_name, port)
server.ehlo()
server.starttls()
server.login(username, password)
server.sendmail(username, to, msg.as_string())
server.close()
print('邮件发送成功.')
except Exception as e:
print(f'邮件发送失败: {e}')
async def async_summary_text(text):
# assumes langchain provides the async variant ainvoke()
summary = await summarize_chain.ainvoke({"text": text})
return json.loads(summary)
def df_to_html(df):
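"""Render the tender DataFrame as a simple HTML list for the notification email body."""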
html = '<html><head><style>body { max-width: 800px; }</style></head><body>\n'
for index, row in df.iterrows():
html += '<strong>' + str(row['标题']) + '</strong>\n'
html += '<ul>\n'
html += '<li><strong>链接: </strong><a href="' + str(row['链接']) + '">链接</a></li>\n'
html += '<li><strong>投标价格(万元): </strong>' + str(row['报价']) + '</li>\n'
html += '<li><strong>招标机构信息: </strong>' + str(row['招标人信息']) + '</li>\n'
html += '<li><strong>代理机构信息: </strong>' + str(row['代理机构信息']) + '</li>\n'
html += '<li><strong>项目描述: </strong>' + str(row['项目描述']) + '</li>\n'
html += '<li><strong>资格要求: </strong>' + str(row['资格要求']) + '</li>\n'
html += '<li><strong>投标流程: </strong>' + str(row['投标程序']) + '</li>\n'
html += '<li><strong>发布日期: </strong>' + str(row['发布日期']) + '</li>\n'
html += '<li><strong>开标时间: </strong>' + str(row['开标时间']) + '</li>\n'
html += '</ul>'
return html
def main():
# load all keywords
with open("关键字.txt", "r") as f:
keywords = [line.strip() for line in f.readlines()]
# if 招标数据_YY-mm-dd.csv already exists, read it into df; otherwise create a new df
file_name = f"招标数据_{dt.now().strftime('%Y-%m-%d')}"
df = pd.read_csv(f"{file_name}.csv") if os.path.exists(f"{file_name}.csv") else pd.DataFrame(
columns=[
"标题",
"链接",
"报价",
"招标人信息",
"代理机构信息",
"项目描述",
"资格要求",
"投标程序",
"发布日期",
"开标时间",
"是否投标",
"文本",
]
)
pbar = tqdm(keywords)
for keyword in pbar:
pbar.set_description(f"Processing keyword '{keyword}'")
new_df = get_urls_by_keyword(keyword)
df_to_append = new_df[~new_df["链接"].isin(df["链接"])]
print("新增招标信息数量:", len(df_to_append))
for _, row in df_to_append.iterrows():
try:
text = get_pdf_text(row["链接"])
summary_result = summarize(text)
row["报价"] = summary_result.get("报价", "")
row["招标人信息"] = summary_result.get("招标人信息", "")
row["代理机构信息"] = summary_result.get("代理机构信息", "")
row["项目描述"] = summary_result.get("项目描述", "")
row["资格要求"] = summary_result.get("资格要求", "")
row["投标程序"] = summary_result.get("投标程序", "")
row["是否投标"] = summary_result.get("是否投标", "")
row["文本"] = text
# df = df.append(row, ignore_index=True)
df = pd.concat([df, row.to_frame().T], ignore_index=True)
df.to_csv(f"{file_name}.csv", index=False)
# drop rows where 是否投标 is False, save to excel, then drop the 是否投标 and 文本 columns
clean_df = df[df["是否投标"] == True].drop(columns=["是否投标", "文本"])
clean_df.to_excel(f"{file_name}.xlsx", index=False, sheet_name=f"{file_name}")
with open(f"{file_name}.html", "w") as f:
f.write(df_to_html(clean_df))
except Exception as e:
print(f"Error processing {row['链接']}: {e}")
else:
print(f"Successfully processed {row['链接']}")
# send the email
with open(f"{file_name}.html", "r") as f:
send_email(file_name, f.read(), ["[email protected]", "[email protected]", "[email protected]"], f"{file_name}.xlsx")
# send_email(file_name, f.read(), ["[email protected]"], f"{file_name}.xlsx")
main()
| [
"给定一个招标文件的摘要,我需要你判断是否是否投标。\n\n{text}\n\n品艺堂以策略性设计为核心,拥有从策划到设计,再到工程施工、项目管理的全产业链专业能力。并且在长期的实践工作中,形成了一套以“服务设计”为核心理念的独特的工作方法。为客户提供博物馆展陈、企业展厅布展、专题馆、规划馆、景区规划设计、导视标识等“一站式”文化空间解决方案。该项目是否符合品艺堂的业务范围?仅回答是或否。\n\n回答:\n",
"给定一个招标文件,我需要你提取以下关键信息。请提取以下字段:\n1. \"招标人\":发布招标的单位或者个人的名称。\n2. \"报价\":招标的报价,单位为万元。\n3. \"联系信息\":包括地址、电话和电子邮件等的联系方式。\n4. \"项目描述\":包括项目的目标、背景、范围和期限等详细描述。\n5. \"资格要求\":对参与投标的公司或个人的资格要求。\n6. \"投标程序\":投标的具体步骤和程序。\n\n这是招标文件OCR提取的文字,注意可能存在错误:\n\n{text}\n\n根据这个文件,请提取出上述关键信息。\n\n输出格式如下,一定使用英文冒号和空格分隔,否则无法识别。\"是否投标\"字段的值为\"是\"或\"否\",表示是否是否投标。\n报价: 100\n招标人信息: xxx博物馆,地址: xxx街道xxx号,电话: 0123456789,电子邮件: [email protected]\n代理机构信息: xxx有限公司,地址: xxx街道xxx号,电话: 0123456789,电子邮件: [email protected]\n项目描述: 这是一个关于xxxx的项目,目标是xxxx,期限为xxxx。\n资格要求: 投标者需要有xxxx的经验,且公司规模需要在xxxx以上。\n投标程序: 首先需要xxxx,然后xxxx,最后将在xxxx公开投标结果。\n是否投标: 是\n",
"以下是招标项目的序号和标题:\n\n{text}\n\n品艺堂以策略性设计为核心,拥有从策划到设计,再到工程施工、项目管理的全产业链专业能力。并且在长期的实践工作中,形成了一套以“服务设计”为核心理念的独特的工作方法。为客户提供博物馆展陈、企业展厅布展、专题馆、规划馆、景区规划设计、导视标识等“一站式”文化空间解决方案。以上的项目,有哪些明显不符合品艺堂的业务范围?请去掉明显不符合的项目,仅将符合的项目序号写在下面,以逗号分隔。\n\n例如: 0,2,3,5\n\n链接:\n"
] |
2024-01-10 | XuanTuC/chatgpt-mirai-qq-bot | platforms~telegram_bot.py | import math
import os
import sys
import asyncio
import openai
from graia.ariadne.message.chain import MessageChain
from graia.ariadne.message.element import Image, Plain, Voice
from loguru import logger
from telegram.request import HTTPXRequest
from io import BytesIO, IOBase
from universal import handle_message
sys.path.append(os.getcwd())
from constants import botManager, config
from telegram import Update, constants
from telegram.ext import ApplicationBuilder, ContextTypes, MessageHandler, filters, CommandHandler
async def on_message(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
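"""Handle an incoming Telegram message: always respond in private chats; in groups only when the bot is mentioned or replied to."""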
type = 'friend' if update.message.chat.type == constants.ChatType.PRIVATE else 'group' if update.message.chat.type in [
constants.ChatType.GROUP, constants.ChatType.SUPERGROUP] else None
if type is None:
return
bot_username = (await context.bot.get_me()).username
if type == 'group' and (
bot_username not in update.message.text and (
update.message.reply_to_message is None or update.message.reply_to_message.from_user is None or update.message.reply_to_message.from_user.username != bot_username)
):
logger.debug(f"忽略消息(未满足匹配规则): {update.message.text} ")
return
async def response(msg):
if isinstance(msg, MessageChain):
for elem in msg:
if isinstance(elem, Plain):
await update.message.reply_text(str(elem))
if isinstance(elem, Image):
await update.message.reply_photo(photo=await elem.get_bytes())
if isinstance(elem, Voice):
await update.message.reply_audio(audio=await elem.get_bytes())
return
if isinstance(msg, str):
return await update.message.reply_text(msg)
if isinstance(msg, Image):
return await update.message.reply_photo(photo=await msg.get_bytes())
if isinstance(msg, Voice):
await update.message.reply_audio(audio=await msg.get_bytes())
return
await handle_message(
response,
f"{type}-{update.message.chat.id}",
update.message.text.replace(f"@{bot_username}", '').strip(),
is_manager=update.message.from_user.id == config.telegram.manager_chat,
nickname=update.message.from_user.full_name or "群友"
)
async def on_check_api(update: Update, context: ContextTypes.DEFAULT_TYPE):
if not update.message.from_user.id == config.telegram.manager_chat:
return await update.message.reply_text("您没有权限执行这个操作")
tasklist = []
bots = botManager.bots.get("openai-api", [])
for account in bots:
tasklist.append(botManager.check_api_info(account))
msg = await update.message.reply_text("查询中,请稍等……")
answer = ''
for account, r in zip(bots, await asyncio.gather(*tasklist)):
grant_used, grant_available, has_payment_method, total_usage, hard_limit_usd = r
total_available = grant_available
if has_payment_method:
total_available = total_available + hard_limit_usd - total_usage
answer = answer + '* `' + account.api_key[:6] + "**" + account.api_key[-3:] + '`'
answer = answer + f' - ' + f'本月已用: `{round(total_usage, 2)}$`, 可用:`{round(total_available, 2)}$`, 绑卡:{has_payment_method}'
answer = answer + '\n'
if answer == '':
await msg.edit_text("没有查询到任何 API")
return
await msg.edit_text(answer)
async def bootstrap() -> None:
"""Set up the application and a custom webserver."""
app = ApplicationBuilder() \
.proxy_url(config.telegram.proxy or openai.proxy) \
.token(config.telegram.bot_token) \
.connect_timeout(30)\
.read_timeout(30)\
.write_timeout(30)\
.get_updates_request(HTTPXRequest(http_version="1.1")) \
.http_version('1.1') \
.build()
app.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, on_message))
app.add_handler(CommandHandler("check_api", on_check_api))
await app.initialize()
await botManager.login()
await app.start()
logger.info("启动完毕,接收消息中……")
await app.updater.start_polling(drop_pending_updates=True)
def main():
loop = asyncio.get_event_loop()
loop.run_until_complete(bootstrap())
loop.run_forever()
| [] |
2024-01-10 | XuanTuC/chatgpt-mirai-qq-bot | universal.py | import os
import re
from typing import Callable
import openai
from tempfile import NamedTemporaryFile
from graia.ariadne.message.chain import MessageChain
from graia.ariadne.message.element import Plain, Voice
from httpx import HTTPStatusError, ConnectTimeout
from loguru import logger
from requests.exceptions import SSLError, ProxyError, RequestException
from urllib3.exceptions import MaxRetryError
from constants import config
from constants import botManager
from conversation import ConversationHandler
from exceptions import PresetNotFoundException, BotRatelimitException, ConcurrentMessageException, \
BotTypeNotFoundException, NoAvailableBotException, BotOperationNotSupportedException, CommandRefusedException
from middlewares.baiducloud import MiddlewareBaiduCloud
from middlewares.concurrentlock import MiddlewareConcurrentLock
from middlewares.ratelimit import MiddlewareRatelimit
from middlewares.timeout import MiddlewareTimeout
from utils.azure_tts import synthesize_speech
middlewares = [MiddlewareTimeout(), MiddlewareRatelimit(), MiddlewareBaiduCloud(), MiddlewareConcurrentLock()]
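# each middleware is layered around the request/respond pipeline via the wrap_* helpers in handle_message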
async def handle_message(_respond: Callable, session_id: str, message: str,
chain: MessageChain = MessageChain("Unsupported"), is_manager: bool = False,
nickname: str = '某人'):
"""正常聊天"""
if not message.strip():
return config.response.placeholder
for r in config.trigger.ignore_regex:
if re.match(r, message):
logger.debug(f"此消息满足正则表达式: {r},忽略……")
return
# commands that can run even when no conversation exists yet
conversation_handler = await ConversationHandler.get_handler(session_id)
conversation_context = None
# conversation selected by an AI-specific prefix
if ' ' in message and (config.trigger.allow_switching_ai or is_manager):
for ai_type, prefixes in config.trigger.prefix_ai.items():
for prefix in prefixes:
if prefix + ' ' in message:
conversation_context = await conversation_handler.first_or_create(ai_type)
message = message.removeprefix(prefix + ' ')
break
else:
# Continue if the inner loop wasn't broken.
continue
# Inner loop was broken, break the outer.
break
if not conversation_handler.current_conversation:
conversation_handler.current_conversation = await conversation_handler.create(
config.response.default_ai)
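# helpers that wrap each middleware around the next handler in the chain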
def wrap_request(n, m):
async def call(session_id, message, conversation_context, respond):
await m.handle_request(session_id, message, respond, conversation_context, n)
return call
def wrap_respond(n, m):
async def call(session_id, message, rendered, respond):
await m.handle_respond(session_id, message, rendered, respond, n)
return call
async def respond(msg: str):
if not msg:
return
ret = await _respond(msg)
for m in middlewares:
await m.on_respond(session_id, message, msg)
# TODO: refactor so that each platform's respond only handles MessageChain
if isinstance(msg, str):
msg = MessageChain([Plain(msg)])
nonlocal conversation_context
if not conversation_context:
conversation_context = conversation_handler.current_conversation
# TTS Converting
if conversation_context.conversation_voice and isinstance(msg, MessageChain):
for elem in msg:
if isinstance(elem, Plain) and str(elem):
output_file = NamedTemporaryFile(mode='w+b', suffix='.wav', delete=False)
output_file.close()
if await synthesize_speech(
str(elem),
output_file.name,
conversation_context.conversation_voice
):
await _respond(Voice(path=output_file.name))
try:
os.unlink(output_file.name)
except OSError:
pass
return ret
async def request(_session_id, prompt: str, conversation_context, _respond):
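# core handler: run control commands when matched, otherwise ask the conversation and stream replies through the middlewares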
try:
task = None
# no prefix - initialize the conversation normally
if bot_type_search := re.search(config.trigger.switch_command, prompt):
if not (config.trigger.allow_switching_ai or is_manager):
await respond(f"不好意思,只有管理员才能切换AI!")
return
conversation_handler.current_conversation = await conversation_handler.create(
bot_type_search.group(1).strip())
await respond(f"已切换至 {bot_type_search.group(1).strip()} AI,现在开始和我聊天吧!")
return
# the conversation context that will actually be used
if not conversation_context:
conversation_context = conversation_handler.current_conversation
# commands that require an existing conversation
# reset the conversation
if prompt in config.trigger.reset_command:
task = conversation_context.reset()
# roll back the conversation
elif prompt in config.trigger.rollback_command:
task = conversation_context.rollback()
elif voice_type_search := re.search(config.trigger.switch_voice, prompt):
if config.azure.tts_speech_key:
conversation_context.conversation_voice = voice_type_search.group(1).strip()
await respond(
f"已切换至 {conversation_context.conversation_voice} 语音!详情参考: "
f"https://learn.microsoft.com/en-us/azure/cognitive-services/speech-service/language-support?tabs=tts#neural-voices")
else:
await respond(f"未配置 Azure TTS 账户,无法切换语音!")
return
elif prompt in config.trigger.mixed_only_command:
conversation_context.switch_renderer("mixed")
await respond(f"已切换至图文混合模式,接下来我的回复将会以图文混合的方式呈现!")
return
elif prompt in config.trigger.image_only_command:
conversation_context.switch_renderer("image")
await respond(f"已切换至纯图片模式,接下来我的回复将会以图片呈现!")
return
elif prompt in config.trigger.text_only_command:
conversation_context.switch_renderer("text")
await respond(f"已切换至纯文字模式,接下来我的回复将会以文字呈现(被吞除外)!")
return
elif switch_model_search := re.search(config.trigger.switch_model, prompt):
model_name = switch_model_search.group(1).strip()
if model_name in conversation_context.supported_models:
if not (is_manager or model_name in config.trigger.allowed_models):
await respond(f"不好意思,只有管理员才能切换到 {model_name} 模型!")
else:
await conversation_context.switch_model(model_name)
await respond(f"已切换至 {model_name} 模型,让我们聊天吧!")
else:
await respond(
f"当前的 AI 不支持切换至 {model_name} 模型,目前仅支持:{conversation_context.supported_models}!")
return
# load a preset
if preset_search := re.search(config.presets.command, prompt):
logger.trace(f"{session_id} - 正在执行预设: {preset_search.group(1)}")
async for _ in conversation_context.reset(): ...
task = conversation_context.load_preset(preset_search.group(1))
elif not conversation_context.preset:
# no preset loaded yet
logger.trace(f"{session_id} - 未检测到预设,正在执行默认预设……")
# load implicitly without echoing the preset content
async for _ in conversation_context.load_preset('default'): ...
# no pending task, so just chat!
if not task:
task = conversation_context.ask(prompt=prompt, chain=chain, name=nickname)
async for rendered in task:
if rendered:
if str(rendered).strip() == '':
logger.warning("检测到内容为空的输出,已忽略")
continue
action = lambda session_id, prompt, rendered, respond: respond(rendered)
for m in middlewares:
action = wrap_respond(action, m)
# start processing handle_response
await action(session_id, prompt, rendered, respond)
for m in middlewares:
await m.handle_respond_completed(session_id, prompt, respond)
except CommandRefusedException as e:
await respond(str(e))
except openai.error.InvalidRequestError as e:
await respond("服务器拒绝了您的请求,原因是" + str(e))
except BotOperationNotSupportedException:
await respond("暂不支持此操作,抱歉!")
except ConcurrentMessageException as e: # the chatbot account received multiple messages at once
await respond(config.response.error_request_concurrent_error)
except (BotRatelimitException, HTTPStatusError) as e: # the chatbot account is rate-limited
await respond(config.response.error_request_too_many.format(exc=e))
except NoAvailableBotException as e: # no account available for this AI type
await respond(f"当前没有可用的{e}账号,不支持使用此 AI!")
except BotTypeNotFoundException as e: # unknown AI type
respond_msg = f"AI类型{e}不存在,请检查你的输入是否有问题!目前仅支持:\n"
if len(botManager.bots['chatgpt-web']) > 0:
respond_msg += "* chatgpt-web - OpenAI ChatGPT 网页版\n"
if len(botManager.bots['openai-api']) > 0:
respond_msg += "* chatgpt-api - OpenAI ChatGPT API版\n"
if len(botManager.bots['bing-cookie']) > 0:
respond_msg += "* bing-c - 微软 New Bing (创造力)\n"
respond_msg += "* bing-b - 微软 New Bing (平衡)\n"
respond_msg += "* bing-p - 微软 New Bing (精确)\n"
if len(botManager.bots['bard-cookie']) > 0:
respond_msg += "* bard - Google Bard\n"
if len(botManager.bots['yiyan-cookie']) > 0:
respond_msg += "* yiyan - 百度 文心一言\n"
if len(botManager.bots['chatglm-api']) > 0:
respond_msg += "* chatglm-api - 清华 ChatGLM-6B (本地)\n"
if len(botManager.bots['poe-web']) > 0:
respond_msg += "* sage - POE Sage 模型\n"
respond_msg += "* calude - POE Calude 模型\n"
respond_msg += "* chinchilla - POE ChatGPT 模型\n"
await respond(respond_msg)
except PresetNotFoundException: # preset not found
await respond("预设不存在,请检查你的输入是否有问题!")
except (RequestException, SSLError, ProxyError, MaxRetryError, ConnectTimeout) as e: # network failure
await respond(config.response.error_network_failure.format(exc=e))
except Exception as e: # unhandled exception
logger.exception(e)
await respond(config.response.error_format.format(exc=e))
action = request
for m in middlewares:
action = wrap_request(action, m)
# start processing
await action(session_id, message.strip(), conversation_context, respond)
| [] |
2024-01-10 | moui72/dev-tools-cli | infra~modules~dev-tools~packages~handler~charset_normalizer~cd.py | import importlib
from codecs import IncrementalDecoder
from collections import Counter
from functools import lru_cache
from typing import Dict, List, Optional, Tuple
from .assets import FREQUENCIES
from .constant import KO_NAMES, LANGUAGE_SUPPORTED_COUNT, TOO_SMALL_SEQUENCE, ZH_NAMES
from .md import is_suspiciously_successive_range
from .models import CoherenceMatches
from .utils import (
is_accentuated,
is_latin,
is_multi_byte_encoding,
is_unicode_range_secondary,
unicode_range,
)
def encoding_unicode_range(iana_name: str) -> List[str]:
"""
Return associated unicode ranges in a single byte code page.
"""
if is_multi_byte_encoding(iana_name):
raise IOError("Function not supported on multi-byte code page")
decoder = importlib.import_module("encodings.{}".format(iana_name)).IncrementalDecoder # type: ignore
p: IncrementalDecoder = decoder(errors="ignore")
seen_ranges: Dict[str, int] = {}
character_count: int = 0
for i in range(0x40, 0xFF):
chunk: str = p.decode(bytes([i]))
if chunk:
character_range: Optional[str] = unicode_range(chunk)
if character_range is None:
continue
if is_unicode_range_secondary(character_range) is False:
if character_range not in seen_ranges:
seen_ranges[character_range] = 0
seen_ranges[character_range] += 1
character_count += 1
return sorted(
[character_range for character_range in seen_ranges if seen_ranges[character_range] / character_count >= 0.15]
)
def unicode_range_languages(primary_range: str) -> List[str]:
"""
Return inferred languages used with a unicode range.
"""
languages: List[str] = []
for language, characters in FREQUENCIES.items():
for character in characters:
if unicode_range(character) == primary_range:
languages.append(language)
break
return languages
@lru_cache()
def encoding_languages(iana_name: str) -> List[str]:
"""
Single-byte encoding language association. Some code pages are heavily linked to particular language(s).
This function does the correspondence.
"""
unicode_ranges: List[str] = encoding_unicode_range(iana_name)
primary_range: Optional[str] = None
for specified_range in unicode_ranges:
if "Latin" not in specified_range:
primary_range = specified_range
break
if primary_range is None:
return ["Latin Based"]
return unicode_range_languages(primary_range)
@lru_cache()
def mb_encoding_languages(iana_name: str) -> List[str]:
"""
Multi-byte encoding language association. Some code pages are heavily linked to particular language(s).
This function does the correspondence.
"""
if (
iana_name.startswith("shift_")
or iana_name.startswith("iso2022_jp")
or iana_name.startswith("euc_j")
or iana_name == "cp932"
):
return ["Japanese"]
if iana_name.startswith("gb") or iana_name in ZH_NAMES:
return ["Chinese", "Classical Chinese"]
if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES:
return ["Korean"]
return []
@lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT)
def get_target_features(language: str) -> Tuple[bool, bool]:
"""
Determine main aspects from a supported language if it contains accents and if is pure Latin.
"""
target_have_accents: bool = False
target_pure_latin: bool = True
for character in FREQUENCIES[language]:
if not target_have_accents and is_accentuated(character):
target_have_accents = True
if target_pure_latin and is_latin(character) is False:
target_pure_latin = False
return target_have_accents, target_pure_latin
def alphabet_languages(characters: List[str], ignore_non_latin: bool = False) -> List[str]:
"""
Return associated languages associated to given characters.
"""
languages: List[Tuple[str, float]] = []
source_have_accents = any(is_accentuated(character) for character in characters)
for language, language_characters in FREQUENCIES.items():
target_have_accents, target_pure_latin = get_target_features(language)
if ignore_non_latin and target_pure_latin is False:
continue
if target_have_accents is False and source_have_accents:
continue
character_count: int = len(language_characters)
character_match_count: int = len([c for c in language_characters if c in characters])
ratio: float = character_match_count / character_count
if ratio >= 0.2:
languages.append((language, ratio))
languages = sorted(languages, key=lambda x: x[1], reverse=True)
return [compatible_language[0] for compatible_language in languages]
def characters_popularity_compare(language: str, ordered_characters: List[str]) -> float:
"""
Determine if an ordered character list (by occurrence, from most frequent to rarest) matches a particular language.
The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit).
Beware that this function is not strict on the match in order to ease the detection. (Meaning close match is 1.)
"""
if language not in FREQUENCIES:
raise ValueError("{} not available".format(language))
character_approved_count: int = 0
FREQUENCIES_language_set = set(FREQUENCIES[language])
for character in ordered_characters:
if character not in FREQUENCIES_language_set:
continue
characters_before_source: List[str] = FREQUENCIES[language][0 : FREQUENCIES[language].index(character)]
characters_after_source: List[str] = FREQUENCIES[language][FREQUENCIES[language].index(character) :]
characters_before: List[str] = ordered_characters[0 : ordered_characters.index(character)]
characters_after: List[str] = ordered_characters[ordered_characters.index(character) :]
before_match_count: int = len(set(characters_before) & set(characters_before_source))
after_match_count: int = len(set(characters_after) & set(characters_after_source))
if len(characters_before_source) == 0 and before_match_count <= 4:
character_approved_count += 1
continue
if len(characters_after_source) == 0 and after_match_count <= 4:
character_approved_count += 1
continue
if (
before_match_count / len(characters_before_source) >= 0.4
or after_match_count / len(characters_after_source) >= 0.4
):
character_approved_count += 1
continue
return character_approved_count / len(ordered_characters)
def alpha_unicode_split(decoded_sequence: str) -> List[str]:
"""
Given a decoded text sequence, return a list of str. Unicode range / alphabet separation.
Ex. a text containing English/Latin with a bit of Hebrew will return two items in the resulting list;
one containing the Latin letters and the other the Hebrew ones.
"""
layers: Dict[str, str] = {}
for character in decoded_sequence:
if character.isalpha() is False:
continue
character_range: Optional[str] = unicode_range(character)
if character_range is None:
continue
layer_target_range: Optional[str] = None
for discovered_range in layers:
if is_suspiciously_successive_range(discovered_range, character_range) is False:
layer_target_range = discovered_range
break
if layer_target_range is None:
layer_target_range = character_range
if layer_target_range not in layers:
layers[layer_target_range] = character.lower()
continue
layers[layer_target_range] += character.lower()
return list(layers.values())
def merge_coherence_ratios(results: List[CoherenceMatches]) -> CoherenceMatches:
"""
This function merge results previously given by the function coherence_ratio.
The return type is the same as coherence_ratio.
"""
per_language_ratios: Dict[str, List[float]] = {}
for result in results:
for sub_result in result:
language, ratio = sub_result
if language not in per_language_ratios:
per_language_ratios[language] = [ratio]
continue
per_language_ratios[language].append(ratio)
merge = [
(
language,
round(
sum(per_language_ratios[language]) / len(per_language_ratios[language]),
4,
),
)
for language in per_language_ratios
]
return sorted(merge, key=lambda x: x[1], reverse=True)
@lru_cache(maxsize=2048)
def coherence_ratio(
decoded_sequence: str, threshold: float = 0.1, lg_inclusion: Optional[str] = None
) -> CoherenceMatches:
"""
Detect ANY language that can be identified in given sequence. The sequence will be analysed by layers.
A layer = Character extraction by alphabets/ranges.
"""
results: List[Tuple[str, float]] = []
ignore_non_latin: bool = False
sufficient_match_count: int = 0
lg_inclusion_list = lg_inclusion.split(",") if lg_inclusion is not None else []
if "Latin Based" in lg_inclusion_list:
ignore_non_latin = True
lg_inclusion_list.remove("Latin Based")
for layer in alpha_unicode_split(decoded_sequence):
sequence_frequencies: Counter = Counter(layer)
most_common = sequence_frequencies.most_common()
character_count: int = sum(o for c, o in most_common)
if character_count <= TOO_SMALL_SEQUENCE:
continue
popular_character_ordered: List[str] = [c for c, o in most_common]
for language in lg_inclusion_list or alphabet_languages(popular_character_ordered, ignore_non_latin):
ratio: float = characters_popularity_compare(language, popular_character_ordered)
if ratio < threshold:
continue
elif ratio >= 0.8:
sufficient_match_count += 1
results.append((language, round(ratio, 4)))
if sufficient_match_count >= 3:
break
return sorted(results, key=lambda x: x[1], reverse=True)
| [] |
2024-01-10 | shalin07/Stevens-BIA-660 | Assignment_6~Assignment_6.py | #!/usr/bin/env python
# coding: utf-8
# In[2]:
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from nltk.cluster import KMeansClusterer, cosine_distance
from sklearn.decomposition import LatentDirichletAllocation
import gensim
from gensim import corpora
from gensim.models.coherencemodel import CoherenceModel
import pandas as pd
from sklearn import metrics
import numpy as np
import json, time
from matplotlib import pyplot as plt
from termcolor import colored
def cluster_kmean(train_file, test_file):
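"""Cluster train+test texts with k-means on TF-IDF vectors (cosine distance), map each cluster to its majority topic, and print a classification report."""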
train_file = json.load(open(train_file, 'r'))
test_file = json.load(open(test_file, 'r'))
tfidf_vect = TfidfVectorizer(stop_words="english",min_df=5)
test_text,label=zip(*test_file)
first_label=[item[0] for item in label]
dtm = tfidf_vect.fit_transform(train_file+list(test_text))
num_clusters=3
clusterer = KMeansClusterer(num_clusters, cosine_distance, repeats=20)
clusters = clusterer.cluster(dtm.toarray(), assign_clusters=True)
confusion_df=pd.DataFrame(list(zip(first_label, clusters[len(train_file):])), columns=['actual_class','cluster'])
confusion_matrix = pd.crosstab( index=confusion_df.cluster, columns=confusion_df.actual_class)
print(confusion_matrix)
matrix = confusion_matrix.idxmax(axis=1)
for idx, i in enumerate(matrix):
print("Cluster {}: Topic {}".format(idx, i))
cluster_dict={0:"Topic Travel & Transportation", 1:"Topic Disaster and Accident", 2:"Topic News and Economy"}
predicted_target=[matrix[i] for i in clusters[len(train_file):]]
print(metrics.classification_report(first_label, predicted_target))
def cluster_lda(train_file, test_file):
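"""Fit LDA on the training texts, assign each test text to its dominant topic, and print a classification report."""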
train_file = json.load(open(train_file,'r'))
test_file = json.load(open(test_file, 'r'))
tf_vectorizer = CountVectorizer(min_df=5, stop_words='english')
test_text,label=zip(*test_file)
first_label=[item[0] for item in label]
tf = tf_vectorizer.fit_transform(train_file+list(test_text))
num_clusters=3
lda = LatentDirichletAllocation(n_components=num_clusters, evaluate_every = 1, max_iter=25,verbose=1, n_jobs=1,
random_state=0).fit(tf[0:len(train_file)])
topic_assign=lda.transform(tf[len(train_file):])
topic=topic_assign.argmax(axis=1)
confusion_df=pd.DataFrame(list(zip(first_label, topic)), columns=['actual_class','topic'])
confusion_matrix = pd.crosstab( index=confusion_df.topic, columns=confusion_df.actual_class)
print(confusion_matrix)
matrix = confusion_matrix.idxmax(axis=1)
for idx, t in enumerate(matrix):
print("Cluster {}: Topic {}".format(idx, t))
cluster_dict={0:"Topic Travel & Transportation", 1:"Topic Disaster and Accident", 2:"Topic News and Economy"}
predicted_target=[matrix[i] for i in topic]
print(metrics.classification_report(first_label, predicted_target))
if __name__ == "__main__":
print(colored("Output of Kmeans model", 'blue', attrs=['bold']))
cluster_kmean("train_text.json","test_text.json")
print(colored("Output of LDA model", 'blue', attrs=['bold']))
cluster_lda("train_text.json","test_text.json")
# In[ ]:
| [] |
2024-01-10 | henryHTH/Personal-Projects | functions.py | # Importing built-in libraries (no need to install these)
import re
import os
from os import listdir
from os.path import isfile, join
# Importing libraries you need to install
import pandas as pd
import numpy as np
from tqdm import tqdm
import itertools as it
import nltk
from nltk.tokenize import word_tokenize,sent_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
import spacy
from spacy.lemmatizer import Lemmatizer
from spacy.lang.en.stop_words import STOP_WORDS
from gensim.corpora import Dictionary, MmCorpus
from gensim.models import Phrases
from gensim.models.word2vec import LineSentence
from gensim.parsing.preprocessing import remove_stopwords
from gensim.models.ldamulticore import LdaMulticore
from gensim.models import CoherenceModel
from wordcloud import WordCloud,STOPWORDS
import matplotlib.pyplot as plt
import pyLDAvis
import pyLDAvis.gensim
import warnings
from pyLDAvis import PreparedData
def punct_space(token):
"""
helper function to eliminate tokens
that are pure punctuation or whitespace
"""
return token.is_punct or token.is_space
def line_review(filename):
"""
generator function to read in reviews from the file
and un-escape the original line breaks in the text
"""
with open(filename, encoding='utf_8') as f:
for text in f:
#text_re_stop = remove_stopwords(text)
yield text.replace('\\n', '\n')
def lemmatized_sentence_corpus(filename,nlp):
"""
generator function to use spaCy to parse reviews,
lemmatize the text, and yield sentences
"""
for parsed_review in nlp.pipe(line_review(filename),batch_size=100, n_process=4):
for sent in parsed_review.sents:
yield u' '.join([token.lemma_ for token in sent if not punct_space(token)])
def trigram_bow_generator(filepath,dictionary):
"""
generator function to read reviews from a file
and yield a bag-of-words representation
"""
for text in LineSentence(filepath):
yield dictionary.doc2bow(text)
def explore_topic(lda,topic_number, topn=20):
"""
accept a user-supplied topic number and
print out a formatted list of the top terms
"""
print (u'{:20} {}'.format(u'term', u'frequency') + u'\n')
for term, frequency in lda.show_topic(topic_number, topn=topn):
print (u'{:20} {:.3f}'.format(term, round(frequency, 3)))
def topic_visualizer(lda,topic_number, topn=30):
"""
print out a wordcloud figure of the top terms
for the picked toptic
"""
stop_words = set(STOPWORDS)
topic = lda.show_topic(topic_number,topn)
dict_topic = dict(topic)
cloud = WordCloud(stopwords=stop_words,
background_color='white',
width=2500,
height=1800,
max_words=topn,
prefer_horizontal=1.0)
cloud.generate_from_frequencies(dict_topic, max_font_size=300)
plt.figure(figsize = (8, 8), facecolor = None)
plt.imshow(cloud)
plt.axis("off")
plt.tight_layout(pad = 0)
plt.show()
def prepared_data_from_dict(vis_data):
topic_coordinates = pd.DataFrame.from_dict(vis_data['mdsDat'])
topic_info = pd.DataFrame.from_dict(vis_data['tinfo'])
token_table = pd.DataFrame.from_dict(vis_data['token.table'])
R = vis_data['R']
lambda_step = vis_data['lambda.step']
plot_opts = vis_data['plot.opts']
client_topic_order = vis_data['topic.order']
return PreparedData(topic_coordinates, topic_info,
token_table, R, lambda_step, plot_opts, client_topic_order)
| [] |
2024-01-10 | henryHTH/Personal-Projects | Textual%20Analysis%20of%20Risk%20Factors%20in%2010-K%20Report~functions.py | # Importing built-in libraries (no need to install these)
import re
import os
from os import listdir
from os.path import isfile, join
import datetime
import time
# Importing libraries you need to install
import pandas as pd
import numpy as np
#from tqdm import tqdm
import itertools as it
import nltk
from nltk.tokenize import word_tokenize,sent_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
import spacy
from spacy.lemmatizer import Lemmatizer
from spacy.lang.en.stop_words import STOP_WORDS
from gensim.corpora import Dictionary, MmCorpus
from gensim.models import Phrases
from gensim.models.word2vec import LineSentence
from gensim.parsing.preprocessing import remove_stopwords
from gensim.models.ldamulticore import LdaMulticore
from gensim.models import LdaModel
from gensim.models import CoherenceModel
from wordcloud import WordCloud,STOPWORDS
import matplotlib.pyplot as plt
import pyLDAvis
import pyLDAvis.gensim
import warnings
from pyLDAvis import PreparedData
def punct_space(token):
"""
helper function to eliminate tokens
that are pure punctuation or whitespace
"""
return token.is_punct or token.is_space
def remove_stop(token):
"""
helper function to eliminate tokens
that are stop words
"""
return token.is_stop
def line_review(filename):
"""
generator function to read in reviews from the file
and un-escape the original line breaks in the text
"""
with open(filename, encoding='utf_8') as f:
for text in f:
#text_re_stop = remove_stopwords(text)
yield text.replace('\\n', '\n')
def lemmatized_sentence_corpus(filename,nlp):
"""
generator function to use spaCy to parse reviews,
lemmatize the text, and yield sentences
"""
for parsed_review in nlp.pipe(line_review(filename),batch_size=100, n_process=4):
for sent in parsed_review.sents:
yield u' '.join([token.lemma_.lower() for token in sent if not punct_space(token)])
def trigram_bow_generator(filepath,dictionary):
"""
generator function to read reviews from a file
and yield a bag-of-words representation
"""
for text in LineSentence(filepath):
yield dictionary.doc2bow(text)
def explore_topic(lda,topic_number, topn=20):
"""
accept a user-supplied topic number and
print out a formatted list of the top terms
"""
print (u'{:20} {}'.format(u'term', u'frequency') + u'\n')
for term, frequency in lda.show_topic(topic_number, topn=topn):
print (u'{:20} {:.3f}'.format(term, round(frequency, 3)))
def topic_visualizer(lda,topic_number, topn=20):
"""
print out a wordcloud figure of the top terms
for the picked toptic
"""
stop_words = set(STOPWORDS)
topic = lda.show_topic(topic_number,topn)
dict_topic = dict(topic)
cloud = WordCloud(stopwords=stop_words,
background_color='white',
width=250,
height=180,
max_words=topn,
prefer_horizontal=0.8)
cloud.generate_from_frequencies(dict_topic, max_font_size=30)
plt.figure(figsize = (3, 3), facecolor = None)
plt.imshow(cloud)
plt.axis("off")
plt.tight_layout(pad = 0)
plt.show()
def prepared_data_from_dict(vis_data):
topic_coordinates = pd.DataFrame.from_dict(vis_data['mdsDat'])
topic_info = pd.DataFrame.from_dict(vis_data['tinfo'])
token_table = pd.DataFrame.from_dict(vis_data['token.table'])
R = vis_data['R']
lambda_step = vis_data['lambda.step']
plot_opts = vis_data['plot.opts']
client_topic_order = vis_data['topic.order']
return PreparedData(topic_coordinates, topic_info,
token_table, R, lambda_step, plot_opts, client_topic_order)
| [] |
2024-01-10 | Findigs/danswer | backend~danswer~llm~gpt_4_all.py | from collections.abc import Iterator
from typing import Any
from langchain.schema.language_model import LanguageModelInput
from danswer.configs.model_configs import GEN_AI_MAX_OUTPUT_TOKENS
from danswer.configs.model_configs import GEN_AI_MODEL_VERSION
from danswer.configs.model_configs import GEN_AI_TEMPERATURE
from danswer.llm.interfaces import LLM
from danswer.llm.utils import convert_lm_input_to_basic_string
from danswer.utils.logger import setup_logger
logger = setup_logger()
class DummyGPT4All:
"""In the case of import failure due to architectural incompatibilities,
this module does not raise exceptions during server startup,
as long as the module isn't actually used"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
raise RuntimeError("GPT4All library not installed.")
try:
from gpt4all import GPT4All # type:ignore
except ImportError:
# Setting a low log level because users get scared when they see this
logger.debug(
"GPT4All library not installed. "
"If you wish to run GPT4ALL (in memory) to power Danswer's "
"Generative AI features, please install gpt4all==2.0.2."
)
GPT4All = DummyGPT4All
class DanswerGPT4All(LLM):
"""Option to run an LLM locally, however this is significantly slower and
answers tend to be much worse"""
@property
def requires_warm_up(self) -> bool:
"""GPT4All models are lazy loaded, load them on server start so that the
first inference isn't extremely delayed"""
return True
@property
def requires_api_key(self) -> bool:
return False
def __init__(
self,
timeout: int,
model_version: str = GEN_AI_MODEL_VERSION,
max_output_tokens: int = GEN_AI_MAX_OUTPUT_TOKENS,
temperature: float = GEN_AI_TEMPERATURE,
):
self.timeout = timeout
self.max_output_tokens = max_output_tokens
self.temperature = temperature
self.gpt4all_model = GPT4All(model_version)
def log_model_configs(self) -> None:
logger.debug(
f"GPT4All Model: {self.gpt4all_model}, Temperature: {self.temperature}"
)
def invoke(self, prompt: LanguageModelInput) -> str:
prompt_basic = convert_lm_input_to_basic_string(prompt)
return self.gpt4all_model.generate(prompt_basic)
def stream(self, prompt: LanguageModelInput) -> Iterator[str]:
prompt_basic = convert_lm_input_to_basic_string(prompt)
return self.gpt4all_model.generate(prompt_basic, streaming=True)
| [] |
2024-01-10 | seandearnaley/reddit-gpt-summarizer | app~llm_handler.py | """Handler for the LLM app."""
from config import ANTHROPIC_AI_TYPE
from data_types.summary import GenerateSettings
from log_tools import Logger
from pyrate_limiter import Duration, Limiter, RequestRate
from services.anthropic_connector import complete_anthropic_text
from services.openai_connector import complete_openai_text
from utils.llm_utils import validate_max_tokens
from utils.streamlit_decorators import error_to_streamlit
app_logger = Logger.get_app_logger()
rate_limits = (RequestRate(10, Duration.MINUTE),) # 10 requests a minute
# Create the rate limiter (Pyrate Limiter instance)
limiter = Limiter(*rate_limits)
@Logger.log
@error_to_streamlit
def complete_text(
prompt: str,
max_tokens: int,
settings: GenerateSettings,
) -> str:
"""LLM orchestrator"""
validate_max_tokens(max_tokens)
selected_model_type = settings["selected_model_type"]
is_anthropic = selected_model_type == ANTHROPIC_AI_TYPE
try:
limiter.ratelimit("complete_text")
# delegate to the appropriate completion method
if is_anthropic:
return complete_anthropic_text(
prompt=prompt,
max_tokens=max_tokens,
settings=settings,
)
return complete_openai_text(
prompt=prompt,
max_tokens=max_tokens,
settings=settings,
)
except Exception as exc: # pylint: disable=broad-except
app_logger.error("Error completing text: %s", exc)
return f"Error completing text: {exc}"
| [] |