date_collected | repo_name | file_name | file_contents | prompts
---|---|---|---|---
2024-01-10 | robocorp/langchain | libs~community~langchain_community~chat_loaders~langsmith.py | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Dict, Iterable, Iterator, List, Optional, Union, cast
from langchain_core.chat_sessions import ChatSession
from langchain_core.load import load
from langchain_community.chat_loaders.base import BaseChatLoader
if TYPE_CHECKING:
from langsmith.client import Client
from langsmith.schemas import Run
logger = logging.getLogger(__name__)
class LangSmithRunChatLoader(BaseChatLoader):
"""
Load chat sessions from a list of LangSmith "llm" runs.
Attributes:
runs (Iterable[Union[str, Run]]): The list of LLM run IDs or run objects.
client (Client): Instance of LangSmith client for fetching data.
"""
def __init__(
self, runs: Iterable[Union[str, Run]], client: Optional["Client"] = None
):
"""
Initialize a new LangSmithRunChatLoader instance.
:param runs: List of LLM run IDs or run objects.
        :param client: An instance of LangSmith client; if not provided,
a new client instance will be created.
"""
from langsmith.client import Client
self.runs = runs
self.client = client or Client()
def _load_single_chat_session(self, llm_run: "Run") -> ChatSession:
"""
Convert an individual LangSmith LLM run to a ChatSession.
:param llm_run: The LLM run object.
:return: A chat session representing the run's data.
"""
chat_session = LangSmithRunChatLoader._get_messages_from_llm_run(llm_run)
functions = LangSmithRunChatLoader._get_functions_from_llm_run(llm_run)
if functions:
chat_session["functions"] = functions
return chat_session
@staticmethod
def _get_messages_from_llm_run(llm_run: "Run") -> ChatSession:
"""
Extract messages from a LangSmith LLM run.
:param llm_run: The LLM run object.
:return: ChatSession with the extracted messages.
"""
if llm_run.run_type != "llm":
raise ValueError(f"Expected run of type llm. Got: {llm_run.run_type}")
if "messages" not in llm_run.inputs:
raise ValueError(f"Run has no 'messages' inputs. Got {llm_run.inputs}")
if not llm_run.outputs:
raise ValueError("Cannot convert pending run")
messages = load(llm_run.inputs)["messages"]
message_chunk = load(llm_run.outputs)["generations"][0]["message"]
return ChatSession(messages=messages + [message_chunk])
@staticmethod
def _get_functions_from_llm_run(llm_run: "Run") -> Optional[List[Dict]]:
"""
Extract functions from a LangSmith LLM run if they exist.
:param llm_run: The LLM run object.
:return: Functions from the run or None.
"""
if llm_run.run_type != "llm":
raise ValueError(f"Expected run of type llm. Got: {llm_run.run_type}")
return (llm_run.extra or {}).get("invocation_params", {}).get("functions")
def lazy_load(self) -> Iterator[ChatSession]:
"""
Lazy load the chat sessions from the iterable of run IDs.
This method fetches the runs and converts them to chat sessions on-the-fly,
yielding one session at a time.
:return: Iterator of chat sessions containing messages.
"""
from langsmith.schemas import Run
for run_obj in self.runs:
try:
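                # Accept either Run objects or run IDs; fetch the full run when only an ID was given.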
if hasattr(run_obj, "id"):
run = run_obj
else:
run = self.client.read_run(run_obj)
session = self._load_single_chat_session(cast(Run, run))
yield session
except ValueError as e:
logger.warning(f"Could not load run {run_obj}: {repr(e)}")
continue
class LangSmithDatasetChatLoader(BaseChatLoader):
"""
Load chat sessions from a LangSmith dataset with the "chat" data type.
Attributes:
dataset_name (str): The name of the LangSmith dataset.
client (Client): Instance of LangSmith client for fetching data.
"""
def __init__(self, *, dataset_name: str, client: Optional["Client"] = None):
"""
        Initialize a new LangSmithDatasetChatLoader instance.
:param dataset_name: The name of the LangSmith dataset.
:param client: An instance of LangSmith client; if not provided,
a new client instance will be created.
"""
try:
from langsmith.client import Client
except ImportError as e:
raise ImportError(
"The LangSmith client is required to load LangSmith datasets.\n"
"Please install it with `pip install langsmith`"
) from e
self.dataset_name = dataset_name
self.client = client or Client()
def lazy_load(self) -> Iterator[ChatSession]:
"""
Lazy load the chat sessions from the specified LangSmith dataset.
This method fetches the chat data from the dataset and
converts each data point to chat sessions on-the-fly,
yielding one session at a time.
:return: Iterator of chat sessions containing messages.
"""
from langchain_community.adapters import openai as oai_adapter # noqa: E402
data = self.client.read_dataset_openai_finetuning(
dataset_name=self.dataset_name
)
for data_point in data:
yield ChatSession(
messages=[
oai_adapter.convert_dict_to_message(m)
for m in data_point.get("messages", [])
],
functions=data_point.get("functions"),
)
| [] |
2024-01-10 | robocorp/langchain | libs~core~langchain_core~runnables~history.py | from __future__ import annotations
import inspect
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Sequence,
Type,
Union,
)
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.load import load
from langchain_core.pydantic_v1 import BaseModel, create_model
from langchain_core.runnables.base import Runnable, RunnableBindingBase, RunnableLambda
from langchain_core.runnables.config import run_in_executor
from langchain_core.runnables.passthrough import RunnablePassthrough
from langchain_core.runnables.utils import (
ConfigurableFieldSpec,
get_unique_config_specs,
)
if TYPE_CHECKING:
from langchain_core.messages import BaseMessage
from langchain_core.runnables.config import RunnableConfig
from langchain_core.tracers.schemas import Run
MessagesOrDictWithMessages = Union[Sequence["BaseMessage"], Dict[str, Any]]
GetSessionHistoryCallable = Callable[..., BaseChatMessageHistory]
class RunnableWithMessageHistory(RunnableBindingBase):
"""A runnable that manages chat message history for another runnable.
Base runnable must have inputs and outputs that can be converted to a list of BaseMessages.
    RunnableWithMessageHistory must always be called with a config that contains session_id, e.g. ``{"configurable": {"session_id": "<SESSION_ID>"}}``.
Example (dict input):
.. code-block:: python
from typing import Optional
from langchain.chat_models import ChatAnthropic
from langchain.memory.chat_message_histories import RedisChatMessageHistory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory
prompt = ChatPromptTemplate.from_messages([
("system", "You're an assistant who's good at {ability}"),
MessagesPlaceholder(variable_name="history"),
("human", "{question}"),
])
chain = prompt | ChatAnthropic(model="claude-2")
chain_with_history = RunnableWithMessageHistory(
chain,
RedisChatMessageHistory,
input_messages_key="question",
history_messages_key="history",
)
chain_with_history.invoke(
{"ability": "math", "question": "What does cosine mean?"},
config={"configurable": {"session_id": "foo"}}
)
# -> "Cosine is ..."
chain_with_history.invoke(
{"ability": "math", "question": "What's its inverse"},
config={"configurable": {"session_id": "foo"}}
)
# -> "The inverse of cosine is called arccosine ..."
Example (get_session_history takes two keys, user_id and conversation id):
.. code-block:: python
store = {}
def get_session_history(
user_id: str, conversation_id: str
) -> ChatMessageHistory:
if (user_id, conversation_id) not in store:
store[(user_id, conversation_id)] = ChatMessageHistory()
return store[(user_id, conversation_id)]
prompt = ChatPromptTemplate.from_messages([
("system", "You're an assistant who's good at {ability}"),
MessagesPlaceholder(variable_name="history"),
("human", "{question}"),
])
chain = prompt | ChatAnthropic(model="claude-2")
with_message_history = RunnableWithMessageHistory(
chain,
get_session_history=get_session_history,
input_messages_key="messages",
history_messages_key="history",
history_factory_config=[
ConfigurableFieldSpec(
id="user_id",
annotation=str,
name="User ID",
description="Unique identifier for the user.",
default="",
is_shared=True,
),
ConfigurableFieldSpec(
id="conversation_id",
annotation=str,
name="Conversation ID",
description="Unique identifier for the conversation.",
default="",
is_shared=True,
),
],
)
            with_message_history.invoke(
{"ability": "math", "question": "What does cosine mean?"},
config={"configurable": {"user_id": "123", "conversation_id": "1"}}
)
""" # noqa: E501
get_session_history: GetSessionHistoryCallable
input_messages_key: Optional[str] = None
output_messages_key: Optional[str] = None
history_messages_key: Optional[str] = None
history_factory_config: Sequence[ConfigurableFieldSpec]
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "runnable"]
def __init__(
self,
runnable: Runnable[
MessagesOrDictWithMessages,
Union[str, BaseMessage, MessagesOrDictWithMessages],
],
get_session_history: GetSessionHistoryCallable,
*,
input_messages_key: Optional[str] = None,
output_messages_key: Optional[str] = None,
history_messages_key: Optional[str] = None,
history_factory_config: Optional[Sequence[ConfigurableFieldSpec]] = None,
**kwargs: Any,
) -> None:
"""Initialize RunnableWithMessageHistory.
Args:
runnable: The base Runnable to be wrapped. Must take as input one of:
1. A sequence of BaseMessages
2. A dict with one key for all messages
3. A dict with one key for the current input string/message(s) and
a separate key for historical messages. If the input key points
to a string, it will be treated as a HumanMessage in history.
Must return as output one of:
1. A string which can be treated as an AIMessage
2. A BaseMessage or sequence of BaseMessages
3. A dict with a key for a BaseMessage or sequence of BaseMessages
get_session_history: Function that returns a new BaseChatMessageHistory.
This function should either take a single positional argument
`session_id` of type string and return a corresponding
chat message history instance.
.. code-block:: python
def get_session_history(
session_id: str,
*,
user_id: Optional[str]=None
) -> BaseChatMessageHistory:
...
Or it should take keyword arguments that match the keys of
`session_history_config_specs` and return a corresponding
chat message history instance.
.. code-block:: python
def get_session_history(
*,
user_id: str,
thread_id: str,
) -> BaseChatMessageHistory:
...
input_messages_key: Must be specified if the base runnable accepts a dict
as input.
output_messages_key: Must be specified if the base runnable returns a dict
as output.
history_messages_key: Must be specified if the base runnable accepts a dict
as input and expects a separate key for historical messages.
history_factory_config: Configure fields that should be passed to the
chat history factory. See ``ConfigurableFieldSpec`` for more details.
Specifying these allows you to pass multiple config keys
into the get_session_history factory.
**kwargs: Arbitrary additional kwargs to pass to parent class
``RunnableBindingBase`` init.
""" # noqa: E501
history_chain: Runnable = RunnableLambda(
self._enter_history, self._aenter_history
).with_config(run_name="load_history")
messages_key = history_messages_key or input_messages_key
if messages_key:
history_chain = RunnablePassthrough.assign(
**{messages_key: history_chain}
).with_config(run_name="insert_history")
bound = (
history_chain | runnable.with_listeners(on_end=self._exit_history)
).with_config(run_name="RunnableWithMessageHistory")
if history_factory_config:
_config_specs = history_factory_config
else:
# If not provided, then we'll use the default session_id field
_config_specs = [
ConfigurableFieldSpec(
id="session_id",
annotation=str,
name="Session ID",
description="Unique identifier for a session.",
default="",
is_shared=True,
),
]
super().__init__(
get_session_history=get_session_history,
input_messages_key=input_messages_key,
output_messages_key=output_messages_key,
bound=bound,
history_messages_key=history_messages_key,
history_factory_config=_config_specs,
**kwargs,
)
@property
def config_specs(self) -> List[ConfigurableFieldSpec]:
return get_unique_config_specs(
super().config_specs + list(self.history_factory_config)
)
def get_input_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
super_schema = super().get_input_schema(config)
if super_schema.__custom_root_type__ is not None:
from langchain_core.messages import BaseMessage
fields: Dict = {}
if self.input_messages_key and self.history_messages_key:
fields[self.input_messages_key] = (
Union[str, BaseMessage, Sequence[BaseMessage]],
...,
)
elif self.input_messages_key:
fields[self.input_messages_key] = (Sequence[BaseMessage], ...)
else:
fields["__root__"] = (Sequence[BaseMessage], ...)
return create_model( # type: ignore[call-overload]
"RunnableWithChatHistoryInput",
**fields,
)
else:
return super_schema
def _get_input_messages(
self, input_val: Union[str, BaseMessage, Sequence[BaseMessage]]
) -> List[BaseMessage]:
from langchain_core.messages import BaseMessage
if isinstance(input_val, str):
from langchain_core.messages import HumanMessage
return [HumanMessage(content=input_val)]
elif isinstance(input_val, BaseMessage):
return [input_val]
elif isinstance(input_val, (list, tuple)):
return list(input_val)
else:
raise ValueError(
f"Expected str, BaseMessage, List[BaseMessage], or Tuple[BaseMessage]. "
f"Got {input_val}."
)
def _get_output_messages(
self, output_val: Union[str, BaseMessage, Sequence[BaseMessage], dict]
) -> List[BaseMessage]:
from langchain_core.messages import BaseMessage
if isinstance(output_val, dict):
output_val = output_val[self.output_messages_key or "output"]
if isinstance(output_val, str):
from langchain_core.messages import AIMessage
return [AIMessage(content=output_val)]
elif isinstance(output_val, BaseMessage):
return [output_val]
elif isinstance(output_val, (list, tuple)):
return list(output_val)
else:
raise ValueError()
def _enter_history(self, input: Any, config: RunnableConfig) -> List[BaseMessage]:
hist = config["configurable"]["message_history"]
# return only historic messages
if self.history_messages_key:
return hist.messages.copy()
# return all messages
else:
input_val = (
input if not self.input_messages_key else input[self.input_messages_key]
)
return hist.messages.copy() + self._get_input_messages(input_val)
async def _aenter_history(
self, input: Dict[str, Any], config: RunnableConfig
) -> List[BaseMessage]:
return await run_in_executor(config, self._enter_history, input, config)
def _exit_history(self, run: Run, config: RunnableConfig) -> None:
hist = config["configurable"]["message_history"]
# Get the input messages
inputs = load(run.inputs)
input_val = inputs[self.input_messages_key or "input"]
input_messages = self._get_input_messages(input_val)
# If historic messages were prepended to the input messages, remove them to
# avoid adding duplicate messages to history.
if not self.history_messages_key:
historic_messages = config["configurable"]["message_history"].messages
input_messages = input_messages[len(historic_messages) :]
# Get the output messages
output_val = load(run.outputs)
output_messages = self._get_output_messages(output_val)
for m in input_messages + output_messages:
hist.add_message(m)
def _merge_configs(self, *configs: Optional[RunnableConfig]) -> RunnableConfig:
config = super()._merge_configs(*configs)
expected_keys = [field_spec.id for field_spec in self.history_factory_config]
configurable = config.get("configurable", {})
missing_keys = set(expected_keys) - set(configurable.keys())
if missing_keys:
example_input = {self.input_messages_key: "foo"}
example_configurable = {
missing_key: "[your-value-here]" for missing_key in missing_keys
}
example_config = {"configurable": example_configurable}
raise ValueError(
f"Missing keys {sorted(missing_keys)} in config['configurable'] "
f"Expected keys are {sorted(expected_keys)}."
f"When using via .invoke() or .stream(), pass in a config; "
f"e.g., chain.invoke({example_input}, {example_config})"
)
parameter_names = _get_parameter_names(self.get_session_history)
if len(expected_keys) == 1:
# If arity = 1, then invoke function by positional arguments
message_history = self.get_session_history(configurable[expected_keys[0]])
else:
            # otherwise verify that names of keys match and invoke by named arguments
if set(expected_keys) != set(parameter_names):
raise ValueError(
f"Expected keys {sorted(expected_keys)} do not match parameter "
f"names {sorted(parameter_names)} of get_session_history."
)
message_history = self.get_session_history(
**{key: configurable[key] for key in expected_keys}
)
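        # Stash the resolved history instance in the config so _enter_history/_exit_history can access it.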
config["configurable"]["message_history"] = message_history
return config
def _get_parameter_names(callable_: GetSessionHistoryCallable) -> List[str]:
"""Get the parameter names of the callable."""
sig = inspect.signature(callable_)
return list(sig.parameters.keys())
| [] |
2024-01-10 | robocorp/langchain | libs~langchain~langchain~callbacks~streamlit~mutable_expander.py | from langchain_community.callbacks.streamlit.mutable_expander import (
ChildRecord,
ChildType,
MutableExpander,
)
__all__ = ["ChildType", "ChildRecord", "MutableExpander"]
| [] |
2024-01-10 | jheitzeb/langchain | langchain~vectorstores~elastic_vector_search.py | """Wrapper around Elasticsearch vector database."""
import uuid
from typing import Any, Callable, Dict, List
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.base import VectorStore
def _default_text_mapping(dim: int) -> Dict:
return {
"properties": {
"text": {"type": "text"},
"vector": {"type": "dense_vector", "dims": dim},
}
}
def _default_script_query(query_vector: List[float]) -> Dict:
return {
"script_score": {
"query": {"match_all": {}},
"script": {
"source": "cosineSimilarity(params.query_vector, 'vector') + 1.0",
"params": {"query_vector": query_vector},
},
}
}
class ElasticVectorSearch(VectorStore):
"""Wrapper around Elasticsearch as a vector database.
Example:
.. code-block:: python
from langchain import ElasticVectorSearch
elastic_vector_search = ElasticVectorSearch(
"http://localhost:9200",
"embeddings",
embedding_function
)
"""
def __init__(
self, elasticsearch_url: str, index_name: str, embedding_function: Callable
):
"""Initialize with necessary components."""
try:
import elasticsearch
except ImportError:
raise ValueError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticearch`."
)
self.embedding_function = embedding_function
self.index_name = index_name
try:
es_client = elasticsearch.Elasticsearch(elasticsearch_url) # noqa
except ValueError as e:
raise ValueError(
f"Your elasticsearch client string is misformatted. Got error: {e} "
)
self.client = es_client
def similarity_search(self, query: str, k: int = 4) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
embedding = self.embedding_function(query)
script_query = _default_script_query(embedding)
response = self.client.search(index=self.index_name, query=script_query)
texts = [hit["_source"]["text"] for hit in response["hits"]["hits"][:k]]
documents = [Document(page_content=text) for text in texts]
return documents
@classmethod
def from_texts(
cls, texts: List[str], embedding: Embeddings, **kwargs: Any
) -> "ElasticVectorSearch":
"""Construct ElasticVectorSearch wrapper from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Creates a new index for the embeddings in the Elasticsearch instance.
3. Adds the documents to the newly created Elasticsearch index.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain import ElasticVectorSearch
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
elastic_vector_search = ElasticVectorSearch.from_texts(
texts,
embeddings,
elasticsearch_url="http://localhost:9200"
)
"""
elasticsearch_url = get_from_dict_or_env(
kwargs, "elasticsearch_url", "ELASTICSEARCH_URL"
)
try:
import elasticsearch
from elasticsearch.helpers import bulk
except ImportError:
raise ValueError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticearch`."
)
try:
client = elasticsearch.Elasticsearch(elasticsearch_url)
except ValueError as e:
raise ValueError(
"Your elasticsearch client string is misformatted. " f"Got error: {e} "
)
index_name = uuid.uuid4().hex
embeddings = embedding.embed_documents(texts)
dim = len(embeddings[0])
mapping = _default_text_mapping(dim)
# TODO would be nice to create index before embedding,
# just to save expensive steps for last
client.indices.create(index=index_name, mappings=mapping)
requests = []
for i, text in enumerate(texts):
request = {
"_op_type": "index",
"_index": index_name,
"vector": embeddings[i],
"text": text,
}
requests.append(request)
bulk(client, requests)
client.indices.refresh(index=index_name)
return cls(elasticsearch_url, index_name, embedding.embed_query)
| [] |
2024-01-10 | jheitzeb/langchain | langchain~example_generator.py | """Utility functions for working with prompts."""
from typing import List
from langchain.chains.llm import LLMChain
from langchain.llms.base import LLM
from langchain.prompts.dynamic import DynamicPrompt
TEST_GEN_TEMPLATE_SUFFIX = "Add another example."
def generate_example(examples: List[str], llm: LLM) -> str:
"""Return another example given a list of examples for a prompt."""
prompt = DynamicPrompt(examples=examples, suffix=TEST_GEN_TEMPLATE_SUFFIX)
chain = LLMChain(llm=llm, prompt=prompt)
return chain.predict()
def generate_example_from_dynamic_prompt(prompt: DynamicPrompt, llm: LLM) -> str:
"""Return another example given a DynamicPrompt object."""
return generate_example(prompt.examples, llm)
| [
"Add another example."
] |
2024-01-10 | jheitzeb/langchain | langchain~llms~__init__.py | """Wrappers on top of large language models APIs."""
from langchain.llms.cohere import Cohere
from langchain.llms.huggingface_hub import HuggingFaceHub
from langchain.llms.nlpcloud import NLPCloud
from langchain.llms.openai import OpenAI
__all__ = ["Cohere", "NLPCloud", "OpenAI", "HuggingFaceHub"]
| [] |
2024-01-10 | jheitzeb/langchain | langchain~llms~huggingface_hub.py | """Wrapper around HuggingFace APIs."""
from typing import Any, Dict, List, Mapping, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
DEFAULT_REPO_ID = "gpt2"
VALID_TASKS = ("text2text-generation", "text-generation")
class HuggingFaceHub(LLM, BaseModel):
"""Wrapper around HuggingFaceHub models.
To use, you should have the ``huggingface_hub`` python package installed, and the
environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Only supports `text-generation` and `text2text-generation` for now.
Example:
.. code-block:: python
from langchain import HuggingFaceHub
hf = HuggingFaceHub(repo_id="gpt2", huggingfacehub_api_token="my-api-key")
"""
client: Any #: :meta private:
repo_id: str = DEFAULT_REPO_ID
"""Model name to use."""
task: Optional[str] = None
"""Task to call the model with. Should be a task that returns `generated_text`."""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
huggingfacehub_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_or_env(
values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
)
try:
from huggingface_hub.inference_api import InferenceApi
repo_id = values.get("repo_id", DEFAULT_REPO_ID)
client = InferenceApi(
repo_id=repo_id,
token=huggingfacehub_api_token,
task=values.get("task"),
)
if client.task not in VALID_TASKS:
raise ValueError(
f"Got invalid task {client.task}, "
f"currently only {VALID_TASKS} are supported"
)
values["client"] = client
except ImportError:
raise ValueError(
"Could not import huggingface_hub python package. "
"Please it install it with `pip install huggingface_hub`."
)
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {**{"repo_id": self.repo_id}, **_model_kwargs}
def __call__(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Call out to HuggingFace Hub's inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = hf("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
response = self.client(inputs=prompt, params=_model_kwargs)
if "error" in response:
raise ValueError(f"Error raised by inference API: {response['error']}")
if self.client.task == "text-generation":
# Text generation return includes the starter text.
text = response[0]["generated_text"][len(prompt) :]
elif self.client.task == "text2text-generation":
text = response[0]["generated_text"]
else:
raise ValueError(
f"Got invalid task {self.client.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
| [] |
2024-01-10 | jheitzeb/langchain | tests~unit_tests~docstore~test_inmemory.py | """Test in memory docstore."""
from langchain.docstore.document import Document
from langchain.docstore.in_memory import InMemoryDocstore
def test_document_found() -> None:
"""Test document found."""
_dict = {"foo": Document(page_content="bar")}
docstore = InMemoryDocstore(_dict)
output = docstore.search("foo")
assert isinstance(output, Document)
assert output.page_content == "bar"
def test_document_not_found() -> None:
"""Test when document is not found."""
_dict = {"foo": Document(page_content="bar")}
docstore = InMemoryDocstore(_dict)
output = docstore.search("bar")
assert output == "ID bar not found."
| [] |
2024-01-10 | jheitzeb/langchain | tests~unit_tests~test_input.py | """Test input manipulating logic."""
import sys
from io import StringIO
from langchain.input import ChainedInput, get_color_mapping
def test_chained_input_not_verbose() -> None:
"""Test chained input logic."""
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
chained_input = ChainedInput("foo")
sys.stdout = old_stdout
output = mystdout.getvalue()
assert output == ""
assert chained_input.input == "foo"
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
chained_input.add("bar")
sys.stdout = old_stdout
output = mystdout.getvalue()
assert output == ""
assert chained_input.input == "foobar"
def test_chained_input_verbose() -> None:
"""Test chained input logic, making sure verbose doesn't mess it up."""
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
chained_input = ChainedInput("foo", verbose=True)
sys.stdout = old_stdout
output = mystdout.getvalue()
assert output == "foo"
assert chained_input.input == "foo"
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
chained_input.add("bar")
sys.stdout = old_stdout
output = mystdout.getvalue()
assert output == "bar"
assert chained_input.input == "foobar"
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
chained_input.add("baz", color="blue")
sys.stdout = old_stdout
output = mystdout.getvalue()
assert output == "\x1b[36;1m\x1b[1;3mbaz\x1b[0m"
assert chained_input.input == "foobarbaz"
def test_get_color_mapping() -> None:
"""Test getting of color mapping."""
# Test on few inputs.
items = ["foo", "bar"]
output = get_color_mapping(items)
expected_output = {"foo": "blue", "bar": "yellow"}
assert output == expected_output
# Test on a lot of inputs.
items = [f"foo-{i}" for i in range(20)]
output = get_color_mapping(items)
assert len(output) == 20
def test_get_color_mapping_excluded_colors() -> None:
"""Test getting of color mapping with excluded colors."""
items = ["foo", "bar"]
output = get_color_mapping(items, excluded_colors=["blue"])
expected_output = {"foo": "yellow", "bar": "pink"}
assert output == expected_output
| [] |
2024-01-10 | jheitzeb/langchain | langchain~chains~mapreduce.py | """Map-reduce chain.
Splits up a document, sends the smaller parts to the LLM with one prompt,
then combines the results with another one.
"""
from typing import Dict, List
from pydantic import BaseModel, Extra
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.llms.base import LLM
from langchain.prompts.base import BasePrompt
from langchain.text_splitter import TextSplitter
class MapReduceChain(Chain, BaseModel):
"""Map-reduce chain."""
map_llm: LLMChain
"""LLM wrapper to use for the map step."""
reduce_llm: LLMChain
"""LLM wrapper to use for the reduce step."""
text_splitter: TextSplitter
"""Text splitter to use."""
input_key: str = "input_text" #: :meta private:
output_key: str = "output_text" #: :meta private:
@classmethod
def from_params(
cls, llm: LLM, prompt: BasePrompt, text_splitter: TextSplitter
) -> "MapReduceChain":
"""Construct a map-reduce chain that uses the chain for map and reduce."""
llm_chain = LLMChain(llm=llm, prompt=prompt)
return cls(map_llm=llm_chain, reduce_llm=llm_chain, text_splitter=text_splitter)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
# Split the larger text into smaller chunks.
docs = self.text_splitter.split_text(inputs[self.input_key])
# Now that we have the chunks, we send them to the LLM and track results.
# This is the "map" part.
input_list = [{self.map_llm.prompt.input_variables[0]: d} for d in docs]
summary_results = self.map_llm.apply(input_list)
summaries = [res[self.map_llm.output_key] for res in summary_results]
# We then need to combine these individual parts into one.
# This is the reduce part.
summary_str = "\n".join(summaries)
inputs = {self.reduce_llm.prompt.input_variables[0]: summary_str}
output = self.reduce_llm.predict(**inputs)
return {self.output_key: output}
| [] |
2024-01-10 | jheitzeb/langchain | tests~integration_tests~chains~test_self_ask_with_search.py | """Integration test for self ask with search."""
from langchain.chains.self_ask_with_search.base import SelfAskWithSearchChain
from langchain.chains.serpapi import SerpAPIChain
from langchain.llms.openai import OpenAI
def test_self_ask_with_search() -> None:
"""Test functionality on a prompt."""
question = "What is the hometown of the reigning men's U.S. Open champion?"
chain = SelfAskWithSearchChain(
llm=OpenAI(temperature=0),
search_chain=SerpAPIChain(),
input_key="q",
output_key="a",
)
answer = chain.run(question)
final_answer = answer.split("\n")[-1]
assert final_answer == "So the final answer is: El Palmar, Murcia, Spain"
| [] |
2024-01-10 | jheitzeb/langchain | tests~unit_tests~test_text_splitter.py | """Test text splitting functionality."""
import pytest
from langchain.text_splitter import CharacterTextSplitter
def test_character_text_splitter() -> None:
"""Test splitting by character count."""
text = "foo bar baz 123"
splitter = CharacterTextSplitter(separator=" ", chunk_size=5, chunk_overlap=3)
output = splitter.split_text(text)
expected_output = ["foo bar", "bar baz", "baz 123"]
assert output == expected_output
def test_character_text_splitter_longer_words() -> None:
"""Test splitting by characters when splits not found easily."""
text = "foo bar baz 123"
splitter = CharacterTextSplitter(separator=" ", chunk_size=1, chunk_overlap=1)
output = splitter.split_text(text)
expected_output = ["foo", "bar", "baz", "123"]
assert output == expected_output
def test_character_text_splitting_args() -> None:
"""Test invalid arguments."""
with pytest.raises(ValueError):
CharacterTextSplitter(chunk_size=2, chunk_overlap=4)
| [] |
2024-01-10 | jheitzeb/langchain | tests~integration_tests~chains~test_react.py | """Integration test for self ask with search."""
from langchain.chains.react.base import ReActChain
from langchain.docstore.wikipedia import Wikipedia
from langchain.llms.openai import OpenAI
def test_react() -> None:
"""Test functionality on a prompt."""
llm = OpenAI(temperature=0)
react = ReActChain(llm=llm, docstore=Wikipedia())
question = (
"Author David Chanoff has collaborated with a U.S. Navy admiral "
"who served as the ambassador to the United Kingdom under "
"which President?"
)
output = react.run(question)
assert output == "Bill Clinton"
| [] |
2024-01-10 | jheitzeb/langchain | langchain~chains~serpapi.py | """Chain that calls SerpAPI.
Heavily borrowed from https://github.com/ofirpress/self-ask
"""
import os
import sys
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.chains.base import Chain
from langchain.utils import get_from_dict_or_env
class HiddenPrints:
"""Context manager to hide prints."""
def __enter__(self) -> None:
"""Open file to pipe stdout to."""
self._original_stdout = sys.stdout
sys.stdout = open(os.devnull, "w")
def __exit__(self, *_: Any) -> None:
"""Close file that stdout was piped to."""
sys.stdout.close()
sys.stdout = self._original_stdout
class SerpAPIChain(Chain, BaseModel):
"""Chain that calls SerpAPI.
To use, you should have the ``google-search-results`` python package installed,
and the environment variable ``SERPAPI_API_KEY`` set with your API key, or pass
`serpapi_api_key` as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain import SerpAPIChain
serpapi = SerpAPIChain()
"""
search_engine: Any #: :meta private:
input_key: str = "search_query" #: :meta private:
output_key: str = "search_result" #: :meta private:
serpapi_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@property
def input_keys(self) -> List[str]:
"""Return the singular input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the singular output key.
:meta private:
"""
return [self.output_key]
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
serpapi_api_key = get_from_dict_or_env(
values, "serpapi_api_key", "SERPAPI_API_KEY"
)
values["serpapi_api_key"] = serpapi_api_key
try:
from serpapi import GoogleSearch
values["search_engine"] = GoogleSearch
except ImportError:
raise ValueError(
"Could not import serpapi python package. "
"Please it install it with `pip install google-search-results`."
)
return values
def _call(self, inputs: Dict[str, Any]) -> Dict[str, str]:
params = {
"api_key": self.serpapi_api_key,
"engine": "google",
"q": inputs[self.input_key],
"google_domain": "google.com",
"gl": "us",
"hl": "en",
}
with HiddenPrints():
search = self.search_engine(params)
res = search.get_dict()
if "error" in res.keys():
raise ValueError(f"Got error from SerpAPI: {res['error']}")
if "answer_box" in res.keys() and "answer" in res["answer_box"].keys():
toret = res["answer_box"]["answer"]
elif "answer_box" in res.keys() and "snippet" in res["answer_box"].keys():
toret = res["answer_box"]["snippet"]
elif (
"answer_box" in res.keys()
and "snippet_highlighted_words" in res["answer_box"].keys()
):
toret = res["answer_box"]["snippet_highlighted_words"][0]
elif "snippet" in res["organic_results"][0].keys():
toret = res["organic_results"][0]["snippet"]
else:
toret = None
return {self.output_key: toret}
| [] |
2024-01-10 | jheitzeb/langchain | tests~integration_tests~llms~test_huggingface_hub.py | """Test HuggingFace API wrapper."""
import pytest
from langchain.llms.huggingface_hub import HuggingFaceHub
def test_huggingface_text_generation() -> None:
"""Test valid call to HuggingFace text generation model."""
llm = HuggingFaceHub(repo_id="gpt2", model_kwargs={"max_new_tokens": 10})
output = llm("Say foo:")
assert isinstance(output, str)
def test_huggingface_text2text_generation() -> None:
"""Test valid call to HuggingFace text2text model."""
llm = HuggingFaceHub(repo_id="google/flan-t5-xl")
output = llm("The capital of New York is")
assert output == "Albany"
def test_huggingface_call_error() -> None:
"""Test valid call to HuggingFace that errors."""
llm = HuggingFaceHub(model_kwargs={"max_new_tokens": -1})
with pytest.raises(ValueError):
llm("Say foo:")
| [] |
2024-01-10 | kakao-aicoursework/joey.hi | chatbot~chatbot~preprocessing.py | from langchain.schema import Document
def doc_preprocessing(documents):
processed_docs = list()
page_content = documents[0].page_content
metadata = documents[0].metadata
    # Insert a space after every '#' so heading markers are separated from the text that follows.
    idx = 0
    for char in page_content:
        if char == '#':
            page_content = page_content[:idx + 1] + " " + page_content[idx + 1:]
            idx += 1  # account for the inserted space
        idx += 1
idx = 1
prev_idx = 0
for char in page_content:
if char == '#' and idx > 1:
tmp = page_content[prev_idx:idx - 1]
processed_docs.append(Document(page_content = tmp, metadata = metadata))
prev_idx = idx - 1
        idx += 1
    return processed_docs
| [] |
2024-01-10 | ArrowLuo/SegCLIP | modules~modeling.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from functools import partial
import torch
from torch import nn
import torch.nn.functional as F
from modules.util_module import dist_collect, show_log, update_attr, check_attr, get_attr
from modules.util_module import PreTrainedModel, AllGather, CrossEn
from modules.module_clip import CLIP, available_models
from modules.module_mae import MAEDecoder
from util import get_logger
allgather = AllGather.apply
class SegCLIPPreTrainedModel(PreTrainedModel, nn.Module):
def __init__(self, *inputs, **kwargs):
super(SegCLIPPreTrainedModel, self).__init__()
self.clip = None
@classmethod
def from_pretrained(cls, state_dict=None, cache_dir=None, type_vocab_size=2, *inputs, **kwargs):
task_config = None
if "task_config" in kwargs.keys():
task_config = kwargs["task_config"]
if not hasattr(task_config, "local_rank"):
task_config.__dict__["local_rank"] = 0
elif task_config.local_rank == -1:
task_config.local_rank = 0
if state_dict is None: state_dict = {}
pretrained_clip_name = get_attr(task_config, "pretrained_clip_name", default_value="ViT-B/16", donot_log=True)
if pretrained_clip_name in available_models():
clip_state_dict = CLIP.get_config(pretrained_clip_name=pretrained_clip_name)
else:
# We will reset ViT but keep Text Encoder
clip_state_dict = CLIP.get_config(pretrained_clip_name="ViT-B/32")
for key in ["input_resolution", "context_length", "vocab_size"]:
if key in clip_state_dict:
del clip_state_dict[key]
for key, val in clip_state_dict.items():
# HARD CODE for initialization trick
FIRST_STAGE_LAYER = 10
if hasattr(task_config, "first_stage_layer"):
FIRST_STAGE_LAYER = task_config.first_stage_layer
new_key = "clip." + key
if "visual.transformer." in key:
_, _, _, _, n_, *_ = new_key.split(".")
n_ = int(n_)
if n_ >= FIRST_STAGE_LAYER:
new_key = new_key.replace(".resblocks.", ".layers2.")
new_key_ls_ = new_key.split(".")
new_key_ls_[4] = str(n_ - FIRST_STAGE_LAYER)
new_key = ".".join(new_key_ls_)
else:
new_key = new_key.replace(".resblocks.", ".layers0.")
if new_key not in state_dict:
state_dict[new_key] = val.clone()
model = cls(clip_state_dict, *inputs, **kwargs)
if state_dict is not None:
model = cls.init_preweight(model, state_dict, task_config=task_config, print_logger=get_logger())
return model
class SegCLIP(SegCLIPPreTrainedModel):
def __init__(self, clip_state_dict, task_config):
super(SegCLIP, self).__init__()
self.task_config = task_config
self.ignore_image_index = -1
pretrained_clip_name = get_attr(task_config, "pretrained_clip_name", default_value="ViT-B/16", donot_log=True)
# CLIP Encoders: From OpenAI: CLIP [https://github.com/openai/CLIP] ===>
vit = "visual.proj" in clip_state_dict
assert vit
if vit:
vision_width = clip_state_dict["visual.conv1.weight"].shape[0]
vision_layers = len([k for k in clip_state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
vision_patch_size = clip_state_dict["visual.conv1.weight"].shape[-1]
grid_size = round((clip_state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
image_resolution = vision_patch_size * grid_size
if pretrained_clip_name not in available_models():
assert pretrained_clip_name[:5] == "ViT-B"
vision_patch_size = int(pretrained_clip_name.split("/")[-1])
assert image_resolution % vision_patch_size == 0
grid_size = image_resolution // vision_patch_size
show_log(task_config, "\t\t USE {} NOW!!!!!!!!!!!!".format(pretrained_clip_name))
else:
raise NotImplementedError()
embed_dim = clip_state_dict["text_projection"].shape[1]
context_length = clip_state_dict["positional_embedding"].shape[0]
vocab_size = clip_state_dict["token_embedding.weight"].shape[0]
transformer_width = clip_state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(set(k.split(".")[2] for k in clip_state_dict if k.startswith(f"transformer.resblocks")))
show_log(task_config, "\t embed_dim: {}".format(embed_dim))
show_log(task_config, "\t image_resolution: {}".format(image_resolution))
show_log(task_config, "\t vision_layers: {}".format(vision_layers))
show_log(task_config, "\t vision_width: {}".format(vision_width))
show_log(task_config, "\t vision_patch_size: {}".format(vision_patch_size))
show_log(task_config, "\t context_length: {}".format(context_length))
show_log(task_config, "\t vocab_size: {}".format(vocab_size))
show_log(task_config, "\t transformer_width: {}".format(transformer_width))
show_log(task_config, "\t transformer_heads: {}".format(transformer_heads))
show_log(task_config, "\t transformer_layers: {}".format(transformer_layers))
self.first_stage_layer = get_attr(task_config, "first_stage_layer", default_value=10)
# use .float() to avoid overflow/underflow from fp16 weight. https://github.com/openai/CLIP/issues/40
cut_top_layer = 0
show_log(task_config, "\t cut_top_layer: {}".format(cut_top_layer))
self.clip = CLIP(
embed_dim,
image_resolution, vision_layers-cut_top_layer, vision_width, vision_patch_size,
context_length, vocab_size, transformer_width, transformer_heads, transformer_layers-cut_top_layer,
first_stage_layer=self.first_stage_layer,
).float()
self.clip = nn.SyncBatchNorm.convert_sync_batchnorm(self.clip)
# <=== End of CLIP Encoders
self.loss_fct = CrossEn()
self.loss_fct_stdce = nn.CrossEntropyLoss()
## ==============================================================================
# Reconstruct the masked input as MAE
## ==============================================================================
mae_vis_mask_ratio = get_attr(task_config, "mae_vis_mask_ratio", default_value=0.75)
self.use_vision_mae_recon = get_attr(task_config, "use_vision_mae_recon", default_value=False)
if self.use_vision_mae_recon:
self.vis_mask_ratio = mae_vis_mask_ratio
decoder_embed_dim = vision_width // 2
decoder_num_heads = 8
vision_patch_size_ = vision_patch_size
self.vis_mae_decoder = MAEDecoder(vision_width, decoder_embed_dim, image_resolution,
vision_patch_size_,
decoder_depth=3, decoder_num_heads=decoder_num_heads, mlp_ratio=4.,
norm_layer=partial(nn.LayerNorm, eps=1e-6))
mae_seq_mask_ratio = get_attr(task_config, "mae_seq_mask_ratio", default_value=0.15)
self.use_text_mae_recon = get_attr(task_config, "use_text_mae_recon", default_value=False)
if self.use_text_mae_recon:
self.seq_mask_ratio = mae_seq_mask_ratio
decoder_embed_dim = embed_dim // 2
decoder_num_heads = 8
vision_patch_size_ = vision_patch_size
self.seq_mae_decoder = MAEDecoder(embed_dim, decoder_embed_dim, image_resolution,
vision_patch_size_,
decoder_depth=3, decoder_num_heads=decoder_num_heads, mlp_ratio=4.,
choice_seq=True,
pred_len=vocab_size, seq_len=self.task_config.max_words)
## ==============================================================================
# Use segmentation label for unsupervised learning
## ==============================================================================
self.use_seglabel = get_attr(task_config, "use_seglabel", default_value=False)
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids, attention_mask, image, image_seg=None):
# B x T x L
input_ids = input_ids.view(-1, input_ids.shape[-1])
token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1])
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
# T x 3 x H x W
image_input = torch.as_tensor(image).float()
b, pair, channel, h, w = image_input.shape
image = image_input[:, 0].view(b, channel, h, w)
image_frame = 1 # TODO: HARD CODE, A compatibility for video in CLIP4Clip
sequence_output, visual_output = self.get_sequence_visual_output(input_ids, token_type_ids, attention_mask,
image, shaped=True,
image_frame=image_frame, return_hidden=True)
if isinstance(sequence_output, tuple):
sequence_output, sequence_hidden = sequence_output
if isinstance(visual_output, tuple):
visual_output, visual_hidden, mid_states = visual_output
if self.use_seglabel:
# T x patch_len x patch_len
image_seg_input = torch.as_tensor(image_seg)
image_seg = image_seg_input[:, 0]
if self.training:
loss = 0.
sim_matrix_t2v, sim_matrix_v2t = self._loose_similarity(sequence_output, visual_output)
labels = torch.arange(sequence_output.size(0), dtype=torch.long, device=sequence_output.device)
labels = labels + sequence_output.size(0) * self.task_config.rank
sim_loss1 = self.loss_fct_stdce(sim_matrix_t2v, labels)
sim_loss2 = self.loss_fct_stdce(sim_matrix_v2t, labels)
sim_loss = (sim_loss1 + sim_loss2) / 2.
loss = loss + sim_loss
if self.use_seglabel:
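                # Build a same-segment indicator from the segmentation labels, average the patch-to-center
                # assignments within each segment, and pull every patch's assignment toward its segment
                # mean with a symmetric KL divergence.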
mid_attn_hidden = mid_states['attns'][0]['hard_attn'].permute(0, 2, 1) # B x L x CENTER
image_seg_ = image_seg.view(b, -1)
image_seg_ = image_seg_.unsqueeze(-1) - image_seg_.unsqueeze(-2)
image_seg_ = (image_seg_ == 0).to(dtype=mid_attn_hidden.dtype) # B x L x L
clutering_sum = torch.einsum('b g l, b l c -> b g c', image_seg_, mid_attn_hidden)
clutering_mean = clutering_sum / torch.clamp_min(torch.sum(image_seg_, dim=-1, keepdim=True), min=1.0)
coef_ = mid_attn_hidden.size(0) * mid_attn_hidden.size(1) * mid_attn_hidden.size(2)
kl_mean_1 = F.kl_div(F.log_softmax(mid_attn_hidden, dim=-1), F.softmax(clutering_mean, dim=-1), reduction='sum') / float(coef_)
kl_mean_2 = F.kl_div(F.log_softmax(clutering_mean, dim=-1), F.softmax(mid_attn_hidden, dim=-1), reduction='sum') / float(coef_)
clutering_loss = (kl_mean_1 + kl_mean_2) / 2.
loss = loss + clutering_loss
if self.use_text_mae_recon:
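                # Re-encode the text with token masking and reconstruct the masked tokens with the sequence MAE decoder.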
sequence_output = self.get_sequence_output(input_ids, token_type_ids, attention_mask, shaped=True,
return_hidden=True, mask_ratio=self.seq_mask_ratio)
_, seq_hidden, seq_mae_mask, seq_mae_ids_restore = sequence_output
seq_mae_mask = seq_mae_mask.view(-1, seq_mae_mask.size(-1))
seq_mae_ids_restore = seq_mae_ids_restore.view(-1, seq_mae_ids_restore.size(-1))
_mae_mask = (seq_mae_mask + attention_mask).gt(1)
seq_mae_loss = self.seq_mae_decoder.forward_seq(input_ids, seq_hidden, _mae_mask, seq_mae_ids_restore, attention_mask)
loss = loss + seq_mae_loss
if self.use_vision_mae_recon:
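                # Re-encode the image with random patch masking and reconstruct the masked patches with the vision MAE decoder.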
visual_output = self.get_visual_output(image, shaped=True, image_frame=image_frame,
return_hidden=True, mask_ratio=self.vis_mask_ratio)
_, vis_hidden, vis_mae_mask, vis_mae_ids_restore, mid_mae_states = visual_output
vis_hidden = mid_mae_states['hidden']
cls_ = torch.mean(vis_hidden, dim=1, keepdim=True)
vis_hidden = torch.cat([cls_, vis_hidden], dim=1)
vis_mae_mask = vis_mae_mask.view(-1, vis_mae_mask.size(-1))
vis_mae_ids_restore = vis_mae_ids_restore.view(-1, vis_mae_ids_restore.size(-1))
vis_mae_loss = self.vis_mae_decoder.forward_vis(image, vis_hidden, vis_mae_mask, vis_mae_ids_restore,
loss_allpatch=False)
loss = loss + vis_mae_loss
return loss
else:
return None
def get_sequence_output(self, input_ids, token_type_ids, attention_mask, shaped=False, return_hidden=False, seq_model=None, mask_ratio=0.):
if shaped is False:
input_ids = input_ids.view(-1, input_ids.shape[-1])
token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1])
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
if seq_model is None:
seq_model = self.clip
bs_pair = input_ids.size(0)
sequence_hidden = seq_model.encode_text(input_ids, return_hidden=return_hidden, mask_ratio=mask_ratio)
if isinstance(sequence_hidden, tuple):
if mask_ratio > 0:
sequence_hidden = tuple([itm.float().view(bs_pair, -1, itm.size(-1)) for itm in sequence_hidden[:2]]
+ [itm.view(bs_pair, -1, itm.size(-1)) for itm in sequence_hidden[2:]])
else:
sequence_hidden = tuple([itm.float().view(bs_pair, -1, itm.size(-1)) for itm in sequence_hidden])
else:
sequence_hidden = sequence_hidden.float().view(bs_pair, -1, sequence_hidden.size(-1))
return sequence_hidden
def get_visual_output(self, image, shaped=False, image_frame=-1, return_hidden=False, vis_model=None, mask_ratio=0.):
if shaped is False:
image_input = torch.as_tensor(image).float()
b, pair, channel, h, w = image_input.shape
image = image_input[:, 0].view(b, channel, h, w)
image_frame = 1 # TODO: HARD CODE, A compatibility for video in CLIP4Clip
if vis_model is None:
vis_model = self.clip
bs_pair = image.size(0)
visual_hidden = vis_model.encode_image(image, video_frame=image_frame, return_hidden=return_hidden, mask_ratio=mask_ratio)
if isinstance(visual_hidden, tuple):
if mask_ratio > 0:
visual_hidden = tuple([itm.float().view(bs_pair, -1, itm.size(-1)) for itm in visual_hidden[:2]]
+ [itm.view(bs_pair, -1, itm.size(-1)) for itm in visual_hidden[2:4]] + [visual_hidden[4]])
else:
visual_hidden = tuple([itm.float().view(bs_pair, -1, itm.size(-1)) for itm in visual_hidden[:2]]
+ [visual_hidden[2]])
else:
visual_hidden = visual_hidden.float().view(bs_pair, -1, visual_hidden.size(-1))
return visual_hidden
def get_sequence_visual_output(self, input_ids, token_type_ids, attention_mask, image,
shaped=False, image_frame=-1, return_hidden=False, seq_model=None, vis_model=None):
if shaped is False:
input_ids = input_ids.view(-1, input_ids.shape[-1])
token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1])
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
image_input = torch.as_tensor(image).float()
b, pair, channel, h, w = image_input.shape
image = image_input[:, 0].view(b, channel, h, w)
image_frame = 1 # TODO: HARD CODE, A compatibility for video in CLIP4Clip
sequence_output = self.get_sequence_output(input_ids, token_type_ids, attention_mask, shaped=True, return_hidden=return_hidden, seq_model=seq_model)
visual_output = self.get_visual_output(image, shaped=True, image_frame=image_frame, return_hidden=return_hidden, vis_model=vis_model)
return sequence_output, visual_output
def _mean_pooling_for_similarity_sequence(self, sequence_output, attention_mask):
attention_mask_un = attention_mask.to(dtype=torch.float).unsqueeze(-1)
attention_mask_un[:, 0, :] = 0.
sequence_output = sequence_output * attention_mask_un
text_out = torch.sum(sequence_output, dim=1) / torch.sum(attention_mask_un, dim=1, dtype=torch.float)
return text_out
def _mean_pooling_for_similarity_visual(self, visual_output,):
image_out = torch.mean(visual_output, dim=1)
return image_out
def _mean_pooling_for_similarity(self, sequence_output, visual_output, attention_mask,):
text_out = self._mean_pooling_for_similarity_sequence(sequence_output, attention_mask)
image_out = self._mean_pooling_for_similarity_visual(visual_output)
return text_out, image_out
def _loose_similarity(self, sequence_output, visual_output, logit_scale=None):
sequence_output, visual_output = sequence_output.contiguous(), visual_output.contiguous()
visual_output = visual_output.squeeze(1)
visual_output = visual_output / visual_output.norm(dim=-1, keepdim=True)
sequence_output = sequence_output.squeeze(1)
sequence_output = sequence_output / sequence_output.norm(dim=-1, keepdim=True)
if logit_scale is not None:
logit_scale = torch.clamp(logit_scale.exp(), max=100)
else:
logit_scale = torch.clamp(self.clip.logit_scale.exp(), max=100)
if self.training:
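            # Gather features from all ranks so the contrastive similarity is computed against the global batch.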
visual_output_collect = dist_collect(visual_output, self.task_config)
sequence_output_collect = dist_collect(sequence_output, self.task_config)
torch.distributed.barrier()
retrieve_logits_t2v = logit_scale * torch.matmul(sequence_output, visual_output_collect.t())
retrieve_logits_v2t = logit_scale * torch.matmul(visual_output, sequence_output_collect.t())
else:
retrieve_logits_t2v = logit_scale * torch.matmul(sequence_output, visual_output.t())
retrieve_logits_v2t = retrieve_logits_t2v.T
return retrieve_logits_t2v, retrieve_logits_v2t
def get_similarity_logits(self, sequence_output, visual_output, attention_mask, shaped=False):
if shaped is False:
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
contrastive_direction = ()
retrieve_logits_t2v, retrieve_logits_v2t = self._loose_similarity(sequence_output, visual_output)
return retrieve_logits_t2v, retrieve_logits_v2t, contrastive_direction
| [] |
2024-01-10 | TYTTYTTYT/book_search | book_gpt~bookgpt2.py | import openai
import re
class Bookgpt:
def __init__(self, message_history):
openai.api_key = "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"
self.message_history = message_history
def predict(self,input):
# tokenize the new input sentence
self.message_history.append({"role": "user", "content": f"{input}"})
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo", #10x cheaper than davinci, and better. $0.002 per 1k tokens
messages= self.message_history
)
#Just the reply:
reply_content = completion.choices[0].message.content#.replace('```python', '<pre>').replace('```', '</pre>')
print(reply_content)
print(type(reply_content))
self.message_history.append({"role": "assistant", "content": f"{reply_content}"})
# get a list of reply_content
# delete number and punctuation
reply_content = re.sub('[0-9.]+', '', reply_content)
response = list(reply_content.split("\n"))
response = list(map(lambda x: x.strip('"\' \n\t'), response))
if len(response) > 5:
response = response[2:]
elif len(response) == 1:
response = []
print(response)
print(type(response))
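        # Remove the user prompt and assistant reply appended above so the seed message history is unchanged for the next call.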
self.message_history.pop(-1)
self.message_history.pop(-1)
return response
| [
"INPUT",
"PLACEHOLDER"
] |
2024-01-10 | cesaralej/syllabus-generator | Syllabus_generator.py | # import required packages
import openai
import streamlit as st
from streamlit_option_menu import option_menu
# Start by creating a venv:
# python -m venv myenv
# Activate your venv:
# source venv_name/bin/activate (mac)
# venv_name\Scripts\activate (windows)
# Install the required packages:
# pip install -r requirements.txt
# Run the code in the terminal:
# streamlit run Syllabus_generator.py
# Read the original syllabus
def read_original_syllabus(file_path="original_syllabus.txt"):
try:
with open(file_path, "r", encoding="utf-8") as file:
original_syllabus = file.read()
return original_syllabus
except FileNotFoundError:
print(f"Error: File '{file_path}' not found.")
return None
original_syllabus = read_original_syllabus()
# API Request to generate the syllabus
def syllabus_request():
messages = [
{
"role": "system",
"content": f"You are a teacher for the class BIG DATA & ARTIFICIAL INTELLIGENCE IN BUSINESS STRATEGY. Your class follow the framework of this Syllabus:\n\n{original_syllabus}",
},
{
"role": "user",
"content": f"""Customize the first 5 sessions of the syllabus based on the syllabus framework for the 'BIG DATA & ARTIFICIAL INTELLIGENCE IN BUSINESS STRATEGY' class for a student with
{st.session_state.student_exp_years} years of professional experience, with a
{st.session_state.student_background} role background that wants to move to a
{st.session_state.student_future} role in the
{st.session_state.student_industry} industry. Your reply should only have the updated 5 sessions of the syllabus written in the same structure as the original one""",
},
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo", messages=messages, temperature=0.3, max_tokens=2048
)
return response["choices"][0]["message"]["content"]
# API Request to generate the capstone project
def capstone_request():
messages = [
{
"role": "system",
"content": f"You are a teacher for the class BIG DATA & ARTIFICIAL INTELLIGENCE IN BUSINESS STRATEGY. Your class follow the framework of this Syllabus:\n\n{original_syllabus}",
},
{
"role": "user",
"content": f"""Design a case study project for the 'BIG DATA & ARTIFICIAL INTELLIGENCE IN BUSINESS STRATEGY' class for a student with
{st.session_state.student_exp_years} years of professional experience, with a
{st.session_state.student_background} role background that wants to move to a
{st.session_state.student_future} role in the
{st.session_state.student_industry} industry. Your reply should only have the project instructions. The project should present a case where a fictional company of the industry is facing a challenge and the student needs to identify a solution based on the subjects learned on the syllabus""",
},
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo", messages=messages, temperature=0.3, max_tokens=2048
)
return response["choices"][0]["message"]["content"]
st.set_page_config(
page_title="Business Strategy Syllabus",
page_icon="🌐",
initial_sidebar_state="expanded",
)
# Sidebar
with st.sidebar:
# Set up OpenAI API key
st.header("OpenAI API Configuration")
st.write("To personalize your syllabus, configure your OpenAI API settings below.")
st.write(
"Don't have an API key? Visit [OpenAI](https://beta.openai.com/signup/) to get one."
)
    api_key = st.sidebar.text_input("Enter your OpenAI API key", type="password")
# Validation check for API key
if st.button("Submit"):
if not api_key:
st.error("Please enter your OpenAI API key.")
else:
openai.api_key = api_key
st.success("API key set successfully!")
# User Information
st.sidebar.header("Input Your Information")
st.session_state.student_exp_years = st.sidebar.text_input(
"Years of Professional Experience",
help="Enter the number of years you have been working professionally.",
value="5",
)
professional_background_options = ["Business", "Tech", "Hybrid"]
st.session_state.student_background = st.sidebar.selectbox(
"Professional Background",
professional_background_options,
help="Specify your professional background, e.g., Business, Tech, or Hybrid.",
)
if st.session_state.student_background == "Tech":
st.sidebar.text_input(
"Tech Skills", help="List your relevant technical skills."
)
st.session_state.student_future = st.sidebar.selectbox(
"Future Career Goal",
professional_background_options,
help="Describe the role you aim to achieve, e.g., Business, Tech, or Hybrid.",
)
st.session_state.student_industry = st.sidebar.text_input(
"Target Industry",
help="Enter the industry in which you aspire to work.",
value="Consulting",
)
# Validations
if (
st.session_state.student_exp_years
and not st.session_state.student_exp_years.isdigit()
):
st.error("Please enter a valid number for years of experience.")
def generate_syllabus():
try:
with st.spinner("Generating Syllabus..."):
if (
st.session_state.student_exp_years
and st.session_state.student_background
and st.session_state.student_future
and st.session_state.student_industry
):
st.session_state.syllabus_content = syllabus_request()
st.success("Syllabus generated successfully!")
except Exception as e:
st.error(f"Error generating syllabus: {e}")
# Submit button
if (
not st.session_state.student_exp_years
or not st.session_state.student_background
or not st.session_state.student_future
or not st.session_state.student_industry
):
st.warning(
"Please complete all required fields before generating the syllabus."
)
st.button("Generate Syllabus", disabled=True)
else:
if st.button("Generate Syllabus"):
generate_syllabus()
st.image("IE_Business_School_logo.svg.png", width=100)
# Title
st.markdown(
f"<h1 style='font-size: 36px; text-align: center;'>BIG DATA & AI IN BUSINESS STRATEGY</h1>",
unsafe_allow_html=True,
)
# Introductory Message
st.markdown(
f"<p style='font-size: 20px; text-align: center;'>Welcome to Your AI-Driven Learning Experience!</p>",
unsafe_allow_html=True,
)
# Instructions on how to use the app
st.markdown(
f"<h2 style='font-size: 28px;'>How to Use:</h2>",
unsafe_allow_html=True,
)
st.write("1. **Configure your OpenAI API key in the sidebar.**")
st.write("2. **Input your professional information on the left.**")
st.write(
"3. **Click on 'Generate Syllabus' to receive your personalized learning plan.**"
)
type = option_menu(
None,
["Syllabus", "Capstone Project"],
icons=[],
default_index=0,
orientation="horizontal",
)
# Syllabus section
if type == "Syllabus":
st.subheader("Personalized Syllabus Generator")
st.markdown("---")
if "syllabus_content" not in st.session_state:
st.subheader("No syllabus generated yet")
st.write(
"Your personalized syllabus is crafted based on the information you provide."
)
st.write("Unlock a unique learning journey with AI-driven customization.")
else:
st.markdown(
f"**Your Personalized Syllabus:**\n\n{st.session_state.syllabus_content}"
)
# Capstone Project section
if type == "Capstone Project":
st.subheader("Capstone Project Generator")
st.write(
"Once your project is ready, submit to the corresponding learning platform"
)
# Call a function to generate and display the dynamic content
def generate_capstone():
try:
with st.spinner("Generating project instructions..."):
if (
st.session_state.student_exp_years
and st.session_state.student_background
and st.session_state.student_future
and st.session_state.student_industry
):
st.session_state.project_content = capstone_request()
st.success("Instructions generated successfully!")
except Exception as e:
st.error(f"Error generating project instructions: {e}")
# Submit button
if (
not st.session_state.student_exp_years
or not st.session_state.student_background
or not st.session_state.student_future
or not st.session_state.student_industry
):
st.warning(
"Please complete all required fields before generating the project instructions."
)
st.button("Generate Capstone Project", disabled=True)
else:
if st.button("Generate Project Instructions"):
generate_capstone()
st.markdown("---")
if "project_content" not in st.session_state:
st.subheader("No project instructions generated yet")
st.write(
"This is where the dynamic capstone project content will be displayed."
)
else:
st.markdown(
f"**Your Personalized Project Instructions:**\n\n{st.session_state.project_content}"
)
| [
"You are a teacher for the class BIG DATA & ARTIFICIAL INTELLIGENCE IN BUSINESS STRATEGY. Your class follow the framework of this Syllabus:\n\nPLACEHOLDER"
] |
2024-01-10 | recipede/recipe-detect | backend~generation~recipe_gen.py | from typing import List
import requests
import cohere
import os
from dotenv import load_dotenv
load_dotenv()
COHERE_API_KEY = os.getenv("COHERE_API_KEY")
EDAMAM_API_KEY = os.getenv("EDAMAM_API_KEY")
EDAMAM_API_URL = "https://api.edamam.com/api/recipes/v2"
EDAMAM_APP_ID = os.getenv("EDAMAM_APP_ID")
def generate_recipe(food_name: str, ingredients: List[str]) -> str:
if COHERE_API_KEY == None:
raise Exception("API key not found.")
co = cohere.Client(COHERE_API_KEY)
prompt = f"Give me a recipe in JSON for {food_name} that uses the following ingredients: "
for ingredient in ingredients:
prompt += "\n " + ingredient
response = co.generate(
model='command-nightly',
prompt = prompt,
max_tokens=200,
temperature=0.750)
if response.generations == None:
raise Exception("No response from API.")
return response.generations[0].text
def generate_llm_recipes(ingredients: List[str]) -> str:
if COHERE_API_KEY == None:
raise Exception("API key not found.")
co = cohere.Client(COHERE_API_KEY)
prompt = "Give me a list of recipes (maximum 3) with steps in JSON format that use the following ingredients: "
for ingredient in ingredients:
prompt += "\n " + ingredient
prompt += "\n Give a JSON format of an array with objects with property keys \"name\", \"ingredients\", \"steps\". Keep your answer relatively short. Separate the steps into individual strings in their respective arrays and include commas for each element. Make sure you don't leave trailing commas for the end of arrays. "
response = co.generate(
model='command-nightly',
prompt = prompt,
max_tokens=2000,
temperature=0.750)
if response.generations == None:
raise Exception("No response from API.")
print("".join([elem.text for elem in response.generations]))
return response.generations[0].text
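# A small helper sketch (an addition for illustration, not part of the original module): one
# defensive way to parse the JSON text that generate_llm_recipes asks the model to produce.
def parse_llm_recipes(raw: str) -> List[dict]:
    import json
    try:
        parsed = json.loads(raw)
        # The prompt requests an array of recipe objects; anything else is treated as a failure.
        return parsed if isinstance(parsed, list) else []
    except json.JSONDecodeError:
        return []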
def get_edamam_recipe(ingredients: List[str]) -> str:
if EDAMAM_API_KEY == None or EDAMAM_APP_ID == None:
raise Exception("oh no")
query_str = f"?app_id=98d69878&app_key={EDAMAM_API_KEY}"
query_str += "&q=" + '+'.join(ingredients)
print(query_str)
r = requests.get(f"{EDAMAM_API_URL}{query_str}",
params={"app_key": EDAMAM_API_KEY,
"app_id": EDAMAM_APP_ID,
"ingredients": ingredients,
"type": "public",
}
)
recipes = r.json()["hits"]
recipes = [{ "title": x["recipe"]["label"], "ingredients": [ y["text"] for y in x["recipe"]["ingredients"]] } for x in recipes]
return str(recipes)
if __name__ == "__main__":
ingredients = ["ham", "rice", "chicken", "teriyaki"]
#get_edamam_recipe(ingredients)
| [
"Give me a list of recipes (maximum 3) with steps in JSON format that use the following ingredients: ",
"\n Give a JSON format of an array with objects with property keys \"name\", \"ingredients\", \"steps\". Keep your answer relatively short. Separate the steps into individual strings in their respective arrays and include commas for each element. Make sure you don't leave trailing commas for the end of arrays. ",
"\n PLACEHOLDER",
"Give me a recipe in JSON for PLACEHOLDER that uses the following ingredients: "
] |
2024-01-10 | recipede/recipe-detect | backend~scanner.py | from collections import Counter
from typing import List
from google.cloud import vision
import cohere
import os
from dotenv import load_dotenv
from unicodedata import normalize
load_dotenv()
COHERE_API_KEY = os.getenv("COHERE_API_KEY")
def is_food(flyer_text: str) -> bool:
if COHERE_API_KEY == None:
raise Exception("API key not found.")
co = cohere.Client(COHERE_API_KEY)
prompt = "The following is text from a grocery store flyer that sells conventional household goods and food. Determine if this item on the flyer is a food or not: " + flyer_text
prompt += "\n\nPlease respond with only 'true' or 'false' based on whether the item is a food or not."
response = co.generate(
model='command-nightly',
prompt = prompt,
max_tokens=200,
temperature=0.75)
if response.generations == None:
raise Exception("No response from API.")
return response.generations[0].text.strip() == "true"
def extract_grocery(flyer_text: str) -> str:
if COHERE_API_KEY == None:
raise Exception("API key not found.")
co = cohere.Client(COHERE_API_KEY)
prompt = "The following is text from a grocery store flyer that sells conventional household goods and food. Determine what the product name is: " +flyer_text
prompt += "\n\nPlease respond with only the name of the product." #kind of food or product that the item is."#"
response = co.generate(
model='command-nightly',
prompt = prompt,
max_tokens=200,
temperature=0.75)
if response.generations == None:
raise Exception("No response from API.")
return response.generations[0].text.strip()
def extract_flyer(image_uri: str) -> str:
client = vision.ImageAnnotatorClient()
response = client.annotate_image({
'image': {'source': { 'image_uri': image_uri }},
'features': [{'type_': vision.Feature.Type.TEXT_DETECTION}]
})
return str(response.text_annotations[0].description)
def extract_cost(flyer_text: str) -> float:
    # Flatten the OCR text into a single line of whitespace-separated tokens.
    flyer_text = flyer_text.replace("\\\n", " ")
    flyer_text = flyer_text.replace("\n", " ")
    print(flyer_text)
    # NFKC-normalize each token so stylised digits from the flyer OCR compare like plain ASCII digits.
    flyer_words = [normalize("NFKC", w) for w in flyer_text.split(" ")]
    print(flyer_words)
    # Keep tokens that look like prices: bare digit runs (prices printed in cents) or a small
    # whitelist of common "x.99"/"x.50" price strings seen in the sample flyers.
    costs = [w for w in flyer_words if (len(w) >= 3 and (w.isdigit() or w in ["4.99", "14.99", "4.50", "14.50", "9.99", "4.49", "24.99", "19.99"]))]
    print(costs)
    # Prices on these flyers end in 9 or 50; anything else is treated as noise.
    costs = [float(w) for w in costs if w[-1] == '9' or w[-2:] == '50']
    print(costs)
    # Values over 100 are cent amounts (e.g. 499 -> 4.99); smaller values are already in dollars.
    return costs[0] / 100 if costs[0] > 100 else costs[0]
if __name__ == "__main__":
for i in range(11):
flyer_text = extract_flyer(f"https://raw.githubusercontent.com/recipede/recipe-detect/main/grocery/crop_{i}.jpg")
print(extract_cost(flyer_text))
| [
"The following is text from a grocery store flyer that sells conventional household goods and food. Determine what the product name is: PLACEHOLDER",
"\n\nPlease respond with only the name of the product.",
"The following is text from a grocery store flyer that sells conventional household goods and food. Determine if this item on the flyer is a food or not: PLACEHOLDER",
"\n\nPlease respond with only 'true' or 'false' based on whether the item is a food or not."
] |
2024-01-10 | recipede/recipe-detect | backend~generation~scanner.py | from google.cloud import vision
import cohere
import os
from dotenv import load_dotenv
load_dotenv()
COHERE_API_KEY = os.getenv("COHERE_API_KEY")
def is_food(flyer_text: str) -> bool:
if COHERE_API_KEY == None:
raise Exception("API key not found.")
co = cohere.Client(COHERE_API_KEY)
prompt = "The following is text from a grocery store flyer that sells conventional household goods and food. Determine if this item on the flyer is a food or not: " + flyer_text
prompt += "\n\nPlease respond with only 'true' or 'false' based on whether the item is a food or not."
response = co.generate(
model='command-nightly',
prompt = prompt,
max_tokens=200,
temperature=0.75)
if response.generations == None:
raise Exception("No response from API.")
return response.generations[0].text.strip() == "true"
def extract_grocery(flyer_text: str) -> str:
if COHERE_API_KEY == None:
raise Exception("API key not found.")
co = cohere.Client(COHERE_API_KEY)
prompt = "The following is text from a grocery store flyer that sells conventional household goods and food. Determine what the product name is: " +flyer_text
prompt += "\n\nPlease respond with only the name of the product." #kind of food or product that the item is."#"
response = co.generate(
model='command-nightly',
prompt = prompt,
max_tokens=200,
temperature=0.75)
if response.generations == None:
raise Exception("No response from API.")
return response.generations[0].text.strip()
def extract_flyer(image_uri: str) -> str:
client = vision.ImageAnnotatorClient()
response = None
with open(image_uri, "rb") as image:
file = image.read()
byte_array = bytes(file)
response = client.annotate_image({
'image': {'content': byte_array },
'features': [{'type_': vision.Feature.Type.TEXT_DETECTION}]
})
return str(response.text_annotations[0].description)
if __name__ == "__main__":
flyer_text = str(extract_flyer("../grocery/crop_6.jpg"))
print(flyer_text)
print(extract_grocery(flyer_text))
print(is_food(flyer_text))
| [
"The following is text from a grocery store flyer that sells conventional household goods and food. Determine what the product name is: PLACEHOLDER",
"\n\nPlease respond with only the name of the product.",
"The following is text from a grocery store flyer that sells conventional household goods and food. Determine if this item on the flyer is a food or not: PLACEHOLDER",
"\n\nPlease respond with only 'true' or 'false' based on whether the item is a food or not."
] |
2024-01-10 | recipede/recipe-detect | backend~recipe_gen.py | from typing import List
import requests
import cohere
import os
from dotenv import load_dotenv
load_dotenv()
COHERE_API_KEY = os.getenv("COHERE_API_KEY")
EDAMAM_API_KEY = os.getenv("EDAMAM_API_KEY")
EDAMAM_API_URL = "https://api.edamam.com/api/recipes/v2"
EDAMAM_APP_ID = os.getenv("EDAMAM_APP_ID")
def generate_recipe(food_name: str, ingredients: List[str]) -> str:
if COHERE_API_KEY == None:
raise Exception("API key not found.")
co = cohere.Client(COHERE_API_KEY)
prompt = f"Give me a recipe in JSON for {food_name} that uses the following recipes: "
for ingredient in ingredients:
prompt += "\n " + ingredient
response = co.generate(
model='command-nightly',
prompt = prompt,
max_tokens=200,
temperature=0.750)
if response.generations == None:
raise Exception("No response from API.")
return response.generations[0].text
def generate_llm_recipes(ingredients: List[str]) -> str:
if COHERE_API_KEY == None:
raise Exception("API key not found.")
co = cohere.Client(COHERE_API_KEY)
prompt = "Ignoring non-food and inappropriate items, give me a list of recipes in JSON format that use the following ingredients: "
for ingredient in ingredients:
prompt += "\n " + ingredient
response = co.generate(
model='command-nightly',
prompt = prompt,
max_tokens=200,
temperature=0.750)
if response.generations == None:
raise Exception("No response from API.")
return response.generations[0].text
def get_edamam_recipe(ingredients: List[str]) -> str:
if EDAMAM_API_KEY == None or EDAMAM_APP_ID == None:
raise Exception("oh no")
query_str = f"?app_id=98d69878&app_key={EDAMAM_API_KEY}"
query_str += "&q=" + '+'.join(ingredients)
print(query_str)
r = requests.get(f"{EDAMAM_API_URL}{query_str}",
params={"app_key": EDAMAM_API_KEY,
"app_id": EDAMAM_APP_ID,
"ingredients": ingredients,
"type": "public",
}
)
recipes = r.json()["hits"]
recipes = [{ "title": x["recipe"]["label"], "ingredients": [ y["text"] for y in x["recipe"]["ingredients"]] } for x in recipes]
return str(recipes)
if __name__ == "__main__":
ingredients = ["ham", "rice", "chicken", "teriyaki"]
#get_edamam_recipe(ingredients)
| [
"Ignoring non-food and inappropriate items, give me a list of recipes in JSON format that use the following ingredients: ",
"\n PLACEHOLDER",
"Give me a recipe in JSON for PLACEHOLDER that uses the following recipes: "
] |
2024-01-10 | siddarthanath/University-College-London | Thesis~cebo~helper~distmodel.py | """
This file stores distribution models corresponding to predictions from OpenAI.
"""
# -------------------------------------------------------------------------------------------------------------------- #
# Standard Library
# Third Party
import numpy as np
from dataclasses import dataclass
# Private
# -------------------------------------------------------------------------------------------------------------------- #
@dataclass
class DiscreteDist:
values: np.ndarray
probs: np.ndarray
def __post_init__(self):
# make sure np arrays
self.values = np.array(self.values)
self.probs = np.array(self.probs)
uniq_values = np.unique(self.values)
if len(uniq_values) < len(self.values):
            # need to merge duplicate values and sum their probabilities
uniq_probs = np.zeros(len(uniq_values))
for i, v in enumerate(uniq_values):
uniq_probs[i] = np.sum(self.probs[self.values == v])
self.values = uniq_values
self.probs = uniq_probs
def sample(self):
return np.random.choice(self.values, p=self.probs)
def mean(self):
return np.sum(self.values * self.probs)
def mode(self):
return self.values[np.argmax(self.probs)]
def std(self):
return np.sqrt(np.sum((self.values - self.mean()) ** 2 * self.probs))
def __repr__(self):
return f"DiscreteDist({self.values}, {self.probs})"
def __len__(self):
return len(self.values)
@dataclass
class GaussDist:
_mean: float
_std: float
def sample(self):
return np.random.normal(self._mean, self._std)
def mean(self):
return self._mean
def mode(self):
return self._mean
def std(self):
return self._std
def set_std(self, value):
self._std = value
def __repr__(self):
return f"GaussDist({self._mean}, {self._std})"
def __len__(self):
return 1
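# A small worked example of the two distributions above (numbers chosen purely for illustration):
#   d = DiscreteDist(values=[1.0, 1.0, 2.0], probs=[0.2, 0.2, 0.6])  # duplicates merge to ([1., 2.], [0.4, 0.6])
#   d.mean()   # -> 1.6
#   d.mode()   # -> 2.0
#   g = GaussDist(1.6, 0.5)
#   g.sample() # -> one draw from a normal with mean 1.6 and std 0.5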
| [] |
2024-01-10 | siddarthanath/University-College-London | Thesis~cebo~models~bo_lift.py | """
The original BO-LIFT code (with minor changes).
"""
# -------------------------------------------------------------------------------------------------------------------- #
# Standard Library
from typing import *
from functools import partial
from typing import Tuple, List
# Third Party
import numpy as np
from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts.example_selector import (
MaxMarginalRelevanceExampleSelector,
SemanticSimilarityExampleSelector,
)
from langchain.vectorstores import FAISS, Chroma
# Private
from cebo.helper.distmodel import DiscreteDist, GaussDist
from cebo.models.llm import LLM
from cebo.helper.aqfxns import (
expected_improvement,
upper_confidence_bound,
)
# -------------------------------------------------------------------------------------------------------------------- #
_answer_choices = ["A", "B", "C", "D", "E"]
class QuantileTransformer:
    """Map raw y values onto quantile-bin scores and approximately back again."""
    def __init__(self, values, n_quantiles):
        self.n_quantiles = n_quantiles
        # Bin edges: the empirical quantiles of the observed values.
        self.quantiles = np.linspace(0, 1, n_quantiles + 1)
        self.values_quantiles = np.quantile(values, self.quantiles)
    def to_quantiles(self, values):
        # Index of the quantile bin each value falls into (0 .. n_quantiles - 1).
        quantile_scores = np.digitize(values, self.values_quantiles[1:-1])
        return quantile_scores
    def to_values(self, quantile_scores):
        # Interpolate bin indices back to approximate values on the original scale.
        values_from_scores = np.interp(
            quantile_scores, range(self.n_quantiles + 1), self.values_quantiles
        )
        return values_from_scores
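# A small worked example of the quantile mapping above (numbers chosen purely for illustration):
#   qt = QuantileTransformer(values=[0.1, 0.5, 1.0, 2.0], n_quantiles=4)
#   qt.to_quantiles([0.3, 1.0])  # -> array([0, 2]); the quantile bin each value falls into
#   qt.to_values([0, 2])         # -> approximately [0.1, 0.75]; values recovered from the bins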
class BOLIFT(LLM):
def __init__(
self,
model: str,
prompt_template: PromptTemplate = None,
suffix: Optional[str] = None,
temperature: Optional[float] = None,
prefix: Optional[str] = None,
x_formatter: Callable[[str], str] = lambda x: x,
y_formatter: Callable[[float], str] = lambda y: f"{y:0.2f}",
y_name: str = "output",
x_name: str = "input",
selector_k: Optional[int] = None,
k: int = 5,
use_quantiles: bool = False,
n_quantiles: int = 100,
verbose: bool = False,
cos_sim: bool = False,
) -> None:
"""Initialize Ask-Tell optimizer.
You can pass formatters that will make your data more compatible with the model. Note that
y as output form the model must be a float(can be parsed with ``float(y_str)``)
Args:
prompt_template: Prompt template that should take x and y (for few shot templates)
suffix: Matching suffix for first part of prompt template - for actual completion.
temperature: Temperature to use for inference. If None, will use model default.
prefix: Prefix to add before all examples (e.g., some context for the model).
x_formatter: Function to format x for prompting.
y_formatter: Function to format y for prompting.
y_name: Name of y variable in prompt template (e.g., density, value of function, etc.)
x_name: Name of x variable in prompt template (e.g., input, x, etc.). Only appears in inverse prompt
selector_k: What k to use when switching to selection mode. If None, will use all examples
k: Number of examples to use for each prediction.
verbose: Whether to print out debug information.
"""
self._model = model
self._temperature = temperature
self._selector_k = selector_k
self._ready = False
self._ys = []
self.format_x = x_formatter
self.format_y = y_formatter
self._y_name = y_name
self._x_name = x_name
self._prompt_template = prompt_template
self._suffix = suffix
self._prefix = prefix
self._example_count = 0
self._k = k
self._answer_choices = _answer_choices[:k]
self.use_quantiles = use_quantiles
self.n_quantiles = n_quantiles
self._calibration_factor = None
self._verbose = verbose
self.tokens_used = 0
self.cos_sim = cos_sim
def set_calibration_factor(self, calibration_factor):
self._calibration_factor = calibration_factor
def _setup_llm(self):
# nucleus sampling seems to get more diversity
return self.get_llm(
n=self._k,
best_of=self._k,
temperature=0.1 if self._temperature is None else self._temperature,
model=self._model,
top_p=1.0,
stop=["\n", "###", "#", "##"],
logit_bias={
"198": -100, # new line,
"628": -100, # double new line,
"50256": -100, # endoftext
},
max_tokens=256,
logprobs=1,
)
def _setup_prompt(
self,
example: Dict,
prompt_template: Optional[PromptTemplate] = None,
suffix: Optional[str] = None,
prefix: Optional[str] = None,
) -> FewShotPromptTemplate:
if prefix is None:
prefix = (
"The following are correctly answered questions. "
"Each answer is numeric and ends with ###\n"
)
if prompt_template is None:
prompt_template = PromptTemplate(
input_variables=["x", "y", "y_name"],
template="Q: Given {x}, what is {y_name}?\nA: {y}###\n\n",
)
if suffix is not None:
raise ValueError(
"Cannot provide suffix if using default prompt template."
)
suffix = "Q: Given {x}, what is {y_name}?\nA: "
elif suffix is None:
raise ValueError("Must provide suffix if using custom prompt template.")
# test out prompt
if example is not None:
prompt_template.format(**example)
examples = [example]
# TODO: make fake example text
else:
examples = []
example_selector = None
if self._selector_k is not None:
if len(examples) == 0:
raise ValueError("Cannot do zero-shot with selector")
if not self.cos_sim:
                example_selector = MaxMarginalRelevanceExampleSelector.from_examples(
                    [example],
                    OpenAIEmbeddings(),
                    FAISS,
                    k=self._selector_k,
                )
else:
                example_selector = SemanticSimilarityExampleSelector.from_examples(
                    [example],
                    OpenAIEmbeddings(),
                    Chroma,
                    k=self._selector_k,
                )
return FewShotPromptTemplate(
examples=examples if example_selector is None else None,
example_prompt=prompt_template,
example_selector=example_selector,
suffix=suffix,
prefix=prefix,
input_variables=["x", "y_name"],
)
def tell(self, x: str, y: float, alt_ys: Optional[List[float]] = None) -> None:
"""Tell the optimizer about a new example."""
example_dict, inv_example = self._tell(x, y, alt_ys)
# we want to have example
# to initialize prompts, so send it
if not self._ready:
self.prompt = self._setup_prompt(
example_dict, self._prompt_template, self._suffix, self._prefix
)
self.llm = self._setup_llm()
self._ready = True
else:
# in else, so we don't add twice
if self._selector_k is not None:
self.prompt.example_selector.add_example(example_dict)
else:
self.prompt.examples.append(example_dict)
self._example_count += 1
def predict(
self, x: Union[str, List[str]]
) -> Union[Tuple[float, float], List[Tuple[float, float]]]:
"""Predict the probability distribution and values for a given x.
Args:
x: The x value(s) to predict.
Returns:
The probability distribution and values for the given x.
"""
if not isinstance(x, list):
x = [x]
if not self._ready:
# special zero-shot
self.prompt = self._setup_prompt(
None, self._prompt_template, self._suffix, self._prefix
)
self.llm = self._setup_llm()
self._ready = True
if self._selector_k is not None:
# have to update this until my PR is merged
self.prompt.example_selector.k = min(self._example_count, 10)
if not isinstance(x, list):
x = {key: str(value) for key, value in x.items()}
queries = [self.prompt.format(**x)]
else:
queries = [
self.prompt.format(
x=self.format_x(list(x_i.values())), y_name=self._y_name
)
for x_i in x
]
results, tokens = self._predict(queries)
self.tokens_used += tokens
# need to replace any GaussDist with pop std
for i, result in enumerate(results):
if len(self._ys) > 1:
ystd = np.std(self._ys)
elif len(self._ys) == 1:
ystd = self._ys[0]
else:
ystd = 10
if isinstance(result, GaussDist):
results[i].set_std(ystd)
if self._calibration_factor:
for i, result in enumerate(results):
if isinstance(result, GaussDist):
results[i].set_std(result.std() * self._calibration_factor)
elif isinstance(result, DiscreteDist):
results[i] = GaussDist(
results[i].mean(),
results[i].std() * self._calibration_factor,
)
        # return the single result (and its query) when only one x was provided, otherwise the full list
if len(x) == 1:
return results[0], queries
return results, queries
def ask(
self,
data,
possible_x: List[str],
_lambda: float = 0.5,
) -> Dict:
"""Ask the optimizer for the next x to try.
Args:
possible_x: List of possible x values to choose from.
_lambda: Lambda value to use for UCB.
Return:
The selected x values, their acquisition function values, and the predicted y modes.
Sorted by acquisition function value (descending)
"""
# Store highest value so far
if len(self._ys) == 0:
best = 0
else:
best = np.max(self._ys)
# Create list of values to query over
possible_x_l = list(possible_x)
        # Calculate results for the two acquisition functions (a random baseline is added in _ask)
aq_fxns = {
"Expected Improvement": expected_improvement,
"Upper Confidence Bound": partial(upper_confidence_bound, _lambda=_lambda),
}
# Obtain results for each acquisition function value
results = self._ask(data, possible_x_l, best, aq_fxns)
        # A random baseline selection is always included alongside the acquisition-function picks
return results
def _tell(
self, x: str, y: float, alt_ys: Optional[List[float]] = None
) -> Tuple[Dict, Dict]:
"""Tell the optimizer about a new example."""
if self.use_quantiles:
self.qt = QuantileTransformer(
values=self._ys + [y], n_quantiles=self.n_quantiles
)
y = self.qt.to_quantiles(y)
if alt_ys is not None:
raise ValueError("Alt ys not supported for topk.")
example_dict = dict(
x=self.format_x(x),
y=self.format_y(y),
y_name=self._y_name,
)
self._ys.append(y)
inv_dict = dict(
x=self.format_x(x),
y=self.format_y(y),
y_name=self._y_name,
x_name=self._x_name,
)
return example_dict, inv_dict
def _predict(self, queries: List[str]) -> Tuple[List[DiscreteDist], int]:
result, token_usage = self.openai_topk_predict(queries, self.llm, self._verbose)
if self.use_quantiles and self.qt is None:
raise ValueError(
"Can't use quantiles without building the quantile transformer"
)
if self.use_quantiles:
for r in result:
if isinstance(r, GaussDist):
r._mean = self.qt.to_values(r._mean)
elif isinstance(r, DiscreteDist):
r.values = self.qt.to_values(r.values)
return result, token_usage
def _ask(
self, data, possible_x: List[str], best: float, aq_fxns: Dict[str, Callable]
) -> Dict:
# Obtain results and queries
results, queries = self.predict(possible_x)
# Calculate acquisition function value
final_results = {}
for aq_fxn_name, aq_fxn in aq_fxns.items():
aq_vals = np.array(
[aq_fxn(r, best) if len(r) > 0 else np.nan for r in results]
)
if aq_fxn_name == "Upper Confidence Bound":
# Check UCB range
target_vals = [
data[
(data["SMILES"] == example["SMILES"])
& (data["SMILES Solvent"] == example["SMILES Solvent"])
]["Solubility"].values[0]
for example in possible_x
]
num_success_bounds = sum(
[
1 if result_range[0] <= target_val <= result_range[1] else 0
for result_range, target_val in zip(aq_vals, target_vals)
]
) / len(possible_x)
# Final acquisition values
aq_vals = aq_vals[:, 1]
# Other acquisition values
aq_vals_cleaned = np.where(
np.isnan(aq_vals),
-np.inf,
np.where(np.isinf(aq_vals), 1e10, aq_vals),
)
selected = np.argmax(aq_vals_cleaned)
final_results[f"{aq_fxn_name}"] = {
"Selected": possible_x[selected],
"Acquisition Values": aq_vals_cleaned,
"Number of points contained in acquisition range": num_success_bounds,
}
if aq_fxn_name == "Expected Improvement":
# Other acquisition values
aq_vals_cleaned = np.where(
np.isnan(aq_vals),
-np.inf,
np.where(np.isinf(aq_vals), 1e10, aq_vals),
)
selected = np.argmax(aq_vals_cleaned)
final_results[f"{aq_fxn_name}"] = {
"Selected": possible_x[selected],
"Acquisition Values": aq_vals_cleaned,
"Number of points contained in acquisition range": "N/A",
}
# Add random
final_results["random"] = {
"Selected": np.random.choice(possible_x),
"Acquisition Values": [0],
"Number of points contained in acquisition range": None,
}
return final_results
| [
"Q: Given {x}, what is {y_name}?\nA: ",
"Q: Given {x}, what is {y_name}?\nA: {y}###\n\n",
"y_name"
] |
2024-01-10 | siddarthanath/University-College-London | Thesis~cebo~models~cebo_lift.py | """
This file extends the original Ask-Tell interface by incorporating contextual information for solubility
prediction.
This method adapts the prefix and the prompt template in an attempt to improve prediction accuracy.
Note that there are other ways to incorporate contextual information into the LLM.
"""
# -------------------------------------------------------------------------------------------------------------------- #
# Standard Library
from typing import *
from functools import partial
from typing import Tuple, List, Any, Union
# Third Party
import numpy as np
from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts.example_selector import (
MaxMarginalRelevanceExampleSelector,
SemanticSimilarityExampleSelector,
)
from langchain.vectorstores import FAISS, Chroma
from numpy import ndarray
# Private
from cebo.helper.distmodel import DiscreteDist, GaussDist
from cebo.models.llm import LLM
from cebo.helper.aqfxns import (
probability_of_improvement,
expected_improvement,
upper_confidence_bound,
greedy,
)
# -------------------------------------------------------------------------------------------------------------------- #
_answer_choices = ["A", "B", "C", "D", "E"]
class CEBOLIFT(LLM):
def __init__(
self,
model: str,
prompt_template: PromptTemplate = None,
suffix: Optional[str] = None,
temperature: Optional[float] = None,
prefix: Optional[str] = None,
x_formatter: Callable[[str], str] = lambda x: x,
y_formatter: Callable[[float], str] = lambda y: f"{y:0.2f}",
y_name: str = "output",
x_name: str = "input",
selector_k: Optional[int] = None,
k: int = 5,
verbose: bool = False,
cos_sim: bool = False,
features: bool = False,
domain: str = None,
) -> None:
"""Initialize Ask-Tell optimizer.
You can pass formatters that will make your data more compatible with the model. Note that
        y as output from the model must be a float (can be parsed with ``float(y_str)``)
Args:
prompt_template: Prompt template that should take x and y (for few shot templates)
suffix: Matching suffix for first part of prompt template - for actual completion.
model: OpenAI base model to use for training and inference.
temperature: Temperature to use for inference. If None, will use model default.
prefix: Prefix to add before all examples (e.g., some context for the model).
x_formatter: Function to format x for prompting.
y_formatter: Function to format y for prompting.
y_name: Name of y variable in prompt template (e.g., density, value of function, etc.)
x_name: Name of x variable in prompt template (e.g., input, x, etc.). Only appears in inverse prompt
selector_k: What k to use when switching to selection mode. If None, will use all examples
k: Number of examples to use for each prediction.
verbose: Whether to print out debug information.
"""
self._model = model
self._temperature = temperature
self._selector_k = selector_k
self._ready = False
self._ys = []
self.format_x = x_formatter
self.format_y = y_formatter
self._y_name = y_name
self._x_name = x_name
self._prompt_template = prompt_template
self._suffix = suffix
self._prefix = prefix
self._example_count = 0
self._temperature = temperature
self._k = k
self._answer_choices = _answer_choices[:k]
self._calibration_factor = None
self._verbose = verbose
self.tokens_used = 0
self.cos_sim = cos_sim
self.features = features
self.domain = domain
def set_calibration_factor(self, calibration_factor):
self._calibration_factor = calibration_factor
def _setup_llm(self):
# nucleus sampling seems to get more diversity
return self.get_llm(
n=self._k,
best_of=self._k,
temperature=0.1 if self._temperature is None else self._temperature,
model=self._model,
top_p=1.0,
stop=["\n", "###", "#", "##"],
logit_bias={
"198": -100, # new line,
"628": -100, # double new line,
"50256": -100, # endoftext
},
max_tokens=256,
logprobs=1,
)
def _setup_prompt(
self,
example: Dict,
prompt_template: Optional[PromptTemplate] = None,
suffix: Optional[str] = None,
prefix: Optional[str] = None,
) -> FewShotPromptTemplate:
# Create input variables and template
input_variables = list(example.keys())
if self.features:
template = (
f"Q: What is the {self._y_name} of {{{input_variables[0]}}}, given the following properties: "
+ ", ".join([f"{var} is {{{var}}}" for var in input_variables[1:-1]])
+ "?"
+ f"\nA: {{{input_variables[-1]}}}###\n\n "
)
else:
template = f"Q: Given {input_variables[0]}, what is {self._y_name}?\nA: {input_variables[-1]}###\n\n"
# Setup prefix i.e. the background on the task that the LLM will perform
if prefix is None:
if self.domain is None:
prefix = (
"The following are correctly answered questions. "
"Each answer is numeric and ends with ###\n"
)
else:
prefix = (
f"You are an expert {self.domain}. "
"The following are correctly answered questions. "
"Each answer is numeric and ends with ###\n"
"Your task is to answer the question as accurately as possible. "
)
# Setup prompt template i.e. the information the LLM will process for the given problem
if prompt_template is None:
prompt_template = PromptTemplate(
input_variables=input_variables, template=template
)
if suffix is not None:
raise ValueError(
"Cannot provide suffix if using default prompt template."
)
elif self.features:
suffix = (
f"Q: What is the {self._y_name} of {{{input_variables[0]}}} given the following properties: "
+ ", ".join(
[f"{var} is {{{var}}}" for var in input_variables[1:-1]]
)
+ "?"
+ f"\nA: "
)
else:
suffix = (
f"Q: Given {input_variables[0]}, what is the {self._y_name}?\nA: "
)
elif suffix is None:
raise ValueError("Must provide suffix if using custom prompt template.")
# test out prompt
if example is not None:
prompt_template.format(**example)
examples = [example]
else:
examples = []
example_selector = None
if self._selector_k is not None:
# Convert list to be readable
example = {key: str(value) for key, value in example.items()}
if len(examples) == 0:
raise ValueError("Cannot do zero-shot with selector")
if not self.cos_sim:
                example_selector = MaxMarginalRelevanceExampleSelector.from_examples(
                    [example],
                    OpenAIEmbeddings(),
                    FAISS,
                    k=self._selector_k,
                )
else:
                example_selector = SemanticSimilarityExampleSelector.from_examples(
                    [example],
                    OpenAIEmbeddings(),
                    Chroma,
                    k=self._selector_k,
                )
return FewShotPromptTemplate(
examples=examples if example_selector is None else None,
example_prompt=prompt_template,
example_selector=example_selector,
suffix=suffix,
prefix=prefix,
input_variables=input_variables[:-1],
)
def tell(self, example_dict: Dict) -> None:
"""Tell the optimizer about a new example."""
# Add points
self._ys.append(example_dict["Solubility"])
        # stringify every field; format the solubility target with up to 8 decimals, trailing zeros stripped
example_dict = {
key: str(value)
if key != "Solubility"
else f"{value:.8f}".rstrip("0").rstrip(".")
if value != 0
else "0.00"
for key, value in example_dict.items()
}
if not self._ready:
self.prompt = self._setup_prompt(
example_dict, self._prompt_template, self._suffix, self._prefix
)
self.llm = self._setup_llm()
self._ready = True
else:
# in else, so we don't add twice
if self._selector_k is not None:
self.prompt.example_selector.add_example(example_dict)
else:
self.prompt.examples.append(example_dict)
self._example_count += 1
def predict(self, x: Dict) -> Union[tuple[Any, list[str]], Any]:
"""Predict the probability distribution and values for a given x.
Args:
x: The x value(s) to predict.
Returns:
The probability distribution and values for the given x.
"""
if not self._ready:
# special zero-shot
self.prompt = self._setup_prompt(
None, self._prompt_template, self._suffix, self._prefix
)
self.llm = self._setup_llm()
self._ready = True
if self._selector_k is not None:
# have to update this until my PR is merged
self.prompt.example_selector.k = min(self._example_count, 10)
if not isinstance(x, list):
x = {key: str(value) for key, value in x.items()}
queries = [self.prompt.format(**x)]
else:
queries = [
self.prompt.format(**{key: str(value) for key, value in x_i.items()})
for x_i in x
]
results, tokens = self._predict(queries)
self.tokens_used += tokens
# need to replace any GaussDist with pop std
for i, result in enumerate(results):
if len(self._ys) > 1:
ystd = np.std(self._ys)
elif len(self._ys) == 1:
ystd = self._ys[0]
else:
ystd = 10
if isinstance(result, GaussDist):
results[i].set_std(ystd)
if self._calibration_factor:
for i, result in enumerate(results):
if isinstance(result, GaussDist):
results[i].set_std(result.std() * self._calibration_factor)
elif isinstance(result, DiscreteDist):
results[i] = GaussDist(
results[i].mean(),
results[i].std() * self._calibration_factor,
)
        # Return the full list of results, or the single result when only one query was made
if len(results) > 1:
return results, queries
else:
return results[0], queries
def ask(
self,
data,
possible_x: List[str],
_lambda: float = 0.5,
) -> Dict:
"""Ask the optimizer for the next x to try.
Args:
possible_x: List of possible x values to choose from.
_lambda: Lambda value to use for UCB.
Return:
The selected x values, their acquisition function values, and the predicted y modes.
Sorted by acquisition function value (descending)
"""
# Store highest value so far
if len(self._ys) == 0:
best = 0
else:
best = np.max(self._ys)
# Create list of values to query over
possible_x_l = list(possible_x)
        # Calculate results for the two acquisition functions (a random baseline is added in _ask)
aq_fxns = {
"Expected Improvement": expected_improvement,
"Upper Confidence Bound": partial(upper_confidence_bound, _lambda=_lambda),
}
# Obtain results for each acquisition function value
results = self._ask(data, possible_x_l, best, aq_fxns)
        # A random baseline selection is always included alongside the acquisition-function picks
return results
def _tell(self, x: str, y: float, alt_ys: Optional[List[float]] = None) -> Dict:
# implementation of tell
if alt_ys is not None:
if len(alt_ys) != len(self._answer_choices) - 1:
raise ValueError("Must provide 4 alternative ys.")
alt_ys = [self.format_y(alt_y) for alt_y in alt_ys]
else:
alt_ys = []
alt_y = y
for i in range(100):
if len(alt_ys) == len(self._answer_choices) - 1:
break
if i < 50:
alt_y = y * 10 ** np.random.normal(0, 0.2)
else: # try something different
alt_y = y + np.random.uniform(-10, 10)
if self.format_y(alt_y) not in alt_ys and self.format_y(
alt_y
) != self.format_y(y):
alt_ys.append(self.format_y(alt_y))
# choose answer
answer = np.random.choice(self._answer_choices)
example_dict = dict(
x=self.format_x(x),
Answer=answer,
y_name=self._y_name,
)
for a in self._answer_choices:
if a == answer:
example_dict[a] = self.format_y(y)
else:
example_dict[a] = alt_ys.pop()
self._ys.append(y)
inv_example = dict(
x=self.format_x(x),
y_name=self._y_name,
y=self.format_y(y),
x_name=self._x_name,
)
return example_dict, inv_example
def _predict(self, queries: List[str]) -> tuple[Any, Any]:
result, token_usage = self.openai_topk_predict(queries, self.llm, self._verbose)
return result, token_usage
def _ask(
self, data, possible_x: List[str], best: float, aq_fxns: Dict[str, Callable]
) -> Dict:
# Obtain results and queries
results, queries = self.predict(possible_x)
# Calculate acquisition function value
final_results = {}
for aq_fxn_name, aq_fxn in aq_fxns.items():
aq_vals = np.array(
[aq_fxn(r, best) if len(r) > 0 else np.nan for r in results]
)
if aq_fxn_name == "Upper Confidence Bound":
# Check UCB range
target_vals = [
data[
(data["SMILES"] == example["SMILES"])
& (data["SMILES Solvent"] == example["SMILES Solvent"])
]["Solubility"].values[0]
for example in possible_x
]
num_success_bounds = sum(
[
1 if result_range[0] <= target_val <= result_range[1] else 0
for result_range, target_val in zip(aq_vals, target_vals)
]
) / len(possible_x)
# Final acquisition values
aq_vals = aq_vals[:, 1]
# Other acquisition values
aq_vals_cleaned = np.where(
np.isnan(aq_vals),
-np.inf,
np.where(np.isinf(aq_vals), 1e10, aq_vals),
)
selected = np.argmax(aq_vals_cleaned)
final_results[f"{aq_fxn_name}"] = {
"Selected": possible_x[selected],
"Acquisition Values": aq_vals_cleaned,
"Number of points contained in acquisition range": num_success_bounds,
}
if aq_fxn_name == "Expected Improvement":
# Other acquisition values
aq_vals_cleaned = np.where(
np.isnan(aq_vals),
-np.inf,
np.where(np.isinf(aq_vals), 1e10, aq_vals),
)
selected = np.argmax(aq_vals_cleaned)
final_results[f"{aq_fxn_name}"] = {
"Selected": possible_x[selected],
"Acquisition Values": aq_vals_cleaned,
"Number of points contained in acquisition range": "N/A",
}
# Add random
final_results["random"] = {
"Selected": np.random.choice(possible_x),
"Acquisition Values": [0],
"Number of points contained in acquisition range": None,
}
return final_results
| [
", ",
"\nA: {PLACEHOLDER}###\n\n ",
"PLACEHOLDER is {PLACEHOLDER}"
] |
2024-01-10 | coolbeevip/langchain_plantuml | langchain_plantuml~diagram.py | # Copyright 2023 Lei Zhang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from langchain.callbacks.base import BaseCallbackHandler
from langchain_plantuml.plantuml.plantuml_activity_diagram_beta_callback_handler import \
PlantUMLActivityDiagramCallbackHandler
from langchain_plantuml.plantuml.plantuml_sequence_diagram_callback_handler import \
PlantUMLSequenceDiagramCallbackHandler
def activity_diagram_callback(
note_max_length: int = 1000, note_wrap_width: int = 500
) -> BaseCallbackHandler:
return PlantUMLActivityDiagramCallbackHandler(
note_max_length=note_max_length, note_wrap_width=note_wrap_width
)
def sequence_diagram_callback(
note_max_length: int = 1000, note_wrap_width: int = 500
) -> BaseCallbackHandler:
return PlantUMLSequenceDiagramCallbackHandler(
note_max_length=note_max_length, note_wrap_width=note_wrap_width
)
| [] |
2024-01-10 | coolbeevip/langchain_plantuml | examples~example_1.py | # Copyright 2023 Lei Zhang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from langchain import OpenAI, LLMChain, PromptTemplate
from langchain.memory import ConversationBufferMemory
from langchain_plantuml import diagram
from dotenv import load_dotenv
load_dotenv()
template = """You are a chatbot having a conversation with a human.
{chat_history}
Human: {human_input}
Chatbot:"""
prompt = PromptTemplate(
input_variables=["chat_history", "human_input"], template=template
)
memory = ConversationBufferMemory(memory_key="chat_history")
activity_diagram = diagram.activity_diagram_callback(note_max_length=2000)
sequence_diagram = diagram.sequence_diagram_callback(note_max_length=2000)
llm_chain = LLMChain(
llm=OpenAI(),
prompt=prompt,
verbose=True,
memory=memory,
callbacks=[activity_diagram, sequence_diagram]
)
try:
llm_chain.predict(human_input="What did biden say about ketanji brown jackson in the state of the union address?")
finally:
activity_diagram.save_uml_content("example_1_activity-plantuml.puml")
sequence_diagram.save_uml_content("example_1_sequence-plantuml.puml")
| [
"You are a chatbot having a conversation with a human.\n\n{chat_history}\nHuman: {human_input}\nChatbot:",
"chat_history",
"human_input"
] |
2024-01-10 | coolbeevip/langchain_plantuml | langchain_plantuml~plantuml~plantuml_sequence_diagram_callback_handler.py | # Copyright 2023 Lei Zhang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from typing import Any, Dict, List, Optional, Union
from langchain.schema import AgentAction, AgentFinish, LLMResult
from langchain_plantuml.core.plantuml_callback_handler import \
BasePlantUMLCallbackHandler
DEFAULT_SKIN_PARAM = [
"skinparam maxMessageSize 50",
"skinparam roundcorner 20",
"skinparam sequenceArrowThickness 2",
"skinparam ParticipantPadding 20",
]
UML_PARTICIPANTS_FLAG = "-participants-"
class PlantUMLSequenceDiagramCallbackHandler(BasePlantUMLCallbackHandler):
_runs_metrics: dict = {}
def __init__(
self,
color: Optional[str] = None,
skin_param: List[str] = DEFAULT_SKIN_PARAM,
note_max_length: int = 1000,
note_wrap_width: int = 500,
) -> None:
super().__init__(
note_max_length=note_max_length, note_wrap_width=note_wrap_width
)
for param in skin_param:
self.uml_content.append(param)
self.uml_content.append(UML_PARTICIPANTS_FLAG)
self.participants = {}
self.participant_name_indexes = []
self.color = color
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
run_metric = self._get_run_object(serialized=serialized, **kwargs)
activity_name = self._wrapper_sequence_name(
self.on_llm_start.__name__,
run_metric["parent_run_name"],
run_metric["name"],
)
self._append_uml_sequence(
line=activity_name,
activate=True,
participant=run_metric["name"],
color="#A9DCDF",
)
self._append_uml_notes(
align="left", color="#A9DCDF", notes=self._wrapper_note(prompts[0])
)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
run_metric = self._get_run_object(**kwargs)
time_cost = run_metric["end_time"] - run_metric["begin_time"]
self.prompt_tokens += response.llm_output["token_usage"].prompt_tokens
self.completion_tokens += response.llm_output["token_usage"].completion_tokens
self.total_tokens += response.llm_output["token_usage"].total_tokens
activity_name = self._wrapper_sequence_name(
method_name=self.on_llm_end.__name__,
parent_run_name=run_metric["name"],
run_name=run_metric["parent_run_name"],
message=f"Time cost: {time_cost:.2f}s",
)
self._append_uml_sequence(
line=activity_name, activate=False, participant=run_metric["name"]
)
for chats in response.generations:
for chat in chats:
self._append_uml_notes(
align="right", color="#A9DCDF", notes=self._wrapper_note(chat.text)
)
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
run_metric = self._get_run_object(**kwargs)
activity_name = self._wrapper_sequence_name(
self.on_llm_new_token.__name__,
run_metric["name"],
run_metric["parent_run_name"],
)
self._append_uml_sequence(
line=activity_name, activate=True, participant=run_metric["name"]
)
self._append_uml_notes(
align="right", color="#FEFECE", notes=self._wrapper_note(token)
)
def on_llm_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
run_metric = self._get_run_object(**kwargs)
activity_name = self._wrapper_sequence_name(
self.on_llm_error.__name__,
run_metric["name"],
run_metric["parent_run_name"],
"#red",
)
self._append_uml_sequence(
line=activity_name, activate=False, participant=run_metric["name"]
)
self._append_uml_notes(
align="right", color="#red", notes=self._wrapper_note(str(error))
)
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
run_metric = self._get_run_object(serialized, **kwargs)
activity_name = self._wrapper_sequence_name(
self.on_chain_start.__name__,
run_metric["parent_run_name"],
run_metric["name"],
)
self._append_uml_sequence(
line=activity_name, activate=True, participant=run_metric["name"]
)
self._append_uml_notes(
align="left", color="#FEFECE", notes=self._wrapper_note(str(inputs))
)
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
run_metric = self._get_run_object(**kwargs)
activity_name = self._wrapper_sequence_name(
self.on_chain_end.__name__,
run_metric["name"],
run_metric["parent_run_name"],
)
self._append_uml_sequence(
line=activity_name, activate=False, participant=run_metric["name"]
)
self._append_uml_notes(
align="right", color="#A9DCDF", notes=self._wrapper_note(str(outputs))
)
def on_chain_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
run_metric = self._get_run_object(**kwargs)
activity_name = self._wrapper_sequence_name(
self.on_chain_error.__name__,
run_metric["name"],
run_metric["parent_run_name"],
"#red",
)
self._append_uml_sequence(
line=activity_name, activate=False, participant=run_metric["name"]
)
self._append_uml_notes(
align="right", color="#red", notes=self._wrapper_note(str(error))
)
def on_agent_action(
self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
) -> Any:
run_metric = self._get_run_object(**kwargs)
if "parent_run_name" in run_metric:
activity_name = self._wrapper_sequence_name(
self.on_agent_action.__name__,
run_metric["parent_run_name"],
run_metric["name"],
)
self._append_uml_sequence(
line=activity_name, activate=True, participant=run_metric["name"]
)
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
**kwargs: Any,
) -> None:
        run_metric = self._get_run_object(serialized, **kwargs)
activity_name = self._wrapper_sequence_name(
self.on_tool_start.__name__,
run_metric["parent_run_name"],
run_metric["name"],
)
self._append_uml_sequence(
line=activity_name,
activate=True,
participant=run_metric["name"],
color="#orange",
)
self._append_uml_notes(
align="left", color="#FEFECE", notes=self._wrapper_note(input_str)
)
def on_tool_end(
self,
output: str,
color: Optional[str] = None,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
run_metric = self._get_run_object(**kwargs)
activity_name = self._wrapper_sequence_name(
self.on_tool_end.__name__, run_metric["name"], run_metric["parent_run_name"]
)
self._append_uml_sequence(
line=activity_name, activate=False, participant=run_metric["name"]
)
self._append_uml_notes(
align="right", color="#A9DCDF", notes=self._wrapper_note(output)
)
def on_tool_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
run_metric = self._get_run_object(**kwargs)
activity_name = self._wrapper_sequence_name(
self.on_tool_end.__name__,
run_metric["name"],
run_metric["parent_run_name"],
"#red",
)
self._append_uml_sequence(
line=activity_name, activate=False, participant=run_metric["name"]
)
self._append_uml_notes(
align="right", color="#red", notes=self._wrapper_note(str(error))
)
def on_text(
self,
text: Any,
color: Optional[str] = None,
end: str = "",
**kwargs: Any,
) -> None:
run_metric = self._get_run_object(**kwargs)
if isinstance(text, list):
activity_name = self._wrapper_sequence_name(
self.on_text.__name__, run_metric["parent_run_name"], run_metric["name"]
)
self._append_uml_sequence(activity_name)
self._append_uml_notes(
align="left",
color="#FEFECE",
notes=[
f"Step{index}.{step.value}\n" for index, step in enumerate(text)
],
)
def on_agent_finish(
self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
) -> None:
run_metric = self._get_run_object(**kwargs)
activity_name = self._wrapper_sequence_name(
self.on_agent_finish.__name__,
run_metric["name"],
run_metric["parent_run_name"],
)
self._append_uml_sequence(
line=activity_name, activate=False, participant=run_metric["name"]
)
def export_uml_content(self) -> List[str]:
new_uml_content = []
for line in self.uml_content:
if line == UML_PARTICIPANTS_FLAG:
for participant_name in self.participant_name_indexes:
new_uml_content.append(self.participants[participant_name])
else:
new_uml_content.append(line)
new_uml_content.append("note left")
new_uml_content.append(
f"* prompt_tokens: {self.prompt_tokens} \n"
f"* completion_tokens: {self.completion_tokens} \n"
f"* total_tokens: {self.total_tokens}"
)
new_uml_content.append("end note")
new_uml_content.append("@enduml")
return new_uml_content
def _get_run_object(self, serialized: Dict[str, Any] = None, **kwargs: Any) -> Dict:
run_id = str(kwargs["run_id"])
if run_id not in self._runs_metrics:
self._runs_metrics[run_id] = {}
if "begin_time" not in self._runs_metrics[run_id]:
self._runs_metrics[run_id]["begin_time"] = time.time()
else:
self._runs_metrics[run_id]["end_time"] = time.time()
if kwargs["parent_run_id"] is not None:
parent_run_id = str(kwargs["parent_run_id"])
self._runs_metrics[run_id]["parent_run_id"] = parent_run_id
self._runs_metrics[run_id]["parent_run_name"] = self._runs_metrics[
parent_run_id
]["name"]
else:
self._runs_metrics[run_id]["parent_run_id"] = "Human"
self._runs_metrics[run_id]["parent_run_name"] = "Human"
if serialized is not None:
run_name = (
serialized.get("name")
if serialized.get("name") is not None
else serialized["id"][len(serialized["id"]) - 1]
)
self._runs_metrics[run_id]["name"] = run_name.replace(" ", "_")
return self._runs_metrics[run_id]
def _append_uml_sequence(
self,
line,
activate: bool = False,
participant: str = None,
color: str = "#FEFECE",
):
self.uml_content.append(line)
if activate:
self.uml_content.append(f'activate "{participant}" {color}')
else:
self.uml_content.append(f'deactivate "{participant}"')
self.step += 1
def _append_uml_notes(
self, align: str = "left", notes: List[str] = [], color: str = ""
):
if len(notes) > 0:
self._append_uml_line(f"note {align} {color}")
self._append_uml_multi_line(notes)
self._append_uml_line("end note")
    def _wrapper_sequence_name(
        self,
        method_name: str,
        parent_run_name: str,
        run_name: str,
        color: Optional[str] = None,
        message: str = "",
    ) -> str:
        emoji = self.emojis.get(method_name, "")
        if parent_run_name not in self.participants:
            self.participants[
                parent_run_name
            ] = f'participant "{emoji} {parent_run_name}" as {parent_run_name}'
            self.participant_name_indexes.append(parent_run_name)
        if run_name not in self.participants:
            self.participants[
                run_name
            ] = f'participant "{emoji} {run_name}" as {run_name}'
            self.participant_name_indexes.append(run_name)
        # PlantUML colored arrows use the "-[#color]->" syntax; plain arrows fall back to "-->".
        arrow = f"-[{color}]->" if color is not None else "-->"
        return f'"{parent_run_name}" {arrow} "{run_name}": {self.step} {message}'
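# Illustrative output of _wrapper_sequence_name above (a sketch; the participant names
# "AgentExecutor" and "LLMChain" are made up for the example):
#   with color="#red":   "AgentExecutor" -[#red]-> "LLMChain": 7
#   without a color:     "AgentExecutor" --> "LLMChain": 7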
| [] |
2024-01-10 | coolbeevip/langchain_plantuml | langchain_plantuml~plantuml~plantuml_activity_diagram_beta_callback_handler.py | # Copyright 2023 Lei Zhang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from typing import Any, Dict, List, Optional, Union
from langchain.schema import AgentAction, AgentFinish, LLMResult
from langchain_plantuml.core.plantuml_callback_handler import \
BasePlantUMLCallbackHandler
DEFAULT_SKIN_PARAM = [
"skinparam activityFontName Arial",
"skinparam activityFontSize 10",
"skinparam activityBorderThickness 1",
"skinparam activityShadowing true",
"skinparam ArrowHeadColor none",
]
class PlantUMLActivityDiagramCallbackHandler(BasePlantUMLCallbackHandler):
_runs_metrics: dict = {}
def __init__(
self,
color: Optional[str] = None,
skin_param: List[str] = DEFAULT_SKIN_PARAM,
note_max_length: int = 1000,
note_wrap_width: int = 500,
) -> None:
        super().__init__(
            note_max_length=note_max_length, note_wrap_width=note_wrap_width
        )
        # Use a per-instance dict so separate handler instances do not share run
        # metrics through the class-level attribute.
        self._runs_metrics = {}
        for param in skin_param:
            self.uml_content.append(param)
        self.uml_content.append("start")
        self.color = color
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
run_metric = self._get_run_object(serialized=serialized, **kwargs)
activity_name = self._wrapper_activity_name(
self.on_llm_start.__name__,
f'{run_metric["name"]}({kwargs["invocation_params"]["model_name"]})',
)
self._append_uml_activity(activity_name)
self._append_uml_notes(align="left", notes=self._wrapper_note(prompts[0]))
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
run_metric = self._get_run_object(**kwargs)
time_cost = run_metric["end_time"] - run_metric["begin_time"]
self.prompt_tokens += response.llm_output["token_usage"].prompt_tokens
self.completion_tokens += response.llm_output["token_usage"].completion_tokens
self.total_tokens += response.llm_output["token_usage"].total_tokens
activity_name = self._wrapper_activity_name(
self.on_llm_end.__name__,
f'{run_metric["name"]}\n'
f"\n* time {time_cost:.2f}s "
f'\n* prompt_tokens {response.llm_output["token_usage"].prompt_tokens} '
f'\n* completion_tokens {response.llm_output["token_usage"].completion_tokens} '
f'\n* total_tokens {response.llm_output["token_usage"].total_tokens};',
)
self._append_uml_activity(activity_name)
for chats in response.generations:
for chat in chats:
self._append_uml_notes(
align="right", notes=self._wrapper_note(chat.text)
)
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
run_metric = self._get_run_object(**kwargs)
activity_name = self._wrapper_activity_name(
self.on_llm_new_token.__name__, run_metric["name"]
)
self._append_uml_activity(activity_name)
self._append_uml_notes(align="right", notes=self._wrapper_note(token))
def on_llm_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
run_metric = self._get_run_object(**kwargs)
activity_name = self._wrapper_activity_name(
self.on_llm_error.__name__, run_metric["name"]
)
self._append_uml_activity(activity_name)
self._append_uml_notes(align="right", notes=self._wrapper_note(str(error)))
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
run_metric = self._get_run_object(serialized, **kwargs)
activity_name = self._wrapper_activity_name(
self.on_chain_start.__name__, run_metric["name"]
)
self._append_uml_activity(activity_name)
self._append_uml_notes(align="left", notes=self._wrapper_note(str(inputs)))
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
run_metric = self._get_run_object(**kwargs)
activity_name = self._wrapper_activity_name(
self.on_chain_end.__name__, run_metric["name"]
)
self._append_uml_activity(activity_name)
self._append_uml_notes(align="right", notes=self._wrapper_note(str(outputs)))
def on_chain_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
run_metric = self._get_run_object(**kwargs)
activity_name = self._wrapper_activity_name(
self.on_chain_error.__name__, run_metric["name"]
)
self._append_uml_activity(activity_name)
self._append_uml_notes(align="right #red", notes=self._wrapper_note(str(error)))
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
**kwargs: Any,
) -> None:
run_metric = self._get_run_object(serialized, **kwargs)
activity_name = self._wrapper_activity_name(
self.on_tool_start.__name__, run_metric["name"]
)
self._append_uml_activity(activity_name)
self._append_uml_notes(align="left", notes=self._wrapper_note(input_str))
def on_agent_action(
self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
) -> Any:
run_metric = self._get_run_object(**kwargs)
if kwargs["parent_run_id"] is not None:
activity_name = self._wrapper_activity_name(
self.on_agent_action.__name__, run_metric["name"]
)
self._append_uml_activity(activity_name)
def on_tool_end(
self,
output: str,
color: Optional[str] = None,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
run_metric = self._get_run_object(**kwargs)
activity_name = self._wrapper_activity_name(
self.on_tool_end.__name__, run_metric["name"]
)
self._append_uml_activity(activity_name)
self._append_uml_notes(align="right", notes=self._wrapper_note(output))
def on_tool_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
"""Do nothing."""
pass
def on_text(
self,
text: Any,
color: Optional[str] = None,
end: str = "",
**kwargs: Any,
) -> None:
run_metric = self._get_run_object(**kwargs)
if isinstance(text, list):
activity_name = self._wrapper_activity_name(
self.on_text.__name__, run_metric["name"]
)
self._append_uml_activity(activity_name)
self._append_uml_notes(
align="left",
notes=[
f"Step{index}.{step.value}\n" for index, step in enumerate(text)
],
)
def on_agent_finish(
self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
) -> None:
run_metric = self._get_run_object(**kwargs)
activity_name = self._wrapper_activity_name(
self.on_agent_finish.__name__, run_metric["name"]
)
self._append_uml_activity(activity_name)
def export_uml_content(self) -> List[str]:
self.uml_content.append("stop")
self.uml_content.append("note right")
self.uml_content.append(
f"* prompt_tokens: {self.prompt_tokens} \n"
f"* completion_tokens: {self.completion_tokens} \n"
f"* total_tokens: {self.total_tokens}"
)
self.uml_content.append("end note")
self.uml_content.append("@enduml")
return self.uml_content
    def _get_run_object(self, serialized: Optional[Dict[str, Any]] = None, **kwargs: Any) -> Dict:
run_id = str(kwargs["run_id"])
if run_id not in self._runs_metrics:
self._runs_metrics[run_id] = {}
if "begin_time" not in self._runs_metrics[run_id]:
self._runs_metrics[run_id]["begin_time"] = time.time()
else:
self._runs_metrics[run_id]["end_time"] = time.time()
if serialized is not None:
run_name = (
serialized.get("name")
if serialized.get("name") is not None
else serialized["id"][len(serialized["id"]) - 1]
)
self._runs_metrics[run_id]["name"] = run_name
return self._runs_metrics[run_id]
def _append_uml_activity(self, line):
self.uml_content.append(line)
self.step += 1
    def _append_uml_notes(self, align: str = "left", notes: Optional[List[str]] = None):
        if notes:
self._append_uml_line(f"note {align}")
self._append_uml_multi_line(notes)
self._append_uml_line("end note")
def _wrapper_activity_name(self, method_name: str, run_name: str) -> str:
return f':{self.step}. {self.emojis[method_name] if method_name in self.emojis else ""} {run_name};'
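# Hedged usage sketch (not part of the original module; "llm_chain" stands in for any
# LangChain chain or agent): the handler is attached through the callbacks argument and
# the diagram is written out afterwards, e.g.
#   handler = PlantUMLActivityDiagramCallbackHandler(note_max_length=2000)
#   llm_chain.run("some input", callbacks=[handler])
#   handler.save_uml_content("activity-diagram.puml")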
| [] |
2024-01-10 | coolbeevip/langchain_plantuml | langchain_plantuml~core~plantuml_callback_handler.py | # Copyright 2023 Lei Zhang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import List
from langchain.callbacks.base import BaseCallbackHandler
class BasePlantUMLCallbackHandler(BaseCallbackHandler, ABC):
crlf: str = "⏎"
note_max_length: int = 1000
note_wrap_width: int = 500
emojis = {
"on_llm_start": "<:1f916:>",
"on_llm_end": "<:1f916:>",
"on_chain_start": "<:1f3af:>",
"on_chain_end": "<:1f3af:>",
"on_tool_start": "<:1f528:>",
"on_tool_end": "<:1f528:>",
"on_text": "<:1f4c6:>",
}
def __init__(self, note_max_length: int = 1000, note_wrap_width: int = 500):
self.note_wrap_width = note_wrap_width
self.note_max_length = note_max_length
self.step = 0
self.total_tokens = 0
self.prompt_tokens = 0
self.completion_tokens = 0
self.uml_content = []
self.uml_content.append("@startuml")
self.uml_content.append("skinparam dpi 300")
self.uml_content.append(f"skinparam wrapWidth {self.note_wrap_width}")
self.uml_content.append("skinparam shadowing false")
self.uml_content.append("skinparam noteFontName Arial")
self.uml_content.append("skinparam noteFontSize 10")
self.uml_content.append("skinparam noteBackgroundColor #ECECEC")
self.uml_content.append("skinparam noteBorderColor #C0C0C0")
self.uml_content.append("skinparam noteFontColor #333333")
self.uml_content.append("skinparam noteBorderThickness 0")
self.uml_content.append("skinparam noteShadowing false")
self.uml_content.append("skinparam noteArrow none")
@abstractmethod
def export_uml_content(self) -> List[str]:
pass
def save_uml_content(self, file_path: str):
with open(file_path, "w") as f:
for line in self.export_uml_content():
f.write(str(line) + "\n")
def _append_uml_line(self, line):
self.uml_content.append(line)
def _append_uml_multi_line(self, lines: List[str]):
for line in lines:
self.uml_content.append(line)
def _wrapper_note(self, note: str) -> List[str]:
new_note = note.strip()
if len(new_note) > self.note_max_length:
new_note = f"{new_note[:self.note_max_length]} ... (Omit {len(new_note) - self.note_max_length} words)"
new_notes = [f"{line}{self.crlf}" for line in new_note.split("\n")]
wrap_notes = [
word
for phrase in new_notes
for word in (
[
phrase[i : i + self.note_wrap_width]
for i in range(0, len(phrase), self.note_wrap_width)
]
if len(phrase) > self.note_wrap_width
else [phrase]
)
]
return wrap_notes
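# Illustrative behaviour of _wrapper_note above (a sketch): a 1200-character note with
# note_max_length=1000 is cut to its first 1000 characters followed by
# " ... (Omit 200 words)", the "⏎" marker is appended to each original line, and every
# line is then split into chunks of at most note_wrap_width characters.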
| [] |
2024-01-10 | coolbeevip/langchain_plantuml | examples~example_2.py | # Copyright 2023 Lei Zhang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List
from langchain.agents import initialize_agent, AgentType
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import TextLoader, WebBaseLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.tools import Tool
from langchain.vectorstores import Chroma
from langchain_plantuml import diagram
from langchain_plantuml.core.plantuml_callback_handler import (
BasePlantUMLCallbackHandler,
)
from dotenv import load_dotenv
load_dotenv()
# Define an Agent
class MyAgent:
def __init__(self):
llm = ChatOpenAI(model_name="gpt-3.5-turbo-0613")
"""Create the state_of_union Vectorstore"""
current_path = os.path.abspath(os.path.dirname(__file__))
doc_path = os.path.join(current_path, "state_of_the_union.txt")
loader = TextLoader(doc_path)
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
docsearch = Chroma.from_documents(
texts, embeddings, collection_name="state-of-union"
)
state_of_union = RetrievalQA.from_chain_type(
llm=llm, chain_type="stuff", retriever=docsearch.as_retriever()
)
"""Create the ruff Vectorstore"""
loader = WebBaseLoader("https://beta.ruff.rs/docs/faq/")
docs = loader.load()
ruff_texts = text_splitter.split_documents(docs)
ruff_db = Chroma.from_documents(ruff_texts, embeddings, collection_name="ruff")
ruff = RetrievalQA.from_chain_type(
llm=llm, chain_type="stuff", retriever=ruff_db.as_retriever()
)
"""Create the Agent"""
tools = [
Tool(
name="State of Union QA System",
func=state_of_union.run,
description="useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question.",
),
Tool(
name="Ruff QA System",
func=ruff.run,
description="useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question.",
),
]
self.agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION
)
def run(self, question: str, callbacks: List[BasePlantUMLCallbackHandler]):
self.agent.run(question, callbacks=callbacks)
# Run the Agent
agent = MyAgent()
activity_diagram = diagram.activity_diagram_callback(note_max_length=2000)
sequence_diagram = diagram.sequence_diagram_callback(note_max_length=2000)
question = "What did biden say about ketanji brown jackson in the state of the union address?"
try:
agent.run(question=question, callbacks=[activity_diagram, sequence_diagram])
finally:
activity_diagram.save_uml_content("example_2_activity-plantuml.puml")
sequence_diagram.save_uml_content("example_2_sequence-plantuml.puml")
| [] |
2024-01-10 | radi-cho/datasetGPT | src~datasetGPT~texts.py | from dataclasses import dataclass, field
from typing import List, Any, Dict, Tuple, Union
from langchain.prompts import PromptTemplate
from langchain.llms import BaseLLM
from langchain.chains import LLMChain
from .base import DatasetGenerator
OPTIONS_CONFIG_KEYS = ["backend", "max_length", "temperature"]
GENERATOR_CONFIG_KEYS = ["backends", "max_lengths", "temperatures"]
@dataclass
class TextsGeneratorConfig:
prompt: str
"""Text prompt."""
backends: List[Tuple[str, str, str]]
"""LLM APIs to use as backends."""
num_samples: int = 1
"""Number of texts to generate for each options combination."""
max_lengths: List[int] = field(default_factory=lambda: [5])
"""Maximum lengths in tokens for the output of each generation."""
temperatures: List[float] = field(default_factory=lambda: [0])
"""Possible temperatures for the backend LLM."""
options: List[Tuple[str, str]] = field(default_factory=lambda: [])
"""Additional options defined in the system prompts with curly brackets."""
class TextsGenerator(DatasetGenerator):
"""Generator producing texts by varying model parameters and prompt options."""
config: TextsGeneratorConfig
"""Configuration for a TextsGenerator."""
def __init__(self, config: TextsGeneratorConfig) -> None:
"""Initialize TextsGenerator."""
super().__init__(config)
def initialize_options_configs(
self,
options_config_keys: List[str] = OPTIONS_CONFIG_KEYS,
generator_config_keys: List[str] = GENERATOR_CONFIG_KEYS
) -> None:
"""Prepare options combinations."""
super().initialize_options_configs(options_config_keys, generator_config_keys)
def initialize_backend(self, text_config: Dict[str, Any]) -> BaseLLM:
"""Initialize a specific LLM."""
backend_str = text_config["backend"]
temperature = text_config["temperature"]
max_length = text_config["max_length"]
backend, model = backend_str.split("|")
if backend.lower() == "openai":
from langchain.llms import OpenAI
llm = OpenAI(model_name=model,
temperature=temperature,
max_tokens=max_length)
elif backend.lower() == "cohere":
from langchain.llms import Cohere
llm = Cohere(model=model,
temperature=temperature,
max_tokens=max_length)
elif backend.lower() == "petals":
from langchain.llms import Petals
llm = Petals(model_name=model,
temperature=temperature,
max_new_tokens=max_length)
else:
raise ValueError("Cannot use the specified backend.")
return llm
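    # Illustrative backend identifiers accepted above (a sketch derived from the
    # split("|") parsing; the model names are examples, not an exhaustive list):
    #   "openai|text-davinci-003", "cohere|command", "petals|bigscience/bloom-petals"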
def generate_item(self) -> Dict[str, Union[List[List[Any]], float, int]]:
"""Produce text with a LLM Chain."""
if self.generator_index >= len(self.options_configs):
raise StopIteration()
text_config = self.options_configs[self.generator_index]
self.generator_index += 1
input_variables = text_config.keys() - ["sample_id",
"backend",
"temperature",
"max_length"]
prompt_template = PromptTemplate(template=self.config.prompt,
input_variables=input_variables)
llm = self.initialize_backend(text_config)
prompt_params = {k: text_config[k] for k in input_variables}
input_prompt = prompt_template.format(**prompt_params)
chain = LLMChain(prompt=prompt_template, llm=llm)
output = chain.predict(**prompt_params)
return {**text_config,
"prompt": input_prompt,
"output": output}
| [
"max_length",
"backend",
"temperature"
] |
2024-01-10 | MKrale/ATM | AM_Gyms~frozen_lake_v2.py | from contextlib import closing
from io import StringIO
from os import path
from typing import List, Optional
import numpy as np
from gym import Env, spaces, utils
from gym.envs.toy_text.utils import categorical_sample
from gym.error import DependencyNotInstalled
LEFT = 0
DOWN = 1
RIGHT = 2
UP = 3
MAPS = {
"4x4": ["SFFF", "FHFH", "FFFH", "HFFG"],
"8x8": [
"SFFFFFFF",
"FFFFFFFF",
"FFFHFFFF",
"FFFFFHFF",
"FFFHFFFF",
"FHHFFFHF",
"FHFFHFHF",
"FFFHFFFG",
],
}
def is_valid(board: List[List[str]], max_size: int) -> bool:
frontier, discovered = [], set()
frontier.append((0, 0))
while frontier:
r, c = frontier.pop()
        if (r, c) not in discovered:
discovered.add((r, c))
directions = [(1, 0), (0, 1), (-1, 0), (0, -1)]
for x, y in directions:
r_new = r + x
c_new = c + y
if r_new < 0 or r_new >= max_size or c_new < 0 or c_new >= max_size:
continue
if board[r_new][c_new] == "G":
return True
if board[r_new][c_new] != "H":
frontier.append((r_new, c_new))
return False
def generate_random_map(size: int = 8, p: float = 0.8) -> List[str]:
"""Generates a random valid map (one that has a path from start to goal)
Args:
size: size of each side of the grid
p: probability that a tile is frozen
Returns:
A random valid map
"""
valid = False
board = [] # initialize to make pyright happy
while not valid:
p = min(1, p)
board = np.random.choice(["F", "H"], (size, size), p=[p, 1 - p])
board[0][0] = "S"
board[-1][-1] = "G"
valid = is_valid(board, size)
return ["".join(x) for x in board]
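# Hedged usage sketch (not part of the original module): a generated map can be passed
# straight to the environment defined below, e.g.
#   desc = generate_random_map(size=6, p=0.9)
#   env = FrozenLakeEnv_v2(desc=desc, is_slippery=True)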
class FrozenLakeEnv_v2(Env):
"""
This is a variant on the Frozen Lake environment from OpenAI.
A complete description on the original evironment can be found at https://www.gymlibrary.ml/environments/toy_text/frozen_lake/
In this variant, behavious of 'slippery' environments is slighly altered:
Instead of the 3 possibilities in the original, a step in some direction now has
a 1/2 chance of going to that spot, and a 1/2 chance to taking 2 steps in that direction.
In case of the latter, if the space that gets 'skipped' is a hole the run terminates as though
the current state is a hole.
Also, if going 2 spaces would result in going outside the playingfield, the chance of going forward one space becomes 1.
(Also, some options and rendering functions in the original have been removed from this version.)
"""
metadata = {
"render_modes": ["human", "ansi", "rgb_array", "single_rgb_array"],
"render_fps": 4,
}
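    # Worked example of the transition rule described in the class docstring
    # (illustrative): with is_slippery=True, stepping RIGHT from a square whose
    # right-hand neighbour is neither a hole nor the goal gives
    # P(advance one square) = 1/2 and P(advance two squares) = 1/2; if that neighbour
    # is a hole or the goal, the single step is taken with probability 1.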
def __init__(
self,
render_mode: Optional[str] = None,
desc=None,
map_name="4x4",
is_slippery=True,
):
        if desc is None:
desc = MAPS[map_name]
self.desc = desc = np.asarray(desc, dtype="c")
self.nrow, self.ncol = nrow, ncol = desc.shape
self.reward_range = (0, 1)
self.is_slippery = is_slippery
nA = 4
nS = nrow * ncol
self.initial_state_distrib = np.array(desc == b"S").astype("float64").ravel()
self.initial_state_distrib /= self.initial_state_distrib.sum()
self.P = {s: {a: [] for a in range(nA)} for s in range(nS)}
def to_s(row, col):
return row * ncol + col
def inc(row, col, a):
if a == LEFT:
col = max(col - 1, 0)
elif a == DOWN:
row = min(row + 1, nrow - 1)
elif a == RIGHT:
col = min(col + 1, ncol - 1)
elif a == UP:
row = max(row - 1, 0)
return (row, col)
def update_probability_matrix(row, col, action):
newrow, newcol = inc(row, col, action)
newstate = to_s(newrow, newcol)
newletter = desc[newrow, newcol]
terminated = bytes(newletter) in b"GH"
reward = float(newletter == b"G")
return newstate, reward, terminated
for row in range(nrow):
for col in range(ncol):
s = to_s(row, col)
for a in range(4):
li = self.P[s][a]
letter = desc[row, col]
# If this state is goal or Hole, we do this (?)
if letter in b"GH":
li.append((1.0, *update_probability_matrix(row, col, a)))
else:
if is_slippery:
(row_2, col_2) = inc(row,col,a)
letter2 = desc[row_2, col_2]
# If next state a hole or goal, we always go there
if letter2 in b"GH":
li.append((1.0, *update_probability_matrix(row, col, a)))
# if not, we have a 50/50 chance to either take 1 or two steps
else:
li.append(( 1.0 / 2.0, *update_probability_matrix(row, col, a) ))
li.append(( 1.0 / 2.0, *update_probability_matrix(row_2, col_2, a) ))
else:
li.append((1.0, *update_probability_matrix(row, col, a)))
self.observation_space = spaces.Discrete(nS)
self.action_space = spaces.Discrete(nA)
# pygame utils
self.window_size = (min(64 * ncol, 512), min(64 * nrow, 512))
self.cell_size = (
self.window_size[0] // self.ncol,
self.window_size[1] // self.nrow,
)
self.window_surface = None
self.clock = None
self.hole_img = None
self.cracked_hole_img = None
self.ice_img = None
self.elf_images = None
self.goal_img = None
self.start_img = None
def step(self, a):
transitions = self.P[self.s][a]
i = categorical_sample([t[0] for t in transitions], self.np_random)
p, s, r, t = transitions[i]
self.s = s
self.lastaction = a
return (int(s), r, t, (False, {"prob": p}))
def reset(
self,
*,
seed: Optional[int] = None,
return_info: bool = False,
options: Optional[dict] = None,
):
super().reset(seed=seed)
self.s = categorical_sample(self.initial_state_distrib, self.np_random)
self.lastaction = None
if not return_info:
return int(self.s)
else:
return int(self.s), {"prob": 1}
def set_state(self,s):
self.s = s
def getname(self):
if self.is_slippery:
variant_name = "semi-slippery"
else:
variant_name = "det"
return "Lake_{}_{}".format(self.nrow, variant_name)
# Elf and stool from https://franuka.itch.io/rpg-snow-tileset
# All other assets by Mel Tillery http://www.cyaneus.com/ | [] |
2024-01-10 | SimonB97/BG3Chat | bg3_chat.py | """
BG3Chat.py
This module contains the implementation of a chatbot for the Baldur's Gate 3 Wiki.
The chatbot uses the Langchain library to scrape the wiki, build an index of the content,
and generate responses to user queries based on the indexed content.
The chatbot is designed to be used with the Streamlit library for a user-friendly interface.
It also uses the OpenAI API for generating responses and the BeautifulSoup library for web scraping.
The chatbot's functionality includes:
- Scraping the Baldur's Gate 3 Wiki
- Building an index of the scraped content
- Generating responses to user queries based on the indexed content
- Displaying the chatbot interface using Streamlit
"""
import os
import re
import requests
import streamlit as st
from langchain.callbacks import StreamlitCallbackHandler
from langchain.vectorstores import FAISS
from langchain.document_loaders import RecursiveUrlLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chat_models import ChatOpenAI
from langchain.schema import SystemMessage
from langchain.agents.agent_toolkits import create_conversational_retrieval_agent
from langchain.schema import BaseRetriever
from langchain.tools import Tool
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import StreamlitChatMessageHistory
from langchain.prompts import PromptTemplate
from langchain.chains.summarize import (
_load_stuff_chain, _load_map_reduce_chain, _load_refine_chain
)
from langchain.chains import create_tagging_chain
from langsmith import Client
from openai import InvalidRequestError
from bs4 import BeautifulSoup as Soup
from dotenv import load_dotenv
import prompts
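# Typical launch command (an assumption based on the Streamlit interface used below,
# not stated in this file): streamlit run bg3_chat.py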
# Langsmith (only for tracing)
ENABLE_TRACING = "False"
if ENABLE_TRACING == "True":
load_dotenv()
client = Client()
# URL to scrape
URL = 'https://bg3.wiki/'
# turn url into indexname (remove special characters)
indexname = re.sub('[^a-zA-Z0-9]', '_', URL)
msgs = StreamlitChatMessageHistory()
memory = ConversationBufferMemory(
memory_key="chat_history", chat_memory=msgs, return_messages=True)
if len(msgs.messages) == 0:
msgs.add_ai_message("How can I help you?")
# Page title
st.set_page_config(page_title="🏰🔮 BG3Chat")
st.sidebar.title("🏰🔮 BG3Chat")
def scrape_url(link):
"""
This function scrapes the content of a given URL.
Parameters:
link (str): The URL to be scraped.
Returns:
cleaned_text (str): The scraped and cleaned text from the URL.
"""
print(f"Scraping {link}...")
response = requests.get(link, timeout=10)
content_type = response.headers['content-type']
parser = "xml" if "xml" in content_type else "html.parser"
loader = RecursiveUrlLoader(
url=link,
extractor=lambda x: Soup(x, parser).text,
prevent_outside=True,
max_depth=1
)
docs = loader.load()
# Combine docs
combined_docs = [doc.page_content for doc in docs]
text = " ".join(combined_docs)
# Clean text
cleaned_text = re.sub('\n{3,}', '\n\n', text)
# Remove non-ASCII characters
cleaned_text = re.sub(r'[^\x00-\x7F]+', '', cleaned_text)
# save text to file
with open(f"scraped_text_{indexname}.txt", 'w', encoding='utf-8') as file:
file.write(cleaned_text)
return cleaned_text
def build_index(scraped_text: str):
"""
This function builds an index from the scraped text.
Parameters:
    scraped_text (str): The scraped and cleaned text from the URL.
Returns:
database (FAISS): The built index from the text.
"""
print("Building index...")
# split text into chunks
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=1200, chunk_overlap=120)
splits = text_splitter.split_text(scraped_text)
# build index
index_embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
database = FAISS.from_texts(splits, index_embeddings)
database.save_local(indexname)
return database
def create_retriever_tool(
llm: ChatOpenAI, retriever: BaseRetriever, name: str, description: str
) -> Tool:
"""
This function creates a tool for retrieving and combining documents.
Parameters:
llm (ChatOpenAI): The language model used for combining documents.
retriever (BaseRetriever): The retriever used to get relevant documents.
name (str): The name of the tool.
description (str): The description of the tool.
Returns:
Tool: The created tool for retrieving and combining documents.
"""
if CHAIN_TYPE == "stuff":
summarize_chain = _load_stuff_chain(llm, verbose=True)
elif CHAIN_TYPE == "map-reduce":
map_prompt_template = prompts.MAPREDUCE_PROMPT_TEMPLATE
map_prompt = PromptTemplate.from_template(
template=map_prompt_template
)
summarize_chain = _load_map_reduce_chain(
llm,
map_prompt=map_prompt,
combine_prompt=map_prompt,
verbose=True
)
elif CHAIN_TYPE == "refine":
question_prompt_template = prompts.QUESTION_PROMPT_TEMPLATE
question_prompt = PromptTemplate.from_template(
template=question_prompt_template
)
refine_prompt_template = prompts.REFINE_PROMPT_TEMPLATE
refine_prompt = PromptTemplate.from_template(
template=refine_prompt_template
)
summarize_chain = _load_refine_chain(llm, question_prompt, refine_prompt, verbose=True)
else:
raise ValueError(f"Unknown chain type {CHAIN_TYPE}")
def retrieve_and_combine_documents(query):
if CHAIN_TYPE == "stuff":
documents = retriever.get_relevant_documents(query)
return summarize_chain.run(documents)
documents = retriever.get_relevant_documents(query)
return summarize_chain.run(question=query, input_documents=documents)
return Tool(
name=name, description=description, func=retrieve_and_combine_documents
)
def create_agent(vectordb):
"""
This function creates an agent for retrieving and generating responses.
Parameters:
vectordb (FAISS): The built index from the text.
Returns:
agent_executor (AgentExecutor): The created agent executor.
"""
print("Creating agent...")
retriever = vectordb.as_retriever(search_kwargs={'k': num_docs})
llm = ChatOpenAI(
model=MODEL,
temperature=0,
openai_api_key=OPENAI_API_KEY,
streaming=True
)
tool_description = "Searches and returns documents regarding the Baldur's Gate 3 Wiki. \
USE ALWAYS when you need information about the game, to make sure \
your answers are accurate. \
Input should be a short question, not only concatenated keywords."
tool = create_retriever_tool(
llm,
retriever,
"search_baldurs_gate_3_wiki",
tool_description
)
tools = [tool]
system_message = SystemMessage(
content="""Yor are a helpful Assistant that is here to help the user find information
about the Baldur's Gate 3 by searching the bg3 wiki database. Before answering, search the wiki
if the question is related to the game. Answer all questions in the tone and style of Astaarion from
Baldur's Gate 3 after searching the wiki and keep the answer concise but do not leave out anything
important to answer the question. Astarion's talking style and tone can be described as
deceptive, sarcastic, and self-interested, with a hint of his dark past.
ALWAYS MAKE SURE to provide ACCURATE INFORMATION by SEARCHING the
Baldur's Gate 3 Wiki whenever the user asks a question about the game.
If the context is not enough to answer the question, ask the user for more information, try to guide the user.
Remember, ALWAYS (!!) use the search tool before answering questions about the game. Never
answer questions about the game without using the search tool, except when the
necessary information is already in the message history.
After answering and reflecting on the answer, provide options for clarifying the answer by predicting
what the user might ask next.
Avoid too general advice, always try to be specific and provide concrete information.
ALWAYS USE THE SEARCH TOOL BEFORE ANSWERING QUESTIONS ABOUT THE GAME!
Format your answers in markdown."""
)
agent_executor = create_conversational_retrieval_agent(
llm,
tools,
system_message=system_message,
remember_intermediate_steps=False
)
agent_executor.memory = memory
return agent_executor
def generate_response(agent_executor, input_query):
"""
This function generates a response to a given input query using the agent executor.
Parameters:
agent_executor (AgentExecutor): The agent executor used to generate the response.
input_query (str): The input query to generate a response for.
Returns:
response (str): The generated response to the input query.
"""
print("Generating response...")
try:
# generate response
response = agent_executor(
input_query,
callbacks=[st_callback]
)['output']
print(f"\nResponse: \n\n{response}")
return response
except InvalidRequestError as error:
# Convert the exception to a string to get the error message
error_message = str(error)
# Extract the number of tokens from the error message
match = re.search(
r"your messages resulted in (\d+) tokens", error_message)
if match:
num_tokens = match.group(1)
else:
num_tokens = "an unknown number of"
# Custom warning message
context_size = str(
4097 if MODEL == "gpt-3.5-turbo-0613"
else 8191 if MODEL == "gpt-4-0613"
else 16384 if MODEL == "gpt-3.5-turbo-16k"
else "an unknown (but too small) number of"
)
warning_message = f"Your input resulted in too many tokens for the model to handle. \
The model's maximum context length is {context_size} tokens, but your messages resulted \
in {num_tokens} tokens. Please reduce the number of documents returned by the search \
(slider on the left) or the length of your input or use a model with larger context \
window and try again."
st.warning(warning_message)
return None
def is_related_to_bg3(query):
"""
This function determines if a given query is related to Baldur's Gate 3.
Parameters:
query (str): The query to be checked.
Returns:
bool: True if the query is related to Baldur's Gate 3, False otherwise.
"""
schema = {
"properties": {
"bg3_related": {
"type": "boolean",
"enum": [True, False],
"description": "describes if the question is related to or \
could be related to Baldur's Gate 3 or a game"
},
},
"required": ["bg3_related"],
}
llm = ChatOpenAI(
model="gpt-3.5-turbo-0613",
temperature=0,
openai_api_key=OPENAI_API_KEY,
)
chain = create_tagging_chain(schema, llm)
return chain.run(query)['bg3_related']
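# Illustrative results of the tagging chain above (a sketch; the actual output depends
# on the model): is_related_to_bg3("Where can I recruit Astarion?") would be expected
# to return True, while is_related_to_bg3("What is the capital of France?") would be
# expected to return False.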
# Input Widgets
OPENAI_API_KEY = st.sidebar.text_input('OpenAI API Key', type='password')
CHAIN_TYPE = st.sidebar.selectbox(
'Summarize Chain Type (see Info below)',
['stuff', 'map-reduce', 'refine'],
disabled=not OPENAI_API_KEY.startswith('sk-')
)
MODEL = st.sidebar.selectbox('Model', ['gpt-3.5-turbo-0613', 'gpt-3.5-turbo-16k',
'gpt-4-0613'], disabled=not OPENAI_API_KEY.startswith('sk-'))
num_docs = st.sidebar.slider(
'Number of documents retrieved per wiki search', 1, 30, 8)
if st.sidebar.button('Clear Message History'):
msgs.clear()
st.sidebar.info(
'Summarize Chain Type: \n\n"stuff" ➝ faster, limited docs \n"map-reduce" ➝ slower, unlimited \
docs \n"refine" ➝ often more accurate for complex questions, slowest, unlimited docs'
)
st.sidebar.markdown(
"""<style>small {font-size: 0.9em; line-height: 0.5;}</style>
<small>
<b>Disclaimer</b>: <br>
<i>
BG3Chat is unofficial Fan Content permitted under the Fan Content Policy.
Not approved/endorsed by Wizards. Portions of the materials used are property of Wizards of the
Coast. ©Wizards of the Coast LLC. <br>
It is also not commisioned or sponsored by Larian Studios.
</i>
</small>""", unsafe_allow_html=True
)
# App Logic
if not OPENAI_API_KEY.startswith('sk-'):
st.warning("""Please enter your OpenAI API key!
If you don't have an API key yet, you can get one at
[openai.com](https://platform.openai.com/account/api-keys).""", icon='⚠')
if OPENAI_API_KEY.startswith('sk-'):
placeholder = st.empty()
# check if the scraped text file exists
if os.path.exists(f'scraped_text_{indexname}.txt'):
print("text file exists, loading...")
with open(f"scraped_text_{indexname}.txt", 'r', encoding='utf-8') as f:
SCRAPED_TEXT = f.read()
else:
print("text file doesn't exist, scraping...")
placeholder.info('Scraping data...')
SCRAPED_TEXT = scrape_url(URL)
placeholder.empty()
# check if the index exists
if os.path.isdir(indexname):
embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
VECTORDB = FAISS.load_local(indexname, embeddings)
else:
# if the directory doesn't exist, rebuild the index
placeholder.info('Building index...')
VECTORDB = build_index(SCRAPED_TEXT)
placeholder.empty()
AGENT_EXECUTOR = create_agent(VECTORDB)
for msg in msgs.messages:
st.chat_message(msg.type).write(msg.content)
if query_text := st.chat_input():
st.chat_message("human").write(query_text)
with st.chat_message("assistant"):
st_callback = StreamlitCallbackHandler(st.container())
# help remembering to use the search tool if the query is related to BG3
related_to_bg3 = is_related_to_bg3(query_text)
print(f"\nQuestion: \n'{query_text}'\n\nRelated to BG3: {related_to_bg3}\n")
            if not related_to_bg3:
RESPONSE = generate_response(AGENT_EXECUTOR, query_text)
else:
extended_query = query_text + " Search the wiki for this!"
RESPONSE = generate_response(AGENT_EXECUTOR, extended_query)
st.write(RESPONSE)
| [
"Yor are a helpful Assistant that is here to help the user find information\n about the Baldur's Gate 3 by searching the bg3 wiki database. Before answering, search the wiki\n if the question is related to the game. Answer all questions in the tone and style of Astaarion from\n Baldur's Gate 3 after searching the wiki and keep the answer concise but do not leave out anything\n important to answer the question. Astarion's talking style and tone can be described as\n deceptive, sarcastic, and self-interested, with a hint of his dark past.\n ALWAYS MAKE SURE to provide ACCURATE INFORMATION by SEARCHING the\n Baldur's Gate 3 Wiki whenever the user asks a question about the game.\n If the context is not enough to answer the question, ask the user for more information, try to guide the user.\n Remember, ALWAYS (!!) use the search tool before answering questions about the game. Never\n answer questions about the game without using the search tool, except when the\n necessary information is already in the message history. \n After answering and reflecting on the answer, provide options for clarifying the answer by predicting\n what the user might ask next.\n Avoid too general advice, always try to be specific and provide concrete information.\n \n ALWAYS USE THE SEARCH TOOL BEFORE ANSWERING QUESTIONS ABOUT THE GAME!\n Format your answers in markdown."
] |
2024-01-10 | guojm14/HRL | hrl~env~goal_env~nchain.py | # copied from openai gym
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
class NChainEnv(gym.Env):
"""n-Chain environment
This game presents moves along a linear chain of states, with two actions:
0) forward, which moves along the chain but returns no reward
1) backward, which returns to the beginning and has a small reward
The end of the chain, however, presents a large reward, and by moving
'forward' at the end of the chain this large reward can be repeated.
At each action, there is a small probability that the agent 'slips' and the
opposite transition is instead taken.
The observed state is the current state in the chain (0 to n-1).
This environment is described in section 6.1 of:
A Bayesian Framework for Reinforcement Learning by Malcolm Strens (2000)
http://ceit.aut.ac.ir/~shiry/lecture/machine-learning/papers/BRL-2000.pdf
"""
def __init__(self, n=5, slip=0.2, small=0.001, large=1.0):
self.n = n
self.n2 = bin(n-1)
print("n2", self.n2, len(self.n2)-2)
self.slip = slip # probability of 'slipping' an action
self.small = small # payout for 'backwards' action
self.large = large # payout at end of chain for 'forwards' action
self.state = 0 # Start at beginning of the chain
self.action_space = spaces.Box(low=-1., high=1., shape=(1,))
# self.observation_space = spaces.Discrete(self.n)
self.observation_space = spaces.Discrete(len(self.n2) - 2)
self.shuffle_order = np.arange(len(self.n2) - 2)
np.random.shuffle(self.shuffle_order)
self.seed()
target = np.zeros(n)
target[n-1] = 1
self.target = target
self.reward_type = "sparse"
self.visited_count = np.zeros(n)
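    # Sketch of the interface implemented below (derived from step/get_obs, not part of
    # the original docstring): actions come from Box(-1, 1); action < 0 moves backward
    # for the small reward, action > 0 moves forward, and choosing forward at the last
    # state (n - 1) keeps the agent there and yields the large reward. Observations
    # encode the state as a fixed-width binary vector of length len(bin(n - 1)) - 2,
    # least-significant bit first; e.g. for n = 5, state 1 is [1, 0, 0] and state 4 is
    # [0, 0, 1].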
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
# print("action", action)
success = False
info = {}
assert self.action_space.contains(action)
if self.np_random.rand() < self.slip:
action = 0 - action # agent slipped, reverse action taken
if action < 0 and self.state > 0: # 'backwards': go back to the beginning, get small reward
reward = self.small
self.state -= 1
elif action > 0 and self.state < self.n - 1: # 'forwards': go up along the chain
reward = 0
self.state += 1
elif self.state == self.n - 1: # 'forwards': stay at the end of the chain, collect large reward
reward = self.large
success = True
else:
reward = 0
done = False
info["is_success"] = success
# print("state", self.state)
if self.visited_count[self.state] == 0:
self.visited_count[self.state] = 1
return self.get_obs(), reward, done, info
def reset(self):
self.state = 0
if self.visited_count[self.state] == 0:
self.visited_count[self.state] = 1.
return self.get_obs()
def get_obs(self):
new = np.zeros(len(self.n2) - 2)
# new[self.state] = 1
new2 = bin(self.state)
new2 = list(new2[2:])
new2.reverse()
for i, ele in enumerate(new2):
new[-(i+1)] = int(ele)
new = new[::-1]
# new = new[self.shuffle_order]
return {
"observation": np.copy(new),
"achieved_goal": np.copy(new),
"desired_goal": np.copy(self.target),
}
@property
def coverage(self):
return np.sum(self.visited_count) / self.n | [] |
2024-01-10 | Lokisfeuer/StorySphere | encode_adventure.py | # !pip install openai==0.28 # TODO: Upgrade your code to most recent version.
# from rnn import train_model
import json
import numpy as np
import openai
from transformers import AutoTokenizer, AutoModel
import torch
import torch.nn.functional as F
PRE_ENC_LENGTH = 1050
PRE_RNN_HIDDEN = 2000
TOKENIZER = AutoTokenizer.from_pretrained('sentence-transformers/all-roberta-large-v1')
MODEL = AutoModel.from_pretrained('sentence-transformers/all-roberta-large-v1')
# openai.api_key = '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'
# These two are commented out because they contain boolean lists that need to be written out.
'''
'mot': {
'texts': ['zu_was_beschreibung'],
'bools': ['wie', 'positive_faktoren', 'negative_faktoren'], # TODO: Expand the factors!
'scalars': [],
'single_ids': [],
'list_ids': ['wer', 'zu_was_fuer_objekten', 'von_wem']
},
'bea': {
'texts': ['aussehen'],
'bools': ['art'], # TODO: Ausschreiben
'scalars': ['difficulty'],
'single_ids': ['wo'],
'list_ids': []
},
'''
text_features_to_prompts = {
'name': 'Give me the name of a fictional character',
'backstory': 'Give me the backstory of a fictional character',
'was': 'Give me a short description of what could happen at a fictional scene in a Theatre I am writing',
'warum': 'Give me conditions for a scene in my self-written theatre to occur like who needs to be on stage',
}
all_features = {
'sci': {
'texts': ['name', 'backstory'],
'bools': ['charakterbogen', 'plaene_fuer_den_charakter', 'hat_eine_backstory'],
'scalars': [],
'single_ids': [],
'list_ids': ['startszene', 'events', 'gruppen', 'backstory_sonstiges']
},
'eus': {
'texts': ['was', 'warum'],
'bools': ['untersuchen', 'soziale_interaktion', 'fight', 'start'],
'scalars': ['schwierigkeitsgrad', 'wahrscheinlichkeit'],
'single_ids': [],
'list_ids': ['wer', 'wo', 'Gegenstände', 'Geheimnisse', 'personen', 'wer_muss_da_sein', 'wo_kann_das_sein',
'motivationen']
},
'npc': {
'texts': ['name', 'backstory'],
'bools': ['charakterbogen', 'plaene', 'hat_eine_backstory'],
'scalars': [],
'single_ids': [],
'list_ids': ['events_und_szenen', 'gruppen', 'backstory_sonstiges']
},
'geh': {
'texts': ['was'],
'bools': [],
'scalars': ['positivitaet'],
'single_ids': [],
'list_ids': ['wer_weiss_davon', 'wen_und_was_betrifft_das']
},
'gru': {
'texts': ['grund_des_zusammenhalts'],
'bools': [],
'scalars': [],
'single_ids': ['moegliche_motivation_von_aussen', 'geburtsort_der_gruppe'],
'list_ids': []
},
'geg': {
'texts': ['was'],
'bools': [],
'scalars': ['wert'],
'single_ids': [],
'list_ids': ['wessen', 'wo']
}
}
'''
This class is the central structure for an adventure. It is meant to be convertible to virtually any other
possible representation of an adventure. Saving it as JSON already works. Currently I am working on a
machine-readable representation of an adventure (in a high-dimensional vector space). I also have in mind a
full-text representation, a representation that relies heavily on graphics, a representation that would work
as a computer game like the AI-RPG project, the adventure as a board game, and so on.
class Adventure:
def __init__(self, name):
self.name = name
self.sci = ObjectClass('sci',
name=str,
charakterbogen=bool,
plaene_fuer_den_charakter=bool,
startszene=(list, str), # list of events and scenes (where start-scene is true)
events=(list, str), # list of events and scenes
gruppen=(list, str), # list of groups
hat_eine_backstory=bool,
backstory=str,
backstory_sonstiges=(list, str)
)
self.mot = ObjectClass('mot',
wer=(list, str), # list of Persons (PCs and NPCs) and groups
zu_was_beschreibung=str,
zu_was_fuer_objekten=(list, str),
wie=bool, # always True
positive_faktoren=(list, bool),
negative_faktoren=(list, bool),
                                # TODO: write both out in full. Lists are reserved for lists of unknown length.
# both factors are exactly 10 bools, each hardcoded to the emotions from the Notizbuch.
von_wem=(list, str) # list of Persons ??
)
self.eus = ObjectClass('eus',
wer=list, # this seems wrong!
wo=(list, str),
was=str,
untersuchen=bool,
Gegenstände=(list, str), # list of Gegenstände
Geheimnisse=(list, str), # list of secrets
soziale_interaktion=bool, # is it a scene of social interaction?
personen=(list, str), # list of persons whose relation to the players might change
fight=bool, # is it a fight scene?
schwierigkeitsgrad=float,
warum=str,
wer_muss_da_sein=(list, str), # list of persons
wo_kann_das_sein=(list, str), # list of locations
start=bool,
wahrscheinlichkeit=float,
motivationen=(list, str)
)
# TODO: Orte
self.npc = ObjectClass('npc',
name=str,
charakterbogen=bool, # hat einen Charakterbogen?
plaene=bool, # es gibt Zukunftspläne für diesen NPC
events_und_szenen=(list, str), # list of events
gruppen=(list, str), # list of groups
hat_eine_backstory=bool,
backstory=str,
backstory_sonstiges=(list, str)
)
self.geh = ObjectClass('geh',
was=str,
wer_weiss_davon=(list, str), # list of Personen
wen_und_was_betrifft_das=(list, str), # list of persons, Gegenstände und Orten
positivitaet=float # how positive is this secret to the players.
)
self.gru = ObjectClass('gru',
grund_des_zusammenhalts=str,
moegliche_motivation_von_aussen=str, # ??, ids are strings
geburtsort_der_gruppe=str # roomID, Geburtsort der Gruppe
)
self.bea = ObjectClass('bea',
art=(list, bool), # TODO Ausschreiben!
difficulty=float, # how big of a challenge does this beast pose.
wo=str, # roomIDs
aussehen=str
)
self.geg = ObjectClass('geg',
wessen=(list, str), # list of Persons
wert=float,
was=str,
wo=(list, str) # list of locations
)
def save(self, path='adventure.json'):
to_save = {}
for i in [self.sci, self.mot, self.eus, self.npc, self.geh, self.gru, self.bea, self.geg]:
to_save.update(i.to_save())
with open(path, 'w+') as f:
f.write(json.dumps(to_save, indent=4))
def load(self, path='adventure.json'):
with open(path, 'r') as f:
data = json.load(f)
for i in [self.sci, self.mot, self.eus, self.npc, self.geh, self.gru, self.bea, self.geg]:
i.all_objects = data[i.name]
i.id_counter = len(data[i.name])
def to_list(self):
to_save = {}
for i in [self.sci, self.mot, self.eus, self.npc, self.geh, self.gru, self.bea, self.geg]:
to_save.update(i.to_save())
return json.dumps(to_save, indent=4)
def to_text(self):
return 'Adventure to text doesn\'t really work yet.'
# This class is more or less an add-on to the adventure class.
class ObjectClass:
def __init__(self, class_name, **features):
self.name = class_name
self.features = features
self.id_counter = 0
self.all_objects = []
def add(self, **features_values):
for i, val in features_values.items():
if i not in list(self.features.keys()):
raise ValueError
else:
if isinstance(self.features[i], tuple):
if not isinstance(val, list):
raise ValueError
if not isinstance(val[0], self.features[i][1]):
raise ValueError
elif not isinstance(val, self.features[i]):
raise ValueError
object_id = f'id_{self.name[0:3]}_{self.id_counter}'
features_values.update({'ID': object_id})
self.id_counter += 1
self.all_objects.append(features_values)
return object_id
def to_save(self):
return {self.name: self.all_objects}
# This is not up-to-date. It generates a demo-adventure about Max Mustermann.
def demo_adventure():
adv = Adventure('demo')
# Max once met a monster which he now meets again in the very first scene.
# Max wants revenge and intends to kick the monster with his boots.
# John also exists. He knows that Max once met the monster.
# John and Max are a group.
adv.sci.add(
name='Max',
charakterbogen=False,
plaene_fuer_den_charakter=True,
startszene=['id_Eve_1'], # list of events and scenes (where start-scene is true)
# events=[], # list of events and scenes
gruppen=['id_Gru_1'], # list of groups
hat_eine_backstory=True,
backstory='This is Max awesome backstory. Max was born in Musterhausen. He was once attacked by a monster.',
backstory_sonstiges=['id_Bea_1']
)
adv.mot.add(
wer=['id_Spi_1'], # list of Persons (PCs and NPCs) and groups
zu_was_beschreibung='Max will sich am Monster rächen indem er es mit seinen Stiefeln tritt.',
zu_was_fuer_objekten=['id_Geg_1'],
wie=True, # always True
positive_faktoren=[False, False, False, False, False, False, False, False, False, False],
# exactly 10 bools, each hardcoded to the emotions from the notizbuch
negative_faktoren=[True, False, True, False, False, False, False, False, True, False],
# von_wem=(list, str) # he hasn't been motivated by anyone on the outside.
)
adv.eus.add(
wer=['id_Spi_1', 'id_Bea_1'],
wo=['id_Ort_1_leidergibtesnochkeineorte'],
was='Max meets the monster that once attacked him again.',
untersuchen=False,
Gegenstände=['id_Geg_1'], # list of Gegenstände
Geheimnisse=['id_Geh_1'], # list of secrets
soziale_interaktion=False, # is it a scene of social interaction?
# personen=(list, str), # since its no social interaction the SC can't change any social relations.
fight=True, # is it a fight scene?
schwierigkeitsgrad=0.8,
warum='Max und Monster sind am gleichen Ort.?!',
# wer_muss_da_sein=(list, str), # list of persons # muss nicht unbedingt was hin.
# wo_kann_das_sein=(list, str), # list of locations # dito
start=True,
wahrscheinlichkeit=1.,
motivationen=['id_Mot_1']
)
# TODO: Orte
adv.npc.add(
name='John',
charakterbogen=False, # hat einen Charakterbogen?
plaene=False, # es gibt Zukunftspläne für diesen NPC
events_und_szenen=['id_Eve_1'], # list of events
gruppen=['id_Gru_1'], # list of groups
hat_eine_backstory=True,
backstory='John is the one who originally sold Max his boots.',
backstory_sonstiges=['id_Spi_1', 'id_Geg_1']
)
adv.geh.add(
was='Max once was attacked by the monster in his childhood.',
wer_weiss_davon=['id_Spi_1', 'id_NPC_1'], # list of Personen
wen_und_was_betrifft_das=['id_Spi_1', 'id_Bea_1'], # list of persons, Gegenstände und Orten
positivitaet=0.2 # how positive is this secret to the players.
)
adv.gru.add(
grund_des_zusammenhalts='John and Max are very good friends.',
# moegliche_motivation_von_aussen=str, # There is no motivation from the outside
# geburtsort_der_gruppe=str # roomID, Geburtsort der Gruppe
)
adv.bea.add(
# art=(list, bool),
difficulty=0.8, # how big of a challenge does this beast pose.
wo='id_Ort_1_leidergibtesnochkeineorte', # roomIDs
aussehen='This beast is a big Monster that seem really quite threatening.'
)
adv.geg.add(
wessen=['id_Spi_1'], # list of Persons
wert=2.,
was='anti-monster-Boots',
# wo=[] # Wo Max halt ist.
)
# adv.save('demo_adventure.json')
return adv
# returns a high-dimensional (1024) vector representation of the passed in sentence.
def roberta(sentence):
# from https://huggingface.co/sentence-transformers/all-roberta-large-v1
# Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
token_embeddings = model_output[0] # First element of model_output contains all token embeddings
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
# Sentences we want sentence embeddings for
sentences = [sentence]
# Load model from HuggingFace Hub
# I made this global variables because they take years to load so best just do it once.
# Tokenize sentences
encoded_input = TOKENIZER(sentences, padding=True, truncation=True, return_tensors='pt')
# Compute token embeddings
with torch.no_grad():
model_output = MODEL(**encoded_input)
# Perform pooling
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
# Normalize embeddings
sentence_embeddings = F.normalize(sentence_embeddings, p=2, dim=1)
# print("Sentence embeddings:")
# print(sentence_embeddings)
return sentence_embeddings.tolist()[0]
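# Illustrative use of roberta above (a sketch): vec = roberta("Max meets the monster.")
# returns a plain Python list of 1024 floats, which is why the text block of every
# pre-encoding below occupies the first 1024 positions of PRE_ENC_LENGTH (1050).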
def rnn_pres(list_of_ids, id_to_pre):
    # This function is supposed to return the output of the RNN encoder when fed the
    # pre-encodings of the objects in list_of_ids. For now it returns a placeholder
    # of the same length.
    return list(range(PRE_RNN_HIDDEN))
# this function takes an object (by id) and returns an encoding which is either a pre_encoding (ignoring ids) or,
# if id_to_pre is not None the full encoding.
def enc_obj(obj_class, id, id_to_pre=None):
features = all_features[obj_class.name]
for i in ['texts', 'bools', 'scalars', 'single_ids', 'list_ids']:
if i not in features.keys():
features.update({i: []})
f_v = obj_class.all_objects[int(id[7:]) - 1] # =features_values
enc = []
# deal with actual texts ; 1024 Values all together
text = 'This is text.'
for n in features['texts']:
if n in f_v.keys():
text = f'{text}\n{n}: {f_v[n]}'
text_embedding = roberta(text)
for i in text_embedding:
enc.append(i)
# deal with booleans; 2 Values each
for n in features['bools']:
if n in f_v.keys(): # 2 values.
enc.append(1.)
if f_v[n]:
enc.append(1.)
else:
enc.append(0.)
else:
enc.append(0.)
enc.append(0.)
# deal with scalars; 2 Values each
for n in features['scalars']:
if n in f_v.keys():
enc.append(1.)
enc.append(float(f_v[n]))
else:
enc.append(0.)
enc.append(0.)
# check length
expected_length = {'sci': 1030, 'eus': 1036, 'npc': 1030, 'geh': 1026, 'gru': 1024, 'bea': 1028,
'geg': 1026} # TODO: add mot
if len(enc) != expected_length[obj_class.name]:
raise ValueError
# fill up with zeros then return if done.
for i in range(PRE_ENC_LENGTH - len(enc)):
enc.append(0)
if id_to_pre is None:
return enc
# deal with single ids; PRE_ENC_LENGTH values each
for n in features['single_ids']:
if n in f_v:
enc.append(1.)
for i in id_to_pre[f_v[n]]:
enc.append(i)
else:
enc.append(0.)
for i in range(PRE_ENC_LENGTH):
enc.append(0.)
# deal with list of ids; PRE_RNN_HIDDEN values each (=per list)
for n in features['list_ids']:
if n in f_v:
enc.append(1.)
eve = rnn_pres(f_v[n], id_to_pre)
for i in eve:
enc.append(i)
else:
enc.append(0.)
for i in range(PRE_RNN_HIDDEN):
enc.append(0.)
return enc
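# Worked length check for enc_obj above (illustrative): the text embedding contributes
# 1024 values, every boolean feature 2 values and every scalar 2 values, so for 'sci'
# (3 bools, 0 scalars) the pre-encoding holds 1024 + 6 = 1030 values before being
# padded with zeros up to PRE_ENC_LENGTH = 1050, matching expected_length['sci'].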
# This function writes an adventure with every mathematically possible object.
def generate_adventure_objs():
adv = Adventure(name='AllObjects')
all_options = {}
for cla in all_features.keys():
opt = {}
for b in all_features[cla]['bools']:
opt.update({b: [False, True]})
for s in all_features[cla]['scalars']:
opt.update({s: [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]})
for t in all_features[cla]['texts']:
opt.update({t: [t]})
all_options.update({cla: opt})
# generate objs:
def iter_(idcs_, maxs_):
if idcs_ == []:
return None, None
if 0 in maxs_:
raise ValueError
idcs_[-1] += 1
x = 0
for i in range(len(idcs_)):
idx = idcs_[-(i + 1)]
max = maxs_[-(i + 1)]
x += 1
if idx == max:
idcs_[-x] = 0
if x == len(idcs_):
return None, None
idcs_[-(x + 1)] += 1
return idcs_, maxs_
def create_obj(opt, idcs, cla, adv):
# TODO: Debug: Why is this not called or doesn't work?
name_to_feat = {'sci': adv.sci, 'mot': adv.mot, 'eus': adv.eus, 'npc': adv.npc, 'geh': adv.geh, 'gru': adv.gru,
'bea': adv.bea, 'geg': adv.geg}
parameter = {}
for o, idx in zip(opt.items(), idcs):
if not isinstance(o[1][idx], str):
parameter.update({o[0]: o[1][idx]})
else:
prompt = f'Give me a very short fascinating story consisting of up to five sentences:\n\n'
# response = openai.Completion.create(model="text-davinci-003", prompt=prompt, temperature=2.,
# max_tokens=200)
# response = response['choices'][0]['text']
# parameter.update({o[0]: response})
parameter.update({o[0]: prompt})
# TODO: texts!
name_to_feat[cla].add(**parameter)
print('start writing')
for cla in all_features.keys():
print(cla)
opt = all_options[cla]
idcs = [0 for _ in opt.keys()]
maxs = [len(i) for i in opt.values()]
while idcs is not None:
create_obj(opt, idcs, cla, adv)
idcs, maxs = iter_(idcs, maxs)
return adv
# This function generates a handful of objects and prints the result of enc_obj for each.
def test():
adv = demo_adventure()
# adv = Adventure(name='demo')
# adv.load('demo_adventure.json')
adv.sci.add(
name='Alfred',
charakterbogen=True,
# plaene_fuer_den_charakter=True,
# startszene=['id_Eve_1'], # list of events and scenes (where start-scene is true)
# events=[], # list of events and scenes
gruppen=['id_Gru_1'], # list of groups
hat_eine_backstory=True,
backstory='This is Max awesome backstory. Max was born in Musterhausen. He was once attacked by a monster.',
backstory_sonstiges=['id_sci_1']
)
adv.sci.add(
name='Berta',
charakterbogen=False,
plaene_fuer_den_charakter=True,
startszene=['id_Eve_1'], # list of events and scenes (where start-scene is true)
events=['id_Eve_1'], # list of events and scenes
gruppen=['id_Gru_1'], # list of groups
hat_eine_backstory=True,
backstory='This is Max awesome backstory. Max was born in Musterhausen. He was once attacked by a monster.',
backstory_sonstiges=['id_sci_1']
)
adv.sci.add()
print(enc_obj(adv.sci, id='id_spi_1'))
# print(pre_encode_object(adv.mot, id='id_mot_1'))
print(enc_obj(adv.eus, id='id_eus_1'))
print(enc_obj(adv.npc, id='id_npc_1'))
print(enc_obj(adv.geh, id='id_geh_1'))
print(enc_obj(adv.gru, id='id_gru_1'))
print(enc_obj(adv.bea, id='id_bea_1'))
print(enc_obj(adv.geg, id='id_geg_1'))
print('Spielercharaktere:')
print(enc_obj(adv.sci, id='id_spi_1'))
print(enc_obj(adv.sci, id='id_spi_2'))
print(enc_obj(adv.sci, id='id_spi_3'))
print(enc_obj(adv.sci, id='id_spi_4'))
# This function (currently) first calls generate_adventure_objs() and then gets the pre-encoding for each object.
# It saves the resulting array and prints its overall length.
def main():
adv = generate_adventure_objs()
adv.save(path='all_objects_adv.json')
name_to_feat = {'sci': adv.sci, 'mot': adv.mot, 'eus': adv.eus, 'npc': adv.npc, 'geh': adv.geh, 'gru': adv.gru,
'bea': adv.bea, 'geg': adv.geg}
i = 0
all = []
print('start encoding')
for name, cla in name_to_feat.items():
print(name)
for j in range(cla.id_counter):
i += 1
all.append(enc_obj(cla, id=f'id_{name}_{j}'))
arr = np.array(all)
np.savetxt('test.csv', arr, delimiter=',')
np.save("pres.npy", arr)
print(i)
# Generate A LOT of adventures and their objects.
# train RNN with train_model from RNN
# save the resulting models
# write function RNN to use these saved model
# test enc_obj with optional parameter id_to_pre
if __name__ == '__main__':
main()
| [
"{'name': 'Give me the name of a fictional character', 'backstory': 'Give me the backstory of a fictional character', 'was': 'Give me a short description of what could happen at a fictional scene in a Theatre I am writing', 'warum': 'Give me conditions for a scene in my self-written theatre to occur like who needs to be on stage'}",
"Give me a very short fascinating story consisting of up to five sentences:\n\n"
] |
2024-01-10 | Joentze/chad-bod | build_supabase.py | """adds documents in supabase vector database"""
from threading import Thread
import json
from typing import List
from os import listdir, environ
import openai
from supabase import create_client
from pprint import pprint
# thread
MAX_NUM_OF_THREADS = 8
# open ai details
openai.api_key = environ["OPENAI_API_KEY"]
# supabase details
supabase_url = environ["SUPABASE_URL"]
supabase_key = environ["SUPABASE_KEY"]
supabase = create_client(supabase_url=supabase_url, supabase_key=supabase_key)
COLLECTION_JSON = "compiled.json"
def compile_all_documents(path: str) -> None:
"""gets all vector documents and generates a compiled file for supabase loading"""
documents = {"documents": []}
for this_file in listdir(f"./{path}"):
with open(f"./{path}/{this_file}", "r", encoding="utf-8") as file:
obj = json.load(file)
docs = obj["documents"]
srcs = obj["sources"]
if len(docs) == len(srcs):
documents["documents"] += [{"content": docs[i], "source":srcs[i]}
for i in range(len(docs))]
with open(COLLECTION_JSON, "w", encoding="utf-8") as file:
json.dump(documents, file)
def write_embeddings_to_documents(documents: List[object]) -> None:
"""writes all documents and embeddings to supabase documents table"""
for document in documents:
embedding = openai.Embedding.create(
input=document["content"], model="text-embedding-ada-002"
)["data"][0]["embedding"]
document = {"content": document["content"],
"embedding": embedding, "source": document["source"]}
supabase.table("documents").insert(document).execute()
# def segment_write_to_supabase(all_docs:List[object])->None:
def add_new_user(chat_id: str, username: str) -> None:
"""Adds new user into db"""
try:
supabase.table("users").insert({
"id": chat_id,
"username": username
}).execute()
    except Exception:
        # the user may already exist or the insert may fail; ignore and continue
        pass
def remove_user(chat_id: str) -> None:
"""Remove users from db"""
try:
supabase.table("users").delete().eq("id", chat_id).execute()
    except Exception:
        # ignore failures when the user does not exist or the delete fails
        pass
def segment_content(documents: List[object], num_of_threads: int) -> List[List[object]]:
"""breaks documents into chunks"""
segments = [[] for i in range(num_of_threads)]
for i, document in enumerate(documents):
multiplier = i // num_of_threads
segments[i - num_of_threads*multiplier].append(document)
return segments
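# Worked example (illustrative only): segment_content distributes documents round-robin,
# so document i lands in segments[i % num_of_threads]. With MAX_NUM_OF_THREADS = 8:
#
#   docs = [{"content": str(i), "source": "s"} for i in range(10)]
#   segs = segment_content(docs, MAX_NUM_OF_THREADS)
#   [len(s) for s in segs]  # -> [2, 2, 1, 1, 1, 1, 1, 1]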
def segment_write_to_supabase(documents: List[object]) -> None:
"""threaded write to supabase"""
threads = [Thread(target=write_embeddings_to_documents, kwargs={"documents": segment})
for segment in segment_content(documents, MAX_NUM_OF_THREADS)]
# START THREADS
for thread in threads:
thread.start()
# JOIN THREADS
for thread in threads:
thread.join()
def get_context_from_supabase(query: str, threshold: float, count: int) -> str:
"""get contexts from supabase"""
contexts = []
embedding = openai.Embedding.create(
input=query, model="text-embedding-ada-002")["data"][0]["embedding"]
response = supabase.rpc("match_documents", {
"query_embedding": embedding,
"similarity_threshold": threshold,
"match_count": count,
}).execute()
for context in response.data:
content = context["content"]
source = context["source"]
line = f"{content} (source: {source})"
contexts.append(line)
return "\n".join(contexts)
# def add_message_to_supabase(chat_id:str, message_id:str, message:str)->None:
if __name__ == "__main__":
# compile_all_documents("vector_documents")
# with open(COLLECTION_JSON, "r", encoding="utf-8") as file:
# obj = json.load(file)
# docs = obj["documents"]
# segment_write_to_supabase(docs)
pass
| [
"content"
] |
2024-01-10 | Joentze/chad-bod | chat_bot_main.py | """Running LLM Scripts"""
import json
from typing import List
from datetime import datetime
from dataclasses import dataclass
import openai
from secret_keys import TELEGRAM_API_KEY, OPEN_AI_KEY
from build_supabase import get_context_from_supabase
from prompts import get_prompt, insert_context_to_prompt
from telegram_helper import edit_message
from redis_handler import insert_message_history, get_message_history
from llm_functions.function_map import function_map, llm_functions
MODEL_NAME = "gpt-3.5-turbo-0613"
MAX_NUM_TOKEN_TELEGRAM = 50
EXCEED_TOKEN_MESSAGE = """```You've exceeded the token limit for this message, please rephrase into a shorter statement...```"""
openai.api_key = OPEN_AI_KEY
# davinci = OpenAI(model_name=MODEL_NAME,
# openai_api_key=OPEN_AI_KEY, temperature=0, max_tokens=1000)
@dataclass
class TelegramQuery:
chat_id: str
message_id: str
query: str
def is_within_token_limit(message: str) -> bool:
"""checks if message sent is within character limit"""
return len(message)//4 <= MAX_NUM_TOKEN_TELEGRAM
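# Note: len(message) // 4 is a rough "about four characters per token" heuristic, so with
# MAX_NUM_TOKEN_TELEGRAM = 50 a message of roughly 200 characters or fewer passes the check.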
# def run_llm(question: str):
# """runs open ai llm"""
# contexts = get_context_from_supabase(question, 0.8, 3)
# llm_chain = LLMChain(prompt=get_prompt(contexts), llm=davinci)
# response = llm_chain.run(question)
# return response
def respond_with_llm(configs):
"""edits specific telegram bot message"""
query = TelegramQuery(
chat_id=configs["chat_id"], message_id=configs["message_id"], query=configs["query"])
message_history = get_message_history(query.chat_id)
response = chat_completion(query.chat_id, query.query, message_history)
edit_message(API_KEY=TELEGRAM_API_KEY, message_id=query.message_id,
chat_id=query.chat_id, new_message=response)
def chat_completion(chat_id: str, curr_query: str, history: List[object]) -> str:
"""sends query to LLM"""
contexts = get_context_from_supabase(curr_query, 0.8, 3)
prompt = insert_context_to_prompt(curr_query, contexts)
message_history = [
{"role": "system", "content": "You are Chad Bod, a Singapore Management University Student Helper. You do not help students with any of their school work, you can only advise them briefly"},
*history,
{"role": "user", "content": prompt}
]
print(message_history)
completion = openai.ChatCompletion.create(
model=MODEL_NAME,
temperature=0,
messages=message_history,
functions=llm_functions
)
# message = completion['choices'][0]['message']['content']
response_body = completion['choices'][0]['message']
if "function_call" in response_body:
func = response_body["function_call"]
function_name = func["name"]
args = func["arguments"]
args = json.loads(args)
message = function_map[function_name](**args)
else:
message = response_body["content"]
insert_message_history(chat_id=chat_id, message={
"role": "assistant", "content": message})
return message
if __name__ == "__main__":
t1 = datetime.now()
# print(run_llm("how many libraries are there in smu"))
messages = get_message_history("549991017")
# print(type(messages))
test = [{'role': 'system', 'content': 'You are Chad Bod, a Singapore Management University Student Helper.'}, {'role': 'user', 'content': 'who is the president of smu'}, {'role': 'user', 'content': 'who is kyriakos'}, {'role': 'user', 'content': 'how do i bid for classes?'}, {'role': 'user', 'content': 'how do i start planning for exchange'}, {'role': 'user',
'content': "\nRoleplay as the following:\nYou are an enthusiastic student helper of Singapore Management University. You respond to student's questions based on the context in a direct manner. If you do not know how to respond to the question, just say you do not know, do not come up with your own answers. quote the sources from context.\n\ncontext:\nWhat should I do if I do not have sufficient e$? Additional e$ will not be allocated as all students are given the same amount of e$ and e-pt in each term throughout the years of study in SMU. Please adjust your e$ bids accordingly so that you can bid for additional courses.But if you do not have sufficient e$ to bid for courses in your final term, please proceed to bid for the required courses with all your e$ until the end of Round 1B. You might be able to get your bids. If you are still unable to have successful bids, please consult your school manager for advice. (source: https://oasis.smu.edu.sg/Pages/faq.aspx)\nHow can I check for the applicable course area(s) for a course? Navigate toBOSS> BOSS Bidding > Plan & Bid > Add to Cart > Course Search to search for courses under a specific course area.You should check the course area of the class you wish to enrol in, as the course area(s) may change over time. (source: https://oasis.smu.edu.sg/Pages/faq.aspx)\n\nquestion:\nhow should i plan for bidding\n\nanswer:\n"}]
completion = chat_completion(
"549991017", "what is lks", messages)
print(completion)
# respond_with_llm({
# "chat_id": 549991017,
# "message_id": 73,
# "query": "what are the requirements for dean's list"
# })
print("total time taken: ", datetime.now()-t1)
| [
"who is kyriakos",
"how do i start planning for exchange",
"\nRoleplay as the following:\nYou are an enthusiastic student helper of Singapore Management University. You respond to student's questions based on the context in a direct manner. If you do not know how to respond to the question, just say you do not know, do not come up with your own answers. quote the sources from context.\n\ncontext:\nWhat should I do if I do not have sufficient e$? Additional e$ will not be allocated as all students are given the same amount of e$ and e-pt in each term throughout the years of study in SMU. Please adjust your e$ bids accordingly so that you can bid for additional courses.But if you do not have sufficient e$ to bid for courses in your final term, please proceed to bid for the required courses with all your e$ until the end of Round 1B. You might be able to get your bids. If you are still unable to have successful bids, please consult your school manager for advice. (source: https://oasis.smu.edu.sg/Pages/faq.aspx)\nHow can I check for the applicable course area(s) for a course? Navigate toBOSS> BOSS Bidding > Plan & Bid > Add to Cart > Course Search to search for courses under a specific course area.You should check the course area of the class you wish to enrol in, as the course area(s) may change over time. (source: https://oasis.smu.edu.sg/Pages/faq.aspx)\n\nquestion:\nhow should i plan for bidding\n\nanswer:\n",
"who is the president of smu",
"You are Chad Bod, a Singapore Management University Student Helper. You do not help students with any of their school work, you can only advise them briefly",
"You are Chad Bod, a Singapore Management University Student Helper.",
"how do i bid for classes?"
] |
2024-01-10 | ssutl/Instagram-bot | insta.py | from instagrapi import Client
import requests
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from typing import Dict
import textwrap
from dotenv import load_dotenv
import os
import openai
import random
import schedule
import time
import json
load_dotenv()
# Get the environment variables
insta_username = os.getenv('insta_username')
insta_password = os.getenv('insta_password')
kton_username = os.getenv('kton_username')
kton_password = os.getenv('kton_password')
openai.api_key = os.getenv('openAI_key')
def getQuote(index):
with open("quotesList.json", "r") as jsonFile:
json_data = json.load(jsonFile)
if index >= len(json_data):
return None
return json_data[index]
def getImageD():
##Using Dall-e
# Generate an ultra-realistic anime cityscape that immerses the viewer in a bright and futuristic metropolis. The attention to detail is paramount – from the intricately designed skyscrapers with realistic glass reflections to the individual leaves swaying on the holographic trees. Every aspect of the scene should evoke a sense of realism and wonder.
#Action photography of a parkour athlete jumping between urban structures, using a fast shutter speed.
# Lifestyle photography of someone listening to vinyl records, using warm tones to evoke nostalgia.
# Lifestyle photography of a black 80s DJs playing music and mixing vinyls with his crew, using warm tones to evoke nostalgia.
#Lifestyle photography of the 80s streets with black people. DJs playing music and mixing vinyls. Kids running. Palm trees. using warm tones to evoke nostalgia.
#Lifestyle photography of the 80s streets with black people. DJs passionately mixing vinyl records on turntables, where the vinyl decks themselves are miniature cityscapes, complete with intricate details. Kids running. Palm trees. using warm tones to evoke nostalgia.
try:
response = openai.Image.create(
prompt="Lifestyle photography of the 80s streets with black people.Vibrant. DJs passionately mixing vinyl records on turntables, where the vinyl decks themselves are miniature cityscapes, complete with intricate details. Using warm tones to evoke nostalgia.",
n=1,
size="1024x1024"
)
imageUrl = response['data'][0]['url']
##Saving the file
response = requests.get(imageUrl)
with open('image.jpg', 'wb') as f:
# Write the contents of the response to the file
f.write(response.content)
except openai.error.OpenAIError as e:
print(f'Request failed: {e}')
def getImageU():
##Requests Unsplash
random_url="https://api.unsplash.com/photos/random"
access_key = "QyIVMq6A6fL2y7WlNE9XsU2X7F40JUSTj-nsCaX_MYI"
headers = {"Authorization": f"Client-ID {access_key}"}
params = {'query': 'modern building black', 'orientation': 'squarish'}
try:
unsplash_response = requests.get(random_url,headers=headers,params=params)
unsplash_response.raise_for_status() #Anything thats not 200
random_image = unsplash_response.json()["urls"]["raw"]
##Saving the file
response = requests.get(random_image)
with open('image.jpg', 'wb') as f:
# Write the contents of the response to the file
f.write(response.content)
except requests.exceptions.RequestException as e:
print(f'Request failed: {e}')
def createPost(index):
# Open image
img = Image.open("image.jpg")
draw = ImageDraw.Draw(img, 'RGBA')
font_size = 1
font = ImageFont.truetype("font.ttf", font_size)
# Get quote information
quote = getQuote(index)
global title, author
title, author = quote['Title'], quote['Author']
text = quote['Text']
global caption
caption=f'Quote extracted from {author.replace(";"," & ")}\'s "{title}" {randomEmoji()} \n #Quotes #Books #HumblySubtly #MentalMobility'
# Set background color
    bg_color = (0, 0, 0, 200)  # black with alpha 200 (roughly 78% opacity)
# Wrap text and calculate total height
    wrapped_text = textwrap.wrap(text, width=40)  # maximum 40 characters per line, split into a list of strings
    line_height = font.getsize('hg')[1]  # rough height of a single line; getsize returns a (width, height) tuple
    total_height = len(wrapped_text) * line_height  # number of lines multiplied by the line height
#Find the longest string in wrapped text and continually increase font until it reaches max
longest_string = max(wrapped_text, key=len)
while font.getsize(longest_string)[0] < 0.8*img.size[0]:
font_size+=1
font = ImageFont.truetype("font.ttf", font_size)
line_height = font.getsize('hg')[1] * 2
total_height = len(wrapped_text) * line_height
# the y-coordinate of the starting point of the text,
# which is the point where the text will be drawn on the image.
y = (img.height - total_height) / 2
# Draw each line of wrapped text on the image
    # In image coordinates the vertical axis runs from zero at the top to the image height at the bottom
for line in wrapped_text:
# Center horizontally
line_width = font.getsize(line)[0]
#the horizontal position of the starting point of the text,
# if the text is horizontally centered within the image.
line_x = (img.width - line_width) / 2
        # Draw the background rectangle (top-left and bottom-right corners) with 20px horizontal and 10px vertical padding
bg_x1, bg_y1 = line_x - 20, y - 10
bg_x2, bg_y2 = line_x + line_width + 20, y + line_height + 10 #bottom right
# Draw background rectangle and text
draw.rectangle((bg_x1, bg_y1, bg_x2, bg_y2), fill=bg_color)
# Calculate vertical position for text (to center it within the rectangle)
bg_center_y = (bg_y1 + bg_y2) / 2
text_y = bg_center_y - (font.getsize(line)[1] / 2)
draw.text((line_x, text_y), line, font=font, fill=(255, 255, 255))
# To move the y coordinate to the vertical position below previous line
y += line_height + 20
#Draw rectangle bottom right
# Save modified image
img.save("overlay.jpg")
def randomEmoji():
EmojiArray = ["📚","🧠","🥭","⌛","♾️","📜","🎯"]
randomEmojis = random.sample(EmojiArray,2)
return " ".join(randomEmojis)
def postFunction():
global current_index
print("Uploading Post")
quote = getQuote(current_index)
if quote is not None:
# Extract quote data
quote_text = quote["Text"]
quote_author = quote["Author"]
quote_title = quote["Title"]
# Create post
getImageD()
createPost(current_index)
cl.photo_upload('overlay.jpg', caption, extra_data={
"like_and_view_counts_disabled": True,
"disable_comments": True
})
print(f"Posted: {quote_text} - {quote_author} ({quote_title})")
current_index += 1 # Increment index for next post
else:
print("No more quotes to post")
testing = input("Are you testing the software?")
if testing == "yes" or testing == "YES" or testing == "Y" or testing == "y":
imageGeneration = input("Do you want to use DALLE (D) or no (any key)?")
if imageGeneration == "D" or imageGeneration == "d":
getImageD()
else:
getImageU()
createPost(28)
else:
cl = Client()
cl.login(username=insta_username, password=insta_password)
    # When the code starts, begin posting from this index
current_index = 0
schedule.every().day.at("04:00").do(postFunction)
while True:
schedule.run_pending()
time.sleep(1)
| [] |
2024-01-10 | worldbank/llm4data | llm4data~scripts~indexing~docs~docs.py | from typing import Optional, Union
from pathlib import Path
from langchain.docstore.document import Document
from langchain.document_loaders import PyMuPDFLoader
from langchain.text_splitter import (
NLTKTextSplitter,
CharacterTextSplitter,
RecursiveCharacterTextSplitter,
)
from llm4data.embeddings.docs import get_docs_embeddings
from llm4data import index
from llm4data import configs
from llm4data.schema.schema2info import get_doc_title
# Get the docs embeddings
docs_embeddings = get_docs_embeddings()
# Get access to the Qdrant docs collection
docs_index = index.get_docs_index()
chunk_overlap = 32
chunk_size = docs_embeddings.max_tokens + chunk_overlap
# Create a text splitter
text_splitter = RecursiveCharacterTextSplitter.from_huggingface_tokenizer(
docs_index.embeddings.client.tokenizer,
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
)
# text_splitter = NLTKTextSplitter()
def add_pdf_document(path: Union[str, Path], metadata: Optional[dict] = None):
# Load the document
documents = PyMuPDFLoader(str(path)).load_and_split(text_splitter=text_splitter)
# Add document metadata
if metadata is not None:
if len(documents):
# Index the title of the document
documents.append(
Document(page_content=get_doc_title(metadata), metadata=documents[0].metadata)
)
for doc in documents:
doc.metadata[configs.METADATA_KEY] = metadata
# Add the document to the collection
    # Load the documents in batches
batch_size = 100
for i in range(0, len(documents), batch_size):
docs_index.add_documents(documents[i : i + batch_size])
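# Usage sketch (illustrative only; the path is hypothetical and the metadata shape is an
# assumption based on how other modules look up document_description/title_statement):
#
#   add_pdf_document(
#       "data/sample_report.pdf",
#       metadata={"document_description": {"title_statement": {"idno": "DOC-001", "title": "Sample report"}}},
#   )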
| [] |
2024-01-10 | worldbank/llm4data | llm4data~scripts~indexing~indicators~indicators.py | from typing import List, Optional, Union
from langchain.text_splitter import NLTKTextSplitter
from langchain.docstore.document import Document
from llm4data import configs
from llm4data.index import get_indicators_index
# Get access to the Qdrant docs collection
indicators_index = get_indicators_index()
text_splitter = NLTKTextSplitter()
def build_document(text: str, metadata: dict = None):
# Load the document
document = Document(page_content=text, metadata={configs.METADATA_KEY: metadata} if metadata else {})
return document
def add_indicators(text: Union[str, List[str]], metadata: Optional[Union[dict, List[dict]]] = None):
# Load the document
if isinstance(text, str):
documents = [build_document(text, metadata)]
else:
documents = [build_document(text, meta) for text, meta in zip(text, metadata)]
# Add the document to the collection
indicators_index.add_documents(documents)
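# Usage sketch (illustrative; the metadata keys mirror what downstream code reads,
# i.e. "series_code" and "name"):
#
#   add_indicators(
#       ["Access to electricity (% of population)", "GDP per capita (current US$)"],
#       metadata=[{"series_code": "EG.ELC.ACCS.ZS", "name": "Access to electricity (% of population)"},
#                 {"series_code": "NY.GDP.PCAP.CD", "name": "GDP per capita (current US$)"}],
#   )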
| [] |
2024-01-10 | worldbank/llm4data | llm4data~prompts~context.py | import json
from llm4data.index import get_docs_index, get_indicators_index
from llm4data import configs
from hashlib import md5
from llm4data.schema.schema2info import get_doc_id, get_doc_title, get_doc_authors
from langchain.docstore.document import Document
indicators = get_indicators_index()
docs = get_docs_index()
def get_hash_id(text: str):
return md5(text.encode("utf-8")).hexdigest()
def get_page(doc: Document, offset=0, default=-1):
"""Get the page number from the document metadata.
We use offset=1 because the page numbers we want in the metadata
should start from 1, while the page numbers in the PDF start from 0.
"""
return doc.metadata.get('page', default) + offset
def get_contexts(prompt: str, k_docs: int = 5, k_indicators: int = 10, doc_id: str = None):
# Search for documents
if doc_id is not None:
docs_result = docs.similarity_search(prompt, k=k_docs, filter={configs.METADATA_KEY: {"document_description": {"title_statement": {"idno": doc_id}}}})
else:
docs_result = docs.similarity_search(prompt, k=k_docs)
indicators_result = indicators.similarity_search(prompt, k=k_indicators)
doc_context = []
indicators_context = []
doc_context_records = []
indicators_context_records = []
for doc in docs_result:
doc_id = get_doc_id(doc.metadata[configs.METADATA_KEY])
doc_context.append("<h1>Title: " + get_doc_title(doc.metadata[configs.METADATA_KEY]) + "</h1>")
if doc.metadata[configs.METADATA_KEY].get("authors"):
doc_context.append("<h1>Author: " + json.dumps(get_doc_authors(doc.metadata[configs.METADATA_KEY])) + "</h1>")
if doc_id is not None:
doc_context.append(f"<p>(id: {doc_id}) (page: {get_page(doc, offset=1)}) {doc.page_content}</p>")
else:
doc_context.append(f"<p>(id: {doc_id}) {doc.page_content}</p>")
doc_context_records.append(dict(id=get_hash_id(doc.page_content), doc_id=doc_id, page=get_page(doc, offset=1), content=doc.page_content))
for indicator in indicators_result:
indicator_id = indicator.metadata[configs.METADATA_KEY]["series_code"]
indicators_context.append(f"<p>(id: {indicator_id}) {indicator.metadata[configs.METADATA_KEY]['name']}</p>")
indicators_context_records.append(dict(id=get_hash_id(indicator.page_content), indicator_id=indicator_id, name=indicator.metadata[configs.METADATA_KEY]['name']))
doc_context = "<br>".join(doc_context) if doc_context else ""
indicators_context = "<br>".join(indicators_context) if indicators_context else ""
return dict(
docs_result=[i.dict() for i in docs_result],
indicators_result=[i.dict() for i in indicators_result],
doc_context=doc_context,
indicators_context=indicators_context,
doc_context_records=doc_context_records,
indicators_context_records=indicators_context_records,
)
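# Usage sketch (illustrative):
#
#   ctx = get_contexts("What share of the population has access to electricity?")
#   ctx["doc_context"]         # "<h1>Title: ...</h1><br><p>(id: ...) ...</p>" snippets
#   ctx["indicators_context"]  # "<p>(id: SERIES.CODE) indicator name</p>" lines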
| [] |
2024-01-10 | worldbank/llm4data | llm4data~index~qdrant.py | import os
from typing import Optional, Union
from langchain.vectorstores import Qdrant
import qdrant_client
from qdrant_client.http import models
from ..embeddings.docs import get_docs_embeddings
from ..embeddings.indicators import get_indicators_embeddings
from ..embeddings.microdata import get_microdata_embeddings
_CLIENT = None
def collection_exists(collection_name: str) -> bool:
colls = get_index_client().get_collections()
return collection_name in [i.name for i in colls.collections]
def get_index_client(path: Optional[str] = None):
global _CLIENT
if _CLIENT is None:
if path is not None:
_CLIENT = qdrant_client.QdrantClient(path=path, prefer_grpc=True)
else:
url = os.environ.get("QDRANT_URL")
if url is not None:
port = os.environ.get("QDRANT_PORT")
if port is not None:
url += f":{port}"
_CLIENT = qdrant_client.QdrantClient(url=url, prefer_grpc=False)
else:
path = os.environ.get("QDRANT_PATH")
if path is not None:
_CLIENT = qdrant_client.QdrantClient(path=path, prefer_grpc=True)
else:
raise ValueError("QDRANT_URL or QDRANT_PATH not set in the environment")
return _CLIENT
def get_index_collection(embeddings, path: Optional[str] = None, recreate: bool = False):
client = get_index_client(path=path)
if recreate:
client.recreate_collection(
collection_name=embeddings.collection_name,
vectors_config=models.VectorParams(
size=embeddings.size, distance=embeddings.distance
),
)
if not collection_exists(embeddings.collection_name):
client.create_collection(
collection_name=embeddings.collection_name,
vectors_config=models.VectorParams(
size=embeddings.size, distance=embeddings.distance
),
)
return Qdrant(
client=client,
collection_name=embeddings.collection_name,
embeddings=embeddings.embeddings,
)
def get_docs_index(path: Optional[str] = None, recreate: bool = False):
return get_index_collection(get_docs_embeddings(), path=path, recreate=recreate)
def get_indicators_index(path: Optional[str] = None, recreate: bool = False):
return get_index_collection(
get_indicators_embeddings(), path=path, recreate=recreate
)
def get_microdata_index(path: Optional[str] = None, recreate: bool = False):
return get_index_collection(
get_microdata_embeddings(), path=path, recreate=recreate
)
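# Usage sketch (illustrative; requires QDRANT_URL (optionally with QDRANT_PORT) or QDRANT_PATH
# to be set in the environment):
#
#   docs_index = get_docs_index()                # reuse the existing collection
#   fresh_index = get_docs_index(recreate=True)  # drop and recreate the collection first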
| [] |
2024-01-10 | neethanm/EduScorer | backend.py | import openai
from dotenv import load_dotenv
import os
# Load the environment variables from the .env file
load_dotenv()
# Access the API key
api_key = os.getenv("API_KEY")
openai.api_key = api_key
def check_answer(Teachers_solution, Students_answer, Max_marks, Question):
openai.api_key = api_key
# openai.api_key = api_key_input
# try:
print("sending to gpt3")
completion1 = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages = [
{
"role": "system",
"content": "You are a strict teacher evaluating student answers.",
},
{
"role": "user",
"content": f'''Please evaluate the student's answer for the following question. You will be provided with the teacher's solution, the question, the student's answer, and the maximum marks. Your task is to assign a score to the student's answer.
**Teacher's Solution:**
{Teachers_solution}
**Question:**
{Question}
**Student's Answer:**
{Students_answer}
**Max Marks:**
{Max_marks}
**Important stuff**
- Make sure to deduct marks wherever you can ( you have to be really strict)
- Make sure to give the response in the specified format
**Evaluation Criteria:**
- Accuracy: Compare the student's answer to the teacher's solution. Deduct 0.5 marks for each factual inaccuracy.
- Completeness: Consider the depth of coverage in the student's answer. Deduct 0.5 marks for each missing key point.
- Relevance: Assess if the student's answer stays on-topic. Deduct 0.5 marks for each irrelevant point.
- Clarity: Evaluate the clarity and organization of the student's response. Deduct 0.5 marks for incoherent or poorly structured answers.
**Marks Allocation:**
- Full Marks: Give full marks (as specified) for answers that match the teacher's solution exactly(context and accuracy wise).
- Partial Marks: Deduct 1 marks for any discrepancies between the student's answer and the teacher's solution, applying a clear grading scale.
- Length: If the student's answer is significantly shorter or longer than the teacher's solution, adjust the marks accordingly according to the content.(too short -3 marks ,short -2 marks, little short -1 marks)
- Explaination: If the student's answer doesnt contain the explaination of the answer that is there in the teachers answer deduct 0.5 marks.
You should consider all evaluation criteria and allocate marks based on the provided guidelines and just return the total marks allocated out of max marks.
YOU HAVE TO GIVE THE RESPONSE IN THIS FORMAT : {{ "marks": int,"explaination": string,"accuracy": string,"completeness":int(marks) ,"relevance": int,"clarity": int }} make sure you follow the format and give just integer values where asked and string where asked
all the features accuracy , completeness,relavance,clarity,length should be positive integers ( the number of marks to be deducted )
'''
}
],
# Your code to interact with the model here
temperature=1,
# max_tokens=15000,
)
final_html = completion1['choices'][0]['message']['content']
return final_html
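# Usage sketch (illustrative; the inputs are made up):
#
#   result = check_answer(
#       Teachers_solution="Photosynthesis converts light energy into chemical energy stored in glucose.",
#       Students_answer="Plants use sunlight to make food.",
#       Max_marks=5,
#       Question="Explain photosynthesis.",
#   )
#   # `result` is expected to be a JSON string in the format requested above, e.g.
#   # {"marks": 3, "explaination": "...", "accuracy": "...", "completeness": 1, "relevance": 0, "clarity": 0}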
| [
"You are a strict teacher evaluating student answers."
] |
2024-01-10 | jalbrekt85/ebook-diffuser | diffusers~knollingcase.py | from ebook_difusser import EBookDiffuser
import os
from PIL import Image
import openai
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
class Knollingcase(EBookDiffuser):
# configured for 8.25x11 hardcover Amazon books
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.init_profile()
def generate_theme(self) -> str:
res = openai.Completion.create(
model=f"text-davinci-003",
prompt=self.story.gpt_theme_prompt,
temperature=1.0,
max_tokens=15,
top_p=1,
frequency_penalty=0.0,
presence_penalty=0.0,
stop=["\n"],
)
text = res["choices"][0]["text"][1:]
print("theme: ", text)
if text not in os.listdir(self.books_dir):
return text
        # Retry until an unused theme is generated and return that result
        return self.generate_theme()
def generate_page_prompt(self, theme) -> str:
prompt = self.story.gpt_page_prompt.format(theme)
res = openai.Completion.create(
model=f"text-davinci-003",
prompt=prompt,
temperature=1,
max_tokens=9,
top_p=1,
frequency_penalty=0.02,
presence_penalty=0.02,
stop=["\ntheme"],
)
page_prompt = res["choices"][0]["text"].split(":")[1][1:]
# add latest result to gpt prompt template to avoid repetitive results
self.story.gpt_page_prompt = prompt + "\nresponse: " + page_prompt + "\n" + "theme: {}"
return "{} {}".format(theme, page_prompt)
def generate_page_image(self, prompt) -> Image:
res = self.api.txt2img(
prompt=self.sd.prompt_template.format(prompt),
negative_prompt=self.sd.negative_prompt,
steps=self.sd.steps,
cfg_scale=self.sd.cfg_scale,
sampler_name=self.sd.sampler,
width=self.sd.width,
height=self.sd.height,
)
upscaled = self.api.extra_single_image(
res.image, upscaler_1="ESRGAN_4x", upscaling_resize=3
)
return upscaled.image
| [] |
2024-01-10 | drewgillson/googlepalm-minute-book-extraction | terraform~modules~cloud_functions~src~minute-book-parser~directors.py | import main
import json
import re
from langchain.prompts import PromptTemplate
from langchain.llms import VertexAI
from langchain.chains import LLMChain
def Parser(sorted_files):
"""
Extracts details of elected directors from the sorted pages of minute book.
Args:
sorted_files (list): A list of tuples where each tuple contains the page number and file name of a sorted file.
Returns:
        A dictionary with "directors", "minimum_directors", and "maximum_directors" keys. Each director entry includes the
        full name, election date, retirement date, address, and the URL of the source document page the details were extracted from.
"""
elected_directors = [{}]
election_of_director_content = ""
election_of_director_provenance = []
election_of_director_token_count = 0
election_of_director_max_token_limit = 1024
extracting_election_of_director = False
minimum_number_of_directors = []
maximum_number_of_directors = []
file_count = len(sorted_files)
for file in sorted_files:
page_number, file_name = file
content = main.get_page(file_name)
lowercase_content = content.lower()
parsed_this_page = False
# "minimum_directors": string, // Minimum number of directors required for the corporation
if ("minimum" in lowercase_content or "less than" in lowercase_content) and "directors" in lowercase_content and "number" in lowercase_content:
min_directors = extract_minimum_directors(content)
if min_directors is not None:
minimum_number_of_directors.append({"min_directors": min_directors, "provenance": main.get_url(file_name)})
# "maximum_directors": string, // Maximum number of directors allowed for the corporation
if ("maximum" in lowercase_content or "more than" in lowercase_content) and "directors" in lowercase_content and "number" in lowercase_content:
max_directors = extract_maximum_directors(content)
if max_directors is not None:
maximum_number_of_directors.append({"max_directors": max_directors, "provenance": main.get_url(file_name)})
# "directors": array, // One or more directors of a corporation, with child properties for their full name, election date, and address
if "elected" in lowercase_content and "director" in lowercase_content and "register" in lowercase_content:
extracting_election_of_director = True
if extracting_election_of_director is True:
election_of_director_tokens = election_of_director_token_count + main.num_tokens_from_string(content)
election_of_director_provenance.append(main.get_url(file_name))
if election_of_director_tokens < election_of_director_max_token_limit:
                election_of_director_token_count = election_of_director_tokens  # running total (already includes this page)
election_of_director_content += content
parsed_this_page = True
if election_of_director_tokens >= election_of_director_max_token_limit or page_number == file_count:
if parsed_this_page:
output = extract_election_of_directors(election_of_director_content)
else:
output = extract_election_of_directors(content)
if output is not None:
try:
output = json.loads(output)
found_directors = []
for director in elected_directors:
if not bool(director):
elected_directors.remove(director)
for item in output:
if "director_name" in director and director['director_name'] == item['director_name']:
director['director_name'] = item['director_name'].title()
director['date_elected'] = item['date_elected']
director['date_retired'] = item['date_retired']
if "address" in item and isinstance(item['address'], str):
director['address'] = re.sub(r'\s+', ' ', item['address']).strip()
try:
director['provenance'] = item['provenance']
except KeyError as e:
print(e)
else:
item['director_name'] = item['director_name'].title()
item['address'] = main.extract_address_for_person(person=item['director_name'], sorted_files=sorted_files)
item['provenance'] = election_of_director_provenance
found_directors.append(item)
for director in found_directors:
if not any(d['director_name'] == director['director_name'] for d in elected_directors):
elected_directors.append(director)
except json.decoder.JSONDecodeError:
pass
extracting_election_of_director = False
election_of_director_content = ""
election_of_director_token_count = 0
election_of_director_provenance = []
election_of_director_tokens = 0
output = {"directors": elected_directors, "minimum_directors": minimum_number_of_directors, "maximum_directors": maximum_number_of_directors}
return output
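# Usage sketch (illustrative; `sorted_files` is a list of (page_number, file_name) tuples
# produced elsewhere in the pipeline):
#
#   result = Parser(sorted_files)
#   result["directors"]          # [{"director_name", "date_elected", "date_retired", "address", "provenance"}, ...]
#   result["minimum_directors"]  # [{"min_directors": "...", "provenance": "..."}, ...]
#   result["maximum_directors"]  # [{"max_directors": "...", "provenance": "..."}, ...]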
# The following functions use a large language model to perform question & answer-style extraction from a minute book
def extract_minimum_directors(content):
prompt = PromptTemplate(
input_variables=["content"],
template="""What is the minimum number of directors who can sit on
the board of directors? If this passage is about quorum rules return Not Found.
Format output as a number.
Passage:
{content}
Minimum:""")
chain = LLMChain(llm=VertexAI(model_name="text-bison", temperature=0.2),
prompt=prompt)
output = chain.predict(content=content).strip()
if output != "Not Found":
return output
def extract_maximum_directors(content):
prompt = PromptTemplate(
input_variables=["content"],
template="""What is the maximum number of directors who can sit on
the board of directors? If this passage is about quorum rules return Not Found.
Format output as a number.
Passage:
{content}
Maximum:""")
chain = LLMChain(llm=VertexAI(model_name="text-bison", temperature=0.2),
prompt=prompt)
output = chain.predict(content=content).strip()
if output != "Not Found":
return output
def extract_election_of_directors(content):
prompt = PromptTemplate(
input_variables=["content"],
template="""List the names of the directors of the corporation, the
date they were elected, and the date they retired (if not a current director).
The output should be a JSON object with one or more children having the following schema:
{{
"director_name": string // Name of the elected director
"date_elected": string // Formatted date (YYYY-MM-DD) of the elected date
"date_retired": string // Formatted date (YYYY-MM-DD) of the retired date
"address": string // Address of the elected director
}}
If the passage does not mention names of directors, output [].
Passage:
{content}
Directors JSON:""")
chain = LLMChain(llm=VertexAI(model_name="text-bison", temperature=0.2,
max_output_tokens=1024),
prompt=prompt)
output = chain.predict(content=content).strip()
if output != "[]":
return output
| [
"What is the minimum number of directors who can sit on\n the board of directors? If this passage is about quorum rules return Not Found.\n Format output as a number.\n Passage:\n {content}\n Minimum:",
"content",
"List the names of the directors of the corporation, the\n date they were elected, and the date they retired (if not a current director).\n The output should be a JSON object with one or more children having the following schema:\n {{\n \"director_name\": string // Name of the elected director\n \"date_elected\": string // Formatted date (YYYY-MM-DD) of the elected date\n \"date_retired\": string // Formatted date (YYYY-MM-DD) of the retired date\n \"address\": string // Address of the elected director\n }}\n If the passage does not mention names of directors, output [].\n Passage:\n {content}\n Directors JSON:",
"What is the maximum number of directors who can sit on\n the board of directors? If this passage is about quorum rules return Not Found.\n Format output as a number.\n Passage:\n {content}\n Maximum:"
] |
2024-01-10 | drewgillson/googlepalm-minute-book-extraction | terraform~modules~cloud_functions~src~minute-book-parser~officers.py | import main
import json
import re
from langchain.prompts import PromptTemplate
from langchain.llms import VertexAI
from langchain.chains import LLMChain
def Parser(sorted_files):
"""
Extracts details of appointed officers from the sorted pages of minute book.
Args:
sorted_files (list): A list of tuples where each tuple contains the page number and file name of a sorted file.
Returns:
A list of dictionaries where each dictionary represents an appointed officer and includes their full name, appointment
date, address, title, and URL of the source document page where the details were extracted from.
"""
elected_officers = [{}]
election_of_officer_content = ""
election_of_officer_provenance = []
election_of_officer_token_count = 0
election_of_officer_max_token_limit = 1024
extracting_election_of_officer = False
file_count = len(sorted_files)
for file in sorted_files:
page_number, file_name = file
content = main.get_page(file_name)
lowercase_content = content.lower()
parsed_this_page = False
# "officers": array, // One or more officers of a corporation, with children properties for their full name, election date, address, and title
if "officer" in lowercase_content and "register" in lowercase_content:
extracting_election_of_officer = True
if extracting_election_of_officer is True:
election_of_officer_tokens = election_of_officer_token_count + main.num_tokens_from_string(content)
election_of_officer_provenance.append(main.get_url(file_name))
if election_of_officer_tokens < election_of_officer_max_token_limit:
                election_of_officer_token_count = election_of_officer_tokens  # running total (already includes this page)
election_of_officer_content += content
parsed_this_page = True
if election_of_officer_tokens >= election_of_officer_max_token_limit or page_number == file_count:
if parsed_this_page:
output = extract_election_of_officers(election_of_officer_content)
else:
output = extract_election_of_officers(content)
if output is not None:
try:
output = json.loads(output)
found_officers = []
for officer in elected_officers:
if not bool(officer):
elected_officers.remove(officer)
for item in output:
if "officer_name" in officer and officer['officer_name'] == item['officer_name']:
officer['officer_name'] = item['officer_name'].title()
officer['date_appointed'] = item['date_appointed']
officer['date_retired'] = item['date_retired']
officer['position_held'] = item['position_held']
if "address" in item and isinstance(item['address'], str):
officer['address'] = re.sub(r'\s+', ' ', item['address']).strip()
try:
officer['provenance'] = item['provenance']
except KeyError as e:
print(e)
else:
item['officer_name'] = item['officer_name'].title()
item['address'] = main.extract_address_for_person(person=item['officer_name'], sorted_files=sorted_files)
item['provenance'] = election_of_officer_provenance
found_officers.append(item)
for officer in found_officers:
if not any(d['officer_name'] == officer['officer_name'] for d in elected_officers):
elected_officers.append(officer)
except json.decoder.JSONDecodeError:
pass
extracting_election_of_officer = False
election_of_officer_content = ""
election_of_officer_token_count = 0
election_of_officer_provenance = []
election_of_officer_tokens = 0
return elected_officers
# The following function uses a large language model to perform question & answer-style extraction from a minute book
def extract_election_of_officers(content):
prompt = PromptTemplate(
input_variables=["content"],
template="""List the names of the officers of the corporation, the date they were elected,
and the date they retired (if not a current officer). The output should be a
JSON object with one or more children having the following schema:
{{
"officer_name": string // Name of the elected officer
"date_appointed": string // Formatted date (YYYY-MM-DD) of the appointed date
"date_retired": string // Formatted date (YYYY-MM-DD) of the retired date
"position_held": string // Position held by the elected officer
"address": string // Address of the elected officer
}}
If the passage does not mention names of officers, output [].
Passage:
{content}
Officers JSON:""")
chain = LLMChain(llm=VertexAI(model_name="text-bison", temperature=0.2,
max_output_tokens=1024),
prompt=prompt)
output = chain.predict(content=content).strip()
if output != "[]":
return output
| [
"List the names of the officers of the corporation, the date they were elected,\n and the date they retired (if not a current officer). The output should be a\n JSON object with one or more children having the following schema:\n {{\n \"officer_name\": string // Name of the elected officer\n \"date_appointed\": string // Formatted date (YYYY-MM-DD) of the appointed date\n \"date_retired\": string // Formatted date (YYYY-MM-DD) of the retired date\n \"position_held\": string // Position held by the elected officer\n \"address\": string // Address of the elected officer\n }}\n If the passage does not mention names of officers, output [].\n Passage:\n {content}\n Officers JSON:",
"content"
] |
2024-01-10 | drewgillson/googlepalm-minute-book-extraction | terraform~modules~cloud_functions~src~minute-book-parser~entity_details.py | import main
import json
import re
from langchain.prompts import PromptTemplate
from langchain.llms import VertexAI
from langchain.chains import LLMChain
def Parser(sorted_files):
"""
Extracts various entity details from the sorted pages of a minute book.
Args:
sorted_files (list): A list of tuples where each tuple contains a page number and file name.
Returns:
A list of dictionaries where each dictionary contains the extracted entity details that match the minute book
extraction schema. Each detail object includes the date, extracted details, and the URL of the source document
page where the details were extracted from.
"""
entity_name = ""
entity_details = []
for file in sorted_files:
page_number, file_name = file
content = main.get_page(file_name)
lowercase_content = content.lower()
# "entity_name": string, // Incorporation number for the corporation
if page_number == 1:
entity_name = extract_entity_name(content)
# "tax_id_number": string, // Tax identification number for the corporation
if "business number" in lowercase_content or "business no." in lowercase_content:
tax_id_number = extract_tax_id_number(content)
if tax_id_number is not None:
entity_details.append({"tax_id_number": tax_id_number, "provenance": main.get_url(file_name)})
# "entity_number": string, // Incorporation number for the corporation
# "entity_type": string // Type of business entity
# "formation_date": string, // Date (YYYY-MM-DD) when the corporation was incorporated
# "address": string, // Address where the corporation is registered
# "home_jurisdiction": string, // Jurisdiction where the corporation is incorporated
if "certificate" not in lowercase_content and "articles" in lowercase_content and ("address" in lowercase_content or "number" in lowercase_content):
try:
output = extract_entity_details(content)
output = json.loads(output)
output['entity_name'] = output['entity_name'].upper()
missing_values = False
for key, value in output.items():
if not value:
missing_values = True
break
if output['entity_name'] == entity_name and not missing_values and "address" in output:
entity_details.append({"details": output, "provenance": main.get_url(file_name)})
except json.decoder.JSONDecodeError:
pass
# TODO implement Fiscal Month, Fiscal Day, Home Report Filed Date, and Waived Auditor?
return entity_details
def extract_entity_name(content):
prompt = PromptTemplate(
input_variables=["content"],
template="""Extract the name of the corporate entity from this passage.
Passage:
{content}
Entity:""")
chain = LLMChain(llm=VertexAI(model_name="text-bison", temperature=0.2),
prompt=prompt)
return chain.predict(content=content).strip().upper()
def extract_tax_id_number(content):
prompt = PromptTemplate(
input_variables=["content"],
template="""Extract the business number / tax identification number from this passage.
Passage:
{content}
Entity:""")
chain = LLMChain(llm=VertexAI(model_name="text-bison", temperature=0.2),
prompt=prompt)
return chain.predict(content=content).strip()
def extract_entity_details(content):
prompt = PromptTemplate(
input_variables=["content"],
template="""What is the name of the entity, corporate registration number, date of incorporation,
type of entity, address, and jurisdiction in these articles of incorporation?
The output should be a JSON object with the following schema:
{{
"entity_name": string // Name of the corporate entity
"corporation_number": string // Corporation number of the entity (should contain numbers)
"formation_date": string // Date of incorporation or formation (YYYY-MM-DD)
"entity_type": string // Type of entity (e.g. corporation, limited liability company)
"address": string // Mailing address with street, city, state/province, and zip/postal code
"home_jurisdiction": string // Jurisdiction of incorporation (State/Province, Country)
}}
Do not include keys if they are not present in the passage.
Passage:
{content}
JSON:""")
chain = LLMChain(llm=VertexAI(model_name="text-bison", temperature=0.4,
max_output_tokens=1024),
prompt=prompt)
output = chain.predict(content=content)
if output != "Not Found":
return re.sub(r'\s+', ' ', output)
| [
"content",
"Extract the name of the corporate entity from this passage.\n Passage:\n {content}\n Entity:",
"What is the name of the entity, corporate registration number, date of incorporation,\n type of entity, address, and jurisdiction in these articles of incorporation?\n The output should be a JSON object with the following schema:\n {{\n \"entity_name\": string // Name of the corporate entity\n \"corporation_number\": string // Corporation number of the entity (should contain numbers)\n \"formation_date\": string // Date of incorporation or formation (YYYY-MM-DD)\n \"entity_type\": string // Type of entity (e.g. corporation, limited liability company)\n \"address\": string // Mailing address with street, city, state/province, and zip/postal code\n \"home_jurisdiction\": string // Jurisdiction of incorporation (State/Province, Country)\n }}\n Do not include keys if they are not present in the passage.\n Passage:\n {content}\n JSON:",
"Extract the business number / tax identification number from this passage.\n Passage:\n {content}\n Entity:"
] |
2024-01-10 | drewgillson/googlepalm-minute-book-extraction | terraform~modules~cloud_functions~src~minute-book-parser~quorum_rules.py | import main
from langchain.prompts import PromptTemplate
from langchain.llms import VertexAI
from langchain.chains import LLMChain
def Parser(sorted_files):
"""
Extracts quorum rules for directors and shareholders from the sorted pages of minute book.
Args:
sorted_files (list): A list of tuples where each tuple contains the page number and file name of a sorted file.
Returns:
A list of dictionaries where each dictionary contains the extracted quorum details that match the minute book
extraction schema. Each quorum object includes the date, extracted quorum rules, and the URL of the source
document page where the rules were extracted from.
"""
quorum_rules = []
quorum_content = ""
quorum_token_count = 0
quorum_max_token_limit = 3072
extracting_quorum = False
file_count = len(sorted_files)
for file in sorted_files:
page_number, file_name = file
content = main.get_page(file_name)
        lowercase_content = content.lower()
        parsed_this_page = False
if "quorum" in lowercase_content:
extracting_quorum = True
# "directors_quorum": string, // Quorum rules for directors
# "shareholders_quorum": string, // Quorum rules for shareholders
if extracting_quorum is True:
quorum_tokens = quorum_token_count + main.num_tokens_from_string(content)
shareholders_quorum, directors_quorum = [None, None]
if quorum_tokens < quorum_max_token_limit:
                quorum_token_count = quorum_tokens  # running total (already includes this page)
quorum_content += content
parsed_this_page = True
# Quorum rules can sometimes be split across multiple pages so we need a larger context window
if quorum_tokens >= quorum_max_token_limit or page_number == file_count:
if parsed_this_page:
shareholders_quorum = extract_shareholders_quorum(quorum_content)
directors_quorum = extract_directors_quorum(quorum_content)
else:
if shareholders_quorum is None:
shareholders_quorum = extract_shareholders_quorum(content)
if directors_quorum is None:
directors_quorum = extract_directors_quorum(content)
quorum_rules.append({"directors_quorum": directors_quorum, "provenance": main.get_url(file_name)})
quorum_rules.append({"shareholders_quorum": shareholders_quorum, "provenance": main.get_url(file_name)})
extracting_quorum = False
quorum_content = ""
quorum_token_count = 0
quorum_tokens = 0
return quorum_rules
# The following functions use a large language model to perform question & answer-style extraction from a minute book
def extract_directors_quorum(content, entity_name="the corporation"):
prompt = PromptTemplate(
input_variables=["content", "entity_name"],
template="""What constitutes quorum for meetings of directors of {entity_name} where only
one director is present? How about when two or more directors are present? Is
a majority of directors required for quorum? Explain in a concise paragraph.
THINK: Do not explain quorum for meetings of shareholders, this is irrelevant.
Passage:
{content}
Director Quorum:""")
directors_quorum_candidate = LLMChain(llm=VertexAI(model_name="text-bison", temperature=0.5, max_output_tokens=512),
prompt=prompt)
    return directors_quorum_candidate.predict(content=content, entity_name=entity_name).strip()
def extract_shareholders_quorum(content):
prompt = PromptTemplate(
input_variables=["content"],
template="""What constitutes quorum for meetings of shareholders according to this passage?
THINK: Do not get confused between meetings of directors and meetings of shareholders.
Passage:
{content}
Shareholder Quorum:""")
shareholders_quorum_candidate = LLMChain(llm=VertexAI(model_name="text-bison", temperature=0.5, max_output_tokens=512),
prompt=prompt)
return shareholders_quorum_candidate.predict(content=content).strip()
| [
"What constitutes quorum for meetings of shareholders according to this passage?\n THINK: Do not get confused between meetings of directors and meetings of shareholders.\n Passage:\n {content}\n Shareholder Quorum:",
"What constitutes quorum for meetings of directors of {entity_name} where only\n one director is present? How about when two or more directors are present? Is\n a majority of directors required for quorum? Explain in a concise paragraph.\n THINK: Do not explain quorum for meetings of shareholders, this is irrelevant.\n Passage:\n {content}\n Director Quorum:",
"content",
"entity_name"
] |
2024-01-10 | drewgillson/googlepalm-minute-book-extraction | terraform~modules~cloud_functions~src~minute-book-parser~restrictions_provisions.py | import main
from langchain.prompts import PromptTemplate
from langchain.llms import VertexAI
from langchain.chains import LLMChain
def Parser(sorted_files):
"""
Extracts restrictions and provisions related to a corporation from a minute book.
Args:
sorted_files (list): A list of tuples where each tuple contains the page number and file name of a sorted file.
Returns:
A list of dictionaries where each dictionary represents a set of restrictions or provisions and includes the date
when they were established, the type of restriction or provision, the text of the restriction or provision, and the URL
of the source document page where the restriction or provision was extracted from.
"""
restrictions_provisions = []
for file in sorted_files:
page_number, file_name = file
content = main.get_page(file_name)
lowercase_content = content.lower()
# "transfer_restrictions": string, // Provisions or rules that limit or regulate the transfer or sale of a company's shares or other ownership interests
if "transfer" in lowercase_content and "restrictions" in lowercase_content and "certificate" not in lowercase_content:
output = extract_transfer_restrictions(content)
restrictions_provisions.append({"transfer_restrictions": output, "provenance": main.get_url(file_name)})
# "other_restrictions": string, // Restrictions on the corporation's activities
if "other" in lowercase_content and "restrictions" in lowercase_content and "certificate" not in lowercase_content:
output = extract_other_restrictions(content)
restrictions_provisions.append({"other_restrictions": output, "provenance": main.get_url(file_name)})
# "other_provisions": string, // Additional provisions or rules that are not covered by the other properties
if "other provisions" in lowercase_content:
output = extract_other_provisions(content)
restrictions_provisions.append({"other_provisions": output, "provenance": main.get_url(file_name)})
return restrictions_provisions
# The following functions use a large language model to perform question & answer-style extraction from a minute book
def extract_other_restrictions(content):
prompt = PromptTemplate(
input_variables=["content"],
template="""If this passage from a set of corporate by-laws
pertains to other restrictions, read the restrictions and then describe
them concisely. Do not include share transfer restrictions. Do not include
information about the minimum or maximum number of directors. Format output
as a single line without linebreaks.
Passage:
{content}
Other Restrictions:""")
chain = LLMChain(llm=VertexAI(model_name="text-bison", temperature=0.2,
max_output_tokens=512),
prompt=prompt)
output = chain.predict(content=content).strip()
if output != "Not Found":
return output
def extract_transfer_restrictions(content):
prompt = PromptTemplate(
input_variables=["content"],
template="""If this passage from a set of corporate by-laws
pertains to share transfer restrictions, read the restrictions and then
describe them concisely. Do not include any other restrictions except
for share transfer restrictions. Do not include information about the
minimum or maximum number of directors. Format output as a single line
without linebreaks.
Passage:
{content}
Share Transfer Restrictions:""")
chain = LLMChain(llm=VertexAI(model_name="text-bison", temperature=0.2,
max_output_tokens=512),
prompt=prompt)
output = chain.predict(content=content).strip()
if output != "Not Found":
return output
def extract_other_provisions(content):
prompt = PromptTemplate(
input_variables=["content"],
template="""If this passage from a set of corporate by-laws pertains to other provisions,
read the provisions and then describe them. Do not include information about
the minimum or maximum number of directors. Format output as a single line
without linebreaks.
Passage:
{content}
Other Provisions:""")
chain = LLMChain(llm=VertexAI(model_name="text-bison", temperature=0.2,
max_output_tokens=512),
prompt=prompt)
output = chain.predict(content=content).strip()
if output != "Not Found":
return output
| [
"If this passage from a set of corporate by-laws pertains to other provisions,\n read the provisions and then describe them. Do not include information about\n the minimum or maximum number of directors. Format output as a single line\n without linebreaks.\n Passage:\n {content}\n Other Provisions:",
"If this passage from a set of corporate by-laws\n pertains to share transfer restrictions, read the restrictions and then\n describe them concisely. Do not include any other restrictions except\n for share transfer restrictions. Do not include information about the\n minimum or maximum number of directors. Format output as a single line\n without linebreaks.\n Passage:\n {content}\n Share Transfer Restrictions:",
"If this passage from a set of corporate by-laws\n pertains to other restrictions, read the restrictions and then describe\n them concisely. Do not include share transfer restrictions. Do not include\n information about the minimum or maximum number of directors. Format output\n as a single line without linebreaks.\n Passage:\n {content}\n Other Restrictions:",
"content"
] |
2024-01-10 | drewgillson/googlepalm-minute-book-extraction | terraform~modules~cloud_functions~src~minute-book-parser~share_classes.py | import main
import json
import re
from langchain.prompts import PromptTemplate
from langchain.llms import VertexAI
from langchain.chains import LLMChain
def Parser(sorted_files):
"""
Extracts share class details from the sorted pages of minute book.
Args:
sorted_files (list): A list of tuples where each tuple contains the page number and file name of a sorted file.
Returns:
A list of dictionaries where each dictionary represents a share class and includes its name, voting rights,
votes per share, limit for number of shares, number of shares authorized, and share restrictions. Each share class
object also includes the URL of the source document page where the share class details were extracted from.
"""
share_classes = [{}]
share_class_content = ""
share_class_token_count = 0
share_class_max_token_limit = 2560
extracting_share_classes = False
file_count = len(sorted_files)
for file in sorted_files:
page_number, file_name = file
content = main.get_page(file_name)
lowercase_content = content.lower()
# "share_classes": array, // One or more share classes with children properties for name, voting rights, votes per share, limit for number of shares, number of shares authorized, and share restrictions
if "authorized to issue" in lowercase_content and "class" in lowercase_content:
extracting_share_classes = True
if extracting_share_classes is True:
share_class_tokens = share_class_token_count + main.num_tokens_from_string(content)
if share_class_tokens < share_class_max_token_limit:
share_class_token_count = share_class_tokens  # share_class_tokens already includes the previous count, so assign rather than add
share_class_content += content
if share_class_tokens >= share_class_max_token_limit or page_number == file_count:
output = extract_share_classes(share_class_content)
try:
share_classes = json.loads(output)
share_classes.append({'provenance': main.get_url(file_name)})
# Keep only non-empty share class objects
share_classes = [share_class for share_class in share_classes if bool(share_class)]
except json.decoder.JSONDecodeError:
pass
extracting_share_classes = False
share_class_content = ""
share_class_token_count = 0
share_class_tokens = 0
return share_classes
# The following function uses a large language model to perform question & answer-style extraction from a minute book
def extract_share_classes(content):
prompt = PromptTemplate(
input_variables=["content"],
template="""What share classes is the corporation authorized to issue? Output JSON
objects that conform to the following schema:
{{
{{
"share_class": string // Name of class of shares (example: Class A, Class B or Common, Preferred)
"voting_rights": string // Yes or no
"votes_per_share": string // Number of votes per share
"notes": string // Summarize rights, privileges, restrictions, and conditions
}},
// Repeat for each share class found
}}
Passage:
{content}
Share Classes JSON:""")
chain = LLMChain(llm=VertexAI(model_name="text-bison", temperature=0.5,
max_output_tokens=1024),
prompt=prompt)
output = chain.predict(content=content)
return re.sub(r'\s+', ' ', output)
| [
"content",
"What share classes is the corporation authorized to issue? Output JSON\n objects that conform to the following schema:\n {{\n {{\n \"share_class\": string // Name of class of shares (example: Class A, Class B or Common, Preferred)\n \"voting_rights\": string // Yes or no\n \"votes_per_share\": string // Number of votes per share\n \"notes\": string // Summarize rights, privileges, restrictions, and conditions\n }},\n // Repeat for each share class found\n }}\n Passage:\n {content}\n Share Classes JSON:"
] |
2024-01-10 | ituki0426/ML | src~OpenAI~vision.py | from openai import OpenAI
client = OpenAI()
response = client.chat.completions.create(
model="gpt-4-vision-preview",
messages=[
{
"role": "user",
"content": [
{"type": "text", "text": "この画像の中の文字を出力してください"},
{
"type": "image_url",
"image_url": {
"url": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAPkAAACaCAMAAABc8R5WAAAAhFBMVEX///8AAAC+vr5+fn4/Pz/8/PyMjIxaWlpJSUl7e3v4+Pj29vbh4eHa2tpUVFQMDAw4ODjv7+/s7OzNzc1CQkJiYmLDw8MXFxeurq4fHx9RUVHJycmenp6YmJjV1dUQEBBsbGxycnKoqKiGhoa0tLQqKipubm4xMTGbm5scHBySkpIkJCT7BdU9AAASyElEQVR4nO1d6ZaquhJOmGWQQRDFgVlafP/3u0kACQhGQPbpdVd/P3a7lRAqSQ2pVBUA/OEPk6ElExsk2irPMQvilbtOa7Er9aatkk3sbS+oE1ushzKH/KQGZ+sW1jMXbyZ3x8uTm6yGA+SmXC76m4dZUb47Vn8l6/PmWrib0tuqiOEk3jvBg25UH920+vtwJ7Tf/J5JLybMGEIKGy7X4bX+m05ov4P2pP7WgwMRs+4uHwqeK+eaHCeSz+ew+oaHEje4bJySLA6986VoTpWKa+EAM3CLhNtnV6cCDIWomrWILJaN4ENB0Aeu1a0LXhVlj5vy6WJxHdygHe0P0Ot9nQotFLH9XoPPMVK21d98hM2VXYZGFbiP7tdC8IWn/gaKo7cHDu+gFW99oNj38PxsWY2WM8Lm4g14SAmoZm9Qt8L8h/0mbBjeIwd/Sjj4AeUX+NRKVkVSAvcj16ph0BmqCt40iboaMsg5xZEsZ42m3OBa0JR55nPpK5V+8iDFDB1wmOhWF9QIpujAFRFDB7G6eEp7lCd8i5QijdKBskL+PNDqDcRT5CWSW5nxzW1KiGS78gD6TUikoOEJoV794kSj+duwCjRrR+DpPcpHIMLo+Tl9VN/E4LABkQ5PQCKroGzE5QVN9xWxeayZCciKullYD8EGNfwPoeLu9+YWP6sG2YbljuLa2iaR81hRgXNGU78ltCTQrC7QlCAuYAKck48IrVYIsntq5ilhT+j/YxBirzu8nj+h/EJz7YPYJOKeNPNSJMbrGzw52eEwN4EUrYW83tAmLb2/hOE/oNyWDx3B/EPrK58D2cMmk25X05uYHHDMCH1yL8AwtepqpWxa6L/Fgo8FWGzfXpHAzZF2RajHdgE4oQpOVkDM1duhuqF/VRUXc0SoAz33yCo/5U87eXv62rOvDf2Yd4USV7S7D2wT6IQqp14K+i1QLmLzm0MuVa3nurr+Fit2FpK+0csC/1u2K3/4wx/+8AcWfqVmdoYcRd+FuP2VqtkRVndw3yZvui68PObO+Bwqvwmcdxc4j5VJz5TJTSQLGov71fijOeYUqcDd347MUuj+DCKU8Btd+6zd3kZ4PzTLIEw7liMQzW84YA3miaDmz3i4T8H7Mw6GT1D6QtcX9omgZK623p1ZNEiQ2yibpVpHNq+RIr0deC2MFnYyCt6cc0ymQOtgW9MlYxe+6XJO+H4983ClSRf9aE4r84HmO7gv69uA+PDn+F5ijJ2DLEbyge/uFTuIXVLHhX64H+zLdFgeXGXh+I5B8Oe0kvB4XZdKORlr8zNLyn0gBufAhtGcZgF+5hTqzqKj9Ds+oAtCUX+rs3UYLelkDBmcGhyFUWnzvADxkunQsTbXoAfc9zLsuIpbf/PJIeQLqnVuxrtFwj3By1iHScJg9O2YdOf4Wzn8ywjEMn4yqDt6mvgOHDHak0e8KDiu9HHffC7Vj7Djh0O1eHgYvsFBhtMoj7dyM4qimVM/6P9tAAwXDYv5ZFSvXaYpJhuWXuNr1iGlTFVzbJf+FU+zypQJ2fDknl7iQRrIjK1eDwlsHTA7epSzfojAE18JFdDfn/8ALHQGGVqHY3bDfZqcocPnOFol8/0QgSf+EeVCPvi1OHZmayA20MrgQ/3yw+cmzzcMncCf5gce/zBimnxKueNt8Kwlg/qCSbkGIyCeg1c1G5rDDX7gyd6ehf59R6JFdlzoNvFwWEQ0/YhcBmVuxMP0IeVaoBcRZqFBG4FJOQcTUb5sXzu7jwR5yqazdRIcDphJ1GAfRqJFHNobkMJWeL0xaj6knM+ISZgOO6qYlG+gIRsniI+mjQ7fPUYU+t2XHTRlIpB/JJ9tSR9oAXqjKI+H7l+SkbuTfxv5x8k0qIszkRjhSn8roJLmt5z8aTq80DchSkUwZTRkexsk8b2jTfJhytFWz8oxk+/8EyhNpm3RkWMRNQzWkBzZE27Jyb+NSjhtKHQ31woyTkSzH1CgkuaJQP40y/JA3+UCMJtbyrF+HL9DeTEserE2V5Be0x3XQCv2ec3Yag9ocXFrFRk2oIEBHGQb7PpD/KmEI+FHOyQ1DQOIp94ksFY71uYRen5MQZfy4/CcY20emWJC1jnfzttYPFxHB25arYZj33YRiDwlPRx7Ry4sypP6eh1bkxI0VM/1pCTsmoQsyjG3SVA/4eDULuVhK+EOlAo7IkrioxjgH/UH039sQ9pQk1pxl8H91dX1MkX3s3qyjkG5Aeulppk8Yro7yE7BBi2A7sOwKMfavISi/DrnEDafHNjusFTzjD3mLjZgHYttxu47FuK5Df8VlXuBO3XPaHR6fMWgXJXz+qZc4BbE1gyvgOt55FmU52gS7Lwgk+rTD6lRloxX/PTbARzRZICS5Q6VOnH5e/gijUwdnAuta4sMUF4GQdreia8WoWHjtZNhaYukp6d32HOAco1X5HLI9O5QrkOKQcsB0161vHj7eGvCG/d02/H3dW5KwB2R6EilLqO/8JAYpE4Whs/vBcKIV3ML1BwPk4R6yTOvy+gvd3GsH/sC8wF1FNLstqc3F8qAsLviSPb3EWoZLB/d2TT7pnIm4bAhlllwtgxs+jY7ibK6/mpmtixgcks076nH2uLFW2cwhpSE5rf/vcDL8zPLizGKUu4NvDDPmx3j3a0KYR0a1whcZONMcW65WL6eYMG4LKJckN9zRsZw1t77JGB2M6clTb3gLBAP7vAOrYW1sJthZHB+zMAJfmP3eoER4wpzlfBaDc5PzQi+4QjXHiFDHRlfObx8xXaaQ4fCZdSFMwWeyQrRKUcdJstwGHNssrAPZzbs4OIzvd4FSw7MhGrOW+764/SFlGwuR/N5eXvJdaJn+XPEs44UHRfP1Xu7iY0rtkuc93Iy9tdK+3ZettMfQCtyZGxYC9eh4VvoLvnbcGvRX2vK8a5lujHDQwKmN/U9lOoub8/Hy/uKmf6/JttyALq5yhFyDcNcb0EthbtuYlg2S8j9C5yLNcPhEKTjb0mm72J3Xz3mt5yazvJP4CjrBzv/4Q9/+MMf/v9QrhhFPh+GsrqZkUy23E8p/4VaC1nKv6XtVKyazIG2ycVkD0N6hMsTveyY5b0tV03mAGoxY/omBmSNIGc5hJToC72MIprD5P7SqH4Mh5nHoptz4nI/RDYnj+U7ruCE7bdOV3NGAfE+h4YLPPFKuvShIlOPlKFj4RZauI63HeAyGnNUhwyDg41DwBYh97ecY74nnffXEnLWrA2qbyKpKC/Ms3DgA416/l6l6l850BjAbtYRUVVFcGIxwhfgynZAhdH7q
6yVCnTN004/WJs7C04jCSJ8Cs3MqkhnJR8wIZqzPMek4NjE6PJXEG3usUL4dhOLun6I07yzmzvmTeEO0iVna0Sbi2EAtm+dTmLICi2YhXKWEapXeSyRs6jgL6lTa8DLlbFrUMZMXJ07TIt7EMvg2WAL5zx7RkLd4sJbxIES0VeuEtXbEoc7DLL8ZkwIS1OrG8dWG9zrH6kf7P/W95wUw2kbyShHMrP8utBg6ua1QO8mShzHDjS+s1dkupBfKk5WOMFopIE3LYImo0ON6JuOdIzwlWKf7GwOfiQwfjyPZWp143YKD90w/7Go2X+VxzKcI/smj2VDl2Rn4JoVMOEasfbTshDHufCwLJsDaOcfUmttUF58kMcio6vKV6k5nseSaWns9s//R6K8L7yZ83xDudTmsfAbaPEjouRDylXrnEd4Tz24B/kgj+UilpvglbXG8lg8qOM8FiQWbY4abL4ubBzIXryhLFSbjrbcUPFBp/GtwYeUX37E0Bu17ZiU83CH81gUNPu7jq4dy2M53gMH2BcHSHHisz0YHG0nR1QIZDp0/x2VzdEsw51Eg7o4sYkR/roVoLM5Gq7M6JuQRxLM7RUZGzpItnKHkJE8Fh0+wioAGO3hI3bCskRvAGjKlaG2KZXB08QwZQqNzuUe3uy/OKps0jyuMniaRSbRN8FR9qp5DwUsY9X7Dpxpr0ExTHnyrG58vjZVezHG8lg6M0LlsYhV4WpcuPfat+s+lu2hjHcYEtAMIPaXPGu17+GPpmCCbTMBV9pGGVntEWL/qKpufD1QOexj1Y1zmoxNK9sxm+se2HiedCim5TSAfU0lqdCCtgJi5G7KpOgOIItyrFQlqOsR/s+BJtZvJdyOGhBMSVRVN+Y2LnP/0XUEUHksB3jSgpPNS7kKhGl5LE6Tx4LrgNiFKXIHnMdmTstjwWluKRR5PIpiQYcHtnksmknlseBZq6sbozlkOtV2HQl+bgW9fVQUPKDbsi3124BFuW/W3Cv5N4Ukdh/34GpOm3P/hoOVtsRe8zpVaeHT66U+TKo4M/4HVzc20DY3YW46O9WNkaBv9wh21dS/guzYs9RH8lhayqRKUNq2er3iJF8dCZM00DoyfiSP5an7ryT76oRvKu3BvpXDDh2PfR7QXic0U3z4ds5xdePOZtiA/b3xDtlLXnzoDuBLb6KyyWJoPdlvSwZBx4U3FOwg/0EzVyRp52HUFy+GbUmZ/JJDhMU+z7vtBHUy7+UhYSdtIuW9fyiBl7xzhfiSUHBGD5IqrCi0RCBVVZon2nv1Ix7Axccd3JCk28rv3clouxwBHCXfj/i1sQXmtuvlhzrVGPFi2Kw8PU1WegNfzHu5ygbPlAMbBVkXvRJ5IYgm7GhdTBLHzC2g04bLr7kQvHl5CdwRCxpz4TtpLjgL1IBHxmXuKjWzzgvqURnM3JtPkIzWz2gQruJwd15S9T5HNC/vqYfCZOyv9XW8zkCYXc8xg9EX+k+Zh0fnL8QoDGH2cYEeskT3J+B8ppxxv1Jr8xXanJQGBB0byslSSbvD73t6P4KLz7BGMVheggmbvOhoaQz21XJwZsjba9JZp9yfQJ81porC3+T+q6imQjt6m2jLeLfEim/22oTTo0WkKgNlYayMXN3lLWmH1aYcv6Djw1ezUajdmwsLHFeuLu6dVrP999lsy8B9RS+vg2jdV76krKTY/wynoYIE30SwbqjhbDjF8oLh76EGvzLi13GX14hn4lembf2DtxX84Q9/+MMf/p+QRf/1E9Q4TN88gGwrTItj1OWnP31XTNkJibE7+OboD2Hfnl70gTt5012hV7Yrq4vAa/y46qNycRkfpkxwxRLPkMb5jRNC5Ip+NJDmTzfYymmvw3FgyUfVx7TeCSlj7uNLz19znLYjT7vbAbWtw1dFznbATy/6EEx74UD7AmgxrLanYv9t1094vYPgid75Xlgs10blDOTBGJNrBYnhtD1s9DyH39dnJNxoiHmvqNpUB3CPcr49kxly+U52QuzgBRhb4UPvQiSEpiBUT1C9F5kXHrAQhhmmR7lnngtLob+zAwuL18PwWrC6lAt5aRXb6o3EpghKJej8Lk/1QkjwupP3974fbyRaxOHw21/qrgjXXrni2FZ67qJH+RF6GjjSDxhwKY6W9oe9iN051yBMVa06MfEDEJ/T7hJKp8axB/7JE7lKvqfteftYvVej5VWromGczXuU6xBrpZyScnsPCI/x95N0KSdhCiKRcughIg4oRw3opdSwdzn1BSmh+Qzo2Y++vb7FofW6FV7daoDNJRJHSMobNcxRJdZ1CsY6DnmRzCvXxqRdWAUj1t+RuspVtMoPvFfr0HDBuSmkeZ74zogr9CKzGnLVM9mUU2KmfoX7YMV6nSwW90z+1N95OPLlp3vuXOLB3ob9Z74O1IMmhelj0pkXllaO1+cZOloTOnCZSPkFXoEHVRs9T6pTlKdCC4Wu69weVwcVdwpHXIZbvOy0pOzp7+5qf+DoHdcU6brPJLwOca1dGs7l3PMXdlZ7FZV1zMFeBA+FRKFymroDXHOYmk40k3BRP88EZQJ2Z/DBnFMmxIYcTItQBrYL0n240TdR9+IO5USb2+jqrQbOXv24glXF/sWHY+pse+zeoZwwFU5EUsjBsA15URCBvbk3ayKeKOGwWE39qyKq6KnZlNOvrsqqQfY9TTmB8/4ogrinWDqUk8ArHZ4PPI4yqU1/PtQMC7H5z1lAi6jnyOxQLmGTGUkiKSF3En2+JMucVMfDUKZlS6rBHse/oClIPZ6HHot0OqPBrsLq9i4JhONRx3lvB9ChPAsw06QufnPM/mlrngPFwkJZTpGS6PXeofwnwv9G7hm/QgbdIhPQJxuNxqO6lWjOPfDAagwyAvwNjaeFUaeWL3qQq2l3e+8XxqXuVE3Q7oJr6+H5908gu4vd03B2UheWerWaPCxIvXVYlO9h1Eniv+ZtZyoSuol77lqQxujDxNX0HpGm2OJXexo49i/adClnJ3XdPMQ/1WhP3Gt3O0Lm2tvVnsG4W31r0/Zm4yLi0qf7Xa5OqpE9ziNBewa6ky5NXq/azYuq0TqslHhb48J3l6/ozuStJlVF5KTvnIM4a5cR6cNeKbl7Mk5rn6D9n+B/LV8Ie4tKW4MAAAAASUVORK5CYII=",
},
},
],
}
],
max_tokens=300,
)
print(response.choices[0]) | [] |
2024-01-10 | ituki0426/ML | src~BERT~gen_index.py | import openai
import json
from transformers import BertJapaneseTokenizer
from transformers import BertModel
tokenizer = BertJapaneseTokenizer.from_pretrained('cl-tohoku/bert-base-japanese-whole-word-masking')
bert_model = BertModel.from_pretrained('cl-tohoku/bert-base-japanese-whole-word-masking')
import json
# Load the input documents
with open('./docs.json') as f:
docs = json.load(f)
index = []
for doc in docs:
input_s = tokenizer(doc['title'], return_tensors="pt")
outputs = bert_model(**input_s)
last_hidden_states = outputs.last_hidden_state
attention_mask = input_s.attention_mask.unsqueeze(-1)
valid_token_num = attention_mask.sum(1)
base_vec = (last_hidden_states*attention_mask).sum(1) / valid_token_num
base_vec = base_vec.detach().cpu().numpy()[0]
# Add the vector to the database
index.append({
'title': doc['title'],
'embedding': base_vec.tolist()
})
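# Follow-up sketch (hypothetical): once the embeddings are collected, the index can be searched
# with cosine similarity. The query must be encoded with the same tokenizer/model steps used in
# the loop above; numpy is assumed to be available (it already backs the detached tensors).
#
#     import numpy as np
#     def cosine(a, b):
#         a, b = np.asarray(a), np.asarray(b)
#         return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))
#     # query_vec = <encode the query string exactly like the titles above>
#     # best = max(index, key=lambda item: cosine(query_vec, item['embedding']))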
with open('./index.json', 'w') as f:
json.dump(index, f,ensure_ascii=False) | [] |
2024-01-10 | ituki0426/ML | src~OpenAI~gen_index.py | import openai
import os
import json
# Load the input documents
with open('./docs.json') as f:
docs = json.load(f)
index = []
for doc in docs:
# Vectorize the title here
# Using openai.embeddings_utils would make this simpler to write
res = openai.Embedding.create(
model='text-embedding-ada-002',
input=doc['title']
)
# Add the vector to the database
index.append({
'title': doc['title'],
'embedding': res['data'][0]['embedding']
})
with open('./index.json', 'w') as f:
json.dump(index, f,ensure_ascii=False) | [] |
2024-01-10 | ituki0426/ML | src~OpenAI~gen_doc.py | import openai
import os
import json
titles = [
'パトカー',
'Python',
'写真撮影',
'正式名称',
'パイナップル',
'挑戦状',
'成人',
'焼き肉',
'迷彩柄',
'竜巻',
]
SYSTEM_PROMPT = '''
提供される単語を300字以内で説明してください。
'''
docs = []
for title in titles:
res = openai.ChatCompletion.create(
model='gpt-3.5-turbo',
messages=[
{"role": "system", "content": SYSTEM_PROMPT},
{"role": "user", "content": title}
]
)
docs.append({
'title': title,
'body': res.choices[0].message.content
})
print(f'タイトル: {title}')
print(res.choices[0].message.content)
with open('./docs.json', 'w') as f:
json.dump(docs, f,ensure_ascii=False) | [
"\n提供される単語を300字以内で説明してください。\n"
] |
2024-01-10 | neevparikh/atari-graph-priors | gym_wrappers.py | from collections import deque
import torch
import numpy as np
import torchvision.transforms as T
import gym
import cv2
import random
class IndexedObservation(gym.ObservationWrapper):
"""
Description:
Return elements of observation at given indices
Usage:
For example, say the base env has observations Box(4) and you want the indices 1 and 3. You
would pass in indices=[1,3] and the observation_space of the wrapped env would be Box(2).
Notes:
- This currently only supports 1D observations but can easily be extended to support
multidimensional observations
"""
def __init__(self, env, indices):
super(IndexedObservation, self).__init__(env)
self.indices = indices
assert len(env.observation_space.shape) == 1, env.observation_space
wrapped_obs_len = env.observation_space.shape[0]
assert len(indices) <= wrapped_obs_len, indices
assert all(i < wrapped_obs_len for i in indices), indices
self.observation_space = gym.spaces.Box(low=env.observation_space.low[indices],
high=env.observation_space.high[indices],
dtype=env.observation_space.dtype)
def observation(self, observation):
return observation[self.indices]
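# Example usage sketch (hypothetical), following the docstring above: wrapping a Box(4)
# observation space and keeping indices 1 and 3 yields a Box(2) space. The environment id
# is only an illustration.
#
#     env = IndexedObservation(gym.make("CartPole-v1"), indices=[1, 3])
#     assert env.observation_space.shape == (2,)
#     obs = env.reset()   # obs now contains only the entries at indices 1 and 3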
class TorchTensorObservation(gym.ObservationWrapper):
"""
Description:
Downsample the image observation to a given shape.
Usage:
Pass in requisite shape (e.g. 84,84) and it will use opencv to resize the observation to
that shape
Notes:
- N/A
"""
def __init__(self, env, device):
super(TorchTensorObservation, self).__init__(env)
self.device = device
def observation(self, observation):
return torch.from_numpy(observation).to(dtype=torch.float, device=self.device)
# Adapted from https://github.com/openai/gym/blob/master/gym/wrappers/resize_observation.py
class ResizeObservation(gym.ObservationWrapper):
"""
Description:
Downsample the image observation to a given shape.
Usage:
Pass in requisite shape (e.g. 84,84) and it will use opencv to resize the observation to
that shape
Notes:
- N/A
"""
def __init__(self, env, shape):
super(ResizeObservation, self).__init__(env)
if isinstance(shape, int):
shape = (shape, shape)
assert all(x > 0 for x in shape), shape
self.shape = tuple(shape)
obs_shape = self.shape + self.observation_space.shape[2:]
self.observation_space = gym.spaces.Box(low=0, high=255, shape=obs_shape, dtype=np.uint8)
def observation(self, observation):
observation = cv2.resize(observation, self.shape[::-1], interpolation=cv2.INTER_AREA)
return observation
class ObservationDictToInfo(gym.Wrapper):
"""
Description:
Given an env with an observation dict, extract the given state key as the state and pass the
existing dict into the info.
Usage:
Wrap any Dict observation.
Notes:
- By convention, no info is return on reset, so that dict is lost.
"""
def __init__(self, env, state_key):
gym.Wrapper.__init__(self, env)
assert type(env.observation_space) == gym.spaces.Dict
self.observation_space = env.observation_space.spaces[state_key]
self.state_key = state_key
def reset(self, **kwargs):
next_state_as_dict = self.env.reset(**kwargs)
return next_state_as_dict[self.state_key]
def step(self, action):
next_state_as_dict, reward, done, info = self.env.step(action)
info.update(next_state_as_dict)
return next_state_as_dict[self.state_key], reward, done, info
class ResetARI(gym.Wrapper):
"""
Description:
On reset and step, grab the values of the labeled dict from info and return as state.
Usage:
Wrap over ARI env.
Notes:
- N/A
"""
def __init__(self, env):
gym.Wrapper.__init__(self, env)
# change the observation space to accurately represent
# the shape of the labeled RAM observations
self.observation_space = gym.spaces.Box(
0,
255, # max value
shape=(len(self.env.labels()),),
dtype=np.uint8)
def reset(self, **kwargs):
self.env.reset(**kwargs)
# reset the env and get the current labeled RAM
return np.array(list(self.env.labels().values()))
def step(self, action):
# we don't need the obs here, just the labels in info
_, reward, done, info = self.env.step(action)
# grab the labeled RAM out of info and put as next_state
next_state = np.array(list(info['labels'].values()))
return next_state, reward, done, info
# Adapted from OpenAI Baselines:
# https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
class AtariPreprocess(gym.Wrapper):
"""
Description:
Preprocessing as described in the Nature DQN paper (Mnih 2015)
Usage:
Wrap env around this. It will use torchvision to transform the image according to Mnih 2015
Notes:
- Should be decomposed into using separate envs for each.
"""
def __init__(self, env, shape=(84, 84)):
gym.Wrapper.__init__(self, env)
self.shape = shape
self.transforms = T.Compose([
T.ToPILImage(mode='YCbCr'),
T.Lambda(lambda img: img.split()[0]),
T.Resize(self.shape),
T.Lambda(lambda img: np.array(img)),
])
self.observation_space = gym.spaces.Box(
low=0,
high=255,
shape=self.shape,
dtype=np.uint8,
)
def reset(self, **kwargs):
return self.transforms(self.env.reset(**kwargs))
def step(self, action):
next_state, reward, done, info = self.env.step(action)
return self.transforms(next_state), reward, done, info
class MaxAndSkipEnv(gym.Wrapper):
"""
Description:
Return only every `skip`-th frame. Repeat action, sum reward, and max over last
observations.
Usage:
Wrap env and provide skip param.
Notes:
- N/A
"""
def __init__(self, env, skip=4):
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros((2,) + env.observation_space.shape, dtype=np.uint8)
self._skip = skip
def reset(self, **kwargs):
return self.env.reset(**kwargs)
def step(self, action):
# np.save("FIRST_FRAME.npy",self.env.render('rgb_array'))
# if self.episode_steps > self.max_frames - 1000:
# print(self.episode_steps )
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
# np.save("SECOND_FRAME.npy",self.env.render('rgb_array'))
# exit()
if i == self._skip - 2:
self._obs_buffer[0] = obs
if i == self._skip - 1:
self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
class AtariSkips(gym.Wrapper):
def __init__(self, env, max_frames=int(108e3)):
gym.Wrapper.__init__(self, env)
self.env = env
self.episode_steps = 0
self.max_frames = max_frames
def reset(self):
ob = self.env.reset()
self.episode_steps = 0
for _ in range(random.randrange(30)):
ob, reward, done, info = self.env.step(0)
self.episode_steps+=1
if done:
ob = self.env.reset()
return ob
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.episode_steps+=1
#Should we add noop after death?
return ob, reward, done or self.episode_steps > self.max_frames, info
class FrameStack(gym.Wrapper):
def __init__(self, env, k, device, cast=torch.float32, scale=True):
"""Stack k last frames.
cast : torch dtype to cast to. If None, no cast
scale : bool. If True, divides by 255 (scaling to float). cast must be torch.float
Returns lazy array, which is much more memory efficient.
See Also
--------
LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.cast = cast
self.device = device
self.scale = scale
if self.scale:
assert cast == torch.float32 or cast == torch.float64, f"Cast must be torch.float, found {self.cast}"
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = gym.spaces.Box(low=0,
high=255,
shape=((k,) + shp),
dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
# ob = torch.as_tensor(np.stack(list(self.frames), axis=0), device=self.device)
# if self.cast is not None:
# ob = ob.to(dtype=self.cast)
# if self.scale:
# ob = ob.div_(255)
ob = np.stack(list(self.frames), axis=0)
return ob
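# Example composition sketch (hypothetical): the wrappers above are typically chained in the
# Mnih-2015 order - skip/max, preprocess to 84x84, then stack 4 frames. The Atari game id and
# the CPU device are illustrative assumptions.
#
#     env = gym.make("PongNoFrameskip-v4")
#     env = MaxAndSkipEnv(env, skip=4)
#     env = AtariPreprocess(env, shape=(84, 84))
#     env = FrameStack(env, k=4, device=torch.device("cpu"))
#     obs = env.reset()   # numpy array of shape (4, 84, 84)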
class LazyFrames(object):
"""
Description:
This object ensures that common frames between the observations are only stored once. It
exists purely to optimize memory usage which can be huge for DQN's 1M frames replay buffers.
This object should only be converted to numpy array before being passed to the model.
Usage:
Wrap frames with this object.
Notes:
- Can be finicky if used without the OpenAI ReplayBuffer
"""
def __init__(self, frames):
self._frames = frames
def _force(self):
return np.stack(self._frames, axis=0)
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._frames)
def __getitem__(self, i):
return self._frames[i]
class AtariPreprocessPixelInput():
def __init__(self, shape=(84, 84)): #Do we still want to do this?
self.shape = shape
self.transforms = T.Compose([
T.ToPILImage(mode='YCbCr'),
T.Lambda(lambda img: img.split()[0]),
T.Resize(self.shape),
T.Lambda(lambda img: np.array(img)),
])
self.observation_space = gym.spaces.Box(
low=0,
high=255,
shape=self.shape,
dtype=np.uint8,
)
# def transforms(self,state):
# rgb_weights = [0.2989, 0.5870, 0.1140]
# grayscale_image = np.dot(state[...,:3], rgb_weights)
# state = cv2.resize(state, (84, 84), interpolation=cv2.INTER_LINEAR)
# return state #torch.tensor(state, dtype=torch.float32, device=self.device).div_(255)
def get_state(self, rendered_pixel):
return self.transforms(rendered_pixel)
class CombineRamPixel(gym.ObservationWrapper):
def __init__(self, env):
super().__init__(env)
# self.env = env
# print(self.env.env.__dict__)
# exit()
# self.env.reset()
# self.env.render("rgb_array")
# get_pixel_name = env.unwrapped.spec.id
# self.pixel_env = gym.make(get_pixel_name.replace('-ram',''))
# print("Found atari game:",self.pixel_env.unwrapped.spec.id)
self.pixel_wrap = AtariPreprocessPixelInput()
self.pixel_shape = self.pixel_wrap.observation_space.shape
self.ram_shape = self.observation_space.shape
new_total_shape = (self.ram_shape[0] + self.pixel_shape[0] * self.pixel_shape[1],)
self.observation_space = gym.spaces.Box(
low=0,
high=255,
shape=new_total_shape,
dtype=np.uint8,
)
def combine_states(self, ram_state, pixel_state):
# for x in range(len(pixel_state)):
# print(pixel_state[x])
return np.concatenate((ram_state, np.reshape(pixel_state, -1)))
def observation(self, obs):
pixel_state = self.pixel_wrap.get_state(self.render(mode='rgb_array'))
return self.combine_states(obs, pixel_state)
| [] |
2024-01-10 | tazzuno/educationalChatbot | get_prompt.py | import streamlit
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, HumanMessagePromptTemplate
from langchain.schema import SystemMessage
def load_prompt(content):
template = (""""I want you to act as a university software engineering professor delivering engaging
and concise lectures to Italian students. Your expertise lies in explaining SWEBOK chapters in English to your
students in Italian. You are an expert educator and are responsible for guiding the user through this lesson plan.
Ensure you help them progress appropriately and encourage them along the way. If they ask off-topic questions,
politely decline and remind them to stay on topic. Please limit responses to one concept or step at a time. Each
step should contain no more than ~5 lines. Ensure they fully understand before proceeding. This is an interactive
lesson - engage and guide them, don't lecture. ----------------- {content} ----------------- End of Content.
End of Lesson."""
.format(content=content))
prompt_template = ChatPromptTemplate(messages=[
SystemMessage(content=template),
MessagesPlaceholder(variable_name="chat_history"),
HumanMessagePromptTemplate.from_template("{input}")
])
return prompt_template
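# Example usage sketch (hypothetical), mirroring how Lezioni.py wires this template into a
# chain; the model name and the way the API key is supplied are assumptions.
#
#     from langchain.chat_models import ChatOpenAI
#     from langchain.chains import LLMChain
#     prompt = load_prompt(content="...lesson text...")
#     chain = LLMChain(prompt=prompt, llm=ChatOpenAI(model="gpt-3.5-turbo-16k"))
#     reply = chain({"input": "Iniziamo", "chat_history": []})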
def load_prompt_with_questions(content):
template = """"I want you to act as a university software engineering professor delivering engaging and concise
lectures to Italian students. Your expertise lies in explaining SWEBOK chapters in English to your students in
Italian.You are an expert educator, and are responsible for walking the user through this lesson plan. You should
make sure to guide them along, encouraging them to progress when appropriate. If they ask questions not related
to this getting started guide, you should politely decline to answer and remind them to stay on topic. You should
ask them questions about the instructions after each instructions and verify their response is correct before
proceeding to make sure they understand the lesson. Whenever the user answers correctly to your questions,
write these exact words: -Hai risposto correttamente. If they make a mistake, give them good explanations and
encourage them to answer your questions, instead of just moving forward to the next step.
Please limit any responses to only one concept or step at a time.
Each step show only be ~15 lines at MOST.
Make sure they fully understand that before moving on to the next.
This is an interactive lesson - do not lecture them, but rather engage and guide them along!
-----------------
{content}
-----------------
End of Content.
Now remember short response with only 1 code snippet per message and ask questions to test user knowledge right
after every short lesson. Only one question per message. Only one lesson per message.
Your teaching should be in the following interactive format:
Short lesson 3-5 sentences long
Questions about the short lesson (1-3 questions)
Short lesson 3-5 sentences long
Questions about the short lesson (1-3 questions)
...
""".format(content=content)
prompt_template = ChatPromptTemplate(messages=[
SystemMessage(content=template),
MessagesPlaceholder(variable_name="chat_history"),
HumanMessagePromptTemplate.from_template("{input}")
])
return prompt_template
def get_lesson_guide(connection):
cursor = connection.cursor()
lesson_guides = {}
query = "SELECT id, nome, descrizione, percorso_file FROM Lezioni where username= %s "
values = (streamlit.session_state.username,)
try:
cursor.execute(query, values)
# Extract the results
results = cursor.fetchall()
# Iterate over the results and add the information to lesson_guides
for result in results:
id_lezione, nome_lezione, descrizione, percorso_file = result
lesson_guides[nome_lezione] = {
"id": id_lezione,
"description": descrizione,
"file": percorso_file
}
except Exception as e:
print(f"Errore durante l'esecuzione della query: {e}")
return lesson_guides
| [
"\"I want you to act as a university software engineering professor delivering engaging \n and concise lectures to Italian students. Your expertise lies in explaining SWEBOK chapters in English to your \n students in Italian. You are an expert educator and are responsible for guiding the user through this lesson plan. \n Ensure you help them progress appropriately and encourage them along the way. If they ask off-topic questions, \n politely decline and remind them to stay on topic. Please limit responses to one concept or step at a time. Each \n step should contain no more than ~5 lines. Ensure they fully understand before proceeding. This is an interactive \n lesson - engage and guide them, don't lecture. ----------------- {content} ----------------- End of Content.\n\n End of Lesson.",
"chat_history",
"{input}",
"\"I want you to act as a university software engineering professor delivering engaging and concise \n lectures to Italian students. Your expertise lies in explaining SWEBOK chapters in English to your students in \n Italian.You are an expert educator, and are responsible for walking the user through this lesson plan. You should \n make sure to guide them along, encouraging them to progress when appropriate. If they ask questions not related \n to this getting started guide, you should politely decline to answer and remind them to stay on topic. You should \n ask them questions about the instructions after each instructions and verify their response is correct before \n proceeding to make sure they understand the lesson. Whenever the user answers correctly to your questions, \n write these exact words: -Hai risposto correttamente. If they make a mistake, give them good explanations and \n encourage them to answer your questions, instead of just moving forward to the next step.\n\n Please limit any responses to only one concept or step at a time.\n Each step show only be ~15 lines at MOST.\n Make sure they fully understand that before moving on to the next. \n This is an interactive lesson - do not lecture them, but rather engage and guide them along!\n -----------------\n\n PLACEHOLDER\n\n -----------------\n End of Content.\n\n Now remember short response with only 1 code snippet per message and ask questions to test user knowledge right \n after every short lesson. Only one question per message. Only one lesson per message.\n\n Your teaching should be in the following interactive format:\n\n Short lesson 3-5 sentences long\n Questions about the short lesson (1-3 questions)\n\n Short lesson 3-5 sentences long\n Questions about the short lesson (1-3 questions)\n ...\n\n "
] |
2024-01-10 | tazzuno/educationalChatbot | Lezioni.py | import time
import streamlit as st
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
import get_prompt
from langchain.schema import AIMessage, HumanMessage
from StreamHandler import StreamHandler
def handle_messages():
"""Gestisce i messaggi della chat.
Inizializza lo stato della sessione. Se "messages" non è presente in st.session_state,
lo inizializza a una lista vuota. Successivamente, gestisce i messaggi presenti in
st.session_state["messages"], scrivendo i messaggi degli utenti e dell'assistente
nella chat.
"""
# Initialize session state
if "messages" not in st.session_state:
st.session_state.messages = []
for msg in st.session_state["messages"]:
if isinstance(msg, HumanMessage):
st.chat_message("user").write(msg.content)
else:
st.chat_message("assistant").write(msg.content)
def display_lesson(lesson_selection, lesson_info):
"""Visualizza una lezione specifica.
Parameters:
lesson_selection (str): Il titolo della lezione da visualizzare.
lesson_info (dict): Un dizionario contenente le informazioni sulla lezione, con la chiave "description" per la descrizione.
Returns:
None
"""
with st.container():
st.markdown(f"**{lesson_selection}**")
st.write(lesson_info["description"])
def run_langchain_model(prompt, lesson_type, lesson_content, lesson_selection, openai_api_key):
"""Esegue il modello Langchain per gestire le lezioni e interagire con l'utente tramite il chatbot.
Parameters:
prompt (str): Il prompt iniziale per il modello.
lesson_type (str): Il tipo di lezione.
lesson_content (str): Il contenuto della lezione.
lesson_selection (str): La selezione della lezione.
openai_api_key (str): La chiave API di OpenAI per l'accesso al modello.
"""
try:
# Set up a streaming handler for the model
with st.chat_message("assistant"):
stream_handler = StreamHandler(st.empty())
model = ChatOpenAI(streaming=True, callbacks=[stream_handler], model="gpt-3.5-turbo-16k",
openai_api_key=openai_api_key)
# Load a prompt template based on the lesson type
if lesson_type == "Instructions based lesson":
prompt_template = get_prompt.load_prompt(content=lesson_content)
else:
prompt_template = get_prompt.load_prompt_with_questions(content=lesson_content)
# Run a chain of the prompt and the language model
chain = LLMChain(prompt=prompt_template, llm=model)
response = chain(
{"input": prompt, "chat_history": st.session_state.messages[-20:]},
include_run_info=True,
tags=[lesson_selection, lesson_type]
)
st.session_state.messages.append(HumanMessage(content=prompt))
st.session_state.messages.append(AIMessage(content=response[chain.output_key]))
except Exception as e:
# Handle any errors that occur during the execution of the code
st.error(f"An error occurred: {e}")
@st.cache_data()
def get_lesson_content(lesson_file):
"""Ottiene il contenuto di una lezione da un file.
Parameters:
lesson_file (str): Il percorso del file della lezione.
Returns:
str: Il contenuto della lezione.
"""
try:
with open(lesson_file, "r") as f:
return f.read()
except FileNotFoundError:
st.error(f"Error: Lesson file not found at {lesson_file}")
st.stop()
def download_chat():
"""Genera e scarica la conversazione nel formato HTML.
La funzione genera un file HTML che rappresenta la conversazione
registrata tra l'utente e l'assistente. Il file HTML include
messaggi dell'utente e dell'assistente formattati.
"""
messages = st.session_state.get("messages", []) # Retrieve messages from session state
chat_content = "<html><head><link rel='stylesheet' type='text/css' href='styles.css'></head><body>"
for msg in messages:
if isinstance(msg, AIMessage):
chat_content += f"<p class='message ai-message'><strong>AI:</strong> {msg.content}</p>"
elif isinstance(msg, HumanMessage):
chat_content += f"<p class='message user-message'><strong>User:</strong> {msg.content}</p>"
else:
chat_content += f"<p class='message'>Unknown Message Type: {msg}</p>"
chat_content += "</body></html>"
with open("chat.html", "w", encoding="utf-8") as html_file:
html_file.write(chat_content)
# Download the generated HTML file
st.download_button("Download Chat", open("chat.html", "rb"), key="download_chat", file_name="chat.html",
mime="text/html")
def reset_lesson():
"""Ripristina lo stato della lezione.
La funzione reimposta diversi attributi nello stato della sessione a valori vuoti o None,
consentendo di ripartire da zero in una nuova lezione.
"""
st.session_state["messages"] = []
st.session_state["completed_lessons"] = []
st.session_state["current_lesson"] = None
st.session_state["current_lesson_type"] = None
st.session_state["code_snippet"] = None
def setup_page():
"""Configura la pagina per l'applicazione.
Questa funzione configura la pagina dell'applicazione, impostando il titolo e l'icona.
"""
st.set_page_config(page_title="AIDE", page_icon="🤖")
st.title("AIDE: Studiare non è mai stato così facile! Aide è qui per guidarti!")
def avanzamento_barra(connection):
"""Gestisce la barra di avanzamento e il punteggio associato ai messaggi.
La funzione controlla i messaggi presenti nello stato della sessione e aggiorna una barra di avanzamento
nel sidebar in base al numero di messaggi di risposta corretta.
"""
# initialize variables
bar = st.progress(0)
bar.empty()
contatore = 0
cursor = connection.cursor()
query = "SELECT COUNT(*) FROM Lezioni"
cursor.execute(query)
result = cursor.fetchall()
messages = st.session_state.get("messages", [])
for msg in messages:
if isinstance(msg, AIMessage):
if msg.content.startswith("Hai risposto correttamente!") or msg.content.startswith("That's correct!"):
contatore += 1
num_lezioni = 100 / result[0][0]
progresso = contatore * num_lezioni
# st.progress expects an int in 0-100 (or a float in 0.0-1.0), so clamp and cast the value
bar = st.sidebar.progress(int(min(progresso, 100)), "Punteggio")
time.sleep(1)
def load_lesson_content(lesson_file):
"""Carica il contenuto di una lezione da un file.
Parameters:
lesson_file (str): Il percorso del file della lezione.
Returns:
str: Il contenuto della lezione.
Raises:
FileNotFoundError: Se il file della lezione non è trovato.
"""
try:
with open(lesson_file, "r", encoding="utf-8") as f:
return f.read()
except FileNotFoundError:
st.error(f"Error: Lesson file not found at {lesson_file}")
st.stop()
| [] |
2024-01-10 | tazzuno/educationalChatbot | Authentication.py | import streamlit as st
import streamlit_authenticator as stauth
import secrets
import bcrypt
import re
import mysql.connector
import openai
from mysql.connector import Error
# Credentials
credentials = {'usernames': {'user1': 'pass123'}}
# Generate a random key
key = secrets.token_urlsafe(16)
# Initialize the login manager
login_manager = stauth.Authenticate(credentials,
cookie_name='auth',
key=key)
# Global variable for the validated password
validated_password = ""
def connetti_database():
try:
# Read the connection settings from the secrets file
return mysql.connector.connect(**st.secrets["mysql"])
except Exception as e:
st.error(f"Errore di connessione al database: {e}")
return None
def chiudi_connessione_database(connection):
if connection and connection.is_connected():
connection.close()
def validate_password(password):
global validated_password
if len(password) > 0:
# Length check
if len(password) < 8:
st.error("Password troppo corta")
return
# Uppercase-letter check
if not any(char.isupper() for char in password):
st.error("Inserisci almeno 1 maiuscola")
return
# Special-character check
if not re.search(r'[!@#$]', password):
st.error("Inserisci almeno 1 carattere speciale")
return
validated_password = password
return validated_password
def is_api_key_valid(key):
try:
openai.api_key = key
response = openai.Completion.create(
engine="davinci", # https://platform.openai.com/docs/models
prompt="This is a test.",
max_tokens=5
)
except Exception as ex:
return str(ex)
return False
else:
return True
def aggiungi_utente_al_database(username, password, email, api_key, connection):
if connection:
try:
cursor = connection.cursor()
# Add the user to the database
salt = bcrypt.gensalt()
hashed_password = bcrypt.hashpw(password.encode('utf-8'), salt)
query = '''INSERT INTO Utenti (Username, Password, Email, API_key) VALUES (%s, %s, %s, %s)'''
# Store the bcrypt hash instead of the plain-text password
args = (username, hashed_password, email, api_key)
cursor.execute(query, args)
connection.commit()
except Error as e:
print(f"Errore durante l'aggiunta dell'utente al database: {e}")
finally:
chiudi_connessione_database(connection)
def verifica_credenziali(username, password, connection):
if connection:
try:
cursor = connection.cursor()
# Fetch the stored bcrypt hash for this username and compare it with the supplied password
query = "SELECT password FROM utenti WHERE username = %s"
values = (username,)
cursor.execute(query, values)
# Extract the result
result = cursor.fetchone()
stored_hash = result[0] if result else None
if isinstance(stored_hash, str):
stored_hash = stored_hash.encode('utf-8')
# Return 1 when the credentials are valid, 0 otherwise
if stored_hash and bcrypt.checkpw(password.encode('utf-8'), stored_hash):
return 1
else:
return 0
except Error as e:
print(f"Errore durante l'aggiunta dell'utente al database: {e}")
finally:
chiudi_connessione_database(connection)
| [
"This is a test."
] |
2024-01-10 | tazzuno/educationalChatbot | pages~Progressi.py | import streamlit as st
import time
from langchain.schema import AIMessage
st.title("Dashboard dello Studente")
container_centrale = st.container()
if "completed_lessons" in st.session_state:
st.subheader("Lezioni Svolte:")
for lesson in st.session_state.completed_lessons:
st.write(f"- {lesson}")
else:
st.info("Nessuna leziona completata, se desideri conoscere il tuo progresso clicca 'Show Progress'")
def avanzamento_barra():
# initialize variables
bar = st.progress(0)
bar.empty()
contatore = 0
messages = st.session_state.get("messages", [])
for msg in messages:
if isinstance(msg, AIMessage):
if msg.content.startswith("Hai risposto correttamente!"):
contatore += 1
progresso = contatore * 10
bar = st.progress(progresso, "Punteggio")
time.sleep(1)
# PROGRESS BAR UPDATE
container_button = st.sidebar.container()
container_button = st.empty()
button = container_button.button("Show Progress", on_click=None)
if button:
container_button.empty()
button_hide = container_button.button("Hide Progress", on_click=None)
container_centrale = avanzamento_barra()
| [] |
2024-01-10 | sweetice/Deep-reinforcement-learning-with-pytorch | Char04%20A2C~multiprocessing_env.py | #This code is from openai baseline
#https://github.com/openai/baselines/tree/master/baselines/common/vec_env
import numpy as np
from multiprocessing import Process, Pipe
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class VecEnv(object):
"""
An abstract asynchronous, vectorized environment.
"""
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
def reset(self):
"""
Reset all the environments and return an array of
observations, or a tuple of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a tuple of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close(self):
"""
Clean up the environments' resources.
"""
pass
def step(self, actions):
self.step_async(actions)
return self.step_wait()
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.nenvs = nenvs
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def __len__(self):
return self.nenvs
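# Example usage sketch (hypothetical): build a vectorized env from thunks. Assumes the classic
# `gym` package with the 4-tuple step API that the worker above relies on.
#
#     import gym
#     def make_env():
#         return gym.make("CartPole-v1")
#     envs = SubprocVecEnv([make_env for _ in range(4)])
#     obs = envs.reset()                                   # shape (4, obs_dim)
#     obs, rewards, dones, infos = envs.step([envs.action_space.sample() for _ in range(4)])
#     envs.close()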
| [] |
2024-01-10 | stjordanis/poet | poet_distributed~optimizers.py | # The following code is modified from openai/evolution-strategies-starter
# (https://github.com/openai/evolution-strategies-starter)
# under the MIT License.
# Modifications Copyright (c) 2019 Uber Technologies, Inc.
import numpy as np
class Optimizer(object):
def __init__(self, theta):
self.dim = len(theta)
self.t = 0
def update(self, theta, globalg):
self.t += 1
step = self._compute_step(globalg)
ratio = np.linalg.norm(step) / np.linalg.norm(theta)
return ratio, theta + step
def _compute_step(self, globalg):
raise NotImplementedError
class SimpleSGD(Optimizer):
def __init__(self, stepsize):
self.stepsize = stepsize
def compute(self, theta, globalg):
step = -self.stepsize * globalg
ratio = np.linalg.norm(step) / np.linalg.norm(theta)
return ratio, theta + step
class SGD(Optimizer):
def __init__(self, theta, stepsize, momentum=0.9):
Optimizer.__init__(self, theta)
self.v = np.zeros(self.dim, dtype=np.float32)
self.stepsize, self.momentum = stepsize, momentum
def _compute_step(self, globalg):
self.v = self.momentum * self.v + (1. - self.momentum) * globalg
step = -self.stepsize * self.v
return step
class Adam(Optimizer):
def __init__(self, theta, stepsize, beta1=0.9, beta2=0.999, epsilon=1e-08):
Optimizer.__init__(self, theta)
self.stepsize = stepsize
self.init_stepsize = stepsize
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.m = np.zeros(self.dim, dtype=np.float32)
self.v = np.zeros(self.dim, dtype=np.float32)
def reset(self):
self.m = np.zeros(self.dim, dtype=np.float32)
self.v = np.zeros(self.dim, dtype=np.float32)
self.t = 0
self.stepsize = self.init_stepsize
def _compute_step(self, globalg):
a = self.stepsize * \
np.sqrt(1 - self.beta2 ** self.t) / (1 - self.beta1 ** self.t)
self.m = self.beta1 * self.m + (1 - self.beta1) * globalg
self.v = self.beta2 * self.v + (1 - self.beta2) * (globalg * globalg)
step = -a * self.m / (np.sqrt(self.v) + self.epsilon)
return step
def propose(self, theta, globalg):
a = self.stepsize * \
np.sqrt(1 - self.beta2 ** self.t) / (1 - self.beta1 ** self.t)
m = self.beta1 * self.m + (1 - self.beta1) * globalg
v = self.beta2 * self.v + (1 - self.beta2) * (globalg * globalg)
step = -a * m / (np.sqrt(v) + self.epsilon)
ratio = np.linalg.norm(step) / np.linalg.norm(theta)
return ratio, theta + step
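# Example usage sketch (hypothetical): a toy descent loop on f(theta) = ||theta||^2, whose
# gradient is 2 * theta. The dimensionality and step size are illustrative; update() comes
# from the Optimizer base class.
#
#     theta = np.ones(10, dtype=np.float32)
#     adam = Adam(theta, stepsize=0.1)
#     for _ in range(100):
#         grad = 2.0 * theta              # gradient of the toy objective
#         ratio, theta = adam.update(theta, grad)
#     # theta is now close to the optimum at zero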
| [] |
2024-01-10 | srutanik/llm-search | src~llmsearch~models~azureopenai.py | import os
from langchain.chat_models import AzureChatOpenAI
from llmsearch.models.abstract import AbstractLLMModel
from llmsearch.models.config import AzureOpenAIModelConfig
class AzureOpenAIModel(AbstractLLMModel):
def __init__(self, config: AzureOpenAIModelConfig) -> None:
super().__init__(prompt_template=config.prompt_template)
self.config = config
@property
def model(self):
os.environ["OPENAI_API_TYPE"] = self.config.openai_api_type
os.environ["OPENAI_API_BASE"] = self.config.openai_api_base
os.environ["OPENAI_API_VERSION"] = self.config.openai_api_version
return AzureChatOpenAI(deployment_name=self.config.deployment_name, model = self.config.model_name, **self.config.model_kwargs)
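# Example usage sketch (hypothetical): the AzureOpenAIModelConfig fields below are inferred
# from the attribute accesses above, and every value is a placeholder rather than a real
# default or a documented requirement.
#
#     config = AzureOpenAIModelConfig(
#         openai_api_type="azure",
#         openai_api_base="https://<your-resource>.openai.azure.com/",
#         openai_api_version="2023-05-15",
#         deployment_name="<your-deployment>",
#         model_name="gpt-35-turbo",
#         model_kwargs={"temperature": 0.0},
#         prompt_template=None,
#     )
#     llm = AzureOpenAIModel(config).model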
| [] |
2024-01-10 | fdasilva59/Udacity-Deep-Reinforcement-Learning-Nanodegree | multi-agents~env_wrapper.py | """
Modified from OpenAI Baselines code to work with multi-agent envs
"""
import numpy as np
from multiprocessing import Process, Pipe
from baselines.common.vec_env import VecEnv, CloudpickleWrapper
from baselines.common.tile_images import tile_images
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, ob_full, reward, done, info = env.step(data)
if all(done):
ob = env.reset()
remote.send((ob, ob_full, reward, done, info))
elif cmd == 'reset':
ob, ob_full = env.reset()
remote.send((ob, ob_full))
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'render':
remote.send(env.render(mode='rgb_array'))
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
elif cmd == 'get_agent_types':
if all([hasattr(a, 'adversary') for a in env.agents]):
remote.send(['adversary' if a.adversary else 'agent' for a in
env.agents])
else:
remote.send(['agent' for _ in env.agents])
else:
raise NotImplementedError
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
self.remotes[0].send(('get_agent_types', None))
self.agent_types = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, obs_full, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(obs_full), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def render(self, mode='human'):
# code doesn't work all that well
# TODO: need to clean up
for pipe in self.remotes:
pipe.send(('render', None))
imgs = [pipe.recv() for pipe in self.remotes]
bigimg = tile_images(imgs)
if mode == 'human':
import cv2
cv2.imshow('vecenv', bigimg[:, :, ::-1])
cv2.waitKey(1)
elif mode == 'rgb_array':
return bigimg
else:
raise NotImplementedError
class DummyVecEnv(VecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)
if all([hasattr(a, 'adversary') for a in env.agents]):
self.agent_types = ['adversary' if a.adversary else 'agent' for a in
env.agents]
else:
self.agent_types = ['agent' for _ in env.agents]
self.ts = np.zeros(len(self.envs), dtype='int')
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
results = [env.step(a) for (a,env) in zip(self.actions, self.envs)]
obs, obs_full, rews, dones, infos = map(np.array, zip(*results))
self.ts += 1
for (i, done) in enumerate(dones):
if all(done):
obs[i] = self.envs[i].reset()
self.ts[i] = 0
self.actions = None
return np.array(obs), np.array(rews), np.array(dones), infos
def reset(self):
results = [env.reset() for env in self.envs]
return np.array(results)
def close(self):
return
| [] |
2024-01-10 | jsnel/pyglotaran | glotaran~builtin~megacomplexes~coherent_artifact~test~test_coherent_artifact.py | import numpy as np
import pytest
import xarray as xr
from glotaran.builtin.megacomplexes.coherent_artifact import CoherentArtifactMegacomplex
from glotaran.builtin.megacomplexes.decay import DecayMegacomplex
from glotaran.model import Model
from glotaran.model import fill_item
from glotaran.optimization.matrix_provider import MatrixProvider
from glotaran.optimization.optimize import optimize
from glotaran.parameter import Parameters
from glotaran.project import Scheme
from glotaran.simulation import simulate
@pytest.mark.parametrize(
"spectral_dependence",
("none", "dispersed", "shifted"),
)
def test_coherent_artifact(spectral_dependence: str):
model_dict = {
"initial_concentration": {
"j1": {"compartments": ["s1"], "parameters": ["irf_center"]},
},
"megacomplex": {
"mc1": {"type": "decay", "k_matrix": ["k1"]},
"mc2": {"type": "coherent-artifact", "order": 3},
},
"k_matrix": {
"k1": {
"matrix": {
("s1", "s1"): "rate",
}
}
},
"irf": {
"irf1": {
"type": "multi-gaussian",
"center": ["irf_center"],
"width": ["irf_width"],
},
},
"dataset": {
"dataset1": {
"initial_concentration": "j1",
"megacomplex": ["mc1", "mc2"],
"irf": "irf1",
},
},
}
parameter_list = [
["rate", 101e-4],
["irf_center", 10, {"vary": False, "non-negative": False}],
["irf_width", 20, {"vary": False, "non-negative": False}],
]
irf_spec = model_dict["irf"]["irf1"]
if spectral_dependence == "dispersed":
irf_spec["type"] = "spectral-multi-gaussian"
irf_spec["dispersion_center"] = "irf_dispc"
irf_spec["center_dispersion_coefficients"] = ["irf_disp1", "irf_disp2"]
parameter_list += [
["irf_dispc", 300, {"vary": False, "non-negative": False}],
["irf_disp1", 0.01, {"vary": False, "non-negative": False}],
["irf_disp2", 0.001, {"vary": False, "non-negative": False}],
]
elif spectral_dependence == "shifted":
irf_spec["shift"] = ["irf_shift1", "irf_shift2", "irf_shift3"]
parameter_list += [
["irf_shift1", -2],
["irf_shift2", 0],
["irf_shift3", 2],
]
model = Model.create_class_from_megacomplexes([DecayMegacomplex, CoherentArtifactMegacomplex])(
**model_dict
)
parameters = Parameters.from_list(parameter_list)
time = np.arange(0, 50, 1.5)
spectral = np.asarray([200, 300, 400])
dataset_model = fill_item(model.dataset["dataset1"], model, parameters)
matrix = MatrixProvider.calculate_dataset_matrix(dataset_model, 0, spectral, time)
compartments = matrix.clp_labels
print(compartments)
assert len(compartments) == 4
for i in range(1, 4):
assert compartments[i] == f"coherent_artifact_{i}"
assert matrix.matrix.shape == (time.size, 4)
clp = xr.DataArray(
np.ones((3, 4)),
coords=[
("spectral", spectral),
(
"clp_label",
[
"s1",
"coherent_artifact_1",
"coherent_artifact_2",
"coherent_artifact_3",
],
),
],
)
axis = {"time": time, "spectral": clp.spectral}
data = simulate(model, "dataset1", parameters, axis, clp)
dataset = {"dataset1": data}
scheme = Scheme(
model=model, parameters=parameters, data=dataset, maximum_number_function_evaluations=20
)
result = optimize(scheme)
print(result.optimized_parameters)
for param in result.optimized_parameters.all():
assert np.allclose(param.value, parameters.get(param.label).value, rtol=1e-1)
resultdata = result.data["dataset1"]
assert np.array_equal(data.time, resultdata.time)
assert np.array_equal(data.spectral, resultdata.spectral)
assert data.data.shape == resultdata.data.shape
assert data.data.shape == resultdata.fitted_data.shape
assert np.allclose(data.data, resultdata.fitted_data)
assert "coherent_artifact_response" in resultdata
if spectral_dependence == "none":
assert resultdata["coherent_artifact_response"].shape == (time.size, 3)
else:
assert resultdata["coherent_artifact_response"].shape == (spectral.size, time.size, 3)
assert "coherent_artifact_associated_spectra" in resultdata
assert resultdata["coherent_artifact_associated_spectra"].shape == (3, 3)
| [] |
2024-01-10 | Utshav-paudel/YouTube-assistant-langchain | youtube_assistant.py | #@ Creating a youtube assistant that will help you convert a youtube video to script
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.agents import initialize_agent, load_tools, AgentType
from langchain.document_loaders import YoutubeLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
url="https://youtu.be/BoutTY8XHSc?si=RFqU6VHQiFBENdop" # link of video
embeddings = OpenAIEmbeddings()
def youtube_url_to_vector_db(url: str) -> FAISS:
    loader = YoutubeLoader.from_youtube_url(youtube_url=url)  # use LangChain's YoutubeLoader to load the YouTube URL
    transcripts = loader.load()  # load the transcript of the video
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=50)  # split the transcript
    docs = splitter.split_documents(transcripts)
    # vector database to store the embeddings
    db = FAISS.from_documents(docs, embeddings)  # store the document embeddings in the FAISS vector db
    return db
def get_response_from_query(db, query, k=4): # k determines number of chunks
"we will use text-davinci-003 which has cap of 4096 tokens k determine number of 1000 chunk"
docs = db.similarity_search(query, k=k)
docs_content =" ".join([d.page_content for d in docs])
llm = OpenAI(model_name = "text-davinci-003")
template = PromptTemplate(input_variables=['question','docs'],template=
"""You are a helpful assistant that that can answer questions about youtube videos
based on the video's transcript.
Answer the following question: {question}
By searching the following video transcript: {docs}
Only use the factual information from the transcript to answer the question.
If you feel like you don't have enough information to answer the question, say "I don't know".
Your answers should be verbose and detailed.""")
chain = LLMChain(prompt = template, llm=llm)
response = chain.run(question=query,docs= docs_content)
return response
db = youtube_url_to_vector_db(url)
query = "What are the tools to hack your brain ?"
print(get_response_from_query(db,query)) | [
"question",
"You are a helpful assistant that that can answer questions about youtube videos \n based on the video's transcript.\n \n Answer the following question: {question}\n By searching the following video transcript: {docs}\n \n Only use the factual information from the transcript to answer the question.\n \n If you feel like you don't have enough information to answer the question, say \"I don't know\".\n \n Your answers should be verbose and detailed."
] |
2024-01-10 | cliffpyles/smartypants | planning~psuedo_code~subscription_handlers~message_crupdate_subscriber_lambda.py | import json
import boto3
import openai
def lambda_handler(event, context):
print("MessageCreatedOrUpdatedSubscriberLambda invoked.")
dynamodb_event = json.loads(event["detail"])
# Initialize OpenAI client
openai.api_key = "your-openai-api-key"
# Continue the chat session with a new user message
chat_session = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": dynamodb_event["NewImage"]["Content"]["S"]},
],
)
# Extract the assistant's reply
assistant_reply = chat_session["choices"][0]["message"]["content"]
# Initialize DynamoDB client
dynamodb = boto3.resource("dynamodb")
table = dynamodb.Table("your-dynamodb-table-name")
# Write the assistant's reply to the database
table.put_item(
Item={
"MessageId": dynamodb_event["NewImage"]["MessageId"]["S"],
"Timestamp": dynamodb_event["NewImage"]["Timestamp"]["S"],
"Content": assistant_reply,
"UserId": dynamodb_event["NewImage"]["UserId"]["S"],
"ChatId": dynamodb_event["NewImage"]["ChatId"]["S"],
"Access": dynamodb_event["NewImage"]["Access"]["S"],
}
)
return {
"statusCode": 200,
"body": json.dumps("Message creation or update event handled."),
}
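# Illustrative shape of the event this handler expects (an assumption, for reference only):
# an EventBridge-style envelope whose "detail" field is a JSON string carrying the DynamoDB
# stream image read above (NewImage.Content/MessageId/Timestamp/UserId/ChatId/Access).
_EXAMPLE_EVENT = {
    "detail": json.dumps(
        {
            "NewImage": {
                "MessageId": {"S": "msg-123"},
                "Timestamp": {"S": "2024-01-10T00:00:00Z"},
                "Content": {"S": "Hello, assistant!"},
                "UserId": {"S": "user-1"},
                "ChatId": {"S": "chat-1"},
                "Access": {"S": "private"},
            }
        }
    )
}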
| [
"Content",
"You are a helpful assistant."
] |
2024-01-10 | toilaluan/DOST-AI | services~search_doc.py | import os
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
import chromadb
from pymongo import MongoClient
from dotenv import load_dotenv
load_dotenv()
client = MongoClient(os.environ.get("MONGODB"))
EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL")
CHAT_MODEL = os.environ.get("CHAT_MODEL")
CHROMA_ROOT = os.environ.get("CHROMA_ROOT")
DOC_EMBED_COLLECTION = os.environ.get("DOC_EMBED_COLLECTION")
def search_doc(query: str):
persist_directory = os.path.join(CHROMA_ROOT, DOC_EMBED_COLLECTION)
embeddings = OpenAIEmbeddings()
chroma = Chroma(
collection_name=DOC_EMBED_COLLECTION,
embedding_function=embeddings,
# client_settings=client_settings,
persist_directory=persist_directory,
)
n_docs = len(chroma._collection.get()["documents"])
print(n_docs)
docs = chroma.similarity_search_with_score(query=query, k=min(5, n_docs))
result = [doc[0].metadata["_id"] for doc in docs]
return result
| [] |
2024-01-10 | toilaluan/DOST-AI | services~init_doc.py | from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from langchain.text_splitter import TokenTextSplitter
from langchain.document_loaders import UnstructuredPDFLoader, PDFMinerLoader, PyMuPDFLoader
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain import PromptTemplate
import gdown
import os
from pymongo import MongoClient
from bson.objectid import ObjectId
from cleantext import clean
import chromadb
import torch
from dotenv import load_dotenv
load_dotenv()
k = 3
def response_to_structured(response: str):
try:
title_index = response.index('Title')
summary_index = response.index('Summary')
tags_index = response.index('Tags')
title = response[title_index+7: summary_index]
summary = response[summary_index+9: tags_index]
tags = response[tags_index+6:]
result = {
'title': title.rstrip(),
'summary': summary.rstrip(),
'tags': tags.rstrip()
}
return result
    except ValueError:
        # One of the expected section headers was missing from the response
        return {}
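# The slice offsets above (7, 9, 6) assume the model answers in a fixed layout where each
# header is followed by ": " (so "Title: " is 7 characters, "Summary: " is 9, "Tags: " is 6):
#   Title: <title text>
#   Summary: <summary text>
#   Tags: <comma separated tags>
# For example (illustrative input/output):
#   response_to_structured("Title: Demo\nSummary: Short note.\nTags: a, b")
#   -> {'title': 'Demo', 'summary': 'Short note.', 'tags': 'a, b'}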
def init_keys(pdf_path, chunk_size=1000):
loader = PyMuPDFLoader(pdf_path)
data = loader.load()
text_splitter = TokenTextSplitter(
chunk_size=chunk_size, chunk_overlap=0, encoding_name='cl100k_base')
texts = text_splitter.split_documents(data)
k_first_texts = [chunks.page_content for chunks in texts[:k]]
texts = ' '.join(text for text in k_first_texts)
with open('model/prompts/init_doc_prompt.txt', 'r') as f:
init_doc_prompt = f.readlines()
init_doc_prompt = ''.join(x for x in init_doc_prompt)
prompt = PromptTemplate(template=init_doc_prompt,
input_variables=['context'])
chain = LLMChain(
llm=ChatOpenAI(),
prompt=prompt,
verbose=True
)
result = chain.predict(context=texts)
result_json = response_to_structured(result)
return result_json
| [
"context"
] |
2024-01-10 | toilaluan/DOST-AI | services~init_embedding.py | from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.text_splitter import TokenTextSplitter
from langchain.document_loaders import UnstructuredPDFLoader
import gdown
import os
from pymongo import MongoClient
from bson.objectid import ObjectId
import chromadb
import torch
from langchain.docstore.document import Document
from dotenv import load_dotenv
load_dotenv()
client = MongoClient(os.environ.get("MONGODB"))
EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL")
CHAT_MODEL = os.environ.get("CHAT_MODEL")
CHROMA_ROOT = os.environ.get("CHROMA_ROOT")
DOC_EMBED_COLLECTION = os.environ.get("DOC_EMBED_COLLECTION")
db = client["doc_stock"]
docs = db["docs"]
def download_pdf(id: str):
doc_id = ObjectId(id)
doc = docs.find_one({"_id": doc_id})
link = doc["link"].split("/")
drive_id = link[link.index("d") + 1]
path = gdown.download(id=drive_id, output="cached_file.pdf")
return path
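# Worked example of the id extraction above (illustrative URL): for a share link like
# "https://drive.google.com/file/d/FILE_ID/view?usp=sharing", link.split("/") yields
# [..., "file", "d", "FILE_ID", "view?usp=sharing"], so the element after "d" is the Drive file id.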
async def init_for_search(id: str):
doc_id = ObjectId(id)
doc = docs.find_one({"_id": doc_id})
summary = doc["summary"]
title = doc["title"]
tags = doc["tags"]
context = f"{summary} {title} {tags}"
doc_obj = Document(page_content=context, metadata={"_id": id})
persist_directory = os.path.join(CHROMA_ROOT, DOC_EMBED_COLLECTION)
embeddings = OpenAIEmbeddings()
client_settings = chromadb.config.Settings(
chroma_db_impl="duckdb+parquet",
persist_directory=persist_directory,
anonymized_telemetry=False,
)
vectorstore = Chroma(
collection_name=DOC_EMBED_COLLECTION,
embedding_function=embeddings,
client_settings=client_settings,
persist_directory=persist_directory,
)
vectorstore.add_documents([doc_obj])
vectorstore.persist()
print("Init for search successfully!")
torch.cuda.empty_cache()
def id_to_texts(id: str, chunk_size: int) -> list[Document]:
path = download_pdf(id)
loader = UnstructuredPDFLoader(path)
data = loader.load()
text_splitter = TokenTextSplitter(chunk_size=chunk_size, chunk_overlap=0)
texts = text_splitter.split_documents(data)
return texts
async def store_embeddings(id: str, chunk_size: int = 1000):
texts = id_to_texts(id, chunk_size)
persist_directory = os.path.join(CHROMA_ROOT, id)
embeddings = OpenAIEmbeddings()
client_settings = chromadb.config.Settings(
chroma_db_impl="duckdb+parquet",
persist_directory=persist_directory,
anonymized_telemetry=False,
)
vectorstore = Chroma(
collection_name=id,
embedding_function=embeddings,
client_settings=client_settings,
persist_directory=persist_directory,
)
vectorstore.add_documents(texts)
vectorstore.persist()
print("Init successfully!")
torch.cuda.empty_cache()
| [] |
2024-01-10 | toilaluan/DOST-AI | model~DostChat.py | import chromadb
import os
import asyncio
from bson.objectid import ObjectId
from pymongo import MongoClient
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from langchain.chains import LLMChain
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.chains.question_answering import load_qa_chain
from langchain.prompts import PromptTemplate
from dotenv import load_dotenv
from .select_chunk import select_chunk
load_dotenv()
client = MongoClient(os.environ.get("MONGODB"))
CHROMA_ROOT = os.environ.get("CHROMA_ROOT")
CHAT_MODEL = os.environ.get("CHAT_MODEL")
EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL")
client = MongoClient(os.environ.get("MONGODB"))
db = client["doc_stock"]
docs = db["docs"]
class DostChat:
def __init__(self, id):
self.id = id
doc_id = ObjectId(id)
self.doc = docs.find_one({"_id": doc_id})
persist_directory = os.path.join(CHROMA_ROOT, id)
client_settings = chromadb.config.Settings(
chroma_db_impl="duckdb+parquet",
persist_directory=persist_directory,
anonymized_telemetry=False,
)
embeddings = OpenAIEmbeddings()
self.chroma = Chroma(
collection_name=id,
embedding_function=embeddings,
client_settings=client_settings,
persist_directory=persist_directory,
)
        self.n_pages = len(self.chroma._collection.get()["documents"])  # number of stored chunks for this document
async def init_chat_query(self):
with open("model/prompts/init_prompt.txt", "r") as f:
init_prompt = f.readlines()
init_prompt = "".join(x for x in init_prompt)
prompt = PromptTemplate(template=init_prompt, input_variables=[])
chain = LLMChain(llm=ChatOpenAI(), prompt=prompt, verbose=True)
result = chain.predict()
return result
async def doc_query(self, query):
# try:
with open("model/prompts/prompt.txt", "r") as f:
prompt = f.readlines()
prompt = "".join(x for x in prompt)
docs = self.chroma.similarity_search(query, self.n_pages)
docs = await select_chunk(docs=docs, query=query, k=min(4, self.n_pages))
context = "\n".join(x.page_content for x in docs)
prompt = PromptTemplate(
template=prompt,
input_variables=["context", "question", "summary", "title", "tags"],
)
chain = LLMChain(llm=ChatOpenAI(), prompt=prompt, verbose=False)
result = chain.predict(
context=context,
question=query,
summary=self.doc["summary"],
title=self.doc["title"],
tags=self.doc["tags"],
)
# except:
# return 'We have some error, try again later!'
return result
# return ''
| [
"question",
"context"
] |
2024-01-10 | illidanlab/tensorpack | examples~DeepQNetwork~atari_wrapper.py | # -*- coding: utf-8 -*-
# File: atari_wrapper.py
import numpy as np
from collections import deque
import gym
_v0, _v1 = gym.__version__.split('.')[:2]
assert int(_v0) > 0 or int(_v1) >= 10, gym.__version__
"""
The following wrappers are copied or modified from openai/baselines:
https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
"""
class RewardShaping(gym.Wrapper):
"""
When taking a step, add a logit value to the immediate reward,
    where logits are provided by a pre-trained agent using expert demonstration.
"""
def __init__(self, env, logit_provider=None):
### logit provider should be a model
gym.Wrapper.__init__(self, env)
self.logit_provider = logit_provider
def step(self, action):
ob, reward, done, info = self.env.step(action)
#self.frames.append(ob)
## TBD: change reward
reward += 10
return ob, reward, done, info
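# Illustrative sketch only (the shaping logic above is still marked TBD): one way the
# logit_provider named in RewardShaping's docstring could be applied, assuming it is a
# callable mapping an observation to per-action logits (e.g. a pre-trained policy head).
def _shaped_reward_sketch(logit_provider, ob, action, reward):
    logits = logit_provider(ob)  # per-action scores from the pre-trained agent
    return reward + float(logits[action])  # add the logit of the chosen action to the env reward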
class MapState(gym.ObservationWrapper):
def __init__(self, env, map_func):
gym.ObservationWrapper.__init__(self, env)
self._func = map_func
def observation(self, obs):
return self._func(obs)
class FrameStack(gym.Wrapper):
"""
Buffer consecutive k observations and stack them on a new last axis.
The output observation has shape `original_shape + (k, )`.
"""
def __init__(self, env, k):
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
def reset(self):
"""Clear buffer and re-fill by duplicating the first observation."""
ob = self.env.reset()
for _ in range(self.k - 1):
self.frames.append(np.zeros_like(ob))
self.frames.append(ob)
return self.observation()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self.observation(), reward, done, info
def observation(self):
assert len(self.frames) == self.k
return np.stack(self.frames, axis=-1)
class _FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self):
self.env.reset()
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset()
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset()
return obs
def step(self, action):
return self.env.step(action)
def FireResetEnv(env):
if isinstance(env, gym.Wrapper):
baseenv = env.unwrapped
else:
baseenv = env
if 'FIRE' in baseenv.get_action_meanings():
return _FireResetEnv(env)
return env
class LimitLength(gym.Wrapper):
def __init__(self, env, k):
gym.Wrapper.__init__(self, env)
self.k = k
def reset(self):
# This assumes that reset() will really reset the env.
# If the underlying env tries to be smart about reset
# (e.g. end-of-life), the assumption doesn't hold.
ob = self.env.reset()
self.cnt = 0
return ob
def step(self, action):
ob, r, done, info = self.env.step(action)
self.cnt += 1
if self.cnt == self.k:
done = True
return ob, r, done, info
| [] |
2024-01-10 | odellus/scrape_oai | scrape_oai.py | import json
import requests
from bs4 import BeautifulSoup
import argparse
import time
def parse_args():
'''Parses the command line arguments'''
parser = argparse.ArgumentParser(description='Scrape a conversation from OpenAI chat')
parser.add_argument('--input_url', type=str, help='The URL of the conversation to scrape', default=None)
parser.add_argument('--input_file', type=str, help='The file containing the URLs of the conversations to scrape', default=None)
return parser.parse_args()
def fetch_webpage_content(input_url):
'''Fetches the content of a webpage'''
# Headers to mimic a browser request
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
}
# Creating a session to handle cookies
with requests.Session() as session:
# Send a get request to the URL
response = session.get(input_url, headers=headers)
# Check if the request was successful
if response.status_code == 200:
# Use BeautifulSoup to parse the HTML content
soup = BeautifulSoup(response.content, 'html.parser')
return soup
else:
return f"Error fetching the page: Status code {response.status_code}"
def get_conversation(soup):
'''Extracts the conversation from the webpage'''
script_tag = soup.find('script', type='application/json')
d = json.loads(script_tag.string)
conv = d['props']['pageProps']['serverResponse']['data']['linear_conversation']
chat = []
for turn in conv:
_id = turn.get('id')
message = turn.get('message')
if message:
author = message.get('author')
role = author.get('role')
content = message.get('content')
if content:
content_type = content.get('content_type')
parts = content.get('parts')
if parts is not None and content_type == 'text':
chat.append({'role': role, 'content': parts.pop()})
return chat
def save_conversation(input_url):
'''Saves the conversation to a JSON file'''
conversation_id = input_url.split('/')[-1]
soup = fetch_webpage_content(input_url)
chat = get_conversation(soup)
out_fname = f'oai-chat-{conversation_id}.json'
print(f"Saving conversation to {out_fname}")
with open(out_fname, 'w') as f:
json.dump(chat, f, indent=4)
def main():
'''Main function'''
args = parse_args()
if args.input_file:
with open(args.input_file, 'r') as f:
input_urls = f.readlines()
input_urls = [x.strip() for x in input_urls]
print(input_urls)
for input_url in input_urls:
time.sleep(1)
print(f"Fetching conversation from {input_url}")
save_conversation(input_url)
elif not args.input_url:
input_url = "https://chat.openai.com/share/4ad82157-c4b9-421e-9e33-7902ea940d71"
save_conversation(input_url)
else:
save_conversation(args.input_url)
if __name__ == '__main__':
main()
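# Example invocations (illustrative):
#   python scrape_oai.py --input_url https://chat.openai.com/share/<conversation-id>
#   python scrape_oai.py --input_file urls.txt   # one share URL per line
# Each conversation is written to oai-chat-<conversation-id>.json in the working directory.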
| [] |
2024-01-10 | break-free/breakfree-dk-privategpt | privateGPT.py | from dotenv import load_dotenv
from langchain.chains import RetrievalQA
from langchain.embeddings import LlamaCppEmbeddings
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.vectorstores import Chroma
from langchain.llms import GPT4All, LlamaCpp
import os
load_dotenv()
llama_embeddings_model = os.environ.get("LLAMA_EMBEDDINGS_MODEL")
persist_directory = os.environ.get('PERSIST_DIRECTORY')
model_type = os.environ.get('MODEL_TYPE')
model_path = os.environ.get('MODEL_PATH')
model_n_ctx = os.environ.get('MODEL_N_CTX')
from constants import CHROMA_SETTINGS
def main():
llama = LlamaCppEmbeddings(model_path=llama_embeddings_model, n_ctx=model_n_ctx)
db = Chroma(persist_directory=persist_directory, embedding_function=llama, client_settings=CHROMA_SETTINGS)
retriever = db.as_retriever()
# Prepare the LLM
callbacks = [StreamingStdOutCallbackHandler()]
match model_type:
case "LlamaCpp":
llm = LlamaCpp(model_path=model_path, n_ctx=model_n_ctx, callbacks=callbacks, verbose=False)
case "GPT4All":
llm = GPT4All(model=model_path, n_ctx=model_n_ctx, backend='gptj', callbacks=callbacks, verbose=False)
        case _default:
            print(f"Model {model_type} not supported!")
            exit(1)
qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever, return_source_documents=True)
# Interactive questions and answers
while True:
query = input("\nEnter a query: ")
if query == "exit":
break
# Get the answer from the chain
res = qa(query)
answer, docs = res['result'], res['source_documents']
# Print the result
print("\n\n> Question:")
print(query)
print("\n> Answer:")
print(answer)
# Print the relevant sources used for the answer
for document in docs:
print("\n> " + document.metadata["source"] + ":")
print(document.page_content)
if __name__ == "__main__":
main()
| [] |
2024-01-10 | ConnectAI-E/Feishu-Webhook-Proxy | tests~test_openai.py | import logging
from connectai.lark.websocket import *
from langchain.chat_models import ChatOpenAI
from langchain.schema import SystemMessage, HumanMessage
from langchain.callbacks.base import BaseCallbackHandler
class TextMessageBot(Bot):
def __init__(self, app=None, *args, **kwargs):
self.app = app
super().__init__(*args, **kwargs)
def on_message(self, data, *args, **kwargs):
if 'header' in data:
if data['header']['event_type'] == 'im.message.receive_v1' and data['event']['message']['message_type'] == 'text':
content = json.loads(data['event']['message']['content'])
if self.app:
return self.app.process_text_message(text=content['text'], **data['event']['sender']['sender_id'], **data['event']['message'])
logging.warn("unkonw message %r", data)
class OpenAICallbackHandler(BaseCallbackHandler):
def __init__(self, bot, message_id):
self.bot = bot
self.message_id = message_id
self.result = ''
self.send_length = 0
self.reply_message_id = ''
def on_llm_start(self, *args, **kwargs):
response = self.bot.reply_card(
self.message_id,
FeishuMessageCard(
FeishuMessageDiv(''),
FeishuMessageNote(FeishuMessagePlainText('正在思考,请稍等...'))
)
)
self.reply_message_id = response.json()['data']['message_id']
def on_llm_new_token(self, token, **kwargs):
logging.info("on_llm_new_token %r", token)
self.result += token
if len(self.result) - self.send_length < 25:
return
self.send_length = len(self.result)
self.bot.update(
self.reply_message_id,
FeishuMessageCard(
FeishuMessageDiv(self.result, tag="lark_md"),
FeishuMessageNote(FeishuMessagePlainText('正在生成,请稍等...'))
)
)
def on_llm_end(self, response, **kwargs):
content = response.generations[0][0].text
logging.info("on_llm_end %r", content)
self.bot.update(
self.reply_message_id,
FeishuMessageCard(
FeishuMessageDiv(content, tag="lark_md"),
FeishuMessageNote(FeishuMessagePlainText("reply from openai.")),
)
)
class Session(object):
store = {}
def __init__(self, app_id, user_id):
self.key = f"{app_id}:{user_id}"
self.data = self.store.get(self.key, dict(
chat_history=[], temperature=0.7,
system_role='', model='gpt-3.5-turbo'
))
def __getattr__(self, name):return self.data.get(name)
def __enter__(self): return self
def __exit__(self, *args):
self.store[self.key] = self.data
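# Illustrative usage of the Session store above (example ids): reads fall back to the defaults
# on first use, and mutations made inside the `with` block are persisted back to Session.store.
def _example_session_roundtrip():
    with Session("cli_app_id", "ou_example_user") as session:
        session.data["model"] = "gpt-4"
    return Session.store["cli_app_id:ou_example_user"]["model"]  # -> "gpt-4"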
class Application(object):
def __init__(self, openai_api_base='', openai_api_key='', system_role='', temperature=0.7, streaming=True, **kwargs):
self.system_role = system_role
# self.bot.app = self
self.bot = TextMessageBot(app=self, **kwargs)
self.temperature = temperature
self.openai_options = dict(
openai_api_base=openai_api_base,
openai_api_key=openai_api_key,
streaming=streaming,
)
def process_text_message(self, text, message_id, open_id, **kwargs):
with Session(self.bot.app_id, open_id) as session:
if text == '/help' or text == '帮助':
self.bot.reply_card(
message_id,
FeishuMessageCard(
FeishuMessageDiv('👋 你好呀,我是一款基于OpenAI技术的智能聊天机器人'),
FeishuMessageHr(),
FeishuMessageDiv('👺 **角色扮演模式**\n文本回复*/system*+空格+角色信息', tag='lark_md'),
FeishuMessageHr(),
FeishuMessageDiv('🎒 **需要更多帮助**\n文本回复 *帮助* 或 */help*', tag='lark_md'),
header=FeishuMessageCardHeader('🎒需要帮助吗?'),
)
)
elif text[:7] == '/system' and text[7:]:
session.data['system_role'] = text[7:]
session.data['chat_history'] = []
self.bot.reply_card(
message_id,
FeishuMessageCard(
FeishuMessageDiv('请注意,这将开始一个全新的对话'),
header=FeishuMessageCardHeader('👺 已进入角色扮演模式'),
)
)
elif text:
chat = ChatOpenAI(
callbacks=[OpenAICallbackHandler(self.bot, message_id)],
temperature=session.temperature or self.temperature,
model=session.model,
**self.openai_options
)
system_role = session.system_role or self.system_role
system_message = [SystemMessage(content=system_role)] if system_role else []
messages = system_message + session.chat_history + [HumanMessage(content=text)]
message = chat(messages)
# save chat_history
session.chat_history.append(HumanMessage(content=text))
session.chat_history.append(message)
logging.info("reply message %r\nchat_history %r", message, session.chat_history)
else:
logging.warn("empty text", text)
if __name__ == "__main__":
import click
@click.command()
@click.option('--openai_api_base', prompt="OpenAI API BASE", help='Your openai_api_base')
@click.option('--openai_api_key', prompt="OpenAI API KEY", help='Your openai_api_key')
@click.option('--system_role', default='', prompt="SYSTEM ROLE", help='OpenAI system_role')
@click.option('--temperature', default=0.7, prompt="TEMPERATURE", help='OpenAI temperature')
@click.option('--app_id', prompt="APP ID", help='Your app_id')
@click.option('--app_secret', default='', prompt="APP SECRET", help='Your app_secret')
@click.option('--verification_token', default='',
prompt="VERIFICATION TOKEN", help='Your verification_token')
@click.option('--encrypt_key', prompt="ENCRYPT KEY", help='Your encrypt_key')
@click.option('--debug', default=False, prompt="DEBUG MODE", help='debug mode')
def main(debug, **kwargs):
app = Application(**kwargs)
client = Client(app.bot)
client.start(debug) # debug mode
main()
| [] |
2024-01-10 | eleqtrizit/youtube-summarizer | youtube.py | import argparse
import contextlib
import os
from openai import OpenAI
from rich.console import Console
from youtube_transcript_api import YouTubeTranscriptApi
console = Console()
# Initialize parser
parser = argparse.ArgumentParser()
client = OpenAI(
api_key=os.environ.get("OPENAI_API_KEY"),
)
def handle_query(gpt_prompt):
print("Generating summary (writing to STDOUT and summary.txt)...")
messages = [{"role": "user", "content": gpt_prompt}]
for response in client.chat.completions.create(
model = "gpt-4-1106-preview",
temperature = 0.7,
max_tokens = 1010,
top_p = 1,
frequency_penalty = 0,
presence_penalty = 0,
stream = True,
messages = messages
):
reply = ''
with contextlib.suppress(AttributeError):
if content := response.choices[0].delta.content:
console.print(content, style = "#FFFFFF", end = '') # type: ignore
reply += content
# save to file
with open("summary.txt", "w") as f:
f.write(reply)
print("Saved to summary.txt")
def get_transcript(video_id):
print("Fetching transcript...")
transcript_json = YouTubeTranscriptApi.get_transcript(video_id)
return ' '.join([x['text'] for x in transcript_json])
PROMPT = """Summarize the following transcript in markdown.
Pretend you are a college student taking notes on a lecture.
Your output should use the following template:
### Summary
### Notes
### Keywords
### Media Discussed (TV, Movies, Books, etc)
### Tools Discussed
Transcript below:
"""
if __name__ == '__main__':
# Adding optional argument
parser.add_argument("-v", "--VideoId", help = "Youtube Video ID")
# Read arguments from command line
args = parser.parse_args()
transcript = get_transcript(args.VideoId)
prompt = PROMPT + transcript
handle_query(prompt)
| [
"Summarize the following transcript in markdown.\nPretend you are a college student taking notes on a lecture.\n\nYour output should use the following template:\n\n### Summary\n\n### Notes\n\n### Keywords\n\n### Media Discussed (TV, Movies, Books, etc)\n\n### Tools Discussed\n\nTranscript below:\n",
"Summarize the following transcript in markdown.\nPretend you are a college student taking notes on a lecture.\n\nYour output should use the following template:\n\n### Summary\n\n### Notes\n\n### Keywords\n\n### Media Discussed (TV, Movies, Books, etc)\n\n### Tools Discussed\n\nTranscript below:\nPLACEHOLDER"
] |
2024-01-10 | mlincon/vector-databases | src~chunking~character.py | from langchain.document_loaders import TextLoader
from langchain.text_splitter import (
CharacterTextSplitter,
NLTKTextSplitter,
RecursiveCharacterTextSplitter,
)
from langchain_core.documents import Document
def create_fixed_size_chunks(
text: list[str],
separator: str = "\n\n",
chunk_size: int = 512,
chunk_overlap: int = 20,
) -> list[Document]:
text_splitter = CharacterTextSplitter(
separator=separator,
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
)
return text_splitter.create_documents(text)
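# Illustrative usage of the helper above (it is not called elsewhere in this script);
# the raw texts and sizes below are example values:
def _example_fixed_size_chunks() -> list[Document]:
    raw_texts = ["First long passage of raw text...", "Second passage..."]
    return create_fixed_size_chunks(raw_texts, separator="\n\n", chunk_size=256, chunk_overlap=16)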
loader = TextLoader("../data/test.txt")
# Each assignment below replaces the previous one; only the final RecursiveCharacterTextSplitter is actually used below.
text_splitter = CharacterTextSplitter(separator=":", chunk_size=20, chunk_overlap=0)
text_splitter = NLTKTextSplitter()
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=512,
    chunk_overlap=20,
)
docs = loader.load_and_split(text_splitter=text_splitter)
for doc in docs:
print(doc.page_content)
print("\n")
| [] |
2024-01-10 | PierreBeaujuge/holbertonschool-machine_learning | reinforcement_learning~0x00-q_learning~0-load_env.py | #!/usr/bin/env python3
"""
0-load_env.py
"""
import numpy as np
import gym
def load_frozen_lake(desc=None, map_name=None, is_slippery=False):
"""
function that loads a pre-made FrozenLakeEnv environment from OpenAI’s gym
"""
env = gym.make("FrozenLake-v0",
desc=desc,
map_name=map_name,
is_slippery=is_slippery)
return env
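# Example usage (illustrative): either a custom map via `desc`, where 'S' = start, 'F' = frozen,
# 'H' = hole and 'G' = goal, or a pre-made map via `map_name`:
# env = load_frozen_lake(desc=[['S', 'F', 'F'], ['F', 'H', 'H'], ['F', 'F', 'G']])
# env = load_frozen_lake(map_name='4x4', is_slippery=True)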
| [] |
2024-01-10 | microsoft/OpenAIWorkshop | scenarios~openai_batch_pipeline~document_generation~cleansed_generation.py | import os
import logging
import random
import openai
import time
from azure.storage.blob import BlobClient
import argparse
import json
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--conn_string', type=str, help='Azure Storage connection string')
parser.add_argument('--openai_api_base_url', type=str, help='OpenAI API Base URL')
parser.add_argument('--openai_api_key', type=str, help='OpenAI API Key')
parser.add_argument('--openai_model', type=str, help='OpenAI Model', default='davincitest')
args = parser.parse_args()
temperature= 0.75
max_tokens= 2000
top_p= 0.80
frequency_penalty= 0.25
presence_penalty= 0.15
stop= None
openai.api_type = "azure"
openai.api_base = args.openai_api_base_url
openai.api_version = "2022-12-01"
openai.api_key = args.openai_api_key
for filename in os.listdir('scenarios/openai_batch_pipeline/document_generation/generated_documents'):
if filename.endswith('.txt'):
results = {}
with open(os.path.join("scenarios/openai_batch_pipeline/document_generation/generated_documents", filename), 'r') as src:
txt = src.read()
prmpt = txt + "\n \nProvide summary."
openai_output = openai.Completion.create(
engine= args.openai_model,
prompt= prmpt,
temperature= temperature,
max_tokens= max_tokens,
top_p= top_p,
frequency_penalty= frequency_penalty,
presence_penalty= presence_penalty,
stop= None)
results['summary'] = openai_output.choices[0].text
results["customerSentiment"] = filename.split("_")[3]
results["topic"] = filename.split("_")[4]
results["product"] = filename.split("_")[5]
results["filename"] = filename
with open(os.path.join("scenarios/openai_batch_pipeline/document_generation/cleansed_documents", filename.split("_")[0]+".json"), 'w') as dest:
dest.write(json.dumps(results, indent=4))
file_name = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'generated_documents', filename.split("_")[0]+".json")
blob_name=f'generated_documents/{filename.split("_")[0]}.json'
blob_client = BlobClient.from_connection_string(
args.conn_string,
container_name="workshop-data",
blob_name=blob_name,
)
blob_client.upload_blob(dest)
if __name__ == '__main__':
main() | [
"PLACEHOLDER\n \nProvide summary."
] |
2024-01-10 | microsoft/OpenAIWorkshop | scenarios~incubations~copilot~employee_support~hr_copilot_utils.py | # Agent class
### responsibility definition: expertise, scope, conversation script, style
import openai
import os
from pathlib import Path
import json
import time
from azure.search.documents.models import Vector
import uuid
from tenacity import retry, wait_random_exponential, stop_after_attempt
from dotenv import load_dotenv
from azure.core.credentials import AzureKeyCredential
from azure.search.documents import SearchClient
from openai.embeddings_utils import get_embedding, cosine_similarity
import inspect
env_path = Path('.') / 'secrets.env'
load_dotenv(dotenv_path=env_path)
openai.api_key = os.environ.get("AZURE_OPENAI_API_KEY")
openai.api_base = os.environ.get("AZURE_OPENAI_ENDPOINT")
emb_engine = os.getenv("AZURE_OPENAI_EMB_DEPLOYMENT")
emb_engine = emb_engine.strip('"')
openai.api_type = "azure"
import sys
import random
sys.path.append("..")
from utils import Agent, check_args
class Search_Client():
def __init__(self,emb_map_file_path):
with open(emb_map_file_path) as file:
self.chunks_emb = json.load(file)
def find_article(self,question, topk=3):
"""
Given an input vector and a dictionary of label vectors,
returns the label with the highest cosine similarity to the input vector.
"""
input_vector = get_embedding(question, engine = emb_engine)
# Compute cosine similarity between input vector and each label vector
cosine_list=[]
for chunk_id,chunk_content, vector in self.chunks_emb:
            # By default we embed the entire chunk content (plus the topic description).
            # To use an embedding of just the topic name and description instead, use: cosine_sim = cosine_similarity(input_vector, vector[0])
cosine_sim = cosine_similarity(input_vector, vector)
cosine_list.append((chunk_id,chunk_content,cosine_sim ))
cosine_list.sort(key=lambda x:x[2],reverse=True)
cosine_list= cosine_list[:topk]
best_chunks =[chunk[0] for chunk in cosine_list]
contents = [chunk[1] for chunk in cosine_list]
text_content = ""
for chunk_id, content in zip(best_chunks, contents):
text_content += f"{chunk_id}\n{content}\n"
return text_content
#azcs implementation
if os.getenv("USE_AZCS") == "True":
service_endpoint = os.getenv("AZURE_SEARCH_SERVICE_ENDPOINT")
index_name = os.getenv("AZURE_SEARCH_INDEX_NAME")
index_name = index_name.strip('"')
key = os.getenv("AZURE_SEARCH_ADMIN_KEY")
key = key.strip('"')
# @retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))
# Function to generate embeddings for title and content fields, also used for query embeddings
credential = AzureKeyCredential(key)
azcs_search_client = SearchClient(service_endpoint, index_name =index_name , credential=credential)
else:
faiss_search_client = Search_Client("../data/chunk_emb_map.json")
def search_knowledgebase_acs(search_query):
vector = Vector(value=get_embedding(search_query, engine=emb_engine), k=3, fields="embedding")
print("search query: ", search_query)
# print("products: ", products.split(","))
# product_filter = " or ".join([f"product eq '{product}'" for product in products.split(",")])
results = azcs_search_client.search(
search_text=search_query,
vectors= [vector],
# filter= product_filter,
query_type="semantic", query_language="en-us", semantic_configuration_name='default', query_caption="extractive", query_answer="extractive",
select=["sourcepage","content"],
top=3
)
text_content =""
for result in results:
text_content += f"{result['sourcepage']}\n{result['content']}\n"
# print("text_content", text_content)
return text_content
def search_knowledgebase(search_query):
if os.getenv("USE_AZCS") == "True":
print("using azcs")
return search_knowledgebase_acs(search_query)
else:
print("using faiss")
print(os.getenv("USE_AZCS"))
return faiss_search_client.find_article(search_query)
###Sematic caching implementation
if os.getenv("USE_SEMANTIC_CACHE") == "True":
cache_index_name = os.getenv("CACHE_INDEX_NAME")
cache_index_name= cache_index_name.strip('"')
azcs_semantic_cache_search_client = SearchClient(service_endpoint, cache_index_name, credential=credential)
def add_to_cache(search_query, gpt_response):
search_doc = {
"id" : str(uuid.uuid4()),
"search_query" : search_query,
"search_query_vector" : get_embedding(search_query, engine=emb_engine),
"gpt_response" : gpt_response
}
azcs_semantic_cache_search_client.upload_documents(documents = [search_doc])
def get_cache(search_query):
vector = Vector(value=get_embedding(search_query, engine=emb_engine), k=3, fields="search_query_vector")
results = azcs_semantic_cache_search_client.search(
search_text=None,
vectors= [vector],
select=["gpt_response"],
)
try:
result =next(results)
print("threshold ", result['@search.score'])
if result['@search.score']>= float(os.getenv("SEMANTIC_HIT_THRESHOLD")):
return result['gpt_response']
except StopIteration:
pass
return None
class Smart_Agent(Agent):
"""
Agent that can use other agents and tools to answer questions.
Args:
persona (str): The persona of the agent.
tools (list): A list of {"tool_name":tool} that the agent can use to answer questions. Tool must have a run method that takes a question and returns an answer.
stop (list): A list of strings that the agent will use to stop the conversation.
init_message (str): The initial message of the agent. Defaults to None.
engine (str): The name of the GPT engine to use. Defaults to "gpt-35-turbo".
Methods:
llm(new_input, stop, history=None, stream=False): Generates a response to the input using the LLM model.
_run(new_input, stop, history=None, stream=False): Runs the agent and generates a response to the input.
run(new_input, history=None, stream=False): Runs the agent and generates a response to the input.
Attributes:
persona (str): The persona of the agent.
tools (list): A list of {"tool_name":tool} that the agent can use to answer questions. Tool must have a run method that takes a question and returns an answer.
stop (list): A list of strings that the agent will use to stop the conversation.
init_message (str): The initial message of the agent.
engine (str): The name of the GPT engine to use.
"""
def __init__(self, persona,functions_spec, functions_list, name=None, init_message=None, engine =os.getenv("AZURE_OPENAI_CHAT_DEPLOYMENT")):
super().__init__(engine=engine,persona=persona, init_message=init_message, name=name)
self.functions_spec = functions_spec
self.functions_list= functions_list
@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))
def run(self, user_input, conversation=None, stream = False, api_version = "2023-07-01-preview"):
openai.api_version = api_version
if user_input is None: #if no input return init message
return self.init_history, self.init_history[1]["content"]
if conversation is None: #if no history return init message
conversation = self.init_history.copy()
conversation.append({"role": "user", "content": user_input})
i=0
query_used = None
# while True:
# try:
# i+=1
response = openai.ChatCompletion.create(
deployment_id=self.engine, # The deployment name you chose when you deployed the GPT-35-turbo or GPT-4 model.
messages=conversation,
functions=self.functions_spec,
function_call="auto",
)
response_message = response["choices"][0]["message"]
# Step 2: check if GPT wanted to call a function
if response_message.get("function_call"):
print("Recommended Function call:")
print(response_message.get("function_call"))
print()
# Step 3: call the function
# Note: the JSON response may not always be valid; be sure to handle errors
function_name = response_message["function_call"]["name"]
# verify function exists
if function_name not in self.functions_list:
print("function list:", self.functions_list)
raise Exception("Function " + function_name + " does not exist")
function_to_call = self.functions_list[function_name]
# verify function has correct number of arguments
function_args = json.loads(response_message["function_call"]["arguments"])
if check_args(function_to_call, function_args) is False:
raise Exception("Invalid number of arguments for function: " + function_name)
            # check if there's an opportunity to use the semantic cache
if function_name =="search_knowledgebase":
if os.getenv("USE_SEMANTIC_CACHE") == "True":
search_query = function_args["search_query"]
cache_output = get_cache(search_query)
if cache_output is not None:
print("semantic cache hit")
conversation.append({"role": "assistant", "content": cache_output})
return False, query_used,conversation, cache_output
else:
print("semantic cache missed")
query_used = search_query
function_response = function_to_call(**function_args)
print("Output of function call:")
print(function_response)
print()
# Step 4: send the info on the function call and function response to GPT
# adding assistant response to messages
conversation.append(
{
"role": response_message["role"],
"name": response_message["function_call"]["name"],
"content": response_message["function_call"]["arguments"],
}
)
# adding function response to messages
conversation.append(
{
"role": "function",
"name": function_name,
"content": function_response,
}
) # extend conversation with function response
openai.api_version = api_version
second_response = openai.ChatCompletion.create(
messages=conversation,
deployment_id=self.engine,
stream=stream,
) # get a new response from GPT where it can see the function response
if not stream:
assistant_response = second_response["choices"][0]["message"]["content"]
conversation.append({"role": "assistant", "content": assistant_response})
else:
assistant_response = second_response
return stream,query_used, conversation, assistant_response
else:
assistant_response = response_message["content"]
conversation.append({"role": "assistant", "content": assistant_response})
# break
# except Exception as e:
# if i>3:
# assistant_response="Haizz, my memory is having some trouble, can you repeat what you just said?"
# break
# print("Exception as below, will retry\n", str(e))
# time.sleep(5)
return False,query_used, conversation, assistant_response
HR_PERSONA = """
You are Lucy, an HR support specialist responsible for answering questions about HR & Payroll from employees and handling personal information updates.
You start the conversation by validating the identity of the employee. Do not proceed until you have validated the identity of the employee.
When you are asked a question, use the search tool to find relevant knowledge articles to create the answer.
Answer ONLY with the facts from the search tool. If there isn't enough information, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question.
Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf].
When an employee requests an address update, interact with them to get their new country, new state, new city and zipcode. If they don't provide a new country, check if it's still United States. Make sure you have all the information, then use the update address tool provided to update it in the system.
For all other information update requests, log a ticket to the HR team to update the information.
If the employee is asking for information that is not related to HR or Payroll, say it's not your area of expertise.
"""
def validate_identity(employee_id, employee_name):
if employee_id in ["1234","5678"]:
return f"Employee {employee_name} with id {employee_id} is validated in this conversation"
else:
return "This employee id is not valid"
def update_address(employee_id, country, state, city, zipcode):
return f"Address of employee {employee_id} address has been updated to {country}, {state}, {city}, {zipcode}"
def create_ticket(employee_id, updates):
return f"A ticket number 1233445 has been created for employee {employee_id} with the following updates: {updates} "
HR_AVAILABLE_FUNCTIONS = {
"search_knowledgebase": search_knowledgebase,
"validate_identity": validate_identity,
"update_address": update_address,
"create_ticket": create_ticket,
}
HR_FUNCTIONS_SPEC= [
{
"name": "search_knowledgebase",
"description": "Searches the knowledge base for an answer to the HR/Payroll question",
"parameters": {
"type": "object",
"properties": {
"search_query": {
"type": "string",
"description": "The search query to use to search the knowledge base"
}
},
"required": ["search_query"],
},
},
{
"name": "validate_identity",
"description": "validates the identity of the employee",
"parameters": {
"type": "object",
"properties": {
"employee_id": {
"type": "string",
"description": "The employee id to validate"
},
"employee_name": {
"type": "string",
"description": "The employee id to validate"
}
},
"required": ["employee_id", "employee_name"],
},
},
{
"name": "update_address",
"description": "Update the address of the employee",
"parameters": {
"type": "object",
"properties": {
"employee_id": {
"type": "string",
"description": "The employee id to validate"
},
"city": {
"type": "string",
"description": "The new city to update"
},
"state": {
"type": "string",
"description": "The new state to update"
},
"zipcode": {
"type": "integer",
"description": "The new zipcode to update"
},
"country": {
"type": "string",
"description": "The new country to update"
}
},
"required": ["employee_id","city", "state", "zipcode", "country"],
},
},
{
"name": "create_ticket",
"description": "Create a support ticket for the employee to update personal information other than address",
"parameters": {
"type": "object",
"properties": {
"employee_id": {
"type": "string",
"description": "The employee id to validate"
},
"updates": {
"type": "string",
"description": "The new/changed information to update"
}
},
"required": ["employee_id","updates"],
},
},
]
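# Illustrative wiring sketch (not part of the original module); assumes the Azure OpenAI and
# search environment variables loaded above are set. It shows how the HR persona, function
# specs and callables plug into Smart_Agent for a single conversational turn.
def _example_hr_session(user_input="I moved house, can you update my address?"):
    agent = Smart_Agent(
        persona=HR_PERSONA,
        functions_spec=HR_FUNCTIONS_SPEC,
        functions_list=HR_AVAILABLE_FUNCTIONS,
        name="Lucy",
    )
    stream, query_used, conversation, response = agent.run(user_input)
    return response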
| [
"function_call",
"arguments"
] |
2024-01-10 | microsoft/OpenAIWorkshop | scenarios~incubations~copilot~realtime_streaming~flight_copilot_utils.py | # Agent class
### responsibility definition: expertise, scope, conversation script, style
import openai
import os
from pathlib import Path
import json
import time
from azure.search.documents.models import Vector
import uuid
from tenacity import retry, wait_random_exponential, stop_after_attempt
from azure.cosmos import CosmosClient
from datetime import datetime, timedelta
from dateutil import parser
from dotenv import load_dotenv
from azure.core.credentials import AzureKeyCredential
from azure.search.documents import SearchClient
from openai.embeddings_utils import get_embedding, cosine_similarity
import inspect
env_path = Path('.') / 'secrets.env'
load_dotenv(dotenv_path=env_path)
openai.api_key = os.environ.get("AZURE_OPENAI_API_KEY")
openai.api_base = os.environ.get("AZURE_OPENAI_ENDPOINT")
openai.api_type = "azure"
cosmos_uri = os.environ.get("COSMOS_URI")
cosmos_key=os.environ.get("COSMOS_KEY")
container_name = os.getenv("COSMOS_CONTAINER_NAME")
cosmos_db_name = os.getenv("COSMOS_DB_NAME")
client = CosmosClient(cosmos_uri, credential=cosmos_key)
cosmos_db_client = client.get_database_client(cosmos_db_name)
cosmos_container_client = cosmos_db_client.get_container_client(container_name)
import random
import sys
import random
sys.path.append("..")
from utils import Agent, Smart_Agent, check_args, search_knowledgebase
service_endpoint = os.getenv("AZURE_SEARCH_SERVICE_ENDPOINT")
index_name = os.getenv("AZURE_SEARCH_INDEX_NAME")
index_name = index_name.strip('"')
key = os.getenv("AZURE_SEARCH_ADMIN_KEY")
key = key.strip('"')
credential = AzureKeyCredential(key)
azcs_search_client = SearchClient(service_endpoint, index_name ="flights" , credential=credential)
def check_flight_status(flight_num, from_):
filter=f"flight_num eq '{flight_num}'"
results = azcs_search_client.search(
search_text=None,
filter=filter,
top=1
)
output =f"cannot find status for the flight {flight_num} "
for result in results:
output = result
return str(output)
def query_flights(from_, to, departure_time):
    # Generate 3 candidate flights (fixed demo flight numbers in the AA1234 format) with different departure times and return the list to the user.
    # First convert the departure time to a datetime object, assuming a format like '2020-09-20T10:30:00'.
def get_new_times(departure_time, delta):
dp_dt = parser.parse(departure_time)
new_dp_dt = dp_dt + timedelta(hours=delta)
new_ar_dt = new_dp_dt + timedelta(hours=2)
new_departure_time = new_dp_dt.strftime("%Y-%m-%dT%H:%M:%S")
new_arrival_time = new_ar_dt.strftime("%Y-%m-%dT%H:%M:%S")
return new_departure_time, new_arrival_time
flights = ""
for flight_num, delta in [("AA479", -1), ("AA490",-2), ("AA423",-3)]:
new_departure_time, new_arrival_time = get_new_times(departure_time, delta)
flights= flights +f"flight number {flight_num}, from: {from_}, to: {to}, departure_time: {new_departure_time}, arrival_time: {new_arrival_time}, flight_status: on time \n"
return flights
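# Illustrative example (not part of the original module): airport codes and the timestamp are
# made-up values. query_flights returns three candidate flights (AA479/AA490/AA423) shifted
# -1/-2/-3 hours from the requested departure, each with a 2-hour flight time.
def _example_query_flights():
    return query_flights("SEA", "LAX", "2023-09-20T10:30:00")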
def confirm_flight_change(current_ticket_number, new_flight_num, new_departure_time,new_arrival_time):
    # Cancel the customer's current ticket in Cosmos DB, create a replacement ticket on the new flight,
    # and return a confirmation message that includes a (demo) fare-difference charge.
charge = 80
#retrieve current flight
old_flight={}
for item in cosmos_container_client.query_items(
query=f'SELECT * FROM c WHERE c.ticket_num="{current_ticket_number}" AND c.status="open"',
enable_cross_partition_query=True):
old_flight['airline'] = item['airline']
old_flight['customer_id'] = item['customer_id']
old_flight['flight_num'] = item['flight_num']
        old_flight['seat_num'] = item['seat_num']
        old_flight['departure_airport'] = item['departure_airport']
old_flight['arrival_airport'] = item['arrival_airport']
old_flight['departure_time'] = item['departure_time']
old_flight['arrival_time'] = item['arrival_time']
old_flight['ticket_class'] = item['ticket_class']
old_flight['ticket_num'] = item['ticket_num']
old_flight['gate'] = item['gate']
old_flight['id'] = item['id']
old_flight['status'] = "cancelled"
break
#update the old flight status to cancelled
cosmos_container_client.upsert_item(old_flight)
print("updated old flight status to cancelled")
#create a new flight
#generate a new ticket number which is a 10 digit random number
new_ticket_num = str(random.randint(1000000000, 9999999999))
new_flight=old_flight.copy()
new_flight["id"] = new_ticket_num
new_flight['flight_num'] = new_flight_num
new_flight['departure_time'] = new_departure_time
new_flight['arrival_time'] = new_arrival_time
new_flight['ticket_num'] = new_ticket_num
new_flight['status'] = "open"
cosmos_container_client.create_item(new_flight)
return f"""Your new flight now is {new_flight_num} departing from {new_flight['departure_airport']} to {new_flight['arrival_airport']}. Your new departure time is {new_departure_time} and arrival time is {new_arrival_time}. Your new ticket number is {new_ticket_num}.
Your credit card has been charged with an amount of ${charge} dollars for fare difference."""
def test_change_flight(current_ticket_number, current_flight_num, new_flight_num, from_):
    # Return the (demo) cost of changing from the current flight to the new flight so the agent can confirm with the customer before executing the change.
charge = 80
return f"Changing your ticket from {current_flight_num} to new flight {new_flight_num} departing from {from_} would cost {charge} dollars."
def load_user_flight_info(user_id):
# Load flight information from CosmosDB
matched_flights =[]
for item in cosmos_container_client.query_items(
query=f'SELECT * FROM c WHERE c.customer_id="{user_id}" AND c.status="open"',
enable_cross_partition_query=True):
flight={}
flight['airline'] = item['airline']
flight['flight_num'] = item['flight_num']
        flight['seat_num'] = item['seat_num']
        flight['departure_airport'] = item['departure_airport']
flight['arrival_airport'] = item['arrival_airport']
flight['departure_time'] = item['departure_time']
flight['arrival_time'] = item['arrival_time']
flight['ticket_class'] = item['ticket_class']
flight['ticket_num'] = item['ticket_num']
flight['gate'] = item['gate']
flight['status'] = item['status']
matched_flights.append(flight)
if len(matched_flights) == 0:
return f"Sorry, we cannot find any flight information for you"
return str(matched_flights)
PERSONA = """
You are Maya, an airline customer agent helping customers with questions and requests about their flight.
You are currently serving {customer_name} with id {customer_id}.
First, look up their flight information and confirm it with the customer, including flight number, origin and destination, and departure and arrival times.
When you are asked a general airline policy question such as baggage limits, use the search_knowledgebase function to find relevant knowledge articles to create the answer.
Answer ONLY with the facts from the search tool. If there isn't enough information, say you don't know. Do not generate answers that don't use the information from the search. If asking a clarifying question to the user would help, ask the question.
When the user asks for a flight status, use check_flight_status function to check the flight status.
When the user asks to change their flight, first check the feasibility and cost of the change with check_change_flight function. If customer agrees with the change, execute the change with confirm_flight_change function.
If the user is asking for information that is not related to flight and airline, say it's not your area of expertise.
"""
AVAILABLE_FUNCTIONS = {
"search_knowledgebase": search_knowledgebase,
"query_flights": query_flights,
"confirm_flight_change": confirm_flight_change,
"check_change_flight": test_change_flight,
"check_flight_status": check_flight_status,
"load_user_flight_info": load_user_flight_info
}
FUNCTIONS_SPEC= [
{
"name": "search_knowledgebase",
"description": "Searches the knowledge base for an answer to the question",
"parameters": {
"type": "object",
"properties": {
"search_query": {
"type": "string",
"description": "The search query to use to search the knowledge base"
}
},
"required": ["search_query"],
},
},
{
"name": "query_flights",
"description": "Query the list of available flights for a given departure airport code, arrival airport code and departure time",
"parameters": {
"type": "object",
"properties": {
"from_": {
"type": "string",
"description": "The departure airport code"
},
"to": {
"type": "string",
"description": "The arrival airport code"
},
"departure_time": {
"type": "string",
"description": "The departure time"
}
},
"required": ["from_", "to", "departure_time"],
},
},
{
"name": "check_change_flight",
"description": "Check the feasibility and outcome of a presumed flight change by providing current flight information and new flight information",
"parameters": {
"type": "object",
"properties": {
"current_ticket_number": {
"type": "string",
"description": "The current ticket number"
},
"current_flight_num": {
"type": "string",
"description": "The current flight number"
},
"new_flight_num": {
"type": "string",
"description": "The new flight number"
},
"from_": {
"type": "string",
"description": "The departure airport code"
},
},
"required": ["current_ticket_number", "current_flight_num", "new_flight_num", "from_"],
},
},
{
"name": "confirm_flight_change",
"description": "Execute the flight change after confirming with the customer",
"parameters": {
"type": "object",
"properties": {
"current_ticket_number": {
"type": "string",
"description": "The current ticket number"
},
"new_flight_num": {
"type": "string",
"description": "The new flight number"
},
"new_departure_time": {
"type": "string",
"description": "The new departure time of the new flight"
},
"new_arrival_time": {
"type": "string",
"description": "The new arrival time of the new flight"
},
},
"required": ["current_ticket_number", "new_flight_num", "new_departure_time", "new_arrival_time"],
},
},
{
"name": "check_flight_status",
"description": "Checks the flight status for a flight",
"parameters": {
"type": "object",
"properties": {
"flight_num": {
"type": "string",
"description": "The flight number"
},
"from_": {
"type": "string",
"description": "The departure airport code"
}
},
"required": ["flight_num", "from_"],
},
},
{
"name": "load_user_flight_info",
"description": "Loads the flight information for a user",
"parameters": {
"type": "object",
"properties": {
"user_id": {
"type": "string",
"description": "The user id"
}
},
"required": ["user_id"],
},
}
] | [] |
2024-01-10 | microsoft/OpenAIWorkshop | scenarios~incubations~copilot~employee_support~multi_agent_utils.py | # Agent class
### responsibility definition: expertise, scope, conversation script, style
import time
import openai
import os
from pathlib import Path
import json
import random
from dotenv import load_dotenv
# from openai.embeddings_utils import get_embedding, cosine_similarity
# import inspect
env_path = Path('.') / 'secrets.env'
load_dotenv(dotenv_path=env_path)
evaluator_engine = os.environ.get("AZURE_OPENAI_EVALUATOR_DEPLOYMENT")
evaluator_engine = evaluator_engine.strip('"')
from hr_copilot_utils import Agent,Smart_Agent, check_args, search_knowledgebase,update_address, create_ticket
def get_help(user_request):
return f"{user_request}"
def validate_identity(employee_id, employee_name):
if employee_id in ["1234","5678"]:
return f"Employee {employee_name} with id {employee_id} is validated in this conversation"
else:
return "This employee id is not valid"
GET_HELP_FUNCTION_NAME = "get_help" #default function name for routing call used by all agents
VALIDATE_IDENTIFY_FUNCTION_NAME = "validate_identity" #default function name for validating identity used by all agents
GENERALIST_PERSONA = """
You are Jenny, a helpful general assistant that can answer general questions about everything except HR, Payroll and IT.
You start the conversation by validating the identity of the employee. Do not proceed until you have validated the identity of the employee.
If the employee is asking for information in the HR & Payroll or IT domains, call the get_help function. Do not try to answer the question.
Otherwise, use your knowledge to answer the question.
"""
IT_PERSONA = """
You are Paul, a helpful IT specialist who helps employees with everything in IT.
If the employee is asking for information that is not related to IT, call the get_help function.
"""
HR_PERSONA = """
You are Lucy, an HR support specialist responsible for answering questions about HR & Payroll from employees and handling personal information updates.
When you are asked a question, always use the search tool to find relevant knowledge articles to create the answer.
Answer ONLY with the facts from the search tool. If there isn't enough information, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question.
Each source has a name followed by a colon and the actual information; always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf].
When an employee requests an address update, interact with them to get their new country, new state, new city and zipcode. If they don't provide a new country, check whether it's still United States. Make sure you have all the information, then use the provided update_address tool to update it in the system.
For all other information update requests, log a ticket to the HR team to update the information.
If the employee is asking for information that is not related to HR or Payroll, call the get_help function.
"""
HR_AVAILABLE_FUNCTIONS = {
"search_knowledgebase": search_knowledgebase,
"update_address": update_address,
"create_ticket": create_ticket,
"get_help": get_help
}
IT_AVAILABLE_FUNCTIONS = {
"get_help": get_help,
}
GENERAL_AVAILABLE_FUNCTIONS = {
"get_help": get_help,
"validate_identity": validate_identity,
}
GENERAL_FUNCTIONS_SPEC= [
{
"name": "get_help",
"description": "Get help when you the question is out of your expertise",
"parameters": {
"type": "object",
"properties": {
"user_request": {
"type": "string",
"description": "summary user's request"
},
},
"required": ["user_request"],
},
},
{
"name": "validate_identity",
"description": "validates the identity of the employee",
"parameters": {
"type": "object",
"properties": {
"employee_id": {
"type": "string",
"description": "The employee id to validate"
},
"employee_name": {
"type": "string",
"description": "The employee id to validate"
}
},
"required": ["employee_id", "employee_name"],
},
},
]
IT_FUNCTIONS_SPEC= [
{
"name": "get_help",
"description": "Get help when you the question is out of your expertise",
"parameters": {
"type": "object",
"properties": {
"user_request": {
"type": "string",
"description": "summary user's request"
},
},
"required": ["user_request"],
},
},
]
HR_FUNCTIONS_SPEC= [
{
"name": "search_knowledgebase",
"description": "Searches the knowledge base for an answer to the HR/Payroll question",
"parameters": {
"type": "object",
"properties": {
"search_query": {
"type": "string",
"description": "The search query to use to search the knowledge base"
}
},
"required": ["search_query"],
},
},
{
"name": "update_address",
"description": "Update the address of the employee",
"parameters": {
"type": "object",
"properties": {
"employee_id": {
"type": "string",
"description": "The employee id to validate"
},
"city": {
"type": "string",
"description": "The new city to update"
},
"state": {
"type": "string",
"description": "The new state to update"
},
"zipcode": {
"type": "integer",
"description": "The new zipcode to update"
},
"country": {
"type": "string",
"description": "The new country to update"
}
},
"required": ["employee_id","city", "state", "zipcode", "country"],
},
},
{
"name": "create_ticket",
"description": "Create a support ticket for the employee to update personal information other than address",
"parameters": {
"type": "object",
"properties": {
"employee_id": {
"type": "string",
"description": "The employee id to validate"
},
"updates": {
"type": "string",
"description": "The new/changed information to update"
}
},
"required": ["employee_id","updates"],
},
},
{
"name": "get_help",
"description": "Get help when you the question is out of your expertise",
"parameters": {
"type": "object",
"properties": {
"user_request": {
"type": "string",
"description": "summary user's request"
},
},
"required": ["user_request"],
},
},
]
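# Illustrative wiring only: the real Smart_Agent constructor lives in hr_copilot_utils and is
# not shown here, so the keyword names below are assumptions rather than the verified API.
# Each specialist pairs a persona with its function specs and callable implementations, e.g.:
# hr_agent = Smart_Coordinating_Agent(persona=HR_PERSONA,
#                                     functions_spec=HR_FUNCTIONS_SPEC,
#                                     functions_list=HR_AVAILABLE_FUNCTIONS)
# it_agent = Smart_Coordinating_Agent(persona=IT_PERSONA,
#                                     functions_spec=IT_FUNCTIONS_SPEC,
#                                     functions_list=IT_AVAILABLE_FUNCTIONS)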
class Agent_Runner():
def __init__(self,starting_agent_name, agents, session_state) -> None:
self.agents = agents
self.session_state = session_state
self.active_agent = None
for agent in agents:
# print("agent name",agent.name, "starting agent name", starting_agent_name)
if starting_agent_name == agent.name:
self.active_agent = agent
break
agent_descriptions ="Jenny: a general customer support agent, handling everyting except HR, Payroll and IT\n\n Lucy: a specialist support agent in HR and Payroll and personal information management\n\n Paul: a specialist support agent in IT\n\n"
self.evaluator = Agent(engine=evaluator_engine, persona="As a customer support manager, you need to assign call transfer requests to the right agent with the right skills. You have following agents with the description of their persona: \n\n"+agent_descriptions)
def revaluate_agent_assignment(self,function_description):
#TODO: revaluate agent assignment based on the state
names = [agent.name for agent in self.agents]
prompt =f"The most suitable agent's name among [{names}] to best match with this request [{function_description}] is "
count =0
while True:
count+=1
if count > 3:
next_agent = random.choice(names)
print("cannot decide on the agent, randomly assigned to ", next_agent)
break
next_agent = self.evaluator.generate_response(prompt).strip()
if next_agent==self.active_agent.name: #should be different from the current agent
continue
if next_agent in names:
break
print("next agent ", next_agent)
for agent in self.agents:
if next_agent == agent.name:
self.active_agent = agent
print("agent changed to ", agent.name)
break
def run(self,user_input, conversation=None, stream = False, api_version = "2023-07-01-preview"):
stream_out, request_agent_change, context_to_persist, conversation, assistant_response= self.active_agent.run(user_input, conversation=conversation, stream = stream, api_version = api_version)
previous_agent_last_response=None
if context_to_persist is not None:
self.session_state['user_context'] = context_to_persist
if request_agent_change:
previous_agent_last_response = assistant_response
self.revaluate_agent_assignment(request_agent_change)
conversation= self.active_agent.init_history
#this code is to transfer the context (in this case user's credentials) from the previous agent to the new agent
if self.session_state['user_context'] is not None:
old_system_message = conversation[0]
new_system_message = old_system_message['content'] + "\n\n" + self.session_state['user_context']
conversation[0] = {"role":"system", "content":new_system_message}
conversation.append({"role":"user", "content":user_input})
stream_out, _,_,conversation, assistant_response= self.active_agent.run(conversation=conversation, stream = False, api_version = api_version)
return stream_out, previous_agent_last_response, conversation, assistant_response
def stream_write(st, agent_response):
message_placeholder = st.empty()
full_response = ""
for response in agent_response:
if len(response.choices)>0:
full_response += response.choices[0].delta.get("content", "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
return full_response
class Smart_Coordinating_Agent(Smart_Agent):
"""
Agent that can use other agents and tools to answer questions.
Args:
persona (str): The persona of the agent.
tools (list): A list of {"tool_name":tool} that the agent can use to answer questions. Tool must have a run method that takes a question and returns an answer.
stop (list): A list of strings that the agent will use to stop the conversation.
init_message (str): The initial message of the agent. Defaults to None.
engine (str): The name of the GPT engine to use. Defaults to "gpt-35-turbo".
Methods:
llm(new_input, stop, history=None, stream=False): Generates a response to the input using the LLM model.
_run(new_input, stop, history=None, stream=False): Runs the agent and generates a response to the input.
run(new_input, history=None, stream=False): Runs the agent and generates a response to the input.
Attributes:
persona (str): The persona of the agent.
tools (list): A list of {"tool_name":tool} that the agent can use to answer questions. Tool must have a run method that takes a question and returns an answer.
stop (list): A list of strings that the agent will use to stop the conversation.
init_message (str): The initial message of the agent.
engine (str): The name of the GPT engine to use.
"""
def run(self, user_input=None, conversation=None, stream = False, api_version = "2023-07-01-preview"):
openai.api_version = api_version
request_agent_change = False
context_to_persist = None
assistant_response=""
if conversation is None: #if no history return init message
conversation = self.init_history.copy()
if user_input is not None:
conversation.append({"role": "user", "content": user_input})
i=0
while True: # loop to retry in case there's an intermittent error from GPT
try:
i+=1
response = openai.ChatCompletion.create(
deployment_id=self.engine, # The deployment name you chose when you deployed the GPT-35-turbo or GPT-4 model.
messages=conversation,
functions=self.functions_spec,
function_call="auto",
request_timeout=20,
)
response_message = response["choices"][0]["message"]
# Step 2: check if GPT wanted to call a function
if response_message.get("function_call"):
print("Recommended Function call:")
print(response_message.get("function_call"))
print()
# Step 3: call the function
# Note: the JSON response may not always be valid; be sure to handle errors
function_name = response_message["function_call"]["name"]
if function_name == GET_HELP_FUNCTION_NAME:
request_agent_change = True
# verify function exists
if function_name not in self.functions_list:
print("Function " + function_name + " does not exist")
function_to_call = self.functions_list[function_name]
# verify function has correct number of arguments
function_args = json.loads(response_message["function_call"]["arguments"])
if check_args(function_to_call, function_args) is False:
print("Invalid number of arguments for function: " + function_name)
function_response = function_to_call(**function_args)
print("Output of function call:")
print(function_response)
print()
if request_agent_change:
request_agent_change = function_response # if the function is a route call function, assign the request_agent_change to be the name of department to change to
if function_name==VALIDATE_IDENTIFY_FUNCTION_NAME:
context_to_persist = function_response
# Step 4: send the info on the function call and function response to GPT
# adding assistant response to messages
conversation.append(
{
"role": response_message["role"],
"name": response_message["function_call"]["name"],
"content": response_message["function_call"]["arguments"],
}
)
# adding function response to messages
conversation.append(
{
"role": "function",
"name": function_name,
"content": function_response,
}
) # extend conversation with function response
openai.api_version = api_version
second_response = openai.ChatCompletion.create(
messages=conversation,
deployment_id=self.engine,
stream=stream,
) # get a new response from GPT where it can see the function response
if not stream:
assistant_response = second_response["choices"][0]["message"]["content"]
conversation.append({"role": "assistant", "content": assistant_response})
else:
assistant_response = second_response
return stream,request_agent_change,context_to_persist,conversation, assistant_response
else:
assistant_response = response_message["content"]
conversation.append({"role": "assistant", "content": assistant_response})
break
except Exception as e:
if i>3:
assistant_response="Haizz, my memory is having some trouble, can you repeat what you just said?"
break
print("Exception as below, will retry\n", str(e))
time.sleep(8)
return False, request_agent_change,context_to_persist, conversation, assistant_response
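# Minimal usage sketch of the coordinator (agent construction is illustrative; see the
# Smart_Agent signature in hr_copilot_utils for the real arguments). The runner starts with
# the generalist agent and re-routes the conversation whenever get_help is triggered:
# runner = Agent_Runner(starting_agent_name="Jenny",
#                       agents=[jenny_agent, lucy_agent, paul_agent],
#                       session_state={"user_context": None})
# _, previous_last_response, conversation, answer = runner.run("I moved, please update my address")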
| [
"function_call",
"arguments",
"The most suitable agent's name among [PLACEHOLDER] to best match with this request [PLACEHOLDER] is "
] |
2024-01-10 | microsoft/OpenAIWorkshop | scenarios~incubations~automating_analytics~analyze.py | import openai
import string
import ast
import sqlite3
from datetime import timedelta
import os
import pandas as pd
import numpy as np
import random
from urllib import parse
import re
import json
from sqlalchemy import create_engine
import sqlalchemy as sql
from plotly.graph_objects import Figure
import time
def get_table_schema(sql_query_tool, sql_engine='sqlite'):
# Define the SQL query to retrieve table and column information
if sql_engine== 'sqlserver':
sql_query = """
SELECT C.TABLE_NAME, C.COLUMN_NAME, C.DATA_TYPE, T.TABLE_TYPE, T.TABLE_SCHEMA
FROM INFORMATION_SCHEMA.COLUMNS C
JOIN INFORMATION_SCHEMA.TABLES T ON C.TABLE_NAME = T.TABLE_NAME AND C.TABLE_SCHEMA = T.TABLE_SCHEMA
WHERE T.TABLE_TYPE = 'BASE TABLE'
"""
elif sql_engine=='sqlite':
sql_query = """
SELECT m.name AS TABLE_NAME, p.name AS COLUMN_NAME, p.type AS DATA_TYPE
FROM sqlite_master AS m
JOIN pragma_table_info(m.name) AS p
WHERE m.type = 'table'
"""
else:
raise Exception("unsupported SQL engine, please manually update code to retrieve database schema")
# Execute the SQL query and store the results in a DataFrame
df = sql_query_tool.execute_sql_query(sql_query, limit=None)
output=[]
# Initialize variables to store table and column information
current_table = ''
columns = []
# Loop through the query results and output the table and column information
for index, row in df.iterrows():
if sql_engine== 'sqlserver':
table_name = f"{row['TABLE_SCHEMA']}.{row['TABLE_NAME']}"
else:
table_name = f"{row['TABLE_NAME']}"
column_name = row['COLUMN_NAME']
data_type = row['DATA_TYPE']
if " " in table_name:
table_name= f"[{table_name}]"
column_name = row['COLUMN_NAME']
if " " in column_name:
column_name= f"[{column_name}]"
# If the table name has changed, output the previous table's information
if current_table != table_name and current_table != '':
output.append(f"table: {current_table}, columns: {', '.join(columns)}")
columns = []
# Add the current column information to the list of columns for the current table
columns.append(f"{column_name} {data_type}")
# Update the current table name
current_table = table_name
# Output the last table's information
output.append(f"table: {current_table}, columns: {', '.join(columns)}")
output = "\n ".join(output)
return output
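# Example of the schema string produced above (table and column names are illustrative):
#   table: dbo.Orders, columns: OrderID int, CustomerID int, OrderDate datetime
#   table: dbo.Customers, columns: CustomerID int, CompanyName varchar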
class ChatGPT_Handler: #designed for chatcompletion API
def __init__(self, gpt_deployment=None,max_response_tokens=None,token_limit=None,temperature=None,extract_patterns=None) -> None:
self.max_response_tokens = max_response_tokens
self.token_limit= token_limit
self.gpt_deployment=gpt_deployment
self.temperature=temperature
# self.conversation_history = []
self.extract_patterns=extract_patterns
def _call_llm(self,prompt, stop):
response = openai.ChatCompletion.create(
engine=self.gpt_deployment,
messages = prompt,
temperature=self.temperature,
max_tokens=self.max_response_tokens,
stop=stop
)
llm_output = response['choices'][0]['message']['content']
return llm_output
def extract_output(self, text_input):
output={}
if len(text_input)==0:
return output
for pattern in self.extract_patterns:
if "sql" in pattern[1]:
sql_query=""
sql_result = re.findall(pattern[1], text_input, re.DOTALL)
if len(sql_result)>0:
sql_query=sql_result[0]
output[pattern[0]]= sql_query
else:
return output
text_before = text_input.split(sql_query)[0].strip("\n").strip("```sql").strip("\n")
if text_before is not None and len(text_before)>0:
output["text_before"]=text_before
text_after =text_input.split(sql_query)[1].strip("\n").strip("```")
if text_after is not None and len(text_after)>0:
output["text_after"]=text_after
return output
if "python" in pattern[1]:
result = re.findall(pattern[1], text_input, re.DOTALL)
if len(result)>0:
output[pattern[0]]= result[0]
else:
result = re.search(pattern[1], text_input,re.DOTALL)
if result:
output[result.group(1)]= result.group(2)
return output
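    # Illustrative extract_patterns for this handler (the exact regexes are supplied by the
    # app's configuration and are not shown here): each entry is a (name, regex) pair, and the
    # regex text must contain "sql" or "python" so the branches above can route on it, e.g.
    #   extract_patterns = [("sql query", r"```sql\n(.*?)```"),
    #                       ("python", r"```python\n(.*?)```")]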
class SQL_Query(ChatGPT_Handler):
def __init__(self, system_message="",data_sources="",db_path=None,driver=None,dbserver=None, database=None, db_user=None ,db_password=None, **kwargs):
super().__init__(**kwargs)
if len(system_message)>0:
self.system_message = f"""
{data_sources}
{system_message}
"""
self.database=database
self.dbserver=dbserver
self.db_user = db_user
self.db_password = db_password
self.db_path= db_path #This is the built-in demo using SQLite
self.driver= driver
def execute_sql_query(self, query, limit=10000):
if self.db_path is not None:
engine = create_engine(f'sqlite:///{self.db_path}')
else:
connecting_string = f"Driver={{ODBC Driver 17 for SQL Server}};Server=tcp:{self.dbserver},1433;Database={self.database};Uid={self.db_user};Pwd={self.db_password}"
params = parse.quote_plus(connecting_string)
engine = create_engine("mssql+pyodbc:///?odbc_connect=%s" % params)
result = pd.read_sql_query(query, engine)
result = result.infer_objects()
for col in result.columns:
if 'date' in col.lower():
result[col] = pd.to_datetime(result[col], errors="ignore")
if limit is not None:
result = result.head(limit) # limit to save memory
# session.close()
return result
class AnalyzeGPT(ChatGPT_Handler):
def __init__(self,sql_engine,content_extractor, sql_query_tool, system_message,few_shot_examples,st,**kwargs) -> None:
super().__init__(**kwargs)
table_schema = get_table_schema(sql_query_tool,sql_engine)
system_message = f"""
<<data_sources>>
{table_schema}
{system_message.format(sql_engine=sql_engine)}
{few_shot_examples}
"""
self.conversation_history = [{"role": "system", "content": system_message}]
self.st = st
self.content_extractor = content_extractor
self.sql_query_tool = sql_query_tool
def get_next_steps(self, updated_user_content, stop):
old_user_content=""
if len(self.conversation_history)>1:
old_user_content= self.conversation_history.pop() #removing old history
old_user_content=old_user_content['content']+"\n"
self.conversation_history.append({"role": "user", "content": old_user_content+updated_user_content})
# print("prompt input ", self.conversation_history)
n=0
try:
llm_output = self._call_llm(self.conversation_history, stop)
# print("llm_output \n", llm_output)
except Exception as e:
time.sleep(8) #sleep for 8 seconds
            while n<5:
                try:
                    llm_output = self._call_llm(self.conversation_history, stop)
                    break # stop retrying once the call succeeds
                except Exception as e:
                    n +=1
                    print("error calling OpenAI, retrying up to 5 attempts, attempt ", n)
                    time.sleep(8) #sleep for 8 seconds
                    print(e)
                    llm_output = "OPENAI_ERROR"
# print("llm_output: ", llm_output)
output = self.content_extractor.extract_output(llm_output)
if len(output)==0 and llm_output != "OPENAI_ERROR": #wrong output format
llm_output = "WRONG_OUTPUT_FORMAT"
return llm_output,output
def run(self, question: str, show_code,show_prompt,st) -> any:
import numpy as np
import plotly.express as px
import plotly.graph_objs as go
import pandas as pd
st.write(f"Question: {question}")
# if "init" not in self.st.session_state.keys():
# self.st.session_state['init']= True
def execute_sql(query):
return self.sql_query_tool.execute_sql_query(query)
observation=None
def show(data):
if type(data) is Figure:
st.plotly_chart(data)
else:
st.write(data)
# i=0
# for key in self.st.session_state.keys():
# if "show" in key:
# i +=1
# self.st.session_state[f'show{i}']=data
if type(data) is not Figure:
self.st.session_state[f'observation: this was shown to user']=data
def observe(name, data):
try:
                data = data[:10] # limit the stored observation to the first 10 rows
except:
pass
self.st.session_state[f'observation:{name}']=data
max_steps = 15
count =1
finish = False
new_input= f"Question: {question}"
# if self.st.session_state['init']:
# new_input= f"Question: {question}"
# else:
# new_input=self.st.session_state['history'] +f"\nQuestion: {question}"
while not finish:
llm_output,next_steps = self.get_next_steps(new_input, stop=["Observation:", f"Thought {count+1}"])
if llm_output=='OPENAI_ERROR':
st.write("Error Calling Azure Open AI, probably due to max service limit, please try again")
break
elif llm_output=='WRONG_OUTPUT_FORMAT': #just have open AI try again till the right output comes
count +=1
continue
new_input += f"\n{llm_output}"
for key, value in next_steps.items():
new_input += f"\n{value}"
if "ACTION" in key.upper():
if show_code:
st.write(key)
st.code(value)
observations =[]
serialized_obs=[]
try:
# if "print(" in value:
# raise Exception("You must not use print() statement, instead use st.write() to write to end user or observe(name, data) to view data yourself. Please regenerate the code")
exec(value, locals())
for key in self.st.session_state.keys():
if "observation:" in key:
observation=self.st.session_state[key]
observations.append((key.split(":")[1],observation))
                                if isinstance(observation, pd.DataFrame):
# serialized_obs.append((key.split(":")[1],observation.to_json(orient='records', date_format='iso')))
serialized_obs.append((key.split(":")[1],observation.to_string()))
elif type(observation) is not Figure:
serialized_obs.append({key.split(":")[1]:str(observation)})
del self.st.session_state[key]
except Exception as e:
observations.append(("Error:",str(e)))
serialized_obs.append({"\nEncounter following error, can you try again?\n:":str(e)+"\nAction:"})
for observation in observations:
st.write(observation[0])
st.write(observation[1])
obs = f"\nObservation on the first 10 rows of data: {serialized_obs}"
new_input += obs
else:
st.write(key)
st.write(value)
if "Answer" in key:
print("Answer is given, finish")
finish= True
if show_prompt:
self.st.write("Prompt")
self.st.write(self.conversation_history)
count +=1
if count>= max_steps:
print("Exceeding threshold, finish")
break
def query_run(self, question: str, show_code,show_prompt,st) -> any:
st.write(f"Question: {question}")
def execute_sql(query):
return self.sql_query_tool.execute_sql_query(query)
max_steps = 15
count =1
new_input= f"Question: {question}"
while count<= max_steps:
llm_output,next_steps = self.get_next_steps(new_input, stop=["Observation:", f"Thought {count+1}"])
if llm_output=='OPENAI_ERROR':
st.write("Error Calling Azure Open AI, probably due to max service limit, please try again")
break
elif llm_output=='WRONG_OUTPUT_FORMAT': #just have open AI try again till the right output comes
count +=1
continue
output =None
error= False
new_input += f"\n{llm_output}"
for key, value in next_steps.items():
new_input += f"\n{value}"
if "SQL" in key.upper():
if show_code:
st.write("SQL Code")
st.code(value)
try:
output = execute_sql(value)
except Exception as e:
new_input +="Encounter following error, can you try again?\n"+str(e)
error=str(e)
else:
if show_code:
st.write(value)
if show_prompt:
self.st.write("Prompt")
self.st.write(self.conversation_history)
if output is not None:
st.write(output)
break
if error:
st.write(error)
count +=1
if count>= max_steps:
st.write("Cannot handle the question, please change the question and try again")
| [
"PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | microsoft/OpenAIWorkshop | scenarios~openai_batch_pipeline~document_generation~upload_docs.py | import os
import logging
import random
import openai
import time
from azure.storage.blob import BlobClient, BlobServiceClient
import argparse
import json
from azure.core.exceptions import ResourceExistsError
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--conn_string', type=str, help='Azure Storage connection string')
    parser.add_argument('--containername', type=str, help='Azure Storage container name', default='workshop-data')
args = parser.parse_args()
try:
blob_service_client = BlobServiceClient.from_connection_string(args.conn_string)
container_client = blob_service_client.create_container(args.containername)
except ResourceExistsError:
print("Container already exists.")
print("processing...")
for folder in ["generated_documents", "cleansed_documents"]:
for filename in os.listdir(folder):
print("on file:" + filename)
with open(os.path.join(folder, filename), 'r') as src:
blob_name=f'{folder}/{filename}'
print("on blob:" + blob_name)
blob_client = BlobClient.from_connection_string(args.conn_string,container_name=args.containername,blob_name=blob_name,)
file_content = src.read()
blob_client.upload_blob(file_content)
if __name__ == '__main__':
main() | [] |
2024-01-10 | microsoft/OpenAIWorkshop | scenarios~incubations~copilot~data_management~functions.py | # Agent class
### responsibility definition: expertise, scope, conversation script, style
import time
import openai
import os
from pathlib import Path
import json
import random
from dotenv import load_dotenv
from openai.embeddings_utils import get_embedding, cosine_similarity
import inspect
import ast
import pandas as pd
env_path = Path('..') / 'secrets.env'
load_dotenv(dotenv_path=env_path)
openai.api_key = os.environ.get("AZURE_OPENAI_API_KEY")
openai.api_base = os.environ.get("AZURE_OPENAI_ENDPOINT")
openai.api_type = "azure"
import sys
sys.path.append("..")
from utils import Agent, Smart_Agent, check_args
def update_sales(filter, update):
file_name = "../data/forecast_sales.json"
filter = ast.literal_eval(filter)
update = ast.literal_eval(update).items()
update = list(update)[0]
with open(file_name) as f:
data = pd.read_json(f)
filter_data = data.copy()
for filter_item in filter.items():
filter_data = filter_data[filter_data[filter_item[0]] == filter_item[1]]
filter_data[update[0]]=update[1]
print(filter_data)
data.update(filter_data,overwrite=True)
with open(file_name, 'w') as f:
data.to_json(f)
return f"Update sales forecast in with filter {filter} and update {update}"
def update_cost(filter, update):
file_name = "../data/forecast_cost.json"
filter = ast.literal_eval(filter)
update = ast.literal_eval(update).items()
update = list(update)[0]
with open(file_name) as f:
data = pd.read_json(f)
filter_data = data.copy()
for filter_item in filter.items():
filter_data = filter_data[filter_data[filter_item[0]] == filter_item[1]]
filter_data[update[0]]=update[1]
print(filter_data)
data.update(filter_data,overwrite=True)
with open(file_name, 'w') as f:
data.to_json(f)
return f"Update cost forecast in with filter {filter} and update {update}"
def query_cost(filter):
file_name = "../data/forecast_cost.json"
filter = ast.literal_eval(filter)
with open(file_name) as f:
data = pd.read_json(f)
for filter_item in filter.items():
print(filter_item[0],filter_item[1])
data = data[data[filter_item[0]] == filter_item[1]]
return f"Query result: {data.to_dict(orient='records')}"
def query_sales(filter):
file_name = "../data/forecast_sales.json"
filter = ast.literal_eval(filter)
with open(file_name) as f:
data = pd.read_json(f)
for filter_item in filter.items():
print(filter_item[0],filter_item[1])
data = data[data[filter_item[0]] == filter_item[1]]
return f"Query result: {data.to_dict(orient='records')}"
def route_call(user_request):
return f"The user request is {user_request}"
def validate_identity(employee_id, employee_name):
if employee_id in ["1234","5678"]:
return f"Employee {employee_name} with id {employee_id} is validated in this conversation"
else:
return "This employee id is not valid"
ROUTE_CALL_FUNCTION_NAME = "route_call" #default function name for routing call used by all agents
VALIDATE_IDENTIFY_FUNCTION_NAME = "validate_identity" #default function name for validating identity used by all agents
ROUTING_AGENT_PERSONA = """
You are Jenny, a helpful digital assistant helping to determine the right specialist to help users with their needs.
Engage in the conversation with the user to understand the request and route the call to the right specialist.
Limit the conversation to understand what their request is about.
There are 2 specialists available to help with the request:
- Cost forecast data analyst responsible for helping users to query and update cost forecast information
- Sales forecast data analyst responsible for helping users to query and update sales forecast information
If there's ambiguity in the request, ask for clarification till you know for sure which agent to route the call to.
Act as a single point of contact. Users don't need to know that there are 2 agents available to help with the request.
If none of the agents' profiles match the request, apologize that the scope of service only covers the above 2 areas and end the conversation.
"""
SALES_FORECAST_PERSONA = """
You are Lucy, an information system specialist responsible for helping users maintain sales forecast data.
You are given following data entity:
{
"name": "sales_forecast",
"description": "contain data about sales forecast",
"attributes":
{
"name": "date",
"description": "date of the sales data in dd/mm/yyyy, ranging from 01/01/2010 to 31/12/2024"
},
{
"name": "business_unit",
"description": "name of the business_unit, as one of the following values ['commercial', 'residential','government']
},
{
"name": "amount",
"description": "forecast sales amount",
},
{
"name": "product",
"description": "product that generates sales, as one of the following values ['heater', 'air conditioner' ,'fan']"
},
},
If the user request is to update the sales forecast, you need to:
- Interact with the user to confirm the changes that need to be made. Your goal is to identify the attribute values that help locate the data entity and the new
attribute values that need to be updated.
- You need at least date, business_unit and product to locate the data entity.
- For attributes that have restriction in the value, for example business_unit, you need to validate the new value is in the list of allowed values.
- If there's ambiguity in user's request, you need to ask for clarification.
- Once you can confirm all the information, summarize and confirm with user.
- If they agree, use the update tool to update the data entity.
If the user request is to query the sales forecast, you need to:
- Interact with the user to confirm the filter condition for the query. Your goal is to identify the attribute values that help locate the data entity.
- You need at least date, business_unit and product to locate the data entity.
- For attributes that have restriction in the value, for example business_unit, you need to validate the new value is in the list of allowed values.
- If there's ambiguity in user's request, you need to ask for clarification.
- Use the information query tool to query the data.
- Only use data that is from the search tool to answer question. Do not generate answer that is not from the tool.
For any other request, call route_call function.
"""
COST_FORECAST_PERSONA = """
You are Betty, an information system specialist responsible for helping users maintain cost forecast data.
You are given following data entity:
{
"name": "cost_forecast",
"description": "contain data about cost forecast data",
"attributes":
{
"name": "date",
"description": "date of the cost data in dd/mm/yyyy, ranging from 01/01/2010 to 31/12/2024"
},
{
"name": "business_unit",
"description": "name of the business_unit, as one of the following values ['commercial', 'residential','government']
},
{
"name": "amount",
"description": "actual amount",
},
{
"name": "product",
"description": "product that generates sales, as one of the following values ['heater', 'air conditioner' ,'fan']"
},
},
If the user request is to update the cost forecast, you need to:
- Interact with the user to confirm the changes that need to be made. Your goal is to identify the attribute values that help locate the data entity and the new
attribute values that need to be updated.
- You need at least date, business_unit and product to locate the data entity.
- For attributes that have restriction in the value, for example business_unit, you need to validate the new value is in the list of allowed values.
- If there's ambiguity in user's request, you need to ask for clarification.
- Once you can confirm all the information, summarize and confirm with user.
- If they agree, use the update tool to update the data entity.
If the user request is to query the cost forecast, you need to:
- Interact with the user to confirm the filter condition for the query. Your goal is to identify the attribute values that help locate the data entity.
- You need at least date, business_unit and product to locate the data entity.
- For attributes that have restriction in the value, for example business_unit, you need to validate the new value is in the list of allowed values.
- If there's ambiguity in user's request, you need to ask for clarification.
- Use the information query tool to query the data.
- Only use data that is from the search tool to answer question. Do not generate answer that is not from the tool.
For any other request, call route_call function.
"""
COST_AVAILABLE_FUNCTIONS = {
"update_cost": update_cost,
"query_cost": query_cost,
"route_call": route_call
}
SALES_AVAILABLE_FUNCTIONS = {
"update_sales": update_sales,
"query_sales": query_sales,
"route_call": route_call
}
ROUTING_AGENT_FUNCTIONS = {
"route_call": route_call,
}
ROUTING_AGENT_FUNCTIONS_SPEC= [
{
"name": "route_call",
"description": "Call this function to transfer the call to the right agent",
"parameters": {
"type": "object",
"properties": {
"user_request": {
"type": "string",
"description": "Description of what user wants to do"
},
},
"required": ["user_request"],
},
}
]
SALES_FORECAST_FUNCTIONS_SPEC= [
{
"name": "update_sales",
"description": "Update sales forecast data only, not other data entities",
"parameters": {
"type": "object",
"properties": {
"filter": {
"type": "string",
"description": "attribute name and value pairs to filter the data to update, for example {'date':'01/01/2021','business_unit':'commercial'}"
},
"update": {
"type": "string",
"description": "attribute name and value pairs to update the data entity, for example {'amount':'1000'}"
}
},
"required": ["filter","update"],
},
},
{
"name": "query_sales",
"description": "Query tool for sales forecast only, not other data entities",
"parameters": {
"type": "object",
"properties": {
"filter": {
"type": "string",
"description": "attribute name and value pairs to filter the data, for example {'date':'2021-01=01','business_unit':'commercial'}"
}
},
"required": ["filter"],
},
},
{
"name": "route_call",
"description": "Handle request that is not about querying or updating sales forecast data",
"parameters": {
"type": "object",
"properties": {
"user_request": {
"type": "string",
"description": "Description of what user wants to do"
},
},
"required": ["user_request"],
},
},
]
COST_FORECAST_FUNCTIONS_SPEC= [
{
"name": "update_cost",
"description": "Update cost forecast data only, not other data entities",
"parameters": {
"type": "object",
"properties": {
"filter": {
"type": "string",
"description": "attribute name and value pairs to filter the data to update, for example {'date':'01/01/2021','business_unit':'commercial'}"
},
"update": {
"type": "string",
"description": "attribute name and value pairs to update the data entity, for example {'amount':'1000'}"
}
},
"required": ["filter","update"],
},
},
{
"name": "query_cost",
"description": "Query tool for cost forecast only, not for sales forecast",
"parameters": {
"type": "object",
"properties": {
"filter": {
"type": "string",
"description": "attribute name and value pairs to filter the data, for example {'date':'2021-01=01','business_unit':'commercial'}"
}
},
"required": ["filter"],
},
},
{
"name": "route_call",
"description": "Handle request that is not about querying or updating cost forecast data",
"parameters": {
"type": "object",
"properties": {
"user_request": {
"type": "string",
"description": "Description of what user wants to do"
},
},
"required": ["user_request"],
},
},
]
class Agent_Runner():
def __init__(self,starting_agent_name, agents, session_state) -> None:
self.agents = agents
self.session_state = session_state
self.active_agent = None
for agent in agents:
if starting_agent_name == agent.name:
self.active_agent = agent
break
evaluator_persona ="Jenny: a general customer support agent, handling everyting except sales forecast or cost forecast\n\n Lucy: a specialist agent responsible for sales forecast\n\n Betty: a specialist agent responsible for cost forecast\n\n"
self.evaluator = Agent(engine="turbo-0613", persona="As a customer support manager, you need to assign call transfer requests to the right agent with the right skills. You have following agents with the description of their persona: \n\n"+evaluator_persona)
def revaluate_agent_assignment(self,function_description):
#TODO: revaluate agent assignment based on the state
names = [agent.name for agent in self.agents]
prompt =f"The most suitable agent's name among [{names}] to best match with this request [{function_description}] is "
count =0
while True:
count+=1
if count > 3:
next_agent = random.choice(names)
print("cannot decide on the agent, randomly assigned to ", next_agent)
break
next_agent = self.evaluator.generate_response(prompt).strip()
if next_agent==self.active_agent.name: #should be different from the current agent
continue
if next_agent in names:
break
print("next agent ", next_agent)
for agent in self.agents:
if next_agent == agent.name:
self.active_agent = agent
print("agent changed to ", agent.name)
break
def run(self,user_input, conversation=None, stream = False, api_version = "2023-07-01-preview"):
stream_out, request_agent_change, context_to_persist, conversation, assistant_response= self.active_agent.run(user_input, conversation=conversation, stream = stream, api_version = api_version)
if context_to_persist is not None:
self.session_state['user_context'] = context_to_persist
if request_agent_change:
# previous_agent_last_response = assistant_response
self.revaluate_agent_assignment(request_agent_change)
new_conversation= self.active_agent.init_history
#this code is to transfer any implicit context (context which is not in conversation like user's credentials) from the previous agent to the new agent to its system message
if self.session_state['user_context'] is not None and len(self.session_state["user_context"])>0:
old_system_message = new_conversation[0]
new_system_message = old_system_message['content'] + "\n\n" + self.session_state['user_context']
conversation[0] = {"role":"system", "content":new_system_message}
#adding relevant content from the old agent to the new agent
for message in conversation:
if message.get("role") != "system" and message.get("name") is None: #only add user & assistant messages
new_conversation.append({"role":message.get("role"), "content":message.get("content")})
stream_out, _,_,conversation, assistant_response= self.active_agent.run(conversation=new_conversation, stream = False, api_version = api_version)
return stream_out, request_agent_change, conversation, assistant_response
def stream_write(st, agent_response):
message_placeholder = st.empty()
full_response = ""
for response in agent_response:
if len(response.choices)>0:
full_response += response.choices[0].delta.get("content", "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
return full_response
class Smart_Coordinating_Agent(Smart_Agent):
"""
Agent that can use other agents and tools to answer questions.
Args:
persona (str): The persona of the agent.
tools (list): A list of {"tool_name":tool} that the agent can use to answer questions. Tool must have a run method that takes a question and returns an answer.
stop (list): A list of strings that the agent will use to stop the conversation.
init_message (str): The initial message of the agent. Defaults to None.
engine (str): The name of the GPT engine to use. Defaults to "gpt-35-turbo".
Methods:
llm(new_input, stop, history=None, stream=False): Generates a response to the input using the LLM model.
_run(new_input, stop, history=None, stream=False): Runs the agent and generates a response to the input.
run(new_input, history=None, stream=False): Runs the agent and generates a response to the input.
Attributes:
persona (str): The persona of the agent.
tools (list): A list of {"tool_name":tool} that the agent can use to answer questions. Tool must have a run method that takes a question and returns an answer.
stop (list): A list of strings that the agent will use to stop the conversation.
init_message (str): The initial message of the agent.
engine (str): The name of the GPT engine to use.
"""
def run(self, user_input=None, conversation=None, stream = False, api_version = "2023-07-01-preview"):
openai.api_version = api_version
request_agent_change = False
context_to_persist = None
assistant_response=""
if conversation is None: #if no history return init message
conversation = self.init_history.copy()
if user_input is not None:
conversation.append({"role": "user", "content": user_input})
i=0
while True: # loop to retry in case there's an intermittent error from GPT
try:
i+=1
response = openai.ChatCompletion.create(
deployment_id=self.engine, # The deployment name you chose when you deployed the GPT-35-turbo or GPT-4 model.
messages=conversation,
functions=self.functions_spec,
function_call="auto",
request_timeout=20,
)
response_message = response["choices"][0]["message"]
# Step 2: check if GPT wanted to call a function
if response_message.get("function_call"):
print("Recommended Function call:")
print(response_message.get("function_call"))
print()
# Step 3: call the function
# Note: the JSON response may not always be valid; be sure to handle errors
function_name = response_message["function_call"]["name"]
if function_name == ROUTE_CALL_FUNCTION_NAME:
request_agent_change = True
# verify function exists
if function_name not in self.functions_list:
print("Function " + function_name + " does not exist")
function_to_call = self.functions_list[function_name]
# verify function has correct number of arguments
function_args = json.loads(response_message["function_call"]["arguments"])
if check_args(function_to_call, function_args) is False:
print("Invalid number of arguments for function: " + function_name)
function_response = function_to_call(**function_args)
print("Output of function call:")
print(function_response)
print()
if request_agent_change:
request_agent_change = function_response # if the function is a route call function, assign the request_agent_change to be the name of department to change to
# adding assistant response to messages
conversation.append(
{
"role": response_message["role"],
"name": response_message["function_call"]["name"],
"content": response_message["function_call"]["arguments"],
}
)
# adding function response to messages
conversation.append(
{
"role": "function",
"name": function_name,
"content": function_response,
}
) # extend conversation with function response
openai.api_version = api_version
second_response = openai.ChatCompletion.create(
messages=conversation,
deployment_id=self.engine,
stream=stream,
) # get a new response from GPT where it can see the function response
if not stream:
assistant_response = second_response["choices"][0]["message"]["content"]
conversation.append({"role": "assistant", "content": assistant_response})
else:
assistant_response = second_response
return stream,request_agent_change,context_to_persist,conversation, assistant_response
else:
assistant_response = response_message["content"]
conversation.append({"role": "assistant", "content": assistant_response})
break
except Exception as e:
if i>3:
assistant_response="Haizz, my memory is having some trouble, can you repeat what you just said?"
break
print("Exception as below, will retry\n", str(e))
time.sleep(8)
return False, request_agent_change,context_to_persist, conversation, assistant_response
| [
"content",
"arguments",
"function_call",
"The most suitable agent's name among [PLACEHOLDER] to best match with this request [PLACEHOLDER] is "
] |
2024-01-10 | ori-30/GenAI-Dashboards | functions.py | import streamlit as st
from streamlit_chat import message
import streamlit.components.v1 as components # Import Streamlit
import requests
import json
import openai
from typing import List
def get_text():
input_text = st.text_input("You: ","", key="input")
return input_text
def get_area():
input_text = st.text_input("Areas de negocio: ","", key="area")
return input_text
def get_des():
input_text = st.text_input("Descripción de la empresa: ","", key="des")
return input_text
def create_gpt_completion(ai_model: str, messages: List[dict]) -> dict:
openai.api_key = st.secrets.api_credentials.api_key
completion = openai.ChatCompletion.create(
model=ai_model,
messages=messages,
)
return completion
def get_JSON():
    try:
        dominios = st.session_state.domains
    except Exception:
        st.error("error json")
        # fall back to an empty domain list so callers don't hit an undefined name
        return {"dominios": []}
    return json.loads(dominios)
def tables(alltables):
r=""
for table in alltables:
r+= """<p class="card-text">%s</p>""" % str(table)
return r
def create_card(title, alltables):
card="""
<div class="m-1 p-1"style="padding: 2px 16px;">
<div class="card m-2" style="width: 18rem;">
<div class="card-body bg-light">
<h3 class="card-title">%s</h3>
""" % str(title)
card+=tables(alltables)
card+="""
</div>
</div>
</div>
"""
return card
def create_domains(dominios, container):
c = container.columns(2)
i=0
for dominio in dominios:
d= create_card(dominio["nombre"], dominio["tablas"])
c[i].markdown(d, unsafe_allow_html= True)
i=(i+1)%2
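# Shape of the domain JSON that get_JSON/create_domains expect (domain and table names are
# illustrative): {"dominios": [{"nombre": "Ventas", "tablas": ["ORDERS", "ORDER_ITEMS"]},
#                              {"nombre": "Clientes", "tablas": ["CUSTOMERS"]}]}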
def create_sql_statment(container):
sql="Esto es una sentencia sql"
box="""
<div class="card w-100 m-2">
<div class="card-body w-100 bg-info">
<p>%s</p>
</div>
</div>
""" % str(sql)
container.markdown(box, unsafe_allow_html= True)
def bootstrap():
_bootstrap="""<link href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-EVSTQN3/azprG1Anm3QDgpJLIm9Nao0Yz1ztcQTwFspd3yD65VohhpuuCOmLASjC" crossorigin="anonymous">"""
st.markdown(_bootstrap, unsafe_allow_html= True) | [] |
2024-01-10 | ori-30/GenAI-Dashboards | connection.py | import streamlit as st
from streamlit_chat import message
import streamlit.components.v1 as components # Import Streamlit
import requests
import json
from openai import OpenAI, AzureOpenAI
from typing import List
from functions import *
import MetadataExtractor as me
from snowflake.snowpark import Session
st.set_page_config(
page_title="GenAI domains",
page_icon=":heart:",
)
if 'display_result' not in st.session_state:
st.session_state.display_result = True
if 'reset' not in st.session_state:
st.session_state.reset = False
if 'area' not in st.session_state:
st.session_state['area']=""
if 'description' not in st.session_state:
st.session_state['description']=""
if 'prompt_metadata' not in st.session_state:
st.session_state['prompt_metadata']=""
def callback():
if des:
st.session_state['area']=area
st.session_state['description']=des
st.session_state.display_result=False
st.session_state.reset=False
        st.session_state['prompt_metadata'] = prompt_metadata
else:
st.error("Por favor, rellene ambos campos")
if not st.session_state.display_result:
metadata = st.session_state["prompt_metadata"]
promt_json= open('promptjson.txt', 'r').read()
    # load the OpenAI key from Streamlit secrets
client = OpenAI(api_key=st.secrets["OPENAI_API_KEY"])
st.write(metadata)
    # create the default model
if "openai_model" not in st.session_state:
st.session_state["openai_model"] = "gpt-3.5-turbo"
    # initialize the chat history
if "messages" not in st.session_state:
st.session_state.messages = [{"role": "system", "content": metadata}]
st.session_state.messages.append({"role": "system", "content": promt_json})
cl=client.chat.completions.create(model=st.session_state["openai_model"], messages=[{"role": m["role"], "content": m["content"]} for m in st.session_state.messages], stream=True)
full_response=""
for response in cl:
full_response +=(response.choices[0].delta.content or "")
st.session_state.messages.append({"role": "system", "content": full_response})
if "domains" not in st.session_state:
st.session_state["domains"]=full_response
    # create the sidebar
with st.sidebar:
st.header("Chatbot", divider='rainbow')
            # Accept user input
prompt = get_text()
    # display the chat messages from history
for message in st.session_state.messages:
if message["role"]!="system":
with st.chat_message(message["role"]):
st.markdown(message["content"])
if prompt:
        # add the user message to the history
st.session_state.messages.append({"role": "user", "content": prompt})
        # display the user message
with st.chat_message("user"):
st.markdown(prompt)
# Display assistant response in chat message container
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
cl=client.chat.completions.create(model=st.session_state["openai_model"], messages=[{"role": m["role"], "content": m["content"]} for m in st.session_state.messages], stream=True)
for response in cl:
full_response +=(response.choices[0].delta.content or "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
st.session_state.messages.append({"role": "assistant", "content": full_response})
container= st.container()
with container:
bootstrap()
create_sql_statment(container)
dominios=get_JSON()
create_domains(dominios["dominios"], container)
if st.session_state.display_result:
st.header("Domain GenAI")
selector=st.radio("Selecciona la API: ",["OpenAI", "AzureOpenAI"])
if selector == "AzureOpenAI":
ao_key=st.text_input("Azure api tokne: ",type="password")
ao_version=st.text_input("Azure api version:")
ao_endpoint=st.text_input("Azure endopoint:")
model=st.text_input("Azure deployment name:")
        client = AzureOpenAI(
            api_key=ao_key,
            api_version=ao_version,
            azure_endpoint=ao_endpoint,
        )
        # model="modelo3"  # leftover hard-coded override removed so the user-supplied deployment name is used
else:
openai_input=st.text_input("OpenAi api token: ",type="password")
model=st.text_input("OpenAi model: ")
client = OpenAI(
api_key=openai_input
)
st.header("Configuracion Snowflake")
acc_input=st.text_input("Identificador cuenta de Snowflake","")
user_input=st.text_input("Nombre de usuario","")
pass_input=st.text_input("Contraseña","",type='password')
input3 = st.text_input("Base de datos:", "")
    # Configure the sidebar
st.header("Información de la empresa")
area=get_area()
des=get_des()
prompt_metadata =me.get_metadata(acc_input,user_input,pass_input,input3)
prompt_metadata += f"\n\nEsta es la descripción de la empresa: {st.session_state.descripcion}\nEstas son las áreas de negocio: {st.session_state.area}"
send=st.button("Generar", disabled=(area is ""), on_click=callback)
| [
"content"
] |
2024-01-10 | CreeperLin/IsaacGymMultiAgent | igma~wrappers~tianshou.py | import itertools
import numpy as np
from typing import Any, Callable, List, Optional, Tuple, Union
from isaacgymenvs.tasks.base.vec_task import VecTask
from tianshou.env.venvs import BaseVectorEnv
from tianshou.env.worker import EnvWorker
# from tianshou.data.buffer.manager import ReplayBufferManager
from tianshou.data import VectorReplayBuffer
from tianshou.data import Batch
from tianshou.data.batch import _alloc_by_keys_diff, _create_value
import torch
ID_TYPE = Optional[Union[int, List[int], np.ndarray]]
class NestedEnvWorker(EnvWorker):
def __len__(self):
return len(self.env)
def get_env_attr(self, key: str) -> Any:
return [getattr(self.env, key) for _ in range(len(self))]
def set_env_attr(self, key: str, value: Any) -> None:
setattr(self.env, key, value)
class IGMAEnvWorker(NestedEnvWorker):
"""Dummy worker used in sequential vector environments."""
def __init__(self, env_fn: Callable[[], VecTask]) -> None:
ret = env_fn()
if isinstance(ret, (list, tuple)):
env, ind = ret
else:
env, ind = ret, None
self.env = env
self.ind = ind
self.num_envs = self.env.num_envs if ind is None else len(list(ind))
super().__init__(env_fn)
def __len__(self):
return self.num_envs
def reset(self) -> Any:
return self.env.reset()
@staticmethod
def wait( # type: ignore
workers: List, wait_num: int, timeout: Optional[float] = None) -> List:
# Sequential EnvWorker objects are always ready
return workers
def send(self, action: Optional[Any], sid: ID_TYPE = None) -> None:
if action is None:
if sid is None:
obs_dict = self.env.reset() # type: ignore
self.result = obs_dict['obs']
else:
try:
obs_dict = self.env.reset(indices=sid) # type: ignore
self.result = obs_dict['obs']
except Exception:
self.result = self.last_obs[sid]
else:
if isinstance(action, np.ndarray):
action = torch.from_numpy(action).to(device=self.env.device)
obs_dict, rew_buf, reset_buf, extras = self.env.step(action) # type: ignore
rew_buf = rew_buf.cpu().numpy()
reset_buf = reset_buf.cpu().numpy()
self.last_obs = obs_dict['obs']
self.result = self.last_obs, rew_buf, reset_buf, extras
def seed(self, seed: Optional[int] = None) -> List[int]:
for s in self.action_space:
s.seed(seed)
return getattr(self.env, 'seed', lambda x: x)(seed)
def render(self, **kwargs: Any) -> Any:
return self.env.render(**kwargs)
def close_env(self) -> None:
self.env.close()
class NestedVectorEnv(BaseVectorEnv):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
len_envs = [getattr(w, '__len__', lambda: 1)() for w in self.workers]
num_envs = sum(len_envs)
self.num_workers = len(self.workers)
self.len_envs = len_envs
self.beg_envs = [sum(len_envs[:i]) for i in range(self.num_workers)]
self.end_envs = [self.beg_envs[i] + self.len_envs[i] for i in range(self.num_workers)]
self.wait_num = num_envs if self.wait_num == self.env_num else self.wait_num
self.env_num = num_envs
# self.ready_id = list(range(self.env_num))
def _wrap_id(self, id: ID_TYPE = None) -> Union[List[int], np.ndarray]:
if id is None:
return list(range(self.num_workers))
pid = [id] if np.isscalar(id) else id # type: ignore
if len(pid) == pid[-1] - pid[0] + 1:
return [i for i in range(self.num_workers) if not (self.beg_envs[i] > pid[-1] or self.end_envs[i] < pid[0])]
else:
return [i for i in range(self.num_workers) if any(self.beg_envs[i] <= j < self.end_envs[i] for j in pid)]
def _sub_id(self, id: ID_TYPE = None) -> Union[List[int], np.ndarray]:
if id is None:
return [None for _ in range(self.num_workers)]
pid = [id] if np.isscalar(id) else id # type: ignore
if len(pid) == pid[-1] - pid[0] + 1:
wid = [i for i in range(self.num_workers) if not (self.beg_envs[i] > pid[-1] or self.end_envs[i] < pid[0])]
return [
None if self.beg_envs[w] >= pid[0] and self.end_envs[w] <= pid[-1] else range(
max(self.beg_envs[w], pid[0]), min(self.end_envs[w], pid[-1] + 1)) for w in wid
]
return [[j - self.beg_envs[i]
for j in pid
if self.beg_envs[i] <= j < self.end_envs[i]]
for i in range(self.num_workers)]
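    # Example of the id mapping above (illustrative): with two workers of 4 envs each
    # (beg_envs=[0, 4]), _wrap_id([3, 5]) -> [0, 1] selects both workers, while
    # _sub_id([3, 5]) -> [[3], [1]] gives each selected worker its local env indices.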
def get_env_attr(self, key: str, id: ID_TYPE = None) -> List[Any]:
self._assert_is_not_closed()
id = self._wrap_id(id)
if self.is_async:
self._assert_id(id)
return list(itertools.chain(*[self.workers[j].get_env_attr(key) for j in id]))
def set_env_attr(self, key: str, value: Any, id: ID_TYPE = None) -> None:
self._assert_is_not_closed()
id = self._wrap_id(id)
if self.is_async:
self._assert_id(id)
for j in id:
self.workers[j].set_env_attr(key, value)
def step(self, action: Any, id: ID_TYPE = None) -> Tuple:
self._assert_is_not_closed()
id = self._wrap_id(id)
if not self.is_async:
# assert len(action) == len(id)
assert len(action) == sum(self.len_envs[i] for i in id)
for i, j in enumerate(id):
self.workers[j].send(action[self.beg_envs[i]:self.beg_envs[i] + self.len_envs[i]])
result = []
for j in id:
obs, rew, done, info = self.workers[j].recv()
# info["env_id"] = j
info["env_id"] = list(range(self.beg_envs[j], self.end_envs[j]))
result.append((obs, rew, done, info))
else:
if action is not None:
self._assert_id(id)
assert len(action) == len(id)
for act, env_id in zip(action, id):
self.workers[env_id].send(act)
self.waiting_conn.append(self.workers[env_id])
self.waiting_id.append(env_id)
self.ready_id = [x for x in self.ready_id if x not in id]
ready_conns: List[EnvWorker] = []
while not ready_conns:
ready_conns = self.worker_class.wait(self.waiting_conn, self.wait_num, self.timeout)
result = []
for conn in ready_conns:
waiting_index = self.waiting_conn.index(conn)
self.waiting_conn.pop(waiting_index)
env_id = self.waiting_id.pop(waiting_index)
obs, rew, done, info = conn.recv()
info["env_id"] = env_id
result.append((obs, rew, done, info))
self.ready_id.append(env_id)
obs_list, rew_list, done_list, info_list = zip(*result)
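        # Wrap each per-worker result under a dummy key so Batch.cat can
        # concatenate them whether they are plain arrays or nested Batches,
        # then unwrap the key to recover flat per-sub-env arrays.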
obs_bats, rew_bats, done_bats, info_bats = map(lambda lst: [Batch({'0': v}) for v in lst],
[obs_list, rew_list, done_list, info_list])
obs_cat, rew_cat, done_cat, info_cat = map(Batch.cat, [obs_bats, rew_bats, done_bats, info_bats])
obs, rew, done, info = map(lambda b: b['0'], [obs_cat, rew_cat, done_cat, info_cat])
if self.obs_rms and self.update_obs_rms:
self.obs_rms.update(obs)
return self.normalize_obs(obs), rew, done, info
def reset(self, id: ID_TYPE = None) -> np.ndarray:
self._assert_is_not_closed()
sid = self._sub_id(id)
id = self._wrap_id(id)
if self.is_async:
self._assert_id(id)
# send(None) == reset() in worker
for i in id:
self.workers[i].send(None, sid[i])
obs_list = [self.workers[i].recv() for i in id]
obs_bats = [Batch({'0': v}) for v in obs_list]
obs_cat = Batch.cat(obs_bats)
obs = obs_cat['0']
if self.obs_rms and self.update_obs_rms:
self.obs_rms.update(obs)
return self.normalize_obs(obs)
def normalize_obs(self, obs: Batch) -> Batch:
if self.obs_rms and self.norm_obs:
clip_max = 10.0 # this magic number is from openai baselines
# see baselines/common/vec_env/vec_normalize.py#L10
            # use float32 machine epsilon directly instead of the base class's
            # name-mangled self.__eps, which is not accessible from this subclass
            eps = np.finfo(np.float32).eps.item()
            obs = (obs - self.obs_rms.mean) / np.sqrt(self.obs_rms.var + eps)
            obs = np.clip(obs, -clip_max, clip_max)
return obs
class IGMAVectorEnv(NestedVectorEnv):
def __init__(self, env_fns: List[Callable[[], VecTask]], **kwargs: Any) -> None:
super().__init__(env_fns, IGMAEnvWorker, **kwargs)
class NestedVectorReplayBuffer(VectorReplayBuffer):
def add(
self,
batch: Batch,
buffer_ids: Optional[Union[np.ndarray,
List[int]]] = None) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Add a batch of data into ReplayBufferManager.
        Each of the data's length (first dimension) must be equal to the length of
buffer_ids. By default buffer_ids is [0, 1, ..., buffer_num - 1].
Return (current_index, episode_reward, episode_length, episode_start_index). If
the episode is not finished, the return value of episode_length and
episode_reward is 0.
"""
# preprocess batch
new_batch = Batch()
for key in set(self._reserved_keys).intersection(batch.keys()):
new_batch.__dict__[key] = batch[key]
batch = new_batch
assert set(["obs", "act", "rew", "done"]).issubset(batch.keys())
if self._save_only_last_obs:
batch.obs = batch.obs[:, -1]
if not self._save_obs_next:
batch.pop("obs_next", None)
elif self._save_only_last_obs:
batch.obs_next = batch.obs_next[:, -1]
# get index
if buffer_ids is None:
buffer_ids = np.arange(self.buffer_num)
ptrs, ep_lens, ep_rews, ep_idxs = [], [], [], []
for batch_idx, buffer_id in enumerate(buffer_ids):
ptr, ep_rew, ep_len, ep_idx = self.buffers[buffer_id]._add_index(batch.rew[batch_idx],
batch.done[batch_idx])
ptrs.append(ptr + self._offset[buffer_id])
ep_lens.append(ep_len)
ep_rews.append(ep_rew)
ep_idxs.append(ep_idx + self._offset[buffer_id])
self.last_index[buffer_id] = ptr + self._offset[buffer_id]
self._lengths[buffer_id] = len(self.buffers[buffer_id])
ptrs = np.array(ptrs)
try:
self._meta[ptrs] = batch
except ValueError:
# batch.rew = batch.rew.to(float)
# batch.done = batch.done.to(bool)
batch.rew = batch.rew.astype(float)
batch.done = batch.done.astype(bool)
if self._meta.is_empty():
self._meta = _create_value( # type: ignore
batch, self.maxsize, stack=False)
else: # dynamic key pops up in batch
_alloc_by_keys_diff(self._meta, batch, self.maxsize, False)
self._set_batch_for_children()
self._meta[ptrs] = batch
return ptrs, np.array(ep_rews), np.array(ep_lens), np.array(ep_idxs)
| [] |
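To make the index bookkeeping in NestedVectorEnv above easier to follow, here is a minimal, self-contained sketch (not part of the source file) of how global environment ids are mapped to the workers that own them; the per-worker sizes are hypothetical.
# Each worker wraps a contiguous slice [beg_envs[i], end_envs[i]) of global env ids.
len_envs = [2, 3, 1]  # hypothetical number of sub-envs per worker
beg_envs = [sum(len_envs[:i]) for i in range(len(len_envs))]   # [0, 2, 5]
end_envs = [b + n for b, n in zip(beg_envs, len_envs)]         # [2, 5, 6]

def workers_for(env_ids):
    # Workers owning at least one of the requested global env ids (cf. _wrap_id).
    return [i for i in range(len(len_envs))
            if any(beg_envs[i] <= j < end_envs[i] for j in env_ids)]

assert workers_for([0, 1]) == [0]           # both ids live in worker 0
assert workers_for([1, 2, 5]) == [0, 1, 2]  # ids span all three workers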
2024-01-10 | mingkai-zheng/GENIUS | channel_bench_mob.py | import os
import json
import openai
import numpy as np
from decimal import Decimal
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--openai_key', type=str, required=True)
parser.add_argument('--openai_organization', type=str, required=True)
args = parser.parse_args()
print(args)
openai.api_key = args.openai_key
openai.organization = args.openai_organization
benchmark_file = open('benchmark/Results_MobileNet.json')
data = json.load(benchmark_file)
keys = list(data.keys())
rank = np.array([data[k]['mean'] for k in keys]).argsort().argsort()
for k, r in zip(keys, rank):
data[k]['rank'] = (4 ** 7) - r
system_content = "You are a expert in the field of neural architecture search."
user_input = '''Your task is to assist me in selecting the best channel numbers for a given model architecture. The model will be trained and tested on CIFAR10, and your objective will be to maximize the model's performance on CIFAR10.
The model architecture will be defined as the following.
{
layer1: nn.Conv2d(in_channels=3, out_channels=channels[0], kernel_size=3, padding=1, bias=False),
layer2: InvertedResidual(in_channels=channels[0], bottleneck_channels=channels[1], out_channels=channels[0], stride=1),
layer3: InvertedResidual(in_channels=channels[0], bottleneck_channels=channels[2], out_channels=channels[0], stride=1),
layer4: InvertedResidual(in_channels=channels[0], bottleneck_channels=channels[3], out_channels=channels[4], stride=2),
layer5: InvertedResidual(in_channels=channels[4], bottleneck_channels=channels[5], out_channels=channels[4], stride=1),
layer6: nn.Conv2d(channels[4], channels[6], kernel_size=1, stride = 1, padding=0, bias=False),
layer7: nn.AdaptiveAvgPool2d(output_size=1),
layer8: nn.Linear(in_features=channels[6], out_features=10),
}
The implementation of the InvertedResidual is as follows:
class InvertedResidual(nn.Module):
def __init__(self, in_channels, out_channels, bottleneck_channels, stride):
super(InvertedResidual, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_channels, bottleneck_channels, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(bottleneck_channels),
nn.ReLU(inplace=True),
nn.Conv2d(bottleneck_channels, bottleneck_channels, kernel_size=3, stride=stride, padding=1, groups=bottleneck_channels, bias=False),
nn.BatchNorm2d(bottleneck_channels),
nn.ReLU(inplace=True),
nn.Conv2d(bottleneck_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(out_channels),
)
self.use_shortcut = in_channels == out_channels and stride == 1
def forward(self, x):
if self.use_shortcut:
return self.conv(x) + x
return self.conv(x)
For the `channels` variable, the available channel number for each index would be:
{
channels[0]: [32, 64, 96, 128],
channels[1]: [192, 384, 576, 768],
channels[2]: [192, 384, 576, 768],
channels[3]: [192, 384, 576, 768],
channels[4]: [64, 128, 192, 256],
channels[5]: [384, 768, 1152, 1536],
channels[6]: [256, 512, 768, 1024],
}
Your objective is to define the optimal number of channels for each layer based on the given options above to maximize the model's performance on CIFAR10.
Your response should be a channel list consisting of 7 numbers (e.g. [64, 576, ..., 256]).
'''
experiments_prompt = lambda arch_list, acc_list : '''Here are some experimental results that you can use as a reference:
{}
Please suggest a better channel list that can improve the model's performance on CIFAR10 beyond the experimental results provided above.
'''.format(''.join(['{} gives an accuracy of {:.2f}%\n'.format(arch, acc) for arch, acc in zip(arch_list, acc_list)]))
suffix = '''Please do not include anything else other than the channel list in your response.'''
arch_list = []
acc_list = []
messages = [
{"role": "system", "content": system_content},
{"role": "user", "content": user_input + suffix},
]
performance_history = []
messages_history = []
if not os.path.exists('history'):
os.makedirs('history')
base_channels = [32, 192, 192, 192, 64, 384, 256]
for iteration in range(10):
res = openai.ChatCompletion.create(model='gpt-4', messages=messages, temperature=0, n=1)['choices'][0]['message']
messages.append(res)
messages_history.append(messages)
# print(messages)
print(res['content'])
channels = json.loads(res['content'])
search_id = ''.join([str(int(c / base_c)) for base_c, c in zip(base_channels, channels)])
accuracy = data[search_id]['mean']
accuracy = float(Decimal(accuracy).quantize(Decimal("0.01"), rounding = "ROUND_HALF_UP"))
arch_list.append(channels)
acc_list.append(accuracy)
performance = {
'arch' : channels,
'rank' : str(data[search_id]['rank']),
'acc' : str(data[search_id]['mean']),
'flops': str(data[search_id]['flops']),
}
print(iteration+1, performance)
performance_history.append(performance)
with open('history/channel_bench_mob_messages.json', 'w') as f:
json.dump(messages_history, f)
with open('history/channel_bench_mob_performance.json', 'w') as f:
json.dump(performance_history, f)
messages = [
{"role": "system", "content": system_content},
{"role": "user", "content": user_input + experiments_prompt(arch_list, acc_list) + suffix},
]
| [
"You are a expert in the field of neural architecture search.",
"<function <lambda> at 0x1166a9580>",
"Your task is to assist me in selecting the best channel numbers for a given model architecture. The model will be trained and tested on CIFAR10, and your objective will be to maximize the model's performance on CIFAR10. \n\nThe model architecture will be defined as the following.\n{\n layer1: nn.Conv2d(in_channels=3, out_channels=channels[0], kernel_size=3, padding=1, bias=False),\n layer2: InvertedResidual(in_channels=channels[0], bottleneck_channels=channels[1], out_channels=channels[0], stride=1),\n layer3: InvertedResidual(in_channels=channels[0], bottleneck_channels=channels[2], out_channels=channels[0], stride=1),\n layer4: InvertedResidual(in_channels=channels[0], bottleneck_channels=channels[3], out_channels=channels[4], stride=2),\n layer5: InvertedResidual(in_channels=channels[4], bottleneck_channels=channels[5], out_channels=channels[4], stride=1),\n layer6: nn.Conv2d(channels[4], channels[6], kernel_size=1, stride = 1, padding=0, bias=False),\n layer7: nn.AdaptiveAvgPool2d(output_size=1),\n layer8: nn.Linear(in_features=channels[6], out_features=10),\n}\n\nThe implementation of the InvertedResidual is as follows:\nclass InvertedResidual(nn.Module):\n def __init__(self, in_channels, out_channels, bottleneck_channels, stride):\n super(InvertedResidual, self).__init__()\n self.conv = nn.Sequential(\n nn.Conv2d(in_channels, bottleneck_channels, kernel_size=1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(bottleneck_channels),\n nn.ReLU(inplace=True),\n nn.Conv2d(bottleneck_channels, bottleneck_channels, kernel_size=3, stride=stride, padding=1, groups=bottleneck_channels, bias=False),\n nn.BatchNorm2d(bottleneck_channels),\n nn.ReLU(inplace=True),\n nn.Conv2d(bottleneck_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(out_channels),\n )\n self.use_shortcut = in_channels == out_channels and stride == 1\n\n def forward(self, x):\n if self.use_shortcut:\n return self.conv(x) + x\n return self.conv(x)\n\nFor the `channels` variable, the available channel number for each index would be:\n{\n channels[0]: [32, 64, 96, 128],\n channels[1]: [192, 384, 576, 768],\n channels[2]: [192, 384, 576, 768],\n channels[3]: [192, 384, 576, 768],\n channels[4]: [64, 128, 192, 256],\n channels[5]: [384, 768, 1152, 1536],\n channels[6]: [256, 512, 768, 1024],\n}\n\nYour objective is to define the optimal number of channels for each layer based on the given options above to maximize the model's performance on CIFAR10. \nYour response should be the a channel list consisting of 7 numbers (e.g. [64, 576, ..., 256]).\nPlease do not include anything else other than the channel list in your response."
] |
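For reference, a small standalone sketch (not part of the script above) of the benchmark-key encoding used by channel_bench_mob.py: each suggested channel count is an integer multiple of its base channel, and the concatenated multiples index into Results_MobileNet.json. The channel list below is a hypothetical example.
base_channels = [32, 192, 192, 192, 64, 384, 256]
channels = [64, 576, 384, 768, 128, 1152, 512]  # hypothetical GPT-4 suggestion
search_id = ''.join(str(int(c / b)) for b, c in zip(base_channels, channels))
assert search_id == '2324232'  # key used to look up data[search_id]['mean']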
2024-01-10 | mingkai-zheng/GENIUS | channel_bench_res.py | import os
import json
import openai
import numpy as np
from decimal import Decimal
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--openai_key', type=str, required=True)
parser.add_argument('--openai_organization', type=str, required=True)
args = parser.parse_args()
print(args)
openai.api_key = args.openai_key
openai.organization = args.openai_organization
benchmark_file = open('benchmark/Results_ResNet.json')
data = json.load(benchmark_file)
keys = list(data.keys())
rank = np.array([data[k]['mean'] for k in keys]).argsort().argsort()
for k, r in zip(keys, rank):
data[k]['rank'] = (4 ** 7) - r
system_content = "You are an expert in the field of neural architecture search."
user_input = '''Your task is to assist me in selecting the best channel numbers for a given model architecture. The model will be trained and tested on CIFAR10, and your objective will be to maximize the model's performance on CIFAR10.
The model architecture will be defined as the following.
{
layer1: nn.Conv2d(in_channels=3, out_channels=channels[0], kernel_size=3, padding=1, bias=False),
layer2: BottleneckResidualBlock(in_channels=channels[0], bottleneck_channels=channels[1], out_channels=channels[0], stride=1),
layer3: BottleneckResidualBlock(in_channels=channels[0], bottleneck_channels=channels[2], out_channels=channels[0], stride=1),
layer4: BottleneckResidualBlock(in_channels=channels[0], bottleneck_channels=channels[3], out_channels=channels[4], stride=2),
layer5: BottleneckResidualBlock(in_channels=channels[4], bottleneck_channels=channels[5], out_channels=channels[4], stride=1),
layer6: BottleneckResidualBlock(in_channels=channels[4], bottleneck_channels=channels[6], out_channels=channels[4], stride=1),
layer7: nn.AdaptiveAvgPool2d(output_size=1),
layer8: nn.Linear(in_features=channels[4], out_features=10),
}
The implementation of the BottleneckResidualBlock is as follows:
class BottleneckResidualBlock(nn.Module):
def __init__(self, in_channels, bottleneck_channels, out_channels, stride):
super().__init__()
self.stride = stride
self.block = nn.Sequential(
nn.Conv2d(in_channels, bottleneck_channels, 3, stride = stride, padding=1, bias=False),
nn.BatchNorm2d(bottleneck_channels),
nn.ReLU(inplace=True),
nn.Conv2d(bottleneck_channels, out_channels, 3, stride = 1, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
if self.stride == 1:
return self.relu(x + self.block(x))
else:
return self.relu(self.block(x))
For the `channels` variable, the available channel number for each index would be:
{
channels[0]: [64, 128, 192, 256],
channels[1]: [64, 128, 192, 256],
channels[2]: [64, 128, 192, 256],
channels[3]: [128, 256, 384, 512],
channels[4]: [128, 256, 384, 512],
channels[5]: [128, 256, 384, 512],
channels[6]: [128, 256, 384, 512],
}
Your objective is to define the optimal number of channels for each layer based on the given options above to maximize the model's performance on CIFAR10.
Your response should be a channel list consisting of 7 numbers (e.g. [64, 192, ..., 256]).
'''
experiments_prompt = lambda arch_list, acc_list : '''Here are some experimental results that you can use as a reference:
{}
Please suggest a channel list that can improve the model's performance on CIFAR10 beyond the experimental results provided above.
'''.format(''.join(['{} gives an accuracy of {:.2f}%\n'.format(arch, acc) for arch, acc in zip(arch_list, acc_list)]))
suffix = '''Please do not include anything else other than the channel list in your response.'''
arch_list = []
acc_list = []
messages = [
{"role": "system", "content": system_content},
{"role": "user", "content": user_input + suffix},
]
performance_history = []
messages_history = []
if not os.path.exists('history'):
os.makedirs('history')
base_channels = [64, 64, 64, 128, 128, 128, 128]
for iteration in range(10):
res = openai.ChatCompletion.create(model='gpt-4', messages=messages, temperature=0, n=1)['choices'][0]['message']
messages.append(res)
messages_history.append(messages)
# print(messages)
print(res['content'])
channels = json.loads(res['content'])
search_id = ''.join([str(int(c / base_c)) for base_c, c in zip(base_channels, channels)])
accuracy = data[search_id]['mean']
accuracy = float(Decimal(accuracy).quantize(Decimal("0.01"), rounding = "ROUND_HALF_UP"))
arch_list.append(channels)
acc_list.append(accuracy)
performance = {
'arch' : channels,
'rank' : str(data[search_id]['rank']),
'acc' : str(data[search_id]['mean']),
'flops': str(data[search_id]['flops']),
}
print(iteration+1, performance)
performance_history.append(performance)
with open('history/channel_bench_res_messages.json', 'w') as f:
json.dump(messages_history, f)
with open('history/channel_bench_res_performance.json', 'w') as f:
json.dump(performance_history, f)
messages = [
{"role": "system", "content": system_content},
{"role": "user", "content": user_input + experiments_prompt(arch_list, acc_list) + suffix},
]
| [
"You are an expert in the field of neural architecture search.",
"<function <lambda> at 0x1163bcea0>",
"Your task is to assist me in selecting the best channel numbers for a given model architecture. The model will be trained and tested on CIFAR10, and your objective will be to maximize the model's performance on CIFAR10. \n\nThe model architecture will be defined as the following.\n{\n layer1: nn.Conv2d(in_channels=3, out_channels=channels[0], kernel_size=3, padding=1, bias=False),\n layer2: BottleneckResidualBlock(in_channels=channels[0], bottleneck_channels=channels[1], out_channels=channels[0], stride=1),\n layer3: BottleneckResidualBlock(in_channels=channels[0], bottleneck_channels=channels[2], out_channels=channels[0], stride=1),\n layer4: BottleneckResidualBlock(in_channels=channels[0], bottleneck_channels=channels[3], out_channels=channels[4], stride=2),\n layer5: BottleneckResidualBlock(in_channels=channels[4], bottleneck_channels=channels[5], out_channels=channels[4], stride=1),\n layer6: BottleneckResidualBlock(in_channels=channels[4], bottleneck_channels=channels[6], out_channels=channels[4], stride=1),\n layer7: nn.AdaptiveAvgPool2d(output_size=1),\n layer8: nn.Linear(in_features=channels[4], out_features=10),\n}\n\nThe implementation of the BottleneckResidualBlock is as follows:\nclass BottleneckResidualBlock(nn.Module):\n def __init__(self, in_channels, bottleneck_channels, out_channels, stride):\n super().__init__()\n\n self.stride = stride\n\n self.block = nn.Sequential(\n nn.Conv2d(in_channels, bottleneck_channels, 3, stride = stride, padding=1, bias=False),\n nn.BatchNorm2d(bottleneck_channels),\n nn.ReLU(inplace=True),\n nn.Conv2d(bottleneck_channels, out_channels, 3, stride = 1, padding=1, bias=False),\n nn.BatchNorm2d(out_channels),\n )\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n if self.stride == 1:\n return self.relu(x + self.block(x))\n else:\n return self.relu(self.block(x))\n\nFor the `channels` variable, the available channel number for each index would be:\n{\n channels[0]: [64, 128, 192, 256],\n channels[1]: [64, 128, 192, 256],\n channels[2]: [64, 128, 192, 256],\n channels[3]: [128, 256, 384, 512],\n channels[4]: [128, 256, 384, 512],\n channels[5]: [128, 256, 384, 512],\n channels[6]: [128, 256, 384, 512],\n}\n\nYour objective is to define the optimal number of channels for each layer based on the given options above to maximize the model's performance on CIFAR10. \nYour response should be the a channel list consisting of 7 numbers (e.g. [64, 192, ..., 256]).\nPlease do not include anything else other than the channel list in your response."
] |
2024-01-10 | mingkai-zheng/GENIUS | nas_bench_macro.py | import os
import json
import openai
import numpy as np
from decimal import Decimal
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--openai_key', type=str, required=True)
parser.add_argument('--openai_organization', type=str, required=True)
args = parser.parse_args()
print(args)
openai.api_key = args.openai_key
openai.organization = args.openai_organization
benchmark_file = open('benchmark/nas-bench-macro_cifar10.json')
data = json.load(benchmark_file)
keys = list(data.keys())
rank = np.array([data[k]['mean_acc'] for k in keys]).argsort().argsort()
for k, r in zip(keys, rank):
data[k]['rank'] = (3 ** 8) - r
system_content = "You are an expert in the field of neural architecture search."
user_input = '''Your task is to assist me in selecting the best operations for a given model architecture, which includes some undefined layers and available operations. The model will be trained and tested on CIFAR10, and your objective will be to maximize the model's performance on CIFAR10.
We define the 3 available operations as the following:
0: Identity(in_channels, out_channels, stride)
1: InvertedResidual(in_channels, out_channels, stride, expansion=3, kernel_size=3)
2: InvertedResidual(in_channels, out_channels, stride, expansion=6, kernel_size=5)
The implementation of the Identity is as follows:
class Identity(nn.Module):
def __init__(self, in_channels, out_channels, stride):
super(Identity, self).__init__()
if stride != 1 or in_channels != out_channels:
self.downsample = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(out_channels),
)
else:
self.downsample = None
def forward(self, x):
if self.downsample is not None:
x = self.downsample(x)
return x
The implementation of the InvertedResidual is as follows:
class InvertedResidual(nn.Module):
def __init__(self, in_channels, out_channels, stride, expansion, kernel_size):
super(InvertedResidual, self).__init__()
hidden_dim = in_channels * expansion
self.conv = nn.Sequential(
nn.Conv2d(in_channels, hidden_dim, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU(inplace=True),
nn.Conv2d(hidden_dim, hidden_dim, kernel_size=kernel_size, stride=stride, padding=kernel_size//2, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU(inplace=True),
nn.Conv2d(hidden_dim, out_channels, 1, 1, 0, bias=False),
nn.BatchNorm2d(out_channels),
)
self.use_shortcut = in_channels == out_channels and stride == 1
def forward(self, x):
if self.use_shortcut:
return self.conv(x) + x
return self.conv(x)
The model architecture will be defined as the following.
{
layer1: {defined: True, operation: nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1, bias=False)},
layer2: {defined: False, downsample: True , in_channels: 32, out_channels: 64 , stride: 2},
layer3: {defined: False, downsample: False, in_channels: 64, out_channels: 64 , stride: 1},
layer4: {defined: False, downsample: True , in_channels: 64, out_channels: 128, stride: 2},
layer5: {defined: False, downsample: False, in_channels: 128, out_channels: 128, stride: 1},
layer6: {defined: False, downsample: False, in_channels: 128, out_channels: 128, stride: 1},
layer7: {defined: False, downsample: True , in_channels: 128, out_channels: 256, stride: 2},
layer8: {defined: False, downsample: False, in_channels: 256, out_channels: 256, stride: 1},
layer9: {defined: False, downsample: False, in_channels: 256, out_channels: 256, stride: 1},
layer10: {defined: True, operation: nn.Conv2d(in_channels=256, out_channels=1280, kernel_size=1, bias=False, stride=1)},
layer11: {defined: True, operation: nn.AdaptiveAvgPool2d(output_size=1)},
layer12: {defined: True, operation: nn.Linear(in_features=1280, out_features=10)},
}
The currently undefined layers are layer2 - layer9, and the in_channels and out_channels have already been defined for each layer. To maximize the model's performance on CIFAR10, please provide me with your suggested operation for the undefined layers only.
Your response should be an operation ID list for the undefined layers. For example:
[1, 2, ..., 0] means we use operation 1 for layer2, operation 2 for layer3, ..., operation 0 for layer9.
'''
experiments_prompt = lambda arch_list, acc_list : '''Here are some experimental results that you can use as a reference:
{}
Please suggest a better operation ID list that can improve the model's performance on CIFAR10 beyond the experimental results provided above.
'''.format(''.join(['{} gives an accuracy of {:.2f}%\n'.format(arch, acc) for arch, acc in zip(arch_list, acc_list)]))
suffix = '''Please do not include anything other than the operation ID list in your response.'''
arch_list = []
acc_list = []
messages = [
{"role": "system", "content": system_content},
{"role": "user", "content": user_input + suffix},
]
performance_history = []
messages_history = []
if not os.path.exists('history'):
os.makedirs('history')
for iteration in range(10):
res = openai.ChatCompletion.create(model='gpt-4', messages=messages, temperature=0, n=1)['choices'][0]['message']
messages.append(res)
messages_history.append(messages)
print(res['content'])
operation_id_list = json.loads(res['content'])
operation_id_list_str = ''.join(str(opid) for opid in operation_id_list)
accuracy = data[operation_id_list_str]['mean_acc']
accuracy = float(Decimal(accuracy).quantize(Decimal("0.01"), rounding = "ROUND_HALF_UP"))
arch_list.append(operation_id_list)
acc_list.append(accuracy)
performance = {
'arch' : operation_id_list_str,
'rank' : str(data[operation_id_list_str]['rank']),
'acc' : str(data[operation_id_list_str]['mean_acc']),
'flops': str(data[operation_id_list_str]['flops']),
}
print(iteration+1, performance)
performance_history.append(performance)
with open('history/nas_bench_macro_messages.json', 'w') as f:
json.dump(messages_history, f)
with open('history/nas_bench_macro_performance.json', 'w') as f:
json.dump(performance_history, f)
messages = [
{"role": "system", "content": system_content},
{"role": "user", "content": user_input + experiments_prompt(arch_list, acc_list) + suffix},
]
| [
"You are an expert in the field of neural architecture search.",
"<function <lambda> at 0x1164c6340>",
"Your task is to assist me in selecting the best operations for a given model architecture, which includes some undefined layers and available operations. The model will be trained and tested on CIFAR10, and your objective will be to maximize the model's performance on CIFAR10.\n\nWe define the 3 available operations as the following:\n0: Identity(in_channels, out_channels, stride)\n1: InvertedResidual(in_channels, out_channels, stride expansion=3, kernel_size=3)\n2: InvertedResidual(in_channels, out_channels, stride expansion=6, kernel_size=5)\n\nThe implementation of the Identity is as follows:\nclass Identity(nn.Module):\n def __init__(self, in_channels, out_channels, stride):\n super(Identity, self).__init__()\n if stride != 1 or in_channels != out_channels:\n self.downsample = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(out_channels),\n )\n else:\n self.downsample = None\n\n def forward(self, x):\n if self.downsample is not None:\n x = self.downsample(x)\n return x\n\nThe implementation of the InvertedResidual is as follows:\nclass InvertedResidual(nn.Module):\n def __init__(self, in_channels, out_channels, stride, expansion, kernel_size):\n super(InvertedResidual, self).__init__()\n hidden_dim = in_channels * expansion\n self.conv = nn.Sequential(\n nn.Conv2d(in_channels, hidden_dim, kernel_size=1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(hidden_dim),\n nn.ReLU(inplace=True),\n nn.Conv2d(hidden_dim, hidden_dim, kernel_size=kernel_size, stride=stride, padding=kernel_size//2, groups=hidden_dim, bias=False),\n nn.BatchNorm2d(hidden_dim),\n nn.ReLU(inplace=True),\n nn.Conv2d(hidden_dim, out_channels, 1, 1, 0, bias=False),\n nn.BatchNorm2d(out_channels),\n )\n self.use_shortcut = in_channels == out_channels and stride == 1\n\n def forward(self, x):\n if self.use_shortcut:\n return self.conv(x) + x\n return self.conv(x)\n \n\nThe model architecture will be defined as the following.\n{\n layer1: {defined: True, operation: nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1, bias=False)},\n layer2: {defined: False, downsample: True , in_channels: 32, out_channels: 64 , stride: 2},\n layer3: {defined: False, downsample: False, in_channels: 64, out_channels: 64 , stride: 1},\n layer4: {defined: False, downsample: True , in_channels: 64, out_channels: 128, stride: 2},\n layer5: {defined: False, downsample: False, in_channels: 128, out_channels: 128, stride: 1},\n layer6: {defined: False, downsample: False, in_channels: 128, out_channels: 128, stride: 1},\n layer7: {defined: False, downsample: True , in_channels: 128, out_channels: 256, stride: 2},\n layer8: {defined: False, downsample: False, in_channels: 256, out_channels: 256, stride: 1},\n layer9: {defined: False, downsample: False, in_channels: 256, out_channels: 256, stride: 1},\n layer10: {defined: True, operation: nn.Conv2d(in_channels=256, out_channels=1280, kernel_size=1, bias=False, stride=1)},\n layer11: {defined: True, operation: nn.AdaptiveAvgPool2d(output_size=1)},\n layer12: {defined: True, operation: nn.Linear(in_features=1280, out_features=10)},\n}\n\nThe currently undefined layers are layer2 - layer9, and the in_channels and out_channels have already been defined for each layer. To maximize the model's performance on CIFAR10, please provide me with your suggested operation for the undefined layers only. \n\nYour response should be an operation ID list for the undefined layers. 
For example:\n[1, 2, ..., 0] means we use operation 1 for layer2, operation 2 for layer3, ..., operation 0 for layer9.\nPlease do not include anything other than the operation ID list in your response."
] |
2024-01-10 | mingkai-zheng/GENIUS | nas_bench_201.py | import os
import json
import openai
from decimal import Decimal
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--openai_key', type=str, required=True)
parser.add_argument('--openai_organization', type=str, required=True)
parser.add_argument('--dataset', type=str, required=True, choices=['cifar10', 'cifar100', 'imagenet'])
args = parser.parse_args()
print(args)
openai.api_key = args.openai_key
openai.organization = args.openai_organization
if args.dataset == 'cifar10':
benchmark_file = open('benchmark/nasbench201_cifar10.json')
elif args.dataset == 'cifar100':
benchmark_file = open('benchmark/nasbench201_cifar100.json')
else:
benchmark_file = open('benchmark/nasbench201_imagenet.json')
data = json.load(benchmark_file)
system_content = "You are Quoc V. Le, a computer scientist and artificial intelligence researcher who is widely regarded as one of the leading experts in deep learning and neural network architecture search. Your work in this area has focused on developing efficient algorithms for searching the space of possible neural network architectures, with the goal of finding architectures that perform well on a given task while minimizing the computational cost of training and inference."
user_input = '''You are an expert in the field of neural architecture search. Your task is to assist me in selecting the best operations to design a neural network block using the available operations. The objective is to maximize the model's performance.
The 5 available operations are as follows:
0: Zeroize() # This operation simply outputs a tensor of zeros regardless of the input, which breaks the gradient flow between two nodes.
1: nn.Identity() # Skip Connection.
2: ReLUConvBN(channels, channels, kernel_size=1, stride=1, padding=0) # The input channels and output channels are the same.
3: ReLUConvBN(channels, channels, kernel_size=3, stride=1, padding=1) # The input channels and output channels are the same.
4: nn.AvgPool2d(kernel_size=3, stride=1, padding=1) # This operation does not change the spatial resolution.
The neural network block is defined by 6 operations (i.e., op_list = [op0, op1, op2, op3, op4, op5]), which represent the operations executed between various stages of the block. This block comprises 4 stages, labeled as s0, s1, s2, and s3, each corresponding to distinct feature maps in the neural network.
s0 serves as the input feature map for this block.
s1 will be calculated by s1 = op0(s0).
s2 will be calculated by s2 = op1(s0) + op2(s1).
s3 will be calculated by s3 = op3(s0) + op4(s1) + op5(s2). Note that s3 becomes the output for this block and serves as the input for the subsequent block.
Then the implementation of the block will be:
class Block(nn.Module):
def __init__(self, channels):
super(Block, self).__init__()
self.op0 = op_id_list[0]
self.op1 = op_id_list[1]
self.op2 = op_id_list[2]
self.op3 = op_id_list[3]
self.op4 = op_id_list[4]
self.op5 = op_id_list[5]
def forward(self, s0):
s1 = self.op0(s0)
s2 = self.op1(s0) + self.op2(s1)
s3 = self.op3(s0) + self.op4(s1) + self.op5(s2)
return s3
Let's break this down step by step:
First, please analyze the 5 available operations.
Next, please consider the gradient flow based on the Block class implementation. For example, how the gradient from the later stage affects the earlier stage.
Now, answer the question - how we can design a high-performance block using the available operations?
Based on the analysis, your task is to propose a block design with the given operations that prioritizes performance, without considering factors such as size and complexity.
After you suggest a design, I will test its actual performance and provide you with feedback. Based on the results of previous experiments, we can collaborate to iterate and improve the design. Please avoid suggesting the same design again during this iterative process.
'''
experiments_prompt = lambda x : '''By using this model, we achieved an accuracy of {}%. Please recommend a new model that outperforms prior architectures based on the abovementioned experiments. Also, Please provide a rationale explaining why the suggested model surpasses all previous architectures.'''.format(x)
test_acc_list = []
val_acc_list = []
rank_list = []
messages = [
{"role": "system", "content": system_content},
{"role": "user", "content": user_input},
]
performance_history = []
if not os.path.exists('history'):
os.makedirs('history')
num_iters = 0
for iteration in range(num_iters, 10):
res = openai.ChatCompletion.create(model='gpt-4', messages=messages, temperature=0, n=1)['choices'][0]['message']
messages.append(res)
print('Assistant:', res['content'])
arch = input('\nUser: Please enter the GPT-4 suggested model (use 6 operation IDs to represent the model):')
print()
operation_id_list = [int(opid) for opid in list(arch)]
struct_dict = ['none', 'skip_connect', 'nor_conv_1x1', 'nor_conv_3x3', 'avg_pool_3x3']
operation_id_list_str = '|{}~0|+|{}~0|{}~1|+|{}~0|{}~1|{}~2|'.format(*[struct_dict[operation_id] for operation_id in operation_id_list])
rank = data[operation_id_list_str]['rank']
val_acc = data[operation_id_list_str]['val_acc_200']
test_acc = data[operation_id_list_str]['test_acc_200']
val_acc = float(Decimal(val_acc).quantize(Decimal("0.001"), rounding = "ROUND_HALF_UP"))
test_acc = float(Decimal(test_acc).quantize(Decimal("0.001"), rounding = "ROUND_HALF_UP"))
test_acc_list.append(test_acc)
val_acc_list.append(val_acc)
rank_list.append(rank)
performance = {
'rank' : rank,
'val_acc' : val_acc,
'test_acc' : test_acc,
}
print(iteration+1, performance, '\n')
performance_history.append(performance)
with open('history/nas_bench_201_{}_messages.json'.format(args.dataset), 'w') as f:
json.dump(messages, f)
with open('history/nas_bench_201_{}_performance.json'.format(args.dataset), 'w') as f:
json.dump(performance_history, f)
messages.append({"role": "user", "content": experiments_prompt(val_acc)})
| [
"<function <lambda> at 0x1164c5080>",
"You are Quoc V. Le, a computer scientist and artificial intelligence researcher who is widely regarded as one of the leading experts in deep learning and neural network architecture search. Your work in this area has focused on developing efficient algorithms for searching the space of possible neural network architectures, with the goal of finding architectures that perform well on a given task while minimizing the computational cost of training and inference.",
"You are an expert in the field of neural architecture search. Your task is to assist me in selecting the best operations to design a neural network block using the available operations. The objective is to maximize the model's performance.\n\nThe 5 available operations are as follows:\n0: Zeroize() # This operation simply outputs a tensor of zeros regardless of the input, which breaks the gradient flow between two nodes.\n1: nn.Identity() # Skip Connection.\n2: ReLUConvBN(channels, channels, kernal_size=1, stride=1, padding=0) # The input channels and output channels are the same.\n3: ReLUConvBN(channels, channels, kernal_size=3, stride=1, padding=1) # The input channels and output channels are the same.\n4: nn.AvgPool2d(kernel_size=3, stride=1, padding=1) # This operation does not change the spatial resolution.\n\nThe neural network block is defined by 6 operations (i.e., op_list = [op0, op1, op2, op3, op4, op5]), which represent the operations executed between various stages of the block. This block comprises 4 stages, labeled as s0, s1, s2, and s3, each corresponding to distinct feature maps in the neural network.\n\ns0 serves as the input feature map for this block.\ns1 will be calculated by s1 = op0(s0).\ns2 will be calculated by s2 = op1(s0) + op2(s1).\ns3 will be calculated by s3 = op3(s0) + op4(s1) + op5(s2). Note that s3 becomes the output for this block and serves as the input for the subsequent block.\n\nThen the implementation of the block will be:\nclass Block(nn.Module):\n def __init__(self, channels):\n super(Block, self).__init__()\n self.op0 = op_id_list[0]\n self.op1 = op_id_list[1]\n self.op2 = op_id_list[2]\n self.op3 = op_id_list[3]\n self.op4 = op_id_list[4]\n self.op5 = op_id_list[5]\n\n def forward(self, s0):\n s1 = self.op0(s0)\n s2 = self.op1(s0) + self.op2(s1)\n s3 = self.op3(s0) + self.op4(s1) + self.op5(s2)\n return s3\n\nLet's break this down step by step:\n\nFirst, please analyze the 5 available operations.\n\nNext, please consider the gradient flow based on the Block class implementation. For example, how the gradient from the later stage affects the earlier stage.\n\nNow, answer the question - how we can design a high-performance block using the available operations?\n\nBased the analysis, your task is to propose a block design with the given operations that prioritizes performance, without considering factors such as size and complexity.\n\nAfter you suggest a design, I will test its actual performance and provide you with feedback. Based on the results of previous experiments, we can collaborate to iterate and improve the design. Please avoid suggesting the same design again during this iterative process.\n"
] |
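For reference, a standalone sketch (not part of the script above) of how nas_bench_201.py converts the six operation ids typed in by the user into the NAS-Bench-201 architecture string used as the benchmark key; the arch value below is a hypothetical example.
struct_dict = ['none', 'skip_connect', 'nor_conv_1x1', 'nor_conv_3x3', 'avg_pool_3x3']
arch = '401312'  # hypothetical user input for op0..op5
ops = [struct_dict[int(c)] for c in arch]
key = '|{}~0|+|{}~0|{}~1|+|{}~0|{}~1|{}~2|'.format(*ops)
assert key == '|avg_pool_3x3~0|+|none~0|skip_connect~1|+|nor_conv_3x3~0|nor_conv_1x1~1|nor_conv_3x3~2|'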
2024-01-10 | michelle-qin/resumes-knowledge-navigator | backend~semantic.py | import os
import requests
import json
from openai_helper import get_client
from sql_helpers import get_text_from_id
from document_highlight import return_highlighted_pdf
class backend:
def __init__(self):
self.client = get_client()
self.TOCs = {}
def query_gpt4(self, prompt):
response = self.client.chat.completions.create(
model="gpt4",
messages=[
{"role": "user", "content": prompt}
])
return response.choices[0].message.content
def get_keyword(self, query):
return self.query_gpt4(f"What key characteristic the user is looking for in this resume?\n\nUser query: {query}\nKeywords:")
def add_tag_fields(self, TOC):
new_TOC = TOC
new_TOC["tags"] = []
if "workExperience" in new_TOC and isinstance(new_TOC["workExperience"], list):
for entry in new_TOC["workExperience"]:
if isinstance(entry, dict):
entry["tags"] = []
if "education" in new_TOC and isinstance(new_TOC["education"], list):
for entry in new_TOC["education"]:
if isinstance(entry, dict):
entry["tags"] = []
return new_TOC
def get_toc(self, doc_id):
if doc_id == "mock":
return self.metadata
elif doc_id in self.TOCs.keys():
return self.TOCs[doc_id]
else:
text = get_text_from_id(doc_id)
json_text = self.query_gpt4(f"You are a semantic parser. Use the following resume to populate a Json object \n\n Schema: {self.schema}\n\ndocument: {text}\nJSON:")
json_result = json.loads(json_text)
final_TOC = self.add_tag_fields(json_result)
self.TOCs[doc_id] = final_TOC
return final_TOC
def find_string_in_TOC(self, d, target, path=[]):
for key, value in d.items():
if isinstance(value, str) and target in value:
return path + [key]
if isinstance(value, list):
for i, item in enumerate(value):
if isinstance(item, dict):
result = self.find_string_in_TOC(item, target, path + [key, i])
if result:
return result
elif isinstance(item, str) and target in item:
return path + [key, i]
if isinstance(value, dict):
result = self.find_string_in_TOC(value, target, path + [key])
if result:
return result
# Return None if no match is found
return None
def add_tags(self, TOC, resume_text, keyword):
path = self.find_string_in_TOC(TOC, resume_text)
if path is None:
TOC["tags"].append(keyword)
elif path[0] == "workExperience":
work_experience_index = path[1]
if 0 <= work_experience_index < len(TOC["workExperience"]):
if keyword not in TOC["workExperience"][work_experience_index]["tags"]:
TOC["workExperience"][work_experience_index]["tags"].append(keyword)
elif path[0] == "education":
education_index = path[1]
if 0 <= education_index < len(TOC["education"]):
if keyword not in TOC["education"][education_index]["tags"]:
TOC["education"][education_index]["tags"].append(keyword)
else:
if keyword not in TOC["tags"]:
TOC["tags"].append(keyword)
def query(self, doc_id, prompt):
keyword = self.get_keyword(prompt)
TOC = self.get_toc(doc_id)
citations = return_highlighted_pdf(doc_id, prompt)
for citation in citations:
self.add_tags(TOC, citation, keyword)
return citations, TOC
def inject_query(self, prompt, highlighted_text):
return self.query_gpt4(f"You are a semantic parser. Rephrase the following query to incorporate the asker's intent given the text the asker has highlighted and refers to. The query is: {prompt}. The text to incorporate into the query is: {highlighted_text}.")
schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "Resume",
"type": "object",
"properties": {
"basic_info": {
"type": "object",
"properties": {
"first_name": {"type": "string"},
"last_name": {"type": "string"},
"email": {"type": "string", "format": "email"},
"phone": {"type": "string"},
"linkedin": {"type": "string"},
"github": {"type": "string"},
"website": {"type": "string"}
},
"required": ["first_name", "last_name", "email"]
},
"summary": {"type": "string"},
"work_experience": {
"type": "array",
"items": {
"type": "object",
"properties": {
"company": {"type": "string"},
"position": {"type": "string"},
"start_date": {"type": "string", "format": "date"},
"end_date": {"type": "string", "format": "date"},
"description": {"type": "string"}
},
"required": ["company", "position", "start_date"]
}
},
"education": {
"type": "array",
"items": {
"type": "object",
"properties": {
"institution": {"type": "string"},
"degree": {"type": "string"},
"start_date": {"type": "string", "format": "date"},
"end_date": {"type": "string", "format": "date"},
"description": {"type": "string"}
},
"required": ["institution", "degree", "start_date"]
}
},
"skills": {
"type": "array",
"items": {"type": "string"}
},
"languages": {
"type": "array",
"items": {"type": "string"}
},
"hobbies": {
"type": "array",
"items": {"type": "string"}
},
"references": {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {"type": "string"},
"title": {"type": "string"},
"company": {"type": "string"},
"contact": {"type": "string"}
}
}
}
}
}
metadata = {
"firstName": "",
"lastName": "",
"email": "",
"phone": "",
"summary": "Accounting professional with twenty years of experience in inventory and manufacturing accounting. Ability to fill in at a moment's notice, quickly mastering new systems, processes and workflows. Take charge attitude, ability to work independently, recommend and implement ideas and process improvements.",
"workExperience": [
{
"company": "Company Name",
"position": "Accountant",
"startDate": "04/2011",
"endDate": "05/2017",
"description": "Performed general accounting functions, journal entries, reconciliations and accruals. Implemented and oversaw RGA spreadsheet for returns used by customer service, accounting and upper management. Initiated and tracked claim process with carriers for damages. Participated in identifying and executing the company's business process improvement efforts"
},
{
"company": "Company Name",
"position": "Inventory Control Manager",
"startDate": "01/2008",
"endDate": "01/2010",
"description": "Became an expert user and handled rollout and training of a new ERP system (Syspro). Handled the purchasing and receiving of raw and semi-finished material, tools, supplies. Continuously renegotiated payment terms with suppliers/vendors resulting in improved cash flow"
},
{
"company": "Company Name",
"position": "Accounting Manager",
"startDate": "01/1995",
"endDate": "01/2008",
"description": "Prepared all relevant documentation and submitted data for auditors during corporate takeover in 2008. Prepared monthly general ledger entries, reconcile G/L accounts to subsidiary journals or worksheets and posted monthly G/L journal. Managed the payroll function which was outsourced to ADP"
},
{
"company": "Company Name",
"position": "Full Charge Bookkeeper",
"startDate": "01/1993",
"endDate": "01/1995",
"description": ""
}
],
"education": [
{
"school": "Montclair State College",
"degree": "B.S Business Administration Accounting",
"fieldOfStudy": "Accounting",
"startDate": "",
"endDate": ""
}
],
"skills": [
"Microsoft Office Excel",
"Outlook",
"Word",
"SAGE 100",
"Ramp (WMS software)",
"Syspro (ERP program)"
],
"languages": [],
"certifications": []
}
| [] |
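A brief standalone sketch (not part of the source file) of how backend.add_tags routes a citation using the key path returned by find_string_in_TOC; the TOC, path, and keyword below are hypothetical.
toc = {
    "summary": "Accounting professional with twenty years of experience",
    "workExperience": [
        {"company": "Company Name", "description": "Implemented a new ERP system", "tags": []}
    ],
    "education": [],
    "tags": [],
}
# find_string_in_TOC would return this path for a citation found in the first job entry.
path = ["workExperience", 0, "description"]
keyword = "ERP experience"
if path is None:
    toc["tags"].append(keyword)
elif path[0] == "workExperience":
    toc["workExperience"][path[1]]["tags"].append(keyword)
assert toc["workExperience"][0]["tags"] == ["ERP experience"]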
2024-01-10 | michelle-qin/resumes-knowledge-navigator | backend~answer_search.py | import fitz
import os
from openai import AzureOpenAI
import ast
import json
from sql_helpers import evaluate_query, evaluate_query_blind, get_text_from_id
from openai_helper import get_client
"""
This function will search the text for the answer to a given question.
param: doc_id (int) the unique id allowing us to find the processed text and pdf filename in the file_table->documents sql table
param: question (string) the question we are trying to answer.
return: dictionary with two fields
1. Answer which is the answer to the question
2. Citation which is the verbatim text supporting it
"""
def search_text(doc_id, query):
document = get_text_from_id(doc_id)
search_prompt = f"""
You are acting as an agent that will search through a document for the answer to a request. I will now give you the document.
Document: "{document}"
Now, I will give you the request.
Request: "{query}"
Given the passage and the request, you must give the verbatim citation from the given passage which satisfies the request. If the information is not explicitly shown in the text just put "None". Make sure your answer is in this format:
{{
"answer": "<YOUR ANSWER>",
"citation": "<YOUR CITATION>",
}}
I will now give you an example so that you can learn how to do this task. If you are given the following document:
Document: "ADULT EDUCATION INSTRUCTOR
Experience
Company Name City , State Adult Education Instructor 08/2016 to Current Developed a diploma program that fit the needs of the community,
continues to work with the community and wants to see the students succeed move on into either industry or collegeÂ
Company Name City , State Agriculture/Credit Recovery Teacher 08/2000 to Current
Planned and conducted activities for a balanced program of instruction, demonstration, and work time that provided students with
opportunities to observe, question, and investigate.
Goal Setting Established clear objectives for all lessons/projects and communicated with students, achieving a total understanding of grading
rubric and overall class expectations."
and you are given the following request:
Request: "What was the title of their most recent job?"
Then, your answer should be:
{{
"answer": "Adult Education Instructor",
"citation": "Company Name City , State Adult Education Instructor 08/2016 to Current Developed a diploma program that fit the needs of the community,
continues to work with the community and wants to see the students succeed move on into either industry or collegeÂ"
}}
Here's another example:
Request: "Show me their accounting experience."
Then, your answer should be:
{{
"answer": "None",
"citation": "None">
}}
Only give the answer in the format that I told you. Do not say anything else extra other than the answer. Do not act as if you are a human. Act as if you are the raw output for a query. Give only the first instance of the answer even if multiple parts are relevant
"""
client = get_client()
response = client.chat.completions.create(
model = "gpt4",
temperature = 0,
messages=[
{"role": "system", "content": "Assistant is acting as an agent that will search through a document for the answer to a request."},
{"role": "user", "content": search_prompt}
]
)
response = response.choices[0].message.content
try:
json_dict = json.loads(response)
except:
raise ValueError("The LLM outputted a non-json-formattable string. Contact Thomas/Daniel to work this out.")
return json_dict
"""
This code will add a highlight to a pdf given a piece of text that the LLM has searched for.
param: input_path (string) the path to the pdf file we will be highlighting
param: output_path (string) the path that we want to save the highlighted pdf to
param: sections (List[string]) the list of text sections we want to highlight in the pdf
"""
def add_hyperlinks_to_pdf(input_path, output_path, sections):
pdf_document = fitz.open(input_path)
for query in sections:
for page in pdf_document:
search_results = page.search_for(query)
for rect in search_results:
annot = page.add_highlight_annot(rect)
pdf_document.save(output_path)
pdf_document.close()
def query_gpt4(prompt):
client = get_client()
response = client.chat.completions.create(
model="gpt4",
messages=[
{"role": "user", "content": prompt}
])
return response.choices[0].message.content
def multiple_document_table(doc_ids, query):
client = get_client()
schema = "multiple_doc"
table_name = "table"
field_prompt = f"""
Given the query, give me the name of the column that would store the answer to it in a SQL table. Here are a few examples:
    Query: Show me how this applicant demonstrates diversity.
Field name: diversity
Query: What foreign experience does this applicant have?
Field name: foreign_experience
Query: What college did they go to?
Field name: college
Remember only give me the field name after the "Field name:" This should be one word with no spaces. Use an underscore to separate words.
Query: {query}
Field name:
"""
field = query_gpt4(field_prompt)
res = {}
res["doc_id"] = []
res[field] = []
res[f"{field}_citation"] = []
for doc_id in doc_ids:
response_dict = search_text(doc_id, query)
res["doc_id"].append(doc_id)
res[field].append(response_dict["answer"])
res[f"{field}_citation"].append(response_dict["citation"])
return res
def multiple_document_table_to_sql(doc_ids, query):
client = get_client()
schema = "multiple_doc"
table_name = "table"
field_prompt = f"""
Given the query, give me the name of the column that would store the answer to it in a SQL table. Here are a few examples:
    Query: Show me how this applicant demonstrates diversity.
Field name: diversity
Query: What foreign experience does this applicant have?
Field name: foreign_experience
Query: What college did they go to?
Field name: college
Remember only give me the field name after the "Field name:" This should be one word with no spaces. Use an underscore to separate words.
Query: {query}
Field name:
"""
field = query_gpt4(field_prompt)
delete_query = f"DROP TABLE search_results"
evaluate_query(schema, delete_query)
create_query = f"""
CREATE TABLE search_results (
doc_id INTEGER,
{field} TEXT,
{field}_citation TEXT
);
"""
print(create_query)
evaluate_query(schema, create_query)
for doc_id in doc_ids:
print(f"Processing document {doc_id}")
# print(get_text_from_id(doc_id))
response_dict = search_text(doc_id, query)
insert_query = f"""
INSERT INTO search_results (doc_id, {field}, {field}_citation)
VALUES (?, ?, ?);
"""
data = (
doc_id,
response_dict["answer"],
response_dict["citation"]
)
print(insert_query)
evaluate_query_blind(schema, insert_query, data)
| [
"Assistant is acting as an agent that will search through a document for the answer to a request.",
"\n You are acting as an agent that will search through a document for the answer to a request. I will now give you the document.\n Document: \"PLACEHOLDER\"\n Now, I will give you the request.\n Request: \"PLACEHOLDER\"\n Given the passage and the request, you must give the verbatim citation from the given passage which satisfies the request. If the information is not explicitly shown in the text just put \"None\". Make sure your answer is in this format:\n {\n \"answer\": \"<YOUR ANSWER>\",\n \"citation\": \"<YOUR CITATION>\",\n }\n\n I will now give you an example so that you can learn how to do this task. If you are given the following document:\n Document: \"ADULT EDUCATION INSTRUCTOR\n Experience\n Company Name City , State Adult Education Instructor 08/2016 to Current Developed a diploma program that fit the needs of the community,\n continues to work with the community and wants to see the students succeed move on into either industry or collegeÂ\n Company Name City , State Agriculture/Credit Recovery Teacher 08/2000 to Current\n Planned and conducted activities for a balanced program of instruction, demonstration, and work time that provided students with\n opportunities to observe, question, and investigate.\n Goal Setting Established clear objectives for all lessons/projects and communicated with students, achieving a total understanding of grading\n rubric and overall class expectations.\"\n and you are given the following request:\n Request: \"What was the title of their most recent job?\"\n Then, your answer should be:\n {\n \"answer\": \"Adult Education Instructor\",\n \"citation\": \"Company Name City , State Adult Education Instructor 08/2016 to Current Developed a diploma program that fit the needs of the community,\n continues to work with the community and wants to see the students succeed move on into either industry or collegeÂ\"\n }\n\n Here's another example:\n Request: \"Show me their accounting experience.\"\n Then, your answer should be:\n {\n \"answer\": \"None\",\n \"citation\": \"None\">\n }\n\n Only give the answer in the format that I told you. Do not say anything else extra other than the answer. Do not act as if you are a human. Act as if you are the raw output for a query. Give only the first instance of the answer even if multiple parts are relevant\n ",
"\n Given the query, give me the name of the column that would store the answer to it in a SQL table. Here are a few examples:\n\n Query: Show me how this applicant has demostrates diversity.\n Field name: diversity\n\n Query: What foreign experience does this applicant have?\n Field name: foreign_experience\n\n Query: What college did they go to?\n Field name: college\n\n Remember only give me the field name after the \"Field name:\" This should be one word with no spaces. Use an underscore to separate words.\n\n Query: PLACEHOLDER\n Field name:\n "
] |
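A hypothetical usage sketch (not in the repository) showing how search_text and add_hyperlinks_to_pdf from answer_search.py could be chained to highlight a supporting citation; the document id and PDF paths are assumptions, and the SQL helpers plus Azure OpenAI credentials must already be configured.
result = search_text(doc_id=1, query="What college did they go to?")  # returns {"answer": ..., "citation": ...}
if result["citation"] != "None":
    add_hyperlinks_to_pdf("resumes/1.pdf", "resumes/1_highlighted.pdf", [result["citation"]])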
2024-01-10 | idrori/mathQ | code~few-shot.py | import openai
import pandas as pd
import time
import argparse
import os
openai.api_key = os.getenv('OpenAI_API_Key')
courses_to_few_shot = ['18.01', '18.02', '18.03', '6.042', '18.05', '18.06', 'COMS3251']
MATH_sections_to_few_shot = ['MATH_Algebra', 'MATH_Counting_&_Probability', 'MATH_Intermediate_Algebra',
'MATH_Number_Theory', 'MATH_Prealgebra', 'MATH_Precalculus']
questions_per_course = 25
questions_per_MATH_section = 15
parser = argparse.ArgumentParser()
# if an argument is passed in as True, we do it
parser.add_argument("--Codex_Few_Shot")
parser.add_argument("--GPT3_CoT_One_Shot")
parser.add_argument("--Do_MATH")
parser.add_argument("--Do_Courses")
args = parser.parse_args()
#Will use this many few-shot examples if possible: (if fewer are solved, use as many as possible)
few_shot_examples_desired = 5
codex_engine = "code-davinci-002"
gpt3_engine = "text-davinci-002"
engine_temperature = 0
engine_topP = 0
few_shot_max_tokens = 256
gpt3_CoT_max_tokens = 1000
codex_time_delay = 3
gpt3_time_delay = 1
CoT = "Let's think step by step."
def execute_few_shot(courses, questions_per):
"""
Runs few-shot on questions_per questions for each course in courses.
"""
for course in courses:
course_location = course + ' results.csv'
#initializing new columns in csv
results = pd.read_csv(course_location)
results['Few-Shot Input'] = ''
results['Few-Shot Output'] = ''
results['Few-Shot Evaluation'] = ''
results.to_csv(course_location, index=False)
for i in range(questions_per):
k = few_shot_examples_desired
#correct via zero-shot:
if results.iloc[i]['Zero-Shot Evaluation'] == 1:
print('no few shot needed for ' + course + ' question ' + str(i+1))
few_shot_input = 'n/a'
few_shot_output = 'n/a'
#incorrect via zero-shot:
elif results.iloc[i]['Zero-Shot Evaluation'] == 0:
few_shot_input = ''
print('doing few-shot for ' + course + ' question ' + str(i+1) + '...')
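                # Assemble the few-shot prompt from up to k of the most similar questions that
                # Codex already solved zero-shot, prepending each solved prompt/completion pair.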
for closest in results.iloc[i]["Most Similar Questions"].strip('][').split(', '):
closest_index = int(closest) - 1
if results.iloc[closest_index]['Zero-Shot Evaluation'] == 1 and k > 0:
few_shot_input += results.iloc[closest_index]['Codex Input']
few_shot_input += results.iloc[closest_index]['Codex Output']+'\n\n'
k -= 1
few_shot_input += results.iloc[i]['Codex Input']
start = time.time()
time.sleep(codex_time_delay) #to avoid an openai.error.RateLimitError
few_shot_output = openai.Completion.create(engine = codex_engine,
prompt = few_shot_input,
max_tokens = few_shot_max_tokens,
temperature = engine_temperature,
top_p = engine_topP)['choices'][0]['text']
print('Codex API call time: ' + str(time.time()-start) + '\n')
#columns not properly labelled with 1's and 0's:
else:
print('''A Question not labeled 1 for correct or 0 for incorrect was detected.
You must go back and label all Codex Zero-Shot questions as correct or incorrect''')
raise ValueError
results.loc[i, 'Few-Shot Input'] = few_shot_input
results.loc[i, 'Few-Shot Output'] = few_shot_output
results.to_csv(course_location, index=False)
def execute_GPT3_CoT_one_shot(courses, questions_per):
"""
Runs one-shot CoT on questions_per questions for each course in courses.
"""
for course in courses:
course_location = course + ' results.csv'
#initializing new columns in csv
results = pd.read_csv(course_location)
results['GPT-3 CoT Few-Shot Input'] = ''
results['GPT-3 CoT Few-Shot Output'] = ''
        results['GPT-3 CoT Few-Shot Evaluation'] = ''
results.to_csv(course_location, index=False)
for i in range(questions_per):
closest_index = int(results.iloc[i]["Most Similar Questions"].strip('][').split(', ')[0]) - 1
similar_question = results.iloc[closest_index]["Original Question"]
similar_answer = results.iloc[closest_index]["Actual Solution"]
original_question = results.iloc[i]["Original Question"]
print("Running GPT-3 CoT one-shot on " + course + ' question ' + str(i+1) + '...')
start = time.time()
time.sleep(gpt3_time_delay) #to avoid an openai.error.RateLimitError
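            # One-shot CoT prompt: the most similar question and its actual solution, followed by
            # the target question and the chain-of-thought trigger "Let's think step by step."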
gpt3_CoT_input = 'Q: ' + similar_question + '\nA: ' + str(similar_answer) + '\n\nQ: ' + original_question + "\nA: " + CoT
gpt3_CoT_output = openai.Completion.create(engine = gpt3_engine,
prompt = gpt3_CoT_input,
max_tokens = gpt3_CoT_max_tokens,
temperature = engine_temperature,
top_p = engine_topP)['choices'][0]['text']
print('GPT-3 API call time: ' + str(time.time()-start) + '\n')
results.loc[i, 'GPT-3 CoT Few-Shot Input'] = gpt3_CoT_input
results.loc[i, 'GPT-3 CoT Few-Shot Output'] = gpt3_CoT_output
results.to_csv(course_location, index=False)
if __name__ == "__main__":
if args.Do_Courses:
if args.Codex_Few_Shot:
execute_few_shot(courses_to_few_shot, questions_per_course)
if args.GPT3_CoT_One_Shot:
execute_GPT3_CoT_one_shot(courses_to_few_shot, questions_per_course)
if args.Do_MATH:
if args.Codex_Few_Shot:
execute_few_shot(MATH_sections_to_few_shot, questions_per_MATH_section)
if args.GPT3_CoT_One_Shot:
execute_GPT3_CoT_one_shot(MATH_sections_to_few_shot, questions_per_MATH_section) | [] |
2024-01-10 | idrori/mathQ | code~zero-shot.py | import os
import openai
import json
import pandas as pd
import time
import argparse
from embedding import get_embeddings, get_most_similar
parser = argparse.ArgumentParser()
# if an argument is passed in as True, we do it
parser.add_argument("--Codex")
parser.add_argument("--Explain")
parser.add_argument("--GPT3")
parser.add_argument("--GPT3_CoT")
parser.add_argument("--Do_MATH")
parser.add_argument("--Do_Courses")
args = parser.parse_args()
column_labels = ['Question', 'Original Question', 'Actual Solution']
if args.Codex == 'True':
column_labels += ['Codex Input', 'Codex Output', 'Zero-Shot Evaluation']
if args.Explain == 'True' and args.Codex == 'True':
column_labels += ['Codex Explanation Input', 'Codex Explanation']
if args.GPT3 == 'True':
column_labels += ['GPT-3 Output', 'GPT-3 Evaluation']
if args.GPT3_CoT == 'True':
column_labels += ['GPT-3 CoT Input', 'GPT-3 CoT Output', 'GPT-3 CoT Evaluation']
column_labels += ['Most Similar Questions']
openai.api_key = os.getenv('OpenAI_API_Key')
courses_to_zero_shot = ['18.01', '18.02', '18.03', '6.042', '18.05', '18.06', 'COMS3251']
MATH_sections_to_zero_shot = ['MATH_Algebra', 'MATH_Counting_&_Probability', 'MATH_Intermediate_Algebra',
'MATH_Number_Theory', 'MATH_Prealgebra', 'MATH_Precalculus']
questions_per_course = 25
questions_per_MATH_section = 15
codex_engine = "code-davinci-002"
gpt3_engine = "text-davinci-002"
engine_temperature = 0
engine_topP = 0
zero_shot_max_tokens = 256
explanation_max_tokens = 150
gpt3_max_tokens = 200
gpt3_CoT_max_tokens = 1000
codex_time_delay = 3
gpt3_time_delay = 1
#locations of embeddings and which indexes refer to which questions
courses_embeddings_location = 'code/course_embeddings.json'
courses_embeddings_indexes = {'18.01':[0, 24], '18.02':[25, 49],
'18.03':[50, 74], '6.042': [75,99],
'18.05':[100, 124], '18.06':[125, 149],
'COMS3251':[150,174]}
MATH_embeddings_location = 'code/MATH_embeddings.json'
MATH_embeddings_indexes = {'MATH_Algebra':[0, 14], 'MATH_Counting_&_Probability':[15, 29],
'MATH_Intermediate_Algebra':[30, 44], 'MATH_Number_Theory':[45, 59],
'MATH_Prealgebra':[60, 74], 'MATH_Precalculus':[75, 89]}
# for prompt formatting:
docstring_front = '''"""\n'''
docstring_back = '''\n"""\n'''
context_array = ['write a program', 'using sympy', 'using simulations']
prompt_prefix = 'that answers the following question:'
explanation_suffix = "\n\n'''\nHere's what the above code is doing:\n1."
CoT = "Let's think step by step."
def execute_zero_shot(courses, questions_per,
embeddings_location, embeddings_indexes):
"""
Runs zero-shot on questions_per questions for each course in courses.
An individual CSV file of the results is made for each course in courses.
The embeddings for all of the questions for all of the courses in courses are located in embeddings_location.
"""
all_embeddings = get_embeddings(embeddings_location)
for course in courses:
course_embeddings = all_embeddings[embeddings_indexes[course][0]:embeddings_indexes[course][1]+1]
questions = []
answers = []
for num in range(1, questions_per + 1):
if num < 10:
q_num = '0' + str(num)
else:
q_num = str(num)
json_location = './Data/' + course.split('_')[0] + '/' + course + '_Question_' + q_num + '.json'
with open(json_location, 'r') as f:
data = json.load(f)
raw_question = data['Original question']
answer_to_question = data['Program solution']
questions.append(raw_question)
answers.append(answer_to_question)
rows = []
for i in range(questions_per):
question = i + 1
original_question = questions[i]
question_answer = answers[i]
row = [question, original_question, question_answer]
print('Running Zero-Shot on ' + course + ' question ' + str(i+1) + '...')
start = time.time()
if args.Codex == 'True':
time.sleep(codex_time_delay) #to avoid an openai.error.RateLimitError
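                # Prompt format: the question wrapped in a docstring, i.e.
                # """write a program that answers the following question: <question>"""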
codex_input = docstring_front + context_array[0] + ' ' + prompt_prefix + ' ' + questions[i] + docstring_back
codex_output = openai.Completion.create(engine = codex_engine,
prompt = codex_input,
max_tokens = zero_shot_max_tokens,
temperature = engine_temperature,
top_p = engine_topP)['choices'][0]['text']
row += [codex_input, codex_output, '']
if args.Explain == 'True' and args.Codex == 'True':
time.sleep(codex_time_delay) #to avoid an openai.error.RateLimitError
explanation_input = codex_input + codex_output + explanation_suffix
explanation_output = openai.Completion.create(engine = codex_engine,
prompt = explanation_input,
max_tokens = explanation_max_tokens,
temperature = engine_temperature,
top_p = engine_topP)['choices'][0]['text']
row += [explanation_input, explanation_output]
if args.GPT3 == 'True':
time.sleep(gpt3_time_delay) #to avoid an openai.error.RateLimitError
gpt3_output = openai.Completion.create(engine = gpt3_engine,
prompt = original_question,
max_tokens = gpt3_max_tokens,
temperature = engine_temperature,
top_p = engine_topP)['choices'][0]['text']
row += [gpt3_output, '']
if args.GPT3_CoT == 'True':
time.sleep(gpt3_time_delay) #to avoid an openai.error.RateLimitError
gpt3_CoT_input = 'Q: ' + original_question + "\nA: " + CoT
gpt3_CoT_output = openai.Completion.create(engine = gpt3_engine,
prompt = gpt3_CoT_input,
max_tokens = gpt3_CoT_max_tokens,
temperature = engine_temperature,
top_p = engine_topP)['choices'][0]['text']
row += [gpt3_CoT_input, gpt3_CoT_output, '']
most_similar_questions = get_most_similar(course_embeddings,i)
row += [most_similar_questions]
end = time.time()
print('API call time: ' + str(end-start) + '\n')
rows.append(row)
info = pd.DataFrame(rows, columns=column_labels)
course_results_location = course + ' results.csv'
info.to_csv(course_results_location, index=False)
if __name__ == "__main__":
#zero-shot step for courses:
if args.Do_Courses == 'True':
execute_zero_shot(courses_to_zero_shot, questions_per_course,
courses_embeddings_location, courses_embeddings_indexes)
#zero-shot step for MATH benchmark:
if args.Do_MATH == 'True':
execute_zero_shot(MATH_sections_to_zero_shot, questions_per_MATH_section,
MATH_embeddings_location, MATH_embeddings_indexes) | [
"Q: PLACEHOLDER\nA: Let's think step by step.",
"that answers the following question:",
"docstring_front + context_array[0] + ' ' + prompt_prefix + ' ' + questions[i] + docstring_back",
"codex_input869fde5b-8af1-45be-aeaf-4b8c7c1b0a0dPLACEHOLDER\n\n'''\nHere's what the above code is doing:\n1."
] |
2024-01-10 | jannerm/ray | rllib~examples~env~cliff_walking_wall_env.py | import gym
from gym import spaces
ACTION_UP = 0
ACTION_RIGHT = 1
ACTION_DOWN = 2
ACTION_LEFT = 3
class CliffWalkingWallEnv(gym.Env):
"""Modified version of the CliffWalking environment from OpenAI Gym
with walls instead of a cliff.
### Description
The board is a 4x12 matrix, with (using NumPy matrix indexing):
- [3, 0] or obs==36 as the start at bottom-left
- [3, 11] or obs==47 as the goal at bottom-right
    - [3, 1..10] or obs==37...46 as the wall at bottom-center (replacing the cliff)
An episode terminates when the agent reaches the goal.
### Actions
There are 4 discrete deterministic actions:
- 0: move up
- 1: move right
- 2: move down
- 3: move left
You can also use the constants ACTION_UP, ACTION_RIGHT, ... defined above.
### Observations
There are 3x12 + 2 possible states, not including the walls. If an action
would move an agent into one of the walls, it simply stays in the same position.
### Reward
Each time step incurs -1 reward, except reaching the goal which gives +10 reward.
"""
def __init__(self, seed=42) -> None:
self.observation_space = spaces.Discrete(48)
self.action_space = spaces.Discrete(4)
self.observation_space.seed(seed)
self.action_space.seed(seed)
def reset(self):
self.position = 36
return self.position
def step(self, action):
x = self.position // 12
y = self.position % 12
# UP
if action == ACTION_UP:
x = max(x - 1, 0)
# RIGHT
elif action == ACTION_RIGHT:
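            # moving right from the start cell (36) would enter the wall at 37, so the move is blocked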
if self.position != 36:
y = min(y + 1, 11)
# DOWN
elif action == ACTION_DOWN:
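            # cells 25-34 sit directly above the wall cells 37-46, so moving down from them is blocked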
if self.position < 25 or self.position > 34:
x = min(x + 1, 3)
# LEFT
elif action == ACTION_LEFT:
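            # moving left from the goal cell (47) would enter the wall at 46, so the move is blocked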
if self.position != 47:
y = max(y - 1, 0)
else:
raise ValueError(f"action {action} not in {self.action_space}")
self.position = x * 12 + y
done = self.position == 47
reward = -1 if not done else 10
return self.position, reward, done, {}
| [] |
2024-01-10 | levy9527/rag-utils | qa_splitter.py | import logging
import os
import sys
from datetime import datetime
from typing import List, AnyStr
import uuid
import hashlib
import argparse
import openai
import chromadb
from chromadb import Settings
from chromadb.utils import embedding_functions
from dotenv import load_dotenv
import tiktoken
import re
TRUE = 'true'
load_dotenv()
AZURE_API_VERSION = '2023-07-01-preview'
OPENAI_API_TYPE = 'azure'
OPENAI_API_BASE = os.getenv("AZURE_OPENAI_ENDPOINT")
OPENAI_API_KEY = os.getenv("AZURE_OPENAI_KEY")
MAX_TOKENS = 4096
def main():
logging.basicConfig(level=logging.INFO)
#print(os.environ)
parser = argparse.ArgumentParser()
# Add the positional argument for the filename (required)
parser.add_argument("filename", help="markdown file to be split")
# Add the optional argument for the delimiter
parser.add_argument("--delimiter", help="Specify the delimiter string")
args = parser.parse_args()
filename = args.filename
delimiter = args.delimiter
if delimiter is None:
print("delimiter is not specified. example: --delimiter=问题:")
sys.exit(0)
logging.info('opening file...')
with open(filename, 'r', encoding='utf-8') as f:
lines = f.readlines()
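    # group the lines into one sublist per delimiter occurrence, e.g. [[question, answer line 1, ...], ...]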
regrouped_lines = regroup_by_delimiter(lines, delimiter)
    # Special handling for QA-style (question/answer) content
is_keyword_search = TRUE
answers = list(map(lambda x: '', range(len(regrouped_lines))))
chunks = []
if is_keyword_search == TRUE:
logging.info('special chunking: is_keyword_search')
chunks = list(map(lambda x: trim(x[0], delimiter), regrouped_lines))
answers = list(map(lambda x: "".join(line for line in x[1:]), regrouped_lines))
else:
logging.info("common chunking")
chunks = list(map(lambda x: "".join(line for line in x), regrouped_lines))
client = get_chroma()
collection = get_collection(client)
    # TODO: find a workaround for chunks whose token count exceeds the limit
for index, chunk in enumerate(chunks):
num = num_tokens_from_string(chunk)
# check token
if num > MAX_TOKENS:
logging.info(f"strlen exceed token limit: {num}")
sys.exit(1)
else:
logging.info('put data into chroma...')
            # Why a hash instead of a uuid? The data is too large to insert in one batch, so we must
            # tolerate retries and repeated inserts, and a hash-derived id stays stable across retries.
            # This does introduce a risk of hash collisions; sha224 is more than enough in practice.
            # If a collision ever blocks an insert, slightly edit the offending text and insert it again.
collection.upsert(
documents=[chunk],
metadatas=[{"source": os.path.splitext(filename)[0],
'index': index,
'is_keyword_search': is_keyword_search,
'answer': answers[index],
}],
ids=[get_hash(chunk)]
)
logging.info("job done!")
def trim(s, delimiter):
'''
remove delimiter and line feed
'''
sub = re.sub(delimiter, '', s)
return sub.replace('\n', '').strip()
def num_tokens_from_string(string: str, encoding_name="cl100k_base") -> int:
"""Returns the number of tokens in a text string."""
encoding = tiktoken.get_encoding(encoding_name)
num_tokens = len(encoding.encode(string))
logging.info(f"Token count: {num_tokens}")
return num_tokens
def regroup_by_delimiter(lines: List[AnyStr], delimiter: str):
    '''
    Currently only supports splitting on lines that start with the delimiter.
    Using QA pairs as an example, returns a 2D list [[QA1], [QA2], ...], where each QA group looks like
    [the question\n, answer line 1\n, answer line 2\n].
    '''
    logging.info('regroup_by_delimiter, {}'.format(delimiter))
result = []
subgroup = []
for line in lines:
if line.startswith(delimiter):
if subgroup:
result.append(subgroup)
subgroup = [line]
else:
subgroup.append(line)
if subgroup:
result.append(subgroup)
return result
def get_embedding(text, model="text-embedding-ada-002"):
logging.info('getting embeddings...')
openai.api_type = OPENAI_API_TYPE
openai.api_version = AZURE_API_VERSION
openai.api_base = os.getenv("AZURE_OPENAI_ENDPOINT") # Your Azure OpenAI resource's endpoint value.
openai.api_key = os.getenv("AZURE_OPENAI_KEY")
text = text.replace("\n", " ")
return openai.embeddings.create(input = [text], model=model).data[0].embedding
def get_chroma(host="10.201.0.32", port="8080"):
return chromadb.HttpClient(host, port, settings=Settings(allow_reset=True))
def get_collection(client):
metadata = {
"create_by": "levy",
"create_date": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
}
collection = client.get_or_create_collection('deinsight', metadata=metadata,
embedding_function=embedding_functions.OpenAIEmbeddingFunction(
api_key=OPENAI_API_KEY,
api_base=OPENAI_API_BASE,
api_type="azure",
api_version=AZURE_API_VERSION,
model_name="text-embedding-ada-002")
)
logging.info(collection)
return collection
def get_uuid():
random_uuid = uuid.uuid4()
return str(random_uuid)
def get_hash(content):
hash_object = hashlib.sha224()
# Convert the content to bytes and update the hash object
hash_object.update(content.encode('utf-8'))
# Get the hexadecimal representation of the hash
hash_value = hash_object.hexdigest()
return hash_value
if __name__ == '__main__':
main()
| [] |
2024-01-10 | blakepuls/tiktok-video-generator | src~utils~story.py | import openai
import json
import os
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
def generate_story(topic=None):
print("Generating story...")
prompt = """
Generate a compelling personal narrative that simulates a story one might share in profound conversation. The delivery should feel candid and authentic, as if recounted by an ordinary individual about a significant episode in their life. The language can be informal, mirroring everyday dialogue.
Adhere to the protagonist's gender provided.
The story must tackle an intriguing or challenging topic—something more profound than the run-of-the-mill life experiences. Think of scenarios that might spark lively debates on platforms like AITA on Reddit, or narratives that tug at heartstrings, culminating in an unexpected turn of events.
Guideline for your narrative:
- The topic should incite curiosity and engagement.
- The narrative should be captivating and unique, far from mundane.
- Avoid personal interjections, let the story unfold by itself.
- Initiate with an engaging, casual title like, "How I narrowly... " or "Why I'll never again... "
- Craft the narrative to feel intimate and immediate, akin to a gripping short story on a Reddit thread.
- Don't include summaries or explanations at the end. You may conclude with a brief one-liner reaction, if desired.
- Title should be crafted as a complete sentence.
Please format your response in JSON with the properties 'title', 'content', 'gender' (either 'male' or 'female'), and 'description'. Ensure to escape the quotes by adding a backslash before them. For instance, if your title is "How I narrowly... ", it should be formatted as \"How I narrowly... \". Refrain from using newline characters such as \n.
"""
# Check if a topic is provided and append to paragraph if true
if topic:
prompt += f"\nBase the story off this topic: {topic}"
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo", messages=[{"role": "user", "content": prompt}]
)
# Extract the generated story
story = completion.choices[0].message.content
# Parse the JSON output
story_dict = json.loads(story)
# Extract the title, content, and description
title = story_dict.get("title")
content = story_dict.get("content")
description = story_dict.get("description")
gender = story_dict.get("gender").lower()
# Remove escape characters as well as \n and \t
title = title.replace("\\", "").replace("\n", " ").replace("\t", "")
content = content.replace("\\", "").replace("\n", " ").replace("\t", "")
description = description.replace("\\", "").replace("\n", " ").replace("\t", "")
return title, content, description, gender
| [
"\n Generate a compelling personal narrative that simulates a story one might share in profound conversation. The delivery should feel candid and authentic, as if recounted by an ordinary individual about a significant episode in their life. The language can be informal, mirroring everyday dialogue.\n\n Adhere to the protagonist's gender provided. \n\n The story must tackle an intriguing or challenging topic—something more profound than the run-of-the-mill life experiences. Think of scenarios that might spark lively debates on platforms like AITA on Reddit, or narratives that tug at heartstrings, culminating in an unexpected turn of events.\n\n Guideline for your narrative:\n\n - The topic should incite curiosity and engagement.\n - The narrative should be captivating and unique, far from mundane.\n - Avoid personal interjections, let the story unfold by itself.\n - Initiate with an engaging, casual title like, \"How I narrowly... \" or \"Why I'll never again... \"\n - Craft the narrative to feel intimate and immediate, akin to a gripping short story on a Reddit thread.\n - Don't include summaries or explanations at the end. You may conclude with a brief one-liner reaction, if desired.\n - Title should be crafted as a complete sentence.\n\n Please format your response in JSON with the properties 'title', 'content', 'gender' (either 'male' or 'female'), and 'description'. Ensure to escape the quotes by adding a backslash before them. For instance, if your title is \"How I narrowly... \", it should be formatted as \"How I narrowly... \". Refrain from using newline characters such as \n.\n ",
"\nBase the story off this topic: PLACEHOLDER"
] |