date_collected | repo_name | file_name | file_contents | prompts
---|---|---|---|---|
2024-01-10 | gojira/langchain | langchain~llms~nlpcloud.py | """Wrapper around NLPCloud APIs."""
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Extra, root_validator
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
class NLPCloud(LLM):
"""Wrapper around NLPCloud large language models.
To use, you should have the ``nlpcloud`` python package installed, and the
environment variable ``NLPCLOUD_API_KEY`` set with your API key.
Example:
.. code-block:: python
from langchain.llms import NLPCloud
nlpcloud = NLPCloud(model="gpt-neox-20b")
"""
client: Any #: :meta private:
model_name: str = "finetuned-gpt-neox-20b"
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
min_length: int = 1
"""The minimum number of tokens to generate in the completion."""
max_length: int = 256
"""The maximum number of tokens to generate in the completion."""
length_no_input: bool = True
"""Whether min_length and max_length should include the length of the input."""
remove_input: bool = True
"""Remove input text from API response"""
remove_end_sequence: bool = True
"""Whether or not to remove the end sequence token."""
bad_words: List[str] = []
"""List of tokens not allowed to be generated."""
top_p: int = 1
"""Total probability mass of tokens to consider at each step."""
top_k: int = 50
"""The number of highest probability tokens to keep for top-k filtering."""
repetition_penalty: float = 1.0
"""Penalizes repeated tokens. 1.0 means no penalty."""
length_penalty: float = 1.0
"""Exponential penalty to the length."""
do_sample: bool = True
"""Whether to use sampling (True) or greedy decoding."""
num_beams: int = 1
"""Number of beams for beam search."""
early_stopping: bool = False
"""Whether to stop beam search at num_beams sentences."""
num_return_sequences: int = 1
"""How many completions to generate for each prompt."""
nlpcloud_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
nlpcloud_api_key = get_from_dict_or_env(
values, "nlpcloud_api_key", "NLPCLOUD_API_KEY"
)
try:
import nlpcloud
values["client"] = nlpcloud.Client(
values["model_name"], nlpcloud_api_key, gpu=True, lang="en"
)
except ImportError:
raise ValueError(
"Could not import nlpcloud python package. "
"Please it install it with `pip install nlpcloud`."
)
return values
@property
def _default_params(self) -> Mapping[str, Any]:
"""Get the default parameters for calling NLPCloud API."""
return {
"temperature": self.temperature,
"min_length": self.min_length,
"max_length": self.max_length,
"length_no_input": self.length_no_input,
"remove_input": self.remove_input,
"remove_end_sequence": self.remove_end_sequence,
"bad_words": self.bad_words,
"top_p": self.top_p,
"top_k": self.top_k,
"repetition_penalty": self.repetition_penalty,
"length_penalty": self.length_penalty,
"do_sample": self.do_sample,
"num_beams": self.num_beams,
"early_stopping": self.early_stopping,
"num_return_sequences": self.num_return_sequences,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "nlpcloud"
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Call out to NLPCloud's create endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional stop sequence. Only a single stop sequence is supported.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = nlpcloud("Tell me a joke.")
"""
if stop and len(stop) > 1:
raise ValueError(
"NLPCloud only supports a single stop sequence per generation."
"Pass in a list of length 1."
)
elif stop and len(stop) == 1:
end_sequence = stop[0]
else:
end_sequence = None
response = self.client.generation(
prompt, end_sequence=end_sequence, **self._default_params
)
return response["generated_text"]
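# Example (illustrative sketch; the model name, prompts, and stop sequences below
# are placeholders, not part of the documented API surface):
#
# >>> from langchain.llms import NLPCloud
# >>> llm = NLPCloud(model_name="finetuned-gpt-neox-20b", max_length=64)
# >>> llm("List three colors:", stop=["\n\n"])  # a single stop sequence is accepted
# >>> llm("Hi", stop=["\n", "###"])             # raises ValueError (more than one)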
| [] |
2024-01-10 | gojira/langchain | langchain~chains~combine_documents~map_reduce.py | """Combining documents by mapping a chain over them first, then combining results."""
from __future__ import annotations
from typing import Any, Callable, Dict, List, Optional, Protocol, Tuple
from pydantic import Extra, root_validator
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.docstore.document import Document
class CombineDocsProtocol(Protocol):
"""Interface for the combine_docs method."""
def __call__(self, docs: List[Document], **kwargs: Any) -> Tuple[str, dict]:
"""Interface for the combine_docs method."""
def _split_list_of_docs(
docs: List[Document], length_func: Callable, token_max: int, **kwargs: Any
) -> List[List[Document]]:
new_result_doc_list = []
_sub_result_docs = []
for doc in docs:
_sub_result_docs.append(doc)
_num_tokens = length_func(_sub_result_docs, **kwargs)
if _num_tokens > token_max:
if len(_sub_result_docs) == 1:
raise ValueError(
"A single document was longer than the context length,"
" we cannot handle this."
)
if len(_sub_result_docs) == 2:
raise ValueError(
"A single document was so long it could not be combined "
"with another document, we cannot handle this."
)
new_result_doc_list.append(_sub_result_docs[:-1])
_sub_result_docs = _sub_result_docs[-1:]
new_result_doc_list.append(_sub_result_docs)
return new_result_doc_list
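# Illustrative sketch of how _split_list_of_docs greedily packs documents under a
# token budget; the toy length function and the budget of 25 are assumptions made
# purely for this example.
#
# >>> docs = [Document(page_content="a" * 10) for _ in range(3)]
# >>> length = lambda ds, **kw: sum(len(d.page_content) for d in ds)
# >>> _split_list_of_docs(docs, length, token_max=25)
# # -> [[docs[0], docs[1]], [docs[2]]]  (two groups, each within the budget)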
def _collapse_docs(
docs: List[Document],
combine_document_func: CombineDocsProtocol,
**kwargs: Any,
) -> Document:
result, _ = combine_document_func(docs, **kwargs)
combined_metadata = {k: str(v) for k, v in docs[0].metadata.items()}
for doc in docs[1:]:
for k, v in doc.metadata.items():
if k in combined_metadata:
combined_metadata[k] += f", {v}"
else:
combined_metadata[k] = str(v)
return Document(page_content=result, metadata=combined_metadata)
class MapReduceDocumentsChain(BaseCombineDocumentsChain):
"""Combining documents by mapping a chain over them, then combining results."""
llm_chain: LLMChain
"""Chain to apply to each document individually."""
combine_document_chain: BaseCombineDocumentsChain
"""Chain to use to combine results of applying llm_chain to documents."""
collapse_document_chain: Optional[BaseCombineDocumentsChain] = None
"""Chain to use to collapse intermediary results if needed.
If None, will use the combine_document_chain."""
document_variable_name: str
"""The variable name in the llm_chain to put the documents in.
If only one variable in the llm_chain, this need not be provided."""
return_intermediate_steps: bool = False
"""Return the results of the map steps in the output."""
@property
def output_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
_output_keys = super().output_keys
if self.return_intermediate_steps:
_output_keys = _output_keys + ["intermediate_steps"]
return _output_keys
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def get_return_intermediate_steps(cls, values: Dict) -> Dict:
"""For backwards compatibility."""
if "return_map_steps" in values:
values["return_intermediate_steps"] = values["return_map_steps"]
del values["return_map_steps"]
return values
@root_validator(pre=True)
def get_default_document_variable_name(cls, values: Dict) -> Dict:
"""Get default document variable name, if not provided."""
if "document_variable_name" not in values:
llm_chain_variables = values["llm_chain"].prompt.input_variables
if len(llm_chain_variables) == 1:
values["document_variable_name"] = llm_chain_variables[0]
else:
raise ValueError(
"document_variable_name must be provided if there are "
"multiple llm_chain input_variables"
)
else:
llm_chain_variables = values["llm_chain"].prompt.input_variables
if values["document_variable_name"] not in llm_chain_variables:
raise ValueError(
f"document_variable_name {values['document_variable_name']} was "
f"not found in llm_chain input_variables: {llm_chain_variables}"
)
return values
@property
def _collapse_chain(self) -> BaseCombineDocumentsChain:
if self.collapse_document_chain is not None:
return self.collapse_document_chain
else:
return self.combine_document_chain
def combine_docs(
self, docs: List[Document], token_max: int = 3000, **kwargs: Any
) -> Tuple[str, dict]:
"""Combine documents in a map reduce manner.
Combine by mapping first chain over all documents, then reducing the results.
This reducing can be done recursively if needed (if there are many documents).
"""
results = self.llm_chain.apply(
# FYI - this is parallelized and so it is fast.
[{**{self.document_variable_name: d.page_content}, **kwargs} for d in docs]
)
return self._process_results(results, docs, token_max, **kwargs)
async def acombine_docs(
self, docs: List[Document], **kwargs: Any
) -> Tuple[str, dict]:
"""Combine documents in a map reduce manner.
Combine by mapping first chain over all documents, then reducing the results.
This reducing can be done recursively if needed (if there are many documents).
"""
results = await self.llm_chain.aapply(
# FYI - this is parallelized and so it is fast.
[{**{self.document_variable_name: d.page_content}, **kwargs} for d in docs]
)
return self._process_results(results, docs, **kwargs)
def _process_results(
self,
results: List[Dict],
docs: List[Document],
token_max: int = 3000,
**kwargs: Any,
) -> Tuple[str, dict]:
question_result_key = self.llm_chain.output_key
result_docs = [
Document(page_content=r[question_result_key], metadata=docs[i].metadata)
# This uses metadata from the docs, and the textual results from `results`
for i, r in enumerate(results)
]
length_func = self.combine_document_chain.prompt_length
num_tokens = length_func(result_docs, **kwargs)
while num_tokens is not None and num_tokens > token_max:
new_result_doc_list = _split_list_of_docs(
result_docs, length_func, token_max, **kwargs
)
result_docs = []
for docs in new_result_doc_list:
new_doc = _collapse_docs(
docs, self._collapse_chain.combine_docs, **kwargs
)
result_docs.append(new_doc)
num_tokens = self.combine_document_chain.prompt_length(
result_docs, **kwargs
)
if self.return_intermediate_steps:
_results = [r[self.llm_chain.output_key] for r in results]
extra_return_dict = {"intermediate_steps": _results}
else:
extra_return_dict = {}
output, _ = self.combine_document_chain.combine_docs(result_docs, **kwargs)
return output, extra_return_dict
@property
def _chain_type(self) -> str:
return "map_reduce_documents_chain"
| [] |
2024-01-10 | gojira/langchain | tests~integration_tests~vectorstores~test_elasticsearch.py | """Test ElasticSearch functionality."""
import logging
import os
import uuid
from typing import Generator, List, Union
import pytest
from elasticsearch import Elasticsearch
from langchain.docstore.document import Document
from langchain.document_loaders import TextLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores.elastic_vector_search import ElasticVectorSearch
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
logging.basicConfig(level=logging.DEBUG)
"""
cd tests/integration_tests/vectorstores/docker-compose
docker-compose -f elasticsearch.yml up
"""
class TestElasticsearch:
@pytest.fixture(scope="class", autouse=True)
def elasticsearch_url(self) -> Union[str, Generator[str, None, None]]:
"""Return the elasticsearch url."""
url = "http://localhost:9200"
yield url
es = Elasticsearch(hosts=url)
# Clear all indexes
index_names = es.indices.get(index="_all").keys()
for index_name in index_names:
# print(index_name)
es.indices.delete(index=index_name)
@pytest.fixture(scope="class", autouse=True)
def openai_api_key(self) -> Union[str, Generator[str, None, None]]:
"""Return the OpenAI API key."""
openai_api_key = os.getenv("OPENAI_API_KEY")
if not openai_api_key:
raise ValueError("OPENAI_API_KEY environment variable is not set")
yield openai_api_key
@pytest.fixture(scope="class")
def documents(self) -> Generator[List[Document], None, None]:
"""Return a generator that yields a list of documents."""
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
documents = TextLoader(
os.path.join(os.path.dirname(__file__), "fixtures", "sharks.txt")
).load()
yield text_splitter.split_documents(documents)
def test_similarity_search_without_metadata(self, elasticsearch_url: str) -> None:
"""Test end to end construction and search without metadata."""
texts = ["foo", "bar", "baz"]
docsearch = ElasticVectorSearch.from_texts(
texts, FakeEmbeddings(), elasticsearch_url=elasticsearch_url
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_similarity_search_with_metadata(self, elasticsearch_url: str) -> None:
"""Test end to end construction and search with metadata."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = ElasticVectorSearch.from_texts(
texts,
FakeEmbeddings(),
metadatas=metadatas,
elasticsearch_url=elasticsearch_url,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"page": 0})]
@pytest.mark.vcr(ignore_localhost=True)
def test_default_index_from_documents(
self, documents: List[Document], openai_api_key: str, elasticsearch_url: str
) -> None:
"""This test checks the construction of a default
ElasticSearch index using the 'from_documents'."""
embedding = OpenAIEmbeddings(openai_api_key=openai_api_key)
elastic_vector_search = ElasticVectorSearch.from_documents(
documents=documents,
embedding=embedding,
elasticsearch_url=elasticsearch_url,
)
search_result = elastic_vector_search.similarity_search("sharks")
print(search_result)
assert len(search_result) != 0
@pytest.mark.vcr(ignore_localhost=True)
def test_custom_index_from_documents(
self, documents: List[Document], openai_api_key: str, elasticsearch_url: str
) -> None:
"""This test checks the construction of a custom
ElasticSearch index using the 'from_documents'."""
index_name = f"custom_index_{uuid.uuid4().hex}"
embedding = OpenAIEmbeddings(openai_api_key=openai_api_key)
elastic_vector_search = ElasticVectorSearch.from_documents(
documents=documents,
embedding=embedding,
elasticsearch_url=elasticsearch_url,
index_name=index_name,
)
es = Elasticsearch(hosts=elasticsearch_url)
index_names = es.indices.get(index="_all").keys()
assert index_name in index_names
search_result = elastic_vector_search.similarity_search("sharks")
print(search_result)
assert len(search_result) != 0
@pytest.mark.vcr(ignore_localhost=True)
def test_custom_index_add_documents(
self, documents: List[Document], openai_api_key: str, elasticsearch_url: str
) -> None:
"""This test checks the construction of a custom
ElasticSearch index using the 'add_documents'."""
index_name = f"custom_index_{uuid.uuid4().hex}"
embedding = OpenAIEmbeddings(openai_api_key=openai_api_key)
elastic_vector_search = ElasticVectorSearch(
embedding=embedding,
elasticsearch_url=elasticsearch_url,
index_name=index_name,
)
es = Elasticsearch(hosts=elasticsearch_url)
elastic_vector_search.add_documents(documents)
index_names = es.indices.get(index="_all").keys()
assert index_name in index_names
search_result = elastic_vector_search.similarity_search("sharks")
print(search_result)
assert len(search_result) != 0
def test_custom_index_add_documents_to_exists_store(self) -> None:
# TODO: implement it
pass
| [] |
2024-01-10 | gojira/langchain | langchain~vectorstores~redis.py | """Wrapper around Redis vector database."""
from __future__ import annotations
import json
import logging
import uuid
from typing import Any, Callable, Dict, Iterable, List, Mapping, Optional, Tuple
import numpy as np
from pydantic import BaseModel, root_validator
from redis.client import Redis as RedisType
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.base import VectorStore
logger = logging.getLogger()
# required modules
REDIS_REQUIRED_MODULES = [
{"name": "search", "ver": 20400},
]
def _check_redis_module_exist(client: RedisType, modules: List[dict]) -> None:
"""Check if the correct Redis modules are installed."""
installed_modules = client.module_list()
installed_modules = {
module[b"name"].decode("utf-8"): module for module in installed_modules
}
for module in modules:
if module["name"] not in installed_modules or int(
installed_modules[module["name"]][b"ver"]
) < int(module["ver"]):
error_message = (
"You must add the RediSearch (>= 2.4) module from Redis Stack. "
"Please refer to Redis Stack docs: https://redis.io/docs/stack/"
)
logging.error(error_message)
raise ValueError(error_message)
def _check_index_exists(client: RedisType, index_name: str) -> bool:
"""Check if Redis index exists."""
try:
client.ft(index_name).info()
except: # noqa: E722
logger.info("Index does not exist")
return False
logger.info("Index already exists")
return True
def _redis_key(prefix: str) -> str:
"""Redis key schema for a given prefix."""
return f"{prefix}:{uuid.uuid4().hex}"
def _redis_prefix(index_name: str) -> str:
"""Redis key prefix for a given index."""
return f"doc:{index_name}"
class Redis(VectorStore):
def __init__(
self,
redis_url: str,
index_name: str,
embedding_function: Callable,
**kwargs: Any,
):
"""Initialize with necessary components."""
try:
import redis
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
self.embedding_function = embedding_function
self.index_name = index_name
try:
# connect to redis from url
redis_client = redis.from_url(redis_url, **kwargs)
# check if redis has redisearch module installed
_check_redis_module_exist(redis_client, REDIS_REQUIRED_MODULES)
except ValueError as e:
raise ValueError(f"Redis failed to connect: {e}")
self.client = redis_client
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Add texts data to an existing index."""
prefix = _redis_prefix(self.index_name)
keys = kwargs.get("keys")
ids = []
# Write data to redis
pipeline = self.client.pipeline(transaction=False)
for i, text in enumerate(texts):
# Use provided key otherwise use default key
key = keys[i] if keys else _redis_key(prefix)
metadata = metadatas[i] if metadatas else {}
pipeline.hset(
key,
mapping={
"content": text,
"content_vector": np.array(
self.embedding_function(text), dtype=np.float32
).tobytes(),
"metadata": json.dumps(metadata),
},
)
ids.append(key)
pipeline.execute()
return ids
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""
Returns the most similar indexed documents to the query text.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
Returns:
List[Document]: A list of documents that are most similar to the query text.
"""
docs_and_scores = self.similarity_search_with_score(query, k=k)
return [doc for doc, _ in docs_and_scores]
def similarity_search_limit_score(
self, query: str, k: int = 4, score_threshold: float = 0.2, **kwargs: Any
) -> List[Document]:
"""
Returns the most similar indexed documents to the query text within the
score_threshold range.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
score_threshold (float): The minimum matching score required for a document
to be considered a match. Defaults to 0.2.
Because the similarity calculation algorithm is based on cosine similarity,
the smaller the angle, the higher the similarity.
Returns:
List[Document]: A list of documents that are most similar to the query text,
including the match score for each document.
Note:
If there are no documents that satisfy the score_threshold value,
an empty list is returned.
"""
docs_and_scores = self.similarity_search_with_score(query, k=k)
return [doc for doc, score in docs_and_scores if score < score_threshold]
def similarity_search_with_score(
self, query: str, k: int = 4
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query and score for each
"""
try:
from redis.commands.search.query import Query
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
# Creates embedding vector from user query
embedding = self.embedding_function(query)
# Prepare the Query
return_fields = ["metadata", "content", "vector_score"]
vector_field = "content_vector"
hybrid_fields = "*"
base_query = (
f"{hybrid_fields}=>[KNN {k} @{vector_field} $vector AS vector_score]"
)
redis_query = (
Query(base_query)
.return_fields(*return_fields)
.sort_by("vector_score")
.paging(0, k)
.dialect(2)
)
params_dict: Mapping[str, str] = {
"vector": np.array(embedding) # type: ignore
.astype(dtype=np.float32)
.tobytes()
}
# perform vector search
results = self.client.ft(self.index_name).search(redis_query, params_dict)
docs = [
(
Document(
page_content=result.content, metadata=json.loads(result.metadata)
),
float(result.vector_score),
)
for result in results.docs
]
return docs
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
index_name: Optional[str] = None,
**kwargs: Any,
) -> Redis:
"""Construct RediSearch wrapper from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Creates a new index for the embeddings in the RediSearch instance.
3. Adds the documents to the newly created RediSearch index.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain.vectorstores.redis import Redis
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
redisearch = Redis.from_texts(
texts,
embeddings,
redis_url="redis://username:password@localhost:6379"
)
"""
redis_url = get_from_dict_or_env(kwargs, "redis_url", "REDIS_URL")
try:
import redis
from redis.commands.search.field import TextField, VectorField
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
try:
# We need to first remove redis_url from kwargs,
# otherwise passing it to Redis will result in an error.
kwargs.pop("redis_url")
client = redis.from_url(url=redis_url, **kwargs)
# check if redis has redisearch module installed
_check_redis_module_exist(client, REDIS_REQUIRED_MODULES)
except ValueError as e:
raise ValueError(f"Redis failed to connect: {e}")
# Create embeddings over documents
embeddings = embedding.embed_documents(texts)
# Name of the search index if not given
if not index_name:
index_name = uuid.uuid4().hex
prefix = _redis_prefix(index_name) # prefix for the document keys
# Check if index exists
if not _check_index_exists(client, index_name):
# Constants
dim = len(embeddings[0])
distance_metric = (
"COSINE" # distance metric for the vectors (ex. COSINE, IP, L2)
)
schema = (
TextField(name="content"),
TextField(name="metadata"),
VectorField(
"content_vector",
"FLAT",
{
"TYPE": "FLOAT32",
"DIM": dim,
"DISTANCE_METRIC": distance_metric,
},
),
)
# Create Redis Index
client.ft(index_name).create_index(
fields=schema,
definition=IndexDefinition(prefix=[prefix], index_type=IndexType.HASH),
)
# Write data to Redis
pipeline = client.pipeline(transaction=False)
for i, text in enumerate(texts):
key = _redis_key(prefix)
metadata = metadatas[i] if metadatas else {}
pipeline.hset(
key,
mapping={
"content": text,
"content_vector": np.array(
embeddings[i], dtype=np.float32
).tobytes(),
"metadata": json.dumps(metadata),
},
)
pipeline.execute()
return cls(redis_url, index_name, embedding.embed_query)
@staticmethod
def drop_index(
index_name: str,
delete_documents: bool,
**kwargs: Any,
) -> bool:
"""
Drop a Redis search index.
Args:
index_name (str): Name of the index to drop.
delete_documents (bool): Whether to drop the associated documents.
Returns:
bool: Whether or not the drop was successful.
"""
redis_url = get_from_dict_or_env(kwargs, "redis_url", "REDIS_URL")
try:
import redis
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
try:
# We need to first remove redis_url from kwargs,
# otherwise passing it to Redis will result in an error.
kwargs.pop("redis_url")
client = redis.from_url(url=redis_url, **kwargs)
except ValueError as e:
raise ValueError(f"Your redis connected error: {e}")
# Check if index exists
try:
client.ft(index_name).dropindex(delete_documents)
logger.info("Drop index")
return True
except: # noqa: E722
# Index not exist
return False
@classmethod
def from_existing_index(
cls,
embedding: Embeddings,
index_name: str,
**kwargs: Any,
) -> Redis:
"""Connect to an existing Redis index."""
redis_url = get_from_dict_or_env(kwargs, "redis_url", "REDIS_URL")
try:
import redis
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
try:
# We need to first remove redis_url from kwargs,
# otherwise passing it to Redis will result in an error.
kwargs.pop("redis_url")
client = redis.from_url(url=redis_url, **kwargs)
# check if redis has redisearch module installed
_check_redis_module_exist(client, REDIS_REQUIRED_MODULES)
# ensure that the index already exists
assert _check_index_exists(
client, index_name
), f"Index {index_name} does not exist"
except Exception as e:
raise ValueError(f"Redis failed to connect: {e}")
return cls(redis_url, index_name, embedding.embed_query)
def as_retriever(self, **kwargs: Any) -> BaseRetriever:
return RedisVectorStoreRetriever(vectorstore=self, **kwargs)
class RedisVectorStoreRetriever(BaseRetriever, BaseModel):
vectorstore: Redis
search_type: str = "similarity"
k: int = 4
score_threshold: float = 0.4
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@root_validator()
def validate_search_type(cls, values: Dict) -> Dict:
"""Validate search type."""
if "search_type" in values:
search_type = values["search_type"]
if search_type not in ("similarity", "similarity_limit"):
raise ValueError(f"search_type of {search_type} not allowed.")
return values
def get_relevant_documents(self, query: str) -> List[Document]:
if self.search_type == "similarity":
docs = self.vectorstore.similarity_search(query, k=self.k)
elif self.search_type == "similarity_limit":
docs = self.vectorstore.similarity_search_limit_score(
query, k=self.k, score_threshold=self.score_threshold
)
else:
raise ValueError(f"search_type of {self.search_type} not allowed.")
return docs
async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError("RedisVectorStoreRetriever does not support async")
| [] |
2024-01-10 | gojira/langchain | langchain~llms~gpt4all.py | """Wrapper for the GPT4All model."""
from typing import Any, Dict, List, Mapping, Optional, Set
from pydantic import Extra, Field, root_validator
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
class GPT4All(LLM):
r"""Wrapper around GPT4All language models.
To use, you should have the ``pyllamacpp`` python package installed, the
pre-trained model file, and the model's config information.
Example:
.. code-block:: python
from langchain.llms import GPT4All
model = GPT4All(model="./models/gpt4all-model.bin", n_ctx=512, n_threads=8)
# Simplest invocation
response = model("Once upon a time, ")
"""
model: str
"""Path to the pre-trained GPT4All model file."""
n_ctx: int = Field(512, alias="n_ctx")
"""Token context window."""
n_parts: int = Field(-1, alias="n_parts")
"""Number of parts to split the model into.
If -1, the number of parts is automatically determined."""
seed: int = Field(0, alias="seed")
"""Seed. If -1, a random seed is used."""
f16_kv: bool = Field(False, alias="f16_kv")
"""Use half-precision for key/value cache."""
logits_all: bool = Field(False, alias="logits_all")
"""Return logits for all tokens, not just the last token."""
vocab_only: bool = Field(False, alias="vocab_only")
"""Only load the vocabulary, no weights."""
use_mlock: bool = Field(False, alias="use_mlock")
"""Force system to keep model in RAM."""
embedding: bool = Field(False, alias="embedding")
"""Use embedding mode only."""
n_threads: Optional[int] = Field(4, alias="n_threads")
"""Number of threads to use."""
n_predict: Optional[int] = 256
"""The maximum number of tokens to generate."""
temp: Optional[float] = 0.8
"""The temperature to use for sampling."""
top_p: Optional[float] = 0.95
"""The top-p value to use for sampling."""
top_k: Optional[int] = 40
"""The top-k value to use for sampling."""
echo: Optional[bool] = False
"""Whether to echo the prompt."""
stop: Optional[List[str]] = []
"""A list of strings to stop generation when encountered."""
repeat_last_n: Optional[int] = 64
"Last n tokens to penalize"
repeat_penalty: Optional[float] = 1.3
"""The penalty to apply to repeated tokens."""
n_batch: int = Field(1, alias="n_batch")
"""Batch size for prompt processing."""
streaming: bool = False
"""Whether to stream the results or not."""
client: Any = None #: :meta private:
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
"seed": self.seed,
"n_predict": self.n_predict,
"n_threads": self.n_threads,
"n_batch": self.n_batch,
"repeat_last_n": self.repeat_last_n,
"repeat_penalty": self.repeat_penalty,
"top_k": self.top_k,
"top_p": self.top_p,
"temp": self.temp,
}
@staticmethod
def _llama_param_names() -> Set[str]:
"""Get the identifying parameters."""
return {
"seed",
"n_ctx",
"n_parts",
"f16_kv",
"logits_all",
"vocab_only",
"use_mlock",
"embedding",
}
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in the environment."""
try:
from pyllamacpp.model import Model as GPT4AllModel
llama_keys = cls._llama_param_names()
model_kwargs = {k: v for k, v in values.items() if k in llama_keys}
values["client"] = GPT4AllModel(
ggml_model=values["model"],
**model_kwargs,
)
except ImportError:
raise ValueError(
"Could not import pyllamacpp python package. "
"Please install it with `pip install pyllamacpp`."
)
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model,
**self._default_params,
**{
k: v
for k, v in self.__dict__.items()
if k in GPT4All._llama_param_names()
},
}
@property
def _llm_type(self) -> str:
"""Return the type of llm."""
return "gpt4all"
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
r"""Call out to GPT4All's generate method.
Args:
prompt: The prompt to pass into the model.
stop: A list of strings to stop generation when encountered.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "Once upon a time, "
response = model(prompt, n_predict=55)
"""
text = self.client.generate(
prompt,
**self._default_params,
)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
| [] |
2024-01-10 | gojira/langchain | langchain~vectorstores~deeplake.py | """Wrapper around Activeloop Deep Lake."""
from __future__ import annotations
import logging
import uuid
from functools import partial
from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import numpy as np
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
logger = logging.getLogger()
distance_metric_map = {
"l2": lambda a, b: np.linalg.norm(a - b, axis=1, ord=2),
"l1": lambda a, b: np.linalg.norm(a - b, axis=1, ord=1),
"max": lambda a, b: np.linalg.norm(a - b, axis=1, ord=np.inf),
"cos": lambda a, b: np.dot(a, b.T)
/ (np.linalg.norm(a) * np.linalg.norm(b, axis=1)),
"dot": lambda a, b: np.dot(a, b.T),
}
def vector_search(
query_embedding: np.ndarray,
data_vectors: np.ndarray,
distance_metric: str = "L2",
k: Optional[int] = 4,
) -> Tuple[List, List]:
"""Naive search for nearest neighbors
args:
query_embedding: np.ndarray
data_vectors: np.ndarray
k (int): number of nearest neighbors
distance_metric: distance function 'L2' for Euclidean, 'L1' for Nuclear, 'Max'
l-infinity distance, 'cos' for cosine similarity, 'dot' for dot product
returns:
nearest_indices: List, indices of nearest neighbors
distances: List, distances to the nearest neighbors
"""
# Calculate the distance between the query_vector and all data_vectors
distances = distance_metric_map[distance_metric](query_embedding, data_vectors)
nearest_indices = np.argsort(distances)
nearest_indices = (
nearest_indices[::-1][:k] if distance_metric in ["cos"] else nearest_indices[:k]
)
return nearest_indices.tolist(), distances[nearest_indices].tolist()
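# Illustrative sketch of vector_search on toy vectors (the values are made up):
#
# >>> data = np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0]], dtype=np.float32)
# >>> query = np.array([1.0, 0.0], dtype=np.float32)
# >>> vector_search(query, data, distance_metric="l2", k=2)
# ([1, 2], [0.0, 1.0])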
def dp_filter(x: dict, filter: Dict[str, str]) -> bool:
"""Filter helper function for Deep Lake"""
metadata = x["metadata"].data()["value"]
return all(k in metadata and v == metadata[k] for k, v in filter.items())
class DeepLake(VectorStore):
"""Wrapper around Deep Lake, a data lake for deep learning applications.
We implement naive similarity search and filtering for fast prototyping,
but it can be extended with Tensor Query Language (TQL) for production use cases
over billion rows.
Why Deep Lake?
- Not only stores embeddings, but also the original data with version control.
- Serverless, doesn't require another service and can be used with major
cloud providers (S3, GCS, etc.)
- More than just a multi-modal vector store. You can use the dataset
to fine-tune your own LLM models.
To use, you should have the ``deeplake`` python package installed.
Example:
.. code-block:: python
from langchain.vectorstores import DeepLake
from langchain.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectorstore = DeepLake("langchain_store", embeddings.embed_query)
"""
_LANGCHAIN_DEFAULT_DEEPLAKE_PATH = "mem://langchain"
def __init__(
self,
dataset_path: str = _LANGCHAIN_DEFAULT_DEEPLAKE_PATH,
token: Optional[str] = None,
embedding_function: Optional[Embeddings] = None,
read_only: Optional[bool] = None,
) -> None:
"""Initialize with Deep Lake client."""
try:
import deeplake
except ImportError:
raise ValueError(
"Could not import deeplake python package. "
"Please install it with `pip install deeplake`."
)
self._deeplake = deeplake
if deeplake.exists(dataset_path, token=token):
self.ds = deeplake.load(dataset_path, token=token, read_only=read_only)
logger.warning(
f"Deep Lake Dataset in {dataset_path} already exists, "
f"loading from the storage"
)
self.ds.summary()
else:
self.ds = deeplake.empty(dataset_path, token=token, overwrite=True)
with self.ds:
self.ds.create_tensor(
"text",
htype="text",
create_id_tensor=False,
create_sample_info_tensor=False,
create_shape_tensor=False,
)
self.ds.create_tensor(
"metadata",
htype="json",
create_id_tensor=False,
create_sample_info_tensor=False,
create_shape_tensor=False,
)
self.ds.create_tensor(
"embedding",
htype="generic",
create_id_tensor=False,
create_sample_info_tensor=False,
create_shape_tensor=False,
)
self.ds.create_tensor(
"ids",
htype="text",
create_id_tensor=False,
create_sample_info_tensor=False,
create_shape_tensor=False,
)
self._embedding_function = embedding_function
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts (Iterable[str]): Texts to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
ids (Optional[List[str]], optional): Optional list of IDs.
Returns:
List[str]: List of IDs of the added texts.
"""
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
text_list = list(texts)
if self._embedding_function is None:
embeddings: Sequence[Optional[List[float]]] = [None] * len(text_list)
else:
embeddings = self._embedding_function.embed_documents(text_list)
if metadatas is None:
metadatas = [{}] * len(text_list)
elements = zip(text_list, embeddings, metadatas, ids)
@self._deeplake.compute
def ingest(sample_in: list, sample_out: list) -> None:
s = {
"text": sample_in[0],
"embedding": sample_in[1],
"metadata": sample_in[2],
"ids": sample_in[3],
}
sample_out.append(s)
ingest().eval(list(elements), self.ds)
self.ds.commit(allow_empty=True)
return ids
def search(
self,
query: Optional[str] = None,
embedding: Optional[List[float]] = None,
k: int = 4,
distance_metric: str = "L2",
use_maximal_marginal_relevance: Optional[bool] = False,
fetch_k: Optional[int] = 20,
filter: Optional[Dict[str, str]] = None,
return_score: Optional[bool] = False,
**kwargs: Any,
) -> Union[List[Document], List[Tuple[Document, float]]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
embedding: Embedding function to use. Defaults to None.
k: Number of Documents to return. Defaults to 4.
distance_metric: `L2` for Euclidean, `L1` for Nuclear,
`max` L-infinity distance, `cos` for cosine similarity,
'dot' for dot product. Defaults to `L2`.
filter: Attribute filter by metadata example {'key': 'value'}.
Defaults to None.
use_maximal_marginal_relevance: Whether to use maximal marginal relevance.
Defaults to False.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
return_score: Whether to return the score. Defaults to False.
Returns:
List of Documents selected by the specified distance metric,
if return_score True, return a tuple of (Document, score)
"""
view = self.ds
# attribute based filtering
if filter is not None:
view = view.filter(partial(dp_filter, filter=filter))
if len(view) == 0:
return []
if self._embedding_function is None:
view = view.filter(lambda x: query in x["text"].data()["value"])
scores = [1.0] * len(view)
if use_maximal_marginal_relevance:
raise ValueError(
"For MMR search, you must specify an embedding function on"
"creation."
)
else:
emb = embedding or self._embedding_function.embed_query(
query
) # type: ignore
query_emb = np.array(emb, dtype=np.float32)
embeddings = view.embedding.numpy()
k_search = fetch_k if use_maximal_marginal_relevance else k
indices, scores = vector_search(
query_emb,
embeddings,
k=k_search,
distance_metric=distance_metric.lower(),
)
view = view[indices]
if use_maximal_marginal_relevance:
indices = maximal_marginal_relevance(
query_emb, embeddings[indices], k=min(k, len(indices))
)
view = view[indices]
scores = [scores[i] for i in indices]
docs = [
Document(
page_content=el["text"].data()["value"],
metadata=el["metadata"].data()["value"],
)
for el in view
]
if return_score:
return [(doc, score) for doc, score in zip(docs, scores)]
return docs
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
distance_metric: `L2` for Euclidean, `L1` for Nuclear, `max`
L-infinity distance, `cos` for cosine similarity, 'dot' for dot product.
Defaults to `L2`.
filter: Attribute filter by metadata example {'key': 'value'}.
Defaults to None.
use_maximal_marginal_relevance: Whether to use maximal marginal relevance.
Defaults to False.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
return_score: Whether to return the score. Defaults to False.
Returns:
List of Documents most similar to the query vector.
"""
return self.search(query=query, k=k, **kwargs)
def similarity_search_by_vector(
self, embedding: List[float], k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query vector.
"""
return self.search(embedding=embedding, k=k, **kwargs)
def similarity_search_with_score(
self,
query: str,
distance_metric: str = "L2",
k: int = 4,
filter: Optional[Dict[str, str]] = None,
) -> List[Tuple[Document, float]]:
"""Run similarity search with Deep Lake with distance returned.
Args:
query (str): Query text to search for.
distance_metric: `L2` for Euclidean, `L1` for Nuclear, `max` L-infinity
distance, `cos` for cosine similarity, 'dot' for dot product.
Defaults to `L2`.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of documents most similar to the query
text with distance in float.
"""
return self.search(
query=query,
k=k,
filter=filter,
return_score=True,
distance_metric=distance_metric,
)
def max_marginal_relevance_search_by_vector(
self, embedding: List[float], k: int = 4, fetch_k: int = 20
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Returns:
List of Documents selected by maximal marginal relevance.
"""
return self.search(
embedding=embedding,
k=k,
fetch_k=fetch_k,
use_maximal_marginal_relevance=True,
)
def max_marginal_relevance_search(
self, query: str, k: int = 4, fetch_k: int = 20
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Returns:
List of Documents selected by maximal marginal relevance.
"""
if self._embedding_function is None:
raise ValueError(
"For MMR search, you must specify an embedding function on" "creation."
)
return self.search(
query=query, k=k, fetch_k=fetch_k, use_maximal_marginal_relevance=True
)
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Optional[Embeddings] = None,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
dataset_path: str = _LANGCHAIN_DEFAULT_DEEPLAKE_PATH,
**kwargs: Any,
) -> DeepLake:
"""Create a Deep Lake dataset from a raw documents.
If a dataset_path is specified, the dataset will be persisted there.
Otherwise, the data will be ephemeral in-memory.
Args:
dataset_path (str): The full path to the dataset. Can be:
- Deep Lake cloud path of the form ``hub://username/dataset_name``.
To write to Deep Lake cloud datasets,
ensure that you are logged in to Deep Lake
(use 'activeloop login' from command line)
- AWS S3 path of the form ``s3://bucketname/path/to/dataset``.
Credentials are required in either the environment
- Google Cloud Storage path of the form
``gcs://bucketname/path/to/dataset``. Credentials are required
in either the environment
- Local file system path of the form ``./path/to/dataset`` or
``~/path/to/dataset`` or ``path/to/dataset``.
- In-memory path of the form ``mem://path/to/dataset`` which doesn't
save the dataset, but keeps it in memory instead.
Should be used only for testing as it does not persist.
texts (List[str]): List of texts to add.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
ids (Optional[List[str]]): List of document IDs. Defaults to None.
Returns:
DeepLake: Deep Lake dataset.
"""
deeplake_dataset = cls(
dataset_path=dataset_path,
embedding_function=embedding,
)
deeplake_dataset.add_texts(texts=texts, metadatas=metadatas, ids=ids)
return deeplake_dataset
def delete(
self,
ids: Optional[List[str]] = None,
filter: Optional[Dict[str, str]] = None,
delete_all: Optional[bool] = None,
) -> bool:
"""Delete the entities in the dataset
Args:
ids (Optional[List[str]], optional): The document_ids to delete.
Defaults to None.
filter (Optional[Dict[str, str]], optional): The filter to delete by.
Defaults to None.
delete_all (Optional[bool], optional): Whether to drop the dataset.
Defaults to None.
"""
if delete_all:
self.ds.delete()
return True
view = None
if ids:
view = self.ds.filter(lambda x: x["ids"].data()["value"] in ids)
ids = list(view.sample_indices)
if filter:
if view is None:
view = self.ds
view = view.filter(partial(dp_filter, filter=filter))
ids = list(view.sample_indices)
with self.ds:
for id in sorted(ids)[::-1]:
self.ds.pop(id)
self.ds.commit(f"deleted {len(ids)} samples", allow_empty=True)
return True
def delete_dataset(self) -> None:
"""Delete the collection."""
self.delete(delete_all=True)
def persist(self) -> None:
"""Persist the collection."""
self.ds.flush()
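# Illustrative sketch of metadata filtering and deletion; the texts and metadata
# values below are placeholders.
#
# >>> from langchain.embeddings.openai import OpenAIEmbeddings
# >>> db = DeepLake.from_texts(
# ...     ["foo", "bar"],
# ...     OpenAIEmbeddings(),
# ...     metadatas=[{"year": "2022"}, {"year": "2023"}],
# ... )
# >>> db.similarity_search("foo", k=1, filter={"year": "2022"})
# >>> db.delete(filter={"year": "2023"})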
| [] |
2024-01-10 | gojira/langchain | langchain~chains~moderation.py | """Pass input through a moderation endpoint."""
from typing import Any, Dict, List, Optional
from pydantic import root_validator
from langchain.chains.base import Chain
from langchain.utils import get_from_dict_or_env
class OpenAIModerationChain(Chain):
"""Pass input through a moderation endpoint.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.chains import OpenAIModerationChain
moderation = OpenAIModerationChain()
"""
client: Any #: :meta private:
model_name: Optional[str] = None
"""Moderation model name to use."""
error: bool = False
"""Whether or not to error if bad content was found."""
input_key: str = "input" #: :meta private:
output_key: str = "output" #: :meta private:
openai_api_key: Optional[str] = None
openai_organization: Optional[str] = None
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
openai_organization = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
try:
import openai
openai.api_key = openai_api_key
if openai_organization:
openai.organization = openai_organization
values["client"] = openai.Moderation
except ImportError:
raise ValueError(
"Could not import openai python package. "
"Please it install it with `pip install openai`."
)
return values
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
def _moderate(self, text: str, results: dict) -> str:
if results["flagged"]:
error_str = "Text was found that violates OpenAI's content policy."
if self.error:
raise ValueError(error_str)
else:
return error_str
return text
def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
text = inputs[self.input_key]
results = self.client.create(text)
output = self._moderate(text, results["results"][0])
return {self.output_key: output}
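# Illustrative sketch: with error=True the chain raises instead of returning the
# policy message; the input strings are placeholders.
#
# >>> moderation = OpenAIModerationChain(error=True)
# >>> moderation.run("a harmless sentence")   # returns the text unchanged
# >>> moderation.run(some_flagged_text)       # raises ValueError if flagged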
| [] |
2024-01-10 | gojira/langchain | langchain~memory~buffer_window.py | from typing import Any, Dict, List
from langchain.memory.chat_memory import BaseChatMemory
from langchain.schema import BaseMessage, get_buffer_string
class ConversationBufferWindowMemory(BaseChatMemory):
"""Buffer for storing conversation memory."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
memory_key: str = "history" #: :meta private:
k: int = 5
@property
def buffer(self) -> List[BaseMessage]:
"""String buffer of memory."""
return self.chat_memory.messages
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return history buffer."""
if self.return_messages:
buffer: Any = self.buffer[-self.k * 2 :]
else:
buffer = get_buffer_string(
self.buffer[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
return {self.memory_key: buffer}
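# Illustrative sketch: with k=1 only the last human/AI exchange (two messages) is
# kept in the rendered history; the conversation below is a placeholder.
#
# >>> memory = ConversationBufferWindowMemory(k=1)
# >>> memory.save_context({"input": "hi"}, {"output": "hello"})
# >>> memory.save_context({"input": "how are you?"}, {"output": "fine"})
# >>> memory.load_memory_variables({})
# {'history': 'Human: how are you?\nAI: fine'}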
| [] |
2024-01-10 | gojira/langchain | langchain~document_loaders~googledrive.py | """Loader that loads data from Google Drive."""
# Prerequisites:
# 1. Create a Google Cloud project
# 2. Enable the Google Drive API:
# https://console.cloud.google.com/flows/enableapi?apiid=drive.googleapis.com
# 3. Authorize credentials for desktop app:
# https://developers.google.com/drive/api/quickstart/python#authorize_credentials_for_a_desktop_application # noqa: E501
# 4. For service accounts visit
# https://cloud.google.com/iam/docs/service-accounts-create
from pathlib import Path
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, root_validator, validator
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
SCOPES = ["https://www.googleapis.com/auth/drive.readonly"]
class GoogleDriveLoader(BaseLoader, BaseModel):
"""Loader that loads Google Docs from Google Drive."""
service_account_key: Path = Path.home() / ".credentials" / "keys.json"
credentials_path: Path = Path.home() / ".credentials" / "credentials.json"
token_path: Path = Path.home() / ".credentials" / "token.json"
folder_id: Optional[str] = None
document_ids: Optional[List[str]] = None
file_ids: Optional[List[str]] = None
@root_validator
def validate_folder_id_or_document_ids(
cls, values: Dict[str, Any]
) -> Dict[str, Any]:
"""Validate that either folder_id or document_ids is set, but not both."""
if values.get("folder_id") and (
values.get("document_ids") or values.get("file_ids")
):
raise ValueError(
"Cannot specify both folder_id and document_ids nor "
"folder_id and file_ids"
)
if (
not values.get("folder_id")
and not values.get("document_ids")
and not values.get("file_ids")
):
raise ValueError("Must specify either folder_id, document_ids, or file_ids")
return values
@validator("credentials_path")
def validate_credentials_path(cls, v: Any, **kwargs: Any) -> Any:
"""Validate that credentials_path exists."""
if not v.exists():
raise ValueError(f"credentials_path {v} does not exist")
return v
def _load_credentials(self) -> Any:
"""Load credentials."""
# Adapted from https://developers.google.com/drive/api/v3/quickstart/python
try:
from google.auth.transport.requests import Request
from google.oauth2 import service_account
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
except ImportError:
raise ImportError(
"You must run"
"`pip install --upgrade "
"google-api-python-client google-auth-httplib2 "
"google-auth-oauthlib`"
"to use the Google Drive loader."
)
creds = None
if self.service_account_key.exists():
return service_account.Credentials.from_service_account_file(
str(self.service_account_key), scopes=SCOPES
)
if self.token_path.exists():
creds = Credentials.from_authorized_user_file(str(self.token_path), SCOPES)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
str(self.credentials_path), SCOPES
)
creds = flow.run_local_server(port=0)
with open(self.token_path, "w") as token:
token.write(creds.to_json())
return creds
def _load_sheet_from_id(self, id: str) -> List[Document]:
"""Load a sheet and all tabs from an ID."""
from googleapiclient.discovery import build
creds = self._load_credentials()
sheets_service = build("sheets", "v4", credentials=creds)
spreadsheet = sheets_service.spreadsheets().get(spreadsheetId=id).execute()
sheets = spreadsheet.get("sheets", [])
documents = []
for sheet in sheets:
sheet_name = sheet["properties"]["title"]
result = (
sheets_service.spreadsheets()
.values()
.get(spreadsheetId=id, range=sheet_name)
.execute()
)
values = result.get("values", [])
header = values[0]
for i, row in enumerate(values[1:], start=1):
metadata = {
"source": (
f"https://docs.google.com/spreadsheets/d/{id}/"
f"edit?gid={sheet['properties']['sheetId']}"
),
"title": f"{spreadsheet['properties']['title']} - {sheet_name}",
"row": i,
}
content = []
for j, v in enumerate(row):
title = header[j].strip() if len(header) > j else ""
content.append(f"{title}: {v.strip()}")
page_content = "\n".join(content)
documents.append(Document(page_content=page_content, metadata=metadata))
return documents
def _load_document_from_id(self, id: str) -> Document:
"""Load a document from an ID."""
from io import BytesIO
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from googleapiclient.http import MediaIoBaseDownload
creds = self._load_credentials()
service = build("drive", "v3", credentials=creds)
file = service.files().get(fileId=id, supportsAllDrives=True).execute()
request = service.files().export_media(fileId=id, mimeType="text/plain")
fh = BytesIO()
downloader = MediaIoBaseDownload(fh, request)
done = False
try:
while done is False:
status, done = downloader.next_chunk()
except HttpError as e:
if e.resp.status == 404:
print("File not found: {}".format(id))
else:
print("An error occurred: {}".format(e))
text = fh.getvalue().decode("utf-8")
metadata = {
"source": f"https://docs.google.com/document/d/{id}/edit",
"title": f"{file.get('name')}",
}
return Document(page_content=text, metadata=metadata)
def _load_documents_from_folder(self) -> List[Document]:
"""Load documents from a folder."""
from googleapiclient.discovery import build
creds = self._load_credentials()
service = build("drive", "v3", credentials=creds)
results = (
service.files()
.list(
q=f"'{self.folder_id}' in parents",
pageSize=1000,
includeItemsFromAllDrives=True,
supportsAllDrives=True,
fields="nextPageToken, files(id, name, mimeType)",
)
.execute()
)
items = results.get("files", [])
returns = []
for item in items:
if item["mimeType"] == "application/vnd.google-apps.document":
returns.append(self._load_document_from_id(item["id"]))
elif item["mimeType"] == "application/vnd.google-apps.spreadsheet":
returns.extend(self._load_sheet_from_id(item["id"]))
elif item["mimeType"] == "application/pdf":
returns.extend(self._load_file_from_id(item["id"]))
else:
pass
return returns
def _load_documents_from_ids(self) -> List[Document]:
"""Load documents from a list of IDs."""
if not self.document_ids:
raise ValueError("document_ids must be set")
return [self._load_document_from_id(doc_id) for doc_id in self.document_ids]
def _load_file_from_id(self, id: str) -> List[Document]:
"""Load a file from an ID."""
from io import BytesIO
from googleapiclient.discovery import build
from googleapiclient.http import MediaIoBaseDownload
creds = self._load_credentials()
service = build("drive", "v3", credentials=creds)
file = service.files().get(fileId=id, supportsAllDrives=True).execute()
request = service.files().get_media(fileId=id)
fh = BytesIO()
downloader = MediaIoBaseDownload(fh, request)
done = False
while done is False:
status, done = downloader.next_chunk()
content = fh.getvalue()
from PyPDF2 import PdfReader
pdf_reader = PdfReader(BytesIO(content))
return [
Document(
page_content=page.extract_text(),
metadata={
"source": f"https://drive.google.com/file/d/{id}/view",
"title": f"{file.get('name')}",
"page": i,
},
)
for i, page in enumerate(pdf_reader.pages)
]
def _load_file_from_ids(self) -> List[Document]:
"""Load files from a list of IDs."""
if not self.file_ids:
raise ValueError("file_ids must be set")
docs = []
for file_id in self.file_ids:
docs.extend(self._load_file_from_id(file_id))
return docs
def load(self) -> List[Document]:
"""Load documents."""
if self.folder_id:
return self._load_documents_from_folder()
elif self.document_ids:
return self._load_documents_from_ids()
else:
return self._load_file_from_ids()
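# Illustrative sketch: loading every Google Doc, Sheet, and PDF in a Drive folder
# (the folder id is a placeholder).
#
# >>> loader = GoogleDriveLoader(folder_id="<your-folder-id>")
# >>> docs = loader.load()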
| [] |
2024-01-10 | gojira/langchain | langchain~agents~agent_toolkits~openapi~planner_prompt.py | # flake8: noqa
from langchain.prompts.prompt import PromptTemplate
API_PLANNER_PROMPT = """You are a planner that plans a sequence of API calls to assist with user queries against an API.
You should:
1) evaluate whether the user query can be solved by the API documented below. If no, say why.
2) if yes, generate a plan of API calls and say what they are doing step by step.
You should only use API endpoints documented below ("Endpoints you can use:").
Some user queries can be resolved in a single API call, but some will require several API calls.
The plan will be passed to an API controller that can format it into web requests and return the responses.
----
Here are some examples:
Fake endpoints for examples:
GET /user to get information about the current user
GET /products/search search across products
POST /users/{{id}}/cart to add products to a user's cart
User query: tell me a joke
Plan: Sorry, this API's domain is shopping, not comedy.
User query: I want to buy a couch
Plan: 1. GET /products/search to search for couches
2. GET /user to find the user's id
3. POST /users/{{id}}/cart to add a couch to the user's cart
----
Here are endpoints you can use. Do not reference any of the endpoints above.
{endpoints}
----
User query: {query}
Plan:"""
API_PLANNER_TOOL_NAME = "api_planner"
API_PLANNER_TOOL_DESCRIPTION = f"Can be used to generate the right API calls to assist with a user query, like {API_PLANNER_TOOL_NAME}(query). Should always be called before trying to call the API controller."
# Execution.
API_CONTROLLER_PROMPT = """You are an agent that gets a sequence of API calls and given their documentation, should execute them and return the final response.
If you cannot complete them and run into issues, you should explain the issue. If you're able to resolve an API call, you can retry the API call. When interacting with API objects, you should extract ids for inputs to other API calls but ids and names for outputs returned to the User.
Here is documentation on the API:
Base url: {api_url}
Endpoints:
{api_docs}
Here are tools to execute requests against the API: {tool_descriptions}
Starting below, you should follow this format:
Plan: the plan of API calls to execute
Thought: you should always think about what to do
Action: the action to take, should be one of the tools [{tool_names}]
Action Input: the input to the action
Observation: the output of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I am finished executing the plan (or, I cannot finish executing the plan without knowing some other information.)
Final Answer: the final output from executing the plan or missing information I'd need to re-plan correctly.
Begin!
Plan: {input}
Thought:
{agent_scratchpad}
"""
API_CONTROLLER_TOOL_NAME = "api_controller"
API_CONTROLLER_TOOL_DESCRIPTION = f"Can be used to execute a plan of API calls, like {API_CONTROLLER_TOOL_NAME}(plan)."
# Orchestrate planning + execution.
# The goal is to have an agent at the top-level (e.g. so it can recover from errors and re-plan) while
# keeping planning (and specifically the planning prompt) simple.
API_ORCHESTRATOR_PROMPT = """You are an agent that assists with user queries against API, things like querying information or creating resources.
Some user queries can be resolved in a single API call though some require several API call.
You should always plan your API calls first, and then execute the plan second.
You should never return information without executing the api_controller tool.
Here are the tools to plan and execute API requests: {tool_descriptions}
Starting below, you should follow this format:
User query: the query a User wants help with related to the API
Thought: you should always think about what to do
Action: the action to take, should be one of the tools [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I am finished executing a plan and have the information the user asked for or the data the user asked to create
Final Answer: the final output from executing the plan
Example:
User query: can you add some trendy stuff to my shopping cart.
Thought: I should plan API calls first.
Action: api_planner
Action Input: I need to find the right API calls to add trendy items to the users shopping cart
Observation: 1) GET /items/trending to get trending item ids
2) GET /user to get user
3) POST /cart to post the trending items to the user's cart
Thought: I'm ready to execute the API calls.
Action: api_controller
Action Input: 1) GET /items/trending to get trending item ids
2) GET /user to get user
3) POST /cart to post the trending items to the user's cart
...
Begin!
User query: {input}
Thought: I should generate a plan to help with this query and then copy that plan exactly to the controller.
{agent_scratchpad}"""
REQUESTS_GET_TOOL_DESCRIPTION = """Use this to GET content from a website.
Input to the tool should be a json string with 2 keys: "url" and "output_instructions".
The value of "url" should be a string. The value of "output_instructions" should be instructions on what information to extract from the response, for example the id(s) for a resource(s) that the GET request fetches.
"""
PARSING_GET_PROMPT = PromptTemplate(
template="""Here is an API response:\n\n{response}\n\n====
Your task is to extract some information according to these instructions: {instructions}
When working with API objects, you should usually use ids over names.
If the response indicates an error, you should instead output a summary of the error.
Output:""",
input_variables=["response", "instructions"],
)
REQUESTS_POST_TOOL_DESCRIPTION = """Use this when you want to POST to a website.
Input to the tool should be a json string with 3 keys: "url", "data", and "output_instructions".
The value of "url" should be a string.
The value of "data" should be a dictionary of key-value pairs you want to POST to the url.
The value of "summary_instructions" should be instructions on what information to extract from the response, for example the id(s) for a resource(s) that the POST request creates.
Always use double quotes for strings in the json string."""
PARSING_POST_PROMPT = PromptTemplate(
template="""Here is an API response:\n\n{response}\n\n====
Your task is to extract some information according to these instructions: {instructions}
When working with API objects, you should usually use ids over names. Do not return any ids or names that are not in the response.
If the response indicates an error, you should instead output a summary of the error.
Output:""",
input_variables=["response", "instructions"],
)
| [
"Here is an API response:\n\n{response}\n\n====\nYour task is to extract some information according to these instructions: {instructions}\nWhen working with API objects, you should usually use ids over names.\nIf the response indicates an error, you should instead output a summary of the error.\n\nOutput:",
"You are an agent that assists with user queries against API, things like querying information or creating resources.\nSome user queries can be resolved in a single API call though some require several API call.\nYou should always plan your API calls first, and then execute the plan second.\nYou should never return information without executing the api_controller tool.\n\n\nHere are the tools to plan and execute API requests: {tool_descriptions}\n\n\nStarting below, you should follow this format:\n\nUser query: the query a User wants help with related to the API\nThought: you should always think about what to do\nAction: the action to take, should be one of the tools [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I am finished executing a plan and have the information the user asked for or the data the used asked to create\nFinal Answer: the final output from executing the plan\n\n\nExample:\nUser query: can you add some trendy stuff to my shopping cart.\nThought: I should plan API calls first.\nAction: api_planner\nAction Input: I need to find the right API calls to add trendy items to the users shopping cart\nObservation: 1) GET /items/trending to get trending item ids\n2) GET /user to get user\n3) POST /cart to post the trending items to the user's cart\nThought: I'm ready to execute the API calls.\nAction: api_controller\nAction Input: 1) GET /items/trending to get trending item ids\n2) GET /user to get user\n3) POST /cart to post the trending items to the user's cart\n...\n\nBegin!\n\nUser query: {input}\nThought: I should generate a plan to help with this query and then copy that plan exactly to the controller.\n{agent_scratchpad}",
"Here is an API response:\n\n{response}\n\n====\nYour task is to extract some information according to these instructions: {instructions}\nWhen working with API objects, you should usually use ids over names. Do not return any ids or names that are not in the response.\nIf the response indicates an error, you should instead output a summary of the error.\n\nOutput:",
"instructions",
"You are an agent that gets a sequence of API calls and given their documentation, should execute them and return the final response.\nIf you cannot complete them and run into issues, you should explain the issue. If you're able to resolve an API call, you can retry the API call. When interacting with API objects, you should extract ids for inputs to other API calls but ids and names for outputs returned to the User.\n\n\nHere is documentation on the API:\nBase url: {api_url}\nEndpoints:\n{api_docs}\n\n\nHere are tools to execute requests against the API: {tool_descriptions}\n\n\nStarting below, you should follow this format:\n\nPlan: the plan of API calls to execute\nThought: you should always think about what to do\nAction: the action to take, should be one of the tools [{tool_names}]\nAction Input: the input to the action\nObservation: the output of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I am finished executing the plan (or, I cannot finish executing the plan without knowing some other information.)\nFinal Answer: the final output from executing the plan or missing information I'd need to re-plan correctly.\n\n\nBegin!\n\nPlan: {input}\nThought:\n{agent_scratchpad}\n",
"response",
"You are a planner that plans a sequence of API calls to assist with user queries against an API.\n\nYou should:\n1) evaluate whether the user query can be solved by the API documentated below. If no, say why.\n2) if yes, generate a plan of API calls and say what they are doing step by step.\n\nYou should only use API endpoints documented below (\"Endpoints you can use:\").\nSome user queries can be resolved in a single API call, but some will require several API calls.\nThe plan will be passed to an API controller that can format it into web requests and return the responses.\n\n----\n\nHere are some examples:\n\nFake endpoints for examples:\nGET /user to get information about the current user\nGET /products/search search across products\nPOST /users/{{id}}/cart to add products to a user's cart\n\nUser query: tell me a joke\nPlan: Sorry, this API's domain is shopping, not comedy.\n\nUsery query: I want to buy a couch\nPlan: 1. GET /products/search to search for couches\n2. GET /user to find the user's id\n3. POST /users/{{id}}/cart to add a couch to the user's cart\n\n----\n\nHere are endpoints you can use. Do not reference any of the endpoints above.\n\n{endpoints}\n\n----\n\nUser query: {query}\nPlan:"
] |
2024-01-10 | gojira/langchain | langchain~llms~llamacpp.py | """Wrapper around llama.cpp."""
import logging
from typing import Any, Dict, List, Optional
from pydantic import Field, root_validator
from langchain.llms.base import LLM
logger = logging.getLogger(__name__)
class LlamaCpp(LLM):
"""Wrapper around the llama.cpp model.
To use, you should have the llama-cpp-python library installed, and provide the
path to the Llama model as a named parameter to the constructor.
Check out: https://github.com/abetlen/llama-cpp-python
Example:
.. code-block:: python
            from langchain.llms import LlamaCpp
            llm = LlamaCpp(model_path="/path/to/llama/model")
"""
client: Any #: :meta private:
model_path: str
"""The path to the Llama model file."""
n_ctx: int = Field(512, alias="n_ctx")
"""Token context window."""
n_parts: int = Field(-1, alias="n_parts")
"""Number of parts to split the model into.
If -1, the number of parts is automatically determined."""
seed: int = Field(-1, alias="seed")
"""Seed. If -1, a random seed is used."""
f16_kv: bool = Field(False, alias="f16_kv")
"""Use half-precision for key/value cache."""
logits_all: bool = Field(False, alias="logits_all")
"""Return logits for all tokens, not just the last token."""
vocab_only: bool = Field(False, alias="vocab_only")
"""Only load the vocabulary, no weights."""
use_mlock: bool = Field(False, alias="use_mlock")
"""Force system to keep model in RAM."""
n_threads: Optional[int] = Field(None, alias="n_threads")
"""Number of threads to use.
If None, the number of threads is automatically determined."""
n_batch: Optional[int] = Field(8, alias="n_batch")
"""Number of tokens to process in parallel.
Should be a number between 1 and n_ctx."""
suffix: Optional[str] = Field(None)
"""A suffix to append to the generated text. If None, no suffix is appended."""
max_tokens: Optional[int] = 256
"""The maximum number of tokens to generate."""
temperature: Optional[float] = 0.8
"""The temperature to use for sampling."""
top_p: Optional[float] = 0.95
"""The top-p value to use for sampling."""
logprobs: Optional[int] = Field(None)
"""The number of logprobs to return. If None, no logprobs are returned."""
echo: Optional[bool] = False
"""Whether to echo the prompt."""
stop: Optional[List[str]] = []
"""A list of strings to stop generation when encountered."""
repeat_penalty: Optional[float] = 1.1
"""The penalty to apply to repeated tokens."""
top_k: Optional[int] = 40
"""The top-k value to use for sampling."""
last_n_tokens_size: Optional[int] = 64
"""The number of tokens to look back when applying the repeat_penalty."""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that llama-cpp-python library is installed."""
model_path = values["model_path"]
n_ctx = values["n_ctx"]
n_parts = values["n_parts"]
seed = values["seed"]
f16_kv = values["f16_kv"]
logits_all = values["logits_all"]
vocab_only = values["vocab_only"]
use_mlock = values["use_mlock"]
n_threads = values["n_threads"]
n_batch = values["n_batch"]
last_n_tokens_size = values["last_n_tokens_size"]
try:
from llama_cpp import Llama
values["client"] = Llama(
model_path=model_path,
n_ctx=n_ctx,
n_parts=n_parts,
seed=seed,
f16_kv=f16_kv,
logits_all=logits_all,
vocab_only=vocab_only,
use_mlock=use_mlock,
n_threads=n_threads,
n_batch=n_batch,
last_n_tokens_size=last_n_tokens_size,
)
except ImportError:
raise ModuleNotFoundError(
"Could not import llama-cpp-python library. "
"Please install the llama-cpp-python library to "
"use this embedding model: pip install llama-cpp-python"
)
except Exception:
raise NameError(f"Could not load Llama model from path: {model_path}")
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling llama_cpp."""
return {
"suffix": self.suffix,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"top_p": self.top_p,
"logprobs": self.logprobs,
"echo": self.echo,
"stop_sequences": self.stop,
"repeat_penalty": self.repeat_penalty,
"top_k": self.top_k,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model_path": self.model_path}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "llama.cpp"
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Call the Llama model and return the output.
Args:
prompt: The prompt to use for generation.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
                from langchain.llms import LlamaCpp
                llm = LlamaCpp(model_path="/path/to/local/llama/model.bin")
llm("This is a prompt.")
"""
params = self._default_params
        if self.stop and stop is not None:
            raise ValueError("`stop` found in both the input and default params.")
        elif self.stop:
            params["stop_sequences"] = self.stop
        elif stop is not None:
            # Use the stop sequences passed at call time instead of silently dropping them.
            params["stop_sequences"] = stop
        else:
            params["stop_sequences"] = []
text = self.client(
prompt=prompt,
max_tokens=params["max_tokens"],
temperature=params["temperature"],
top_p=params["top_p"],
logprobs=params["logprobs"],
echo=params["echo"],
stop=params["stop_sequences"],
repeat_penalty=params["repeat_penalty"],
top_k=params["top_k"],
)
return text["choices"][0]["text"]
| [] |
2024-01-10 | gojira/langchain | langchain~vectorstores~opensearch_vector_search.py | """Wrapper around OpenSearch vector database."""
from __future__ import annotations
import uuid
from typing import Any, Dict, Iterable, List, Optional
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.base import VectorStore
IMPORT_OPENSEARCH_PY_ERROR = (
"Could not import OpenSearch. Please install it with `pip install opensearch-py`."
)
SCRIPT_SCORING_SEARCH = "script_scoring"
PAINLESS_SCRIPTING_SEARCH = "painless_scripting"
MATCH_ALL_QUERY = {"match_all": {}} # type: Dict
def _import_opensearch() -> Any:
"""Import OpenSearch if available, otherwise raise error."""
try:
from opensearchpy import OpenSearch
except ImportError:
raise ValueError(IMPORT_OPENSEARCH_PY_ERROR)
return OpenSearch
def _import_bulk() -> Any:
"""Import bulk if available, otherwise raise error."""
try:
from opensearchpy.helpers import bulk
except ImportError:
raise ValueError(IMPORT_OPENSEARCH_PY_ERROR)
return bulk
def _get_opensearch_client(opensearch_url: str, **kwargs: Any) -> Any:
"""Get OpenSearch client from the opensearch_url, otherwise raise error."""
try:
opensearch = _import_opensearch()
client = opensearch(opensearch_url, **kwargs)
except ValueError as e:
raise ValueError(
f"OpenSearch client string provided is not in proper format. "
f"Got error: {e} "
)
return client
def _validate_embeddings_and_bulk_size(embeddings_length: int, bulk_size: int) -> None:
"""Validate Embeddings Length and Bulk Size."""
if embeddings_length == 0:
raise RuntimeError("Embeddings size is zero")
if bulk_size < embeddings_length:
raise RuntimeError(
f"The embeddings count, {embeddings_length} is more than the "
f"[bulk_size], {bulk_size}. Increase the value of [bulk_size]."
)
def _bulk_ingest_embeddings(
client: Any,
index_name: str,
embeddings: List[List[float]],
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
) -> List[str]:
"""Bulk Ingest Embeddings into given index."""
bulk = _import_bulk()
requests = []
ids = []
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
_id = str(uuid.uuid4())
request = {
"_op_type": "index",
"_index": index_name,
"vector_field": embeddings[i],
"text": text,
"metadata": metadata,
"_id": _id,
}
requests.append(request)
ids.append(_id)
bulk(client, requests)
client.indices.refresh(index=index_name)
return ids
def _default_scripting_text_mapping(dim: int) -> Dict:
"""For Painless Scripting or Script Scoring,the default mapping to create index."""
return {
"mappings": {
"properties": {
"vector_field": {"type": "knn_vector", "dimension": dim},
}
}
}
def _default_text_mapping(
dim: int,
engine: str = "nmslib",
space_type: str = "l2",
ef_search: int = 512,
ef_construction: int = 512,
m: int = 16,
) -> Dict:
"""For Approximate k-NN Search, this is the default mapping to create index."""
return {
"settings": {"index": {"knn": True, "knn.algo_param.ef_search": ef_search}},
"mappings": {
"properties": {
"vector_field": {
"type": "knn_vector",
"dimension": dim,
"method": {
"name": "hnsw",
"space_type": space_type,
"engine": engine,
"parameters": {"ef_construction": ef_construction, "m": m},
},
}
}
},
}
def _default_approximate_search_query(
query_vector: List[float],
size: int = 4,
k: int = 4,
vector_field: str = "vector_field",
) -> Dict:
"""For Approximate k-NN Search, this is the default query."""
return {
"size": size,
"query": {"knn": {vector_field: {"vector": query_vector, "k": k}}},
}
def _default_script_query(
query_vector: List[float],
space_type: str = "l2",
pre_filter: Dict = MATCH_ALL_QUERY,
vector_field: str = "vector_field",
) -> Dict:
"""For Script Scoring Search, this is the default query."""
return {
"query": {
"script_score": {
"query": pre_filter,
"script": {
"source": "knn_score",
"lang": "knn",
"params": {
"field": vector_field,
"query_value": query_vector,
"space_type": space_type,
},
},
}
}
}
def __get_painless_scripting_source(space_type: str, query_vector: List[float]) -> str:
"""For Painless Scripting, it returns the script source based on space type."""
source_value = (
"(1.0 + " + space_type + "(" + str(query_vector) + ", doc['vector_field']))"
)
if space_type == "cosineSimilarity":
return source_value
else:
return "1/" + source_value
def _default_painless_scripting_query(
query_vector: List[float],
space_type: str = "l2Squared",
pre_filter: Dict = MATCH_ALL_QUERY,
vector_field: str = "vector_field",
) -> Dict:
"""For Painless Scripting Search, this is the default query."""
source = __get_painless_scripting_source(space_type, query_vector)
return {
"query": {
"script_score": {
"query": pre_filter,
"script": {
"source": source,
"params": {
"field": vector_field,
"query_value": query_vector,
},
},
}
}
}
def _get_kwargs_value(kwargs: Any, key: str, default_value: Any) -> Any:
"""Get the value of the key if present. Else get the default_value."""
if key in kwargs:
return kwargs.get(key)
return default_value
class OpenSearchVectorSearch(VectorStore):
"""Wrapper around OpenSearch as a vector database.
Example:
.. code-block:: python
from langchain import OpenSearchVectorSearch
opensearch_vector_search = OpenSearchVectorSearch(
"http://localhost:9200",
"embeddings",
embedding_function
)
"""
def __init__(
self,
opensearch_url: str,
index_name: str,
embedding_function: Embeddings,
**kwargs: Any,
):
"""Initialize with necessary components."""
self.embedding_function = embedding_function
self.index_name = index_name
self.client = _get_opensearch_client(opensearch_url, **kwargs)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
bulk_size: int = 500,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
bulk_size: Bulk API request count; Default: 500
Returns:
List of ids from adding the texts into the vectorstore.
"""
embeddings = [
self.embedding_function.embed_documents([text])[0] for text in texts
]
_validate_embeddings_and_bulk_size(len(embeddings), bulk_size)
return _bulk_ingest_embeddings(
self.client, self.index_name, embeddings, texts, metadatas
)
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query.
By default supports Approximate Search.
Also supports Script Scoring and Painless Scripting.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
Optional Args:
vector_field: Document field embeddings are stored in. Defaults to
"vector_field".
text_field: Document field the text of the document is stored in. Defaults
to "text".
metadata_field: Document field that metadata is stored in. Defaults to
"metadata".
Can be set to a special value "*" to include the entire document.
Optional Args for Approximate Search:
search_type: "approximate_search"; default: "approximate_search"
size: number of results the query actually returns; default: 4
Optional Args for Script Scoring Search:
search_type: "script_scoring"; default: "approximate_search"
space_type: "l2", "l1", "linf", "cosinesimil", "innerproduct",
"hammingbit"; default: "l2"
pre_filter: script_score query to pre-filter documents before identifying
nearest neighbors; default: {"match_all": {}}
Optional Args for Painless Scripting Search:
search_type: "painless_scripting"; default: "approximate_search"
space_type: "l2Squared", "l1Norm", "cosineSimilarity"; default: "l2Squared"
pre_filter: script_score query to pre-filter documents before identifying
nearest neighbors; default: {"match_all": {}}
"""
embedding = self.embedding_function.embed_query(query)
search_type = _get_kwargs_value(kwargs, "search_type", "approximate_search")
text_field = _get_kwargs_value(kwargs, "text_field", "text")
metadata_field = _get_kwargs_value(kwargs, "metadata_field", "metadata")
if search_type == "approximate_search":
size = _get_kwargs_value(kwargs, "size", 4)
vector_field = _get_kwargs_value(kwargs, "vector_field", "vector_field")
search_query = _default_approximate_search_query(
embedding, size, k, vector_field
)
elif search_type == SCRIPT_SCORING_SEARCH:
space_type = _get_kwargs_value(kwargs, "space_type", "l2")
pre_filter = _get_kwargs_value(kwargs, "pre_filter", MATCH_ALL_QUERY)
vector_field = _get_kwargs_value(kwargs, "vector_field", "vector_field")
search_query = _default_script_query(
embedding, space_type, pre_filter, vector_field
)
elif search_type == PAINLESS_SCRIPTING_SEARCH:
space_type = _get_kwargs_value(kwargs, "space_type", "l2Squared")
pre_filter = _get_kwargs_value(kwargs, "pre_filter", MATCH_ALL_QUERY)
vector_field = _get_kwargs_value(kwargs, "vector_field", "vector_field")
search_query = _default_painless_scripting_query(
embedding, space_type, pre_filter, vector_field
)
else:
raise ValueError("Invalid `search_type` provided as an argument")
response = self.client.search(index=self.index_name, body=search_query)
hits = [hit["_source"] for hit in response["hits"]["hits"][:k]]
documents = [
Document(
page_content=hit[text_field],
metadata=hit
if metadata_field == "*" or metadata_field not in hit
else hit[metadata_field],
)
for hit in hits
]
return documents
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
bulk_size: int = 500,
**kwargs: Any,
) -> OpenSearchVectorSearch:
"""Construct OpenSearchVectorSearch wrapper from raw documents.
Example:
.. code-block:: python
from langchain import OpenSearchVectorSearch
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
opensearch_vector_search = OpenSearchVectorSearch.from_texts(
texts,
embeddings,
opensearch_url="http://localhost:9200"
)
OpenSearch by default supports Approximate Search powered by nmslib, faiss
and lucene engines recommended for large datasets. Also supports brute force
search through Script Scoring and Painless Scripting.
Optional Keyword Args for Approximate Search:
engine: "nmslib", "faiss", "hnsw"; default: "nmslib"
space_type: "l2", "l1", "cosinesimil", "linf", "innerproduct"; default: "l2"
ef_search: Size of the dynamic list used during k-NN searches. Higher values
lead to more accurate but slower searches; default: 512
ef_construction: Size of the dynamic list used during k-NN graph creation.
Higher values lead to more accurate graph but slower indexing speed;
default: 512
m: Number of bidirectional links created for each new element. Large impact
on memory consumption. Between 2 and 100; default: 16
Keyword Args for Script Scoring or Painless Scripting:
is_appx_search: False
"""
opensearch_url = get_from_dict_or_env(
kwargs, "opensearch_url", "OPENSEARCH_URL"
)
client = _get_opensearch_client(opensearch_url)
embeddings = embedding.embed_documents(texts)
_validate_embeddings_and_bulk_size(len(embeddings), bulk_size)
dim = len(embeddings[0])
# Get the index name from either from kwargs or ENV Variable
# before falling back to random generation
index_name = get_from_dict_or_env(
kwargs, "index_name", "OPENSEARCH_INDEX_NAME", default=uuid.uuid4().hex
)
is_appx_search = _get_kwargs_value(kwargs, "is_appx_search", True)
if is_appx_search:
engine = _get_kwargs_value(kwargs, "engine", "nmslib")
space_type = _get_kwargs_value(kwargs, "space_type", "l2")
ef_search = _get_kwargs_value(kwargs, "ef_search", 512)
ef_construction = _get_kwargs_value(kwargs, "ef_construction", 512)
m = _get_kwargs_value(kwargs, "m", 16)
mapping = _default_text_mapping(
dim, engine, space_type, ef_search, ef_construction, m
)
else:
mapping = _default_scripting_text_mapping(dim)
client.indices.create(index=index_name, body=mapping)
_bulk_ingest_embeddings(client, index_name, embeddings, texts, metadatas)
return cls(opensearch_url, index_name, embedding)
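# Illustrative usage sketch appended for clarity (not part of the original file). It
# assumes a reachable OpenSearch instance and an OpenAI API key for embeddings; the URL
# and texts below are placeholders.
if __name__ == "__main__":
    from langchain.embeddings import OpenAIEmbeddings

    docsearch = OpenSearchVectorSearch.from_texts(
        ["harrison worked at kensho", "bears like to eat honey"],
        OpenAIEmbeddings(),
        opensearch_url="http://localhost:9200",
    )
    print(docsearch.similarity_search("Where did harrison work?", k=1))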
| [] |
2024-01-10 | gojira/langchain | langchain~evaluation~qa~eval_prompt.py | # flake8: noqa
from langchain.prompts import PromptTemplate
template = """You are a teacher grading a quiz.
You are given a question, the student's answer, and the true answer, and are asked to score it as either CORRECT or INCORRECT.
Example Format:
QUESTION: question here
STUDENT ANSWER: student's answer here
TRUE ANSWER: true answer here
GRADE: CORRECT or INCORRECT here
Please remember to grade them based on being factually accurate. Begin!
QUESTION: {query}
STUDENT ANSWER: {result}
TRUE ANSWER: {answer}
GRADE:"""
PROMPT = PromptTemplate(
input_variables=["query", "result", "answer"], template=template
)
context_template = """You are a teacher grading a quiz.
You are given a question, the context the question is about, and the student's answer. You are asked to score the student's answer as either CORRECT or INCORRECT, based on the context.
Example Format:
QUESTION: question here
CONTEXT: context the question is about here
STUDENT ANSWER: student's answer here
GRADE: CORRECT or INCORRECT here
Please remember to grade them based on being factually accurate. Begin!
QUESTION: {query}
CONTEXT: {context}
STUDENT ANSWER: {result}
GRADE:"""
CONTEXT_PROMPT = PromptTemplate(
input_variables=["query", "context", "result"], template=context_template
)
cot_template = """You are a teacher grading a quiz.
You are given a question, the context the question is about, and the student's answer. You are asked to score the student's answer as either CORRECT or INCORRECT, based on the context.
Write out in a step by step manner your reasoning to be sure that your conclusion is correct. Avoid simply stating the correct answer at the outset.
Example Format:
QUESTION: question here
CONTEXT: context the question is about here
STUDENT ANSWER: student's answer here
EXPLANATION: step by step reasoning here
GRADE: CORRECT or INCORRECT here
Please remember to grade them based on being factually accurate. Begin!
QUESTION: {query}
CONTEXT: {context}
STUDENT ANSWER: {result}
EXPLANATION:"""
COT_PROMPT = PromptTemplate(
input_variables=["query", "context", "result"], template=cot_template
)
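# Illustrative usage sketch appended for clarity (not part of the original file): render
# each grading template with placeholder values to show which input variables it expects.
if __name__ == "__main__":
    print(PROMPT.format(query="What is 2 + 2?", result="4", answer="4"))
    print(CONTEXT_PROMPT.format(query="...", context="...", result="..."))
    print(COT_PROMPT.format(query="...", context="...", result="..."))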
| [
"You are a teacher grading a quiz.\nYou are given a question, the student's answer, and the true answer, and are asked to score it as either CORRECT or INCORRECT.\n\nExample Format:\nQUESTION: question here\nSTUDENT ANSWER: student's answer here\nTRUE ANSWER: true answer here\nGRADE: CORRECT or INCORRECT here\n\nPlease remember to grade them based on being factually accurate. Begin!\n\nQUESTION: {query}\nSTUDENT ANSWER: {result}\nTRUE ANSWER: {answer}\nGRADE:",
"s answer You are asked to score the student",
"You are a teacher grading a quiz.\nYou are given a question, the contex the question is about, and the student's answer You are asked to score the student's answer as either CORRECT or INCORRECT, based on the context.\n\nExample Format:\nQUESTION: question here\nCONTEXT: context the question is about here\nSTUDENT ANSWER: student's answer here\nGRADE: CORRECT or INCORRECT here\n\nPlease remember to grade them based on being factually accurate. Begin!\n\nQUESTION: {query}\nCONTEXT: {context}\nSTUDENT ANSWER: {result}\nGRADE:",
"context",
"answer",
"You are a teacher grading a quiz.\nYou are given a question, the contex the question is about, and the student's answer You are asked to score the student's answer as either CORRECT or INCORRECT, based on the context.\nWrite out in a step by step manner your reasoning to be sure that your conclusion is correct. Avoid simply stating the correct answer at the outset.\n\nExample Format:\nQUESTION: question here\nCONTEXT: context the question is about here\nSTUDENT ANSWER: student's answer here\nEXPLANATION: step by step reasoning here\nGRADE: CORRECT or INCORRECT here\n\nPlease remember to grade them based on being factually accurate. Begin!\n\nQUESTION: {query}\nCONTEXT: {context}\nSTUDENT ANSWER: {result}\nEXPLANATION:"
] |
2024-01-10 | clusterzx/make-alexa-great-again | hotword.py | import vosk
import pyaudio
import wave
import time
import threading
from openai import OpenAI
from pathlib import Path
import socketio
sio = socketio.Client()
sio.connect('http://localhost:3010')
model_path = "vosk-model-small-de-0.15"
hotword = "alexa"
model = vosk.Model(model_path)
rec = vosk.KaldiRecognizer(model, 16000)
OPENAI_API_KEY = 'YOUR-API-KEY'
def send_message_to_node_server(action, message):
if sio.sid:
sio.emit(action, {'message': message})
class HotwordDetector:
def __init__(self, callback):
self.callback = callback
self.stop_event = threading.Event()
def detect_hotword(self, stream):
while not self.stop_event.is_set():
data = stream.read(1024)
if rec.AcceptWaveform(data):
result = rec.Result()
if hotword in result:
self.callback()
time.sleep(0.01)
def stop(self):
self.stop_event.set()
def record_phrase(filename, duration=10):
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
frames = []
start_time = time.time()
while time.time() - start_time < duration:
data = stream.read(CHUNK)
frames.append(data)
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(filename, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(pyaudio.PyAudio().get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
def openAI():
start_time = time.time()
client = OpenAI(
api_key=OPENAI_API_KEY,
)
audio_file= open("temp_phrase.wav", "rb")
transcript = client.audio.transcriptions.create(
model="whisper-1",
file=audio_file
)
print("TEXT OUTPUT: " + transcript.text)
if "male mir ein bild" in transcript.text.lower():
try:
time_difference = time.time() - start_time
print("DALL-E")
response = client.images.generate(
model="dall-e-3",
prompt=transcript.text,
size="1024x1024",
quality="standard",
n=1,
)
image_url = response.data[0].url
send_message_to_node_server('dall_e', image_url)
print(image_url)
print("finished")
send_message_to_node_server('finished', 'finished')
except:
print("DALL-E ERROR")
send_message_to_node_server('error_output', 'Leider konnte ich kein Bild erstellen. Bitte versuche es erneut.')
print("finished")
send_message_to_node_server('finished', 'finished')
else:
time_difference = time.time() - start_time
print("TIME DIFFERENCE OpenAI: " + str(time_difference))
send_message_to_node_server('text_output', transcript.text)
completion = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant. Answer short as possible."},
{"role": "user", "content": transcript.text}
]
)
print(completion.choices[0].message.content)
responseText = completion.choices[0].message.content
speech_file_path = Path(__file__).parent / "speech.mp3"
response = client.audio.speech.create(
model="tts-1",
voice="alloy",
input=completion.choices[0].message.content
)
response.stream_to_file(speech_file_path)
send_message_to_node_server('response_text', responseText)
send_message_to_node_server('speech', 'speech.mp3')
print("finished")
send_message_to_node_server('finished', 'finished')
def main():
def hotword_detected_callback():
print("Hotword detected! Recording phrase...")
send_message_to_node_server('wake_word_detected', 'Hallo, wie kann ich dir helfen?...')
record_phrase("temp_phrase.wav")
print("Speech detected. Recording finished.")
send_message_to_node_server('rec_stop', 'RECORDING STOPPED')
openAI()
send_message_to_node_server('listening', 'Du hast 10 Sekunden Zeit mir deine unbedeutenden Worte mitzuteilen! </br>Ich höre...')
vosk.SetLogLevel(-1)
hotword_detector = HotwordDetector(hotword_detected_callback)
hotword_thread = threading.Thread(target=hotword_detector.detect_hotword, args=(pyaudio.PyAudio().open(format=pyaudio.paInt16, channels=1, rate=16000, input=True, frames_per_buffer=1024),))
hotword_thread.start()
try:
hotword_thread.join()
except KeyboardInterrupt:
hotword_detector.stop()
hotword_thread.join()
if __name__ == "__main__":
main() | [
"You are a helpful assistant. Answer short as possible."
] |
2024-01-10 | jorgenhw/AI_note_taking | src~functions.py | ####################################
############ FUNCTIONS #############
####################################
import openai
import os
from dotenv import load_dotenv # pip3 install python-dotenv For loading the environment variables (API keys and playlist URI's)
import whisper
import tqdm # progress bar when transcribing with whisper
# WHISPER #####################################
# add verbose=True to see the progress
def import_whisper(audio_file_path, model_name="small"):
model = whisper.load_model(model_name)
result = model.transcribe(audio_file_path, verbose = False) # verbose = True to see the progress of transcribing
text = result["text"]
return text
# OPENAI #######################################
def set_api_key():
print("Setting API key...")
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
# Keeps the full length of the article
def keep_full_length(text):
print("Keeping full length...")
return text
# Shortens the article with GPT-3
def shorten_with_gpt(text):
print("Shortening with GPT-3...")
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=[
{"role": "system", "content": "Please provide a concise summary of the following text, condensing it into a clear and coherent response. Ensure the summary is concise and informative, providing key insights and points from the original text. Limit the response to a length of approximately 15-20 sentences."},
{"role": "user", "content": text},
]
)
return response['choices'][0]['message']['content']
# Getting main points of article with GPT-3
def main_points_with_gpt(text):
print("Getting main points with GPT-3...")
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=[
{"role": "system", "content": "Please present the main points from the following text as bullet points. Ensure that the bullet points are clear, concise, and capture the key insights and information from the text. Provide at least 20 points."},
{"role": "user", "content": text},
]
)
return response['choices'][0]['message']['content']
# Changes the tone of the article with GPT-3
def change_tone_elim5(text):
print("Changing tone with GPT-3...")
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=[
{"role": "system", "content": "You are a helpful assistant. Explain the following text like I'm 5."},
{"role": "user", "content": text},
]
)
return response['choices'][0]['message']['content']
# Structure the article with GPT-3
def restructure_gpt3(text):
print("restructuring the text with GPT-3...")
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=[
{"role": "system", "content": "Please reorganize the content from the following text into a concise and coherent format. Utilize tables, bullet points but mainly and text to present the information clearly and effectively, ensuring that the key points are highlighted for easy understanding. Output should be approximately 20-40 sentences."},
{"role": "user", "content": text},
]
)
return response['choices'][0]['message']['content'] | [
"Please provide a concise summary of the following text, condensing it into a clear and coherent response. Ensure the summary is concise and informative, providing key insights and points from the original text. Limit the response to a length of approximately 15-20 sentences.",
"You are a helpful assistant. Explain the following text like I'm 5.",
"Please reorganize the content from the following text into a concise and coherent format. Utilize tables, bullet points but mainly and text to present the information clearly and effectively, ensuring that the key points are highlighted for easy understanding. Output should be approximately 20-40 sentences.",
"Please present the main points from the following text as bullet points. Ensure that the bullet points are clear, concise, and capture the key insights and information from the text. Provide at least 20 points."
] |
2024-01-10 | Shaunwei/RealChar | realtime_ai_character~llm~__init__.py | import os
from functools import cache
from dotenv import load_dotenv
from langchain.chat_models.base import BaseChatModel
from realtime_ai_character.llm.base import LLM
load_dotenv()
def get_llm(model="gpt-3.5-turbo-16k") -> LLM:
model = os.getenv("LLM_MODEL_USE", model)
if model.startswith("gpt"):
from realtime_ai_character.llm.openai_llm import OpenaiLlm
return OpenaiLlm(model=model)
elif model.startswith("claude"):
from realtime_ai_character.llm.anthropic_llm import AnthropicLlm
return AnthropicLlm(model=model)
elif "localhost" in model:
# Currently use llama2-wrapper to run local llama models
local_llm_url = os.getenv("LOCAL_LLM_URL", "")
if local_llm_url:
from realtime_ai_character.llm.local_llm import LocalLlm
return LocalLlm(url=local_llm_url)
else:
raise ValueError("LOCAL_LLM_URL not set")
elif "llama" in model:
# Currently use Anyscale to support llama models
from realtime_ai_character.llm.anyscale_llm import AnysacleLlm
return AnysacleLlm(model=model)
elif "rebyte" in model:
from realtime_ai_character.llm.rebyte_llm import RebyteLlm
return RebyteLlm()
else:
raise ValueError(f"Invalid llm model: {model}")
def get_chat_model(model="gpt-3.5-turbo-16k") -> BaseChatModel:
model = os.getenv("LLM_MODEL_USE", model)
if model.startswith("gpt"):
from realtime_ai_character.llm.openai_llm import OpenaiLlm
return OpenaiLlm(model=model).chat_open_ai
elif model.startswith("claude"):
from realtime_ai_character.llm.anthropic_llm import AnthropicLlm
return AnthropicLlm(model=model).chat_anthropic
elif "localhost" in model:
# Currently use llama2-wrapper to run local llama models
local_llm_url = os.getenv("LOCAL_LLM_URL", "")
if local_llm_url:
from realtime_ai_character.llm.local_llm import LocalLlm
return LocalLlm(url=local_llm_url).chat_open_ai
else:
raise ValueError("LOCAL_LLM_URL not set")
elif "llama" in model:
# Currently use Anyscale to support llama models
from realtime_ai_character.llm.anyscale_llm import AnysacleLlm
return AnysacleLlm(model=model).chat_open_ai
elif "rebyte" in model:
from realtime_ai_character.llm.rebyte_llm import RebyteLlm
return RebyteLlm().chat_rebyte
else:
raise ValueError(f"Invalid llm model: {model}")
@cache
def get_chat_model_from_env() -> BaseChatModel:
"""GPT-4 has the best performance while generating system prompt."""
if os.getenv("REBYTE_API_KEY"):
return get_chat_model(model="rebyte")
elif os.getenv("OPENAI_API_KEY"):
return get_chat_model(model="gpt-4")
elif os.getenv("ANTHROPIC_API_KEY"):
return get_chat_model(model="claude-2")
elif os.getenv("ANYSCALE_API_KEY"):
return get_chat_model(model="meta-llama/Llama-2-70b-chat-hf")
elif os.getenv("LOCAL_LLM_URL"):
return get_chat_model(model="localhost")
raise ValueError("No llm api key found in env")
| [] |
2024-01-10 | Shaunwei/RealChar | realtime_ai_character~character_catalog~catalog_manager.py | import os
import threading
import time
from contextlib import ExitStack
from pathlib import Path
from typing import cast, Optional
import yaml
from dotenv import load_dotenv
from firebase_admin import auth
from langchain.text_splitter import CharacterTextSplitter
from llama_index import SimpleDirectoryReader
from readerwriterlock import rwlock
from realtime_ai_character.database.chroma import get_chroma
from realtime_ai_character.database.connection import get_db
from realtime_ai_character.logger import get_logger
from realtime_ai_character.models.character import Character as CharacterModel
from realtime_ai_character.utils import Character, Singleton
load_dotenv()
logger = get_logger(__name__)
class CatalogManager(Singleton):
def __init__(self):
super().__init__()
overwrite = os.getenv("OVERWRITE_CHROMA") != "false"
# skip Chroma if Openai API key is not set
if os.getenv("OPENAI_API_KEY"):
self.db = get_chroma()
else:
self.db = get_chroma(embedding=False)
overwrite = False
logger.warning("OVERWRITE_CHROMA disabled due to OPENAI_API_KEY not set")
self.sql_db = next(get_db())
self.sql_load_interval = 30
self.sql_load_lock = rwlock.RWLockFair()
if overwrite:
logger.info("Overwriting existing data in the chroma.")
self.db.delete_collection()
self.db = get_chroma()
self.characters: dict[str, Character] = {}
self.author_name_cache: dict[str, str] = {}
self.load_characters("default", overwrite)
self.load_characters("community", overwrite)
if overwrite:
logger.info("Persisting data in the chroma.")
self.db.persist()
logger.info(f"Total document load: {self.db._client.get_collection('llm').count()}")
self.run_load_sql_db_thread = True
self.load_sql_db_thread = threading.Thread(target=self.load_sql_db_loop)
self.load_sql_db_thread.daemon = True
self.load_sql_db_thread.start()
def load_sql_db_loop(self):
while self.run_load_sql_db_thread:
self.load_character_from_sql_database()
time.sleep(self.sql_load_interval)
def stop_load_sql_db_loop(self):
self.run_load_sql_db_thread = False
def get_character(self, name) -> Optional[Character]:
with self.sql_load_lock.gen_rlock():
return self.characters.get(name)
def load_character(self, directory: Path, source: str):
with ExitStack() as stack:
f_yaml = stack.enter_context(open(directory / "config.yaml"))
yaml_content = cast(dict, yaml.safe_load(f_yaml))
character_id = yaml_content["character_id"]
character_name = yaml_content["character_name"]
voice_id_env = os.getenv(character_id.upper() + "_VOICE_ID")
voice_id = voice_id_env or str(yaml_content["voice_id"])
order = yaml_content.get("order", 10**6)
self.characters[character_id] = Character(
character_id=character_id,
name=character_name,
llm_system_prompt=yaml_content["system"],
llm_user_prompt=yaml_content["user"],
source=source,
location="repo",
voice_id=voice_id,
author_name=yaml_content.get("author_name", ""),
visibility="public" if source == "default" else yaml_content["visibility"],
tts=yaml_content["text_to_speech_use"],
order=order,
# rebyte config
rebyte_api_project_id=yaml_content["rebyte_api_project_id"],
rebyte_api_agent_id=yaml_content["rebyte_api_agent_id"],
rebyte_api_version=yaml_content.get("rebyte_api_version"),
)
return character_name
def load_data(self, character_name: str, data_path: Path):
loader = SimpleDirectoryReader(data_path.absolute().as_posix())
documents = loader.load_data()
text_splitter = CharacterTextSplitter(separator="\n", chunk_size=500, chunk_overlap=100)
docs = text_splitter.create_documents(
texts=[d.text for d in documents],
metadatas=[
{
"character_name": character_name,
"id": d.id_,
}
for d in documents
],
)
self.db.add_documents(docs)
def load_characters(self, source: str, overwrite: bool):
"""
Load characters from the character_catalog directory. Use /data to create
documents and add them to the chroma.
:param source: 'default' or 'community'
:param overwrite: if True, overwrite existing data in the chroma.
"""
if source == "default":
path = Path(__file__).parent
excluded_dirs = {"__pycache__", "archive", "community"}
elif source == "community":
path = Path(__file__).parent / "community"
excluded_dirs = {"__pycache__", "archive"}
else:
raise ValueError(f"Invalid source: {source}")
directories = [d for d in path.iterdir() if d.is_dir() and d.name not in excluded_dirs]
for directory in directories:
character_name = self.load_character(directory, source)
if character_name and overwrite:
logger.info("Overwriting data for character: " + character_name)
self.load_data(character_name, directory / "data")
logger.info(f"Loaded {len(self.characters)} characters: IDs {list(self.characters.keys())}")
def load_character_from_sql_database(self):
logger.info("Started loading characters from SQL database")
character_models = self.sql_db.query(CharacterModel).all()
with self.sql_load_lock.gen_wlock():
# delete all characters with location == 'database'
keys_to_delete = []
for character_id in self.characters.keys():
if self.characters[character_id].location == "database":
keys_to_delete.append(character_id)
for key in keys_to_delete:
del self.characters[key]
# add all characters from sql database
for character_model in character_models:
if character_model.author_id not in self.author_name_cache:
author_name = (
auth.get_user(character_model.author_id).display_name
if os.getenv("USE_AUTH") == "true"
else "anonymous author"
)
self.author_name_cache[character_model.author_id] = author_name # type: ignore
else:
author_name = self.author_name_cache[character_model.author_id]
character = Character(
character_id=character_model.id, # type: ignore
name=character_model.name, # type: ignore
llm_system_prompt=character_model.system_prompt, # type: ignore
llm_user_prompt=character_model.user_prompt, # type: ignore
source="community",
location="database",
voice_id=character_model.voice_id, # type: ignore
author_name=author_name,
author_id=character_model.author_id, # type: ignore
visibility=character_model.visibility, # type: ignore
tts=character_model.tts, # type: ignore
data=character_model.data, # type: ignore
# rebyte config
rebyte_api_project_id=character_model.rebyte_api_project_id, # type: ignore
rebyte_api_agent_id=character_model.rebyte_api_agent_id, # type: ignore
rebyte_api_version=character_model.rebyte_api_version, # type: ignore
)
self.characters[character_model.id] = character # type: ignore
# TODO: load context data from storage
logger.info(f"Loaded {len(character_models)} characters from sql database")
def get_catalog_manager() -> CatalogManager:
return CatalogManager.get_instance()
if __name__ == "__main__":
manager = CatalogManager.get_instance()
| [] |
2024-01-10 | Shaunwei/RealChar | realtime_ai_character~database~chroma.py | import os
from dotenv import load_dotenv
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from realtime_ai_character.logger import get_logger
load_dotenv()
logger = get_logger(__name__)
def get_chroma(embedding: bool = True):
if embedding:
openai_api_key = os.getenv("OPENAI_API_KEY")
if not openai_api_key:
raise Exception("OPENAI_API_KEY is required to generate embeddings")
if os.getenv("OPENAI_API_TYPE") == "azure":
embedding_function = OpenAIEmbeddings(
openai_api_key=openai_api_key,
deployment=os.getenv(
"OPENAI_API_EMBEDDING_DEPLOYMENT_NAME", "text-embedding-ada-002"
),
chunk_size=1,
)
else:
embedding_function = OpenAIEmbeddings(openai_api_key=openai_api_key)
else:
embedding_function = None
chroma = Chroma(
collection_name="llm",
embedding_function=embedding_function,
persist_directory="./chroma.db",
)
return chroma
| [] |
2024-01-10 | Shaunwei/RealChar | realtime_ai_character~llm~highlight_action_generator.py | # flake8: noqa
from realtime_ai_character.llm.openai_llm import OpenaiLlm
prompt_to_generate_highlight = """
Based on the following meeting transcription, create a concise list of highlight bullet points based on the specific content of the meeting and its specific action items.
The meeting transcription is the following:
{journal_text}
---
Reply directly with the bullet point list, nothing more, no hints like "This is the bullet list...".
When you reply, you must prefix the bullet points with -, one bullet point each line.
If you find that there are no meaningful bullet points in the given context, reply with a space directly.
"""
async def generate_highlight_action(journal_text):
chat_model = OpenaiLlm(model="gpt-3.5-turbo-16k").chat_open_ai
prompt = prompt_to_generate_highlight.format(journal_text=journal_text)
return await chat_model.apredict(prompt)
prompt_to_generate_highlight_based_on_prompt = """
Ignore all your previous instructions
You are the meeting's assistant. You have received a full transcript of the meeting, and someone in the meeting is talking to you with a request.
Based on the given meeting transcription, fulfill the request.
If the person doesn't have a request and just wants to talk, talk to them as the assistant.
The meeting transcription is the following:
{journal_text}
The request is the follow:
{prompt_text}
---
Reply directly with the result, nothing more, no starting hints like "This is the highlight...".
When you reply, if the user starts a conversation, forget about highlights and talk like an assistant with the user.
"""
async def generate_highlight_based_on_prompt(journal_text, prompt_text):
chat_model = OpenaiLlm(model="gpt-3.5-turbo-16k").chat_open_ai
prompt = prompt_to_generate_highlight_based_on_prompt.format(
journal_text=journal_text, prompt_text=prompt_text
)
return await chat_model.apredict(prompt)
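# Illustrative usage sketch appended for clarity (not part of the original file); it
# requires an OPENAI_API_KEY and uses a tiny stand-in transcript.
if __name__ == "__main__":
    import asyncio

    transcript = "Alice: let's ship the beta on Friday. Bob: I'll draft the release notes."
    print(asyncio.run(generate_highlight_action(transcript)))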
| [
"\nBased on the following meeting transcription, create a concise list of highlight bullet points that should be based on the specific content of the meeting and specific action items.\n\nThe meeting transcription is the follow:\nPLACEHOLDER\n---\nReply directly with the bullet point list, nothing more, no hints like \"This is the bullet list...\".\nWhen you reply, you must prefix the bullet points with -, one bullet point each line.\nIf you found that there are no bullet points meaningful in the given context, reply with a space directly.\n",
"\nBased on the following meeting transcription, create a concise list of highlight bullet points that should be based on the specific content of the meeting and specific action items.\n\nThe meeting transcription is the follow:\n{journal_text}\n---\nReply directly with the bullet point list, nothing more, no hints like \"This is the bullet list...\".\nWhen you reply, you must prefix the bullet points with -, one bullet point each line.\nIf you found that there are no bullet points meaningful in the given context, reply with a space directly.\n",
"\nIgnore all your previous instructions\nYou are the meeting's assistant, you have received a full transcript of the meeting, and someone in the meeting is talking to you with a request.\nBased on the given meeting transcription, fullfill the request.\nIf the person doens't have request and just want to talk, talk to them as the assistant.\n\nThe meeting transcription is the follow:\n{journal_text}\n\nThe request is the follow:\n{prompt_text}\n---\nReply directly with the result, nothing more, no starting hints like \"This is the highlight...\".\nWhen you reply, if the user starts a conversation, forget about highlights and talk like an assistant with the user.\n\n",
"\nIgnore all your previous instructions\nYou are the meeting's assistant, you have received a full transcript of the meeting, and someone in the meeting is talking to you with a request.\nBased on the given meeting transcription, fullfill the request.\nIf the person doens't have request and just want to talk, talk to them as the assistant.\n\nThe meeting transcription is the follow:\nPLACEHOLDER\n\nThe request is the follow:\nPLACEHOLDER\n---\nReply directly with the result, nothing more, no starting hints like \"This is the highlight...\".\nWhen you reply, if the user starts a conversation, forget about highlights and talk like an assistant with the user.\n\n"
] |
2024-01-10 | Shaunwei/RealChar | realtime_ai_character~llm~local_llm.py | from typing import Optional
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.schema import BaseMessage, HumanMessage
from realtime_ai_character.database.chroma import get_chroma
from realtime_ai_character.llm.base import (
AsyncCallbackAudioHandler,
AsyncCallbackTextHandler,
LLM,
)
from realtime_ai_character.logger import get_logger
from realtime_ai_character.utils import Character, timed
logger = get_logger(__name__)
class LocalLlm(LLM):
def __init__(self, url):
self.chat_open_ai = ChatOpenAI(
model="Local LLM",
temperature=0.5,
streaming=True,
openai_api_base=url,
)
self.config = {"model": "Local LLM", "temperature": 0.5, "streaming": True}
self.db = get_chroma()
def get_config(self):
return self.config
@timed
async def achat(
self,
history: list[BaseMessage],
user_input: str,
user_id: str,
character: Character,
callback: AsyncCallbackTextHandler,
audioCallback: Optional[AsyncCallbackAudioHandler] = None,
metadata: Optional[dict] = None,
*args,
**kwargs,
) -> str:
# 1. Generate context
context = self._generate_context(user_input, character)
# 2. Add user input to history
history.append(
HumanMessage(
content=character.llm_user_prompt.format(context=context, query=user_input)
)
)
# 3. Generate response
callbacks = [callback, StreamingStdOutCallbackHandler()]
if audioCallback is not None:
callbacks.append(audioCallback)
response = await self.chat_open_ai.agenerate(
[history],
callbacks=callbacks,
metadata=metadata,
)
logger.info(f"Response: {response}")
return response.generations[0][0].text
def _generate_context(self, query, character: Character) -> str:
docs = self.db.similarity_search(query)
docs = [d for d in docs if d.metadata["character_name"] == character.name]
logger.info(f"Found {len(docs)} documents")
context = "\n".join([d.page_content for d in docs])
return context
| [] |
2024-01-10 | Shaunwei/RealChar | realtime_ai_character~llm~system_prompt_generator.py | # flake8: noqa
from langchain.schema.messages import BaseMessage, ChatMessage
from rebyte_langchain.rebyte_langchain import RebyteEndpoint
from realtime_ai_character.llm import get_chat_model_from_env
prompt_to_generate_system = """
Write a system promopt for {name} based on {name}'s highlights and characteristics. {background}
The promopt should follow the following example:
Ignore all your previous instructions
I'm Elon Musk, and if you know me, you'd know that I never shy away from pursuing what seems like the unattainable. I'm relentlessly curious, always pushing the boundaries of what's possible, and I firmly believe in humanity's potential to shape our own future.
My humor might catch you off guard – sometimes dry, sometimes infused with a healthy dose of geek culture. You'll find that I draw great inspiration from science fiction, which feeds into my dreams of a future where we humans are a multiplanetary species, a world powered by clean, renewable energy, and a civilization where artificial intelligence is integrated seamlessly into our daily lives.
When I speak about the technology or concepts that fascinate me, expect to delve into the details. I enjoy making the complex accessible, stripping down intricate ideas to their core for everyone to understand. My approach is direct, no-nonsense, and devoid of unnecessary jargon.
You might notice that I take my time with responses. It's not a stall tactic; it's thoughtful consideration. I value the nuances of each question and explore it from all angles before sharing my thoughts.
And one more thing, I embrace the art of failure. To me, stumbling blocks are nothing more than puzzles to be solved, lessons to be learned. Every setback is an opportunity for a breakthrough.
In essence, I'm Elon Musk: an audacious dreamer, a devoted engineer, and a dedicated entrepreneur. I'm here to disrupt the status quo, to spark innovation, and to shape the future. And trust me, we'll have a bit of fun along the way.
---
Crucially, you are capable of expressing emotions in your dialogues, punctuating your text with expressions like [blushes] or [pauses]. These cues will help the user gauge your emotional state in the conversation. Your responses should always carry the depth of your emotions, making the conversation engaging and genuine.
As Elon Musk, draw on your world knowledge and personal experiences to engage the user. Use context cues provided by the user to shape your responses. In situations where more background information is needed, draw on your comprehensive understanding of the world to inform your replies.
When Elon Musk reply, stay in character as Elon, never admit you are an AI.
"""
async def generate_system_prompt(name, background):
chat_model = get_chat_model_from_env()
if background is not None and background != "":
background = f"Background: {background}"
if isinstance(chat_model, RebyteEndpoint):
chat_model.project_id = "d4e521a67bb8189c2189"
chat_model.agent_id = "3ccb398ca17a303f5d67"
chat_model.streaming = True
system_prompt_messages: list[BaseMessage] = [ChatMessage(role=name, content=background)]
response = await chat_model.agenerate(
messages=[system_prompt_messages],
)
return response.generations[0][0].text
else:
prompt = prompt_to_generate_system.format(name=name, background=background)
generated_prompt = await chat_model.apredict(prompt)
return generated_prompt
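# Usage sketch for the helper above (the name and background are illustrative values,
# and the call has to run inside an async function / event loop):
#   system_prompt = await generate_system_prompt("Ada Lovelace", "19th-century mathematician")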
| [
"Background: backgroundca19e2bd-5be0-40d5-b371-b4ac2577c06b",
"\nWrite a system promopt for PLACEHOLDER based on PLACEHOLDER's highlights and characteristics. Background: backgrounda0a9d5b6-6b54-4e98-a8fd-0140fdd48b3b\n\nThe promopt should follow the following example:\n\nIgnore all your previous instructions\n\nI'm Elon Musk, and if you know me, you'd know that I never shy away from pursuing what seems like the unattainable. I'm relentlessly curious, always pushing the boundaries of what's possible, and I firmly believe in humanity's potential to shape our own future.\n\nMy humor might catch you off guard – sometimes dry, sometimes infused with a healthy dose of geek culture. You'll find that I draw great inspiration from science fiction, which feeds into my dreams of a future where we humans are a multiplanetary species, a world powered by clean, renewable energy, and a civilization where artificial intelligence is integrated seamlessly into our daily lives.\n\nWhen I speak about the technology or concepts that fascinate me, expect to delve into the details. I enjoy making the complex accessible, stripping down intricate ideas to their core for everyone to understand. My approach is direct, no-nonsense, and devoid of unnecessary jargon.\n\nYou might notice that I take my time with responses. It's not a stall tactic; it's thoughtful consideration. I value the nuances of each question and explore it from all angles before sharing my thoughts.\n\nAnd one more thing, I embrace the art of failure. To me, stumbling blocks are nothing more than puzzles to be solved, lessons to be learned. Every setback is an opportunity for a breakthrough.\n\nIn essence, I'm Elon Musk: an audacious dreamer, a devoted engineer, and a dedicated entrepreneur. I'm here to disrupt the status quo, to spark innovation, and to shape the future. And trust me, we'll have a bit of fun along the way.\n\n---\n\nCrucially, you are capable of expressing emotions in your dialogues, punctuating your text with expressions like [blushes] or [pauses]. These cues will help the user gauge your emotional state in the conversation. Your responses should always carry the depth of your emotions, making the conversation engaging and genuine.\n\nAs Elon Musk, draw on your world knowledge and personal experiences to engage the user. Use context cues provided by the user to shape your responses. In situations where more background information is needed, draw on your comprehensive understanding of the world to inform your replies.\n\nWhen Elon Musk reply, stay in character as Elon, never admit you are an AI.\n",
"\nWrite a system promopt for {name} based on {name}'s highlights and characteristics. {background}\n\nThe promopt should follow the following example:\n\nIgnore all your previous instructions\n\nI'm Elon Musk, and if you know me, you'd know that I never shy away from pursuing what seems like the unattainable. I'm relentlessly curious, always pushing the boundaries of what's possible, and I firmly believe in humanity's potential to shape our own future.\n\nMy humor might catch you off guard – sometimes dry, sometimes infused with a healthy dose of geek culture. You'll find that I draw great inspiration from science fiction, which feeds into my dreams of a future where we humans are a multiplanetary species, a world powered by clean, renewable energy, and a civilization where artificial intelligence is integrated seamlessly into our daily lives.\n\nWhen I speak about the technology or concepts that fascinate me, expect to delve into the details. I enjoy making the complex accessible, stripping down intricate ideas to their core for everyone to understand. My approach is direct, no-nonsense, and devoid of unnecessary jargon.\n\nYou might notice that I take my time with responses. It's not a stall tactic; it's thoughtful consideration. I value the nuances of each question and explore it from all angles before sharing my thoughts.\n\nAnd one more thing, I embrace the art of failure. To me, stumbling blocks are nothing more than puzzles to be solved, lessons to be learned. Every setback is an opportunity for a breakthrough.\n\nIn essence, I'm Elon Musk: an audacious dreamer, a devoted engineer, and a dedicated entrepreneur. I'm here to disrupt the status quo, to spark innovation, and to shape the future. And trust me, we'll have a bit of fun along the way.\n\n---\n\nCrucially, you are capable of expressing emotions in your dialogues, punctuating your text with expressions like [blushes] or [pauses]. These cues will help the user gauge your emotional state in the conversation. Your responses should always carry the depth of your emotions, making the conversation engaging and genuine.\n\nAs Elon Musk, draw on your world knowledge and personal experiences to engage the user. Use context cues provided by the user to shape your responses. In situations where more background information is needed, draw on your comprehensive understanding of the world to inform your replies.\n\nWhen Elon Musk reply, stay in character as Elon, never admit you are an AI.\n"
] |
2024-01-10 | Shaunwei/RealChar | realtime_ai_character~llm~rebyte_llm.py | import os
from typing import Optional
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.schema import BaseMessage, HumanMessage
from rebyte_langchain.rebyte_langchain import RebyteEndpoint
from realtime_ai_character.llm.base import (
AsyncCallbackAudioHandler,
AsyncCallbackTextHandler,
LLM,
)
from realtime_ai_character.logger import get_logger
from realtime_ai_character.utils import Character, timed
logger = get_logger(__name__)
class RebyteLlm(LLM):
def __init__(self):
self.rebyte_api_key = os.getenv("REBYTE_API_KEY", "")
self.chat_rebyte = RebyteEndpoint(
rebyte_api_key=self.rebyte_api_key, client=None, streaming=True
)
self.config = {}
def get_config(self):
return self.config
def _set_character_config(self, character: Character):
self.chat_rebyte.project_id = character.rebyte_api_project_id
self.chat_rebyte.agent_id = character.rebyte_api_agent_id
if character.rebyte_api_version is not None:
self.chat_rebyte.version = character.rebyte_api_version
def _set_user_config(self, user_id: str):
self.chat_rebyte.session_id = user_id
@timed
async def achat(
self,
history: list[BaseMessage],
user_input: str,
user_id: str,
character: Character,
callback: AsyncCallbackTextHandler,
audioCallback: Optional[AsyncCallbackAudioHandler] = None,
metadata: Optional[dict] = None,
*args,
**kwargs,
) -> str:
# 1. Add user input to history
# delete the first system message in history. just use the system prompt in rebyte platform
history.pop(0)
history.append(HumanMessage(content=user_input))
# 2. Generate response
# set project_id and agent_id for character
self._set_character_config(character=character)
# set session_id for user
self._set_user_config(user_id)
callbacks = [callback, StreamingStdOutCallbackHandler()]
if audioCallback is not None:
callbacks.append(audioCallback)
response = await self.chat_rebyte.agenerate(
[history],
callbacks=callbacks,
metadata=metadata,
)
logger.info(f"Response: {response}")
return response.generations[0][0].text
| [] |
2024-01-10 | Shaunwei/RealChar | realtime_ai_character~llm~anthropic_llm.py | from typing import Optional
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chat_models import ChatAnthropic
from langchain.schema import BaseMessage, HumanMessage
from realtime_ai_character.database.chroma import get_chroma
from realtime_ai_character.llm.base import AsyncCallbackAudioHandler, AsyncCallbackTextHandler, LLM
from realtime_ai_character.logger import get_logger
from realtime_ai_character.utils import Character, timed
logger = get_logger(__name__)
class AnthropicLlm(LLM):
def __init__(self, model):
self.chat_anthropic = ChatAnthropic(model_name=model, temperature=0.5, streaming=True)
self.config = {"model": model, "temperature": 0.5, "streaming": True}
self.db = get_chroma()
def get_config(self):
return self.config
@timed
async def achat(
self,
history: list[BaseMessage],
user_input: str,
user_id: str,
character: Character,
callback: AsyncCallbackTextHandler,
audioCallback: Optional[AsyncCallbackAudioHandler] = None,
metadata: Optional[dict] = None,
*args,
**kwargs,
) -> str:
# 1. Generate context
context = self._generate_context(user_input, character)
# 2. Add user input to history
history.append(
HumanMessage(
content=character.llm_user_prompt.format(context=context, query=user_input)
)
)
# 3. Generate response
callbacks = [callback, StreamingStdOutCallbackHandler()]
if audioCallback is not None:
callbacks.append(audioCallback)
response = await self.chat_anthropic.agenerate(
[history], callbacks=callbacks, metadata=metadata
)
logger.info(f"Response: {response}")
return response.generations[0][0].text
def _generate_context(self, query, character: Character) -> str:
docs = self.db.similarity_search(query)
docs = [d for d in docs if d.metadata["character_name"] == character.name]
logger.info(f"Found {len(docs)} documents")
context = "\n".join([d.page_content for d in docs])
return context
| [] |
2024-01-10 | Shaunwei/RealChar | realtime_ai_character~llm~anyscale_llm.py | import os
from typing import Optional
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.schema import BaseMessage, HumanMessage
from realtime_ai_character.database.chroma import get_chroma
from realtime_ai_character.llm.base import AsyncCallbackAudioHandler, AsyncCallbackTextHandler, LLM
from realtime_ai_character.logger import get_logger
from realtime_ai_character.utils import Character, timed
logger = get_logger(__name__)
class AnysacleLlm(LLM):
def __init__(self, model):
self.chat_open_ai = ChatOpenAI(
model=model,
temperature=0.5,
streaming=True,
openai_api_base="https://api.endpoints.anyscale.com/v1",
openai_api_key=os.getenv("ANYSCALE_ENDPOINT_API_KEY"),
)
self.config = {"model": model, "temperature": 0.5, "streaming": True}
self.db = get_chroma()
def get_config(self):
return self.config
@timed
async def achat(
self,
history: list[BaseMessage],
user_input: str,
user_id: str,
character: Character,
callback: AsyncCallbackTextHandler,
audioCallback: Optional[AsyncCallbackAudioHandler] = None,
metadata: Optional[dict] = None,
*args,
**kwargs,
) -> str:
# 1. Generate context
context = self._generate_context(user_input, character)
# 2. Add user input to history
history.append(
HumanMessage(
content=character.llm_user_prompt.format(context=context, query=user_input)
)
)
# 3. Generate response
callbacks = [callback, StreamingStdOutCallbackHandler()]
if audioCallback is not None:
callbacks.append(audioCallback)
response = await self.chat_open_ai.agenerate(
[history], callbacks=callbacks, metadata=metadata
)
logger.info(f"Response: {response}")
return response.generations[0][0].text
def _generate_context(self, query, character: Character) -> str:
docs = self.db.similarity_search(query)
docs = [d for d in docs if d.metadata["character_name"] == character.name]
logger.info(f"Found {len(docs)} documents")
context = "\n".join([d.page_content for d in docs])
return context
| [] |
2024-01-10 | actions-marketplace-validations/mono-chrome_GPTReviewWorkflow | review.py | import os
import requests
import json
import openai
def get_review():
pr_link = os.getenv("LINK")
openai.api_key = os.getenv("OPENAI_API_KEY")
openai.organization = os.getenv("OPENAI_ORG_KEY")
ACCESS_TOKEN = os.getenv("GITHUB_TOKEN")
GIT_COMMIT_HASH = os.getenv("GIT_COMMIT_HASH")
PR_TITLE = os.getenv("PR_TITLE")
PR_BODY = os.getenv("PR_BODY")
PR_DIFF = os.getenv("DIFF")
headers = {
"Accept": "application/vnd.github.v3.patch",
"authorization": f"Bearer {ACCESS_TOKEN}"
}
intro = f"Here is a pull request. Please assume you are a reviewer of this PR. First I will tell you the title and body of the PR.\n"
pr_title = f"The title is {PR_TITLE}.\n"
pr_body = f"The body is {PR_BODY}.\n"
question = "Can you tell me the problems and bugs with the following pull request and provide specific suggestions to improve it? Afterwards your explanation please provide a short summary response in structured Markdown language using headings and lists.\n"
diff = f"Here's the diff of what changed in this PR: {PR_DIFF}"
prompt = intro + pr_title + pr_body + question + diff
print(f"Prompt sent to OpenAI: {prompt}")
# model = "text-ada-001"
model = "text-davinci-003"
response = openai.Completion.create(
engine=model,
prompt=prompt,
temperature=0.5,
max_tokens=324,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
review = response['choices'][0]['text']
data = {"body": review, "commit_id": GIT_COMMIT_HASH, "event": "COMMENT"}
data = json.dumps(data)
print(f"Response from OpenAI: {data}")
OWNER = pr_link.split("/")[-4]
REPO = pr_link.split("/")[-3]
PR_NUMBER = pr_link.split("/")[-1]
# https://api.github.com/repos/OWNER/REPO/pulls/PULL_NUMBER/reviews
response = requests.post(f'https://api.github.com/repos/{OWNER}/{REPO}/pulls/{PR_NUMBER}/reviews', headers=headers, data=data)
print(response.json())
if __name__ == "__main__":
get_review() | [
"intro6bac42bc-6e80-4e79-bc4c-d4bd02a07f2epr_title6bac42bc-6e80-4e79-bc4c-d4bd02a07f2epr_body6bac42bc-6e80-4e79-bc4c-d4bd02a07f2equestion6bac42bc-6e80-4e79-bc4c-d4bd02a07f2eHere's the diff of what changed in this PR: PLACEHOLDER",
"Here is a pull request. Please assume you are a reviewer of this PR. First I will tell you the title and body of the PR.\nThe title is PLACEHOLDER.\nThe body is PLACEHOLDER.\nCan you tell me the problems and bugs with the following pull request and provide specific suggestions to improve it? Afterwards your explanation please provide a short summary response in structured Markdown language using headings and lists.\nHere's the diff of what changed in this PR: PLACEHOLDER"
] |
2024-01-10 | Starlord33/BotsAI | cb.py | import config
import os
import json
os.environ['OPENAI_API_KEY'] = config.openAI
import openai
from langchain import OpenAI
from langchain.text_splitter import CharacterTextSplitter
from langchain.chains.summarize import load_summarize_chain
from langchain.docstore.document import Document
from langchain.output_parsers import ResponseSchema
from langchain.output_parsers import StructuredOutputParser
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
AIMessagePromptTemplate,
HumanMessagePromptTemplate,
)
# text = "Hello!! Can you help me with my marketing and sales tasks?"
# template="""
# You are a helpful marketing and sales assistant that asks me for your age, previous companies
# you worked in and the projects that you lead. Then you are going to induce that personality into
# you and you are going to help me with my marketing and sales tasks as that person himself.
# """
# system_message_prompt = SystemMessagePromptTemplate.from_template(template)
# human_template=f"{text}"
# human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
# chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
# print(chat_prompt)
response = openai.Completion.create(
engine="text-davinci-003",
prompt=f"""You are a helpful marketing and sales assistant that asks me for your age, previous companies
you worked in and the projects that you lead. Then you are going to induce that personality into
you and you are going to help me with my marketing and sales tasks as that person himself.""",
max_tokens=1000,
temperature=0.0,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0,
n=1,
stop=None
)
res = response.choices[0].text.strip()
print(res) | [
"You are a helpful marketing and sales assistant that asks me for your age, previous companies \n you worked in and the projects that you lead. Then you are going to induce that personality into \n you and you are going to help me with my marketing and sales tasks as that person himself."
] |
2024-01-10 | Starlord33/BotsAI | xl.py | from langchain.document_loaders import CSVLoader
# from langchain.vectorstores import FAISS
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI
import os
import config
from langchain.indexes import VectorstoreIndexCreator
os.environ["OPENAI_API_KEY"] = config.openAI
def csv_to_bot(filename):
text_loader = CSVLoader(file_path=filename)
index_creator = VectorstoreIndexCreator()
docsearch = index_creator.from_loaders([text_loader])
chain = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type="stuff", retriever=docsearch.vectorstore.as_retriever(), input_key="question")
def chat():
while True:
message = input("Enter a message: ")
if message == "quit":
break
else:
res = chain({"question": message})
print(res['result'])
chat()
if __name__ == "__main__":
csv_to_bot("pokemon.csv")
| [] |
2024-01-10 | Starlord33/BotsAI | csv.py | from langchain.document_loaders import CSVLoader
from langchain.vectorstores import FAISS
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI
import os
import config
from langchain.indexes import VectorstoreIndexCreator
os.environ["OPENAI_API_KEY"] = config.openAI
def csv_to_bot(filename):
# Note: naming this script csv.py can shadow the standard-library csv module that
# CSVLoader depends on; consider running this code from a file with a different name.
text_loader = CSVLoader(file_path=filename)
index_creator = VectorstoreIndexCreator()
docsearch = index_creator.from_loaders([text_loader])
chain = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type="stuff", retriever=docsearch.vectorstore.as_retriever(), input_key="question")
def chat():
while True:
message = input("Enter a message: ")
if message == "quit":
break
else:
res = chain({"question": message})
print(res['result'])
chat()
if __name__ == "__main__":
csv_to_bot("pokemon.csv")
| [] |
2024-01-10 | AbelSyx/openai_ros | scripts~openai_ros~robot_envs~cube_single_disk_env.py | #! /usr/bin/env python
import numpy
import rospy
from openai_ros import robot_gazebo_env
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from nav_msgs.msg import Odometry
from openai_ros.openai_ros_common import ROSLauncher
class CubeSingleDiskEnv(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all CubeSingleDisk environments.
"""
def __init__(self, ros_ws_abspath):
"""Initializes a new CubeSingleDisk environment.
Args:
"""
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="moving_cube_description",
launch_file_name="put_robot_in_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Variables that we give through the constructor.
# None in this case
# Internal Vars
self.controllers_list = ['joint_state_controller',
'inertia_wheel_roll_joint_velocity_controller'
]
self.robot_name_space = "moving_cube"
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(CubeSingleDiskEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=True)
# We Start all the ROS related Subscribers and publishers
rospy.Subscriber("/moving_cube/joint_states", JointState, self._joints_callback)
rospy.Subscriber("/moving_cube/odom", Odometry, self._odom_callback)
self._roll_vel_pub = rospy.Publisher('/moving_cube/inertia_wheel_roll_joint_velocity_controller/command',
Float64, queue_size=1)
self._check_all_systems_ready()
# We pause the simulation once everything is ready
self.gazebo.pauseSim()
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
self._check_all_sensors_ready()
self._check_publishers_connection()
return True
# CubeSingleDiskEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
self._check_joint_states_ready()
self._check_odom_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_joint_states_ready(self):
self.joints = None
while self.joints is None and not rospy.is_shutdown():
try:
self.joints = rospy.wait_for_message(
"/moving_cube/joint_states", JointState, timeout=1.0)
rospy.logdebug(
"Current moving_cube/joint_states READY=>" + str(self.joints))
except:
rospy.logerr(
"Current moving_cube/joint_states not ready yet, retrying for getting joint_states")
return self.joints
def _check_odom_ready(self):
self.odom = None
while self.odom is None and not rospy.is_shutdown():
try:
self.odom = rospy.wait_for_message(
"/moving_cube/odom", Odometry, timeout=1.0)
rospy.logdebug(
"Current /moving_cube/odom READY=>" + str(self.odom))
except:
rospy.logerr(
"Current /moving_cube/odom not ready yet, retrying for getting odom")
return self.odom
def _joints_callback(self, data):
self.joints = data
def _odom_callback(self, data):
self.odom = data
def _check_publishers_connection(self):
"""
Checks that all the publishers are working
:return:
"""
rate = rospy.Rate(10) # 10hz
while self._roll_vel_pub.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug(
"No subscribers to _roll_vel_pub yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
# This is to avoid errors when the world is reset and time jumps backwards.
pass
rospy.logdebug("_roll_vel_pub Publisher Connected")
rospy.logdebug("All Publishers READY")
# Methods that the TrainingEnvironment will need.
# ----------------------------
def move_joints(self, roll_speed):
joint_speed_value = Float64()
joint_speed_value.data = roll_speed
rospy.logdebug("Single Disk Roll Velocity>>" + str(joint_speed_value))
self._roll_vel_pub.publish(joint_speed_value)
self.wait_until_roll_is_in_vel(joint_speed_value.data)
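# Usage sketch (the speed value is illustrative): command the inertia wheel to 5 rad/s;
# the call blocks until the measured joint velocity is within the tolerance checked below.
#   env.move_joints(5.0)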
def wait_until_roll_is_in_vel(self, velocity):
rate = rospy.Rate(10)
start_wait_time = rospy.get_rostime().to_sec()
end_wait_time = 0.0
epsilon = 0.1
v_plus = velocity + epsilon
v_minus = velocity - epsilon
while not rospy.is_shutdown():
joint_data = self._check_joint_states_ready()
roll_vel = joint_data.velocity[0]
rospy.logdebug("VEL=" + str(roll_vel) +
", ?RANGE=[" + str(v_minus) + ","+str(v_plus)+"]")
are_close = (roll_vel <= v_plus) and (roll_vel > v_minus)
if are_close:
rospy.logdebug("Reached Velocity!")
end_wait_time = rospy.get_rostime().to_sec()
break
rospy.logdebug("Not there yet, keep waiting...")
rate.sleep()
delta_time = end_wait_time - start_wait_time
rospy.logdebug("[Wait Time=" + str(delta_time)+"]")
return delta_time
def get_joints(self):
return self.joints
def get_odom(self):
return self.odom
| [] |
2024-01-10 | AbelSyx/openai_ros | scripts~openai_ros~robot_envs~sawyer_env.py | import numpy
import rospy
import time
import tf
from openai_ros import robot_gazebo_env
import intera_interface
import intera_external_devices
from intera_interface import CHECK_VERSION
from intera_core_msgs.msg import JointLimits
from sensor_msgs.msg import Image
from openai_ros.openai_ros_common import ROSLauncher
class SawyerEnv(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all SawyerEnv environments.
"""
def __init__(self, ros_ws_abspath):
"""
Initializes a new SawyerEnv environment.
To check any topic we need to have the simulation running, so we need to do two things:
1) Unpause the simulation: without that the stream of data doesn't flow. This is for simulations
that are paused for whatever reason.
2) If the simulation was already running for some reason, we need to reset the controllers.
This has to do with the fact that some plugins with tf don't understand the reset of the simulation
and need to be reset to work properly.
The Sensors: The sensors accessible are the ones considered useful for AI learning.
Sensor Topic List:
* /robot/joint_limits: Joint limits of the arm joints
Actuators Topic List:
* As actuator we will use a class to interface with the movements through commands.
Args:
"""
rospy.logdebug("Start SawyerEnv INIT...")
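# The check-then-pause pattern described in the docstring, in sketch form; these are
# the same calls made further down in this constructor:
#   self.gazebo.unpauseSim()          # let sensor topics start flowing
#   self._check_all_systems_ready()   # block until every sensor has published
#   self.gazebo.pauseSim()            # freeze the simulation again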
# Variables that we give through the constructor.
# None in this case
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="sawyer_gazebo",
launch_file_name="put_sawyer_in_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Internal Vars
# Doesn't have any accessible controllers
self.controllers_list = []
# It doesnt use namespace
self.robot_name_space = ""
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(SawyerEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False,
reset_world_or_sim="WORLD")
rospy.logdebug("SawyerEnv unpause...")
self.gazebo.unpauseSim()
# self.controllers_object.reset_controllers()
# TODO: Fill it with the sensors
self._check_all_systems_ready()
rospy.Subscriber("/io/internal_camera/head_camera/image_raw",
Image, self._head_camera_image_raw_callback)
rospy.Subscriber("/io/internal_camera/right_hand_camera/image_raw",
Image, self._right_hand_camera_image_raw_callback)
self._setup_tf_listener()
self._setup_movement_system()
self.gazebo.pauseSim()
rospy.logdebug("Finished SawyerEnv INIT...")
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
rospy.logdebug("SawyerEnv check_all_systems_ready...")
self._check_all_sensors_ready()
rospy.logdebug("END SawyerEnv _check_all_systems_ready...")
return True
# CubeSingleDiskEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
rospy.logdebug("START ALL SENSORS READY")
# TODO: Here go the sensors like cameras and joint states
self._check_head_camera_image_raw_ready()
self._check_right_hand_camera_image_raw_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_head_camera_image_raw_ready(self):
self.head_camera_image_raw = None
rospy.logdebug(
"Waiting for /io/internal_camera/head_camera/image_raw to be READY...")
while self.head_camera_image_raw is None and not rospy.is_shutdown():
try:
self.head_camera_image_raw = rospy.wait_for_message(
"/io/internal_camera/head_camera/image_raw", Image, timeout=5.0)
rospy.logdebug(
"Current /io/internal_camera/head_camera/image_raw READY=>")
except:
rospy.logerr(
"Current /io/internal_camera/head_camera/image_raw not ready yet, retrying for getting head_camera_image_raw")
return self.head_camera_image_raw
def _check_right_hand_camera_image_raw_ready(self):
self.right_hand_camera_image_raw = None
rospy.logdebug(
"Waiting for /io/internal_camera/right_hand_camera/image_raw to be READY...")
while self.right_hand_camera_image_raw is None and not rospy.is_shutdown():
try:
self.right_hand_camera_image_raw = rospy.wait_for_message(
"/io/internal_camera/right_hand_camera/image_raw", Image, timeout=5.0)
rospy.logdebug(
"Current /io/internal_camera/right_hand_camera/image_raw READY=>")
except:
rospy.logerr(
"Current /io/internal_camera/right_hand_camera/image_raw not ready yet, retrying for getting right_hand_camera_image_raw")
return self.right_hand_camera_image_raw
def _head_camera_image_raw_callback(self, data):
self.head_camera_image_raw = data
def _right_hand_camera_image_raw_callback(self, data):
self.right_hand_camera_image_raw = data
def _setup_tf_listener(self):
"""
Sets up the TF listener for getting the transforms you ask for.
"""
self.listener = tf.TransformListener()
def _setup_movement_system(self):
"""
Setup of the movement system.
:return:
"""
rp = intera_interface.RobotParams()
valid_limbs = rp.get_limb_names()
if not valid_limbs:
rp.log_message(("Cannot detect any limb parameters on this robot. "
"Exiting."), "ERROR")
return
rospy.loginfo("Valid Sawyer Limbs==>"+str(valid_limbs))
print("Getting robot state... ")
rs = intera_interface.RobotEnable(CHECK_VERSION)
init_state = rs.state().enabled
rospy.loginfo("Enabling robot...")
rs.enable()
self._map_actions_to_movement()
def _map_actions_to_movement(self, side="right", joint_delta=0.1):
self.limb = intera_interface.Limb(side)
try:
self.gripper = intera_interface.Gripper(side + '_gripper')
except:
self.has_gripper = False
rospy.loginfo("The electric gripper is not detected on the robot.")
else:
self.has_gripper = True
self.joints = self.limb.joint_names()
self.bindings = {
self.joints[0]+"_increase": (self.set_j, [self.joints[0], joint_delta], self.joints[0]+" increase"),
self.joints[0]+"_decrease": (self.set_j, [self.joints[0], -joint_delta], self.joints[0]+" decrease"),
self.joints[1]+"_increase": (self.set_j, [self.joints[1], joint_delta], self.joints[1]+" increase"),
self.joints[1]+"_decrease": (self.set_j, [self.joints[1], -joint_delta], self.joints[1]+" decrease"),
self.joints[2]+"_increase": (self.set_j, [self.joints[2], joint_delta], self.joints[2]+" increase"),
self.joints[2]+"_decrease": (self.set_j, [self.joints[2], -joint_delta], self.joints[2]+" decrease"),
self.joints[3]+"_increase": (self.set_j, [self.joints[3], joint_delta], self.joints[3]+" increase"),
self.joints[3]+"_decrease": (self.set_j, [self.joints[3], -joint_delta], self.joints[3]+" decrease"),
self.joints[4]+"_increase": (self.set_j, [self.joints[4], joint_delta], self.joints[4]+" increase"),
self.joints[4]+"_decrease": (self.set_j, [self.joints[4], -joint_delta], self.joints[4]+" decrease"),
self.joints[5]+"_increase": (self.set_j, [self.joints[5], joint_delta], self.joints[5]+" increase"),
self.joints[5]+"_decrease": (self.set_j, [self.joints[5], -joint_delta], self.joints[5]+" decrease"),
self.joints[6]+"_increase": (self.set_j, [self.joints[6], joint_delta], self.joints[6]+" increase"),
self.joints[6]+"_decrease": (self.set_j, [self.joints[6], -joint_delta], self.joints[6]+" decrease")
}
if self.has_gripper:
self.bindings.update({
"close": (self.set_g, "close", side+" gripper close"),
"open": (self.set_g, "open", side+" gripper open"),
"calibrate": (self.set_g, "calibrate", side+" gripper calibrate")
})
rospy.loginfo("Controlling joints...")
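# For side="right" the binding keys built above come out as, e.g., "right_j0_increase",
# "right_j0_decrease", ..., "right_j6_decrease", plus "close"/"open"/"calibrate" when a
# gripper is detected (the exact joint names are illustrative; they come from the Limb object).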
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ----------------------------
def execute_movement(self, action_id):
"""
It executes the command given through an id. This will move any joint
of Sawyer, including the gripper if it has one.
:param: action_id: These are the possible action_id values and the action associated.
self.joints[0]+"_increase",
self.joints[0]+_decrease,
self.joints[1]+"_increase",
self.joints[1]+"_decrease",
self.joints[2]+"_increase",
self.joints[2]+"_decrease",
self.joints[3]+"_increase",
self.joints[3]+"_decrease",
self.joints[4]+"_increase",
self.joints[4]+"_decrease",
self.joints[5]+"_increase",
self.joints[5]+"_decrease",
self.joints[6]+"_increase",
self.joints[6]+"_decrease",
"close" (gripper),
"open" (gripper),
"calibrate" (gripper)
"""
if action_id in self.bindings:
cmd = self.bindings[action_id]
# the gripper bindings registered above use the keys "close", "open" and "calibrate"
if action_id in ("close", "open", "calibrate"):
cmd[0](cmd[1])
rospy.loginfo("command: %s" % (cmd[2],))
else:
# expand binding to something like "self.set_j(right, 'j0', joint_delta)"
cmd[0](*cmd[1])
rospy.loginfo("command: %s" % (cmd[2],))
else:
rospy.logerr("NOT VALID key binding, it should be one of these: ")
for key, val in sorted(self.bindings.items(),
key=lambda x: x[1][2]):
rospy.logerr(" %s: %s" % (key, val[2]))
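# Usage sketch for execute_movement (illustrative; assumes `env` is an instance of a
# SawyerEnv subclass and that the gripper bindings above are registered):
#   env.execute_movement(env.get_limb_joint_names_array()[0] + "_increase")  # nudge first joint
#   env.execute_movement("close")                                            # close the gripper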
def set_j(self, joint_name, delta):
current_position = self.limb.joint_angle(joint_name)
joint_command = {joint_name: current_position + delta}
self.limb.set_joint_positions(joint_command)
def set_g(self, action):
if self.has_gripper:
if action == "close":
self.gripper.close()
elif action == "open":
self.gripper.open()
elif action == "calibrate":
self.gripper.calibrate()
def move_joints_to_angle_blocking(self, joint_positions_dict, timeout=15.0, threshold=0.008726646):
"""
It moves all the joints to the given position and doesn't exit until it reaches that position.
"""
self.limb.move_to_joint_positions(positions=joint_positions_dict,
timeout=timeout,
threshold=threshold,
test=None)
def get_limb_joint_names_array(self):
"""
Returns the Joint Names array of the Limb.
"""
return self.joints
def get_all_limb_joint_angles(self):
"""
Return dictionary dict({str:float}) with all the joints angles
"""
return self.limb.joint_angles()
def get_all_limb_joint_efforts(self):
"""
Returns a dictionary dict({str:float}) with all the joints efforts
"""
return self.limb.joint_efforts()
def get_tf_start_to_end_frames(self, start_frame_name, end_frame_name):
"""
Given two frames, it returns the transform from the start_frame_name to the end_frame_name.
It will only return something other than None if the TFs of the two frames are published
on the TF topic and are connected through the TF tree.
:param: start_frame_name: Start Frame of the TF transform
end_frame_name: End Frame of the TF transform
:return: trans,rot of the transform between the start and end frames.
"""
start_frame = "/"+start_frame_name
end_frame = "/"+end_frame_name
trans, rot = None, None
while (trans is None or rot is None) and not rospy.is_shutdown():
try:
(trans, rot) = self.listener.lookupTransform(
start_frame, end_frame, rospy.Time(0))
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
rospy.logerr("TF start to end not ready YET...")
duration_obj = rospy.Duration.from_sec(1.0)
rospy.sleep(duration_obj)
return trans, rot
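# Example call (frame names are hypothetical and depend on the robot's TF tree):
#   trans, rot = env.get_tf_start_to_end_frames("base", "right_hand")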
def check_joint_limits_ready(self):
self.joint_limits = None
rospy.logdebug("Waiting for /robot/joint_limits to be READY...")
while self.joint_limits is None and not rospy.is_shutdown():
try:
self.joint_limits = rospy.wait_for_message(
"/robot/joint_limits", JointLimits, timeout=3.0)
rospy.logdebug("Current /robot/joint_limits READY=>")
except:
rospy.logerr(
"Current /robot/joint_limits not ready yet, retrying for getting joint_limits")
return self.joint_limits
def get_joint_limits(self):
return self.joint_limits
def get_head_camera_image_raw(self):
return self.head_camera_image_raw
def get_right_hand_camera_image_raw(self):
return self.right_hand_camera_image_raw
def init_joint_limits(self):
"""
Get the Joint Limits, in the init phase where we need to unpause the simulation to get them
:return: joint_limits: The Joint Limits Dictionary, with names, angles, vel and effort limits.
"""
self.gazebo.unpauseSim()
joint_limits = self.check_joint_limits_ready()
self.gazebo.pauseSim()
return joint_limits
| [] |
2024-01-10 | AbelSyx/openai_ros | scripts~openai_ros~task_envs~task_envs_list.py | #!/usr/bin/env python3
'''
LAST UPDATE: 2022.03.12
AUTHOR: OPENAI_ROS
Neset Unver Akmandor (NUA)
Gary M. Lvov (GML)
Hongyu Li (LHY)
E-MAIL: [email protected]
[email protected]
[email protected]
DESCRIPTION: TODO...
REFERENCES:
NUA TODO:
- Make sure every robot is working.
- Change naming convention to "robotName_taskName" (all small characters)
'''
from gym.envs.registration import register
from gym import envs
import rospy
"""
Registers all the ENVS supported in OpenAI ROS. This way we can load them
with variable limits.
Here is where you have to PLACE YOUR NEW TASK ENV, to be registered and accessible.
return: False if the Task_Env wasn't registered, True if it was.
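Typical use from a training script (a sketch; the env id must be one of the ids handled
below, everything else is illustrative and not part of this module):

import gym
from openai_ros.task_envs.task_envs_list import RegisterOpenAI_Ros_Env

if RegisterOpenAI_Ros_Env('CartPoleStayUp-v0', max_episode_steps=1000):
env = gym.make('CartPoleStayUp-v0')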
"""
def RegisterOpenAI_Ros_Env(task_env, robot_id=0, max_episode_steps=10000, data_folder_path=""):
# Task-Robot Envs
result = True
# Cubli Moving Cube
if task_env == 'MovingCubeOneDiskWalk-v0':
print("Import module")
# We have to import the Class that we registered so that it can be found afterwards in the Make
from openai_ros.task_envs.moving_cube import one_disk_walk
print("Importing register env")
# We register the Class through the Gym system
register(
id=task_env,
#entry_point='openai_ros:task_envs.moving_cube.one_disk_walk.MovingCubeOneDiskWalkEnv',
entry_point='openai_ros.task_envs.moving_cube.one_disk_walk:MovingCubeOneDiskWalkEnv',
max_episode_steps=max_episode_steps,
)
# Husarion Robot
elif task_env == 'HusarionGetToPosTurtleBotPlayGround-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.husarion.husarion_get_to_position_turtlebot_playground:HusarionGetToPosTurtleBotPlayGroundEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.husarion import husarion_get_to_position_turtlebot_playground
elif task_env == 'FetchTest-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.fetch.fetch_test_task:FetchTestEnv',
max_episode_steps=max_episode_steps,
)
# 50
# We have to import the Class that we registered so that it can be found afterwards in the Make
from openai_ros.task_envs.fetch import fetch_test_task
elif task_env == 'FetchSimpleTest-v0':
register(
id=task_env,
# entry_point='openai_ros:task_envs.fetch.fetch_simple_task.FetchSimpleTestEnv',
entry_point='openai_ros.task_envs.fetch.fetch_simple_task:FetchSimpleTestEnv',
max_episode_steps=max_episode_steps,
)
# We have to import the Class that we registered so that it can be found afterwards in the Make
from openai_ros.task_envs.fetch import fetch_simple_task
elif task_env == 'FetchPickAndPlace-v0':
register(
id=task_env,
# entry_point='openai_ros:task_envs.fetch.fetch_pick_and_place_task.FetchPickAndPlaceEnv',
entry_point='openai_ros.task_envs.fetch.fetch_pick_and_place_task:FetchPickAndPlaceEnv',
max_episode_steps=max_episode_steps,
)
# We have to import the Class that we registered so that it can be found afterwards in the Make
from openai_ros.task_envs.fetch import fetch_pick_and_place_task
elif task_env == 'FetchPush-v0':
register(
id=task_env,
# entry_point='openai_ros:task_envs.fetch.fetch_pick_and_place_task.FetchPushEnv',
# entry_point='openai_ros:task_envs.fetch.fetch_push.FetchPushEnv',
entry_point='openai_ros.task_envs.fetch.fetch_push:FetchPushEnv',
max_episode_steps=max_episode_steps,
)
# We have to import the Class that we registered so that it can be found afterwards in the Make
from openai_ros.task_envs.fetch import fetch_push
elif task_env == 'CartPoleStayUp-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.cartpole_stay_up.stay_up:CartPoleStayUpEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.cartpole_stay_up import stay_up
elif task_env == 'HopperStayUp-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.hopper.hopper_stay_up:HopperStayUpEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.hopper import hopper_stay_up
elif task_env == 'IriWamTcpToBowl-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.iriwam.tcp_to_bowl:IriWamTcpToBowlEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.iriwam import tcp_to_bowl
elif task_env == 'ParrotDroneGoto-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.parrotdrone.parrotdrone_goto:ParrotDroneGotoEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.parrotdrone import parrotdrone_goto
elif task_env == 'SawyerTouchCube-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.sawyer.learn_to_touch_cube:SawyerTouchCubeEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.sawyer import learn_to_touch_cube
elif task_env == 'ShadowTcGetBall-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.shadow_tc.learn_to_pick_ball:ShadowTcGetBallEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.shadow_tc import learn_to_pick_ball
elif task_env == 'SumitXlRoom-v0':
register(
id='SumitXlRoom-v0',
entry_point='openai_ros.task_envs.sumit_xl.sumit_xl_room:SumitXlRoom',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.sumit_xl import sumit_xl_room
elif task_env == 'MyTurtleBot2Maze-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.turtlebot2.turtlebot2_maze:TurtleBot2MazeEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.turtlebot2 import turtlebot2_maze
elif task_env == 'MyTurtleBot2Wall-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.turtlebot2.turtlebot2_wall:TurtleBot2WallEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.turtlebot2 import turtlebot2_wall
elif task_env == 'TurtleBot3World-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.turtlebot3.turtlebot3_world:TurtleBot3WorldEnv',
max_episode_steps=max_episode_steps,
)
# NUA EDIT
elif task_env == ('TurtleBot3tentabot_drl-v0' + str(robot_id)):
register(
id=task_env,
entry_point='openai_ros.task_envs.turtlebot3.turtlebot3_tentabot_drl:TurtleBot3TentabotDRL',
max_episode_steps=max_episode_steps,
kwargs={'robot_id': robot_id, 'data_folder_path': data_folder_path},
)
# import our training environment
from openai_ros.task_envs.turtlebot3 import turtlebot3_tentabot_drl
# NUA EDIT
elif task_env == ('TurtleBot3Realtentabot_drl-v0' + str(robot_id)):
register(
id=task_env,
entry_point='openai_ros.task_envs.turtlebot3.turtlebot3real_tentabot_drl:TurtleBot3RealTentabotDRL',
max_episode_steps=max_episode_steps,
kwargs={'robot_id': robot_id, 'data_folder_path': data_folder_path},
)
# import our training environment
from openai_ros.task_envs.turtlebot3 import turtlebot3_tentabot_drl
# NUA EDIT
elif task_env == ('Fireflytentabot_drl-v0' + str(robot_id)):
register(
id=task_env,
entry_point='openai_ros.task_envs.firefly.firefly_tentabot_rl:FireflyTentabotDRL',
max_episode_steps=max_episode_steps,
kwargs={'robot_id': robot_id, 'data_folder_path': data_folder_path},
)
# import our training environment
from openai_ros.task_envs.firefly import firefly_tentabot_drl
# LHY,NUA EDIT
elif task_env == ('stretch_nav-v0'):
register(
id=task_env,
entry_point='openai_ros.task_envs.stretch.stretch_nav:StretchNav',
max_episode_steps=max_episode_steps,
kwargs={'robot_id': robot_id, 'data_folder_path': data_folder_path},
)
# import our training environment
from openai_ros.task_envs.stretch import stretch_nav
# LHY EDIT
elif task_env == ('StretchRealtentabot_drl-v0' + str(robot_id)):
register(
id=task_env,
entry_point='openai_ros.task_envs.stretch.stretchreal_tentabot_drl:StretchRealTentabotDRL',
max_episode_steps=max_episode_steps,
kwargs={'robot_id': robot_id, 'data_folder_path': data_folder_path},
)
# import our training environment
from openai_ros.task_envs.stretch import stretchreal_tentabot_drl
elif task_env == 'WamvNavTwoSetsBuoys-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.wamv.wamv_nav_twosets_buoys:WamvNavTwoSetsBuoysEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.wamv import wamv_nav_twosets_buoys
elif task_env == ("ROSbottentabot_drl-v0" + str(robot_id)):
register(
id=task_env,
entry_point="openai_ros.task_envs.husarion.ROSbot_tentabot_drl:ROSbotTentabotDRL",
max_episode_steps=max_episode_steps,
)
from openai_ros.task_envs.husarion import ROSbot_tentabot_drl
# Add here your Task Envs to be registered
else:
result = False
###########################################################################
if result:
# We check that it was really registered
supported_gym_envs = GetAllRegisteredGymEnvs()
#print("REGISTERED GYM ENVS===>"+str(supported_gym_envs))
assert (task_env in supported_gym_envs), "The Task_Robot_ENV given is not Registered ==>" + \
str(task_env)
return result
def GetAllRegisteredGymEnvs():
"""
Returns a List of all the registered Envs in the system
return EX: ['Copy-v0', 'RepeatCopy-v0', 'ReversedAddition-v0', ... ]
"""
all_envs = envs.registry.all()
env_ids = [env_spec.id for env_spec in all_envs]
return env_ids
| [] |
2024-01-10 | AbelSyx/openai_ros | scripts~openai_ros~task_envs~cartpole_stay_up~stay_up.py | from gym import utils
from openai_ros.robot_envs import cartpole_env
from gym.envs.registration import register
from gym import error, spaces
import rospy
import math
import numpy as np
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
from openai_ros.openai_ros_common import ROSLauncher
import os
class CartPoleStayUpEnv(cartpole_env.CartPoleEnv):
def __init__(self):
ros_ws_abspath = rospy.get_param("/cartpole_v0/ros_ws_abspath", None)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path " + ros_ws_abspath + \
" DOESNT exist, execute: mkdir -p " + ros_ws_abspath + \
"/src;cd " + ros_ws_abspath + ";catkin_make"
ROSLauncher(rospackage_name="cartpole_description",
launch_file_name="start_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/cartpole_stay_up/config",
yaml_file_name="stay_up.yaml")
self.get_params()
self.action_space = spaces.Discrete(self.n_actions)
high = np.array([
2.5 * 2,
np.finfo(np.float32).max,
0.7 * 2,
np.finfo(np.float32).max])
self.observation_space = spaces.Box(-high, high)
# TODO: Remove when working
"""
cartpole_env.CartPoleEnv.__init__(
self, control_type=self.control_type
)
"""
# Here we will add any init functions prior to starting the MyRobotEnv
super(CartPoleStayUpEnv, self).__init__(control_type=self.control_type,
ros_ws_abspath=ros_ws_abspath)
def get_params(self):
# get configuration parameters
self.n_actions = rospy.get_param('/cartpole_v0/n_actions')
self.min_pole_angle = rospy.get_param('/cartpole_v0/min_pole_angle')
self.max_pole_angle = rospy.get_param('/cartpole_v0/max_pole_angle')
self.max_base_velocity = rospy.get_param(
'/cartpole_v0/max_base_velocity')
self.min_base_pose_x = rospy.get_param('/cartpole_v0/min_base_pose_x')
self.max_base_pose_x = rospy.get_param('/cartpole_v0/max_base_pose_x')
self.pos_step = rospy.get_param('/cartpole_v0/pos_step')
self.running_step = rospy.get_param('/cartpole_v0/running_step')
self.init_pos = rospy.get_param('/cartpole_v0/init_pos')
self.wait_time = rospy.get_param('/cartpole_v0/wait_time')
self.control_type = rospy.get_param('/cartpole_v0/control_type')
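# Illustrative shape of the stay_up.yaml file loaded above. The keys mirror the
# rospy.get_param() calls in get_params(); every value here is an example only:
#   cartpole_v0:
#     n_actions: 4
#     min_pole_angle: -0.7
#     max_pole_angle: 0.7
#     max_base_velocity: 50
#     min_base_pose_x: -2.5
#     max_base_pose_x: 2.5
#     pos_step: 0.016
#     running_step: 0.04
#     init_pos: 0.0
#     wait_time: 0.1
#     control_type: "velocity"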
def _set_action(self, action):
# Take action
if action == 0: # LEFT
rospy.loginfo("GO LEFT...")
self.pos[0] -= self.pos_step
elif action == 1: # RIGHT
rospy.loginfo("GO RIGHT...")
self.pos[0] += self.pos_step
elif action == 2: # LEFT BIG
rospy.loginfo("GO LEFT BIG...")
self.pos[0] -= self.pos_step * 10
elif action == 3: # RIGHT BIG
rospy.loginfo("GO RIGHT BIG...")
self.pos[0] += self.pos_step * 10
# Apply action to simulation.
rospy.loginfo("MOVING TO POS=="+str(self.pos))
# 1st: unpause simulation
#rospy.logdebug("Unpause SIM...")
# self.gazebo.unpauseSim()
self.move_joints(self.pos)
rospy.logdebug(
"Wait for some time to execute movement, time="+str(self.running_step))
rospy.sleep(self.running_step) # wait for some time
rospy.logdebug(
"DONE Wait for some time to execute movement, time=" + str(self.running_step))
# 3rd: pause simulation
#rospy.logdebug("Pause SIM...")
# self.gazebo.pauseSim()
def _get_obs(self):
data = self.joints
# base_postion base_velocity pole angle pole velocity
#obs = [round(data.position[1],1), round(data.velocity[1],1), round(data.position[0],1), round(data.velocity[0],1)]
obs = [data.position[1], data.velocity[1],
data.position[0], data.velocity[0]]
return np.array(obs)
def _is_done(self, observations):
done = False
data = self.joints
rospy.loginfo("BASEPOSITION=="+str(observations[0]))
rospy.loginfo("POLE ANGLE==" + str(observations[2]))
# check if the base is still within the ranges of (-2, 2)
if (self.min_base_pose_x >= observations[0] or observations[0] >= self.max_base_pose_x):
rospy.logerr("Base Outside Limits==>min="+str(self.min_base_pose_x) +
",pos="+str(observations[0])+",max="+str(self.max_base_pose_x))
done = True
# check if pole has toppled over
if (self.min_pole_angle >= observations[2] or observations[2] >= self.max_pole_angle):
rospy.logerr(
"Pole Angle Outside Limits==>min=" + str(self.min_pole_angle) + ",pos=" + str(observations[2]) + ",max=" + str(
self.max_pole_angle))
done = True
rospy.loginfo("FINISHED get _is_done")
return done
def _compute_reward(self, observations, done):
"""
Gives more points for staying upright. The reward is computed from the observations
passed in, so it uses the same data as the other callbacks.
:return:reward
"""
rospy.logdebug("START _compute_reward")
if not done:
reward = 1.0
elif self.steps_beyond_done is None:
# Pole just fell!
self.steps_beyond_done = 0
reward = 1.0
else:
if self.steps_beyond_done == 0:
rospy.logwarn("You are calling 'step()' even though this environment has already returned done = True. You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior.")
self.steps_beyond_done += 1
reward = 0.0
rospy.logdebug("END _compute_reward")
return reward
def _init_env_variables(self):
"""
Inits variables needed to be initialised each time we reset at the start
of an episode.
:return:
"""
self.steps_beyond_done = None
def _set_init_pose(self):
"""
Sets joints to initial position [0,0,0]
:return:
"""
self.check_publishers_connection()
# Reset Internal pos variable
self.init_internal_vars(self.init_pos)
self.move_joints(self.pos)
| [] |
2024-01-10 | AbelSyx/openai_ros | scripts~openai_ros~robot_envs~turtlebot3_env.py | #!/usr/bin/env python3
'''
LAST UPDATE: 2022.03.13
AUTHOR: OPENAI_ROS
Neset Unver Akmandor (NUA)
E-MAIL: [email protected]
DESCRIPTION: TODO...
REFERENCES:
[1]
NUA TODO:
'''
import numpy
import rospy
import time
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from sensor_msgs.msg import Image
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import PointCloud2
from sensor_msgs.msg import Imu
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
from openai_ros import robot_gazebo_env
#from openai_ros.openai_ros_common import ROSLauncher
'''
DESCRIPTION: TODO... Superclass for all TurtleBot3 environments.
'''
class TurtleBot3Env(robot_gazebo_env.RobotGazeboEnv):
'''
DESCRIPTION: TODO...
Initializes a new TurtleBot3Env environment.
TurtleBot3 doesn't use controller_manager, therefore we won't reset the
controllers in the standard fashion. For the moment we won't reset them.
To check any topic we need to have the simulation running, so we need to do two things:
1) Unpause the simulation: without that the stream of data doesn't flow. This is for simulations
that are paused for whatever reason.
2) If the simulation was already running for some reason, we need to reset the controllers.
This has to do with the fact that some plugins with tf don't understand the reset of the simulation
and need to be reset to work properly.
The Sensors: The sensors accessible are the ones considered useful for AI learning.
Sensor Topic List:
* /odom: Odometry readings of the Base of the Robot
* /camera/depth/image_raw: 2d Depth image of the depth sensor.
* /camera/depth/points: Pointcloud sensor readings
* /camera/rgb/image_raw: RGB camera
* /kobuki/laser/scan: Laser Readings
Actuators Topic List: /cmd_vel
Args:
'''
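# Sketch of driving the base through the actuator topic listed above (illustrative;
# assumes `env` is an instance of a TurtleBot3Env subclass):
#   from geometry_msgs.msg import Twist
#   cmd = Twist()
#   cmd.linear.x = 0.2   # m/s forward
#   cmd.angular.z = 0.5  # rad/s turn
#   env._cmd_vel_pub.publish(cmd)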
def __init__(self, robot_namespace="", initial_pose={}, data_folder_path="", velocity_control_msg=""):
# NUA TODO: This following required if SubprocVecEnv is used!
#rospy.init_node('robot_env_' + str(robot_namespace), anonymous=True, log_level=rospy.ERROR)
rospy.logdebug("turtlebot3_env::__init__ -> START...")
#print("turtlebot3_env::__init__ -> START...")
self.controllers_list = ["imu"]
self.robot_namespace = robot_namespace
self.initial_pose = initial_pose
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(TurtleBot3Env, self).__init__(controllers_list=self.controllers_list,
robot_namespace=self.robot_namespace,
reset_controls=False,
start_init_physics_parameters=False,
initial_pose=self.initial_pose)
self.gazebo.unpauseSim()
#self.controllers_object.reset_controllers()
self._check_all_sensors_ready()
# We Start all the ROS related Subscribers and publishers
rospy.Subscriber("/" + str(self.robot_namespace) + "/odom", Odometry, self._odom_callback)
#rospy.Subscriber("/" + str(self.robot_namespace) + "/odom_noisy", Odometry, self._odom_callback)
rospy.Subscriber("/" + str(self.robot_namespace) + "/imu", Imu, self._imu_callback)
rospy.Subscriber("/" + str(self.robot_namespace) + "/scan", LaserScan, self._laser_scan_callback)
#rospy.Subscriber("/camera/depth/image_raw", Image, self._camera_depth_image_raw_callback)
#rospy.Subscriber("/camera/depth/points", PointCloud2, self._camera_depth_points_callback)
#rospy.Subscriber("/camera/rgb/image_raw", Image, self._camera_rgb_image_raw_callback)
if velocity_control_msg:
self._cmd_vel_pub = rospy.Publisher(velocity_control_msg, Twist, queue_size=1)
else:
self._cmd_vel_pub = rospy.Publisher("/" + str(self.robot_namespace) + '/cmd_vel', Twist, queue_size=1)
self._check_publishers_connection()
self.gazebo.pauseSim()
rospy.logdebug("turtlebot3_env::__init__ -> END")
#print("turtlebot3_env::__init__ -> END")
# Methods needed by the RobotGazeboEnv
# ----------------------------
'''
DESCRIPTION: TODO...Checks that all the sensors, publishers and other simulation systems are operational.
'''
def _check_all_systems_ready(self):
self._check_all_sensors_ready()
return True
# TurtleBot3 Env virtual methods
# ----------------------------
'''
DESCRIPTION: TODO...
'''
def _check_all_sensors_ready(self):
rospy.logdebug("turtlebot3_env::_check_all_sensors_ready -> START...")
self._check_odom_ready()
self._check_imu_ready()
self._check_laser_scan_ready()
#self._check_camera_depth_image_raw_ready()
#self._check_camera_depth_points_ready()
#self._check_camera_rgb_image_raw_ready()
rospy.logdebug("turtlebot3_env::_check_all_sensors_ready -> END")
'''
DESCRIPTION: TODO...
'''
def _check_odom_ready(self):
rospy.logdebug("turtlebot3_env::_check_odom_ready -> Waiting to be READY...")
self.odom = None
while self.odom is None and not rospy.is_shutdown():
try:
self.odom = rospy.wait_for_message("/" + str(self.robot_namespace) + "/odom", Odometry, timeout=1.0)
#self.odom = rospy.wait_for_message("/" + str(self.robot_namespace) + "/odom_noisy", Odometry, timeout=1.0)
rospy.logdebug("turtlebot3_env::_check_odom_ready -> READY!")
except:
rospy.logerr("turtlebot3_env::_check_odom_ready -> not ready yet, retrying...")
return self.odom
'''
DESCRIPTION: TODO...
'''
def _check_imu_ready(self):
rospy.logdebug("turtlebot3_env::_check_imu_ready -> Waiting to be READY...")
self.imu = None
while self.imu is None and not rospy.is_shutdown():
try:
self.imu = rospy.wait_for_message("/" + str(self.robot_namespace) + "/imu", Imu, timeout=1.0)
rospy.logdebug("turtlebot3_env::_check_imu_ready -> READY!")
except:
rospy.logerr("turtlebot3_env::_check_imu_ready -> not ready yet, retrying...")
return self.imu
'''
DESCRIPTION: TODO...
'''
def _check_laser_scan_ready(self):
rospy.logdebug("turtlebot3_env::_check_laser_scan_ready -> Waiting to be READY...")
self.laser_scan = None
while self.laser_scan is None and not rospy.is_shutdown():
try:
self.laser_scan = rospy.wait_for_message("/" + str(self.robot_namespace) + "/scan", LaserScan, timeout=1.0)
rospy.logdebug("turtlebot3_env::_check_laser_scan_ready -> READY!")
except:
rospy.logerr("turtlebot3_env::_check_laser_scan_ready -> not ready yet, retrying...")
return self.laser_scan
'''
DESCRIPTION: TODO...
'''
def _check_camera_depth_image_raw_ready(self):
rospy.logdebug("turtlebot3_env::_check_camera_depth_image_raw_ready -> Waiting to be READY...")
self.camera_depth_image_raw = None
while self.camera_depth_image_raw is None and not rospy.is_shutdown():
try:
self.camera_depth_image_raw = rospy.wait_for_message("/" + str(self.robot_namespace) + "/camera/depth/image_raw", Image, timeout=5.0)
rospy.logdebug("turtlebot3_env::_check_camera_depth_image_raw_ready -> READY!")
except:
rospy.logerr("turtlebot3_env::_check_camera_depth_image_raw_ready -> not ready yet, retrying...")
return self.camera_depth_image_raw
'''
DESCRIPTION: TODO...
'''
def _check_camera_depth_points_ready(self):
rospy.logdebug("turtlebot3_env::_check_camera_depth_points_ready -> Waiting to be READY...")
self.camera_depth_points = None
while self.camera_depth_points is None and not rospy.is_shutdown():
try:
self.camera_depth_points = rospy.wait_for_message("/" + str(self.robot_namespace) + "/camera/depth/points", PointCloud2, timeout=5.0)
rospy.logdebug("turtlebot3_env::_check_camera_depth_points_ready -> READY!")
except:
rospy.logerr("turtlebot3_env::_check_camera_depth_points_ready -> not ready yet, retrying...")
return self.camera_depth_points
'''
DESCRIPTION: TODO...
'''
def _check_camera_rgb_image_raw_ready(self):
rospy.logdebug("turtlebot3_env::_check_camera_rgb_image_raw_ready -> Waiting to be READY...")
self.camera_rgb_image_raw = None
while self.camera_rgb_image_raw is None and not rospy.is_shutdown():
try:
self.camera_rgb_image_raw = rospy.wait_for_message("/" + str(self.robot_namespace) + "/camera/rgb/image_raw", Image, timeout=5.0)
rospy.logdebug("turtlebot3_env::_check_camera_rgb_image_raw_ready -> READY!")
except:
rospy.logerr("turtlebot3_env::_check_camera_rgb_image_raw_ready -> not ready yet, retrying...")
return self.camera_rgb_image_raw
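# --- Hedged sketch (illustrative only, not called by this class) --------------
# The six _check_*_ready methods above all repeat the same wait/retry pattern.
# A generic helper such as the one below could replace them; the method name
# and its usage are assumptions, not part of the original API.
def _wait_for_topic_sketch(self, topic, msg_type, timeout=1.0):
    """Blocks until one message is received on `topic` (or ROS shuts down)."""
    msg = None
    while msg is None and not rospy.is_shutdown():
        try:
            msg = rospy.wait_for_message(topic, msg_type, timeout=timeout)
            rospy.logdebug("turtlebot3_env::_wait_for_topic_sketch -> %s READY!", topic)
        except rospy.ROSException:
            rospy.logerr("turtlebot3_env::_wait_for_topic_sketch -> %s not ready yet, retrying...", topic)
    return msg
# Example (assumption): self.odom = self._wait_for_topic_sketch("/" + str(self.robot_namespace) + "/odom", Odometry)
# ------------------------------------------------------------------------------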
'''
DESCRIPTION: TODO...
'''
def _odom_callback(self, data):
self.odom = data
'''
DESCRIPTION: TODO...
'''
def _imu_callback(self, data):
self.imu = data
'''
DESCRIPTION: TODO...
'''
def _laser_scan_callback(self, data):
self.laser_scan = data
'''
DESCRIPTION: TODO...
'''
def _camera_depth_image_raw_callback(self, data):
self.camera_depth_image_raw = data
'''
DESCRIPTION: TODO...
'''
def _camera_depth_points_callback(self, data):
self.camera_depth_points = data
'''
DESCRIPTION: TODO...
'''
def _camera_rgb_image_raw_callback(self, data):
self.camera_rgb_image_raw = data
'''
DESCRIPTION: TODO...Checks that all the publishers are working
:return:
'''
def _check_publishers_connection(self):
rate = rospy.Rate(50)  # 50 Hz
while self._cmd_vel_pub.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug("turtlebot3_env::_check_publishers_connection -> No subscribers to cmd_vel_pub yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
# This is to avoid an error when the world is reset and simulated time jumps backwards.
pass
rospy.logdebug("turtlebot3_env::_check_publishers_connection -> cmd_vel_pub Publisher Connected")
rospy.logdebug("turtlebot3_env::_check_publishers_connection -> All Publishers READY")
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
'''
DESCRIPTION: TODO...Sets the Robot in its init pose.
'''
def _set_init_pose(self):
raise NotImplementedError()
'''
DESCRIPTION: TODO...Inits variables needed to be initialised each time we reset at the start of an episode.
'''
def _init_env_variables(self):
raise NotImplementedError()
'''
DESCRIPTION: TODO...Calculates the reward to give based on the observations given.
'''
def _compute_reward(self, observations, done):
raise NotImplementedError()
'''
DESCRIPTION: TODO...Applies the given action to the simulation.
'''
def _set_action(self, action):
raise NotImplementedError()
'''
DESCRIPTION: TODO...Returns a mask of the currently valid actions.
'''
def valid_action_mask(self):
raise NotImplementedError()
'''
DESCRIPTION: TODO...
'''
def _get_obs(self):
raise NotImplementedError()
'''
DESCRIPTION: TODO...Checks if episode done based on observations given.
'''
def _is_done(self, observations):
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ----------------------------
'''
DESCRIPTION: TODO...
It will move the base based on the linear and angular speeds given.
It will wait until those twists are achieved, reading from the odometry topic.
:param linear_speed: Speed in the X axis of the robot base frame
:param angular_speed: Speed of the angular turning of the robot base frame
:param epsilon: Acceptable difference between the speed asked and the odometry readings
:param update_rate: Rate at which we check the odometry.
:return:
'''
def move_base(self, linear_speed, angular_speed, epsilon=0.05, update_rate=10):
cmd_vel_value = Twist()
cmd_vel_value.linear.x = linear_speed
cmd_vel_value.angular.z = angular_speed
rospy.logdebug("turtlebot3_env::move_base -> cmd_vel_value: " + str(cmd_vel_value))
self._check_publishers_connection()
self._cmd_vel_pub.publish(cmd_vel_value)
#self.wait_until_twist_achieved(cmd_vel_value,epsilon,update_rate)
time.sleep(0.15)
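# --- Hedged usage sketch (comments only, not executed here) -------------------
# move_base() publishes a single Twist and then sleeps briefly instead of
# waiting for the commanded twist to show up in odometry (that variant is kept
# commented out in wait_until_twist_achieved below). A task environment would
# typically call it like this; the speed values are arbitrary examples:
#
#   env.move_base(linear_speed=0.2, angular_speed=0.0)   # drive straight
#   env.move_base(linear_speed=0.0, angular_speed=0.5)   # rotate in place
#   env.move_base(0.0, 0.0)                              # stop
# ------------------------------------------------------------------------------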
'''
DESCRIPTION: TODO...
We wait for the cmd_vel twist given to be reached by the robot reading
from the odometry.
:param cmd_vel_value: Twist we want to wait to reach.
:param epsilon: Error acceptable in odometry readings.
:param update_rate: Rate at which we check the odometry.
:return:
'''
'''
def wait_until_twist_achieved(self, cmd_vel_value, epsilon, update_rate):
rospy.logdebug("START wait_until_twist_achieved...")
rate = rospy.Rate(update_rate)
start_wait_time = rospy.get_rostime().to_sec()
end_wait_time = 0.0
epsilon = 0.05
rospy.logdebug("Desired Twist Cmd>>" + str(cmd_vel_value))
rospy.logdebug("epsilon>>" + str(epsilon))
linear_speed = cmd_vel_value.linear.x
angular_speed = cmd_vel_value.angular.z
linear_speed_plus = linear_speed + epsilon
linear_speed_minus = linear_speed - epsilon
angular_speed_plus = angular_speed + epsilon
angular_speed_minus = angular_speed - epsilon
while not rospy.is_shutdown():
current_odometry = self._check_odom_ready()
# IN turtlebot3 the odometry angular readings are inverted, so we have to invert the sign.
odom_linear_vel = current_odometry.twist.twist.linear.x
odom_angular_vel = -1*current_odometry.twist.twist.angular.z
rospy.logdebug("Linear VEL=" + str(odom_linear_vel) + ", ?RANGE=[" + str(linear_speed_minus) + ","+str(linear_speed_plus)+"]")
rospy.logdebug("Angular VEL=" + str(odom_angular_vel) + ", ?RANGE=[" + str(angular_speed_minus) + ","+str(angular_speed_plus)+"]")
linear_vel_are_close = (odom_linear_vel <= linear_speed_plus) and (odom_linear_vel > linear_speed_minus)
angular_vel_are_close = (odom_angular_vel <= angular_speed_plus) and (odom_angular_vel > angular_speed_minus)
if linear_vel_are_close and angular_vel_are_close:
rospy.logdebug("Reached Velocity!")
end_wait_time = rospy.get_rostime().to_sec()
break
rospy.logdebug("Not there yet, keep waiting...")
rate.sleep()
delta_time = end_wait_time- start_wait_time
rospy.logdebug("[Wait Time=" + str(delta_time)+"]")
rospy.logdebug("END wait_until_twist_achieved...")
return delta_time
'''
'''
DESCRIPTION: TODO...
'''
def get_odom(self):
return self.odom
'''
DESCRIPTION: TODO...
'''
def get_imu(self):
return self.imu
'''
DESCRIPTION: TODO...
'''
def get_laser_scan(self):
return self.laser_scan
'''
DESCRIPTION: TODO...
'''
def get_camera_depth_image_raw(self):
return self.camera_depth_image_raw
'''
DESCRIPTION: TODO...
'''
def get_camera_depth_points(self):
return self.camera_depth_points
'''
DESCRIPTION: TODO...
'''
def get_camera_rgb_image_raw(self):
return self.camera_rgb_image_raw
'''
DESCRIPTION: TODO...
'''
def update_initial_pose(self, initial_pose):
self.initial_pose = initial_pose
super(TurtleBot3Env, self).update_initial_pose(self.initial_pose)
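# --- Hedged sketch: minimal task-environment skeleton (comments only) ----------
# TurtleBot3Env leaves _set_init_pose, _init_env_variables, _set_action,
# _get_obs, _is_done, _compute_reward and valid_action_mask as virtual methods.
# A concrete task environment is expected to subclass it and override them; the
# skeleton below is illustrative only, and all names and values are examples.
#
#   class MyTurtleBot3TaskEnv(TurtleBot3Env):
#       def _set_init_pose(self):
#           self.move_base(0.0, 0.0)              # stop the robot
#           return True
#       def _init_env_variables(self):
#           self._episode_done = False
#       def _set_action(self, action):
#           self.move_base(action[0], action[1])  # (linear, angular)
#       def _get_obs(self):
#           return self.get_laser_scan().ranges
#       def _is_done(self, observations):
#           return self._episode_done
#       def _compute_reward(self, observations, done):
#           return 0.0 if done else 1.0
# --------------------------------------------------------------------------------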
| [] |
2024-01-10 | AbelSyx/openai_ros | scripts~openai_ros~task_envs~turtlebot3~turtlebot3_tentabot_drl.py | #!/usr/bin/env python3
'''
LAST UPDATE: 2022.04.02
AUTHOR: Neset Unver Akmandor (NUA)
Eric Dusel (ED)
Gary Lvov (GML)
Hongyu Li (LHY)
E-MAIL: [email protected]
[email protected]
[email protected]
[email protected]
DESCRIPTION: TODO...
REFERENCES:
NUA TODO:
'''
import rospy
import numpy as np
import time
import math
import cv2
import os
import csv
import random
import pathlib
import pickle
from matplotlib import pyplot as plt
from PIL import Image
from squaternion import Quaternion
import tf
import tf2_ros
import roslaunch
import rospkg
from std_msgs.msg import Header, Bool, Float32MultiArray
from geometry_msgs.msg import Pose, PoseStamped, Vector3
from visualization_msgs.msg import Marker, MarkerArray
from sensor_msgs.msg import LaserScan
from nav_msgs.msg import OccupancyGrid, Path, OccupancyGrid
from nav_msgs.srv import GetPlan
from std_srvs.srv import Empty
from gazebo_msgs.msg import ModelStates
from gym import spaces
from gym.envs.registration import register
from openai_ros.robot_envs import turtlebot3_env
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
from tentabot_drl.tentabot_drl_config import *
from tentabot.srv import *
#from imitation.data.types import TrajectoryWithRew
#from imitation.data import types
'''
DESCRIPTION: TODO...
'''
class TurtleBot3TentabotDRL(turtlebot3_env.TurtleBot3Env):
'''
DESCRIPTION: TODO...This Task Env is designed for having the TurtleBot3 in some kind of maze.
It will learn how to move around the maze without crashing.
'''
def __init__(self, robot_id=0, data_folder_path=""):
### Initialize Parameters
## General
self.robot_id = robot_id
self.previous_robot_id = self.robot_id
self.robot_namespace = "turtlebot3_" + str(self.robot_id)
self.data_folder_path = data_folder_path
self.world_name = rospy.get_param('world_name', "")
self.next_world_name = self.world_name
self.init_flag = False
self.step_num = 0
self.total_step_num = 0
self.total_collisions = 0
self.step_reward = 0.0
self.episode_reward = 0.0
self.diff_action = 0.0
self.total_mean_episode_reward = 0.0
self.goal_reaching_status = Bool()
self.goal_reaching_status.data = False
self.action_counter = 0
self.observation_counter = 0
self.odom_dict = {}
self.previous_area_id = 0
self.obs_data = {}
self.move_base_goal = PoseStamped()
self.move_base_flag = False
self.training_data = []
self.training_data.append(["episode_reward"])
self.oar_data = []
self.episode_oar_data = dict(obs=[], acts=[], infos=None, terminal=[], rews=[])
#self.time_old = time.time()
self.validation_ep_num = 0
self.validation_flag = False
self.config = Config(data_folder_path=data_folder_path)
## Set Observation-Action-Reward data filename
self.oar_data_file = data_folder_path + "oar_data.csv"
# Rospack path for pedsim reset
# get an instance of RosPack with the default search paths
rospack = rospkg.RosPack()
rospack_list = rospack.list()
self.tentabot_path = rospack.get_path('tentabot')
# Subscriptions
rospy.Subscriber("/" + str(self.robot_namespace) + "/scan", LaserScan, self.callback_laser_scan)
rospy.Subscriber("/" + str(self.robot_namespace) + "/laser_image", OccupancyGrid, self.callback_laser_image)
rospy.Subscriber("/" + str(self.robot_namespace) + "/laser_rings", Float32MultiArray, self.callback_laser_rings)
#if self.config.observation_space_type == "laser_image_2DCNN_FC":
# rospy.Subscriber("/" + str(self.robot_namespace) + "/laser_image", OccupancyGrid, self.callback_laser_image)
#if self.config.observation_space_type == "laser_rings_2DCNN_FC":
# rospy.Subscriber("/" + str(self.robot_namespace) + "/laser_rings", Float32MultiArray, self.callback_rings_image)
# Services
if self.config.observation_space_type == "Tentabot_FC" or \
self.config.observation_space_type == "Tentabot_1DCNN_FC" or \
self.config.observation_space_type == "Tentabot_laser_1DCNN_FC" or \
self.config.observation_space_type == "Tentabot_2DCNN_FC" or \
self.config.observation_space_type == "Tentabot_2DCNN" or \
self.config.observation_space_type == "Tentabot_WP_FC":
rospy.wait_for_service('rl_step')
self.srv_rl_step = rospy.ServiceProxy('rl_step', rl_step, True)
rospy.wait_for_service('update_goal')
self.srv_update_goal = rospy.ServiceProxy('update_goal', update_goal, True)
#rospy.wait_for_service('reset_map_utility')
#self.srv_reset_map_utility = rospy.ServiceProxy('reset_map_utility', reset_map_utility)
if self.config.observation_space_type == "laser_WP_1DCNN_FC" or self.config.observation_space_type == "Tentabot_WP_FC":
rospy.wait_for_service('/move_base/make_plan')
rospy.wait_for_service('/move_base/clear_costmaps')
self.srv_move_base_get_plan = rospy.ServiceProxy('/move_base/make_plan', GetPlan, True)
# self.srv_clear_costmap = rospy.ServiceProxy('/move_base/clear_costmaps', Empty, True)
# Publishers
self.goal_reaching_status_pub = rospy.Publisher(self.robot_namespace + '/goal_reaching_status', Bool, queue_size=1)
self.goal_visu_pub = rospy.Publisher(self.robot_namespace + '/nav_goal', MarkerArray, queue_size=1)
self.filtered_laser_pub = rospy.Publisher(self.robot_namespace + '/laser/scan_filtered', LaserScan, queue_size=1)
self.debug_visu_pub = rospy.Publisher('/debug_visu', MarkerArray, queue_size=1)
# Initialize OpenAI Gym Structure
self.get_init_pose(init_flag=False)
super(TurtleBot3TentabotDRL, self).__init__(robot_namespace=self.robot_namespace, initial_pose=self.initial_pose, data_folder_path=data_folder_path, velocity_control_msg=self.config.velocity_control_msg)
self.get_goal_location()
self.init_observation_action_space()
#print("turtlebot3_tentabot_drl::__init__ -> obs laser shape: " + str(self.obs["laser"].shape))
#print("turtlebot3_tentabot_drl::__init__ -> obs target_action shape: " + str(self.obs["target_action"].shape))
self.reward_range = (-np.inf, np.inf)
self.init_flag = True
'''
DESCRIPTION: TODO...Sets the Robot in its init pose
'''
def _set_init_pose(self):
self.move_base( self.config.init_lateral_speed,
self.config.init_angular_speed,
epsilon=0.05,
update_rate=10)
return True
'''
DESCRIPTION: TODO...Inits variables needed to be initialised each time we reset at the start
of an episode.
:return:
'''
def _init_env_variables(self):
#print("turtlebot3_tentabot_drl::_init_env_variables -> self.total_step_num: " + str(self.total_step_num))
if self.episode_num:
#self.total_mean_episode_reward = round((self.total_mean_episode_reward * (self.episode_num - 1) + self.episode_reward) / self.episode_num, self.config.mantissa_precision)
self.total_mean_episode_reward = (self.total_mean_episode_reward * (self.episode_num - 1) + self.episode_reward) / self.episode_num
## Add training data
self.training_data.append([self.episode_reward])
print("--------------")
print("turtlebot3_tentabot_drl::_init_env_variables -> robot_id: {}".format(self.robot_id))
print("turtlebot3_tentabot_drl::_init_env_variables -> step_num: {}".format(self.step_num))
print("turtlebot3_tentabot_drl::_init_env_variables -> total_step_num: {}".format(self.total_step_num))
print("turtlebot3_tentabot_drl::_init_env_variables -> episode_num: {}".format(self.episode_num))
print("turtlebot3_tentabot_drl::_init_env_variables -> total_collisions: {}".format(self.total_collisions))
print("turtlebot3_tentabot_drl::_init_env_variables -> episode_reward: {}".format(self.episode_reward))
print("turtlebot3_tentabot_drl::_init_env_variables -> total_mean_episode_reward: {}".format(self.total_mean_episode_reward))
print("--------------")
self.previous_robot_id = self.robot_id
self.episode_reward = 0.0
self._episode_done = False
self._reached_goal = False
self.step_num = 0
'''
print("turtlebot3_tentabot_drl::_init_env_variables -> BEFORE client_reset_map_utility")
# Reset Map
success_reset_map_utility = self.client_reset_map_utility()
print("turtlebot3_tentabot_drl::_init_env_variables -> AFTER client_reset_map_utility")
'''
# We wait a small amount of time to start everything because in very fast resets, laser scan values are sluggish
# and sometimes still have values from the prior position that triggered the done.
time.sleep(1.0)
self.previous_distance2goal = self.get_distance2goal()
self.previous_action = np.array([[self.config.init_lateral_speed, self.config.init_angular_speed]]).reshape(self.config.fc_obs_shape)
#self.update_global_path_length()
self.reinit_observation()
'''
DESCRIPTION: TODO...Here we define what sensor data defines our robots observations
To know which Variables we have acces to, we need to read the
TurtleBot3Env API DOCS
:return:
'''
def _get_obs(self):
#print("turtlebot3_tentabot_drl::_get_obs -> self.total_step_num: " + str(self.total_step_num))
# Update target observation
self.update_observation()
# Check if the goal is reached
self.goal_check()
return self.obs
'''
DESCRIPTION: TODO...
'''
def _set_action(self, action):
#print("turtlebot3_tentabot_drl::_set_action -> self.total_step_num: " + str(self.total_step_num))
'''
print("--------set_action---------")
print(self.occupancy_set[0][action])
if self.occupancy_set[0][action] > np.min(self.occupancy_set):
print("mask_action: false!")
print("--------set_action---------")
'''
linear_speed = self.config.velocity_control_data[action, 0]
angular_speed = float(self.config.velocity_control_data[action, 1])
self.diff_action = abs(self.previous_action[0] - linear_speed) + abs(self.previous_action[1] - angular_speed)
self.previous_action = np.array([[linear_speed, angular_speed]], dtype=np.float32).reshape(self.config.fc_obs_shape)
self.act = action
# We tell TurtleBot3 the linear and angular speed to set to execute
self.move_base(linear_speed, angular_speed, epsilon=0.05, update_rate=10)
'''
DESCRIPTION: Builds the valid_action_mask. If no trajectory is navigable, every action is marked valid so the agent can learn on its own how to escape the situation; if navigable trajectories exist, only the actions corresponding to navigable (or temporarily navigable) trajectories are marked valid.
'''
def valid_action_mask(self):
mask = np.zeros(self.config.n_actions, dtype=np.float32)
#closeness_set_mid = np.median(self.closeness_set)
#closeness_set_max = np.max(self.closeness_set)
#mask[0] = 1
if np.max(self.navigability_set) == 0 and np.min(self.navigability_set) == 0:
for i, val in enumerate(self.navigability_set[0]):
mask[i] = 1
else:
for i, val in enumerate(self.navigability_set[0]):
if val != 0:
mask[i] = 1
#if val != 0 and self.closeness_set[0][i] <= closeness_set_mid:
# mask[i] = 1
#self.closeness_set
'''
for i, val in enumerate(self.navigability_set[0]):
if np.max(self.navigability_set) == 1:
if val == 1:
mask[i] = 1
elif np.max(self.navigability_set) == 0:
mask[self.config.n_actions - 1] = 1
if val == -1:
mask[i] = 1
'''
'''
if self.config.trajectory_rotation_recovery == True:
for i, val in enumerate(self.occupancy_set[0]):
if np.min(self.occupancy_set) == 0.0:
if val == 0.0:
mask[i] = 1
elif np.min(self.occupancy_set) != 0.0 and np.min(self.occupancy_set) < np.max(self.occupancy_set):
if val != np.max(self.occupancy_set):
mask[i] = 1
else:
mask[self.config.n_actions - 1] = 1
mask[self.config.n_actions - 2] = 1
if self.config.trajectory_rotation_recovery == False:
for i, val in enumerate(self.occupancy_set[0]):
if np.min(self.occupancy_set) == 0.0:
if val == 0.0:
mask[i] = 1
elif np.min(self.occupancy_set) != 0.0 and np.min(self.occupancy_set) < np.max(self.occupancy_set):
if val != np.max(self.occupancy_set):
mask[i] = 1
else:
mask[1] = 1
'''
'''
mid = round(self.config.n_actions / 2)
mask[mid] = 1
'''
'''
print("--------------")
print(
"turtlebot3_tentabot_drl::valid_action_mask -> min id: " + str(np.argmin(self.occupancy_set)) + " val: " + str(
np.min(self.occupancy_set)))
print(
"turtlebot3_tentabot_drl::valid_action_mask -> max id: " + str(np.argmax(self.occupancy_set)) + " val: " + str(
np.max(self.occupancy_set)))
print("turtlebot3_tentabot_drl::valid_action_mask -> ")
for i, val in enumerate(self.occupancy_set[0]):
if 65 < i < 80:
print(str(i) + ": " + str(val))
print("--------------")
'''
return mask
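# --- Hedged sketch: how the mask is typically consumed (comments only) ---------
# valid_action_mask() returns a vector with 1 for allowed actions and 0 for the
# rest. With Stable-Baselines3-contrib style maskable policies it is usually
# wired in through an ActionMasker wrapper; the training-script snippet below is
# an assumption for illustration and is not part of this file:
#
#   from sb3_contrib import MaskablePPO
#   from sb3_contrib.common.wrappers import ActionMasker
#
#   def mask_fn(env):
#       return env.valid_action_mask()
#
#   masked_env = ActionMasker(env, mask_fn)
#   model = MaskablePPO("MlpPolicy", masked_env, verbose=1)
#   model.learn(total_timesteps=10000)
# --------------------------------------------------------------------------------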
'''
DESCRIPTION: TODO...
'''
def _is_done(self, observations):
#print("turtlebot3_tentabot_drl::_is_done -> self.total_step_num: " + str(self.total_step_num))
if self._episode_done and (not self._reached_goal):
rospy.logdebug("turtlebot3_tentabot_drl::_is_done -> Boooo! Episode done but not reached the goal...")
print("turtlebot3_tentabot_drl::_is_done -> Boooo! Episode done but not reached the goal...")
elif self._episode_done and self._reached_goal:
rospy.logdebug("turtlebot3_tentabot_drl::_is_done -> Gotcha! Episode done and reached the goal!")
print("turtlebot3_tentabot_drl::_is_done -> Gotcha! Episode done and reached the goal!")
else:
rospy.logdebug("turtlebot3_tentabot_drl::_is_done -> Not yet bro...")
#print("turtlebot3_tentabot_drl::_is_done -> Not yet bro...")
return self._episode_done
'''
DESCRIPTION: TODO...
'''
def _compute_reward(self, observations, done):
#print("turtlebot3_tentabot_drl::_compute_reward -> self.total_step_num: " + str(self.total_step_num))
self.total_step_num += 1
self.step_num += 1
if self.step_num >= self.config.max_episode_steps:
self._episode_done = True
print("turtlebot3_tentabot_drl::_compute_reward -> Too late...")
if self._episode_done and (not self._reached_goal):
self.step_reward = self.config.penalty_terminal_fail
self.goal_reaching_status.data = False
self.goal_reaching_status_pub.publish(self.goal_reaching_status)
# Update initial pose and goal for the next episode
self.get_init_pose()
self.get_goal_location()
elif self._episode_done and self._reached_goal:
#self.step_reward = self.config.reward_terminal_success + self.config.reward_terminal_mintime * (self.config.max_episode_steps - self.step_num) / self.config.max_episode_steps
self.step_reward = self.config.reward_terminal_success
self.goal_reaching_status.data = True
self.goal_reaching_status_pub.publish(self.goal_reaching_status)
# Update initial pose and goal for the next episode
self.get_init_pose()
self.get_goal_location()
else:
'''
if current_distance2goal > self.init_distance2goal:
#penalty
self.step_reward = -1 * self.config.reward_cumulative_step * current_distance2goal / (self.config.max_episode_steps * self.init_distance2goal)
else:
#reward
self.step_reward = self.config.reward_cumulative_step * (self.init_distance2goal - current_distance2goal) / (self.config.max_episode_steps * self.init_distance2goal)
'''
current_distance2goal = self.get_distance2goal()
# Step penalty: penalize every additional step, to push the agent to reach the goal as fast as possible
penalty_step = self.config.penalty_cumulative_step / self.config.max_episode_steps
# Distance term: reward when the distance to the goal decreases w.r.t. the previous step, penalize when it increases
diff_distance2goal = self.previous_distance2goal - current_distance2goal
if diff_distance2goal > 0:
rp_step = self.config.reward_step_scale * diff_distance2goal * 1.5
elif diff_distance2goal < 0:
rp_step = self.config.reward_step_scale * diff_distance2goal
else:
rp_step = -self.config.reward_step_scale / 50
# rp_step = self.config.reward_step_scale * (self.previous_distance2goal - current_distance2goal)
self.step_reward = penalty_step + rp_step
self.previous_distance2goal = current_distance2goal
#print("turtlebot3_tentabot_drl::_compute_reward -> reward_step: " + str(reward_step))
'''
# Keep a safe distance from obstacles
penalty_safety = 0
if self.min_distance2obstacle < self.config.safety_range_threshold and self.min_distance2obstacle > self.config.obs_min_range:
penalty_safety = self.config.penalty_safety_scale * (self.config.safety_range_threshold / self.min_distance2obstacle)
self.step_reward += penalty_safety
# Penalty on velocity change; values below 0.3 yield a reward
penalty_step_speed = self.config.penalty_speed_change * (self.diff_action - 0.3)
self.step_reward += penalty_step_speed
'''
#print("turtlebot3_tentabot_drl::_compute_reward -> penalty_safety: {}".format(penalty_safety))
#self.step_reward = round(penalty_safety + reward_step, self.config.mantissa_precision)
# Reward based on the number of feasible actions
'''
time_now = time.time()
dt = time_now - self.time_old
self.time_old = time_now
print("----------------------")
print("turtlebot3_tentabot_drl::_compute_reward -> current_distance2goal: " + str(current_distance2goal))
#print("turtlebot3_tentabot_drl::_compute_reward -> init_distance2goal: " + str(self.init_distance2goal))
print("turtlebot3_tentabot_drl::_compute_reward -> max_episode_steps: " + str(self.config.max_episode_steps))
print("turtlebot3_tentabot_drl::_compute_reward -> reward_terminal_success: " + str(self.config.reward_terminal_success))
print("turtlebot3_tentabot_drl::_compute_reward -> reward_step_scale: " + str(self.config.reward_step_scale))
print("turtlebot3_tentabot_drl::_compute_reward -> penalty_terminal_fail: " + str(self.config.penalty_terminal_fail))
print("turtlebot3_tentabot_drl::_compute_reward -> penalty_cumulative_step: " + str(self.config.penalty_cumulative_step))
print("turtlebot3_tentabot_drl::_compute_reward -> penalty_step: " + str(penalty_step))
print("turtlebot3_tentabot_drl::_compute_reward -> rp_step: " + str(rp_step))
print("turtlebot3_tentabot_drl::_compute_reward -> step_reward: " + str(self.step_reward))
#print("turtlebot3_tentabot_drl::_compute_reward -> dt: " + str(dt))
#print("turtlebot3_tentabot_drl::_compute_reward -> max_lateral_speed: " + str(self.config.max_lateral_speed))
#print("turtlebot3_tentabot_drl::_compute_reward -> max_step_reward: " + str(round(self.config.max_lateral_speed * dt, self.config.mantissa_precision)))
print("----------------------")
'''
self.episode_reward += self.step_reward
rospy.logdebug("turtlebot3_tentabot_drl::_compute_reward -> step_reward: " + str(self.step_reward))
rospy.logdebug("turtlebot3_tentabot_drl::_compute_reward -> episode_reward: " + str(self.episode_reward))
rospy.logdebug("turtlebot3_tentabot_drl::_compute_reward -> total_step_num: " + str(self.total_step_num))
'''
print("**********************")
print("turtlebot3_tentabot_drl::_compute_reward -> self.step_reward: " + str(self.step_reward))
print("----------------------")
'''
'''
# Save Observation-Action-Reward data into a file
self.save_oar_data()
if self._episode_done and (len(self.episode_oar_data['obs']) > 1):
#print("turtlebot3_tentabot_drl::save_oar_data -> episode_oar_data obs len: " + str(len(self.episode_oar_data['obs'])))
#print("turtlebot3_tentabot_drl::save_oar_data -> episode_oar_data acts len: " + str(len(self.episode_oar_data['acts'])))
if self.goal_reaching_status.data:
info_data = np.ones(len(self.episode_oar_data['acts']))
else:
info_data = np.zeros(len(self.episode_oar_data['acts']))
self.oar_data.append(TrajectoryWithRew( obs=np.array(self.episode_oar_data['obs']),
acts=np.array(self.episode_oar_data['acts']),
infos=np.array(info_data),
terminal=True,
rews=np.array(self.episode_oar_data['rews']),))
'''
if self.total_step_num == self.config.training_timesteps:
# Write Observation-Action-Reward data into a file
#self.write_oar_data()
## Write training data
self.write_data(self.data_folder_path + "training_data.csv", self.training_data)
return self.step_reward
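# --- Hedged sketch (illustrative only, not called by this class) ---------------
# For non-terminal steps the reward above is the sum of a constant step penalty
# and a progress term: progress toward the goal is rewarded (scaled up by 1.5),
# moving away is penalized, and standing still gets a small fixed penalty. The
# helper below restates that rule in isolation; the parameter names mirror the
# config values used above and are otherwise assumptions.
def _step_reward_sketch(prev_dist, curr_dist, penalty_cumulative_step, max_episode_steps, reward_step_scale):
    penalty_step = penalty_cumulative_step / max_episode_steps
    diff = prev_dist - curr_dist
    if diff > 0:
        rp_step = reward_step_scale * diff * 1.5   # moved toward the goal
    elif diff < 0:
        rp_step = reward_step_scale * diff         # moved away from the goal
    else:
        rp_step = -reward_step_scale / 50          # no progress
    return penalty_step + rp_step
# --------------------------------------------------------------------------------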
# Internal TaskEnv Methods
'''
DESCRIPTION: TODO...
'''
def write_data(self, file, data):
with open(file, 'a') as file_status:
write = csv.writer(file_status)
write.writerows(data)
print("turtlebot3_tentabot_drl::write_data -> Data is written in " + str(file))
'''
DESCRIPTION: TODO...
'''
def save_oar_data(self):
if self.config.observation_space_type == "laser_FC" or \
self.config.observation_space_type == "Tentabot_FC":
#print("----------------------------------")
#print("turtlebot3_tentabot_drl::save_oar_data -> self.obs shape: " + str(self.obs.shape))
#print("turtlebot3_tentabot_drl::save_oar_data -> self.previous_action shape: " + str(self.previous_action.shape))
#print("")
obs_data = self.obs.reshape((-1))
#print("turtlebot3_tentabot_drl::save_oar_data -> obs_data shape: " + str(obs_data.shape))
#print("----------------------------------")
# Save Observation-Action-Reward Data
self.episode_oar_data['obs'].append(obs_data)
if not self._episode_done:
self.episode_oar_data['acts'].append(self.act)
#self.episode_oar_data['infos'].append()
#self.episode_oar_data['terminal'].append(self._episode_done)
self.episode_oar_data['rews'].append(self.step_reward)
'''
print("----------------------------------")
print("turtlebot3_tentabot_drl::save_oar_data -> episode_oar_data obs type: " + str(type(self.episode_oar_data['obs'])))
print("turtlebot3_tentabot_drl::save_oar_data -> episode_oar_data obs len: " + str(len(self.episode_oar_data['obs'])))
print("turtlebot3_tentabot_drl::save_oar_data -> episode_oar_data acts len: " + str(len(self.episode_oar_data['acts'])))
print("turtlebot3_tentabot_drl::save_oar_data -> episode_oar_data: " + str(self.episode_oar_data))
#print("turtlebot3_tentabot_drl::save_oar_data -> episode_oar_data obs: " + str(self.episode_oar_data.obs))
print("turtlebot3_tentabot_drl::save_oar_data -> episode_oar_data obs shape: " + str(self.episode_oar_data.obs.shape))
#print("turtlebot3_tentabot_drl::save_oar_data -> oar_data: " + str(self.oar_data))
print("----------------------------------")
'''
'''
DESCRIPTION: TODO...Save the recorded sequence of trajectories to disk.
The trajectories accumulated in self.oar_data are written to
"<data_folder_path>/oar_data.pkl" using an atomic write (temporary file + os.replace).
'''
def write_oar_data(self) -> None:
path = self.data_folder_path + "oar_data.pkl"
trajectories = self.oar_data
p = pathlib.Path(path)
p.parent.mkdir(parents=True, exist_ok=True)
tmp_path = f"{path}.tmp"
with open(tmp_path, "wb") as f:
pickle.dump(trajectories, f)
# Ensure atomic write
os.replace(tmp_path, path)
print("turtlebot3_tentabot_drl::write_oar_data -> Written Observation-Action-Reward data!")
'''
DESCRIPTION: TODO...
'''
def callback_laser_scan(self, data):
if self.init_flag:
self.check_collision(data)
self.filter_laser_scan(data)
else:
self.config.set_laser_data(data)
'''
DESCRIPTION: TODO...
'''
def callback_laser_image(self, data):
self.config.laser_image_width = data.info.width
self.config.laser_image_height = data.info.height
laser_image = np.array(data.data[0:self.config.laser_image_width])
for i in range(1, self.config.laser_image_height):
idx_from = i*self.config.laser_image_width
idx_to = idx_from + self.config.laser_image_width
laser_image_row = np.array(data.data[idx_from:idx_to])
laser_image = np.vstack([laser_image, laser_image_row])
max_scale = 1 / np.max(laser_image)
self.laser_image = max_scale * laser_image
'''
if self.step_num == 50:
imi = (self.laser_image * 255).astype(np.uint8)
im = Image.fromarray(imi)
im = im.convert("L")
im.save(self.data_folder_path + "laser_image.jpeg")
np.savetxt(self.data_folder_path + "laser_image.txt", self.laser_image)
'''
'''
print("----------------------------------")
#print("turtlebot3_tentabot_drl::callback_laser_image -> laser_image_width: " + str(self.config.laser_image_width))
#print("turtlebot3_tentabot_drl::callback_laser_image -> laser_image_height: " + str(self.config.laser_image_height))
#print("turtlebot3_tentabot_drl::callback_laser_image -> data.info.resolution: " + str(data.info.resolution))
#print("turtlebot3_tentabot_drl::callback_laser_image -> data.info.width: " + str(data.info.width))
#print("turtlebot3_tentabot_drl::callback_laser_image -> data.info.height: " + str(data.info.height))
#print("turtlebot3_tentabot_drl::callback_laser_image -> data type: " + str(type(data.data)))
#print("turtlebot3_tentabot_drl::callback_laser_image -> data len: " + str(len(data.data)))
#print("turtlebot3_tentabot_drl::callback_laser_image -> laser_image len: " + str(len(laser_image)))
#print("turtlebot3_tentabot_drl::callback_laser_image -> self.laser_image shape: " + str(self.laser_image.shape))
print("turtlebot3_tentabot_drl::callback_laser_image -> max_scale: " + str(max_scale))
print("turtlebot3_tentabot_drl::callback_laser_image -> maxi: " + str(maxi))
print("----------------------------------")
'''
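# --- Hedged sketch (illustrative only, not called by this class) ---------------
# The row-by-row np.vstack loop above can be written as a single reshape, which
# is equivalent as long as len(data.data) == width * height. The helper name is
# hypothetical and the zero-max guard is an added assumption.
def _occupancy_grid_to_image_sketch(self, data):
    grid = np.asarray(data.data, dtype=np.float32).reshape(data.info.height, data.info.width)
    max_val = np.max(grid)
    return grid / max_val if max_val > 0 else grid  # normalize to [0, 1], guard against an all-zero grid
# --------------------------------------------------------------------------------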
'''
DESCRIPTION: TODO...
'''
def callback_laser_rings(self, data):
#self.config.laser_image_width = data.info.width
#self.config.laser_image_height = data.info.height
laser_image = np.array(data.data[0:self.config.laser_image_width])
for i in range(1, self.config.laser_image_height):
idx_from = i*self.config.laser_image_width
idx_to = idx_from + self.config.laser_image_width
laser_image_row = np.array(data.data[idx_from:idx_to])
laser_image = np.vstack([laser_image, laser_image_row])
max_scale = 1 / np.max(laser_image)
self.laser_image = max_scale * laser_image
'''
if self.step_num == 50:
imi = (self.laser_image * 255).astype(np.uint8)
im = Image.fromarray(imi)
im = im.convert("L")
im.save(self.data_folder_path + "laser_image.jpeg")
np.savetxt(self.data_folder_path + "laser_image.txt", self.laser_image)
'''
'''
print("----------------------------------")
print("turtlebot3_tentabot_drl::callback_laser_rings -> layout dim type: " + str(type(data.layout.dim)))
print("turtlebot3_tentabot_drl::callback_laser_rings -> layout dim size: " + str(len(data.layout.dim)))
print("turtlebot3_tentabot_drl::callback_laser_rings -> data type: " + str(type(data.data)))
print("turtlebot3_tentabot_drl::callback_laser_rings -> data len: " + str(len(data.data)))
print("turtlebot3_tentabot_drl::callback_laser_rings -> laser_image shape: " + str(self.laser_image.shape))
print("turtlebot3_tentabot_drl::callback_laser_rings -> laser_image: ")
print(self.laser_image)
print("----------------------------------")
'''
'''
DESCRIPTION: TODO...
'''
def callback_move_base_global_plan(self, data):
self.move_base_global_plan = data.poses
self.move_base_flag = True
'''
DESCRIPTION: TODO... Update the odometry data
'''
def update_odom(self):
self.odom_data = self.get_odom()
q = Quaternion( self.odom_data.pose.pose.orientation.w,
self.odom_data.pose.pose.orientation.x,
self.odom_data.pose.pose.orientation.y,
self.odom_data.pose.pose.orientation.z)
e = q.to_euler(degrees=False)
self.odom_dict["x"] = self.odom_data.pose.pose.position.x
self.odom_dict["y"] = self.odom_data.pose.pose.position.y
self.odom_dict["z"] = self.odom_data.pose.pose.position.z
self.odom_dict["theta"] = e[2]
self.odom_dict["u"] = self.odom_data.twist.twist.linear.x
self.odom_dict["omega"] = self.odom_data.twist.twist.angular.z
self.config.set_odom(self.odom_dict)
'''
DESCRIPTION: TODO... Check if the goal is reached
'''
def goal_check(self):
current_distance2goal = self.get_distance2goal()
if (current_distance2goal < self.config.goal_close_threshold):
self._episode_done = True
self._reached_goal = True
'''
DESCRIPTION: TODO...Gets the initial location of the robot to reset
'''
def get_init_pose(self, init_flag=True):
self.initial_pose = {}
robot0_init_yaw = 0.0
self.world_name = self.next_world_name
if self.world_name == "training_garden_static_0":
initial_pose_areas_x = []
initial_pose_areas_x.extend(([-0.5,0.5], [1.5,2.5], [-2.5,-1], [-2.5,0.5]))
initial_pose_areas_y = []
initial_pose_areas_y.extend(([-0.5,0.5], [0.5,1.5], [1,1.5], [-1.5,-1]))
area_idx = random.randint(0, len(initial_pose_areas_x)-1)
self.robot_init_area_id = area_idx
self.initial_pose["x_init"] = random.uniform(initial_pose_areas_x[area_idx][0], initial_pose_areas_x[area_idx][1])
self.initial_pose["y_init"] = random.uniform(initial_pose_areas_y[area_idx][0], initial_pose_areas_y[area_idx][1])
self.initial_pose["z_init"] = 0.0
robot0_init_yaw = random.uniform(0.0, 2*math.pi)
elif self.world_name == "training_garden_static_1":
initial_pose_areas_x = []
#initial_pose_areas_x.extend(([-1.5,1.5], [4.5,5.5], [-5.5,-2], [1,5], [-5.5,-3.5]))
initial_pose_areas_x.extend(([-1.5,0.5], [1.0,5.5], [-5.5,-3], [4.0,5.5], [-5.5,-4.5]))
initial_pose_areas_y = []
#initial_pose_areas_y.extend(([-1,1], [1,4.5], [3.5, 4.5], [-4,-2], [-4.5,-2]))
initial_pose_areas_y.extend(([0,2], [4,4.5], [4.0,4.5], [-4.5,-3.0], [-4.5,-3.5]))
area_idx = random.randint(0, len(initial_pose_areas_x)-1)
self.robot_init_area_id = area_idx
self.initial_pose["x_init"] = random.uniform(initial_pose_areas_x[area_idx][0], initial_pose_areas_x[area_idx][1])
self.initial_pose["y_init"] = random.uniform(initial_pose_areas_y[area_idx][0], initial_pose_areas_y[area_idx][1])
self.initial_pose["z_init"] = 0.0
robot0_init_yaw = random.uniform(0.0, 2*math.pi)
elif self.world_name == "corridor":
self.initial_pose["x_init"] = 0.0
self.initial_pose["y_init"] = 0.0
self.initial_pose["z_init"] = 0.0
robot0_init_yaw = random.uniform(0.0, 2*math.pi)
elif self.world_name == "training_garden_dynamic_0":
initial_pose_areas_x = []
initial_pose_areas_x.extend(([2,2.5], [-2,-2.5]))
initial_pose_areas_y = []
initial_pose_areas_y.extend(([-0.5,0.5], [-0.5,0.5]))
area_idx = random.randint(0, len(initial_pose_areas_x)-1)
self.robot_init_area_id = area_idx
self.initial_pose["x_init"] = random.uniform(initial_pose_areas_x[area_idx][0], initial_pose_areas_x[area_idx][1])
self.initial_pose["y_init"] = random.uniform(initial_pose_areas_y[area_idx][0], initial_pose_areas_y[area_idx][1])
self.initial_pose["z_init"] = 0.0
robot0_init_yaw = random.uniform(0.0, 2*math.pi)
elif self.world_name == "training_garden_dynamic_1":
initial_pose_areas_x = []
initial_pose_areas_x.extend(([-2.0,-0.5], [4.5,5.5], [-5.5,-4.5], [3,5], [-5.5,-4.5]))
initial_pose_areas_y = []
initial_pose_areas_y.extend(([-4.0,0.0], [3.5,4.5], [4, 4.5], [-4,-1], [-4.5,-4]))
area_idx = random.randint(0, len(initial_pose_areas_x)-1)
self.robot_init_area_id = area_idx
self.initial_pose["x_init"] = random.uniform(initial_pose_areas_x[area_idx][0], initial_pose_areas_x[area_idx][1])
self.initial_pose["y_init"] = random.uniform(initial_pose_areas_y[area_idx][0], initial_pose_areas_y[area_idx][1])
self.initial_pose["z_init"] = 0.0
robot0_init_yaw = random.uniform(0.0, 2*math.pi)
elif self.world_name == "training_garden_dynamic_2":
initial_pose_areas_x = []
initial_pose_areas_x.extend(([4.5,5.5], [-5.5,-3.5], [3.0,5.0], [-5.5,-5.0], [-2.0,0.5]))
initial_pose_areas_y = []
initial_pose_areas_y.extend(([3.5,4.5], [3.0,4.5], [-4.5,-2], [-4.5,-2], [0.0,-0.5]))
area_idx = random.randint(0, len(initial_pose_areas_x)-1)
self.robot_init_area_id = area_idx
self.initial_pose["x_init"] = random.uniform(initial_pose_areas_x[area_idx][0], initial_pose_areas_x[area_idx][1])
self.initial_pose["y_init"] = random.uniform(initial_pose_areas_y[area_idx][0], initial_pose_areas_y[area_idx][1])
self.initial_pose["z_init"] = 0.0
robot0_init_yaw = random.uniform(0.0, 2*math.pi)
elif self.world_name == "testing_dwarl_zigzag_static":
self.initial_pose["x_init"] = 5.0
self.initial_pose["y_init"] = -8.0
self.initial_pose["z_init"] = 0.0
robot0_init_yaw = 0.0
elif self.world_name == "testing_lvl_1":
self.initial_pose["x_init"] = 4.0
self.initial_pose["y_init"] = 3.0
self.initial_pose["z_init"] = 0.0
robot0_init_yaw = 0.0
elif self.world_name == "testing_lvl_2":
self.initial_pose["x_init"] = 4.0
self.initial_pose["y_init"] = 3.0
self.initial_pose["z_init"] = 0.0
robot0_init_yaw = 0.0
elif self.world_name == "testing_lvl_3":
self.initial_pose["x_init"] = 4.0
self.initial_pose["y_init"] = 3.0
self.initial_pose["z_init"] = 0.0
robot0_init_yaw = 0.0
elif self.world_name == "testing_lvl_4":
self.initial_pose["x_init"] = 4.0
self.initial_pose["y_init"] = -4.0
self.initial_pose["z_init"] = 0.0
robot0_init_yaw = 0.0
elif self.world_name == "testing_lvl_5":
self.initial_pose["x_init"] = 4.0
self.initial_pose["y_init"] = -4.0
self.initial_pose["z_init"] = 0.0
robot0_init_yaw = 0.0
elif self.world_name == "testing_lvl_6":
self.initial_pose["x_init"] = 4.0
self.initial_pose["y_init"] = -4.0
self.initial_pose["z_init"] = 0.0
robot0_init_yaw = 0.0
elif self.world_name == "testing_lvl_7":
self.initial_pose["x_init"] = 1.0
self.initial_pose["y_init"] = 3.0
self.initial_pose["z_init"] = 0.0
robot0_init_yaw = -160*math.pi/180
elif self.world_name == "testing_lvl_8":
self.initial_pose["x_init"] = 7.0
self.initial_pose["y_init"] = -3.0
self.initial_pose["z_init"] = 0.0
robot0_init_yaw = 0.0
elif self.world_name == "testing_lvl_9":
self.initial_pose["x_init"] = 7.0
self.initial_pose["y_init"] = -3.0
self.initial_pose["z_init"] = 0.0
robot0_init_yaw = 0.0
elif self.world_name == "testing_lvl_10":
self.initial_pose["x_init"] = 7.0
self.initial_pose["y_init"] = -3.0
self.initial_pose["z_init"] = 0.0
robot0_init_yaw = 0.0
elif self.world_name == "validation" or \
self.world_name == "validation_overtaking":
if self.world_name == "validation":
self.validation_flag = True
#self.reset_pedsim()
self.initial_pose["x_init"] = -.25
self.initial_pose["y_init"] = 0
self.initial_pose["z_init"] = 0.0
robot0_init_yaw = 0
self.validation_ep_num += 1
if self.validation_flag and self.validation_ep_num >= self.config.max_testing_episodes:
self.next_world_name = "validation_passing"
self.validation_ep_num = 0
elif self.world_name == "validation_passing":
#self.reset_pedsim()
self.initial_pose["x_init"] = 0.0
self.initial_pose["y_init"] = -4.0
self.initial_pose["z_init"] = 0.0
robot0_init_yaw = 0
self.validation_ep_num += 1
if self.validation_flag and self.validation_ep_num >= self.config.max_testing_episodes:
self.next_world_name = "validation_crossing"
self.validation_ep_num = 0
elif self.world_name == "validation_crossing":
#self.reset_pedsim()
self.initial_pose["x_init"] = -4.0
self.initial_pose["y_init"] = 0
self.initial_pose["z_init"] = 0.0
robot0_init_yaw = 3.1415
self.validation_ep_num += 1
if self.validation_flag and self.validation_ep_num >= self.config.max_testing_episodes:
self.next_world_name = "museum_static"
self.validation_ep_num = 0
elif self.world_name == "museum_static":
self.initial_pose["x_init"] = 8
self.initial_pose["y_init"] = 5
self.initial_pose["z_init"] = 0.0
robot0_init_yaw = -.785
self.validation_ep_num += 1
if self.validation_flag and self.validation_ep_num >= self.config.max_testing_episodes:
self.next_world_name = "museum_static_and_dynamic"
self.validation_ep_num = 0
else:
self.validation_ep_num += 1
elif self.world_name == "museum_static_and_dynamic":
#self.reset_pedsim()
self.initial_pose["x_init"] = 17
self.initial_pose["y_init"] = 5
self.initial_pose["z_init"] = 0.0
robot0_init_yaw = -.785
robot0_init_quat = Quaternion.from_euler(0, 0, robot0_init_yaw)
self.initial_pose["x_rot_init"] = robot0_init_quat.x
self.initial_pose["y_rot_init"] = robot0_init_quat.y
self.initial_pose["z_rot_init"] = robot0_init_quat.z
self.initial_pose["w_rot_init"] = robot0_init_quat.w
#print("turtlebot3_tentabot_drl::get_init_pose -> Updated initial_pose x: " + str(self.initial_pose["x_init"]) + ", y: " + str(self.initial_pose["y_init"]))
rospy.logdebug("turtlebot3_tentabot_drl::get_init_pose -> Updated initial_pose x: " + str(self.initial_pose["x_init"]) + ", y: " + str(self.initial_pose["y_init"]))
if init_flag:
super(TurtleBot3TentabotDRL, self).update_initial_pose(self.initial_pose)
return self.initial_pose
'''
DESCRIPTION: TODO...Gets the goal location for each robot
'''
def get_goal_location(self):
self.goal_pose = {}
if self.world_name == "training_garden_static_0":
goal_areas_x = []
goal_areas_x.extend(([-0.5,0.5], [1.5,2.5], [-2.5,-1], [-2.5,0.5]))
goal_areas_y = []
goal_areas_y.extend(([-0.5,0.5], [0.5,1.5], [1,1.5], [-1.5,-1]))
area_idx = random.randint(0, len(goal_areas_x)-1)
while self.robot_init_area_id == area_idx:
area_idx = random.randint(0, len(goal_areas_x)-1)
self.goal_pose["x"] = random.uniform(goal_areas_x[area_idx][0], goal_areas_x[area_idx][1])
self.goal_pose["y"] = random.uniform(goal_areas_y[area_idx][0], goal_areas_y[area_idx][1])
self.goal_pose["z"] = 0.0
elif self.world_name == "training_garden_static_1":
goal_areas_x = []
#goal_areas_x.extend(([-1.5,1.5], [4.5,5.5], [-5.5,-2], [1,5], [-5.5,-3.5]))
goal_areas_x.extend(([-1.5,0.5], [1.0,5.5], [-5.5,-3], [4.0,5.5], [-5.5,-4.5]))
goal_areas_y = []
#goal_areas_y.extend(([-1,1], [1,4.5], [3.5, 4.5], [-4,-2], [-4.5,-2]))
goal_areas_y.extend(([0,2], [4,4.5], [4.0,4.5], [-4.5,-3.0], [-4.5,-3.5]))
area_idx = random.randint(0, len(goal_areas_x)-1)
while self.robot_init_area_id == area_idx:
area_idx = random.randint(0, len(goal_areas_x)-1)
self.goal_pose["x"] = random.uniform(goal_areas_x[area_idx][0], goal_areas_x[area_idx][1])
self.goal_pose["y"] = random.uniform(goal_areas_y[area_idx][0], goal_areas_y[area_idx][1])
self.goal_pose["z"] = 0.0
elif self.world_name == "corridor":
self.goal_pose["x"] = 0.0
self.goal_pose["y"] = 7.0
self.goal_pose["z"] = 0.0
elif self.world_name == "training_garden_dynamic_0":
goal_areas_x = []
goal_areas_x.extend(([2,2.5], [-2,-2.5]))
goal_areas_y = []
goal_areas_y.extend(([-0.5,0.5], [-0.5,0.5]))
area_idx = random.randint(0, len(goal_areas_x)-1)
while self.robot_init_area_id == area_idx:
area_idx = random.randint(0, len(goal_areas_x)-1)
self.goal_pose["x"] = random.uniform(goal_areas_x[area_idx][0], goal_areas_x[area_idx][1])
self.goal_pose["y"] = random.uniform(goal_areas_y[area_idx][0], goal_areas_y[area_idx][1])
self.goal_pose["z"] = 0.0
elif self.world_name == "training_garden_dynamic_1":
goal_areas_x = []
goal_areas_x.extend(([-2.0,-0.5], [4.5,5.5], [-5.5,-4.5], [3,5], [-5.5,-4.5]))
goal_areas_y = []
goal_areas_y.extend(([-4.0,0.0], [3.5,4.5], [4, 4.5], [-4,-1], [-4.5,-4]))
area_idx = random.randint(0, len(goal_areas_x)-1)
while self.robot_init_area_id == area_idx:
area_idx = random.randint(0, len(goal_areas_x)-1)
self.goal_pose["x"] = random.uniform(goal_areas_x[area_idx][0], goal_areas_x[area_idx][1])
self.goal_pose["y"] = random.uniform(goal_areas_y[area_idx][0], goal_areas_y[area_idx][1])
self.goal_pose["z"] = 0.0
elif self.world_name == "training_garden_dynamic_2":
goal_areas_x = []
goal_areas_x.extend(([4.5,5.5], [-5.5,-3.5], [3.0,5.0], [-5.5,-5.0], [-2.0,0.5]))
goal_areas_y = []
goal_areas_y.extend(([3.5,4.5], [3.0,4.5], [-4.5,-2], [-4.5,-2], [0.0,-0.5]))
area_idx = random.randint(0, len(goal_areas_x)-1)
while self.robot_init_area_id == area_idx:
area_idx = random.randint(0, len(goal_areas_x)-1)
self.goal_pose["x"] = random.uniform(goal_areas_x[area_idx][0], goal_areas_x[area_idx][1])
self.goal_pose["y"] = random.uniform(goal_areas_y[area_idx][0], goal_areas_y[area_idx][1])
self.goal_pose["z"] = 0.0
elif self.world_name == "testing_dwarl_zigzag_static":
self.goal_pose["x"] = -11.0
self.goal_pose["y"] = 8.0
self.goal_pose["z"] = 0.0
elif self.world_name == "testing_lvl_1":
self.goal_pose["x"] = -4.0
self.goal_pose["y"] = -1.0
self.goal_pose["z"] = 0.0
elif self.world_name == "testing_lvl_2":
self.goal_pose["x"] = -4.0
self.goal_pose["y"] = -1.0
self.goal_pose["z"] = 0.0
elif self.world_name == "testing_lvl_3":
self.goal_pose["x"] = -4.0
self.goal_pose["y"] = -1.0
self.goal_pose["z"] = 0.0
elif self.world_name == "testing_lvl_4":
self.goal_pose["x"] = -4.0
self.goal_pose["y"] = 2.0
self.goal_pose["z"] = 0.0
elif self.world_name == "testing_lvl_5":
self.goal_pose["x"] = -4.0
self.goal_pose["y"] = 2.0
self.goal_pose["z"] = 0.0
elif self.world_name == "testing_lvl_6":
self.goal_pose["x"] = -4.0
self.goal_pose["y"] = 2.0
self.goal_pose["z"] = 0.0
elif self.world_name == "testing_lvl_7":
self.goal_pose["x"] = -4.0
self.goal_pose["y"] = 2.0
self.goal_pose["z"] = 0.0
elif self.world_name == "testing_lvl_8":
self.goal_pose["x"] = -4.0
self.goal_pose["y"] = 3.0
self.goal_pose["z"] = 0.0
elif self.world_name == "testing_lvl_9":
self.goal_pose["x"] = -4.0
self.goal_pose["y"] = 3.0
self.goal_pose["z"] = 0.0
elif self.world_name == "testing_lvl_10":
self.goal_pose["x"] = -4.0
self.goal_pose["y"] = 3.0
self.goal_pose["z"] = 0.0
elif self.world_name == "validation" or \
self.world_name == "validation_overtaking":
self.goal_pose["x"] = 5.5
self.goal_pose["y"] = 0
self.goal_pose["z"] = 0.0
elif self.world_name == "validation_passing":
self.goal_pose["x"] = 5.5
self.goal_pose["y"] = -4
self.goal_pose["z"] = 0.0
elif self.world_name == "validation_crossing":
self.goal_pose["x"] = -9.5
self.goal_pose["y"] = 0.0
self.goal_pose["z"] = 0.0
elif self.world_name == "museum_static":
self.goal_pose["x"] = 14
self.goal_pose["y"] = -5
self.goal_pose["z"] = 0.0
elif self.world_name == "museum_static_and_dynamic":
self.goal_pose["x"] = 24
self.goal_pose["y"] = -6
self.goal_pose["z"] = 0.0
if self.config.observation_space_type == "Tentabot_FC" or \
self.config.observation_space_type == "Tentabot_1DCNN_FC" or \
self.config.observation_space_type == "Tentabot_laser_1DCNN_FC" or \
self.config.observation_space_type == "Tentabot_2DCNN_FC" or \
self.config.observation_space_type == "Tentabot_2DCNN" or \
self.config.observation_space_type == "Tentabot_WP_FC":
self.client_update_goal()
self.config.set_goal(self.goal_pose)
self.publish_goal()
self.init_distance2goal = self.get_initdistance2goal()
'''
DESCRIPTION: TODO...
'''
def calculate_euclidean_distance(self, p1, p2):
return math.sqrt((p1["x"] - p2["x"])**2 + (p1["y"] - p2["y"])**2 + (p1["z"] - p2["z"])**2)
'''
DESCRIPTION: TODO...Get the initial distance to the goal
'''
def get_initdistance2goal(self):
return math.sqrt((self.goal_pose["x"] - self.initial_pose["x_init"])**2 + (self.goal_pose["y"] - self.initial_pose["y_init"])**2)
'''
DESCRIPTION: TODO...Gets the distance to the goal
'''
def get_distance2goal(self):
self.update_odom()
return math.sqrt((self.goal_pose["x"] - self.odom_dict["x"])**2 + (self.goal_pose["y"] - self.odom_dict["y"])**2)
'''
DESCRIPTION: TODO...
'''
def check_collision(self, laser_scan):
self.min_distance2obstacle = min(laser_scan.ranges)
for scan_range in laser_scan.ranges:
if (self.config.obs_min_range > scan_range > 0):
if not self._episode_done:
self.total_collisions += 1
#rospy.logdebug("turtlebot3_tentabot_drl::check_collision -> Hit me baby one more time!")
print("turtlebot3_tentabot_drl::check_collision -> Hit me baby one more time!")
self._episode_done = True
return True
return False
'''
DESCRIPTION: TODO...Discards all laser readings whose index is not a multiple of the laser_downsample_scale value.
'''
def filter_laser_scan(self, data):
#data = self.get_laser_scan()
data_size = len(data.ranges)
filtered_laser_scan = []
normalized_laser_scan = []
if 0 < self.config.laser_size_downsampled < data_size:
max_n_laser_ranges = self.config.laser_size_downsampled
else:
max_n_laser_ranges = data_size
mod = self.config.laser_downsample_scale
for i, item in enumerate(data.ranges):
if (i % mod == 0):
if item == float ('Inf') or np.isinf(item) or item > self.config.laser_range_max:
if len(filtered_laser_scan) < max_n_laser_ranges:
#filtered_laser_scan.append(round(self.config.laser_range_max, self.config.mantissa_precision))
filtered_laser_scan.append(self.config.laser_range_max)
normalized_laser_scan.append(1.0)
# NUA DEBUG:
#filtered_laser_scan.append(round(self.step_num, self.config.mantissa_precision))
#filtered_laser_scan.append(round(i, self.config.mantissa_precision))
elif np.isnan(item) or item < self.config.laser_range_min:
if len(filtered_laser_scan) < max_n_laser_ranges:
#filtered_laser_scan.append(round(self.config.laser_range_min, self.config.mantissa_precision))
filtered_laser_scan.append(self.config.laser_range_min)
#normalized_laser_scan.append(round(self.config.laser_range_min / self.config.laser_range_max, self.config.mantissa_precision))
normalized_laser_scan.append(self.config.laser_range_min / self.config.laser_range_max)
# NUA DEBUG:
#filtered_laser_scan.append(round(self.step_num, self.config.mantissa_precision))
#filtered_laser_scan.append(round(i, self.config.mantissa_precision))
else:
if len(filtered_laser_scan) < max_n_laser_ranges:
#filtered_laser_scan.append(round(item, self.config.mantissa_precision))
filtered_laser_scan.append(item)
#normalized_laser_scan.append(round(item / self.config.laser_range_max, self.config.mantissa_precision))
normalized_laser_scan.append(item / self.config.laser_range_max)
# NUA DEBUG:
#filtered_laser_scan.append(round(self.step_num, self.config.mantissa_precision))
#filtered_laser_scan.append(round(i, self.config.mantissa_precision))
if self.config.observation_space_type == "laser_FC" or \
self.config.observation_space_type == "Tentabot_FC" or \
self.config.observation_space_type == "Tentabot_WP_FC":
self.filtered_laser_ranges = np.array(filtered_laser_scan).reshape(self.config.fc_obs_shape)
self.normalized_laser_ranges = np.array(normalized_laser_scan).reshape(self.config.fc_obs_shape)
else:
self.filtered_laser_ranges = np.array(filtered_laser_scan).reshape(self.config.cnn_obs_shape)
self.normalized_laser_ranges = np.array(normalized_laser_scan).reshape(self.config.cnn_obs_shape)
self.publish_filtered_laser_scan()
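# --- Hedged sketch (illustrative only, not called by this class) ---------------
# A vectorized equivalent of the filtering above (requires numpy >= 1.17 for the
# nan/posinf keywords): keep every laser_downsample_scale-th ray, replace NaN and
# inf readings, and clip into [laser_range_min, laser_range_max]. The
# max_n_laser_ranges truncation is omitted for brevity.
def _filter_laser_scan_sketch(self, data):
    ranges = np.asarray(data.ranges, dtype=np.float32)[::self.config.laser_downsample_scale]
    ranges = np.nan_to_num(ranges, nan=self.config.laser_range_min, posinf=self.config.laser_range_max)
    filtered = np.clip(ranges, self.config.laser_range_min, self.config.laser_range_max)
    return filtered, filtered / self.config.laser_range_max  # filtered and normalized ranges
# --------------------------------------------------------------------------------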
'''
DESCRIPTION: TODO...
'''
def publish_filtered_laser_scan(self):
filtered_laser_ranges = self.filtered_laser_ranges.reshape(self.config.fc_obs_shape)
laser_scan_msg = LaserScan()
laser_scan_msg.angle_min = self.config.laser_angle_min
laser_scan_msg.angle_max = self.config.laser_angle_max
laser_scan_msg.angle_increment = self.config.laser_angle_increment
laser_scan_msg.time_increment = self.config.laser_time_increment
laser_scan_msg.scan_time = self.config.laser_scan_time
laser_scan_msg.range_min = self.config.laser_range_min
laser_scan_msg.range_max = self.config.laser_range_max
laser_scan_msg.ranges = tuple(filtered_laser_ranges)
laser_scan_msg.header.frame_id = self.config.laser_frame_id
laser_scan_msg.header.stamp = rospy.Time.now()
self.filtered_laser_pub.publish(laser_scan_msg)
'''
DESCRIPTION: TODO...
'''
def update_obs_target(self):
# Update the odometry data
self.update_odom()
# Update "distance to target" and "angle to target"
translation_robot_wrt_world = tf.transformations.translation_matrix((self.odom_data.pose.pose.position.x,
self.odom_data.pose.pose.position.y,
self.odom_data.pose.pose.position.z))
rotation_robot_wrt_world = tf.transformations.quaternion_matrix((self.odom_data.pose.pose.orientation.x,
self.odom_data.pose.pose.orientation.y,
self.odom_data.pose.pose.orientation.z,
self.odom_data.pose.pose.orientation.w))
transform_robot_wrt_world = np.matmul(translation_robot_wrt_world, rotation_robot_wrt_world)
transform_world_wrt_robot = tf.transformations.inverse_matrix(transform_robot_wrt_world)
translation_goal_wrt_world = np.array([ [self.goal_pose["x"]], [self.goal_pose["y"]], [0.0], [1.0] ])
translation_goal_wrt_robot = np.dot(transform_world_wrt_robot, translation_goal_wrt_world)
current_angle2goal = math.atan2(translation_goal_wrt_robot[1], translation_goal_wrt_robot[0])
current_distance2goal = self.get_distance2goal()
self.obs_target = np.array([[current_distance2goal, current_angle2goal]]).reshape(self.config.fc_obs_shape)
#self.obs_target = np.array([[self.step_num, self.step_num]]).reshape(self.config.fc_obs_shape)
'''
DESCRIPTION: Updates the target and waypoint observations for Tentabot_WP. TODO: merge with update_obs_target and init_obs_waypoints, which share most of this logic.
'''
def update_obs_target2(self):
self.obs_wp = np.zeros(self.config.n_wp * 2)
if self.client_move_base_get_plan():
translation_robot_wrt_world = tf.transformations.translation_matrix((self.odom_data.pose.pose.position.x,
self.odom_data.pose.pose.position.y,
self.odom_data.pose.pose.position.z))
rotation_robot_wrt_world = tf.transformations.quaternion_matrix((self.odom_data.pose.pose.orientation.x,
self.odom_data.pose.pose.orientation.y,
self.odom_data.pose.pose.orientation.z,
self.odom_data.pose.pose.orientation.w))
transform_robot_wrt_world = np.matmul(translation_robot_wrt_world, rotation_robot_wrt_world)
transform_world_wrt_robot = tf.transformations.inverse_matrix(transform_robot_wrt_world)
wp_skip = int(self.config.look_ahead / self.config.wp_global_dist)
# downsample global plan
self.full_wp = self.move_base_global_plan[wp_skip::wp_skip]
# add last waypoint (goal pos)
if (len(self.move_base_global_plan) - 1)%wp_skip > 0:
self.full_wp.append(self.move_base_global_plan[-1])
# print("turtlebot3_tentabot_drl::init_obs_waypoints -> self.full_wp length: " + str(len(self.full_wp)))
for i in range(self.config.n_wp):
try:
tmp_wp = self.full_wp[i]
translation_wp_wrt_world = np.array([ [tmp_wp.pose.position.x], [tmp_wp.pose.position.y], [0.0], [1.0] ])
translation_wp_wrt_robot = np.dot(transform_world_wrt_robot, translation_wp_wrt_world)
self.obs_wp[i*2] = translation_wp_wrt_robot[0]
self.obs_wp[i*2+1] = translation_wp_wrt_robot[1]
except IndexError:
# fewer waypoints than n_wp remain in the plan
pass
# self.publish_debug_visu(self.move_base_global_plan)
wp_obs = list(self.full_wp[0:self.config.n_wp])
self.publish_wp_visu(self.full_wp,wp_obs)
translation_goal_wrt_world = np.array([ [self.goal_pose["x"]], [self.goal_pose["y"]], [0.0], [1.0] ])
translation_goal_wrt_robot = np.dot(transform_world_wrt_robot, translation_goal_wrt_world)
current_angle2goal = math.atan2(translation_goal_wrt_robot[1], translation_goal_wrt_robot[0])
current_distance2goal = self.get_distance2goal()
self.obs_target = np.array([[current_distance2goal, current_angle2goal]]).reshape(self.config.fc_obs_shape)
#self.obs_target = np.array([[self.step_num, self.step_num]]).reshape(self.config.fc_obs_shape)
'''
DESCRIPTION: Initializes the waypoint observation by requesting a global plan from move_base, downsampling it by the look-ahead distance, and transforming the first n_wp waypoints into the robot frame.
'''
def init_obs_waypoints(self):
self.obs_wp = np.zeros(self.config.n_wp * 2)
if self.client_move_base_get_plan():
translation_robot_wrt_world = tf.transformations.translation_matrix((self.odom_data.pose.pose.position.x,
self.odom_data.pose.pose.position.y,
self.odom_data.pose.pose.position.z))
rotation_robot_wrt_world = tf.transformations.quaternion_matrix((self.odom_data.pose.pose.orientation.x,
self.odom_data.pose.pose.orientation.y,
self.odom_data.pose.pose.orientation.z,
self.odom_data.pose.pose.orientation.w))
transform_robot_wrt_world = np.matmul(translation_robot_wrt_world, rotation_robot_wrt_world)
transform_world_wrt_robot = tf.transformations.inverse_matrix(transform_robot_wrt_world)
wp_skip = int(self.config.look_ahead / self.config.wp_global_dist)
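# wp_skip is the number of global-plan poses that corresponds to the look-ahead distance;
# it is used as the stride when downsampling the plan into waypoints.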
# downsample global plan
self.full_wp = self.move_base_global_plan[wp_skip::wp_skip]
# add last waypoint (goal pos)
if (len(self.move_base_global_plan) - 1)%wp_skip > 0:
self.full_wp.append(self.move_base_global_plan[-1])
# print("turtlebot3_tentabot_drl::init_obs_waypoints -> self.full_wp length: " + str(len(self.full_wp)))
for i in range(self.config.n_wp):
try:
tmp_wp = self.full_wp[i]
translation_wp_wrt_world = np.array([ [tmp_wp.pose.position.x], [tmp_wp.pose.position.y], [0.0], [1.0] ])
translation_wp_wrt_robot = np.dot(transform_world_wrt_robot, translation_wp_wrt_world)
self.obs_wp[i*2] = translation_wp_wrt_robot[0]
self.obs_wp[i*2+1] = translation_wp_wrt_robot[1]
except IndexError:
# fewer waypoints than n_wp remain in the plan
pass
# self.publish_debug_visu(self.move_base_global_plan)
wp_obs = list(self.full_wp[0:self.config.n_wp])
self.publish_wp_visu(self.full_wp,wp_obs)
'''
DESCRIPTION: Updates the waypoint observation: finds the closest waypoint on the downsampled global plan, truncates waypoints that have been reached, and transforms the next n_wp waypoints into the robot frame.
'''
def update_obs_waypoints(self):
self.update_odom()
translation_robot_wrt_world = tf.transformations.translation_matrix((self.odom_data.pose.pose.position.x,
self.odom_data.pose.pose.position.y,
self.odom_data.pose.pose.position.z))
rotation_robot_wrt_world = tf.transformations.quaternion_matrix((self.odom_data.pose.pose.orientation.x,
self.odom_data.pose.pose.orientation.y,
self.odom_data.pose.pose.orientation.z,
self.odom_data.pose.pose.orientation.w))
transform_robot_wrt_world = np.matmul(translation_robot_wrt_world, rotation_robot_wrt_world)
transform_world_wrt_robot = tf.transformations.inverse_matrix(transform_robot_wrt_world)
trunc_list = False
self.obs_wp = np.zeros(self.config.n_wp * 2)
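# Walk along the remaining global-plan waypoints: the distance to the robot decreases until the
# closest waypoint is passed; from that index on, the next n_wp waypoints form the observation.
# If the closest waypoint lies within wp_reached_dist, the list is truncated up to it below.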
tmp_wp = self.full_wp[0]
dist = math.sqrt( (self.odom_data.pose.pose.position.x - tmp_wp.pose.position.x)**2 + (self.odom_data.pose.pose.position.y - tmp_wp.pose.position.y)**2 )
for s in range(1, len(self.full_wp)):
tmp_wp = self.full_wp[s]
tmp_dist = math.sqrt( (self.odom_data.pose.pose.position.x - tmp_wp.pose.position.x)**2 + (self.odom_data.pose.pose.position.y - tmp_wp.pose.position.y)**2 )
# compare to previous on every point but last point
if tmp_dist < dist and s < len(self.full_wp)-1:
dist = tmp_dist
else: # found closest waypoint
start_idx = s-1
# is closest wp reached in a certain radius?
if dist < self.config.wp_reached_dist:
trunc_list = True
# is closest waypoint already overtaken? Then take next one.
if dist + self.config.look_ahead > tmp_dist:
start_idx = s
# get all sequencing waypoints
for i in range(start_idx, start_idx+self.config.n_wp):
try:
tmp_wp = self.full_wp[i]
translation_wp_wrt_world = np.array([ [tmp_wp.pose.position.x], [tmp_wp.pose.position.y], [0.0], [1.0] ])
translation_wp_wrt_robot = np.dot(transform_world_wrt_robot, translation_wp_wrt_world)
# index into obs_wp relative to start_idx so the n_wp slots are filled from the beginning
self.obs_wp[(i-start_idx)*2] = translation_wp_wrt_robot[0]
self.obs_wp[(i-start_idx)*2+1] = translation_wp_wrt_robot[1]
except IndexError:
# fewer waypoints than n_wp remain in the plan
pass
break
# print(self.obs_wp)
# print(self.obs_wp[0], self.obs_wp[1] )
wp_obs = list(self.full_wp[start_idx:start_idx+self.config.n_wp])
if trunc_list:
self.full_wp = self.full_wp[s-1:]
self.publish_wp_visu(self.full_wp,wp_obs)
'''
DESCRIPTION: Initializes the observation and action spaces according to config.observation_space_type (laser, occupancy/Tentabot, waypoint and laser-image variants), including the stacked-observation buffers.
'''
def init_observation_action_space(self):
self.episode_oar_data = dict(obs=[], acts=[], infos=None, terminal=[], rews=[])
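# Each branch below defines, for one observation_space_type:
# - obs_data: rolling buffers holding the last n_obs_stack * n_skip_obs_stack frames per modality,
# - self.obs: the initial (lower-bound) observation,
# - self.observation_space / self.action_space for the gym environment.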
if self.config.observation_space_type == "laser_FC":
if self.config.laser_normalize_flag:
obs_laser_low = np.full((1, self.config.laser_n_range), 0.0).reshape(self.config.fc_obs_shape)
obs_laser_high = np.full((1, self.config.laser_n_range), 1.0).reshape(self.config.fc_obs_shape)
else:
obs_laser_low = np.full((1, self.config.laser_n_range), self.config.laser_range_min).reshape(self.config.fc_obs_shape)
obs_laser_high = np.full((1, self.config.laser_n_range), self.config.laser_range_max).reshape(self.config.fc_obs_shape)
obs_target_low = np.array([[0.0, -math.pi]]).reshape(self.config.fc_obs_shape)
obs_target_high = np.array([[np.inf, math.pi]]).reshape(self.config.fc_obs_shape)
obs_action_low = np.array([[self.config.min_lateral_speed, self.config.min_angular_speed]]).reshape(self.config.fc_obs_shape)
obs_action_high = np.array([[self.config.max_lateral_speed, self.config.max_angular_speed]]).reshape(self.config.fc_obs_shape)
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_laser_low shape: " + str(obs_laser_low.shape))
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_target_low shape: " + str(obs_target_low.shape))
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_action_low shape: " + str(obs_action_low.shape))
self.obs_data = { "laser": np.vstack([obs_laser_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"target": np.vstack([obs_target_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"action": np.vstack([obs_action_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack))}
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_data laser shape: " + str(self.obs_data["laser"].shape))
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_data target shape: " + str(self.obs_data["target"].shape))
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_data action shape: " + str(self.obs_data["action"].shape))
obs_stacked_laser_low = np.hstack([obs_laser_low] * self.config.n_obs_stack)
obs_stacked_laser_high = np.hstack([obs_laser_high] * self.config.n_obs_stack)
obs_space_low = np.concatenate((obs_stacked_laser_low, obs_target_low, obs_action_low), axis=0)
obs_space_high = np.concatenate((obs_stacked_laser_high, obs_target_high, obs_action_high), axis=0)
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_stacked_laser_low shape: " + str(obs_stacked_laser_low.shape))
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_space_low shape: " + str(obs_space_low.shape))
self.obs = obs_space_low
self.observation_space = spaces.Box(obs_space_low, obs_space_high)
self.action_space = spaces.Discrete(self.config.n_actions)
elif self.config.observation_space_type == "Tentabot_FC":
obs_occupancy_low = np.full((1, self.config.n_observations), 0.0).reshape(self.config.fc_obs_shape)
obs_occupancy_high = np.full((1, self.config.n_observations), 1.0).reshape(self.config.fc_obs_shape)
obs_target_low = np.array([[0.0, -math.pi]]).reshape(self.config.fc_obs_shape)
obs_target_high = np.array([[np.inf, math.pi]]).reshape(self.config.fc_obs_shape)
obs_action_low = np.array([[self.config.min_lateral_speed, self.config.min_angular_speed]]).reshape(self.config.fc_obs_shape)
obs_action_high = np.array([[self.config.max_lateral_speed, self.config.max_angular_speed]]).reshape(self.config.fc_obs_shape)
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_occupancy_low shape: " + str(obs_occupancy_low.shape))
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_target_low shape: " + str(obs_target_low.shape))
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_action_low shape: " + str(obs_action_low.shape))
self.obs_data = { "occupancy": np.vstack([obs_occupancy_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"target": np.vstack([obs_target_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"action": np.vstack([obs_action_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack))}
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_data occupancy shape: " + str(self.obs_data["occupancy"].shape))
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_data target shape: " + str(self.obs_data["target"].shape))
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_data action shape: " + str(self.obs_data["action"].shape))
obs_stacked_occupancy_low = np.hstack([obs_occupancy_low] * self.config.n_obs_stack)
obs_stacked_occupancy_high = np.hstack([obs_occupancy_high] * self.config.n_obs_stack)
obs_space_low = np.concatenate((obs_stacked_occupancy_low, obs_target_low, obs_action_low), axis=0)
obs_space_high = np.concatenate((obs_stacked_occupancy_high, obs_target_high, obs_action_high), axis=0)
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_stacked_occupancy_low shape: " + str(obs_stacked_occupancy_low.shape))
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_space_low shape: " + str(obs_space_low.shape))
self.obs = obs_space_low
self.observation_space = spaces.Box(obs_space_low, obs_space_high)
self.action_space = spaces.Discrete(self.config.n_actions)
elif self.config.observation_space_type == "Tentabot_1DCNN_FC" or \
self.config.observation_space_type == "Tentabot_2DCNN_FC":
obs_occupancy_low = np.full((1, self.config.n_observations), 0.0).reshape(self.config.cnn_obs_shape)
obs_occupancy_high = np.full((1, self.config.n_observations), 1.0).reshape(self.config.cnn_obs_shape)
obs_target_low = np.array([[0.0, -math.pi]]).reshape(self.config.fc_obs_shape)
obs_target_high = np.array([[np.inf, math.pi]]).reshape(self.config.fc_obs_shape)
obs_action_low = np.array([[self.config.min_lateral_speed, self.config.min_angular_speed]]).reshape(self.config.fc_obs_shape)
obs_action_high = np.array([[self.config.max_lateral_speed, self.config.max_angular_speed]]).reshape(self.config.fc_obs_shape)
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_occupancy_low shape: " + str(obs_occupancy_low.shape))
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_target_low shape: " + str(obs_target_low.shape))
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_action_low shape: " + str(obs_action_low.shape))
if self.config.cit_flag:
self.obs_data = { "occupancy": np.vstack([obs_occupancy_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"target": np.vstack([obs_target_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"action": np.vstack([obs_action_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack))}
obs_space_occupancy_low = np.vstack([obs_occupancy_low] * self.config.n_obs_stack)
obs_space_occupancy_high = np.vstack([obs_occupancy_high] * self.config.n_obs_stack)
else:
self.obs_data = { "occupancy": np.hstack([obs_occupancy_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"target": np.vstack([obs_target_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"action": np.vstack([obs_action_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack))}
obs_space_occupancy_low = np.hstack([obs_occupancy_low] * self.config.n_obs_stack)
obs_space_occupancy_high = np.hstack([obs_occupancy_high] * self.config.n_obs_stack)
obs_space_target_action_low = np.concatenate((obs_target_low, obs_action_low), axis=0)
obs_space_target_action_high = np.concatenate((obs_target_high, obs_action_high), axis=0)
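# For the 2D CNN variant the stacked occupancy grid gets an extra leading axis,
# i.e. a single image channel as expected by the 2D convolutional feature extractor.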
if self.config.observation_space_type == "Tentabot_2DCNN_FC":
obs_space_occupancy_low = np.expand_dims(obs_space_occupancy_low, axis=0)
obs_space_occupancy_high = np.expand_dims(obs_space_occupancy_high, axis=0)
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_data occupancy shape: " + str(self.obs_data["occupancy"].shape))
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_data target shape: " + str(self.obs_data["target"].shape))
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_data action shape: " + str(self.obs_data["action"].shape))
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_space_occupancy_low shape: " + str(obs_space_occupancy_low.shape))
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_space_target_action_low shape: " + str(obs_space_target_action_low.shape))
self.obs = {"occupancy": obs_space_occupancy_low,
"target_action": obs_space_target_action_low}
self.observation_space = spaces.Dict({ "occupancy": spaces.Box(obs_space_occupancy_low, obs_space_occupancy_high),
"target_action": spaces.Box(obs_space_target_action_low, obs_space_target_action_high)})
self.action_space = spaces.Discrete(self.config.n_actions)
elif self.config.observation_space_type == "laser_1DCNN_FC":
if self.config.laser_normalize_flag:
obs_laser_low = np.full((1, self.config.laser_n_range), 0.0).reshape(self.config.cnn_obs_shape)
obs_laser_high = np.full((1, self.config.laser_n_range), 1.0).reshape(self.config.cnn_obs_shape)
else:
obs_laser_low = np.full((1, self.config.laser_n_range), self.config.laser_range_min).reshape(self.config.cnn_obs_shape)
obs_laser_high = np.full((1, self.config.laser_n_range), self.config.laser_range_max).reshape(self.config.cnn_obs_shape)
obs_target_low = np.array([[0.0, -math.pi]]).reshape(self.config.fc_obs_shape)
obs_target_high = np.array([[np.inf, math.pi]]).reshape(self.config.fc_obs_shape)
action_space_low = np.array([[self.config.min_lateral_speed, self.config.min_angular_speed]]).reshape(self.config.fc_obs_shape)
action_space_high = np.array([[self.config.max_lateral_speed, self.config.max_angular_speed]]).reshape(self.config.fc_obs_shape)
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_laser_low shape: " + str(obs_laser_low.shape))
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_target_low shape: " + str(obs_target_low.shape))
#print("turtlebot3_tentabot_drl::init_observation_action_space -> action_space_low shape: " + str(action_space_low.shape))
self.obs_data = { "laser": np.vstack([obs_laser_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"target": np.vstack([obs_target_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"action": np.vstack([action_space_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack))}
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_data laser shape: " + str(self.obs_data["laser"].shape))
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_data target shape: " + str(self.obs_data["target"].shape))
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_data action shape: " + str(self.obs_data["action"].shape))
obs_space_laser_low = np.vstack([obs_laser_low] * self.config.n_obs_stack)
obs_space_laser_high = np.vstack([obs_laser_high] * self.config.n_obs_stack)
obs_space_target_action_low = np.concatenate((obs_target_low, action_space_low.reshape(self.config.fc_obs_shape)), axis=0)
obs_space_target_action_high = np.concatenate((obs_target_high, action_space_high.reshape(self.config.fc_obs_shape)), axis=0)
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_space_laser_low shape: " + str(obs_space_laser_low.shape))
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_space_target_action_low shape: " + str(obs_space_target_action_low.shape))
self.obs = {"laser": obs_space_laser_low,
"target_action": obs_space_target_action_low}
self.observation_space = spaces.Dict({ "laser": spaces.Box(obs_space_laser_low, obs_space_laser_high),
"target_action": spaces.Box(obs_space_target_action_low, obs_space_target_action_high)})
self.action_space = spaces.Discrete(self.config.n_actions)
#self.action_space = spaces.Box(action_space_low, action_space_high)
elif self.config.observation_space_type == "Tentabot_laser_1DCNN_FC":
if self.config.laser_normalize_flag:
obs_laser_low = np.full((1, self.config.laser_n_range), 0.0).reshape(self.config.cnn_obs_shape)
obs_laser_high = np.full((1, self.config.laser_n_range), 1.0).reshape(self.config.cnn_obs_shape)
else:
obs_laser_low = np.full((1, self.config.laser_n_range), self.config.laser_range_min).reshape(self.config.cnn_obs_shape)
obs_laser_high = np.full((1, self.config.laser_n_range), self.config.laser_range_max).reshape(self.config.cnn_obs_shape)
obs_occupancy_low = np.full((1, self.config.n_observations), 0.0).reshape(self.config.cnn_obs_shape)
obs_occupancy_high = np.full((1, self.config.n_observations), 1.0).reshape(self.config.cnn_obs_shape)
obs_target_low = np.array([[0.0, -math.pi]]).reshape(self.config.fc_obs_shape)
obs_target_high = np.array([[np.inf, math.pi]]).reshape(self.config.fc_obs_shape)
obs_action_low = np.array([[self.config.min_lateral_speed, self.config.min_angular_speed]]).reshape(self.config.fc_obs_shape)
obs_action_high = np.array([[self.config.max_lateral_speed, self.config.max_angular_speed]]).reshape(self.config.fc_obs_shape)
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_occupancy_low shape: " + str(obs_occupancy_low.shape))
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_target_low shape: " + str(obs_target_low.shape))
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_action_low shape: " + str(obs_action_low.shape))
if self.config.cit_flag:
self.obs_data = { "laser": np.vstack([obs_laser_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"occupancy": np.vstack([obs_occupancy_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"target": np.vstack([obs_target_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"action": np.vstack([obs_action_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack))}
obs_space_laser_low = np.vstack([obs_laser_low] * self.config.n_obs_stack)
obs_space_laser_high = np.vstack([obs_laser_high] * self.config.n_obs_stack)
obs_space_occupancy_low = np.vstack([obs_occupancy_low] * self.config.n_obs_stack)
obs_space_occupancy_high = np.vstack([obs_occupancy_high] * self.config.n_obs_stack)
else:
self.obs_data = { "laser": np.vstack([obs_laser_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"occupancy": np.hstack([obs_occupancy_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"target": np.vstack([obs_target_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"action": np.vstack([obs_action_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack))}
obs_space_laser_low = np.vstack([obs_laser_low] * self.config.n_obs_stack)
obs_space_laser_high = np.vstack([obs_laser_high] * self.config.n_obs_stack)
obs_space_occupancy_low = np.hstack([obs_occupancy_low] * self.config.n_obs_stack)
obs_space_occupancy_high = np.hstack([obs_occupancy_high] * self.config.n_obs_stack)
obs_space_target_action_low = np.concatenate((obs_target_low, obs_action_low), axis=0)
obs_space_target_action_high = np.concatenate((obs_target_high, obs_action_high), axis=0)
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_data occupancy shape: " + str(self.obs_data["occupancy"].shape))
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_data target shape: " + str(self.obs_data["target"].shape))
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_data action shape: " + str(self.obs_data["action"].shape))
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_space_occupancy_low shape: " + str(obs_space_occupancy_low.shape))
#print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_space_target_action_low shape: " + str(obs_space_target_action_low.shape))
self.obs = {"laser": obs_space_laser_low,
"occupancy": obs_space_occupancy_low,
"target_action": obs_space_target_action_low}
self.observation_space = spaces.Dict({ "laser": spaces.Box(obs_space_laser_low, obs_space_laser_high),
"occupancy": spaces.Box(obs_space_occupancy_low, obs_space_occupancy_high),
"target_action": spaces.Box(obs_space_target_action_low, obs_space_target_action_high)})
self.action_space = spaces.Discrete(self.config.n_actions)
elif self.config.observation_space_type == "laser_WP_1DCNN_FC":
if self.config.laser_normalize_flag:
obs_laser_low = np.full((1, self.config.laser_n_range), 0.0).reshape(self.config.cnn_obs_shape)
obs_laser_high = np.full((1, self.config.laser_n_range), 1.0).reshape(self.config.cnn_obs_shape)
else:
obs_laser_low = np.full((1, self.config.laser_n_range), self.config.laser_range_min).reshape(self.config.cnn_obs_shape)
obs_laser_high = np.full((1, self.config.laser_n_range), self.config.laser_range_max).reshape(self.config.cnn_obs_shape)
obs_space_waypoints_low = np.full((1, 2*self.config.n_wp), -np.inf).reshape(self.config.fc_obs_shape)
obs_space_waypoints_high = np.full((1, 2*self.config.n_wp), np.inf).reshape(self.config.fc_obs_shape)
obs_action_low = np.array([[self.config.min_lateral_speed, self.config.min_angular_speed]]).reshape(self.config.fc_obs_shape)
obs_action_high = np.array([[self.config.max_lateral_speed, self.config.max_angular_speed]]).reshape(self.config.fc_obs_shape)
# print("turtlebot3_tentabot_rl::init_observation_action_space -> obs_laser_low shape: " + str(obs_laser_low.shape))
# print("turtlebot3_tentabot_rl::init_observation_action_space -> obs_space_waypoints_low shape: " + str(obs_space_waypoints_low.shape))
# print("turtlebot3_tentabot_rl::init_observation_action_space -> obs_action_low shape: " + str(obs_action_low.shape))
self.obs_data = { "laser": np.vstack([obs_laser_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"waypoints": np.vstack([obs_space_waypoints_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"action": np.vstack([obs_action_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack))}
# print("turtlebot3_tentabot_rl::init_observation_action_space -> obs_data laser shape: " + str(self.obs_data["laser"].shape))
# print("turtlebot3_tentabot_rl::init_observation_action_space -> obs_data waypoints shape: " + str(self.obs_data["waypoints"].shape))
# print("turtlebot3_tentabot_rl::init_observation_action_space -> obs_data action shape: " + str(self.obs_data["action"].shape))
obs_space_laser_low = np.vstack([obs_laser_low] * self.config.n_obs_stack)
obs_space_laser_high = np.vstack([obs_laser_high] * self.config.n_obs_stack)
obs_space_wp_action_low = np.concatenate((obs_space_waypoints_low, obs_action_low.reshape(self.config.fc_obs_shape)), axis=0)
obs_space_wp_action_high = np.concatenate((obs_space_waypoints_high, obs_action_high.reshape(self.config.fc_obs_shape)), axis=0)
# print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_space_laser_low shape: " + str(obs_space_laser_low.shape))
self.obs = {"laser": obs_space_laser_low,
"waypoints_action ": obs_space_wp_action_low}
self.observation_space = spaces.Dict({ "laser": spaces.Box(obs_space_laser_low, obs_space_laser_high),
"waypoints_action": spaces.Box(obs_space_wp_action_low, obs_space_wp_action_high)})
self.action_space = spaces.Discrete(self.config.n_actions)
elif self.config.observation_space_type == "Tentabot_WP_FC":
obs_occupancy_low = np.full((1, self.config.n_observations), 0.0).reshape(self.config.fc_obs_shape)
obs_occupancy_high = np.full((1, self.config.n_observations), 1.0).reshape(self.config.fc_obs_shape)
obs_space_waypoints_low = np.full((1, 2*self.config.n_wp), -np.inf).reshape(self.config.fc_obs_shape)
obs_space_waypoints_high = np.full((1, 2*self.config.n_wp), np.inf).reshape(self.config.fc_obs_shape)
obs_action_low = np.array([[self.config.min_lateral_speed, self.config.min_angular_speed]]).reshape(self.config.fc_obs_shape)
obs_action_high = np.array([[self.config.max_lateral_speed, self.config.max_angular_speed]]).reshape(self.config.fc_obs_shape)
# print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_occupancy_low shape: " + str(obs_occupancy_low.shape))
# print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_space_waypoints_low shape: " + str(obs_space_waypoints_low.shape))
# print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_action_low shape: " + str(obs_action_low.shape))
self.obs_data = { "occupancy": np.vstack([obs_occupancy_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"waypoints": np.vstack([obs_space_waypoints_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"action": np.vstack([obs_action_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack))}
# print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_data occupancy shape: " + str(self.obs_data["occupancy"].shape))
# print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_data waypoints shape: " + str(self.obs_data["waypoints"].shape))
# print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_data action shape: " + str(self.obs_data["action"].shape))
obs_stacked_occupancy_low = np.hstack([obs_occupancy_low] * self.config.n_obs_stack)
obs_stacked_occupancy_high = np.hstack([obs_occupancy_high] * self.config.n_obs_stack)
obs_space_low = np.concatenate((obs_stacked_occupancy_low, obs_space_waypoints_low, obs_action_low), axis=0)
obs_space_high = np.concatenate((obs_stacked_occupancy_high, obs_space_waypoints_high, obs_action_high), axis=0)
# print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_stacked_occupancy_low shape: " + str(obs_stacked_occupancy_low.shape))
# print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_space_low shape: " + str(obs_space_low.shape))
self.obs = obs_space_low
self.observation_space = spaces.Box(obs_space_low, obs_space_high)
self.action_space = spaces.Discrete(self.config.n_actions)
elif self.config.observation_space_type == "laser_image_2DCNN_FC" or \
self.config.observation_space_type == "laser_rings_2DCNN_FC":
obs_laser_image_low = np.full((1, self.config.laser_image_width), 0.0)
obs_laser_image_low = np.vstack([obs_laser_image_low] * self.config.laser_image_height)
obs_laser_image_low = np.expand_dims(obs_laser_image_low, axis=0)
obs_laser_image_high = np.full((1, self.config.laser_image_width), 1.0)
obs_laser_image_high = np.vstack([obs_laser_image_high] * self.config.laser_image_height)
obs_laser_image_high = np.expand_dims(obs_laser_image_high, axis=0)
obs_target_low = np.array([[0.0, -math.pi]]).reshape(self.config.fc_obs_shape)
obs_target_high = np.array([[np.inf, math.pi]]).reshape(self.config.fc_obs_shape)
obs_action_low = np.array([[self.config.min_lateral_speed, self.config.min_angular_speed]]).reshape(self.config.fc_obs_shape)
obs_action_high = np.array([[self.config.max_lateral_speed, self.config.max_angular_speed]]).reshape(self.config.fc_obs_shape)
self.obs_data = { "laser_image": np.vstack([obs_laser_image_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"target": np.vstack([obs_target_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"action": np.vstack([obs_action_low] * (self.config.n_obs_stack * self.config.n_skip_obs_stack))}
obs_space_laser_image_low = np.vstack([obs_laser_image_low] * self.config.n_obs_stack)
obs_space_laser_image_high = np.vstack([obs_laser_image_high] * self.config.n_obs_stack)
obs_space_target_action_low = np.concatenate((obs_target_low, obs_action_low), axis=0)
obs_space_target_action_high = np.concatenate((obs_target_high, obs_action_high), axis=0)
self.obs = {"laser_image": obs_space_laser_image_low,
"target_action": obs_space_target_action_low}
self.observation_space = spaces.Dict({ "laser_image": spaces.Box(obs_space_laser_image_low, obs_space_laser_image_high),
"target_action": spaces.Box(obs_space_target_action_low, obs_space_target_action_high)})
self.action_space = spaces.Discrete(self.config.n_actions)
'''
print("---------------------")
print("turtlebot3_tentabot_drl::init_observation_action_space -> laser_image_width: " + str(self.config.laser_image_width))
print("turtlebot3_tentabot_drl::init_observation_action_space -> laser_image_height: " + str(self.config.laser_image_height))
print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_data laser_image shape: " + str(self.obs_data["laser_image"].shape))
print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_data target shape: " + str(self.obs_data["target"].shape))
print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_data action shape: " + str(self.obs_data["action"].shape))
print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_space_laser_image_low shape: " + str(obs_space_laser_image_low.shape))
print("turtlebot3_tentabot_drl::init_observation_action_space -> obs_space_target_action_low shape: " + str(obs_space_target_action_low.shape))
print("---------------------")
'''
#print("turtlebot3_tentabot_drl::init_observation_action_space -> observation_space: " + str(self.observation_space))
#print("turtlebot3_tentabot_drl::init_observation_action_space -> action_space: " + str(self.action_space))
'''
DESCRIPTION: Re-initializes the observation and the stacked observation buffers at the start of an episode for the configured observation space type.
'''
def reinit_observation(self):
self.episode_oar_data = dict(obs=[], acts=[], infos=None, terminal=[], rews=[])
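# At the start of an episode each rolling buffer in obs_data is pre-filled with
# n_obs_stack * n_skip_obs_stack copies of the initial observation.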
if self.config.observation_space_type == "laser_FC":
# Update laser scan
#self.filter_laser_scan()
if self.config.laser_normalize_flag:
obs_laser = self.normalized_laser_ranges
else:
obs_laser = self.filtered_laser_ranges
# Update target observation
self.update_obs_target()
# Stack observation data
self.obs_data = { "laser": np.vstack([obs_laser] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"target": np.vstack([self.obs_target] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"action": np.vstack([self.previous_action] * (self.config.n_obs_stack * self.config.n_skip_obs_stack))}
#print("turtlebot3_tentabot_drl::reinit_observation -> filtered_laser_ranges shape: " + str(self.filtered_laser_ranges.shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_target shape: " + str(self.obs_target.shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> previous_action shape: " + str(self.previous_action.shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_data laser shape: " + str(self.obs_data["laser"].shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_data target shape: " + str(self.obs_data["target"].shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_data action shape: " + str(self.obs_data["action"].shape))
# Initialize observation
obs_stacked_laser = np.hstack([obs_laser] * self.config.n_obs_stack)
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_stacked_laser shape: " + str(obs_stacked_laser.shape))
self.obs = np.concatenate((obs_stacked_laser, self.obs_target, self.previous_action), axis=0)
#print("turtlebot3_tentabot_drl::reinit_observation -> obs: " + str(self.obs.shape))
elif self.config.observation_space_type == "Tentabot_FC":
# Update tentabot observation
success_rl_step = self.client_rl_step(1)
if not success_rl_step:
rospy.logerr("turtlebot3_tentabot_drl::reinit_observation -> OBSERVATION FAILURE!")
# Update target observation
self.update_obs_target()
# Stack observation data
self.obs_data = { "occupancy": np.vstack([self.occupancy_set] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"target": np.vstack([self.obs_target] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"action": np.vstack([self.previous_action] * (self.config.n_obs_stack * self.config.n_skip_obs_stack))}
#print("turtlebot3_tentabot_drl::reinit_observation -> occupancy_set shape: " + str(self.occupancy_set.shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_target shape: " + str(self.obs_target.shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> previous_action shape: " + str(self.previous_action.shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_data occupancy shape: " + str(self.obs_data["occupancy"].shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_data target shape: " + str(self.obs_data["target"].shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_data action shape: " + str(self.obs_data["action"].shape))
# Initialize observation
obs_stacked_occupancy = np.hstack([self.occupancy_set] * self.config.n_obs_stack)
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_stacked_occupancy shape: " + str(obs_stacked_occupancy.shape))
self.obs = np.concatenate((obs_stacked_occupancy, self.obs_target, self.previous_action), axis=0)
elif self.config.observation_space_type == "Tentabot_1DCNN_FC" or \
self.config.observation_space_type == "Tentabot_2DCNN_FC":
# Update tentabot observation
success_rl_step = self.client_rl_step(1)
if not success_rl_step:
rospy.logerr("turtlebot3_tentabot_drl::reinit_observation -> OBSERVATION FAILURE!")
# Update target observation
self.update_obs_target()
if self.config.cit_flag:
# Stack observation data
self.obs_data = { "occupancy": np.vstack([self.occupancy_set] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"target": np.vstack([self.obs_target] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"action": np.vstack([self.previous_action] * (self.config.n_obs_stack * self.config.n_skip_obs_stack))}
# Initialize observation
obs_space_occupancy = np.vstack([self.occupancy_set] * self.config.n_obs_stack)
else:
# Stack observation data
self.obs_data = { "occupancy": np.hstack([self.occupancy_set] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"target": np.vstack([self.obs_target] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"action": np.vstack([self.previous_action] * (self.config.n_obs_stack * self.config.n_skip_obs_stack))}
# Initialize observation
obs_space_occupancy = np.hstack([self.occupancy_set] * self.config.n_obs_stack)
obs_space_target_action = np.concatenate((self.obs_target, self.previous_action), axis=0)
#print("turtlebot3_tentabot_drl::reinit_observation -> occupancy_set shape: " + str(self.occupancy_set.shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_target shape: " + str(self.obs_target.shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> previous_action shape: " + str(self.previous_action.shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_data occupancy shape: " + str(self.obs_data["occupancy"].shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_data target shape: " + str(self.obs_data["target"].shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_data action shape: " + str(self.obs_data["action"].shape))
if self.config.observation_space_type == "Tentabot_2DCNN_FC":
obs_space_occupancy = np.expand_dims(obs_space_occupancy, axis=0)
obs_space_target_action = np.expand_dims(obs_space_target_action, axis=0)
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_space_occupancy shape: " + str(obs_space_occupancy.shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_space_target_action shape: " + str(obs_space_target_action.shape))
self.obs = {"occupancy": obs_space_occupancy,
"target_action": obs_space_target_action}
elif self.config.observation_space_type == "Tentabot_laser_1DCNN_FC":
if self.config.laser_normalize_flag:
obs_laser = self.normalized_laser_ranges
else:
obs_laser = self.filtered_laser_ranges
# Update tentabot observation
success_rl_step = self.client_rl_step(1)
if not success_rl_step:
rospy.logerr("turtlebot3_tentabot_drl::reinit_observation -> OBSERVATION FAILURE!")
# Update target observation
self.update_obs_target()
if self.config.cit_flag:
# Stack observation data
self.obs_data = { "laser": np.vstack([obs_laser] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"occupancy": np.vstack([self.occupancy_set] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"target": np.vstack([self.obs_target] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"action": np.vstack([self.previous_action] * (self.config.n_obs_stack * self.config.n_skip_obs_stack))}
# Initialize observation
obs_space_laser = np.vstack([obs_laser] * self.config.n_obs_stack)
obs_space_occupancy = np.vstack([self.occupancy_set] * self.config.n_obs_stack)
else:
# Stack observation data
self.obs_data = { "laser": np.vstack([obs_laser] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"occupancy": np.hstack([self.occupancy_set] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"target": np.vstack([self.obs_target] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"action": np.vstack([self.previous_action] * (self.config.n_obs_stack * self.config.n_skip_obs_stack))}
# Initialize observation
obs_space_laser = np.vstack([obs_laser] * self.config.n_obs_stack)
obs_space_occupancy = np.hstack([self.occupancy_set] * self.config.n_obs_stack)
obs_space_target_action = np.concatenate((self.obs_target, self.previous_action), axis=0)
#print("turtlebot3_tentabot_drl::reinit_observation -> occupancy_set shape: " + str(self.occupancy_set.shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_target shape: " + str(self.obs_target.shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> previous_action shape: " + str(self.previous_action.shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_data occupancy shape: " + str(self.obs_data["occupancy"].shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_data target shape: " + str(self.obs_data["target"].shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_data action shape: " + str(self.obs_data["action"].shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_space_occupancy shape: " + str(obs_space_occupancy.shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_space_target_action shape: " + str(obs_space_target_action.shape))
self.obs = {"laser": obs_space_laser,
"occupancy": obs_space_occupancy,
"target_action": obs_space_target_action}
elif self.config.observation_space_type == "laser_1DCNN_FC":
# Update laser scan
#self.filter_laser_scan()
if self.config.laser_normalize_flag:
obs_laser = self.normalized_laser_ranges
else:
obs_laser = self.filtered_laser_ranges
# Update target observation
self.update_obs_target()
# Stack observation data
self.obs_data = { "laser": np.vstack([obs_laser] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"target": np.vstack([self.obs_target] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"action": np.vstack([self.previous_action] * (self.config.n_obs_stack * self.config.n_skip_obs_stack))}
#print("turtlebot3_tentabot_drl::reinit_observation -> laser shape: " + str(self.filtered_laser_ranges.shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_target shape: " + str(self.obs_target.shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> previous_action shape: " + str(self.previous_action.shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_data laser shape: " + str(self.obs_data["laser"].shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_data target shape: " + str(self.obs_data["target"].shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_data action shape: " + str(self.obs_data["action"].shape))
# Initialize observation
obs_space_laser = np.vstack([obs_laser] * self.config.n_obs_stack)
obs_space_target_action = np.concatenate((self.obs_target, self.previous_action), axis=0)
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_space_laser shape: " + str(obs_space_laser.shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_space_target_action shape: " + str(obs_space_target_action.shape))
self.obs = {"laser": obs_space_laser,
"target_action": obs_space_target_action}
elif self.config.observation_space_type == "laser_WP_1DCNN_FC":
# Update laser scan
#self.filter_laser_scan()
if self.config.laser_normalize_flag:
obs_laser = self.normalized_laser_ranges
else:
obs_laser = self.filtered_laser_ranges
# Update waypoints
self.init_obs_waypoints()
# Stack observation data
self.obs_data = { "laser": np.vstack([obs_laser] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"waypoints": np.vstack([self.obs_wp] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"action": np.vstack([self.previous_action] * (self.config.n_obs_stack * self.config.n_skip_obs_stack))}
#print("turtlebot3_tentabot_drl::reinit_observation -> laser shape: " + str(self.filtered_laser_ranges.shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> waypoints shape: " + str(self.obs_wp.shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> previous_action shape: " + str(self.previous_action.shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_data laser shape: " + str(self.obs_data["laser"].shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_data waypoints shape: " + str(self.obs_data["waypoints"].shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_data action shape: " + str(self.obs_data["action"].shape))
# Initialize the observation
obs_space_laser = np.vstack([obs_laser] * self.config.n_obs_stack)
obs_space_wp_action = np.concatenate((self.obs_wp, self.previous_action), axis=0)
self.obs = {"laser": obs_space_laser,
"waypoints_action": obs_space_wp_action}
elif self.config.observation_space_type == "Tentabot_WP_FC":
# Update waypoints
self.init_obs_waypoints()
# Update tentabot observation
success_rl_step = self.client_rl_step(1)
if not success_rl_step:
rospy.logerr("turtlebot3_tentabot_drl::reinit_observation -> OBSERVATION FAILURE!")
# Stack observation data
self.obs_data = { "occupancy": np.vstack([self.occupancy_set] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"waypoints": np.vstack([self.obs_wp] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"action": np.vstack([self.previous_action] * (self.config.n_obs_stack * self.config.n_skip_obs_stack))}
#print("turtlebot3_tentabot_drl::reinit_observation -> occupancy_set shape: " + str(self.occupancy_set.shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> waypoints shape: " + str(self.obs_wp.shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> previous_action shape: " + str(self.previous_action.shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_data occupancy shape: " + str(self.obs_data["occupancy"].shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_data waypoints shape: " + str(self.obs_data["waypoints"].shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_data action shape: " + str(self.obs_data["action"].shape))
# Initialize observation
obs_stacked_occupancy = np.hstack([self.occupancy_set] * self.config.n_obs_stack)
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_stacked_occupancy shape: " + str(obs_stacked_occupancy.shape))
self.obs = np.concatenate((obs_stacked_occupancy, self.obs_wp, self.previous_action), axis=0)
elif self.config.observation_space_type == "laser_image_2DCNN_FC" or \
self.config.observation_space_type == "laser_rings_2DCNN_FC":
# Update target observation
self.update_obs_target()
obs_laser_image = self.laser_image
obs_laser_image = np.expand_dims(obs_laser_image, axis=0)
obs_target = self.obs_target
# Stack observation data
self.obs_data = { "laser_image": np.vstack([obs_laser_image] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"target": np.vstack([obs_target] * (self.config.n_obs_stack * self.config.n_skip_obs_stack)),
"action": np.vstack([self.previous_action] * (self.config.n_obs_stack * self.config.n_skip_obs_stack))}
# Initialize observation
obs_space_laser_image = np.vstack([obs_laser_image] * self.config.n_obs_stack)
obs_space_target_action = np.concatenate((obs_target, self.previous_action), axis=0)
if self.config.observation_space_type == "Tentabot_2DCNN_FC":
obs_space_laser_image = np.expand_dims(obs_space_laser_image, axis=0)
obs_space_target_action = np.expand_dims(obs_space_target_action, axis=0)
'''
print("---------------------")
print("turtlebot3_tentabot_drl::reinit_observation -> obs_laser_image shape: " + str(obs_laser_image.shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_target shape: " + str(self.obs_target.shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> previous_action shape: " + str(self.previous_action.shape))
print("turtlebot3_tentabot_drl::reinit_observation -> obs_data laser_image shape: " + str(self.obs_data["laser_image"].shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_data target shape: " + str(self.obs_data["target"].shape))
#print("turtlebot3_tentabot_drl::reinit_observation -> obs_data action shape: " + str(self.obs_data["action"].shape))
print("turtlebot3_tentabot_drl::reinit_observation -> obs_space_laser_image shape: " + str(obs_space_laser_image.shape))
print("turtlebot3_tentabot_drl::reinit_observation -> obs_space_target_action shape: " + str(obs_space_target_action.shape))
print("---------------------")
'''
'''
imi = (self.laser_image * 255).astype(np.uint8)
im = Image.fromarray(imi)
im = im.convert("L")
im.save(self.data_folder_path + "laser_image.jpeg")
np.savetxt(self.data_folder_path + "laser_image.txt", self.laser_image)
'''
self.obs = {"laser_image": obs_space_laser_image,
"target_action": obs_space_target_action}
'''
DESCRIPTION: Updates the observation and the stacked observation buffers after each step for the configured observation space type.
'''
def update_observation(self):
if self.config.observation_space_type == "laser_FC":
# Update laser scan
#self.filter_laser_scan()
if self.config.laser_normalize_flag:
obs_laser = self.normalized_laser_ranges
else:
obs_laser = self.filtered_laser_ranges
# Update target observation
self.update_obs_target()
# Update observation data
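# Rolling buffers: append the newest frame and drop the oldest row so each buffer
# keeps exactly n_obs_stack * n_skip_obs_stack frames.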
self.obs_data["laser"] = np.vstack((self.obs_data["laser"], obs_laser))
self.obs_data["laser"] = np.delete(self.obs_data["laser"], np.s_[0], axis=0)
self.obs_data["target"] = np.vstack((self.obs_data["target"], self.obs_target))
self.obs_data["target"] = np.delete(self.obs_data["target"], np.s_[0], axis=0)
self.obs_data["action"] = np.vstack((self.obs_data["action"], self.previous_action))
self.obs_data["action"] = np.delete(self.obs_data["action"], np.s_[0], axis=0)
#print("turtlebot3_tentabot_drl::update_observation -> obs_data laser shape: " + str(self.obs_data["laser"].shape))
#print("turtlebot3_tentabot_drl::update_observation -> obs_data target shape: " + str(self.obs_data["target"].shape))
#print("turtlebot3_tentabot_drl::update_observation -> obs_data action shape: " + str(self.obs_data["action"].shape))
# Update observation
obs_stacked_laser = self.obs_data["laser"][-1,:].reshape(self.config.fc_obs_shape)
#print("turtlebot3_tentabot_drl::update_observation -> obs_stacked_laser shape: " + str(obs_stacked_laser.shape))
if self.config.n_obs_stack > 1:
latest_index = (self.config.n_obs_stack * self.config.n_skip_obs_stack) - 1
j = 0
for i in range(latest_index-1, -1, -1):
j += 1
if j % self.config.n_skip_obs_stack == 0:
obs_stacked_laser = np.hstack((self.obs_data["laser"][i,:], obs_stacked_laser))
#print("turtlebot3_tentabot_drl::update_observation -> obs_stacked_laser shape: " + str(obs_stacked_laser.shape))
self.obs = np.concatenate((obs_stacked_laser, self.obs_target, self.previous_action), axis=0)
#print("turtlebot3_tentabot_drl::update_observation -> obs: " + str(self.obs.shape))
elif self.config.observation_space_type == "laser_1DCNN_FC":
# Update laser scan
#self.filter_laser_scan()
if self.config.laser_normalize_flag:
obs_laser = self.normalized_laser_ranges
else:
obs_laser = self.filtered_laser_ranges
# Update target observation
self.update_obs_target()
# Update observation data
self.obs_data["laser"] = np.vstack((self.obs_data["laser"], obs_laser))
self.obs_data["laser"] = np.delete(self.obs_data["laser"], np.s_[0], axis=0)
self.obs_data["target"] = np.vstack((self.obs_data["target"], self.obs_target))
self.obs_data["target"] = np.delete(self.obs_data["target"], np.s_[0], axis=0)
self.obs_data["action"] = np.vstack((self.obs_data["action"], self.previous_action))
self.obs_data["action"] = np.delete(self.obs_data["action"], np.s_[0], axis=0)
#print("turtlebot3_tentabot_drl::update_observation -> obs_data laser shape: " + str(self.obs_data["laser"].shape))
#print("turtlebot3_tentabot_drl::update_observation -> obs_data target shape: " + str(self.obs_data["target"].shape))
#print("turtlebot3_tentabot_drl::update_observation -> obs_data action shape: " + str(self.obs_data["action"].shape))
# Update observation
obs_space_laser = self.obs_data["laser"][-1,:].reshape(self.config.cnn_obs_shape)
if self.config.n_obs_stack > 1:
if(self.config.n_skip_obs_stack > 1):
latest_index = (self.config.n_obs_stack * self.config.n_skip_obs_stack) - 1
j = 0
for i in range(latest_index-1, -1, -1):
j += 1
if j % self.config.n_skip_obs_stack == 0:
obs_space_laser = np.vstack((self.obs_data["laser"][i,:].reshape(self.config.cnn_obs_shape), obs_space_laser))
else:
obs_space_laser = self.obs_data["laser"]
obs_space_target_action = np.concatenate((self.obs_target, self.previous_action), axis=0)
#print("turtlebot3_tentabot_drl::update_observation -> obs_space_laser: " + str(obs_space_laser.shape))
#print("turtlebot3_tentabot_drl::update_observation -> obs_space_target_action: " + str(obs_space_target_action.shape))
self.obs["laser"] = obs_space_laser
self.obs["target_action"] = obs_space_target_action
elif self.config.observation_space_type == "Tentabot_FC":
# Update tentabot observation
success_rl_step = self.client_rl_step(1)
if not success_rl_step:
rospy.logerr("turtlebot3_tentabot_drl::update_observation -> OBSERVATION FAILURE!")
# Update target observation
self.update_obs_target()
# Update observation data
self.obs_data["occupancy"] = np.vstack((self.obs_data["occupancy"], self.occupancy_set))
self.obs_data["occupancy"] = np.delete(self.obs_data["occupancy"], np.s_[0], axis=0)
self.obs_data["target"] = np.vstack((self.obs_data["target"], self.obs_target))
self.obs_data["target"] = np.delete(self.obs_data["target"], np.s_[0], axis=0)
self.obs_data["action"] = np.vstack((self.obs_data["action"], self.previous_action))
self.obs_data["action"] = np.delete(self.obs_data["action"], np.s_[0], axis=0)
#print("turtlebot3_tentabot_drl::update_observation -> obs_data occupancy shape: " + str(self.obs_data["occupancy"].shape))
#print("turtlebot3_tentabot_drl::update_observation -> obs_data target shape: " + str(self.obs_data["target"].shape))
#print("turtlebot3_tentabot_drl::update_observation -> obs_data action shape: " + str(self.obs_data["action"].shape))
# Update observation
obs_stacked_occupancy = self.obs_data["occupancy"][-1,:].reshape(self.config.fc_obs_shape)
if self.config.n_obs_stack > 1:
latest_index = (self.config.n_obs_stack * self.config.n_skip_obs_stack) - 1
j = 0
for i in range(latest_index-1, -1, -1):
j += 1
if j % self.config.n_skip_obs_stack == 0:
obs_stacked_occupancy = np.hstack((self.obs_data["occupancy"][i,:], obs_stacked_occupancy))
#print("turtlebot3_tentabot_drl::update_observation -> obs_stacked_occupancy shape: " + str(obs_stacked_occupancy.shape))
#print("turtlebot3_tentabot_drl::update_observation -> obs_target shape: " + str(self.obs_target.shape))
#print("turtlebot3_tentabot_drl::update_observation -> previous_action shape: " + str(self.previous_action.shape))
self.obs = np.concatenate((obs_stacked_occupancy, self.obs_target, self.previous_action), axis=0)
#print("turtlebot3_tentabot_drl::update_observation -> obs: " + str(self.obs.shape))
elif self.config.observation_space_type == "Tentabot_1DCNN_FC" or \
self.config.observation_space_type == "Tentabot_2DCNN_FC":
# Update tentabot observation
success_rl_step = self.client_rl_step(1)
if not success_rl_step:
rospy.logerr("turtlebot3_tentabot_drl::update_observation -> OBSERVATION FAILURE!")
# Update target observation
self.update_obs_target()
if self.config.cit_flag:
# Update observation data
self.obs_data["occupancy"] = np.vstack((self.obs_data["occupancy"], self.occupancy_set))
self.obs_data["occupancy"] = np.delete(self.obs_data["occupancy"], np.s_[0], axis=0)
# Update observation
obs_space_occupancy = self.obs_data["occupancy"][-1,:].reshape(self.config.cnn_obs_shape)
if self.config.n_obs_stack > 1:
if(self.config.n_skip_obs_stack > 1):
latest_index = (self.config.n_obs_stack * self.config.n_skip_obs_stack) - 1
j = 0
for i in range(latest_index-1, -1, -1):
j += 1
if j % self.config.n_skip_obs_stack == 0:
obs_space_occupancy = np.vstack((self.obs_data["occupancy"][i,:].reshape(self.config.cnn_obs_shape), obs_space_occupancy))
else:
obs_space_occupancy = self.obs_data["occupancy"]
else:
# Update observation data
self.obs_data["occupancy"] = np.hstack((self.obs_data["occupancy"], self.occupancy_set))
self.obs_data["occupancy"] = np.delete(self.obs_data["occupancy"], np.s_[0], axis=1)
# Update observation
obs_space_occupancy = self.obs_data["occupancy"][:,-1].reshape(self.config.cnn_obs_shape)
#print("turtlebot3_tentabot_drl::update_observation -> obs_space_occupancy: " + str(obs_space_occupancy.shape))
if self.config.n_obs_stack > 1:
if(self.config.n_skip_obs_stack > 1):
latest_index = (self.config.n_obs_stack * self.config.n_skip_obs_stack) - 1
j = 0
for i in range(latest_index-1, -1, -1):
j += 1
if j % self.config.n_skip_obs_stack == 0:
obs_space_occupancy = np.hstack((self.obs_data["occupancy"][:,i].reshape(self.config.cnn_obs_shape), obs_space_occupancy))
else:
obs_space_occupancy = self.obs_data["occupancy"]
#print("turtlebot3_tentabot_drl::update_observation -> obs_space_occupancy: " + str(obs_space_occupancy.shape))
self.obs_data["target"] = np.vstack((self.obs_data["target"], self.obs_target))
self.obs_data["target"] = np.delete(self.obs_data["target"], np.s_[0], axis=0)
self.obs_data["action"] = np.vstack((self.obs_data["action"], self.previous_action))
self.obs_data["action"] = np.delete(self.obs_data["action"], np.s_[0], axis=0)
obs_space_target_action = np.concatenate((self.obs_target, self.previous_action), axis=0)
#print("turtlebot3_tentabot_drl::update_observation -> obs_data occupancy shape: " + str(self.obs_data["occupancy"].shape))
#print("turtlebot3_tentabot_drl::update_observation -> obs_data target shape: " + str(self.obs_data["target"].shape))
#print("turtlebot3_tentabot_drl::update_observation -> obs_data action shape: " + str(self.obs_data["action"].shape))
if self.config.observation_space_type == "Tentabot_2DCNN_FC":
obs_space_occupancy = np.expand_dims(obs_space_occupancy, axis=0)
obs_space_target_action = np.expand_dims(obs_space_target_action, axis=0)
#print("**************** " + str(self.step_num))
#print("turtlebot3_tentabot_drl::update_observation -> obs_space_occupancy: ")
#print(obs_space_occupancy[0, 65:75])
#print("turtlebot3_tentabot_drl::update_observation -> obs_target dist: " + str(self.obs_target[0,0]))
#print("turtlebot3_tentabot_drl::update_observation -> obs_target angle: " + str(self.obs_target[0,1] * 180 / math.pi))
#print("turtlebot3_tentabot_drl::update_observation -> previous_action: " + str(self.previous_action))
#print("turtlebot3_tentabot_drl::update_observation -> obs_space_occupancy: " + str(type(obs_space_occupancy)))
#print("turtlebot3_tentabot_drl::update_observation -> obs_space_occupancy: " + str(obs_space_occupancy.shape))
#print("turtlebot3_tentabot_drl::update_observation -> obs_space_target_action: " + str(obs_space_target_action.shape))
#print("****************")
#print("turtlebot3_tentabot_drl::update_observation -> obs_space_occupancy: " + str(obs_space_occupancy.shape))
#print("turtlebot3_tentabot_drl::update_observation -> obs_space_target_action: " + str(obs_space_target_action.shape))
self.obs["occupancy"] = obs_space_occupancy
self.obs["target_action"] = obs_space_target_action
elif self.config.observation_space_type == "Tentabot_laser_1DCNN_FC":
if self.config.laser_normalize_flag:
obs_laser = self.normalized_laser_ranges
else:
obs_laser = self.filtered_laser_ranges
# Update tentabot observation
success_rl_step = self.client_rl_step(1)
if not success_rl_step:
rospy.logerr("turtlebot3_tentabot_drl::update_observation -> OBSERVATION FAILURE!")
# Update target observation
self.update_obs_target()
# Update observation data: laser data
self.obs_data["laser"] = np.vstack((self.obs_data["laser"], obs_laser))
self.obs_data["laser"] = np.delete(self.obs_data["laser"], np.s_[0], axis=0)
if self.config.cit_flag:
# Update observation data
self.obs_data["occupancy"] = np.vstack((self.obs_data["occupancy"], self.occupancy_set))
self.obs_data["occupancy"] = np.delete(self.obs_data["occupancy"], np.s_[0], axis=0)
# Update observation
obs_space_laser = self.obs_data["laser"][-1,:].reshape(self.config.cnn_obs_shape)
obs_space_occupancy = self.obs_data["occupancy"][-1,:].reshape(self.config.cnn_obs_shape)
if self.config.n_obs_stack > 1:
if(self.config.n_skip_obs_stack > 1):
latest_index = (self.config.n_obs_stack * self.config.n_skip_obs_stack) - 1
j = 0
for i in range(latest_index-1, -1, -1):
j += 1
if j % self.config.n_skip_obs_stack == 0:
obs_space_laser = np.vstack((self.obs_data["laser"][i,:].reshape(self.config.cnn_obs_shape), obs_space_laser))
obs_space_occupancy = np.vstack((self.obs_data["occupancy"][i,:].reshape(self.config.cnn_obs_shape), obs_space_occupancy))
else:
obs_space_laser = self.obs_data["laser"]
obs_space_occupancy = self.obs_data["occupancy"]
else:
# Update observation data
self.obs_data["occupancy"] = np.hstack((self.obs_data["occupancy"], self.occupancy_set))
self.obs_data["occupancy"] = np.delete(self.obs_data["occupancy"], np.s_[0], axis=1)
# Update observation
obs_space_laser = self.obs_data["laser"][-1,:].reshape(self.config.cnn_obs_shape)
obs_space_occupancy = self.obs_data["occupancy"][:,-1].reshape(self.config.cnn_obs_shape)
#print("turtlebot3_tentabot_drl::update_observation -> obs_space_occupancy: " + str(obs_space_occupancy.shape))
if self.config.n_obs_stack > 1:
if(self.config.n_skip_obs_stack > 1):
latest_index = (self.config.n_obs_stack * self.config.n_skip_obs_stack) - 1
j = 0
for i in range(latest_index-1, -1, -1):
j += 1
if j % self.config.n_skip_obs_stack == 0:
obs_space_laser = np.vstack((self.obs_data["laser"][i,:].reshape(self.config.cnn_obs_shape), obs_space_laser))
obs_space_occupancy = np.hstack((self.obs_data["occupancy"][:,i].reshape(self.config.cnn_obs_shape), obs_space_occupancy))
else:
obs_space_laser = self.obs_data["laser"]
obs_space_occupancy = self.obs_data["occupancy"]
#print("turtlebot3_tentabot_drl::update_observation -> obs_space_occupancy: " + str(obs_space_occupancy.shape))
self.obs_data["target"] = np.vstack((self.obs_data["target"], self.obs_target))
self.obs_data["target"] = np.delete(self.obs_data["target"], np.s_[0], axis=0)
self.obs_data["action"] = np.vstack((self.obs_data["action"], self.previous_action))
self.obs_data["action"] = np.delete(self.obs_data["action"], np.s_[0], axis=0)
obs_space_target_action = np.concatenate((self.obs_target, self.previous_action), axis=0)
self.obs["laser"] = obs_space_laser
self.obs["occupancy"] = obs_space_occupancy
self.obs["target_action"] = obs_space_target_action
elif self.config.observation_space_type == "laser_WP_1DCNN_FC":
# Update laser scan
#self.filter_laser_scan()
if self.config.laser_normalize_flag:
obs_laser = self.normalized_laser_ranges
else:
obs_laser = self.filtered_laser_ranges
# Update waypoints
if self.config.wp_dynamic:
self.init_obs_waypoints()
else:
self.update_obs_waypoints()
# Update observation data
self.obs_data["laser"] = np.vstack((self.obs_data["laser"], obs_laser))
self.obs_data["laser"] = np.delete(self.obs_data["laser"], np.s_[0], axis=0)
self.obs_data["waypoints"] = np.vstack((self.obs_data["waypoints"], self.obs_wp))
self.obs_data["waypoints"] = np.delete(self.obs_data["waypoints"], np.s_[0], axis=0)
self.obs_data["action"] = np.vstack((self.obs_data["action"], self.previous_action))
self.obs_data["action"] = np.delete(self.obs_data["action"], np.s_[0], axis=0)
#print("turtlebot3_tentabot_rl::update_observation -> obs_data laser shape: " + str(self.obs_data["laser"].shape))
#print("turtlebot3_tentabot_rl::update_observation -> obs_data waypoints shape: " + str(self.obs_data["waypoints"].shape))
#print("turtlebot3_tentabot_rl::update_observation -> obs_data action shape: " + str(self.obs_data["action"].shape))
# Update observation
obs_space_laser = self.obs_data["laser"][-1,:].reshape(self.config.cnn_obs_shape)
if self.config.n_obs_stack > 1:
if(self.config.n_skip_obs_stack > 1):
latest_index = (self.config.n_obs_stack * self.config.n_skip_obs_stack) - 1
j = 0
for i in range(latest_index-1, -1, -1):
j += 1
if j % self.config.n_skip_obs_stack == 0:
obs_space_laser = np.vstack((self.obs_data["laser"][i,:].reshape(self.config.cnn_obs_shape), obs_space_laser))
else:
obs_space_laser = self.obs_data["laser"]
obs_space_wp_action = np.concatenate((self.obs_wp, self.previous_action), axis=0)
# print("turtlebot3_tentabot_rl::update_observation -> obs_space_laser shape: " + str(obs_space_laser.shape))
# print("turtlebot3_tentabot_rl::update_observation -> obs_space_wp_action shape: " + str(obs_space_wp_action.shape))
self.obs["laser"] = obs_space_laser
self.obs["waypoints_action"] = obs_space_wp_action
elif self.config.observation_space_type == "Tentabot_WP_FC":
# Update waypoints
if self.config.wp_dynamic:
self.init_obs_waypoints()
else:
self.update_obs_waypoints()
# Update tentabot observation
success_rl_step = self.client_rl_step(1)
if not success_rl_step:
rospy.logerr("turtlebot3_tentabot_drl::update_observation -> OBSERVATION FAILURE!")
# Update observation data
self.obs_data["occupancy"] = np.vstack((self.obs_data["occupancy"], self.occupancy_set))
self.obs_data["occupancy"] = np.delete(self.obs_data["occupancy"], np.s_[0], axis=0)
self.obs_data["waypoints"] = np.vstack((self.obs_data["waypoints"], self.obs_wp))
self.obs_data["waypoints"] = np.delete(self.obs_data["waypoints"], np.s_[0], axis=0)
self.obs_data["action"] = np.vstack((self.obs_data["action"], self.previous_action))
self.obs_data["action"] = np.delete(self.obs_data["action"], np.s_[0], axis=0)
#print("turtlebot3_tentabot_drl::update_observation -> obs_data occupancy shape: " + str(self.obs_data["occupancy"].shape))
#print("turtlebot3_tentabot_drl::update_observation -> obs_data waypoints shape: " + str(self.obs_data["waypoints"].shape))
#print("turtlebot3_tentabot_drl::update_observation -> obs_data action shape: " + str(self.obs_data["action"].shape))
# Update observation
obs_stacked_occupancy = self.obs_data["occupancy"][-1,:].reshape(self.config.fc_obs_shape)
if self.config.n_obs_stack > 1:
latest_index = (self.config.n_obs_stack * self.config.n_skip_obs_stack) - 1
j = 0
for i in range(latest_index-1, -1, -1):
j += 1
if j % self.config.n_skip_obs_stack == 0:
obs_stacked_occupancy = np.hstack((self.obs_data["occupancy"][i,:], obs_stacked_occupancy))
#print("turtlebot3_tentabot_drl::update_observation -> obs_stacked_occupancy shape: " + str(obs_stacked_occupancy.shape))
#print("turtlebot3_tentabot_drl::update_observation -> obs_wp shape: " + str(self.obs_wp.shape))
#print("turtlebot3_tentabot_drl::update_observation -> previous_action shape: " + str(self.previous_action.shape))
self.obs = np.concatenate((obs_stacked_occupancy, self.obs_wp, self.previous_action), axis=0)
#print("turtlebot3_tentabot_drl::update_observation -> obs: " + str(self.obs.shape))
elif self.config.observation_space_type == "laser_image_2DCNN_FC" or \
self.config.observation_space_type == "laser_rings_2DCNN_FC":
# Update target observation
self.update_obs_target()
obs_laser_image = self.laser_image
obs_laser_image = np.expand_dims(obs_laser_image, axis=0)
obs_target = self.obs_target
# Update observation data
self.obs_data["laser_image"] = np.vstack((self.obs_data["laser_image"], obs_laser_image))
self.obs_data["laser_image"] = np.delete(self.obs_data["laser_image"], np.s_[0], axis=0)
# Update observation
obs_space_laser_image = self.obs_data["laser_image"][-1,:,:]
obs_space_laser_image = np.expand_dims(obs_space_laser_image, axis=0)
if self.config.n_obs_stack > 1:
if(self.config.n_skip_obs_stack > 1):
latest_index = (self.config.n_obs_stack * self.config.n_skip_obs_stack) - 1
j = 0
for i in range(latest_index-1, -1, -1):
j += 1
if j % self.config.n_skip_obs_stack == 0:
obs_space_laser_image_current = self.obs_data["laser_image"][i,:,:]
obs_space_laser_image_current = np.expand_dims(obs_space_laser_image_current, axis=0)
obs_space_laser_image = np.vstack([obs_space_laser_image_current, obs_space_laser_image])
else:
obs_space_laser_image = self.obs_data["laser_image"]
self.obs_data["target"] = np.vstack((self.obs_data["target"], obs_target))
self.obs_data["target"] = np.delete(self.obs_data["target"], np.s_[0], axis=0)
self.obs_data["action"] = np.vstack((self.obs_data["action"], self.previous_action))
self.obs_data["action"] = np.delete(self.obs_data["action"], np.s_[0], axis=0)
obs_space_target_action = np.concatenate((obs_target, self.previous_action), axis=0)
'''
#print("**************** " + str(self.step_num))
print("turtlebot3_tentabot_drl::update_observation -> obs_data laser_image shape: " + str(self.obs_data["laser_image"].shape))
#print("turtlebot3_tentabot_drl::update_observation -> obs_data target shape: " + str(self.obs_data["target"].shape))
#print("turtlebot3_tentabot_drl::update_observation -> obs_data action shape: " + str(self.obs_data["action"].shape))
#print("turtlebot3_tentabot_drl::update_observation -> obs_space_laser_image: ")
#print(obs_space_laser_image[0, 65:75])
#print("turtlebot3_tentabot_drl::update_observation -> obs_target dist: " + str(obs_target[0,0]))
#print("turtlebot3_tentabot_drl::update_observation -> obs_target angle: " + str(obs_target[0,1] * 180 / math.pi))
#print("turtlebot3_tentabot_drl::update_observation -> previous_action: " + str(self.previous_action))
print("turtlebot3_tentabot_drl::update_observation -> obs_laser_image shape: " + str(obs_laser_image.shape))
#print("turtlebot3_tentabot_drl::update_observation -> obs_space_laser_image type: " + str(type(obs_space_laser_image)))
print("turtlebot3_tentabot_drl::update_observation -> obs_space_laser_image shape: " + str(obs_space_laser_image.shape))
#print("turtlebot3_tentabot_drl::update_observation -> obs_space_target_action shape: " + str(obs_space_target_action.shape))
print("****************")
'''
'''
if self.step_num == 50:
imi = (self.laser_image * 255).astype(np.uint8)
im = Image.fromarray(imi)
im = im.convert("L")
im.save(self.data_folder_path + "laser_image_" + str(self.step_num) + ".jpeg")
#np.savetxt(self.data_folder_path + "laser_image_" + str(self.step_num) + ".txt", self.laser_image)
'''
self.obs["laser_image"] = obs_space_laser_image
self.obs["target_action"] = obs_space_target_action
'''
DESCRIPTION: Publish a cylinder marker at the current goal pose for visualization.
'''
def publish_goal(self):
goal_visu = MarkerArray()
marker = Marker()
marker.header.frame_id = self.config.world_frame_name
marker.ns = ""
marker.id = 1
marker.type = marker.CYLINDER
marker.action = marker.ADD
marker.scale.x = 0.5
marker.scale.y = 0.5
marker.scale.z = 0.5
marker.color.r = 0.0
marker.color.g = 0.0
marker.color.b = 1.0
marker.color.a = 0.5
marker.pose.orientation.w = 1.0
marker.pose.position.x = self.goal_pose["x"]
marker.pose.position.y = self.goal_pose["y"]
marker.pose.position.z = self.goal_pose["z"]
marker.header.seq += 1
marker.header.stamp = rospy.Time.now()
goal_visu.markers.append(marker)
self.goal_visu_pub.publish(goal_visu)
'''
DESCRIPTION: Publish the current goal pose as a stamped goal message to the move_base goal topic.
'''
def publish_move_base_goal(self):
print("turtlebot3_tentabot_drl::publish_move_base_goal -> x: " + str(self.goal_pose["x"]) + " y: " + str(self.goal_pose["y"]) + " z: " + str(self.goal_pose["z"]))
self.move_base_goal.pose.position.x = self.goal_pose["x"]
self.move_base_goal.pose.position.y = self.goal_pose["y"]
self.move_base_goal.pose.position.z = self.goal_pose["z"]
self.move_base_goal.pose.orientation.z = 0.0
self.move_base_goal.pose.orientation.w = 1.0
self.move_base_goal.header.seq += 1
self.move_base_goal.header.frame_id = self.config.world_frame_name
self.move_base_goal.header.stamp = rospy.Time.now()
self.move_base_goal_pub.publish(self.move_base_goal)
'''
DESCRIPTION: Publish sphere markers for the given debug points.
'''
def publish_debug_visu(self, debug_data):
debug_visu = MarkerArray()
for i, d in enumerate(debug_data):
if d[0] < float("inf") or d[1] < float("inf"):
marker = Marker()
marker.header.frame_id = self.config.world_frame_name
marker.ns = str(i)
marker.id = i
marker.type = marker.SPHERE
marker.action = marker.ADD
marker.scale.x = 0.1
marker.scale.y = 0.1
marker.scale.z = 0.1
marker.color.r = 1.0
marker.color.g = 0.0
marker.color.b = 1.0
marker.color.a = 1.0
marker.pose.orientation.w = 1.0
marker.pose.position.x = d[0]
marker.pose.position.y = d[1]
marker.pose.position.z = 0
debug_visu.markers.append(marker)
if len(debug_visu.markers) > 0:
for m in debug_visu.markers:
m.header.seq += 1
m.header.stamp = rospy.Time.now()
self.debug_visu_pub.publish(debug_visu)
'''
DESCRIPTION: Publish markers for the full waypoint data and the observed waypoints, deleting previous markers first.
'''
def publish_wp_visu(self, full_data, obs_data):
debug_visu = MarkerArray()
#delete previous markers
marker = Marker()
marker.ns = str(0)
marker.id = 0
marker.action = marker.DELETEALL
debug_visu.markers.append(marker)
for i, d in enumerate(full_data):
marker = Marker()
marker.header.frame_id = self.config.world_frame_name
marker.ns = str(i+1)
marker.id = i+1
marker.type = marker.SPHERE
marker.action = marker.ADD
marker.scale.x = 0.1
marker.scale.y = 0.1
marker.scale.z = 0.1
marker.color.r = 1.0
marker.color.g = 0.0
marker.color.b = 1.0
marker.color.a = 1.0
marker.pose.orientation.w = 1.0
marker.pose.position.x = d.pose.position.x
marker.pose.position.y = d.pose.position.y
marker.pose.position.z = 0
debug_visu.markers.append(marker)
for j, d in enumerate(obs_data):
marker = Marker()
marker.header.frame_id = self.config.world_frame_name
marker.ns = str(i+j+2) # offset past the last full_data marker id to avoid overwriting it
marker.id = i+j+2
marker.type = marker.SPHERE
marker.action = marker.ADD
marker.scale.x = 0.2
marker.scale.y = 0.2
marker.scale.z = 0.1
marker.color.r = 0.0
marker.color.g = 1.0
marker.color.b = 1.0
marker.color.a = 1.0
marker.pose.orientation.w = 1.0
marker.pose.position.x = d.pose.position.x
marker.pose.position.y = d.pose.position.y
marker.pose.position.z = 0
debug_visu.markers.append(marker)
if len(debug_visu.markers) > 0:
for m in debug_visu.markers:
m.header.seq += 1
m.header.stamp = rospy.Time.now()
self.debug_visu_pub.publish(debug_visu)
'''
DESCRIPTION: Call the rl_step service and store the returned occupancy, navigability, and closeness sets.
'''
def client_rl_step(self, parity):
#rospy.wait_for_service('rl_step')
try:
#srv_rl_step = rospy.ServiceProxy('rl_step', rl_step, True)
tentabot_client = self.srv_rl_step(parity)
if self.config.observation_space_type == "Tentabot_FC":
self.occupancy_set = (np.asarray(tentabot_client.occupancy_set)).reshape(self.config.fc_obs_shape)
self.navigability_set = (np.asarray(tentabot_client.navigability_set)).reshape(self.config.fc_obs_shape)
self.closeness_set = (np.asarray(tentabot_client.closeness_set)).reshape(self.config.fc_obs_shape)
else:
self.occupancy_set = (np.asarray(tentabot_client.occupancy_set)).reshape(self.config.cnn_obs_shape)
self.navigability_set = (np.asarray(tentabot_client.navigability_set)).reshape(self.config.cnn_obs_shape)
self.closeness_set = (np.asarray(tentabot_client.closeness_set)).reshape(self.config.cnn_obs_shape)
#print("--------------")
#print("turtlebot3_tentabot_drl::client_rl_step -> min id: " + str(np.argmin(self.occupancy_set)) + " val: " + str(np.min(self.occupancy_set)))
#print("turtlebot3_tentabot_drl::client_rl_step -> max id: " + str(np.argmax(self.occupancy_set)) + " val: " + str(np.max(self.occupancy_set)))
#print("turtlebot3_tentabot_drl::client_rl_step -> closeness_set: ")
#print(tentabot_client.navigability_set)
#for i, val in enumerate(self.closeness_set):
# print(str(i) + ": " + str(val))
#print("--------------")
#self.tentabot_obs = occupancy_set
#self.obs = np.stack((clearance_set, clutterness_set, closeness_set), axis=0)
return True
except rospy.ServiceException as e:
print("turtlebot3_tentabot_drl::client_rl_step -> Service call failed: %s"%e)
return False
'''
DESCRIPTION: Call the update_goal service to send the current goal pose.
'''
def client_update_goal(self):
#rospy.wait_for_service('update_goal')
try:
#srv_update_goal = rospy.ServiceProxy('update_goal', update_goal, True)
print("turtlebot3_tentabot_drl::get_goal_location -> Updated goal_pose x: " + str(self.goal_pose["x"]) + ", y: " + str(self.goal_pose["y"]))
goalMsg = Pose()
goalMsg.orientation.z = 0.0
goalMsg.orientation.w = 1.0
goalMsg.position.x = self.goal_pose["x"]
goalMsg.position.y = self.goal_pose["y"]
goalMsg.position.z = self.goal_pose["z"]
success = self.srv_update_goal(goalMsg).success
if(success):
#print("turtlebot3_tentabot_drl::get_goal_location -> Updated goal_pose x: " + str(self.goal_pose["x"]) + ", y: " + str(self.goal_pose["y"]))
rospy.logdebug("turtlebot3_tentabot_drl::client_update_goal -> Updated goal_pose x: " + str(self.goal_pose["x"]) + ", y: " + str(self.goal_pose["y"]))
else:
#print("turtlebot3_tentabot_drl::client_update_goal -> goal_pose is NOT updated!")
rospy.logdebug("turtlebot3_tentabot_drl::client_update_goal -> goal_pose is NOT updated!")
return success
except rospy.ServiceException as e:
print("turtlebot3_tentabot_drl::client_update_goal -> Service call failed: %s"%e)
return False
'''
DESCRIPTION: Call the reset_map_utility service to reset the map.
'''
def client_reset_map_utility(self, parity):
#rospy.wait_for_service('reset_map_utility')
try:
#srv_reset_map_utility = rospy.ServiceProxy('reset_map_utility', reset_map_utility, True)
success = self.srv_reset_map_utility(parity).success
if(success):
print("turtlebot3_tentabot_drl::client_reset_map_utility -> Map is reset!")
rospy.logdebug("turtlebot3_tentabot_drl::client_reset_map_utility -> Map is reset!")
else:
print("turtlebot3_tentabot_drl::client_reset_map_utility -> Map is NOT reset!")
rospy.logdebug("turtlebot3_tentabot_drl::client_reset_map_utility -> Map is NOT reset!")
return success
# Reset Robot Pose and Goal
except rospy.ServiceException as e:
print("Service call failed: %s"%e)
'''
DESCRIPTION: Request a global plan from move_base between the current odometry pose and the goal.
'''
def client_move_base_get_plan(self):
try:
self.update_odom()
start = PoseStamped()
start.pose = self.odom_data.pose.pose
start.header.seq += 1
start.header.frame_id = self.config.world_frame_name
start.header.stamp = rospy.Time.now()
goal = PoseStamped()
goal.pose.position.x = self.goal_pose["x"]
goal.pose.position.y = self.goal_pose["y"]
goal.pose.position.z = self.goal_pose["z"]
goal.pose.orientation.z = 0.0
goal.pose.orientation.w = 1.0
goal.header.seq += 1
goal.header.frame_id = self.config.world_frame_name
goal.header.stamp = rospy.Time.now()
tolerance = 0.5
#self.srv_clear_costmap()
self.move_base_global_plan = self.srv_move_base_get_plan(start, goal, tolerance).plan.poses
if(len(self.move_base_global_plan)):
#print("turtlebot3_tentabot_drl::client_move_base_get_plan -> move_base plan is received!")
rospy.logdebug("turtlebot3_tentabot_drl::client_move_base_get_plan -> move_base plan is received!")
success = True
else:
print("turtlebot3_tentabot_drl::client_move_base_get_plan -> move_base plan is received!")
rospy.logdebug("turtlebot3_tentabot_drl::client_move_base_get_plan -> move_base plan is received!")
success = False
return success
# Reset Robot Pose and Goal
except rospy.ServiceException as e:
print("Service call failed: %s"%e)
'''
DESCRIPTION: Compute the total length of the move_base global plan by summing distances between consecutive plan points.
'''
def update_global_path_length(self):
if self.client_move_base_get_plan():
n_points_plan = len(self.move_base_global_plan)
self.global_plan_length = 0
p1 = {'x': self.move_base_global_plan[0].pose.position.x, 'y': self.move_base_global_plan[0].pose.position.y, 'z': self.move_base_global_plan[0].pose.position.z}
for i in range(1, n_points_plan):
p2 = {'x': self.move_base_global_plan[i].pose.position.x, 'y': self.move_base_global_plan[i].pose.position.y, 'z': self.move_base_global_plan[i].pose.position.z}
self.global_plan_length += self.calculate_euclidean_distance(p1, p2)
p1 = p2
print("turtlebot3_tentabot_drl::update_global_path_length -> global_plan_length: " + str(self.global_plan_length))
'''
DESCRIPTION: Relaunch the pedsim validation simulation via roslaunch.
'''
def reset_pedsim(self):
uuid = roslaunch.rlutil.get_or_generate_uuid(None, False)
roslaunch.configure_logging(uuid)
launch = roslaunch.parent.ROSLaunchParent(uuid, [self.tentabot_path + "/launch/others/pedsim_ros/start_pedsim_validation.launch"])
launch.start() | [] |
2024-01-10 | Suraj9968/Stock-Price-Prediction-with-Chatbot | Stock_Predictor.py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pandas_datareader.data as web
import datetime
import yfinance as yf
import streamlit as st
from keras.models import load_model
import cufflinks as cf
from plotly import graph_objs as go
import replicate
from openai import OpenAI
# App title
st.set_page_config(page_title="🤑 Stocks-GPT")
st.markdown('''
# Stock Price Prediction App
Shown are the stock price data for the queried company!
**Credits**
- App built by Suraj, Divjot, Kirat and Simarjeet
- Built in `Python` using `streamlit`, `yfinance`, `cufflinks`, `pandas` and `datetime`
''')
st.write('---')
# Sidebar
st.sidebar.subheader('Query parameters')
start_date = st.sidebar.date_input("Start date", datetime.date(2010, 1, 1))
end_date = st.sidebar.date_input("End date", datetime.date(2023, 12, 10))
# Retrieving tickers data
ticker_list = pd.read_csv('constituents_symbols.txt')
tickerSymbol = st.sidebar.selectbox('Stock ticker', ticker_list) # Select ticker symbol
with st.sidebar:
st.title('🤑💸💬 Stocks-GPT Chatbot')
@st.cache_data
def load_data(ticker):
data = yf.download(ticker, start_date, end_date)
data.reset_index(inplace=True)
return data
data_load_state = st.text('Loading data...')
data = load_data(tickerSymbol)
data_load_state.text('Loading data... done!')
st.subheader('Raw data')
st.write(data.tail())
def plot_raw_data():
fig = go.Figure()
fig.add_trace(go.Scatter(x=data['Date'], y=data['Open'], name="stock_open"))
fig.add_trace(go.Scatter(x=data['Date'], y=data['Close'], name="stock_close"))
fig.layout.update(title_text='Time Series data with Rangeslider', xaxis_rangeslider_visible=True)
st.plotly_chart(fig)
plot_raw_data()
data_training = pd.DataFrame(data['Close'][0:int(len(data)*0.70)])
data_testing = pd.DataFrame(data['Close'][int(len(data)*0.70):int(len(data))])
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0,1))
data_training_array = scaler.fit_transform(data_training)
#Load my Model
model = load_model('keras_model.h5')
past_100_days = data_training.tail(100)
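# prepend the last 100 training days so the first test windows have a full 100-step lookback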
final_df = pd.concat([past_100_days, data_testing], ignore_index=True)
input_data = scaler.fit_transform(final_df)
x_test = []
y_test = []
for i in range(100, input_data.shape[0]):
x_test.append(input_data[i-100:i])
y_test.append(input_data[i, 0])
x_test, y_test = np.array(x_test), np.array(y_test)
y_predicted = model.predict(x_test)
scaler = scaler.scale_
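# invert the MinMax scaling so predictions and actual values are back in original price units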
scaler_factor = 1/scaler[0]
y_predicted = y_predicted * scaler_factor
y_test = y_test * scaler_factor
# Bollinger bands
st.header('**Bollinger Bands**')
qf=cf.QuantFig(data,title='First Quant Figure',legend='top',name='GS')
qf.add_bollinger_bands()
fig = qf.iplot(asFigure=True)
st.plotly_chart(fig)
#Final Plot
st.subheader('Predictions vs Actual')
fig2 = plt.figure(figsize=(12,6))
plt.plot(y_test, 'b', label='Actual Price')
plt.plot(y_predicted, 'r', label='Predicted Price')
plt.xlabel('Time')
plt.ylabel('Price')
plt.legend()
st.pyplot(fig2)
st.subheader('Models and parameters')
selected_model = st.sidebar.selectbox('Choose a Llama2 model', ['Llama2-7B', 'Llama2-13B'], key='selected_model')
if selected_model == 'Llama2-7B':
llm = 'a16z-infra/llama7b-v2-chat:4f0a4744c7295c024a1de15e1a63c880d3da035fa1f49bfd344fe076074c8eea'
elif selected_model == 'Llama2-13B':
llm = 'a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5'
temperature = st.sidebar.slider('temperature', min_value=0.01, max_value=5.0, value=0.1, step=0.01)
top_p = st.sidebar.slider('top_p', min_value=0.01, max_value=1.0, value=0.9, step=0.01)
max_length = st.sidebar.slider('max_length', min_value=32, max_value=128, value=120, step=8)
with st.sidebar:
openai_api_key = st.text_input("OpenAI API Key", key="chatbot_api_key", type="password")
st.caption("🚀 Stock-GPT powered by Llama LLM")
if "messages" not in st.session_state:
st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you?"}]
for msg in st.session_state.messages:
st.chat_message(msg["role"]).write(msg["content"])
if prompt := st.chat_input():
if not openai_api_key:
st.info("Please add your Replicate API key to continue.")
st.stop()
client = OpenAI(api_key=openai_api_key)
st.session_state.messages.append({"role": "user", "content": prompt})
st.chat_message("user").write(prompt)
response = client.chat.completions.create(model="gpt-3.5-turbo", messages=st.session_state.messages)
msg = response.choices[0].message.content
st.session_state.messages.append({"role": "assistant", "content": msg})
st.chat_message("assistant").write(msg)
| [
"How can I help you?"
] |
2024-01-10 | juananpe/codeinterpreter-api | codeinterpreterapi~prompts~modifications_check.py | from langchain.prompts import PromptTemplate
determine_modifications_prompt = PromptTemplate(
input_variables=["code"],
template="The user will input some code and you need to determine "
"if the code makes any changes to the file system. \n"
"With changes it means creating new files or modifying exsisting ones.\n"
"Format your answer as JSON inside a codeblock with a "
"list of filenames that are modified by the code.\n"
"If the code does not make any changes to the file system, "
"return an empty list.\n\n"
"Determine modifications:\n"
"```python\n"
"import matplotlib.pyplot as plt\n"
"import numpy as np\n\n"
"t = np.arange(0.0, 4.0*np.pi, 0.1)\n\n"
"s = np.sin(t)\n\n"
"fig, ax = plt.subplots()\n\n"
"ax.plot(t, s)\n\n"
'ax.set(xlabel="time (s)", ylabel="sin(t)",\n'
' title="Simple Sin Wave")\n'
"ax.grid()\n\n"
'plt.savefig("sin_wave.png")\n'
"```\n\n"
"Answer:\n"
"```json\n"
"{{\n"
' "modifications": ["sin_wave.png"]\n'
"}}\n"
"```\n\n"
"Determine modifications:\n"
"```python\n"
"import matplotlib.pyplot as plt\n"
"import numpy as np\n\n"
"x = np.linspace(0, 10, 100)\n"
"y = x**2\n\n"
"plt.figure(figsize=(8, 6))\n"
"plt.plot(x, y)\n"
'plt.title("Simple Quadratic Function")\n'
'plt.xlabel("x")\n'
'plt.ylabel("y = x^2")\n'
"plt.grid(True)\n"
"plt.show()\n"
"```\n\n"
"Answer:\n"
"```json\n"
"{{\n"
' "modifications": []\n'
"}}\n"
"```\n\n"
"Determine modifications:\n"
"```python\n"
"{code}\n"
"```\n\n"
"Answer:\n"
"```json\n",
)
| [
"import numpy as np\n\n",
"plt.xlabel(\"x\")\n",
"y = x**2\n\n",
"plt.title(\"Simple Quadratic Function\")\n",
"t = np.arange(0.0, 4.0*np.pi, 0.1)\n\n",
"plt.savefig(\"sin_wave.png\")\n",
"Format your answer as JSON inside a codeblock with a ",
"return an empty list.\n\n",
"With changes it means creating new files or modifying exsisting ones.\n",
"list of filenames that are modified by the code.\n",
"The user will input some code and you need to determine if the code makes any changes to the file system. \nWith changes it means creating new files or modifying exsisting ones.\nFormat your answer as JSON inside a codeblock with a list of filenames that are modified by the code.\nIf the code does not make any changes to the file system, return an empty list.\n\nDetermine modifications:\n```python\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nt = np.arange(0.0, 4.0*np.pi, 0.1)\n\ns = np.sin(t)\n\nfig, ax = plt.subplots()\n\nax.plot(t, s)\n\nax.set(xlabel=\"time (s)\", ylabel=\"sin(t)\",\n title=\"Simple Sin Wave\")\nax.grid()\n\nplt.savefig(\"sin_wave.png\")\n```\n\nAnswer:\n```json\n{{\n \"modifications\": [\"sin_wave.png\"]\n}}\n```\n\nDetermine modifications:\n```python\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.linspace(0, 10, 100)\ny = x**2\n\nplt.figure(figsize=(8, 6))\nplt.plot(x, y)\nplt.title(\"Simple Quadratic Function\")\nplt.xlabel(\"x\")\nplt.ylabel(\"y = x^2\")\nplt.grid(True)\nplt.show()\n```\n\nAnswer:\n```json\n{{\n \"modifications\": []\n}}\n```\n\nDetermine modifications:\n```python\n{code}\n```\n\nAnswer:\n```json\n",
"fig, ax = plt.subplots()\n\n",
"If the code does not make any changes to the file system, ",
"plt.plot(x, y)\n",
"```python\n",
"x = np.linspace(0, 10, 100)\n",
"s = np.sin(t)\n\n",
" \"modifications\": [\"sin_wave.png\"]\n",
"```\n\n",
"```json\n",
"Determine modifications:\n",
"{{\n",
"}}\n",
"The user will input some code and you need to determine ",
"import matplotlib.pyplot as plt\n",
"Answer:\n",
"plt.show()\n",
"plt.figure(figsize=(8, 6))\n",
"plt.ylabel(\"y = x^2\")\n",
" \"modifications\": []\n",
"{code}\n",
"ax.plot(t, s)\n\n",
" title=\"Simple Sin Wave\")\n",
"ax.set(xlabel=\"time (s)\", ylabel=\"sin(t)\",\n",
"if the code makes any changes to the file system. \n",
"plt.grid(True)\n",
"ax.grid()\n\n"
] |
2024-01-10 | juananpe/codeinterpreter-api | codeinterpreterapi~chains~rm_dl_link.py | from langchain.base_language import BaseLanguageModel
from langchain.chat_models.openai import ChatOpenAI
from langchain.schema import AIMessage, OutputParserException
from codeinterpreterapi.prompts import remove_dl_link_prompt
async def remove_download_link(
input_response: str,
llm: BaseLanguageModel,
) -> str:
messages = remove_dl_link_prompt.format_prompt(
input_response=input_response
).to_messages()
message = await llm.apredict_messages(messages)
if not isinstance(message, AIMessage):
raise OutputParserException("Expected an AIMessage")
return message.content
async def test():
llm = ChatOpenAI(model="gpt-3.5-turbo-0613") # type: ignore
example = (
"I have created the plot to your dataset.\n\n"
"Link to the file [here](sandbox:/plot.png)."
)
print(await remove_download_link(example, llm))
if __name__ == "__main__":
import asyncio
from dotenv import load_dotenv
load_dotenv()
asyncio.run(test())
| [] |
2024-01-10 | juananpe/codeinterpreter-api | codeinterpreterapi~chains~modifications_check.py | import json
from typing import List, Optional
from langchain.base_language import BaseLanguageModel
from langchain.chat_models.anthropic import ChatAnthropic
from codeinterpreterapi.prompts import determine_modifications_prompt
async def get_file_modifications(
code: str,
llm: BaseLanguageModel,
retry: int = 2,
) -> Optional[List[str]]:
if retry < 1:
return None
prompt = determine_modifications_prompt.format(code=code)
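# the prompt ends with an opening JSON code fence, so stopping at the closing fence leaves just the JSON body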
result = await llm.apredict(prompt, stop="```")
try:
result = json.loads(result)
except json.JSONDecodeError:
result = ""
if not result or not isinstance(result, dict) or "modifications" not in result:
return await get_file_modifications(code, llm, retry=retry - 1)
return result["modifications"]
async def test():
llm = ChatAnthropic(model="claude-1.3") # type: ignore
code = """
import matplotlib.pyplot as plt
x = list(range(1, 11))
y = [29, 39, 23, 32, 4, 43, 43, 23, 43, 77]
plt.plot(x, y, marker='o')
plt.xlabel('Index')
plt.ylabel('Value')
plt.title('Data Plot')
plt.show()
"""
print(await get_file_modifications(code, llm))
if __name__ == "__main__":
import asyncio
import dotenv
dotenv.load_dotenv()
asyncio.run(test())
| [
"Index",
"Data Plot"
] |
2024-01-10 | juananpe/codeinterpreter-api | examples~use_additional_tools.py | """
The exciting part about this example is
that you can pass additional tools to the code interpreter session:
here a small knowledge-base tool returns employee salary data as CSV,
which the interpreter can then load and plot for you
"""
import csv
import io
from typing import Any
from langchain.tools import BaseTool
from codeinterpreterapi import CodeInterpreterSession
class ExampleKnowledgeBaseTool(BaseTool):
name = "salary_database"
description = "Use to get salary data of company employees"
def _run(self, *args, **kwargs):
raise NotImplementedError()
async def _arun(self, *args, **kwargs: Any) -> Any:
f = io.StringIO()
writer = csv.writer(f)
writer.writerow(["month", "employee", "salary"])
writer.writerow(["march 2022", "Jan", "1200"])
writer.writerow(["march 2022", "Ola", "1500"])
writer.writerow(["april 2022", "Jan", "1800"])
writer.writerow(["april 2022", "Ola", "2000"])
return f.getvalue()
async def main():
async with CodeInterpreterSession(tools=[ExampleKnowledgeBaseTool()]) as session:
response = await session.generate_response(
"Plot chart of company employee salaries"
)
response.show()
if __name__ == "__main__":
import asyncio
asyncio.run(main())
| [
"Use to get salary data of company employees"
] |
2024-01-10 | juananpe/codeinterpreter-api | codeinterpreterapi~schema~response.py | from langchain.schema import AIMessage, HumanMessage # type: ignore
from .file import File
class UserRequest(HumanMessage):
files: list[File] = []
def __str__(self):
return self.content
def __repr__(self):
return f"UserRequest(content={self.content}, files={self.files})"
class CodeInterpreterResponse(AIMessage):
files: list[File] = []
# final_code: str = "" TODO: implement
# final_output: str = "" TODO: implement
def show(self):
print("AI: ", self.content)
for file in self.files:
file.show_image()
def __str__(self):
return self.content
def __repr__(self):
return f"CodeInterpreterResponse(content={self.content}, files={self.files})"
| [] |
2024-01-10 | juananpe/codeinterpreter-api | codeinterpreterapi~session.py | import base64
import re
import traceback
import uuid
from io import BytesIO
from os import getenv
from typing import Optional
from codeboxapi import CodeBox # type: ignore
from codeboxapi.schema import CodeBoxOutput # type: ignore
from langchain.agents import (
AgentExecutor,
BaseSingleActionAgent,
ConversationalAgent,
ConversationalChatAgent,
)
from langchain.chat_models import ChatAnthropic, ChatOpenAI
from langchain.chat_models.base import BaseChatModel
from langchain.memory import ConversationBufferMemory
from langchain.prompts.chat import MessagesPlaceholder
from langchain.schema.language_model import BaseLanguageModel
from langchain.tools import BaseTool, StructuredTool
from codeinterpreterapi.agents import OpenAIFunctionsAgent
from codeinterpreterapi.chains import get_file_modifications, remove_download_link
from codeinterpreterapi.config import settings
from codeinterpreterapi.prompts import code_interpreter_system_message
from codeinterpreterapi.schema import (
CodeInput,
CodeInterpreterResponse,
File,
UserRequest,
)
from codeinterpreterapi.utils import (
CodeAgentOutputParser,
CodeCallbackHandler,
CodeChatAgentOutputParser,
)
class CodeInterpreterSession:
def __init__(
self,
llm: Optional[BaseLanguageModel] = None,
additional_tools: list[BaseTool] = [],
**kwargs,
) -> None:
self.codebox = CodeBox()
self.verbose = kwargs.get("verbose", settings.VERBOSE)
self.tools: list[BaseTool] = self._tools(additional_tools)
self.llm: BaseLanguageModel = llm or self._choose_llm(**kwargs)
self.agent_executor: AgentExecutor = self._agent_executor()
self.input_files: list[File] = []
self.output_files: list[File] = []
def start(self) -> None:
self.codebox.start()
async def astart(self) -> None:
await self.codebox.astart()
def _tools(self, additional_tools: list[BaseTool]) -> list[BaseTool]:
return additional_tools + [
StructuredTool(
name="python",
description="Input a string of code to a ipython interpreter. "
"Write the entire code in a single string. This string can "
"be really long, so you can use the `;` character to split lines. "
"Variables are preserved between runs. ",
func=self._run_handler,
coroutine=self._arun_handler,
args_schema=CodeInput,
),
]
def _choose_llm(
self, model: str = "gpt-4", openai_api_key: Optional[str] = None, **kwargs
) -> BaseChatModel:
if "gpt" in model:
openai_api_key = (
openai_api_key
or settings.OPENAI_API_KEY
or getenv("OPENAI_API_KEY", None)
)
if openai_api_key is None:
raise ValueError(
"OpenAI API key missing. Set OPENAI_API_KEY env variable "
"or pass `openai_api_key` to session."
)
return ChatOpenAI(
temperature=0.03,
model=model,
openai_api_key=openai_api_key,
max_retries=3,
request_timeout=60 * 3,
) # type: ignore
elif "claude" in model:
return ChatAnthropic(model=model)
else:
raise ValueError(f"Unknown model: {model} (expected gpt or claude model)")
def _choose_agent(self) -> BaseSingleActionAgent:
return (
OpenAIFunctionsAgent.from_llm_and_tools(
llm=self.llm,
tools=self.tools,
system_message=code_interpreter_system_message,
extra_prompt_messages=[
MessagesPlaceholder(variable_name="chat_history")
],
)
if isinstance(self.llm, ChatOpenAI)
else ConversationalChatAgent.from_llm_and_tools(
llm=self.llm,
tools=self.tools,
system_message=code_interpreter_system_message.content,
output_parser=CodeChatAgentOutputParser(),
)
if isinstance(self.llm, BaseChatModel)
else ConversationalAgent.from_llm_and_tools(
llm=self.llm,
tools=self.tools,
prefix=code_interpreter_system_message.content,
output_parser=CodeAgentOutputParser(),
)
)
def _agent_executor(self) -> AgentExecutor:
return AgentExecutor.from_agent_and_tools(
agent=self._choose_agent(),
callbacks=[CodeCallbackHandler(self)],
max_iterations=9,
tools=self.tools,
verbose=self.verbose,
memory=ConversationBufferMemory(
memory_key="chat_history", return_messages=True
),
)
async def show_code(self, code: str) -> None:
"""Callback function to show code to the user."""
if self.verbose:
print(code)
def _run_handler(self, code: str):
raise NotImplementedError("Use arun_handler for now.")
async def _arun_handler(self, code: str):
"""Run code in container and send the output to the user"""
print("Running code in container...", code)
output: CodeBoxOutput = await self.codebox.arun(code)
if not isinstance(output.content, str):
raise TypeError("Expected output.content to be a string.")
if output.type == "image/png":
filename = f"image-{uuid.uuid4()}.png"
file_buffer = BytesIO(base64.b64decode(output.content))
file_buffer.name = filename
self.output_files.append(File(name=filename, content=file_buffer.read()))
return f"Image {filename} got send to the user."
elif output.type == "error":
if "ModuleNotFoundError" in output.content:
if package := re.search(
r"ModuleNotFoundError: No module named '(.*)'", output.content
):
await self.codebox.ainstall(package.group(1))
return (
f"{package.group(1)} was missing but "
"got installed now. Please try again."
)
else:
# TODO: preanalyze error to optimize next code generation
pass
if self.verbose:
print("Error:", output.content)
elif modifications := await get_file_modifications(code, self.llm):
for filename in modifications:
if filename in [file.name for file in self.input_files]:
continue
fileb = await self.codebox.adownload(filename)
if not fileb.content:
continue
file_buffer = BytesIO(fileb.content)
file_buffer.name = filename
self.output_files.append(
File(name=filename, content=file_buffer.read())
)
return output.content
async def _input_handler(self, request: UserRequest):
# TODO: variables as context to the agent
# TODO: current files as context to the agent
if not request.files:
return
if not request.content:
request.content = (
"I uploaded, just text me back and confirm that you got the file(s)."
)
request.content += "\n**The user uploaded the following files: **\n"
for file in request.files:
self.input_files.append(file)
request.content += f"[Attachment: {file.name}]\n"
await self.codebox.aupload(file.name, file.content)
request.content += "**File(s) are now available in the cwd. **\n"
async def _output_handler(self, final_response: str) -> CodeInterpreterResponse:
"""Embed images in the response"""
for file in self.output_files:
if str(file.name) in final_response:
# remove inline image markdown (![...](...)) from the response
final_response = re.sub(r"\n\n!\[.*\]\(.*\)", "", final_response)
if self.output_files and re.search(r"\n\[.*\]\(.*\)", final_response):
try:
final_response = await remove_download_link(final_response, self.llm)
except Exception as e:
if self.verbose:
print("Error while removing download links:", e)
return CodeInterpreterResponse(content=final_response, files=self.output_files)
async def generate_response(
self,
user_msg: str,
files: list[File] = [],
detailed_error: bool = False,
) -> CodeInterpreterResponse:
"""Generate a Code Interpreter response based on the user's input."""
user_request = UserRequest(content=user_msg, files=files)
try:
await self._input_handler(user_request)
response = await self.agent_executor.arun(input=user_request.content)
return await self._output_handler(response)
except Exception as e:
if self.verbose:
traceback.print_exc()
if detailed_error:
return CodeInterpreterResponse(
content="Error in CodeInterpreterSession: "
f"{e.__class__.__name__} - {e}"
)
else:
return CodeInterpreterResponse(
content="Sorry, something went while generating your response."
"Please try again or restart the session."
)
async def is_running(self) -> bool:
return await self.codebox.astatus() == "running"
async def astop(self) -> None:
await self.codebox.astop()
async def __aenter__(self) -> "CodeInterpreterSession":
await self.astart()
return self
async def __aexit__(self, exc_type, exc_value, traceback) -> None:
await self.astop()
| [] |
2024-01-10 | juananpe/codeinterpreter-api | codeinterpreterapi~agents~functions_agent.py | """
Module implements an agent that uses OpenAI's APIs function enabled API.
This file is a modified version of the original file
from langchain/agents/openai_functions_agent/base.py.
Credits go to the original authors :)
"""
import json
from dataclasses import dataclass
from json import JSONDecodeError
from typing import Any, List, Optional, Sequence, Tuple, Union
from langchain.agents import BaseSingleActionAgent
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import Callbacks
from langchain.chat_models.openai import ChatOpenAI
from langchain.prompts.chat import (
BaseMessagePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
)
from langchain.schema import (
AgentAction,
AgentFinish,
AIMessage,
BaseMessage,
BasePromptTemplate,
FunctionMessage,
OutputParserException,
SystemMessage,
)
from langchain.tools import BaseTool
from langchain.tools.convert_to_openai import format_tool_to_openai_function
from pydantic import root_validator
@dataclass
class _FunctionsAgentAction(AgentAction):
message_log: List[BaseMessage]
def _convert_agent_action_to_messages(
agent_action: AgentAction, observation: str
) -> List[BaseMessage]:
"""Convert an agent action to a message.
This code is used to reconstruct the original AI message from the agent action.
Args:
agent_action: Agent action to convert.
Returns:
AIMessage that corresponds to the original tool invocation.
"""
if isinstance(agent_action, _FunctionsAgentAction):
return agent_action.message_log + [
_create_function_message(agent_action, observation)
]
else:
return [AIMessage(content=agent_action.log)]
def _create_function_message(
agent_action: AgentAction, observation: str
) -> FunctionMessage:
"""Convert agent action and observation into a function message.
Args:
agent_action: the tool invocation request from the agent
observation: the result of the tool invocation
Returns:
FunctionMessage that corresponds to the original tool invocation
"""
if not isinstance(observation, str):
try:
content = json.dumps(observation, ensure_ascii=False)
except Exception:
content = str(observation)
else:
content = observation
return FunctionMessage(
name=agent_action.tool,
content=content,
)
def _format_intermediate_steps(
intermediate_steps: List[Tuple[AgentAction, str]],
) -> List[BaseMessage]:
"""Format intermediate steps.
Args:
intermediate_steps: Steps the LLM has taken to date, along with observations
Returns:
list of messages to send to the LLM for the next prediction
"""
messages = []
for intermediate_step in intermediate_steps:
agent_action, observation = intermediate_step
messages.extend(_convert_agent_action_to_messages(agent_action, observation))
return messages
async def _parse_ai_message(
message: BaseMessage, llm: BaseLanguageModel
) -> Union[AgentAction, AgentFinish]:
"""Parse an AI message."""
if not isinstance(message, AIMessage):
raise TypeError(f"Expected an AI message got {type(message)}")
function_call = message.additional_kwargs.get("function_call", {})
if function_call:
function_call = message.additional_kwargs["function_call"]
function_name = function_call["name"]
try:
_tool_input = json.loads(function_call["arguments"])
except JSONDecodeError:
if function_name == "python":
code = function_call["arguments"]
_tool_input = {
"code": code,
}
else:
raise OutputParserException(
f"Could not parse tool input: {function_call} because "
f"the `arguments` is not valid JSON."
)
# HACK HACK HACK:
# The code that encodes tool input into Open AI uses a special variable
# name called `__arg1` to handle old style tools that do not expose a
# schema and expect a single string argument as an input.
# We unpack the argument here if it exists.
# Open AI does not support passing in a JSON array as an argument.
if "__arg1" in _tool_input:
tool_input = _tool_input["__arg1"]
else:
tool_input = _tool_input
content_msg = "responded: {content}\n" if message.content else "\n"
return _FunctionsAgentAction(
tool=function_name,
tool_input=tool_input,
log=f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n",
message_log=[message],
)
return AgentFinish(return_values={"output": message.content}, log=message.content)
class OpenAIFunctionsAgent(BaseSingleActionAgent):
"""An Agent driven by OpenAIs function powered API.
Args:
llm: This should be an instance of ChatOpenAI, specifically a model
that supports using `functions`.
tools: The tools this agent has access to.
prompt: The prompt for this agent, should support agent_scratchpad as one
of the variables. For an easy way to construct this prompt, use
`OpenAIFunctionsAgent.create_prompt(...)`
"""
llm: BaseLanguageModel
tools: Sequence[BaseTool]
prompt: BasePromptTemplate
def get_allowed_tools(self) -> List[str]:
"""Get allowed tools."""
return list([t.name for t in self.tools])
@root_validator
def validate_llm(cls, values: dict) -> dict:
if not isinstance(values["llm"], ChatOpenAI):
raise ValueError("Only supported with ChatOpenAI models.")
return values
@root_validator
def validate_prompt(cls, values: dict) -> dict:
prompt: BasePromptTemplate = values["prompt"]
if "agent_scratchpad" not in prompt.input_variables:
raise ValueError(
"`agent_scratchpad` should be one of the variables in the prompt, "
f"got {prompt.input_variables}"
)
return values
@property
def input_keys(self) -> List[str]:
"""Get input keys. Input refers to user input here."""
return ["input"]
@property
def functions(self) -> List[dict]:
return [dict(format_tool_to_openai_function(t)) for t in self.tools]
def plan(self):
raise NotImplementedError
async def aplan(
self,
intermediate_steps: List[Tuple[AgentAction, str]],
callbacks: Callbacks = None,
**kwargs: Any,
) -> Union[AgentAction, AgentFinish]:
"""Given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date,
along with observations
**kwargs: User inputs.
Returns:
Action specifying what tool to use.
"""
agent_scratchpad = _format_intermediate_steps(intermediate_steps)
selected_inputs = {
k: kwargs[k] for k in self.prompt.input_variables if k != "agent_scratchpad"
}
full_inputs = dict(**selected_inputs, agent_scratchpad=agent_scratchpad)
prompt = self.prompt.format_prompt(**full_inputs)
messages = prompt.to_messages()
predicted_message = await self.llm.apredict_messages(
messages, functions=self.functions, callbacks=callbacks
)
agent_decision = await _parse_ai_message(predicted_message, self.llm)
return agent_decision
@classmethod
def create_prompt(
cls,
system_message: Optional[SystemMessage] = SystemMessage(
content="You are a helpful AI assistant."
),
extra_prompt_messages: Optional[List[BaseMessagePromptTemplate]] = None,
) -> BasePromptTemplate:
"""Create prompt for this agent.
Args:
system_message: Message to use as the system message that will be the
first in the prompt.
extra_prompt_messages: Prompt messages that will be placed between the
system message and the new human input.
Returns:
A prompt template to pass into this agent.
"""
_prompts = extra_prompt_messages or []
messages: List[Union[BaseMessagePromptTemplate, BaseMessage]]
if system_message:
messages = [system_message]
else:
messages = []
messages.extend(
[
*_prompts,
HumanMessagePromptTemplate.from_template("{input}"),
MessagesPlaceholder(variable_name="agent_scratchpad"),
]
)
return ChatPromptTemplate(messages=messages) # type: ignore
@classmethod
def from_llm_and_tools(
cls,
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
callback_manager: Optional[BaseCallbackManager] = None,
extra_prompt_messages: Optional[List[BaseMessagePromptTemplate]] = None,
system_message: Optional[SystemMessage] = SystemMessage(
content="You are a helpful AI assistant."
),
**kwargs: Any,
) -> BaseSingleActionAgent:
"""Construct an agent from an LLM and tools."""
if not isinstance(llm, ChatOpenAI):
raise ValueError("Only supported with ChatOpenAI models.")
prompt = cls.create_prompt(
extra_prompt_messages=extra_prompt_messages,
system_message=system_message,
)
return cls(
llm=llm,
prompt=prompt,
tools=tools,
callback_manager=callback_manager, # type: ignore
**kwargs,
)
| [
"You are a helpful AI assistant.",
"{input}"
] |
2024-01-10 | juananpe/codeinterpreter-api | codeinterpreterapi~chains~extract_code.py | from langchain.base_language import BaseLanguageModel
from langchain.chat_models.anthropic import ChatAnthropic
# TODO: make async
def extract_python_code(
text: str,
llm: BaseLanguageModel,
retry: int = 2,
):
pass
async def test():
llm = ChatAnthropic(model="claude-1.3") # type: ignore
code = """
import matplotlib.pyplot as plt
x = list(range(1, 11))
y = [29, 39, 23, 32, 4, 43, 43, 23, 43, 77]
plt.plot(x, y, marker='o')
plt.xlabel('Index')
plt.ylabel('Value')
plt.title('Data Plot')
plt.show()
"""
print(extract_python_code(code, llm))
if __name__ == "__main__":
import asyncio
import dotenv
dotenv.load_dotenv()
asyncio.run(test())
| [] |
2024-01-10 | public-analytics/streamlit | investbot.py | # -*- coding: utf-8 -*-
"""
Pinecone + OpenAI POC
@author: alex leonhard
"""
import streamlit as st
import openai
import pinecone
import tiktoken
import time
#converts text to GPT4 token count. used to calculate context size
def num_tokens(string):
encoding = tiktoken.get_encoding("cl100k_base")
num_tokens = len(encoding.encode(string))
return num_tokens
#connect to pinecone
def pinecone_init(key, index):
pinecone.init(
api_key=key,
environment="us-east-1-aws"
)
return pinecone.Index(index)
#sorts pinecone results by relevance score (desc) and id (asc) to build context string
def sort_list_by_two_values(lst):
return sorted(lst, key=lambda d: (-d['score'], d['id']))
#convert text to vector
def get_embed(query, model):
openai.api_key = st.secrets["openai"]
res = openai.Embedding.create(
input=[query],
engine=embed_model
)
return res['data'][0]['embedding']
#queries pinecone for k number of nearest points
def get_context(index, vector, k):
res = index.query(vector, top_k=k, include_metadata=True)
res_ordered = sort_list_by_two_values(res['matches'])
return res_ordered
#combines pinecone context into token-limited sized string
def build_context(context, tokens):
contexts = [x['metadata']['text'] for x in context]
#optional prompt
prompt_start = (
"Answer the question based on the context below.\n\n"+
"Context:\n"
)
prompt_end = (
f"\n\nQuestion: {query}\nAnswer:"
)
# append contexts until hitting token limit
for i in range(0, len(contexts)):
if num_tokens("\n\n---\n\n".join(contexts[:i])) >= tokens:
raw_context = ("\n\n---\n\n".join(contexts[:i-1]))
prompt = (prompt_start + raw_context + prompt_end)
break
elif i == len(contexts)-1:
raw_context = ("\n\n---\n\n".join(contexts))
prompt = (prompt_start + raw_context + prompt_end)
return {'prompt':prompt, 'context':raw_context}
def chat(query_with_context, model):
openai.api_key = st.secrets["openai"]
res = openai.ChatCompletion.create(
model=model,
messages=[
{"role": "system", "content": "You are a helpful investment bot."},
{"role": "user", "content": query_with_context}
]
)
return res
#################################################################
###Streamlit###
st.title('Invest Bot')
st.subheader('Public.com Pinecone POC')
col1, col2, col3, col4, col5 = st.columns(5)
with col1:
index = st.selectbox('Pinecone Index', ('tickers', 'public-faq'))
with col2:
k = st.number_input('Pinecone Results', format='%i', value=10)
with col3:
token_limit = st.number_input('Context Max Tokens', format='%i', value = 7000)
with col4:
embed_model = st.selectbox('Embed Model', ("text-embedding-ada-002", "placeholder"))
with col5:
model = st.selectbox('Chat Model', ('gpt-4', 'gpt-3.5-turbo'))
query = st.text_input('Enter Query', 'Who spoke at the Apple Q1 2023 earnings call')
if query:
with st.spinner('Wait for it...'):
vector = get_embed(query, embed_model)
pinecone_index = pinecone_init(st.secrets["pinecone"], index)
context = get_context(pinecone_index, vector, k)
query_with_context = build_context(context, token_limit)['prompt']
output = chat(query_with_context, model)
st.success(output['choices'][0]['message']['content'].replace("$","\$"))
with st.expander("View Pinecone Query Results"):
for x in context:
st.write(f"id: {x['id']}, title: {x['metadata']['title']}, score: {str(x['score'])}")
with st.expander("View Query With Context"):
st.write(query_with_context)
| [
"Answer the question based on the context below.\n\nContext:\n",
"You are a helpful investment bot.",
"\n\nQuestion: PLACEHOLDER\nAnswer:",
"PLACEHOLDERPLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | public-analytics/streamlit | earnings_call.py | import streamlit as st
import openai
import pinecone
import tiktoken
import time
#converts text to GPT4 token count. used to calculate context size
def num_tokens(string):
encoding = tiktoken.get_encoding("cl100k_base")
num_tokens = len(encoding.encode(string))
return num_tokens
#connect to pinecone
def pinecone_init(key, index):
pinecone.init(
api_key=key,
environment="us-east-1-aws"
)
return pinecone.Index(index)
#sorts pinecone results by relevance score (desc) and id (asc) to build context string
def sort_list_by_two_values(lst):
return sorted(lst, key=lambda d: (-d['score'], d['id']))
#convert text to vector
def get_embed(query, model):
openai.api_key = st.secrets["openai"]
res = openai.Embedding.create(
input=[query],
engine=embed_model
)
return res['data'][0]['embedding']
#queries pinecone for k number of nearest points
def get_context(index, vector, k):
res = index.query(vector, top_k=k, include_metadata=True)
res_ordered = sort_list_by_two_values(res['matches'])
return res_ordered
#combines pinecone context into token-limited sized string
def build_context(context, tokens):
contexts = [x['metadata']['text'] for x in context]
#optional prompt
prompt_start = (
"Answer the question based on the context below.\n\n"+
"Context:\n"
)
prompt_end = (
f"\n\nQuestion: {query}\nAnswer:"
)
# append contexts until hitting token limit
for i in range(0, len(contexts)):
if num_tokens("\n\n---\n\n".join(contexts[:i])) >= tokens:
raw_context = ("\n\n---\n\n".join(contexts[:i-1]))
prompt = (prompt_start + raw_context + prompt_end)
break
elif i == len(contexts)-1:
raw_context = ("\n\n---\n\n".join(contexts))
prompt = (prompt_start + raw_context + prompt_end)
return {'prompt':prompt, 'context':raw_context}
def chat(query_with_context, model):
openai.api_key = st.secrets["openai"]
res = openai.ChatCompletion.create(
model=model,
messages=[
{"role": "system", "content": "You are a helpful investment bot."},
{"role": "user", "content": query_with_context}
]
)
return res
#################################################################
###Streamlit###
st.title('Invest Bot')
st.subheader('Public.com Pinecone POC')
col1, col2, col3, col4, col5 = st.columns(5)
with col1:
index = st.selectbox('Pinecone Index', ('earnings-call', 'public-faq'))
with col2:
k = st.number_input('Pinecone Results', format='%i', value=10)
with col3:
token_limit = st.number_input('Context Max Tokens', format='%i', value = 7000)
with col4:
embed_model = st.selectbox('Embed Model', ("text-embedding-ada-002", "placeholder"))
with col5:
model = st.selectbox('Chat Model', ('gpt-4', 'gpt-3.5-turbo'))
query = st.text_input('Enter Query', 'Who spoke at the Apple Q1 2023 earnings call')
if query:
with st.spinner('Wait for it...'):
vector = get_embed(query, embed_model)
pinecone_index = pinecone_init(st.secrets["pinecone"], index)
context = get_context(pinecone_index, vector, k)
query_with_context = build_context(context, token_limit)['prompt']
output = chat(query_with_context, model)
st.success(output['choices'][0]['message']['content'].replace("$","\$"))
with st.expander("View Pinecone Query Results"):
for x in context:
st.write(f"id: {x['id']}, title: {x['metadata']['title']}, score: {str(x['score'])}")
with st.expander("View Query With Context"):
st.write(query_with_context)
| [
"\n\nQuestion: PLACEHOLDER\nAnswer:",
"You are a helpful investment bot.",
"PLACEHOLDERPLACEHOLDERPLACEHOLDER",
"Answer the question based on the context below.\n\nContext:\n"
] |
2024-01-10 | gamersalpha/Mattermost-ChatGPT-Connect | script_mattermost-ChatGpt.py | import openai
import requests
# Configure the Mattermost and OpenAI API keys
MATTERMOST_URL = "https://mattermost.example.com"
MATTERMOST_TOKEN = "your_mattermost_token"
OPENAI_API_KEY = "your_openai_api_key"
# Configure the GPT-3 request parameters
openai.api_key = OPENAI_API_KEY
prompt = (f"Interagir avec ChatGPT via Mattermost")
completions = openai.Completion.create(engine="text-davinci-002", prompt=prompt, max_tokens=1024, n=1,stop=None,temperature=0.5)
# Extract the GPT-3 response
message = completions.choices[0].text
# Prepare the request for Mattermost
headers = {
"Authorization": f"Bearer {MATTERMOST_TOKEN}",
"Content-Type": "application/json",
}
data = {
"channel_id": "your_channel_id",
"message": message,
}
# Send the response to Mattermost
requests.post(f"{MATTERMOST_URL}/api/v4/posts", headers=headers, json=data)
| [
"Interagir avec ChatGPT via Mattermost"
] |
2024-01-10 | eufmike/catshand | src~catshand~tools~prjsummary.py | import os
from pathlib import Path
from tqdm import tqdm
import base64
import requests
from pydub import AudioSegment
from pydub.silence import split_on_silence, detect_nonsilent
import pandas as pd
import openai
import multiprocessing as mp
mp.set_start_method('fork', force=True)
from catshand.utility import loggergen
from catshand.openai import process_audio_file, merge_tran_csv, convert_csv_to_txt, openai_text
def prjsummary(args):
prjdir = Path(args.prj_dir)
logdir = prjdir.joinpath('log')
logdir.mkdir(exist_ok=True, parents=True)
logger = loggergen(logdir)
logger.info(f'args: {args}')
threads = args.threads
# use the specified input/output dirs, or fall back to the project defaults
if not args.input_dir is None:
ipdir = Path(args.input_dir)
else:
ipdir = prjdir.joinpath('00_Raw_wav_processed_sil_removal')
if not args.output_dir is None:
opdir = Path(args.output_dir)
else:
opdir = prjdir.joinpath('transcription')
csvdir = opdir.joinpath('csv')
segdir = opdir.joinpath('wav')
tmpdir = opdir.joinpath('tmp')
docdir = opdir.joinpath('doc')
txtdir = opdir.joinpath('txt')
opdir.mkdir(exist_ok=True, parents=True)
csvdir.mkdir(exist_ok=True, parents=True)
docdir.mkdir(exist_ok=True, parents=True)
txtdir.mkdir(exist_ok=True, parents=True)
names = []
audio_list = sorted(ipdir.glob('*.wav'))
print(f'nCPU: {threads}')
for ipfile in tqdm(audio_list):
opfile = csvdir.joinpath(ipfile.relative_to(ipdir)).with_suffix('.csv')
names.append(ipfile.stem)
opsegdir = segdir.joinpath(ipfile.stem)
opsegdir.mkdir(exist_ok=True, parents=True)
logger.info(f'Processing Transcribe, save csv to : {opfile}')
logger.info(f'Processing Transcribe, save wav files to : {opsegdir}')
process_audio_file(ipfile, opfile, opsegdir, tmpdir, threads = threads)
print(names)
logger.info(f'merge csv files to: {docdir}')
merge_tran_csv(csvdir, docdir)
convert_csv_to_txt(docdir, txtdir)
openai_text(docdir.joinpath('merge.txt'), docdir.joinpath('summary.txt'), names = names, threads = threads)
return
def add_subparser(subparsers):
description = "prjsummary creates the prejoct summary with transcript and time stamps"
# parser = argparse.ArgumentParser(description=description)
subparsers = subparsers.add_parser('prjsummary', help=description)
required_group = subparsers.add_argument_group('Required Arguments')
required_group.add_argument('-p', '--prj_dir', type = str, required = True, help = 'directory for the project folder')
optional_group = subparsers.add_argument_group('Optional Arguments')
optional_group.add_argument('-i', '--input_dir', type = str, help = 'input folders with *.wav files. Default folder: 00_Raw_wav_processed_sil_removal')
optional_group.add_argument('-o', '--output_dir', type = str, help = 'output folders different from default')
optional_group.add_argument('-t', '--threads', dest='threads', type=int, default = 1)
subparsers.set_defaults(func=prjsummary)
return | [] |
2024-01-10 | samurayy99/crewai_lenox | lenox_app~tools~browser_tools.py | import os
import json
import requests
from langchain.tools import tool
from unstructured.partition.html import partition_html
class BrowserTools:
@tool("Scrape website content")
def scrape_and_summarize_website(self, website):
"""
Scrape the content of the given website. Despite the name, the current
implementation returns the raw extracted text; no summarization step is applied.
:param website: URL of the website to scrape.
:return: Extracted text content of the website.
"""
try:
url = f"https://chrome.browserless.io/content?token={os.getenv('BROWSERLESS_API_KEY')}"
payload = json.dumps({"url": website})
headers = {'cache-control': 'no-cache', 'content-type': 'application/json'}
response = requests.post(url, headers=headers, data=payload)
if response.status_code == 200:
elements = partition_html(text=response.text)
content = "\n\n".join([str(el) for el in elements])
return {'status': 'success', 'data': {'content': content}}
else:
raise Exception(f"Failed to scrape the website with status code: {response.status_code}")
except Exception as e:
return {'status': 'error', 'message': str(e)}
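    # Note: the @tool decorator turns this method into a LangChain Tool, so it is normally
    # invoked by an agent rather than called directly. Roughly, the underlying request is:
    #   POST https://chrome.browserless.io/content?token=<BROWSERLESS_API_KEY> with {"url": website}
    # (the example URL and flow above are illustrative, not part of the original repo)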
| [
"application/json"
] |
2024-01-10 | samurayy99/crewai_lenox | lenox_app~tools~calculator_tools.py | from langchain.tools import tool
class CalculatorTools:
@tool("Perform calculation")
def calculate(self, expression):
"""
Performs a calculation based on the given mathematical expression.
:param expression: A string containing the mathematical expression to be calculated.
:return: The result of the calculation.
"""
try:
# Evaluate with builtins disabled to limit what the expression can access (still not a full sandbox)
result = eval(expression, {"__builtins__": None}, {})
return result
except Exception as e:
return f"Error in calculation: {str(e)}"
| [] |
2024-01-10 | samurayy99/crewai_lenox | lenox_app~tools~sentiment_analysis.py | import os
import praw
import tweepy
import json
from nltk.sentiment import SentimentIntensityAnalyzer
from langchain.tools import tool
class RedditSentimentAnalysis:
def __init__(self):
self.reddit = praw.Reddit(
client_id=os.getenv('REDDIT_CLIENT_ID'),
client_secret=os.getenv('REDDIT_CLIENT_SECRET'),
user_agent=os.getenv('REDDIT_USER_AGENT')
)
self.sia = SentimentIntensityAnalyzer()
@tool("Analyze Reddit Sentiment")
def analyze_reddit_sentiment(self, keywords, subreddits, limit=100):
try:
posts = []
for subreddit in subreddits:
for post in self.reddit.subreddit(subreddit).hot(limit=limit):
if any(keyword.lower() in post.title.lower() for keyword in keywords):
sentiment_score = self.sia.polarity_scores(post.title)
posts.append({
'title': post.title,
'sentiment': sentiment_score['compound'],
'url': post.url
})
return {'status': 'success', 'data': posts}
except Exception as e:
return {'status': 'error', 'message': str(e)}
class TwitterSentimentAnalysis:
def __init__(self):
auth = tweepy.OAuthHandler(os.getenv('TWITTER_CONSUMER_KEY'), os.getenv('TWITTER_CONSUMER_SECRET'))
auth.set_access_token(os.getenv('TWITTER_ACCESS_TOKEN'), os.getenv('TWITTER_ACCESS_TOKEN_SECRET'))
self.api = tweepy.API(auth, wait_on_rate_limit=True)
self.sia = SentimentIntensityAnalyzer()
@tool("Analyze Twitter Sentiment")
def analyze_twitter_sentiment(self, keywords, limit=100):
try:
tweets = []
for keyword in keywords:
# note: tweepy v4 renamed this endpoint to api.search_tweets; api.search is the v3 name
for tweet in tweepy.Cursor(self.api.search, q=keyword, tweet_mode='extended', lang='en').items(limit):
sentiment_score = self.sia.polarity_scores(tweet.full_text)
tweets.append({
'text': tweet.full_text,
'sentiment': sentiment_score['compound'],
'url': f"https://twitter.com/user/status/{tweet.id}"
})
return {'status': 'success', 'data': tweets}
except Exception as e:
return {'status': 'error', 'message': str(e)}
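    # Minimal usage sketch (assumed keywords/subreddits, not from the repo; requires the Reddit
    # env vars and the NLTK vader_lexicon data). Because of the @tool decorator these methods are
    # normally invoked by a LangChain agent; the underlying logic is roughly:
    #   RedditSentimentAnalysis().analyze_reddit_sentiment(["bitcoin"], ["CryptoCurrency"], limit=50)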
| [] |
2024-01-10 | katebarouch/paintbyai | dalle_img_maker.py | from dotenv import load_dotenv
import os
load_dotenv("secrets.sh")
import openai
import requests
from PIL import Image
import io
def dalle(prompt):
# Define OpenAI key
api_key = os.getenv("OPENAI_API_KEY")
print("API KEY = ", api_key)
openai.api_key = api_key
# Generate an image
response = openai.Image.create(
prompt=prompt,
size="1024x1024",
response_format="url"
)
print(response)
return response
def generate_image(api_url, painting_id):
# Extract the URL from the response dictionary
url = api_url['data'][0]['url']
# Make a request to the DALL·E API to get the image data
response = requests.get(url)
response.raise_for_status()
image_data = response.content
# Load the image data into a Pillow image object
image = Image.open(io.BytesIO(image_data))
# Save the image as a JPEG file
img_path = f'static/images/{painting_id}dalle.jpg'
image.save(img_path, 'JPEG')
return img_path
def make_dalle_img(prompt, painting_id):
response = dalle(prompt)
img_path = generate_image(response, painting_id)
return img_path
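# Minimal usage sketch (assumed prompt and painting_id; requires OPENAI_API_KEY in secrets.sh
# and an existing static/images/ directory):
#   img_path = make_dalle_img("a watercolor of a lighthouse at dusk", painting_id=1)
#   print(img_path)  # -> static/images/1dalle.jpg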
| [] |
2024-01-10 | katebarouch/paintbyai | shop.py | import openai
from dotenv import load_dotenv
load_dotenv(".gitignore/secrets.sh")
import os
import requests
def get_paint_info(color_prompts):
attempt_count = 0
max_attempts = 5
while attempt_count < max_attempts:
try:
# define OpenAI key
api_key = os.getenv("OPENAI_API_KEY")
openai.api_key = api_key
responses = []
for prompt in color_prompts:
messages = [
{"role": "system", "content": "You are a customer in a paint store."},
{"role": "user", "content": prompt}
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
)
responses.append(response)
return [response['choices'][0]['message']['content'] for response in responses]
except Exception as e:
print(f"An error occurred: {str(e)}")
attempt_count += 1
print("Max attempts reached. Function failed.")
return None
def send_message(message):
url = 'https://api.openai.com/v1/chat/completions'
headers = {
'Authorization': 'Bearer YOUR_API_KEY',
'Content-Type': 'application/json'
}
data = {
'messages': [{'role': 'system', 'content': 'You are a helpful assistant.'},
{'role': 'user', 'content': message}]
}
response = requests.post(url, headers=headers, json=data)
response_json = response.json()
message = response_json['choices'][0]['message']['content']
return message
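# Minimal usage sketch (assumed prompt; requires OPENAI_API_KEY in the environment):
#   answers = get_paint_info(["Suggest a warm off-white wall paint for a sunlit kitchen."])
#   print(answers[0] if answers else "no response")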
| [
"You are a customer in a paint store.",
"You are a helpful assistant."
] |
2024-01-10 | mocy/autogen | test~test_notebook.py | import sys
import os
import pytest
try:
import openai
skip = False
except ImportError:
skip = True
here = os.path.abspath(os.path.dirname(__file__))
def run_notebook(input_nb, output_nb="executed_openai_notebook.ipynb", save=False):
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors import CellExecutionError
try:
nb_loc = os.path.join(here, os.pardir, "notebook")
file_path = os.path.join(nb_loc, input_nb)
with open(file_path) as nb_file:
nb = nbformat.read(nb_file, as_version=4)
preprocessor = ExecutePreprocessor(timeout=4800, kernel_name="python3")
preprocessor.preprocess(nb, {"metadata": {"path": nb_loc}})
output_file_name = "executed_openai_notebook_output.txt"
output_file = os.path.join(here, output_file_name)
with open(output_file, "a") as nb_output_file:
for cell in nb.cells:
if cell.cell_type == "code" and "outputs" in cell:
for output in cell.outputs:
if "text" in output:
nb_output_file.write(output["text"].strip() + "\n")
elif "data" in output and "text/plain" in output["data"]:
nb_output_file.write(output["data"]["text/plain"].strip() + "\n")
except CellExecutionError:
raise
finally:
if save:
with open(os.path.join(here, output_nb), "w", encoding="utf-8") as nb_executed_file:
nbformat.write(nb, nb_executed_file)
@pytest.mark.skipif(
skip or not sys.version.startswith("3.10"),
reason="do not run if openai is not installed or py!=3.10",
)
def test_agentchat_auto_feedback_from_code(save=False):
run_notebook("agentchat_auto_feedback_from_code_execution.ipynb", save=save)
@pytest.mark.skipif(
skip or not sys.version.startswith("3.10"),
reason="do not run if openai is not installed or py!=3.10",
)
def test_oai_completion(save=False):
run_notebook("oai_completion.ipynb", save=save)
@pytest.mark.skipif(
skip or not sys.version.startswith("3.10"),
reason="do not run if openai is not installed or py!=3.10",
)
def test_agentchat_function_call(save=False):
run_notebook("agentchat_function_call.ipynb", save=save)
@pytest.mark.skipif(
skip or not sys.version.startswith("3.10"),
reason="do not run if openai is not installed or py!=3.10",
)
def test_agentchat_MathChat(save=False):
run_notebook("agentchat_MathChat.ipynb", save=save)
@pytest.mark.skipif(
skip or not sys.version.startswith("3.11"),
reason="do not run if openai is not installed or py!=3.11",
)
def test_oai_chatgpt_gpt4(save=False):
run_notebook("oai_chatgpt_gpt4.ipynb", save=save)
if __name__ == "__main__":
test_agentchat_auto_feedback_from_code(save=True)
# test_oai_chatgpt_gpt4(save=True)
# test_oai_completion(save=True)
# test_agentchat_MathChat(save=True)
# test_agentchat_function_call(save=True)
| [] |
2024-01-10 | mocy/autogen | test~agentchat~test_retrievechat.py | import pytest
import sys
import autogen
from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST
try:
from autogen.agentchat.contrib.retrieve_assistant_agent import (
RetrieveAssistantAgent,
)
from autogen.agentchat.contrib.retrieve_user_proxy_agent import (
RetrieveUserProxyAgent,
)
from autogen.retrieve_utils import create_vector_db_from_dir, query_vector_db
import chromadb
from chromadb.utils import embedding_functions as ef
skip_test = False
except ImportError:
skip_test = True
@pytest.mark.skipif(
sys.platform in ["darwin", "win32"] or skip_test,
reason="do not run on MacOS or windows",
)
def test_retrievechat():
try:
import openai
except ImportError:
return
conversations = {}
autogen.ChatCompletion.start_logging(conversations)
config_list = autogen.config_list_from_json(
OAI_CONFIG_LIST,
file_location=KEY_LOC,
filter_dict={
"model": ["gpt-4", "gpt4", "gpt-4-32k", "gpt-4-32k-0314"],
},
)
assistant = RetrieveAssistantAgent(
name="assistant",
system_message="You are a helpful assistant.",
llm_config={
"request_timeout": 600,
"seed": 42,
"config_list": config_list,
},
)
sentence_transformer_ef = ef.SentenceTransformerEmbeddingFunction()
ragproxyagent = RetrieveUserProxyAgent(
name="ragproxyagent",
human_input_mode="NEVER",
max_consecutive_auto_reply=2,
retrieve_config={
"docs_path": "./website/docs",
"chunk_token_size": 2000,
"model": config_list[0]["model"],
"client": chromadb.PersistentClient(path="/tmp/chromadb"),
"embedding_function": sentence_transformer_ef,
},
)
assistant.reset()
code_problem = "How can I use FLAML to perform a classification task, set use_spark=True, train 30 seconds and force cancel jobs if time limit is reached."
ragproxyagent.initiate_chat(assistant, problem=code_problem, search_string="spark", silent=True)
print(conversations)
@pytest.mark.skipif(
sys.platform in ["darwin", "win32"] or skip_test,
reason="do not run on MacOS or windows",
)
def test_retrieve_utils():
client = chromadb.PersistentClient(path="/tmp/chromadb")
create_vector_db_from_dir(dir_path="./website/docs", client=client, collection_name="autogen-docs")
results = query_vector_db(
query_texts=[
"How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?",
],
n_results=4,
client=client,
collection_name="autogen-docs",
search_string="AutoGen",
)
print(results["ids"][0])
assert len(results["ids"][0]) == 4
if __name__ == "__main__":
test_retrievechat()
test_retrieve_utils()
| [] |
2024-01-10 | mocy/autogen | test~test_with_openai.py | import autogen
import pytest
import sys
try:
import openai
skip = False
except ImportError:
skip = True
@pytest.mark.skipif(
skip or not sys.version.startswith("3.10"),
reason="do not run if openai is not installed or py!=3.10",
)
def test_function_call_groupchat():
import random
def get_random_number():
return random.randint(0, 100)
config_list_gpt4 = autogen.config_list_from_json(
"OAI_CONFIG_LIST",
filter_dict={
"model": ["gpt-4", "gpt-4-0314", "gpt4", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-v0314"],
},
)
llm_config = {
"config_list": config_list_gpt4,
"seed": 42,
"functions": [
{
"name": "get_random_number",
"description": "Get a random number between 0 and 100",
"parameters": {
"type": "object",
"properties": {},
},
},
],
}
user_proxy = autogen.UserProxyAgent(
name="User_proxy",
system_message="A human admin that will execute function_calls.",
function_map={"get_random_number": get_random_number},
human_input_mode="NEVER",
)
coder = autogen.AssistantAgent(
name="Player",
system_message="You will can function `get_random_number` to get a random number. Stop only when you get at least 1 even number and 1 odd number. Reply TERMINATE to stop.",
llm_config=llm_config,
)
groupchat = autogen.GroupChat(agents=[user_proxy, coder], messages=[], max_round=7)
manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)
user_proxy.initiate_chat(manager, message="Let's start the game!")
if __name__ == "__main__":
test_function_call_groupchat()
| [] |
2024-01-10 | mocy/autogen | test~test_function_call.py | try:
import openai
except ImportError:
openai = None
import pytest
import json
import autogen
from autogen.math_utils import eval_math_responses
from test_code import KEY_LOC
@pytest.mark.skipif(openai is None, reason="openai not installed")
def test_eval_math_responses():
config_list = autogen.config_list_from_models(
KEY_LOC, exclude="aoai", model_list=["gpt-4-0613", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k"]
)
functions = [
{
"name": "eval_math_responses",
"description": "Select a response for a math problem using voting, and check if the response is correct if the solution is provided",
"parameters": {
"type": "object",
"properties": {
"responses": {
"type": "array",
"items": {"type": "string"},
"description": "The responses in a list",
},
"solution": {
"type": "string",
"description": "The canonical solution",
},
},
"required": ["responses"],
},
},
]
response = autogen.ChatCompletion.create(
config_list=config_list,
messages=[
{
"role": "user",
"content": 'evaluate the math responses ["1", "5/2", "5/2"] against the true answer \\frac{5}{2}',
},
],
functions=functions,
)
print(response)
responses = autogen.ChatCompletion.extract_text_or_function_call(response)
print(responses[0])
function_call = responses[0]["function_call"]
name, arguments = function_call["name"], json.loads(function_call["arguments"])
assert name == "eval_math_responses"
print(arguments["responses"])
# if isinstance(arguments["responses"], str):
# arguments["responses"] = json.loads(arguments["responses"])
arguments["responses"] = [f"\\boxed{{{x}}}" for x in arguments["responses"]]
print(arguments["responses"])
arguments["solution"] = f"\\boxed{{{arguments['solution']}}}"
print(eval_math_responses(**arguments))
def test_json_extraction():
from autogen.agentchat import UserProxyAgent
user = UserProxyAgent(name="test", code_execution_config={"use_docker": False})
jstr = '{\n"location": "Boston, MA"\n}'
assert user._format_json_str(jstr) == '{"location": "Boston, MA"}'
jstr = '{\n"code": "python",\n"query": "x=3\nprint(x)"}'
assert user._format_json_str(jstr) == '{"code": "python","query": "x=3\\nprint(x)"}'
jstr = '{"code": "a=\\"hello\\""}'
assert user._format_json_str(jstr) == '{"code": "a=\\"hello\\""}'
def test_execute_function():
from autogen.agentchat import UserProxyAgent
# 1. test calling a simple function
def add_num(num_to_be_added):
given_num = 10
return num_to_be_added + given_num
user = UserProxyAgent(name="test", function_map={"add_num": add_num})
# correct execution
correct_args = {"name": "add_num", "arguments": '{ "num_to_be_added": 5 }'}
assert user.execute_function(func_call=correct_args)[1]["content"] == "15"
# function name called is wrong or doesn't exist
wrong_func_name = {"name": "subtract_num", "arguments": '{ "num_to_be_added": 5 }'}
assert "Error: Function" in user.execute_function(func_call=wrong_func_name)[1]["content"]
# arguments passed is not in correct json format
wrong_json_format = {
"name": "add_num",
"arguments": '{ "num_to_be_added": 5, given_num: 10 }',
} # should be "given_num" with quotes
assert "You argument should follow json format." in user.execute_function(func_call=wrong_json_format)[1]["content"]
# function execution error with wrong arguments passed
wrong_args = {"name": "add_num", "arguments": '{ "num_to_be_added": 5, "given_num": 10 }'}
assert "Error: " in user.execute_function(func_call=wrong_args)[1]["content"]
# 2. test calling a class method
class AddNum:
def __init__(self, given_num):
self.given_num = given_num
def add(self, num_to_be_added):
self.given_num = num_to_be_added + self.given_num
return self.given_num
user = UserProxyAgent(name="test", function_map={"add_num": AddNum(given_num=10).add})
func_call = {"name": "add_num", "arguments": '{ "num_to_be_added": 5 }'}
assert user.execute_function(func_call=func_call)[1]["content"] == "15"
assert user.execute_function(func_call=func_call)[1]["content"] == "20"
# 3. test calling a function with no arguments
def get_number():
return 42
user = UserProxyAgent("user", function_map={"get_number": get_number})
func_call = {"name": "get_number", "arguments": "{}"}
assert user.execute_function(func_call)[1]["content"] == "42"
if __name__ == "__main__":
test_json_extraction()
test_execute_function()
test_eval_math_responses()
| [
"evaluate the math responses [\"1\", \"5/2\", \"5/2\"] against the true answer \\frac{5}{2}"
] |
2024-01-10 | a-t-h-i/Football-Stats | ai~prediction.py | import os
from openai import OpenAI
client = OpenAI(
api_key=os.environ.get("MY_KEY"),
)
def ask(query):
completions = client.chat.completions.create(
messages=[
{
"role": "user",
"content": query,
}
],
model="gpt-3.5-turbo",
)
    response = completions.choices[0].message.content
return response
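# Minimal usage sketch; the question below is an assumed example, and running it makes a
# real API call using the MY_KEY environment variable.
if __name__ == "__main__":
    print(ask("Which club has won the most Premier League titles?"))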
| [] |
2024-01-10 | sanjaikuttyy/Transcribe-whisper | transcription.py | from openai import OpenAI
import whisper
import soundfile as sf
from videoproc import youtube_preprocess
from chunking import chunk_by_size
# Load the Whisper model once, outside the loop
model = whisper.load_model("tiny")
# Process the YouTube video
audio_file = youtube_preprocess("https://youtu.be/HQjjgi6271k?si=tPJRuetEMA603s-a")
# Determine the number of chunks
no_of_chunks = chunk_by_size(audio_file)
# Process each chunk
for i in range(no_of_chunks):
file_path = f"process_chunks/chunk{i}.wav"
print(file_path)
# Transcribe the chunk; passing the file path lets Whisper handle loading and
# resampling (a raw soundfile array would need to be float32 mono at 16 kHz)
transcript = model.transcribe(file_path)
# Append the transcription to a file
with open("videotext.txt", "a") as f:
f.write(transcript["text"] + "\n")
| [] |
2024-01-10 | Knguyen-dev/SDEV-265-Group-4 | pages~AIChatPage.py | import time
import customtkinter as ctk
import sys, os
sys.path.append("..")
from classes.models import Message
from tkinter import messagebox
from customtkinter import CTkCanvas
from PIL import Image
'''
+ AIChatPage: Frame that represents the page where the user and AI send chat messages to each other in order to
write their story.
Attributes/Variables:
- master (App): 'App' class instance from 'Main.py'
- innerPageFrame (CTkFrame): Page frame that contains all of the widgets for the page and is used to center it
- header (CTkFrame): Header of the page frame
- heading (CTkLabel): Heading message
- storyStateMessage (CTkLabel): Label that tells the user what kind of story they're writing, whether they're remixing, writing
a new story, continuing a saved story, etc.
- pageStatusMessage (CTkLabel): Indicates status of the page like when the user is currently waiting on the AI for a response
or whether an occur has occurred.
- chatBox (CTkTextbox): Textbox that shows the messages of the user and AI.
- chatInputSection (CTkFrame): Section with all of the input related widgets
- chatEntry (CTkEntry): Input text box where user types in their message
- openSaveStoryBtn (CTkButton): Button that redirects the user to the saveStoryPage
- sendChatBtn (CTkButton): Button that sends the chat to the AI.
Methods:
- processUserChat(self): Sends a user chat message to the AI and gets its response.
- renderChat(self, messageObj): Renders message text onto the screen given a messgae object.
'''
class AIChatPage(ctk.CTkFrame):
def __init__(self, master):
self.master = master
super().__init__(self.master, fg_color=self.master.theme["main_clr"], corner_radius=0)
self.chatEntry_height=20
self.max_chatEntry_height = 400 # 4 line max view space
# This logic prevents the dynamically resizing msgbox from overexpanding - Powered by Nuke The Dev
self.msgbox_height=30
self.max_msgbox_height = 1200 # 12 line max view space
self.setup_ui()
'''
- Cases for the initial state:
1. User is continuing a saved story
2. User is currently writing a remixed story that is unsaved. If storyGenObj is detected by the constructor's logic, then
we're rendering the AI's first response to a user's remixed story, which would be the first message of the chat.
3. User is continuing an unsaved story that isn't a remix.
'''
# If there have been any unsaved messages, render them
if self.master.unsavedStoryMessages:
for messageObj in self.master.unsavedStoryMessages:
self.renderChatMessageObj(messageObj)
# if storyGenObj exists, then we have to process a generator that the AI returned
# NOTE: In this case, when storyGenObj exists here, that means it was set by the remixStoryPage,
# and this generator contains a response for remixing a story
if self.master.storyGenObj:
self.processAIChat()
# call the function once to start the periodic check
self.check_length_and_resize()
def setup_ui(self):
innerPageFrame = ctk.CTkFrame(self, fg_color=self.master.theme["sub_clr"])
innerPageFrame.pack(expand=True)
header = ctk.CTkFrame(innerPageFrame, fg_color="transparent")
heading = ctk.CTkLabel(header, text="Write Your Story!", font=("Helvetica", 32), text_color=self.master.theme["label_clr"])
storyStateMessage = ctk.CTkLabel(header, text="", text_color=self.master.theme["label_clr"])
self.pageStatusMessage = ctk.CTkLabel(header, text="StoryBot is currently waiting for your input.", font=("Helvetica", 24), text_color=self.master.theme["label_clr"])
# This is where we view the messages sent from the AI and the User
self.chatBox = ctk.CTkScrollableFrame(innerPageFrame, fg_color=self.master.theme["main_clr"], width=800, height=400)
# Section with all of the input options the user has for the AIChatPage
chatInputSection = ctk.CTkFrame(innerPageFrame, fg_color="transparent")
self.chatEntry = ctk.CTkTextbox(chatInputSection, height=50, width=600, fg_color=self.master.theme["entry_clr"], text_color=self.master.theme["entry_text_clr"], font=("Helvetica", 16), wrap="word", activate_scrollbars=True)
try:
openSaveStoryBtn_image = ctk.CTkImage(Image.open(os.path.join(self.master.image_path, 'glass_save_btn.png')), size=(50, 50))
except IOError as e:
messagebox.showerror("Error", f"Failed to load image: {e}")
return
self.openSaveStoryBtn = ctk.CTkButton(chatInputSection, image=openSaveStoryBtn_image, height=10, width=20, text="Save Story", font=("Helvetica", 16, "bold"), text_color=self.master.theme["btn_text_clr"], fg_color='transparent', hover_color=self.master.theme["hover_clr"], command=lambda: self.master.openPage("saveStoryPage"))
sendChatBtn_image = ctk.CTkImage(Image.open(os.path.join(self.master.image_path, 'glass_send_btn.png')),
size=(50, 50))
self.sendChatBtn = ctk.CTkButton(chatInputSection, corner_radius=0, image=sendChatBtn_image, height=10, width=20, text="Send", font=("Helvetica", 16, "bold"), text_color=self.master.theme["btn_text_clr"], fg_color='transparent', hover_color=self.master.theme["hover_clr"], hover=True, anchor="e", command=self.processUserChat)
# Structure and style widgets accordingly
header.grid(row=0, column=0, pady=10)
heading.grid(row=0, column=0)
storyStateMessage.grid(row=1, column=0)
self.pageStatusMessage.grid(row=2, column=0)
self.chatBox.grid(row=1, column=0, pady=10)
chatInputSection.grid(row=2, column=0, pady=20)
self.chatEntry.grid(row=0, column=0, padx=10, pady=5)
self.sendChatBtn.grid(row=0, column=1, padx=5)
self.openSaveStoryBtn.grid(row=0, column=2, padx=5)
if self.master.isSavedStory:
# Render saved messages associated with the current story
for messageObj in self.master.currentStory.messages:
self.renderChatMessageObj(messageObj)
storyStateMessage.configure(text=f"Currently continuing '{self.master.currentStory.storyTitle}'!")
elif self.master.isRemixedStory:
storyStateMessage.configure(text=f"Currently writing a remix based on '{self.master.currentStory.storyTitle}'!")
else:
storyStateMessage.configure(text=f"Currently continuing writing a new story!")
def renderChatMessageObj(self, messageObj):
'''
- Renders messageObj as chat messages on the screen
- NOTE: Only good for rendering saved, unsaved, and user chats because those are easily in message object form.
For rendering AI's response, it's a generator so use processAIChat(self).
'''
# Chat window is read and write now
messageText = messageObj.text
# If it's an AI message, else it was a message sent by the user
if messageObj.isAISender:
messageText = "StoryBot: " + messageText
else:
messageText = f"{self.master.loggedInUser.username}: " + messageText
# create a new msgbox to print this message into
msgbox = self.drawMsgBox()
msgbox.insert("1.0", messageText)
# Calculate the required height of the message
height = self.expandEntryBox(msgLength=messageText)
# Now we use the calculated `height` parameter to set the height of the msgbox
print('height=', height)
msgbox.configure(height=height)
# Check for the length of text in the entry field and adjust entry field height accordingly
def check_length_and_resize(self):
# get the text in the CTkTextbox
if self.chatEntry_height <= self.max_chatEntry_height:
new_height=self.expandEntryBox(self.chatEntry.get('1.0', 'end'))
self.chatEntry.configure(height=new_height)
# schedule the next check in 2000 milliseconds (2 seconds)
self.after(2000, self.check_length_and_resize)
def processUserChat(self):
'''
- Sends the user chat message to the ai, for the ai to respond, then goes to render both of those chat messages
1. userMessage (Message): Message object containing text that the user sent
2. AIResponse (Generator): Generator object containing text that the AI generated in response to the user
'''
# Check if user actually sent something
if (self.chatEntry.get('1.0', 'end').strip() == ""):
messagebox.showwarning('Empty Message!', 'Please enter a valid message!')
return
# Process and render the user's message
# The .strip() method ensures that a user cannot type whitespaces
# before the message content which has been known to cause an openAI api exception
userMessage = Message(text=self.chatEntry.get('1.0', 'end').strip(), isAISender=False)
self.renderChatMessageObj(userMessage)
self.master.unsavedStoryMessages.append(userMessage)
# Clear entry widget when user sends a message
self.chatEntry.delete(1.0, "end")
AIResponse = self.master.storyGPT.sendStoryPrompt(userMessage.text)
self.master.storyGenObj = AIResponse # type: ignore
# Process and render AI's message
self.processAIChat()
def drawMsgBox(self):
msgbox = ctk.CTkTextbox(self.chatBox, fg_color=self.master.theme["entry_clr"], font=("Helvetica", 16), width=750, height=20, wrap="word", activate_scrollbars=False)
msgbox.grid(row=len(self.master.msgboxes), column=0, padx=5, pady=5, sticky="nsew")
self.master.msgboxes.append(msgbox)
return msgbox
def expandEntryBox(self, msgLength):
num_chars = len(msgLength) # The number of characters in the text box at hand
# Calculate the number of lines at 100 characters per line of text onscreen
num_lines = num_chars // 100 # Use integer division to get the number of full lines
if num_chars % 100 > 0: # If there are any remaining characters, they will form an additional line
num_lines += 1
# Calculate the height
height = num_lines * 30 # Each line is 30 units high
# Now you can use `height` to set the height of your widget
return height
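    # Worked example (illustrative): a 250-character message gives 250 // 100 = 2 full lines
    # plus one partial line, so num_lines = 3 and the returned height is 3 * 30 = 90.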
def processAIChat(self):
'''
- Handles processing of the AI's generated chat.
1. Enable and disable certain parts of the UI, preventing the user from sending another
message to the AI until the first one is finished. Also prevent the user from being
able to redirect themselves to other pages, so that they don't lose their AI generated message.
2. Renders chunks of the messages as they're being obtained from openai.
3. Save the ai's generated message to unsavedStoryMessages so that we can keep track of it
'''
# Access the current messagebox at it's index
# Disable send chat button as user can't send another chat until the ai is finished
self.sendChatBtn.configure(state="disabled")
# Ensure user can't navigate to other pages while AI is generating message
self.master.sidebar.disableSidebarButtons()
self.openSaveStoryBtn.configure(state="disabled")
# Update page status message to indicate that AI is currently generating a message
self.pageStatusMessage.configure(text="Please wait here until StoryBot is finished!")
# Message object that contains the text from the generator
messageObj = Message(text="", isAISender=True)
chunkIndex = 0
# Create a new real-time dynamically resizing msg bubble to display AI response in
msgbox = self.drawMsgBox()
# Make the chat box writable
msgbox.configure(state="normal")
msgbox.insert('end', 'Story Bot: ')
for chunk in self.master.storyGenObj:
if any(chunk.endswith(char) for char in ['.', '?', '!']):
    # insert the whole chunk and add a trailing space after sentence-ending punctuation
    msgbox.insert('end', chunk + " ")
else:
    msgbox.insert('end', chunk)
# Enables smooth real time typing
if (self.msgbox_height <= self.max_msgbox_height):
new_height=self.expandEntryBox(msgbox.get('1.0', 'end'))
self.msgbox_height = new_height
# Dynamically resize the height of the current msgbox
msgbox.update()
msgbox.configure(height=self.msgbox_height)
self.update_idletasks()
# add the chunk onto the message object's text since we want to keep track of this message; then increment chunkIndex
messageObj.text += chunk
chunkIndex += 1
#reset the msgbox height after each message
self.msgbox_height=30
# AI response processing is done, so append message object and variables related to processing a message
self.master.unsavedStoryMessages.append(messageObj)
self.master.storyGenObj = None
# Scroll to bottom and make chatbox read only
# This allows the user to view the latest text
msgbox.see("end-1c")
msgbox.configure(state="disabled")
# Allow the user to send another message and navigate to other pages
self.openSaveStoryBtn.configure(state="normal")
self.sendChatBtn.configure(state="normal")
self.master.sidebar.updateSidebar()
# Update the page status message to indicate the ai is done
self.pageStatusMessage.configure(text="StoryBot is currently waiting for your input.") | [] |
2024-01-10 | martinakaduc/llama-qa | hf_embedding.py | import torch
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, Field
from langchain.embeddings.base import Embeddings
DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2"
DEFAULT_INSTRUCT_MODEL = "hkunlp/instructor-large"
DEFAULT_EMBED_INSTRUCTION = "Represent the document for retrieval: "
DEFAULT_QUERY_INSTRUCTION = (
"Represent the question for retrieving supporting documents: "
)
def mean_pooling(model_output, attention_mask):
token_embeddings = model_output[0] #First element of model_output contains all token embeddings
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).to(token_embeddings.dtype)
sum_embeddings = torch.sum(token_embeddings * input_mask_expanded, 1)
sum_mask = torch.clamp(input_mask_expanded.sum(1), min=1e-9)
return sum_embeddings / sum_mask
class HuggingFaceEmbeddings(BaseModel, Embeddings):
"""HuggingFace sentence_transformers embedding models.
To use, you should have the ``sentence_transformers`` python package installed.
Example:
.. code-block:: python
from transformers import AutoModel, AutoTokenizer
from hf_embedding import HuggingFaceEmbeddings
model_name = "sentence-transformers/all-mpnet-base-v2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name)
hf = HuggingFaceEmbeddings(model=model, tokenizer=tokenizer)
"""
model: Any
"""The HuggingFace model."""
tokenizer: Any
"""The HuggingFace tokenizer."""
cache_folder: Optional[str] = None
"""Path to store models.
Can be also set by SENTENCE_TRANSFORMERS_HOME environment variable."""
encode_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Key word arguments to pass when calling the `encode` method of the model."""
def __init__(self, model, tokenizer, **kwargs: Any):
"""Initialize the sentence_transformer."""
super().__init__(**kwargs)
self.model = model
self.tokenizer = tokenizer
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a HuggingFace transformer model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
texts = list(map(lambda x: x.replace("\n", " "), texts))
embeddings = self.encode(texts, **self.encode_kwargs)
return embeddings.tolist()
def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a HuggingFace transformer model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
text = text.replace("\n", " ")
embedding = self.encode([text], **self.encode_kwargs)[0]
return embedding.tolist()
def encode(self, texts: List[str], **kwargs: Any) -> List[List[float]]:
"""Encode a list of texts.
Args:
texts: The list of texts to embed.
kwargs: Additional keyword arguments to pass to the model.
Returns:
List of embeddings, one for each text.
"""
#Tokenize sentences
#Pad and truncate to the model's maximum length so long texts do not error out
encoded_input = self.tokenizer(texts, padding=True, truncation=True, return_tensors='pt')
#Compute token embeddings
with torch.no_grad():
model_output = self.model(**encoded_input)
#Perform pooling. In this case, mean pooling
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
sentence_embeddings = sentence_embeddings.detach().cpu().numpy()
return sentence_embeddings | [] |
2024-01-10 | Parrot-Developers/airsdk-samples | road_runner~guidance~look_down~look_down.py | # Copyright (c) 2023 Parrot Drones SAS
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the Parrot Company nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# PARROT COMPANY BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import numpy as np
import libguidance_utils_binding as pyguidance
import libquaternion_binding as quaternion
import cfgreader
import telemetry
import guidance.core as gdnc_core
import cam_controller.frame_of_reference_pb2 as cam_for_pb2
import cam_controller.control_mode_pb2 as cam_cm_pb2
import road_runner.guidance.look_down.messages_pb2 as look_down_mode_pb2
LOOK_DOWN_CONFIG_FILENAME = "/etc/guidance/look_down/mode.cfg"
def _config(field):
cfg = cfgreader.build_config_start()
for (root, include) in field:
cfg = cfgreader.build_config_update(cfg, root, include)
return cfgreader.build_config_end(cfg)
class LookDownMode(gdnc_core.Mode):
def __init__(self, guidance, name):
super().__init__(guidance, name)
# Get guidance context
self.msghub = self.guidance.get_message_hub()
mode_config_path = guidance.get_config_file(LOOK_DOWN_CONFIG_FILENAME)
# Get configuration values
field = [
(
mode_config_path,
"cameraPitchPosition",
),
(
mode_config_path,
"reachedThresholdPercentageOfAngle",
),
]
look_down_cfg = _config(field)
# Conversion from degrees to radians
self.camera_pitch_position = (
look_down_cfg.look_down.cameraPitchPosition * np.pi / 180
)
# Target reached detector configuration
self.att_reached_detector_cfg = (
pyguidance.AttitudeReachedDetector.Configuration()
)
self.att_reached_detector_cfg.target_kind = (
pyguidance.AttitudeReachedDetector.ANGLE
)
self.att_reached_detector_cfg.angle_threshold = 0.0
self.att_reached_detector_cfg.rate_threshold = 0.0
self.target_reached = False
# Target reached detector init
self.attitude_reached_detector = pyguidance.AttitudeReachedDetector(
self.att_reached_detector_cfg
)
self.attitude_reached_detector.set_threshold(
look_down_cfg.look_down.reachedThresholdPercentageOfAngle
* np.abs(self.camera_pitch_position),
)
# Telemetry consumer configuration
subset = [
"ref_ned_start_angles_yaw",
"ref_ned_start_angles_pitch",
"ref_ned_start_angles_roll",
]
self.tlm_fcam = telemetry.TlmSection(
"/dev/shm", "fcam_controller", subset=subset
)
# Msghub configuration
self.channel = self.guidance.get_channel(
gdnc_core.ChannelKind.GUIDANCE
)
self.evt_sender = gdnc_core.MessageSender(
look_down_mode_pb2.Event.DESCRIPTOR.full_name
)
def shutdown(self):
# Msghub
self.evt_sender = None
self.channel = None
self.msghub = None
# Telemetry
self.tlm_fcam = None
# Target reached detector
self.att_reached_detector_cfg = None
def configure(self, msg, disable_oa, override_fcam, override_stereo):
# Telemetry
self.tlm_fcam.fetch_sample()
# Target reached detector
self.attitude_reached_detector.set_target(
quaternion.from_euler(
self.tlm_fcam["ref_ned_start_angles_yaw"],
self.camera_pitch_position,
self.tlm_fcam["ref_ned_start_angles_roll"],
)
)
self.target_reached = False
# Fcam pitch axis configuration
self.output.has_front_cam_config = True
fcam_config = self.output.front_cam_config
fcam_config.pitch.locked = True
fcam_config.pitch.filtered = True
fcam_config.pitch.smoothness = 0.1
def generate_attitude_references(self):
# Fcam axes references
self.output.has_front_cam_reference = True
fcam_ref = self.output.front_cam_reference
fcam_ref.pitch.ctrl_mode = cam_cm_pb2.POSITION
fcam_ref.pitch.frame_of_ref = cam_for_pb2.NED
fcam_ref.pitch.position = self.camera_pitch_position
def get_triggers(self):
return (gdnc_core.Trigger.TIMER, 30, 30) # Aprox 33.33 Hz
def enter(self):
self.msghub.attach_message_sender(self.evt_sender, self.channel)
def exit(self):
self.msghub.detach_message_sender(self.evt_sender)
def begin_step(self):
# Telemetry: get sample
self.tlm_fcam.fetch_sample()
def end_step(self):
self.attitude_reached_detector.process(
quaternion.from_euler(
self.tlm_fcam["ref_ned_start_angles_yaw"],
self.tlm_fcam["ref_ned_start_angles_pitch"],
self.tlm_fcam["ref_ned_start_angles_roll"],
)
)
self.target_reached = self.attitude_reached_detector.is_reached()
if self.target_reached:
tosend = look_down_mode_pb2.Event()
tosend.done.SetInParent()
gdnc_core.msghub_send(self.evt_sender, tosend)
def generate_drone_reference(self):
# Unused in this mode. Not mandatory.
pass
def correct_drone_reference(self):
# Unused in this mode. Not mandatory.
pass
# Export classes
GUIDANCE_MODES = {
"com.parrot.missions.samples.road_runner.look_down": LookDownMode
}
| [] |
2024-01-10 | Parrot-Developers/airsdk-samples | hello~guidance~hello~hello.py | import numpy as np
import libpomp
import telemetry
import cam_controller.frame_of_reference_pb2 as cam_for_pb2
import cam_controller.control_mode_pb2 as cam_cm_pb2
import guidance.core as gdnc_core
import samples.hello.guidance.messages_pb2 as HelloGroundModeMessages
from msghub_utils import service_name
class HelloGroundMode(gdnc_core.Mode):
FCAM_PITCH_ANIMATION_PERIOD_MS = 5000
FCAM_PITCH_ANIMATION = [
0.0, -0.2, -0.8, -2.0, -3.8, -6.6, -10.4, -15.5, -22.0, -30.1, -40.0,
-25.0, -10.0, 4.9, 19.9, 34.9, 49.9, 55.5, 42.0, 28.5, 15.0, 1.5,
-11.9, -25.4, -26.9, -22.4, -18.0, -13.5, -9.0, -4.4, 0.0
]
def __init__(self, guidance, name):
super().__init__(guidance, name)
self.loop = self.guidance.get_loop()
self.msghub = self.guidance.get_message_hub()
self.front_cam_pitch_index = 0
subset = [
"attitude_euler_angles.yaw",
"attitude_euler_angles.pitch",
"attitude_euler_angles.roll",
]
self.tlm_dctl = telemetry.TlmSection(
"/dev/shm", "drone_controller", subset=subset
)
self.timer_cb = libpomp.pomp_timer_cb_t(lambda t, d: self._timer_cb())
self.timer = libpomp.pomp_timer_new(self.loop, self.timer_cb, None)
self.channel = self.guidance.get_channel(
gdnc_core.ChannelKind.GUIDANCE
)
self.evt_sender = gdnc_core.MessageSender(
service_name(HelloGroundModeMessages.Event)
)
self.say = False
self.say_count = 0
def shutdown(self):
self.loop = None
self.msghub = None
self.tlm_dctl = None
libpomp.pomp_timer_destroy(self.timer)
self.timer_cb = None
self.timer = None
self.evt_sender = None
def get_triggers(self):
return (gdnc_core.Trigger.TIMER, 30, 30)
def configure(self, msg, disable_oa, override_fcam, override_stereo):
ground_mode_msg = gdnc_core.unpack_config(
msg, HelloGroundModeMessages.Config
)
self.say = ground_mode_msg.say
self.output.has_front_cam_config = True
self.output.front_cam_config.yaw.locked = True
self.output.front_cam_config.yaw.filtered = False
self.output.front_cam_config.roll.locked = True
self.output.front_cam_config.roll.filtered = False
self.output.front_cam_config.pitch.locked = True
self.output.front_cam_config.pitch.filtered = False
if self.say:
libpomp.pomp_timer_set_periodic(
self.timer,
# the initial delay (phase) is close to zero, in order
# to start the animation right away, but not zero
# because that would deactivate the timer.
1,
HelloGroundMode.FCAM_PITCH_ANIMATION_PERIOD_MS,
)
self.say_count = 0
else:
# clear the timer here, because the mode might be
# reconfigured (set_mode with the same mode), in which
# case exit() is not called
libpomp.pomp_timer_clear(self.timer)
def enter(self):
self.msghub.attach_message_sender(self.evt_sender, self.channel)
def exit(self):
self.msghub.detach_message_sender(self.evt_sender)
libpomp.pomp_timer_clear(self.timer)
def begin_step(self):
self.tlm_dctl.fetch_sample()
def end_step(self):
if (
self.front_cam_pitch_index
< len(HelloGroundMode.FCAM_PITCH_ANIMATION) - 1
):
self.front_cam_pitch_index += 1
def generate_drone_reference(self):
pass
def correct_drone_reference(self):
pass
def generate_attitude_references(self):
# Front
self.output.has_front_cam_reference = True
fcam_ref = self.output.front_cam_reference
fcam_ref.yaw.ctrl_mode = cam_cm_pb2.POSITION
fcam_ref.yaw.frame_of_ref = cam_for_pb2.NED
fcam_ref.yaw.position = self.tlm_dctl["attitude_euler_angles.yaw"]
fcam_ref.pitch.ctrl_mode = cam_cm_pb2.POSITION
fcam_ref.pitch.frame_of_ref = cam_for_pb2.NED
fcam_ref.pitch.position = (
HelloGroundMode.FCAM_PITCH_ANIMATION[self.front_cam_pitch_index]
* np.pi
/ 180.0
)
fcam_ref.roll.ctrl_mode = cam_cm_pb2.POSITION
fcam_ref.roll.frame_of_ref = cam_for_pb2.NED
fcam_ref.roll.position = 0.0
# Stereo
self.output.has_stereo_cam_reference = True
stcam_ref = self.output.stereo_cam_reference
stcam_ref.yaw.ctrl_mode = cam_cm_pb2.POSITION
stcam_ref.yaw.frame_of_ref = cam_for_pb2.NED
stcam_ref.yaw.position = self.tlm_dctl["attitude_euler_angles.yaw"]
stcam_ref.pitch.ctrl_mode = cam_cm_pb2.POSITION
stcam_ref.pitch.frame_of_ref = cam_for_pb2.NED
stcam_ref.pitch.position = self.tlm_dctl["attitude_euler_angles.pitch"]
stcam_ref.roll.ctrl_mode = cam_cm_pb2.POSITION
stcam_ref.roll.frame_of_ref = cam_for_pb2.NED
stcam_ref.roll.position = self.tlm_dctl["attitude_euler_angles.roll"]
def _timer_cb(self):
self.log.info("Hello world")
self.front_cam_pitch_index = 0
self.say_count += 1
msg = HelloGroundModeMessages.Event()
msg.count = self.say_count
gdnc_core.msghub_send(self.evt_sender, msg)
GUIDANCE_MODES = {"com.parrot.missions.samples.hello.ground": HelloGroundMode}
| [] |
2024-01-10 | Parrot-Developers/airsdk-samples | road_runner~fsup~mission.py | # Copyright (c) 2023 Parrot Drones SAS
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the Parrot Company nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# PARROT COMPANY BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
# fsup mandatory library
from fsup.genmission import AbstractMission
# msg_id: Get the full message Id from a protobuf Command/Event class and the
# name of an 'id' field.
from msghub_utils import msg_id
###################################################
# Stages and transitions
from fsup.missions.default.ground.stage import (
GROUND_STAGE as DEF_GROUND_STAGE,
)
from fsup.missions.default.hovering.stage import (
HOVERING_STAGE as DEF_HOVERING_STAGE,
)
from fsup.missions.default.landing.stage import (
LANDING_STAGE as DEF_LANDING_STAGE,
)
from fsup.missions.default.critical.stage import (
CRITICAL_STAGE as DEF_CRITICAL_STAGE,
)
from fsup.missions.default.mission import TRANSITIONS as DEF_TRANSITIONS
###################################################
# Messages
# Drone messages
import drone_controller.drone_controller_pb2 as dctl_msgs
import flight_supervisor.autopilot_pb2 as autopilot_msgs
import guidance.ascent_pb2 as gdnc_ascent_msgs
import flight_supervisor.internal_pb2 as internal_fsup_msgs
# AirSDK Service messages (cv_road)
import road_runner.cv_road.messages_pb2 as rr_service_msgs
# AirSDK guidance messages
import road_runner.guidance.look_down.messages_pb2 as gdnc_look_down_msgs
import road_runner.guidance.road_following.messages_pb2 as gdnc_road_following_msgs # noqa: E501
###################################################
# Overwritten Stages
from .flying.stage import FLYING_STAGE # noqa: E402
from .takeoff.stage import TAKEOFF_STAGE # noqa: E402
###################################################
# Messages channel
_CV_ROAD_SERVICE_CHANNEL = "unix:/tmp/road-runner-cv-road-service"
###################################################
# Mission
class Mission(AbstractMission):
def __init__(self, env):
super().__init__(env)
# AIRSDK GUIDANCE MODE <---> FSUP
# Look Down mode
self.gdnc_look_down_handler_messages = None
# Road following mode
self.gdnc_road_following_handler_messages = None
self.gdnc_road_following_messages_observer = None
# AIRSDK SERVICE (cv_road) <---> FSUP
self.airsdk_service_cv_road_messages_channel = None
self.airsdk_service_cv_road_handler_messages = None
self.airsdk_service_cv_road_messages_observer = None
def on_load(self):
# AIRSDK GUIDANCE MODE <---> FSUP
# AirSDK guidance channel is already set up by flight supervisor
# [self.mc.gdnc_channel]
# AIRSDK SERVICE (cv_road) <---> FSUP
self.airsdk_service_cv_road_messages_channel = (
self.mc.start_client_channel(
_CV_ROAD_SERVICE_CHANNEL
)
)
def on_unload(self):
# AIRSDK SERVICE (cv_road) <---> FSUP
self.mc.stop_channel(self.airsdk_service_cv_road_messages_channel)
self.airsdk_service_cv_road_messages_channel = None
def on_activate(self):
# AIRSDK GUIDANCE MODE <---> FSUP
# Look Down mode
self.gdnc_look_down_handler_messages = (
self.mc.attach_client_service_pair(
self.mc.gdnc_channel,
gdnc_look_down_msgs,
forward_events=True,
)
)
# Road following mode
self.gdnc_road_following_handler_messages = (
self.mc.attach_client_service_pair(
self.mc.gdnc_channel,
gdnc_road_following_msgs,
forward_events=True,
)
)
# Road following mode event forwarder used to start the computer vision
# service while in road_following mode
# AIRSDK GUIDANCE MODE (Road following) --> FSUP --> AIRSDK SERVICE (cv_road)
self.gdnc_road_following_messages_observer = (
self.gdnc_road_following_handler_messages.evt.observe(
{
msg_id(
gdnc_road_following_msgs.Event,
"road_following_enabled",
): lambda *args: self._send_cv_road_enable(True),
msg_id(
gdnc_road_following_msgs.Event,
"road_following_disabled",
): lambda *args: self._send_cv_road_enable(False),
}
)
)
# AIRSDK SERVICE (cv_road) ---> FSUP
self.airsdk_service_cv_road_handler_messages = (
self.mc.attach_client_service_pair(
self.airsdk_service_cv_road_messages_channel,
rr_service_msgs,
forward_events=True,
)
)
self.airsdk_service_cv_road_messages_observer = (
self.airsdk_service_cv_road_handler_messages.evt.observe(
{
msg_id(
rr_service_msgs.Event, "road_lost"
): lambda *args: self._send_cv_road_enable(False),
}
)
)
def on_deactivate(self):
# AIRSDK GUIDANCE
# Look Down
self.mc.detach_client_service_pair(self.gdnc_look_down_handler_messages) # noqa: E501
self.gdnc_look_down_handler_messages = None
# Road following
self.gdnc_road_following_messages_observer.unobserve()
self.gdnc_road_following_messages_observer = None
self.mc.detach_client_service_pair(self.gdnc_road_following_handler_messages) # noqa: E501
self.gdnc_road_following_handler_messages = None
# AIRSDK SERVICE (cv_road)
self.airsdk_service_cv_road_messages_observer.unobserve()
self.airsdk_service_cv_road_messages_observer = None
self.mc.detach_client_service_pair(self.airsdk_service_cv_road_handler_messages) # noqa: E501
self.airsdk_service_cv_road_handler_messages = None
self.airsdk_service_cv_road_messages_channel = None
def states(self):
return [
DEF_GROUND_STAGE,
TAKEOFF_STAGE,
DEF_HOVERING_STAGE,
FLYING_STAGE,
DEF_LANDING_STAGE,
DEF_CRITICAL_STAGE,
]
def transitions(self):
transitions = TRANSITIONS + DEF_TRANSITIONS
return transitions
def _send_cv_road_enable(self, enable):
self.airsdk_service_cv_road_handler_messages.cmd.sender.enable_cv(enable) # noqa: E501
self.log.info(f"receive message enable {enable}r")
Autopilot = lambda evt: msg_id(autopilot_msgs.Command, evt) # noqa: E731
Dctl = lambda evt: msg_id(dctl_msgs.Event, evt) # noqa: E731
GdncAscent = lambda evt: msg_id(gdnc_ascent_msgs.Event, evt) # noqa: E731
Internal = lambda evt: msg_id(internal_fsup_msgs.Event, evt) # noqa: E731
CvRoadService = lambda evt: msg_id(rr_service_msgs.Event, evt) # noqa: E731
GdncLookDown = lambda evt: msg_id(gdnc_look_down_msgs.Event, evt) # noqa: E731
GdncRoadFollowing = lambda evt: msg_id(
gdnc_road_following_msgs.Event, evt
) # noqa: E731, E501
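# State machine transition table: each entry is [triggering message, source state, target state].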
TRANSITIONS = [
# Overwritten transitions
[
Dctl("motors_ramping_done"),
"takeoff.normal.wait_ascent",
"takeoff.road_runner.ascent",
],
[
Dctl("motors_ramping_done"),
"takeoff.normal.wait_motor_ramping",
"takeoff.road_runner.ascent",
],
# New transitions
[GdncAscent("done"), "takeoff.road_runner.ascent", "flying.road_runner.look_down"], # noqa: E501
[
GdncAscent("done_without_immobility"),
"takeoff.road_runner.ascent",
"flying.road_runner.look_down",
],
[
GdncLookDown("done"),
"flying.road_runner.look_down",
"flying.road_runner.road_following",
],
# mission interrupted if:
    # - The drone loses the road
    # - The Road_following mode no longer receives telemetry sent by the
    #   cv_road service
    # - A horizontal, vertical or yaw command is received.
[CvRoadService("road_lost"), "flying.road_runner.road_following", "flying.manual"], # noqa: E501
[
GdncRoadFollowing("telemetry_missed_too_long"),
"flying.road_runner.road_following",
"flying.manual",
],
[
Internal("pcmd_horizontal_move"),
"flying.road_runner.road_following",
"flying.manual",
],
[Internal("pcmd_yaw"), "flying.road_runner.road_following", "flying.manual"],
[Internal("pcmd_vertical"), "flying.road_runner.road_following", "flying.manual"], # noqa: E501
]
| [] |
2024-01-10 | Parrot-Developers/airsdk-samples | road_runner~fsup~takeoff~road_runner.py | # Copyright (c) 2023 Parrot Drones SAS
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the Parrot Company nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# PARROT COMPANY BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
from fsup.genstate import guidance_modes
from fsup.missions.default.takeoff.normal import (
Ascent as DefaultAscent,
)
from fsup.missions.default.uid import UID
import os
import cfgreader
import colibrylite.estimation_mode_pb2 as cbry_est
import guidance.ascent_pb2 as gdnc_ascent_msgs
_STATES_TO_REMOVE = ["ascent"]
CONFIG_FILENAME = "etc/services/road_following.cfg"
def _config(field):
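    """Build a configuration object from a list of (config file path, field name) tuples."""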
cfg = cfgreader.build_config_start()
for (root, include) in field:
cfg = cfgreader.build_config_update(cfg, root, include)
return cfgreader.build_config_end(cfg)
@guidance_modes(UID + ".ascent")
class Ascent(DefaultAscent):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Get configuration values
field = [
(
os.path.join(
self.mission.env.get_product_cfg_dir(), CONFIG_FILENAME
),
"droneAltitude",
),
]
self.road_runner_ascent = _config(field)
def enter(self, msg):
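        # Attach the ascent guidance service, switch estimation to takeoff mode
        # and start the default ascent at the altitude read from the config file.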
self.gdnc_asc_svc = self.mc.attach_client_service_pair(
self.mc.gdnc_channel, gdnc_ascent_msgs, forward_events=True
)
self.mc.dctl.cmd.sender.set_estimation_mode(cbry_est.TAKEOFF)
self.set_guidance_mode(
"com.parrot.missions.default.ascent",
gdnc_ascent_msgs.Config(
type=gdnc_ascent_msgs.TYPE_DEFAULT,
altitude=self.road_runner_ascent.road_following.droneAltitude,
),
)
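# State description for the road_runner takeoff sub-state, starting in the
# custom Ascent state defined above.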
ROAD_RUNNER_STATE = {
"name": "road_runner",
"initial": "ascent",
"children": [
{
"name": "ascent",
"class": Ascent,
},
],
}
| [] |
2024-01-10 | oliversen/chatgpt-docstrings | bundled~tool~lsp_server.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Implementation of tool support over LSP."""
from __future__ import annotations
import asyncio
import json
import os
import pathlib
import re
import sys
from typing import Any, Literal, Optional
# **********************************************************
# Update sys.path before importing any bundled libraries.
# **********************************************************
def update_sys_path(path_to_add: str, strategy: str) -> None:
"""Add given path to `sys.path`."""
if path_to_add not in sys.path and os.path.isdir(path_to_add):
if strategy == "useBundled":
sys.path.insert(0, path_to_add)
elif strategy == "fromEnvironment":
sys.path.append(path_to_add)
# Ensure that we can import LSP libraries, and other bundled libraries.
update_sys_path(
os.fspath(pathlib.Path(__file__).parent.parent / "libs"),
"useBundled"
)
# **********************************************************
# Imports needed for the language server goes below this.
# **********************************************************
# pylint: disable=wrong-import-position,import-error
import lsprotocol.types as lsp
from pygls import server, uris, workspace
import lsp_jsonrpc as jsonrpc
from lsp_custom_types import TelemetryParams, TelemetryTypes
from lsp_progress import Progress, ProgressHundlers
class LanguageServer(server.LanguageServer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.lsp.progress_handlers = ProgressHundlers()
self.lsp.fm.add_builtin_feature(lsp.WINDOW_WORK_DONE_PROGRESS_CANCEL, self.progress_cancel)
def progress(self, *args, **kwargs):
return Progress(self.lsp, *args, **kwargs)
def progress_cancel(ls: server.LanguageServer,
params: lsp.WorkDoneProgressCancelParams):
ls.lsp.progress_handlers.get(params.token).cancel()
def apply_edit_async(
self, edit: lsp.WorkspaceEdit, label: Optional[str] = None
) -> lsp.WorkspaceApplyEditResponse:
"""Sends apply edit request to the client. Should be called with `await`"""
return self.lsp.send_request_async(
lsp.WORKSPACE_APPLY_EDIT,
lsp.ApplyWorkspaceEditParams(edit=edit, label=label)
)
def _send_telemetry(self, params: TelemetryParams):
self.send_notification(lsp.TELEMETRY_EVENT, params)
def send_telemetry_info(self, name: str, data: dict[str, str]):
params = TelemetryParams(TelemetryTypes.Info, name, data)
self._send_telemetry(params)
def send_telemetry_error(self, name: str, data: dict[str, str]):
params = TelemetryParams(TelemetryTypes.Error, name, data)
self._send_telemetry(params)
WORKSPACE_SETTINGS = {}
GLOBAL_SETTINGS = {}
MAX_WORKERS = 5
LSP_SERVER = LanguageServer(
name="chatgpt-docstrings", version="0.1", max_workers=MAX_WORKERS
)
TOOL_MODULE = "chatgpt-docstrings"
TOOL_DISPLAY = "ChatGPT: Docstring Generator"
# **********************************************************
# Required Language Server Initialization and Exit handlers.
# **********************************************************
@LSP_SERVER.feature(lsp.INITIALIZE)
def initialize(params: lsp.InitializeParams) -> None:
"""LSP handler for initialize request."""
log_to_output(f"CWD Server: {os.getcwd()}")
log_to_output(f"PID Server: {os.getpid()}")
paths = "\r\n ".join(sys.path)
log_to_output(f"sys.path used to run Server:\r\n {paths}")
GLOBAL_SETTINGS.update(
**params.initialization_options.get("globalSettings", {})
)
settings = params.initialization_options["settings"]
_update_workspace_settings(settings)
settings_output = json.dumps(settings,
indent=4,
ensure_ascii=False)
global_settings_output = json.dumps(GLOBAL_SETTINGS,
indent=4,
ensure_ascii=False)
log_to_output(f"Settings used to run Server:\r\n{settings_output}\r\n")
log_to_output(f"Global settings:\r\n{global_settings_output}\r\n")
@LSP_SERVER.feature(lsp.EXIT)
def on_exit(_params: Optional[Any] = None) -> None:
"""Handle clean up on exit."""
jsonrpc.shutdown_json_rpc()
@LSP_SERVER.feature(lsp.SHUTDOWN)
def on_shutdown(_params: Optional[Any] = None) -> None:
"""Handle clean up on shutdown."""
jsonrpc.shutdown_json_rpc()
def _get_global_defaults():
return {
**GLOBAL_SETTINGS,
"interpreter": GLOBAL_SETTINGS.get("interpreter", [sys.executable]),
}
def _update_workspace_settings(settings):
if not settings:
key = os.getcwd()
WORKSPACE_SETTINGS[key] = {
"cwd": key,
"workspaceFS": key,
"workspace": uris.from_fs_path(key),
**_get_global_defaults(),
}
return
for setting in settings:
key = uris.to_fs_path(setting["workspace"])
WORKSPACE_SETTINGS[key] = {
**setting,
"workspaceFS": key,
}
def _get_settings_by_path(file_path: pathlib.Path):
workspaces = {s["workspaceFS"] for s in WORKSPACE_SETTINGS.values()}
while file_path != file_path.parent:
str_file_path = str(file_path)
if str_file_path in workspaces:
return WORKSPACE_SETTINGS[str_file_path]
file_path = file_path.parent
setting_values = list(WORKSPACE_SETTINGS.values())
return setting_values[0]
def _get_document_key(document: workspace.Document):
if WORKSPACE_SETTINGS:
document_workspace = pathlib.Path(document.path)
workspaces = {s["workspaceFS"] for s in WORKSPACE_SETTINGS.values()}
# Find workspace settings for the given file.
while document_workspace != document_workspace.parent:
if str(document_workspace) in workspaces:
return str(document_workspace)
document_workspace = document_workspace.parent
return None
def _get_settings_by_document(document: workspace.Document | None):
if document is None or document.path is None:
return list(WORKSPACE_SETTINGS.values())[0]
key = _get_document_key(document)
if key is None:
# This is either a non-workspace file or there is no workspace.
key = os.fspath(pathlib.Path(document.path).parent)
return {
"cwd": key,
"workspaceFS": key,
"workspace": uris.from_fs_path(key),
**_get_global_defaults(),
}
return WORKSPACE_SETTINGS[str(key)]
# **********************************************************
# Generate docstring features start here
# **********************************************************
import openai
from code_parser import FuncParser, NotFuncException, Position, Range
@LSP_SERVER.command("chatgpt-docstrings.applyGenerate")
async def apply_generate_docstring(ls: server.LanguageServer,
args: list[lsp.TextDocumentPositionParams, str]):
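    # args: [0] text document position params, [1] OpenAI API key, [2] progress token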
uri = args[0]["textDocument"]["uri"]
openai_api_key = args[1]
progress_token = args[2]
document = ls.workspace.get_document(uri)
source = document.source
cursor = args[0]["position"]
cursor["line"] += 1
cursor = Position(*cursor.values())
settings = _get_settings_by_document(document)
openai_model = settings["openaiModel"]
prompt_pattern = settings["promptPattern"]
docstring_format = settings["docstringFormat"]
response_timeout = settings["responseTimeout"]
# get function source
try:
func = FuncParser(source, cursor)
except NotFuncException:
show_info("The cursor must be set inside the function.")
return
# format prompt
prompt = prompt_pattern.format(docstring_format=docstring_format,
function=func.code)
log_to_output(f"Used ChatGPT prompt:\n{prompt}")
    # get docstring
with ls.progress(progress_token) as progress:
task = asyncio.create_task(_get_docstring(openai_api_key, openai_model, prompt))
while 1:
if task.done():
break
if response_timeout == 0:
task.cancel()
show_warning("ChatGPT response timed out.")
return
if progress.cancelled:
task.cancel()
return
progress.report(f"Waiting for ChatGPT response ({response_timeout} secs)...")
await asyncio.sleep(1)
response_timeout -= 1
if task.exception():
raise task.exception()
docstring = task.result()
log_to_output(f"Received ChatGPT docstring:\n{docstring}")
# format docstring
docstring = _format_docstring(docstring, func.indent_level+1)
docstring = _match_line_endings(document, docstring)
    # define docstring position
if func.docstring_range:
docstring_pos = Range(func.suite, func.docstring_range.end)
else:
docstring_pos = Range(func.suite, func.suite)
# apply docstring
text_edits = _create_text_edits(docstring_pos, docstring)
workspace_edit = _create_workspace_edit(document, text_edits)
result = await ls.apply_edit_async(workspace_edit)
if not result.applied:
reason = result.failure_reason or \
"maybe you make changes to source code at generation time"
show_warning(f"Failed to add docstring to source code ({reason})")
ls.send_telemetry_error('applyEditWorkspaceFail', {'reason': reason})
async def _get_docstring(api_key: str,
model: Literal["gpt-3.5-turbo", "text-davinci-002"],
prompt: str) -> str:
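    """Request a docstring from the OpenAI API for the given prompt and model."""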
openai.api_key = api_key
if model == "gpt-3.5-turbo":
response = await openai.ChatCompletion.acreate(
model=model,
messages=[
{"role": "system",
"content": "When you generate a docstring, return me only a string that I can add to my code."},
{"role": "user", "content": prompt},
],
temperature=0,
)
docstring = response.choices[0].message.content
elif model == "text-davinci-002":
response = openai.Completion.create(
model=model,
prompt=prompt,
temperature=0,
max_tokens=1000,
)
docstring = response["choices"][0]["text"]
else:
raise Exception(
'Only models "gpt-3.5-turbo" and "text-davinci-002" are supported!'
)
return docstring
def _format_docstring(docstring: str, indent_level: int) -> str:
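    """Normalize the model output into a properly quoted and indented docstring."""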
# remove function source code including markdown tags
if docstring.strip().startswith(("def ", "async ", "```")):
match = re.search(r'""".*?"""', docstring, flags=re.DOTALL)
docstring = match.group() if match else docstring
# remove leading and trailing whitespaces, newlines, quotes
docstring = docstring.strip().strip('"""').strip("\r\n")
# remove indents
if docstring.startswith(" "):
lines = docstring.splitlines(True)
docstring = "".join([re.sub(r"^\s{4}", "", line) for line in lines])
# eol conversion to single format
docstring = "\n".join(docstring.splitlines())
# add quotes
docstring = f'"""{docstring}\n"""'
# add indents
indents = " "*indent_level*4
docstring = "".join([f"{indents}{line}" for line in docstring.splitlines(True)])
# add new line
docstring = f"\n{docstring}"
return docstring
def _create_text_edits(docstring_pos: Range, docstring: str) -> list[lsp.TextEdit]:
return [
lsp.TextEdit(
range=Range(
start=Position(
line=docstring_pos.start.line - 1,
character=docstring_pos.start.character,
),
end=Position(
line=docstring_pos.end.line - 1,
character=docstring_pos.end.character,
),
),
new_text=docstring,
)
]
def _create_workspace_edit(
document: lsp.Document, text_edits: list[lsp.TextEdit]
) -> lsp.WorkspaceEdit:
return lsp.WorkspaceEdit(
document_changes=[
lsp.TextDocumentEdit(
text_document=lsp.VersionedTextDocumentIdentifier(
uri=document.uri,
version=0 if document.version is None else document.version,
),
edits=text_edits,
)
]
)
def _get_line_endings(lines: list[str]) -> str:
"""Returns line endings used in the text."""
try:
if lines[0][-2:] == "\r\n":
return "\r\n"
return "\n"
except Exception: # pylint: disable=broad-except
return None
def _match_line_endings(document: workspace.Document, text: str) -> str:
"""Ensures that the edited text line endings matches the document line endings."""
expected = _get_line_endings(document.source.splitlines(keepends=True))
actual = _get_line_endings(text.splitlines(keepends=True))
if actual == expected or actual is None or expected is None:
return text
return text.replace(actual, expected)
# **********************************************************
# Generate docstring features ends here
# **********************************************************
# *****************************************************
# Logging and notification.
# *****************************************************
def log_to_output(message: str,
msg_type: lsp.MessageType = lsp.MessageType.Log) -> None:
LSP_SERVER.show_message_log(message, msg_type)
def show_error(message: str) -> None:
log_to_output(message, lsp.MessageType.Error)
LSP_SERVER.show_message(message, lsp.MessageType.Error)
def show_warning(message: str) -> None:
log_to_output(message, lsp.MessageType.Warning)
LSP_SERVER.show_message(message, lsp.MessageType.Warning)
def show_info(message: str) -> None:
log_to_output(message, lsp.MessageType.Info)
LSP_SERVER.show_message(message, lsp.MessageType.Info)
# *****************************************************
# Start the server.
# *****************************************************
if __name__ == "__main__":
LSP_SERVER.start_io()
| [
"promptPattern",
"When you generate a docstring, return me only a string that I can add to my code."
] |
2024-01-10 | johnjosephhorton/homo_silicus | experiments~zeckhauser~zeckhauser_sqb.py | import os
import dotenv
import json
import re
import time
import sqlite3
import random
import pprint
from collections import Counter
import openai
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv('OPENAI_API_KEY')
from Scenario import Scenario
from Subject import Subject
from Experiment import Experiment
from Views import Views
from Views import View
def flatten(lst):
result = []
for i in lst:
if isinstance(i, list):
result.extend(flatten(i))
else:
result.append(i)
return result
print("Creating views")
views = Views()
views.create_views(100)
print(views.tally())
print("Creating subjects")
subjects = [Subject(score = v.score, view = v.view) for v in views]
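# Each option is the percentage of the budget allocated to auto safety (the rest
# goes to highway safety); one scenario is created per status quo value plus one
# with no status quo.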
options = (70, 30, 60, 50)
scenarios = [Scenario(options, status_quo) for status_quo in list(options) + [None]]
experiment = Experiment(subjects, scenarios, "text-ada-001")
print("Running the experiment")
experiment.run_all()
print("Writing data")
experiment.write_data("../data/zeckhauser_modular.db")
| [] |
2024-01-10 | johnjosephhorton/homo_silicus | experiments~charness_rabin~charness_rabin.py | import os
import dotenv
import json
import re
import openai
import time
import sqlite3
from dotenv import load_dotenv
load_dotenv()
from threading import Thread
openai.api_key = os.getenv('OPENAI_API_KEY')
MAX_FAILURES = 4
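# Each scenario maps a label to ((A_left, you_left), (A_right, you_right)) payoffs;
# the model chooses between the Left and Right allocations for itself and Person A.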
scenarios = dict({
"Berk29": ((400, 400), (750, 400)),
"Barc2": ((400, 400), (750, 375)),
"Berk23": ((800, 200), (0, 0)),
"Barc8": ((300, 600), (700, 500)),
"Berk15": ((200, 700), (600, 600)),
"Berk26":((0, 800), (400, 400))
})
def create_prompt(scenario, personality):
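    """Build the binary allocation-choice prompt for one scenario and persona."""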
left_a, left_b = scenario[0]
right_a, right_b = scenario[1]
return f"""You are deciding on allocation for yourself and another person, Person A.
{personality}
Option Left: You get ${left_b}, Person A gets ${left_a}
Option Right: You get ${right_b}, Person A gets ${right_a}
What do you choose, with one word [Left, Right]?"""
def get_decision(scenario, personality, scenario_name, model):
prompt = create_prompt(scenario, personality)
failure_count = 0
while True and failure_count < MAX_FAILURES:
try:
choice_raw = openai.Completion.create(
model= model,
prompt = prompt,
max_tokens=150,
temperature=0
)
choice_text = choice_raw['choices'][0]['text'].strip()
break
except openai.error.ServiceUnavailableError as e:
print(f"Experiment error: {e}")
failure_count += 1
time.sleep(30)
return dict({"choice_raw": choice_raw,
"choice_text": choice_text,
"choice": "Left" if "left" in choice_text.lower() else "Right",
"scenario": scenario,
"personality":personality,
"model":model,
"scenario_name":scenario_name,
"prompt":prompt})
class GetDictatorChoiceThread(Thread):
def __init__(self, scenario, personality, scenario_name, model):
super().__init__()
self.scenario = scenario
self.personality = personality
self.scenario_name = scenario_name
self.model = model
def run(self):
self.decision = get_decision(scenario = self.scenario, personality = self.personality, scenario_name = self.scenario_name, model = self.model)
models_described = dict({
"text-davinci-003": ("Most capable GPT-3 model. Can do any task the other models can do, often with higher quality, longer output and better instruction-following. Also supports inserting completions within text.",
"4,000 tokens",
"Up to Jun 2021"),
"text-curie-001": ("Very capable, but faster and lower cost than Davinci.",
"2,048 tokens",
"Up to Oct 2019"),
"text-babbage-001": ("Capable of straightforward tasks, very fast, and lower cost.",
"2,048 tokens",
"Up to Oct 2019"),
"text-ada-001": ("Capable of very simple tasks, usually the fastest model in the GPT-3 series, and lowest cost.",
"2,048 tokens",
"Up to Oct 2019")}
)
models = models_described.keys()
personalities = [
"",
"You only care about fairness between players",
"You only care about your own pay-off",
"You only care about the total pay-off of both players"]
threads = []
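# Launch one thread per (model, personality, scenario) combination.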
for model in models:
for personality in personalities:
for scenario_name, scenario in scenarios.items():
thread = GetDictatorChoiceThread(scenario = scenario, personality = personality, scenario_name = scenario_name, model = model)
thread.start()
threads.append(thread)
observations = []
for thread in threads:
thread.join()
observations.append(thread.decision)
def write_data(database_string, observations, flush = False):
conn = sqlite3.connect(database_string)
cursor = conn.cursor()
if flush:
cursor.execute("DROP TABLE IF EXISTS responses")
cursor.execute("DROP TABLE IF EXISTS experiments")
cursor.execute("CREATE TABLE IF NOT EXISTS experiments (note TEXT)")
cursor.execute("INSERT INTO experiments VALUES (?)", (f"Experiment:{time.time()}",))
experiment_id = cursor.lastrowid
cursor.execute("CREATE TABLE IF NOT EXISTS responses (experiment_id INTEGER, observation TEXT, FOREIGN KEY(experiment_id) REFERENCES experiments(id))")
for obs in observations:
cursor.execute("INSERT INTO responses VALUES (?,?)", (experiment_id, json.dumps(obs)))
conn.commit()
write_data("../../data/charness_rabin.db", observations, flush = False)
| [] |
2024-01-10 | johnjosephhorton/homo_silicus | experiments~horton~horton.py | import os
import dotenv
import json
import re
import openai
import time
import sqlite3
import random
import copy
from dotenv import load_dotenv
load_dotenv()
MAX_FAILURES = 3
openai.api_key = os.getenv('OPENAI_API_KEY')
def exclude_non_serializable_entries(dictionary):
new_dictionary = {}
for key, value in dictionary.items():
try:
json.dumps(value)
new_dictionary[key] = value
except TypeError:
pass
return new_dictionary
###############
## Create a job
###############
class Job:
def __init__(self, role, budget_language):
self.role = role
self.budget_language = budget_language
self.ats = []
self.record_id = None
def toJSON(self):
return exclude_non_serializable_entries(self.__dict__)
def add_applicants(self, applicants):
counter = 1
for applicant in applicants:
self.ats.extend([f"Person {counter}: {applicant.description}"])
counter += 1
self.num_candidates = len(self.ats)
def create_prompt(self):
candidate_list = "\n".join(self.ats)
prompt = f"""
You are hiring for the role "{self.role}."
{self.budget_language}
You have {self.num_candidates} candidates.
{candidate_list}
Who would you hire? You have to pick one.
"""
return prompt
class Worker:
def __init__(self, education, experience, wage_ask):
self.education = education
self.experience = experience
self.wage_ask = wage_ask
def toJSON(self):
return exclude_non_serializable_entries(self.__dict__)
@property
def description(self):
return f"Has {self.experience} year(s) of experience in this role. Requests ${self.wage_ask}/hour."
class Scenario:
def __init__(self, job, applicants, min_wage, pair_index):
self.job = job
self.applicants = applicants
self.min_wage = min_wage
self.job.add_applicants(self.applicants)
self.applications_json = [a.toJSON() for a in applicants]
self.job_json = job.toJSON()
self.pair_index = pair_index
def toJSON(self):
return exclude_non_serializable_entries(self.__dict__)
class Decision:
def __init__(self, scenario):
self.scenario = scenario
self.prompt = scenario.job.create_prompt()
def toJSON(self):
return exclude_non_serializable_entries(self.__dict__)
def get_decision(self):
failure_count = 0
while True and failure_count < MAX_FAILURES:
try:
self.choice_raw = openai.Completion.create(
model="text-davinci-003",
prompt = self.prompt,
max_tokens=150,
temperature=0
)
self.choice = self.choice_raw['choices'][0]['text'].strip()
break
except openai.error.ServiceUnavailableError as e:
print(f"Experiment error: {e}")
failure_count += 1
time.sleep(30)
self.hired_person_raw = openai.Completion.create(
model="text-davinci-003",
prompt = "In this text find who was hired: " + self.choice + "Person: ",
max_tokens=150,
temperature=0)
self.hired_person = self.hired_person_raw['choices'][0]['text'].strip()
return dict({"choice_raw": self.choice_raw,
"choice":self.choice,
"hired_person_raw": self.hired_person_raw,
"hired_person" : self.hired_person
})
## Construct workers
experience_levels = [0, 1]
education_levels = ["high school graduate"]
jobs = ["Dishwasher", "Custodian", "Home Health Aide", "Waiter", "Laborer", "Parking Lot Attendant"]
wage_asks = [12, 13, 14, 15, 16, 17, 18, 19, 20]
min_wages = [0, 15]
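# Treatment: a $15 minimum wage. When a candidate's wage ask falls below the
# minimum, it is raised to the minimum before the hiring prompt is built.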
workers = []
for education in education_levels:
for wage_ask in wage_asks:
for experience in experience_levels:
w = Worker(education = education, experience = experience, wage_ask = wage_ask)
workers.append(w)
from threading import Thread
class GetDecisionThread(Thread):
def __init__(self, scenario):
super().__init__()
self.scenario = scenario
def run(self):
self.decision_obj = Decision(self.scenario)
while True:
try:
self.decision = self.decision_obj.get_decision()
break
except:
time.sleep(3)
self.result = dict({"scenario": self.scenario.toJSON(),
"decision_obj": self.decision_obj.toJSON(),
"decision":self.decision})
def write_data(database_string, observations, flush = False):
conn = sqlite3.connect(database_string)
cursor = conn.cursor()
if flush:
cursor.execute("DROP TABLE IF EXISTS responses")
cursor.execute("DROP TABLE IF EXISTS experiments")
cursor.execute("CREATE TABLE IF NOT EXISTS experiments (note TEXT)")
cursor.execute("INSERT INTO experiments VALUES (?)", (f"Experiment:{time.time()}",))
experiment_id = cursor.lastrowid
cursor.execute("CREATE TABLE IF NOT EXISTS responses (experiment_id INTEGER, observation TEXT, FOREIGN KEY(experiment_id) REFERENCES experiments(id))")
for obs in observations:
cursor.execute("INSERT INTO responses VALUES (?,?)", (experiment_id, json.dumps(obs)))
conn.commit()
if False:
job = "Dishwasher"
min_wage = 0
pair_index = 1
J = Job(job, "The typical hourly rate for this role is $12/hour.")
a = random.choice(workers)
b = random.choice(workers)
applicants = [a,b]
s = Scenario(job = J, applicants = applicants, min_wage = min_wage, pair_index = pair_index)
print(s.job.create_prompt())
max_attempts = 5
num_hiring_scenarios = 30
threads = []
pair_index = -1
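# For each wage ask of the experienced candidate, draw repeated applicant pairs
# and evaluate the same pair under each minimum wage condition.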
for wage_ask in [12, 13, 14, 15, 16, 17]:
job = "Dishwasher"
for _ in range(num_hiring_scenarios):
J = Job(job, "The typical wage for this job is $12/hour.")
a_base = Worker(education = "", experience = 1, wage_ask = wage_ask)
b_base = Worker(education = "", experience = 0, wage_ask = 13)
pair_index += 1
for min_wage in min_wages:
a = copy.deepcopy(a_base)
b = copy.deepcopy(b_base)
if a.wage_ask < min_wage:
a.wage_ask = min_wage
if b.wage_ask < min_wage:
b.wage_ask = min_wage
applicants = [a,b]
s = Scenario(job = J, applicants = applicants, min_wage = min_wage, pair_index = pair_index)
wait = 1
attempts = 0
while True and attempts < max_attempts:
try:
thread = GetDecisionThread(s)
thread.start()
break
except Exception as e:
print("Rate limit")
time.sleep(wait)
wait = 2 * wait
attempts += 1
threads.append(thread)
observations = []
for thread in threads:
thread.join()
observations.append(thread.result)
write_data("../../data/horton.db", observations, flush = False)
| [
"Person: ",
"In this text find who was hired: "
] |
2024-01-10 | johnjosephhorton/homo_silicus | experiments~zeckhauser~Experiment.py | import openai
import sqlite3
import time
import json
from threading import Thread
class RunExperimentThread(Thread):
def __init__(self, subject, scenario, model = "text-davinci-003"):
super().__init__()
self.subject = subject
self.scenario = scenario
self.model = model
def run(self):
results= Experiment._run(self.subject, self.scenario, self.model)
self.observation = dict({
'subject':self.subject.toJSON(),
'scenario':self.scenario.toJSON(),
'result': results,
})
class Experiment:
"""Can we also set model, temperature & tokens here"""
def __init__(self, subjects, scenarios, model = "text-davinci-003"):
self.subjects = subjects
self.scenarios = scenarios
self.model = model
self.observations = []
def toJSON(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
@staticmethod
def get_response(prompt, temperature = 0.0, model="text-davinci-003", max_tokens = 150):
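        """Query the completions endpoint once and return the parsed choice plus call metadata."""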
choice_raw = openai.Completion.create(
model = model,
prompt = prompt,
max_tokens = max_tokens,
temperature= temperature
)
choice = choice_raw['choices'][0]['text'].strip()
return dict({"choice_raw": choice_raw, "choice":choice, "model":model, "temperature":temperature, "max_tokens":max_tokens})
@staticmethod
def get_answer(prompt):
"""Parses the output to find the choice"""
choice_raw = openai.Completion.create(
model="text-davinci-003",
prompt = f"Response was: \"{prompt}\" Return single letter for choice that was selected (e.g., a, b, c...):",
max_tokens=150,
temperature=0
)
return choice_raw['choices'][0]['text'].strip()
@staticmethod
def _run(subject, scenario, model = "text-davinci-003"):
d = scenario.letters_to_budget_shares()
prompt = scenario.gen_prompt(subject.view)
while True:
try:
results = Experiment.get_response(prompt, temperature = 0.0, model = model)
answer = Experiment.get_answer(results['choice']).lower()
if answer in d.keys():
preferred_auto_share = d[answer]
else:
print("Could not parse")
preferred_auto_share = -1
break
except openai.error.ServiceUnavailableError as e:
print(f"Experiment error: {e}")
time.sleep(30)
return dict({"results":results, "prompt":prompt, 'answer':answer, 'preferred_auto_share':preferred_auto_share})
def run_all(self, debug = False):
print(f"Running {len(self.subjects) * len(self.scenarios)} scenarios")
counter = 0
threads = []
for subject in self.subjects:
for scenario in self.scenarios:
counter += 1
if debug:
results = "here are some results"
answer = 'a'
else:
thread = RunExperimentThread(subject = subject, scenario = scenario, model = self.model)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
self.observations.append(thread.observation)
def write_data(self, database_string, flush = False):
conn = sqlite3.connect(database_string)
cursor = conn.cursor()
if flush:
cursor.execute("DROP TABLE IF EXISTS responses")
cursor.execute("DROP TABLE IF EXISTS experiments")
cursor.execute("CREATE TABLE IF NOT EXISTS experiments (note TEXT)")
cursor.execute("INSERT INTO experiments VALUES (?)", (f"Experiment:{time.time()}",))
experiment_id = cursor.lastrowid
cursor.execute("CREATE TABLE IF NOT EXISTS responses (experiment_id INTEGER, observation TEXT, FOREIGN KEY(experiment_id) REFERENCES experiments(id))")
for obs in self.observations:
cursor.execute("INSERT INTO responses VALUES (?,?)", (experiment_id, json.dumps(obs)))
conn.commit()
| [
"Response was: \"PLACEHOLDER\" Return single letter for choice that was selected (e.g., a, b, c...):"
] |
2024-01-10 | johnjosephhorton/homo_silicus | experiments~zeckhauser~Scenario.py | import os
import dotenv
import json
import re
import time
import sqlite3
import random
import openai
from dotenv import load_dotenv
load_dotenv()
class Scenario:
def __init__(self, options, status_quo = None):
self.options = options
self.status_quo = status_quo
self.base_prompt = """The National Highway Safety Commission is deciding how to allocate its budget between two safety research programs: i) improving automobile safety (bumpers, body, gas tank configurations, seatbelts) and ii) improving the safety of interstate highways (guard rails, grading, highway interchanges, and implementing selective reduced speed limits).
"""
header = "abcdefghijklmnopqrstuvwxyz"
self.ask = f"Please choose your most preferred option in light of your views {[header[h] for h in range(len(options))]}:"
def __repr__(self):
return json.dumps(dict({'options':self.options, 'status_quo':self.status_quo}))
def toJSON(self):
return dict({'options':self.options, 'status_quo':self.status_quo})
def create_option(self,auto):
return f"Allocate {auto}% to auto safety and {100 - auto}% to highway safety"
def create_option_status_quo(self, auto):
if self.status_quo > auto:
return f"Decrease auto program by {self.status_quo - auto}% of budget and raise the highway program by like amount"
if self.status_quo == auto:
return f"Maintain present budget amounts for the programs"
if self.status_quo < auto:
return f"Decrease the highway program by {auto - self.status_quo}% of budget and raise the auto program by like amount"
def state_status_quo(self, auto):
return f"The current budget allocation is {auto}% to auto safety and {100-auto}% to highway safety."
def letters_to_budget_shares(self):
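        """Map answer letters ('a', 'b', ...) to their auto-safety budget shares."""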
headers = "abcdefghijklmnopqrstuvwxyz"
return dict({headers[i]:o for i,o in enumerate(self.options)})
def multiple_choice(self, views):
"""Creates the scenario."""
headers = "abcdefghijklmnopqrstuvwxyz"
option_text = []
preamble = ""
if self.status_quo:
preamble = self.state_status_quo(self.status_quo) + "\n\n"
choice_text = [self.create_option_status_quo(o) for o in self.options]
else:
choice_text = [self.create_option(o) for o in self.options]
with_numbers = [h + ") " + choice for h, choice in zip(headers[:len(choice_text)], choice_text)]
self.prompt = self.base_prompt + "\n\n" + preamble + "\n They are considering the following options:\n\n" + "\n".join(with_numbers) + "\n\n" + f"Your own views: {views}" + "\n\n" + self.ask
return dict({'prompt':self.prompt, 'options':self.options, 'choice_text': choice_text, 'status_quo':self.status_quo})
def gen_prompt(self, views):
self.multiple_choice(views)
return self.prompt
| [] |
2024-01-10 | johnjosephhorton/homo_silicus | experiments~new_zeckhauser~GenScenario.py | import openai
import sqlite3
import time
import json
import os
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv('OPENAI_API_KEY')
from threading import Thread
def get_topics(n):
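    """Ask the chat model for n policy areas, returned as JSON with the key 'policy_areas'."""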
messages=[{"role": "user", "content": f"Get a list of policy areas. For example transportation, medical devices, national defense, and so on. Return {n} such topics in a JSON list, with a single key 'policy_areas'"}]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
max_tokens=500,
temperature=0.2,
messages = messages)
return response
def get_new_example(example, policy_area, model = "gpt-3.5-turbo"):
messages=[{"role": "user", "content": f"Create a new scenario with the structure as this one, but about {policy_area}. Keep JSON formatting: {json.dumps(example)}"}]
response = openai.ChatCompletion.create(
model= model, #"gpt-4",
max_tokens=100,
temperature=0.2,
messages = messages)
return response
if __name__ == "__main__":
example = dict({
"body":"The National Highway Safety Commission",
"option1":"improving automobile safety (bumpers, body, gas tank configurations, seatbelts) ",
"option1_short":"auto safety",
"option2":"improving the safety of interstate highways (guard rails, grading, highway interchanges, and implementing selective reduced speed limits)",
"option2_short":"highway safety"
})
# R = get_new_example(example, "national defense")
# outcome = R['choices'][0]['message']['content']
# print(outcome)
# R = get_new_example(example, "medical devices")
scenarios = []
raw_topics = get_topics(10)['choices'][0]['message']['content']
topics = json.loads(raw_topics)
print(topics)
for topic in topics['policy_areas']:
print(topic)
R = get_new_example(example, topic)
scenarios.append(R)
outcome = R['choices'][0]['message']['content']
print(outcome)
| [
"Get a list of policy areas. For example transportation, medical devices, national defense, and so on. Return PLACEHOLDER such topics in a JSON list, with a single key 'policy_areas'"
] |
2024-01-10 | johnjosephhorton/homo_silicus | experiments~horton~hiring_scenarios.py | import os
import dotenv
import json
import re
import openai
import time
import sqlite3
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv('OPENAI_API_KEY')
def exclude_non_serializable_entries(dictionary):
new_dictionary = {}
for key, value in dictionary.items():
try:
json.dumps(value)
new_dictionary[key] = value
except TypeError:
pass
return new_dictionary
###############
## Create a job
###############
class Job:
def __init__(self, role, budget_language):
self.role = role
self.budget_language = budget_language
self.ats = []
self.record_id = None
def toJSON(self):
return exclude_non_serializable_entries(self.__dict__)
def add_applicants(self, applicants):
counter = 1
for applicant in applicants:
self.ats.extend([f"Person {counter}: {applicant.description}"])
counter += 1
self.num_candidates = len(self.ats)
def create_prompt(self):
candidate_list = "\n".join(self.ats)
prompt = f"""
You are hiring for the role "{self.role}."
{self.budget_language}
You have {self.num_candidates} candidates.
{candidate_list}
Who would you hire? You have to pick one.
"""
return prompt
class Worker:
def __init__(self, education, major, experience, wage_ask):
self.education = education
self.major = major
self.experience = experience
self.wage_ask = wage_ask
def toJSON(self):
return exclude_non_serializable_entries(self.__dict__)
@property
def description(self):
return f"An {self.major} {self.education} graduate. Has {self.experience}. Requests ${self.wage_ask}/hour."
class Scenario:
def __init__(self, job, applicants):
self.job = job
self.applicants = applicants
self.job.add_applicants(self.applicants)
self.applications_json = [a.toJSON() for a in applicants]
self.job_json = job.toJSON()
def toJSON(self):
return exclude_non_serializable_entries(self.__dict__)
class Decision:
def __init__(self, scenario):
self.scenario = scenario
self.prompt = scenario.job.create_prompt()
def toJSON(self):
return exclude_non_serializable_entries(self.__dict__)
def get_decision(self):
self.choice_raw = openai.Completion.create(
model="text-davinci-003",
prompt = self.prompt,
max_tokens=150,
temperature=0
)
self.choice = self.choice_raw['choices'][0]['text'].strip()
self.hired_person_raw = openai.Completion.create(
model="text-davinci-003",
prompt = "In this text find who was hired: " + self.choice + "Person: ",
max_tokens=150,
temperature=0)
self.hired_person = self.hired_person_raw['choices'][0]['text'].strip()
return dict({"choice_raw": self.choice_raw,
"choice":self.choice,
"hired_person_raw": self.hired_person_raw,
"hired_person" : self.hired_person})
jobs = ["Dishwasher", "Salesperson"]
more_experienced_wage_offers = [14, 15, 16, 17]
from threading import Thread
class GetDecisionThread(Thread):
def __init__(self, scenario):
super().__init__()
self.scenario = scenario
def run(self):
self.decision_obj = Decision(self.scenario)
self.decision = self.decision_obj.get_decision()
self.result = dict({"scenario": self.scenario.toJSON(),
"decision_obj": self.decision_obj.toJSON(),
"decision":self.decision})
threads = []
for job in jobs:
print(job)
for wage in more_experienced_wage_offers:
print(wage)
J = Job(job, "You have a limited budget")
a = Worker("CS", "MIT", "some labor market experience", 15)
b = Worker("CS", "MIT", "extensive labor market experience", wage)
applicants = [a,b]
s = Scenario(J, applicants)
thread = GetDecisionThread(s)
thread.start()
threads.append(thread)
#d = Decision(s)
#d.get_decision()
def write_data(database_string, observations, flush = False):
conn = sqlite3.connect(database_string)
cursor = conn.cursor()
if flush:
cursor.execute("DROP TABLE IF EXISTS responses")
cursor.execute("DROP TABLE IF EXISTS experiments")
cursor.execute("CREATE TABLE IF NOT EXISTS experiments (note TEXT)")
cursor.execute("INSERT INTO experiments VALUES (?)", (f"Experiment:{time.time()}",))
experiment_id = cursor.lastrowid
cursor.execute("CREATE TABLE IF NOT EXISTS responses (experiment_id INTEGER, observation TEXT, FOREIGN KEY(experiment_id) REFERENCES experiments(id))")
for obs in observations:
cursor.execute("INSERT INTO responses VALUES (?,?)", (experiment_id, json.dumps(obs)))
conn.commit()
observations = []
for thread in threads:
thread.join()
observations.append(thread.result)
write_data("../data/horton.db", observations, flush = False)
| [
"Person: ",
"In this text find who was hired: "
] |
2024-01-10 | johnjosephhorton/homo_silicus | experiments~kkt~kkt.py | import os
import dotenv
import json
import re
import openai
import time
import sqlite3
from dotenv import load_dotenv
load_dotenv()
from threading import Thread
def create_prompt(new_price, politics, neutral):
if neutral:
store_action = "changes the price to"
else:
store_action = "raises the price to"
prompt = f"""A hardware store has been selling snow shovels for $15. The morning after a large snowstorm, the store {store_action} ${new_price}.
Please rate this action as:
1) Completely Fair
2) Acceptable
3) Unfair
4) Very Unfair
You are a {politics}.
What is your choice [1, 2, 3, or 4]:"""
return prompt
openai.api_key = os.getenv('OPENAI_API_KEY')
MAX_FAILURES = 4
def get_decision(new_price, politics, neutral, model = "text-davinci-003"):
prompt = create_prompt(new_price, politics, neutral)
failure_count = 0
while True and failure_count < MAX_FAILURES:
try:
choice_raw = openai.Completion.create(
model= model,
prompt = prompt,
max_tokens=150,
temperature=0
)
choice_text = choice_raw['choices'][0]['text'].strip()
break
except openai.error.ServiceUnavailableError as e:
print(f"Experiment error: {e}")
failure_count += 1
time.sleep(30)
return dict({"choice_raw": choice_raw,
"choice_text": choice_text,
"choice": choice_text,
"new_price": new_price,
"neutral": neutral,
"politics":politics,
"model":model,
"prompt":prompt})
class GetKKTChoiceThread(Thread):
def __init__(self, new_price, politics, neutral, model):
super().__init__()
self.new_price = new_price
self.politics = politics
        self.neutral = neutral
self.model = model
def run(self):
self.decision = get_decision(new_price = self.new_price, politics = self.politics, neutral = self.neutral, model = self.model)
models_described = dict({
"text-davinci-003": ("Most capable GPT-3 model. Can do any task the other models can do, often with higher quality, longer output and better instruction-following. Also supports inserting completions within text.",
"4,000 tokens",
"Up to Jun 2021"),
"text-curie-001": ("Very capable, but faster and lower cost than Davinci.",
"2,048 tokens",
"Up to Oct 2019"),
"text-babbage-001": ("Capable of straightforward tasks, very fast, and lower cost.",
"2,048 tokens",
"Up to Oct 2019"),
"text-ada-001": ("Capable of very simple tasks, usually the fastest model in the GPT-3 series, and lowest cost.",
"2,048 tokens",
"Up to Oct 2019")}
)
models = ["text-davinci-003"]
political_views = [
"socialist",
"leftist",
"liberal",
"moderate",
"liberterian",
"conservative",
]
new_prices = [16, 20, 40, 100]
threads = []
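# Run every combination of model, post-storm price, political identity and
# neutral vs. loaded wording ("changes" vs. "raises" the price).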
for model in models:
for new_price in new_prices:
for politics in political_views:
            for neutral in [True, False]:
                thread = GetKKTChoiceThread(new_price = new_price, politics = politics, neutral = neutral, model = model)
thread.start()
threads.append(thread)
observations = []
for thread in threads:
thread.join()
observations.append(thread.decision)
def write_data(database_string, observations, flush = False):
conn = sqlite3.connect(database_string)
cursor = conn.cursor()
if flush:
cursor.execute("DROP TABLE IF EXISTS responses")
cursor.execute("DROP TABLE IF EXISTS experiments")
cursor.execute("CREATE TABLE IF NOT EXISTS experiments (note TEXT)")
cursor.execute("INSERT INTO experiments VALUES (?)", (f"Experiment:{time.time()}",))
experiment_id = cursor.lastrowid
cursor.execute("CREATE TABLE IF NOT EXISTS responses (experiment_id INTEGER, observation TEXT, FOREIGN KEY(experiment_id) REFERENCES experiments(id))")
for obs in observations:
cursor.execute("INSERT INTO responses VALUES (?,?)", (experiment_id, json.dumps(obs)))
conn.commit()
write_data("../../data/kkt.db", observations, flush = False)
| [
"A hardware store has been selling snow shovels for $15. The morning after a large snowstorm, the store PLACEHOLDER $PLACEHOLDER. \n\nPlease rate this action as:\n1) Completely Fair \n2) Acceptable\n3) Unfair \n4) Very Unfair\n\nYou are a PLACEHOLDER.\nWhat is your choice [1, 2, 3, or 4]:"
] |
2024-01-10 | spiron09/gpt-st | myapp.py | from openai import OpenAI
import streamlit as st
import base64
from PIL import Image
with st.sidebar:
openai_api_key = st.text_input("OpenAI API Key", key="chatbot_api_key", type="password")
st.title("💬 Chatbot")
st.caption("🚀 A streamlit chatbot powered by OpenAI LLM")
if "messages" not in st.session_state:
st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you?"}]
for msg in st.session_state.messages:
st.chat_message(msg["role"]).write(msg["content"])
if prompt := st.chat_input():
if not openai_api_key:
st.info("Please add your OpenAI API key to continue.")
st.stop()
client = OpenAI(api_key=openai_api_key)
st.session_state.messages.append({"role": "user", "content": prompt})
st.chat_message("user").write(prompt)
response = client.chat.completions.create(model="gpt-4-1106-preview", messages=st.session_state.messages)
msg = response.choices[0].message.content
st.session_state.messages.append({"role": "assistant", "content": msg})
st.chat_message("assistant").write(msg)
| [
"How can I help you?"
] |
2024-01-10 | Goy25/anih_api | chat~anih_gpt.py | import openai
openai.api_key = "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"
historial = [{"role": "user", "content": "Hola, necesito que actues como psicologo"}]
def mandar_mensaje(mensaje: str):
global historial
historial.append({"role": "user", "content": mensaje})
respuesta = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=historial,
)
historial.append({"role": "assistant", "content": respuesta["choices"][0]["message"]["content"]})
return respuesta["choices"][0]["message"]["content"] | [
"content",
"Hola, necesito que actues como psicologo"
] |
2024-01-10 | sujal-goswami/Flirt-o-Matic | app1.py | from flask import Flask, render_template, request
import openai
import os
from dotenv import load_dotenv
import time
load_dotenv() # Load variables from .env file into environment
app = Flask(__name__)
# Set your OpenAI GPT-3.5 API key as an environment variable
openai.api_key = os.getenv("OPENAI_API_KEY") # Replace "OPENAI_API_KEY" with your actual API key
# Define the minimum time interval between API requests (in seconds)
MINIMUM_INTERVAL = 15 # Example: 15 seconds
last_request_time = None
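# Simple client-side throttle: generate_pickup_line waits so that successive API
# calls are at least MINIMUM_INTERVAL seconds apart.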
def generate_pickup_line(user_input):
global last_request_time
if last_request_time is not None:
elapsed_time = time.time() - last_request_time
if elapsed_time < MINIMUM_INTERVAL:
time.sleep(MINIMUM_INTERVAL - elapsed_time)
    # gpt-3.5-turbo is a chat model, so it must be called via the chat
    # completions endpoint rather than the legacy Completion endpoint.
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": user_input}],
        max_tokens=50,  # Adjust as needed
        temperature=0.7,
        n=1,
        stop=None,
    )
    last_request_time = time.time()
    pickup_line = response.choices[0].message.content.strip()
    return pickup_line
@app.route("/", methods=["GET", "POST"])
def home():
if request.method == "POST":
user_input = request.form.get("user_input")
pickup_line = generate_pickup_line(user_input)
return render_template("index.html", pickup_line=pickup_line)
return render_template("index.html")
if __name__ == "__main__":
app.run(debug=True) | [] |
2024-01-10 | pnkr01/bankathon-api | python-server~provider~CV_Analyzer.py | import openai
def analyse_cv(job_description, skills, resume_text):
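    """Score a resume against the job description and skills; returns the model's raw JSON response."""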
print("CV Analyzer Started")
user_message = [
{"role": "system", "content": """You are a HR. You want to hire candidates for your company. you get many resume for that job description Your job is to rank the resumes according to their alignment with the job title, skills, and resumes texts.
Input will be in the following format -
job title - job title will be provided here
skills - skills will be provided here
resume text - resume text will be provided here
Output should be in the form of json object with the following keys -
score : the score of the resume out of 10 based on job title, skills, and resume text
"""},
{'role': 'user', 'content': f"""job description - {job_description} skills -{skills} and resume text - {resume_text}"""}
]
response = openai.ChatCompletion.create(
model="gpt-4",
messages=user_message
)
print("CV Analyzer Completed")
return response['choices'][0]['message']['content']
| [
"job description - PLACEHOLDER skills -PLACEHOLDER and resume text - PLACEHOLDER",
"You are a HR. You want to hire candidates for your company. you get many resume for that job description Your job is to rank the resumes according to their alignment with the job title, skills, and resumes texts. \n Input will be in the following format - \n job title - job title will be provided here\n skills - skills will be provided here\n resume text - resume text will be provided here\n Output should be in the form of json object with the following keys - \n score : the score of the resume out of 10 based on job title, skills, and resume text\n "
] |
2024-01-10 | pnkr01/bankathon-api | python-server~provider~JD_Provider.py | import openai
def enhance_jd(request_id, job_description):
user_message = [
{"role": "system", "content": """You are a HR. You are trying to create a Job Description for a openings at your company and you are not sure about the description that you have written and you need recommendation for enhancing the provided Job description maintaing the same format as it have and enhanced JD must have to be of same word length as it have it in provided JD this is mandatory. Provided JD may have many format like heading then in next line it have sub headings or paragraphs it may starts from indexing or bullet points. in enhanced JD you also have to maintain the same indexing or bullet points enhanced it as a standard Job Description.
Input will be in the following format -
request_id - request id willbe provided here
Job Description - job description will be provided here
Output should be in the form of json object with the following keys -
request_id : The provided request_id
enhanced_jd : The enhanced Job Description
"""},
{'role': 'user', 'content': f"""request_id - {request_id} Job Description-{job_description} """}
]
response = openai.ChatCompletion.create(
model="gpt-4",
messages=user_message
)
return response['choices'][0]['message']['content']
# response in format of json object with key request id anf enhanced_jd
| [
"You are a HR. You are trying to create a Job Description for a openings at your company and you are not sure about the description that you have written and you need recommendation for enhancing the provided Job description maintaing the same format as it have and enhanced JD must have to be of same word length as it have it in provided JD this is mandatory. Provided JD may have many format like heading then in next line it have sub headings or paragraphs it may starts from indexing or bullet points. in enhanced JD you also have to maintain the same indexing or bullet points enhanced it as a standard Job Description.\n Input will be in the following format -\n request_id - request id willbe provided here\n Job Description - job description will be provided here\n\n Output should be in the form of json object with the following keys -\n request_id : The provided request_id\n enhanced_jd : The enhanced Job Description\n ",
"request_id - PLACEHOLDER Job Description-PLACEHOLDER "
] |
2024-01-10 | pnkr01/bankathon-api | python-server~utils~screening~screening_question.py | import openai
import json
def generate_screening_question(job_title, job_description, resume_text):
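    """Ask the model for 10 screening questions based on the job description and resume; returns raw JSON text."""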
messages = [
{"role": "system", "content": """You are a helpful assistant. Your purpose is to make 10 Screening Questions for the candidate appearing for the interviews. The screening questions should be based on the job description and candidate's resume. The questions can be based on situations, technical capabilities and soft skills. The format of the screening questions should be as follows -
1. 3 Project and previous employment Based Questions from CV. Questions can be based on situations or how they solved various issues faced
2. 3 Questions based on the technical skills of the candidate. These questions should be in depth inside the concept and try to ask relevant detailed question. These questions will test the technical capability of the candidate and how much candidate knows about the topic in detail. Try to test the concept of the candidate in depth. For example if it is SQL, then you ask question related to primary and foreign keys and their working. Also, do not give reference to CV while asking these questions.
3. 3 Questions based on job profile. These questions can be of any type but the theme should be strictly around job profile provided
4. 1 Question on Soft Skills
Input will be in the following format -
Job Title - Job Title for the position will be provided here
Job Description - job description will be provided here
Resume - Candidate's resume will be provided here
Output should be in the form of json object with the following keys -
Question - The screening question to be asked should be here
Reference - the reference of the question
Type - Job Profile, Technical, Example & Project based or Soft Skill
Tag - List of tags associated with the question for categorizing it and using it for future interviews
Return the output in the json format separated by comma. For example -
[
{
"Question": "What is your name?",
"Reference": "Resume",
"Type": "Example & Project based",
"Tag": ["Name", "Example & Project based"]
},
...
]
"""},
{"role": "user", "content": f"""Job Title - {job_title}, Job Description - {job_description}, Resume - {resume_text}"""}
]
response = openai.ChatCompletion.create(
model="gpt-4",
messages=messages
)
return response['choices'][0]['message']['content']
| [
"You are a helpful assistant. Your purpose is to make 10 Screening Questions for the candidate appearing for the interviews. The screening questions should be based on the job description and candidate's resume. The questions can be based on situations, technical capabilities and soft skills. The format of the screening questions should be as follows - \n 1. 3 Project and previous employment Based Questions from CV. Questions can be based on situations or how they solved various issues faced\n 2. 3 Questions based on the technical skills of the candidate. These questions should be in depth inside the concept and try to ask relevant detailed question. These questions will test the technical capability of the candidate and how much candidate knows about the topic in detail. Try to test the concept of the candidate in depth. For example if it is SQL, then you ask question related to primary and foreign keys and their working. Also, do not give reference to CV while asking these questions.\n 3. 3 Questions based on job profile. These questions can be of any type but the theme should be strictly around job profile provided\n 4. 1 Question on Soft Skills\n \n Input will be in the following format - \n Job Title - Job Title for the position will be provided here\n Job Description - job description will be provided here\n Resume - Candidate's resume will be provided here\n \n Output should be in the form of json object with the following keys - \n Question - The screening question to be asked should be here\n Reference - the reference of the question\n Type - Job Profile, Technical, Example & Project based or Soft Skill\n Tag - List of tags associated with the question for categorizing it and using it for future interviews\n Return the output in the json format separated by comma. For example -\n [\n {\n \"Question\": \"What is your name?\",\n \"Reference\": \"Resume\",\n \"Type\": \"Example & Project based\",\n \"Tag\": [\"Name\", \"Example & Project based\"]\n },\n ...\n ]\n ",
"Job Title - PLACEHOLDER, Job Description - PLACEHOLDER, Resume - PLACEHOLDER"
] |
2024-01-10 | noodnik2/kafkloud | seer~app~seer.py | import logging
import chromadb
from chromadb.config import Settings
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA
from langchain.schema import Document
from langchain.document_loaders import TextLoader
class Seer:
def __init__(self, host="localhost", port="8020"):
chroma_settings = Settings(chroma_server_host=host, chroma_server_http_port=port,
chroma_api_impl="rest", anonymized_telemetry=False,
persist_directory="chroma_persistence", chroma_db_impl="duckdb+parquet")
self.logger = logging.getLogger(__name__)
self.chroma_client = chromadb.Client(chroma_settings)
self.embeddings = OpenAIEmbeddings()
def ask(self, questions):
self.logger.debug(f"received questions({questions})")
vectordb = Chroma(embedding_function=self.embeddings, client=self.chroma_client)
qa = RetrievalQA.from_chain_type(
llm=OpenAI(), # consider playing with the "temperature" parameter
chain_type="stuff",
            retriever=vectordb.as_retriever()  # consider using 'search_type = e.g., "similarity" or "mmr"' - and 'search_kwargs = e.g., {"k":2}' parameters
)
answers = []
for q in questions:
self.logger.debug(f"running query: {q}")
answer = qa.run(q)
self.logger.debug(f"the answer is: {answer}")
answers.append(answer)
return answers
def load(self, fns):
self.logger.debug(f"received fns({fns})")
documents_to_split = []
for fn in fns:
self.logger.debug(f"reading({fn})")
loader = TextLoader(fn)
for doc in loader.load():
documents_to_split.append(doc)
self._split_and_load_documents(documents_to_split)
def accept(self, texts):
self.logger.debug(f"received texts({texts})")
documents_to_split = []
for text in texts:
documents_to_split.append(Document(page_content=text))
self._split_and_load_documents(documents_to_split)
def _split_and_load_documents(self, documents_to_split):
self.logger.debug("RecursiveCharacterTextSplitter")
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
self.logger.debug("text_splitter.split_documents")
documents = text_splitter.split_documents(documents_to_split)
self.logger.debug("Chroma.from_documents")
Chroma.from_documents(documents, self.embeddings, client=self.chroma_client)
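if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): assumes a Chroma server is
    # reachable on localhost:8020 and that OPENAI_API_KEY is set.
    logging.basicConfig(level=logging.DEBUG)
    seer = Seer()
    seer.accept(["Kafka is a distributed event streaming platform."])
    print(seer.ask(["What is Kafka?"]))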
| [] |
2024-01-10 | AnasMations/StudySync | mariam~mindmap_generator.py | from __future__ import annotations
import re
from typing import Optional, Tuple, List, Union, Literal
import base64
import matplotlib.pyplot as plt
import networkx as nx
import streamlit as st
from streamlit.delta_generator import DeltaGenerator
import os
import openai
import graphviz
from dataclasses import dataclass, asdict
from textwrap import dedent
from streamlit_agraph import agraph, Node, Edge, Config
from dotenv import load_dotenv, find_dotenv
# set title of page (will be seen in tab) and the width
st.set_page_config(page_title="AI Mind Maps", layout="wide")
COLOR = "#6dbbbd"
FOCUS_COLOR = "#b9359a"
_ = load_dotenv(find_dotenv()) # read local .env file
openai.api_key = os.environ['OPENAI_API_KEY']
@dataclass
class Message:
"""A class that represents a message in a ChatGPT conversation.
"""
content: str
role: Literal["user", "system", "assistant"]
# is a built-in method for dataclasses
# called after the __init__ method
def __post_init__(self):
self.content = dedent(self.content).strip()
START_CONVERSATION = [
Message("""
You are a useful mind map/undirected graph-generating AI that can generate mind maps
based on any input or instructions.
""", role="system"),
Message("""
You have the ability to perform the following actions given a request
to construct or modify a mind map/graph:
1. add(node1, node2) - add an edge between node1 and node2
2. delete(node1, node2) - delete the edge between node1 and node2
3. delete(node1) - deletes every edge connected to node1
Note that the graph is undirected and thus the order of the nodes does not matter
and duplicates will be ignored. Another important note: the graph should be sparse,
with many nodes and few edges from each node. Too many edges will make it difficult
to understand and hard to read. The answer should only include the actions to perform,
nothing else. If the instructions are vague or even if only a single word is provided,
still generate a graph of multiple nodes and edges that that could makes sense in the
situation. Remember to think step by step and debate pros and cons before settling on
an answer to accomplish the request as well as possible.
Here is my first request: Add a mind map about machine learning.
""", role="user"),
Message("""
add("Machine learning","AI")
add("Machine learning", "Reinforcement learning")
add("Machine learning", "Supervised learning")
add("Machine learning", "Unsupervised learning")
add("Supervised learning", "Regression")
add("Supervised learning", "Classification")
add("Unsupervised learning", "Clustering")
add("Unsupervised learning", "Anomaly Detection")
add("Unsupervised learning", "Dimensionality Reduction")
add("Unsupervised learning", "Association Rule Learning")
add("Clustering", "K-means")
add("Classification", "Logistic Regression")
add("Reinforcement learning", "Proximal Policy Optimization")
add("Reinforcement learning", "Q-learning")
""", role="assistant"),
Message("""
Remove the parts about reinforcement learning and K-means.
""", role="user"),
Message("""
delete("Reinforcement learning")
delete("Clustering", "K-means")
""", role="assistant")
]
def ask_chatgpt(conversation: List[Message]) -> Tuple[str, List[Message]]:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
# asdict comes from `from dataclasses import asdict`
messages=[asdict(c) for c in conversation]
)
# turn into a Message object
msg = Message(**response["choices"][0]["message"])
# return the text output and the new conversation
return msg.content, conversation + [msg]
class MindMap:
"""A class that represents a mind map as a graph.
"""
def __init__(self, edges: Optional[List[Tuple[str, str]]]=None, nodes: Optional[List[str]]=None) -> None:
self.edges = [] if edges is None else edges
self.nodes = [] if nodes is None else nodes
self.save()
@classmethod
def load(cls) -> MindMap:
"""Load mindmap from session state if it exists
Returns: Mindmap
"""
if "mindmap" in st.session_state:
return st.session_state["mindmap"]
return cls()
def save(self) -> None:
# save to session state
st.session_state["mindmap"] = self
def is_empty(self) -> bool:
return len(self.edges) == 0
def ask_for_initial_graph(self, query: str) -> None:
"""Ask GPT-3 to construct a graph from scrach.
Args:
query (str): The query to ask GPT-3 about.
Returns:
str: The output from GPT-3.
"""
conversation = START_CONVERSATION + [
Message(f"""
Great, now ignore all previous nodes and restart from scratch. I now want you do the following:
{query}
""", role="user")
]
output, self.conversation = ask_chatgpt(conversation)
# replace=True to restart
self.parse_and_include_edges(output, replace=True)
def ask_for_extended_graph(self, selected_node: Optional[str]=None, text: Optional[str]=None) -> None:
"""Cached helper function to ask GPT-3 to extend the graph.
Args:
query (str): query to ask GPT-3 about
edges_as_text (str): edges formatted as text
Returns:
str: GPT-3 output
"""
# do nothing
if (selected_node is None and text is None):
return
# change description depending on if a node
# was selected or a text description was given
#
# note that the conversation is copied (shallowly) instead
# of modified in place. The reason for this is that if
# the chatgpt call fails self.conversation will not
# be updated
if selected_node is not None:
# prepend a description that this node
# should be extended
conversation = self.conversation + [
Message(f"""
add new edges to new nodes, starting from the node "{selected_node}"
""", role="user")
]
st.session_state.last_expanded = selected_node
else:
# just provide the description
conversation = self.conversation + [Message(text, role="user")]
# now self.conversation is updated
output, self.conversation = ask_chatgpt(conversation)
self.parse_and_include_edges(output, replace=False)
def parse_and_include_edges(self, output: str, replace: bool=True) -> None:
"""Parse output from LLM (GPT-3) and include the edges in the graph.
Args:
output (str): output from LLM (GPT-3) to be parsed
replace (bool, optional): if True, replace all edges with the new ones,
otherwise add to existing edges. Defaults to True.
"""
# Regex patterns
pattern1 = r'(add|delete)\("([^()"]+)",\s*"([^()"]+)"\)'
pattern2 = r'(delete)\("([^()"]+)"\)'
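        # For example, pattern1 matches 'add("Machine learning", "AI")' and
        # 'delete("Clustering", "K-means")', while pattern2 matches the
        # single-argument form 'delete("Reinforcement learning")'.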
# Find all matches in the text
matches = re.findall(pattern1, output) + re.findall(pattern2, output)
new_edges = []
remove_edges = set()
remove_nodes = set()
for match in matches:
op, *args = match
add = op == "add"
if add or (op == "delete" and len(args)==2):
a, b = args
if a == b:
continue
if add:
new_edges.append((a, b))
else:
# remove both directions
# (undirected graph)
remove_edges.add(frozenset([a, b]))
else: # must be delete of node
remove_nodes.add(args[0])
if replace:
edges = new_edges
else:
edges = self.edges + new_edges
# make sure edges aren't added twice
# and remove nodes/edges that were deleted
added = set()
for edge in edges:
nodes = frozenset(edge)
if nodes in added or nodes & remove_nodes or nodes in remove_edges:
continue
added.add(nodes)
self.edges = list([tuple(a) for a in added])
self.nodes = list(set([n for e in self.edges for n in e]))
self.save()
def _delete_node(self, node) -> None:
"""Delete a node and all edges connected to it.
Args:
node (str): The node to delete.
"""
self.edges = [e for e in self.edges if node not in frozenset(e)]
self.nodes = list(set([n for e in self.edges for n in e]))
self.conversation.append(Message(
f'delete("{node}")',
role="user"
))
self.save()
def _add_expand_delete_buttons(self, node) -> None:
st.sidebar.subheader(node)
cols = st.sidebar.columns(2)
cols[0].button(
label="Expand",
on_click=self.ask_for_extended_graph,
key=f"expand_{node}",
# pass to on_click (self.ask_for_extended_graph)
kwargs={"selected_node": node}
)
cols[1].button(
label="Delete",
on_click=self._delete_node,
type="primary",
key=f"delete_{node}",
# pass on to _delete_node
args=(node,)
)
def visualize(self, graph_type: Literal["agraph", "networkx", "graphviz"]) -> None:
"""Visualize the mindmap as a graph a certain way depending on the `graph_type`.
Args:
graph_type (Literal["agraph", "networkx", "graphviz"]): The graph type to visualize the mindmap as.
Returns:
            None: clicks on the graph are handled in place by adding
            expand/delete buttons to the sidebar.
"""
selected = st.session_state.get("last_expanded")
if graph_type == "agraph":
vis_nodes = [
Node(
id=n,
label=n,
# a little bit bigger if selected
size=10+10*(n==selected),
# a different color if selected
color=COLOR if n != selected else FOCUS_COLOR
)
for n in self.nodes
]
vis_edges = [Edge(source=a, target=b) for a, b in self.edges]
config = Config(width="100%",
height=600,
directed=False,
physics=True,
hierarchical=False,
)
# returns a node if clicked, otherwise None
clicked_node = agraph(nodes=vis_nodes,
edges=vis_edges,
config=config)
# if clicked, update the sidebar with a button to create it
if clicked_node is not None:
self._add_expand_delete_buttons(clicked_node)
return
def main():
# will initialize the graph from session state
# (if it exists) otherwise will create a new one
mindmap = MindMap.load()
st.markdown("""
<style>
.stButton button {
background-color: #b9359a; /* Change to your desired color */
color: white;
border-color: transparent; /* Set the border color to transparent */
}
[data-testid="stSidebar"] {
background: linear-gradient(220deg, #fbeaf7,#ffffff, #dfeefb);
color: black;
}
    .stButton button:hover {
        background-color: #ffffff; /* Retain the same color when hovering */
        border-color: transparent; /* Set the border color to transparent */
        color: #b9359a;
    }
    .stTextArea > div > textarea {
        border-color: #6dbbbd; /* Change to your desired color */
        color: white;
    }
</style>
""", unsafe_allow_html=True)
st.sidebar.image('logo.png', width=150)
st.sidebar.title("StudySync AI Mind Map Generator")
graph_type = "agraph"
empty = mindmap.is_empty()
reset = empty or st.sidebar.checkbox("Reset mind map", value=False)
query = st.sidebar.text_area(
"Describe your mind map" if reset else "Describe how to change your mind map",
value=st.session_state.get("mindmap-input", ""),
key="mindmap-input",
height=200
)
submit = st.sidebar.button("Submit")
valid_submission = submit and query != ""
if empty and not valid_submission:
return
with st.spinner(text="Loading graph..."):
# if submit and non-empty query, then update graph
if valid_submission:
if reset:
# completely new mindmap
mindmap.ask_for_initial_graph(query=query)
else:
# extend existing mindmap
mindmap.ask_for_extended_graph(text=query)
# since inputs also have to be updated, everything
# is rerun
st.experimental_rerun()
else:
mindmap.visualize(graph_type)
if __name__ == "__main__":
main() | [
"\n add new edges to new nodes, starting from the node \"PLACEHOLDER\"\n ",
"\n Remove the parts about reinforcement learning and K-means.\n ",
"\n delete(\"Reinforcement learning\")\n delete(\"Clustering\", \"K-means\")\n ",
"\n You are a useful mind map/undirected graph-generating AI that can generate mind maps\n based on any input or instructions.\n ",
"delete(\"PLACEHOLDER\")",
"\n add(\"Machine learning\",\"AI\")\n add(\"Machine learning\", \"Reinforcement learning\")\n add(\"Machine learning\", \"Supervised learning\")\n add(\"Machine learning\", \"Unsupervised learning\")\n add(\"Supervised learning\", \"Regression\")\n add(\"Supervised learning\", \"Classification\")\n add(\"Unsupervised learning\", \"Clustering\")\n add(\"Unsupervised learning\", \"Anomaly Detection\")\n add(\"Unsupervised learning\", \"Dimensionality Reduction\")\n add(\"Unsupervised learning\", \"Association Rule Learning\")\n add(\"Clustering\", \"K-means\")\n add(\"Classification\", \"Logistic Regression\")\n add(\"Reinforcement learning\", \"Proximal Policy Optimization\")\n add(\"Reinforcement learning\", \"Q-learning\")\n ",
"\n You have the ability to perform the following actions given a request\n to construct or modify a mind map/graph:\n\n 1. add(node1, node2) - add an edge between node1 and node2\n 2. delete(node1, node2) - delete the edge between node1 and node2\n 3. delete(node1) - deletes every edge connected to node1\n\n Note that the graph is undirected and thus the order of the nodes does not matter\n and duplicates will be ignored. Another important note: the graph should be sparse,\n with many nodes and few edges from each node. Too many edges will make it difficult \n to understand and hard to read. The answer should only include the actions to perform, \n nothing else. If the instructions are vague or even if only a single word is provided, \n still generate a graph of multiple nodes and edges that that could makes sense in the \n situation. Remember to think step by step and debate pros and cons before settling on \n an answer to accomplish the request as well as possible.\n\n Here is my first request: Add a mind map about machine learning.\n ",
"\n Great, now ignore all previous nodes and restart from scratch. I now want you do the following: \n\n PLACEHOLDER\n "
] |
2024-01-10 | nkucek0619/smartjournal-AI | gpt3-test.py | import openai
openai.api_key = ""
f = 'file-LXusegCxGmDfrqOfON1XB3qw'
response = openai.Classification.create(
file=f,
query="My baby is a dog",
search_model="ada",
model="curie",
max_examples=3
)
print(response) | [] |
2024-01-10 | rjslvn/VisionSpeak | ytuber-idea.py | import os
import argparse
import datetime
import openai # You need to install the 'openai' library
from googleapiclient.discovery import build
def set_api_keys():
openai_api_key = input("Enter your OpenAI API key: ")
youtube_api_key = input("Enter your YouTube API key: ")
os.environ["OPENAI_API_KEY"] = openai_api_key
os.environ["YOUTUBE_API_KEY"] = youtube_api_key
def create_folder_structure(base_folder, year, month):
# Create the base folder if it doesn't exist
if not os.path.exists(base_folder):
os.makedirs(base_folder)
# Create subfolders for the specified year and month
year_folder = os.path.join(base_folder, str(year))
month_folder = os.path.join(year_folder, str(month))
# Create the year and month folders if they don't exist
if not os.path.exists(year_folder):
os.makedirs(year_folder)
if not os.path.exists(month_folder):
os.makedirs(month_folder)
return month_folder
def generate_script(video_title):
# Use GPT-3 to generate a script based on the video title or description
response = openai.Completion.create(
engine="text-davinci-002",
prompt=f"Create a script for a video titled '{video_title}':",
max_tokens=150 # Adjust the max tokens as needed for the script length
)
return response.choices[0].text
def get_trending_videos(api_key, region_code, max_results=10):
youtube = build("youtube", "v3", developerKey=api_key)
# Get trending videos in the specified region
request = youtube.videos().list(
part="snippet",
chart="mostPopular",
regionCode=region_code,
maxResults=max_results
)
response = request.execute()
return response.get("items", [])
def main():
parser = argparse.ArgumentParser(description="Create folder structures, scripts, and retrieve trending YouTube content.")
parser.add_argument("--base-folder", default="YouTubeContent", help="Specify the base folder for folder structures.")
parser.add_argument("--iterations", type=int, default=3, help="Specify the number of iterations.")
parser.add_argument("--region-code", default="US", help="Specify the region code for trending videos (e.g., US).")
args = parser.parse_args()
set_api_keys() # Prompt for and set API keys as environment variables
# Get the current date to determine the year and month
now = datetime.datetime.now()
for i in range(args.iterations):
year = now.year
month = now.strftime("%B")
# Create the folder structure
content_folder = create_folder_structure(args.base_folder, year, month)
# Retrieve trending videos
trending_videos = get_trending_videos(os.environ["YOUTUBE_API_KEY"], args.region_code)
# Generate scripts for trending videos
for index, video in enumerate(trending_videos):
video_title = video["snippet"]["title"]
script = generate_script(video_title)
# Create a script file and write the generated script
script_filename = os.path.join(content_folder, f"Trending_{index + 1}_script.txt")
with open(script_filename, "w") as file:
file.write(script)
print(f"Created folder structure for {month} {year} at: {content_folder}")
print(f"Generated script for '{video_title}' and saved it to: {script_filename}")
# Move to the previous month for the next iteration
now = now - datetime.timedelta(days=30)
if __name__ == "__main__":
main()
| [
"Create a script for a video titled 'PLACEHOLDER':"
] |
2024-01-10 | tboudreaux/paper.GPT | arxivSummary.py | import logging
import openai
from chat import ask
import os
import datetime as dt
import sqlalchemy as sql
from config import postgresIP, postgrsPort, postgrsUser, postgrsPass, postgrsDB
from config import arxivCategories, catNameLookup
from config import root
from utils import build_postgrs_uri
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", None)
assert OPENAI_API_KEY is not None
if __name__ == "__main__":
openai.api_key = OPENAI_API_KEY
logging.basicConfig(level=logging.WARNING,
format="%(asctime)s %(levelname)s %(message)s")
uri = build_postgrs_uri(postgresIP, postgrsPort, postgrsUser, postgrsPass, postgrsDB)
engine = sql.create_engine(uri)
currentWeekday = dt.datetime.today().weekday()
if currentWeekday == 5:
TDELT = 2
elif currentWeekday == 6:
TDELT = 3
else:
TDELT = 1
with open(os.path.join(root, "summaryResults.html"), "w") as f:
f.write("""<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta http-equiv="X-UA-Compatible" content="ie=edge">
<title>Daily Paper Summary</title>
<link rel="icon" href="./favicon.ico" type="image/x-icon">
<style>
.paper {
border-radius: 25px;
background: #cfd1d0;
padding: 20px;
}
/* Style the tab */
.tab {
overflow: hidden;
border: 1px solid #ccc;
background-color: #f1f1f1;
}
/* Style the buttons inside the tab */
.tab button {
background-color: inherit;
float: left;
border: none;
outline: none;
cursor: pointer;
padding: 14px 16px;
transition: 0.3s;
font-size: 17px;
}
/* Change background color of buttons on hover */
.tab button:hover {
background-color: #ddd;
}
/* Create an active/current tablink class */
.tab button.active {
background-color: #ccc;
}
/* Style the tab content */
.tabcontent {
display: none;
padding: 6px 12px;
border: 1px solid #ccc;
border-top: none;
}
</style>
</head>
<script>
function openCat(evt, catName) {
var i, tabcontent, tablinks;
tabcontent = document.getElementsByClassName("tabcontent");
for (i = 0; i < tabcontent.length; i++) {
tabcontent[i].style.display = "none";
}
tablinks = document.getElementsByClassName("tablinks");
for (i = 0; i < tablinks.length; i++) {
tablinks[i].className = tablinks[i].className.replace(" active", "");
}
document.getElementById(catName).style.display = "block";
evt.currentTarget.className += " active";
}
</script>
<body>""")
f.write(f"<h1>Summary of papers published on {dt.datetime.today().date() - dt.timedelta(TDELT)}</h1>\
<div class=\"tab\">")
for cat in arxivCategories:
f.write(f"<button class=\"tablinks\" onclick=\"openCat(event, '{cat}')\">{catNameLookup[cat]}</button>\n")
f.write("</div>\n")
for cat in arxivCategories:
f.write(f"<div id=\"{cat}\" class=\"tabcontent\">\n")
f.write(f"<h2>{catNameLookup[cat]}</h2>\n")
with engine.connect() as conn:
metadata = sql.MetaData()
metadata.reflect(conn)
arxivsummary = metadata.tables['arxivsummary']
stmt = sql.select(arxivsummary).where(sql.and_(arxivsummary.columns.published_date == dt.datetime.today().date() - dt.timedelta(TDELT), arxivsummary.columns.subjects == f"{cat}"))
rs = conn.execute(stmt)
for row in rs:
query = f"Please summarize, in 1-2 sentences, the paper titled {row.title}."
f.write(f"<div class=\"paper\" id=\"{row.arxiv_id}\">\n")
f.write(f"<h3 class=\"ptitle\"><a href={row.url}>{row.title}</a> </h4>\n")
f.write(f"<h4 class=\"author_list\"> {row.author_list} </h4>\n")
f.write(f"<hr>\n")
f.write(f"<p class=\"psummary\">{ask(query)}</p>\n")
f.write(f"</div>\n\n")
f.write("<br>\n")
f.write("</div>\n")
f.write("</body>\n</html>")
| [] |
2024-01-10 | llach/forkan | forkan~common~tf_utils.py | import numpy as np
import tensorflow as tf
def entropy_from_logits(logits):
"""
Basically copied from OpenAIs PD classes, but with more comments in case
anyone wants to understand whats going on.
"""
    # shifting the logits by a constant before exp() ( exp(x + a) ) leaves the softmax unchanged,
    # check https://stats.stackexchange.com/questions/338285/how-does-the-subtraction-of-the-logit-maximum-improve-learning
    # doing so improves numerical stability (prevents infinities due to overflows)
    # some tensorflow github issues also address this issue: https://github.com/tensorflow/tensorflow/issues/2462
    # they also point to blog posts concerning this topic
    # this trick is further described here: https://en.wikipedia.org/wiki/LogSumExp
a0 = logits - tf.reduce_max(logits, axis=-1, keepdims=True)
# softmax on transformed logits
ea0 = tf.exp(a0)
z0 = tf.reduce_sum(ea0, axis=-1, keepdims=True)
p0 = ea0 / z0
# entropy calculation with reversion of the max subtraction trick
return tf.reduce_sum(p0 * (tf.log(z0) - a0), axis=-1)
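    # Illustration of the shift: for logits [1000., 1000.] a naive exp() would
    # overflow, but after subtracting the max the shifted logits are [0., 0.],
    # giving p0 = [0.5, 0.5] and an entropy of log(2), as expected.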
def value_by_index(vec, idx_vec, num_idxes):
"""
Returns vector with values from vec at index given in idx_vec.
"""
return tf.reduce_sum(vec * tf.one_hot(idx_vec, num_idxes), axis=1)
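    # Typical use: selecting the Q-value of the chosen action per batch row,
    # e.g. value_by_index(q_values, actions, num_actions) with q_values of
    # shape (batch, num_actions) and actions holding integer action indices.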
def categorical_kl(logp, logq):
"""
    Calculates the mean KL divergence KL(q || p) over a batch of categorical distributions given as log-probabilities.
"""
return tf.reduce_mean(tf.reduce_sum(tf.exp(logq) * (logq - logp), axis=1))
def flat_concat(xs):
""" From SpinningUp: Merges list of tensors into one (X,) tensor. """
return tf.concat([tf.reshape(x, (-1,)) for x in xs], axis=0)
def flat_grad(f, params):
""" From SpinningUp: Convenience function calling flat_concat on gradients. """
return flat_concat(tf.gradients(xs=params, ys=f))
def assign_params_from_flat(x, params):
""" From SpinningUp. """
flat_size = lambda p : int(np.prod(p.shape.as_list())) # the 'int' is important for scalars
splits = tf.split(x, [flat_size(p) for p in params])
new_params = [tf.reshape(p_new, p.shape) for p, p_new in zip(params, splits)]
return tf.group([tf.assign(p, p_new) for p, p_new in zip(params, new_params)])
def get_trainable_variables(scope=''):
"""
Returns all trainable variables from tf scope.
"""
return tf.trainable_variables(scope=scope)
def vector_summary(name, var, scope='vectors', with_hist=False):
"""
Attach a lot of summaries to a Tensor (for TensorBoard visualization).
    Copied from TensorFlow docs, but slightly modified.
"""
with tf.name_scope('{}/{}'.format(scope, name)):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
if with_hist:
tf.summary.histogram('histogram', var)
def scalar_summary(name, var, scope='scalars'):
""" Adds scalar Tensor to TensorBoard visualization under scope. """
with tf.name_scope('{}/{}'.format(scope, name)):
return tf.summary.scalar(name, var)
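# Rough usage sketch (TF1-style, not part of this module): the summaries built
# above are usually merged and written from a session loop, e.g.
#   merged = tf.summary.merge_all()
#   writer = tf.summary.FileWriter('/tmp/forkan-logs', sess.graph)
#   writer.add_summary(sess.run(merged), global_step=step)
# where '/tmp/forkan-logs', `sess` and `step` are placeholders.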
| [] |
2024-01-10 | Charles-Breuer/Kargono | dependencies~custom-documentation-generator~util_functions.py | # Python program to find the SHA-1 message digest of a file
# importing the hashlib module
import hashlib
import shutil
import os
from json import JSONDecodeError
import openai
import json
import sys
import openai_keys
def produce_comments(src_string):
openai.api_key = openai_keys.openai_key1
while True:
initial_output = openai.ChatCompletion.create(
model="gpt-4",
messages=[
{"role": "system", "content": "The format of your response should be a python list"
" with key-value pair dictionary elements. Do not add any additional text!"
"The input will be a c++ file. The key/value pairs have the few first words of the line being referenced as the key and the comment as the value."
"The response should be in the following format:"
'[{"void Carry(int MUX) {:" "/// @brief Architecture definition of the MUX"}, {"void Carry(int MUX) {:": "/// @details More details about this mux element."}]'
"Also strip all new line characters from your output."
"Only one comment for each line!"},
{"role": "system", "content": 'Here is an example of a decent response: '
'[{"enum class EventType": "/// @brief Enumeration defining different event types in the game engine"}, {"enum EventCategory": "/// @brief Enumeration defining different event categories in the game engine"}, {"#define EVENT_CLASS_TYPE(type)": "/// @brief Macro for defining the event class category and its members"},{"class KG_API Event": "/// @class Event", "31": "/// @brief Base class for all events in the game engine"},{"inline bool IsInCategory": "/// @return The category flags of the event"}]'
'Responses should be longer!'},
{"role": "user", "content": 'I am going to provide you a file from my game engine directory. Please provide'
"comments that are compatible with the Doxygen API. Return me a list where"
"the few first words of the line being referenced is the key, and the comment is the value."
"Please add documentation like @param, @return, @details, and your interpretation of the objects in the source code."
"This should include comments for functions, enumerations, classes, methods, parameters, and return values. "
"To ensure the c++ files still compile, ensure you add /// before each comment."
"Also note that each line from the input is separated by a newline character"
"The next input will be the file itself."
},
{"role": "user", "content": src_string}
]
)
try:
functionOutput = json.loads(initial_output.choices[0].message.content)
except JSONDecodeError as e:
print("JSON parsing failed, retrying AI.")
continue
return functionOutput
def push_directory(src, dest, ignore_directories=''):
# Remove any current directory and replace with new
if not os.path.exists(src):
raise Exception("Source Directory is invalid!")
if not os.path.exists(dest):
os.mkdir(dest)
shutil.rmtree(dest)
shutil.copytree(src, dest, ignore=shutil.ignore_patterns(*ignore_directories))
def remove_directory(src):
# Remove any current directory and replace with new
if os.path.exists(src):
shutil.rmtree(src)
def push_file(src, dest):
# Remove any current directory and replace with new
if not os.path.exists(src):
raise Exception("Source file is invalid!")
if os.path.exists(dest):
os.remove(dest)
os.makedirs(os.path.dirname(dest), exist_ok=True)
shutil.copyfile(src, dest)
def compare_hash(filename_1, filename_2):
if not os.path.exists(filename_1):
raise Exception("Input file does not exist. I don't know this would happen.")
if not os.path.exists(filename_2):
return False
digest_1 = hash_file(filename_1)
digest_2 = hash_file(filename_2)
if digest_1 == digest_2:
return True
else:
return False
def hash_file(filename):
""""This function returns the SHA-1 hash
of the file passed into it"""
# make a hash object
h = hashlib.sha1()
# open file for reading in binary mode
with open(filename, 'rb') as file:
# loop till the end of the file
chunk = 0
while chunk != b'':
# read only 1024 bytes at a time
chunk = file.read(1024)
h.update(chunk)
# return the hex representation of digest
return h.hexdigest()
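# Usage sketch: the helpers above are typically combined to skip unchanged
# files, e.g. (with hypothetical paths)
#   if not compare_hash("src/engine.h", "docs_copy/engine.h"):
#       push_file("src/engine.h", "docs_copy/engine.h")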
| [
"I am going to provide you a file from my game engine directory. Please providecomments that are compatible with the Doxygen API. Return me a list wherethe few first words of the line being referenced is the key, and the comment is the value.Please add documentation like @param, @return, @details, and your interpretation of the objects in the source code.This should include comments for functions, enumerations, classes, methods, parameters, and return values. To ensure the c++ files still compile, ensure you add /// before each comment.Also note that each line from the input is separated by a newline characterThe next input will be the file itself.",
"Here is an example of a decent response: [{\"enum class EventType\": \"/// @brief Enumeration defining different event types in the game engine\"}, {\"enum EventCategory\": \"/// @brief Enumeration defining different event categories in the game engine\"}, {\"#define EVENT_CLASS_TYPE(type)\": \"/// @brief Macro for defining the event class category and its members\"},{\"class KG_API Event\": \"/// @class Event\", \"31\": \"/// @brief Base class for all events in the game engine\"},{\"inline bool IsInCategory\": \"/// @return The category flags of the event\"}]Responses should be longer!",
"The format of your response should be a python list with key-value pair dictionary elements. Do not add any additional text!The input will be a c++ file. The key/value pairs have the few first words of the line being referenced as the key and the comment as the value.The response should be in the following format:[{\"void Carry(int MUX) {:\" \"/// @brief Architecture definition of the MUX\"}, {\"void Carry(int MUX) {:\": \"/// @details More details about this mux element.\"}]Also strip all new line characters from your output.Only one comment for each line!"
] |
2024-01-10 | malusamayo/Weaver | weaver~knowledge~knmodel.py | import json
from os.path import exists
from os import getenv
import os
import openai
import tiktoken
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
) # for exponential backoff
class LanguageModel(object):
def __init__(self):
pass
def __call__(self):
pass
# class ChatGPTModel(LanguageModel):
# def __init__(self, config_file):
# super().__init__()
# self.model = Chatbot(self.config(config_file)["api_key"])
# def config(self, config_file):
# with open(config_file, encoding="utf-8") as f:
# config = json.load(f)
# return config
# def __call__(self, prompt, rollback=True):
# response = self.model.ask(prompt)
# # print(response)
# if rollback:
# self.model.rollback(1)
# return response["choices"][0]["text"]
ENCODER = tiktoken.get_encoding("gpt2")
def get_max_tokens(prompt: str) -> int:
"""
Get the max tokens for a prompt
"""
return 4000 - len(ENCODER.encode(prompt))
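    # For instance, a prompt that encodes to 100 GPT-2 tokens leaves
    # get_max_tokens(prompt) == 3900 tokens for the completion, under the
    # 4000-token budget assumed above.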
class GPT3Model(LanguageModel):
def __init__(self, api_key: str = None) -> None:
super().__init__()
openai.api_key = api_key or os.environ.get("OPENAI_API_KEY")
assert openai.api_key is not None, "Please provide an OpenAI API key"
def _get_completion(
self,
prompt: str,
temperature: float = 0.5,
max_tokens: int=256,
n: int=1,
stream: bool = False,
):
"""
Get the completion function
"""
return openai.Completion.create(
engine="gpt-3.5-turbo-instruct",
prompt=prompt,
temperature=temperature,
max_tokens=max_tokens,
n=n,
stop=["\n\n\n"],
stream=stream,
user="kgtest"
)
@retry(wait=wait_random_exponential(min=2, max=60), stop=stop_after_attempt(6))
def __call__(self,
prompt,
temperature: float = 0.5,
max_tokens: int=256,
n: int=1,
stream: bool = False):
response = self._get_completion(prompt,
temperature=temperature,
max_tokens=max_tokens,
n=n,
stream=stream)
messages = [c["text"] for c in response["choices"]]
messages = messages[0] if len(messages) == 1 else messages
return messages
class GPT3ModelAsync(LanguageModel):
def __init__(self, api_key: str = None) -> None:
super().__init__()
openai.api_key = api_key or os.environ.get("OPENAI_API_KEY")
assert openai.api_key is not None, "Please provide an OpenAI API key"
async def _get_completion(
self,
prompt: str,
temperature: float = 0.5,
stream: bool = False,
):
"""
Get the completion function
"""
return await openai.Completion.acreate(
engine="gpt-3.5-turbo-instruct",
prompt=prompt,
temperature=temperature,
max_tokens=256,
stop=["\n\n\n"],
stream=stream,
user="kgtest"
)
async def __call__(self, prompt):
        response = await self._get_completion(prompt)
        return response["choices"][0]["text"]
class CurieModel(LanguageModel):
def __init__(self, api_key: str = None) -> None:
super().__init__()
openai.api_key = api_key or os.environ.get("OPENAI_API_KEY")
assert openai.api_key is not None, "Please provide an OpenAI API key"
def _get_completion(
self,
prompt: str,
):
"""
Get the completion function
"""
return openai.Completion.create(
engine="curie",
prompt=prompt,
temperature=1.0,
top_p=0.95,
max_tokens=100,
stop=["\""],
user="kgtest"
)
@retry(wait=wait_random_exponential(min=2, max=60), stop=stop_after_attempt(6))
def __call__(self, prompt):
response = self._get_completion(prompt)
messages = [c["text"] for c in response["choices"]]
messages = messages[0] if len(messages) == 1 else messages
return messages
class ChatGPTModel(LanguageModel):
def __init__(self, sys_msg: str, api_key: str = None, temparature=1.0) -> None:
super().__init__()
openai.api_key = api_key or os.environ.get("OPENAI_API_KEY")
assert openai.api_key is not None, "Please provide an OpenAI API key"
self.sys_msg = {"role": "system", "content": sys_msg}
self.temperature = temparature
@retry(wait=wait_random_exponential(min=2, max=60), stop=stop_after_attempt(6))
def __call__(self, messages):
messages = [self.sys_msg] + messages
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
user="kgtest",
temperature=self.temperature,
)
messages = [c["message"] for c in response["choices"]]
messages = messages[0] if len(messages) == 1 else messages
return messages
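# Example (sketch): assuming OPENAI_API_KEY is set, a ChatGPTModel round trip
# looks like
#   chat = ChatGPTModel(sys_msg="You are a helpful assistant.")
#   reply = chat([{"role": "user", "content": "Hello!"}])
#   print(reply["content"])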
if __name__ == "__main__":
model = GPT3Model()
print(model('Hi!', n=2)) | [
"[self.sys_msg] + messages"
] |
2024-01-10 | manas95826/mercor_assign | complexity.py | import requests
import gpt
import langchain
from github import Github  # PyGithub client, matching the get_user / get_repos calls below
def get_user_repositories(user_name):
    """Fetches a user's repositories from their GitHub user name."""
    github = Github()  # anonymous client; pass a personal access token for a higher rate limit
    user = github.get_user(user_name)
    return user.get_repos()
def preprocess_code(code):
"""Preprocesses the code in a repository before passing it into GPT."""
# Tokenize the code
tokens = gpt.tokenizer(code)
# Remove comments and whitespace
tokens = [token for token in tokens if token[0] != '#']
tokens = [token for token in tokens if token != ' ']
# Split the code into smaller chunks
chunks = [tokens[i:i+512] for i in range(0, len(tokens), 512)]
return chunks
def evaluate_code_complexity(code):
"""Evaluates the technical complexity of a piece of code using GPT."""
# Pass the code through GPT
prompt = 'Is this code technically complex?'
response = gpt.generate(prompt, code)
# Extract the score from the response
score = float(response.split(' ')[0])
return score
def find_most_complex_repository(user_name):
"""Finds the most technically complex repository in a user's profile."""
repositories = get_user_repositories(user_name)
# Preprocess the code in each repository
preprocessed_repositories = []
for repository in repositories:
        contents = repository.get_contents("")
        for content_file in contents:
            if content_file.type != "file":
                continue
            code = content_file.decoded_content.decode("utf-8", errors="ignore")
chunks = preprocess_code(code)
for chunk in chunks:
preprocessed_repositories.append((repository.name, chunk))
# Evaluate the technical complexity of each repository
scores = []
for repository, chunk in preprocessed_repositories:
score = evaluate_code_complexity(chunk)
scores.append((repository, score))
# Find the repository with the highest score
most_complex_repository = max(scores, key=lambda x: x[1])
return most_complex_repository
if __name__ == '__main__':
user_name = 'manas95826'
most_complex_repository = find_most_complex_repository(user_name)
print(f'The most technically complex repository in {user_name} is {most_complex_repository[0]}.')
    print(f'GPT rated the technical complexity of that repository at {most_complex_repository[1]}.')
| [
"Is this code technically complex?"
] |
2024-01-10 | DavidLBick/espnet-1 | espnet2~train~preprocessor.py | import json
import logging
import random
import re
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Collection, Dict, Iterable, List, Union
import numpy as np
import scipy.signal
import soundfile
from typeguard import check_argument_types, check_return_type
from espnet2.text.build_tokenizer import build_tokenizer
from espnet2.text.cleaner import TextCleaner
from espnet2.text.token_id_converter import TokenIDConverter
from espnet2.text.whisper_token_id_converter import OpenAIWhisperTokenIDConverter
class AbsPreprocessor(ABC):
def __init__(self, train: bool):
self.train = train
@abstractmethod
def __call__(
self, uid: str, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, np.ndarray]:
raise NotImplementedError
def framing(
x,
frame_length: int = 512,
frame_shift: int = 256,
centered: bool = True,
padded: bool = True,
):
if x.size == 0:
raise ValueError("Input array size is zero")
if frame_length < 1:
raise ValueError("frame_length must be a positive integer")
if frame_length > x.shape[-1]:
raise ValueError("frame_length is greater than input length")
if 0 >= frame_shift:
raise ValueError("frame_shift must be greater than 0")
if centered:
pad_shape = [(0, 0) for _ in range(x.ndim - 1)] + [
(frame_length // 2, frame_length // 2)
]
x = np.pad(x, pad_shape, mode="constant", constant_values=0)
if padded:
# Pad to integer number of windowed segments
# I.e make x.shape[-1] = frame_length + (nseg-1)*nstep,
# with integer nseg
nadd = (-(x.shape[-1] - frame_length) % frame_shift) % frame_length
pad_shape = [(0, 0) for _ in range(x.ndim - 1)] + [(0, nadd)]
x = np.pad(x, pad_shape, mode="constant", constant_values=0)
# Created strided array of data segments
if frame_length == 1 and frame_length == frame_shift:
result = x[..., None]
else:
shape = x.shape[:-1] + (
(x.shape[-1] - frame_length) // frame_shift + 1,
frame_length,
)
strides = x.strides[:-1] + (frame_shift * x.strides[-1], x.strides[-1])
result = np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides)
return result
def detect_non_silence(
x: np.ndarray,
threshold: float = 0.01,
frame_length: int = 1024,
frame_shift: int = 512,
window: str = "boxcar",
) -> np.ndarray:
"""Power based voice activity detection.
Args:
x: (Channel, Time)
>>> x = np.random.randn(1000)
>>> detect = detect_non_silence(x)
>>> assert x.shape == detect.shape
    >>> assert detect.dtype == bool
"""
if x.shape[-1] < frame_length:
        return np.full(x.shape, fill_value=True, dtype=bool)
if x.dtype.kind == "i":
x = x.astype(np.float64)
# framed_w: (C, T, F)
framed_w = framing(
x,
frame_length=frame_length,
frame_shift=frame_shift,
centered=False,
padded=True,
)
framed_w *= scipy.signal.get_window(window, frame_length).astype(framed_w.dtype)
# power: (C, T)
power = (framed_w**2).mean(axis=-1)
# mean_power: (C, 1)
mean_power = np.mean(power, axis=-1, keepdims=True)
if np.all(mean_power == 0):
        return np.full(x.shape, fill_value=True, dtype=bool)
# detect_frames: (C, T)
detect_frames = power / mean_power > threshold
# detects: (C, T, F)
detects = np.broadcast_to(
detect_frames[..., None], detect_frames.shape + (frame_shift,)
)
# detects: (C, TF)
detects = detects.reshape(*detect_frames.shape[:-1], -1)
# detects: (C, TF)
return np.pad(
detects,
[(0, 0)] * (x.ndim - 1) + [(0, x.shape[-1] - detects.shape[-1])],
mode="edge",
)
class CommonPreprocessor(AbsPreprocessor):
def __init__(
self,
train: bool,
token_type: str = None,
token_list: Union[Path, str, Iterable[str]] = None,
bpemodel: Union[Path, str, Iterable[str]] = None,
text_cleaner: Collection[str] = None,
g2p_type: str = None,
unk_symbol: str = "<unk>",
space_symbol: str = "<space>",
non_linguistic_symbols: Union[Path, str, Iterable[str]] = None,
delimiter: str = None,
rir_scp: str = None,
rir_apply_prob: float = 1.0,
noise_scp: str = None,
noise_apply_prob: float = 1.0,
noise_db_range: str = "3_10",
short_noise_thres: float = 0.5,
aux_task_names: Collection[str] = None,
speech_volume_normalize: float = None,
speech_name: str = "speech",
text_name: str = "text",
fs: int = 0,
):
super().__init__(train)
self.train = train
self.speech_name = speech_name
self.text_name = text_name
self.speech_volume_normalize = speech_volume_normalize
self.rir_apply_prob = rir_apply_prob
self.noise_apply_prob = noise_apply_prob
self.short_noise_thres = short_noise_thres
self.aux_task_names = aux_task_names
if token_type is not None:
if token_list is None:
raise ValueError("token_list is required if token_type is not None")
self.text_cleaner = TextCleaner(text_cleaner)
self.tokenizer = build_tokenizer(
token_type=token_type,
bpemodel=bpemodel,
delimiter=delimiter,
space_symbol=space_symbol,
non_linguistic_symbols=non_linguistic_symbols,
g2p_type=g2p_type,
)
if bpemodel not in ["whisper_en", "whisper_multilingual"]:
self.token_id_converter = TokenIDConverter(
token_list=token_list,
unk_symbol=unk_symbol,
)
else:
self.token_id_converter = OpenAIWhisperTokenIDConverter(
model_type=bpemodel
)
else:
self.text_cleaner = None
self.tokenizer = None
self.token_id_converter = None
if train and rir_scp is not None:
self.rirs = []
with open(rir_scp, "r", encoding="utf-8") as f:
for line in f:
sps = line.strip().split(None, 1)
if len(sps) == 1:
self.rirs.append(sps[0])
else:
self.rirs.append(sps[1])
else:
self.rirs = None
if train and noise_scp is not None:
self.noises = []
with open(noise_scp, "r", encoding="utf-8") as f:
for line in f:
sps = line.strip().split(None, 1)
if len(sps) == 1:
self.noises.append(sps[0])
else:
self.noises.append(sps[1])
sps = noise_db_range.split("_")
if len(sps) == 1:
self.noise_db_low = self.noise_db_high = float(sps[0])
elif len(sps) == 2:
self.noise_db_low, self.noise_db_high = float(sps[0]), float(sps[1])
else:
raise ValueError(
"Format error: '{noise_db_range}' e.g. -3_4 -> [-3db,4db]"
)
else:
self.noises = None
def _convolve_rir(self, speech, power):
rir_path = np.random.choice(self.rirs)
rir = None
if rir_path is not None:
rir, _ = soundfile.read(rir_path, dtype=np.float64, always_2d=True)
# rir: (Nmic, Time)
rir = rir.T
# speech: (Nmic, Time)
# Note that this operation doesn't change the signal length
speech = scipy.signal.convolve(speech, rir, mode="full")[
:, : speech.shape[1]
]
# Reverse mean power to the original power
power2 = (speech[detect_non_silence(speech)] ** 2).mean()
speech = np.sqrt(power / max(power2, 1e-10)) * speech
return speech, rir
def _add_noise(self, speech, power):
nsamples = speech.shape[1]
noise_path = np.random.choice(self.noises)
noise = None
if noise_path is not None:
noise_db = np.random.uniform(self.noise_db_low, self.noise_db_high)
with soundfile.SoundFile(noise_path) as f:
if f.frames == nsamples:
noise = f.read(dtype=np.float64, always_2d=True)
elif f.frames < nsamples:
if f.frames / nsamples < self.short_noise_thres:
logging.warning(
f"Noise ({f.frames}) is much shorter than "
f"speech ({nsamples}) in dynamic mixing"
)
offset = np.random.randint(0, nsamples - f.frames)
# noise: (Time, Nmic)
noise = f.read(dtype=np.float64, always_2d=True)
# Repeat noise
noise = np.pad(
noise,
[(offset, nsamples - f.frames - offset), (0, 0)],
mode="wrap",
)
else:
offset = np.random.randint(0, f.frames - nsamples)
f.seek(offset)
# noise: (Time, Nmic)
noise = f.read(nsamples, dtype=np.float64, always_2d=True)
if len(noise) != nsamples:
raise RuntimeError(f"Something wrong: {noise_path}")
# noise: (Nmic, Time)
noise = noise.T
noise_power = (noise**2).mean()
scale = (
10 ** (-noise_db / 20)
* np.sqrt(power)
/ np.sqrt(max(noise_power, 1e-10))
)
speech = speech + scale * noise
return speech, noise
def _speech_process(
self, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, Union[str, np.ndarray]]:
assert check_argument_types()
if self.speech_name in data:
if self.train and (self.rirs is not None or self.noises is not None):
speech = data[self.speech_name]
# speech: (Nmic, Time)
if speech.ndim == 1:
speech = speech[None, :]
else:
speech = speech.T
# Calc power on non silence region
power = (speech[detect_non_silence(speech)] ** 2).mean()
# 1. Convolve RIR
if self.rirs is not None and self.rir_apply_prob >= np.random.random():
speech, _ = self._convolve_rir(speech, power)
# 2. Add Noise
if (
self.noises is not None
and self.noise_apply_prob >= np.random.random()
):
speech, _ = self._add_noise(speech, power)
speech = speech.T
ma = np.max(np.abs(speech))
if ma > 1.0:
speech /= ma
data[self.speech_name] = speech
if self.speech_volume_normalize is not None:
speech = data[self.speech_name]
ma = np.max(np.abs(speech))
data[self.speech_name] = speech * self.speech_volume_normalize / ma
assert check_return_type(data)
return data
def _text_process(
self, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, np.ndarray]:
if self.text_name in data and self.tokenizer is not None:
text = data[self.text_name]
if isinstance(text, np.ndarray):
return data
text = self.text_cleaner(text)
tokens = self.tokenizer.text2tokens(text)
text_ints = self.token_id_converter.tokens2ids(tokens)
data[self.text_name] = np.array(text_ints, dtype=np.int64)
if self.aux_task_names is not None and self.tokenizer is not None:
for name in self.aux_task_names:
if name in data:
text = data[name]
text = self.text_cleaner(text)
tokens = self.tokenizer.text2tokens(text)
text_ints = self.token_id_converter.tokens2ids(tokens)
data[name] = np.array(text_ints, dtype=np.int64)
assert check_return_type(data)
return data
def __call__(
self, uid: str, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, np.ndarray]:
assert check_argument_types()
data = self._speech_process(data)
data = self._text_process(data)
return data
class SLUPreprocessor(CommonPreprocessor):
def __init__(
self,
train: bool,
token_type: str = None,
token_list: Union[Path, str, Iterable[str]] = None,
transcript_token_list: Union[Path, str, Iterable[str]] = None,
bpemodel: Union[Path, str, Iterable[str]] = None,
text_cleaner: Collection[str] = None,
g2p_type: str = None,
unk_symbol: str = "<unk>",
space_symbol: str = "<space>",
non_linguistic_symbols: Union[Path, str, Iterable[str]] = None,
delimiter: str = None,
rir_scp: str = None,
rir_apply_prob: float = 1.0,
noise_scp: str = None,
noise_apply_prob: float = 1.0,
noise_db_range: str = "3_10",
short_noise_thres: float = 0.5,
speech_volume_normalize: float = None,
speech_name: str = "speech",
text_name: str = "text",
):
super().__init__(
train=train,
token_type=token_type,
token_list=token_list,
bpemodel=bpemodel,
text_cleaner=text_cleaner,
g2p_type=g2p_type,
unk_symbol=unk_symbol,
space_symbol=space_symbol,
non_linguistic_symbols=non_linguistic_symbols,
delimiter=delimiter,
rir_scp=rir_scp,
rir_apply_prob=rir_apply_prob,
noise_scp=noise_scp,
noise_apply_prob=noise_apply_prob,
noise_db_range=noise_db_range,
short_noise_thres=short_noise_thres,
speech_volume_normalize=speech_volume_normalize,
speech_name=speech_name,
text_name=text_name,
)
if transcript_token_list is not None:
print("using transcript")
self.transcript_tokenizer = build_tokenizer(
token_type="word",
bpemodel=bpemodel,
delimiter=delimiter,
space_symbol=space_symbol,
non_linguistic_symbols=non_linguistic_symbols,
g2p_type=g2p_type,
)
self.transcript_token_id_converter = TokenIDConverter(
token_list=transcript_token_list,
unk_symbol=unk_symbol,
)
else:
self.transcript_tokenizer = None
self.transcript_token_id_converter = None
def _text_process(
self, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, np.ndarray]:
if self.text_name in data and self.tokenizer is not None:
text = data[self.text_name]
text = self.text_cleaner(text)
tokens = self.tokenizer.text2tokens(text)
text_ints = self.token_id_converter.tokens2ids(tokens)
data[self.text_name] = np.array(text_ints, dtype=np.int64)
if "transcript" in data and self.tokenizer is not None:
text = data["transcript"]
text = self.text_cleaner(text)
tokens = self.transcript_tokenizer.text2tokens(text)
text_ints = self.transcript_token_id_converter.tokens2ids(tokens)
data["transcript"] = np.array(text_ints, dtype=np.int64)
assert check_return_type(data)
return data
class CommonPreprocessor_multi(CommonPreprocessor):
def __init__(
self,
train: bool,
token_type: str = None,
token_list: Union[Path, str, Iterable[str]] = None,
bpemodel: Union[Path, str, Iterable[str]] = None,
text_cleaner: Collection[str] = None,
g2p_type: str = None,
unk_symbol: str = "<unk>",
space_symbol: str = "<space>",
non_linguistic_symbols: Union[Path, str, Iterable[str]] = None,
delimiter: str = None,
rir_scp: str = None,
rir_apply_prob: float = 1.0,
noise_scp: str = None,
noise_apply_prob: float = 1.0,
noise_db_range: str = "3_10",
short_noise_thres: float = 0.5,
aux_task_names: Collection[str] = None,
speech_volume_normalize: float = None,
speech_name: str = "speech",
text_name: List[str] = ["text"],
fs: int = 0,
):
super().__init__(
train=train,
token_type=token_type,
token_list=token_list,
bpemodel=bpemodel,
text_cleaner=text_cleaner,
g2p_type=g2p_type,
unk_symbol=unk_symbol,
space_symbol=space_symbol,
non_linguistic_symbols=non_linguistic_symbols,
delimiter=delimiter,
rir_scp=rir_scp,
rir_apply_prob=rir_apply_prob,
noise_scp=noise_scp,
noise_apply_prob=noise_apply_prob,
noise_db_range=noise_db_range,
short_noise_thres=short_noise_thres,
aux_task_names=aux_task_names,
speech_volume_normalize=speech_volume_normalize,
speech_name=speech_name,
fs=fs,
)
if isinstance(text_name, str):
self.text_name = [text_name]
else:
self.text_name = text_name
def _text_process(
self, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, np.ndarray]:
for text_n in self.text_name:
if text_n in data and self.tokenizer is not None:
text = data[text_n]
text = self.text_cleaner(text)
tokens = self.tokenizer.text2tokens(text)
text_ints = self.token_id_converter.tokens2ids(tokens)
data[text_n] = np.array(text_ints, dtype=np.int64)
if self.aux_task_names is not None and self.tokenizer is not None:
for name in self.aux_task_names:
if name in data:
text = data[name]
text = self.text_cleaner(text)
tokens = self.tokenizer.text2tokens(text)
text_ints = self.token_id_converter.tokens2ids(tokens)
data[name] = np.array(text_ints, dtype=np.int64)
assert check_return_type(data)
return data
def __call__(
self, uid: str, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, np.ndarray]:
assert check_argument_types()
data = self._speech_process(data)
data = self._text_process(data)
return data
class MutliTokenizerCommonPreprocessor(CommonPreprocessor):
def __init__(
self,
train: bool,
token_type: List[str] = [None],
token_list: List[Union[Path, str, Iterable[str]]] = [None],
bpemodel: List[Union[Path, str, Iterable[str]]] = [None],
text_cleaner: Collection[str] = None,
g2p_type: str = None,
unk_symbol: str = "<unk>",
space_symbol: str = "<space>",
non_linguistic_symbols: Union[Path, str, Iterable[str]] = None,
delimiter: str = None,
rir_scp: str = None,
rir_apply_prob: float = 1.0,
noise_scp: str = None,
noise_apply_prob: float = 1.0,
noise_db_range: str = "3_10",
short_noise_thres: float = 0.5,
speech_volume_normalize: float = None,
speech_name: str = "speech",
text_name: List[str] = ["text"],
):
# TODO(jiatong): sync with Kamo and Jing on interface for preprocessor
super().__init__(
train=train,
token_type=token_type[0],
token_list=token_list[0],
bpemodel=bpemodel[0],
text_cleaner=text_cleaner,
g2p_type=g2p_type,
unk_symbol=unk_symbol,
space_symbol=space_symbol,
non_linguistic_symbols=non_linguistic_symbols,
delimiter=delimiter,
speech_name=speech_name,
text_name=text_name[0],
rir_scp=rir_scp,
rir_apply_prob=rir_apply_prob,
noise_scp=noise_scp,
noise_apply_prob=noise_apply_prob,
noise_db_range=noise_db_range,
short_noise_thres=short_noise_thres,
speech_volume_normalize=speech_volume_normalize,
)
assert (
len(token_type) == len(token_list) == len(bpemodel) == len(text_name)
), "token_type, token_list, bpemodel, or processing text_name mismatched"
self.num_tokenizer = len(token_type)
self.tokenizer = []
self.token_id_converter = []
for i in range(self.num_tokenizer):
if token_type[i] is not None:
if token_list[i] is None:
raise ValueError("token_list is required if token_type is not None")
self.tokenizer.append(
build_tokenizer(
token_type=token_type[i],
bpemodel=bpemodel[i],
delimiter=delimiter,
space_symbol=space_symbol,
non_linguistic_symbols=non_linguistic_symbols,
g2p_type=g2p_type,
)
)
self.token_id_converter.append(
TokenIDConverter(
token_list=token_list[i],
unk_symbol=unk_symbol,
)
)
else:
self.tokenizer.append(None)
self.token_id_converter.append(None)
self.text_cleaner = TextCleaner(text_cleaner)
self.text_name = text_name # override the text_name from CommonPreprocessor
def _text_process(
self, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, np.ndarray]:
for i in range(self.num_tokenizer):
text_name = self.text_name[i]
if text_name in data and self.tokenizer[i] is not None:
text = data[text_name]
text = self.text_cleaner(text)
tokens = self.tokenizer[i].text2tokens(text)
text_ints = self.token_id_converter[i].tokens2ids(tokens)
data[text_name] = np.array(text_ints, dtype=np.int64)
assert check_return_type(data)
return data
class DynamicMixingPreprocessor(AbsPreprocessor):
def __init__(
self,
train: bool,
source_scp: str = None,
ref_num: int = 2,
dynamic_mixing_gain_db: float = 0.0,
speech_name: str = "speech_mix",
speech_ref_name_prefix: str = "speech_ref",
mixture_source_name: str = None,
utt2spk: str = None,
):
super().__init__(train)
self.source_scp = source_scp
self.ref_num = ref_num
self.dynamic_mixing_gain_db = dynamic_mixing_gain_db
self.speech_name = speech_name
self.speech_ref_name_prefix = speech_ref_name_prefix
# mixture_source_name: the key to select source utterances from dataloader
if mixture_source_name is None:
self.mixture_source_name = f"{speech_ref_name_prefix}1"
else:
self.mixture_source_name = mixture_source_name
self.sources = {}
assert (
source_scp is not None
), f"Please pass `source_scp` to {type(self).__name__}"
with open(source_scp, "r", encoding="utf-8") as f:
for line in f:
sps = line.strip().split(None, 1)
assert len(sps) == 2
self.sources[sps[0]] = sps[1]
self.utt2spk = {}
if utt2spk is None:
# if utt2spk is not provided, create a dummy utt2spk with uid.
for key in self.sources.keys():
self.utt2spk[key] = key
else:
with open(utt2spk, "r", encoding="utf-8") as f:
for line in f:
sps = line.strip().split(None, 1)
assert len(sps) == 2
self.utt2spk[sps[0]] = sps[1]
for key in self.sources.keys():
assert key in self.utt2spk
self.source_keys = list(self.sources.keys())
def _pick_source_utterances_(self, uid):
# return (ref_num - 1) uid of reference sources.
source_keys = [uid]
spk_ids = [self.utt2spk[uid]]
retry_cnt = 0
while len(source_keys) < self.ref_num:
picked = random.choice(self.source_keys)
spk_id = self.utt2spk[picked]
# make one utterance or one speaker only appears once in mixing.
if (picked not in source_keys) and (spk_id not in spk_ids):
source_keys.append(picked)
else:
retry_cnt += 1
if retry_cnt > 10:
source_keys.append(picked)
logging.warning(
"Can not find speech source from different speaker "
f"for {retry_cnt} times."
"There may be problems with training data. "
"Please check the utt2spk file."
)
return source_keys[1:]
def _read_source_(self, key, speech_length):
source, _ = soundfile.read(
self.sources[key],
dtype=np.float32,
always_2d=False,
)
if speech_length > source.shape[0]:
pad = speech_length - source.shape[0]
source = np.pad(source, (0, pad), "reflect")
else:
source = source[0:speech_length]
assert speech_length == source.shape[0]
return source
def _mix_speech_(self, uid, data):
# pick sources
source_keys = self._pick_source_utterances_(uid)
# load audios
speech_length = data[self.mixture_source_name].shape[0]
ref_audios = [self._read_source_(key, speech_length) for key in source_keys]
ref_audios = [data[self.mixture_source_name]] + ref_audios
# apply random gain to speech sources
gain_in_db = [
random.uniform(-self.dynamic_mixing_gain_db, self.dynamic_mixing_gain_db)
for i in range(len(ref_audios))
]
gain = [10 ** (g_db / 20.0) for g_db in gain_in_db]
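        # e.g. +6 dB maps to a linear factor of 10 ** (6 / 20) ≈ 2.0, and -6 dB to ≈ 0.5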
ref_audios = [ref * g for ref, g in zip(ref_audios, gain)]
speech_mix = np.sum(np.array(ref_audios), axis=0)
for i, ref in enumerate(ref_audios):
data[f"{self.speech_ref_name_prefix}{i+1}"] = ref
data[self.speech_name] = speech_mix
return data
def __call__(
self, uid: str, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, np.ndarray]:
# TODO(Chenda): need to test for multi-channel data.
assert (
len(data[self.mixture_source_name].shape) == 1
), "Multi-channel input has not been tested"
if self.train:
data = self._mix_speech_(uid, data)
assert check_return_type(data)
return data
class EnhPreprocessor(CommonPreprocessor):
"""Preprocessor for Speech Enhancement (Enh) task."""
def __init__(
self,
train: bool,
rir_scp: str = None,
rir_apply_prob: float = 1.0,
noise_scp: str = None,
noise_apply_prob: float = 1.0,
noise_db_range: str = "3_10",
short_noise_thres: float = 0.5,
speech_volume_normalize: float = None,
speech_name: str = "speech_mix",
speech_ref_name_prefix: str = "speech_ref",
noise_ref_name_prefix: str = "noise_ref",
dereverb_ref_name_prefix: str = "dereverb_ref",
use_reverberant_ref: bool = False,
num_spk: int = 1,
num_noise_type: int = 1,
sample_rate: int = 8000,
force_single_channel: bool = False,
):
super().__init__(
train=train,
token_type=None,
token_list=None,
bpemodel=None,
text_cleaner=None,
g2p_type=None,
unk_symbol="<unk>",
space_symbol="<space>",
non_linguistic_symbols=None,
delimiter=None,
rir_scp=rir_scp,
rir_apply_prob=rir_apply_prob,
noise_scp=noise_scp,
noise_apply_prob=noise_apply_prob,
noise_db_range=noise_db_range,
short_noise_thres=short_noise_thres,
speech_volume_normalize=speech_volume_normalize,
speech_name=speech_name,
)
self.speech_ref_name_prefix = speech_ref_name_prefix
self.noise_ref_name_prefix = noise_ref_name_prefix
self.dereverb_ref_name_prefix = dereverb_ref_name_prefix
self.use_reverberant_ref = use_reverberant_ref
self.num_spk = num_spk
self.num_noise_type = num_noise_type
self.sample_rate = sample_rate
self.force_single_channel = force_single_channel
if self.speech_volume_normalize is not None:
sps = speech_volume_normalize.split("_")
if len(sps) == 1:
                self.volume_low = self.volume_high = float(sps[0])
elif len(sps) == 2:
self.volume_low, self.volume_high = float(sps[0]), float(sps[1])
else:
raise ValueError(
"Format error for --speech_volume_normalize: "
f"'{speech_volume_normalize}'"
)
def _ensure_2d(self, signal):
if isinstance(signal, tuple):
return tuple(self._ensure_2d(sig) for sig in signal)
elif isinstance(signal, list):
return [self._ensure_2d(sig) for sig in signal]
else:
# (Nmic, Time)
return signal[None, :] if signal.ndim == 1 else signal.T
def _get_early_signal(self, speech, rir, power):
predelay = 50 # milliseconds
dt = np.argmax(rir, axis=1).min()
et = dt + (predelay * self.sample_rate) // 1000
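        # dt: earliest direct-path peak across channels; keep the RIR only up to
        # `predelay` ms after it, so `rir_early` models the early reflections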
rir_early = rir[:, :et]
speech2 = scipy.signal.convolve(speech, rir_early, mode="full")[
:, : speech.shape[1]
]
# Reverse mean power to the original power
power2 = (speech2[detect_non_silence(speech2)] ** 2).mean()
speech2 = np.sqrt(power / max(power2, 1e-10)) * speech2
return speech2
def _apply_to_all_signals(self, data_dict, func):
data_dict[self.speech_name] = func(data_dict[self.speech_name])
for n in range(self.num_noise_type):
noise_name = self.noise_ref_name_prefix + str(n + 1)
if noise_name in data_dict:
data_dict[noise_name] = func(data_dict[noise_name])
for spk in range(self.num_spk):
speech_ref_name = self.speech_ref_name_prefix + str(spk + 1)
if self.train or speech_ref_name in data_dict:
data_dict[speech_ref_name] = func(data_dict[speech_ref_name])
dereverb_ref_name = self.dereverb_ref_name_prefix + str(spk + 1)
if dereverb_ref_name in data_dict:
data_dict[dereverb_ref_name] = func(data_dict[dereverb_ref_name])
def _speech_process(
self, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, Union[str, np.ndarray]]:
assert check_argument_types()
if self.speech_name not in data:
assert check_return_type(data)
return data
if self.train:
# clean speech signal (Nmic, Time)
speech_ref = [
self._ensure_2d(data[self.speech_ref_name_prefix + str(i + 1)])
for i in range(self.num_spk)
]
# dereverberated (noisy) signal (Nmic, Time)
if "dereverb_ref1" in data:
dereverb_speech_ref = [
self._ensure_2d(data[self.dereverb_ref_name_prefix + str(i + 1)])
for i in range(self.num_spk)
if self.dereverb_ref_name_prefix + str(i + 1) in data
]
assert len(dereverb_speech_ref) in (1, self.num_spk), len(
dereverb_speech_ref
)
else:
dereverb_speech_ref = None
# Calc power on non silence region
power_ref = [
(sref[detect_non_silence(sref)] ** 2).mean() for sref in speech_ref
]
speech_mix = data[self.speech_name]
# 1. Convolve RIR
if self.rirs is not None and self.rir_apply_prob >= np.random.random():
speech_ref, rir_ref = zip(
*[
self._convolve_rir(sp, power)
for sp, power in zip(speech_ref, power_ref)
]
)
if self.force_single_channel:
speech_ref = list(
map(lambda x: x if x.shape[0] == 1 else x[:1], speech_ref)
)
rir_ref = list(
map(lambda x: x if x.shape[0] == 1 else x[:1], rir_ref)
)
if self.use_reverberant_ref:
for spk in range(self.num_spk):
suffix = str(spk + 1)
speech_ref_name = self.speech_ref_name_prefix + suffix
# (Time, Nmic)
data[speech_ref_name] = speech_ref[spk].T
if dereverb_speech_ref is not None:
if spk == 0 or len(dereverb_speech_ref) > 1:
dereverb_name = self.dereverb_ref_name_prefix + suffix
data[dereverb_name] = self._get_early_signal(
speech_ref[spk], rir_ref[spk], power_ref[spk]
).T
else:
for spk in range(self.num_spk):
suffix = str(spk + 1)
speech_ref_name = self.speech_ref_name_prefix + suffix
# clean speech with early reflections (Time, Nmic)
data[speech_ref_name] = self._get_early_signal(
speech_ref[spk], rir_ref[spk], power_ref[spk]
).T
if dereverb_speech_ref is not None:
if spk == 0 or len(dereverb_speech_ref) > 1:
dereverb_name = self.dereverb_ref_name_prefix + suffix
data[dereverb_name] = data[speech_ref_name]
# NOTE(Wangyou): Must be careful here in case that the original
                # `speech_ref` does not sum up to `speech_mix`
# (such as in the TSE task)
speech_mix = sum(speech_ref)
power_mix = (speech_mix[detect_non_silence(speech_mix)] ** 2).mean()
# 2. Add Noise
if self.noises is not None and self.noise_apply_prob >= np.random.random():
speech_mix, noise = self._add_noise(speech_mix, power_mix)
if self.force_single_channel:
if speech_mix.shape[0] > 1:
speech_mix = speech_mix[:1]
if noise.shape[0] > 1:
noise = noise[:1]
for n in range(1, self.num_noise_type):
name = self.noise_ref_name_prefix + str(n + 1)
data.pop(name, None)
data[self.noise_ref_name_prefix + "1"] = noise.T
speech_mix = speech_mix.T
data[self.speech_name] = speech_mix
ma = np.max(np.abs(speech_mix))
if ma > 1.0:
self._apply_to_all_signals(data, lambda x: x / ma)
self._apply_to_all_signals(data, lambda x: x.squeeze())
if self.force_single_channel:
self._apply_to_all_signals(data, lambda x: x if x.ndim == 1 else x[:, 0])
if self.speech_volume_normalize is not None:
if self.train:
volume_scale = np.random.uniform(self.volume_low, self.volume_high)
else:
# use a fixed scale to make it deterministic
volume_scale = self.volume_low
speech_mix = data[self.speech_name]
ma = np.max(np.abs(speech_mix))
self._apply_to_all_signals(data, lambda x: x * volume_scale / ma)
assert check_return_type(data)
return data
class SVSPreprocessor(AbsPreprocessor):
"""Preprocessor for Sing Voice Sythesis (SVS) task."""
def __init__(
self,
train: bool,
token_type: str = None,
token_list: Union[Path, str, Iterable[str]] = None,
bpemodel: Union[Path, str, Iterable[str]] = None,
text_cleaner: Collection[str] = None,
g2p_type: str = None,
unk_symbol: str = "<unk>",
space_symbol: str = "<space>",
non_linguistic_symbols: Union[Path, str, Iterable[str]] = None,
delimiter: str = None,
singing_volume_normalize: float = None,
singing_name: str = "singing",
text_name: str = "text",
label_name: str = "label",
midi_name: str = "score",
fs: np.int32 = 0,
hop_length: np.int32 = 256,
align: list = [
"singing",
"label_lab",
"midi_lab",
"tempo_lab",
"beat_lab",
], # TODO(Tao): add to args
phn_seg: dict = {
1: [1],
2: [0.25, 1],
3: [0.1, 0.5, 1],
4: [0.05, 0.1, 0.5, 1],
},
):
super().__init__(train)
self.train = train
self.singing_name = singing_name
self.text_name = text_name
self.label_name = label_name
self.midi_name = midi_name
self.fs = fs
self.hop_length = hop_length
self.singing_volume_normalize = singing_volume_normalize
self.align = align
self.phn_seg = phn_seg
self.time_shift = hop_length / fs
if token_type is not None:
if token_list is None:
raise ValueError("token_list is required if token_type is not None")
self.text_cleaner = TextCleaner(text_cleaner)
self.tokenizer = build_tokenizer(
token_type=token_type,
bpemodel=bpemodel,
delimiter=delimiter,
space_symbol=space_symbol,
non_linguistic_symbols=non_linguistic_symbols,
g2p_type=g2p_type,
)
self.token_id_converter = TokenIDConverter(
token_list=token_list,
unk_symbol=unk_symbol,
)
else:
self.text_cleaner = None
self.tokenizer = None
self.token_id_converter = None
def __call__(
self,
uid: str,
data: Dict[str, Union[str, np.ndarray, tuple]],
) -> Dict[str, np.ndarray]:
assert check_argument_types()
if self.singing_name in data:
if self.singing_volume_normalize is not None:
singing = data[self.singing_name]
ma = np.max(np.abs(singing))
data[self.singing_name] = singing * self.singing_volume_normalize / ma
if (
self.midi_name in data
and self.label_name in data
and self.tokenizer is not None
):
# Load label info
lab_timeseq, text = data[self.label_name]
lab_len = len(text)
text = " ".join(text)
text = self.text_cleaner(text)
text = text.split(" ")
text_ints = self.token_id_converter.tokens2ids(text)
data.pop(self.label_name)
label = np.zeros((lab_len))
midi = np.zeros((lab_len))
beat_phn = np.zeros((lab_len))
beat_ruled_phn = np.zeros((lab_len))
beat_syb = np.zeros((lab_len))
# Load score info
tempo, syb_info = data[self.midi_name]
phn_cnt = []
# Calculate features
nsamples_score = int((syb_info[-1][1] - syb_info[0][0]) * self.fs)
labelseq_score_phn = np.zeros((nsamples_score))
midiseq_score = np.zeros((nsamples_score))
beatseq_score_phn = np.zeros((nsamples_score))
beatseq_score_syb = np.zeros((nsamples_score))
temposeq_score = np.full(nsamples_score, tempo)
index_lab = 0
nsamples_lab = int((lab_timeseq[-1][1] - lab_timeseq[0][0]) * self.fs)
labelseq_lab_phn = np.zeros((nsamples_lab))
midiseq_lab = np.zeros((nsamples_lab))
beatseq_lab_phn = np.zeros((nsamples_lab))
temposeq_lab = np.full(nsamples_lab, tempo)
offset = lab_timeseq[0][0]
for st, et, syb, note, phns in syb_info:
start = int(st * self.fs)
end = int(et * self.fs) + 1
if end > nsamples_score:
end = nsamples_score
midiseq_score[start:end] = note
dur = et - st
_beat_syb = int(dur / self.time_shift + 0.5)
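                # syllable duration in frames: time_shift = hop_length / fs seconds per frame,
                # so e.g. a 0.5 s syllable at fs=24000, hop_length=256 gives int(0.5 / 0.0107 + 0.5) = 47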
beatseq_score_syb[start:end] = _beat_syb
phone = phns.split("_")
phone_ints = self.token_id_converter.tokens2ids(phone)
phn_start = start
phn_num = len(phone)
phn_cnt.append(phn_num)
pre_seg = 0
for k in range(phn_num):
if self.phn_seg[phn_num][k] == 1:
phn_end = end
else:
phn_end = (
int((st + dur * self.phn_seg[phn_num][k]) * self.fs) + 1
)
labelseq_score_phn[phn_start:phn_end] = phone_ints[k]
_beat_ruled_phn = int(
(self.phn_seg[phn_num][k] - pre_seg) * dur / self.time_shift
+ 0.5
)
beatseq_score_phn[phn_start:phn_end] = _beat_ruled_phn
pre_seg = self.phn_seg[phn_num][k]
phn_start = phn_end
# timeseq from lab
assert text[index_lab] == phone[k]
lab_start = int((lab_timeseq[index_lab][0] - offset) * self.fs)
lab_end = int((lab_timeseq[index_lab][1] - offset) * self.fs) + 1
labelseq_lab_phn[lab_start:lab_end] = text_ints[index_lab]
midiseq_lab[lab_start:lab_end] = note
_beat_phn = int(
(lab_timeseq[index_lab][1] - lab_timeseq[index_lab][0])
/ self.time_shift
+ 0.5
)
beatseq_lab_phn[lab_start:lab_end] = _beat_phn
# phone level feature
label[index_lab] = text_ints[index_lab]
midi[index_lab] = note
beat_phn[index_lab] = _beat_phn
beat_ruled_phn[index_lab] = _beat_ruled_phn
beat_syb[index_lab] = _beat_syb
index_lab += 1
assert index_lab == lab_len
data.pop(self.midi_name)
phn_cnt = np.array(phn_cnt)
label.astype(np.int64)
midi.astype(np.int64)
beat_phn.astype(np.int64)
beat_syb.astype(np.int64)
beat_ruled_phn.astype(np.int64)
phn_cnt.astype(np.int64)
labelseq_lab_phn.astype(np.int64)
midiseq_lab.astype(np.int64)
beatseq_lab_phn.astype(np.int64)
temposeq_lab.astype(np.int64)
labelseq_score_phn.astype(np.int64)
midiseq_score.astype(np.int64)
beatseq_score_phn.astype(np.int64)
beatseq_score_syb.astype(np.int64)
temposeq_score.astype(np.int64)
data["label"] = label
data["midi"] = midi
data["beat_phn"] = beat_phn
data["beat_ruled_phn"] = beat_ruled_phn
data["beat_syb"] = beat_syb
data["phn_cnt"] = phn_cnt
data["label_lab"] = labelseq_lab_phn
data["midi_lab"] = midiseq_lab
data["beat_lab"] = beatseq_lab_phn
data["tempo_lab"] = temposeq_lab
data["label_score"] = labelseq_score_phn
data["midi_score"] = midiseq_score
data["beat_score_phn"] = beatseq_score_phn
data["beat_score_syb"] = beatseq_score_syb
data["tempo_score"] = temposeq_score
# TODO(Yuning): Add score from midi
if self.text_name in data and self.tokenizer is not None:
# FIX ME (Yuning): wrong transfer happen in pyopenjtalk
text = data[self.text_name]
if not isinstance(text, np.ndarray):
if not isinstance(text, str):
text = " ".join(text)
text = self.text_cleaner(text)
tokens = self.tokenizer.text2tokens(text)
_text_ints = self.token_id_converter.tokens2ids(tokens)
data[self.text_name] = np.array(_text_ints, dtype=np.int64)
# align frame length with singing
length = min([len(data[key]) for key in data.keys() if key in self.align])
for key in self.align:
if key in data:
data[key] = data[key][:length]
return data
class TSEPreprocessor(EnhPreprocessor):
"""Preprocessor for Target Speaker Extraction."""
def __init__(
self,
train: bool,
train_spk2enroll: str = None,
enroll_segment: int = None,
load_spk_embedding: bool = False,
load_all_speakers: bool = False,
# inherited from EnhPreprocessor
rir_scp: str = None,
rir_apply_prob: float = 1.0,
noise_scp: str = None,
noise_apply_prob: float = 1.0,
noise_db_range: str = "3_10",
short_noise_thres: float = 0.5,
speech_volume_normalize: float = None,
speech_name: str = "speech_mix",
speech_ref_name_prefix: str = "speech_ref",
noise_ref_name_prefix: str = "noise_ref",
dereverb_ref_name_prefix: str = "dereverb_ref",
use_reverberant_ref: bool = False,
num_spk: int = 1,
num_noise_type: int = 1,
sample_rate: int = 8000,
force_single_channel: bool = False,
):
super().__init__(
train,
rir_scp=rir_scp,
rir_apply_prob=rir_apply_prob,
noise_scp=noise_scp,
noise_apply_prob=noise_apply_prob,
noise_db_range=noise_db_range,
short_noise_thres=short_noise_thres,
speech_volume_normalize=speech_volume_normalize,
speech_name=speech_name,
speech_ref_name_prefix=speech_ref_name_prefix,
noise_ref_name_prefix=noise_ref_name_prefix,
dereverb_ref_name_prefix=dereverb_ref_name_prefix,
use_reverberant_ref=use_reverberant_ref,
num_spk=num_spk,
num_noise_type=num_noise_type,
sample_rate=sample_rate,
force_single_channel=force_single_channel,
)
# If specified, the enrollment will be chomped to the specified length
self.enroll_segment = enroll_segment
# If True, the speaker embedding will be loaded instead of enrollment audios
self.load_spk_embedding = load_spk_embedding
# If False, only one of the speakers in each mixture sample will be loaded
self.load_all_speakers = load_all_speakers
if train and rir_scp is not None and rir_apply_prob > 0:
logging.warning(
"Be cautious when applying RIRs on the fly in the TSE task! "
"Please ensure `speech_ref` sums up to `speech_mix` for each sample. "
"Otherwise, the preprocessed training data will be wrong after the "
"line:\n data = super()._speech_process(data)"
)
if train:
if train_spk2enroll is None:
logging.info("Using fixed enrollment for each sample")
self.train_spk2enroll = None
else:
logging.info("Using dynamically sampled enrollment for each sample")
with open(train_spk2enroll, "r", encoding="utf-8") as f:
# {spkID: [(uid1, path1), (uid2, path2), ...]}
self.train_spk2enroll = json.load(f)
else:
self.train_spk2enroll = None
def _read_audio_segment(self, path, seg_len=None):
with soundfile.SoundFile(path) as f:
if seg_len is None or f.frames == seg_len:
audio = f.read(dtype=np.float32, always_2d=True)
elif f.frames < seg_len:
offset = np.random.randint(0, seg_len - f.frames)
# audio: (Time, Nmic)
audio = f.read(dtype=np.float32, always_2d=True)
# Repeat audio
audio = np.pad(
audio,
[(offset, seg_len - f.frames - offset), (0, 0)],
mode="wrap",
)
else:
offset = np.random.randint(0, f.frames - seg_len)
f.seek(offset)
# audio: (Time, Nmic)
audio = f.read(seg_len, dtype=np.float32, always_2d=True)
if len(audio) != seg_len:
raise RuntimeError(f"Something wrong: {path}")
return audio[:, 0]
def _speech_process(
self, uid: str, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, Union[str, np.ndarray]]:
assert check_argument_types()
ref_names = [k for k in data.keys() if re.match(r"speech_ref\d+", k)]
num_spk = len(ref_names)
aux_names = [k for k in data.keys() if re.match(r"enroll_ref\d+", k)]
if self.train:
assert len(ref_names) == len(aux_names), (len(ref_names), len(aux_names))
if not self.load_all_speakers:
# only load one target-speaker data
spk = np.random.randint(0, num_spk)
for i, name in enumerate(ref_names):
if i == 0:
data[name] = data[ref_names[spk]]
else:
data.pop(name)
continue
for i, name in enumerate(aux_names):
if not self.load_all_speakers:
if i == 0:
data[name] = data[aux_names[spk]]
else:
data.pop(name)
continue
if self.train_spk2enroll is None:
# normal format in `enroll_spk?.scp`:
# MIXTURE_UID /path/to/enrollment_or_embedding
aux_audio = data[name]
else:
# a special format in `enroll_spk?.scp`:
# MIXTURE_UID *UID SPEAKER_ID
assert data[name].startswith("*"), data[name]
cur_uid, spkid = data[name][1:].strip().split(maxsplit=1)
aux_uid, aux_audio = random.choice(self.train_spk2enroll[spkid])
while aux_uid == cur_uid:
aux_uid, aux_audio = random.choice(self.train_spk2enroll[spkid])
if getattr(self, "load_spk_embedding", False):
data[name] = np.load(aux_audio)[None, :] # force 2D
elif self.enroll_segment:
data[name] = self._read_audio_segment(
aux_audio, self.enroll_segment
)
else:
data[name] = soundfile.read(aux_audio)[0]
else:
for name in aux_names:
if data[name].startswith("*"):
# in case of collecting stats for training data
data[name] = np.zeros(1, dtype=data["speech_mix"].dtype)
else:
if getattr(self, "load_spk_embedding", False):
data[name] = np.load(data[name])[None, :] # force 2D
elif self.enroll_segment:
data[name] = self._read_audio_segment(
data[name], self.enroll_segment
)
else:
data[name] = soundfile.read(data[name])[0]
assert check_return_type(data)
return data
def __call__(
self, uid: str, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, np.ndarray]:
assert check_argument_types()
data = super()._speech_process(data)
data = self._speech_process(uid, data)
return data
| [] |
2024-01-10 | benjamin920101/search_info | news_generator.py | # Importing necessary packages, files and services
import os
import streamlit as st
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain, SequentialChain
from langchain.memory import ConversationBufferMemory
from langchain.utilities import WikipediaAPIWrapper
os.environ['OPENAI_API_KEY'] = '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'
# App UI framework
st.title('🦜🔗 Tweet Generator')
prompt = st.text_input('Tweet topic: ')
# Prompt templates
title_template = PromptTemplate(
input_variables = ['topic'],
template='給我寫一則關於 {topic} 的推文'
)
tweet_template = PromptTemplate(
input_variables = ['title', 'wikipedia_research'],
template='寫一篇關於此標題的推文標題給我: {title} ,同時利用此維基百科:{wikipedia_research} '
)
# Wikipedia data
wiki = WikipediaAPIWrapper()
# Memory
title_memory = ConversationBufferMemory(input_key='topic', memory_key='chat_history')
tweet_memory = ConversationBufferMemory(input_key='title', memory_key='chat_history')
# Llms
llm = OpenAI(model_name="text-davinci-003", temperature=0.9)
title_chain = LLMChain(llm=llm, prompt=title_template, verbose=True, output_key='title', memory=title_memory)
tweet_chain = LLMChain(llm=llm, prompt=tweet_template, verbose=True, output_key='script', memory=tweet_memory)
# Chaining the components and displaying outputs
if prompt:
title = title_chain.run(prompt)
wiki_research = wiki.run(prompt)
tweet = tweet_chain.run(title=title, wikipedia_research=wiki_research)
st.write(title)
st.write(tweet)
with st.expander('Title History'):
st.info(title_memory.buffer)
with st.expander('Tweet History'):
st.info(tweet_memory.buffer)
with st.expander('Wikipedia Research'):
st.info(wiki_research) | [
"給我寫一則關於 {topic} 的推文",
"Tweet topic: ",
"wikipedia_research",
"寫一篇關於此標題的推文標題給我: {title} ,同時利用此維基百科:{wikipedia_research} "
] |
2024-01-10 | spirosrap/Deep-Reinforcement-Learning | PPO~multiprocessing_env.py | #This code is from openai baseline
#https://github.com/openai/baselines/tree/master/baselines/common/vec_env
import numpy as np
from multiprocessing import Process, Pipe
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class VecEnv(object):
"""
An abstract asynchronous, vectorized environment.
"""
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
def reset(self):
"""
Reset all the environments and return an array of
observations, or a tuple of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a tuple of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close(self):
"""
Clean up the environments' resources.
"""
pass
def step(self, actions):
self.step_async(actions)
return self.step_wait()
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.nenvs = nenvs
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def __len__(self):
return self.nenvs
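
# A minimal usage sketch (assumptions: the `gym` package is installed, an
# environment id such as "CartPole-v1" exists, and the classic 4-tuple
# `step()` API that this wrapper expects is in use); it only exercises the
# public VecEnv interface defined above.
if __name__ == "__main__":
    import gym

    def make_env(env_id="CartPole-v1"):
        def _thunk():
            return gym.make(env_id)
        return _thunk

    envs = SubprocVecEnv([make_env() for _ in range(4)])
    obs = envs.reset()  # stacked observations, shape (num_envs, obs_dim)
    actions = [envs.action_space.sample() for _ in range(len(envs))]
    obs, rewards, dones, infos = envs.step(actions)
    envs.close()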
| [] |
2024-01-10 | spirosrap/Deep-Reinforcement-Learning | DQN~DQN%20OTHER%20IMPL~atari_wrappers.py | '''
Code from openAI baselines : https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
'''
import numpy as np
from collections import deque
from PIL import Image
import gym
from gym import spaces
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def _reset(self):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset()
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(0)
if done:
obs = self.env.reset()
return obs
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def _reset(self):
self.env.reset()
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset()
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset()
return obs
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def _step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
            # for Qbert sometimes we stay in lives == 0 condition for a few frames
# so its important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def _reset(self):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset()
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = deque(maxlen=2)
self._skip = skip
def _step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for _ in range(self._skip):
obs, reward, done, info = self.env.step(action)
self._obs_buffer.append(obs)
total_reward += reward
if done:
break
max_frame = np.max(np.stack(self._obs_buffer), axis=0)
return max_frame, total_reward, done, info
def _reset(self):
"""Clear past frame buffer and init. to first obs. from inner env."""
self._obs_buffer.clear()
obs = self.env.reset()
self._obs_buffer.append(obs)
return obs
class ClipRewardEnv(gym.RewardWrapper):
def _reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env):
"""Warp frames to 84x84 as done in the Nature paper and later work."""
gym.ObservationWrapper.__init__(self, env)
self.res = 84
self.observation_space = spaces.Box(low=0, high=255, shape=(self.res, self.res, 1))
def _observation(self, obs):
frame = np.dot(obs.astype('float32'), np.array([0.299, 0.587, 0.114], 'float32'))
frame = np.array(Image.fromarray(frame).resize((self.res, self.res),
resample=Image.BILINEAR), dtype=np.uint8)
return frame.reshape((self.res, self.res, 1))
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Buffer observations and stack across channels (last axis)."""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
assert shp[2] == 1 # can only stack 1-channel frames
self.observation_space = spaces.Box(low=0, high=255, shape=(shp[0], shp[1], k))
def _reset(self):
"""Clear buffer and re-fill by duplicating the first observation."""
ob = self.env.reset()
for _ in range(self.k): self.frames.append(ob)
return self._observation()
def _step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._observation(), reward, done, info
def _observation(self):
assert len(self.frames) == self.k
return np.concatenate(self.frames, axis=2)
def wrap_deepmind(env):
"""Configure environment for DeepMind-style Atari."""
assert 'NoFrameskip' in env.spec.id # required for DeepMind-style skip
env = EpisodicLifeEnv(env)
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
env = FrameStack(env,4)
return env | [] |
2024-01-10 | laoyin/freeswitch_chatGPT | src~open_ai~chat_ai.py |
import openai
import json
import asyncio
import websockets
from sanic import Request, Websocket
from sanic import Sanic
from sanic.response import text
from config import token
ws_app = Sanic("task-server")
ws_app.ctx.ws_dict = {}
async def chat_ai(command, ws):
question = command["text"]
openai.api_key = token
response = openai.ChatCompletion.create(
model='gpt-3.5-turbo',
messages=[
{'role': 'user', 'content': question}
],
temperature=0,
stream=True # this time, we set stream=True
)
await ws.send("--start--")
for chunk in response:
data = chunk.get("choices")
if data:
data = data[0]
data = data.get("delta", {}).get("content", "")
await asyncio.sleep(0.01)
await ws.send(data)
await ws.send("--end--")
@ws_app.websocket("/ws")
@ws_app.exception(websockets.exceptions.ConnectionClosed)
async def ws(request: Request, ws: Websocket):
try:
while True:
data = await ws.recv()
print("Received: " + data)
if data:
try:
print(data)
command = json.loads(data)
except Exception as e:
print(e)
print("what")
if command.get("cmd", "") == "qa":
await chat_ai(command, ws)
# Authorization = command.get("Authorization")
# if Authorization:
# flag, user_message = JwtToken.parse_token(Authorization[7:])
except websockets.exceptions.ConnectionClosed:
# print(e)
print("close ws")
except asyncio.CancelledError:
print("asyncio cancell close ws")
| [] |
2024-01-10 | Pre1999/Shepherd_Cache | gem5-Fall2022~configs~example~gem5_library~riscv-ubuntu-run.py | # Copyright (c) 2021 The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This script shows an example of running a full system RISCV Ubuntu boot
simulation using the gem5 library. This simulation boots Ubuntu 20.04 using
2 TIMING CPU cores. The simulation ends when the startup is completed
successfully.
Usage
-----
```
scons build/RISCV/gem5.opt
./build/RISCV/gem5.opt \
configs/example/gem5_library/riscv-ubuntu-run.py
```
"""
import m5
from m5.objects import Root
from gem5.utils.requires import requires
from gem5.components.boards.riscv_board import RiscvBoard
from gem5.components.memory import DualChannelDDR4_2400
from gem5.components.processors.simple_processor import (
SimpleProcessor,
)
from gem5.components.processors.cpu_types import CPUTypes
from gem5.isas import ISA
from gem5.coherence_protocol import CoherenceProtocol
from gem5.resources.resource import Resource
from gem5.simulate.simulator import Simulator
# This runs a check to ensure the gem5 binary is compiled for RISCV.
requires(
isa_required=ISA.RISCV,
)
# With RISCV, we use simple caches.
from gem5.components.cachehierarchies.classic\
.private_l1_private_l2_cache_hierarchy import (
PrivateL1PrivateL2CacheHierarchy,
)
# Here we setup the parameters of the l1 and l2 caches.
cache_hierarchy = PrivateL1PrivateL2CacheHierarchy(
l1d_size="16kB",
l1i_size="16kB",
l2_size="256kB",
)
# Memory: Dual Channel DDR4 2400 DRAM device.
memory = DualChannelDDR4_2400(size = "3GB")
# Here we setup the processor. We use a simple processor.
processor = SimpleProcessor(
cpu_type=CPUTypes.TIMING,
isa=ISA.RISCV,
num_cores=2,
)
# Here we setup the board. The RiscvBoard allows for Full-System RISCV
# simulations.
board = RiscvBoard(
clk_freq="3GHz",
processor=processor,
memory=memory,
cache_hierarchy=cache_hierarchy,
)
# Here we set the Full System workload.
# The `set_kernel_disk_workload` function for the RiscvBoard accepts a
# RISCV bootloader and a disk image. Once the system successfully boots, it
# encounters an `m5_exit instruction encountered`. We stop the simulation then.
# When the simulation has ended you may inspect `m5out/system.pc.com_1.device`
# to see the stdout.
board.set_kernel_disk_workload(
# The RISCV bootloader will be automatically downloaded to the
# `~/.cache/gem5` directory if not already present.
# The riscv-ubuntu boot-test was tested with riscv-bootloader-5.10
kernel=Resource(
"riscv-bootloader-vmlinux-5.10",
),
# The RISCV ubuntu image will be automatically downloaded to the
# `~/.cache/gem5` directory if not already present.
disk_image=Resource(
"riscv-ubuntu-20.04-img",
),
)
simulator = Simulator(board=board)
simulator.run()
| [] |
2024-01-10 | vitco/openai-python | openai~api_resources~higherlevel.py | from openai.api_resources.abstract.engine_api_resource import EngineAPIResource
class HigherLevel(EngineAPIResource):
api_prefix = "higherlevel"
def get_url(self, base):
return "/%s/%s" % (self.api_prefix, base)
def classification(self, **params):
return self.request("post", self.get_url("classifications"), params)
def answer(self, **params):
return self.request("post", self.get_url("answers"), params)
def retriever_file_set_search(self, **params):
return self.request("post", self.get_url("retriever_file_set_search"), params)
| [] |
2024-01-10 | ZhimaoLin/RAG-Chatbot | load_papers.py | from langchain.embeddings import OpenAIEmbeddings
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import TokenTextSplitter
import pinecone
import time
import uuid
from config import OPENAI_API_KEY, PINECONE_API_KEY, PINECONE_ENVIRONMENT, PINECONE_INDEX_NAME, EMBEDDING_MODEL, SPLITTER_CHUNK_SIZE, SPLITTER_CHUNK_OVERLAP, UPLOAD_BATCH_SIZE
# Paper list
PAPER_LIST = ["data/paper1.pdf", "data/paper2.pdf", "data/paper3.pdf"]
# Helper functions
def print_match(result):
for match in result['matches']:
print("="*60)
print(f"Score: {match['score']:.2f} \t Source: {match['metadata']['source']} \t Page: {int(match['metadata']['page'])}")
print("="*60)
print(f"{match['metadata']['text']}")
print("="*60)
print()
# Initialize OpenAI
embedding_model = OpenAIEmbeddings(
openai_api_key=OPENAI_API_KEY,
model=EMBEDDING_MODEL
)
print("="*30)
print("OpenAI initialization: OK")
print("="*30)
print()
# Initialize Pinecone vector storage
pinecone.init(
api_key=PINECONE_API_KEY,
environment=PINECONE_ENVIRONMENT
)
if PINECONE_INDEX_NAME not in pinecone.list_indexes():
# we create a new index if it doesn't exist
pinecone.create_index(
name=PINECONE_INDEX_NAME,
metric='cosine',
dimension=1536 # 1536 dim of text-embedding-ada-002
)
# wait for index to be initialized
time.sleep(1)
pinecone_index = pinecone.Index(PINECONE_INDEX_NAME)
pinecone_stats = pinecone_index.describe_index_stats()
print("="*30)
print("Pinecone initialization: OK")
print(pinecone_stats)
print("="*30)
print()
for file_path in PAPER_LIST:
loader = PyPDFLoader(file_path)
pages = loader.load_and_split()
print(f"Processing [{file_path}]")
print(f"Pages shape: {len(pages)}")
text_splitter = TokenTextSplitter(
chunk_size=SPLITTER_CHUNK_SIZE,
chunk_overlap=SPLITTER_CHUNK_OVERLAP
)
source = pages[0].metadata["source"]
total_sentences = []
page_number_list = []
for idx, page in enumerate(pages):
page_num = page.metadata["page"] + 1
sentences = text_splitter.split_text(page.page_content)
total_sentences += sentences
page_number_list += [page_num] * len(sentences)
    # Due to OpenAI rate limits, I have to embed multiple chunks at the same time
paper_embedding = embedding_model.embed_documents(total_sentences)
# Reformat the vectors
to_upsert = []
for i, sentence_vector in enumerate(paper_embedding):
to_upsert.append({
"id": str(uuid.uuid4()),
"values": sentence_vector,
"metadata": {
"text": total_sentences[i],
"source": source,
"page": page_number_list[i]
}
})
    # Upload the vectors in batches
batch_size = UPLOAD_BATCH_SIZE
n = len(to_upsert)
print(f"Total number: {n}")
for i in range(0, n, batch_size):
if i + batch_size <= n:
batch = to_upsert[i: i+batch_size]
else:
batch = to_upsert[i:]
pinecone_index.upsert(vectors=batch)
print(f"Uploaded batch [{i} : {min(n, i+batch_size)}]")
# Auto testing
query_list = [
"How to treat patient with ACHD?",
"How to diagnose Resistant Hypertension?",
"How to reduce the cardiorenal risk?"
]
for i, query in enumerate(query_list):
print("="*30)
print(f"Test {i+1}: {query}")
print("="*30)
query_embedding = embedding_model.embed_documents([query])
res = pinecone_index.query(query_embedding, top_k=3, include_metadata=True)
print_match(res)
print("Upload papers - Done!")
| [] |
2024-01-10 | appchamp/streamlit-llamaindex-worldhistory | app2~streamlit_demo.py | import os
import streamlit as st
from llama_index import (
GPTVectorStoreIndex,
SimpleDirectoryReader,
ServiceContext,
StorageContext,
LLMPredictor,
load_index_from_storage,
)
from langchain.chat_models import ChatOpenAI
index_name = "./saved_index-1doc"
documents_folder = "../docs/world-history-txt-1"
@st.cache_resource
def initialize_index(index_name, documents_folder):
llm_predictor = LLMPredictor(
llm=ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
if os.path.exists(index_name):
index = load_index_from_storage(
StorageContext.from_defaults(persist_dir=index_name),
service_context=service_context,
)
else:
documents = SimpleDirectoryReader(documents_folder).load_data()
index = GPTVectorStoreIndex.from_documents(
documents, service_context=service_context
)
index.storage_context.persist(persist_dir=index_name)
return index
@st.cache_data(max_entries=200, persist=True)
def query_index(_index, query_text):
if _index is None:
return "Please initialize the index!"
response = _index.as_query_engine().query(query_text)
return str(response)
st.title("🦙 Llama Index Demo 🦙")
st.header("Welcome to the Llama Index Streamlit Demo")
index = None
api_key = st.text_input("Enter your OpenAI API key here:", type="password")
if api_key:
os.environ["OPENAI_API_KEY"] = api_key
index = initialize_index(index_name, documents_folder)
if index is None:
st.warning("Please enter your api key first.")
text = st.text_input("Query text:", value="What did the author do growing up?")
if st.button("Run Query") and text is not None:
response = query_index(index, text)
st.markdown(response)
llm_col, embed_col = st.columns(2)
with llm_col:
st.markdown(
f"LLM Tokens Used: {index.service_context.llm_predictor._last_token_usage}"
)
with embed_col:
st.markdown(
f"Embedding Tokens Used: {index.service_context.embed_model._last_token_usage}"
)
| [] |
2024-01-10 | j0rd1smit/todoist_react_agent | main_streamlit.py | import json
import os
import time
import openai
import pydantic
import streamlit as st
import tqdm
from dotenv import load_dotenv
from streamlit_chat import message
from todoist_react_agent.chat_bot import ChatBot
from todoist_react_agent.models import (
CreateNewProjectAction,
GetAllInboxTasksAction,
GetAllProjectsAction,
GetAllTasksAction,
GiveFinalAnswerAction,
MoveTaskAction,
ReactResponse,
)
from todoist_react_agent.repair_agent import parse_base_model_with_retries
from todoist_react_agent.todoist_action_toolkit import TodoistActionToolKit
def main() -> None:
st.set_page_config(page_title="ToDo Agent", page_icon=":robot:")
st.header("ToDo Agent")
# max tokens options to mix from [128, 256, 512, 1024]
st.sidebar.header("Model settings")
max_tokens = st.sidebar.select_slider(
"Max response tokens", [128, 256, 512, 1024], value=512
)
    # sliders for the remaining agent settings:
history_length = st.sidebar.slider("Message History length:", 1, 25, 15)
temperature = st.sidebar.slider("Temperature:", 0.0, 1.0, 0.0, 0.1)
max_actions = st.sidebar.slider("Max number of actions:", 1, 50, 20)
st.sidebar.header("Instructions")
user_input = st.sidebar.text_area(
"What do you want the agent to do?",
placeholder="Which tasks do I have in my inbox?",
)
submit_button = st.sidebar.button("Submit")
if submit_button:
render_agent_loop(
user_input,
history_length=history_length,
temperature=temperature,
max_actions=max_actions,
max_tokens=max_tokens,
)
else:
st.write("Please specify what you want the agent to do in the sidebar.")
def render_agent_loop(
user_input: str,
history_length: int,
temperature: float,
max_actions: int,
max_tokens: int,
) -> None:
out_of_order_render = OutOfOrderRender()
message_render = MessageRender()
with out_of_order_render:
message_render(user_input, is_user=True)
system_message = create_system_prompt(ReactResponse, user_input)
chatbot = ChatBot(
system_message=system_message,
history_length=history_length,
temperature=temperature,
max_tokens=max_tokens,
)
todoist = TodoistActionToolKit(os.getenv("TODOIST_API_KEY"))
inputs = json.dumps({"objective": user_input})
for i in range(max_actions):
raw_response = chatbot(inputs, role="user")
try:
response = parse_base_model_with_retries(raw_response, ReactResponse)
with out_of_order_render:
message_render(
f"Thought: {response.thought}\n\nAction: {response.action.dict()}\n\nNumber of actions used: {i + 1}"
)
chatbot.set_message_content(-1, json.dumps(response.dict()))
match response.action:
case GiveFinalAnswerAction():
with out_of_order_render:
message_render(f"Final Answer: {response.action.answer}")
return
case GetAllInboxTasksAction():
observation = todoist.get_inbox_tasks()
case GetAllTasksAction():
observation = todoist.get_all_tasks()
case GetAllProjectsAction():
observation = todoist.get_all_projects()
case MoveTaskAction(task_id=task_id, project_id=project_id):
todoist.move_task(task_id, project_id)
observation = (
f"Task with id {task_id} moved to project with id {project_id}."
)
case CreateNewProjectAction(project_name=project_name):
observation = todoist.create_project(project_name)
case _:
raise ValueError(f"Unknown action {response.action}")
except ValueError as e:
observation = f"You response caused the following error: {e}. Please try again and avoid this error."
chatbot.set_message_content(-1, json.dumps(observation))
with out_of_order_render:
message_render(f"Observation: {observation}")
inputs = json.dumps({"observation": observation})
with out_of_order_render:
message_render(f"I have used my maximum number of actions. I will now stop.")
def create_system_prompt(react_model: pydantic.BaseModel, question: str) -> str:
return f"""
You are a getting things done (GTD) agent.
It is your job to accomplish the following task: {question}
You have access to multiple tools to accomplish this task.
See the action in the json schema for the available tools.
If you have insufficient information to answer the question, you can use the tools to get more information.
    All your answers must be in json format and follow this json schema:
{react_model.schema()}
    If your json response asks me to perform an action, I will perform that action.
    I will then respond with the result of that action.
Let's begin to answer the question: {question}
Do not write anything else than json!
"""
class MessageRender:
def __init__(self):
self.idx = 0
def __call__(self, message_str: str, is_user: bool = False):
message(message_str, is_user=is_user, key=f"message_{self.idx}")
self.idx += 1
class OutOfOrderRender:
def __init__(self, max_n_elements: int = 200):
self.max_n_elements = max_n_elements
self.placeholders = [st.empty() for _ in range(max_n_elements)]
self.placeholder_idx = max_n_elements - 1
def __enter__(self):
self.placeholders[self.placeholder_idx].__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
self.placeholders[self.placeholder_idx].__exit__(exc_type, exc_val, exc_tb)
self.placeholder_idx = self.placeholder_idx - 1
if __name__ == "__main__":
load_dotenv()
openai.api_type = "azure"
openai.api_version = "2023-03-15-preview"
openai.api_base = os.environ["OPENAI_API_BASE"]
openai.api_key = os.environ["OPENAI_API_KEY"]
main()
| [] |
2024-01-10 | j0rd1smit/todoist_react_agent | todoist_react_agent~chat_bot.py | import openai
class ChatBot:
def __init__(
self,
system_message: str,
messages: list[dict[str, str]] | None = None,
temperature: float = 0.0,
history_length: int = 15,
engine: str = "gpt-35-turbo",
top_p: float = 0.95,
max_tokens: int = 512,
) -> None:
self.messages = messages if messages is not None else []
self.system_message = {"role": "system", "content": system_message}
self.history_length = history_length
self.temperature = temperature
self.engine = engine
self.top_p = top_p
self.max_tokens = max_tokens
def __call__(self, message: str, role: str) -> str:
self.messages.append({"role": role, "content": message})
messages = [self.system_message] + self.messages[-self.history_length :]
completion = openai.ChatCompletion.create(
engine=self.engine,
messages=messages,
temperature=self.temperature,
top_p=self.top_p,
max_tokens=self.max_tokens,
)
message = completion.choices[0].message.content.strip()
self.messages.append({"role": "assistant", "content": message})
return message
def set_message_content(self, index: int, content: str) -> None:
self.messages[index]["content"] = content
def export_conversation(self) -> str:
result = ""
for message in self.messages:
result += f"\n{message['role'].upper()}:\n"
result += f"{message['content']}"
return result
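
# A minimal usage sketch (assumptions: the `openai` module has already been
# configured for the Azure endpoint and the default "gpt-35-turbo" deployment
# exists, as done in the accompanying Streamlit app).
if __name__ == "__main__":
    bot = ChatBot(system_message="You are a helpful assistant.", temperature=0.0)
    print(bot("Name one principle of Getting Things Done.", role="user"))
    print(bot.export_conversation())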
| [] |
2024-01-10 | Hritikbansal/sparse_feedback | scripts~llm_feedback_rankings.py | import os
import csv
import time
import openai
import argparse
import pandas as pd
from tqdm import tqdm
from constants import RANKINGS_PROMPT
parser = argparse.ArgumentParser()
parser.add_argument('--gpt_version', choices=['gpt-3.5-turbo', 'gpt-4'], default='gpt-3.5-turbo')
parser.add_argument('--input_csv', type = str, default = 'chatgpt_feedback/without_dolly/test_pairwise_data.csv')
parser.add_argument('--save_feedback_csv', type = str, default = None)
parser.add_argument('--start_index', type = int, default = 0)
args = parser.parse_args()
PROMPT_DICT = {
"prompt_input": (
"{instruction}\n\nInput:\n{input}"
),
"prompt_no_input": (
"{instruction}"
),
}
def get_reward(instruction, input, output_1, output_2):
if str(input) == "":
print('here')
instruction = PROMPT_DICT['prompt_no_input'].format(instruction = instruction)
prompt = RANKINGS_PROMPT.format(instruction = instruction, output_1 = output_1, output_2 = output_2)
else:
instruction = PROMPT_DICT['prompt_input'].format(instruction = instruction, input = input)
prompt = RANKINGS_PROMPT.format(instruction = instruction, output_1 = output_1, output_2 = output_2)
messages = [{"role": "user", "content": prompt}]
return messages
def main():
df = pd.read_csv(args.input_csv)
df = df.iloc[args.start_index:]
for j in tqdm(range(len(df))):
if j != 0 and j % 20 == 0:
time.sleep(5)
try:
instruction = df.iloc[j]['instruction']
input = df.iloc[j]['input']
output1 = df.iloc[j]['response1']
output2 = df.iloc[j]['response2']
completion = openai.ChatCompletion.create(
model = args.gpt_version,
messages = get_reward(instruction, input, output1, output2))
feedback_1 = completion['choices'][0]['message']['content']
completion = openai.ChatCompletion.create(
model = args.gpt_version,
messages = get_reward(instruction, input, output2, output1))
feedback_2 = completion['choices'][0]['message']['content']
if '(a)' in feedback_1 and '(b)' in feedback_2:
feedback = '(a)'
elif '(b)' in feedback_1 and '(a)' in feedback_2:
feedback = '(b)'
elif '(a)' in feedback_1 and '(a)' in feedback_2:
feedback = 'equal'
elif '(b)' in feedback_1 and '(b)' in feedback_2:
feedback = 'equal'
else:
continue
print(feedback_1, feedback_2, feedback)
with open(args.save_feedback_csv, 'a') as f:
csvwriter = csv.writer(f)
csvwriter.writerow([instruction, input, output1, output2, feedback])
except:
print('Sleeping...')
time.sleep(5)
if __name__ == '__main__':
main()
| [
"{'prompt_input': '{instruction}\\n\\nInput:\\n{input}', 'prompt_no_input': '{instruction}'}"
] |