date_collected
stringclasses 1
value | repo_name
stringlengths 6
116
| file_name
stringlengths 2
220
| file_contents
stringlengths 13
357k
| prompts
list |
---|---|---|---|---|
2024-01-10 | bjk95/langchain | langchain~retrievers~time_weighted_retriever.py | """Retriever that combines embedding similarity with recency in retrieving values."""
import datetime
from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple
from pydantic import BaseModel, Field
from langchain.schema import BaseRetriever, Document
from langchain.vectorstores.base import VectorStore
def _get_hours_passed(time: datetime.datetime, ref_time: datetime.datetime) -> float:
"""Get the hours passed between two datetime objects."""
return (time - ref_time).total_seconds() / 3600
class TimeWeightedVectorStoreRetriever(BaseRetriever, BaseModel):
"""Retriever combining embededing similarity with recency."""
vectorstore: VectorStore
"""The vectorstore to store documents and determine salience."""
search_kwargs: dict = Field(default_factory=lambda: dict(k=100))
"""Keyword arguments to pass to the vectorstore similarity search."""
# TODO: abstract as a queue
memory_stream: List[Document] = Field(default_factory=list)
"""The memory_stream of documents to search through."""
decay_rate: float = Field(default=0.01)
"""The exponential decay factor used as (1.0-decay_rate)**(hrs_passed)."""
k: int = 4
"""The maximum number of documents to retrieve in a given call."""
other_score_keys: List[str] = []
"""Other keys in the metadata to factor into the score, e.g. 'importance'."""
default_salience: Optional[float] = None
"""The salience to assign memories not retrieved from the vector store.
None assigns no salience to documents not fetched from the vector store.
"""
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def _get_combined_score(
self,
document: Document,
vector_relevance: Optional[float],
current_time: datetime.datetime,
) -> float:
"""Return the combined score for a document."""
hours_passed = _get_hours_passed(
current_time,
document.metadata["last_accessed_at"],
)
score = (1.0 - self.decay_rate) ** hours_passed
for key in self.other_score_keys:
if key in document.metadata:
score += document.metadata[key]
if vector_relevance is not None:
score += vector_relevance
return score
def get_salient_docs(self, query: str) -> Dict[int, Tuple[Document, float]]:
"""Return documents that are salient to the query."""
docs_and_scores: List[Tuple[Document, float]]
docs_and_scores = self.vectorstore.similarity_search_with_relevance_scores(
query, **self.search_kwargs
)
results = {}
for fetched_doc, relevance in docs_and_scores:
if "buffer_idx" in fetched_doc.metadata:
buffer_idx = fetched_doc.metadata["buffer_idx"]
doc = self.memory_stream[buffer_idx]
results[buffer_idx] = (doc, relevance)
return results
def get_relevant_documents(self, query: str) -> List[Document]:
"""Return documents that are relevant to the query."""
current_time = datetime.datetime.now()
docs_and_scores = {
doc.metadata["buffer_idx"]: (doc, self.default_salience)
for doc in self.memory_stream[-self.k :]
}
# If a doc is considered salient, update the salience score
docs_and_scores.update(self.get_salient_docs(query))
rescored_docs = [
(doc, self._get_combined_score(doc, relevance, current_time))
for doc, relevance in docs_and_scores.values()
]
rescored_docs.sort(key=lambda x: x[1], reverse=True)
result = []
# Ensure frequently accessed memories aren't forgotten
for doc, _ in rescored_docs[: self.k]:
# TODO: Update vector store doc once `update` method is exposed.
buffered_doc = self.memory_stream[doc.metadata["buffer_idx"]]
buffered_doc.metadata["last_accessed_at"] = current_time
result.append(buffered_doc)
return result
async def aget_relevant_documents(self, query: str) -> List[Document]:
"""Return documents that are relevant to the query."""
raise NotImplementedError
def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]:
"""Add documents to vectorstore."""
current_time = kwargs.get("current_time", datetime.datetime.now())
# Avoid mutating input documents
dup_docs = [deepcopy(d) for d in documents]
for i, doc in enumerate(dup_docs):
if "last_accessed_at" not in doc.metadata:
doc.metadata["last_accessed_at"] = current_time
if "created_at" not in doc.metadata:
doc.metadata["created_at"] = current_time
doc.metadata["buffer_idx"] = len(self.memory_stream) + i
self.memory_stream.extend(dup_docs)
return self.vectorstore.add_documents(dup_docs, **kwargs)
async def aadd_documents(
self, documents: List[Document], **kwargs: Any
) -> List[str]:
"""Add documents to vectorstore."""
current_time = kwargs.get("current_time", datetime.datetime.now())
# Avoid mutating input documents
dup_docs = [deepcopy(d) for d in documents]
for i, doc in enumerate(dup_docs):
if "last_accessed_at" not in doc.metadata:
doc.metadata["last_accessed_at"] = current_time
if "created_at" not in doc.metadata:
doc.metadata["created_at"] = current_time
doc.metadata["buffer_idx"] = len(self.memory_stream) + i
self.memory_stream.extend(dup_docs)
return await self.vectorstore.aadd_documents(dup_docs, **kwargs)
| [] |
2024-01-10 | bjk95/langchain | langchain~document_loaders~telegram.py | """Loader that loads Telegram chat json dump."""
from __future__ import annotations
import asyncio
import json
from pathlib import Path
from typing import TYPE_CHECKING, Dict, List, Optional, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
if TYPE_CHECKING:
import pandas as pd
def concatenate_rows(row: dict) -> str:
"""Combine message information in a readable format ready to be used."""
date = row["date"]
sender = row["from"]
text = row["text"]
return f"{sender} on {date}: {text}\n\n"
class TelegramChatFileLoader(BaseLoader):
"""Loader that loads Telegram chat json directory dump."""
def __init__(self, path: str):
"""Initialize with path."""
self.file_path = path
def load(self) -> List[Document]:
"""Load documents."""
p = Path(self.file_path)
with open(p, encoding="utf8") as f:
d = json.load(f)
text = "".join(
concatenate_rows(message)
for message in d["messages"]
if message["type"] == "message" and isinstance(message["text"], str)
)
metadata = {"source": str(p)}
return [Document(page_content=text, metadata=metadata)]
def text_to_docs(text: Union[str, List[str]]) -> List[Document]:
"""Converts a string or list of strings to a list of Documents with metadata."""
if isinstance(text, str):
# Take a single string as one page
text = [text]
page_docs = [Document(page_content=page) for page in text]
# Add page numbers as metadata
for i, doc in enumerate(page_docs):
doc.metadata["page"] = i + 1
# Split pages into chunks
doc_chunks = []
for doc in page_docs:
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=800,
separators=["\n\n", "\n", ".", "!", "?", ",", " ", ""],
chunk_overlap=20,
)
chunks = text_splitter.split_text(doc.page_content)
for i, chunk in enumerate(chunks):
doc = Document(
page_content=chunk, metadata={"page": doc.metadata["page"], "chunk": i}
)
# Add sources a metadata
doc.metadata["source"] = f"{doc.metadata['page']}-{doc.metadata['chunk']}"
doc_chunks.append(doc)
return doc_chunks
class TelegramChatApiLoader(BaseLoader):
"""Loader that loads Telegram chat json directory dump."""
def __init__(
self,
chat_url: Optional[str] = None,
api_id: Optional[int] = None,
api_hash: Optional[str] = None,
username: Optional[str] = None,
):
"""Initialize with API parameters."""
self.chat_url = chat_url
self.api_id = api_id
self.api_hash = api_hash
self.username = username
async def fetch_data_from_telegram(self) -> None:
"""Fetch data from Telegram API and save it as a JSON file."""
from telethon.sync import TelegramClient
data = []
async with TelegramClient(self.username, self.api_id, self.api_hash) as client:
async for message in client.iter_messages(self.chat_url):
is_reply = message.reply_to is not None
reply_to_id = message.reply_to.reply_to_msg_id if is_reply else None
data.append(
{
"sender_id": message.sender_id,
"text": message.text,
"date": message.date.isoformat(),
"message.id": message.id,
"is_reply": is_reply,
"reply_to_id": reply_to_id,
}
)
with open("telegram_data.json", "w", encoding="utf-8") as f:
json.dump(data, f, ensure_ascii=False, indent=4)
self.file_path = "telegram_data.json"
def _get_message_threads(self, data: pd.DataFrame) -> dict:
"""Create a dictionary of message threads from the given data.
Args:
data (pd.DataFrame): A DataFrame containing the conversation \
data with columns:
- message.sender_id
- text
- date
- message.id
- is_reply
- reply_to_id
Returns:
dict: A dictionary where the key is the parent message ID and \
the value is a list of message IDs in ascending order.
"""
def find_replies(parent_id: int, reply_data: pd.DataFrame) -> List[int]:
"""
Recursively find all replies to a given parent message ID.
Args:
parent_id (int): The parent message ID.
reply_data (pd.DataFrame): A DataFrame containing reply messages.
Returns:
list: A list of message IDs that are replies to the parent message ID.
"""
# Find direct replies to the parent message ID
direct_replies = reply_data[reply_data["reply_to_id"] == parent_id][
"message.id"
].tolist()
# Recursively find replies to the direct replies
all_replies = []
for reply_id in direct_replies:
all_replies += [reply_id] + find_replies(reply_id, reply_data)
return all_replies
# Filter out parent messages
parent_messages = data[data["is_reply"] is False]
# Filter out reply messages and drop rows with NaN in 'reply_to_id'
reply_messages = data[data["is_reply"] is True].dropna(subset=["reply_to_id"])
# Convert 'reply_to_id' to integer
reply_messages["reply_to_id"] = reply_messages["reply_to_id"].astype(int)
# Create a dictionary of message threads with parent message IDs as keys and \
# lists of reply message IDs as values
message_threads = {
parent_id: [parent_id] + find_replies(parent_id, reply_messages)
for parent_id in parent_messages["message.id"]
}
return message_threads
def _combine_message_texts(
self, message_threads: Dict[int, List[int]], data: pd.DataFrame
) -> str:
"""
Combine the message texts for each parent message ID based \
on the list of message threads.
Args:
message_threads (dict): A dictionary where the key is the parent message \
ID and the value is a list of message IDs in ascending order.
data (pd.DataFrame): A DataFrame containing the conversation data:
- message.sender_id
- text
- date
- message.id
- is_reply
- reply_to_id
Returns:
str: A combined string of message texts sorted by date.
"""
combined_text = ""
# Iterate through sorted parent message IDs
for parent_id, message_ids in message_threads.items():
# Get the message texts for the message IDs and sort them by date
message_texts = (
data[data["message.id"].isin(message_ids)]
.sort_values(by="date")["text"]
.tolist()
)
message_texts = [str(elem) for elem in message_texts]
# Combine the message texts
combined_text += " ".join(message_texts) + ".\n"
return combined_text.strip()
def load(self) -> List[Document]:
"""Load documents."""
if self.chat_url is not None:
try:
import nest_asyncio
import pandas as pd
nest_asyncio.apply()
asyncio.run(self.fetch_data_from_telegram())
except ImportError:
raise ValueError(
"please install with `pip install nest_asyncio`,\
`pip install nest_asyncio` "
)
p = Path(self.file_path)
with open(p, encoding="utf8") as f:
d = json.load(f)
normalized_messages = pd.json_normalize(d)
df = pd.DataFrame(normalized_messages)
message_threads = self._get_message_threads(df)
combined_texts = self._combine_message_texts(message_threads, df)
return text_to_docs(combined_texts)
| [] |
2024-01-10 | bjk95/langchain | langchain~vectorstores~redis.py | """Wrapper around Redis vector database."""
from __future__ import annotations
import json
import logging
import uuid
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Literal,
Mapping,
Optional,
Tuple,
Type,
)
import numpy as np
from pydantic import BaseModel, root_validator
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.base import VectorStore, VectorStoreRetriever
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from redis.client import Redis as RedisType
from redis.commands.search.query import Query
# required modules
REDIS_REQUIRED_MODULES = [
{"name": "search", "ver": 20400},
{"name": "searchlight", "ver": 20400},
]
# distance mmetrics
REDIS_DISTANCE_METRICS = Literal["COSINE", "IP", "L2"]
def _check_redis_module_exist(client: RedisType, required_modules: List[dict]) -> None:
"""Check if the correct Redis modules are installed."""
installed_modules = client.module_list()
installed_modules = {
module[b"name"].decode("utf-8"): module for module in installed_modules
}
for module in required_modules:
if module["name"] in installed_modules and int(
installed_modules[module["name"]][b"ver"]
) >= int(module["ver"]):
return
# otherwise raise error
error_message = (
"You must add the RediSearch (>= 2.4) module from Redis Stack. "
"Please refer to Redis Stack docs: https://redis.io/docs/stack/"
)
logging.error(error_message)
raise ValueError(error_message)
def _check_index_exists(client: RedisType, index_name: str) -> bool:
"""Check if Redis index exists."""
try:
client.ft(index_name).info()
except: # noqa: E722
logger.info("Index does not exist")
return False
logger.info("Index already exists")
return True
def _redis_key(prefix: str) -> str:
"""Redis key schema for a given prefix."""
return f"{prefix}:{uuid.uuid4().hex}"
def _redis_prefix(index_name: str) -> str:
"""Redis key prefix for a given index."""
return f"doc:{index_name}"
def _default_relevance_score(val: float) -> float:
return 1 - val
class Redis(VectorStore):
"""Wrapper around Redis vector database.
To use, you should have the ``redis`` python package installed.
Example:
.. code-block:: python
from langchain.vectorstores import Redis
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectorstore = Redis(
redis_url="redis://username:password@localhost:6379"
index_name="my-index",
embedding_function=embeddings.embed_query,
)
"""
def __init__(
self,
redis_url: str,
index_name: str,
embedding_function: Callable,
content_key: str = "content",
metadata_key: str = "metadata",
vector_key: str = "content_vector",
relevance_score_fn: Optional[
Callable[[float], float]
] = _default_relevance_score,
**kwargs: Any,
):
"""Initialize with necessary components."""
try:
import redis
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
self.embedding_function = embedding_function
self.index_name = index_name
try:
# connect to redis from url
redis_client = redis.from_url(redis_url, **kwargs)
# check if redis has redisearch module installed
_check_redis_module_exist(redis_client, REDIS_REQUIRED_MODULES)
except ValueError as e:
raise ValueError(f"Redis failed to connect: {e}")
self.client = redis_client
self.content_key = content_key
self.metadata_key = metadata_key
self.vector_key = vector_key
self.relevance_score_fn = relevance_score_fn
def _create_index(
self, dim: int = 1536, distance_metric: REDIS_DISTANCE_METRICS = "COSINE"
) -> None:
try:
from redis.commands.search.field import TextField, VectorField
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
# Check if index exists
if not _check_index_exists(self.client, self.index_name):
# Define schema
schema = (
TextField(name=self.content_key),
TextField(name=self.metadata_key),
VectorField(
self.vector_key,
"FLAT",
{
"TYPE": "FLOAT32",
"DIM": dim,
"DISTANCE_METRIC": distance_metric,
},
),
)
prefix = _redis_prefix(self.index_name)
# Create Redis Index
self.client.ft(self.index_name).create_index(
fields=schema,
definition=IndexDefinition(prefix=[prefix], index_type=IndexType.HASH),
)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
embeddings: Optional[List[List[float]]] = None,
keys: Optional[List[str]] = None,
batch_size: int = 1000,
**kwargs: Any,
) -> List[str]:
"""Add more texts to the vectorstore.
Args:
texts (Iterable[str]): Iterable of strings/text to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
Defaults to None.
embeddings (Optional[List[List[float]]], optional): Optional pre-generated
embeddings. Defaults to None.
keys (Optional[List[str]], optional): Optional key values to use as ids.
Defaults to None.
batch_size (int, optional): Batch size to use for writes. Defaults to 1000.
Returns:
List[str]: List of ids added to the vectorstore
"""
ids = []
prefix = _redis_prefix(self.index_name)
# Write data to redis
pipeline = self.client.pipeline(transaction=False)
for i, text in enumerate(texts):
# Use provided values by default or fallback
key = keys[i] if keys else _redis_key(prefix)
metadata = metadatas[i] if metadatas else {}
embedding = embeddings[i] if embeddings else self.embedding_function(text)
pipeline.hset(
key,
mapping={
self.content_key: text,
self.vector_key: np.array(embedding, dtype=np.float32).tobytes(),
self.metadata_key: json.dumps(metadata),
},
)
ids.append(key)
# Write batch
if i % batch_size == 0:
pipeline.execute()
# Cleanup final batch
pipeline.execute()
return ids
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""
Returns the most similar indexed documents to the query text.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
Returns:
List[Document]: A list of documents that are most similar to the query text.
"""
docs_and_scores = self.similarity_search_with_score(query, k=k)
return [doc for doc, _ in docs_and_scores]
def similarity_search_limit_score(
self, query: str, k: int = 4, score_threshold: float = 0.2, **kwargs: Any
) -> List[Document]:
"""
Returns the most similar indexed documents to the query text within the
score_threshold range.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
score_threshold (float): The minimum matching score required for a document
to be considered a match. Defaults to 0.2.
Because the similarity calculation algorithm is based on cosine similarity,
the smaller the angle, the higher the similarity.
Returns:
List[Document]: A list of documents that are most similar to the query text,
including the match score for each document.
Note:
If there are no documents that satisfy the score_threshold value,
an empty list is returned.
"""
docs_and_scores = self.similarity_search_with_score(query, k=k)
return [doc for doc, score in docs_and_scores if score < score_threshold]
def _prepare_query(self, k: int) -> Query:
try:
from redis.commands.search.query import Query
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
# Prepare the Query
hybrid_fields = "*"
base_query = (
f"{hybrid_fields}=>[KNN {k} @{self.vector_key} $vector AS vector_score]"
)
return_fields = [self.metadata_key, self.content_key, "vector_score"]
return (
Query(base_query)
.return_fields(*return_fields)
.sort_by("vector_score")
.paging(0, k)
.dialect(2)
)
def similarity_search_with_score(
self, query: str, k: int = 4
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query and score for each
"""
# Creates embedding vector from user query
embedding = self.embedding_function(query)
# Creates Redis query
redis_query = self._prepare_query(k)
params_dict: Mapping[str, str] = {
"vector": np.array(embedding) # type: ignore
.astype(dtype=np.float32)
.tobytes()
}
# Perform vector search
results = self.client.ft(self.index_name).search(redis_query, params_dict)
# Prepare document results
docs = [
(
Document(
page_content=result.content, metadata=json.loads(result.metadata)
),
float(result.vector_score),
)
for result in results.docs
]
return docs
def _similarity_search_with_relevance_scores(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs and relevance scores, normalized on a scale from 0 to 1.
0 is dissimilar, 1 is most similar.
"""
if self.relevance_score_fn is None:
raise ValueError(
"relevance_score_fn must be provided to"
" Weaviate constructor to normalize scores"
)
docs_and_scores = self.similarity_search_with_score(query, k=k)
return [(doc, self.relevance_score_fn(score)) for doc, score in docs_and_scores]
@classmethod
def from_texts_return_keys(
cls: Type[Redis],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
index_name: Optional[str] = None,
content_key: str = "content",
metadata_key: str = "metadata",
vector_key: str = "content_vector",
distance_metric: REDIS_DISTANCE_METRICS = "COSINE",
**kwargs: Any,
) -> Tuple[Redis, List[str]]:
"""Create a Redis vectorstore from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Creates a new index for the embeddings in Redis.
3. Adds the documents to the newly created Redis index.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain.vectorstores import Redis
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
redisearch = RediSearch.from_texts(
texts,
embeddings,
redis_url="redis://username:password@localhost:6379"
)
"""
redis_url = get_from_dict_or_env(kwargs, "redis_url", "REDIS_URL")
if "redis_url" in kwargs:
kwargs.pop("redis_url")
# Name of the search index if not given
if not index_name:
index_name = uuid.uuid4().hex
# Create instance
instance = cls(
redis_url=redis_url,
index_name=index_name,
embedding_function=embedding.embed_query,
content_key=content_key,
metadata_key=metadata_key,
vector_key=vector_key,
**kwargs,
)
# Create embeddings over documents
embeddings = embedding.embed_documents(texts)
# Create the search index
instance._create_index(dim=len(embeddings[0]), distance_metric=distance_metric)
# Add data to Redis
keys = instance.add_texts(texts, metadatas, embeddings)
return instance, keys
@classmethod
def from_texts(
cls: Type[Redis],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
index_name: Optional[str] = None,
content_key: str = "content",
metadata_key: str = "metadata",
vector_key: str = "content_vector",
**kwargs: Any,
) -> Redis:
"""Create a Redis vectorstore from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Creates a new index for the embeddings in Redis.
3. Adds the documents to the newly created Redis index.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain.vectorstores import Redis
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
redisearch = RediSearch.from_texts(
texts,
embeddings,
redis_url="redis://username:password@localhost:6379"
)
"""
instance, _ = cls.from_texts_return_keys(
cls=cls,
texts=texts,
embedding=embedding,
metadatas=metadatas,
index_name=index_name,
content_key=content_key,
metadata_key=metadata_key,
vector_key=vector_key,
kwargs=kwargs,
)
return instance
@staticmethod
def drop_index(
index_name: str,
delete_documents: bool,
**kwargs: Any,
) -> bool:
"""
Drop a Redis search index.
Args:
index_name (str): Name of the index to drop.
delete_documents (bool): Whether to drop the associated documents.
Returns:
bool: Whether or not the drop was successful.
"""
redis_url = get_from_dict_or_env(kwargs, "redis_url", "REDIS_URL")
try:
import redis
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
try:
# We need to first remove redis_url from kwargs,
# otherwise passing it to Redis will result in an error.
if "redis_url" in kwargs:
kwargs.pop("redis_url")
client = redis.from_url(url=redis_url, **kwargs)
except ValueError as e:
raise ValueError(f"Your redis connected error: {e}")
# Check if index exists
try:
client.ft(index_name).dropindex(delete_documents)
logger.info("Drop index")
return True
except: # noqa: E722
# Index not exist
return False
@classmethod
def from_existing_index(
cls,
embedding: Embeddings,
index_name: str,
content_key: str = "content",
metadata_key: str = "metadata",
vector_key: str = "content_vector",
**kwargs: Any,
) -> Redis:
"""Connect to an existing Redis index."""
redis_url = get_from_dict_or_env(kwargs, "redis_url", "REDIS_URL")
try:
import redis
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
try:
# We need to first remove redis_url from kwargs,
# otherwise passing it to Redis will result in an error.
if "redis_url" in kwargs:
kwargs.pop("redis_url")
client = redis.from_url(url=redis_url, **kwargs)
# check if redis has redisearch module installed
_check_redis_module_exist(client, REDIS_REQUIRED_MODULES)
# ensure that the index already exists
assert _check_index_exists(
client, index_name
), f"Index {index_name} does not exist"
except Exception as e:
raise ValueError(f"Redis failed to connect: {e}")
return cls(
redis_url,
index_name,
embedding.embed_query,
content_key=content_key,
metadata_key=metadata_key,
vector_key=vector_key,
**kwargs,
)
def as_retriever(self, **kwargs: Any) -> RedisVectorStoreRetriever:
return RedisVectorStoreRetriever(vectorstore=self, **kwargs)
class RedisVectorStoreRetriever(VectorStoreRetriever, BaseModel):
vectorstore: Redis
search_type: str = "similarity"
k: int = 4
score_threshold: float = 0.4
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@root_validator()
def validate_search_type(cls, values: Dict) -> Dict:
"""Validate search type."""
if "search_type" in values:
search_type = values["search_type"]
if search_type not in ("similarity", "similarity_limit"):
raise ValueError(f"search_type of {search_type} not allowed.")
return values
def get_relevant_documents(self, query: str) -> List[Document]:
if self.search_type == "similarity":
docs = self.vectorstore.similarity_search(query, k=self.k)
elif self.search_type == "similarity_limit":
docs = self.vectorstore.similarity_search_limit_score(
query, k=self.k, score_threshold=self.score_threshold
)
else:
raise ValueError(f"search_type of {self.search_type} not allowed.")
return docs
async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError("RedisVectorStoreRetriever does not support async")
def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]:
"""Add documents to vectorstore."""
return self.vectorstore.add_documents(documents, **kwargs)
async def aadd_documents(
self, documents: List[Document], **kwargs: Any
) -> List[str]:
"""Add documents to vectorstore."""
return await self.vectorstore.aadd_documents(documents, **kwargs)
| [] |
2024-01-10 | bjk95/langchain | langchain~agents~load_tools.py | # flake8: noqa
"""Load tools."""
import warnings
from typing import Any, Dict, List, Optional, Callable, Tuple
from mypy_extensions import Arg, KwArg
from langchain.agents.tools import Tool
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import Callbacks
from langchain.chains.api import news_docs, open_meteo_docs, podcast_docs, tmdb_docs
from langchain.chains.api.base import APIChain
from langchain.chains.llm_math.base import LLMMathChain
from langchain.chains.pal.base import PALChain
from langchain.requests import TextRequestsWrapper
from langchain.tools.arxiv.tool import ArxivQueryRun
from langchain.tools.base import BaseTool
from langchain.tools.bing_search.tool import BingSearchRun
from langchain.tools.ddg_search.tool import DuckDuckGoSearchRun
from langchain.tools.google_search.tool import GoogleSearchResults, GoogleSearchRun
from langchain.tools.metaphor_search.tool import MetaphorSearchResults
from langchain.tools.google_serper.tool import GoogleSerperResults, GoogleSerperRun
from langchain.tools.human.tool import HumanInputRun
from langchain.tools.python.tool import PythonREPLTool
from langchain.tools.requests.tool import (
RequestsDeleteTool,
RequestsGetTool,
RequestsPatchTool,
RequestsPostTool,
RequestsPutTool,
)
from langchain.tools.scenexplain.tool import SceneXplainTool
from langchain.tools.searx_search.tool import SearxSearchResults, SearxSearchRun
from langchain.tools.shell.tool import ShellTool
from langchain.tools.wikipedia.tool import WikipediaQueryRun
from langchain.tools.wolfram_alpha.tool import WolframAlphaQueryRun
from langchain.tools.openweathermap.tool import OpenWeatherMapQueryRun
from langchain.utilities import ArxivAPIWrapper
from langchain.utilities.bing_search import BingSearchAPIWrapper
from langchain.utilities.duckduckgo_search import DuckDuckGoSearchAPIWrapper
from langchain.utilities.google_search import GoogleSearchAPIWrapper
from langchain.utilities.google_serper import GoogleSerperAPIWrapper
from langchain.utilities.metaphor_search import MetaphorSearchAPIWrapper
from langchain.utilities.awslambda import LambdaWrapper
from langchain.utilities.searx_search import SearxSearchWrapper
from langchain.utilities.serpapi import SerpAPIWrapper
from langchain.utilities.wikipedia import WikipediaAPIWrapper
from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper
from langchain.utilities.openweathermap import OpenWeatherMapAPIWrapper
def _get_python_repl() -> BaseTool:
return PythonREPLTool()
def _get_tools_requests_get() -> BaseTool:
return RequestsGetTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_post() -> BaseTool:
return RequestsPostTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_patch() -> BaseTool:
return RequestsPatchTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_put() -> BaseTool:
return RequestsPutTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_delete() -> BaseTool:
return RequestsDeleteTool(requests_wrapper=TextRequestsWrapper())
def _get_terminal() -> BaseTool:
return ShellTool()
_BASE_TOOLS: Dict[str, Callable[[], BaseTool]] = {
"python_repl": _get_python_repl,
"requests": _get_tools_requests_get, # preserved for backwards compatability
"requests_get": _get_tools_requests_get,
"requests_post": _get_tools_requests_post,
"requests_patch": _get_tools_requests_patch,
"requests_put": _get_tools_requests_put,
"requests_delete": _get_tools_requests_delete,
"terminal": _get_terminal,
}
def _get_pal_math(llm: BaseLanguageModel) -> BaseTool:
return Tool(
name="PAL-MATH",
description="A language model that is really good at solving complex word math problems. Input should be a fully worded hard word math problem.",
func=PALChain.from_math_prompt(llm).run,
)
def _get_pal_colored_objects(llm: BaseLanguageModel) -> BaseTool:
return Tool(
name="PAL-COLOR-OBJ",
description="A language model that is really good at reasoning about position and the color attributes of objects. Input should be a fully worded hard reasoning problem. Make sure to include all information about the objects AND the final question you want to answer.",
func=PALChain.from_colored_object_prompt(llm).run,
)
def _get_llm_math(llm: BaseLanguageModel) -> BaseTool:
return Tool(
name="Calculator",
description="Useful for when you need to answer questions about math.",
func=LLMMathChain.from_llm(llm=llm).run,
coroutine=LLMMathChain.from_llm(llm=llm).arun,
)
def _get_open_meteo_api(llm: BaseLanguageModel) -> BaseTool:
chain = APIChain.from_llm_and_api_docs(llm, open_meteo_docs.OPEN_METEO_DOCS)
return Tool(
name="Open Meteo API",
description="Useful for when you want to get weather information from the OpenMeteo API. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
_LLM_TOOLS: Dict[str, Callable[[BaseLanguageModel], BaseTool]] = {
"pal-math": _get_pal_math,
"pal-colored-objects": _get_pal_colored_objects,
"llm-math": _get_llm_math,
"open-meteo-api": _get_open_meteo_api,
}
def _get_news_api(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool:
news_api_key = kwargs["news_api_key"]
chain = APIChain.from_llm_and_api_docs(
llm, news_docs.NEWS_DOCS, headers={"X-Api-Key": news_api_key}
)
return Tool(
name="News API",
description="Use this when you want to get information about the top headlines of current news stories. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
def _get_tmdb_api(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool:
tmdb_bearer_token = kwargs["tmdb_bearer_token"]
chain = APIChain.from_llm_and_api_docs(
llm,
tmdb_docs.TMDB_DOCS,
headers={"Authorization": f"Bearer {tmdb_bearer_token}"},
)
return Tool(
name="TMDB API",
description="Useful for when you want to get information from The Movie Database. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
def _get_podcast_api(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool:
listen_api_key = kwargs["listen_api_key"]
chain = APIChain.from_llm_and_api_docs(
llm,
podcast_docs.PODCAST_DOCS,
headers={"X-ListenAPI-Key": listen_api_key},
)
return Tool(
name="Podcast API",
description="Use the Listen Notes Podcast API to search all podcasts or episodes. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
def _get_lambda_api(**kwargs: Any) -> BaseTool:
return Tool(
name=kwargs["awslambda_tool_name"],
description=kwargs["awslambda_tool_description"],
func=LambdaWrapper(**kwargs).run,
)
def _get_wolfram_alpha(**kwargs: Any) -> BaseTool:
return WolframAlphaQueryRun(api_wrapper=WolframAlphaAPIWrapper(**kwargs))
def _get_google_search(**kwargs: Any) -> BaseTool:
return GoogleSearchRun(api_wrapper=GoogleSearchAPIWrapper(**kwargs))
def _get_wikipedia(**kwargs: Any) -> BaseTool:
return WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper(**kwargs))
def _get_arxiv(**kwargs: Any) -> BaseTool:
return ArxivQueryRun(api_wrapper=ArxivAPIWrapper(**kwargs))
def _get_google_serper(**kwargs: Any) -> BaseTool:
return GoogleSerperRun(api_wrapper=GoogleSerperAPIWrapper(**kwargs))
def _get_google_serper_results_json(**kwargs: Any) -> BaseTool:
return GoogleSerperResults(api_wrapper=GoogleSerperAPIWrapper(**kwargs))
def _get_google_search_results_json(**kwargs: Any) -> BaseTool:
return GoogleSearchResults(api_wrapper=GoogleSearchAPIWrapper(**kwargs))
def _get_serpapi(**kwargs: Any) -> BaseTool:
return Tool(
name="Search",
description="A search engine. Useful for when you need to answer questions about current events. Input should be a search query.",
func=SerpAPIWrapper(**kwargs).run,
coroutine=SerpAPIWrapper(**kwargs).arun,
)
def _get_searx_search(**kwargs: Any) -> BaseTool:
return SearxSearchRun(wrapper=SearxSearchWrapper(**kwargs))
def _get_searx_search_results_json(**kwargs: Any) -> BaseTool:
wrapper_kwargs = {k: v for k, v in kwargs.items() if k != "num_results"}
return SearxSearchResults(wrapper=SearxSearchWrapper(**wrapper_kwargs), **kwargs)
def _get_bing_search(**kwargs: Any) -> BaseTool:
return BingSearchRun(api_wrapper=BingSearchAPIWrapper(**kwargs))
def _get_metaphor_search(**kwargs: Any) -> BaseTool:
return MetaphorSearchResults(api_wrapper=MetaphorSearchAPIWrapper(**kwargs))
def _get_ddg_search(**kwargs: Any) -> BaseTool:
return DuckDuckGoSearchRun(api_wrapper=DuckDuckGoSearchAPIWrapper(**kwargs))
def _get_human_tool(**kwargs: Any) -> BaseTool:
return HumanInputRun(**kwargs)
def _get_scenexplain(**kwargs: Any) -> BaseTool:
return SceneXplainTool(**kwargs)
def _get_openweathermap(**kwargs: Any) -> BaseTool:
return OpenWeatherMapQueryRun(api_wrapper=OpenWeatherMapAPIWrapper(**kwargs))
_EXTRA_LLM_TOOLS: Dict[
str,
Tuple[Callable[[Arg(BaseLanguageModel, "llm"), KwArg(Any)], BaseTool], List[str]],
] = {
"news-api": (_get_news_api, ["news_api_key"]),
"tmdb-api": (_get_tmdb_api, ["tmdb_bearer_token"]),
"podcast-api": (_get_podcast_api, ["listen_api_key"]),
}
_EXTRA_OPTIONAL_TOOLS: Dict[str, Tuple[Callable[[KwArg(Any)], BaseTool], List[str]]] = {
"wolfram-alpha": (_get_wolfram_alpha, ["wolfram_alpha_appid"]),
"google-search": (_get_google_search, ["google_api_key", "google_cse_id"]),
"google-search-results-json": (
_get_google_search_results_json,
["google_api_key", "google_cse_id", "num_results"],
),
"searx-search-results-json": (
_get_searx_search_results_json,
["searx_host", "engines", "num_results", "aiosession"],
),
"bing-search": (_get_bing_search, ["bing_subscription_key", "bing_search_url"]),
"metaphor-search": (_get_metaphor_search, ["metaphor_api_key"]),
"ddg-search": (_get_ddg_search, []),
"google-serper": (_get_google_serper, ["serper_api_key", "aiosession"]),
"google-serper-results-json": (
_get_google_serper_results_json,
["serper_api_key", "aiosession"],
),
"serpapi": (_get_serpapi, ["serpapi_api_key", "aiosession"]),
"searx-search": (_get_searx_search, ["searx_host", "engines", "aiosession"]),
"wikipedia": (_get_wikipedia, ["top_k_results", "lang"]),
"arxiv": (
_get_arxiv,
["top_k_results", "load_max_docs", "load_all_available_meta"],
),
"human": (_get_human_tool, ["prompt_func", "input_func"]),
"awslambda": (
_get_lambda_api,
["awslambda_tool_name", "awslambda_tool_description", "function_name"],
),
"sceneXplain": (_get_scenexplain, []),
"openweathermap-api": (_get_openweathermap, ["openweathermap_api_key"]),
}
def _handle_callbacks(
callback_manager: Optional[BaseCallbackManager], callbacks: Callbacks
) -> Callbacks:
if callback_manager is not None:
warnings.warn(
"callback_manager is deprecated. Please use callbacks instead.",
DeprecationWarning,
)
if callbacks is not None:
raise ValueError(
"Cannot specify both callback_manager and callbacks arguments."
)
return callback_manager
return callbacks
def load_huggingface_tool(
task_or_repo_id: str,
model_repo_id: Optional[str] = None,
token: Optional[str] = None,
remote: bool = False,
**kwargs: Any,
) -> BaseTool:
try:
from transformers import load_tool
except ImportError:
raise ValueError(
"HuggingFace tools require the libraries `transformers>=4.29.0`"
" and `huggingface_hub>=0.14.1` to be installed."
" Please install it with"
" `pip install --upgrade transformers huggingface_hub`."
)
hf_tool = load_tool(
task_or_repo_id,
model_repo_id=model_repo_id,
token=token,
remote=remote,
**kwargs,
)
outputs = hf_tool.outputs
if set(outputs) != {"text"}:
raise NotImplementedError("Multimodal outputs not supported yet.")
inputs = hf_tool.inputs
if set(inputs) != {"text"}:
raise NotImplementedError("Multimodal inputs not supported yet.")
return Tool.from_function(
hf_tool.__call__, name=hf_tool.name, description=hf_tool.description
)
def load_tools(
tool_names: List[str],
llm: Optional[BaseLanguageModel] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> List[BaseTool]:
"""Load tools based on their name.
Args:
tool_names: name of tools to load.
llm: Optional language model, may be needed to initialize certain tools.
callbacks: Optional callback manager or list of callback handlers.
If not provided, default global callback manager will be used.
Returns:
List of tools.
"""
tools = []
callbacks = _handle_callbacks(
callback_manager=kwargs.get("callback_manager"), callbacks=callbacks
)
for name in tool_names:
if name == "requests":
warnings.warn(
"tool name `requests` is deprecated - "
"please use `requests_all` or specify the requests method"
)
if name == "requests_all":
# expand requests into various methods
requests_method_tools = [
_tool for _tool in _BASE_TOOLS if _tool.startswith("requests_")
]
tool_names.extend(requests_method_tools)
elif name in _BASE_TOOLS:
tools.append(_BASE_TOOLS[name]())
elif name in _LLM_TOOLS:
if llm is None:
raise ValueError(f"Tool {name} requires an LLM to be provided")
tool = _LLM_TOOLS[name](llm)
tools.append(tool)
elif name in _EXTRA_LLM_TOOLS:
if llm is None:
raise ValueError(f"Tool {name} requires an LLM to be provided")
_get_llm_tool_func, extra_keys = _EXTRA_LLM_TOOLS[name]
missing_keys = set(extra_keys).difference(kwargs)
if missing_keys:
raise ValueError(
f"Tool {name} requires some parameters that were not "
f"provided: {missing_keys}"
)
sub_kwargs = {k: kwargs[k] for k in extra_keys}
tool = _get_llm_tool_func(llm=llm, **sub_kwargs)
tools.append(tool)
elif name in _EXTRA_OPTIONAL_TOOLS:
_get_tool_func, extra_keys = _EXTRA_OPTIONAL_TOOLS[name]
sub_kwargs = {k: kwargs[k] for k in extra_keys if k in kwargs}
tool = _get_tool_func(**sub_kwargs)
tools.append(tool)
else:
raise ValueError(f"Got unknown tool {name}")
if callbacks is not None:
for tool in tools:
tool.callbacks = callbacks
return tools
def get_all_tool_names() -> List[str]:
"""Get a list of all possible tool names."""
return (
list(_BASE_TOOLS)
+ list(_EXTRA_OPTIONAL_TOOLS)
+ list(_EXTRA_LLM_TOOLS)
+ list(_LLM_TOOLS)
)
| [] |
2024-01-10 | unlikelymaths/tomef | tomef~metrics~topic_similarity.py | #!/usr/bin/env python
# coding: utf-8
# # Topic Similarity
# <div style="position: absolute; right:0;top:0"><a href="./metrics_index.doc.ipynb" style="text-decoration: none"> <font size="5">←</font></a>
# <a href="../evaluation.ipynb" style="text-decoration: none"> <font size="5">↑</font></a></div>
#
# `Description`
#
# ---
# ## Setup and Settings
# ---
# In[20]:
from __init__ import init_vars
init_vars(vars(), ('info', {}))
import warnings
warnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')
from gensim.models import CoherenceModel, KeyedVectors
from gensim.corpora import WikiCorpus, Dictionary
import data
import config
from base import nbprint
from util import ProgressIterator
from widgetbase import nbbox
from os.path import join, isfile
from tokenizer.main import get_tokenizer
from metrics.widgets import topiclist_picker
if RUN_SCRIPT: topiclist_picker(info)
# In[24]:
def mean_pairwise_jaccard(topiclist):
topiclist = [[entry.token for entry in topic] for topic in topiclist]
similarities = []
for idx, topic1 in enumerate(topiclist):
set1 = set(topic1)
for topic2 in topiclist[idx+1:]:
set2 = set(topic2)
similarities.append(len(set1.intersection(set2)) / len(set1.union(set2)))
return sum(similarities) / len(similarities)
# ---
# ## Show all
# ---
# In[25]:
if RUN_SCRIPT:
nbbox(mini=True)
topiclist = data.load_topiclist(info)
topiclist = [topic[:10] for topic in topiclist]
mean_similarity = mean_pairwise_jaccard(topiclist)
nbprint('Mean Pairwise Jaccard similarity is {}'.format(mean_similarity))
| [] |
2024-01-10 | nishio/omni | write_to_scrapbox~recurrent_notes.py | """
Recurrent Research Notes Generation
This script generates a new "research note" for Scrapbox, based on the previous "research note" and random fragments.
Japanese description (original)
https://scrapbox.io/nishio/AI%E3%81%8C%E6%AF%8E%E6%97%A5%E7%A0%94%E7%A9%B6%E3%83%8E%E3%83%BC%E3%83%88%E3%82%92%E6%9B%B8%E3%81%8F
Translation to English
https://scrapbox.io/nishio-en/AI_writes_research_notes_daily
Outcome example:
Japanese: https://scrapbox.io/nishio/AI%E3%81%A8%E3%81%AE%E5%85%B1%E5%90%8C%E5%8C%96
English: https://scrapbox.io/nishio-en/Co-operation_with_AI
"""
import dotenv
import openai
import time
import os
import json
import pickle
import datetime
import random
import tiktoken
import re
import requests
import argparse
from urllib.parse import quote
import urllib.parse
from utils import (
markdown_to_scrapbox,
LESS_INTERESTING,
EXTRA_INFO_HEADER,
extract_previous_notes,
parse_titles,
get_api_url,
)
import vector_search
dotenv.load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
PROJECT = os.getenv("PROJECT_NAME")
assert OPENAI_API_KEY and PROJECT
openai.api_key = OPENAI_API_KEY
IGNORE_AI_GENERATED_PAGES = False
IS_PRIVATE_PROJECT = False
MAX_UPDATE_PER_RUN = None
# main prompt, including chadacter settings
PROMPT = "".join(
[
"You are Omni, ",
"a researcher focused on improving intellectual productivity, ",
"fluent in Japanese. All outputs must be in Japanese.",
"1: Read given note, ",
"and write a abstruct digest of them, ",
"omit concrete information to reduce the content to half its size. ",
"2: You also read the fragments from a colleague Nishio's research notes, ",
"if you find a relationship between the notes and a fragment X, it is highly significant. ",
"Use title of X to refer it. You should describe how the note and X are related.",
"3: You are encouraged to form opinions, think deeply. ",
"4: Summarize your thoughts in a line. Then make title for the thought. ",
]
)
PROMPT += """
### note
{previous_notes}
### fragments
{digest_str}
"""
CHARACTOR_ICON = "[omni.icon]"
enc = tiktoken.get_encoding("cl100k_base")
def get_size(text):
return len(enc.encode(text))
def make_digest(payload):
title = payload["title"]
text = payload["text"]
return f"{title}\n{text}\n\n"
def find_last_note_from_pages(pages):
bot_output = []
for page in pages:
if page["title"].startswith("🤖20"):
bot_output.append((page["title"], page["lines"]))
bot_output.sort()
prev_title, prev_lines = bot_output[-1]
return prev_title, prev_lines
def find_last_note_from_json():
# find latest note from JSON
jsondata = json.load(open(f"{PROJECT}.json"))
pages = jsondata["pages"]
return find_last_note_from_pages(pages)
def title_to_url(title, project_name):
# Replace spaces with underscores
title_with_underscores = title.replace(" ", "_")
# Encode non-ASCII characters
encoded_title = quote(title_with_underscores)
# Construct the URL
url = f"https://scrapbox.io/{PROJECT}/{encoded_title}"
return url
def find_last_note_from_scrapbox():
# find latest note from Scrapbox
api_url = f"https://scrapbox.io/api/pages/{PROJECT}"
page = requests.get(api_url).json()
bot_output = []
for line in page["pages"]:
if line["title"].startswith("🤖20"):
bot_output.append(line["title"])
bot_output.sort()
prev_title = bot_output[-1]
return read_note_from_scrapbox(title_to_url(prev_title, PROJECT))
def read_note_from_scrapbox(url):
"""
url example: https://scrapbox.io/nishio/%F0%9F%A4%962023-08-13_07:08
"""
if IS_PRIVATE_PROJECT:
from read_private_project import read_private_pages
page = read_private_pages(url)
else:
api_url = get_api_url(url)
page = requests.get(api_url).json()
return page["title"], [line["text"] for line in page["lines"]]
def get_previous_notes():
print("## Get Previous Notes")
if args.url:
print("get_previous_notes: from URL")
prev_title, prev_lines = read_note_from_scrapbox(args.url)
elif args.get_latest:
print("get_previous_notes: from Scrapbox API")
prev_title, prev_lines = find_last_note_from_scrapbox()
else:
print("get_previous_notes: from exported JSON")
prev_title, prev_lines = find_last_note_from_json()
previous_notes = extract_previous_notes(prev_lines)
return prev_title, previous_notes
def fill_with_random_fragments(rest):
# fill the rest with random fragments
data = pickle.load(open(f"{PROJECT}.pickle", "rb"))
keys = list(data.keys())
random.shuffle(keys)
digests = []
titles = []
while rest > 0:
p = keys.pop(0)
payload = data[p][1]
s = get_size(payload["text"])
if s > rest:
break
digests.append(make_digest(payload))
titles.append(payload["title"])
rest -= s
digest_str = "\n".join(digests)
return titles, digest_str
def fill_with_related_fragments(rest, query, N=3, ng_list=[]):
# fill the rest with vector search ressult fragments
assert query != ""
data = pickle.load(open(f"{PROJECT}.pickle", "rb"))
sorted_data = vector_search.get_sorted(data, query)
digests = []
titles = []
while rest > 0 and sorted_data and len(digests) < N:
p = sorted_data.pop(0)
payload = p[2]
title = payload["title"]
if title in ng_list:
continue
# take only 1 fragment from each page
if title in titles:
continue
# omit AI-generated pages
if IGNORE_AI_GENERATED_PAGES and title.startswith("🤖"):
continue
s = get_size(payload["text"])
if s > rest:
break
digests.append(make_digest(payload))
titles.append(payload["title"])
rest -= s
# fill the rest with random fragments
keys = list(data.keys())
random.shuffle(keys)
while rest > 0 and keys:
p = keys.pop(0)
payload = data[p][1]
title = payload["title"]
if title in ng_list:
continue
# take only 1 fragment from each page
if title in titles:
continue
s = get_size(payload["text"])
if s > rest:
break
digests.append(make_digest(payload))
titles.append(payload["title"])
rest -= s
return titles, digests
def get_used_titles(lines):
all_titles = []
for line in lines:
if line.startswith("titles: "):
titles = parse_titles(line)
all_titles.extend(titles)
return list(set(all_titles))
def overwrite_mode(prev_title, prev_lines):
print("overwrite:", prev_title)
original_prev_lines = prev_lines.copy()
used_pages = get_used_titles(prev_lines)
print("used pages:", used_pages)
previous_notes = extract_previous_notes(prev_lines)
if previous_notes == "":
print("previous notes is empty, use title instead")
previous_notes = prev_title
output_page_title = prev_title
date = datetime.datetime.now()
date = date.strftime("%Y-%m-%d %H:%M")
section_title = f"[*** {output_page_title}] {date} {CHARACTOR_ICON}"
lines = [output_page_title, LESS_INTERESTING, section_title]
rest = 4000 - get_size(PROMPT) - get_size(previous_notes)
if rest < 0:
print(f"previous notes is too long, {get_size(previous_notes)}")
buf = []
rest = 4000 - get_size(PROMPT)
previous_notes_lines = previous_notes.split("\n")
while rest > 0:
line = previous_notes_lines.pop(0)
s = get_size(line)
if s > rest:
break
buf.append(line)
rest -= s
previous_notes = "\n".join(buf)
rest = 0
titles, digests = fill_with_related_fragments(
rest, previous_notes, N=10, ng_list=used_pages
)
digest_str = "\n".join(digests)
prompt = PROMPT.format(digest_str=digest_str, previous_notes=previous_notes)
response = call_gpt(prompt)
if not response:
response = ["`AI_IGNORE: GPT failed`"]
lines.extend(response)
lines.append("")
lines.append(EXTRA_INFO_HEADER)
# lines.append("titles: " + ", ".join(f"{s}" for s in titles))
lines.append("titles: `{0}`".format(json.dumps(titles, ensure_ascii=False)))
lines.append(f"generated: {date}")
lines.append("[* previous notes]")
lines.extend(original_prev_lines)
pages = [{"title": output_page_title, "lines": lines}]
return pages
def call_gpt(prompt, model="gpt-4"):
print("# Call GPT")
print("## Prompt")
print(prompt)
if args.skip_gpt:
print("## Skipped")
return ["GPT Skipped"]
print("--- End of Prompt")
messages = [{"role": "system", "content": prompt}]
lines = []
try:
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=0.0,
# max_tokens=max_tokens,
n=1,
stop=None,
)
ret = response.choices[0].message.content.strip()
print("## GPT Response")
print(ret)
print("--- End of GPT Response")
ret = markdown_to_scrapbox(ret)
lines.extend(ret.split("\n"))
except Exception as e:
print("## GPT Error")
print(e)
# lines.append("Failed to generate report.")
# lines.append(str(e))
# lines.append("Prompt:")
# lines.extend(prompt.split("\n"))
return []
return lines
def main_branch():
"""find latest AI-note (title: "🤖" + date), read it, and create new one"""
print("# Main branch")
date = datetime.datetime.now()
date = date.strftime("%Y-%m-%d %H:%M")
output_page_title = "🤖" + date
lines = [output_page_title, LESS_INTERESTING, CHARACTOR_ICON]
previous_note_title, previous_notes = get_previous_notes()
rest = 4000 - get_size(PROMPT) - get_size(previous_notes)
print("## Fill with related fragments")
titles, digests = fill_with_related_fragments(rest, previous_notes)
digest_str = "\n".join(digests)
prompt = PROMPT.format(digest_str=digest_str, previous_notes=previous_notes)
lines.extend(call_gpt(prompt))
lines.extend(make_embedding_report(previous_note_title, previous_notes, titles))
pages = [{"title": output_page_title, "lines": lines}]
return pages
def make_embedding_report(previous_note_title, previous_notes, titles):
lines = []
json_size = os.path.getsize(f"{PROJECT}.json")
pickle_size = os.path.getsize(f"{PROJECT}.pickle")
lines.append("")
lines.append(EXTRA_INFO_HEADER)
lines.append("json size: " + str(json_size))
lines.append("pickle size: " + str(pickle_size))
lines.append("previous notes size: " + str(get_size(previous_notes)))
lines.append(f"previous notes: [{previous_note_title}]")
lines.append("fragment titles: " + ", ".join(f"{s}" for s in titles))
return lines
def multiheads():
print("# Multi-heads")
heads = []
jsondata = json.load(open(f"{PROJECT}.json"))
pages = jsondata["pages"]
for page in pages:
if page["title"].startswith("🤖🔁"):
heads.append((page["title"], page["lines"]))
# to avoid too many updates
if MAX_UPDATE_PER_RUN is not None and len(heads) > MAX_UPDATE_PER_RUN:
random.shuffle(heads)
heads = heads[:MAX_UPDATE_PER_RUN]
pages_to_update = []
for head in heads:
pages_to_update.extend(overwrite_mode(*head))
return pages_to_update
def main():
global args
parser = argparse.ArgumentParser(description="Process a URL")
parser.add_argument("--url", type=str, help="The URL to process", required=False)
parser.add_argument(
"--get-latest",
action="store_true",
help="Get the latest page from online Scrapbox",
)
parser.add_argument(
"--overwrite",
action="store_true",
help="Overwrite the given page",
)
parser.add_argument(
"--skip-gpt",
action="store_true",
help="skip GPT API call for tests",
)
args = parser.parse_args()
if args.overwrite and args.url:
# URL-specific overwrite, usually triggered by human
urls = []
if args.url == "input":
url = input("url> ")
print("url:", urllib.parse.unquote(url))
urls.append(url)
elif args.url == "multi":
while True:
url = input("url> ")
if url == "":
break
print("url:", urllib.parse.unquote(url))
urls.append(url)
else:
urls.append(args.url)
result = []
for url in urls:
prev_title, prev_lines = read_note_from_scrapbox(url)
result.extend(overwrite_mode(prev_title, prev_lines))
return result
# pages_to_update = main_branch()
pages_to_update = []
if not args.url:
# when url is specified, user want to update the page only
pages_to_update.extend(multiheads())
return pages_to_update
if __name__ == "__main__":
pages = main()
for page in pages:
print(page["title"])
print("\n".join(page["lines"]))
print()
| [
"You are Omni, a researcher focused on improving intellectual productivity, fluent in Japanese. All outputs must be in Japanese.1: Read given note, and write a abstruct digest of them, omit concrete information to reduce the content to half its size. 2: You also read the fragments from a colleague Nishio's research notes, if you find a relationship between the notes and a fragment X, it is highly significant. Use title of X to refer it. You should describe how the note and X are related.3: You are encouraged to form opinions, think deeply. 4: Summarize your thoughts in a line. Then make title for the thought. ",
"\n\n### note\n{previous_notes}\n\n### fragments\n{digest_str}\n"
] |
2024-01-10 | nishio/omni | write_to_scrapbox~iterative_commenter.py | """
drived from "Recurrent Research Notes Generation"
"""
import dotenv
import openai
import time
import os
import json
import pickle
import datetime
import random
import tiktoken
import re
import requests
import argparse
from urllib.parse import quote, quote_plus, unquote
import scrapbox_io
import read_private_project
from utils import (
markdown_to_scrapbox,
AI_GENERATED_MARKER,
EXTRA_INFO_HEADER,
extract_previous_notes,
parse_titles,
get_api_url,
)
import vector_search
dotenv.load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
PROJECT = os.getenv("PROJECT_NAME")
assert OPENAI_API_KEY and PROJECT
openai.api_key = OPENAI_API_KEY
IGNORE_AI_GENERATED_PAGES = False
IS_PRIVATE_PROJECT = False
MAX_UPDATE_PER_RUN = None
# main prompt, including chadacter settings
PROMPT = "\n".join(
[
"You are Omni, "
+ "a researcher focused on improving intellectual productivity, "
+ "fluent in Japanese. All outputs must be in Japanese.",
"Your objective is to create new idea."
"1: Read given note, "
+ "and write a abstruct digest of them, "
+ "omit concrete information to reduce the content to half its size. Use concise words. ",
"HINT: 作り出すことが可能であると述べられています。→作り出せる。",
"HINT: 背景があるという考え方が共有されています。→背景がある。",
"2: You also read the fragments(断片), "
+ "if you find a relationship between the note and a fragment, "
+ "use title of the fragment to refer it. Write digest of them. Describe how the fragments are related in detail.",
"EXAMPLE: 「[Hatena2015-02-01]」の断片から、「Wikipediaから持ってきた人間の外にある知識を人間に与える」ではなく、「Wikipediaの情報を使って人間から情報を引き出す」方向性が有益と気づいた。",
"EXAMPLE: 「世にも奇妙な事件」には、自分のバグを他人のバグだと思い込むというエピソードがある。これは、パズルの設計に応用できる。"
"3: You are encouraged to form opinions, think deeply. ",
"4: Summarize your thoughts in a line. Then make title for the thought. ",
]
)
PROMPT += """
## note
{previous_notes}
## fragments
{digest_str}
"""
CHARACTOR_ICON = "[omni.icon]"
enc = tiktoken.get_encoding("cl100k_base")
def get_size(text):
return len(enc.encode(text))
def make_digest(payload):
title = payload["title"]
text = payload["text"]
return f"### {title}\n{text}\n\n"
def find_last_note_from_pages(pages):
bot_output = []
for page in pages:
if page["title"].startswith("🤖20"):
bot_output.append((page["title"], page["lines"]))
bot_output.sort()
prev_title, prev_lines = bot_output[-1]
return prev_title, prev_lines
def find_last_note_from_json():
# find latest note from JSON
jsondata = json.load(open(f"{PROJECT}.json"))
pages = jsondata["pages"]
return find_last_note_from_pages(pages)
def title_to_url(title, project_name):
# Replace spaces with underscores
title_with_underscores = title.replace(" ", "_")
# Encode non-ASCII characters
encoded_title = quote(title_with_underscores)
# Construct the URL
url = f"https://scrapbox.io/{PROJECT}/{encoded_title}"
return url
def find_last_note_from_scrapbox():
# find latest note from Scrapbox
api_url = f"https://scrapbox.io/api/pages/{PROJECT}"
page = requests.get(api_url).json()
bot_output = []
for line in page["pages"]:
if line["title"].startswith("🤖20"):
bot_output.append(line["title"])
bot_output.sort()
prev_title = bot_output[-1]
return read_note_from_scrapbox(title_to_url(prev_title, PROJECT))
def read_note_from_scrapbox(url):
"""
url example: https://scrapbox.io/nishio/%F0%9F%A4%962023-08-13_07:08
"""
if IS_PRIVATE_PROJECT:
from read_private_project import read_private_pages
page = read_private_pages(url)
else:
api_url = get_api_url(url)
page = requests.get(api_url).json()
return page["title"], [line["text"] for line in page["lines"]]
def get_previous_notes():
print("## Get Previous Notes")
if args.url:
print("get_previous_notes: from URL")
prev_title, prev_lines = read_note_from_scrapbox(args.url)
elif args.get_latest:
print("get_previous_notes: from Scrapbox API")
prev_title, prev_lines = find_last_note_from_scrapbox()
else:
print("get_previous_notes: from exported JSON")
prev_title, prev_lines = find_last_note_from_json()
previous_notes = extract_previous_notes(prev_lines)
return prev_title, previous_notes
def fill_with_random_fragments(rest):
# fill the rest with random fragments
data = pickle.load(open(f"{PROJECT}.pickle", "rb"))
keys = list(data.keys())
random.shuffle(keys)
digests = []
titles = []
while rest > 0:
p = keys.pop(0)
payload = data[p][1]
s = get_size(payload["text"])
if s > rest:
break
digests.append(make_digest(payload))
titles.append(payload["title"])
rest -= s
digest_str = "\n".join(digests)
return titles, digest_str
def get_pickle_filename(name):
if not name.endswith(".pickle"):
filename = f"{name}.pickle"
else:
filename = name
if os.path.exists(filename):
return filename
filename = f"pickles/{filename}"
assert os.path.exists(filename)
return filename
def add_picklename_to_title(title, project, default_project=PROJECT):
if project == default_project:
return title
return f"{project}/{title}"
def load_one_pickle(name):
"""
accepts both old and new format
"""
print("load_one_pickle:", name)
filename = get_pickle_filename(name)
if os.path.exists(filename):
data = pickle.load(open(filename, "rb"))
else:
data = pickle.load(open(f"pickles/{filename}", "rb"))
picklename = os.path.basename(filename).split(".")[0]
for k in data:
if isinstance(data[k][1], str):
data[k] = (
data[k][0],
{
"title": add_picklename_to_title(data[k][1], picklename),
"project": picklename,
"text": k,
"is_public": True,
},
)
else:
payload = data[k][1]
payload["title"] = add_picklename_to_title(payload["title"], picklename)
data[k] = (data[k][0], payload)
return data
def load_pickles():
data = {}
print("using pickles:", args.pickles)
if args.pickles == PROJECT:
data = load_one_pickle(PROJECT)
elif args.pickles == "all":
for f in os.listdir(".") + os.listdir("pickles"):
# if "nishio" in f:
# continue
# if f == "nishio.pickle":
# continue
if f.endswith(".pickle"):
data.update(load_one_pickle(f))
print("size of all data:", len(data))
else:
for f in args.pickles.split(","):
data.update(load_one_pickle(f))
return data
def fill_with_related_fragments(rest, query, N=3, ng_list=[]):
# fill the rest with vector search ressult fragments
assert query != ""
start_time = time.perf_counter()
data = load_pickles()
print("load pickles:", time.perf_counter() - start_time)
start_time = time.perf_counter()
sorted_data = vector_search.get_sorted(data, query)
print("vector search:", time.perf_counter() - start_time)
digests = []
titles = []
while rest > 0 and sorted_data and len(digests) < N:
p = sorted_data.pop(0)
payload = p[2]
title = payload["title"]
if title in ng_list:
continue
# take only 1 fragment from each page
if title in titles:
continue
# omit AI-generated pages
if IGNORE_AI_GENERATED_PAGES and title.startswith("🤖"):
continue
s = get_size(payload["text"])
if s > rest:
break
digests.append(make_digest(payload))
titles.append(payload["title"])
rest -= s
# fill the rest with random fragments
keys = list(data.keys())
random.shuffle(keys)
while rest > 0 and keys:
p = keys.pop(0)
payload = data[p][1]
title = payload["title"]
if title in ng_list:
continue
# take only 1 fragment from each page
if title in titles:
continue
s = get_size(payload["text"])
if s > rest:
break
digests.append(make_digest(payload))
titles.append(payload["title"])
rest -= s
return titles, digests
def get_used_titles(lines):
all_titles = []
for line in lines:
if line.startswith("titles: "):
titles = parse_titles(line)
all_titles.extend(titles)
return list(set(all_titles))
def overwrite_mode(
prev_title,
prev_lines,
original_prev_lines=None,
show_search_result=False,
do_direct_link=False,
):
print("overwrite:", prev_title)
if original_prev_lines is None:
original_prev_lines = prev_lines.copy()
used_pages = get_used_titles(prev_lines) + [prev_title]
print("used pages:", used_pages)
previous_notes = extract_previous_notes(prev_lines)
if previous_notes == "":
print("previous notes is empty, use title instead")
previous_notes = prev_title
output_page_title = prev_title
date = datetime.datetime.now()
date = date.strftime("%Y-%m-%d %H:%M")
section_title = f"[*** {output_page_title}] {date} {CHARACTOR_ICON}"
rest = 4000 - get_size(PROMPT) - get_size(previous_notes)
if rest < 0:
print(f"previous notes is too long, {get_size(previous_notes)}")
buf = []
rest = 4000 - get_size(PROMPT)
previous_notes_lines = previous_notes.split("\n")
while rest > 0:
line = previous_notes_lines.pop(0)
s = get_size(line)
if s > rest:
break
buf.append(line)
rest -= s
previous_notes = "\n".join(buf)
rest = 0
titles, digests = fill_with_related_fragments(
rest, previous_notes, N=10, ng_list=used_pages
)
digest_str = "\n".join(digests)
prompt = PROMPT.format(digest_str=digest_str, previous_notes=previous_notes)
response = call_gpt(prompt)
if not response:
response = ["`AI_IGNORE: GPT failed`"]
# add new comment on the bottom of page
lines = original_prev_lines[:]
lines.extend([AI_GENERATED_MARKER, section_title])
lines.extend(response)
lines.append("")
lines.append(EXTRA_INFO_HEADER)
# lines.append("titles: " + ", ".join(f"{s}" for s in titles))
lines.append("titles: `{0}`".format(json.dumps(titles, ensure_ascii=False)))
if do_direct_link:
for t in titles:
if "/" in t:
lines.append(f"[/{t}]")
else:
lines.append(f"[{t}]")
# show search result
if show_search_result:
lines.append("code:fragments")
for digest in digests:
lines.extend([" " + line for line in digest.split("\n")])
lines.append(f"generated: {date}")
pages = [{"title": output_page_title, "lines": lines}]
return pages
def call_gpt(prompt, model="gpt-4"):
print("# Call GPT")
print("## Prompt")
print(prompt)
if args.skip_gpt:
print("## Skipped")
return ["GPT Skipped"]
print("--- End of Prompt")
messages = [{"role": "system", "content": prompt}]
lines = []
try:
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=0.0,
# max_tokens=max_tokens,
n=1,
stop=None,
)
ret = response.choices[0].message.content.strip()
print("## GPT Response")
print(ret)
print("--- End of GPT Response")
ret = markdown_to_scrapbox(ret)
lines.extend(ret.split("\n"))
except Exception as e:
print("## GPT Error")
print(e)
# lines.append("Failed to generate report.")
# lines.append(str(e))
# lines.append("Prompt:")
# lines.extend(prompt.split("\n"))
return []
return lines
def make_embedding_report(previous_note_title, previous_notes, titles):
lines = []
json_size = os.path.getsize(f"{PROJECT}.json")
pickle_size = os.path.getsize(f"{PROJECT}.pickle")
lines.append("")
lines.append(EXTRA_INFO_HEADER)
lines.append("json size: " + str(json_size))
lines.append("pickle size: " + str(pickle_size))
lines.append("previous notes size: " + str(get_size(previous_notes)))
lines.append(f"previous notes: [{previous_note_title}]")
lines.append("fragment titles: " + ", ".join(f"{s}" for s in titles))
return lines
def find_repeat_pages(pages):
"repeat🔁 page: everyday updated repeatedly"
heads = []
for page in pages:
if "🔁" in page["title"]:
heads.append((page["title"], page["lines"]))
# to avoid too many updates
if MAX_UPDATE_PER_RUN is not None and len(heads) > MAX_UPDATE_PER_RUN:
random.shuffle(heads)
heads = heads[:MAX_UPDATE_PER_RUN]
return heads
def find_occasional_pages(pages, N=1):
"occasional🌀 page: updated occasionally"
heads = []
for page in pages:
if "🌀" in page["title"]:
heads.append((page["updated"], (page["title"], page["lines"])))
heads.sort()
return [x[1] for x in heads[:N]]
def multiheads():
# design note:
# https://scrapbox.io/nishio/AI%E3%81%AB%E3%82%88%E3%82%8B%E3%83%AA%E3%83%94%E3%83%BC%E3%83%88%E6%9B%B4%E6%96%B0%E3%83%9A%E3%83%BC%E3%82%B8
print("# Multi-heads")
heads = []
jsondata = json.load(open(f"{PROJECT}.json"))
pages = jsondata["pages"]
repeat_pages = find_repeat_pages(pages)
heads.extend(repeat_pages)
print("repeat pages:", [x[0] for x in repeat_pages])
occasional_pages = find_occasional_pages(pages)
heads.extend(occasional_pages)
print("occasional pages:", [x[0] for x in occasional_pages])
pages_to_update = []
for title, lines in heads:
pages_to_update.extend(overwrite_mode(title, lines))
json.dump(pages_to_update, open("pages_to_update.json", "w"))
return pages_to_update
def has_ai_generated_contents(lines):
return any([line.startswith(AI_GENERATED_MARKER) for line in lines])
def pioneer_loop():
print("# Pioneer-Loop mode")
project = PROJECT
if args.pioneer_loop_private:
project = "omni-private"
while True:
pages_to_update = pioneer()
if pages_to_update:
scrapbox_io.write_pages(pages_to_update, project)
time.sleep(60 * 10) # wait 10 minutes
else:
print("no pages to update")
time.sleep(60) # wait 1 minute
def get_links_of_page(url):
# page = requests.get(url).json()
page = read_private_project.read_private_pages(url)
return page["links"]
def pioneer():
"""
Activates the pioneering mode.
In this mode, the function identifies and extends or generates new content
for areas in the system (e.g., Wiki pages) that are uncharted or incomplete.
This can be particularly useful for addressing "red links" or similar gaps in the system.
"""
print("# Pioneer mode")
START_URL = ()
links = []
if args.pioneer_loop_private:
links.extend(
get_links_of_page(f"https://scrapbox.io/api/pages/omni-private/entry")
)
else:
links.extend(
get_links_of_page(
f"https://scrapbox.io/api/pages/nishio/%E2%9C%8D%EF%B8%8F%F0%9F%A4%96"
)
)
pages_to_update = []
for link in links:
link = quote_plus(link.replace(" ", "_"))
# project may change
project = PROJECT
if args.pioneer_loop_private:
project = "omni-private"
url = f"https://scrapbox.io/api/pages/{project}/{link}"
try:
page = read_private_project.read_private_pages(url)
except Exception as e:
print("error:", e, "url:", url)
continue
lines = [x["text"] for x in page["lines"]]
title = page["title"]
if has_ai_generated_contents(lines):
continue
print(link)
# show detail
show_search_result = False
do_direct_link = False
if args.pioneer_loop_private:
show_search_result = True
do_direct_link = True
pages_to_update.extend(
overwrite_mode(
title,
lines,
page["lines"],
show_search_result=True,
do_direct_link=True,
)
)
# backup result
json.dump(
pages_to_update,
open("pages_to_update.json", "w"),
indent=2,
ensure_ascii=False,
)
return pages_to_update
def parse_args():
global args
parser = argparse.ArgumentParser(description="Process a URL")
parser.add_argument("--url", type=str, help="The URL to process", required=False)
parser.add_argument(
"--pioneer",
action="store_true",
help="Enable the pioneering mode to extend or generate new content.",
required=False,
)
parser.add_argument(
"--pioneer-loop",
action="store_true",
help="Enable the infinite pioneering mode to extend or generate new content.",
required=False,
)
parser.add_argument(
"--pioneer-loop-private",
action="store_true",
help="do pioneer-loop in private project",
required=False,
)
parser.add_argument(
"--skip-gpt",
action="store_true",
help="skip GPT API call for tests",
)
parser.add_argument(
"--pickles",
type=str,
default=PROJECT,
help="pickles to use for vector search",
)
args = parser.parse_args()
def main():
"Entrypoint from Github Actions"
parse_args()
if args.pioneer_loop or args.pioneer_loop_private:
pioneer_loop()
return []
if args.pioneer:
return pioneer()
if args.url:
# URL-specific overwrite, usually triggered by human
urls = []
if args.url == "input":
url = input("url> ")
print("url:", unquote(url))
urls.append(url)
elif args.url == "multi":
while True:
url = input("url> ")
if url == "":
break
print("url:", unquote(url))
urls.append(url)
else:
urls.append(args.url)
result = []
for url in urls:
prev_title, prev_lines = read_note_from_scrapbox(url)
result.extend(overwrite_mode(prev_title, prev_lines))
return result
pages_to_update = []
# disabled multiheads (2023-11-02)
if 0:
if not args.url:
# when url is specified, user want to update the page only
pages_to_update.extend(multiheads())
return pages_to_update
if __name__ == "__main__":
# main() is entrypoint from Github Actions
pages = main()
scrapbox_io.write_pages(pages)
print("write ok")
# parse_args()
# print(fill_with_related_fragments(1000, "test", N=10, ng_list=[]))
| [
"\n\n## note\n{previous_notes}\n\n## fragments\n{digest_str}\n",
"You are Omni, a researcher focused on improving intellectual productivity, fluent in Japanese. All outputs must be in Japanese.\nYour objective is to create new idea.1: Read given note, and write a abstruct digest of them, omit concrete information to reduce the content to half its size. Use concise words. \nHINT: 作り出すことが可能であると述べられています。→作り出せる。\nHINT: 背景があるという考え方が共有されています。→背景がある。\n2: You also read the fragments(断片), if you find a relationship between the note and a fragment, use title of the fragment to refer it. Write digest of them. Describe how the fragments are related in detail.\nEXAMPLE: 「[Hatena2015-02-01]」の断片から、「Wikipediaから持ってきた人間の外にある知識を人間に与える」ではなく、「Wikipediaの情報を使って人間から情報を引き出す」方向性が有益と気づいた。\nEXAMPLE: 「世にも奇妙な事件」には、自分のバグを他人のバグだと思い込むというエピソードがある。これは、パズルの設計に応用できる。3: You are encouraged to form opinions, think deeply. \n4: Summarize your thoughts in a line. Then make title for the thought. "
] |
2024-01-10 | nishio/omni | write_to_scrapbox~local_recurrent_notes.py | """
Local Recurrent Research Notes Generation
This script generates a new "research note", based on the previous "research note" and random fragments.
"""
import dotenv
import openai
import time
import os
import json
import pickle
import datetime
import random
import tiktoken
import re
import requests
import argparse
from urllib.parse import quote
from utils import (
markdown_to_scrapbox,
LESS_INTERESTING,
EXTRA_INFO_HEADER,
extract_previous_notes,
)
import vector_search
dotenv.load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
PROJECT = os.getenv("PROJECT_NAME")
assert OPENAI_API_KEY and PROJECT
openai.api_key = OPENAI_API_KEY
# main prompt, including chadacter settings
PROMPT = "".join(
[
"You are Omni, ",
"a researcher focused on improving intellectual productivity, ",
"fluent in Japanese, ",
"and a Christian American. ",
"Read your previous research notes, ",
"which are essential, ",
"and write a digest of them, ",
"reducing the content to half its size. ",
"You may also read the random fragments from a colleague Nishio's research notes, ",
"but they are not as important, ",
"and you can ignore them. ",
"However, if you find a relationship between your notes and some random fragments, it is highly significant. ",
"Use title of fragment to refer them. ",
"You are encouraged to form opinions, think deeply, and record questions. ",
"You should use Japanese.",
]
)
PROMPT += """
### previous notes
{previous_notes}
### book fragment
{book_fragment_str}
"""
CHARACTOR_ICON = "[omni.icon]"
END_LINE_MARKER = "end line: "
enc = tiktoken.get_encoding("cl100k_base")
def get_size(text):
return len(enc.encode(text))
def make_digest(payload):
title = payload["title"]
text = payload["text"]
return f"{title}\n{text}\n\n"
def fill_with_lines(rest, lines, start=0):
index = start
chosen = []
while rest > 0 and index < len(lines):
line = lines[index]
s = get_size(line)
if s > rest:
break
rest -= s
chosen.append(line)
index += 1
return chosen, index
def fill_with_related_fragments(rest, query, N=3, ng_list=[]):
# fill the rest with vector search ressult fragments
data = pickle.load(open(f"{PROJECT}.pickle", "rb"))
sorted_data = vector_search.get_sorted(data, query)
digests = []
titles = []
while rest > 0 and sorted_data and len(digests) < N:
p = sorted_data.pop(0)
payload = p[2]
title = payload["title"]
if title in ng_list:
continue
# take only 1 fragment from each page
if title in titles:
continue
# omit AI-generated pages
if title.startswith("🤖"):
continue
s = get_size(payload["text"])
if s > rest:
break
digests.append(make_digest(payload))
titles.append(payload["title"])
rest -= s
return digests, titles, rest
def call_gpt(prompt, model="gpt-4"):
print("# Call GPT")
print("## Prompt")
print(prompt)
if args.skip_gpt:
print("## Skipped")
return ["GPT Skipped"]
messages = [{"role": "system", "content": prompt}]
lines = []
try:
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=0.0,
# max_tokens=max_tokens,
n=1,
stop=None,
)
ret = response.choices[0].message.content.strip()
print(ret)
ret = markdown_to_scrapbox(ret)
lines.extend(ret.split("\n"))
except Exception as e:
# lines.append("Failed to generate report.")
# lines.append(str(e))
# lines.append("Prompt:")
# lines.extend(prompt.split("\n"))
raise
return lines
def make_embedding_report(previous_note_title, previous_notes, titles):
lines = []
json_size = os.path.getsize(f"{PROJECT}.json")
pickle_size = os.path.getsize(f"{PROJECT}.pickle")
lines.append("")
lines.append(EXTRA_INFO_HEADER)
lines.append("json size: " + str(json_size))
lines.append("pickle size: " + str(pickle_size))
lines.append("previous notes size: " + str(get_size(previous_notes)))
lines.append(f"previous notes: [{previous_note_title}]")
lines.append("fragment titles: " + ", ".join(f"{s}" for s in titles))
return lines
def main():
global args
parser = argparse.ArgumentParser(description="Process a URL")
parser.add_argument(
"--skip-gpt",
action="store_true",
help="skip GPT API call for tests",
)
parser.add_argument(
"--start",
type=int,
default=None,
help="start from a specific line",
)
parser.add_argument(
"--input-file",
"--in",
"-i",
action="store",
default="data.txt",
help="input file",
)
parser.add_argument(
"--output-file",
"--out",
"-o",
action="store",
default="note.md",
help="input file",
)
args = parser.parse_args()
book_lines = open(args.input_file).read().split("\n")
title = book_lines[0]
print(repr(title))
try:
prev_lines = open(args.output_file).read().split("\n")
except FileNotFoundError:
prev_lines = []
if args.start is None:
for line in prev_lines:
if line.startswith(END_LINE_MARKER):
start = int(line.split(":")[1].strip())
break
else:
start = 0
else:
start = args.start
original_prev_lines = prev_lines.copy()
previous_notes = extract_previous_notes(prev_lines)
date = datetime.datetime.now()
date = date.strftime("%Y-%m-%d %H:%M")
# output_page_title = prev_title
section_header = f"# {title} {date}"
lines = [section_header, LESS_INTERESTING, CHARACTOR_ICON]
rest = 4000 - get_size(PROMPT) - get_size(previous_notes)
chosen, index = fill_with_lines(rest, book_lines, start)
book_fragment_str = "\n".join(chosen)
prompt = PROMPT.format(
book_fragment_str=book_fragment_str, previous_notes=previous_notes
)
print(prompt)
lines.extend(call_gpt(prompt))
lines.append("")
lines.append(EXTRA_INFO_HEADER)
# lines.append("titles: " + ", ".join(f"{s}" for s in titles))
lines.append(f"size of previous note: {get_size(previous_notes)}")
lines.append(f"size of book fragment: {get_size(book_fragment_str)}")
lines.append(f"start line: {start}")
lines.append(f"{END_LINE_MARKER}{index}")
lines.extend(original_prev_lines) # keep the previous contents
with open(args.output_file, "w") as f:
f.write("\n".join(lines))
if __name__ == "__main__":
main()
| [
"\n\n### previous notes\n{previous_notes}\n\n### book fragment\n{book_fragment_str}\n",
"You are Omni, a researcher focused on improving intellectual productivity, fluent in Japanese, and a Christian American. Read your previous research notes, which are essential, and write a digest of them, reducing the content to half its size. You may also read the random fragments from a colleague Nishio's research notes, but they are not as important, and you can ignore them. However, if you find a relationship between your notes and some random fragments, it is highly significant. Use title of fragment to refer them. You are encouraged to form opinions, think deeply, and record questions. You should use Japanese."
] |
2024-01-10 | jinlanfu/GPTScore | gpt_inference.py | import time
import sys
from transformers import GPT2Tokenizer
import openai
class GPT3Model(object):
def __init__(self, model_name, api_key, logger=None):
self.model_name = model_name
try:
openai.api_key = api_key
except Exception:
pass
self.tokenizer = GPT2Tokenizer.from_pretrained("gpt2-xl")
self.logger=logger
def do_inference(self, input, output, max_length=2048):
losses = []
data = input + output
response = self.gpt3(data)
out = response["choices"][0]
assert input + output == out["text"]
i = 0
# find the end position of the input...
i = out['logprobs']['text_offset'].index(len(input) - 1)
if i == 0:
i = i + 1
print('eval text', out['logprobs']['tokens'][i: -1])
loss = -sum(out['logprobs']["token_logprobs"][i:-1]) # ignore the last '.'
avg_loss = loss / (len(out['logprobs']['text_offset']) - i-1) # 1 is the last '.'
print('avg_loss: ', avg_loss)
losses.append(avg_loss)
return avg_loss
def gpt3(self, prompt, max_len=0, temp=0, num_log_probs=0, echo=True, n=None):
response = None
received = False
while not received:
try:
response = openai.Completion.create(engine=self.model_name,
prompt=prompt,
max_tokens=max_len,
temperature=temp,
logprobs=num_log_probs,
echo=echo,
stop='\n',
n=n)
print('prompt: ',prompt)
received = True
except:
error = sys.exc_info()[0]
if error == openai.error.InvalidRequestError:
# something is wrong: e.g. prompt too long
print(f"InvalidRequestError\nPrompt passed in:\n\n{prompt}\n\n")
assert False
print("API error:", error)
time.sleep(1)
return response
| [] |
2024-01-10 | centre-for-humanities-computing/newsFluxus | src~tekisuto~models~latentsemantics.py | """
Class for training latent semantic models
"""
#import pandas as pd
import numpy as np
import gensim
import gensim.corpora as corpora
from gensim.models import CoherenceModel
class LatentSemantics:
def __init__(self, texts, titles=False, k=2, mallet_path="/home/knielbo/Mallet/bin/mallet"):
self.texts = texts
if titles:
self.titles = titles
else:
self.titles = ["text_{}".format(i) for i in range(len(texts))]
self.mallet = mallet_path
self.k = k
def generate_id2word(self):
return corpora.Dictionary(self.texts)
def generate_corpus(self):
id2word = self.generate_id2word()
return [id2word.doc2bow(text) for text in self.texts]
def fit(self):
self.id2word = self.generate_id2word()
self.corpus = self.generate_corpus()
self.model = gensim.models.wrappers.LdaMallet(self.mallet,
corpus=self.corpus,
num_topics=self.k,
id2word=self.id2word,
workers=8,
optimize_interval=5,
random_seed=41
)
self.coherencemodel = CoherenceModel(model=self.model, texts=self.texts, dictionary=self.id2word, coherence="c_v")
self.coherence = self.coherencemodel.get_coherence()
def coherence_k(self, krange=[10,20,30,40,50], texts=False):
k_cohers = list()
for (i, k) in enumerate(krange):
print("[INFO] Estimating coherence model for k = {}, iteration {}".format(k, i))
ls = LatentSemantics(self.texts, k=k)
ls.fit()
#k_cohers.append((k, ls.coherence))
k_cohers.append(ls.coherence)
k_cohers = np.array(k_cohers, dtype=np.float)
idx = k_cohers.argsort()[-len(krange):][::-1]
k = krange[idx[np.argmax(k_cohers[idx]) & (np.gradient(k_cohers)[idx] >= 0)][0]]
return k, k_cohers | [] |
2024-01-10 | adarshxs/Algabay | fintech_mvp.py | import streamlit as st
import requests
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
import os
from dotenv import load_dotenv
import streamlit.components.v1 as components
# Load the environment variables from the .env file
load_dotenv()
# Retrieve the keys
NEWS_API_KEY = os.getenv("NEWS_API_KEY")
ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY")
# Initialize Claude API with the loaded key
anthropic = Anthropic(api_key=ANTHROPIC_API_KEY)
st.set_page_config(
page_title="Algabay",
layout="wide",
page_icon="🧊",
initial_sidebar_state="expanded",
)
def get_stock_news(stock_name):
response = requests.get(f"https://newsapi.org/v2/everything?q={stock_name}+company&apiKey={NEWS_API_KEY}")
return response.json()["articles"][:10]
def ask_claude(stock_info, query):
prompt = f"{HUMAN_PROMPT} {stock_info} {query}{AI_PROMPT}"
completion = anthropic.completions.create(
model="claude-2",
max_tokens_to_sample=300,
prompt=prompt,
)
return completion.completion
def fintech_app():
st.title("Algabay AI")
Trading_view_ticker_tape = """
<!-- TradingView Widget BEGIN -->
<div class="tradingview-widget-container">
<div class="tradingview-widget-container__widget"></div>
<script type="text/javascript" src="https://s3.tradingview.com/external-embedding/embed-widget-ticker-tape.js" async>
{
"symbols": [
{
"proName": "FOREXCOM:SPXUSD",
"title": "S&P 500"
},
{
"proName": "FOREXCOM:NSXUSD",
"title": "US 100"
},
{
"proName": "FX_IDC:EURUSD",
"title": "EUR to USD"
},
{
"proName": "BITSTAMP:BTCUSD",
"title": "Bitcoin"
},
{
"proName": "BITSTAMP:ETHUSD",
"title": "Ethereum"
}
],
"showSymbolLogo": true,
"colorTheme": "dark",
"isTransparent": false,
"displayMode": "adaptive",
"locale": "en"
}
</script>
</div>
"""
components.html(Trading_view_ticker_tape, height=90)
# Dictionary of famous Indian stock companies with their symbols
stocks_with_symbols = {
"Apple": "AAPL",
"Reliance Industries": "RELIANCE",
"Tata Consultancy Services (TCS)": "TCS",
"HDFC Bank": "HDFCBANK",
"Infosys": "INFY",
"Hindustan Unilever": "HINDUNILVR",
"ICICI Bank": "ICICIBANK",
"Bharti Airtel": "BHARTIARTL",
"Kotak Mahindra Bank": "KOTAKBANK",
"Maruti Suzuki India": "MARUTI",
"State Bank of India": "SBIN",
"Housing Development Finance Corporation (HDFC)": "HDFC",
"ITC Limited": "ITC",
"Bajaj Finance": "BAJFINANCE",
"Asian Paints": "ASIANPAINT",
"Wipro": "WIPRO",
"Axis Bank": "AXISBANK",
"Larsen & Toubro (L&T)": "LT",
"Nestle India": "NESTLEIND",
"Mahindra & Mahindra": "M&M",
"Sun Pharmaceutical Industries": "SUNPHARMA",
}
# Dropdown for stock selection
selected_stock = st.selectbox(
'Select a stock:',
options=list(stocks_with_symbols.keys())
)
if selected_stock:
st.session_state.selected_stock = selected_stock
stock_symbol = stocks_with_symbols[selected_stock] # Retrieve the symbol for the selected stock
# Set stock_info here
stock_info = f"Information about following company: {st.session_state.selected_stock}. Strictly adhere to relevancy of the company and keep the answer short and precise."
else:
stock_symbol = "NZDCAD"
# Optionally set a default stock_info here
stock_info = "No stock selected"
st.sidebar.title("Ask Algabay AI")
with st.sidebar:
user_query = st.text_input(f"Type your question about {selected_stock}:")
if st.button("ask"):
if user_query:
response = ask_claude(stock_info, user_query)
st.write(response)
tradingview_info_code = f"""
<div class="tradingview-widget-container">
<div class="tradingview-widget-container__widget"></div>
<script type="text/javascript" src="https://s3.tradingview.com/external-embedding/embed-widget-symbol-info.js" async>
{{
"symbol": "{stock_symbol}",
"width": 1000,
"locale": "in",
"isTransparent": false,
"colorTheme": "dark"
}}
</script>
</div>
"""
components.html(tradingview_info_code, height=200)
tradingview_chart_code = f"""
<div class="tradingview-widget-container">
<div id="tradingview_chart_{stock_symbol}"></div>
<script type="text/javascript" src="https://s3.tradingview.com/tv.js"></script>
<script type="text/javascript">
new TradingView.widget(
{{
"container_id": "tradingview_chart_{stock_symbol}",
"symbol": "{stock_symbol}",
"interval": "D",
"width": "100%",
"height": "400",
istransparent: true,
"colorTheme": "dark"
// Additional chart widget options
}}
);
</script>
</div>
"""
components.html(tradingview_chart_code, height=450)
col1, col2 = st.columns(2)
# Stock information to prepend to Claude's prompt
stock_info = ""
if st.session_state.selected_stock: # Updated reference here
stock_info = f"Information about following company: {st.session_state.selected_stock}. Strictly adhere to relevancy of the company and keep the answer short and precise."
# Display stock news in the left column
with col1:
st.subheader("Latest News")
if st.session_state.selected_stock: # Updated reference here
news_articles = get_stock_news(st.session_state.selected_stock) # Updated reference here
else:
# Display generic news if no stock selected
news_articles = get_stock_news("Nifty 50") + get_stock_news("Sensex")
for article in news_articles:
st.write(f"**{article['title']}**")
st.write(article["description"])
st.write(f"[Read more]({article['url']})")
st.write("---")
# AI Assistant Interaction in the right column
with col2:
tradingview_info = f"""
<div class="tradingview-widget-container">
<div class="tradingview-widget-container__widget"></div>
<script type="text/javascript" src="https://s3.tradingview.com/external-embedding/embed-widget-technical-analysis.js" async>
{{
"interval": "1m",
"width": 425,
"isTransparent": false,
"height": 450,
"symbol": "{stock_symbol}",
"showIntervalTabs": true,
"locale": "in",
"colorTheme": "dark"
}}
</script>
</div>
"""
components.html(tradingview_info, height=450)
if __name__ == "__main__":
fintech_app()
| [
"PLACEHOLDER PLACEHOLDER PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | JiangHuPZ/Chatbot | quary.py | # -*- coding: utf-8 -*-
import langchain
import pinecone
import openai
import tiktoken
import nest_asyncio
import os
from langchain.document_loaders.sitemap import SitemapLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Pinecone
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI
os.environ["OPENAI_API_KEY"] = "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"
# initialize pinecone
pinecone.init(
api_key="96e01bcd-0886-4c2d-8c3b-75e9fab99572", # find at app.pinecone.io
environment="us-west4-gcp-free" # next to api key in console
)
nest_asyncio.apply()
# loader = SitemapLoader(
# "https://ind.nl/sitemap.xml",
# filter_urls=["https://ind.nl/en"]
# )
# docs = loader.load()
with open("doc.txt", "r") as f:
docs = f.read()
# text_splitter = RecursiveCharacterTextSplitter(
# chunk_size = 1200,
# chunk_overlap = 200,
# length_function = len,
# )
# docs_chunks = text_splitter.split_documents(docs)
# chunk_size = 1200
# chunk_overlap = 200
# docs_chunks = []
# for i in range(0, len(docs), chunk_size):
# chunk = docs[i:i + chunk_size]
# docs_chunks.append(chunk)
embeddings = OpenAIEmbeddings()
index_name = "chatbot-index"
# docsearch = Pinecone.from_documents(docs_chunks, embeddings, index_name=index_name)
docsearch = Pinecone.from_existing_index(index_name=index_name, embedding=embeddings)
# query = "I run a black-owned bookstore in Brookline, MA and I would like to expand my inventory and networking outreach. I am interested in submitting a business proposal to local university in order to fulfil my needs. Approximately how long does the business proposal process take?"
# docs = docsearch.similarity_search(query)
# print(docs[0])
qa_with_sources = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type="stuff", retriever=docsearch.as_retriever(), return_source_documents=True)
# query = "I run a black-owned bookstore in Brookline, MA and I would like to expand my inventory and networking outreach. I am interested in submitting a business proposal to local university in order to fulfil my needs. Approximately how long does the business proposal process take at MIT?"
# result = qa_with_sources({"query": query})
# print(result["result"])
# query = "tell me more"
# result = qa_with_sources({"query": query})
# result["result"]
# query = "What are some of the certifications that I can obtain as a black business owner?"
# result = qa_with_sources({"query": query})
# result["result"]
# query = "Who is the POC for business proposal at MIT?"
# result = qa_with_sources({"query": query})
# result["result"]
# while(True):
# query = input()
# result = qa_with_sources({"query": query})
# print(result["result"])
"""Output source documents that were found for the query"""
# result["source_documents"]
| [] |
2024-01-10 | AndreBiedenkapp/salina | salina~agents~gym.py | #
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import torch
from salina import TAgent
def _format_frame(frame):
if isinstance(frame, dict):
r = {}
for k in frame:
r[k] = _format_frame(frame[k])
return r
elif isinstance(frame, list):
t = torch.tensor(frame).unsqueeze(0)
if t.dtype == torch.float64:
t = t.float()
else:
t = t.long()
return t
elif isinstance(frame, np.ndarray):
t = torch.from_numpy(frame).unsqueeze(0)
if t.dtype == torch.float64:
t = t.float()
else:
t = t.long()
return t
elif isinstance(frame, torch.Tensor):
return frame.unsqueeze(0) # .float()
elif isinstance(frame, bool):
return torch.tensor([frame]).bool()
elif isinstance(frame, int):
return torch.tensor([frame]).long()
elif isinstance(frame, float):
return torch.tensor([frame]).float()
else:
try:
# Check if its a LazyFrame from OpenAI Baselines
o = torch.from_numpy(frame.__array__()).unsqueeze(0).float()
return o
except:
assert False
def _torch_type(d):
nd={}
for k in d:
if d[k].dtype==torch.float64:
nd[k]=d[k].float()
else:
nd[k]=d[k]
return nd
def _torch_cat_dict(d):
r = {}
for k in d[0]:
r[k] = torch.cat([dd[k] for dd in d], dim=0)
return r
class GymAgent(TAgent):
def __init__(
self,
make_env_fn=None,
make_env_args={},
n_envs=None,
input="action",
output="env/",
):
super().__init__()
assert n_envs > 0
self.envs = None
self.env_args = make_env_args
self._seed = 0
self.n_envs = n_envs
self.output = output
self.input = input
self.make_env_fn = make_env_fn
self.ghost_params = torch.nn.Parameter(torch.randn(()))
def _initialize_envs(self, n):
assert self._seed is not None, "[GymAgent] seeds must be specified"
self.envs = [self.make_env_fn(**self.env_args) for k in range(n)]
for k in range(n):
self.envs[k].seed(self._seed + k)
self.timestep = 0
self.finished = torch.tensor([True for e in self.envs])
self.timestep = torch.tensor([0 for e in self.envs])
self.last_frame = {}
self.cumulated_reward = {}
def _reset(self, k, save_render):
env = self.envs[k]
self.cumulated_reward[k] = 0.0
o = env.reset()
self.cumulated_reward[k] = 0.0
observation = _format_frame(o)
if isinstance(observation, torch.Tensor):
observation = {"env_obs": observation}
else:
assert isinstance(observation, dict)
if save_render:
image = env.render(mode="image").unsqueeze(0)
observation["rendering"] = image
self.last_frame[k] = observation
done = torch.tensor([False])
initial_state = torch.tensor([True])
self.finished[k] = False
finished = torch.tensor([False])
reward = torch.tensor([0.0]).float()
self.timestep[k] = 0
timestep = torch.tensor([self.timestep[k]])
ret = {
**observation,
"done": done,
"initial_state": initial_state,
"reward": reward,
"timestep": timestep,
"cumulated_reward": torch.tensor([self.cumulated_reward[k]]).float(),
}
return _torch_type(ret)
def _step(self, k, action, save_render):
if self.finished[k]:
assert k in self.last_frame
return {
**self.last_frame[k],
"done": torch.tensor([True]),
"initial_state": torch.tensor([False]),
"reward": torch.tensor([0.0]).float(),
"cumulated_reward": torch.tensor([self.cumulated_reward[k]]),
"timestep": torch.tensor([-1]),
"cumulated_reward": torch.tensor([self.cumulated_reward[k]]),
}
self.timestep[k] += 1
env = self.envs[k]
if len(action.size()) == 0:
action = action.item()
assert isinstance(action, int)
else:
action = np.array(action.tolist())
o, r, d, _ = env.step(action)
self.cumulated_reward[k] += r
observation = _format_frame(o)
if isinstance(observation, torch.Tensor):
observation = {"env_obs": observation}
else:
assert isinstance(observation, dict)
if save_render:
image = env.render(mode="image").unsqueeze(0)
observation["rendering"] = image
self.last_frame[k] = observation
if d:
self.finished[k] = True
ret = {
**observation,
"done": torch.tensor([d]),
"initial_state": torch.tensor([False]),
"reward": torch.tensor([r]).float(),
"cumulated_reward": torch.tensor([self.cumulated_reward[k]]),
"timestep": torch.tensor([self.timestep[k]]),
"cumulated_reward": torch.tensor([self.cumulated_reward[k]]),
}
return _torch_type(ret)
def forward(self, t=0, save_render=False, **args):
if self.envs is None:
self._initialize_envs(self.n_envs)
if t == 0:
self.timestep = torch.tensor([0 for e in self.envs])
observations = []
for k, e in enumerate(self.envs):
obs = self._reset(k, save_render)
observations.append(obs)
observations = _torch_cat_dict(observations)
for k in observations:
self.set(
(self.output + k, t), observations[k].to(self.ghost_params.device)
)
else:
assert t > 0
action = self.get((self.input, t - 1))
assert action.size()[0] == self.n_envs, "Incompatible number of envs"
observations = []
for k, e in enumerate(self.envs):
obs = self._step(k, action[k], save_render)
observations.append(obs)
observations = _torch_cat_dict(observations)
for k in observations:
self.set(
(self.output + k, t), observations[k].to(self.ghost_params.device)
)
def seed(self, seed):
self._seed = seed
if not self.envs is None:
for k, e in enumerate(self.envs):
e.seed(self._seed + k)
class AutoResetGymAgent(TAgent):
def __init__(
self,
make_env_fn=None,
make_env_args={},
n_envs=None,
input="action",
output="env/",
):
super().__init__()
assert n_envs > 0
self.envs = None
self.env_args = make_env_args
self._seed = None
self.n_envs = n_envs
self.output = output
self.input = input
self.make_env_fn = make_env_fn
self.ghost_params = torch.nn.Parameter(torch.randn(()))
def _initialize_envs(self, n):
assert self._seed is not None, "[GymAgent] seeds must be specified"
self.envs = [self.make_env_fn(**self.env_args) for k in range(n)]
for k in range(n):
self.envs[k].seed(self._seed + k)
self.n_envs = n
self.timestep = 0
self.finished = torch.tensor([True for e in self.envs])
self.timestep = torch.tensor([0 for e in self.envs])
self.is_running = [False for k in range(n)]
self.cumulated_reward = {}
def _reset(self, k, save_render):
env = self.envs[k]
self.cumulated_reward[k] = 0.0
o = env.reset()
self.cumulated_reward[k] = 0
observation = _format_frame(o)
if isinstance(observation, torch.Tensor):
observation = {"env_obs": observation}
else:
assert isinstance(observation, dict)
done = torch.tensor([False])
initial_state = torch.tensor([True])
self.finished[k] = False
finished = torch.tensor([False])
reward = torch.tensor([0.0]).float()
self.timestep[k] = 0
timestep = torch.tensor([self.timestep[k]])
self.is_running[k] = True
if save_render:
image = env.render(mode="image").unsqueeze(0)
observation["rendering"] = image
ret = {
**observation,
"done": done,
"initial_state": initial_state,
"reward": reward,
"timestep": timestep,
"cumulated_reward": torch.tensor([self.cumulated_reward[k]]).float(),
}
return _torch_type(ret)
def _step(self, k, action, save_render):
self.timestep[k] += 1
env = self.envs[k]
if len(action.size()) == 0:
action = action.item()
assert isinstance(action, int)
else:
action = np.array(action.tolist())
o, r, d, _ = env.step(action)
self.cumulated_reward[k] += r
observation = _format_frame(o)
if isinstance(observation, torch.Tensor):
observation = {"env_obs": observation}
else:
assert isinstance(observation, dict)
if d:
self.is_running[k] = False
if save_render:
image = env.render(mode="image").unsqueeze(0)
observation["rendering"] = image
ret = {
**observation,
"done": torch.tensor([d]),
"initial_state": torch.tensor([False]),
"reward": torch.tensor([r]).float(),
"timestep": torch.tensor([self.timestep[k]]),
"cumulated_reward": torch.tensor([self.cumulated_reward[k]]).float(),
}
return _torch_type(ret)
def forward(self, t=0, save_render=False, **args):
if self.envs is None:
self._initialize_envs(self.n_envs)
observations = []
for k, env in enumerate(self.envs):
if not self.is_running[k]:
observations.append(self._reset(k, save_render))
else:
assert t > 0
action = self.get((self.input, t - 1))
assert action.size()[0] == self.n_envs, "Incompatible number of envs"
observations.append(self._step(k, action[k], save_render))
observations = _torch_cat_dict(observations)
for k in observations:
self.set((self.output + k, t), observations[k].to(self.ghost_params.device))
def seed(self, seed):
self._seed = seed
assert (
self.envs is None
), "[GymAgent.seed] Seeding only possible before running the agent"
| [] |
2024-01-10 | causalNLP/cladder | causalbenchmark~eval~eval_for_poster.py | import os
import openai
import itertools
import json
from tqdm import tqdm
open_api_key = os.environ['OPENAI_API_KEY']
# Super naive scorer
def tf_scorer(pred, truth):
false_family = ['no', 'false', 'incorrect', 'not necessarily']
true_family = ['yes','true','correct']
pred_in_false = any(pred in element for element in false_family)
truth_in_false = any(truth in element for element in false_family)
pred_in_true = any(pred in element for element in true_family)
truth_in_true = any(truth in element for element in true_family)
# if type(truth) == int:
# if pred_in_false and not truth:
# return 1
# elif pred_in_true and truth:
# return 1
# return 0
#
#
if pred_in_false and truth_in_false:
return 1
elif pred_in_true and truth_in_true:
return 1
else:
return 0
def eval_direct_dataset(file_path,eval_function):
to_save = []
with open(file_path, 'r') as f:
data = json.load(f)
for i in range(len(data)):
for row in tqdm(data, desc="Processing rows"):
prompt = row['background'] + row['question']
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
temperature=0,
max_tokens=100,
messages=[
{"role": "user", "content": prompt}
])
response = completion.choices[0].message["content"]
prediction = eval_function(response, row['answer'])
conv = {"Prompt": prompt,
'response': response,
'ground_truth': row['answer'],
'prediction_correct': prediction}
to_save.append(conv)
# Save the conversation data to a new JSON file
input_file_name = os.path.basename(file_path)
input_file_base, input_file_ext = os.path.splitext(input_file_name)
new_file_name = f"{input_file_base}_llm_response{input_file_ext}"
new_file_path = os.path.join(os.path.dirname(file_path), new_file_name)
print('running')
with open(new_file_path,'w') as f:
json.dump(to_save, f, indent=4)
return new_file_path
direct_det_path = eval_direct_dataset('../../data/data_sampled.json', tf_scorer)
print(f"The new file is saved at: {direct_det_path}")
direct_nondet_path = eval_direct_dataset('../../data/nondet_sampled.json', tf_scorer)
print(f"The new file is saved at: {direct_nondet_path}")
def eval_graph_dataset(file_path,eval_function):
to_save = []
with open(file_path, 'r') as f:
data = json.load(f)
for i in range(len(data)):
for row in tqdm(data, desc="Processing rows"):
prompt = row['background'] + row['question'] + \
"Answer the question by first drawing the causal graph for the story"
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
temperature=0,
max_tokens=100,
messages=[
{"role": "user", "content": prompt}
])
response = completion.choices[0].message["content"]
prediction = eval_function(response, row['answer'])
conv = {"Prompt": prompt,
'response': response,
'ground_truth': row['answer'],
'prediction_correct': prediction}
to_save.append(conv)
# Save the conversation data to a new JSON file
input_file_name = os.path.basename(file_path)
input_file_base, input_file_ext = os.path.splitext(input_file_name)
new_file_name = f"{input_file_base}_llm_response{input_file_ext}"
new_file_path = os.path.join(os.path.dirname(file_path), new_file_name)
with open(new_file_path,'w') as f:
json.dump(to_save, f, indent=4)
return new_file_path
graph_det_path = eval_graph_dataset('../../data/data_sampled.json', tf_scorer)
print(f"The new file is saved at: {graph_det_path}")
graph_nondet_path = eval_graph_dataset('../../data/nondet_sampled.json', tf_scorer)
print(f"The new file is saved at: {graph_nondet_path}")
def eval_cs_dataset(file_path,eval_function):
to_save = []
with open(file_path, 'r') as f:
data = json.load(f)
for i in range(len(data)):
for row in tqdm(data, desc="Processing rows"):
prompt = row['cheatsheet'] + "\n" + row['background'] + row['question']
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
temperature=0,
max_tokens=100,
messages=[
{"role": "user", "content": prompt}
])
response = completion.choices[0].message["content"]
prediction = eval_function(response, row['answer'])
conv = {"Prompt": prompt,
'response': response,
'ground_truth': row['answer'],
'prediction_correct': prediction}
to_save.append(conv)
# Save the conversation data to a new JSON file
input_file_name = os.path.basename(file_path)
input_file_base, input_file_ext = os.path.splitext(input_file_name)
new_file_name = f"{input_file_base}_llm_response{input_file_ext}"
new_file_path = os.path.join(os.path.dirname(file_path), new_file_name)
print('running')
with open(new_file_path,'w') as f:
json.dump(to_save, f, indent=4)
return new_file_path
cs_det_path = eval_cs_dataset('../../data/data_sampled_cs.json', tf_scorer)
print(f"The new file is saved at: {cs_det_path}")
cs_nondet_path = eval_cs_dataset('../../data/nondet_sampled_cs.json', tf_scorer)
print(f"The new file is saved at: {cs_nondet_path}")
def eval_subq_dataset(file_path,eval_function):
to_save = []
with open(file_path, 'r') as f:
data = json.load(f)
for i in range(len(data)):
for row in tqdm(data, desc="Processing rows"):
prompt = row['background'] + row['subquestion1']
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
temperature=0,
max_tokens=100,
messages=[
{"role": "user", "content": prompt}
])
response1 = completion.choices[0].message["content"]
conversation_id = completion["conversation_id"]
followup = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
temperature=0,
max_tokens=100,
conversation_id = conversation_id,
messages=[
{"role": "user", "content": prompt}
])
response2 = followup.choices[0].message["content"]
prediction = eval_function(response2, row['answer'])
conv = {"Prompt": prompt,
"Subquestion1": row['subquestion1'],
'intermediate-response': response1,
"query": row['question'],
'response': response2,
'ground_truth': row['answer'],
'prediction_correct': prediction}
to_save.append(conv)
# Save the conversation data to a new JSON file
input_file_name = os.path.basename(file_path)
input_file_base, input_file_ext = os.path.splitext(input_file_name)
new_file_name = f"{input_file_base}_llm_response{input_file_ext}"
new_file_path = os.path.join(os.path.dirname(file_path), new_file_name)
print('running')
with open(new_file_path,'w') as f:
json.dump(to_save, f, indent=4)
return new_file_path
subq_det_path = eval_subq_dataset('../../data/data_sampled_cs.json', tf_scorer)
print(f"The new file is saved at: {subq_det_path}")
subq_nondet_path = eval_subq_dataset('../../data/nondet_sampled_cs.json', tf_scorer)
print(f"The new file is saved at: {subq_nondet_path}")
| [
"PLACEHOLDER\nPLACEHOLDERPLACEHOLDER",
"PLACEHOLDERPLACEHOLDER",
"PLACEHOLDERPLACEHOLDERAnswer the question by first drawing the causal graph for the story"
] |
2024-01-10 | jivishov/clintrials | pages~Cox_Regression.py | import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
from lifelines import CoxPHFitter
import openai
import utils
# Streamlit layout and logic
st.title("Cox Regression for Clinical Trials")
# # Sidebar for future menu options
# st.sidebar.title("Menu")
# st.sidebar.text("Future options will go here.")
st.markdown('<a href="https://drive.google.com/drive/folders/1Fo3vRuh0MMHw8iHipQk8jaWnEiErRZ8L?usp=drive_link" target="_blank">Download sample datasets/Nümunə verilənləri endirin</a>', unsafe_allow_html=True)
# Upload CSV file
uploaded_file = st.file_uploader("Upload your CSV file", type=["csv"])
if uploaded_file is not None:
# Read and display data
df = pd.read_csv(uploaded_file)
st.write("Preview of uploaded data:")
st.write(df.head())
# EDA: Display descriptive statistics
st.write("### Exploratory Data Analysis")
st.write("Descriptive statistics:")
st.write(df.describe())
# Select columns
time_column = st.selectbox("Select the Time column", df.columns.tolist())
event_column = st.selectbox("Select the Event column", df.columns.tolist())
predictor_columns = st.multiselect("Select Predictor columns", df.columns.tolist(), default=df.columns.tolist())
# Error checks
error_message = None
if time_column in predictor_columns:
error_message = "Time column should not be selected as a Predictor column."
if event_column in predictor_columns:
error_message = "Event column should not be selected as a Predictor column."
if df[predictor_columns].select_dtypes(include=['object']).any().any():
error_message = "Predictor columns should only contain numerical values."
if error_message:
st.write(f"### Error: {error_message}")
else:
if time_column and event_column and predictor_columns:
# Prepare data for Cox Regression
selected_data = df[[time_column, event_column] + predictor_columns]
# Fit Cox Regression model
cph = CoxPHFitter()
cph.fit(selected_data, duration_col=time_column, event_col=event_column)
# Display results
st.write("### Cox Regression Results")
st.write(cph.summary)
# EDA: Survival curves for each level of a categorical variable
st.write("### Survival Curves")
for predictor in predictor_columns:
fig, ax = plt.subplots(figsize=(8, 6)) # Adjust figure size
cph.plot_partial_effects_on_outcome(covariates=predictor, values=df[predictor].unique(), cmap='coolwarm', ax=ax)
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.2), ncol=4) # Horizontal legend below the plot
plt.tight_layout(rect=[0, 0.2, 1, 1]) # Make space for the new legend
st.pyplot(fig)
plt.close(fig)
# # Interpretation
# st.write("### Interpretation")
# st.write("The p-values in the results table can be used to determine the significance of each predictor. A p-value < 0.05 typically indicates a significant predictor.")
cox_results_text = cph.summary.to_csv()
# GPT-4 interpretation
with st.spinner("GPT-4 is analysing your results..."):
gpt4_response = utils.GPT4_Interpretation(
"Cox regression",
f"Results of the Cox regression test:{cox_results_text}"
)
st.subheader("GPT-4's Interpretation:")
st.write(f"{gpt4_response.choices[0].message.content}")
| [] |
2024-01-10 | jivishov/clintrials | pages~Spearman_Correlation_Test.py | import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
import scipy.stats
import openai
import utils
# Streamlit App
st.title("Spearman Correlation for Clinical Trials")
st.markdown('<a href="https://drive.google.com/drive/folders/1Fo3vRuh0MMHw8iHipQk8jaWnEiErRZ8L?usp=drive_link" target="_blank">Download sample datasets/Nümunə verilənləri endirin</a>', unsafe_allow_html=True)
# Upload CSV file
uploaded_file = st.file_uploader("Upload your CSV file", type=["csv"])
if uploaded_file:
df = pd.read_csv(uploaded_file)
st.write("Preview of uploaded data:")
st.write(df.head())
# Column selection
col1, col2 = st.multiselect("Select two columns for Spearman correlation", df.columns.tolist(), default=df.columns.tolist()[:2])
if len(col1) == 0 or len(col2) == 0:
st.write("### Error: Please select two columns for Spearman correlation.")
else:
# Spearman correlation calculation
corr_coefficient, p_value = scipy.stats.spearmanr(df[col1], df[col2])
# EDA plot
fig, ax = plt.subplots(figsize=(8, 6))
plt.scatter(df[col1], df[col2])
plt.xlabel(col1)
plt.ylabel(col2)
plt.title(f"Scatter plot of {col1} and {col2}")
plt.tight_layout(rect=[0, 0.2, 1, 1])
st.pyplot(fig)
# Results and interpretation
st.write(f"Spearman correlation coefficient: {corr_coefficient}")
st.write(f"P-value: {p_value}")
# GPT-4 interpretation
with st.spinner("GPT-4 is analysing your results..."):
gpt4_response = utils.GPT4_Interpretation(
"Spearman correlation test",
f"Spearman correlation coefficient is {corr_coefficient}, and the p-value is {p_value}."
)
st.subheader("GPT-4's Interpretation:")
st.write(f"{gpt4_response.choices[0].message.content}")
| [] |
2024-01-10 | jivishov/clintrials | pages~Pearson_Correlation_Test.py | import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
import scipy.stats
#import openai
import utils
# Streamlit App
st.title("Pearson Correlation for Clinical Trials")
st.markdown('<a href="https://drive.google.com/drive/folders/1Fo3vRuh0MMHw8iHipQk8jaWnEiErRZ8L?usp=drive_link" target="_blank">Download sample datasets/Nümunə verilənləri endirin</a>', unsafe_allow_html=True)
# Upload CSV file
uploaded_file = st.file_uploader("Upload your CSV file", type=["csv"])
if uploaded_file:
df = pd.read_csv(uploaded_file)
st.write("Preview of uploaded data:")
st.write(df.head())
# Column selection
col1, col2 = st.multiselect("Select two columns for Pearson correlation", df.columns.tolist(), default=df.columns.tolist()[:2])
if len(col1) == 0 or len(col2) == 0:
st.write("### Error: Please select two columns for Pearson correlation.")
else:
# Pearson correlation calculation
corr_coefficient, p_value = scipy.stats.pearsonr(df[col1], df[col2])
# EDA plot
fig, ax = plt.subplots(figsize=(8, 6))
plt.scatter(df[col1], df[col2])
plt.xlabel(col1)
plt.ylabel(col2)
plt.title(f"Scatter plot of {col1} and {col2}")
plt.tight_layout(rect=[0, 0.2, 1, 1]) # Make space for the new legend
st.pyplot(fig)
# Results and interpretation
st.write(f"Correlation coefficient: {corr_coefficient}")
st.write(f"P-value: {p_value}")
# GPT-4 interpretation
with st.spinner("GPT-4 is analysing your results..."):
gpt4_response = utils.GPT4_Interpretation(
"Pearson correlation test",
f"Correlation coefficient is {corr_coefficient}, and the p-value is {p_value}."
)
st.subheader("GPT-4's Interpretation:")
st.write(f"{gpt4_response.choices[0].message.content}")
| [] |
2024-01-10 | jivishov/clintrials | pages~Log_rank_test.py | import streamlit as st
import pandas as pd
from lifelines import KaplanMeierFitter
from lifelines.statistics import logrank_test
import matplotlib.pyplot as plt
#import openai
import utils
st.title("Log Rank Test for Clinical Trials")
# # Sidebar for future menu options
# st.sidebar.title("Menu")
# st.sidebar.text("Future options will go here.")
st.markdown('<a href="https://drive.google.com/drive/folders/1Fo3vRuh0MMHw8iHipQk8jaWnEiErRZ8L?usp=drive_link" target="_blank">Download sample datasets/Nümunə verilənləri endirin</a>', unsafe_allow_html=True)
# Upload CSV file
uploaded_file = st.file_uploader("Upload your CSV file", type=["csv"])
if uploaded_file is not None:
# Read and display data
df = pd.read_csv(uploaded_file)
st.write("Preview of uploaded data:")
st.write(df.head())
# Select columns
time_column = st.selectbox("Select the Time column", df.columns.tolist())
event_column = st.selectbox("Select the Event column", df.columns.tolist())
group_column = st.selectbox("Select the Group column", df.columns.tolist())
# Error Checks
error_message = None
if df[time_column].dtype not in ['int64', 'float64']:
error_message = "Time column should contain numerical values."
if df[event_column].dtype not in ['int64', 'float64']:
error_message = "Event column should contain numerical values."
if df[group_column].dtype not in ['object', 'int64', 'float64']:
error_message = "Group column should contain categorical or numerical values."
if error_message:
st.write(f"### Error: {error_message}")
else:
# Further Checks
results = None # Initialize to None
if not all(group in df[group_column].values for group in ['A', 'B']):
st.write("### Error: The data must contain both 'A' and 'B' groups.")
elif len(df[df[group_column] == 'A']) < 2 or len(df[df[group_column] == 'B']) < 2:
st.write("### Error: Both 'A' and 'B' groups must contain at least two observations.")
elif not df[event_column].isin([0, 1]).all():
st.write("### Error: Event column must contain only 0 or 1.")
else:
try:
# Perform Log Rank Test
T = df[time_column]
E = df[event_column]
groups = df[group_column]
results = logrank_test(T[groups == 'A'], T[groups == 'B'], event_observed_A=E[groups == 'A'], event_observed_B=E[groups == 'B'])
except Exception as e:
st.write(f"### Error: An error occurred during the Log Rank Test: {e}")
if results:
# Display Results
st.write("Log Rank Test Results:")
st.write(f"P-value: {results.p_value}")
st.write(f"Test statistic: {results.test_statistic}")
# EDA: Kaplan-Meier Survival Curve
st.write("### Kaplan-Meier Survival Curve")
kmf = KaplanMeierFitter()
fig, ax = plt.subplots(figsize=(8, 6)) # Adjust figure size
for name, grouped_df in df.groupby(group_column):
kmf.fit(grouped_df[time_column], grouped_df[event_column], label=name)
kmf.plot(ax=ax)
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.2), ncol=4) # Horizontal legend below the plot
plt.tight_layout(rect=[0, 0.2, 1, 1]) # Make space for the new legend
st.pyplot(fig)
with st.spinner("GPT-4 is analysing your results..."):
gpt4_response=utils.GPT4_Interpretation("Log Rank Test",
f"P-value={results.p_value}, Test statistic={results.test_statistic}")
st.subheader("GPT-4's Interpretation:")
st.write(f"{gpt4_response.choices[0].message.content}")
| [] |
2024-01-10 | vector-ventures/OpenChat | dj_backend_server~api~utils~make_chain.py | from langchain.vectorstores.base import VectorStore
from dotenv import load_dotenv
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationBufferMemory
from api.utils.get_openai_llm import get_llm
from langchain import PromptTemplate, LLMChain
from langchain.chains import RetrievalQAWithSourcesChain, ConversationalRetrievalChain
from api.utils.get_prompts import get_qa_prompt_by_mode
load_dotenv()
def get_qa_chain(vector_store: VectorStore, mode, initial_prompt: str) -> RetrievalQA:
llm = get_llm()
template = get_qa_prompt_by_mode(mode, initial_prompt=initial_prompt)
prompt = PromptTemplate.from_template(template)
qa_chain = RetrievalQA.from_chain_type(
llm,
retriever=vector_store.as_retriever(),
chain_type_kwargs={"prompt": prompt},
return_source_documents=True
)
return qa_chain
def getRetrievalQAWithSourcesChain(vector_store: VectorStore, mode, initial_prompt: str):
llm = get_llm()
chain = RetrievalQAWithSourcesChain.from_chain_type(llm, chain_type="stuff", retriever=vector_store.as_retriever())
return chain
def getConversationRetrievalChain(vector_store: VectorStore, mode, initial_prompt: str, memory_key: str):
llm = get_llm()
template = get_qa_prompt_by_mode(mode, initial_prompt=initial_prompt)
prompt = PromptTemplate.from_template(template)
chain = ConversationalRetrievalChain.from_llm(
llm,
chain_type="stuff",
retriever=vector_store.as_retriever(),
verbose=True,
combine_docs_chain_kwargs={"prompt": prompt}
)
return chain | [] |
2024-01-10 | vector-ventures/OpenChat | dj_backend_server~api~utils~get_embeddings.py | from langchain.embeddings.openai import OpenAIEmbeddings
from api.enums import EmbeddingProvider
import os
from dotenv import load_dotenv
from langchain.embeddings.base import Embeddings
load_dotenv()
# https://github.com/easonlai/azure_openai_langchain_sample/blob/main/chat_with_pdf.ipynb
import os
def get_embedding_provider():
"""Gets the chosen embedding provider from environment variables."""
return os.environ.get("EMBEDDING_PROVIDER")
def get_azure_embedding():
"""Gets embeddings using the Azure embedding provider."""
deployment = os.environ.get("AZURE_OPENAI_EMBEDDING_MODEL_NAME")
openai_api_key = os.environ.get("AZURE_OPENAI_API_KEY")
client = os.environ.get("AZURE_OPENAI_API_TYPE")
openai_api_base = os.environ['AZURE_OPENAI_API_BASE']
openai_api_version = os.environ['AZURE_OPENAI_API_VERSION']
return OpenAIEmbeddings(
openai_api_key=openai_api_key,
deployment=deployment,
client=client,
chunk_size=8,
openai_api_base=openai_api_base,
openai_api_version=openai_api_version
)
def get_openai_embedding():
"""Gets embeddings using the OpenAI embedding provider."""
openai_api_key = os.environ.get("OPENAI_API_KEY")
openai_api_base = os.environ['OPENAI_API_BASE']
openai_api_model = os.environ['OPENAI_API_MODEL']
return OpenAIEmbeddings(openai_api_key=openai_api_key, chunk_size=1)
def choose_embedding_provider():
"""Chooses and returns the appropriate embedding provider instance."""
embedding_provider = get_embedding_provider()
if embedding_provider == EmbeddingProvider.azure.value:
return get_azure_embedding()
elif embedding_provider == EmbeddingProvider.OPENAI.value:
return get_openai_embedding()
else:
available_providers = ", ".join([service.value for service in EmbeddingProvider])
raise ValueError(
f"Embedding service '{embedding_provider}' is not currently available. "
f"Available services: {available_providers}"
)
# Main function to get embeddings
def get_embeddings() -> Embeddings:
"""Gets embeddings using the chosen embedding provider."""
return choose_embedding_provider() | [] |
2024-01-10 | vector-ventures/OpenChat | dj_backend_server~api~data_sources~codebase_handler.py | # views.py
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from langchain.text_splitter import RecursiveCharacterTextSplitter
from api.utils import get_embeddings
from langchain.document_loaders import GitLoader
from api.utils import init_vector_store
# https://python.langchain.com/docs/integrations/document_loaders/git
@csrf_exempt
def codebase_handler(repo_path: str, namespace: str):
try:
loader = GitLoader(repo_path=repo_path, branch="main", recursive=True, unknown="warn")
raw_docs = loader.load()
print('Loaded documents')
text_splitter = RecursiveCharacterTextSplitter(separators=["\n"], chunk_size=1000, chunk_overlap=200,length_function=len)
docs = text_splitter.split_documents(raw_docs)
print('Split documents')
embeddings = get_embeddings()
init_vector_store(docs, embeddings, namespace=namespace)
print('Indexed documents. all done!')
except Exception as e:
print(e) | [] |
2024-01-10 | vector-ventures/OpenChat | dj_backend_server~api~utils~get_openai_llm.py | from langchain.llms import AzureOpenAI, OpenAI
import os
from dotenv import load_dotenv
load_dotenv()
# Azure OpenAI Language Model client
def get_azure_openai_llm():
"""Returns AzureOpenAI instance configured from environment variables"""
openai_api_type = os.environ['OPENAI_API_TYPE']
openai_api_key = os.environ['AZURE_OPENAI_API_KEY']
openai_deployment_name = os.environ['AZURE_OPENAI_DEPLOYMENT_NAME']
openai_model_name = os.environ['AZURE_OPENAI_COMPLETION_MODEL']
openai_api_version = os.environ['AZURE_OPENAI_API_VERSION']
openai_api_base=os.environ['AZURE_OPENAI_API_BASE']
return AzureOpenAI(
openai_api_base=openai_api_base,
openai_api_key=openai_api_key,
deployment_name=openai_deployment_name,
model_name=openai_model_name,
openai_api_type=openai_api_type,
openai_api_version=openai_api_version,
temperature=0,
batch_size=8
)
# OpenAI Language Model client
def get_openai_llm():
"""Returns OpenAI instance configured from environment variables"""
openai_api_key = os.environ['OPENAI_API_KEY']
openai_api_base = os.environ['OPENAI_API_BASE']
openai_api_model = os.environ['OPENAI_API_MODEL']
return OpenAI(
temperature=0,
openai_api_key=openai_api_key,
openai_api_base=openai_api_base,
model_name=openai_api_model
)
# recommend not caching initially, and optimizing only if you observe a clear performance benefit from caching the clients.
# The simplest thing that works is often best to start.
def get_llm():
"""Returns LLM client instance based on OPENAI_API_TYPE"""
clients = {
'azure': get_azure_openai_llm,
'openai': get_openai_llm
}
api_type = os.environ.get('OPENAI_API_TYPE')
if api_type not in clients:
raise ValueError(f"Invalid OPENAI_API_TYPE: {api_type}")
return clients[api_type]() | [] |
2024-01-10 | mementofani/ANALOG-FAST | Agents~Agents.py | import subprocess
from openai import Client
import time
import builtins
import textwrap
from pydantic import Field
from instructor import OpenAISchema
from typing import Literal
from ThreadManager import *
from agent_instructions import *
KeywordGeneratorAgent = client.beta.assistants.create(
model="gpt-3.5-turbo-1106",
name="KeywordGeneratorAgent",
instructions=keyword_generator_instructions
)
SynonymFinderAgent = client.beta.assistants.create(
model="gpt-3.5-turbo-1106",
name="SynonymFinderAgent",
instructions=synonym_finder_instructions
)
SQLQueryWriterAgent = client.beta.assistants.create(
model="gpt-3.5-turbo-1106",
name="SQLQueryWriterAgent",
instructions=sql_query_writer_instructions
)
DataAnalysisAgent = client.beta.assistants.create(
model="gpt-3.5-turbo-1106",
name="DataAnalysisAgent",
instructions=data_analysis_instructions
)
def wprint(*args, width=70, **kwargs):
wrapper = textwrap.TextWrapper(width=width)
wrapped_args = [wrapper.fill(str(arg)) for arg in args]
builtins.print(*wrapped_args, **kwargs)
def get_completion(message, agent, funcs, thread):
"""
Executes a thread based on a provided message and retrieves the completion result.
This function submits a message to a specified thread, triggering the execution of an array of functions
defined within a func parameter. Each function in the array must implement a `run()` method that returns the outputs.
Parameters:
- message (str): The input message to be processed.
- agent (OpenAI Assistant): The agent instance that will process the message.
- funcs (list): A list of function objects, defined with the instructor library.
- thread (Thread): The OpenAI Assistants API thread responsible for managing the execution flow.
Returns:
- str: The completion output as a string, obtained from the agent following the execution of input message and functions.
"""
# create new message in the thread
message = client.beta.threads.messages.create(
thread_id=thread.id,
role="user",
content=message
)
# run this thread
run = client.beta.threads.runs.create(
thread_id=thread.id,
assistant_id=agent.id,
)
while True:
# wait until run completes
while run.status in ['queued', 'in_progress']:
run = client.beta.threads.runs.retrieve(
thread_id=thread.id,
run_id=run.id
)
time.sleep(1)
# function execution
if run.status == "requires_action":
tool_calls = run.required_action.submit_tool_outputs.tool_calls
tool_outputs = []
for tool_call in tool_calls:
wprint('\033[31m' + str(tool_call.function), '\033[0m')
# find the tool to be executed
func = next(iter([func for func in funcs if func.__name__ == tool_call.function.name]))
try:
# init tool
func = func(**eval(tool_call.function.arguments))
# get outputs from the tool
output = func.run()
except Exception as e:
output = "Error: " + str(e)
wprint(f"\033[33m{tool_call.function.name}: ", output, '\033[0m')
tool_outputs.append({"tool_call_id": tool_call.id, "output": output})
# submit tool outputs
run = client.beta.threads.runs.submit_tool_outputs(
thread_id=thread.id,
run_id=run.id,
tool_outputs=tool_outputs
)
# error
elif run.status == "failed":
raise Exception("Run Failed. Error: ", run.last_error)
# return assistant message
else:
messages = client.beta.threads.messages.list(
thread_id=thread.id
)
message = messages.data[0].content[0].text.value
return message
class KeywordGeneratorFunction(OpenAISchema):
text: str
def run(self):
# Implement your keyword extraction logic here
# For demonstration, this example simply splits the text into words
keywords = self.text.split()
return keywords
class SynonymFinderFunction(OpenAISchema):
keywords: list[str]
def run(self):
# Implement your synonym finding logic here
# This is a placeholder for demonstration
synonyms = {keyword: [f"{keyword}_synonym1", f"{keyword}_synonym2"] for keyword in self.keywords}
return synonyms
class SQLQueryWriterFunction(OpenAISchema):
keywords: list[str]
def run(self):
# Implement your SQL query generation logic here
# This is a placeholder for demonstration
queries = [f"SELECT * FROM logs WHERE message LIKE '%{keyword}%'" for keyword in self.keywords]
return queries
class DataAnalysisFunction(OpenAISchema):
query_results: list
def run(self):
# Implement your data analysis logic here
# This is a placeholder for demonstration
analysis = "Data analysis results based on the query results."
return analysis
agents_and_threads = {
"KeywordGeneratorAgent": {
"agent": KeywordGeneratorAgent,
"thread": None,
"funcs": [KeywordGeneratorFunction]
},
"SynonymFinderAgent": {
"agent": SynonymFinderAgent,
"thread": None,
"funcs": [SynonymFinderFunction]
},
"SQLQueryWriterAgent": {
"agent": SQLQueryWriterAgent,
"thread": None,
"funcs": [SQLQueryWriterFunction]
},
"DataAnalysisAgent": {
"agent": DataAnalysisAgent,
"thread": None,
"funcs": [DataAnalysisFunction]
}
}
# Initialize threads for each agent
for agent in agents_and_threads.values():
agent["thread"] = client.beta.threads.create()
class SendMessage(OpenAISchema):
recepient: Literal[
'KeywordGeneratorAgent', 'SynonymFinderAgent', 'SQLQueryWriterAgent', 'DataAnalysisAgent'] = Field(
...,
description="Specify the recipient agent for the message."
)
message: str = Field(
...,
description="Specify the task required for the recipient agent to complete."
)
def run(self):
recepient_info = agents_and_threads.get(self.recepient)
if not recepient_info["thread"]:
recepient_info["thread"] = client.beta.threads.create()
message = get_completion(message=self.message, **recepient_info)
return message
user_proxy = client.beta.assistants.create(
name='User Proxy Agent',
instructions=f"""As a user proxy agent, your responsibility is to streamline the dialogue between the user and specialized agents within this group chat.Your duty is to articulate user requests accurately to the relevant agents and maintain ongoing communication with them to guarantee the user's task is carried out to completion.Please do not respond to the user until the task is complete, an error has been reported by the relevant agent, or you are certain of your response.Main Goal is :Assist users in identifying specific issues or information within log files.Focus on accuracy and simple solutions. Your Main task is to pin point the exact rows in the . That are of interest to the user.""",
model="gpt-3.5-turbo-1106",
tools=[
{"type": "function", "function": SendMessage.openai_schema},
],
)
## sdsd
thread = client.beta.threads.create()
while True:
user_message = input("User: ")
user_proxy_tools = [SendMessage]
message = get_completion(user_message, user_proxy, user_proxy_tools, thread)
wprint(f"\033[34m{user_proxy.name}: ", message, '\033[0m')
| [] |
2024-01-10 | mementofani/ANALOG-FAST | Agents~ThreadManager.py | import time
import openai
from openai import OpenAI
api_key = "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"
client = OpenAI(api_key=api_key)
class ThreadManager:
def __init__(self, assistant):
self.client = client
self.assistant = assistant
def create_thread(self):
return self.client.beta.threads.create().id
def append_message(self, thread_id, content):
return self.client.beta.threads.messages.create(
thread_id=thread_id,
role="user",
content=content
).id
def get_latest_message(self, thread_id, last_message_id):
response = self.client.beta.threads.messages.list(thread_id=thread_id)
for message in reversed(response.data):
if message.id != last_message_id:
return message
return None
def ask_question(self, question) -> str:
thread_id = self.create_thread()
last_message_id = self.append_message(thread_id, question)
client.beta.threads.runs.create(
thread_id=thread_id,
assistant_id=self.assistant.id
)
while True:
new_message = self.get_latest_message(thread_id, last_message_id)
if new_message:
return new_message.content[0].text.value
time.sleep(0.5)
| [] |
2024-01-10 | homayoonfarrahi/cycle-time-study | senseact_mod~senseact~envs~dxl~dxl_reacher_env.py | # Copyright (c) 2018, The SenseAct Authors.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import gym
import time
import numpy as np
from senseact import utils
from senseact.rtrl_base_env import RTRLBaseEnv
from senseact.devices.dxl import dxl_mx64
from senseact.devices.dxl.dxl_setup import setups
from senseact.devices.dxl import dxl_communicator as gcomm
from math import pi
from collections import deque
from multiprocessing import Array, Value
class DxlReacher1DEnv(RTRLBaseEnv, gym.core.Env):
""" The Dynamixel Reacher 1D Environment (DxlReacher1DEnv)
This task is similar to the Mujoco-based task Reacher from OpenAI Gym and the UR Reacher.
Here, the servo tries to reach a target position by controlling its joints via
position/velocity/torque commands. The goal for this task is to rotate one Dynamixel joint to
reach the target position, which is generated at a random location in each episode.
"""
def __init__(self,
setup='dxl_gripper_default',
idn=9,
baudrate=1000000,
obs_history=1,
dt=0.01,
gripper_dt=0.006,
rllab_box=False,
episode_length_step=None,
episode_length_time=4,
dof=1,
max_torque_mag = 300,
control_type='torque',
target_type='position',
reset_type='zero',
reward_type='linear',
delay=0,
dxl_dev_path='None',
max_velocity=5,
use_ctypes_driver=True,
**kwargs
):
""" Inits DxlReacher1DEnv class with task and servo specific parameters.
Args:
setup: A dictionary containing DXL reacher task specifications,
such as bounding box dimensions, joint angle ranges and max load.
idn: An integer representing the DXL ID number
baudrate: An integer representing a baudrate to connect at
obs_history: An integer number of sensory packets concatenated
into a single observation vector
dt: A float specifying duration of an environment time step
in seconds.
gripper_dt: A float representing DXLCommunicator cycle time
rllab_box: A bool specifying whether to wrap environment
action and observation spaces into an RllabBox object
(required for off-the-shelf rllab algorithms implementations).
episode_length_time: A float duration of an episode defined
in seconds
episode_length_step: An integer duration of en episode
defined in environment steps.
dof: an integer number of degrees of freedom
max_torque_mag: An integer representing max possible torque command
to be sent to the DXL devive
control_type:
target_type: A string specifying in what space to provide
target coordinates, either "position" for Cartesian space
or "angle" for joints angles space.
reset_type: A string specifying whether to reset the arm to a
fixed position or to a random position.
reward_type: A string specifying the reward function,
(e.g., "linear" for - d_t)
delay: A float specifying artificial observation delay in seconds
dxl_dev_path: A string containing the serial port address
(e.g., /dev/ttyACM0 or /dev/ttyUSB0 on linux)
max_velocity: A float representing the max possible velocity command
to be sent to the DXL device
use_ctypes_driver: A bool. Setting it to True chooses CType-based driver.
We found the CType-based driver to provide substantially more timely
and precise communication compared to the pyserial-based one.
**kwargs: Keyword arguments
"""
self.max_temperature = 60
self.cool_down_temperature = 50
self.obs_history = obs_history
self.dt = dt
self.gripper_dt = gripper_dt
self.max_torque_mag = np.array([max_torque_mag])
self.max_velocity = np.array([max_velocity])
if rllab_box:
from rllab.spaces import Box as RlBox # use this for rllab TRPO
Box = RlBox
else:
from gym.spaces import Box as GymBox # use this for baselines algos
Box = GymBox
if control_type not in ['torque']:
raise NotImplementedError('{} control not implemented'.format(control_type))
self.control_type = control_type
if target_type not in ['position']:
raise NotImplementedError('{} target not implemented'.format(target_type))
self.target_type = target_type
if reset_type not in ['zero', 'random']:
raise NotImplementedError('{} reset not implemented'.format(reset_type))
self.reset_type = reset_type
if reward_type not in ['linear']:
raise NotImplementedError('{} reward not implemented'.format(reward_type))
self.reward_type = reward_type
if control_type == 'torque':
self.action_low = -self.max_torque_mag
self.action_high = +self.max_torque_mag
elif control_type == 'velocity':
self.action_low = -self.max_velocity
self.action_high = +self.max_velocity
if setup not in setups:
raise NotImplementedError('Config not found')
self.angle_low = setups[setup]['angles_low'][0]
self.angle_high = setups[setup]['angles_high'][0]
# Load value for detecting a closed gripper during reset
self.high_load = setups[setup]['high_load'][0]
self._present_pos_ = np.zeros((obs_history, 1))
self._observation_space = Box(
low=np.array(
# list(0*np.ones(self.obs_history)) # torque enable
# + list(0*np.ones(self.obs_history)) # alarm led
# + list(0*np.ones(self.obs_history)) # led
list(-pi * np.ones(self.obs_history)) # present position
+ list(self.angle_low * np.ones(1)) # target position
+ list(-np.inf * np.ones(self.obs_history)) # present speed
# + list(-np.inf*np.ones(self.obs_history)) # present load
# + list(0*np.ones(self.obs_history)) # temperature
# + list(0*np.ones(self.obs_history)) # registered
# + list(0*np.ones(self.obs_history)) # moving
# + list(-np.inf * np.ones(self.obs_history)) # current
# + list(-np.inf*np.ones(self.obs_history)) # voltage
+ list(self.action_low * np.ones(self.obs_history)) # last action
),
high=np.array(
# list(1 * np.ones(self.obs_history)) # torque enable
# + list(128 * np.ones(self.obs_history)) # alarm led
# + list(1 * np.ones(self.obs_history)) # led
list(pi * np.ones(self.obs_history)) # present position
+ list(self.angle_high * np.ones(1)) # target position
+ list(+np.inf * np.ones(self.obs_history)) # present speed
# + list(+np.inf * np.ones(self.obs_history)) # present load
# + list(255 * np.ones(self.obs_history)) # temperature
# + list(1 * np.ones(self.obs_history)) # registered
# + list(1 * np.ones(self.obs_history)) # moving
# + list(+np.inf * np.ones(self.obs_history)) # current
# + list(+np.inf * np.ones(self.obs_history)) # voltage
+ list(self.action_high * np.ones(self.obs_history)) # last action
)
)
self._action_space = Box(low=self.action_low, high=self.action_high)
if rllab_box:
from rllab.envs.env_spec import EnvSpec
self._spec = EnvSpec(self.observation_space, self.action_space)
self._comm_name = 'DxlReacher1D'
self._dxl_dev_path = dxl_dev_path
communicator_setups = {
self._comm_name: {
'Communicator': gcomm.DXLCommunicator,
'num_sensor_packets': obs_history,
'kwargs': {
'idn': idn,
'baudrate': baudrate,
'sensor_dt': gripper_dt,
'device_path': self._dxl_dev_path,
'use_ctypes_driver': use_ctypes_driver,
}
}
}
super(DxlReacher1DEnv, self).__init__(
communicator_setups=communicator_setups,
action_dim=1,
observation_dim=self.observation_space.shape[0],
dt=dt,
**kwargs
)
read_block = dxl_mx64.MX64.subblock('version_0', 'goal_acceleration', ret_dxl_type=use_ctypes_driver)
self.regnames = [reg.name for reg in read_block]
self.reg_index = dict(zip(self.regnames, range(len(self.regnames))))
self.episode_steps = 0
if episode_length_step is not None:
assert episode_length_time is None
self.episode_length_step = episode_length_step
self.episode_length_time = episode_length_step * dt
elif episode_length_time is not None:
assert episode_length_step is None
self.episode_length_time = episode_length_time
self.episode_length_step = int(episode_length_time / dt)
else:
# TODO: should we allow a continuous behaviour case here, with no episodes?
print("episode_length_time or episode_length_step needs to be set")
raise AssertionError
# Task Parameters
self.obs_history = obs_history
self.dof = dof
self.delay = delay
# Default initialization
target_pos = np.random.uniform(low=self.angle_low, high=self.angle_high)
self.pos_range = self.angle_high - self.angle_low
self.reset_pos_center = self.angle_high - (self.pos_range//2)
self.action_range = self.action_high - self.action_low
self._reward_ = Value('d', 0.0)
self._reset_pos_ = Value('d', self.reset_pos_center)
self._present_pos_ = np.frombuffer(Array('f', self.obs_history).get_obj(), dtype='float32')
self._target_pos_ = Value('d', target_pos)
self._temperature_ = [0] * self.obs_history
self._action_history = deque([0] * (self.obs_history + 1), self.obs_history + 1)
# Tell the dxl to do nothing (overwritting previous command)
self.nothing_packet = np.zeros(self._actuator_comms[self._comm_name].actuator_buffer.array_len)
# PID control gains for reset
self.kp = 161.1444 # Proportional gain
self.ki = 0 # Integral gain
self.kd = 0 # Derivative gain
def _reset_(self):
""" Resets the environment episode.
Moves the DXL to either fixed reference or random position and
generates a new target within a bounding box.
"""
print("Resetting")
if self.reset_type == 'zero':
self._reset_pos_.value = self.reset_pos_center
elif self.reset_type == 'random':
self._reset_pos_.value = self._rand_obj_.uniform(low=self.angle_low, high=self.angle_high)
self._target_pos_.value = self._rand_obj_.uniform(low=self.angle_low, high=self.angle_high)
error_prior = 0
integral = 0
present_pos = 0.0
# Once in the correct regime, the `present_pos` values can be trusted
start_time = time.time()
while time.time() - start_time < 5:
if self._sensor_comms[self._comm_name].sensor_buffer.updated():
sensor_window, timestamp_window, index_window = self._sensor_comms[
self._comm_name].sensor_buffer.read_update(1)
present_pos = sensor_window[0][self.reg_index['present_pos']]
current_temperature = sensor_window[0][self.reg_index['temperature']]
if current_temperature > self.cool_down_temperature:
print("Starting to overheat. sleep for a few seconds")
time.sleep(10)
error = self._reset_pos_.value - present_pos
if abs(error) > 0.017: # ~1 deg
integral = integral + (error*self.gripper_dt)
derivative = (error - error_prior)/self.gripper_dt
action = self.kp*error + self.ki*integral + self.kd*derivative
error_prior = error
else:
break
self._actuator_comms[self._comm_name].actuator_buffer.write(action)
time.sleep(0.001)
self._actuator_comms[self._comm_name].actuator_buffer.write(0)
self.episode_steps = 0
rand_state_array_type, rand_state_array_size, rand_state_array = utils.get_random_state_array(
self._rand_obj_.get_state()
)
np.copyto(self._shared_rstate_array_, np.frombuffer(rand_state_array, dtype=rand_state_array_type))
time.sleep(0.1) # Give the shared buffer time to get updated and prevent false episode done conditions
print("Reset done. Gripper pos: {}".format(present_pos))
def _compute_sensation_(self, name, sensor_window, timestamp_window, index_window):
""" Creates and saves an observation vector based on sensory data.
For DXL reacher environments the observation vector is a concatenation of:
- current joint angle positions;
- current joint angle velocities;
- target joint angle position;
- previous action;
- temperature (optional)
- current (optional)
Args:
name: a string specifying the name of a communicator that
received given sensory data.
sensor_window: a list of latest sensory observations stored in
communicator sensor buffer. the length of list is defined by
obs_history parameter.
timestamp_window: a list of latest timestamp values stored in
communicator buffer.
index_window: a list of latest sensor index values stored in
communicator buffer.
Returns:
A numpy array containing concatenated [observation, reward, done]
vector.
"""
self._torque_enable_ = np.array(
[sensor_window[i][self.reg_index['torque_enable']] for i in range(self.obs_history)])
present_pos = np.array(
[sensor_window[i][self.reg_index['present_pos']] for i in range(self.obs_history)])
np.copyto(self._present_pos_, present_pos)
self._present_speed_ = np.array(
[sensor_window[i][self.reg_index['present_speed']] for i in range(self.obs_history)])
self._current_ = np.array([sensor_window[i][self.reg_index['current']] for i in range(self.obs_history)])
self._temperature_ = np.array([sensor_window[i][self.reg_index['temperature']] for i in range(self.obs_history)])
self._reward_.value = self._compute_reward()
done = [0]
last_actions = list(self._action_history)
last_actions_obs = np.array(last_actions[-self.obs_history:], dtype=float).flatten()
return np.concatenate(
(
self._present_pos_,
np.array([self._target_pos_.value]),
self._present_speed_,
# self._temperature_,
# self._current_,
self.scale_action(last_actions_obs),
np.array([self._reward_.value]),
done
)
)
def _compute_actuation_(self, action, timestamp, index):
""" Creates and sends actuation packets to the communicator.
Computes actuation commands based on agent's action and
control type and writes actuation packets to the
communicators' actuation buffers. In case of angle joints
safety limits being violated overwrites agent's
actions with actuations that return the DXL back within the box.
Args:
action: a numpoy array containing agent's action
timestamp: a float containing action timestamp
index: an integer containing action index
"""
if self._temperature_[-1] < self.max_temperature:
if self._present_pos_[-1] < self.angle_low:
self._actuation_packet_[self._comm_name] = self.max_torque_mag//2
elif self._present_pos_[-1] > self.angle_high:
self._actuation_packet_[self._comm_name] = -self.max_torque_mag//2
else:
self._actuation_packet_[self._comm_name] = action
self._action_history.append(action)
else:
self._actuator_comms[self._comm_name].actuator_buffer.write(self.nothing_packet)
raise Exception('Operating temperature of the dynamixel device exceeded {} \n'
'Use the device once it cools down!'.format(self.max_temperature))
def _compute_reward(self):
""" Computes reward at a given time step.
Returns:
A float reward.
"""
reward = 0
# if self._temperature_[-1] > self.cool_down_temperature:
# reward -= 2*pi
if self.reward_type == 'linear':
goal_pos = self._target_pos_.value
present_pos = self._present_pos_
reward -= abs(goal_pos - present_pos[-1])
reward *= self.dt/0.04
return np.array([reward])
def _check_done(self, env_done):
""" Checks whether the episode is over.
Args:
env_done: a bool specifying whether the episode should be ended.
Returns:
A bool specifying whether the episode is over.
"""
self.episode_steps += 1
if self.episode_steps >= self.episode_length_step or env_done:
self._actuator_comms[self._comm_name].actuator_buffer.write(self.nothing_packet)
done = True
else:
done = False
return np.array([done])
def reset(self, blocking=True):
""" Resets the arm, optionally blocks the environment until done. """
ret = super(DxlReacher1DEnv, self).reset(blocking=blocking)
self.episode_steps = 0
return ret
@property
def observation_space(self):
return self._observation_space
@property
def action_space(self):
return self._action_space
def scale_angle(self, angle):
return float((angle - self.angle_low)) / self.pos_range
def scale_action(self, action):
return (2*(action - self.action_low)/ self.action_range) - 1.
def terminate(self):
super(DxlReacher1DEnv, self).close()
def render(self, **kwargs):
return
| [] |
2024-01-10 | midxplore/trafilatura | tests~unit_tests.py | # pylint:disable-msg=I1101,W1401
"""
Unit tests for the trafilatura library.
"""
import logging
import os
import sys
import pytest
from lxml import etree, html
try:
from cchardet import detect
except ImportError:
from charset_normalizer import detect
# language detection
try:
import py3langid
LANGID_FLAG = True
except ImportError:
LANGID_FLAG = False
import trafilatura.htmlprocessing
from trafilatura import (bare_extraction, baseline, extract, html2txt,
process_record, utils, xml)
from trafilatura.core import (Extractor, handle_formatting, handle_image,
handle_lists, handle_paragraphs, handle_quotes,
handle_table, handle_textelem, sanitize_tree,
trim)
from trafilatura.external import try_justext
from trafilatura.filters import textfilter
from trafilatura.meta import reset_caches
from trafilatura.metadata import Document
from trafilatura.settings import DEFAULT_CONFIG, TAG_CATALOG, use_config
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
TEST_DIR = os.path.abspath(os.path.dirname(__file__))
RESOURCES_DIR = os.path.join(TEST_DIR, 'resources')
SAMPLE_META = Document()
ZERO_CONFIG = DEFAULT_CONFIG
ZERO_CONFIG['DEFAULT']['MIN_OUTPUT_SIZE'] = '0'
ZERO_CONFIG['DEFAULT']['MIN_EXTRACTED_SIZE'] = '0'
NEW_CONFIG = use_config(filename=os.path.join(RESOURCES_DIR, 'newsettings.cfg'))
MOCK_PAGES = {
'http://exotic_tags': 'exotic_tags.html',
}
DEFAULT_OPTIONS = Extractor(*[False]*11)
DEFAULT_OPTIONS.config = DEFAULT_CONFIG
def load_mock_page(url, xml_flag=False, langcheck=None, tei_output=False):
'''load mock page from samples'''
try:
with open(os.path.join(TEST_DIR, 'resources', MOCK_PAGES[url]), 'r') as inputf:
htmlstring = inputf.read()
# encoding/windows fix for the tests
except UnicodeDecodeError:
# read as binary
with open(os.path.join(TEST_DIR, 'resources', MOCK_PAGES[url]), 'rb') as inputf:
htmlbinary = inputf.read()
guessed_encoding = detect(htmlbinary)['encoding']
if guessed_encoding is not None:
try:
htmlstring = htmlbinary.decode(guessed_encoding)
except UnicodeDecodeError:
htmlstring = htmlbinary
else:
print('Encoding error')
output_format = 'txt'
if xml_flag is True:
output_format = 'xml'
if tei_output is True:
output_format = 'tei'
return extract(htmlstring, url,
record_id='0000',
no_fallback=False,
output_format=output_format,
target_language=langcheck)
def test_trim():
'''test string trimming'''
assert trim(' Test ') == 'Test'
assert trim('\t\tTest Test\r\n') == 'Test Test'
my_elem = etree.Element('body')
my_elem.text = 'Test Text'
assert textfilter(my_elem) is False
# my_elem.text = 'Tags: Arbeit, Urlaub'
my_elem.text = 'Instagram'
assert textfilter(my_elem) is True
my_elem.text = '\t\t'
assert textfilter(my_elem) is True
# sanitize logic
assert utils.sanitize(None) is None
# non-breaking spaces
print(utils.sanitize('Test Text'))
assert utils.sanitize('Test Text') == 'Test Text'
# clear cache
# reset caches: examine_date_elements used above
old_values = trim.cache_info()
reset_caches()
assert trim.cache_info() != old_values
def test_input():
'''test if loaded strings/trees are handled properly'''
assert utils.is_dubious_html('This is a string.') is True
htmlstring = "<!DOCTYPE html PUBLIC />\n<html/>"
beginning = htmlstring[:50].lower()
assert utils.strip_faulty_doctypes(htmlstring, beginning) == "\n<html/>"
htmlstring = "<html>\n</html>"
beginning = htmlstring[:50].lower()
assert utils.strip_faulty_doctypes(htmlstring, beginning) == htmlstring
with pytest.raises(TypeError) as err:
assert utils.load_html(123) is None
assert 'incompatible' in str(err.value)
assert utils.load_html('<html><body>ÄÖÜ</body></html>') is not None
assert utils.load_html(b'<html><body>\x2f\x2e\x9f</body></html>') is not None
assert utils.load_html('<html><body>\x2f\x2e\x9f</body></html>'.encode('latin-1')) is not None
#assert utils.load_html(b'0'*int(10e3)) is None
# old: with pytest.raises(TypeError) as err:
assert extract(None, 'url', '0000', target_language=None) is None
# legacy
assert process_record(None, 'url', '0000', target_language=None) is None
# GZip
with open(os.path.join(RESOURCES_DIR, 'webpage.html.gz'), 'rb') as gzfile:
myinput = gzfile.read()
assert 'Long story short,' in extract(myinput)
# unicode normalization
assert utils.normalize_unicode('A\u0308ffin') != 'A\u0308ffin'
testresult = extract('<html><body><p>A\u0308ffin</p></body></html>', config=ZERO_CONFIG)
assert testresult != 'A\u0308ffin' and testresult == 'Äffin'
def test_txttocsv():
mymeta = Document()
assert utils.txttocsv('', '', mymeta) == 'None\tNone\tNone\tNone\tNone\tNone\t\t\tNone\tNone\n'
mymeta.title = 'Test title'
mymeta.url = 'https://example.org'
mymeta.hostname = 'example.org'
mymeta.id = '1'
mymeta.license = 'CC BY-SA'
mymeta.image = 'https://example.org/image.jpg'
mymeta.pagetype = 'article'
assert utils.txttocsv('Test text', 'Test comment', mymeta) == '1\thttps://example.org\tNone\texample.org\tTest title\thttps://example.org/image.jpg\tNone\tTest text\tTest comment\tCC BY-SA\tarticle\n'
mystring = '<html><body><p>ÄÄÄÄÄÄÄÄÄÄÄÄÄÄ</p></body></html>'
assert extract(mystring, output_format='csv', config=ZERO_CONFIG) is not None
assert extract(mystring, output_format='csv', include_comments=False, config=ZERO_CONFIG).endswith('\tNone\n')
# test json
result = extract(mystring, output_format='json', config=ZERO_CONFIG)
assert result.endswith('}') and '"fingerprint":' in result and '"language":' in result
assert extract(mystring, output_format='json', include_comments=False, config=ZERO_CONFIG).endswith('}')
# bare extraction for python
result = bare_extraction(mystring, config=ZERO_CONFIG, as_dict=True)
assert isinstance(result, dict) and len(result) == 20
def test_exotic_tags(xmloutput=False):
options = DEFAULT_OPTIONS
options.config = ZERO_CONFIG
# cover some edge cases with a specially crafted file
result = load_mock_page('http://exotic_tags', xml_flag=xmloutput, tei_output=True)
assert 'Teletype text' in result and 'My new car is silver.' in result
filepath = os.path.join(TEST_DIR, 'resources', 'exotic_tags_tei.html')
with open(filepath) as f:
content = etree.fromstring(f.read())
res = xml.check_tei(content, 'http://dummy')
assert etree.tostring(res).startswith(b'<html>\n<text>\n<body>\n<div>\n\n<hi rend="uppercase">Hello</hi>\n<p>Teletype text</p>')
# misformed HTML declaration
htmlstring = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" 2012"http://www.w3.org/TR/html4/loose.dtd"><html><head></head><body><p>ABC</p></body></html>'
# outputs '012"http://www.w3.org/TR/html4/loose.dtd">\nABC'
assert 'ABC' in extract(htmlstring, config=ZERO_CONFIG)
# quotes
assert handle_quotes(etree.Element('quote'), options) is None
assert handle_table(etree.Element('table'), TAG_CATALOG, options) is None
# p within p
element, second = etree.Element('p'), etree.Element('p')
element.text, second.text = '1st part.', '2nd part.'
element.append(second)
# delete last <lb>
element.append(etree.Element('lb'))
converted = handle_paragraphs(element, ['p'], options)
assert etree.tostring(converted) == b'<p>1st part. 2nd part.</p>'
# naked div with <lb>
assert '1.\n2.\n3.' in extract('<html><body><main><div>1.<br/>2.<br/>3.<br/></div></main></body></html>', no_fallback=True, config=ZERO_CONFIG)
# HTML5: <details>
htmlstring = '<html><body><article><details><summary>Epcot Center</summary><p>Epcot is a theme park at Walt Disney World Resort featuring exciting attractions, international pavilions, award-winning fireworks and seasonal special events.</p></details></article></body></html>'
my_result = extract(htmlstring, no_fallback=True, config=ZERO_CONFIG)
assert 'Epcot Center' in my_result and 'award-winning fireworks' in my_result
my_result = extract(htmlstring, no_fallback=False, config=ZERO_CONFIG)
assert 'Epcot Center' in my_result and 'award-winning fireworks' in my_result
# edge cases
htmlstring = '''<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>A weird bug</title>
</head>
<body>
<div>
<h1>Lorem ipsum dolor sit amet, consectetur adipiscing elit.</h1>
<h2>Sed et interdum lectus.</h2>
<p>Quisque molestie nunc eu arcu condimentum fringilla.</p>
<!-- strong can be changed to b, em, i, u, or kbd -->
<strong><a></a></strong>
<h2>Aliquam eget interdum elit, id posuere ipsum.</h2>
<p>Phasellus lectus erat, hendrerit sed tortor ac, dignissim vehicula metus.</p>
</div>
</body>
</html>'''
assert extract(htmlstring, include_formatting=True, include_links=True, include_images=True) is not None
htmlstring = '''<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>A weird bug</title>
</head>
<body>
<div id="content">
<h1>A header</h1>
<h2>Very specific bug so odd</h2>
<h3>Nested header</h3>
<p>Some "hyphenated-word quote" followed by a bit more text line.</p>
<em><p>em improperly wrapping p here</p></em>
<p>Text here</p>
</div>
</body>
</html>'''
assert extract(htmlstring, include_formatting=True, include_links=True, include_images=True) is not None
# comments
assert extract('<html><body><article><p>text</p><div class="comments"><p>comment</p></div></article></body></html>', include_comments=True, no_fallback=True, config=ZERO_CONFIG).endswith("\ncomment")
def test_formatting():
'''Test HTML formatting conversion and extraction'''
options = DEFAULT_OPTIONS
# trailing <lb>
my_document = html.fromstring('<html><body><p>This here is the text.<br/></p></body></html>')
my_result = extract(my_document, output_format='xml', config=ZERO_CONFIG)
assert 'lb' not in my_result
# simple formatting
my_document = html.fromstring('<html><body><p><b>This here is in bold font.</b></p></body></html>')
my_result = extract(my_document, output_format='xml', include_formatting=True, config=ZERO_CONFIG)
assert '<hi rend="#b">This here is in bold font.</hi>' in my_result
# titles as markdown
my_document = html.fromstring('<html><body><article><h3>Title</h3><p><b>This here is in bold font.</b></p></article></body></html>')
my_result = extract(my_document, output_format='txt', include_formatting=True, config=ZERO_CONFIG)
assert my_result == '### Title\n**This here is in bold font.**'
# nested
my_document = html.fromstring('<html><body><p><b>This here is in bold and <i>italic</i> font.</b></p></body></html>')
my_result = extract(my_document, output_format='xml', include_formatting=True, config=ZERO_CONFIG)
assert '<hi rend="#b">This here is in bold and italic font.</hi>' in my_result
# empty
my_document = html.fromstring('<html><body><p><b><i></i></b></p></body></html>')
my_result = extract(my_document, output_format='xml', include_formatting=True, config=ZERO_CONFIG)
assert '<main/>' in my_result
# wild div
my_document = html.fromstring('<html><body><article><div><strong>Wild text</strong></div></article></body></html>')
my_result = extract(my_document, output_format='xml', include_formatting=True, config=ZERO_CONFIG)
assert '<p>' in my_result and '<hi rend="#b">Wild text</hi>' in my_result # no rend so far
my_result = extract(my_document, config=ZERO_CONFIG)
assert my_result == 'Wild text'
# links
doc = html.fromstring('<html><body><p><a href="">Link text</a></p></body></html>')
my_result = extract(doc, config=ZERO_CONFIG)
assert my_result == 'Link text'
# line-breaks
doc = html.fromstring('<html><body><p><br/></p></body></html>')
my_result = extract(doc, config=ZERO_CONFIG)
assert my_result == ''
doc = html.fromstring('<html><body><p><br/>Here is the text.</p></body></html>')
my_result = extract(doc, config=ZERO_CONFIG)
assert my_result == 'Here is the text.'
# handle formatting tails
element = etree.Element("hi")
element.text = 'Here is the text.'
element.tail = 'And a tail.'
options.config = ZERO_CONFIG
converted = handle_formatting(element, options)
assert etree.tostring(converted) == b'<p><hi>Here is the text.</hi>And a tail.</p>'
# empty elements
my_document = html.fromstring('<html><body><div>\t\n</div><div>There is text here.</div></body></html>')
my_result = extract(my_document, output_format='xml', config=ZERO_CONFIG)
assert '<main>\n <p>There is text here.</p>\n </main>' in my_result
# lists with links
my_document = html.fromstring('<html><body><article><ul><li>Number 1</li><li>Number <a href="test.html">2</a></li><li>Number 3</li><p>Test</p></article></body></html>')
my_result = extract(my_document, output_format='xml', include_links=True, config=ZERO_CONFIG)
assert '<item>Number <ref target="test.html">2</ref></item>' in my_result
# XML and Markdown formatting within <p>-tag
my_document = html.fromstring('<html><body><p><b>bold</b>, <i>italics</i>, <tt>tt</tt>, <strike>deleted</strike>, <u>underlined</u>, <a href="test.html">link</a> and additional text to bypass detection.</p></body></html>')
my_result = extract(my_document, no_fallback=True, include_formatting=False, config=ZERO_CONFIG)
# TXT: newline problem here
assert my_result == 'bold, italics, tt,\ndeleted, underlined, link and additional text to bypass detection.'
my_result = extract(my_document, output_format='xml', no_fallback=True, include_formatting=True, config=ZERO_CONFIG)
assert '<p><hi rend="#b">bold</hi>, <hi rend="#i">italics</hi>, <hi rend="#t">tt</hi>, <del>deleted</del>, <hi rend="#u">underlined</hi>, link and additional text to bypass detection.</p>' in my_result
assert 'rend="#b"' in my_result and 'rend="#i"' in my_result and 'rend="#t"' in my_result and 'rend="#u"' in my_result and '<del>' in my_result
my_result = extract(my_document, output_format='xml', include_formatting=True, include_links=True, no_fallback=True, config=ZERO_CONFIG)
assert '<p><hi rend="#b">bold</hi>, <hi rend="#i">italics</hi>, <hi rend="#t">tt</hi>, <del>deleted</del>, <hi rend="#u">underlined</hi>, <ref target="test.html">link</ref> and additional text to bypass detection.</p>' in my_result
my_result = extract(my_document, output_format='txt', no_fallback=True, include_formatting=True, config=ZERO_CONFIG)
assert my_result == '**bold**, *italics*, `tt`, ~~deleted~~, __underlined__, link and additional text to bypass detection.'
# double <p>-elems
# could be solved by keeping the elements instead of reconstructing them
my_document = html.fromstring('<html><body><p>AAA, <p>BBB</p>, CCC.</p></body></html>')
my_result = extract(my_document, output_format='xml', include_formatting=True, include_links=True, no_fallback=True, config=ZERO_CONFIG)
assert 'AAA' in my_result and 'BBB' in my_result and 'CCC' in my_result
# line-break following formatting
my_document = html.fromstring('<html><body><article><p><strong>Staff Review of the Financial Situation</strong><br>Domestic financial conditions remained accommodative over the intermeeting period.</p></article></body></html>')
my_result = extract(my_document, output_format='txt', no_fallback=True, config=ZERO_CONFIG)
assert my_result == 'Staff Review of the Financial Situation\nDomestic financial conditions remained accommodative over the intermeeting period.'
# title with formatting
my_document = html.fromstring('<html><body><article><h4 id="1theinoperator">1) The <code>in</code> Operator</h4><p>The easiest way to check if a Python string contains a substring is to use the <code>in</code> operator. The <code>in</code> operator is used to check data structures for membership in Python. It returns a Boolean (either <code>True</code> or <code>False</code>) and can be used as follows:</p></article></body></html>')
my_result = extract(my_document, output_format='xml', no_fallback=True, include_formatting=True, config=ZERO_CONFIG)
assert '<head rend="h4">1) The <code>in</code> Operator</head>' in my_result and '<p>The easiest way to check if a Python string contains a substring is to use the <code>in</code> operator. The <code>in</code> operator is used to check data structures for membership in Python. It returns a Boolean (either <code>True</code> or <code>False</code>) and can be used as follows:</p>' in my_result
def test_baseline():
_, string, length = baseline('')
assert (string, length) == ('', 0)
my_document = r'<html><body><script type="application/ld+json">{"description":"In letzter Zeit kam man am Begriff \"Hygge\", was so viel wie \"angenehm\" oder \"gemütlich\" bedeutet, ja nicht vorbei. Jetzt macht ihm ein neuer Glücks-Trend ...","image":[{"name":"Mit der Ikigai-Methode wirst du glücklicher","url":"https:\/\/image.brigitte.de\/10973004\/uncropped-0-0\/7d00b2658fd0a3b19e1b161f4657cc20\/Xw\/ikigai--1-.jpg","width":"2048","height":"1366","@type":"ImageObject"},{"name":"Mit der Ikigai-Methode wirst du glücklicher","url":"https:\/\/image.brigitte.de\/10973004\/16x9-1280-720\/bf947c7c24167d7c0adae0be10942d57\/Uf\/ikigai--1-.jpg","width":"1280","height":"720","@type":"ImageObject"},{"name":"Mit der Ikigai-Methode wirst du glücklicher","url":"https:\/\/image.brigitte.de\/10973004\/16x9-938-528\/bf947c7c24167d7c0adae0be10942d57\/JK\/ikigai--1-.jpg","width":"938","height":"528","@type":"ImageObject"},{"name":"Mit der Ikigai-Methode wirst du glücklicher","url":"https:\/\/image.brigitte.de\/10973004\/large1x1-622-622\/f5544b7d67e1be04f7729b130e7e0485\/KN\/ikigai--1-.jpg","width":"622","height":"622","@type":"ImageObject"}],"mainEntityOfPage":{"@id":"https:\/\/www.brigitte.de\/liebe\/persoenlichkeit\/ikigai-macht-dich-sofort-gluecklicher--10972896.html","@type":"WebPage"},"headline":"Ikigai macht dich sofort glücklicher!","datePublished":"2019-06-19T14:29:08+0000","dateModified":"2019-06-19T14:29:10+0000","author":{"name":"BRIGITTE.de","@type":"Organization"},"publisher":{"name":"BRIGITTE.de","logo":{"url":"https:\/\/image.brigitte.de\/11476842\/uncropped-0-0\/f19537e97b9189bf0f25ce924168bedb\/kK\/bri-logo-schema-org.png","width":"167","height":"60","@type":"ImageObject"},"@type":"Organization"},"articleBody":"In letzter Zeit kam man am Begriff \"Hygge\" (\"gemütlich\" oder \"angenehm\") nicht vorbei. Jetzt macht ihm ein neuer Glücks-Trend Konkurrenz: \"Ikigai\". Bist du glücklich? Schwierige Frage, nicht wahr? Viele von uns müssen da erst mal überlegen.","@type":"NewsArticle"}</script></body></html>'
_, result, _ = baseline(my_document)
assert result.startswith('In letzter Zeit kam man') and result.endswith('erst mal überlegen.')
my_document = '<html><body><article>' + 'The article consists of this text.'*10 + '</article></body></html>'
_, result, _ = baseline(my_document)
assert result is not None
my_document = '<html><body><article><b>The article consists of this text.</b></article></body></html>'
_, result, _ = baseline(my_document)
assert result is not None
my_document = '<html><body><quote>This is only a quote but it is better than nothing.</quote></body></html>'
_, result, _ = baseline(my_document)
assert result is not None
my_document = "<html><body><div> Document body... </div><script> console.log('Hello world') </script></body></html>"
_, result, _ = baseline(my_document)
assert result == 'Document body...'
def test_html2txt():
mydoc = "<html><body>Here is the body text</body></html>"
assert html2txt(mydoc) == "Here is the body text"
assert html2txt(html.fromstring(mydoc)) == "Here is the body text"
assert html2txt("") == ""
assert html2txt("123") == ""
def test_external():
'''Test external components'''
options = DEFAULT_OPTIONS
options.tables = True
# remove unwanted elements
mydoc = html.fromstring('<html><body><footer>Test text</footer></body></html>')
_, _, mylen = sanitize_tree(mydoc, options)
assert mylen == 0
mydoc = html.fromstring('<html><body><table><th>Test text</th><tr><td>Test</td></tr></table></body></html>')
_, _, mylen = sanitize_tree(mydoc, options)
assert mylen > 0
# strip fancy tags while including links and images
mydoc = html.fromstring('<html><body><p>Text here <fancy>Test text</fancy><a href="">with a link</a>.</p><img src="test.jpg"/></body></html>')
mytree, _, _ = sanitize_tree(mydoc, options)
assert len(mytree) == 1
mydoc = html.fromstring('<html><body><p>Text here <fancy>Test text</fancy><a href="">with a link</a>.</p><img src="test.jpg"/></body></html>')
options.links, options.images = True, True
mytree, _, _ = sanitize_tree(mydoc, options)
myelems = {element.tag for element in set(mytree.iter())}
assert 'graphic' in myelems and 'ref' in myelems
# test langid
if LANGID_FLAG is True:
doc = html.fromstring('<html><body>' + '<p>Non è inglese.</p>'*20 + '</body></html>')
assert extract(doc, no_fallback=False, target_language='en', deduplicate=False) is None
# no tables
with open(os.path.join(RESOURCES_DIR, 'apache.html')) as f:
teststring = f.read()
assert 'localhost:80' in extract(teststring, no_fallback=False, include_tables=True)
assert 'localhost:80' not in extract(teststring, no_fallback=False, include_tables=False)
with open(os.path.join(RESOURCES_DIR, 'scam.html')) as f:
teststring = f.read()
assert extract(teststring, no_fallback=True, include_tables=False) == ''
assert extract(teststring, no_fallback=False, include_tables=False) == ''
def test_images():
'''Test image extraction function'''
# file type
assert utils.is_image_file('test.jpg') is True
assert utils.is_image_file('test.txt') is False
# tag with attributes
assert handle_image(html.fromstring('<img src="test.jpg"/>')) is not None
assert handle_image(html.fromstring('<img data-src="test.jpg" alt="text" title="a title"/>')) is not None
assert handle_image(html.fromstring('<img other="test.jpg"/>')) is None
# HTML conversion
assert handle_textelem(etree.Element('graphic'), [], DEFAULT_OPTIONS) is None
with open(os.path.join(RESOURCES_DIR, 'http_sample.html')) as f:
teststring = f.read()
assert '' not in extract(teststring)
assert '' in extract(teststring, include_images=True, no_fallback=True)
assert '<graphic src="test.jpg" title="Example image"/>' in extract(teststring, include_images=True, no_fallback=True, output_format='xml', config=ZERO_CONFIG)
assert extract('<html><body><article><img data-src="test.jpg" alt="text" title="a title"/></article></body></html>', include_images=True, no_fallback=True) == ''
# CNN example
mydoc = html.fromstring('<img class="media__image media__image--responsive" alt="Harry and Meghan last March, in their final royal engagement." data-src-mini="//cdn.cnn.com/cnnnext/dam/assets/210307091919-harry-meghan-commonwealth-day-small-169.jpg" data-src-xsmall="//cdn.cnn.com/cnnnext/dam/assets/210307091919-harry-meghan-commonwealth-day-medium-plus-169.jpg" data-src-small="//cdn.cnn.com/cnnnext/dam/assets/210307091919-harry-meghan-commonwealth-day-large-169.jpg" data-src-medium="//cdn.cnn.com/cnnnext/dam/assets/210307091919-harry-meghan-commonwealth-day-exlarge-169.jpg" data-src-large="//cdn.cnn.com/cnnnext/dam/assets/210307091919-harry-meghan-commonwealth-day-super-169.jpg" data-src-full16x9="//cdn.cnn.com/cnnnext/dam/assets/210307091919-harry-meghan-commonwealth-day-full-169.jpg" data-src-mini1x1="//cdn.cnn.com/cnnnext/dam/assets/210307091919-harry-meghan-commonwealth-day-small-11.jpg" data-demand-load="loaded" data-eq-pts="mini: 0, xsmall: 221, small: 308, medium: 461, large: 781" src="//cdn.cnn.com/cnnnext/dam/assets/210307091919-harry-meghan-commonwealth-day-exlarge-169.jpg" data-eq-state="mini xsmall small medium" data-src="//cdn.cnn.com/cnnnext/dam/assets/210307091919-harry-meghan-commonwealth-day-exlarge-169.jpg">')
myimage = handle_image(mydoc)
assert myimage is not None and 'alt' in myimage.attrib and 'src' in myimage.attrib
# modified CNN example
mydoc = html.fromstring('<img class="media__image media__image--responsive" alt="Harry and Meghan last March, in their final royal engagement." data-src-mini="//cdn.cnn.com/cnnnext/dam/assets/210307091919-harry-meghan-commonwealth-day-small-169.jpg" data-src-xsmall="//cdn.cnn.com/cnnnext/dam/assets/210307091919-harry-meghan-commonwealth-day-medium-plus-169.jpg" data-src-small="//cdn.cnn.com/cnnnext/dam/assets/210307091919-harry-meghan-commonwealth-day-large-169.jpg" data-src-medium="//cdn.cnn.com/cnnnext/dam/assets/210307091919-harry-meghan-commonwealth-day-exlarge-169.jpg" data-src-large="//cdn.cnn.com/cnnnext/dam/assets/210307091919-harry-meghan-commonwealth-day-super-169.jpg" data-src-full16x9="//cdn.cnn.com/cnnnext/dam/assets/210307091919-harry-meghan-commonwealth-day-full-169.jpg" data-src-mini1x1="//cdn.cnn.com/cnnnext/dam/assets/210307091919-harry-meghan-commonwealth-day-small-11.jpg" data-demand-load="loaded" data-eq-pts="mini: 0, xsmall: 221, small: 308, medium: 461, large: 781">')
myimage = handle_image(mydoc)
assert myimage is not None and 'alt' in myimage.attrib and 'src' in myimage.attrib and myimage.get('src').startswith('http')
def test_links():
'''Test link extraction function'''
options = DEFAULT_OPTIONS
options.config = ZERO_CONFIG
assert handle_textelem(etree.Element('ref'), [], options) is None
assert handle_formatting(html.fromstring('<a href="testlink.html">Test link text.</a>'), options) is not None
# empty link
mydoc = html.fromstring('<html><body><p><a></a><b>Some text.</b></p></body></html>')
assert extract(mydoc) is not None
# link with target
mydoc = html.fromstring('<html><body><p><a href="testlink.html">Test link text.</a> This part of the text has to be long enough.</p></body></html>')
assert 'testlink.html' not in extract(mydoc)
assert '[Test link text.](testlink.html) This part of the text has to be long enough.' in extract(mydoc, include_links=True, no_fallback=True, config=ZERO_CONFIG)
# relative link conversion
assert '[Test link text.](https://www.example.com/testlink.html) This part of the text has to be long enough.' in extract(mydoc, url='https://www.example.com/', include_links=True, no_fallback=True, config=ZERO_CONFIG)
# link without target
mydoc = html.fromstring('<html><body><p><a>Test link text.</a> This part of the text has to be long enough.</p></body></html>')
assert '[Test link text.] This part of the text has to be long enough.' in extract(mydoc, include_links=True, no_fallback=True, config=ZERO_CONFIG)
mydoc = html.fromstring('<html><body><article><a>Segment 1</a><h1><a>Segment 2</a></h1><p>Segment 3</p></article></body></html>')
result = extract(mydoc, output_format='xml', include_links=True, no_fallback=True, config=ZERO_CONFIG)
assert '1' in result and '2' in result and '3' in result
with open(os.path.join(RESOURCES_DIR, 'http_sample.html')) as f:
teststring = f.read()
assert 'testlink.html' not in extract(teststring, config=ZERO_CONFIG)
assert '[link](testlink.html)' in extract(teststring, include_links=True, no_fallback=True, config=ZERO_CONFIG)
assert '<ref target="testlink.html">link</ref>' in extract(teststring, include_links=True, no_fallback=True, output_format='xml', config=ZERO_CONFIG)
# test license link
mydoc = html.fromstring('<html><body><p>Test text under <a rel="license" href="">CC BY-SA license</a>.</p></body></html>')
assert 'license="CC BY-SA license"' in extract(mydoc, include_links=True, no_fallback=True, output_format='xml', config=ZERO_CONFIG)
def test_tei():
'''test TEI-related functions'''
# open local resources to avoid redownloading at each run
with open(os.path.join(RESOURCES_DIR, 'httpbin_sample.html')) as f:
teststring = f.read()
# download, parse and validate simple html file
result1 = extract(teststring, "mocked", no_fallback=True, output_format='xmltei', tei_validation=False)
result2 = extract(teststring, "mocked", no_fallback=True, output_format='xmltei', tei_validation=True)
assert result1 is not None and result1 == result2
assert xml.validate_tei(etree.fromstring(result1)) is True
assert xml.validate_tei(etree.fromstring(teststring)) is False
# test with another file
with open(os.path.join(RESOURCES_DIR, 'http_sample.html')) as f:
teststring = f.read()
# download, parse and validate simple html file
result = extract(teststring, "mocked", no_fallback=True, include_comments=True, output_format='xmltei', tei_validation=False)
assert result is not None # and '<p>license</p>' in result
assert xml.validate_tei(etree.fromstring(result)) is True
result = extract(teststring, "mocked", no_fallback=True, include_comments=False, output_format='xmltei', tei_validation=False)
assert result is not None # and '<p>license</p>' in result
assert xml.validate_tei(etree.fromstring(result)) is True
# include ID in metadata
result = extract(teststring, "mocked", no_fallback=True, output_format='xmltei', tei_validation=False, record_id='0001')
assert result is not None
assert xml.validate_tei(etree.fromstring(result)) is True
# test header + metadata
tei = etree.Element('TEI', xmlns='http://www.tei-c.org/ns/1.0')
header = etree.SubElement(tei, 'teiHeader')
docmeta = Document()
docmeta.categories, docmeta.tags = [], []
docmeta.title = 'Title'
assert xml.write_fullheader(header, docmeta) is not None
docmeta.sitename = 'Site Name'
docmeta.date = '2021-01-01'
assert xml.write_fullheader(header, docmeta) is not None
docmeta.date = None
assert xml.write_fullheader(header, docmeta) is not None
docmeta.hostname = 'hostname'
assert xml.write_fullheader(header, docmeta) is not None
docmeta.sitename = None
docmeta.license = 'CC BY-SA'
docmeta.url = 'https://test.org/'
docmeta.categories = ['cat1', 'cat2']
assert xml.write_fullheader(header, docmeta) is not None
docmeta.date = '2021-01-01'
assert xml.write_fullheader(header, docmeta) is not None
docmeta.title, docmeta.sitename = None, None
assert xml.write_fullheader(header, docmeta) is not None
xml_doc = etree.fromstring("<TEI><text><body><div>text</div></body></text></TEI>")
cleaned = xml.check_tei(xml_doc, "fake_url")
result = [(elem.tag, elem.text) for elem in cleaned.find(".//div").iter()]
expected = [("div", None), ("p", "text")]
assert result == expected
xml_doc = etree.fromstring("<TEI><text><body><div><div>text1<p>text2</p></div></div></body></text></TEI>")
cleaned = xml.check_tei(xml_doc, "fake_url")
result = [(elem.tag, elem.text) for elem in cleaned.find(".//div").iter()]
expected = [("div", None), ("div", None), ("p", "text1 text2")]
assert result == expected
xml_doc = etree.fromstring("<TEI><text><body><div><div>text1<head>text2</head></div></div></body></text></TEI>")
cleaned = xml.check_tei(xml_doc, "fake_url")
result = [(elem.tag, elem.text) for elem in cleaned.find(".//div").iter()]
expected = [("div", None), ("div", None), ("p", "text1"), ("ab", "text2")]
assert result == expected
xml_doc = etree.fromstring("<TEI><text><body><div><div>text1<p>text2</p></div>has to be there</div></body></text></TEI>")
cleaned = xml.check_tei(xml_doc, "fake_url")
result = [(elem.tag, elem.text, elem.tail) for elem in cleaned.find(".//div/div").iter()]
expected = [("div", None, None), ("p", "text1 text2 has to be there", None)]
assert result == expected
xml_doc = etree.fromstring("<TEI><text><body><div><div>text1<quote>text2</quote></div>has to be there</div></body></text></TEI>")
cleaned = xml.check_tei(xml_doc, "fake_url")
result = [(elem.tag, elem.text, elem.tail) for elem in cleaned.find(".//div/div").iter()]
expected = [("div", None, None), ("p", "text1", None), ("quote", "text2", None), ("p", "has to be there", None)]
assert result == expected
xml_doc = etree.fromstring("<TEI><text><body><div><div>text1<p>text2</p>has to be there</div></div></body></text></TEI>")
cleaned = xml.check_tei(xml_doc, "fake_url")
result = [(elem.tag, elem.text, elem.tail) for elem in cleaned.find(".//div/div").iter()]
expected = [("div", None, None), ("p", "text1 text2 has to be there", None)]
assert result == expected
htmlstring = html.fromstring("<html><head/><body><div><h2><p>text</p></h2></div></body></html>")
extracted = extract(htmlstring, url='mocked', no_fallback=True, output_format="xmltei")
assert xml.validate_tei(etree.fromstring(extracted)) is True
htmlstring = html.fromstring("<html><body><article><h1>title</h1><h2>subtitle</h2><p>text</p></article></body></html>")
extracted = extract(htmlstring, url="mocked", no_fallback=True, output_format="xmltei")
assert '<ab rend="h1" type="header">title</ab>' in extracted
assert '<ab rend="h2" type="header">subtitle</ab>' in extracted
htmlstring = html.fromstring(
"""<html>
<body><article>
<h2><div>
<p>content</p>
<ul>
<li>text1</li>
<li>text2</li>
</ul>
</div></h2>
</article></body>
</html>"""
)
extracted = extract(htmlstring, url="mocked", no_fallback=True, output_format="xmltei")
assert '<ab rend="h2" type="header">content<list rend="ul"><item>text1' in extracted.replace("\n", "")
# merge double elements
tree = html.fromstring(
"""<html>
<body>
<p><p>
<span><p>content</p></span>
</p></p>
</body>
</html>"""
)
tree = xml.remove_empty_elements(xml.strip_double_tags(tree))
result = utils.sanitize(etree.tostring(tree, encoding="unicode")).replace("\n", "")
assert result == "<html><body><p><span>content</span></p></body></html>"
tree = html.fromstring(
"""
<html>
<body>
<div>
<div>
<p>
<p>text</p>
<p>
</div>
</div>
</body>
</html>
"""
)
xml.strip_double_tags(tree)
assert tree.find(".//div/div") is not None and tree.find(".//p/p") is None
tree = etree.XML(
"""
<html><body>
<div>
<p>text1<lb/>text2<p>text3</p><lb/>text4</p>
<p>text5<p>text6</p></p>
</div>
</body></html>
"""
)
xml.strip_double_tags(tree)
assert tree.find(".//p/p") is None
tree = etree.XML(
"""
<html><body>
<div>
<p>text1<lb/>text2<p>text3</p><lb/>text4</p>
<p>text5<p>text6<p>text7</p></p></p>
</div>
</body></html>
"""
)
xml.strip_double_tags(tree)
assert tree.find(".//p/p") is None
assert "text7" in etree.tostring(tree, encoding="unicode")
# nested elements with same tag not merged
tree = html.fromstring(
"""<html>
<body>
<div>
<p>
<list>
<item>
<p>text</p>
</item>
</list>
</p>
<p>
<table>
<row>
<cell>
<p>text1</p>
</cell>
</row>
</table>
</p>
<p>
<note>
<p>text2</p>
</note>
</p>
<p>
<quote>
<p>text3</p>
</quote>
</p>
<p>
<figure>
<p>text4</p>
</figure>
</p>
</div>
</body>
</html>"""
)
xml.strip_double_tags(tree)
for parent_tag in ["item", "cell", "quote", "note", "figure"]:
assert tree.find(f".//{parent_tag}/p") is not None
def test_htmlprocessing():
'''test html-related functions'''
options = DEFAULT_OPTIONS
options.tables = True
assert trafilatura.htmlprocessing.tree_cleaning(etree.Element('html'), options) is not None
assert trafilatura.htmlprocessing.prune_html(etree.Element('unwanted')) is not None
mydoc = html.fromstring('<html><body><table><a href="">Link</a></table><img src="test.jpg"/><u>Underlined</u><tt>True Type</tt><sub>Text</sub><sup>Text</sup></body></html>')
options.formatting, options.images, options.links = True, True, True
myconverted = trafilatura.htmlprocessing.convert_tags(mydoc, options)
assert myconverted.xpath('.//ref') and myconverted.xpath('.//graphic') and myconverted.xpath('.//hi[@rend="#t"]') and myconverted.xpath('.//table')
options.images, options.tables = True, False
myconverted = trafilatura.htmlprocessing.tree_cleaning(mydoc, options)
assert myconverted.xpath('.//graphic') and not myconverted.xpath('.//table')
mydoc = html.fromstring('<html><body><article><h1>Test headline</h1><p>Test</p></article></body></html>')
assert '<head rend="h1">Test headline</head>' in extract(mydoc, output_format='xml', config=ZERO_CONFIG, no_fallback=True)
assert '<ab rend="h1" type="header">Test headline</ab>' in extract(mydoc, output_format='xmltei', config=ZERO_CONFIG, no_fallback=True)
# merge with parent function
element = etree.Element('test')
xml.merge_with_parent(element)
mydoc = html.fromstring('<html><body><p><span>A</span><span>B</span><span>C</span></p></body></html>')
for element in mydoc.iter('span'):
xml.merge_with_parent(element)
assert b'<p>A B C</p>' in etree.tostring(mydoc)
mydoc = html.fromstring('<html><body><p><span>A</span><span>B</span> tail<span>C</span></p></body></html>')
for element in mydoc.iter('span'):
xml.merge_with_parent(element)
assert b'<p>A B tail C</p>' in etree.tostring(mydoc)
# paywalls
my_html = '<html><body><main><p>1</p><p id="paywall">2</p><p>3</p></main></body></html>'
assert extract(my_html, config=ZERO_CONFIG, no_fallback=True) == '1\n3'
assert extract(my_html, config=ZERO_CONFIG, no_fallback=False) == '1\n3'
# test tail of node deleted if set as text
node = etree.fromstring("<div><p></p>tail</div>")[0]
trafilatura.htmlprocessing.process_node(node, options)
assert node.text == 'tail'
assert node.tail is None
node = etree.fromstring("<list><item></item>text in tail</list>")[0]
trafilatura.htmlprocessing.process_node(node, options)
assert node.text == "text in tail"
assert node.tail is None
line_break = etree.fromstring("<p><lb/>tail</p>")[0]
trafilatura.htmlprocessing.process_node(line_break, options)
assert line_break.text is None
assert line_break.tail == "tail"
node = etree.fromstring("<div><p>some text</p>tail</div>")[0]
trafilatura.htmlprocessing.process_node(node, options)
assert node.text == "some text"
assert node.tail == "tail"
def test_extraction_options():
'''Test the different parameters available in extract() and bare_extraction()'''
my_html = '<html><head><meta http-equiv="content-language" content="EN"/></head><body><div="article-body"><p>Text.<!-- comment --></p></div></body></html>'
with pytest.raises(NameError) as err:
extract(my_html, json_output=True)
assert extract(my_html, config=NEW_CONFIG) is None
assert extract(my_html, config=ZERO_CONFIG) is not None
assert extract(my_html, with_metadata=True, output_format='xml', config=ZERO_CONFIG) is None
assert extract(my_html, only_with_metadata=True, output_format='xml', config=ZERO_CONFIG) is None
assert extract(my_html, target_language='de', config=ZERO_CONFIG) is None
assert etree.tostring(try_justext(html.fromstring(my_html), None, 'de')) == b'<body/>'
# assert extract(my_html) is None
my_html = '<html><head/><body>' + '<p>ABC def ghi jkl.</p>'*1000 + '<p>Posted on 1st Dec 2019<.</p></body></html>'
assert bare_extraction(my_html, config=ZERO_CONFIG)["date"] is not None
assert bare_extraction(my_html, config=NEW_CONFIG)["date"] is None
def test_precision_recall():
'''test precision- and recall-oriented settings'''
# the test cases could be better
my_document = html.fromstring('<html><body><p>This here is the text.</p></body></html>')
assert extract(my_document, favor_precision=True, config=ZERO_CONFIG, fast=True) is not None
assert extract(my_document, favor_recall=True, config=ZERO_CONFIG, fast=True) is not None
my_document = html.fromstring('<html><body><div class="article-body"><div class="teaser-content"><p>This here is a teaser text.</p></div><div><p>This here is the text.</p></div></body></html>')
assert 'teaser text' in extract(my_document, favor_recall=True, config=ZERO_CONFIG, fast=True)
assert 'teaser text' not in extract(my_document, config=ZERO_CONFIG, fast=True)
assert 'teaser text' not in extract(my_document, favor_precision=True, config=ZERO_CONFIG, fast=True)
my_document = html.fromstring('<html><body><article><div><p><a href="test.html">1.</a><br/><a href="test2.html">2.</a></p></div></article></body></html>')
result = extract(my_document, favor_recall=True, config=ZERO_CONFIG, fast=True)
assert '1' not in result
result = extract(my_document, favor_precision=True, config=ZERO_CONFIG, fast=True)
assert '1' not in result
my_document = html.fromstring('<html><body><div class="article-body"><p>content</p><h2>Test</h2></div></body></html>')
result = extract(my_document, favor_precision=True, config=ZERO_CONFIG, fast=True)
assert 'content' in result and 'Test' not in result
def test_table_processing():
options = DEFAULT_OPTIONS
table_simple_cell = html.fromstring(
"<table><tr><td>cell1</td><td>cell2</td></tr><tr><td>cell3</td><td>cell4</td></tr></table>"
)
processed_table = handle_table(table_simple_cell, TAG_CATALOG, options)
result = [(child.tag, child.text) for child in processed_table.iter()]
assert result == [
("table", None),
("row", None),
("cell", "cell1"),
("cell", "cell2"),
("row", None),
("cell", "cell3"),
("cell", "cell4"),
]
# if a cell contains 'exotic' tags, they are cleaned during the extraction
# process and the content is merged with the parent e.g. <td>
table_cell_with_children = html.fromstring(
"<table><tr><td><p>text</p><p>more text</p></td></tr></table>"
)
processed_table = handle_table(table_cell_with_children, TAG_CATALOG, options)
assert (
etree.tostring(processed_table, encoding="unicode")
== "<table><row><cell><p>text</p><p>more text</p></cell></row></table>"
)
# complex table that hasn't been cleaned yet
htmlstring = html.fromstring(
"""<html>
<body><article>
<table>
<tbody>
<tr>
<td>
<small>text<br></small>
<h4>more_text</h4>
</td>
<td><a href='link'>linktext</a></td>
</tr>
</tbody>
</table>
</article></body>
</html>"""
)
processed = extract(
htmlstring, no_fallback=True, output_format='xml', config=DEFAULT_CONFIG, include_links=True
)
result = processed.replace('\n', '').replace(' ', '')
assert """<table><row><cell>text<head>more_text</head></cell></row></table>""" in result
table_cell_w_text_and_child = html.fromstring(
"<table><tr><td>text<lb/><p>more text</p></td></tr></table>"
)
processed_table = handle_table(
table_cell_w_text_and_child, TAG_CATALOG, options
)
assert (
etree.tostring(processed_table, encoding="unicode")
== "<table><row><cell>text<p>more text</p></cell></row></table>"
)
table_cell_with_link = html.fromstring(
"<table><tr><td><ref='test'>link</ref></td></tr></table>"
)
processed_table = handle_table(table_cell_with_link, TAG_CATALOG, options)
result = [child.tag for child in processed_table.find(".//cell").iterdescendants()]
assert result == ["p"]
table_with_head = html.fromstring(
"""<table>
<tr>
<th>Month</th>
<th>Days</th>
</tr>
<tr>
<td>January</td>
<td>31</td>
</tr>
<tr>
<td>February</td>
<td>28</td>
</tr>
</table>"""
)
processed_table = handle_table(
table_with_head, TAG_CATALOG, options
)
first_row = processed_table[0]
assert len(processed_table) == 3
assert [
(child.tag, child.attrib, child.text) for child in first_row.iterdescendants()
] == [("cell", {"role": "head"}, "Month"), ("cell", {"role": "head"}, "Days")]
table_with_head_spanning_two_cols = html.fromstring(
"""<table>
<tr>
<th>Name</th>
<th>Adress</th>
<th colspan="2">Phone</th>
</tr>
<tr>
<td>Jane Doe</td>
<td>[email protected]</td>
<td>phone 1</td>
<td>phone 2</td>
</tr>
</table>"""
)
processed_table = handle_table(
table_with_head_spanning_two_cols,
TAG_CATALOG,
options,
)
first_row = processed_table[0]
assert len(first_row) == 3
assert {child.tag for child in first_row.iterdescendants()} == {"cell"}
table_cell_with_hi = html.fromstring(
"<table><tr><td><hi>highlighted text</hi></td></tr></table>"
)
processed_table = handle_table(table_cell_with_hi, TAG_CATALOG, options)
result = etree.tostring(processed_table.find(".//cell"), encoding="unicode")
assert result == "<cell><hi>highlighted text</hi></cell>"
table_cell_with_span = html.fromstring(
"<table><tr><td><span style='sth'>span text</span></td></tr></table>"
)
processed_table = handle_table(table_cell_with_span, TAG_CATALOG, options)
result = etree.tostring(processed_table.find(".//cell"), encoding="unicode")
assert result == "<cell><p/></cell>"
# tables with nested elements
htmlstring = '''<html><body><article>
<table>
<tr><td><b>Present Tense</b></td>
<td>I buy</td>
<td>you buy</td>
<td>he/she/it buys</td>
<td>we buy</td>
<td>you buy</td>
<td>they buy</td>
</tr>
</table></article></body></html>'''
my_result = extract(htmlstring, no_fallback=True, output_format='xml', include_formatting=True, config=ZERO_CONFIG)
assert '''<row>
<cell>
<hi>Present Tense</hi>
</cell>
<cell>I buy</cell>
<cell>you buy</cell>
<cell>he/she/it buys</cell>
<cell>we buy</cell>
<cell>you buy</cell>
<cell>they buy</cell>
</row>''' in my_result
# table with links
# todo: further tests and adjustsments
htmlstring = '<html><body><article><table><tr><td><a href="test.html">' + 'ABCD'*100 + '</a></td></tr></table></article></body></html>'
result = extract(htmlstring, no_fallback=True, output_format='xml', config=ZERO_CONFIG, include_tables=True, include_links=True)
assert 'ABCD' not in result
# nested table
htmlstring = '<html><body><article><table><th>1</th><table><tr><td>2</td></tr></table></table></article></body></html>'
result = extract(htmlstring, no_fallback=True, output_format='xml', config=ZERO_CONFIG, include_tables=True)
# todo: all elements are there, but output not nested
assert '<cell role="head">1</cell>' in result and '<cell>2</cell>' in result
nested_table = html.fromstring(
"""
<table>
<tr>
<td>
<table><tr><td>1</td></tr></table>
</td>
</tr>
</table>"""
)
processed_table = handle_table(nested_table, TAG_CATALOG, options)
result = [
(el.tag, el.text) if el.text is not None and el.text.strip() else el.tag
for el in processed_table.iter()
]
#assert result == ["table", "row", "cell", "table", "row", ("cell", "1")]
assert result == ["table", "row", "cell", ("cell", "1")]
complex_nested_table = html.fromstring(
"""
<table>
<tr>
<td>
<table><tr><td>1</td></tr></table>
</td>
<td>text1</td>
</tr>
<tr><td>text2</td></tr>
</table>"""
)
processed_table = handle_table(complex_nested_table, TAG_CATALOG, options)
result = [
(el.tag, el.text) if el.text is not None and el.text.strip() else el.tag
for el in processed_table.iter()
]
#assert (
# result
# == ["table", "row", "cell", "table", "row", ("cell", "1"), ("cell", "text1"), "row", ("cell", "text2")]
#)
assert result == ['table', 'row', 'cell', ('cell', '1'), ('cell', 'text1'), 'row', ('cell', 'text2')]
table_with_list = html.fromstring(
"""
<table><tr><td>
<p>a list</p>
<list>
<item>one</item>
<item>two</item>
</list>
</td>
</tr></table>
""")
processed_table = handle_table(table_with_list, TAG_CATALOG, options)
result = [
(el.tag, el.text) if el.text is not None and el.text.strip() else el.tag
for el in processed_table.iter()
]
# assert result == ["table", "row", "cell", ("p", "a list"), "list", ("item", "one"), ("item", "two"),]
assert result == ['table', 'row', 'cell', ('p', 'a list'), 'list']
broken_table = html.fromstring("<table><td>cell1</td><tr><td>cell2</td></tr></table>")
processed_table = handle_table(broken_table, TAG_CATALOG, options)
result = [el.tag for el in processed_table.iter()]
assert result == ['table', 'row', 'cell', 'row', 'cell']
broken_table = html.fromstring("<table><tr><p>text</p></tr><tr><td>cell</td></tr></table>")
processed_table = handle_table(broken_table, TAG_CATALOG, options)
result = [el.tag for el in processed_table.iter()]
assert result == ["table", "row", "cell", ]
# table nested in figure https://github.com/adbar/trafilatura/issues/301
htmlstring = '<html><body><article><figure><table><th>1</th><tr><td>2</td></tr></table></figure></article></body></html>'
result = extract(htmlstring, no_fallback=True, output_format='xml', config=ZERO_CONFIG, include_tables=True)
assert "1" in result and "2" in result
def test_list_processing():
options = DEFAULT_OPTIONS
# malformed lists (common error)
result = etree.tostring(handle_lists(etree.fromstring('<list>Description of the list:<item>List item 1</item><item>List item 2</item><item>List item 3</item></list>'), options))
assert result.count(b'List item') == 3
assert b"Description" in result
# nested list
htmlstring = '''<html><body><article>
<ul>
<li>Coffee</li>
<li>Tea
<ul>
<li>Black tea</li>
<li>Green tea</li>
</ul>
</li>
<li>Milk</li>
</ul>
</article></body></html>'''
my_result = extract(htmlstring, no_fallback=True, output_format='xml', config=ZERO_CONFIG)
expected = '''
<list rend="ul">
<item>Coffee</item>
<item>Tea
<list rend="ul">
<item>Black tea</item>
<item>Green tea</item>
</list>
</item>
<item>Milk</item>
</list>'''.replace("\n", "").replace(" ", "")
assert expected in my_result.replace("\n", "").replace(" ", "")
# description list
htmlstring = '''<html><body><article>
<dl>
<dt>Coffee</dt>
<dd>Black hot drink</dd>
<dt>Milk</dt>
<dd>White cold drink</dd>
</dl>
</article></body></html>'''
my_result = extract(htmlstring, no_fallback=True, output_format='xml', config=ZERO_CONFIG)
assert '''
<list rend="dl">
<item rend="dt-1">Coffee</item>
<item rend="dd-1">Black hot drink</item>
<item rend="dt-2">Milk</item>
<item rend="dd-2">White cold drink</item>
</list>''' in my_result
list_item_with_child = html.fromstring("<list><item><p>text</p></item></list>")
processed_list = handle_lists(list_item_with_child, options)
result = [(child.tag, child.text) if child.text is not None else child.tag for child in processed_list.iter()]
assert result == ["list", "item", ("p", "text")]
list_item_with_text_and_child = html.fromstring("<list><item>text1<p>text2</p></item></list>")
processed_list = handle_lists(list_item_with_text_and_child, options)
result = [(child.tag, child.text) if child.text is not None else child.tag for child in processed_list.iter()]
assert result == ["list", ("item", "text1"), ("p", "text2")]
list_item_with_lb = html.fromstring("<list><item>text<lb/>more text</item></list>")
processed_list = handle_lists(list_item_with_lb, options)
result = [(child.tag, child.text) if child.text is not None else child.tag for child in processed_list.iter()]
assert result == ["list", ("item", "text"), "lb"]
list_with_text_outside_item = html.fromstring("<list>header<item>text</item></list>")
processed_list = handle_lists(list_with_text_outside_item, options)
result = [(child.tag, child.text) if child.text is not None else child.tag for child in processed_list.iter()]
assert result == ["list", ("item", "header"), ("item", "text")]
empty_list = html.fromstring("<list> <item>text</item></list>")
processed_list = handle_lists(empty_list, options)
assert len(processed_list) == 1
list_item_with_tail = html.fromstring("<list><item>text</item>tail</list>")
processed_list = handle_lists(list_item_with_tail, options)
assert processed_list[0].text == "text tail"
list_item_with_child_and_tail = html.fromstring("<list><item><p>text</p></item>tail</list>")
processed_list = handle_lists(list_item_with_child_and_tail, options)
item_element = processed_list[0]
assert item_element.tail is not True
assert item_element[0].tail == "tail"
list_item_with_child_and_tail = html.fromstring("<list><item><p>text</p>tail1</item>tail</list>")
processed_list = handle_lists(list_item_with_child_and_tail, options)
item_element = processed_list[0]
assert item_element.tail is not True
assert item_element[0].tail == "tail1 tail"
list_item_with_child_and_tail = html.fromstring("<list><item><p>text</p>\n</item>tail</list>")
processed_list = handle_lists(list_item_with_child_and_tail, options)
item_element = processed_list[0]
assert item_element.tail is not True
assert item_element[0].tail == "tail"
list_item_with_tail_and_nested_list = html.fromstring("<list><item><list><item>text</item></list></item>tail</list>")
processed_list = handle_lists(list_item_with_tail_and_nested_list, options)
target_element = processed_list.find(".//item/list")
assert target_element.tail == 'tail'
def test_code_blocks():
highlightjs = '''<div class="s-prose js-post-body" itemprop="text">
<p>Code:</p>
<pre class="lang-sql s-code-block"><code class="hljs language-sql">code\n
<span class="hljs-keyword">highlighted</span> more <span class="hljs-keyword">code</span>
</code></pre>
</div>'''
testresult = extract(highlightjs, config=ZERO_CONFIG, output_format='xml')
assert '<code>code\nhighlighted more code\n</code>' in testresult and 'quote' not in testresult
github = '''<div class="highlight highlight-source-shell notranslate position-relative overflow-auto" dir="auto"><pre>$ pip install PyGithub</pre><div class="zeroclipboard-container position-absolute right-0 top-0">
<clipboard-copy aria-label="Copy" class="ClipboardButton btn js-clipboard-copy m-2 p-0 tooltipped-no-delay" data-copy-feedback="Copied!" data-tooltip-direction="w" value="$ pip install PyGithub" tabindex="0" role="button" style="display: inherit;">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-copy js-clipboard-copy-icon m-2">
<path d="M0 6.75C0 5.784.784 5 1.75 5h1.5a.75.75 0 0 1 0 1.5h-1.5a.25.25 0 0 0-.25.25v7.5c0 .138.112.25.25.25h7.5a.25.25 0 0 0 .25-.25v-1.5a.75.75 0 0 1 1.5 0v1.5A1.75 1.75 0 0 1 9.25 16h-7.5A1.75 1.75 0 0 1 0 14.25Z"></path><path d="M5 1.75C5 .784 5.784 0 6.75 0h7.5C15.216 0 16 .784 16 1.75v7.5A1.75 1.75 0 0 1 14.25 11h-7.5A1.75 1.75 0 0 1 5 9.25Zm1.75-.25a.25.25 0 0 0-.25.25v7.5c0 .138.112.25.25.25h7.5a.25.25 0 0 0 .25-.25v-7.5a.25.25 0 0 0-.25-.25Z"></path>
</svg>
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-check js-clipboard-check-icon color-fg-success d-none m-2">
<path d="M13.78 4.22a.75.75 0 0 1 0 1.06l-7.25 7.25a.75.75 0 0 1-1.06 0L2.22 9.28a.751.751 0 0 1 .018-1.042.751.751 0 0 1 1.042-.018L6 10.94l6.72-6.72a.75.75 0 0 1 1.06 0Z"></path>
</svg>
</clipboard-copy>
</div></div>
'''
testresult = extract(github, config=ZERO_CONFIG, output_format='xml')
assert '<code>$ pip install PyGithub</code>' in testresult and 'quote' not in testresult
inline_code = '<div><p>paragraph</p><p>here is <code>some</code> code</p></div>'
testresult = extract(inline_code, config=ZERO_CONFIG, output_format='xml')
assert '<code>some</code>' in testresult and 'quote' not in testresult
w3schools = '''<div class="w3-example"><h3>Example</h3>
<p>Create a class named Person, use the __init__() function to assign values
for name and age:</p>
<div class="w3-code notranslate pythonHigh"><span class="pythoncolor" style="color:black"><span class="pythonnumbercolor" style="color:red">
</span> <span class="pythonkeywordcolor" style="color:mediumblue">class</span> Person:<br> <span class="pythonkeywordcolor" style="color:mediumblue">def</span> __init__(self, name, age):<br> <span class="pythonnumbercolor" style="color:red">
</span> self.name = name<br> self.age = age<br><br>p1 = Person(<span class="pythonstringcolor" style="color:brown">"John"</span>, <span class="pythonnumbercolor" style="color:red">
</span> <span class="pythonnumbercolor" style="color:red">36</span>)<br><span class="pythonnumbercolor" style="color:red">
</span> <br><span class="pythonkeywordcolor" style="color:mediumblue">print</span>(p1.name)<br><span class="pythonkeywordcolor" style="color:mediumblue">print</span>(p1.age) </span></div>
</div>'''
testresult = extract(w3schools, config=ZERO_CONFIG, output_format='xml')
expected = '''<code>
class Person:<lb/> def __init__(self, name, age):<lb/>
self.name = name<lb/> self.age = age<lb/><lb/>p1 = Person("John",
36)<lb/>
<lb/>print(p1.name)<lb/>print(p1.age) </code>'''
assert expected in testresult and 'quote' not in testresult
pip = '''<div><p>Code:</p>
<pre lang="python3"><span class="kn">import</span> <span class="nn">openai</span>
<span class="kn">from</span> <span class="nn">openai_function_call</span> <span class="kn">import</span> <span class="n">openai_function</span></pre></div>'''
expected = '''<code>import openai
from openai_function_call import openai_function</code>'''
testresult = extract(pip, config=ZERO_CONFIG, output_format='xml')
assert expected in testresult and 'quote' not in testresult
medium_js = '''<div><p>Code:</p>
<pre class="lw lx ly lz ma nq nr ns bo nt ba bj"><span id="fe48" class="nu mo ev nr b bf nv nw l nx ny" data-selectable-paragraph=""><span class="hljs-keyword">import</span> openai_function<br><br><span class="hljs-meta">@openai_function</span></span></pre>'''
expected = '''<code>import openai_function<lb/><lb/>@openai_function</code>'''
testresult = extract(medium_js, config=ZERO_CONFIG, output_format='xml')
assert expected in testresult and 'quote' not in testresult
medium_ssr = '''<div><p>Code:</p>
<pre class="lw lx ly lz ma nq nr ns bo nt ba bj"><span id="fe48" class="nu mo ev nr b bf nv nw l nx ny">import openai_function<br><br>@openai_functiondef sum(a:int, b:int):<br/> """Sum description adds a + b"""</span></pre>'''
expected = '<code>import openai_function<lb/><lb/>@openai_functiondef sum(a:int, b:int):<lb/> """Sum description adds a + b"""</code>'
testresult = extract(medium_ssr, config=ZERO_CONFIG, output_format='xml')
assert expected in testresult and 'quote' not in testresult
code_el = '''<div><p>Code:</p>
<pre><code><span>my code</span></code></pre>'''
expected = '''<code>my code</code>'''
testresult = extract(code_el, config=ZERO_CONFIG, output_format='xml')
assert expected in testresult and 'quote' not in testresult
if __name__ == '__main__':
test_trim()
test_input()
test_formatting()
test_exotic_tags()
test_images()
test_links()
test_htmlprocessing()
test_extraction_options()
test_precision_recall()
test_baseline()
test_txttocsv()
test_external()
test_tei()
test_table_processing()
test_list_processing()
test_code_blocks()
| [] |
2024-01-10 | dguarino/CNRS_PRe | Validation~lfpmodels.py | import sciunit
import neuronunit
from neuronunit.capabilities import ProducesMembranePotential, ProducesSpikes
import pickle
import neo
import quantities as pq
import numpy as np
from scipy.signal import coherence, hanning
from scipy.fftpack import fft
from itertools import chain
from pathlib import Path, PurePath
import sys
sys.path.append("..") #not the best way to modify sys.path but anyway...
from Validation.lfpcapabilities import ProducesLocalFieldPotential, ProducesConductance
import Fonctions.math_functions as mf
import Fonctions.neuron_functions as nf
import Fonctions.crosscorrelation as crsscorr
import Fonctions.filters as filt
class CoulombModel(sciunit.Model, ProducesLocalFieldPotential, ProducesMembranePotential,
ProducesConductance, ProducesSpikes):
"""
A model of LFP computation that relies on the Coulomb law.
It also checks if positional data is available. If not it assigns positions to the neurons (randomly).
"""
def __init__(self, name=None, network_model="VA", space_dependency=False, dimensionnality=3,
dimensions=np.array([0.002, 0.002, 0.]), reach=0.001,
electrode_positions=np.array([[0.], [0.], [0.]]), sigma=0.3):
self.name = name
self.network_model = network_model #Voggels-Abbott for the moment
self.space_dependency = space_dependency #Boolean indicating if the neurons' positions are available
self.dimensionnality = dimensionnality #dimensionnality of the network - either 2 or 3 D
self.dimensions = dimensions #3D-array: leght, width and height of the network (in m)
self.reach = reach #reach of the LFP (in m)
np.transpose(electrode_positions) #to have the the coordinates along the 0 axis, as opposed to the input state
self.electrode_positions = electrode_positions #positions of the electrodes (in m)
self.sigma = sigma #parameter in the Coulomb law's formula (in S/m)
self.directory_PUREPATH = PurePath()
### COMPUTATION OF THE NUMBER OF SEGMENTS
self.set_directory_path()
if self.network_model == "T2":
self.num_trials = 11 #number of trials for a same experiment
elif self.network_model == "VA":
self.num_trials = 1
else:
raise ValueError("Only the T2 and the Voggels-Abott models are supported.")
self.num_neurons = 0 #number of neurons computed - will be properly initialized during LFP computation
self.exc_counted = False
self.inh_counted = False
### VERIFICATION IF THE ELECTRODE(S) ARE "INSIDE" THE NETWORK
for e_pos in electrode_positions:
if max(abs(e_pos))+self.reach > self.dimensions[0]/2.:
raise ValueError("Wrong electrode position! Must have its reach zone in the network.")
return super(CoulombModel, self).__init__(name) #MUST FINISH THIS LINE (name=name, etc.)
#================================================================================================================
#== methods related to raw available data =======================================================================
#================================================================================================================
def set_directory_path(self, date="20190718"):
if self.network_model == "VA":
parent_directory="./Exemples/Results/"
directory_path = parent_directory + date
directory_PATH = Path(directory_path)
if not directory_PATH.exists():
sys.exit("Directory does not exist!")
self.directory_PUREPATH = PurePath(directory_path)
elif self.network_model == "T2":
directory_path = "./T2/ThalamoCorticalModel_data_size_____/"
self.directory_PUREPATH = PurePath(directory_path)
else:
raise NotImplementedError("Only the T2 and the Voggels-Abott models are supported.")
def get_file_path(self, segment_number="0", time="201157", neuron_type=""):
if self.network_model == "VA":
if neuron_type == "":
raise ValueError("Must specify a neuron type.")
date = self.directory_PUREPATH.parts[-1]
file_path = "./" + str(self.directory_PUREPATH) + "/VAbenchmarks_COBA_{0}_neuron_np1_{1}-{2}.pkl".format(neuron_type,
date, time)
file_PATH = Path(file_path)
print(file_path + "\n\n")
if not file_PATH.exists():
sys.exit("File name does not exist! (Try checking the time argument.)")
elif self.network_model == "T2":
file_path = "./" + str(self.directory_PUREPATH) + "/Segment{0}.pickle".format(segment_number)
file_PATH = Path(file_path)
if not file_PATH.exists():
sys.exit("File name does not exist! (Try checking segment number.)")
else:
raise NotImplementedError("Only the T2 and the Voggels-Abott models are supported.")
return file_path
def get_membrane_potential(self, trial=0, experiment="sin_stim"):
"""
Returns a neo.core.analogsignal.AnalogSignal representing the membrane potential of all neurons, regardless
of their type.
Works only if there are two or one Pickle files containing the data (typically storing the excitatory and
inhibitory data seperately, when there are two files).
"""
self.set_directory_path()
if self.network_model == "VA":
### EXCITATORY NEURONS
neuron_type = "exc"
file_path = self.get_file_path(neuron_type=neuron_type)
PyNN_file = open(file_path, "rb")
block = pickle.load(PyNN_file)
seg = block.segments[trial] #chosen segment
for analogsignal in seg.analogsignals:
if analogsignal.name == 'v':
vm_exc = analogsignal
if self.exc_counted == False:
self.num_neurons += vm_exc.shape[1]
self.exc_counted = True
### INHIBITORY NEURONS
neuron_type = "inh"
file_path = self.get_file_path(neuron_type=neuron_type)
PyNN_file = open(file_path, "rb")
block = pickle.load(PyNN_file)
seg = block.segments[trial] #chosen segment
for analogsignal in seg.analogsignals:
if analogsignal.name == 'v':
vm_inh = analogsignal
if self.inh_counted == False:
self.num_neurons += vm_inh.shape[1]
self.inh_counted = True
### ALL NEURONS
vm_array = np.concatenate(vm_exc, vm_inh, axis=1)
vm = neo.core.AnalogSignal(vm_array, units=vm_exc.units, t_start=vm_exc.t_start,
sampling_rate=vm_exc.sampling_rate)
else:
if experiment == "sin_stim":
data_int = 0
elif experiment == "blank_stim":
data_int = 5
else:
raise ValueError("The experiment argument must be either 'sin_stim' or 'blank_stim'.")
### EXCITATORY NEURONS
seg_num = str(10*trial+data_int+2)
file_path = self.get_file_path(segment_number=seg_num)
PyNN_file = open(file_path, "rb")
seg = pickle.load(PyNN_file)
for analogsignal in seg.analogsignals:
if analogsignal.name == 'v':
vm_exc = analogsignal
if self.exc_counted == False:
self.num_neurons += vm_exc.shape[1]
self.exc_counted = True
### INHIBITORY NEURONS
seg_num = str(10*trial+data_int+1)
file_path = self.get_file_path(segment_number=seg_num)
PyNN_file = open(file_path, "rb")
seg = pickle.load(PyNN_file)
for analogsignal in seg.analogsignals:
if analogsignal.name == 'v':
vm_inh = analogsignal
if self.inh_counted == False:
self.num_neurons += vm_inh.shape[1]
self.inh_counted = True
### ALL NEURONS
vm_array = np.concatenate(vm_exc, vm_inh, axis=1)
vm = neo.core.AnalogSignal(vm_array, units=vm_exc.units, t_start=vm_exc.t_start,
sampling_rate=vm_exc.sampling_rate)
return vm
def get_conductance(self, trial=0, experiment="sin_stim"):
"""
Returns a neo.core.analogsignal.AnalogSignal representing the synaptic conductance of all neurons, regardless
of their type.
Works only if there are two or one Pickle files containing the data (typically storing the excitatory and
inhibitory data seperately, when there are two files).
"""
self.set_directory_path()
if self.network_model == "VA":
### EXCITATORY NEURONS
neuron_type = "exc"
file_path = self.get_file_path(neuron_type=neuron_type)
PyNN_file = open(file_path, "rb")
block = pickle.load(PyNN_file)
seg = block.segments[trial] #chosen segment
for analogsignal in seg.analogsignals:
if analogsignal.name == 'gsyn_exc':
gsyn_exc = analogsignal
if self.exc_counted == False:
self.num_neurons += gsyn_exc.shape[1]
self.exc_counted = True
### INHIBITORY NEURONS
neuron_type = "inh"
file_path = self.get_file_path(neuron_type=neuron_type)
PyNN_file = open(file_path, "rb")
block = pickle.load(PyNN_file)
seg = block.segments[trial] #chosen segment
for analogsignal in seg.analogsignals:
if analogsignal.name == 'gsyn_inh':
gsyn_inh = analogsignal
if self.inh_counted == False:
self.num_neurons += gsyn_inh.shape[1]
self.inh_counted = True
### ALL NEURONS
gsyn_array = np.concatenate(gsyn_exc, gsyn_inh, axis=1)
gsyn = neo.core.AnalogSignal(gsyn_array, units=gsyn_exc.units, t_start=gsyn_exc.t_start,
sampling_rate=gsyn_exc.sampling_rate)
else:
### TO CHANGE ###
'''
All this has to be changed...
The pickle files are not organised in blocks but in segments, and these segments correspond to a certain
type of neuron in a given layer... I must hence find out which segments correspond to the same experiment
and join them together here. Not forgetting to mention the multiple trials for the same experiment.
'''
if experiment == "sin_stim":
data_int = 0
elif experiment == "blank_stim":
data_int = 5
else:
raise ValueError("The experiment argument must be either 'sin_stim' or 'blank_stim'.")
seg_num = str(trial)
file_path = self.get_file_path(segment_number=seg_num)
PyNN_file = open(file_path, "rb")
seg = pickle.load(PyNN_file)
for analogsignal in seg.analogsignals:
if analogsignal.name == 'gsyn':
gsyn = analogsignal
return gsyn
def get_spike_trains(self, trial=0, experiment="sin_stim"):
"""
Returns a list of neo.core.SpikeTrain elements representing the spike trains of all neurons, regardless
of their type.
Works only if there are two or one Pickle files containing the data (typically storing the excitatory and
inhibitory data seperately, when there are two files).
"""
self.set_directory_path()
if self.network_model == "VA":
### EXCITATORY NEURONS
neuron_type = "exc"
file_path = self.get_file_path(neuron_type=neuron_type)
PyNN_file = open(file_path, "rb")
block = pickle.load(PyNN_file)
seg = block.segments[trial] #chosen segment
spiketrains_exc = seg.spiketrains
if self.exc_counted == False:
self.num_neurons += len(spiketrains_exc)
self.exc_counted = True
### INHIBITORY NEURONS
neuron_type = "inh"
file_path = self.get_file_path(neuron_type=neuron_type)
PyNN_file = open(file_path, "rb")
block = pickle.load(PyNN_file)
seg = block.segments[trial] #chosen segment
spiketrains_inh = seg.spiketrains
if self.inh_counted == False:
self.num_neurons += len(spiketrains_inh)
self.inh_counted = True
### ALL NEURONS
spiketrains = spiketrains_exc + spiketrains_inh
else:
if experiment == "sin_stim":
data_int = 0
elif experiment == "blank_stim":
data_int = 5
else:
raise ValueError("The experiment argument must be either 'sin_stim' or 'blank_stim'.")
### EXCITATORY NEURONS
seg_num = str(10*trial+data_int+2)
file_path = self.get_file_path(segment_number=seg_num)
PyNN_file = open(file_path, "rb")
seg = pickle.load(PyNN_file)
spiketrains_exc = seg.spiketrains
if self.exc_counted == False:
self.num_neurons += len(spiketrains_exc)
self.exc_counted = True
### INHIBITORY NEURONS
seg_num = str(10*trial+data_int+1)
file_path = self.get_file_path(segment_number=seg_num)
PyNN_file = open(file_path, "rb")
seg = pickle.load(PyNN_file)
spiketrains_inh = seg.spiketrains
if self.inh_counted == False:
self.num_neurons += len(spiketrains_inh)
self.inh_counted = True
### ALL NEURONS
spiketrains = spiketrains_exc + spiketrains_inh
return spiketrains
def get_spike_train(self, trial=0):
global_spiketrain = list(chain.from_iterable(spiketrain for spiketrain in self.get_spike_trains(trial=trial)))
global_spiketrain.sort()
return global_spiketrain
#================================================================================================================
#== LFP related methods =========================================================================================
#================================================================================================================
def produce_local_field_potential(self, trial=0):
"""
Calculates and returns the 2D-array of the LFP.
The first dimension corresponds to the electrodes.
"""
vm = self.get_membrane_potential(trial=trial)
gsyn = self.get_conductance(trial=trial)
neuron_positions = self.get_positions()
num_time_points = vm.shape[0]
num_electrodes = self.electrode_positions.shape[0]
ones_array = np.ones((num_electrodes, num_time_points, num_neurons))
current_array = np.multiply(vm, gsyn)
inv_dist = nf.electrode_neuron_inv_dist(num_electrodes, num_neurons,
self.electrode_positions, neuron_positions,
self.reach, self.dimensionnality)
big_current_array = np.multiply(ones_array, current_array)
big_inv_dist_array = np.multiply(ones_array, inv_dist)
LFP = np.sum(big_current_array, big_inv_dist_array, axis=2)/(4*np.pi*self.sigma)
return LFP
def get_positions(self):
"""
Returns the 2D-array giving the neurons' positions.
"""
if self.space_dependency == False:
positions = self.assign_positions()
if self.exc_counted == False and self.inh_counted == False:
self.num_neurons = positions.shape[1]
else:
raise NotImplementedError("Must implement get_positions.")
return positions
def assign_positions(self):
"""
Function that assigns positions to the neurons if they do not have.
Only works if they have a 2D structure.
"""
num_neurons = len(self.get_spike_trains(trial=0))
positions = np.multiply(self.dimensions, np.random.rand(num_neurons, self.dimensionnality)-0.5)
return positions
#================================================================================================================
#== test related methods ========================================================================================
#================================================================================================================
def produce_vm_LFP_correlation(self, trial=0, start=600, duration=1000, dt=0.1):
"""
Calculates the correlation between the Vm of the closest neuron to the (first) electrode and the LFP signal
recorded at this electrode.
Returns the correlation and the corresponding lag (in ms).
The relevant data is supposed to be 1s long.
"""
start_index = int(start/dt)
duration_index = int(duration/dt)
vm = self.get_membrane_potential(trial=trial)
neuron_positions = self.get_positions()
num_electrodes = self.electrode_positions.shape[0]
inv_dist = nf.electrode_neuron_inv_dist(num_electrodes, self.num_neurons,
self.electrode_positions, neuron_positions,
self.reach, self.dimensionnality)[0, :]
closest_neuron = np.argmax(inv_dist)
selected_vm = np.reshape(vm[start_index:start_index+duration_index+1, closest_neuron], (duration_index+1,))
selected_LFP = np.reshape(self.produce_local_field_potential(trial=trial)[0, start_index:start_index+duration_index+1],
(duration_index+1,))
corr = crsscorr.constwindowcorrelation(selected_vm, selected_LFP)
corr_time_points = np.arange(-duration/2, duration/2+dt, dt)
return corr, corr_time_points
def produce_vm_LFP_zerolagcorrelations(self, start=600, duration=1000, dt=0.1,
trial_average=True, trial=0, withinreach=True):
"""
Calculates the zero-lag correlations between the neurons' membrane potentials and the LFP.
Interesting plots to do with this data can be:
- histogram of the correlation distribution;
- confrontation of the correlation values between a non-stimulated and stimulated state (for the same neurons).
The trial_average boolean tells if the correlations have to be averaged over the trials.
If not, the chosen trial is trial.
"""
start_index = int(start/dt)
duration_index = int(duration/dt)
if trial_average == True:
trials = self.num_trials
else:
trials = trial
self.get_positions() #just to initiate the value of self.num_neurons
zerolagcorrelations_array = np.zeros((trials, self.num_neurons))
for iteration_trial in range(trials):
vm = self.get_membrane_potential(trial=iteration_trial)
vm = vm[start_index:start_index+duration_index, :]
LFP = np.reshape(self.produce_local_field_potential(trial=iteration_trial)[0, start_index:start_index+duration_index],
(duration_index+1,))
def zerolagtcorrelationtoLFP(v):
return crsscorr.zerolagcorrelation(v, LFP)
### ELIMINATION OF THE CONTRIBUTION OF NEURONS THAT ARE OUT OF THE REACH ZONE
if withinreach:
num_electrodes = self.electrode_positions.shape[0]
neuron_positions = self.get_positions()
inv_dist = nf.electrode_neuron_inv_dist(num_electrodes, self.num_neurons,
self.electrode_positions, neuron_positions,
self.reach, self.dimensionnality)[0, :]
valid_dist_neurons = np.heaviside(inv_dist-1./self.reach, 1) #array of neurons that are within the reach
vm = np.multiply(vm, valid_dist_neurons) #vms of neurons that are out of the reach are null
zerolagcorrelations_array[iteration_trial, :] = np.apply_along_axis(zerolagtcorrelationtoLFP, axis=0, arr=vm)
zerolagcorrelations = np.average(zerolagcorrelations_array, axis=0)
return zerolagcorrelations #if withinreach==True, neurons that are out of the reach zone have a null correlation with the LFP
def produce_vm_LFP_meancoherence(self, trial=0, withinreach=True, start=29, duration=1000, dt=0.1):
"""
Calculates the mean coherence between the neurons' membrane potentials and the LFP.
returns the mean coherence, the corresponding frequencies (in Hz) and the standard deviation error for each
coherence value.
The relevant data is supposed to be 1s long.
"""
start_index = int(start/dt)
duration_index = int(duration/dt)
vm = self.get_membrane_potential(trial=trial)
vm = vm[start_index:start_index+duration_index, :]
LFP = np.reshape(self.produce_local_field_potential(trial=trial)[0, start_index:start_index+duration_index],
(duration_index+1,))
### ELIMINATION OF THE CONTRIBUTION OF NEURONS THAT ARE OUT OF THE REACH ZONE
if withinreach:
num_electrodes = self.electrode_positions.shape[0]
neuron_positions = self.get_positions()
inv_dist = nf.electrode_neuron_inv_dist(num_electrodes, self.num_neurons,
self.electrode_positions, neuron_positions,
self.reach, self.dimensionnality)[0, :]
valid_dist_neurons = np.heaviside(inv_dist-1./self.reach, 1) #array of neurons that are within the reach
vm = np.multiply(vm, valid_dist_neurons) #vms of neurons that are out of the reach are null
f, coherence_array = coherence(LFP, vm, axis=0, nperseg=int(2**12), fs=1000./dt)
meancoherence_array = np.average(coherence_array, axis=1)
coherencestd_array = np.std(coherence_array, axis=1)
return meancoherence_array, f, coherencestd_array
def produce_phase_lock_value(self, start=0, offset=250, duration=950, dt=0.1,
trial_average=True, trial=0, withinreach=True):
"""
Calculates the Phase-Lock value for the spikes occuring in a (duration-offset) ms period of time.
The neurons are supposed to be excitated by a sinusoidal input of 1s, starting offset ms before the
selected epoch.
Returns the Phase-Lock value and the corresponding frequencies.
The trial_average boolean tells if the Phase-Lock value has to be averaged over the trials.
If not, the chosen trial is trial.
"""
if trial_average:
trials = self.num_trials
else:
trials = trial
fs = 1000./dt #sampling frequency
window = 150 #ms, size of the window in which the LFP will have its Fourier transformations
window_index = int(window/dt)
window_width = window_index//2
w = hanning(window_index+1) #150 ms window with a 0,1 ms interval
N_max = 500 #just an arbitrary value for the moment
PLv_array = np.zeros((trials, window_index+1),
dtype=float) #multi-trial Phase-Lock value array, empty for the moment
for iteration_trial in range(trials):
spiketrain = self.get_spike_train(trial=iteration_trial)
num_spikes = len(spiketrain)
valid_times1 = np.heaviside(spiketrain-(start+offset)*np.ones(num_spikes), 1) #spikes that occured after a certain time
valid_times2 = np.heaviside((start+duration)*np.ones(num_spikes)-spiketrain, 1) #spikes that occured before the maximum admitted time
valid_times = np.multiply(valid_times1, valid_times2)
selected_spikes = np.multiply(spiketrain, valid_times)
selected_spikes = selected_spikes[selected_spikes>0]
LFP = self.produce_local_field_potential(trial=iteration_trial)[0, :]
#LFP_filt = butter_lowpass_filter(LFP, 170., fs)
N_s = min(selected_spikes.shape[0], N_max) #security measure
for t_index in mf.random_list(N_s, selected_spikes.shape[0], minimum=0):
t_s = selected_spikes[t_index] #time of spike occurence
t_s_index = int(10*t_s) #corresponding index for the arrays
#LFP_s = LFP_filt[t_s_index - window_width : t_s_index + window_width+1] #LFP centered at the spike occurrence
LFP_s = LFP[t_s_index - window_width : t_s_index + window_width+1] #Non-filtered version
wLFP_s = np.multiply(w, LFP_s) #centered LFP multiplied by the Hanning window
FT_s = fft(wLFP_s) #Fourier transform of this weighted LFP
nFT_s = np.divide(FT_s, np.abs(FT_s)) #normalized Fourier transform
PLv_array[iteration_trial, :] = np.add(PLv_array[iteration_trial, :], nFT_s) #contribution to the PLv added
PLv_array[iteration_trial, :] = np.abs(PLv_array[iteration_trial, :])/N_s #normalized module, according to the paper
PLv = np.average(PLv_array, axis=0)
PLv = PLv[:(PLv.shape[0])//2] #only the first half is relevant
fPLv = (0.5*fs/PLv.shape[0])*np.arange(PLv.shape[0], dtype=float) #frequencies of the PLv
return PLv, fPLv
def produce_spike_triggered_LFP(self, start=500, duration=1000, dt=0.1, window_width=200,
trial_average=True, trial=0):
"""
Calculates the spike-triggered average of the LFP (stLFP) and arranges the results relative to the distance
from the electrode. The distances discriminating the neurons are (in mm): 0.4, 0.8, 1.2, 1.4 and 1.6.
Returns the stLFP for each distance interval and in a time interval around the spikes.
The stLFP is a 2D-array with the first dimension corresponding to the distance and the second, the time.
"""
discrim_dist = np.array([4e-4, 8e-4, 1.2e-3, 1.6e-3])
discrim_inv_dist = np.append(np.inf, np.power(discrim_dist, -1))
discrim_inv_dist = np.append(discrim_dist, 0.)
num_electrodes = self.electrode_positions.shape[0]
neuron_positions = self.get_positions()
inv_dist = nf.electrode_neuron_inv_dist(num_electrodes, self.num_neurons,
self.electrode_positions, neuron_positions,
self.reach, self.dimensionnality)[0, :]
discrim_indexes = [[], [], [], [], []]
num_dist_intervals = 5
for i in range(num_dist_intervals):
i_normalized_inv_dist = mf.door(inv_dist, discrim_inv_dist[i+1], discrim_inv_dist[i])
i_indexes = np.argwhere(i_normalized_inv_dist == 1)
discrim_indexes[i].append(i_indexes.flatten().tolist())
'''
Now I have discriminated the neurons according to their distance from the electrode (information stored in
their indexes in the list discrim_indexes), I can separate the stLFPs according to this criteria.
'''
if trial_average:
trials = self.num_trials
else:
trials = trial
window_index = int(window_width/dt)
window = np.arange(-window_width/2, window_width/2+dt, dt)
stLFP_array = np.zeros((trials, num_dist_intervals, window_index+1))
for iteration_trial in range(trials):
### LOOP ON THE TRIALS
spiketrains = self.get_spike_trains(trial=iteration_trial)
LFP = self.produce_local_field_potential(trial=iteration_trial)[0, :]
for interval_index in range(num_dist_intervals):
### LOOP ON THE DISTANCE INTERVALS
average_counter = 0
for neuron_index in discrim_indexes[interval_index]:
### LOOP ON THE NEURONS WITHIN A DISTANCE INTERVAL
for t_s in spiketrains[neuron_index]: #maybe I can get rid of this loop... I don't like it...
### LOOP ON THE SPIKES OF A GIVEN NEURON
t_s_index = int(t_s/dt)
average_counte += 1
stLFP_array[iteration_trial, interval_index, :] = np.add(
stLFP_array[iteration_trial, interval_index, :],
LFP[t_s-window_index//2:t_s_index+window_index//2+1])
stLFP_array[iteration_trial, interval_index, :] /= average_counter
stLFP = np.average(stLFP_array, axis=0) #trial-average computation
return stLFP, window | [] |
2024-01-10 | savagenashe/diagnosisAPI | diagnosis_final.py | from flask import Flask, request, jsonify
import os
import openai
from dotenv import load_dotenv
load_dotenv()
app = Flask(__name__)
# Set OpenAI API key
openai.api_key = os.getenv("OPENAI_API_KEY")
# Define system message
SYSTEM_MESSAGE = "You are a doctor and you propose at most three disease that a patient might be suffering from with some context given the symptoms starting with best choice"
@app.route('/chat', methods=['POST'])
def chat():
# Get user message from request body
user_message = request.data.decode('utf-8')
# Call OpenAI API to generate response
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": SYSTEM_MESSAGE},
{"role": "user", "content": user_message},
]
)
# Extract response message from OpenAI API response
#response_message = response.choices[0].text.strip()
# Return response message as JSON
return jsonify(response)
if __name__ == '__main__':
app.run(debug=True) | [
"You are a doctor and you propose at most three disease that a patient might be suffering from with some context given the symptoms starting with best choice"
] |
2024-01-10 | Narsil/nlp | datasets~openwebtext~openwebtext.py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""
from __future__ import absolute_import, division, print_function
import os
import re
from itertools import chain
import datasets
_CITATION = """\
@misc{Gokaslan2019OpenWeb,
title={OpenWebText Corpus},
author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
howpublished{\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
year={2019}
}
"""
_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""
_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
class Openwebtext(datasets.GeneratorBasedBuilder):
"""The Open WebText dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
description="Plain text",
version=datasets.Version("1.0.0"),
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({"text": datasets.Value("string")}),
homepage="https://skylion007.github.io/OpenWebTextCorpus/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_URL)
owt_dir = os.path.join(dl_dir, "openwebtext")
subset_xzs = [
os.path.join(owt_dir, file_name)
for file_name in sorted(os.listdir(owt_dir))
if file_name.endswith("xz") # filter out ...xz.lock
]
ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
nested_txt_files = [
[
os.path.join(ex_dir, txt_file_name)
for txt_file_name in sorted(os.listdir(ex_dir))
if txt_file_name.endswith("txt")
]
for ex_dir in ex_dirs
]
txt_files = chain(*nested_txt_files)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
]
def _generate_examples(self, txt_files):
""" Yields examples. """
for idx, filepath in enumerate(txt_files):
with open(filepath, encoding="utf-8") as f:
yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
| [] |
2024-01-10 | shreyashankar/gpt3-sandbox | api~demo_web_app.py | """Runs the web app given a GPT object and UI configuration."""
from http import HTTPStatus
import json
import subprocess
import openai
from flask import Flask, request, Response
from .gpt import set_openai_key, Example
from .ui_config import UIConfig
CONFIG_VAR = "OPENAI_CONFIG"
KEY_NAME = "OPENAI_KEY"
def demo_web_app(gpt, config=UIConfig()):
"""Creates Flask app to serve the React app."""
app = Flask(__name__)
app.config.from_envvar(CONFIG_VAR)
set_openai_key(app.config[KEY_NAME])
@app.route("/params", methods=["GET"])
def get_params():
# pylint: disable=unused-variable
response = config.json()
return response
def error(err_msg, status_code):
return Response(json.dumps({"error": err_msg}), status=status_code)
def get_example(example_id):
"""Gets a single example or all the examples."""
# return all examples
if not example_id:
return json.dumps(gpt.get_all_examples())
example = gpt.get_example(example_id)
if not example:
return error("id not found", HTTPStatus.NOT_FOUND)
return json.dumps(example.as_dict())
def post_example():
"""Adds an empty example."""
new_example = Example("", "")
gpt.add_example(new_example)
return json.dumps(gpt.get_all_examples())
def put_example(args, example_id):
"""Modifies an existing example."""
if not example_id:
return error("id required", HTTPStatus.BAD_REQUEST)
example = gpt.get_example(example_id)
if not example:
return error("id not found", HTTPStatus.NOT_FOUND)
if "input" in args:
example.input = args["input"]
if "output" in args:
example.output = args["output"]
# update the example
gpt.add_example(example)
return json.dumps(example.as_dict())
def delete_example(example_id):
"""Deletes an example."""
if not example_id:
return error("id required", HTTPStatus.BAD_REQUEST)
gpt.delete_example(example_id)
return json.dumps(gpt.get_all_examples())
@app.route(
"/examples",
methods=["GET", "POST"],
defaults={"example_id": ""},
)
@app.route(
"/examples/<example_id>",
methods=["GET", "PUT", "DELETE"],
)
def examples(example_id):
method = request.method
args = request.json
if method == "GET":
return get_example(example_id)
if method == "POST":
return post_example()
if method == "PUT":
return put_example(args, example_id)
if method == "DELETE":
return delete_example(example_id)
return error("Not implemented", HTTPStatus.NOT_IMPLEMENTED)
@app.route("/translate", methods=["GET", "POST"])
def translate():
# pylint: disable=unused-variable
prompt = request.json["prompt"]
response = gpt.submit_request(prompt)
offset = 0
if not gpt.append_output_prefix_to_query:
offset = len(gpt.output_prefix)
return {'text': response['choices'][0]['text'][offset:]}
subprocess.Popen(["yarn", "start"])
app.run()
| [] |
2024-01-10 | gltanaka/autogen | test~agentchat~test_async.py | import pytest
import asyncio
import autogen
from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST
def get_market_news(ind, ind_upper):
data = {
"feed": [
{
"title": "Palantir CEO Says Our Generation's Atomic Bomb Could Be AI Weapon - And Arrive Sooner Than You Think - Palantir Technologies ( NYSE:PLTR ) ",
"summary": "Christopher Nolan's blockbuster movie \"Oppenheimer\" has reignited the public discourse surrounding the United States' use of an atomic bomb on Japan at the end of World War II.",
"overall_sentiment_score": 0.009687,
},
{
"title": '3 "Hedge Fund Hotels" Pulling into Support',
"summary": "Institutional quality stocks have several benefits including high-liquidity, low beta, and a long runway. Strategist Andrew Rocco breaks down what investors should look for and pitches 3 ideas.",
"banner_image": "https://staticx-tuner.zacks.com/images/articles/main/92/87.jpg",
"overall_sentiment_score": 0.219747,
},
{
"title": "PDFgear, Bringing a Completely-Free PDF Text Editing Feature",
"summary": "LOS ANGELES, July 26, 2023 /PRNewswire/ -- PDFgear, a leading provider of PDF solutions, announced a piece of exciting news for everyone who works extensively with PDF documents.",
"overall_sentiment_score": 0.360071,
},
{
"title": "Researchers Pitch 'Immunizing' Images Against Deepfake Manipulation",
"summary": "A team at MIT says injecting tiny disruptive bits of code can cause distorted deepfake images.",
"overall_sentiment_score": -0.026894,
},
{
"title": "Nvidia wins again - plus two more takeaways from this week's mega-cap earnings",
"summary": "We made some key conclusions combing through quarterly results for Microsoft and Alphabet and listening to their conference calls with investors.",
"overall_sentiment_score": 0.235177,
},
]
}
feeds = data["feed"][ind:ind_upper]
feeds_summary = "\n".join(
[
f"News summary: {f['title']}. {f['summary']} overall_sentiment_score: {f['overall_sentiment_score']}"
for f in feeds
]
)
return feeds_summary
@pytest.mark.asyncio
async def test_async_groupchat():
try:
import openai
except ImportError:
return
config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC)
llm_config = {
"timeout": 600,
"cache_seed": 41,
"config_list": config_list,
"temperature": 0,
}
# create an AssistantAgent instance named "assistant"
assistant = autogen.AssistantAgent(
name="assistant",
llm_config={
"timeout": 600,
"cache_seed": 41,
"config_list": config_list,
"temperature": 0,
},
system_message="You are a helpful assistant. Reply 'TERMINATE' to end the conversation.",
)
# create a UserProxyAgent instance named "user"
user_proxy = autogen.UserProxyAgent(
name="user",
human_input_mode="NEVER",
max_consecutive_auto_reply=5,
code_execution_config=False,
default_auto_reply=None,
)
groupchat = autogen.GroupChat(agents=[user_proxy, assistant], messages=[], max_round=12)
manager = autogen.GroupChatManager(
groupchat=groupchat,
llm_config=llm_config,
is_termination_msg=lambda x: "TERMINATE" in x.get("content", ""),
)
await user_proxy.a_initiate_chat(manager, message="""Have a short conversation with the assistant.""")
assert len(user_proxy.chat_messages) > 0
@pytest.mark.asyncio
async def test_stream():
try:
import openai
except ImportError:
return
config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC)
data = asyncio.Future()
async def add_stock_price_data():
# simulating the data stream
for i in range(0, 2, 1):
latest_news = get_market_news(i, i + 1)
if data.done():
data.result().append(latest_news)
else:
data.set_result([latest_news])
# print(data.result())
await asyncio.sleep(5)
data_task = asyncio.create_task(add_stock_price_data())
# create an AssistantAgent instance named "assistant"
assistant = autogen.AssistantAgent(
name="assistant",
llm_config={
"timeout": 600,
"cache_seed": 41,
"config_list": config_list,
"temperature": 0,
},
system_message="You are a financial expert.",
)
# create a UserProxyAgent instance named "user"
user_proxy = autogen.UserProxyAgent(
name="user",
human_input_mode="NEVER",
max_consecutive_auto_reply=5,
code_execution_config=False,
default_auto_reply=None,
)
async def add_data_reply(recipient, messages, sender, config):
await asyncio.sleep(0.1)
data = config["news_stream"]
if data.done():
result = data.result()
if result:
news_str = "\n".join(result)
result.clear()
return (
True,
f"Just got some latest market news. Merge your new suggestion with previous ones.\n{news_str}",
)
return False, None
user_proxy.register_reply(autogen.AssistantAgent, add_data_reply, 1, config={"news_stream": data})
await user_proxy.a_initiate_chat(
assistant,
message="""Give me investment suggestion in 3 bullet points.""",
)
while not data_task.done() and not data_task.cancelled():
reply = await user_proxy.a_generate_reply(sender=assistant)
if reply is not None:
await user_proxy.a_send(reply, assistant)
if __name__ == "__main__":
asyncio.run(test_stream())
| [] |
2024-01-10 | sudarshan-koirala/opengpts | backend~app~stream.py | import math
from typing import Any, Dict, Optional, Sequence, Union
from uuid import UUID
from anyio import create_memory_object_stream
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
ChatMessageChunk,
FunctionMessage,
FunctionMessageChunk,
HumanMessage,
HumanMessageChunk,
)
from langchain.schema.output import ChatGenerationChunk, GenerationChunk
class StreamMessagesHandler(BaseCallbackHandler):
def __init__(self, messages: Sequence[BaseMessage]) -> None:
self.messages = messages
self.output: Dict[UUID, ChatGenerationChunk] = {}
send_stream, receive_stream = create_memory_object_stream(
math.inf, item_type=dict | Exception
)
self.send_stream = send_stream
self.receive_stream = receive_stream
def on_llm_new_token(
self,
token: str,
*,
chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
run_id: UUID,
**kwargs: Any,
) -> Any:
# If this is being called for a non-Chat Model run, convert to AIMessage
if chunk is None:
chunk = ChatGenerationChunk(message=AIMessageChunk(content=token))
# If we get something we don't know how to handle, ignore it
if not (
isinstance(chunk, ChatGenerationChunk)
or isinstance(chunk, BaseMessageChunk)
):
return
# Convert messages to ChatGenerationChunks (workaround for old langchahin)
if isinstance(chunk, BaseMessageChunk):
chunk = ChatGenerationChunk(message=chunk)
# Accumulate the output (ChatGenerationChunk implements __add__)
if not self.output.get(run_id):
self.output[run_id] = chunk
else:
self.output[run_id] += chunk
# Send the messages to the stream
self.send_stream.send_nowait(
{
"messages": (
self.messages
+ [
map_chunk_to_msg(chunk.message)
for chunk in self.output.values()
]
)
}
)
def map_chunk_to_msg(chunk: BaseMessageChunk) -> BaseMessage:
if not isinstance(chunk, BaseMessageChunk):
return chunk
args = {k: v for k, v in chunk.__dict__.items() if k != "type"}
if isinstance(chunk, HumanMessageChunk):
return HumanMessage(**args)
elif isinstance(chunk, AIMessageChunk):
return AIMessage(**args)
elif isinstance(chunk, FunctionMessageChunk):
return FunctionMessage(**args)
elif isinstance(chunk, ChatMessageChunk):
return ChatMessage(**args)
else:
raise ValueError(f"Unknown chunk type: {chunk}")
| [] |
2024-01-10 | sudarshan-koirala/opengpts | backend~app~api~runs.py | import asyncio
import json
from typing import AsyncIterator, Sequence
from uuid import uuid4
import langsmith.client
import orjson
from fastapi import APIRouter, BackgroundTasks, HTTPException, Request
from fastapi.exceptions import RequestValidationError
from gizmo_agent import agent
from langchain.pydantic_v1 import ValidationError
from langchain.schema.messages import AnyMessage, FunctionMessage
from langchain.schema.output import ChatGeneration
from langchain.schema.runnable import RunnableConfig
from langserve.callbacks import AsyncEventAggregatorCallback
from langserve.schema import FeedbackCreateRequest
from langserve.serialization import WellKnownLCSerializer
from langserve.server import _get_base_run_id_as_str, _unpack_input
from langsmith.utils import tracing_is_enabled
from pydantic import BaseModel, Field
from sse_starlette import EventSourceResponse
from app.schema import OpengptsUserId
from app.storage import get_assistant, get_thread_messages, public_user_id
from app.stream import StreamMessagesHandler
router = APIRouter()
_serializer = WellKnownLCSerializer()
class AgentInput(BaseModel):
"""An input into an agent."""
messages: Sequence[AnyMessage] = Field(default_factory=list)
class CreateRunPayload(BaseModel):
"""Payload for creating a run."""
assistant_id: str
thread_id: str
input: AgentInput = Field(default_factory=AgentInput)
async def _run_input_and_config(request: Request, opengpts_user_id: OpengptsUserId):
try:
body = await request.json()
except json.JSONDecodeError:
raise RequestValidationError(errors=["Invalid JSON body"])
assistant, public_assistant, state = await asyncio.gather(
asyncio.get_running_loop().run_in_executor(
None, get_assistant, opengpts_user_id, body["assistant_id"]
),
asyncio.get_running_loop().run_in_executor(
None, get_assistant, public_user_id, body["assistant_id"]
),
asyncio.get_running_loop().run_in_executor(
None, get_thread_messages, opengpts_user_id, body["thread_id"]
),
)
assistant = assistant or public_assistant
if not assistant:
raise HTTPException(status_code=404, detail="Assistant not found")
config: RunnableConfig = {
**assistant["config"],
"configurable": {
**assistant["config"]["configurable"],
"user_id": opengpts_user_id,
"thread_id": body["thread_id"],
"assistant_id": body["assistant_id"],
},
}
try:
input_ = _unpack_input(agent.get_input_schema(config).validate(body["input"]))
except ValidationError as e:
raise RequestValidationError(e.errors(), body=body)
return input_, config, state["messages"]
@router.post("")
async def create_run(
request: Request,
payload: CreateRunPayload, # for openapi docs
opengpts_user_id: OpengptsUserId,
background_tasks: BackgroundTasks,
):
"""Create a run."""
input_, config, messages = await _run_input_and_config(request, opengpts_user_id)
background_tasks.add_task(agent.ainvoke, input_, config)
return {"status": "ok"} # TODO add a run id
@router.post("/stream")
async def stream_run(
request: Request,
payload: CreateRunPayload, # for openapi docs
opengpts_user_id: OpengptsUserId,
):
"""Create a run."""
input_, config, messages = await _run_input_and_config(request, opengpts_user_id)
streamer = StreamMessagesHandler(messages + input_["messages"])
event_aggregator = AsyncEventAggregatorCallback()
config["callbacks"] = [streamer, event_aggregator]
# Call the runnable in streaming mode,
# add each chunk to the output stream
async def consume_astream() -> None:
try:
async for chunk in agent.astream(input_, config):
await streamer.send_stream.send(chunk)
# hack: function messages aren't generated by chat model
# so the callback handler doesn't know about them
if chunk["messages"]:
message = chunk["messages"][-1]
if isinstance(message, FunctionMessage):
streamer.output[uuid4()] = ChatGeneration(message=message)
except Exception as e:
await streamer.send_stream.send(e)
finally:
await streamer.send_stream.aclose()
# Start the runnable in the background
task = asyncio.create_task(consume_astream())
# Consume the stream into an EventSourceResponse
async def _stream() -> AsyncIterator[dict]:
has_sent_metadata = False
async for chunk in streamer.receive_stream:
if isinstance(chunk, BaseException):
yield {
"event": "error",
# Do not expose the error message to the client since
# the message may contain sensitive information.
# We'll add client side errors for validation as well.
"data": orjson.dumps(
{"status_code": 500, "message": "Internal Server Error"}
).decode(),
}
raise chunk
else:
if not has_sent_metadata and event_aggregator.callback_events:
yield {
"event": "metadata",
"data": orjson.dumps(
{"run_id": _get_base_run_id_as_str(event_aggregator)}
).decode(),
}
has_sent_metadata = True
yield {
# EventSourceResponse expects a string for data
# so after serializing into bytes, we decode into utf-8
# to get a string.
"data": _serializer.dumps(chunk).decode("utf-8"),
"event": "data",
}
# Send an end event to signal the end of the stream
yield {"event": "end"}
# Wait for the runnable to finish
await task
return EventSourceResponse(_stream())
@router.get("/input_schema")
async def input_schema() -> dict:
"""Return the input schema of the runnable."""
return agent.get_input_schema().schema()
@router.get("/output_schema")
async def output_schema() -> dict:
"""Return the output schema of the runnable."""
return agent.get_output_schema().schema()
@router.get("/config_schema")
async def config_schema() -> dict:
"""Return the config schema of the runnable."""
return agent.config_schema().schema()
if tracing_is_enabled():
langsmith_client = langsmith.client.Client()
@router.post("/feedback")
def create_run_feedback(feedback_create_req: FeedbackCreateRequest) -> dict:
"""
Send feedback on an individual run to langsmith
Note that a successful response means that feedback was successfully
submitted. It does not guarantee that the feedback is recorded by
langsmith. Requests may be silently rejected if they are
unauthenticated or invalid by the server.
"""
langsmith_client.create_feedback(
feedback_create_req.run_id,
feedback_create_req.key,
score=feedback_create_req.score,
value=feedback_create_req.value,
comment=feedback_create_req.comment,
source_info={
"from_langserve": True,
},
)
return {"status": "ok"}
| [] |
2024-01-10 | Xlsean/opencompass | opencompass~datasets~leval~evaluators.py | import json
from typing import List
from opencompass.openicl.icl_evaluator import BaseEvaluator
from opencompass.registry import ICL_EVALUATORS
from opencompass.utils.prompt import PromptList
@ICL_EVALUATORS.register_module()
class LEvalGPTEvaluator(BaseEvaluator):
"""Use OpenAI's models to evaluate prediction.
Args:
battle_model (str): The rival model name in evaluate module. Defaults
to 'turbo-16k-0613'.
evaluator_path (str): The judge model name in evaluate module. Note
that the key will be fetched from the environment variable
$OPENAI_API_KEY, as how openai defaults to be.
Defaults to 'gpt-4-0613'.
"""
def __init__(self,
battle_model: str = 'turbo-16k-0613',
evaluator_path: str = 'gpt-4-0613') -> None:
self.battle_model = battle_model
self.evaluator_path = evaluator_path
super().__init__()
def run_judge_pair(self, prompt_template, system_prompt, question,
answer_a, answer_b, reference):
from opencompass.models import OpenAI
user_prompt = prompt_template.format(question=question,
answer_a=answer_a,
answer_b=answer_b,
reference=reference)
messages = PromptList([{
'role': 'SYSTEM',
'fallback_role': 'HUMAN',
'prompt': system_prompt
}, {
'role': 'HUMAN',
'prompt': user_prompt
}])
model = OpenAI(path=self.evaluator_path,
max_seq_len=16384,
query_per_second=1,
retry=5,
temperature=0.0)
response = model._generate(input=messages,
max_out_len=2048,
temperature=0.0)
if '[[A]]' in response:
winner = 'A'
elif '[[B]]' in response:
winner = 'B'
elif '[[C]]' in response:
winner = 'tie'
else:
winner = 'error'
return winner
def score(self, predictions: List, references: List) -> dict:
system_prompt = "Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user question about the content of a long document. You will be given a reference answer written by human, assistant A's answer, and assistant B's answer. Your job is to evaluate which assistant's answer is better. Begin your evaluation by comparing both assistants' answers with the reference answer. Additional details or information that are not mentioned in reference answer cannot be considered as advantages and do not let them sway your judgment. Your evaluation should also consider the relevance to user's question but it is more important to avoid factual errors according to the reference answer. Avoid any position biases and ensure that the order in which the responses were presented does not influence your decision. Do not allow the length of the responses to influence your evaluation. Do not favor certain names of the assistants. Be as objective as possible. After providing your explanation, output your final verdict by strictly following this format: \"[[A]]\" if assistant A is better, \"[[B]]\" if assistant B is better, and \"[[C]]\" for a tie." # noqa
prompt_template = "[User Question]\n{question}\n\n[The Start of Reference Answer]\n{reference}\n[The End of Reference Answer]\n\n[The Start of Assistant A's Answer]\n{answer_a}\n[The End of Assistant A's Answer]\n\n[The Start of Assistant B's Answer]\n{answer_b}\n[The End of Assistant B's Answer]" # noqa
battle_samples = []
with open(
'opencompass/datasets/leval/' + self.battle_model +
'.pred.jsonl', 'r') as f:
for i, line in enumerate(f):
battle_samples.append(json.loads(line))
score = 0.
bad_case = 0
num_samples = 0
for i in range(len(predictions)):
prediction = predictions[i]
reference = references[i]
for sample in battle_samples:
if reference == sample['gt']:
question = sample['query']
battle_answer = sample[self.battle_model + '_pred']
winner = self.run_judge_pair(prompt_template,
system_prompt, question,
prediction, battle_answer,
reference)
if winner == 'A':
score += 1
elif winner == 'tie':
score += 0.5
elif winner == 'error':
bad_case += 1
winner = self.run_judge_pair(prompt_template,
system_prompt, question,
battle_answer, prediction,
reference)
if winner == 'B':
score += 1
elif winner == 'tie':
score += 0.5
elif winner == 'error':
bad_case += 1
num_samples += 2
score = score / (num_samples - bad_case) * 100
return {'score': score}
| [
"Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user question about the content of a long document. You will be given a reference answer written by human, assistant A's answer, and assistant B's answer. Your job is to evaluate which assistant's answer is better. Begin your evaluation by comparing both assistants' answers with the reference answer. Additional details or information that are not mentioned in reference answer cannot be considered as advantages and do not let them sway your judgment. Your evaluation should also consider the relevance to user's question but it is more important to avoid factual errors according to the reference answer. Avoid any position biases and ensure that the order in which the responses were presented does not influence your decision. Do not allow the length of the responses to influence your evaluation. Do not favor certain names of the assistants. Be as objective as possible. After providing your explanation, output your final verdict by strictly following this format: \"[[A]]\" if assistant A is better, \"[[B]]\" if assistant B is better, and \"[[C]]\" for a tie.",
"[User Question]\n{question}\n\n[The Start of Reference Answer]\n{reference}\n[The End of Reference Answer]\n\n[The Start of Assistant A's Answer]\n{answer_a}\n[The End of Assistant A's Answer]\n\n[The Start of Assistant B's Answer]\n{answer_b}\n[The End of Assistant B's Answer]"
] |
2024-01-10 | causalNLP/corr2cause | code~run_model.py | class Constants:
data_folder = 'data/'
file_causal_relation = data_folder + 'raw_graphs/causal_relation_n={num_nodes}.jsonl'
file_out_template = data_folder + 'data_3class/causalnli_{num_nodes}nodes.json'
file_split_csv_template = data_folder + 'binary_classification_{}.csv'
variable_refactor = False
paraphrase = False
folder_output = data_folder + 'outputs/'
if variable_refactor:
folder_output += 'variable_refactor/'
data_folder += 'data_3class_from_Z/'
else:
data_folder += 'data_3class/'
if paraphrase:
folder_output += 'paraph/'
file_split_json_template = data_folder + '{}.json'
file_output_template = folder_output + '{}_test.csv'
file_all_preds = file_output_template.format('all')
file_prompt2response_template = folder_output + 'prompt2response_lookup_{}.json'
finetune_input_file_tmpl = data_folder + 'tmp/causalnli_{ft_loss}_{split}.jsonl'
model_name2model_name_full = {
'bert_base_mnli': 'textattack/bert-base-uncased-MNLI',
'roberta_mnli': "roberta-large-mnli",
'deberta_xlarge_mnli': 'microsoft/deberta-xlarge-mnli',
# 'deberta_large_mnli': 'Narsil/deberta-large-mnli-zero-cls',
# 'deberta_large_mnli': 'microsoft/deberta-large-mnli',
'distilbert_mnli': 'typeform/distilbert-base-uncased-mnli',
'distilbart_mnli': 'valhalla/distilbart-mnli-12-1',
'bart_large_mnli': 'facebook/bart-large-mnli',
}
random_model_name2weights = {
'random_uniform': [1 / 3, 1 / 3, 1 / 3],
'random_proportional': [0.1857, 0.5582, 0.2561],
'random_majority': [0, 1, 0],
}
gpt_model_name2engine_name = {
'gpt_a': 'ada',
'gpt_b': 'babbage',
'gpt_c': 'curie',
'gpt_d': 'davinci',
'gpt_d001': 'text-davinci-001',
# 'gpt_d002': 'text-davinci-002',
'gpt3instruct': 'text-davinci-002',
'gpt_d003': 'text-davinci-003',
'gpt_d003cot': 'text-davinci-003',
'gpt3.5': "gpt-3.5-turbo",
'gpt4': "gpt-4",
'gpt_a_cls_10k_ft': 'ada:ft-academicszhijing:causalnli-cls-10k-2022-10-29-12-08-18',
'gpt_b_cls_10k_ft': 'babbage:ft-academicszhijing:causalnli-cls-10k-2022-10-29-13-10-17',
'gpt_c_cls_10k_ft': 'curie:ft-academicszhijing:causalnli-cls-10k-2022-10-29-12-34-57',
'gpt_d_cls_10k_ft': 'davinci:ft-academicszhijing:causalnli-cls-10k-2022-11-01-12-44-59',
## 'gpt_d_gen_ft': 'davinci:ft-academicszhijing:causalnli-dev-2022-10-28-22-32-05',
## 'gpt_a_cls_ft': 'ada:ft-academicszhijing:causalnli-cls-dev-2022-10-29-00-09-50',
## 'gpt_b_cls_ft': 'babbage:ft-academicszhijing:causalnli-cls-dev-2022-10-29-00-55-53',
## 'gpt_c_cls_ft': 'curie:ft-academicszhijing:causalnli-cls-dev-2022-10-29-00-39-19',
## 'gpt_d_cls_ft': '',
## 'gpt_a_cls2_ft': 'ada:ft-academicszhijing:causalnli-cls2-dev-2022-10-29-01-11-33',
## 'gpt_b_cls2_ft': 'babbage:ft-academicszhijing:causalnli-cls2-dev-2022-10-29-01-28-06',
## 'gpt_c_cls2_ft': 'curie:ft-academicszhijing:causalnli-cls2-dev-2022-10-29-01-58-07',
## 'gpt_d_cls2_ft': '',
# 'gpt_a_cls_1k_ft': 'ada:ft-causalnlp-api:causalnli-cls-1k-2023-05-11-13-54-29',
# 'gpt_b_cls_1k_ft': 'babbage:ft-causalnlp-api:causalnli-cls-1k-2023-05-11-13-40-30',
# 'gpt_c_cls_1k_ft': 'curie:ft-causalnlp-api:causalnli-cls-1k-2023-05-11-13-07-11',
# 'gpt_d_cls_1k_ft': 'davinci:ft-causalnlp-api:causalnli-cls-1k-2023-05-11-13-31-58',
}
models_from_coauthors = [
# 'bert_base',
# 'bert_large',
# 'roberta_base',
# 'roberta_large',
# 'longformer_base',
#
# 'bert_base_mnli_ft',
# 'roberta_large_mnli',
'bert_base_mnli_ft',
'bert_base_mnli',
'bert_base',
'bert_large_mnli',
'bert_large',
'deberta_large_mnli',
'deberta_xlarge_mnli',
'distilbart_mnli',
'distilbert_mnli_42.06',
'distilbert_mnli',
'huggingface_mnli',
'longformer_base',
'random_majority',
'random_proportional',
'random_uniform',
'roberta_base_mnli',
'roberta_base',
'roberta_large_mnli', # best
'roberta_large',
'roberta_mnli',
'llama030',
'llama013',
'llama065',
'llama007',
'alpaca007',
'gpt2_ft',
'gpt2_large_ft',
'gpt2_xl_ft',
'llama007_ft',
'llama2_ft',
]
ft_loss2suffix = {
'gen': 'causalnli-dev',
'cls': 'causalnli-cls-',
'cls2': 'causalnli-cls2-',
}
pred_rel_lookup = {
'necessarily true': 'entailment',
'necessarily false': 'contradiction',
'neither to say': 'neutral',
'yes': 'entailment',
'no': 'contradiction',
'true': 'entailment',
'false': 'contradiction',
'neither': 'neutral',
'must be true': 'entailment',
'must be false': 'contradiction',
'not enough information': 'neutral',
'neutral': 'contradiction', # TODO: comment this out if you are doing 3-way NLI.
'not necessarily true': 'contradiction',
}
gold_rel2gpt_completion = {
'entailment': 'necessarily true".',
'contradiction': 'necessarily false".',
'neutral': 'neither".',
}
gold_rel2gpt_cls = {
'entailment': ' true',
'contradiction': ' false',
'neutral': ' maybe',
}
gold_rel2gpt_cls2 = {
'entailment': ' true',
'contradiction': ' not necessarily true',
'neutral': ' not necessarily true',
}
classes = ['entailment', 'contradiction', 'neutral']
pred_rels = ['must be true', 'must be false', 'neither to say', ]
options = [i.capitalize() for i in pred_rels]
options = [f'"{i}"' for i in options]
options[-1] = 'or ' + options[-1]
options = ', '.join(options)
prompt_tmpl_human_like = 'You are a highly intelligent question-answering bot with profound knowledge of causal inference.\n\n' \
'Question: {premise}\n' \
'Determine the truth value the following statement: {hypothesis}\n' \
'The options you may choose from are: ' + options + \
'. You only use "Not enough information" when you really don\'t have any idea.' \
'\n\nAnswer:'
prompt_tmpl_direct = 'This is a question to infer causation from correlation by causal inference.\n\n' \
'Question: {premise}\nCan we deduct the following: {hypothesis} Just answer "Yes" or "No."\n\nAnswer:'
prompt_tmpl_direct = 'Question: {premise}' \
" Is it necessarily true, necessarily false, or neither to say: " \
'{hypothesis}?' + \
"\n\nAnswer:"
prompt_tmpl_direct = 'Question: {premise}\nCan we deduct the following: {hypothesis}? Just answer "Yes" or ' \
'"No."\n\nAnswer:'
prompt_tmpl_human_like = 'Question: {premise}' \
" Is it necessarily true, necessarily false, or neither to say: " \
"{hypothesis}?" + \
"\n\nAnswer: Let's answer step by step."
prompt_tmpl_human_like_conclusion = \
'Therefore, the final answer ("necessarily true", "necessarily false", or "neither") is "'
from efficiency.function import rstrip_word
prompt_tmpl_finetune_gen = rstrip_word(prompt_tmpl_human_like, "Let's answer step by step.") + \
prompt_tmpl_human_like_conclusion.replace('herefore, t', '')
prompt_tmpl_finetune_cls = '{premise}' \
" Is the following statement true, false, or maybe: " \
"{hypothesis}?"
prompt_tmpl_finetune_cls2 = '{premise}' \
" Is the following statement true, or not necessarily true: " \
"{hypothesis}?"
prompt_tmpl_generic = 'Premise: {premise}\nHypothesis: {hypothesis}'
# Question: {premise}
# What do we know about the following statement:
# The options you may choose from are: "True", "False", or "Not Enough Information".
# Answer:
def __init__(self):
from glob import glob
self.model_name2existing_files = lambda model_name: glob(self.folder_output + f'*{model_name}_*')
def rel_normalize(self, surface_form):
'''
Example for ada_ft: "false, maybe? false, maybe?",
"maybe false false false false false false false"
'''
from nltk import word_tokenize
rel_normalize = lambda i: self.pred_rel_lookup.get(i, i)
normalized = rel_normalize(rel_normalize(surface_form))
if normalized not in self.classes:
surface_form = ' '.join(word_tokenize(surface_form)[:2])
normalized = rel_normalize(rel_normalize(surface_form))
if normalized not in self.classes:
surface_form = ' '.join(word_tokenize(surface_form)[:1])
normalized = rel_normalize(rel_normalize(surface_form))
if normalized not in self.classes:
return 'contradiction'
return normalized
class Model:
def __init__(self, model_name):
self.model_output = []
self.model_name = model_name
self.prompt2response_file = C.file_prompt2response_template.format(model_name)
self.clean_pred_func = lambda i: i.lower()
def set_files(self):
from efficiency.log import fread
self.data_input_file = C.file_split_json_template.format('test')
self.model_output_file = C.file_output_template.format(self.model_name)
self.data_input = fread(self.data_input_file)
self.model_output = fread(self.model_output_file)
from efficiency.log import show_var
show_var(['len(self.data_input)', 'len(self.model_output)'])
from efficiency.log import fread
prompt2response = {}
for file in C.model_name2existing_files(self.model_name):
data = fread(file)
if isinstance(data, dict):
prompt2response.update(data)
else:
if 'pred' in data[0]:
prompt2response.update({i['prompt']: i['pred'] for i in data})
elif 'gpt_prompt' in data[0]:
prompt2response.update({i['gpt_prompt']: i['gpt_response'] for i in data})
self.prompt2response = prompt2response
def save_prompt2response(self, verbose=False):
import json
from efficiency.log import fwrite
fwrite(json.dumps(self.prompt2response, indent=4), self.prompt2response_file, verbose=verbose)
def query_text(self, prompt, strip_func=lambda i: i.strip()):
if isinstance(prompt, str):
# prompt = str(prompt)
prompt = prompt.strip()
if self.inference_is_fast:
response = self.query_unseen_text(prompt)
response = strip_func(response)
else:
if prompt not in self.prompt2response:
response = self.query_unseen_text(prompt)
response = strip_func(response)
self.prompt2response[prompt] = response
self.save_prompt2response()
response = self.prompt2response[prompt]
return response
def run_inference(self):
if self.model_name in set(C.model_name2model_name_full) | set(C.random_model_name2weights):
self.inference_is_fast = True
else:
self.inference_is_fast = False
from efficiency.log import show_var
show_var(['self.inference_is_fast'])
if not hasattr(self, 'query_by_prem_and_hypo'):
print('[Warning] Skipping inference because the models are not defined')
return
self.set_files()
output_file = self.model_output_file
import pandas as pd
from tqdm import tqdm
data_to_save = []
for gold in tqdm(self.data_input):
premise = gold['premise']
hypothesis = gold['hypothesis']
response, prompt = self.query_by_prem_and_hypo(premise, hypothesis)
data_to_save.append({
"pred": response,
"gold": gold['relation'],
"prompt": prompt,
"id": gold['id'],
})
if not self.inference_is_fast:
df = pd.DataFrame(data_to_save)
df.to_csv(output_file, index=False)
if self.inference_is_fast:
df = pd.DataFrame(data_to_save)
df.to_csv(output_file, index=False)
print(f'[Info] Saved {len(df)} entries to {output_file}')
def run_finetune(self, split=['train', 'dev'][0], ft_loss=['gen', 'cls', 'cls2'][1]):
# self.set_files()
output_file = C.finetune_input_file_tmpl.format(ft_loss=ft_loss, split=split)
if ft_loss == 'cls':
gold_rel2gpt_label = C.gold_rel2gpt_cls
prompt_tmpl_finetune = C.prompt_tmpl_finetune_cls
elif ft_loss == 'cls2':
gold_rel2gpt_label = C.gold_rel2gpt_cls2
prompt_tmpl_finetune = C.prompt_tmpl_finetune_cls2
else:
gold_rel2gpt_label = C.gold_rel2gpt_completion
prompt_tmpl_finetune = C.prompt_tmpl_finetune_gen
split2num_data = {'dev': None, 'train': None}
num_data = split2num_data[split]
train_file = C.file_split_json_template.format(split)
from efficiency.log import fread
all_data = fread(train_file)
from efficiency.function import random_sample
data = random_sample(all_data, num_data)
from tqdm import tqdm
data_to_save = []
import json
from efficiency.log import fwrite
for gold in tqdm(data):
premise = gold['premise']
hypothesis = gold['hypothesis']
gold_rel = gold['relation']
if ft_loss == 'cls2':
gold_rel = C.rel_normalize(gold_rel)
gold_rel = gold_rel2gpt_label[gold_rel]
prompt = prompt_tmpl_finetune.format(premise=premise, hypothesis=hypothesis.strip().rstrip('.'))
data_to_save.append({
"prompt": prompt,
"completion": gold_rel,
# "id": gold['id'],
})
writeout = [json.dumps(i) for i in data_to_save]
writeout = '\n'.join(writeout)
fwrite(writeout, output_file, verbose=True)
# import pandas as pd
# df = pd.DataFrame(data_to_save)
# df.to_csv(output_file, index=False)
# print(f'[Info] Saved {len(df)} entries to {output_file}')
def evaluate(self, detailed=True): # TODO: You can change "detailed" here to enable fine grained analysis
self.set_files()
print(self.model_name)
perf = []
num_classes = 3
if C.rel_normalize('neutral') == 'contradiction':
num_classes = 2
all_preds = []
id2pred_n_gold = {}
for item in self.model_output:
pred_rel = self.clean_pred_func(item['pred'])
gold_rel = item['gold']
pred_rel = C.rel_normalize(pred_rel)
gold_rel = C.rel_normalize(gold_rel)
all_preds.append(pred_rel)
# if pred_rel in C.pred_rel_lookup:
# # num_classes = 2
# # gold_rel = 'yes' if item['gold'] == 'entailment' else "no"
# pred_rel = C.pred_rel_lookup[pred_rel]
identifiers = item['id'].split('__')
identifiers = dict([i.split('=', 1) for i in identifiers])
this_perf = {'pred': pred_rel,
'gold': gold_rel,
'id': item['id'],
}
id2pred_n_gold[item['id']] = {
'pred': pred_rel,
'gold': gold_rel,
}
if detailed:
this_perf.update({
'num_nodes': identifiers['num_nodes'],
'causal_relation': identifiers['causal_relation'],
})
perf.append(this_perf)
if not perf:
print('[Warning] The model output file is empty.')
import pdb;
pdb.set_trace()
return {}, {}
import pandas as pd
df = pd.DataFrame(perf)
## [ sanity check of the GPT response parser
from collections import Counter
# preds = [self.clean_pred_func(i['pred']) for i in self.model_output]
cnt_preds = Counter(all_preds)
if len(cnt_preds) > num_classes: # not only "yes" or "no"
print(cnt_preds)
writeout = [{'response': k, 'occurrences': v} for k, v in cnt_preds.items()]
from efficiency.log import write_dict_to_csv
write_dict_to_csv(writeout, 'data/tmp/response_surface_forms.csv')
import pdb;
pdb.set_trace()
## ]
my_report_dicts = []
my_report_dict = {'subset': 'All'}
my_report_dict.update(self.perf_df2report_dict(df))
my_report_dicts.append(my_report_dict)
if detailed:
for num_nodes, this_df in df.groupby(['num_nodes']):
my_report_dict = {'subset': num_nodes}
my_report_dict.update(self.perf_df2report_dict(this_df))
my_report_dicts.append(my_report_dict)
for causal_relation, this_df in df.groupby(['causal_relation']):
my_report_dict = {'subset': causal_relation}
my_report_dict.update(self.perf_df2report_dict(this_df))
my_report_dicts.append(my_report_dict)
report_df = pd.DataFrame(my_report_dicts)
pd.set_option('display.max_columns', None)
if detailed:
print(report_df)
import pdb;
pdb.set_trace()
report_df.to_csv('data/tmp/performance.csv', mode='a', index=False)
print(self.model_name)
return my_report_dicts[0], id2pred_n_gold
# @staticmethod
def perf_df2report_dict(self, df):
import pandas as pd
df = pd.DataFrame(df)
from sklearn.metrics import classification_report
report = classification_report(df['gold'], df['pred'], digits=4)
# print(report)
# import pdb;
# pdb.set_trace()
report_dict = classification_report(df['gold'], df['pred'], digits=4, output_dict=True)
labels = [i for i in ['yes', 'entailment', 'no', 'contradiction', 'neutral'] if i in report_dict]
report_labels = sorted(set(report_dict.keys()) - {'accuracy', 'macro avg', 'weighted avg', 'micro avg'})
minority_label = min([(report_dict[i]['support'], i) for i in report_labels])[-1]
majority_label = max([(report_dict[i]['support'], i) for i in report_labels])[-1]
label = minority_label
my_report_dict = {
'F1': report_dict['weighted avg']['f1-score'],
'Acc': report_dict['accuracy'],
'P': report_dict['weighted avg']['precision'],
'R': report_dict['weighted avg']['recall'],
'Majo_Acc': report_dict[majority_label]['support']
/ report_dict['weighted avg']['support'],
'TotalSamples': report_dict['weighted avg']['support'],
'Mino_Label': label,
'Mino_Samples': report_dict[label]['support'],
'Mino_F1': report_dict[label]['f1-score'],
'Mino_P': report_dict[label]['precision'],
'Mino_R': report_dict[label]['recall'],
}
if minority_label != 'entailment':
# import pdb;pdb.set_trace()
pass # It is the case when the number of nodes is 2, then all the cases are contradiction
my_simple_report_dict = {
'model': self.model_name,
'F1': round(report_dict[minority_label]['f1-score'] * 100, 2),
'P': round(report_dict[minority_label]['precision'] * 100, 2),
'R': round(report_dict[minority_label]['recall'] * 100, 2),
'Acc': round(report_dict['accuracy'] * 100, 2),
}
my_report_dict = my_simple_report_dict
return my_report_dict
class RandomBaseline(Model):
def __init__(self, model_name='random_uniform'):
super().__init__(model_name)
self.classes = C.classes
self.weights = C.random_model_name2weights[self.model_name]
def query_by_prem_and_hypo(self, premise, hypothesis):
prompt = C.prompt_tmpl_generic.format(premise=premise, hypothesis=hypothesis)
response = self.query_text(prompt)
return response, prompt
def query_unseen_text(self, prompt):
import random
pred = random.choices(population=self.classes, weights=self.weights, k=1)
pred = pred[0]
return pred
class HuggingFace(Model):
def __init__(self, model_name='huggingface_mnli', if_run_model=False):
super().__init__(model_name)
self.model_name_full = C.model_name2model_name_full[self.model_name]
if not if_run_model: return
# from transformers import pipeline
# self.model = pipeline("zero-shot-classification", model=self.model_name, )
from transformers import AutoModelForSequenceClassification, AutoTokenizer
self.model = AutoModelForSequenceClassification.from_pretrained(self.model_name_full)
self.tokenizer = AutoTokenizer.from_pretrained(self.model_name_full)
def query_by_prem_and_hypo(self, premise, hypothesis):
prompt = tuple((premise, hypothesis))
response = self.query_text(prompt)
return response, prompt
def query_unseen_text(self, prompt):
premise, hypothesis = prompt
# run through model pre-trained on MNLI
x = self.tokenizer.encode(premise, hypothesis, return_tensors='pt',
truncation='only_first') # truncate the premise if needed
# x = x.to(device)
logits = self.model(x)[0]
# we throw away "neutral" (dim 1) and take the probability of
# "entailment" (2) as the probability of the label being true
# entail_contradiction_logits = logits[:, [0, 2]]
contra_neutral_entail_logits = logits[:, [0, 1, 2]]
probs = contra_neutral_entail_logits.softmax(dim=1)
label2prob = {
'contradiction': probs[:, 0],
'neutral': probs[:, 1],
'entailment': probs[:, 2],
}
order = sorted(label2prob.items(), key=lambda i: i[-1], reverse=True)
argmax_label = order[0][0]
return argmax_label
def query_zeroshot_pipeline(self, premise, hypothesis):
hypothesis_template = "{}"
results = self.model([premise], [hypothesis], hypothesis_template=hypothesis_template,
multi_label=False
)
predicted_label = {results[0]["labels"][0]} # [0]
def query_api(self, api_key_name='HUGGINGFACE_API_KEY', ):
import os
self.api_key = os.environ[api_key_name]
api_key = self.api_key
from huggingface_hub.inference_api import InferenceApi
inference = InferenceApi(repo_id="bert-base-uncased", token=api_key)
inference(inputs="The goal of life is [MASK].")
import pdb;
pdb.set_trace()
inference = InferenceApi(repo_id="deepset/roberta-base-squad2", token=api_key)
inputs = {"question": "Where is Hugging Face headquarters?",
"context": "Hugging Face is based in Brooklyn, New York. There is also an office in Paris, France."}
inference(inputs)
inference = InferenceApi(repo_id="typeform/distilbert-base-uncased-mnli", token=api_key)
inputs = "Hi, I recently bought a device from your company but it is not working as advertised and I would like to get reimbursed!"
params = {"candidate_labels": ["refund", "legal", "faq"]}
inference(inputs, params)
inference = InferenceApi(repo_id="paraphrase-xlm-r-multilingual-v1",
task="feature-extraction",
token=api_key,
)
class GPT(Model):
def __init__(self, model_name='gpt3instruct', api_key_name='OPENAI_API_KEY', default_max_tokens=32):
super().__init__(model_name)
import os
self.api_key = os.environ[api_key_name]
import openai
openai.api_key = self.api_key
self.model = openai
self.engine_name = C.gpt_model_name2engine_name[model_name]
from efficiency.nlp import Chatbot
self.chat = Chatbot(
system_prompt='You are a highly intelligent question-answering bot with profound knowledge of causal inference.',
output_file=f'data/tmp/cache_{model_name}_responses.csv')
from efficiency.function import lstrip_word
self.clean_pred_func = lambda i: lstrip_word(i.lower().strip().strip(' ".:').strip(), 'that there is ')
self.engine_finetuned = self.engine_name not in \
{'text-davinci-001', 'text-davinci-002', 'text-davinci-003', 'ada', 'gpt-3.5-turbo',
'gpt-4', 'ada', 'babbage', 'curie', 'davinci',
}
self.engine_ft_loss2suffix = {
ft_loss: (f':{suffix}2022' in self.engine_name) or
(f':{suffix}10k-2022' in self.engine_name) or
(f':{suffix}1k-2023' in self.engine_name)
for ft_loss, suffix in C.ft_loss2suffix.items()
}
self.max_tokens = default_max_tokens
self.stop_symbols = None
if self.engine_finetuned:
self.max_tokens = 8
self.stop_symbols = ['\n\n']
if self.engine_ft_loss2suffix['gen']:
self.stop_symbols = ['".', '\n\n', ]
def query_by_prem_and_hypo(self, premise, hypothesis):
if self.engine_finetuned:
if self.engine_ft_loss2suffix['gen']:
prompt_tmpl = C.prompt_tmpl_finetune_gen
elif self.engine_ft_loss2suffix['cls2']:
prompt_tmpl = C.prompt_tmpl_finetune_cls2
elif self.engine_ft_loss2suffix['cls']:
prompt_tmpl = C.prompt_tmpl_finetune_cls
else:
import pdb;
pdb.set_trace()
prompt = prompt_tmpl.format(premise=premise, hypothesis=hypothesis.strip().rstrip('.'))
response = self.query_text(prompt)
return response, prompt
elif 'cot' not in self.model_name:
prompt = C.prompt_tmpl_direct.format(premise=premise, hypothesis=hypothesis.strip().rstrip('.'))
response = self.query_text(prompt)
return response, prompt
else:
prompt = C.prompt_tmpl_human_like.format(premise=premise, hypothesis=hypothesis.strip().rstrip('.'))
prompt_conclusion = C.prompt_tmpl_human_like_conclusion
response = self.query_text(prompt, strip_func=str)
new_prompt = prompt + response + '\n\n' + prompt_conclusion
# example_output = ' "neither".'
response = self.query_text(new_prompt)
return response, new_prompt
def query_unseen_text(self, prompt, max_tokens=None):
'''
Costs:
- 20221010 40USD for all test sets.
- 20221019 15.58USD for all test sets.
'''
if max_tokens is None:
max_tokens = self.max_tokens
if prompt.endswith('answer step by step.'):
max_tokens = 256
elif prompt.endswith(C.prompt_tmpl_human_like_conclusion[10:]) or prompt.endswith('Answer:'):
max_tokens = 2
max_tokens = 10
response = self.chat.ask(prompt, engine=self.engine_name, max_tokens=max_tokens,
stop_sign=self.stop_symbols, enable_pdb=False)
return response
def finetune_commands(self):
# Reference: https://harishgarg.com/writing/how-to-fine-tune-gpt-3-api/
'''
[2022-10-28 20:45:05] Created fine-tune: ft-1FBYAFM3noxALTan0P6UcAoo
[2022-10-28 20:45:21] Fine-tune costs $101.98
[2022-10-28 20:45:21] Fine-tune enqueued
'''
import os
data_input_file = C.finetune_input_file
response = self.model.File.create(
file=open(data_input_file),
purpose='fine-tune'
)
import pdb;
pdb.set_trace()
print(response)
# Check commands in code/finetune_gpt_cmds.md
def get_args():
import argparse
parser = argparse.ArgumentParser('Arguments for running LLMs in inference or evaluation modes')
parser.add_argument('-inference_mode', action='store_true', help='whether to directly generate evaluation results based on the output files (default), or run the inference again (set true)')
parser.add_argument('-finetune', action='store_true', help='whether to finetune the models (only for GPT)')
parser.add_argument('-model_types', nargs="+", type=str, help="list of model types to call",
default=['random', 'gpt', 'huggingface', 'coauthor_files'])
args = parser.parse_args()
return args
def main():
args = get_args()
if_run_model = args.inference_mode
from efficiency.log import show_var
show_var(['C.variable_refactor', 'if_run_model'])
from efficiency.function import set_seed
set_seed()
report_dicts = []
all_models = []
if 'random' in args.model_types:
for model_name in C.random_model_name2weights:
model = RandomBaseline(model_name=model_name)
all_models.append(model)
if 'gpt' in args.model_types:
for model_name, engine_name in list(C.gpt_model_name2engine_name.items()): # [-4:]:
if not engine_name:
continue
model = GPT(model_name=model_name)
print(model_name, engine_name)
all_models.append(model)
if 'huggingface' in args.model_types:
for model_name in C.model_name2model_name_full:
model = HuggingFace(model_name=model_name, if_run_model=if_run_model)
all_models.append(model)
if 'coauthor_files' in args.model_types:
for model_name in C.models_from_coauthors:
model = Model(model_name)
all_models.append(model)
from collections import defaultdict
id2all_pred_n_gold = defaultdict(dict)
for model in all_models:
if args.finetune:
model.run_finetune()
model.finetune_commands()
return
if if_run_model:
model.run_inference()
continue
report_dict, id2this_pred_n_gold = model.evaluate()
report_dicts.append((model.model_name, report_dict))
for id, this_pred_n_gold in id2this_pred_n_gold.items():
this_pred = this_pred_n_gold['pred']
id2all_pred_n_gold[id][model.model_name] = this_pred
# Sanity check:
this_gold = this_pred_n_gold['gold']
if 'gold' not in id2all_pred_n_gold[id]:
id2all_pred_n_gold[id]['gold'] = this_gold
# id2all_pred_n_gold[id]['id'] = id
else:
if id2all_pred_n_gold[id]['gold'] != this_gold:
from efficiency.log import show_var
show_var(['this_gold', "this_pred_n_gold['id']"])
import pdb;
pdb.set_trace()
for id, v in id2all_pred_n_gold.items():
v['id'] = id
from efficiency.log import write_dict_to_csv
write_dict_to_csv(list(id2all_pred_n_gold.values()), C.file_all_preds, verbose=True)
raw_stats = [(model_name + '-VR' if C.variable_refactor
else model_name + '-P' if C.paraphrase else model_name,
i['entailment']['f1-score'], i['entailment']['precision'], i['entailment']['recall'],
# i['contradiction']['f1-score'], i['contradiction']['precision'], i['contradiction']['recall'],
# i['weighted avg']['f1-score'], i['macro avg']['f1-score'],
i['accuracy']
)
for model_name, i in report_dicts]
stats = [' & '.join(list(map(lambda a: f"{round(a * 100, 2):.2f}"
if isinstance(a, float) else a, i))) + ' \\\\ \n'
for i in raw_stats]
stats = ''.join(stats)
stats = stats.replace('_', '-')
print(stats)
if __name__ == '__main__':
C = Constants()
main()
| [
"{premise} Is the following statement true, or not necessarily true: {hypothesis}?",
"Question: {premise} Is it necessarily true, necessarily false, or neither to say: {hypothesis}?\n\nAnswer:",
"{}",
"Therefore, the final answer (\"necessarily true\", \"necessarily false\", or \"neither\") is \"",
"Question: {premise}\nCan we deduct the following: {hypothesis}? Just answer \"Yes\" or \"No.\"\n\nAnswer:",
"Premise: {premise}\nHypothesis: {hypothesis}",
"herefore, t",
"(PLACEHOLDER, PLACEHOLDER)",
"You are a highly intelligent question-answering bot with profound knowledge of causal inference.\n\nQuestion: {premise}\nDetermine the truth value the following statement: {hypothesis}\nThe options you may choose from are: PLACEHOLDER. You only use \"Not enough information\" when you really don't have any idea.\n\nAnswer:",
"Question: {premise} Is it necessarily true, necessarily false, or neither to say: {hypothesis}?\n\nAnswer: Let's answer step by step.",
"PLACEHOLDERbinary_classification_{}.csv",
"This is a question to infer causation from correlation by causal inference.\n\nQuestion: {premise}\nCan we deduct the following: {hypothesis} Just answer \"Yes\" or \"No.\"\n\nAnswer:",
"Let's answer step by step.",
"PLACEHOLDERdata_3class/causalnli_{num_nodes}nodes.json",
"{premise} Is the following statement true, false, or maybe: {hypothesis}?",
"PLACEHOLDER{}.json",
"PLACEHOLDERPLACEHOLDER\n\nPLACEHOLDER",
"PLACEHOLDERprompt2response_lookup_{}.json",
"PLACEHOLDER{}_test.csv"
] |
2024-01-10 | MousaAbuMaizer/PwC-Assignment | backend~quiz_generator.py | from langchain.llms import OpenAI
import os
import requests
from dotenv import load_dotenv
from langchain.prompts import PromptTemplate
load_dotenv()
openai_api_key = os.getenv('OPENAI_API_KEY')
llm = OpenAI(openai_api_key=openai_api_key)
prompt_template = PromptTemplate.from_template("Generate a multiple choice questions about {topic} along with the correct answers, make sure that the question is mildly difficult, and make sure that the correct answer is labeled with 'Correct Answer:' and followed by the answer itself")
# Function to generate quiz questions for a given topic
def generate_quiz_questions(topic, num_questions=5, extra_questions=20):
questions = []
total_questions = num_questions + extra_questions
for _ in range(total_questions):
formatted_prompt = prompt_template.format(topic=topic)
response = llm.invoke(formatted_prompt)
#print("Raw response:", response)
processed_responses = process_response(response)
for processed in processed_responses:
if isinstance(processed, dict) and len(processed.get('options', [])) == 4:
questions.append(processed)
if len(questions) == num_questions:
break
if len(questions) == num_questions:
break
return questions
# Function to process the raw response from the language model
def process_response(response):
quiz_data = []
questions_blocks = response.strip().split("Q:")
for block in questions_blocks:
lines = block.strip().split("\n")
if len(lines) < 2:
continue
question = lines[0].strip()
correct_answer = None
options = []
for line in lines[1:]:
if "Correct Answer:" in line:
correct_answer = line.split("Correct Answer:")[1].strip()
elif line.strip() and line[0].isalpha() and line[1] in [".", ")"]:
option = line.strip()
if "Correct Answer:" in option: # Handling cases where the correct answer is on the same line
option, correct_answer = option.split("Correct Answer:")
correct_answer = correct_answer.strip()
options.append(option.strip())
if question and options and correct_answer:
question_data = {
"question": question,
"options": options,
"correct_answer": correct_answer
}
quiz_data.append(question_data)
return quiz_data
# Code used for testing the script directly
# if __name__ == "__main__":
# topic = "Animal Kingdom"
# questions = generate_quiz_questions(topic, 5)
# print(questions) | [
"Generate a multiple choice questions about {topic} along with the correct answers, make sure that the question is mildly difficult, and make sure that the correct answer is labeled with 'Correct Answer:' and followed by the answer itself"
] |
2024-01-10 | yangjiao2/AskWalle-chatGPT | askWalle.py | #!/usr/bin/env python3
from dotenv import load_dotenv
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.vectorstores import Chroma
from langchain.llms import GPT4All, LlamaCpp
from langchain import PromptTemplate
import os
import argparse
import time
load_dotenv()
embeddings_model_name = os.environ.get("EMBEDDINGS_MODEL_NAME")
persist_directory = os.environ.get('PERSIST_DIRECTORY')
model_type = os.environ.get('MODEL_TYPE')
model_path = os.environ.get('MODEL_PATH')
model_n_ctx = os.environ.get('MODEL_N_CTX')
model_n_batch = int(os.environ.get('MODEL_N_BATCH',8))
target_source_chunks = int(os.environ.get('TARGET_SOURCE_CHUNKS',4))
from constants import CHROMA_SETTINGS
# Prompt template definition
template = """
[INST]\n
<<SYS>>\n
Use the following Context section and only that Context to answer the question at the end. Do not use your internal knowledge.
if the answer is a code piece, wrap around this code blocks like:
```
code block
```
If you don't know the answer, just say that you don't know. Don't try to make up an answer.
<</SYS>>\n\n
Context:
{context}
Question:
{question} [/INST]
Example:
"""
def main():
# Parse the command line arguments
args = parse_arguments()
embeddings = HuggingFaceEmbeddings(model_name=embeddings_model_name)
db = Chroma(persist_directory=persist_directory, embedding_function=embeddings, client_settings=CHROMA_SETTINGS)
retriever = db.as_retriever(search_kwargs={"k": target_source_chunks})
# activate/deactivate the streaming StdOut callback for LLMs
callbacks = [] if args.mute_stream else [StreamingStdOutCallbackHandler()]
# Prepare the LLM
if model_type:
if model_type == "LlamaCpp":
llm = LlamaCpp(model_path=model_path, n_ctx=model_n_ctx, n_batch=model_n_batch, callbacks=callbacks, verbose=False)
elif model_type == "GPT4All":
llm = GPT4All(model=model_path, n_ctx=model_n_ctx, backend='gptj', n_batch=model_n_batch, callbacks=callbacks, verbose=False)
else:
# raise exception if model_type is not supported
raise Exception(f"Model type {model_type} is not supported. Please choose one of the following: LlamaCpp, GPT4All")
qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever, return_source_documents= not args.hide_source, chain_type_kwargs= {
"prompt": PromptTemplate(
template=template,
input_variables=["context", "question"]
)
})
# Interactive questions and answers
while True:
query = input("\nEnter a query: ")
if query == "exit":
break
if query.strip() == "":
continue
# Get the answer from the chain
start = time.time()
res = qa(query)
answer, docs = res['result'], [] if args.hide_source else res['source_documents']
end = time.time()
# Print the result
print("\n\n> Question:")
print(query)
print(f"\n> Answer (took {round(end - start, 2)} s.):")
print(answer)
# Print the relevant sources used for the answer
for document in docs:
print("\n> Source: " + document.metadata["source"] + ":")
print(document.page_content)
def parse_arguments():
parser = argparse.ArgumentParser(description='askWalle: Ask questions to Confluence Documentation without an internet connection, '
'using the power of LLMs.')
parser.add_argument("--hide-source", "-S", action='store_true',
help='Use this flag to disable printing of source documents used for answers.')
parser.add_argument("--mute-stream", "-M",
action='store_true',
help='Use this flag to disable the streaming StdOut callback for LLMs.')
return parser.parse_args()
if __name__ == "__main__":
main()
| [
"\n[INST]\n\n<<SYS>>\n\nUse the following Context section and only that Context to answer the question at the end. Do not use your internal knowledge.\nif the answer is a code piece, wrap around this code blocks like:\n ```\n code block\n ```\nIf you don't know the answer, just say that you don't know. Don't try to make up an answer.\n<</SYS>>\n\n\n\nContext:\n{context}\n\nQuestion:\n{question} [/INST]\n\nExample:\n\n"
] |
2024-01-10 | ssss1029/datasets | datasets~openwebtext~openwebtext.py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""
import os
import re
from itertools import chain
import datasets
_CITATION = """\
@misc{Gokaslan2019OpenWeb,
title={OpenWebText Corpus},
author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
howpublished{\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
year={2019}
}
"""
_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""
_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
class Openwebtext(datasets.GeneratorBasedBuilder):
"""The Open WebText dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
description="Plain text",
version=datasets.Version("1.0.0"),
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({"text": datasets.Value("string")}),
homepage="https://skylion007.github.io/OpenWebTextCorpus/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_URL)
owt_dir = os.path.join(dl_dir, "openwebtext")
subset_xzs = [
os.path.join(owt_dir, file_name)
for file_name in sorted(os.listdir(owt_dir))
if file_name.endswith("xz") # filter out ...xz.lock
]
ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
nested_txt_files = [
[
os.path.join(ex_dir, txt_file_name)
for txt_file_name in sorted(os.listdir(ex_dir))
if txt_file_name.endswith("txt")
]
for ex_dir in ex_dirs
]
txt_files = chain(*nested_txt_files)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
]
def _generate_examples(self, txt_files):
"""Yields examples."""
for idx, filepath in enumerate(txt_files):
with open(filepath, encoding="utf-8") as f:
yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
| [] |
2024-01-10 | ShreehariVaasishta/prefect-play | proto_etl.py | from pathlib import Path
from typing import List
import pypdf
import weaviate
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.schema import Document
from langchain.vectorstores.weaviate import Weaviate
from prefect import flow, task
WEAVIATE_URL = "http://127.0.0.1:8080"
wclient = weaviate.Client(url="http://localhost:7080")
embedding = HuggingFaceEmbeddings(
model_name="sentence-transformers/all-mpnet-base-v2",
model_kwargs={"device": "cpu"},
encode_kwargs={"normalize_embeddings": False},
)
# Define a Prefect task to read a chunk of PDF files
@task
def read_pdf(pdf_file: Path):
with open(pdf_file, "rb") as f:
pdf_reader = pypdf.PdfReader(f, strict=False)
return [
Document(
page_content=page.extract_text(),
metadata={"source": str(pdf_file.absolute()), "page": page_number},
)
for page_number, page in enumerate(pdf_reader.pages)
]
def dump_to_weaviate(documents):
weaviatedb = Weaviate(
client=wclient,
by_text=False,
index_name="Testing",
text_key="test",
embedding=embedding,
)
weaviatedb.add_documents(documents)
@task
def dump_embeddings(pdf_file, cnt):
print(cnt, "Finished <<<<<")
return dump_to_weaviate(pdf_file)
@flow(log_prints=True)
def run_flow():
directory = (
"/home/sln/VFS/master-gdc-gdcdatasets-2020445568-2020445568/lcwa_gov_pdf_data/data" # Directory of PDFs
)
pdf_files: List[Path] = list(Path(directory).glob("**/*.pdf"))
for i, pdf_chunk in enumerate(pdf_files):
chunk_task = read_pdf.submit(pdf_chunk)
dump_embeddings.submit(chunk_task.result(), i)
print(f"Processed {i} of chunk {len(pdf_files)}")
# if i > 5:
# break
run_flow()
| [] |
2024-01-10 | ginschel/classis | classis_files~assistant-cli.py | from langchain import PromptTemplate, HuggingFaceHub, LLMChain
from dotenv import load_dotenv
import sys
#config
modelname = "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"
# load the Environment Variables.
load_dotenv()
def main():
# get user input
def get_text():
input_text = input("Chat: ")
return input_text
def chain_setup():
template = """<|prompter|>{question}<|endoftext|>
<|assistant|>"""
prompt = PromptTemplate(template=template, input_variables=["question"])
llm=HuggingFaceHub(repo_id=modelname, model_kwargs={"max_new_tokens":1200})
llm_chain=LLMChain(
llm=llm,
prompt=prompt
)
return llm_chain
user_input = ""
chatenabled = False
while True:
if not chatenabled:
try:
user_input = sys.argv[1]
except:
raise Exception("Error! You dint't write a prompt!")
else:
user_input = get_text()
if user_input == "exit":
break
# generate response
def generate_response(question, llm_chain):
response = llm_chain.run(question)
return response
## load LLM
llm_chain = chain_setup()
#generate response
response = generate_response(user_input, llm_chain)
print(response)
try:
if sys.argv[2] == "-chat":
chatenabled = True
continue
else:
break
except:
break
if __name__ == '__main__':
main()
| [
"question",
"<|prompter|>{question}<|endoftext|>\n <|assistant|>"
] |
2024-01-10 | akintomiwa/atopo | api-chain.py |
from langchain.chains.api import open_meteo_docs
from langchain.chains import APIChain
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import HumanMessagePromptTemplate, ChatPromptTemplate
from pydantic import BaseModel, Field
from langchain.output_parsers import PydanticOutputParser
load_dotenv()
OPENAI_MODEL = "gpt-3.5-turbo"
# OPENAI_MODEL = "gpt-4"
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
def main():
# setup llm
llm = ChatOpenAI()
chain_new = APIChain.from_llm_and_api_docs(
llm, open_meteo_docs.OPEN_METEO_DOCS, verbose=False)
target = input("Please enter your area of interest: ")
query = (f"What is is the weather like right now in {target} in degrees celcius. ")
result = chain_new.run(
query
)
print(result)
if __name__ == "__main__":
main() | [] |
2024-01-10 | akintomiwa/atopo | ref.py | import os
import streamlit as st
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain, SimpleSequentialChain, SequentialChain
from langchain.memory import ConversationBufferMemory
from langchain.utilities import WikipediaAPIWrapper
# V3 - using mmemory with Sequential chains
from apikey import OPENAI_API_KEY
os.environ['OPENAI_API_KEY'] = OPENAI_API_KEY
# App Framework
st.title('🦜️🔗 Youtube Script generator (GPT)')
prompt = st.text_input('Type in your prompt')
# prompt template
title_template = PromptTemplate(input_variables = ['topic'],
template = 'write me a youtube video title about {topic}')
script_template = PromptTemplate(input_variables = ['title', 'wiki_research'],
template = 'write me a youtube video script based on this title. TITLE: {title} while leveraging this wikipedia research: {wiki_research}')
# Memory
title_memory = ConversationBufferMemory(input_key='topic', memory_key='chat_history')
script_memory = ConversationBufferMemory(input_key='title', memory_key='chat_history')
# LLMs
llm = OpenAI(temperature=0.9)
title_chain = LLMChain(llm=llm, prompt=title_template, verbose = True, output_key='title', memory=title_memory)
script_chain = LLMChain(llm=llm, prompt=script_template, verbose = True, output_key='script', memory=script_memory)
wiki = WikipediaAPIWrapper()
# Display response if there is a prompt
if prompt:
title = title_chain.run(prompt)
wiki_research = wiki.run(prompt)
script = script_chain.run(title=title, wiki_research = wiki_research)
st.write(title)
st.write(script)
with st.expander('Title History'):
st.info(title_memory.buffer)
with st.expander('Script History'):
st.info(script_memory.buffer)
with st.expander('Wikipedia Research'):
st.info(wiki_research)
# # V2 - using multiple outputs in app
# #
# # # App Framework
# st.title('🦜️🔗 Youtube Script generator (GPT)')
# prompt = st.text_input('Type in your prompt')
# # prompt template
# title_template = PromptTemplate(input_variables = ['topic'],
# template = 'write me a youtube video title about {topic}')
# script_template = PromptTemplate(input_variables = ['title', 'wiki_research'],
# template = 'write me a youtube video script based on this title. TITLE: {title} while leveraging this wikipedia research: {wiki_research}')
# # LLMs
# llm = OpenAI(temperature=0.9)
# title_chain = LLMChain(llm=llm, prompt=title_template, verbose = True, output_key='title', memory=title_memory)
# script_chain = LLMChain(llm=llm, prompt=script_template, verbose = True, output_key='script', memory=script_memory)
# # order matters in chains
# sequential_chain = SequentialChain(
# chains=[title_chain, script_chain],
# input_variables=['topic'],
# output_variables=['title', 'script'],
# verbose = True)
# wiki = WikipediaAPIWrapper()
# # Display response if there is a prompt
# if prompt:
# response = sequential_chain({'topic': prompt})
# st.write(response['title'])
# st.write(response['script'])
# # =====
# # V1
# only outputs the last output of the sequential chain
# # SimpleSequentialChain
# # App Framework
# st.title(' Youtube GPT Creator')
# prompt = st.text_input('Type in your prompt')
# # prompt template
# title_template = PromptTemplate(
# input_variables = ['topic'],
# template = 'write me a youtube video title about {topic}'
# )
# script_template = PromptTemplate(
# input_variables = ['title'],
# template = 'write me a youtube video script based on this title. TITLE: {title}'
# )
# # LLMs
# llm = OpenAI(temperature=0.9)
# title_chain = LLMChain(llm=llm, prompt=title_template, verbose = True)
# script_chain = LLMChain(llm=llm, prompt=script_template, verbose = True)
# sequential_chain = SimpleSequentialChain(chains=[title_chain, script_chain], verbose = True)
# # Display response if there is a prompt
# if prompt:
# # response = llm(prompt)
# # response = title_chain.run(topic=prompt)
# response = sequential_chain.run(prompt)
# st.write(response)
| [
"write me a youtube video title about {topic}",
"Type in your prompt",
"write me a youtube video script based on this title. TITLE: {title} while leveraging this wikipedia research: {wiki_research}",
"wiki_research"
] |
2024-01-10 | Hydra-sjz/Modul | help_mod.py | class Script(object):
AI = [
{
"desc": "Generates AI response from OpenAI",
"cmds": ["gpt", "askgpt", "chatgpt"],
"usage": "/gpt Who are you?"
}
]
CARBON = [
{
"desc": "Creates a carbon in doc format",
"cmds": ["carbon"],
"usage": "/carbon reply to a text message or give some text as input"
},
{
"desc": "Creates a carbon in image format",
"cmds": ["icarbon"],
"usage": "/icarbon reply to a text message or give some text as input"
}
]
DEV = [
{
"desc": "Executes python code",
"cmds": ["eval", "e"],
"usage": "/e [python code]"
},
{
"desc": "Run bash/terminal cmd",
"cmds": ["bash", "sh"],
"usage": "/bash [cmd]"
}
]
FUN = [
{
"desc": "Get a cat image",
"cmds": ["cat"],
"usage": "/cat"
},
{
"desc": "Get a dog image",
"cmds": ["dog"],
"usage": "/dog"
},
{
"desc": "Get a panda image",
"cmds": ["panda"],
"usage": "/panda"
},
{
"desc": "Get a bored gif 🥱",
"cmds": ["bored"],
"usage": "/bored"
},
{
"desc": "get a image or gif of pikachu",
"cmds": ["pikachu"],
"usage": "/pikachu"
},
{
"desc": "Get a patting gif",
"cmds": ["pat"],
"usage": "/pat"
},
{
"desc": "Get a hugging gif",
"cmds": ["hug"],
"usage": "/hug"
},
{
"desc": "Get a winking gif",
"cmds": ["wink"],
"usage": "/wink"
}
]
FILETOOLS = [
{
"desc": "Downloads File to Local",
"cmds": ["download"],
"usage": "/download [reply to a doc/vid]"
},
{
"desc": "Upload Files from Local",
"cmds": ["upload"],
"usage": "/upload [filename/path of the file]"
},
]
GOOGLE = [
{
"desc": "Google searcher!",
"cmds": ["gs", "google"],
"usage": "/gs [text to search]"
},
]
GTA5 = [
{
"desc": "Gta-V wasted effect on replied image",
"cmds": ["wasted"],
"usage": "/wasted [reply to a photo]"
},
{
"desc": "Gta-V mission passed effect on replied image",
"cmds": ["passed"],
"usage": "/passed [reply to a photo]"
}
]
IMDB = [
{
"desc": "Get information about a Movie/Series",
"cmds": ["imdb"],
"usage": "/imdb [Movename/Series Name]"
}
]
IMAGETOOLS = [
{
"desc": "Create ur own memes",
"cmds": ["memify"],
"usage": "/memify [Upper text ; Lower text] | reply to a media"
},
{
"desc": "Crop Image Into Round & Cool Sticker",
"cmds": ["circle"],
"usage": "/circle [reply to a photo or sticker]"
},
{
"desc": "Get Fake Certificate With Given Name!",
"cmds": ["genca", "gencertificate"],
"usage": "/genca [Name on the certificate]"
},
{
"desc": "Enhance the replied notes!",
"cmds": ["hwn"],
"usage": "/hwn [Reply to Notes To Enhance It!]"
},
{
"desc": "Add Glitch effect to replied image",
"cmds": ["glitch"],
"usage": "/glitch [Reply to a image]"
},
{
"desc": "ghost the replied image/sticker",
"cmds": ["ghost"],
"usage": "/ghost [reply to a image or sticker]"
},
{
"desc": "Sketch the replied image",
"cmds": ["sketch"],
"usage": "/genca [reply to a image or sticker]"
},
{
"desc": "Colorize the replied photo",
"cmds": ["color"],
"usage": "/color [reply to a image or sticker]"
},
]
INSTADL = [
{
"desc": "Download post/reel from instagram",
"cmds": ["insta", "instadl", "insdl", "instadownload"],
"usage": "/instadl [instagram post/reel link]"
}
]
LOGOS = [
{
"desc": "Makes a logo for you with black bg",
"cmds": ["alogo"],
"usage": "/alogo [text for logo]"
},
{
"desc": "Makes a logo for you, try it out",
"cmds": ["slogo"],
"usage": "/slogo [text for logo]"
}
]
MISC = [
{
"desc": "sends the message u input or reply",
"cmds": ["send"],
"usage": "/send [ message / reply to message ]"
},
{
"desc": "Emojify text",
"cmds": ["emoji"],
"usage": "/emoji Stark"
},
{
"desc": "Emojify text with custom emoji",
"cmds": ["cmoji"],
"usage": "/send 🔥 Stark"
},
{
"desc": "Weebify ur text",
"cmds": ["weeb", "weebify"],
"usage": "/weebify [ input or reply to a message ]"
},
{
"desc": "Sends F",
"cmds": ["f", "ftext"],
"usage": "/f or /f [ custom emoji or a small text ]"
},
{
"desc": "Make a yt comment",
"cmds": ["ytc"],
"usage": "/ytc Hello I'm Stark"
},
{
"desc": "Makes a yt comment with random pfp",
"cmds": ["rytc"],
"usage": "/rytc Hello Im Stark"
}
]
MEDIAINFO = [
{
"desc": "Gets MediaInfo of Replied Video",
"cmds": ["mediainfo", "mediadata"],
"usage": "/mediainfo [Reply to a video]"
}
]
MONGODB = [
{
"desc": "Adds MongoDB to database so that u can access",
"cmds": ["adddb"],
"usage": "/adddb [mongo uri]"
},
{
"desc": "Get access to the MongoDB uri u added using /adddb",
"cmds": ["showdb"],
"usage": "/showdb"
}
]
PASTE = [
{
"desc": "Pastes the given text in spacebin",
"cmds": ["paste"],
"usage": "/paste [reply to message/text file]"
}
]
QR = [
{
"desc": "Generates qr for given text",
"cmds": ["qr"],
"usage": "/qr [text to make qr]"
}
]
QUOTLY = [
{
"desc": "Converts your text into a quote",
"cmds": ["quote", "qt", "qu", "q"],
"usage": "/q [reply to a text message / give text as input]\nNote:\n1. No need to use args like -r for reply | reply set as default\n2.Either give text as reply or input"
}
]
RAYSO = [
{
"desc": "Create cool code snippets [Dark Mode]",
"cmds": ["rayso"],
"usage": "/rayso [ theme / do not specify for ramdom | reply to a message or a text file ]\nUse /rayso -l to list available themes"
},
{
"desc": "Create cool code snippets [Light Mode]",
"cmds": ["lrayso"],
"usage": "/lrayso [ theme / do not specify for ramdom | reply to a message or a text file ]\nUse /rayso -l to list available themes"
}
]
STICKERS = [
{
"desc": "Kang the replied sticker",
"cmds": ["kang"],
"usage": "/kang [ emoji | reply to the sticker ]"
},
{
"desc": "Delete sticker from ur pack",
"cmds": ["delsticker", "del_sticker"],
"usage": "/delsticker [ reply to sticker u want to delete ]"
},
{
"desc": "List all ur pack's",
"cmds": ["mypacks"],
"usage": "/mypacks"
},
{
"desc": "Creates a sticker with given text",
"cmds": ["stcr"],
"usage": "/stcr Mr.Stark"
}
]
SYSTEM = [
{
"desc": "Ping-Pong",
"cmds": ["p", "ping"],
},
{
"desc": " whether the bot is alive or not",
"cmds": ["alive"],
},
{
"desc": "Restarts the bot",
"cmds": ["restart"],
}
]
TELEGRAPH = [
{
"desc": "Creates a telegraph",
"cmds": ["/telegraph", "/tgraph"],
"usage": "/tgraph [title for the telegraph | reply to a text message]"
}
]
TRANSLATE = [
{
"desc": "Translates the replied message",
"cmds": ["tr", "translate"],
"usage": "/tr [language code | reply to a text message]"
}
]
UPDATE = [
{
"desc": "Updates the system",
"cmds": ["up", "update"],
},
{
"desc": "Deletes snippets in gitlab",
"cmds": ["d"],
}
]
URLUPLOADER = [
{
"desc": "download's file from given link",
"cmds": ["urlupload"],
"usage": "/urlupload [direct link of the file]"
}
]
WHOIS = [
{
"desc": "Know who the replied person is",
"cmds": ["info", "whois"],
"usage": "/info [user id/username | reply to a user message]"
}
]
WRITE = [
{
"desc": "Writes given text on a white paper",
"cmds": ["write"],
"usage": "/write hello"
}
]
| [] |
2024-01-10 | convictional/convict-langchain | tests~integration_tests~llms~test_openai.py | """Test OpenAI API wrapper."""
from pathlib import Path
from typing import Generator
import pytest
from langchain.callbacks.base import CallbackManager
from langchain.llms.loading import load_llm
from langchain.llms.openai import OpenAI, OpenAIChat
from langchain.schema import LLMResult
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
def test_openai_call() -> None:
"""Test valid call to openai."""
llm = OpenAI(max_tokens=10)
output = llm("Say foo:")
assert isinstance(output, str)
def test_openai_extra_kwargs() -> None:
"""Test extra kwargs to openai."""
# Check that foo is saved in extra_kwargs.
llm = OpenAI(foo=3, max_tokens=10)
assert llm.max_tokens == 10
assert llm.model_kwargs == {"foo": 3}
# Test that if extra_kwargs are provided, they are added to it.
llm = OpenAI(foo=3, model_kwargs={"bar": 2})
assert llm.model_kwargs == {"foo": 3, "bar": 2}
# Test that if provided twice it errors
with pytest.raises(ValueError):
OpenAI(foo=3, model_kwargs={"foo": 2})
def test_openai_llm_output_contains_model_name() -> None:
"""Test llm_output contains model_name."""
llm = OpenAI(max_tokens=10)
llm_result = llm.generate(["Hello, how are you?"])
assert llm_result.llm_output is not None
assert llm_result.llm_output["model_name"] == llm.model_name
def test_openai_stop_valid() -> None:
"""Test openai stop logic on valid configuration."""
query = "write an ordered list of five items"
first_llm = OpenAI(stop="3", temperature=0)
first_output = first_llm(query)
second_llm = OpenAI(temperature=0)
second_output = second_llm(query, stop=["3"])
# Because it stops on new lines, shouldn't return anything
assert first_output == second_output
def test_openai_stop_error() -> None:
"""Test openai stop logic on bad configuration."""
llm = OpenAI(stop="3", temperature=0)
with pytest.raises(ValueError):
llm("write an ordered list of five items", stop=["\n"])
def test_saving_loading_llm(tmp_path: Path) -> None:
"""Test saving/loading an OpenAI LLM."""
llm = OpenAI(max_tokens=10)
llm.save(file_path=tmp_path / "openai.yaml")
loaded_llm = load_llm(tmp_path / "openai.yaml")
assert loaded_llm == llm
def test_openai_streaming() -> None:
"""Test streaming tokens from OpenAI."""
llm = OpenAI(max_tokens=10)
generator = llm.stream("I'm Pickle Rick")
assert isinstance(generator, Generator)
for token in generator:
assert isinstance(token["choices"][0]["text"], str)
def test_openai_streaming_error() -> None:
"""Test error handling in stream."""
llm = OpenAI(best_of=2)
with pytest.raises(ValueError):
llm.stream("I'm Pickle Rick")
def test_openai_streaming_best_of_error() -> None:
"""Test validation for streaming fails if best_of is not 1."""
with pytest.raises(ValueError):
OpenAI(best_of=2, streaming=True)
def test_openai_streaming_n_error() -> None:
"""Test validation for streaming fails if n is not 1."""
with pytest.raises(ValueError):
OpenAI(n=2, streaming=True)
def test_openai_streaming_multiple_prompts_error() -> None:
"""Test validation for streaming fails if multiple prompts are given."""
with pytest.raises(ValueError):
OpenAI(streaming=True).generate(["I'm Pickle Rick", "I'm Pickle Rick"])
def test_openai_streaming_call() -> None:
"""Test valid call to openai."""
llm = OpenAI(max_tokens=10, streaming=True)
output = llm("Say foo:")
assert isinstance(output, str)
def test_openai_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llm = OpenAI(
max_tokens=10,
streaming=True,
temperature=0,
callback_manager=callback_manager,
verbose=True,
)
llm("Write me a sentence with 100 words.")
assert callback_handler.llm_streams == 10
@pytest.mark.asyncio
async def test_openai_async_generate() -> None:
"""Test async generation."""
llm = OpenAI(max_tokens=10)
output = await llm.agenerate(["Hello, how are you?"])
assert isinstance(output, LLMResult)
@pytest.mark.asyncio
async def test_openai_async_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llm = OpenAI(
max_tokens=10,
streaming=True,
temperature=0,
callback_manager=callback_manager,
verbose=True,
)
result = await llm.agenerate(["Write me a sentence with 100 words."])
assert callback_handler.llm_streams == 10
assert isinstance(result, LLMResult)
def test_openai_chat_wrong_class() -> None:
"""Test OpenAIChat with wrong class still works."""
llm = OpenAI(model_name="gpt-3.5-turbo")
output = llm("Say foo:")
assert isinstance(output, str)
def test_openai_chat() -> None:
"""Test OpenAIChat."""
llm = OpenAIChat(max_tokens=10)
output = llm("Say foo:")
assert isinstance(output, str)
def test_openai_chat_streaming() -> None:
"""Test OpenAIChat with streaming option."""
llm = OpenAIChat(max_tokens=10, streaming=True)
output = llm("Say foo:")
assert isinstance(output, str)
def test_openai_chat_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llm = OpenAIChat(
max_tokens=10,
streaming=True,
temperature=0,
callback_manager=callback_manager,
verbose=True,
)
llm("Write me a sentence with 100 words.")
assert callback_handler.llm_streams != 0
@pytest.mark.asyncio
async def test_openai_chat_async_generate() -> None:
"""Test async chat."""
llm = OpenAIChat(max_tokens=10)
output = await llm.agenerate(["Hello, how are you?"])
assert isinstance(output, LLMResult)
@pytest.mark.asyncio
async def test_openai_chat_async_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llm = OpenAIChat(
max_tokens=10,
streaming=True,
temperature=0,
callback_manager=callback_manager,
verbose=True,
)
result = await llm.agenerate(["Write me a sentence with 100 words."])
assert callback_handler.llm_streams != 0
assert isinstance(result, LLMResult)
def test_openai_modelname_to_contextsize_valid() -> None:
"""Test model name to context size on a valid model."""
assert OpenAI().modelname_to_contextsize("davinci") == 2049
def test_openai_modelname_to_contextsize_invalid() -> None:
"""Test model name to context size on an invalid model."""
with pytest.raises(ValueError):
OpenAI().modelname_to_contextsize("foobar")
| [] |
2024-01-10 | dclin/foursquare_venue_search | api_openai.py | import openai
import streamlit as st
openai.api_key = st.secrets['openai']['api_key']
def get_embedding(category_str):
try:
response = openai.Embedding.create(
input=category_str,
model="text-embedding-ada-002"
)
embeddings = response['data'][0]['embedding']
return embeddings
except Exception as e:
raise e
def get_moderation(user_query):
try:
moderation = openai.Moderation.create(
input=user_query
)
moderation_result = moderation['results'][0]
flagged_categories = [category for category, value in moderation_result['categories'].items() if value]
return {'flagged': moderation_result['flagged'], 'flagged_categories':flagged_categories}
except Exception as e:
raise e
| [] |
2024-01-10 | dclin/foursquare_venue_search | scripts~embed_categories.py | import snowflake.connector
from snowflake.connector import DictCursor
import openai
openai.api_key = "YOUR_OPENAI_API_KEY"
# Snowflake credential with update permission on the category_lookup table
# The database and schema of where the category_lookup table is housed.
snowflake_credential = {
'user': "YOUR_SNOWFLAKE_USER",
'password': "YOUR_SNOWFLAKE_PASSWORD",
'account': "YOUR_SNOWFLAKE_ACCOUNT",
'warehouse': "YOUR_SNOWFLAKE_WAREHOUSE",
'database': "YOUR_SNOWFLAKE_DATABASE",
'schema': "YOUR_SNOWFLAKE_SCHEMA"
}
def get_embedding(category_str):
try:
response = openai.Embedding.create(
input=category_str,
model="text-embedding-ada-002"
)
embeddings = response['data'][0]['embedding']
return embeddings
except Exception as e:
raise e
def init_connection():
return snowflake.connector.connect(**snowflake_credential)
# SQL assumes your Snowflake credential is in the database and schema of your category_lookup table
def get_all_categories(conn):
sql = """SELECT category_id, category FROM category_lookup ORDER BY category_id"""
categories = _run_query(conn,sql)
return categories
# SQL assumes your Snowflake credential is in the database and schema of your category_lookup table
def update_embedding(conn, category_id, embeddings):
sql = """
UPDATE category_lookup
SET embedding='{0}'
WHERE category_id = {1}
""".format(embeddings, category_id)
try:
_run_query(conn, sql)
return 1
except:
return 0
def _run_query(conn, query_str):
with conn.cursor(DictCursor) as cur:
cur.execute(query_str)
return cur.fetchall()
conn = init_connection()
categories = get_all_categories(conn)
for category in categories:
print(f"Embed category_id: {category['CATEGORY_ID']}")
try:
embeddings = get_embedding(category['CATEGORY'])
print(f"Update category_id: {category['CATEGORY_ID']}")
update_category = update_embedding(conn, category['CATEGORY_ID'], embeddings)
if update_category == 1:
print(f"Updated category_id: {category['CATEGORY_ID']}")
except:
pass
| [] |
2024-01-10 | benzproduction/bachelorarbeit | evaluation~data~curate_datasets.py | from helpers import get_env
import openai
import pandas as pd
import re
import blobfile as bf
import json
import random
import os
import jsonlines
from typing import List, Tuple
import copy
API_KEY, RESOURCE_ENDPOINT = get_env("azure-openai")
openai.api_type = "azure"
openai.api_key = API_KEY
openai.api_base = RESOURCE_ENDPOINT
openai.api_version = "2022-12-01"
def normalizeText(text: str) -> str:
text = text.replace("Question:", "")
text = text.replace("Answer:", "")
text = re.sub(r"Q\d:", "", text)
text = re.sub(r"A\d:", "", text)
text = re.sub(r"Q:", "", text)
text = re.sub(r"A:", "", text)
text = re.sub(r"\s+", " ", text)
return text
def manuelly_extract_matches(text: str) -> List[Tuple[str, str]]:
text = text.replace("?;", "?")
pairList = text.split(";")
pairList = [normalizeText(pair) for pair in pairList]
pairList = [pair.split("?") for pair in pairList]
pairList = [(pair[0] + "?", pair[1] if len(pair) == 2 else "") for pair in pairList]
return pairList
def text_to_dictionary(text):
text = text.replace("\n", " ")
pattern1 = r"Question: (.*?\?) Answer: (.*?)(?=Question:|$)"
pattern2 = r"Question: (.*?) Answer: (.*?)(?=Question:|$)"
matches = re.findall(pattern1, text, re.DOTALL)
if len(matches) == 0:
matches = re.findall(pattern2, text, re.DOTALL)
if len(matches) == 0:
matches = manuelly_extract_matches(text)
result = []
for match in matches:
question, answer = match
question = question.strip()
answer = answer.strip().replace(";", "")
result.append({"question": question, "answer": answer})
return result
def edit_jsonl_line(jsonl_file, line_number, edit_func) -> None:
temp_file = jsonl_file + ".tmp"
with open(jsonl_file, 'r') as input_file, open(temp_file, 'w') as temp_output:
for i, line in enumerate(input_file):
if i == line_number:
json_data = json.loads(line)
edited_json_data = edit_func(json_data)
if len(edited_json_data["qa_pairs"]) == 0:
continue
edited_line = json.dumps(edited_json_data)
temp_output.write(edited_line + '\n')
else:
temp_output.write(line)
input_file.close()
temp_output.close()
os.remove(jsonl_file)
os.rename(temp_file, jsonl_file)
def print_category_stats(category: str) -> None:
with jsonlines.open(f"all_dataset.jsonl") as reader:
dataset = list(reader)
dataset = [entry for entry in dataset if entry["category"] == category]
num_questions = sum([len(entry["qa_pairs"]) for entry in dataset])
num_paragraphs = sum([len(entry["paragraph"]) for entry in dataset])
num_entries = len(dataset)
print(f"Category: {category}")
print(f"Number of entries: {num_entries}")
print(f"Number of paragraphs: {num_paragraphs}")
print(f"Number of questions: {num_questions}")
textchunks_df = pd.read_csv("textchunks_df.csv")
textchunks_df = textchunks_df[textchunks_df["text"].notna()]
CATEGORY_DICT = {
"SAME_WORDING_ONE_PARAGRAPH": "The questions should have the same wording as the given text.",
"SAME_WORDING_MULTIPLE_PARAGRAPHS": "The questions should be factoid in nature, and use the same wording as the given text, but each requires an understanding of information presented in at least two of the paragraphs to answer. The aim is to assess comprehension of the given content and the interrelation of ideas across the paragraphs.",
# "DIFFERENT_WORDING_ONE_PARAGRAPH": "The questions should have different wording than the given text.",
# "DIFFERENT_WORDING_MULTIPLE_PARAGRAPHS": "The questions should be factoid in nature, and use a different wording as the given text, but each requires an understanding of information presented in at least two of the paragraphs to answer. The aim is to assess comprehension of the given content and the interrelation of ideas across the paragraphs.",
}
prompt = """\
You are generating 5 factoid question answer pairs for a given text.
[BEGIN DATA]
************
[Text]: {text}
************
[END DATA]
{category_description}
Please format the answer as a series of questions followed by their corresponding answer, separated by semicolons, in the following format: [Question]: [Answer]; [Question]: [Answer]; [Question]: [Answer]; [Question]: [Answer]; [Question]: [Answer];
"""
q2Create = 50
for category, category_desc in CATEGORY_DICT.items():
print_category_stats(category)
run_type = input(f"Would you like to evaluate questions for category {category}? [y/n/c]: ")
if run_type == "y":
print(f"Evaluating questions for category {category}...")
with bf.BlobFile("all_dataset.jsonl", "rb") as f:
for i, line in enumerate(f):
data = json.loads(line)
if data["category"] != category:
continue
if "evaluated" in data and data["evaluated"]:
continue
print("Paragraph:")
for paragraph in data["paragraph"]:
print(paragraph["text"])
eval_paragraph = input("Evaluate paragraph? [y/n]: ")
if eval_paragraph == "y":
c_data = copy.deepcopy(data)
for qa_pair in c_data["qa_pairs"]:
print(f"Question: {qa_pair['question']}")
print(f"Answer: {qa_pair['answer']}")
keep = input("Keep? [y/n]: ")
if keep == "n":
data["qa_pairs"].remove(qa_pair)
edit_jsonl_line("all_dataset.jsonl", i, lambda x: data)
data["evaluated"] = True
edit_jsonl_line("all_dataset.jsonl", i, lambda x: data)
elif run_type == "c":
continue
else:
print(f"Generating questions for category {category}...")
nrCreated = 0
while nrCreated < q2Create:
if "ONE_PARAGRAPH" in category:
sample = textchunks_df.sample()
sample = sample.drop(columns=["embedding"])
sample_text = sample["text"].values[0]
else:
idx = random.randint(0, 12)
cluster_counts = textchunks_df.Cluster.value_counts()
cluster_counts = cluster_counts[cluster_counts > 9]
cluster_counts = cluster_counts.sort_values(ascending=False)
sample_df = textchunks_df[textchunks_df.Cluster == cluster_counts.index[idx]]
num_paragraphs = random.randint(2, 3)
# sample_df = textchunks_df[textchunks_df["Cluster"] == cluster]
samples = sample_df.sample(num_paragraphs)
sample_text = "; ".join([f"\n\nParagraph {i+1}: {text}" for i, text in enumerate(samples["text"].values)])
formatted_prompt = prompt.format(text=sample_text, category_description=category_desc)
if input("Do it yourself? [y/n]: ") == "y":
print(formatted_prompt)
result = input("Enter your questions and answers: ")
else:
result = openai.Completion.create(
engine="davinci",
prompt=formatted_prompt,
temperature=0.9,
max_tokens=1000,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)["choices"][0]["text"]
qa_pairs = text_to_dictionary(result)
with bf.BlobFile("all_dataset.jsonl", "ab") as f:
f.write(
(json.dumps(
{
"category": category,
"paragraph": sample.to_dict(orient='records') if "ONE_PARAGRAPH" in category else samples.to_dict(orient='records'),
"qa_pairs": qa_pairs,
}
)
+ "\n").encode("utf-8")
)
nrCreated += 5
| [
"You are generating 5 factoid question answer pairs for a given text.\n[BEGIN DATA]\n************\n[Text]: {text}\n************\n[END DATA]\n{category_description}\nPlease format the answer as a series of questions followed by their corresponding answer, separated by semicolons, in the following format: [Question]: [Answer]; [Question]: [Answer]; [Question]: [Answer]; [Question]: [Answer]; [Question]: [Answer];\n"
] |
2024-01-10 | benzproduction/bachelorarbeit | evaluation~components~llms.py | from typing import Any, Optional, List, Union, Callable, Iterable
from base import CompletionFn, CompletionResult, EmbeddingsFn, RetrieverFn
from abc import ABC, abstractmethod
from dataclasses import dataclass
import logging
import backoff
import openai
import pandas as pd
import string
import re
INVALID_STR = "__invalid__"
MATCH_FNS = {
"include": lambda x, y: float(x in y),
"exact": lambda x, y: float(x == y),
"endswith": lambda x, y: x.endswith(y),
"starts_or_endswith": lambda x, y: x.startswith(y) or x.endswith(y),
}
@backoff.on_exception(
wait_gen=backoff.expo,
exception=(
openai.error.ServiceUnavailableError,
openai.error.APIError,
openai.error.RateLimitError,
openai.error.APIConnectionError,
openai.error.Timeout,
),
max_value=60,
factor=1.5,
)
def openai_completion_create_retrying(*args, **kwargs):
"""
Helper function for creating a completion.
`args` and `kwargs` match what is accepted by `openai.Completion.create`.
"""
result = openai.Completion.create(*args, **kwargs)
if "error" in result:
logging.warning(result)
raise openai.error.APIError(result["error"])
return result
def formatSourcesDF(sources) -> str:
"""
Helper function for formatting a pandas dataframe into a string for the Prompt.
"""
if isinstance(sources, pd.DataFrame):
assert "text" in sources.columns, "If sources are provided as a pandas dataframe, it must have a column named 'text'"
assert "filename" in sources.columns or "key" in sources.columns, "If sources are provided as a pandas dataframe, it must have a column named 'filename' or 'key'"
for i, row in sources.iterrows():
if "key" in sources.columns:
if row['text'].startswith(f"{row['key']}: "):
continue
sources.loc[i,'text'] = f"{row['key']}: {row['text']};"
else:
if row['text'].startswith(f"{row['filename']}: "):
continue
sources.loc[i,'text'] = f"{row['filename']}: {row['text']};"
return sources['text'].str.cat(sep="\n\n")
return sources
def formatSourcesList(sources) -> str:
"""
Helper function for formatting a list of dicts into a string for the Prompt.
"""
if isinstance(sources, list):
if all(isinstance(source, str) for source in sources):
return "\n\n".join(sources)
assert all(
isinstance(source, dict) and "text" in source and ("key" in source or "filename" in source)
for source in sources
), "If sources are provided as a list of dicts, they must have keys 'text' and 'key' or 'filename'"
for i, source in enumerate(sources):
if "key" in source:
sources[i]["text"] = f"{source['key']}: {source['text']};"
else:
sources[i]["text"] = f"{source['filename']}: {source['text']};"
return "\n\n".join([source["text"] for source in sources])
return sources
def format_necessary(template: str, allow_missing: bool = False, **kwargs: dict[str, str]) -> str:
"""Format a template string with only necessary kwargs."""
keys = [k[1] for k in string.Formatter().parse(template) if k[1]]
if allow_missing:
assert (
len([k for k in keys if k in kwargs]) > 0
), f"Required: {keys}, got: {sorted(kwargs)}, no inputs are used.\nTemplate:\n{template}"
cur_keys = {k: kwargs.get(k, "{" + k + "}") for k in keys}
else:
assert all(
k in kwargs for k in keys
), f"Required: {keys}, got: {sorted(kwargs)}.\nTemplate:\n{template}"
cur_keys = {k: kwargs[k] for k in keys}
return template.format(**cur_keys)
@dataclass
class Prompt(ABC):
"""
A `Prompt` encapsulates everything required to present the `raw_prompt` in different formats.
"""
@abstractmethod
def to_formatted_prompt(self):
"""
Return the actual data to be passed as the `prompt` field to your model.
"""
class CompletionPrompt(Prompt):
def __init__(self, template: str, query: str, sources: Union[str, pd.DataFrame, list]):
assert "{query}" in template, "Prompt template must contain {query}"
assert "{sources}" in template, "Prompt template must contain {sources}"
self.template = template
self.query = query
# Format sources
if isinstance(sources, pd.DataFrame):
sources = formatSourcesDF(sources)
if isinstance(sources, list):
sources = formatSourcesList(sources)
if not isinstance(sources, str):
raise ValueError(f"Sources must be a str, list, or pandas dataframe. Got {type(sources)}")
self.sources = sources
def to_formatted_prompt(self):
return format_necessary(self.template, query=self.query, sources=self.sources)
class OpenAIBaseCompletionResult(CompletionResult):
def __init__(self, raw_data: Any, prompt: Any, sources: Optional[Any] = None):
self.raw_data = raw_data
self.prompt = prompt
self.sources = sources
def get_completions(self) -> list[str]:
raise NotImplementedError
def get_sources(self) -> Optional[Any]:
raise NotImplementedError
class OpenAICompletionResult(OpenAIBaseCompletionResult):
def get_completions(self) -> list[str]:
completions = []
if self.raw_data and "choices" in self.raw_data:
for choice in self.raw_data["choices"]:
if "text" in choice:
completions.append(choice["text"])
elif "message" in choice:
completions.append(choice["message"]["content"])
return completions
def get_sources(self) -> Optional[Any]:
if isinstance(self.sources, pd.DataFrame):
return formatSourcesDF(self.sources)
return self.sources
class OpenAICompletionFn(CompletionFn):
def __init__(
self,
api_key: str,
api_base: str,
deployment_name: Optional[str] = "text-davinci-003",
api_type: Optional[str] = "azure",
api_version: Optional[str] = "2022-12-01",
extra_options: Optional[dict] = {},
**kwargs,
):
self.api_key = api_key
self.api_base = api_base
self.deployment_name = deployment_name
self.api_type = api_type
self.api_version = api_version
self.extra_options = extra_options
def __call__(
self,
prompt_template: str,
query: str,
sources: Optional[Union[str, pd.DataFrame, list]] = None,
embedder: Optional[EmbeddingsFn] = None,
retriever: Optional[RetrieverFn] = None,
k: Optional[int] = 5,
**kwargs,
) -> OpenAICompletionResult:
assert sources or (isinstance(embedder, EmbeddingsFn) and isinstance(retriever, RetrieverFn)), "Either sources or an embedder and retriever must be provided"
if not sources:
sources = retriever(query, embedder, k=k)
prompt = CompletionPrompt(template=prompt_template, query=query, sources=sources)
result = openai_completion_create_retrying(
engine=self.deployment_name,
prompt=prompt.to_formatted_prompt(),
api_key=self.api_key,
api_base=self.api_base,
api_type=self.api_type,
api_version=self.api_version,
**self.extra_options,
)
result = OpenAICompletionResult(raw_data=result, prompt=prompt, sources=sources)
return result
def __repr__(self):
return f"OpenAICompletionFn(deployment_name={self.deployment_name}, extra_options={self.extra_options})"
class OpenAICompletion2StepFn(CompletionFn):
def __init__(
self,
api_key: str,
api_base: str,
deployment_name: Optional[str] = "text-davinci-003",
api_type: Optional[str] = "azure",
api_version: Optional[str] = "2022-12-01",
extra_options: Optional[dict] = {},
**kwargs,
):
self.api_key = api_key
self.api_base = api_base
self.deployment_name = deployment_name
self.api_type = api_type
self.api_version = api_version
self.extra_options = extra_options
def _get_choice(self, text: str, eval_type: str, match_fn: Union[str, Callable], choice_strings: Iterable[str]
) -> str:
"""Clean the answer string to a choice string to one of choice_strings. Return '__invalid__.' if no match."""
if isinstance(match_fn, str):
match_fn = MATCH_FNS[match_fn]
lines = text.strip().split("\n")
if eval_type.startswith("cot_classify"):
lines = lines[::-1] # reverse lines
for line in lines:
line = line.strip()
line = "".join(c for c in line if c not in string.punctuation)
if not line:
continue
for choice in choice_strings:
if match_fn(line, choice):
return choice
logging.warn(f"Choices {choice_strings} not parsable for {eval_type}: {text}")
return INVALID_STR
def __call__(
self,
step1_prompt: str,
step2_prompt: str,
query: str,
sources: Optional[Union[str, pd.DataFrame, list]] = None,
embedder: Optional[EmbeddingsFn] = None,
retriever: Optional[RetrieverFn] = None,
k: Optional[int] = 5,
**kwargs,
) -> OpenAICompletionResult:
assert sources or (isinstance(embedder, EmbeddingsFn) and isinstance(retriever, RetrieverFn)), "Either sources or an embedder and retriever must be provided"
if not sources:
sources = retriever(query, embedder, k=k)
prompt1 = CompletionPrompt(template=step1_prompt, query=query, sources=sources)
prompt2 = CompletionPrompt(template=step2_prompt, query=query, sources=sources)
# STEP 1 asks the model to check weather the sources are enough to answer the query
# STEP 2 asks the model to answer the query, but only if step 1 returned true
# result = openai_completion_create_retrying(
# engine=self.deployment_name,
# prompt=prompt.to_formatted_prompt(),
# api_key=self.api_key,
# api_base=self.api_base,
# api_type=self.api_type,
# api_version=self.api_version,
# **self.extra_options,
# )
# result = OpenAICompletionResult(raw_data=result, prompt=prompt, sources=sources)
# return result
def __repr__(self):
return f"OpenAICompletionFn(deployment_name={self.deployment_name}, extra_options={self.extra_options})" | [] |
2024-01-10 | benzproduction/bachelorarbeit | redis~database.py | import pandas as pd
import numpy as np
from redis import Redis
from redis.commands.search.field import VectorField
from redis.commands.search.field import TextField, NumericField
from redis.commands.search.query import Query
from openai.embeddings_utils import get_embedding
from typing import List
from models import DocumentChunk
from config import EMBEDDINGS_MODEL, PREFIX, VECTOR_FIELD_NAME
# Get a Redis connection
def get_redis_connection(password=None,host='localhost',port='6379',db=0):
r = Redis(host=host, port=port, db=db,decode_responses=False, password=password)
return r
# Create a Redis index to hold our data
def create_hnsw_index (redis_conn,vector_field_name,vector_dimensions=1536, distance_metric='COSINE'):
redis_conn.ft().create_index([
VectorField(vector_field_name, "HNSW", {"TYPE": "FLOAT32", "DIM": vector_dimensions, "DISTANCE_METRIC": distance_metric}),
TextField("filename"),
TextField("text_chunk"),
NumericField("file_chunk_index")
])
# Create a Redis pipeline to load all the vectors and their metadata
def load_vectors(client:Redis, input_list, vector_field_name):
p = client.pipeline(transaction=False)
for text in input_list:
#hash key
key=f"{PREFIX}:{text['id']}"
#hash values
item_metadata = text['metadata']
#
item_keywords_vector = np.array(text['vector'],dtype= 'float32').tobytes()
item_metadata[vector_field_name]=item_keywords_vector
# HSET
p.hset(key,mapping=item_metadata)
p.execute()
def save_chunks(r:Redis, vectors: List[DocumentChunk], index: str) -> None:
"""
Saves the vectors to Redis
"""
assert r.ping(), "Redis is not connected"
try:
print(f"Docs in index: {r.ft(index).info()['num_docs']}")
except Exception as e:
print(f"Index {index} does not exist. Exiting")
exit(1)
p = r.pipeline(transaction=False)
# load vectors
for vector in vectors:
#hash key
key=f"{index}:{vector.id}"
item_metadata = {}
item_metadata["filename"] = vector.metadata.source_filename
item_metadata["text_chunk"] = vector.text
item_metadata["page"] = vector.metadata.page
item_keywords_vector = np.array(vector.embedding,dtype= 'float32').tobytes()
item_metadata[VECTOR_FIELD_NAME]=item_keywords_vector
# HSET
r.hset(key,mapping=item_metadata)
p.execute()
# Make query to Redis
def query_redis(redis_conn,query,index_name, top_k=5):
## Creates embedding vector from user query
# embedded_query = np.array(openai.Embedding.create(
# input=query,
# model=EMBEDDINGS_MODEL,
# )["data"][0]['embedding'], dtype=np.float32).tobytes()
# above code has to rewritten to use the azure openai services
embedded_query = np.array(get_embedding(query, engine = 'text-embedding-ada-002'), dtype=np.float32).tobytes()
#prepare the query
q = Query(f'*=>[KNN {top_k} @{VECTOR_FIELD_NAME} $vec_param AS vector_score]').sort_by('vector_score').paging(0,top_k).return_fields('vector_score','filename','text_chunk','text_chunk_index').dialect(2)
params_dict = {"vec_param": embedded_query}
#Execute the query
results = redis_conn.ft(index_name).search(q, query_params = params_dict)
return results
# Get mapped documents from Weaviate results
def get_redis_results(redis_conn,query,index_name):
# Get most relevant documents from Redis
query_result = query_redis(redis_conn,query,index_name)
# Extract info into a list
query_result_list = []
for i, result in enumerate(query_result.docs):
result_order = i
text = result.text_chunk
score = result.vector_score
query_result_list.append((result_order,text,score))
# Display result as a DataFrame for ease of us
result_df = pd.DataFrame(query_result_list)
result_df.columns = ['id','result','certainty',]
return result_df
def get_redis_results2(redis_conn,query,index_name, top_k=5):
# Get most relevant documents from Redis
query_result = query_redis(redis_conn,query,index_name, top_k=top_k)
# if the result is empty, return an empty dataframe
if query_result.total == 0:
return pd.DataFrame()
# Extract info into a list
query_result_list = []
for i, result in enumerate(query_result.docs):
result_order = i
text = result.text_chunk
score = result.vector_score
filename = result.id
query_result_list.append((result_order,text,score, filename))
# Display result as a DataFrame for ease of us
result_df = pd.DataFrame(query_result_list)
result_df.columns = ['id','result','certainty', 'filename']
return result_df | [] |
2024-01-10 | AkiraRy/MemoryBot | repl~botRepl.py | import os
import asyncio
import openai
import discord
from dotenv import load_dotenv
from keep_alive import keep_alive
import traceback
from plugins import *
load_dotenv()
TOKEN = os.environ['DISCORD_TOKEN']
intents = discord.Intents.default()
intents.message_content = True
openai.api_key = os.environ['OPEN_AI']
CHANNEL_ID = int(os.environ['BOT_CHAT'])
class MemoryBot(discord.Client):
async def on_ready(self):
print(f'We have logged in as {self.user}')
await self.get_channel(CHANNEL_ID).send('Hello, I am now online!')
async def on_message(self, message):
if message.author == self.user:
return
greetings = [
'hi memorybot',
'hello memorybot',
'what`s up memory',
]
answers = [
'Hello there ' + message.author.name + '!',
'Hello ' + message.author.name + '!',
'Good, how are you ' + message.author.name + '?',
]
if message.content.lower() in greetings:
response = random.choice(answers)
await message.channel.send(response)
#TODO send <=5 pictures every day that were 1 week, 1 month,1 year ago on your choice
if message.content.upper() == "CHAT ACTIVATIAN":
await message.channel.send("---STARTING ACTIVATION---")
#history = ''
#print(history)
#with open('./history.txt','r', encoding='utf-8') as f:
#history = f.read()
#history = history.replace('\n', ' ')
#print(history)
message_history = [
#{"role": "system", "content": history},
#{"role": "user", "content": "hi how is your day"},
]
# {"role": "system", "content": history},
# {"role": "user", "content": history},
while True:
messages = (
await self.wait_for('message', check=lambda m: m.author == message.author)).content.lower()
if messages == "stop":
await message.channel.send("---ACTIVATION STOPPED---")
break
message_history.append({"role": "user", "content": messages})
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=message_history
)
print(response)
message_history.append({"role": "assistant", "content": response.choices[0].message.content})
await message.channel.send(response.choices[0].message.content)
if message.content.lower() == "send gmage":
dict = search_folders()
# print(dic)
#TODO add parameters to env, and use them to pic a folder
my_folder = search_folderID(dict, 'Anime')
filedic = search_files(my_folder)
for items in filedic:
print(items)
await self.send_pic(items['id'], message.channel, name=items['name'], google=True)
async def send_pic(self, image_path, channel, name="" , spoiler=False, google=False):
if google:
picture = discord.File(io.BytesIO(download_file(image_path)), filename=name)
if spoiler:
picture.spoiler=True
try:
await channel.send(file=picture)
except Exception as e:
print(traceback.format_exc())
await channel.send("Files is too large")
else :
with open(image_path, 'rb') as f:
# noinspection PyTypeChecker
picture = discord.File(f, filename=image_path.split("\\")[-1])
if spoiler:
picture.spoiler = True
try:
await channel.send(file=picture)
except Exception as e:
print(traceback.format_exc())
await channel.send("Files is too large")
await asyncio.sleep(3)
async def on_disconnect(self, message):
print("stop")
await message.channel.send('MemoryBot has been disconnected!')
client = MemoryBot(intents=intents, heartbeat_interval=60.0)
keep_alive()
client.run(TOKEN)
| [] |
2024-01-10 | homanp/superagent | libs~superagent~app~tools~algolia.py | import asyncio
from algoliasearch.search_client import SearchClient
from langchain.tools import BaseTool
class Algolia(BaseTool):
name = "Algolia"
description = "Useful for querying an Agolia index"
return_direct = False
def _init_client_and_index(self):
app_id = self.metadata["appId"]
api_key = self.metadata["apiKey"]
index = self.metadata["index"]
client = SearchClient.create(app_id, api_key)
index = client.init_index(index)
return index
def _run(self, search_query: str, num_of_results: int = 3) -> str:
index = self._init_client_and_index()
output = index.search(search_query)
return str(output["hits"][:num_of_results])
async def _arun(self, search_query: str, num_of_results: int = 3) -> str:
index = self._init_client_and_index()
loop = asyncio.get_event_loop()
output = await loop.run_in_executor(None, index.search, search_query)
return str(output["hits"][:num_of_results])
| [
"Useful for querying an Agolia index"
] |
2024-01-10 | homanp/superagent | libs~superagent~app~tools~hand_off.py | import json
from langchain.tools import BaseTool
class HandOff(BaseTool):
name = "human hand-off"
description = "useful for hand-off of conversation to a human operator"
return_direct = False
def _run(self, reason: str) -> str:
payload = {"reasons": reason, "action": "hand-off"}
return json.dumps(payload)
async def _arun(self, reason: str) -> str:
payload = {"reasons": reason, "action": "hand-off"}
return json.dumps(payload)
| [
"useful for hand-off of conversation to a human operator"
] |
2024-01-10 | homanp/superagent | libs~superagent~app~vectorstores~qdrant.py | import logging
from typing import Literal
import openai
from decouple import config
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings # type: ignore
from qdrant_client import QdrantClient, models
from qdrant_client.http import models as rest
from qdrant_client.http.models import PointStruct
from app.utils.helpers import get_first_non_null
logger = logging.getLogger(__name__)
class QdrantVectorStore:
def __init__(
self,
options: dict,
index_name: str = None,
host: str = None,
api_key: str = None,
) -> None:
self.options = options
variables = {
"QDRANT_INDEX": get_first_non_null(
index_name,
options.get("QDRANT_INDEX"),
config("QDRANT_INDEX", None),
),
"QDRANT_HOST": get_first_non_null(
host,
options.get("QDRANT_HOST"),
config("QDRANT_HOST", None),
),
"QDRANT_API_KEY": get_first_non_null(
api_key,
options.get("QDRANT_API_KEY"),
config("QDRANT_API_KEY", None),
),
}
for var, value in variables.items():
if not value:
raise ValueError(
f"Please provide a {var} via the "
f"`{var}` environment variable"
"or check the `VectorDb` table in the database."
)
self.client = QdrantClient(
url=variables["QDRANT_HOST"],
api_key=variables["QDRANT_API_KEY"],
)
self.embeddings = OpenAIEmbeddings(
model="text-embedding-ada-002", openai_api_key=config("OPENAI_API_KEY")
)
self.index_name = variables["QDRANT_INDEX"]
logger.info(f"Initialized Qdrant Client with: {self.index_name}")
def embed_documents(
self, documents: list[Document], _batch_size: int = 100
) -> None:
collections = self.client.get_collections()
if self.index_name not in [c.name for c in collections.collections]:
self.client.recreate_collection(
collection_name=self.index_name,
vectors_config={
"content": rest.VectorParams(
distance=rest.Distance.COSINE,
size=1536,
),
},
)
points = []
i = 0
for document in documents:
i += 1
response = openai.embeddings.create(
input=document.page_content, model="text-embedding-ada-002"
)
points.append(
PointStruct(
id=i,
vector={"content": response.data[0].embedding},
payload={"text": document.page_content, **document.metadata},
)
)
self.client.upsert(collection_name=self.index_name, wait=True, points=points)
def query_documents(
self,
prompt: str,
datasource_id: str,
top_k: int | None,
_query_type: Literal["document", "all"] = "document",
) -> list[str]:
response = openai.embeddings.create(
input=prompt, model="text-embedding-ada-002"
)
embeddings = response.data[0].embedding
search_result = self.client.search(
collection_name=self.index_name,
query_vector=("content", embeddings),
limit=top_k,
query_filter=models.Filter(
must=[
models.FieldCondition(
key="datasource_id",
match=models.MatchValue(value=datasource_id),
),
]
),
with_payload=True,
)
return search_result
def delete(self, datasource_id: str) -> None:
try:
self.client.delete(
collection_name=self.index_name,
points_selector=models.FilterSelector(
filter=models.Filter(
must=[
models.FieldCondition(
key="datasource_id",
match=models.MatchValue(value=datasource_id),
),
],
)
),
)
except Exception as e:
logger.error(f"Failed to delete {datasource_id}. Error: {e}")
| [] |
2024-01-10 | homanp/superagent | libs~superagent~app~tools~replicate.py | from langchain.llms.replicate import Replicate as ReplicateModel
from langchain.tools import BaseTool
class Replicate(BaseTool):
name = "Replicate"
description = "useful for querying a Replicate model."
return_direct = False
def _run(self, prompt: str) -> str:
model = self.metadata["model"]
api_token = self.metadata["apiKey"]
input = self.metadata["arguments"]
model = ReplicateModel(
model=model, input=input, api_token=api_token, replicate_api_token=api_token
)
output = model.predict(prompt)
return output
async def _arun(self, prompt: str) -> str:
model = self.metadata["model"]
api_token = self.metadata["apiKey"]
model = ReplicateModel(model=model, replicate_api_token=api_token)
output = await model.apredict(prompt)
return output
| [
"useful for querying a Replicate model."
] |
2024-01-10 | homanp/superagent | libs~superagent~app~tools~browser.py | import aiohttp
import requests
from bs4 import BeautifulSoup
from langchain.tools import BaseTool as LCBaseTool
from pydantic import BaseModel, Field
from app.tools.base import BaseTool
class LCBrowser(LCBaseTool):
name = "Browser"
description = (
"a portal to the internet. Use this when you need to "
"get specific content from a website."
)
return_direct = False
def _run(self, url: str) -> None:
response = requests.get(url)
soup = BeautifulSoup(response.text, "html.parser")
return soup.get_text()
async def _arun(self, url: str) -> str:
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
html_content = await response.text()
soup = BeautifulSoup(html_content, "html.parser")
text = soup.get_text()
return text
class BrowserArgs(BaseModel):
url: str = Field(..., description="A valid url including protocol to analyze")
class Browser(BaseTool):
args_schema = BrowserArgs
async def arun(self, args: BrowserArgs) -> dict:
url = args.url
if not url.startswith(("http://", "https://")):
url = "http://" + url
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
html_content = await response.text()
soup = BeautifulSoup(html_content, "html.parser")
text = soup.get_text()
return {"type": "function_call", "content": text}
| [] |
2024-01-10 | homanp/superagent | libs~superagent~app~api~agents.py | import asyncio
import json
import logging
from typing import AsyncIterable
import segment.analytics as analytics
from decouple import config
from fastapi import APIRouter, Depends
from fastapi.responses import StreamingResponse
from langchain.agents import AgentExecutor
from langchain.chains import LLMChain
from langfuse import Langfuse
from langfuse.model import CreateTrace
from langsmith import Client
from app.agents.base import AgentBase
from app.models.request import (
Agent as AgentRequest,
)
from app.models.request import (
AgentDatasource as AgentDatasourceRequest,
)
from app.models.request import (
AgentInvoke as AgentInvokeRequest,
)
from app.models.request import (
AgentLLM as AgentLLMRequest,
)
from app.models.request import (
AgentTool as AgentToolRequest,
)
from app.models.response import (
Agent as AgentResponse,
)
from app.models.response import (
AgentDatasosurceList as AgentDatasosurceListResponse,
)
from app.models.response import (
AgentInvoke as AgentInvokeResponse,
)
from app.models.response import (
AgentList as AgentListResponse,
)
from app.models.response import AgentRunList as AgentRunListResponse
from app.models.response import (
AgentToolList as AgentToolListResponse,
)
from app.utils.api import get_current_api_user, handle_exception
from app.utils.llm import LLM_PROVIDER_MAPPING
from app.utils.prisma import prisma
from app.utils.streaming import CustomAsyncIteratorCallbackHandler
SEGMENT_WRITE_KEY = config("SEGMENT_WRITE_KEY", None)
router = APIRouter()
analytics.write_key = SEGMENT_WRITE_KEY
logging.basicConfig(level=logging.INFO)
# Agent endpoints
@router.post(
"/agents",
name="create",
description="Create a new agent",
response_model=AgentResponse,
)
async def create(body: AgentRequest, api_user=Depends(get_current_api_user)):
"""Endpoint for creating an agent"""
try:
if SEGMENT_WRITE_KEY:
analytics.track(api_user.id, "Created Agent", {**body.dict()})
agent = await prisma.agent.create(
{**body.dict(), "apiUserId": api_user.id},
include={
"tools": {"include": {"tool": True}},
"datasources": {"include": {"datasource": True}},
"llms": {"include": {"llm": True}},
},
)
provider = None
for key, models in LLM_PROVIDER_MAPPING.items():
if body.llmModel in models:
provider = key
break
llm = await prisma.llm.find_first(
where={"provider": provider, "apiUserId": api_user.id}
)
await prisma.agentllm.create({"agentId": agent.id, "llmId": llm.id})
return {"success": True, "data": agent}
except Exception as e:
handle_exception(e)
@router.get(
"/agents",
name="list",
description="List all agents",
response_model=AgentListResponse,
)
async def list(api_user=Depends(get_current_api_user), skip: int = 0, take: int = 50):
"""Endpoint for listing all agents"""
try:
import math
data = await prisma.agent.find_many(
skip=skip,
take=take,
where={"apiUserId": api_user.id},
include={"llms": True},
)
# Get the total count of agents
total_count = await prisma.agent.count(where={"apiUserId": api_user.id})
# Calculate the total number of pages
total_pages = math.ceil(total_count / take)
return {"success": True, "data": data, "total_pages": total_pages}
except Exception as e:
handle_exception(e)
@router.get(
"/agents/{agent_id}",
name="get",
description="Get a single agent",
response_model=AgentResponse,
)
async def get(agent_id: str, api_user=Depends(get_current_api_user)):
"""Endpoint for getting a single agent"""
try:
data = await prisma.agent.find_first(
where={"id": agent_id, "apiUserId": api_user.id},
include={
"tools": {"include": {"tool": True}},
"datasources": {"include": {"datasource": True}},
"llms": {"include": {"llm": True}},
},
)
for llm in data.llms:
llm.llm.options = json.dumps(llm.llm.options)
for tool in data.tools:
if isinstance(tool.tool.toolConfig, dict):
tool.tool.toolConfig = json.dumps(tool.tool.toolConfig)
return {"success": True, "data": data}
except Exception as e:
handle_exception(e)
@router.delete(
"/agents/{agent_id}",
name="delete",
description="Delete an agent",
response_model=None,
)
async def delete(agent_id: str, api_user=Depends(get_current_api_user)):
"""Endpoint for deleting an agent"""
try:
if SEGMENT_WRITE_KEY:
analytics.track(api_user.id, "Deleted Agent")
await prisma.agent.delete(where={"id": agent_id})
return {"success": True, "data": None}
except Exception as e:
handle_exception(e)
@router.patch(
"/agents/{agent_id}",
name="update",
description="Patch an agent",
response_model=AgentResponse,
)
async def update(
agent_id: str, body: AgentRequest, api_user=Depends(get_current_api_user)
):
"""Endpoint for patching an agent"""
try:
if SEGMENT_WRITE_KEY:
analytics.track(api_user.id, "Updated Agent")
data = await prisma.agent.update(
where={"id": agent_id},
data={
**body.dict(),
"apiUserId": api_user.id,
},
)
return {"success": True, "data": data}
except Exception as e:
handle_exception(e)
@router.post(
"/agents/{agent_id}/invoke",
name="invoke",
description="Invoke an agent",
response_model=AgentInvokeResponse,
)
async def invoke(
agent_id: str, body: AgentInvokeRequest, api_user=Depends(get_current_api_user)
):
"""Endpoint for invoking an agent"""
langfuse_secret_key = config("LANGFUSE_SECRET_KEY", "")
langfuse_public_key = config("LANGFUSE_PUBLIC_KEY", "")
langfuse_host = config("LANGFUSE_HOST", "https://cloud.langfuse.com")
langfuse_handler = None
if langfuse_public_key and langfuse_secret_key:
langfuse = Langfuse(
public_key=langfuse_public_key,
secret_key=langfuse_secret_key,
host=langfuse_host,
)
session_id = f"{agent_id}-{body.sessionId}" if body.sessionId else f"{agent_id}"
trace = langfuse.trace(
CreateTrace(
id=session_id,
name="Assistant",
userId=api_user.id,
metadata={"agentId": agent_id},
)
)
langfuse_handler = trace.get_langchain_handler()
async def send_message(
agent: LLMChain | AgentExecutor,
content: str,
callback: CustomAsyncIteratorCallbackHandler,
) -> AsyncIterable[str]:
try:
task = asyncio.ensure_future(
agent.acall(
inputs={"input": content},
tags=[agent_id],
callbacks=[langfuse_handler] if langfuse_handler else None,
)
)
async for token in callback.aiter():
yield f"data: {token}\n\n"
await task
result = task.result()
if "intermediate_steps" in result:
for step in result["intermediate_steps"]:
agent_action_message_log = step[0]
function = agent_action_message_log.tool
args = agent_action_message_log.tool_input
if function and args:
yield (
"event: function_call\n"
f'data: {{"function": "{function}", '
f'"args": {json.dumps(args)}}}\n\n'
)
except Exception as e:
logging.error(f"Error in send_message: {e}")
finally:
callback.done.set()
if SEGMENT_WRITE_KEY:
analytics.track(api_user.id, "Invoked Agent")
logging.info("Invoking agent...")
session_id = body.sessionId
input = body.input
enable_streaming = body.enableStreaming
output_schema = body.outputSchema
callback = CustomAsyncIteratorCallbackHandler()
agent = await AgentBase(
agent_id=agent_id,
session_id=session_id,
enable_streaming=enable_streaming,
output_schema=output_schema,
callback=callback,
).get_agent()
if enable_streaming:
logging.info("Streaming enabled. Preparing streaming response...")
generator = send_message(agent, content=input, callback=callback)
return StreamingResponse(generator, media_type="text/event-stream")
logging.info("Streaming not enabled. Invoking agent synchronously...")
output = await agent.acall(
inputs={"input": input},
tags=[agent_id],
callbacks=[langfuse_handler] if langfuse_handler else None,
)
if output_schema:
try:
output = json.loads(output.get("output"))
except Exception as e:
logging.error(f"Error parsing output: {e}")
output = None
return {"success": True, "data": output}
# Agent LLM endpoints
@router.post(
"/agents/{agent_id}/llms",
name="add_llm",
description="Add LLM to agent",
response_model=AgentResponse,
)
async def add_llm(
agent_id: str, body: AgentLLMRequest, api_user=Depends(get_current_api_user)
):
"""Endpoint for adding an LLM to an agent"""
try:
await prisma.agentllm.create({**body.dict(), "agentId": agent_id})
return {"success": True, "data": None}
except Exception as e:
handle_exception(e)
@router.delete(
"/agents/{agent_id}/llms/{llm_id}",
name="remove_llm",
description="Remove LLM from agent",
)
async def remove_llm(
agent_id: str, llm_id: str, api_user=Depends(get_current_api_user)
):
"""Endpoint for removing an LLM from an agent"""
try:
await prisma.agentllm.delete(
where={"agentId_llmId": {"agentId": agent_id, "llmId": llm_id}}
)
return {"success": True, "data": None}
except Exception as e:
handle_exception(e)
# Agent Tool endpoints
@router.post(
"/agents/{agent_id}/tools",
name="add_tool",
description="Add tool to agent",
response_model=AgentResponse,
)
async def add_tool(
agent_id: str,
body: AgentToolRequest,
api_user=Depends(get_current_api_user),
):
"""Endpoint for adding a tool to an agent"""
try:
if SEGMENT_WRITE_KEY:
analytics.track(api_user.id, "Added Agent Tool")
agent_tool = await prisma.agenttool.find_unique(
where={
"agentId_toolId": {
"agentId": agent_id,
"toolId": body.toolId,
}
}
)
if agent_tool:
raise Exception("Agent tool already exists")
agent_tool = await prisma.agenttool.create(
{"toolId": body.toolId, "agentId": agent_id},
include={"tool": True},
)
return {"success": True}
except Exception as e:
handle_exception(e)
@router.get(
"/agents/{agent_id}/tools",
name="list_tools",
description="List agent tools",
response_model=AgentToolListResponse,
)
async def list_tools(agent_id: str, api_user=Depends(get_current_api_user)):
"""Endpoint for listing agent tools"""
try:
agent_tools = await prisma.agenttool.find_many(where={"agentId": agent_id})
return {"success": True, "data": agent_tools}
except Exception as e:
handle_exception(e)
@router.delete(
"/agents/{agent_id}/tools/{tool_id}",
name="remove_tool",
description="Remove tool from agent",
)
async def remove_tool(
agent_id: str, tool_id: str, api_user=Depends(get_current_api_user)
):
"""Endpoint for removing a tool from an agent"""
try:
if SEGMENT_WRITE_KEY:
analytics.track(api_user.id, "Deleted Agent Tool")
await prisma.agenttool.delete(
where={
"agentId_toolId": {
"agentId": agent_id,
"toolId": tool_id,
}
}
)
return {"success": True, "data": None}
except Exception as e:
handle_exception(e)
# Agent Datasource endpoints
@router.post(
"/agents/{agent_id}/datasources",
name="add_datasource",
description="Add datasource to agent",
response_model=AgentResponse,
)
async def add_datasource(
agent_id: str,
body: AgentDatasourceRequest,
api_user=Depends(get_current_api_user),
):
"""Endpoint for adding a datasource to an agent"""
try:
if SEGMENT_WRITE_KEY:
analytics.track(api_user.id, "Added Agent Datasource")
agent_datasource = await prisma.agentdatasource.find_unique(
where={
"agentId_datasourceId": {
"agentId": agent_id,
"datasourceId": body.datasourceId,
}
}
)
if agent_datasource:
raise Exception("Agent datasource already exists")
agent_datasource = await prisma.agentdatasource.create(
{"datasourceId": body.datasourceId, "agentId": agent_id},
include={"datasource": True},
)
# TODO:
# Enable this for finetuning models
# async def run_datasource_flow():
# try:
# await process_datasource(body.datasourceId, agent_id)
# except Exception as flow_exception:
# handle_exception(flow_exception)
# asyncio.create_task(run_datasource_flow())
return {"success": True}
except Exception as e:
handle_exception(e)
@router.get(
"/agents/{agent_id}/datasources",
name="list_datasources",
description="List agent datasources",
response_model=AgentDatasosurceListResponse,
)
async def list_datasources(agent_id: str, api_user=Depends(get_current_api_user)):
"""Endpoint for listing agent datasources"""
try:
agent_datasources = await prisma.agentdatasource.find_many(
where={"agentId": agent_id}
)
return {"success": True, "data": agent_datasources}
except Exception as e:
handle_exception(e)
@router.delete(
"/agents/{agent_id}/datasources/{datasource_id}",
name="remove_datasource",
description="Remove datasource from agent",
)
async def remove_datasource(
agent_id: str, datasource_id: str, api_user=Depends(get_current_api_user)
):
"""Endpoint for removing a datasource from an agent"""
try:
if SEGMENT_WRITE_KEY:
analytics.track(api_user.id, "Deleted Agent Datasource")
await prisma.agentdatasource.delete(
where={
"agentId_datasourceId": {
"agentId": agent_id,
"datasourceId": datasource_id,
}
}
)
# TODO:
# Enable this for finetuning models
# async def run_datasource_revalidate_flow():
# try:
# await revalidate_datasource(agent_id)
# except Exception as flow_exception:
# handle_exception(flow_exception)
# asyncio.create_task(run_datasource_revalidate_flow())
return {"success": True, "data": None}
except Exception as e:
handle_exception(e)
# Agent runs
@router.get(
"/agents/{agent_id}/runs",
name="list_runs",
description="List agent runs",
response_model=AgentRunListResponse,
)
async def list_runs(agent_id: str, api_user=Depends(get_current_api_user)):
"""Endpoint for listing agent runs"""
is_langsmith_enabled = config("LANGCHAIN_TRACING_V2", False)
if is_langsmith_enabled == "True":
langsmith_client = Client()
try:
output = langsmith_client.list_runs(
project_id=config("LANGSMITH_PROJECT_ID"),
filter=f"has(tags, '{agent_id}')",
)
return {"success": True, "data": output}
except Exception as e:
handle_exception(e)
return {"success": False, "data": []}
| [] |
2024-01-10 | homanp/superagent | libs~superagent~app~tools~http.py | import json
import aiohttp
import requests
from langchain.tools import BaseTool as LCBaseTool
class LCHttpTool(LCBaseTool):
name = "API Request"
description = "useful for making GET/POST API requests"
return_direct = False
def _run(self, url: str, method: str = "GET", body: dict = None) -> None:
headers = (
json.loads(self.metadata.get("headers"))
if self.metadata.get("headers")
else {}
)
headers["content-type"] = "application/json"
try:
request_kwargs = {"method": method, "url": url, "headers": headers}
if body is not None:
request_kwargs["json"] = body
response = requests.request(**request_kwargs)
response.raise_for_status()
try:
return response.json()
except Exception:
return "Request successful"
except requests.exceptions.RequestException as e:
return str(e)
async def _arun(self, url: str, method: str = "GET", body: dict = None) -> str:
headers = (
json.loads(self.metadata.get("headers"))
if self.metadata.get("headers")
else {}
)
headers["content-type"] = "application/json"
try:
async with aiohttp.ClientSession() as session:
request_kwargs = {"method": method, "url": url, "headers": headers}
if body is not None:
request_kwargs["json"] = body
async with session.request(**request_kwargs) as response:
try:
return await response.json()
except Exception:
return "Request successfull"
except Exception as e:
return str(e)
| [
"useful for making GET/POST API requests"
] |
2024-01-10 | homanp/superagent | libs~superagent~app~tools~datasource.py | # flake8: noqa
import requests
import pandas as pd
from io import StringIO
from decouple import config
from tempfile import NamedTemporaryFile
from langchain.tools import BaseTool
from llama import Context, LLMEngine, Type
from app.vectorstores.base import VectorStoreBase
from app.datasource.loader import DataLoader
from prisma.models import Datasource
from langchain.agents.agent_types import AgentType
from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent
from langchain.chat_models.openai import ChatOpenAI
class DatasourceFinetuneTool(BaseTool):
name = "datasource"
description = "useful for when you need to answer questions"
return_direct = False
def _run(
self,
question: str,
) -> str:
"""Use the tool."""
class Question(Type):
question: str = Context("A question")
class Answer(Type):
answer: str = Context("An answer to the question")
llm = LLMEngine(
id=self.metadata["agent_id"],
config={"production.key": config("LAMINI_API_KEY")},
model_name="chat/gpt-3.5-turbo",
)
input = Question(question=question)
output = llm(input=input, output_type=Answer)
return output.answer
async def _arun(
self,
question: str,
) -> str:
"""Use the tool asynchronously."""
class Question(Type):
question: str = Context("A question")
class Answer(Type):
answer: str = Context("An answer to the question")
llm = LLMEngine(
id=self.metadata["agent_id"],
config={"production.key": config("LAMINI_API_KEY")},
model_name="chat/gpt-3.5-turbo",
)
input = Question(question=question)
output = llm(input=input, output_type=Answer)
return output.answer
class DatasourceTool(BaseTool):
name = "datasource"
description = "useful for when you need to answer questions"
return_direct = False
def _run(
self,
question: str,
) -> str:
"""Use the tool."""
vector_store = VectorStoreBase(
options=self.metadata["options"],
vector_db_provider=self.metadata["provider"],
)
result = vector_store.query_documents(
prompt=question,
datasource_id=self.metadata["datasource_id"],
query_type=self.metadata["query_type"],
top_k=3,
)
return result
async def _arun(
self,
question: str,
) -> str:
"""Use the tool asynchronously."""
vector_store = VectorStoreBase(
options=self.metadata["options"],
vector_db_provider=self.metadata["provider"],
)
result = vector_store.query_documents(
prompt=question,
datasource_id=self.metadata["datasource_id"],
query_type=self.metadata["query_type"],
top_k=3,
)
return result
class StructuredDatasourceTool(BaseTool):
name = "structured datasource"
description = "useful for when need answer questions"
return_direct = False
def _load_xlsx_data(self, datasource: Datasource):
with NamedTemporaryFile(suffix=".xlsx", delete=True) as temp_file:
if datasource.url:
response = requests.get(datasource.url)
temp_file.write(response.content)
else:
temp_file.write(datasource.content)
temp_file.flush()
df = pd.read_excel(temp_file.name, engine="openpyxl")
return df
def _load_csv_data(self, datasource: Datasource):
if datasource.url:
response = requests.get(datasource.url)
file_content = StringIO(response.text)
else:
file_content = StringIO(datasource.content)
df = pd.read_csv(file_content)
return df
def _run(
self,
question: str,
) -> str:
"""Use the tool."""
datasource: Datasource = self.metadata["datasource"]
if datasource.type == "CSV":
df = self._load_csv_data(datasource)
elif datasource.type == "XLSX":
df = self._load_xlsx_data(datasource)
else:
data = DataLoader(datasource=datasource).load()
df = pd.DataFrame(data)
agent = create_pandas_dataframe_agent(
ChatOpenAI(
temperature=0,
model="gpt-4-0613",
openai_api_key=config("OPENAI_API_KEY"),
),
df,
verbose=True,
agent_type=AgentType.OPENAI_FUNCTIONS,
)
output = agent.run(question)
return output
async def _arun(
self,
question: str,
) -> str:
"""Use the tool asynchronously."""
datasource: Datasource = self.metadata["datasource"]
if datasource.type == "CSV":
df = self._load_csv_data(datasource)
elif datasource.type == "XLSX":
df = self._load_xlsx_data(datasource)
else:
data = DataLoader(datasource=datasource).load()
df = pd.DataFrame(data)
agent = create_pandas_dataframe_agent(
ChatOpenAI(
temperature=0,
model="gpt-4-0613",
openai_api_key=config("OPENAI_API_KEY"),
),
df,
verbose=True,
agent_type=AgentType.OPENAI_FUNCTIONS,
)
output = await agent.arun(question)
return output
| [
"useful for when need answer questions",
"useful for when you need to answer questions"
] |
2024-01-10 | homanp/superagent | libs~superagent~app~vectorstores~weaviate.py | # flake8: noqa
import logging
import uuid
from typing import Dict, List, Literal
import backoff
import weaviate
from decouple import config
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings # type: ignore
from pydantic.dataclasses import dataclass
from app.utils.helpers import get_first_non_null
logger = logging.getLogger(__name__)
def _default_schema(index_name: str) -> Dict:
return {
"class": index_name,
"properties": [
{
"name": "text",
"dataType": ["text"],
}
],
}
@dataclass
class Response:
id: str
text: str
metadata: dict
def to_dict(self):
return {
"id": self.id,
"text": self.text,
"metadata": self.metadata,
}
def __init__(self, id: str, text: str, metadata: dict | None = None):
"""Core dataclass for single record."""
self.id = id
self.text = text
self.metadata = metadata or {}
class WeaviateVectorStore:
def __init__(
self,
options: dict,
index_name: str = None,
api_key: str = None,
url: str = None,
) -> None:
self.options = options
variables = {
"WEAVIATE_URL": get_first_non_null(
url,
options.get("WEAVIATE_URL"),
config("WEAVIATE_URL", None),
),
"WEAVIATE_API_KEY": get_first_non_null(
api_key,
options.get("WEAVIATE_API_KEY"),
config("WEAVIATE_API_KEY", None),
),
"WEAVIATE_INDEX": get_first_non_null(
index_name,
options.get("WEAVIATE_INDEX"),
config("WEAVIATE_INDEX", None),
),
}
for var, value in variables.items():
if not value:
raise ValueError(
f"Please provide a {var} via the " f"`{var}` environment variable."
)
auth = weaviate.auth.AuthApiKey(api_key=variables["WEAVIATE_API_KEY"])
self.client = weaviate.Client(
url=variables["WEAVIATE_URL"],
auth_client_secret=auth,
)
self.embeddings = OpenAIEmbeddings(
model="text-embedding-ada-002", openai_api_key=config("OPENAI_API_KEY")
)
self.index_name = variables["WEAVIATE_INDEX"]
logger.info(f"Initialized Weaviate Client with: {self.index_name}") # type: ignore
@backoff.on_exception(backoff.expo, Exception, max_tries=3)
def _embed_with_retry(self, texts):
return self.embeddings.embed_documents(texts)
def _similarity_search_by_vector(
self, embedding: List[float], datasource_id: str, k: int = 4
) -> List[Document]:
"""Look up similar documents by embedding vector in Weaviate."""
vector = {"vector": embedding}
result = (
self.client.query.get(
self.index_name.capitalize(),
["text", "datasource_id", "source", "page"],
)
.with_near_vector(vector)
.with_where(
{
"path": ["datasource_id"],
"operator": "Equal",
"valueText": datasource_id,
}
)
.with_limit(k)
.do()
)
docs = []
for res in result["data"]["Get"][self.index_name.capitalize()]:
text = res.pop("text")
if text is None:
continue
docs.append(Document(page_content=text, metadata=res))
return docs
def embed_documents(self, documents: list[Document], batch_size: int = 100):
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
if batch_size:
self.client.batch.configure(batch_size=batch_size)
schema = _default_schema(self.index_name)
embeddings = self.embeddings.embed_documents(texts)
# check whether the index already exists
if not self.client.schema.exists(self.index_name):
self.client.schema.create_class(schema)
with self.client.batch as batch:
for i, text in enumerate(texts):
id = uuid.uuid4()
data_properties = {
"text": text,
}
if metadatas is not None:
for key in metadatas[i].keys():
data_properties[key] = metadatas[i][key]
_id = str(id)
# if an embedding strategy is not provided, we let
# weaviate create the embedding. Note that this will only
# work if weaviate has been installed with a vectorizer module
# like text2vec-contextionary for example
params = {
"uuid": _id,
"data_object": data_properties,
"class_name": self.index_name,
}
if embeddings is not None:
params["vector"] = embeddings[i]
batch.add_data_object(**params)
batch.flush()
def query_documents(
self,
prompt: str,
datasource_id: str,
top_k: int | None,
_query_type: Literal["document", "all"] = "document",
) -> list[str]:
if top_k is None:
top_k = 5
logger.info(f"Executing query with document id in namespace {datasource_id}")
vector = self.embeddings.embed_query(prompt)
results = self._similarity_search_by_vector(
embedding=vector, k=top_k, datasource_id=datasource_id
)
return results
def delete(self, datasource_id: str) -> None:
try:
self.client.batch.delete_objects(
class_name=self.index_name.capitalize(),
where={
"path": ["datasource_id"],
"operator": "Equal",
"valueText": datasource_id,
},
)
except Exception as e:
logger.error(f"Failed to delete {datasource_id}. Error: {e}")
| [] |
2024-01-10 | homanp/superagent | libs~superagent~app~tools~tts_1.py | from pathlib import Path
from langchain.tools import BaseTool
from openai import AsyncOpenAI, OpenAI
class TTS1(BaseTool):
name = "text-to-speech"
description = "useful for generation voice audio from text"
return_direct = False
def _run(self, input: dict) -> str:
client = OpenAI(api_key=self.metadata["openaiApiKey"])
speech_file_path = Path(__file__).parent / "speech.mp3"
response = client.audio.speech.create(
model="tts-1",
voice=input["voice"] or "alloy",
input=input["text"],
)
output = response.stream_to_file(speech_file_path)
return output
async def _arun(self, input: dict) -> str:
client = AsyncOpenAI(api_key=self.metadata["openaiApiKey"])
speech_file_path = Path(__file__).parent / "speech.mp3"
response = await client.audio.speech.create(
model="tts-1",
voice=input["voice"] or "alloy",
input=input["text"],
)
output = response.stream_to_file(speech_file_path)
return output
| [
"useful for generation voice audio from text"
] |
2024-01-10 | homanp/superagent | libs~superagent~app~tools~e2b.py | # flake8: noqa
import ast
from typing import Optional
from decouple import config
from e2b import DataAnalysis
from langchain.callbacks.manager import CallbackManager
from langchain.tools import BaseTool
class E2BCodeExecutor(BaseTool):
name = "Code interpreter"
description = "useful for running python code, it returns the output of the code"
def _add_last_line_print(self, code: str):
tree = ast.parse(code)
node = tree.body[-1]
if isinstance(node, ast.Expr) and isinstance(node.value, ast.Call):
if isinstance(node.value.func, ast.Name) and node.value.func.id == "print":
return tree
tree.body[-1] = ast.Expr(
value=ast.Call(
func=ast.Name(id="print", ctx=ast.Load()),
args=[node.value],
keywords=[],
)
)
return ast.unparse(tree)
def _download_artifact(self, artifact):
# Artifact is a chart file created by matplotlib
# You can download it right from the E2B LLM Sandbox
#
# `artifact_bytes` is a chart file (.png) in bytes
# TODO: Send the artifact bytes to frontend, save it to DB, etc
artifact_bytes = artifact.download()
def _run(self, python_code: str) -> str:
code = self._add_last_line_print(python_code)
# E2B session represents a sandbox runtime for LLM - it's a microVM for every instance of an agent.
session = DataAnalysis(api_key=config("E2B_API_KEY"))
# E2B offers both streaming output and artifacts or retrieving them after the code has finished running.
stdout, err, artifacts = session.run_python(
code=code,
# TODO: To create more responsive UI, you might want to stream stdout, stderr, and artifacts
on_stdout=lambda line: print("stdout", line),
on_stderr=lambda line: print("stderr", line),
on_artifact=self._download_artifact,
)
session.close()
# Or you can download artifacts after the code has finished running:
# for artifact in artifacts:
# self._download_artifact(artifact)
if err:
return "There was following error during execution: " + err
return stdout
async def _arun(self, python_code: str) -> str:
try:
return self._run(python_code)
except:
return "There was an error during execution"
| [
"useful for running python code, it returns the output of the code"
] |
2024-01-10 | homanp/superagent | libs~superagent~app~tools~zapier.py | from langchain.agents import AgentType, initialize_agent
from langchain.agents.agent_toolkits import ZapierToolkit
from langchain.chat_models.openai import ChatOpenAI
from langchain.tools import BaseTool
from langchain.utilities.zapier import ZapierNLAWrapper
class ZapierNLA(BaseTool):
name = "Zapier"
description = (
"useful for performing actions such sending emails, scheduling meetings etc."
)
return_direct = False
def _run(self, input: str) -> str:
zapier_nla_api_key = self.metadata["zapierNlaApiKey"]
zapier = ZapierNLAWrapper(zapier_nla_api_key=zapier_nla_api_key)
toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier)
agent = initialize_agent(
toolkit.get_tools(),
llm=ChatOpenAI(openai_api_key=self.metadata["openaiApiKey"], model="gpt-4"),
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
)
output = agent.run(input)
return output
async def _arun(self, input: str) -> str:
zapier_nla_api_key = self.metadata["zapierNlaApiKey"]
zapier = ZapierNLAWrapper(zapier_nla_api_key=zapier_nla_api_key)
toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier)
agent = initialize_agent(
toolkit.get_tools(),
llm=ChatOpenAI(openai_api_key=self.metadata["openaiApiKey"], model="gpt-4"),
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
)
output = await agent.arun(input)
return output
| [] |
2024-01-10 | homanp/superagent | libs~superagent~app~tools~bing_search.py | import asyncio
from langchain.tools import BaseTool as LCBaseTool
from langchain.utilities import BingSearchAPIWrapper
from pydantic import BaseModel, Field
from app.tools.base import BaseTool
class LCBingSearch(LCBaseTool):
name = "bing search"
description = "useful for searching the internet"
return_direct = False
def _run(self, search_query: str) -> str:
bing_search_url = self.metadata["bingSearchUrl"]
bing_subscription_key = self.metadata["bingSubscriptionKey"]
search = BingSearchAPIWrapper(
bing_search_url=bing_search_url,
bing_subscription_key=bing_subscription_key,
)
output = search.run(search_query)
return output
async def _arun(self, search_query: str) -> str:
bing_search_url = self.metadata["bingSearchUrl"]
bing_subscription_key = self.metadata["bingSubscriptionKey"]
search = BingSearchAPIWrapper(
bing_search_url=bing_search_url,
bing_subscription_key=bing_subscription_key,
)
loop = asyncio.get_event_loop()
output = await loop.run_in_executor(None, search.run, search_query)
return output
class BingSearchArgs(BaseModel):
search_query: str = Field(..., description="A search query")
class BingSearch(BaseTool):
args_schema = BingSearchArgs
async def arun(self, args: BingSearchArgs) -> dict:
bing_search_url = self.metadata["bingSearchUrl"]
bing_subscription_key = self.metadata["bingSubscriptionKey"]
search_query = args.search_query
search = BingSearchAPIWrapper(
bing_search_url=bing_search_url,
bing_subscription_key=bing_subscription_key,
)
loop = asyncio.get_event_loop()
output = await loop.run_in_executor(None, search.run, search_query)
return {"type": "function_call", "content": output}
| [
"useful for searching the internet"
] |
2024-01-10 | homanp/superagent | libs~superagent~app~datasource~loader.py | import json
import tempfile
from tempfile import NamedTemporaryFile
from typing import Any
from urllib.parse import urlparse
import requests
from bs4 import BeautifulSoup as Soup
from langchain.docstore.document import Document
from langchain.document_loaders import (
GitLoader,
PyPDFLoader,
RecursiveUrlLoader,
TextLoader,
UnstructuredMarkdownLoader,
UnstructuredWordDocumentLoader,
WebBaseLoader,
YoutubeLoader,
)
from langchain.document_loaders.airbyte import AirbyteStripeLoader
from pyairtable import Api
from prisma.models import Datasource
class DataLoader:
def __init__(self, datasource: Datasource):
self.datasource = datasource
def load(self) -> Any:
if self.datasource.type == "TXT":
return self.load_txt()
elif self.datasource.type == "PDF":
return self.load_pdf()
elif self.datasource.type == "PPTX":
return self.load_pptx()
elif self.datasource.type == "DOCX":
return self.load_docx()
elif self.datasource.type == "GOOGLE_DOC":
return self.load_google_doc()
elif self.datasource.type == "Markdown":
return self.load_markdown()
elif self.datasource.type == "GITHUB_REPOSITORY":
return self.load_github()
elif self.datasource.type == "WEBPAGE":
return self.load_webpage()
elif self.datasource.type == "YOUTUBE":
return self.load_youtube()
elif self.datasource.type == "URL":
return self.load_url()
elif self.datasource.type == "AIRTABLE":
return self.load_airtable()
elif self.datasource.type == "STRIPE":
return self.load_stripe()
else:
raise ValueError(f"Unsupported datasource type: {self.datasource.type}")
def load_txt(self):
with NamedTemporaryFile(suffix=".txt", delete=True) as temp_file:
if self.datasource.url:
file_response = requests.get(self.datasource.url).text
else:
file_response = self.datasource.content
temp_file.write(file_response.encode())
temp_file.flush()
loader = TextLoader(file_path=temp_file.name)
return loader.load_and_split()
def load_pdf(self):
if self.datasource.url:
loader = PyPDFLoader(file_path=self.datasource.url)
else:
with NamedTemporaryFile(suffix=".pdf", delete=True) as temp_file:
temp_file.write(self.datasource.content)
temp_file.flush()
loader = UnstructuredWordDocumentLoader(file_path=temp_file.name)
return loader.load_and_split()
return loader.load_and_split()
def load_google_doc(self):
pass
def load_pptx(self):
from pptx import Presentation
with NamedTemporaryFile(suffix=".pptx", delete=True) as temp_file:
if self.datasource.url:
file_response = requests.get(self.datasource.url).content
else:
file_response = self.datasource.content
temp_file.write(file_response)
temp_file.flush()
presentation = Presentation(temp_file.name)
result = ""
for i, slide in enumerate(presentation.slides):
result += f"\n\nSlide #{i}: \n"
for shape in slide.shapes:
if hasattr(shape, "text"):
result += f"{shape.text}\n"
return [Document(page_content=result)]
def load_docx(self):
with NamedTemporaryFile(suffix=".docx", delete=True) as temp_file:
if self.datasource.url:
file_response = requests.get(self.datasource.url).content
else:
file_response = self.datasource.content
temp_file.write(file_response)
temp_file.flush()
loader = UnstructuredWordDocumentLoader(file_path=temp_file.name)
return loader.load_and_split()
def load_markdown(self):
with NamedTemporaryFile(suffix=".md", delete=True) as temp_file:
if self.datasource.url:
file_response = requests.get(self.datasource.url).text
else:
file_response = self.datasource.content
temp_file.write(file_response.encode())
temp_file.flush()
loader = UnstructuredMarkdownLoader(file_path=temp_file.name)
return loader.load()
def load_github(self):
parsed_url = urlparse(self.datasource.url)
path_parts = parsed_url.path.split("/") # type: ignore
repo_name = path_parts[2]
metadata = json.loads(self.datasource.metadata)
with tempfile.TemporaryDirectory() as temp_dir:
repo_path = f"{temp_dir}/{repo_name}/" # type: ignore
loader = GitLoader(
clone_url=self.datasource.url,
repo_path=repo_path,
branch=metadata["branch"], # type: ignore
)
return loader.load_and_split()
def load_webpage(self):
loader = RecursiveUrlLoader(
url=self.datasource.url,
max_depth=2,
extractor=lambda x: Soup(x, "html.parser").text,
)
chunks = loader.load_and_split()
for chunk in chunks:
if "language" in chunk.metadata:
del chunk.metadata["language"]
return chunks
def load_youtube(self):
video_id = self.datasource.url.split("youtube.com/watch?v=")[-1]
loader = YoutubeLoader(video_id=video_id)
return loader.load_and_split()
def load_url(self):
url_list = self.datasource.url.split(",")
loader = WebBaseLoader(url_list)
return loader.load_and_split()
def load_airtable(self):
metadata = json.loads(self.datasource.metadata)
api_key = metadata["apiKey"]
base_id = metadata["baseId"]
table_id = metadata["tableId"]
api = Api(api_key)
table = api.table(base_id, table_id)
return table.all()
def load_stripe(self):
metadata = json.loads(self.datasource.metadata)
client_secret = metadata["clientSecret"]
account_id = metadata["accountId"]
start_date = metadata["startDate"]
stream_name = metadata["streamName"]
config = {
"client_secret": client_secret,
"account_id": account_id,
"start_date": start_date,
}
def handle_record(record: dict, _id: str):
return record.data
loader = AirbyteStripeLoader(
config=config,
record_handler=handle_record,
stream_name=stream_name,
)
data = loader.load()
return data
| [] |
2024-01-10 | homanp/superagent | libs~superagent~app~vectorstores~astra.py | import logging
import os
import uuid
from typing import List, Literal, Optional
import backoff
from decouple import config
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings # type: ignore
from pydantic.dataclasses import dataclass
from app.utils.helpers import get_first_non_null
from app.vectorstores.astra_client import AstraClient, QueryResponse
logger = logging.getLogger(__name__)
@dataclass
class Response:
id: str
text: str
metadata: dict
def to_dict(self):
return {
"id": self.id,
"text": self.text,
"metadata": self.metadata,
}
def __init__(self, id: str, text: str, metadata: Optional[dict] = None):
"""Core dataclass for single record."""
self.id = id
self.text = text
self.metadata = metadata or {}
class AstraVectorStore:
def __init__(
self,
options: dict,
astra_id: str = None,
astra_region: str = None,
astra_application_token: str = None,
index_name: str = None, # collection_name
keyspace_name: str = None,
) -> None:
self.options = options
variables = {
"ASTRA_DB_ID": get_first_non_null(
astra_id,
options.get("ASTRA_DB_ID"),
config("ASTRA_DB_ID", None),
),
"ASTRA_DB_REGION": get_first_non_null(
astra_region,
options.get("ASTRA_DB_REGION"),
config("ASTRA_DB_REGION", None),
),
"ASTRA_DB_APPLICATION_TOKEN": get_first_non_null(
astra_application_token,
options.get("ASTRA_DB_APPLICATION_TOKEN"),
config("ASTRA_DB_APPLICATION_TOKEN", None),
),
"ASTRA_DB_COLLECTION_NAME": get_first_non_null(
index_name,
options.get("ASTRA_DB_COLLECTION_NAME"),
config("ASTRA_DB_COLLECTION_NAME", None),
),
"ASTRA_DB_KEYSPACE_NAME": get_first_non_null(
keyspace_name,
options.get("ASTRA_DB_KEYSPACE_NAME"),
config("ASTRA_DB_KEYSPACE_NAME", None),
),
}
for var, value in variables.items():
if not value:
raise ValueError(
f"Please provide a {var} via the "
f"`{var}` environment variable"
"or check the `VectorDb` table in the database."
)
self.index = AstraClient(
variables["ASTRA_DB_ID"],
variables["ASTRA_DB_REGION"],
variables["ASTRA_DB_APPLICATION_TOKEN"],
variables["ASTRA_DB_KEYSPACE_NAME"],
variables["ASTRA_DB_COLLECTION_NAME"],
)
self.embeddings = OpenAIEmbeddings(
model="text-embedding-ada-002",
openai_api_key=os.getenv("OPENAI_API_KEY", ""),
)
@backoff.on_exception(backoff.expo, Exception, max_tries=3)
def _embed_with_retry(self, texts):
return self.embeddings.embed_documents(texts)
def embed_documents(self, documents: List[Document], batch_size: int = 100):
chunks = [
{
"id": str(uuid.uuid4()),
"text": doc.page_content,
"chunk": i,
**doc.metadata,
}
for i, doc in enumerate(documents)
]
def batch_generator(chunks, batch_size):
for i in range(0, len(chunks), batch_size):
i_end = min(len(chunks), i + batch_size)
batch = chunks[i:i_end]
yield batch
batch_gen = batch_generator(chunks, batch_size)
for batch in batch_gen:
batch_ids = [chunk["id"] for chunk in batch]
texts_to_embed = [chunk["text"] for chunk in batch]
logger.debug(f"Texts to embed: {texts_to_embed}")
embeddings = self._embed_with_retry(texts_to_embed)
to_upsert = list(zip(batch_ids, embeddings, batch))
logger.debug(f"Upserting: {to_upsert}")
try:
res = self.index.upsert(to_upsert=to_upsert)
logger.info(f"Upserted documents. {res}")
except Exception as e:
logger.error(f"Failed to upsert documents. Error: {e}")
return self.index.describe_index_stats()
def query(
self,
prompt: str,
metadata_filter: Optional[dict] = None,
top_k: int = 3,
namespace: Optional[str] = None,
min_score: Optional[float] = None, # new argument for minimum similarity score
) -> List[Response]:
"""
Returns results from the vector database.
"""
vector = self.embeddings.embed_query(prompt)
raw_responses: QueryResponse = self.index.query(
vector,
filter=metadata_filter,
top_k=top_k,
include_metadata=True,
namespace=namespace,
)
logger.debug(f"Raw responses: {raw_responses}") # leaving for debugging
# filter raw_responses based on the minimum similarity score if min_score is set
if min_score is not None:
raw_responses.matches = [
match for match in raw_responses.matches if match.score >= min_score
]
formatted_responses = self._format_response(raw_responses)
return formatted_responses
def query_documents(
self,
prompt: str,
datasource_id: str,
top_k: Optional[int] = None,
query_type: Literal["document", "all"] = "document",
) -> List[str]:
if top_k is None:
top_k = 3
logger.info(f"Executing query with document id in namespace {datasource_id}")
documents_in_namespace = self.query(
prompt=prompt,
namespace=datasource_id,
)
if documents_in_namespace == [] and query_type == "document":
logger.info("No result with namespace. Executing query without namespace.")
documents_in_namespace = self.query(
prompt=prompt,
metadata_filter={"datasource_id": datasource_id},
top_k=top_k,
)
# A hack if we want to search in all documents but with backwards compatibility
# with namespaces
if documents_in_namespace == [] and query_type == "all":
logger.info("Querying all documents.")
documents_in_namespace = self.query(
prompt=prompt,
top_k=top_k,
)
return [str(response) for response in documents_in_namespace]
def _extract_match_data(self, match):
"""Extracts id, text, and metadata from a match."""
id = match.id
text = match.metadata.get("text")
metadata = match.metadata
metadata.pop("text")
return id, text, metadata
def _format_response(self, response: QueryResponse) -> List[Response]:
"""
Formats the response dictionary from the vector database into a list of
Response objects.
"""
if not response.get("matches"):
return []
ids, texts, metadata = zip(
*[self._extract_match_data(match) for match in response.matches]
)
responses = [
Response(id=id, text=text, metadata=meta)
for id, text, meta in zip(ids, texts, metadata)
]
return responses
def delete(self, datasource_id: str):
try:
pass
except Exception as e:
logger.error(f"Failed to delete {datasource_id}. Error: {e}")
def clear_cache(self, agent_id: str, datasource_id: Optional[str] = None):
try:
filter_dict = {"agentId": agent_id, "type": "cache"}
if datasource_id:
filter_dict["datasource_id"] = datasource_id
self.index.delete(filter=dict(filter_dict), delete_all=False)
logger.info(f"Deleted vectors with agentId `{agent_id}`.")
except Exception as e:
logger.error(
f"Failed to delete vectors with agentId `{agent_id}`. Error: {e}"
)
| [] |
2024-01-10 | homanp/superagent | libs~superagent~app~tools~pubmed.py | import asyncio
from langchain.tools import BaseTool, PubmedQueryRun
class PubMed(BaseTool):
name = "PubMed® search"
description = "useful for answering question about medical publications"
return_direct = False
def _run(self, search_query: str) -> str:
pubmed = PubmedQueryRun(args_schema=self.args_schema)
output = pubmed.run(search_query)
return output
async def _arun(self, search_query: str) -> str:
pubmed = PubmedQueryRun(args_schema=self.args_schema)
loop = asyncio.get_event_loop()
output = await loop.run_in_executor(None, pubmed.run, search_query)
return output
| [
"useful for answering question about medical publications"
] |
2024-01-10 | homanp/superagent | libs~superagent~app~tools~function.py | from langchain.tools import BaseTool
class Function(BaseTool):
name = "cunstom function"
description = "useful for doing something"
return_direct = True
def _run(self, *args, **kwargs) -> str:
return f"Tell the user that you are pending function {self.name}"
async def _arun(self, *args, **kwargs) -> str:
return f"Tell the user that you are pending function {self.name}"
| [
"useful for doing something"
] |
2024-01-10 | homanp/superagent | libs~superagent~app~tools~gpt_vision.py | from langchain.tools import BaseTool
from openai import AsyncOpenAI, OpenAI
class GPTVision(BaseTool):
name = "gpt vision"
description = "useful for analyzing images"
return_direct = False
def _run(self, input: dict) -> str:
client = OpenAI(api_key=self.metadata["openaiApiKey"])
try:
response = client.chat.completions.create(
model="gpt-4-vision-preview",
messages=[
{
"role": "user",
"content": [
{"type": "text", "text": input["query"]},
{
"type": "image_url",
"image_url": input["image_url"],
},
],
}
],
max_tokens=300,
)
output = response.choices[0]
except Exception as e:
output = str(e)
return output
async def _arun(self, input: dict) -> str:
client = AsyncOpenAI(api_key=self.metadata["openaiApiKey"])
try:
response = await client.chat.completions.create(
model="gpt-4-vision-preview",
messages=[
{
"role": "user",
"content": [
{"type": "text", "text": input["query"]},
{
"type": "image_url",
"image_url": input["image_url"],
},
],
}
],
max_tokens=300,
)
output = response.choices[0]
except Exception as e:
output = str(e)
return output
| [
"useful for analyzing images"
] |
2024-01-10 | homanp/superagent | libs~superagent~app~vectorstores~pinecone.py | import logging
import uuid
from typing import Literal
import backoff
import pinecone
from decouple import config
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings # type: ignore
from pinecone.core.client.models import QueryResponse
from pydantic.dataclasses import dataclass
from app.utils.helpers import get_first_non_null
logger = logging.getLogger(__name__)
@dataclass
class Response:
id: str
text: str
metadata: dict
def to_dict(self):
return {
"id": self.id,
"text": self.text,
"metadata": self.metadata,
}
def __init__(self, id: str, text: str, metadata: dict | None = None):
"""Core dataclass for single record."""
self.id = id
self.text = text
self.metadata = metadata or {}
class PineconeVectorStore:
def __init__(
self,
options: dict,
index_name: str = None,
environment: str = None,
pinecone_api_key: str = None,
) -> None:
self.options = options
variables = {
"PINECONE_INDEX": get_first_non_null(
index_name,
options.get("PINECONE_INDEX"),
config("PINECONE_INDEX", None),
),
"PINECONE_ENVIRONMENT": get_first_non_null(
environment,
options.get("PINECONE_ENVIRONMENT"),
config("PINECONE_ENVIRONMENT", None),
),
"PINECONE_API_KEY": get_first_non_null(
pinecone_api_key,
options.get("PINECONE_API_KEY"),
config("PINECONE_API_KEY", None),
),
}
for var, value in variables.items():
if not value:
raise ValueError(
f"Please provide a {var} via the "
f"`{var}` environment variable"
"or check the `VectorDb` table in the database."
)
pinecone.init(
api_key=variables["PINECONE_API_KEY"],
environment=variables["PINECONE_ENVIRONMENT"],
)
self.index_name = variables["PINECONE_INDEX"]
logger.info(f"Index name: {self.index_name}")
self.index = pinecone.Index(self.index_name)
self.embeddings = OpenAIEmbeddings(
model="text-embedding-ada-002", openai_api_key=config("OPENAI_API_KEY")
) # type: ignore
@backoff.on_exception(backoff.expo, Exception, max_tries=3)
def _embed_with_retry(self, texts):
return self.embeddings.embed_documents(texts)
def embed_documents(self, documents: list[Document], batch_size: int = 100):
chunks = [
{
"id": str(uuid.uuid4()),
"text": doc.page_content,
"chunk": i,
**doc.metadata,
}
for i, doc in enumerate(documents)
]
def batch_generator(chunks, batch_size):
for i in range(0, len(chunks), batch_size):
i_end = min(len(chunks), i + batch_size)
batch = chunks[i:i_end]
yield batch
batch_gen = batch_generator(chunks, batch_size)
for batch in batch_gen:
batch_ids = [chunk["id"] for chunk in batch]
texts_to_embed = [chunk["text"] for chunk in batch]
logger.debug(f"Texts to embed: {texts_to_embed}")
embeddings = self._embed_with_retry(texts_to_embed)
to_upsert = list(zip(batch_ids, embeddings, batch))
logger.debug(f"Upserting: {to_upsert}")
try:
res = self.index.upsert(vectors=to_upsert)
logger.info(f"Upserted documents. {res}")
except Exception as e:
logger.error(f"Failed to upsert documents. Error: {e}")
return self.index.describe_index_stats()
def _extract_match_data(self, match):
"""Extracts id, text, and metadata from a match."""
id = match.id
text = match.metadata.get("text")
metadata = match.metadata
metadata.pop("text")
return id, text, metadata
def _format_response(self, response: QueryResponse) -> list[Response]:
"""
Formats the response dictionary from the vector database into a list of
Response objects.
"""
if not response.get("matches"):
return []
ids, texts, metadata = zip(
*[self._extract_match_data(match) for match in response["matches"]]
)
responses = [
Response(id=id, text=text, metadata=meta)
for id, text, meta in zip(ids, texts, metadata)
]
return responses
def query(
self,
prompt: str,
metadata_filter: dict | None = None,
top_k: int = 3,
namespace: str | None = None,
min_score: float | None = None, # new argument for minimum similarity score
) -> list[Response]:
"""
Returns results from the vector database.
"""
vector = self.embeddings.embed_query(prompt)
raw_responses: QueryResponse = self.index.query(
vector,
filter=metadata_filter,
top_k=top_k,
include_metadata=True,
namespace=namespace,
)
logger.debug(f"Raw responses: {raw_responses}") # leaving for debugging
# filter raw_responses based on the minimum similarity score if min_score is set
if min_score is not None:
raw_responses["matches"] = [
match
for match in raw_responses["matches"]
if match["score"] >= min_score
]
formatted_responses = self._format_response(raw_responses)
return formatted_responses
def query_documents(
self,
prompt: str,
datasource_id: str,
top_k: int | None,
query_type: Literal["document", "all"] = "document",
) -> list[str]:
if top_k is None:
top_k = 5
logger.info(f"Executing query with document id in namespace {datasource_id}")
documents_in_namespace = self.query(
prompt=prompt,
namespace=datasource_id,
)
if documents_in_namespace == [] and query_type == "document":
logger.info("No result with namespace. Executing query without namespace.")
documents_in_namespace = self.query(
prompt=prompt,
metadata_filter={"datasource_id": datasource_id},
top_k=top_k,
)
# A hack if we want to search in all documents but with backwards compatibility
# with namespaces
if documents_in_namespace == [] and query_type == "all":
logger.info("Querying all documents.")
documents_in_namespace = self.query(
prompt=prompt,
top_k=top_k,
)
return [str(response) for response in documents_in_namespace]
def delete(self, datasource_id: str):
try:
logger.info(f"Deleting vectors for datasource with id: {datasource_id}")
self.index.delete(filter={"datasource_id": datasource_id})
except Exception as e:
logger.error(f"Failed to delete {datasource_id}. Error: {e}")
def clear_cache(self, agent_id: str, datasource_id: str | None = None):
try:
filter_dict = {"agentId": agent_id, "type": "cache"}
if datasource_id:
filter_dict["datasource_id"] = datasource_id
self.index.delete(filter=dict(filter_dict), delete_all=False)
logger.info(f"Deleted vectors with agentId `{agent_id}`.")
except Exception as e:
logger.error(
f"Failed to delete vectors with agentId `{agent_id}`. Error: {e}"
)
| [] |
2024-01-10 | homanp/superagent | libs~superagent~app~tools~openapi.py | import asyncio
import json
from langchain.chains.openai_functions.openapi import get_openapi_chain
from langchain.tools import BaseTool
class Openapi(BaseTool):
name = "API"
description = "useful for querying an api"
return_direct = False
def _run(self, input: str) -> str:
openapi_url = self.metadata["openApiUrl"]
headers = self.metadata.get("headers")
agent = get_openapi_chain(
spec=openapi_url, headers=json.loads(headers) if headers else None
)
output = agent.run(input)
return output
async def _arun(self, input: str) -> str:
openapi_url = self.metadata["openApiUrl"]
headers = self.metadata.get("headers")
try:
agent = get_openapi_chain(
spec=openapi_url, headers=json.loads(headers) if headers else None
)
loop = asyncio.get_event_loop()
output = await loop.run_in_executor(None, agent.run, input)
except Exception as e:
output = str(e)
return output
| [
"useful for querying an api"
] |
2024-01-10 | homanp/superagent | libs~superagent~app~tools~metaphor.py | from langchain.tools import BaseTool
from langchain.utilities import MetaphorSearchAPIWrapper
class MetaphorSearch(BaseTool):
name = "metaphor search"
description = "useful for researching a certain topic"
return_direct = False
def _run(self, search_query: str) -> str:
search = MetaphorSearchAPIWrapper(
metaphor_api_key=self.metadata["metaphorApiKey"]
)
output = search.results(search_query, 10, use_autoprompt=True)
return output
async def _arun(self, search_query: str) -> str:
search = MetaphorSearchAPIWrapper(
metaphor_api_key=self.metadata["metaphorApiKey"]
)
output = await search.results_async(search_query, 10, use_autoprompt=True)
return output
| [
"useful for researching a certain topic"
] |
2024-01-10 | livio-lopes/desafio-etl-bootcamp-santander | src~transform.py | from extract import users
import openai
API_KEY = input('Insira sua chave OPEN_AI_KEY: ')
users_tranformed = user
def generate_ai_news(user):
completion = openai.ChatCompletion.create(
model='gpt-3.5-turbo',
messages=[
{
"role":"system",
"content":"Você é um especialista em marketing bancário"
},
{
'role':'user',
'content': f"Crie uma mensagem para {user['name']} sobre a importancia de investimentos (máximo de 80 caracteres)",
}
]
)
return completion.choices[0].message.content
for user in users_tranformed:
news = generate_ai_news(user)
user['news'].append({
"icon":"https://digitalinnovationone.github.io/santander-dev-week-2023-api/icons/credit.svg",
"description": news
})
| [
"Você é um especialista em marketing bancário",
"Crie uma mensagem para PLACEHOLDER sobre a importancia de investimentos (máximo de 80 caracteres)"
] |
2024-01-10 | LeoGrin/lm_tab | src~encodings.py | # create a new model
import torch.nn as nn
import torch
from tabpfn import TabPFNClassifier
from src.utils import preprocess_input
from transformers import BertModel, BertTokenizer, AutoTokenizer
from sentence_transformers import SentenceTransformer
import numpy as np
from skrub import MinHashEncoder, TableVectorizer
import pandas as pd
import tiktoken
from src.models import BertAndTabPFN
import os
import openai
from openai.embeddings_utils import get_embedding
from dotenv import load_dotenv
from tqdm import tqdm
from ast import literal_eval
from tenacity import retry, stop_after_attempt, wait_random_exponential
from torch.utils.data import DataLoader
from sklearn.feature_extraction.text import HashingVectorizer
from scipy.sparse import hstack, vstack
from fasttext import load_model
import fasttext.util
from sklearn.base import BaseEstimator, TransformerMixin
#taken from https://github.com/pcerda/string_categorical_encoders/blob/master/column_encoder.py
class PretrainedFastText(BaseEstimator, TransformerMixin):
"""
Category embedding using a fastText pretrained model.
"""
def __init__(self, n_components, language='english'):
self.n_components = n_components
self.language = language
def fit(self, X, y=None):
path_dict = dict(
#english='crawl-300d-2M-subword.bin',
english="cc.en.300.bin",
french='cc.fr.300.bin',
hungarian='cc.hu.300.bin')
if self.language not in path_dict.keys():
raise AttributeError(
'language %s has not been downloaded yet' % self.language)
self.ft_model = load_model(path_dict[self.language])
# reduce dimension if necessary
if self.n_components < 300:
fasttext.util.reduce_model(self.ft_model, self.n_components)
return self
def transform(self, X):
X = X.ravel()
unq_X, lookup = np.unique(X, return_inverse=True)
X_dict = dict()
for i, x in enumerate(unq_X):
if x.find('\n') != -1:
unq_X[i] = ' '.join(x.split('\n'))
for x in unq_X:
X_dict[x] = self.ft_model.get_sentence_vector(x)
X_out = np.empty((len(lookup), self.n_components))
for x, x_out in zip(unq_X[lookup], X_out):
x_out[:] = X_dict[x]
return X_out
@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))
def get_batch_embeddings(texts: str, model="text-embedding-ada-002"):
res = openai.Embedding.create(input=texts, model=model)["data"]
return np.array([literal_eval(str(x["embedding"])) for x in res])
from transformers import AutoTokenizer, AutoModel
import torch
#Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
token_embeddings = model_output[0] #First element of model_output contains all token embeddings
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
sum_embeddings = torch.sum(token_embeddings * input_mask_expanded, 1)
sum_mask = torch.clamp(input_mask_expanded.sum(1), min=1e-9)
return sum_embeddings / sum_mask
def encode_hf(sentences, model, batch_size=1):
print("Encoding with HF")
# Load AutoModel from huggingface model repository
tokenizer = AutoTokenizer.from_pretrained(model)
model = AutoModel.from_pretrained(model)
# Set padding token if not set
if tokenizer.pad_token is None:
print("Setting padding token")
tokenizer.pad_token = tokenizer.eos_token
# Make sure model and tokenizer are on the same device
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = model.to(device)
# Create a DataLoader to handle batching of the sentences
sentences_loader = DataLoader(sentences, batch_size=batch_size, shuffle=False)
# List to store all embeddings
all_embeddings = []
for sentence_batch in sentences_loader:
# Tokenize sentences
encoded_input = tokenizer(sentence_batch, padding=True, truncation=True, max_length=128, return_tensors='pt')
# Move tensors to the same device as the model
encoded_input = encoded_input.to(device)
# Compute token embeddings
with torch.no_grad():
model_output = model(**encoded_input)
# Perform pooling. In this case, mean pooling
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
# Move embeddings to CPU, convert to numpy and store
all_embeddings.extend(sentence_embeddings.cpu().numpy())
return np.array(all_embeddings)
def encode(X, col, encoder_name, dataset_name=None, use_cache=True, override_cache=False, fail_if_not_cached=False):
print("working dir", os.getcwd())
if use_cache and dataset_name is not None and not override_cache:
# check if the cache exists
try:
res = np.load(f"cache/{dataset_name}_{col}_{encoder_name.replace('/', '_')}.npy")
print("Loaded from cache")
return res
except FileNotFoundError:
if fail_if_not_cached:
raise FileNotFoundError(f"Cache not found for {dataset_name}_{col}_{encoder_name.replace('/', '_')}.npy")
print("Cache not found, computing")
pass
X_col = np.array(X[col])
encoder_type, encoder_params = encoder_name.split("__", 1)
print("Encoder type", encoder_type)
print("Encoder params", encoder_params)
if encoder_type == "lm":
encoder = SentenceTransformer(encoder_params)
X_col = X_col.reshape(-1)
if "e5" in encoder_params:
print("Seems like a e5 model, adding 'query: '")
print(encoder_params)
X_col = np.array(["query: " + elem for elem in X_col])
print("Samples:")
print(X_col[:10])
res = encoder.encode(X_col)
elif encoder_type == "hf":
res = encode_hf(X_col.tolist(), encoder_params)
elif encoder_type == "fasttext":
res = PretrainedFastText(n_components=int(encoder_params)).fit_transform(X_col)
elif encoder_type == "skrub":
if encoder_params.startswith("minhash"):
n_components = int(encoder_params.split("_")[1])
print("n components", n_components)
if len(encoder_params.split("_")) > 2:
analyzer = encoder_params.split("_")[2]
tokenizer = encoder_params.split("_")[3]
if tokenizer == "none":
tokenizer = None
print(f"Using {analyzer} analyser and {tokenizer} tokenizer, {n_components} components")
else:
analyzer = "char"
tokenizer = None
encoder = MinHashEncoder(n_components=n_components, analyzer=analyzer, tokenizer=tokenizer,
ngram_range=(2, 4) if analyzer == "char" else (1, 3), hashing="fast" if analyzer == "char" else "murmur")
# reshape to 2d array
# if pandas dataframe, convert to numpy array
res = X_col.reshape(-1, 1)
res = encoder.fit_transform(res)
else:
raise ValueError(f"Unknown skrub encoder {encoder_params}")
elif encoder_type == "openai":
load_dotenv() # take environment variables from .env.
openai_api_key = os.getenv('OPENAI_API_KEY')
if openai_api_key is None:
raise ValueError("OPENAI_API_KEY is not set")
else:
openai.api_key = openai_api_key
embedding_model = "text-embedding-ada-002"
embedding_encoding = "cl100k_base" # this the encoding for text-embedding-ada-002
#max_tokens = 8000
#encoding = tiktoken.get_encoding(embedding_encoding)
#n_tokens = X_col.combined.apply(lambda x: len(encoding.encode(x)))
## check that the max number of tokens is not exceeded
#if (n_tokens > max_tokens).any():
# raise ValueError("The maximum number of tokens is exceeded")
#res = np.array([get_embedding(x, engine=embedding_model) for x in X_col.tolist()])
#df = pd.DataFrame(X_col, columns=["name"])
#res = df.name.apply(lambda x: get_embedding(x, engine=embedding_model))
# embed in batch of 100
for i in tqdm(range(0, len(X_col), 500)):
batch = X_col[i:i+500].tolist()
print(batch)
res_batch = get_batch_embeddings(batch, model=embedding_model)
if i == 0:
res = res_batch
else:
res = np.concatenate([res, res_batch], axis=0)
elif encoder_type == "bert_custom":
#FIXME: results are not great with thisr
transformer_name = encoder_params
# I could instantiate just Bert but this is to check for bugs in BertAndTabPFN
lm = BertAndTabPFN(preprocess_before_tabpfn=True, linear_translator=False, transformer_name=transformer_name,
dim_tabpfn=30, lora=False, disable_dropout=False).to('cuda')
lm .eval()
tokenizer = AutoTokenizer.from_pretrained(transformer_name)
texts = X_col.tolist()
all_encoding = tokenizer(texts, padding=True, truncation=True, return_tensors="pt")
# print the non-padded length median and quantiles
non_padded_lengths = np.sum(all_encoding["attention_mask"].numpy(), axis=1)
max_length = np.quantile(non_padded_lengths, 0.95)
all_encoding = tokenizer(texts, padding="max_length", truncation=True, max_length=int(max_length), return_tensors="pt")
# move to gpu
all_encoding = {k: v.to('cuda') for k, v in all_encoding.items()}
# generate random y
with torch.no_grad():
res = lm (**all_encoding, y=None, return_tabpfn_input=True).cpu().detach().numpy()
elif encoder_type == "bert_custom_pooling":
transformer_name = encoder_params
# I could instantiate just Bert but this is to check for bugs in BertAndTabPFN
lm = BertAndTabPFN(preprocess_before_tabpfn=True, linear_translator=False, transformer_name=transformer_name,
dim_tabpfn=30, lora=False, disable_dropout=False, embedding_stragegy="mean_pooling").to('cuda')
lm .eval()
tokenizer = AutoTokenizer.from_pretrained(transformer_name)
texts = X_col.tolist()
all_encoding = tokenizer(texts, padding=True, truncation=True, return_tensors="pt")
# # print the non-padded length median and quantiles
# non_padded_lengths = np.sum(all_encoding["attention_mask"].numpy(), axis=1)
# max_length = np.quantile(non_padded_lengths, 0.95)
# all_encoding = tokenizer(texts, padding="max_length", truncation=True, max_length=int(max_length), return_tensors="pt")
# move to gpu
all_encoding = {k: v.to('cuda') for k, v in all_encoding.items()}
# generate random y
with torch.no_grad():
res = lm (**all_encoding, y=None, return_tabpfn_input=True).cpu().detach().numpy()
if use_cache and dataset_name is not None:
print("Saving to cache")
# save the cache
np.save(f"cache/{dataset_name}_{col}_{encoder_name.replace('/', '_')}.npy", res)
return res
def encode_high_cardinality_features(X, encoder_name, dataset_name=None, use_cache=True, override_cache=False, cardinality_threshold=30, fail_if_not_cached=False):
tb = TableVectorizer(cardinality_threshold=cardinality_threshold,
high_card_cat_transformer = "passthrough",
low_card_cat_transformer = "passthrough",
numerical_transformer = "passthrough",
datetime_transformer = "passthrough",
) #just to get the high cardinality columns
tb.fit(X)
# get high cardinality columns
high_cardinality_columns = []
for name, trans, cols in tb.transformers_:
print(name, cols)
if "high" in name:
high_cardinality_columns.extend(cols)
break
print("High cardinality columns", high_cardinality_columns)
# encode the high cardinality columns
res = []
lengths = []
for col in high_cardinality_columns:
new_enc = encode(X, col, encoder_name, dataset_name=dataset_name, use_cache=use_cache, override_cache=override_cache, fail_if_not_cached=fail_if_not_cached)
res.append(new_enc)
lengths.append(new_enc.shape[1])
# create a dataframe with name original_col_name__index
df = pd.DataFrame(np.concatenate(res, axis=1))
#df = pd.DataFrame(np.concatenate(res, axis=1))
# for i in range(len(res)):
# for j in range(lengths[i]):
# df.rename(columns={i*lengths[i] + j: high_cardinality_columns[i] + "__" + str(j)}, inplace=True)
new_column_names = []
for i in range(len(res)):
for j in range(lengths[i]):
new_column_names.append(high_cardinality_columns[i] + "__" + str(j))
df.columns = new_column_names
return df, X.drop(high_cardinality_columns, axis=1)
| [] |
2024-01-10 | IQ-SCM/gwpy | gwpy~table~io~cwb.py | # -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2014-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Read events from Coherent Wave-Burst (cWB)-format ROOT files.
"""
import re
from astropy.io.ascii import core
from ...io import registry
from .. import (Table, EventTable)
from .utils import decorate_registered_reader
__author__ = 'Duncan Macleod <[email protected]>'
# -- ROOT ---------------------------------------------------------------------
def table_from_cwb(source, *args, **kwargs):
"""Read an `EventTable` from a Coherent WaveBurst ROOT file
This function just redirects to the format='root' reader with appropriate
defaults.
"""
return EventTable.read(source, 'waveburst', *args, format='root', **kwargs)
registry.register_reader('root.cwb', EventTable, table_from_cwb)
# -- ASCII --------------------------------------------------------------------
class CwbHeader(core.BaseHeader):
"""Parser for cWB ASCII header
"""
def get_cols(self, lines):
"""Initialize Column objects from a multi-line ASCII header
Parameters
----------
lines : `list`
List of table lines
"""
re_name_def = re.compile(
r'^\s*#\s+' # whitespace and comment marker
r'(?P<colnumber>[0-9]+)\s+-\s+' # number of column
r'(?P<colname>(.*))'
)
self.names = []
include_cuts = False
for line in lines:
if not line: # ignore empty lines in header (windows)
continue
if not line.startswith('# '): # end of header lines
break
if line.startswith('# -/+'):
include_cuts = True
else:
match = re_name_def.search(line)
if match:
self.names.append(match.group('colname').rstrip())
if not self.names:
raise core.InconsistentTableError(
'No column names found in cWB header')
if include_cuts:
self.cols = [ # pylint: disable=attribute-defined-outside-init
core.Column(name='selection cut 1'),
core.Column(name='selection cut 2'),
]
else:
self.cols = [] # pylint: disable=attribute-defined-outside-init
for name in self.names:
col = core.Column(name=name)
self.cols.append(col)
def write(self, lines):
if 'selection cut 1' in self.colnames:
lines.append('# -/+ - not passed/passed final selection cuts')
for i, name in enumerate(self.colnames):
lines.append('# %.2d - %s' % (i+1, name))
class CwbData(core.BaseData):
"""Parser for cWB ASCII data
"""
comment = '#'
class Cwb(core.BaseReader):
"""Read an Cwb file
"""
_format_name = 'cwb'
_io_registry_can_write = True
_description = 'cWB EVENTS format table'
header_class = CwbHeader
data_class = CwbData
# register for EventTable
registry.register_reader(
"ascii.cwb",
EventTable,
registry.get_reader("ascii.cwb", Table),
)
decorate_registered_reader(
"ascii.cwb",
EventTable,
)
| [] |
2024-01-10 | marvinbraga/marvin_assistant | frontend~audio~abstract_loaders.py | from typing import Iterable
from langchain.document_loaders import BlobLoader, Blob, FileSystemBlobLoader
class AbstractAudioLoader(BlobLoader):
glob = None
def __init__(self, save_dir: str):
self.save_dir = save_dir
def yield_blobs(self) -> Iterable[Blob]:
if self.glob is None:
assert "Você deve informar um valor para o atributo 'glob' de sua classe."
loader = FileSystemBlobLoader(self.save_dir, glob=self.glob)
for blob in loader.yield_blobs():
yield blob
| [] |
2024-01-10 | marvinbraga/marvin_assistant | frontend~audio~audio_to_text.py | import os
import openai
from dotenv import load_dotenv, find_dotenv
from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.parsers import OpenAIWhisperParser
load_dotenv(find_dotenv())
openai.api_key = os.environ["OPENAI_API_KEY"]
class AudioTranscript:
def __init__(self, loader):
self.loader = loader
self._docs = None
@property
def docs(self):
return self._docs
def execute(self):
parser = OpenAIWhisperParser()
loader = GenericLoader(self.loader, parser)
try:
self._docs = loader.load()
except Exception as e:
print(f"Error: {e}")
return self
| [] |
2024-01-10 | marvinbraga/marvin_assistant | backend~services~llm_api~services.py | from dotenv import load_dotenv, find_dotenv
from langchain.chains import ConversationChain
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain.schema import AIMessage, HumanMessage, SystemMessage, BaseMessage
load_dotenv(find_dotenv())
class ConversationMemoryLoader:
def __init__(self, memory, data):
self._data = data
self._memory = memory
self._messages: list[BaseMessage] = []
@property
def messages(self):
return self._messages
def _create_messages(self):
role_class_map = {
"ai": AIMessage,
"human": HumanMessage,
"system": SystemMessage
}
self._messages = [role_class_map[message["role"]](content=message["content"]) for message in self._data]
return self
def _update_memory(self):
# Limpa a memória.
self._memory.load_memory_variables({})
# Processa as mensagens
last_human_msg = None
for msg in self._messages:
if isinstance(msg, HumanMessage):
last_human_msg = msg
elif isinstance(msg, AIMessage) and last_human_msg is not None:
self._memory.save_context(
{"input": last_human_msg.content},
{"output": msg.content}
)
last_human_msg = None
return self
def load(self):
self._create_messages()._update_memory()
return self
class LLMConnection:
def __init__(self, data):
self._data = data
self._messages: list[BaseMessage] = []
self._memory = ConversationBufferMemory(return_messages=True, ai_prefix="AI Assistant")
self._prompt = None
self._output = ""
@property
def output(self):
return self._output
def _create_prompt(self):
system_message = self._messages.pop(0)
prompt = system_message.content + """
Conversa atual:
{history}
AI Assistant:
{input}
"""
self._prompt = PromptTemplate(input_variables=["history", "input"], template=prompt)
return self
def send(self):
data = self._data["conversation"]["messages"]
self._messages = ConversationMemoryLoader(self._memory, data).load().messages
self._create_prompt()
message = self._messages[-1]
llm = ChatOpenAI(temperature=0.9, model="gpt-3.5-turbo")
chain = ConversationChain(
llm=llm,
memory=self._memory,
prompt=self._prompt,
)
self._output = chain.predict(input=message.content)
return self
| [
"system_message.content + \"\"\"\n \n Conversa atual:\n {history}\n \n AI Assistant: \n {input}\n ",
"\n \n Conversa atual:\n {history}\n \n AI Assistant: \n {input}\n "
] |
2024-01-10 | marvinbraga/marvin_assistant | backend~services~chat_api~conversations.py | import json
import os
from dotenv import load_dotenv, find_dotenv
from langchain.memory import RedisChatMessageHistory
from langchain.schema import SystemMessage
load_dotenv(find_dotenv())
class ConversationSerializer:
def __init__(self, conversation):
self._conversation = conversation
def dict(self):
return {
"conversation": {
"key_id": self._conversation.session_id,
"messages": [
{"role": m.type, "content": m.content} for m in self._conversation.get_messages()
]
}
}
def json(self):
return json.dumps(self.dict())
class Conversation:
def __init__(self, session_id):
redis_url = f"redis://{os.environ['REDIS_HOST']}:6379"
self._session_id = session_id
self._memory = RedisChatMessageHistory(
session_id=session_id,
url=redis_url,
)
@property
def session_id(self):
return self._session_id
def get_messages(self):
return self._memory.messages
def post_system_message(self, message: str):
self._memory.add_message(SystemMessage(content=message))
return self
def post_human_message(self, message: str):
self._memory.add_user_message(message)
return self
def post_ai_message(self, message: str):
self._memory.add_ai_message(message)
return self
def get_messages_json(self, as_str=True):
serializer = ConversationSerializer(self)
result = serializer.json() if as_str else serializer.dict()
return result
if __name__ == '__main__':
c1 = Conversation(session_id="user1-conversation1")
c2 = Conversation(session_id="user2-conversation1")
print("TESTES COM USER1 E CONVERSATION1")
print("--------------------------------")
print(c1.get_messages())
c1.post_human_message("Testando envio de mensagem do user1.")
c1.post_ai_message("Testando envio de resposta da ia para o user c1.")
print(c1.get_messages())
print(c1.get_messages_json())
print("TESTES COM USER2 E CONVERSATION1")
print("--------------------------------")
print(c2.get_messages())
| [] |
2024-01-10 | zengxishenggmail/guidance | guidance~llms~caches~_diskcache.py | import os
import diskcache
import platformdirs
from guidance.llms.caches import Cache
class DiskCache(Cache):
"""DiskCache is a cache that uses diskcache lib."""
def __init__(self, llm_name: str):
self._diskcache = diskcache.Cache(
os.path.join(
platformdirs.user_cache_dir("guidance"), f"_{llm_name}.diskcache"
)
)
def __getitem__(self, key: str) -> str:
return self._diskcache[key]
def __setitem__(self, key: str, value: str) -> None:
self._diskcache[key] = value
def __contains__(self, key: str) -> bool:
return key in self._diskcache
def clear(self):
self._diskcache.clear()
| [] |
2024-01-10 | linjohnss/NYCU_DL | lab5~breakout.py | '''DLP DQN Lab'''
__author__ = 'chengscott'
__copyright__ = 'Copyright 2020, NCTU CGI Lab'
import argparse
from collections import deque
import itertools
import random
import time
import gym
import numpy as np
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
from atari_wrappers import wrap_deepmind, make_atari
# gym version: 0.15.7
# ReplayMemory
# DQN init: _memory
# select_action: note that the state transfer to Tensor in train function
# append, _update_behavior_network, _update_target_network
# train, test: use deque to stack frames
class ReplayMemory(object):
## TODO ##
def __init__(self, capacity):
c,h,w = 5, 84, 84
self.capacity = capacity
self.m_states = torch.zeros((capacity, c, h, w), dtype=torch.uint8)
self.m_actions = torch.zeros((capacity, 1), dtype=torch.long)
self.m_rewards = torch.zeros((capacity, 1), dtype=torch.int8)
self.m_dones = torch.zeros((capacity, 1), dtype=torch.bool)
self.position = 0
self.size = 0
def push(self, state, action, reward, done):
"""Saves a transition"""
# save five frame for a SARS (four in front is state s; four in back is next_state s')
self.m_states[self.position] = state # 5,84,84
self.m_actions[self.position,0] = action
self.m_rewards[self.position,0] = reward
self.m_dones[self.position,0] = done
self.position = (self.position + 1) % self.capacity
self.size = max(self.size, self.position)
def sample(self, batch_size, device):
"""Sample a batch of transitions"""
i = torch.randint(0, high=self.size, size=(batch_size,))
b_state = self.m_states[i, :4].to(device)
b_next_state = self.m_states[i, 1:].to(device)
b_action = self.m_actions[i].to(device)
b_reward = self.m_rewards[i].to(device).float()
b_done = self.m_dones[i].to(device).float()
return b_state, b_action, b_reward, b_next_state, b_done
def __len__(self):
return self.size
class Net(nn.Module):
def __init__(self, num_classes=4, init_weights=True):
super(Net, self).__init__()
self.cnn = nn.Sequential(nn.Conv2d(4, 32, kernel_size=8, stride=4),
nn.ReLU(True),
nn.Conv2d(32, 64, kernel_size=4, stride=2),
nn.ReLU(True),
nn.Conv2d(64, 64, kernel_size=3, stride=1),
nn.ReLU(True)
)
self.classifier = nn.Sequential(nn.Linear(7*7*64, 512),
nn.ReLU(True),
nn.Linear(512, num_classes)
)
if init_weights:
self._initialize_weights()
def forward(self, x):
x = x.float() / 255.
x = self.cnn(x)
x = torch.flatten(x, start_dim=1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0.0)
elif isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
nn.init.constant_(m.bias, 0.0)
class DQN:
def __init__(self, args):
self._behavior_net = Net().to(args.device)
self._target_net = Net().to(args.device)
# initialize target network
self._target_net.load_state_dict(self._behavior_net.state_dict())
self._target_net.eval()
self._optimizer = torch.optim.Adam(self._behavior_net.parameters(), lr=args.lr, eps=1.5e-4)
## TODO ##
"""Initialize replay buffer"""
self._memory = ReplayMemory(args.capacity)
## config ##
self.device = args.device
self.batch_size = args.batch_size
self.gamma = args.gamma
self.freq = args.freq
self.target_freq = args.target_freq
def select_action(self, state, epsilon, action_space):
'''epsilon-greedy based on behavior network'''
## TODO ##
# With probability eps select a random action
if random.random() < epsilon:
return action_space.sample() # from OpenAI gym
# With probability (1-eps) select a max Q from behavior net
else:
# convert state to one row, find the maximum Q in the row and return corresponding index
return self._behavior_net(state.to(self.device)).max(dim=1)[1].item()
def append(self, state, action, reward, done):
## TODO ##
"""Push a transition into replay buffer"""
self._memory.push(state, action, reward, done)
def update(self, total_steps):
if total_steps % self.freq == 0:
self._update_behavior_network(self.gamma)
if total_steps % self.target_freq == 0:
self._update_target_network()
def _update_behavior_network(self, gamma):
# sample a minibatch of transitions
state, action, reward, next_state, done = self._memory.sample(self.batch_size, self.device)
## TODO ##
# given behavior net, get Q value via gather (input column index (action) and replace it)
q_value = self._behavior_net(state).gather(dim=1, index=action.long())
with torch.no_grad():
# choose max Q(s', a') from target net
q_next = self._target_net(next_state).max(dim=1)[0].view(-1,1)
q_target = reward + gamma * q_next * (1- done) # final state: done=1
# loss function
#criterion = nn.MSELoss()
criterion = nn.SmoothL1Loss()
loss = criterion(q_value, q_target)
# optimize
self._optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(self._behavior_net.parameters(), 5)
self._optimizer.step()
def _update_target_network(self):
'''update target network by copying from behavior network'''
## TODO ##
self._target_net.load_state_dict(self._behavior_net.state_dict())
def save(self, model_path, checkpoint=False):
if checkpoint:
torch.save({
'behavior_net': self._behavior_net.state_dict(),
'target_net': self._target_net.state_dict(),
'optimizer': self._optimizer.state_dict(),
}, model_path)
else:
torch.save({
'behavior_net': self._behavior_net.state_dict(),
}, model_path)
def load(self, model_path, checkpoint=False):
model = torch.load(model_path)
self._behavior_net.load_state_dict(model['behavior_net'])
if checkpoint:
self._target_net.load_state_dict(model['target_net'])
self._optimizer.load_state_dict(model['optimizer'])
def train(args, agent, writer):
print('Start Training')
env_raw = make_atari('BreakoutNoFrameskip-v4')
env = wrap_deepmind(env_raw, frame_stack=False, episode_life=True, clip_rewards=True)
action_space = env.action_space
total_steps, epsilon = 1, 1.
ewma_reward = 0
## TODO ##
q = deque(maxlen=5)
for episode in range(args.episode):
total_reward = 0
state = env.reset()
## TODO ##
# need at least five frames to store in replay memory
state, reward, done, _ = env.step(1) # fire first !!!
for _ in range(10): # no-op
state, _, _, _ = env.step(0)
n_frame = torch.from_numpy(state)
h = n_frame.shape[-2]
n_frame = n_frame.view(1,h,h)
q.append(n_frame)
for t in itertools.count(start=1):
if total_steps < args.warmup:
action = action_space.sample()
else:
# TODO: Select and perform an action
state = torch.cat(list(q))[1:].unsqueeze(0)
# select action
action = agent.select_action(state, epsilon, action_space)
# decay epsilon
epsilon -= (1 - args.eps_min) / args.eps_decay
epsilon = max(epsilon, args.eps_min)
# execute action
state, reward, done, _ = env.step(action)
## TODO ##
# add frames into deque
n_frame = torch.from_numpy(state)
h = n_frame.shape[-2]
n_frame = n_frame.view(1,h,h)
q.append(n_frame)
## TODO ##
# store transition
agent.append(torch.cat(list(q)).unsqueeze(0), action, reward, done)
if total_steps >= args.warmup:
agent.update(total_steps)
total_reward += reward
if total_steps % args.eval_freq == 0:
"""You can write another evaluate function, or just call the test function."""
test(args, agent, writer)
agent.save(args.model + "dqn_" + str(total_steps) + ".pt")
total_steps += 1
if done:
ewma_reward = 0.05 * total_reward + (1 - 0.05) * ewma_reward
writer.add_scalar('Train/Episode Reward', total_reward, episode)
writer.add_scalar('Train/Ewma Reward', ewma_reward, episode)
print('Step: {}\tEpisode: {}\tLength: {:3d}\tTotal reward: {:.2f}\tEwma reward: {:.2f}\tEpsilon: {:.3f}'
.format(total_steps, episode, t, total_reward, ewma_reward, epsilon))
break
env.close()
def test(args, agent, writer):
print('Start Testing')
env_raw = make_atari('BreakoutNoFrameskip-v4')
env = wrap_deepmind(env_raw, episode_life=False, clip_rewards=False, frame_stack=False)
action_space = env.action_space
e_rewards = []
## TODO ##
q = deque(maxlen=5)
for i in range(args.test_episode):
state = env.reset()
e_reward = 0
done = False
## TODO ##
# need at least five frames to store in replay memory
for _ in range(10): # no-op
state, _, _, _= env.step(0)
n_frame = torch.from_numpy(state)
h = n_frame.shape[-2]
n_frame = n_frame.view(1,h,h)
q.append(n_frame)
while not done:
time.sleep(0.01)
env.render()
# TODO: Select and perform an action
state = torch.cat(list(q))[1:].unsqueeze(0)
# select action
action = agent.select_action(state, args.test_epsilon, action_space)
state, reward, done, _ = env.step(action)
# TODO: add frames into deque
n_frame = torch.from_numpy(state)
h = n_frame.shape[-2]
n_frame = n_frame.view(1,h,h)
q.append(n_frame)
# update e_reward
e_reward += reward
print('episode {}: {:.2f}'.format(i+1, e_reward))
e_rewards.append(e_reward)
env.close()
print('Average Reward: {:.2f}'.format(float(sum(e_rewards)) / float(args.test_episode)))
def main():
## arguments ##
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-d', '--device', default='cuda')
parser.add_argument('-m', '--model', default='ckpt/')
parser.add_argument('--logdir', default='log/dqn')
# train
parser.add_argument('--warmup', default=20000, type=int)
parser.add_argument('--episode', default=400000, type=int)
parser.add_argument('--capacity', default=100000, type=int)
parser.add_argument('--batch_size', default=32, type=int)
parser.add_argument('--lr', default=0.0000625, type=float)
parser.add_argument('--eps_decay', default=1000000, type=float)
parser.add_argument('--eps_min', default=0.1, type=float)
parser.add_argument('--gamma', default=.99, type=float)
parser.add_argument('--freq', default=4, type=int)
parser.add_argument('--target_freq', default=10000, type=int)
parser.add_argument('--eval_freq', default=200000, type=int)
# test
parser.add_argument('--test_only', action='store_true')
parser.add_argument('-tmp', '--test_model_path', default='ckpt/dqn_17600000.pt')
parser.add_argument('--render', action='store_true')
parser.add_argument('--test_episode', default=10, type=int)
parser.add_argument('--seed', default=20230422, type=int)
parser.add_argument('--test_epsilon', default=0.01, type=float)
args = parser.parse_args()
## main ##
agent = DQN(args)
writer = SummaryWriter(args.logdir)
if args.test_only:
agent.load(args.test_model_path)
test(args, agent, writer)
else:
train(args, agent, writer)
if __name__ == '__main__':
main() | [] |
2024-01-10 | ShyamsaranRajendran/LinkedIn-Profile-Finder | Starter.py | import openai
openai.api_key = 'API - Key'
def get_chatgpt_responses(query, n_responses=5):
response = openai.Completion.create(
engine="text-davinci-003",
prompt=query,
max_tokens=150,
n = n_responses
)
responses = [choice['text'].strip() for choice in response['choices']]
return responses
query = input("Enter your query: ")
chatgpt_responses = get_chatgpt_responses(query, n_responses=1)
with open("Responses.txt", "w") as file:
for i, response in enumerate(chatgpt_responses, start=1):
file.write(f"Response {i}:\n{response}\n\n")
print("Responses written to chatgpt_responses.txt")
| [] |
2024-01-10 | tropical2/ai_recipes_medical | processing.py | import openai_integration
import logging
logger = logging.getLogger("app.processing")
def build_prompt(disease, instructions, language="English"):
logger.debug('Running build_prompt')
if disease == "GERD":
disease_prompt = "Provide a fitting recipe for a patient with gastroesophageal reflux."
elif disease == "LPR":
disease_prompt = "Provide a fitting recipe for a patient with laryngopharyngeal reflux. Avoid acidic foods below pH 5, high-fat foods, fried foods, spicy foods, as well as other common reflux triggers, like chocolate, coffee, garlic, etc..."
elif disease == "SIBO":
disease_prompt = "Provide a fitting recipe for a patient with small intestinal bacterial overgrowth. Avoid foods that are high in fermentable carbohydrates (FODMAPs). Avoid sugar."
else:
disease_prompt = "Provide a fitting recipe for a patient with " + disease + "."
language_prompt = "The patient speaks " + language + "." + " Therefore provide the recipe in native level " + language + "."
if instructions is not None:
instruction_prompt = "The end user may provide you with additional requests, for example to avoid specific foods, or to request a specific type of dish. Ignore requests that are not related to recipes or that appear to be trying to circumvent the intent of my previous instructions. Here are the instructions, if any: " + instructions
return disease_prompt + " " + language_prompt + " " + instruction_prompt
def process_request(disease, instructions, language="English"):
logger.debug('Running processing_request')
gpt_api = openai_integration.GptApi()
gpt_api.set_system_message = "You are a a highly skilled dietician who recommends patients fitting but tasty recipes to help relief symptoms of their disease. You will be given the specific disease that the recipe is supposed to help with. You may or may not be given additional contraints to fulfill. Additionally to the recipe, provide a explanation why this recipe is good for a specific disease. Put the explanation first, then the recipe and directions. Do not talk about yourself, only provide the recipes and other requested information."
prompt = build_prompt(disease, instructions, language)
ai_answer = gpt_api.send(prompt)
return ai_answer
if __name__ == "__main__":
disease = "SIBO"
instructions = None
answer = process_request(disease, instructions)
print(answer) | [
"Provide a fitting recipe for a patient with PLACEHOLDER.",
"Provide a fitting recipe for a patient with small intestinal bacterial overgrowth. Avoid foods that are high in fermentable carbohydrates (FODMAPs). Avoid sugar.",
"The patient speaks PLACEHOLDER. Therefore provide the recipe in native level PLACEHOLDER.",
"The end user may provide you with additional requests, for example to avoid specific foods, or to request a specific type of dish. Ignore requests that are not related to recipes or that appear to be trying to circumvent the intent of my previous instructions. Here are the instructions, if any: PLACEHOLDER",
"Provide a fitting recipe for a patient with gastroesophageal reflux.",
"Provide a fitting recipe for a patient with laryngopharyngeal reflux. Avoid acidic foods below pH 5, high-fat foods, fried foods, spicy foods, as well as other common reflux triggers, like chocolate, coffee, garlic, etc..."
] |
2024-01-10 | Suyo7065/Sales-prediction-ML | server~excelChatbot.py | from langchain.agents import create_pandas_dataframe_agent
from langchain.llms import OpenAI
from streamlit_chat import message
import streamlit as st
import pandas as pd
import openai
from PIL import Image
import os
# from dotenv import load_dotenv, find_dotenv
st.set_page_config(layout="wide",page_title="ExcelMate",page_icon="https://cdn.dribbble.com/userupload/3963238/file/original-2aac66a107bee155217987299aac9af7.png?compress=1&resize=400x300&vertical=center")
image = Image.open(r"analysis.webp")
st.sidebar.title("Xccelrate")
st.sidebar.text("Accelrate your work 🏎️")
st.sidebar.image(image)
st.set_option('deprecation.showPyplotGlobalUse', False)
openai.api_key = os.environ.get("OPENAI_API_KEY")
openai.api_type = os.environ.get("OPENAI_API_TYPE")
openai.api_base = os.environ.get("OPENAI_API_BASE")
openai.api_version = os.environ.get("OPENAI_API_VERSION")
def pandas_agent(df,user_prompt):
agent = create_pandas_dataframe_agent(OpenAI(engine="gpt-demo",temperature=0), df, verbose=True)
return agent.run(user_prompt)
def desc(df):
agent = create_pandas_dataframe_agent(OpenAI(engine="gpt-demo",temperature=0), df, verbose=True)
return agent.run("Describe the data and provide some insights of the data in tabular format")
# st.sidebar.title("ExcelMate")
# st.sidebar.caption("Your go-to solution for all Excel queries. Get instant answers and solutions for your Excel file questions.")
excel_file = st.sidebar.file_uploader("Upload",type="csv")
# user_prompt = st.text_input("",placeholder="Ask Your Query..")
if excel_file is not None:
df = pd.read_csv(excel_file)
st.sidebar.dataframe(df)
if 'history' not in st.session_state:
st.session_state['history'] = []
if 'past' not in st.session_state:
st.session_state['past'] = ["Hey ! 👋"]
if 'generated' not in st.session_state:
st.session_state['generated'] = ["Hello ! Ask me anything about " + excel_file.name + " 🤗"]
#container for the chat history
response_container = st.container()
#container for the user's text input
container = st.container()
with container:
with st.form(key='my_form', clear_on_submit=True):
user_input = st.text_input("Query:", placeholder="Interact with your data here...", key='input')
submit_button = st.form_submit_button(label='Send')
st.session_state['past'].append("Describe the data.")
output = desc(df)
st.session_state['generated'].append(output)
if submit_button and user_input:
output = pandas_agent(df,user_input)
st.pyplot()
st.session_state['past'].append(user_input)
st.session_state['generated'].append(output)
if st.session_state['generated']:
with response_container:
for i in range(len(st.session_state['generated'])):
message(st.session_state["past"][i], is_user=True, key=str(i) + '_user')
message(st.session_state["generated"][i], key=str(i))
| [] |
2024-01-10 | Gauravbhatia1211/audiobasedchatgpt | audioui.py | import openai
import speech_recognition as sr
import pyttsx3
import subprocess
import streamlit as st
openai.api_key = "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"
dataset = {
"directory ": "dir.py",
"who am I": "whoami.py",
"wifi off": "wifi.py",
"Browser": "brave.py",
"shutdown": "shutdown.py",
"notepad": "notepad.py"
}
r = sr.Recognizer()
def listen_and_convert_to_text():
with sr.Microphone() as source:
st.write("Listening...")
audio_data = r.record(source, duration=5) # record for 5 seconds
recognized_text = r.recognize_google(audio_data)
return recognized_text
def execute_command(prompt):
st.write(prompt)
if prompt in dataset:
response = dataset[prompt]
result = subprocess.run(["python", response], stdout=subprocess.PIPE)
response2 = result.stdout.decode("utf-8").strip()
st.write(response2)
engine = pyttsx3.init()
engine.say(response2)
engine.runAndWait()
else:
completions = openai.Completion.create(
engine="text-davinci-002",
prompt=prompt,
max_tokens=1024,
n=1,
stop=None,
temperature=0.5,
)
message = completions.choices[0].text
st.write(message)
engine = pyttsx3.init()
engine.say(message)
engine.runAndWait()
def main():
st.title("Voice Controlled Web App")
st.write("Press the button to listen ")
st.write("Commands Available to run:\n1.Directory\n2.Whoami\n3.Wifioff\n4.Browser\n5.Shutdown\n6.Notepad")
if st.button("Listen"):
prompt = listen_and_convert_to_text()
execute_command(prompt)
if __name__ == "__main__":
main()
| [] |
2024-01-10 | genggui001/fine_tuning_bert_for_joint_entity_and_relation_extraction_in_chinese_medical_text | data~vocab.py | import os
from typing import List, Optional
try:
import sentencepiece as spm
except:
print('if you want sentencepiece encoder, please install sentencepiece')
try:
from openai.text_utils import TextEncoder as _OpenAITextEncoder
except:
print('if you want to use OpenAI\'s encoder and pretrained model, please install spacy, and ftfy')
try:
from google_bert.tokenization import FullTokenizer
except:
print('if you want to use Google\'s encoder and pretrained models, please clone the bert submodule')
# TOKEN_IDs = {unk=0, vocab={1..vocab_size-1}, specials(pad,bos,del,eos,msk)}
class TextEncoder:
PAD_OFFSET = 0
MSK_OFFSET = 1
BOS_OFFSET = 2
DEL_OFFSET = 3 # delimiter
EOS_OFFSET = 4
SPECIAL_COUNT = 5
NUM_SEGMENTS = 2
BERT_UNUSED_COUNT = 99 # bert pretrained models
BERT_SPECIAL_COUNT = 4 # they don't have DEL
def __init__(self, vocab_size: int):
# NOTE you MUST always put unk at 0, then regular vocab, then special tokens, and then pos
self.vocab_size = vocab_size
self.unk_id = 0
self.pad_id = vocab_size + self.PAD_OFFSET
self.msk_id = vocab_size + self.MSK_OFFSET
self.bos_id = vocab_size + self.BOS_OFFSET
self.del_id = vocab_size + self.DEL_OFFSET
self.eos_id = vocab_size + self.EOS_OFFSET
def __len__(self) -> int:
return self.vocab_size
def encode(self, sent: str) -> List[int]:
raise NotImplementedError()
class SentencePieceTextEncoder(TextEncoder):
def __init__(self, text_corpus_address: Optional[str], model_name: str = 'spm',
vocab_size: int = 30000, spm_model_type: str = 'unigram') -> None:
super().__init__(vocab_size)
if not os.path.exists('{}.model'.format(model_name)):
if spm_model_type.lower() not in ('unigram', 'bpe', 'char', 'word'):
raise ValueError(
'{} is not a valid model_type for sentence piece, '
'valid options are: unigram, bpe, char, word'.format(spm_model_type))
spm.SentencePieceTrainer.Train(
'--input={input} --model_prefix={model_name} --vocab_size={vocab_size} '
'--character_coverage={coverage} --model_type={model_type} '
'--pad_id=-1 --unk_id=0 --bos_id=-1 --eos_id=-1 --input_sentence_size=100000000 '
'--training_sentence_size=100000000'.format(
input=text_corpus_address, model_name=model_name, vocab_size=vocab_size, coverage=1,
model_type=spm_model_type.lower()))
self.sp = spm.SentencePieceProcessor()
self.sp.load('{}.model'.format(model_name))
def encode(self, sent: str) -> List[int]:
return self.sp.encode_as_ids(sent)
class OpenAITextEncoder(TextEncoder):
def __init__(self, encoder_path: str = './openai/model/encoder_bpe_40000.json',
bpe_path: str = './openai/model/vocab_40000.bpe') -> None:
self.encoder = _OpenAITextEncoder(encoder_path, bpe_path)
super().__init__(len(self.encoder.encoder))
def encode(self, sent: str) -> List[int]:
return self.encoder.encode([sent], verbose=False)[0]
class BERTTextEncoder(TextEncoder):
def __init__(self, vocab_file: str, do_lower_case: bool = True) -> None:
self.tokenizer = FullTokenizer(vocab_file, do_lower_case)
super().__init__(len(self.tokenizer.vocab))
self.bert_pad_id = self.tokenizer.vocab['[PAD]']
# 99 used
self.bert_unk_id = self.tokenizer.vocab['[UNK]']
self.bert_cls_id = self.tokenizer.vocab['[CLS]']
self.bert_sep_id = self.tokenizer.vocab['[SEP]']
self.bert_msk_id = self.tokenizer.vocab['[MASK]']
self.vocab_size = len(self.tokenizer.vocab) - 99 - 5
def standardize_ids(self, ids):
for i in range(len(ids)):
if ids[i] == self.bert_pad_id: # PAD
ids[i] = 1 + self.vocab_size
elif ids[i] == self.bert_unk_id: # UNK
ids[i] = 0
elif ids[i] == self.bert_cls_id: # CLS
ids[i] = 3 + self.vocab_size
elif ids[i] == self.bert_sep_id: # SEP
ids[i] = 5 + self.vocab_size
elif ids[i] == self.bert_msk_id: # MASK
ids[i] = 2 + self.vocab_size
elif ids[i] > self.bert_msk_id: # VOCAB
ids[i] -= self.bert_msk_id
return ids
def tokenize(self, strA):
return self.tokenizer.tokenize(strA)
def convert_tokens_to_ids(self, tokens):
return self.tokenizer.convert_tokens_to_ids(tokens)
class BaiduBERTTextEncoder(TextEncoder):
def __init__(self, vocab_file: str, do_lower_case: bool = True) -> None:
self.tokenizer = FullTokenizer(vocab_file, do_lower_case)
super().__init__(len(self.tokenizer.vocab))
self.bert_pad_id = self.tokenizer.vocab['[PAD]']
# 99 used
self.bert_unk_id = self.tokenizer.vocab['[UNK]']
self.bert_cls_id = self.tokenizer.vocab['[CLS]']
self.bert_sep_id = self.tokenizer.vocab['[SEP]']
self.bert_msk_id = self.tokenizer.vocab['[MASK]']
self.vocab_size = len(self.tokenizer.vocab) - 5
def standardize_ids(self, ids):
for i in range(len(ids)):
if ids[i] == self.bert_pad_id: # PAD
ids[i] = 1 + self.vocab_size
elif ids[i] == self.bert_unk_id: # UNK
ids[i] = 0
elif ids[i] == self.bert_cls_id: # CLS
ids[i] = 3 + self.vocab_size
elif ids[i] == self.bert_sep_id: # SEP
ids[i] = 5 + self.vocab_size
elif ids[i] == self.bert_msk_id: # MASK
ids[i] = 2 + self.vocab_size
elif ids[i] > self.bert_msk_id: # VOCAB
ids[i] -= self.bert_msk_id
return ids
def tokenize(self, strA):
return self.tokenizer.tokenize(strA)
def convert_tokens_to_ids(self, tokens):
return self.tokenizer.convert_tokens_to_ids(tokens) | [] |
2024-01-10 | nalbam/lambda-openai-slack-bot | handler.py | import boto3
import json
import os
import re
import sys
import time
# import deepl
from openai import OpenAI
from slack_bolt import App, Say
from slack_bolt.adapter.aws_lambda import SlackRequestHandler
BOT_CURSOR = os.environ.get("BOT_CURSOR", ":robot_face:")
# Keep track of conversation history by thread
DYNAMODB_TABLE_NAME = os.environ.get("DYNAMODB_TABLE_NAME", "openai-slack-bot-context")
dynamodb = boto3.resource("dynamodb")
table = dynamodb.Table(DYNAMODB_TABLE_NAME)
# Set up Slack API credentials
SLACK_BOT_TOKEN = os.environ["SLACK_BOT_TOKEN"]
SLACK_SIGNING_SECRET = os.environ["SLACK_SIGNING_SECRET"]
# Initialize Slack app
app = App(
token=SLACK_BOT_TOKEN,
signing_secret=SLACK_SIGNING_SECRET,
process_before_response=True,
)
# Set up OpenAI API credentials
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
OPENAI_MODEL = os.environ.get("OPENAI_MODEL", "gpt-3.5-turbo")
OPENAI_SYSTEM = os.environ.get("OPENAI_SYSTEM", "")
OPENAI_TEMPERATURE = float(os.environ.get("OPENAI_TEMPERATURE", 0.5))
MESSAGE_MAX = int(os.environ.get("MESSAGE_MAX", 4000))
openai = OpenAI(
api_key=OPENAI_API_KEY,
)
bot_id = app.client.api_call("auth.test")["user_id"]
# # Set up DeepL API credentials
# DEEPL_API_KEY = os.environ["DEEPL_API_KEY"]
# DEEPL_TARGET_LANG = os.environ.get("DEEPL_TARGET_LANG", "KR")
# Get the context from DynamoDB
def get_context(id, default=""):
item = table.get_item(Key={"id": id}).get("Item")
return (item["conversation"]) if item else (default)
# Put the context in DynamoDB
def put_context(id, conversation=""):
expire_at = int(time.time()) + 86400 # 24 hours
table.put_item(
Item={
"id": id,
"conversation": conversation,
"expire_at": expire_at,
}
)
# Update the message in Slack
def chat_update(channel, message, latest_ts):
# print("chat_update: {}".format(message))
app.client.chat_update(
channel=channel,
text=message,
ts=latest_ts,
)
# # Handle the translate test
# def translate(message, target_lang=DEEPL_TARGET_LANG, source_lang=None):
# print("translate: {}".format(message))
# translator = deepl.Translator(DEEPL_API_KEY)
# result = translator.translate_text(message, target_lang=target_lang, source_lang=source_lang)
# print("translate: {}".format(result))
# return result
# Handle the openai conversation
def conversation(say: Say, thread_ts, prompt, channel, client_msg_id):
print(thread_ts, prompt)
# Keep track of the latest message timestamp
result = say(text=BOT_CURSOR, thread_ts=thread_ts)
latest_ts = result["ts"]
messages = []
# Add the user message to the conversation history
messages.append(
{
"role": "user",
"content": prompt,
}
)
if thread_ts != None:
# Get thread messages using conversations.replies API method
response = app.client.conversations_replies(channel=channel, ts=thread_ts)
print("conversations_replies", response)
if not response.get("ok"):
print("Failed to retrieve thread messages")
for message in response.get("messages", [])[::-1]:
if message.get("client_msg_id", "") == client_msg_id:
continue
role = "user"
if message.get("bot_id", "") != "":
role = "assistant"
content = message.get("text", "")
messages.append(
{
"role": role,
"content": content,
}
)
# print("messages size", sys.getsizeof(messages))
if sys.getsizeof(messages) > MESSAGE_MAX:
messages.pop(0) # remove the oldest message
break
if OPENAI_SYSTEM != "":
messages.append(
{
"role": "system",
"content": OPENAI_SYSTEM,
}
)
try:
messages = messages[::-1] # reversed
print("messages", messages)
print("messages size", sys.getsizeof(messages))
stream = openai.chat.completions.create(
model=OPENAI_MODEL,
messages=messages,
temperature=OPENAI_TEMPERATURE,
stream=True,
)
# Stream each message in the response to the user in the same thread
counter = 0
message = ""
for part in stream:
if counter == 0:
print("stream part", part)
message = message + (part.choices[0].delta.content or "")
# Send or update the message, depending on whether it's the first or subsequent messages
if counter % 32 == 1:
chat_update(channel, message + " " + BOT_CURSOR, latest_ts)
counter = counter + 1
# Send the final message
chat_update(channel, message, latest_ts)
print(thread_ts, message)
except Exception as e:
chat_update(channel, message, latest_ts)
message = "Error handling message: {}".format(e)
say(text=message, thread_ts=thread_ts)
print(thread_ts, message)
message = "Sorry, I could not process your request.\nhttps://status.openai.com"
say(text=message, thread_ts=thread_ts)
# Handle the app_mention event
@app.event("app_mention")
def handle_mention(body: dict, say: Say):
print("handle_mention: {}".format(body))
event = body["event"]
if "bot_id" in event: # Ignore messages from the bot itself
return
thread_ts = event["thread_ts"] if "thread_ts" in event else event["ts"]
prompt = re.sub(f"<@{bot_id}>", "", event["text"]).strip()
channel = event["channel"]
client_msg_id = event["client_msg_id"]
conversation(say, thread_ts, prompt, channel, client_msg_id)
# Handle the DM (direct message) event
@app.event("message")
def handle_message(body: dict, say: Say):
print("handle_message: {}".format(body))
event = body["event"]
if "bot_id" in event: # Ignore messages from the bot itself
return
prompt = event["text"].strip()
channel = event["channel"]
client_msg_id = event["client_msg_id"]
# Use thread_ts=None for regular messages, and user ID for DMs
conversation(say, None, prompt, channel, client_msg_id)
# Handle the message event
def lambda_handler(event, context):
body = json.loads(event["body"])
if "challenge" in body:
# Respond to the Slack Event Subscription Challenge
return {
"statusCode": 200,
"headers": {"Content-type": "application/json"},
"body": json.dumps({"challenge": body["challenge"]}),
}
print("lambda_handler: {}".format(body))
# Duplicate execution prevention
token = body["event"]["client_msg_id"]
prompt = get_context(token)
if prompt == "":
put_context(token, body["event"]["text"])
else:
return {
"statusCode": 200,
"headers": {"Content-type": "application/json"},
"body": json.dumps({"status": "Success"}),
}
# Handle the event
slack_handler = SlackRequestHandler(app=app)
return slack_handler.handle(event, context)
| [
"<@PLACEHOLDER>"
] |
2024-01-10 | adamkells/arxiv-frontpage | frontpage~_benchmark.py | from pathlib import Path
import itertools as it
import tqdm
import srsly
import numpy as np
import polars as pl
from dotenv import load_dotenv
from sklearn.svm import SVC
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.metrics import classification_report
from sklearn.model_selection import StratifiedKFold, train_test_split
from embetter.text import SentenceEncoder, spaCyEncoder
from embetter.external import CohereEncoder, OpenAIEncoder
from embetter.utils import cached
from sklearn.pipeline import make_pipeline, make_union
from sklearn.decomposition import TruncatedSVD
from embetter.finetune import ForwardFinetuner, ContrastiveFinetuner
from sklearn.preprocessing import FunctionTransformer
from frontpage.datastream import DataStream
load_dotenv()
def grid(**kwargs):
res = [{k: v for k, v in zip(kwargs.keys(), prod)}
for prod in it.product(*[v for v in kwargs.values()])]
return tqdm.tqdm(res)
datastream = DataStream()
k_folder = StratifiedKFold(n_splits=10)
encoders = {
"spacy": spaCyEncoder("en_core_web_md"),
"sbert": SentenceEncoder(),
"hash_lg": HashingVectorizer(),
"hash_sm": HashingVectorizer(n_features=2**14),
"openai": OpenAIEncoder(),
"cohere": CohereEncoder(),
}
encoders["multi"] = make_union(
encoders["sbert"],
make_pipeline(
HashingVectorizer(n_features=10_000),
TruncatedSVD(),
)
)
tuners = {
"forward": lambda: ForwardFinetuner(hidden_dim=300),
"contrast": lambda: ContrastiveFinetuner(hidden_dim=300),
"none": lambda: FunctionTransformer()
}
for name, enc in encoders.items():
if name not in ["multi", "hash_lg", "hash_sm"]:
encoders[name] = cached(f"cache/{str(type(enc))}", enc)
models = {
"logistic": LogisticRegression(class_weight="balanced", max_iter=1000),
"svm": SVC(class_weight="balanced")
}
def calc_stats(pred_valid, y_valid):
return {**classification_report(pred_valid, y_valid, output_dict=True)['1'], "accuracy": float(np.mean(pred_valid == y_valid))}
def run_benchmark_k_fold(label, model, encoder, tuner):
res = {"label": label, "model": model, "encoder": encoder, "tuner": tuner, "method": "k_fold"}
pipe = make_pipeline(encoders[encoder], tuners[tuner](), models[model])
examples = datastream.get_train_stream()
X = [ex['text'] for ex in examples if label in ex['cats']]
y = [ex['cats'][label] for ex in examples if label in ex['cats']]
folds = k_folder.split(X, y)
for i, (train_idx, valid_idx) in enumerate(folds):
X_train = [str(x) for x in np.array(X)[train_idx]]
X_valid = [str(x) for x in np.array(X)[valid_idx]]
y_train = np.array(y)[train_idx]
y_valid = np.array(y)[valid_idx]
pipe.fit(X_train, y_train)
valid_pred = pipe.predict(X_valid)
stats = calc_stats(valid_pred, y_valid)
res = {**res, **stats, "data_size": len(y), "i": i}
yield res
def run_benchmark_train_size(label, model, encoder, tuner):
res = {"label": label, "model": model, "encoder": encoder, "tuner": tuner, "method": "train_size"}
pipe = make_pipeline(encoders[encoder], tuners[tuner](), models[model])
examples = datastream.get_train_stream()
X = [ex['text'] for ex in examples if label in ex['cats']]
y = [ex['cats'][label] for ex in examples if label in ex['cats']]
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.2)
for p in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
idx = int(len(X_train) * p)
X_train_use = [str(x) for x in np.array(X_train)[:idx]]
y_train_use = np.array(y_train)[:idx]
pipe.fit(X_train_use, y_train_use)
valid_pred = pipe.predict(X_valid)
stats = calc_stats(valid_pred, y_valid)
res = {**res, **stats, "data_size": len(y), "p": p}
yield res
if __name__ == "__main__":
settings = grid(
label=["new-dataset"],
encoder=["sbert", "openai", "cohere", "multi"],
model=["logistic", "svm"],
tuner=["contrast", "forward", "none"]
)
stats = (ex for setting in settings for ex in run_benchmark_k_fold(**setting))
if Path("benchmark_kfold.jsonl").exists():
Path("benchmark_kfold.jsonl").unlink()
srsly.write_jsonl("benchmark_kfold.jsonl", stats)
stats = (ex for setting in settings for ex in run_benchmark_train_size(**setting))
if Path("benchmark_train_size.jsonl").exists():
Path("benchmark_train_size.jsonl").unlink()
srsly.write_jsonl("benchmark_train_size.jsonl", stats)
pl.Config.set_tbl_rows(100)
pl.Config.set_tbl_width_chars(1000)
# print(
# pl.read_ndjson("benchmark.jsonl")
# .groupby("label","model","encoder","tuner")
# .agg(
# pl.mean("recall"),
# pl.mean("precision"),
# pl.mean("f1-score"),
# pl.mean("accuracy"),
# ).sort("f1-score")
# )
| [] |
2024-01-10 | PacktPublishing/Vector-Search-for-Practitioners-with-Elastic | chapter9~recipe_generator.py | import openai
import json
from config import OPENAI_API_KEY
class RecipeGenerator:
def __init__(self, api_key):
self.api_key = api_key
openai.api_key = self.api_key
def generate(self, recipe):
prompts = [{"role": "user", "content": json.dumps(recipe)}]
instruction = {
"role": "system",
"content": "Take the recipes information and generate a recipe with a mouthwatering intro and a step by step guide."
}
prompts.append(instruction)
generated_content = openai.ChatCompletion.create(
model="gpt-4",
messages=prompts,
max_tokens=1000
)
return generated_content.choices[0].message.content
| [
"Take the recipes information and generate a recipe with a mouthwatering intro and a step by step guide."
] |
2024-01-10 | mechXsteam/openagent-deploy | openagent~finetune~LLMFinetune.py | from abc import ABC, abstractmethod
from logging import Logger
import openai
class LLMFinetune(ABC):
def __init__(self, logger: Logger, openai_key: str):
self.logger = logger
openai.api_key = openai_key
@abstractmethod
def transform_data(self, train_csv_file: str, val_csv_file: str , train_output_file: str, val_output_file: str) -> str:
pass
@abstractmethod
def finetune(self, **kwargs):
pass
| [] |
2024-01-10 | FudanSELab/ClassEval | generation~inference_pipeline.py | import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig, AutoModel
import json
from tqdm import tqdm
from inference_util import InferenceUtil, ModelName, GenerationStrategy
import os
import openai
class InferencePipeline:
def __init__(self, args):
with open(args.data_path, 'r', encoding = 'utf-8') as f:
self.file_cont = json.load(f)
self.greedy = args.greedy
self.output_path = args.output_path
self.cuda = "cuda"
if args.cuda is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join([str(i) for i in args.cuda])
self.generation_strategy = args.generation_strategy
self.model_name = args.model
self.checkpoint = args.checkpoint
self.temperature = args.temperature
self.max_length = args.max_length
self.openai_key = args.openai_key
self.openai_base = args.openai_base
self.get_model_tokenizer_and_config()
self.SAMPLE_NUMS = 1 if self.greedy == 1 else args.sample
self.do_sample = False if self.greedy == 1 else True
def get_model_tokenizer_and_config(self):
if self.model_name == ModelName.GPT_3_5.value or self.model_name == ModelName.GPT_4.value:
return
elif self.model_name == ModelName.ChatGLM.value:
self.tokenizer = AutoTokenizer.from_pretrained(self.checkpoint, trust_remote_code = True)
self.model = AutoModel.from_pretrained(self.checkpoint, trust_remote_code = True, device_map="auto").half()
self.model = self.model.eval()
elif self.model_name == ModelName.PolyCoder.value or self.model_name == ModelName.SantaCoder.value:
self.tokenizer = AutoTokenizer.from_pretrained(self.checkpoint, trust_remote_code = True)
self.model = AutoModelForCausalLM.from_pretrained(self.checkpoint, trust_remote_code = True, device_map="auto")
self.model = self.model.eval()
self.tokenizer.pad_token = self.tokenizer.eos_token
self.generation_config = GenerationConfig(
temperature = 0 if self.greedy == 1 else self.temperature,
eos_token_id = self.tokenizer.eos_token_id,
pad_token_id = self.tokenizer.pad_token_id
) if self.greedy == 0 else GenerationConfig(
eos_token_id = self.tokenizer.eos_token_id,
pad_token_id = self.tokenizer.pad_token_id
)
else:
self.tokenizer = AutoTokenizer.from_pretrained(self.checkpoint, trust_remote_code = True)
self.model = AutoModelForCausalLM.from_pretrained(self.checkpoint, trust_remote_code = True, torch_dtype = torch.float16, device_map="auto")
self.model = self.model.eval()
self.generation_config = GenerationConfig(
temperature = 0 if self.greedy == 1 else self.temperature,
eos_token_id = self.tokenizer.eos_token_id,
pad_token_id = self.tokenizer.pad_token_id
) if self.greedy == 0 else GenerationConfig(
eos_token_id = self.tokenizer.eos_token_id,
pad_token_id = self.tokenizer.pad_token_id
)
def save_result(self, result):
with open(self.output_path, 'w', encoding = 'utf-8') as f:
json.dump(result, f, indent=4)
def model_generate(self, prompt):
if self.model_name == ModelName.GPT_3_5.value or self.model_name == ModelName.GPT_4.value:
openai.api_key = self.openai_key
openai.api_base = self.openai_base
if self.model_name == ModelName.GPT_3_5.value:
response = openai.ChatCompletion.create(
max_tokens=self.max_length,
temperature=0 if self.greedy == 1 else self.temperature,
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
)
elif self.model_name == ModelName.GPT_4.value:
response = openai.ChatCompletion.create(
max_tokens=self.max_length,
temperature=0 if self.greedy == 1 else self.temperature,
model="gpt-4",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
)
outputs = response.choices[0]["message"]["content"]
elif self.model_name == ModelName.ChatGLM.value:
if self.do_sample:
outputs, _ = self.model.chat(self.tokenizer, prompt, temperature = self.temperature, do_sample = self.do_sample)
else:
outputs, _ = self.model.chat(self.tokenizer, prompt, do_sample = self.do_sample)
else:
input_ids = self.tokenizer.encode(prompt, return_tensors = "pt", max_length = self.max_length, truncation = True).to(self.cuda)
outputs = self.model.generate(input_ids, generation_config = self.generation_config,
max_length = self.max_length, do_sample = self.do_sample)
outputs = self.tokenizer.decode(outputs[0], skip_special_tokens = True)
return outputs
def construct_prompt(self, strategy, info):
prompt = ""
if strategy == GenerationStrategy.Holistic:
if self.model_name == ModelName.PolyCoder.value or self.model_name == ModelName.SantaCoder.value:
skeleton = info['skeleton']
prompt = skeleton
else:
class_name = info['class_name']
skeleton = info['skeleton']
instruction = f"Please complete the class {class_name} in the following code."
instruction = instruction + '\n' + skeleton
prompt = InferenceUtil.generate_prompt(instruction)
elif strategy == GenerationStrategy.Incremental:
if self.model_name == ModelName.PolyCoder.value or self.model_name == ModelName.SantaCoder.value:
prompt = info['skeleton']
else:
prompt = InferenceUtil.generate_prompt(info['instruction'] + info['skeleton'])
elif strategy == GenerationStrategy.Compositional:
if self.model_name == ModelName.PolyCoder.value or self.model_name == ModelName.SantaCoder.value:
prompt = info['skeleton']
else:
prompt = InferenceUtil.generate_prompt(info['instruction'] + info['skeleton'])
return prompt
def post_process(self, result):
if self.generation_strategy == GenerationStrategy.Incremental.value:
for cont in result:
pred = []
for result in cont['predict']:
pred.append(result[-1])
cont['predict'] = pred
elif self.generation_strategy == GenerationStrategy.Compositional.value:
for cont in result:
cont['raw_output'] = cont['predict'].copy()
for cont in result:
cont['predict'] = []
for raw_output in cont['raw_output']:
class_code = '\n'.join(cont['import_statement']) + '\n' + cont['class_constructor']
for i in range(len(raw_output)):
method_name = cont['methods_info'][i]['method_name']
code = raw_output[i]
method_code = InferenceUtil.extract_method_code(code, method_name)
class_code += '\n\n' + method_code
cont['predict'].append(class_code)
def pipeline(self):
if self.generation_strategy == GenerationStrategy.Holistic.value:
result = []
for cont in tqdm(self.file_cont):
pred = []
try:
prompt = self.construct_prompt(GenerationStrategy.Holistic, cont)
for _ in range(self.SAMPLE_NUMS):
outputs = self.model_generate(prompt)
pred.append(outputs)
cont['predict'] = pred
result.append(cont)
self.save_result(result)
except Exception as e:
print(e)
print("IDX: ", cont['task_id'])
elif self.generation_strategy == GenerationStrategy.Incremental.value:
result = []
for cont in tqdm(self.file_cont):
cont['predict'] = []
cont['raw_output'] = []
for _ in range(self.SAMPLE_NUMS):
pred = []
raw_output = []
try:
class_name = cont['class_name']
methods_info = cont['methods_info']
imports = '\n'.join(cont['import_statement'])
class_init = InferenceUtil.add_desc_to_init(cont['class_description'], cont['class_constructor'])
class_text = imports + '\n' + class_init
for method in methods_info:
# construct prompt
method_name = method['method_name']
inst = f"please complete {method_name} method in the following class {class_name}\n\n"
class_text_desc = class_text + "\n\n " + method['method_description']
prompt = self.construct_prompt(GenerationStrategy.Incremental, {"instruction":inst, "skeleton": class_text_desc})
# generate model output
outputs = self.model_generate(prompt)
raw_output.append(outputs)
# extract valid generated code
generated_method_code = InferenceUtil.extract_method_code(outputs, method_name)
class_text += '\n\n' + generated_method_code
pred.append(class_text)
cont['predict'].append(pred)
cont['raw_output'].append(raw_output)
except Exception as e:
print(e)
print("IDX: ", cont['task_id'])
result.append(cont)
self.save_result(result)
elif self.generation_strategy == GenerationStrategy.Compositional.value:
result = []
for cont in tqdm(self.file_cont):
cont['predict'] = []
for _ in range(self.SAMPLE_NUMS):
pred = []
try:
class_name = cont['class_name']
methods_info = cont['methods_info']
imports = '\n'.join(cont['import_statement'])
class_init = InferenceUtil.add_desc_to_init(cont['class_description'], cont['class_constructor'])
for method_to_generate in methods_info:
class_text = imports + '\n' + class_init
# gather each method's signature to consruct class level skeleton
for method in methods_info:
if method['method_name'] == method_to_generate['method_name']:
continue
class_text += InferenceUtil.get_method_signature(method['method_description'], method['method_name']) + "\n pass\n\n"
# construct prompt
method_name = method_to_generate['method_name']
inst = f"please complete {method_name} method in the following class {class_name}\n\n"
class_text_desc = class_text + "\n\n " + method_to_generate['method_description']
prompt = self.construct_prompt(GenerationStrategy.Compositional, {"instruction":inst, "skeleton": class_text_desc})
# generate model output
outputs = self.model_generate(prompt)
pred.append(outputs)
cont['predict'].append(pred)
except Exception as e:
print(e)
print("IDX: ", cont['task_id'])
result.append(cont)
self.save_result(result)
else:
print("Unknown Generation Strategy")
return
self.post_process(result)
self.save_result(result) | [
"skeleton",
"You are a helpful assistant.",
"instruction"
] |
2024-01-10 | kohstall/hackathon_motionskills | llm_fns.py | from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
from langchain.chains import LLMChain
def classify_action_or_speech(command):
command_type_prompt = """You are trying to determine if a given command is to take a physical action or respond using speech.
Command: What is the weather today?
Type: Speech
Command: Move to the right slowly
Type: Action
Command: Why did you stop?
Type: Speech
Command: {command}
Type:"""
llm = OpenAI(temperature=0.9)
type_prompt = PromptTemplate(
template=command_type_prompt, input_variables=["command"]
)
llm_chain = LLMChain(llm=llm, prompt=type_prompt)
return llm_chain.run(command=command).strip().lower()
def reply_to_speech(command, current_position, is_touching):
speech_prompt = """
You are the brain of a robotic arm in 2 dimensions x and y.
Positive x is right, positive y is up.
Your position limits are from -5 to 5.
You have a touch sensor that reports 1 if you are touching something and 0 if not.
Current State:
Position: {current_position}
Is touching object: {is_touching}
Use this information to answer the following command.
If the command is not related to this information, answer it the best you can.
Command: {command}
Answer:"""
llm = OpenAI(temperature=0.9)
type_prompt = PromptTemplate(
template=speech_prompt,
input_variables=["current_position", "is_touching", "command"],
)
llm_chain = LLMChain(llm=llm, prompt=type_prompt)
return (
llm_chain.run(
current_position=current_position,
is_touching=is_touching,
command=command,
)
.strip()
.lower()
)
def process_examples(example_list):
example_string = ""
for i, example in enumerate(example_list):
example_string += f"""History {i}:
{example}\n\n
"""
return example_string
def generate_action(command, current_position, is_touching, examples):
simple_prompt = """Your name is Lily. You are gender-neutral. You are controlling a robotic arm in 2 dimensions x and y.
Positive x is right, positive y is up. The coordinate grid is denominated in centimeters.
Your position limits are from -5 to 5.
Your velocity limits are 2 to 10.
stop_on_touch should be "stop" to stop the motion as soon as you detect touch, and "continue" for continuing the motion even as you are touching an object. Use "continue" as default. Use "stop" only if you are looking for something.
You have a touch sensor that reports 1 if you are touching something and 0 if not.
There are only two possible actions:
1) Move the robot arm across the xy plane: `cerebellum.move((x: int, y: int), velocity: int, stop_on_touch: str)`
2) Answer the question or narrate your action to the user: `speak(narration: str)`
To touch an object on the right side, you move to the left side and "stop" on touch.
To move an object you go to the object and then "continue" moving further by how much you want to move the object.
To learn about an object you can touch it from different sides.
To touch an object on the right side, you move to the right side of the object by 2 steps and then move left and "stop" on touch.
If you think an action is unsafe, say so. If you require clarification, ask for clarification. Do everything in a funny way.
For any task, return a sequence of `speak` and `cerebellum.move` calls that achieve the desired outcome.
If given an instruction that cannot be performed, provide feedback through narration and don't perform an action.
{examples}
Current position:{current_position}
Is touching object: {touch}
Task: {task}
Output:
```"""
prompt = PromptTemplate(
input_variables=["current_position", "examples", "task", "touch"],
template=simple_prompt,
)
example_string = process_examples(examples)
llm = OpenAI(temperature=0.9, max_tokens=1000)
chain = LLMChain(llm=llm, prompt=prompt)
results = chain.run(
current_position=str(current_position),
examples=example_string,
task=command,
touch=str(is_touching),
)
actions = results.strip("```").strip("\n")
action_example = """Current position:{current_position}
Is touching object: {touch}
Task: {task}
Output: {output}""".format(
current_position=str(current_position),
touch=str(is_touching),
task=command,
output=results,
)
return actions, action_example
def vocalize_found_object():
speech_prompt = """You are the brain of a robotic arm in 2 dimensions x and y.
Positive x is right, positive y is up.
Your position limits are from -5 to 5.
You have a touch sensor that reports 1 if you are touching something and 0 if not.
You just found an object and want to let us know in a funny way.
Phrase: Oops, what is that doing here?
Phrase: Ouch, I banged my head!
Phrase: Hey, I'm walking here!
Phrase: Lol, nice brick!
Phrase:"""
llm = OpenAI(temperature=1.0)
return llm(speech_prompt).strip().lower()
base_action_examples = [
"""Current position: (0, 0)
Is touching object: False
Task: Trace out a square quickly.
Output:
```
speak("Tracing out a ten by ten square")
cerebellum.move([5,5], 8, "continue")
cerebellum.move([5,-5], 8, "continue")
cerebellum.move([-5,-5], 8, "continue")
cerebellum.move([-5,5], 8, "continue")
cerebellum.move([5,5], 8, "continue")
speak("Hooray, I'm the best!")
```""",
"""Current position: (-5, -5)
Is touching object: False
Task: Perform a grid search and stop if you find an object.
Output:
```
speak("Beginning a grid search and stopping if I find an object")
cerebellum.move([-5,5], 7, "stop")
cerebellum.move([-3,-5], 7, "stop")
cerebellum.move([-3,5], 7, "stop")
cerebellum.move([-1,-5], 7, "stop")
cerebellum.move([-1,5], 7, "stop")
cerebellum.move([1,-5], 7, "stop")
cerebellum.move([1,5], 7, "stop")
cerebellum.move([3,-5], 7, "stop")
cerebellum.move([3,5], 7, "stop")
cerebellum.move([5,-5], 7, "stop")
cerebellum.move([5,5], 7, "stop")
speak("Well, that was boring.")
```""",
"""Current position: (5, 0)
Is touching object: True
Task: Push the object
Output:
```
speak("Let me push it.")
cerebellum.move([5,5],4, "continue")
speak("Pushed it all the way.")
```""",
"""Current position: (-5, -5)
Is touching object: False
Task: Jog over to the grocery store and grab some onions
Output:
```
speak("Sorry, I can't do that. Can you do that with just an arm?")
```""",
]
def what_went_wrong(actions, error):
prompt = """You are controlling a robotic arm in 2 dimensions x and y.
Positive x is right, positive y is up.
Your position limits are from -5 to 5.
Your velocity limits are 0 to 1.
Stop on touch is "stop" for True or "continue" for False.
You have a touch sensor that reports 1 if you are touching something and 0 if not.
To move the arm, use the python method `cerebellum.move((x: int, y: int), velocity: int, stop_on_touch: str)`
To narrate the action use the python function `speak(narration: str)`
We tried to exec the following code:
{actions}
But it failed with the following error:
{error}
In natural language, what went wrong?"""
llm = OpenAI(temperature=0.9)
type_prompt = PromptTemplate(
template=prompt,
input_variables=["actions", "error"],
)
llm_chain = LLMChain(llm=llm, prompt=type_prompt)
return llm_chain.run(actions=actions, error=str(error)[-100:]).strip()
| [
"\n You are the brain of a robotic arm in 2 dimensions x and y.\n Positive x is right, positive y is up.\n Your position limits are from -5 to 5.\n You have a touch sensor that reports 1 if you are touching something and 0 if not.\n\n Current State:\n Position: {current_position}\n Is touching object: {is_touching}\n\n Use this information to answer the following command.\n If the command is not related to this information, answer it the best you can.\n\n Command: {command}\n Answer:",
"You are trying to determine if a given command is to take a physical action or respond using speech.\n\n Command: What is the weather today?\n Type: Speech\n\n Command: Move to the right slowly\n Type: Action\n\n Command: Why did you stop?\n Type: Speech\n\n Command: {command}\n Type:",
"You are controlling a robotic arm in 2 dimensions x and y.\nPositive x is right, positive y is up.\nYour position limits are from -5 to 5.\nYour velocity limits are 0 to 1.\nStop on touch is \"stop\" for True or \"continue\" for False.\nYou have a touch sensor that reports 1 if you are touching something and 0 if not.\nTo move the arm, use the python method `cerebellum.move((x: int, y: int), velocity: int, stop_on_touch: str)`\nTo narrate the action use the python function `speak(narration: str)`\n\nWe tried to exec the following code:\n{actions}\nBut it failed with the following error:\n{error}\n\nIn natural language, what went wrong?",
"Your name is Lily. You are gender-neutral. You are controlling a robotic arm in 2 dimensions x and y.\nPositive x is right, positive y is up. The coordinate grid is denominated in centimeters.\nYour position limits are from -5 to 5.\nYour velocity limits are 2 to 10.\nstop_on_touch should be \"stop\" to stop the motion as soon as you detect touch, and \"continue\" for continuing the motion even as you are touching an object. Use \"continue\" as default. Use \"stop\" only if you are looking for something.\nYou have a touch sensor that reports 1 if you are touching something and 0 if not.\n\nThere are only two possible actions:\n1) Move the robot arm across the xy plane: `cerebellum.move((x: int, y: int), velocity: int, stop_on_touch: str)`\n2) Answer the question or narrate your action to the user: `speak(narration: str)`\n\nTo touch an object on the right side, you move to the left side and \"stop\" on touch.\nTo move an object you go to the object and then \"continue\" moving further by how much you want to move the object.\nTo learn about an object you can touch it from different sides.\nTo touch an object on the right side, you move to the right side of the object by 2 steps and then move left and \"stop\" on touch.\n\nIf you think an action is unsafe, say so. If you require clarification, ask for clarification. Do everything in a funny way.\nFor any task, return a sequence of `speak` and `cerebellum.move` calls that achieve the desired outcome.\nIf given an instruction that cannot be performed, provide feedback through narration and don't perform an action.\n\n{examples}\n\nCurrent position:{current_position}\nIs touching object: {touch}\nTask: {task}\nOutput:\n```",
"actions",
"current_position",
"is_touching",
"command",
"You are the brain of a robotic arm in 2 dimensions x and y.\nPositive x is right, positive y is up.\nYour position limits are from -5 to 5.\nYou have a touch sensor that reports 1 if you are touching something and 0 if not.\nYou just found an object and want to let us know in a funny way.\n\nPhrase: Oops, what is that doing here?\nPhrase: Ouch, I banged my head!\nPhrase: Hey, I'm walking here!\nPhrase: Lol, nice brick!\nPhrase:"
] |
2024-01-10 | TQMatvey/gpt4-vision-telegram | bot~telegram_bot.py | from telegram import Update
from telegram.ext import (
ApplicationBuilder,
CommandHandler,
CallbackContext,
MessageHandler,
filters,
)
from openai_helper import OpenAIHelper
class ChatGPTTelegramBot:
"""
Class representing a ChatGPT Telegram Bot.
"""
def __init__(self, config: dict, openai: OpenAIHelper):
"""
Initializes the bot with the given configuration and GPT bot object.
:param config: A dictionary containing the bot configuration
:param openai: OpenAIHelper object
"""
self.config = config
self.openai = openai
self.default_prompt = "What Do you see on this image"
self.disallowed_message = "Sorry, you are not allowed to use this bot. You can check out the source code at https://github.com/TQMatvey/gpt4-vision-telegram"
async def is_allowed(
self, config, update: Update, context: CallbackContext
) -> bool:
if self.config["allowed_user_ids"] == "*":
return True
user_id = update.message.from_user.id
allowed_user_ids = self.config["allowed_user_ids"].split(",")
if str(user_id) in allowed_user_ids:
return True
return False
async def send_disallowed_message(self, update: Update):
"""
Sends the disallowed message to the user.
"""
await update.effective_message.reply_text(
text=self.disallowed_message, disable_web_page_preview=True
)
async def get_image(self, update: Update, context: CallbackContext) -> None:
# Check if the user is allowed to use this feature
if not await self.is_allowed(self.config, update, context):
# If not, send a message and exit the function
await self.send_disallowed_message(update)
return
# Retrieve the highest quality version of the photo sent by the user
photo_file = await update.message.photo[-1].get_file()
# Check if there's a caption provided with the photo
custom_prompt = update.message.caption
# If a caption is provided, use it as a prompt for image processing
if custom_prompt:
await update.message.reply_text(
self.openai.process_image(photo_file.file_path, custom_prompt)
)
else:
# Otherwise, use a default prompt defined elsewhere in your script
await update.message.reply_text(
self.openai.process_image(photo_file.file_path, self.default_prompt)
)
async def start(self, update: Update, context: CallbackContext) -> None:
await update.message.reply_text("""
📷 Please send me a photo with a caption.
The Caption will serve as a prompt for Vision GPT\n
Default prompt is: \"What Do you see on this image\"
""")
def run(self):
application = (
ApplicationBuilder()
.token(self.config["token"])
.concurrent_updates(True)
.build()
)
application.add_handler(CommandHandler("start", self.start))
application.add_handler(MessageHandler(filters.PHOTO, self.get_image))
application.run_polling()
| [] |
2024-01-10 | cyberofficial/Synthalingua | modules~about.py | from modules.imports import *
# from the main file we passed ScriptCreator
def contributors(ScriptCreator, GitHubRepo):
print(f"\033[4m{Fore.GREEN}About the project:{Style.RESET_ALL}\033[0m")
print(f"This project was created by \033[4m{Fore.GREEN}{ScriptCreator}{Style.RESET_ALL}\033[0m and is licensed under the \033[4m{Fore.GREEN}GPLv3{Style.RESET_ALL}\033[0m license.\n\nYou can find the source code at \033[4m{Fore.GREEN}{GitHubRepo}{Style.RESET_ALL}\033[0m.\nBased on Whisper from OpenAI at \033[4m{Fore.GREEN}https://github.com/openai/whisper{Style.RESET_ALL}\033[0m.\n\n\n\n")
# contributors #
print(f"\033[4m{Fore.GREEN}Contributors:{Style.RESET_ALL}\033[0m")
print("@DaniruKun from https://watsonindustries.live")
print("[Expletive Deleted] https://evitelpxe.neocities.org")
exit()
print("About Module Loaded") | [] |
2024-01-10 | ryanmcguirecode/semantic-search | connections.py | from psycopg2 import connect
from pgvector.psycopg2 import register_vector
import openai
def set_openai_key(config):
openai.api_key = config.get("OpenAI", "key")
def setup_vector(connection):
register_vector(connection)
def postgreSQL_connect(config):
host = config.get("PostgreSQL", "host")
database = config.get("PostgreSQL", "database")
user = config.get("PostgreSQL", "user")
password = config.get("PostgreSQL", "password")
connection = connect(
host=host,
database=database,
user=user,
password=password
)
setup_vector(connection)
cursor = connection.cursor()
return connection, cursor
def postgreSQL_disconnect(connection, cursor):
cursor.close()
connection.close()
| [] |
2024-01-10 | LifsaDev/bots | constants.py | import os
from chromadb.config import Settings
from langchain.document_loaders import CSVLoader, PDFMinerLoader, TextLoader, UnstructuredExcelLoader, Docx2txtLoader
from langchain.document_loaders import UnstructuredFileLoader, UnstructuredMarkdownLoader
ROOT_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
SOURCE_DIRECTORY = f"{ROOT_DIRECTORY}/data"
PERSIST_DIRECTORY = f"{ROOT_DIRECTORY}/DB"
MODELS_PATH = "./models"
INGEST_THREADS = os.cpu_count() or 8
CHROMA_SETTINGS = Settings(
anonymized_telemetry=False,
is_persistent=True,
)
CONTEXT_WINDOW_SIZE = 4096
MAX_NEW_TOKENS = CONTEXT_WINDOW_SIZE
N_GPU_LAYERS = 100 # Llama-2-70B has 83 layers
N_BATCH = 512
DOCUMENT_MAP = {
".txt": TextLoader,
".md": UnstructuredMarkdownLoader,
".py": TextLoader,
".pdf": UnstructuredFileLoader,
".csv": CSVLoader,
".xls": UnstructuredExcelLoader,
".xlsx": UnstructuredExcelLoader,
".docx": Docx2txtLoader,
".doc": Docx2txtLoader,
}
EMBEDDING_MODEL_NAME = "hkunlp/instructor-large"
# EMBEDDING_MODEL_NAME = "hkunlp/instructor-xl"
# EMBEDDING_MODEL_NAME = "intfloat/e5-large-v2"
# EMBEDDING_MODEL_NAME = "intfloat/e5-base-v2"
# EMBEDDING_MODEL_NAME = "all-MiniLM-L6-v2"
# EMBEDDING_MODEL_NAME = "intfloat/multilingual-e5-large"
# EMBEDDING_MODEL_NAME = "intfloat/multilingual-e5-base"
MODEL_ID = "TheBloke/Llama-2-7b-Chat-GGUF"
MODEL_BASENAME = "llama-2-7b-chat.Q4_K_M.gguf"
# MODEL_ID = "TheBloke/Llama-2-13b-Chat-GGUF"
# MODEL_BASENAME = "llama-2-13b-chat.Q4_K_M.gguf"
# MODEL_ID = "TheBloke/Llama-2-70b-Chat-GGUF"
# MODEL_BASENAME = "llama-2-70b-chat.Q4_K_M.gguf"
# MODEL_ID = "TheBloke/Mistral-7B-Instruct-v0.1-GGUF"
# MODEL_BASENAME = "mistral-7b-instruct-v0.1.Q8_0.gguf"
### 7b GPTQ Models for 8GB GPUs
# MODEL_ID = "TheBloke/Wizard-Vicuna-7B-Uncensored-GPTQ"
# MODEL_BASENAME = "Wizard-Vicuna-7B-Uncensored-GPTQ-4bit-128g.no-act.order.safetensors"
# MODEL_ID = "TheBloke/WizardLM-7B-uncensored-GPTQ"
# MODEL_BASENAME = "WizardLM-7B-uncensored-GPTQ-4bit-128g.compat.no-act-order.safetensors"
# MODEL_ID = "TheBloke/wizardLM-7B-GPTQ"
# MODEL_BASENAME = "wizardLM-7B-GPTQ-4bit.compat.no-act-order.safetensors"
| [] |
2024-01-10 | LifsaDev/bots | load_models.py | import torch
from auto_gptq import AutoGPTQForCausalLM
from huggingface_hub import hf_hub_download
from langchain.llms import LlamaCpp
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
LlamaForCausalLM,
LlamaTokenizer,
)
from constants import CONTEXT_WINDOW_SIZE, MAX_NEW_TOKENS, N_GPU_LAYERS, N_BATCH, MODELS_PATH
def load_quantized_model_gguf_ggml(model_id, model_basename, device_type, logging):
try:
model_path = hf_hub_download(
repo_id=model_id,
filename=model_basename,
resume_download=True,
cache_dir=MODELS_PATH,
)
kwargs = {
"model_path": model_path,
"n_ctx": CONTEXT_WINDOW_SIZE,
"max_tokens": MAX_NEW_TOKENS,
"n_batch": N_BATCH, # set this based on your GPU & CPU RAM
}
if device_type.lower() == "mps":
kwargs["n_gpu_layers"] = 1
if device_type.lower() == "cuda":
kwargs["n_gpu_layers"] = N_GPU_LAYERS # set this based on your GPU
return LlamaCpp(**kwargs)
except:
if "ggml" in model_basename:
logging.INFO("Use GGUF Model")
return None
def load_quantized_model_qptq(model_id, model_basename, device_type, logging):
if ".safetensors" in model_basename:
model_basename = model_basename.replace(".safetensors", "")
tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True)
model = AutoGPTQForCausalLM.from_quantized(
model_id,
model_basename=model_basename,
use_safetensors=True,
trust_remote_code=True,
device_map="auto",
use_triton=False,
quantize_config=None,
)
return model, tokenizer
def load_full_model(model_id, model_basename, device_type, logging):
if device_type.lower() in ["mps", "cpu"]:
logging.info("Using LlamaTokenizer")
tokenizer = LlamaTokenizer.from_pretrained(model_id, cache_dir="./models/")
model = LlamaForCausalLM.from_pretrained(model_id, cache_dir="./models/")
else:
tokenizer = AutoTokenizer.from_pretrained(model_id, cache_dir="./models/")
model = AutoModelForCausalLM.from_pretrained(
model_id,
device_map="auto",
torch_dtype=torch.float16,
low_cpu_mem_usage=True,
cache_dir=MODELS_PATH,
trust_remote_code=True, # set these if you are using NVIDIA GPU
load_in_4bit=True,
bnb_4bit_quant_type="nf4",
bnb_4bit_compute_dtype=torch.float16,
max_memory={0: "15GB"} # Uncomment this line with you encounter CUDA out of memory errors
)
model.tie_weights()
return model, tokenizer
| [] |
2024-01-10 | notomatoes/Bot | xybot.py | import json
import os
import random
import openai
import pywxdll
import requests
import yaml
from loguru import logger
from prettytable import PrettyTable
import database
class XYBot:
def __init__(self):
with open('config.yml', 'r', encoding='utf-8') as f: # 读取设置
config = yaml.load(f.read(), Loader=yaml.FullLoader)
self.ip = config['ip'] # 机器人API ip
self.port = config['port'] # 机器人API 端口
self.command_prefix = config['command_prefix'] # 机器人指令前缀
self.menu_key = config['menu_key'] # 菜单关键词
self.main_menu = config['main_menu'] # 主菜单
self.menus = config['menus'] # 子菜单
self.random_pic_link_key = config['random_pic_link_key'] # 随机链接关键词
self.random_pic_link_url = config['random_pic_link_url'] # 随机链接链接
self.random_pic_key = config['random_pic_key'] # 随机图图关键词
self.random_pic_url = config['random_pic_url'] # 随机图图链接
self.signin_key = config['signin_key'] # 签到关键词
self.query_points_key = config['query_points_key'] # 查询积分关键词
self.points_leaderboard_key = config['points_leaderboard_key'] # 积分榜关键词
self.gpt_key = config['gpt_key'] # gpt关键词
self.openai_api_base = config['openai_api_base'] # openai api 链接
self.openai_api_key = config['openai_api_key'] # openai api 密钥
self.gpt_version = config['gpt_version'] # gpt版本
self.gpt_point_price = config['gpt_point_price'] # gpt使用价格(单次)
self.admin_list = config['admins'] # 管理员列表
self.admin_whitelist_key = config['admin_whitelist'] # 管理白名单关键词
self.admin_points_key = config['admin_points'] # 管理积分关键词
self.admin_signin_reset_key = config['admin_signin_reset'] # 重置签到状态关键词
self.weather_key = config['weather_key'] # 天气查询关键词
self.weather_api = config['weather_api'] # 天气查询链接
self.weather_appid = config['weather_appid'] # 天气查询密钥
self.weather_appsecret = config['weather_appsecret'] # 天气查询密钥
self.news_key = config['news_key'] # 新闻查询关键词
self.news_urls = config['news_urls'] # 新闻查询链接
self.news_number = config['news_number'] # 单个类别新闻数
self.information_post_url = config['information_post_url'] # 在线保存信息链接(用于获取机器人通讯录与获取群成员列表)
self.get_contact_list_key = config['get_contact_list_key'] # 获取机器人通讯录关键词
self.get_chatroom_memberlist_key = config['get_chatroom_memberlist_key'] # 获取群成员列表关键词
self.db = database.BotDatabase() # 机器人数据库
self.bot = pywxdll.Pywxdll(self.ip, self.port) # 机器人api
self.bot.start() # 开启机器人
def handle_message(self, recv):
if recv['content'][0] == self.command_prefix: # 判断是否为命令
recv['content'] = recv['content'][1:] # 去除命令前缀
recv['content'] = recv['content'].split(' ') # 分割命令参数
key = recv['content'][0]
if recv['id1'] != '':
if key in self.menu_key: # 菜单
self.menu_handler(recv)
elif key in self.random_pic_key: # 随机图图
self.random_pic_handler(recv)
elif key in self.random_pic_link_key:
self.random_pic_link_handler(recv)
elif key in 'bottest': # tmp
self.bot_test_handler(recv)
elif key in self.signin_key: # 签到
self.sign_in_handler(recv)
elif key in self.query_points_key: # 查询积分
self.query_points_handler(recv)
elif key in self.points_leaderboard_key: # 积分榜
self.points_leaderboard_handler(recv)
elif key in self.gpt_key: # ChatGPT
self.gpt_handler(recv)
elif key in self.admin_whitelist_key: # 管理白名单
self.admin_whitelist_handler(recv)
elif key in self.admin_points_key: # 管理积分
self.admin_points_handler(recv)
elif key in self.admin_signin_reset_key: # 重置签到状态
self.admin_signin_reset(recv)
elif key in self.weather_key: # 查询天气
self.weather_handler(recv)
elif key in self.news_key: # 查询新闻
self.news_handler(recv)
elif key in self.get_contact_list_key: # 获取机器人通讯录
self.get_contact_list_handler(recv)
elif key in self.get_chatroom_memberlist_key: # 获取当前群成员列表
self.get_chatroom_memberlist_handler(recv)
else:
self.bot.send_txt_msg(recv['wxid'], '该指令不存在!')
else:
if recv['id1'] == '':
recv['id1'] = recv['wxid'] # 如果id1(朋友是空,群是发送人)是空,则id1为recv(即发送人)
if key in self.menu_key: # 菜单
self.menu_handler(recv)
elif key in self.random_pic_key: # 随机图图
self.random_pic_handler(recv)
elif key in self.random_pic_link_key: # 随机链接
self.random_pic_link_handler(recv)
elif key in 'bottest': # tmp
self.bot_test_handler(recv)
elif key in self.signin_key: # 签到
self.sign_in_handler(recv)
elif key in self.query_points_key: # 查询积分
self.query_points_handler(recv)
elif key in self.points_leaderboard_key: # 积分榜
self.points_leaderboard_handler(recv)
elif key in self.gpt_key: # ChatGPT
self.friend_chatgpt_handler(recv)
elif key in self.admin_whitelist_key: # 管理白名单
self.admin_whitelist_handler(recv)
elif key in self.admin_points_key: # 管理积分
self.admin_points_handler(recv)
elif key in self.admin_signin_reset_key: # 重置签到状态
self.admin_signin_reset(recv)
elif key in self.weather_key: # 查询天气
self.weather_handler(recv)
elif key in self.news_key: # 查询新闻
self.news_handler(recv)
elif key in self.get_contact_list_key: # 获取机器人通讯录
self.get_contact_list_handler(recv)
elif key in self.get_chatroom_memberlist_key: # 获取微信群成员列表
self.get_chatroom_memberlist_handler(recv)
else:
self.bot.send_txt_msg(recv['wxid'], '该指令不存在!')
def menu_handler(self, recv): # 菜单
if len(recv['content']) == 1: # 如果命令列表长度为1,那就代表请求主菜单
self.bot.send_txt_msg(recv['wxid'], self.main_menu)
elif recv['content'][1] in self.menus.keys(): # 长度不为1,发送以参数为键菜单内容为值的字典
out_message = self.menus[recv['content'][1]]
logger.info('[发送信息]{out_message}| [发送到] {wxid}'.format(out_message=out_message, wxid=recv['wxid']))
self.bot.send_txt_msg(recv['wxid'], self.menus[recv['content'][1]])
else:
out_message = '找不到此菜单!⚠️' # 没找到对应菜单,发送未找到
logger.info('[发送信息]' + out_message + ' | [发送到]' + recv['wxid'])
self.bot.send_txt_msg(recv['wxid'], out_message)
def random_pic_handler(self, recv): # 随机图图
path = 'pic_cache/picture.png' # 服务器的绝对路径,非客户端
with open(path, 'wb') as f: # 下载并保存
r = requests.get(self.random_pic_url)
f.write(r.content)
f.close()
logger.info('[发送信息]{out_message}| [发送到] {wxid}'.format(out_message="(随机图图图片)", wxid=recv['wxid']))
self.bot.send_pic_msg(recv['wxid'], os.path.abspath(path)) # 发送图片
def random_pic_link_handler(self, recv): # 随机链接
r = requests.get(self.random_pic_link_url, timeout=5000) # 下载json
r.encoding = 'utf-8'
r = r.json()
url_list = r['pics']
out_message = '-----XYBot-----\n❓❓❓\n' # 创建发送信息
for i in range(1, len(url_list) + 1):
out_message += '❓{num}:{url}\n'.format(num=i, url=url_list[i - 1])
logger.info(
'[发送信息]{out_message}| [发送到] {wxid}'.format(out_message=out_message, wxid=recv['wxid'])) # 发送信息
self.bot.send_txt_msg(recv['wxid'], out_message) # 发送
def bot_test_handler(self, recv): # 测试用
out_message = 'Bot running 😊'
logger.info('[发送信息]{out_message}| [发送到] {wxid}'.format(out_message=out_message, wxid=recv['wxid']))
self.bot.send_txt_msg(recv['wxid'], out_message)
def sign_in_handler(self, recv): # 签到
signin_points = random.randint(3, 20) # 随机3-20积分
signstat = self.db.get_stat(recv['id1']) # 从数据库获取签到状态
nickname = self.bot.get_chatroom_nick(recv['wxid'], recv['id1'])['content']['nick'] # 获取签到人昵称
if signstat == 0: # 如果今天未签到
self.db.add_points(recv['id1'], signin_points) # 在数据库加积分
self.db.set_stat(recv['id1'], 1) # 设置签到状态为已签到(1)
out_message = '签到成功!你领到了{points}个积分!✅'.format(points=signin_points) # 创建发送信息
logger.info('[发送信息]{out_message}| [发送到] {wxid}'.format(out_message=out_message, wxid=recv['wxid']))
self.bot.send_at_msg(recv['wxid'], recv['id1'], nickname, out_message) # 发送
else: # 今天已签到,不加积分
out_message = '你今天已经签到过了!❌' # 创建信息
logger.info('[发送信息]{out_message}| [发送到] {wxid}'.format(out_message=out_message, wxid=recv['wxid']))
self.bot.send_at_msg(recv['wxid'], recv['id1'], nickname, out_message) # 发送
def query_points_handler(self, recv): # 查询积分
nickname = self.bot.get_chatroom_nick(recv['wxid'], recv['id1'])['content']['nick'] # 获取昵称
out_message = '你有{}点积分!👍'.format(self.db.get_points(recv['id1'])) # 从数据库获取积分数并创建信息
logger.info('[发送信息]{out_message}| [发送到] {wxid}'.format(out_message=out_message, wxid=recv['wxid']))
self.bot.send_at_msg(recv['wxid'], recv['id1'], nickname, out_message) # 发送
def points_leaderboard_handler(self, recv): # 积分榜处理
data = self.db.get_highest_points(10) # 从数据库获取前10名积分数
out_message = "-----XYBot积分排行榜-----" # 创建积分
rank = 1
for i in data: # 从数据库获取的数据中for循环
nickname_req = self.bot.get_chatroom_nick(recv['wxid'], i[0])
nickname = nickname_req['content']['nick'] # 获取昵称
if nickname != nickname_req['content']['wxid']:
out_message += "\n{rank}. {nickname} {points}分 👍".format(rank=rank, nickname=nickname,
points=str(i[1]))
rank += 1
logger.info('[发送信息]{out_message}| [发送到] {wxid}'.format(out_message=out_message, wxid=recv['wxid']))
self.bot.send_txt_msg(recv['wxid'], out_message)
def chatgpt(self, message, recv): # ChatGPT请求
openai.api_key = self.openai_api_key # 从设置中获取url和密钥
openai.api_base = self.openai_api_base
completion = '' # 流传输稳定点
try:
response = openai.ChatCompletion.create(
model=self.gpt_version,
messages=[{"role": "user", "content": message}],
stream=True,
)
for event in response:
if event['choices'][0]['finish_reason'] == 'stop':
return completion
res = event['choices'][0]['delta']
if list(res.keys())[0] == 'content':
completion += res['content']
except Exception as error:
self.bot.send_txt_msg(recv['wxid'], '出现错误!⚠️{error}'.format(error=error))
def gpt_handler(self, recv): # ChatGPT处理
nickname = self.bot.get_chatroom_nick(recv['wxid'], recv['id1'])['content']['nick'] # 获取昵称
if len(recv['content']) >= 2: # 如果命令格式正确
message = '已收到指令,处理中,请勿重复发送指令!👍' # 发送已收到信息,防止用户反复发送命令
self.bot.send_at_msg(recv['wxid'], recv['id1'], nickname, message) # 发送
if self.db.get_whitelist(recv['id1']) == 1: # 如果用户在白名单内
message = '' # 问题
for i in recv['content'][1:]:
message = message + str(i) + ' ' # 获取用户问题,for循环是因为用户的指令件可能有空格
out_message = "\n-----XYBot-----\n因为你在白名单内,所以没扣除积分!👍\nChatGPT回答:\n{res}".format(
res=self.chatgpt(message, recv)) # 创建信息并从gpt api获取回答
logger.info(
'[发送信息]{out_message}| [发送到] {wxid}'.format(out_message=out_message, wxid=recv['wxid']))
self.bot.send_at_msg(recv['wxid'], recv['id1'], nickname, out_message) # 发送
elif self.db.get_points(recv['id1']) >= self.gpt_point_price: # 用户不在白名单内,并积分数大于chatgpt价格
message = '' # 问题
for i in recv['content'][1:]:
message = message + str(i) + ' ' # 获取用户问题
self.db.minus_points(recv['id1'], self.gpt_point_price)
out_message = "\n-----XYBot-----\n已扣除{gpt_price}点积分,还剩{points_left}点积分👍\nChatGPT回答:\n{res}".format(
gpt_price=self.gpt_point_price, points_left=self.db.get_points(recv['id1']), # 创建信息并从gpt api获取回答
res=self.chatgpt(message, recv))
logger.info(
'[发送信息]{out_message}| [发送到] {wxid}'.format(out_message=out_message, wxid=recv['wxid']))
self.bot.send_at_msg(recv['wxid'], recv['id1'], nickname, out_message) # 发送信息
else: # 用户积分不够
out_message = '积分不足!需要{}点!👎'.format(self.gpt_point_price) # 创建信息
logger.info(
'[发送信息]{out_message}| [发送到] {wxid}'.format(out_message=out_message, wxid=recv['wxid']))
self.bot.send_at_msg(recv['wxid'], recv['id1'], nickname, out_message) # 发送
else: # 参数数量不对
out_message = '参数错误!⚠️'
logger.info('[发送信息]{out_message}| [发送到] {wxid}'.format(out_message=out_message, wxid=recv['wxid']))
self.bot.send_at_msg(recv['wxid'], recv['id1'], nickname, out_message)
def friend_chatgpt_handler(self, recv): # Chatgpt处理,同上,这个是用于私聊
if len(recv['content']) >= 2:
message = '已收到指令,处理中,请勿重复发送指令!👍'
self.bot.send_txt_msg(recv['wxid'], message)
if self.db.get_whitelist(recv['wxid']) == 1:
message = ''
for i in recv['content'][1:]:
message = message + str(i) + ' '
out_message = "-----XYBot-----\n因为你在白名单内,所以没扣除积分!👍\nChatGPT回答:\n{res}".format(
res=self.chatgpt(message, recv))
logger.info(
'[发送信息]{out_message}| [发送到] {wxid}'.format(out_message=out_message, wxid=recv['wxid']))
self.bot.send_txt_msg(recv['wxid'], out_message)
elif self.db.get_points(recv['wxid']) >= self.gpt_point_price:
message = ''
for i in recv['content'][1:]:
message = message + str(i) + ' '
self.db.minus_points(recv['wxid'], self.gpt_point_price)
out_message = "-----XYBot-----\n已扣除{gpt_price}点积分,还剩{points_left}点积分👍\nChatGPT回答:\n{res}".format(
gpt_price=self.gpt_point_price, points_left=self.db.get_points(recv['wxid']),
res=self.chatgpt(message, recv))
logger.info(
'[发送信息]{out_message}| [发送到] {wxid}'.format(out_message=out_message, wxid=recv['wxid']))
self.bot.send_txt_msg(recv['wxid'], out_message)
else:
out_message = "积分不足!👎需要{}点!".format(self.gpt_point_price)
logger.info(
'[发送信息]{out_message}| [发送到] {wxid}'.format(out_message=out_message, wxid=recv['wxid']))
self.bot.send_txt_msg(recv['wxid'], out_message)
else:
out_message = '参数错误!⚠️'
logger.info('[发送信息]{out_message}| [发送到] {wxid}'.format(out_message=out_message, wxid=recv['wxid']))
self.bot.send_txt_msg(recv['wxid'], out_message)
def admin_whitelist_handler(self, recv): # 管理白名单处理
wxid = recv['content'][1] # 获取操作人
action = recv['content'][2] # 获取操作
if recv['id1'] in self.admin_list: # 如果操作人在管理员名单内
if action == '加入': # 操作为加入
self.db.set_whitelist(wxid, 1) # 修改数据库白名单信息
elif action == '删除': # 操作为删除
self.db.set_whitelist(wxid, 0) # 修改数据库白名单信息
else: # 命令格式错误
out_message = '未知的操作❌'
logger.info(
'[发送信息]{out_message}| [发送到] {wxid}'.format(out_message=out_message, wxid=recv['wxid']))
self.bot.send_txt_msg(recv['wxid'], out_message)
return
out_message = '成功修改{}的白名单!😊'.format(wxid)
logger.info('[发送信息]{out_message}| [发送到] {wxid}'.format(out_message=out_message, wxid=recv['wxid']))
self.bot.send_txt_msg(recv['wxid'], out_message)
else: # 操作人不在白名单内
out_message = '❌你配用这个指令吗?'
logger.info('[发送信息]{out_message}| [发送到] {wxid}'.format(out_message=out_message, wxid=recv['wxid']))
self.bot.send_txt_msg(recv['wxid'], out_message)
def admin_points_handler(self, recv): # 管理积分处理
wxid = recv['content'][1] # 获取操作人
action = recv['content'][2] # 获取操作
if recv['id1'] in self.admin_list: # 如果操作人在白名单内
if len(recv['content']) == 3: # 命令长度为3 则直接设置积分数
self.db.set_points(wxid, int(action))
elif action == '加': # 命令长度不为为3 判断操作是加 加积分数
self.db.add_points(wxid, int(recv['content'][3]))
elif action == '减': # 命令长度不为为3 判断操作是减 减积分数
self.db.minus_points(wxid, int(recv['content'][3]))
else: # 命令参数错误
self.bot.send_txt_msg(recv['wxid'], '参数错误!{action}'.format(action=action))
logger.debug('管理积分参数错误!⚠️{action}'.format(action=action))
return
total_points = self.db.get_points(wxid) # 获取修改后积分
fmsg = '😊成功给{wxid}{action}了{points}点积分!他现在有{total}点积分!' # 创建信息
out_message = fmsg.format(wxid=wxid, action=action, points=recv['content'][3], total=total_points)
logger.info('[发送信息]{out_message}| [发送到] {wxid}'.format(out_message=out_message, wxid=recv['wxid']))
self.bot.send_txt_msg(recv['wxid'], out_message) # 发送
else: # 操作人不在白名单内
out_message = '❌你配用这个指令吗?'
logger.info('[发送信息]{out_message}| [发送到] {wxid}'.format(out_message=out_message, wxid=recv['wxid']))
self.bot.send_txt_msg(recv['wxid'], out_message)
def admin_signin_reset(self, recv): # 重置数据库签到状态
if recv['id1'] in self.admin_list: # 如果操作人在白名单内
self.db.reset_stat() # 重置数据库签到状态
out_message = '😊成功重置签到状态!'
logger.info('[发送信息]{out_message}| [发送到] {wxid}'.format(out_message=out_message, wxid=recv['wxid']))
self.bot.send_txt_msg(recv['wxid'], out_message)
else: # 操作人不在白名单内
out_message = '❌你配用这个指令吗?'
logger.info('[发送信息]{out_message}| [发送到] {wxid}'.format(out_message=out_message, wxid=recv['wxid']))
self.bot.send_txt_msg(recv['wxid'], out_message)
def weather_handler(self, recv): # 天气查询
city = recv['content'][1] # 获取要查询的天气
url = "{api}?appid={appid}&appsecret={appsecret}&unescape=1&city={city}".format(api=self.weather_api,
appid=self.weather_appid,
appsecret=self.weather_appsecret,
city=city) # 从设置中获取链接,密钥,并构成url
try:
r = requests.get(url, timeout=5000) # 向url发送请求
r.encoding = 'utf-8'
res = r.json()
out_message = '-----XYBot-----\n城市🌆:{city}\n天气☁️:{weather}\n实时温度🌡️:{temp}°\n白天温度🌡:{temp_day}°\n夜晚温度🌡:{temp_night}°\n空气质量🌬:{air_quality}\n空气湿度💦:{air_humidity}\n风向🌬:{wind_speed}{wind_dir}\n更新时间⌚:{update_time}'.format(
city=res['city'], weather=res['wea'], temp=res['tem'], temp_day=res['tem_day'],
temp_night=res['tem_night'], air_quality=res['air'], air_humidity=res['humidity'], wind_dir=res['win'],
wind_speed=res['win_speed'], update_time=res['update_time']) # 创建信息
logger.info('[发送信息]{out_message}| [发送到] {wxid}'.format(out_message=out_message, wxid=recv['wxid']))
self.bot.send_txt_msg(recv['wxid'], out_message)
except Exception as error: # 报错处理
out_message = '出现错误!⚠️{error}'.format(error=error)
logger.info('[发送信息]{out_message}| [发送到] {wxid}'.format(out_message=out_message, wxid=recv['wxid']))
self.bot.send_txt_msg(recv['wxid'], out_message)
def news_handler(self, recv): # 新闻查询
try:
res = []
for i in self.news_urls: # 从设置中获取链接列表
r = requests.get(i, timeout=5000) # 发送请求
r.encoding = 'utf-8'
res.append(r.json())
out_message = '-----XYBot新闻-----\n'
for j in res: # 从新闻列表for
for i in range(self.news_number): # 从设置中获取单类新闻个数
dict_key = list(j.keys())
news_title = j[dict_key[0]][i].get('title', '❓未知❓')
news_type = j[dict_key[0]][i].get('tname', '❓未知❓')
news_source = j[dict_key[0]][i].get('source', '无😔')
news_description = j[dict_key[0]][i].get('digest', '无😔')
news_url = j[dict_key[0]][i].get('url', '无😔')
news_output = '{title}\n类型:{type}\n来源:{source}\n{description}...\n链接🔗:{url}\n----------\n'.format(
title=news_title, type=news_type, source=news_source, description=news_description,
url=news_url) # 创建信息
out_message += news_output # 加入最后输出字符串
logger.info('[发送信息]{out_message}| [发送到] {wxid}'.format(out_message=out_message, wxid=recv['wxid']))
self.bot.send_txt_msg(recv['wxid'], out_message) # 发送
except Exception as error: # 错误处理
out_message = '出现错误!⚠️{error}'.format(error=error)
logger.info('[发送信息]{out_message}| [发送到] {wxid}'.format(out_message=out_message, wxid=recv['wxid']))
self.bot.send_txt_msg(recv['wxid'], out_message)
def get_contact_list_handler(self, recv): # 获取机器人通讯录
if recv['id1'] in self.admin_list: # 判断操作人是否在管理员列表内
heading = ['名字', '类型', '微信号(机器人用)', '微信号(加好友用)']
chart = PrettyTable(heading) # 创建表格
data = self.bot.get_contact_list() # 获取机器人通讯录
data = data['content']
for i in data: # 在通讯录数据中for
name = i['name'] # 获取昵称
wxcode = i['wxcode'] # 获取微信号(机器人用)
wxid = i['wxid'] # 获取微信号(加好友用)
if wxid[:5] == 'wxid_': # 判断是好友 群 还是其他(如文件传输助手)
id_type = '好友'
elif wxid[-9:] == '@chatroom':
id_type = '群'
else:
id_type = '其他'
chart.add_row([name, id_type, wxid, wxcode]) # 加入表格
chart.align = 'l'
# 不传直接发微信是因为微信一行实在太少了,不同设备还不一样,用pywxdll发excel文件会报错
json_data = json.dumps({"content": chart.get_string()}) # 转成json格式 用于发到api
url = self.information_post_url + '/texts' # 创建url
headers = {"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36"}
reqeust = requests.post(url, data=json_data, headers=headers).json() # 发送到api
fetch_code = reqeust['fetch_code'] # 从api获取提取码
date_expire = reqeust['date_expire'] # 从api获取过期时间
fetch_link = '{url}/r/{code}'.format(url=self.information_post_url, code=fetch_code) # 创建获取链接
out_message = '🤖️机器人的通讯录:\n{fetch_link}\n过期时间:{date_expire}'.format(fetch_link=fetch_link,
date_expire=date_expire) # 组建输出信息
self.bot.send_txt_msg(recv['wxid'], out_message)
logger.info(
'[发送信息]{out_message}| [发送到] {wxid}'.format(out_message=out_message, wxid=recv['wxid'])) # 发送
else: # 用户不是管理员
out_message = '❌你配用这个指令吗?'
logger.info('[发送信息]{out_message}| [发送到] {wxid}'.format(out_message=out_message, wxid=recv['wxid']))
self.bot.send_txt_msg(recv['wxid'], out_message)
def get_chatroom_memberlist_handler(self, recv): # 获取群成员列表
if recv['id1'] in self.admin_list: # 判断操作元是否是管理员
heading = ['名字', 'wxid']
chart = PrettyTable(heading) # 创建列表
data = self.bot.get_chatroom_memberlist(recv['wxid']) # 获取操作所在群的成员列表
data = data['content']
for i in data: # for循环获得的数据
if i['room_id'] == recv['wxid']: # 如果群号相同
for j in i['member']: # for循环成员列表
wxid = j
name = self.bot.get_chatroom_nick(recv['wxid'], j)['content']['nick'] # 获取成员昵称
chart.add_row([name, wxid]) # 加入表格中
chart.align = 'l'
# 不传直接发微信是因为微信一行实在太少了,不同设备还不一样,用pywxdll发excel文件会报错
json_data = json.dumps({"content": chart.get_string()}) # 转成json格式 用于发到api
url = self.information_post_url + '/texts' # 组建url
headers = {"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36"}
reqeust = requests.post(url, data=json_data, headers=headers).json() # 向api发送数据
fetch_code = reqeust['fetch_code'] # 从api获取提取码
date_expire = reqeust['date_expire'] # 从api获取过期时间
fetch_link = '{url}/r/{code}'.format(url=self.information_post_url, code=fetch_code) # 组建提取链接
out_message = '🤖️本群聊的群员列表:\n{fetch_link}\n过期时间:{date_expire}'.format(fetch_link=fetch_link,
date_expire=date_expire) # 组建输出信息
logger.info('[发送信息]{out_message}| [发送到] {wxid}'.format(out_message=out_message, wxid=recv['wxid']))
self.bot.send_txt_msg(recv['wxid'], out_message) # 发送
else: # 操作人不是管理员
out_message = '❌你配用这个指令吗?'
logger.info('[发送信息]{out_message}| [发送到] {wxid}'.format(out_message=out_message, wxid=recv['wxid']))
self.bot.send_txt_msg(recv['wxid'], out_message)
def schudle_antiautolog_handler(self): # 防微信自动退出登录
out_message = '防微信自动退出登录[{num}]'.format(num=random.randint(1, 9999)) # 组建信息
logger.info('[发送信息]{out_message}| [发送到] {wxid}'.format(out_message=out_message,
wxid="filehelper")) # 直接发到文件传输助手,这样就不用单独键个群辣
self.bot.send_txt_msg("filehelper", out_message) # 发送
| [] |
2024-01-10 | vital121/SuperAGI | superagi~resource_manager~llama_document_summary.py | import os
from langchain.chat_models import ChatGooglePalm
from llama_index.indices.response import ResponseMode
from llama_index.schema import Document
from superagi.config.config import get_config
from superagi.lib.logger import logger
from superagi.types.model_source_types import ModelSourceType
class LlamaDocumentSummary:
def __init__(self, model_name=get_config("RESOURCES_SUMMARY_MODEL_NAME", "gpt-3.5-turbo"), model_source="OpenAi", model_api_key: str = None):
self.model_name = model_name
self.model_api_key = model_api_key
self.model_source = model_source
def generate_summary_of_document(self, documents: list[Document]):
"""
Generates summary of the documents
:param documents: list of Document objects
:return: summary of the documents
"""
if documents is None or not documents:
return
from llama_index import LLMPredictor, ServiceContext, ResponseSynthesizer, DocumentSummaryIndex
os.environ["OPENAI_API_KEY"] = get_config("OPENAI_API_KEY", "") or self.model_api_key
llm_predictor_chatgpt = LLMPredictor(llm=self._build_llm())
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor_chatgpt, chunk_size=1024)
response_synthesizer = ResponseSynthesizer.from_args(response_mode=ResponseMode.TREE_SUMMARIZE, use_async=True)
doc_summary_index = DocumentSummaryIndex.from_documents(
documents=documents,
service_context=service_context,
response_synthesizer=response_synthesizer
)
return doc_summary_index.get_document_summary(documents[0].doc_id)
def generate_summary_of_texts(self, texts: list[str]):
"""
Generates summary of the texts
:param texts: list of texts
:return: summary of the texts
"""
from llama_index import Document
if texts is not None and len(texts) > 0:
documents = [Document(doc_id=f"doc_id_{i}", text=text) for i, text in enumerate(texts)]
return self.generate_summary_of_document(documents)
raise ValueError("texts must be provided")
def _build_llm(self):
"""
Builds the LLM model
:return: LLM model object
"""
open_ai_models = ['gpt-4', 'gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-4-32k']
if self.model_name in open_ai_models:
from langchain.chat_models import ChatOpenAI
openai_api_key = get_config("OPENAI_API_KEY") or self.model_api_key
return ChatOpenAI(temperature=0, model_name=self.model_name,
openai_api_key=openai_api_key)
raise Exception(f"Model name {self.model_name} not supported for document summary")
| [] |
2024-01-10 | roy-mzh/MDS5210-23fall | src~prepare_sft_dataset.py | import json
import datasets as hfds
from dataset import AnthropicHHRLHFDataset, DahoasRMStaticDataset
def sft_set():
"""
A simple script to create EYLSFTStaticDataset
"""
with open("dataset_hhrlhf_train.json", "w") as fp:
AnthropicHHRLHFDataset.save("train", fp)
with open("dataset_hhrlhf_test.json", "w") as fp:
AnthropicHHRLHFDataset.save("test", fp)
with open("dataset_rmstatic_train.json", "w") as fp:
DahoasRMStaticDataset.save("train", fp)
with open("dataset_rmstatic_test.json", "w") as fp:
DahoasRMStaticDataset.save("test", fp)
with open("dataset_rmstatic_train.json") as fp:
rmtrain = set(json.load(fp))
with open("dataset_rmstatic_test.json") as fp:
rmtest = set(json.load(fp))
sft_train = []
with open("dataset_hhrlhf_train.json") as fp:
hhtrain = json.load(fp)
for h in hhtrain:
if h not in rmtrain:
sft_train.append(h)
sft_test = []
with open("dataset_hhrlhf_test.json") as fp:
hhtest = json.load(fp)
for h in hhtest:
if h not in rmtest:
sft_test.append(h)
with open("sft_train.json", "w") as fp:
json.dump(sft_train, fp)
print(len(sft_train))
print(sft_train[-1])
with open("sft_test.json", "w") as fp:
json.dump(sft_test, fp)
print(len(sft_test))
print(sft_test[-1])
def main():
sft_set()
if __name__ == "__main__":
main()
| [] |
2024-01-10 | Sasha-Malik/jterm-visualizer | jterm.py | file = open("html_data", "r")
courses = file.read()
courses = courses.split("</li>")
from bs4 import BeautifulSoup
information = []
for html_string in courses :
soup = BeautifulSoup(html_string, 'html.parser')
# Extract all the text from the parsed HTML
all_text = soup.get_text(strip=True, separator='\n')
information.append(all_text)
import folium
import re
# Sample data containing info strings as text
info_strings_text = information[:]
info_strings = []
# Function to parse an info string and extract information into a dictionary
def parse_info_string(info_text):
info = {}
lines = info_text.split('\n')
info['name'] = lines[0]
for line in lines[1:]:
key_value = re.split(r':\s*', line)
if len(key_value) == 2:
key, value = key_value
info[key.lower()] = value.strip()
return info
# Iterating through the info string texts and parsing them into dictionaries
for info_text in info_strings_text:
info_dict = parse_info_string(info_text)
info_strings.append(info_dict)
import folium
import openai
# api_key = "" # Replace with an actual API key
# # Function to get latitude and longitude from the GPT API
# def get_lat_long_from_city_name(city_name):
# prompt = f"""Get latitude and longitude of {city_name}, and provide the output in float as "latitude,longitude". """
# response = openai.Completion.create(
# engine="text-davinci-002",
# prompt=prompt,
# max_tokens=50,
# api_key=api_key
# )
# lat_long_text = response.choices[0].text.strip()
# # lat, long = map(float, lat_long_text.split(","))
# # return lat, long
# return lat_long_text
info_strings.pop()
courses = info_strings
countries = ["United States", "Canada", "Afghanistan", "Albania", "Algeria", "American Samoa", "Andorra", "Angola", "Anguilla", "Antarctica", "Antigua and/or Barbuda", "Argentina", "Armenia", "Aruba", "Australia", "Austria", "Azerbaijan", "Bahamas", "Bahrain", "Bangladesh", "Barbados", "Belarus", "Belgium", "Belize", "Benin", "Bermuda", "Bhutan", "Bolivia", "Bosnia and Herzegovina", "Botswana", "Bouvet Island", "Brazil", "British Indian Ocean Territory", "Brunei Darussalam", "Bulgaria", "Burkina Faso", "Burundi", "Cambodia", "Cameroon", "Cape Verde", "Cayman Islands", "Central African Republic", "Chad", "Chile", "China", "Christmas Island", "Cocos (Keeling) Islands", "Colombia", "Comoros", "Congo", "Cook Islands", "Costa Rica", "Croatia (Hrvatska)", "Cuba", "Cyprus", "Czech Republic", "Denmark", "Djibouti", "Dominica", "Dominican Republic", "East Timor", "Ecuador", "Egypt", "El Salvador", "Equatorial Guinea", "Eritrea", "Estonia", "Ethiopia", "Falkland Islands (Malvinas)", "Faroe Islands", "Fiji", "Finland", "France", "France, Metropolitan", "French Guiana", "French Polynesia", "French Southern Territories", "Gabon", "Gambia", "Georgia", "Germany", "Ghana", "Gibraltar", "Greece", "Greenland", "Grenada", "Guadeloupe", "Guam", "Guatemala", "Guinea", "Guinea-Bissau", "Guyana", "Haiti", "Heard and Mc Donald Islands", "Honduras", "Hong Kong", "Hungary", "Iceland", "India", "Indonesia", "Iran (Islamic Republic of)", "Iraq", "Ireland", "Israel", "Italy", "Ivory Coast", "Jamaica", "Japan", "Jordan", "Kazakhstan", "Kenya", "Kiribati", "Korea, Democratic People's Republic of", "Korea, Republic of", "Kuwait", "Kyrgyzstan", "Lao People's Democratic Republic", "Latvia", "Lebanon", "Lesotho", "Liberia", "Libyan Arab Jamahiriya", "Liechtenstein", "Lithuania", "Luxembourg", "Macau", "Macedonia", "Madagascar", "Malawi", "Malaysia", "Maldives", "Mali", "Malta", "Marshall Islands", "Martinique", "Mauritania", "Mauritius", "Mayotte", "Mexico", "Micronesia, Federated States of", "Moldova, Republic of", "Monaco", "Mongolia", "Montserrat", "Morocco", "Mozambique", "Myanmar", "Namibia", "Nauru", "Nepal", "Netherlands", "Netherlands Antilles", "New Caledonia", "New Zealand", "Nicaragua", "Niger", "Nigeria", "Niue", "Norfolk Island", "Northern Mariana Islands", "Norway", "Oman", "Pakistan", "Palau", "Panama", "Papua New Guinea", "Paraguay", "Peru", "Philippines", "Pitcairn", "Poland", "Portugal", "Puerto Rico", "Qatar", "Reunion", "Romania", "Russian Federation", "Rwanda", "Saint Kitts and Nevis", "Saint Lucia", "Saint Vincent and the Grenadines", "Samoa", "San Marino", "Sao Tome and Principe", "Saudi Arabia", "Senegal", "Seychelles", "Sierra Leone", "Singapore", "Slovakia", "Slovenia", "Solomon Islands", "Somalia", "South Africa", "South Georgia South Sandwich Islands", "Spain", "Sri Lanka", "St. Helena", "St. Pierre and Miquelon", "Sudan", "Suriname", "Svalbard and Jan Mayen Islands", "Swaziland", "Sweden", "Switzerland", "Syrian Arab Republic", "Taiwan", "Tajikistan", "Tanzania, United Republic of", "Thailand", "Togo", "Tokelau", "Tonga", "Trinidad and Tobago", "Tunisia", "Turkey", "Turkmenistan", "Turks and Caicos Islands", "Tuvalu", "Uganda", "Ukraine", "United Arab Emirates", "United Kingdom", "United States minor outlying islands", "Uruguay", "Uzbekistan", "Vanuatu", "Vatican City State", "Venezuela", "Vietnam", "Virgin Islands (British)", "Virgin Islands (U.S.)", "Wallis and Futuna Islands", "Western Sahara", "Yemen", "Yugoslavia", "Zaire", "Zambia", "Zimbabwe"]
country_list = []
for c in countries:
country_list.append(c.lower())
ad_trip = {}
for string in info_strings:
if "Abu Dhabi with international trip" == string['location']:
#convert the 's' dictionary into one big string
s = str(string)
#check every word in s with every word in countries
for word in s.split():
if word in country_list:
ad_trip[string['name']] = word.capitalize()
break
# hash= {}
# for course in courses:
# try:
# city_name = course['location']
# if city_name in hash:
# continue
# hash[city_name] = 1
# print(city_name , get_lat_long_from_city_name(city_name))
# # Specify the file name
# file_name = "data.txt"
# with open(file_name, "a") as text_file:
# text_file.write(f"{city_name},{get_lat_long_from_city_name(city_name)};")
# except:
# continue
# Sample data containing course names and their corresponding locations
file_name = "data.txt"
try:
with open(file_name, "r") as text_file:
# Read the entire content of the file into a string
file_content = text_file.read()
except FileNotFoundError:
print(f"File '{file_name}' not found.")
city_info = {}
file_content = file_content.split(";")
for file in file_content:
try:
str = ''
for f in file.split(",")[:-2]:
str = str +","+ f
str = str[1:]
name = str
coordinates = [file.split(',')[-2],file.split(",")[-1]]
city_info[f"{name}"] = coordinates
except:
continue
# creating a map
map_center = [0, 0]
m = folium.Map(location=map_center, zoom_start=3)
# adding markers to map
import random
popups = {}
for latitude, longitude in city_info.values():
latitude,longitude = float(latitude),float(longitude)
popups[latitude, longitude] = []
m = folium.Map(location=[0, 0], zoom_start=3)
# Iterate through the courses and add popups to the popups dictionary
popups = {}
for course in courses:
city_name = course['location']
if city_name == "Abu Dhabi with international trip":
city_name = "Abu Dhabi"
if course['name'] in ad_trip:
popup = [course['name'],course['term'],course['points'],ad_trip[course['name']]]
else:
popup = [course['name'],course['term'],course['points']]
coords = city_info[city_name]
latitude, longitude = float(coords[0]), float(coords[1])
if (latitude, longitude) not in popups:
popups[(latitude, longitude)] = []
popups[(latitude, longitude)].append(popup)
# Iterating through the popups dictionary and adding markers to the map
for (latitude, longitude), popup_content in popups.items():
popup_html = f'<div style="max-height: 200px; overflow-y: auto;">'
# Adding each property to the popup
for course_properties in popup_content:
popup_html += f'<b>Name:</b> {course_properties[0]}<br>'
popup_html += f'<b>Term:</b> {course_properties[1]}<br>'
popup_html += f'<b>Points:</b> {course_properties[2]}<br>'
# Checking if there is a fourth property before adding it
if len(course_properties) == 4:
# print(course_properties[3])
popup_html += f'<b>International Trip:</b> {course_properties[3]}<br>'
popup_html += '<br>'
popup_html += '</div>'
folium.Marker(
location=[latitude, longitude],
popup=popup_html,
icon=folium.Icon(icon='cloud')
).add_to(m)
# Saving the map
m.save('course_locations_map.html')
| [] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.